Rehash efficiency

Former-commit-id: fab383156626ec683881101c22eb2f6c2cea4c5d
John Sully 2020-08-15 23:05:56 +00:00
parent 07c019fd3d
commit 34937b0ad5
5 changed files with 19 additions and 11 deletions

View File

@@ -430,7 +430,7 @@ bool redisDbPersistentData::syncDelete(robj *key)
         auto itr = m_pdbSnapshot->find_cached_threadsafe(szFromObj(key));
         if (itr != nullptr)
         {
-            sds keyTombstone = sdsdup(szFromObj(key));
+            sds keyTombstone = sdsdupshared(itr.key());
             uint64_t hash = dictGetHash(m_pdict, keyTombstone);
             if (dictAdd(m_pdictTombstone, keyTombstone, (void*)hash) != DICT_OK)
                 sdsfree(keyTombstone);
@@ -2300,7 +2300,7 @@ void redisDbPersistentData::initialize()
 {
     m_pdbSnapshot = nullptr;
     m_pdict = dictCreate(&dbDictType,this);
-    m_pdictTombstone = dictCreate(&dbDictTypeTombstone,this);
+    m_pdictTombstone = dictCreate(&dbTombstoneDictType,this);
     m_setexpire = new(MALLOC_LOCAL) expireset();
     m_fAllChanged = 0;
     m_fTrackingChanges = 0;
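
Note on the syncDelete() hunk: sdsdup() copies the key bytes into a fresh allocation every time a tombstone is recorded, while KeyDB's sdsdupshared() hands back a shared reference to the snapshot's existing key string (itr.key()), making the tombstone insert O(1) in the key length. A minimal sketch of the cost difference, using a toy refcounted string rather than the real sds header layout:

    #include <cstdint>
    #include <cstdlib>
    #include <cstring>

    // Toy stand-in for a shared sds string (illustrative only).
    struct shared_str {
        uint32_t refcount;
        size_t len;
        char buf[1];    // allocated with len extra bytes incl. NUL
    };

    // The sdsdupshared() idea: O(1), share the bytes, bump the count.
    shared_str *dup_shared(shared_str *s) {
        s->refcount++;
        return s;
    }

    // The sdsdup() idea: O(len), allocate and copy every byte.
    shared_str *dup_copy(const shared_str *s) {
        shared_str *d = (shared_str*)malloc(sizeof(shared_str) + s->len);
        d->refcount = 1;
        d->len = s->len;
        memcpy(d->buf, s->buf, s->len + 1);
        return d;
    }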

View File

@@ -326,7 +326,7 @@ int dictRehashMilliseconds(dict *d, int ms) {
 static void _dictRehashStep(dict *d) {
     unsigned long iterators;
     __atomic_load(&d->iterators, &iterators, __ATOMIC_RELAXED);
-    if (iterators == 0) dictRehash(d,10);
+    if (iterators == 0) dictRehash(d,2);
 }
 
 /* Add an element to the target hash table */
@@ -1220,7 +1220,9 @@ void dictGetStats(char *buf, size_t bufsize, dict *d) {
 void dictForceRehash(dict *d)
 {
-    while (dictIsRehashing(d)) _dictRehashStep(d);
+    unsigned long iterators;
+    __atomic_load(&d->iterators, &iterators, __ATOMIC_RELAXED);
+    while (iterators == 0 && dictIsRehashing(d)) _dictRehashStep(d);
 }
 
 /* ------------------------------- Benchmark ---------------------------------*/
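
Two changes here. First, the per-operation rehash step drops from 10 buckets to 2: in this dict.c lineage, dictRehash(d,n) moves at most n non-empty buckets (and scans at most n*10 empty ones) per call, so every insert or lookup during a rehash now does a fifth of the background work. Second, dictForceRehash() checks the iterator count before looping, because _dictRehashStep() is a no-op while safe iterators exist; the old unconditional loop could spin forever. A sketch of the guard, written against the internals shown above:

    // If a safe iterator is outstanding, _dictRehashStep() refuses to
    // move buckets, so "while (dictIsRehashing(d)) _dictRehashStep(d);"
    // never makes progress. Checking the count up front turns "force"
    // into "force only when steps can actually run".
    void dictForceRehashSketch(dict *d)
    {
        unsigned long iterators;
        __atomic_load(&d->iterators, &iterators, __ATOMIC_RELAXED);
        while (iterators == 0 && dictIsRehashing(d))
            _dictRehashStep(d);   // <= 2 buckets per step after this change
    }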

View File

@@ -1387,8 +1387,8 @@ dictType dbDictType = {
     dictObjectDestructor        /* val destructor */
 };
 
-/* db->pdict, keys are sds strings, vals uints. */
-dictType dbDictTypeTombstone = {
+/* db->pdict, keys are sds strings, vals are Redis objects. */
+dictType dbTombstoneDictType = {
     dictSdsHash,                /* hash function */
     NULL,                       /* key dup */
     NULL,                       /* val dup */
@@ -1539,8 +1539,9 @@ void tryResizeHashTables(int dbid) {
  * is returned. */
 int redisDbPersistentData::incrementallyRehash() {
     /* Keys dictionary */
-    if (dictIsRehashing(m_pdict)) {
+    if (dictIsRehashing(m_pdict) || dictIsRehashing(m_pdictTombstone)) {
         dictRehashMilliseconds(m_pdict,1);
+        dictRehashMilliseconds(m_pdictTombstone,1);
         return 1; /* already used our millisecond for this loop... */
     }
     return 0;
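
incrementallyRehash() is the cron-side driver: it now advances whichever of the two dictionaries is mid-rehash, spending up to a millisecond on each. dictRehashMilliseconds(d,ms), inherited from Redis's dict.c, rehashes in 100-bucket chunks until the budget expires and falls through immediately when d isn't rehashing, so calling it for both dicts inside the widened condition is safe even if only one is active. The same logic as a free function over the two dicts (the real code is a redisDbPersistentData member):

    // Sketch, assuming the Redis/KeyDB dict API (dict.h).
    int incrementallyRehashSketch(dict *keys, dict *tombstones) {
        if (dictIsRehashing(keys) || dictIsRehashing(tombstones)) {
            dictRehashMilliseconds(keys, 1);        // up to ~1 ms on the keyspace
            dictRehashMilliseconds(tombstones, 1);  // up to ~1 ms on tombstones
            return 1;   // budget spent for this cron iteration
        }
        return 0;       // nothing rehashing; caller can do other work
    }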

View File

@@ -2564,7 +2564,7 @@ extern dictType zsetDictType;
 extern dictType clusterNodesDictType;
 extern dictType clusterNodesBlackListDictType;
 extern dictType dbDictType;
-extern dictType dbDictTypeTombstone;
+extern dictType dbTombstoneDictType;
 extern dictType dbSnapshotDictType;
 extern dictType shaScriptObjectDictType;
 extern double R_Zero, R_PosInf, R_NegInf, R_Nan;

View File

@@ -91,8 +91,13 @@ const redisDbPersistentDataSnapshot *redisDbPersistentData::createSnapshot(uint6
         spdb->m_setexpire->pause_rehash();  // needs to be const
     }
 
+    if (dictIsRehashing(spdb->m_pdict) || dictIsRehashing(spdb->m_pdictTombstone)) {
+        serverLog(LL_NOTICE, "NOTICE: Suboptimal snapshot");
+    }
+
     m_pdict = dictCreate(&dbDictType,this);
-    m_pdictTombstone = dictCreate(&dbDictTypeTombstone, this);
+    dictExpand(m_pdict, 1024);      // minimize rehash overhead
+    m_pdictTombstone = dictCreate(&dbTombstoneDictType, this);
 
     serverAssert(spdb->m_pdict->iterators == 1);
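
The snapshot path gains an LL_NOTICE log when either source dict is caught mid-rehash (the other changes in this commit make that window smaller), and the replacement keyspace dict is pre-expanded. dictExpand(d,n) rounds n up to the next power of two and allocates the table once, so starting at 1024 buckets spares the fresh dict the 4 -> 8 -> ... -> 1024 grow-and-rehash ladder as writes land while the snapshot is alive. The pre-sizing idea in isolation (hypothetical helper; the real call sites pass `this` as privdata):

    #include "dict.h"           // Redis/KeyDB dict API (assumed include path)
    extern dictType dbDictType;

    dict *createPresizedDict() {
        dict *d = dictCreate(&dbDictType, NULL);
        dictExpand(d, 1024);    // one allocation now instead of repeated
                                // doublings (a new dict starts at 4 buckets)
        return d;
    }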