Fix issue where database size is off when setting key with expire

Former-commit-id: 7796918bfb4a98bc056b3b8f4065f1416da8d89a
John Sully 2019-12-23 22:07:33 -05:00
parent 0ab5ea7b9b
commit f8c903475a
4 changed files with 23 additions and 12 deletions

View File

@@ -315,11 +315,8 @@ int dbMerge(redisDb *db, robj *key, robj *val, int fReplace)
  *
  * All the new keys in the database should be created via this interface. */
 void setKey(redisDb *db, robj *key, robj *val) {
-    auto itr = db->find(szFromObj(key));
-    if (itr == NULL) {
-        dbAdd(db,key,val);
-    } else {
-        db->dbOverwriteCore(itr,key,val,!!g_pserver->fActiveReplica,true);
+    if (!dbAddCore(db, key, val)) {
+        dbOverwrite(db, key, val);
     }
     incrRefCount(val);
     signalModifiedKey(db,key);
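
Judging by the new test at the bottom of this diff, the old find()-based branch could disagree with the key count when the key only lived in the storage provider; the rewrite funnels both cases through a single add-or-overwrite decision. A minimal sketch (not KeyDB code) of that idiom, with std::unordered_map::try_emplace standing in for dbAddCore(), which the new code treats as returning false when the key already exists:

#include <string>
#include <unordered_map>

// Models the new setKey() control flow: one insert attempt decides
// between add and overwrite, so the size counter is touched exactly
// once per genuinely new key.
void set_key(std::unordered_map<std::string, std::string> &db,
             const std::string &key, const std::string &val) {
    auto [it, inserted] = db.try_emplace(key, val); // adds only if absent
    if (!inserted)
        it->second = val; // key existed: overwrite, map size unchanged
}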
@@ -2149,7 +2146,7 @@ redisDbPersistentData::changelist redisDbPersistentData::processChanges()
         }
         else
         {
-            for (unique_sds_ptr &key : m_vecchanged)
+            for (auto &key : m_setchanged)
             {
                 dictEntry *de = dictFind(m_pdict, key.get());
                 if (de == nullptr)
@@ -2159,7 +2156,7 @@ redisDbPersistentData::changelist redisDbPersistentData::processChanges()
                 vecRet.emplace_back(std::move(key), unique_sds_ptr(temp));
             }
         }
-        m_vecchanged.clear();
+        m_setchanged.clear();
     }
 }
@@ -2226,9 +2223,9 @@ bool redisDbPersistentData::removeCachedValue(const char *key)
 {
     serverAssert(m_spstorage != nullptr);
     // First ensure it's not a pending key
-    for (auto &spkey : m_vecchanged)
+    for (auto &strkey : m_setchanged)
     {
-        if (sdscmp(spkey.get(), (sds)key) == 0)
+        if (sdscmp(strkey.get(), (sds)key) == 0)
             return false; // NOP
     }

View File

@@ -373,6 +373,8 @@ public:
         return sdslen(m_str);
     }
     const char *get() const { return m_str; }
+    explicit operator const char*() const { return m_str; }
 };
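
The added conversion operator is explicit, so callers must spell out the cast. A self-contained sketch of why that matters, with immutable_str standing in for sdsimmutablestring:

#include <cstdio>

struct immutable_str {
    const char *m_str;
    // Same shape as the operator added above: conversion on request only.
    explicit operator const char *() const { return m_str; }
};

int main() {
    immutable_str s{"testkey"};
    std::printf("%s\n", static_cast<const char *>(s)); // OK: explicit cast
    // const char *p = s; // would not compile: no implicit conversion
    return 0;
}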

View File

@@ -1271,7 +1271,7 @@ public:
     void trackkey(const char *key)
     {
         if (m_fTrackingChanges && !m_fAllChanged && m_spstorage)
-            m_vecchanged.push_back(unique_sds_ptr(sdsdupshared(key)));
+            m_setchanged.emplace(sdsdupshared(key));
     }

     dict_iter find(const char *key)
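
One practical difference of trading the std::vector for a std::set: tracking the same key twice now stores one entry instead of two, so later passes over the changed set see each key exactly once. A self-contained illustration of that guarantee:

#include <iostream>
#include <set>
#include <string>

int main() {
    std::set<std::string> changed;
    changed.emplace("testkey");
    changed.emplace("testkey"); // duplicate: silently ignored by the set
    std::cout << changed.size() << '\n'; // prints 1
    return 0;
}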
@@ -1324,7 +1324,7 @@ public:
     // to allow you to release the global lock before committing. To prevent deadlocks you *must*
     // either release the global lock or keep the same global lock between the two functions as
     // a second lock is kept to ensure writes to secondary storage are ordered
-    typedef std::vector<std::pair<unique_sds_ptr, unique_sds_ptr>> changelist;
+    typedef std::vector<std::pair<sdsimmutablestring, unique_sds_ptr>> changelist;
     changelist processChanges();
     void commitChanges(const changelist &vec);
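
A toy, self-contained model of the two-phase flush the comment above prescribes: snapshot the tracked keys while the caller still holds the global lock, then commit under a second lock so storage writes stay ordered even after the global lock is dropped (the real code works on dicts and IStorage; everything here is a stand-in):

#include <mutex>
#include <set>
#include <string>
#include <vector>

struct TrackedDb {
    std::set<std::string> setchanged; // stand-in for m_setchanged
    std::mutex storageLock;           // stand-in for the "second lock"

    // Phase 1: called while the caller still holds the global lock.
    std::vector<std::string> processChanges() {
        std::vector<std::string> vec(setchanged.begin(), setchanged.end());
        setchanged.clear();
        storageLock.lock(); // taken before the global lock is released
        return vec;
    }

    // Phase 2: safe to call after the global lock is released.
    void commitChanges(const std::vector<std::string> &vec) {
        for (const std::string &key : vec) {
            (void)key; // write key to secondary storage here
        }
        storageLock.unlock(); // ordering guarantee ends here
    }
};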
@@ -1356,7 +1356,7 @@ private:
     dict *m_pdictTombstone = nullptr; /* Track deletes when we have a snapshot */
     int m_fTrackingChanges = 0; // Note: Stack based
     int m_fAllChanged = 0;
-    std::vector<unique_sds_ptr> m_vecchanged;
+    std::set<sdsimmutablestring> m_setchanged;
     std::shared_ptr<IStorage> m_spstorage = nullptr;
     uint64_t mvccCheckpoint = 0;

View File

@@ -21,6 +21,18 @@ start_server [list tags {flash} overrides [list storage-provider {flash ./rocks.
         r set testkey bar
         assert_equal {1} [r dbsize] "Only one key after overwrite"
         assert_equal {bar} [r get testkey]
     }
+
+    test { SET of existing but flushed key with EXPIRE works } {
+        r flushall
+        assert_equal {0} [r dbsize]
+        r set testkey foo ex 10000
+        assert_equal {1} [r dbsize] "Only one key after first insert"
+        r flushall cache
+        assert_equal {1} [r dbsize] "Only one key after flushall cache"
+        r set testkey bar ex 10000
+        assert_equal {1} [r dbsize] "Only one key after overwrite"
+        assert_equal {bar} [r get testkey]
+    }

     r flushall