Merge branch 'keydbpro' into keydbpro_collab

Former-commit-id: e4e5c6696c6d831924f314a198b266b10d831e14
John Sully 2021-07-19 20:51:27 +00:00
commit ea19d31df8
9 changed files with 239 additions and 20 deletions

.gitlab-ci.yml (new file)

@@ -0,0 +1,147 @@
build:
  rules:
    - if: '$COVERAGE'
      when: never
    - if: '$ENDURANCE'
      when: never
    - when: always
  tags:
    - docker
  stage: build
  script:
    - git submodule init && git submodule update
    - make distclean
    - make -j

make-test:
  rules:
    - if: '$COVERAGE'
      when: never
    - if: '$ENDURANCE'
      when: never
    - when: always
  tags:
    - docker
  stage: test
  script:
    - git submodule init && git submodule update
    - make distclean
    - make -j
    - make test -j

node-redis-test:
  rules:
    - if: '$COVERAGE'
      when: never
    - if: '$ENDURANCE'
      when: never
    - when: always
  tags:
    - docker
    - ipv6
  stage: test
  script:
    - git submodule init && git submodule update
    - make distclean
    - make -j
    - make install
    - git clone https://gitlab-ci-token:${CI_JOB_TOKEN}@gitlab.eqalpha.com/keydb-dev/node-redis.git
    - cd node-redis
    - npm install
    - npm run test

jedis-test:
  rules:
    - if: '$COVERAGE'
      when: never
    - if: '$ENDURANCE'
      when: never
    - when: always
  tags:
    - docker
    - ipv4
  stage: test
  script:
    - git submodule init && git submodule update
    - make distclean
    - make -j
    - make install
    - git clone https://gitlab-ci-token:${CI_JOB_TOKEN}@gitlab.eqalpha.com/keydb-dev/jedis.git
    - cd jedis
    - make test

redis-rs-test:
  rules:
    - if: '$COVERAGE'
      when: never
    - if: '$ENDURANCE'
      when: never
    - when: always
  tags:
    - docker
  stage: test
  script:
    - git submodule init && git submodule update
    - make distclean
    - make -j
    - make install
    - git clone https://gitlab-ci-token:${CI_JOB_TOKEN}@gitlab.eqalpha.com/keydb-dev/redis-rs.git
    - cd redis-rs
    - make test

endurance-test:
  rules:
    - if: '$ENDURANCE'
  tags:
    - docker
  stage: test
  script:
    - git submodule init && git submodule update
    - make distclean
    - make -j
    - ./runtest --loop --stop

coverage-test:
  rules:
    - if: '$COVERAGE'
  tags:
    - docker
  stage: test
  script:
    - git submodule init && git submodule update
    - make distclean
    - make gcov -j
    - make install
    - ./runtest || true
    - pkill keydb-server || true
    - pkill stunnel || true
    - ./runtest-cluster || true
    - pkill keydb-server || true
    - pkill stunnel || true
    - ./runtest-sentinel || true
    - pkill keydb-server || true
    - pkill stunnel || true
    - ./runtest-moduleapi || true
    - pkill keydb-server || true
    - pkill stunnel || true
    - git clone https://gitlab-ci-token:${CI_JOB_TOKEN}@gitlab.eqalpha.com/keydb-dev/redis-rs.git
    - cd redis-rs
    - make test || true
    - pkill keydb-server || true
    - pkill stunnel || true
    - cd ..
    - git clone https://gitlab-ci-token:${CI_JOB_TOKEN}@gitlab.eqalpha.com/keydb-dev/jedis.git
    - cd jedis
    - make test || true
    - pkill keydb-server || true
    - pkill stunnel || true
    - cd ..
    - git clone https://gitlab-ci-token:${CI_JOB_TOKEN}@gitlab.eqalpha.com/keydb-dev/node-redis.git
    - cd node-redis
    - npm install
    - npm run test || true
    - pkill keydb-server || true
    - pkill stunnel || true
    - cd ..
    - geninfo -o KeyDB.info --no-external .
    - genhtml --legend -o lcov-html KeyDB.info

src/StorageCache.cpp

@@ -25,6 +25,12 @@ StorageCache::StorageCache(IStorage *storage, bool fCache)
         m_pdict = dictCreate(&dbStorageCacheType, nullptr);
 }
 
+StorageCache::~StorageCache()
+{
+    if (m_pdict != nullptr)
+        dictRelease(m_pdict);
+}
+
 void StorageCache::clear()
 {
     std::unique_lock<fastlock> ul(m_lock);

src/StorageCache.h

@@ -29,6 +29,8 @@ class StorageCache
     }
 
 public:
+    ~StorageCache();
+
     static StorageCache *create(IStorageFactory *pfactory, int db, IStorageFactory::key_load_iterator fn, void *privdata) {
         StorageCache *cache = new StorageCache(nullptr, pfactory->FSlow() /*fCache*/);
         load_iter_data data = {cache, fn, privdata};
src/config.cpp

@@ -2470,6 +2470,7 @@ static int updateReplBacklogSize(long long val, long long prev, const char **err
      * being able to tell when the size changes, so restore prev before calling it. */
     UNUSED(err);
     g_pserver->repl_backlog_size = prev;
+    g_pserver->repl_backlog_config_size = val;
     resizeReplicationBacklog(val);
     return 1;
 }
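
The new repl_backlog_config_size keeps the operator's requested size separate from the live allocation, which later changes in this commit allow to grow past the configured value and then shrink back. A minimal standalone sketch of that split; ReplState and the function names are hypothetical, not KeyDB's API:

    #include <algorithm>
    #include <cstdio>

    // Hypothetical stand-ins for the two fields: the live allocation and the value
    // the operator configured.
    struct ReplState {
        long long backlog_size = 1 << 20;        // actual buffer, may grow under pressure
        long long backlog_config_size = 1 << 20; // what repl-backlog-size was set to
    };

    // Sketch of the config handler: remember the requested size, then resize the buffer.
    void applyBacklogSizeConfig(ReplState &st, long long requested) {
        st.backlog_config_size = requested;
        st.backlog_size = requested;             // resizeReplicationBacklog() equivalent
    }

    // Sketch of an emergency grow: the allocation can exceed the configured value.
    void growForLaggingReplica(ReplState &st, long long needed) {
        st.backlog_size = std::max(st.backlog_size * 2, needed);
    }

    int main() {
        ReplState st;
        applyBacklogSizeConfig(st, 4 << 20);     // operator asks for 4 MB
        growForLaggingReplica(st, 10 << 20);     // replica lag forces 10 MB
        // The configured value is untouched, so a later trim knows what to shrink back to.
        std::printf("config=%lld actual=%lld\n", st.backlog_config_size, st.backlog_size);
    }
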

src/db.cpp

@@ -2963,13 +2963,13 @@ dict_iter redisDbPersistentData::random()
     return dict_iter(m_pdict, de);
 }
 
-size_t redisDbPersistentData::size() const
+size_t redisDbPersistentData::size(bool fCachedOnly) const
 {
-    if (m_spstorage != nullptr && !m_fAllChanged)
+    if (m_spstorage != nullptr && !m_fAllChanged && !fCachedOnly)
         return m_spstorage->count() + m_cnewKeysPending;
 
     return dictSize(m_pdict)
-        + (m_pdbSnapshot ? (m_pdbSnapshot->size() - dictSize(m_pdictTombstone)) : 0);
+        + (m_pdbSnapshot ? (m_pdbSnapshot->size(fCachedOnly) - dictSize(m_pdictTombstone)) : 0);
 }
 
 bool redisDbPersistentData::removeCachedValue(const char *key, dictEntry **ppde)
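
size() now takes fCachedOnly: with a storage provider attached the full count comes from the provider plus keys not yet flushed to it, while the cached-only count falls back to the in-memory dict plus whatever an active snapshot still holds minus tombstoned keys. A rough standalone model of the two counting paths (hypothetical field names, made-up numbers):

    #include <cstddef>
    #include <cstdio>

    // Hypothetical, simplified model of the two counting paths in size(bool fCachedOnly).
    struct Db {
        bool   hasStorageProvider = true;
        size_t storageCount   = 100000; // keys the storage provider knows about
        size_t newKeysPending = 250;    // created in memory, not yet flushed to storage
        size_t dictSize       = 4000;   // keys currently materialized in RAM
        size_t snapshotSize   = 500;    // keys visible only through an active snapshot
        size_t tombstoneSize  = 120;    // snapshot keys already deleted or rewritten

        size_t size(bool fCachedOnly = false) const {
            if (hasStorageProvider && !fCachedOnly)
                return storageCount + newKeysPending;         // full logical keyspace
            return dictSize + (snapshotSize - tombstoneSize); // only what is resident
        }
    };

    int main() {
        Db db;
        std::printf("keys=%zu cached_keys=%zu\n", db.size(), db.size(true));
        // prints keys=100250 cached_keys=4380
    }
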
@@ -3024,13 +3024,13 @@ void redisDbPersistentData::removeAllCachedValues()
         trackChanges(false);
     }
 
-    if (m_pdict->pauserehash == 0) {
+    if (m_pdict->pauserehash == 0 && m_pdict->refcount == 1) {
         dict *dT = m_pdict;
         m_pdict = dictCreate(&dbDictType, this);
         dictExpand(m_pdict, dictSize(dT)/2, false); // Make room for about half so we don't excessively rehash
         g_pserver->asyncworkqueue->AddWorkFunction([dT]{
             dictRelease(dT);
-        }, true);
+        }, false);
     } else {
         dictEmpty(m_pdict, nullptr);
     }
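
removeAllCachedValues now only detaches the dict when no rehash is paused on it and nothing else holds a reference (refcount == 1); the old table is handed to a background worker to free while a smaller, pre-sized replacement takes over, otherwise the dict is emptied in place. A self-contained sketch of that swap-and-free-later pattern, using std::unordered_map, shared_ptr and a detached thread in place of KeyDB's dict and async work queue:

    #include <memory>
    #include <string>
    #include <thread>
    #include <unordered_map>
    #include <utility>

    using Table = std::unordered_map<std::string, std::string>;

    // Sketch: drop every cached value. If we are the only owner of the table, swap in
    // a fresh, pre-sized one and let a worker destroy the old table off the hot path;
    // otherwise (a snapshot/iterator may still reference it) clear it in place.
    void removeAllCachedValues(std::shared_ptr<Table> &table) {
        if (table.use_count() == 1) {
            auto old = std::move(table);
            table = std::make_shared<Table>();
            table->reserve(old->size() / 2);          // room for about half, avoids immediate rehash
            std::thread([old = std::move(old)]() mutable {
                old.reset();                          // the expensive destruction happens here
            }).detach();                              // a real server would track/join this worker
        } else {
            table->clear();                           // shared: must keep the same object alive
        }
    }

    int main() {
        auto t = std::make_shared<Table>();
        for (int i = 0; i < 100000; ++i) (*t)["key:" + std::to_string(i)] = "v";
        removeAllCachedValues(t);
        return t->empty() ? 0 : 1;                    // the visible table is empty either way
    }
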

src/rdb.cpp

@@ -2884,6 +2884,8 @@ int rdbLoadRio(rio *rdb, int rdbflags, rdbSaveInfo *rsi) {
             do this every 16 keys to limit the perf impact */
         if (g_pserver->m_pstorageFactory && (ckeysLoaded % 128) == 0)
         {
+            g_pserver->garbageCollector.endEpoch(serverTL->gcEpoch);
+            serverTL->gcEpoch = g_pserver->garbageCollector.startEpoch();
             bool fHighMemory = (getMaxmemoryState(NULL,NULL,NULL,NULL) != C_OK);
             if (fHighMemory || (ckeysLoaded % (1024)) == 0)
             {
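
When loading with a storage provider, the loop now closes and reopens a garbage-collection epoch every 128 keys so memory deferred for concurrent readers can actually be reclaimed mid-load, and checks the maxmemory state on a coarser interval before evicting cached values. A rough sketch of the epoch-rotation idea; EpochGC is a hypothetical stand-in, not KeyDB's garbageCollector API:

    #include <cstddef>
    #include <cstdio>
    #include <functional>
    #include <vector>

    // Hypothetical epoch-based collector: frees queued during an epoch only run once
    // the epoch is closed, so concurrent readers inside it never see memory vanish.
    struct EpochGC {
        int epoch = 0;
        std::vector<std::function<void()>> pending;
        int  startEpoch() { return ++epoch; }
        void endEpoch(int /*id*/) {
            for (auto &f : pending) f();   // safe point: run the deferred frees
            pending.clear();
        }
    };

    int main() {
        EpochGC gc;
        int epoch = gc.startEpoch();
        for (size_t keysLoaded = 1; keysLoaded <= 100000; ++keysLoaded) {
            // ... load one key; replaced values would be queued into gc.pending ...
            if (keysLoaded % 128 == 0) {
                gc.endEpoch(epoch);        // reclaim what the last 128 keys deferred
                epoch = gc.startEpoch();   // open a fresh epoch for the next batch
                if (keysLoaded % 1024 == 0 /* || over the memory limit */) {
                    // evict cached values here to stay under maxmemory
                }
            }
        }
        gc.endEpoch(epoch);
        std::printf("load finished after %d epochs\n", gc.epoch);
    }
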

src/replication.cpp

@@ -254,9 +254,11 @@ void resizeReplicationBacklog(long long newsize) {
         zfree(g_pserver->repl_backlog);
         g_pserver->repl_backlog = backlog;
         g_pserver->repl_backlog_idx = g_pserver->repl_backlog_histlen;
-        g_pserver->repl_batch_idxStart -= earliest_idx;
-        if (g_pserver->repl_batch_idxStart < 0)
-            g_pserver->repl_batch_idxStart += g_pserver->repl_backlog_size;
+        if (g_pserver->repl_batch_idxStart >= 0) {
+            g_pserver->repl_batch_idxStart -= earliest_idx;
+            if (g_pserver->repl_batch_idxStart < 0)
+                g_pserver->repl_batch_idxStart += g_pserver->repl_backlog_size;
+        }
         g_pserver->repl_backlog_start = earliest_off;
     } else {
         zfree(g_pserver->repl_backlog);
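
When the backlog shrinks while holding history, the surviving bytes are compacted so the earliest retained byte lands at index 0, and any saved write position must be rebased by the same shift, wrapping around the ring; the new guard skips the rebase when repl_batch_idxStart is -1 (no batch open). A small standalone illustration of the rebase arithmetic (hypothetical helper name):

    #include <cassert>

    // After compaction the byte that used to live at earliest_idx sits at index 0, so
    // every saved index shifts down by earliest_idx, wrapping around the ring buffer.
    long long rebaseIndex(long long idx, long long earliest_idx, long long backlog_size) {
        if (idx < 0) return idx;           // -1 means "no batch in progress": nothing to rebase
        idx -= earliest_idx;
        if (idx < 0) idx += backlog_size;  // the index sat "before" earliest_idx, i.e. wrapped
        return idx;
    }

    int main() {
        // 1 MB ring whose earliest retained byte used to sit at index 700000.
        assert(rebaseIndex(800000, 700000, 1000000) == 100000); // plain shift
        assert(rebaseIndex(200000, 700000, 1000000) == 500000); // wrapped past the end
        assert(rebaseIndex(-1,     700000, 1000000) == -1);     // guarded case added here
    }
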
@@ -301,19 +303,56 @@ void feedReplicationBacklog(const void *ptr, size_t len) {
         if (lower_bound == -1)
             lower_bound = g_pserver->repl_batch_offStart;
         long long minimumsize = g_pserver->master_repl_offset + len - lower_bound + 1;
         if (minimumsize > g_pserver->repl_backlog_size) {
             flushReplBacklogToClients();
             lower_bound = g_pserver->repl_lowest_off.load(std::memory_order_seq_cst);
             if (lower_bound == -1)
                 lower_bound = g_pserver->repl_batch_offStart;
+            listIter li;
+            listNode *ln;
+            listRewind(g_pserver->slaves, &li);
+            long long maxClientBuffer = (long long)cserver.client_obuf_limits[CLIENT_TYPE_SLAVE].hard_limit_bytes;
+            if (maxClientBuffer <= 0)
+                maxClientBuffer = LLONG_MAX; // infinite essentially
+            long long min_offset = LLONG_MAX;
+            int listening_replicas = 0;
+            while ((ln = listNext(&li))) {
+                client *replica = (client*)listNodeValue(ln);
+                if (!canFeedReplicaReplBuffer(replica)) continue;
+                if (replica->flags & CLIENT_CLOSE_ASAP) continue;
-            minimumsize = g_pserver->master_repl_offset + len - lower_bound + 1;
+                std::unique_lock<fastlock> ul(replica->lock);
-            if (minimumsize > g_pserver->repl_backlog_size && minimumsize < (long long)cserver.client_obuf_limits[CLIENT_TYPE_SLAVE].hard_limit_bytes) {
+                // Would this client overflow? If so close it
+                long long neededBuffer = g_pserver->master_repl_offset + len - replica->repl_curr_off + 1;
+                if (neededBuffer > maxClientBuffer) {
+                    sds clientInfo = catClientInfoString(sdsempty(),replica);
+                    freeClientAsync(replica);
+                    serverLog(LL_WARNING,"Client %s scheduled to be closed ASAP due to exceeding output buffer hard limit.", clientInfo);
+                    sdsfree(clientInfo);
+                    continue;
+                }
+                min_offset = std::min(min_offset, replica->repl_curr_off);
+                ++listening_replicas;
+            }
+            if (min_offset == LLONG_MAX) {
+                min_offset = g_pserver->repl_batch_offStart;
+                g_pserver->repl_lowest_off = -1;
+            } else {
+                g_pserver->repl_lowest_off = min_offset;
+            }
+            minimumsize = g_pserver->master_repl_offset + len - min_offset + 1;
+            serverAssert(listening_replicas == 0 || minimumsize <= maxClientBuffer);
+            if (minimumsize > g_pserver->repl_backlog_size && listening_replicas) {
                 // This is an emergency overflow, we better resize to fit
                 long long newsize = std::max(g_pserver->repl_backlog_size*2, minimumsize);
-                serverLog(LL_WARNING, "Replication backlog is too small, resizing to: %lld", newsize);
+                serverLog(LL_WARNING, "Replication backlog is too small, resizing to: %lld bytes", newsize);
                 resizeReplicationBacklog(newsize);
+            } else if (!listening_replicas) {
+                // We need to update a few variables or later asserts will notice we dropped data
+                g_pserver->repl_batch_offStart = g_pserver->master_repl_offset + len;
+                g_pserver->repl_lowest_off = -1;
             }
         }
     }
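
The backlog requirement is now driven by the replicas that are still consuming it: a replica whose gap (master_repl_offset + len - repl_curr_off + 1) would exceed the slave output-buffer hard limit is closed instead of forcing a resize, the smallest surviving offset sets the needed size, and with no listening replicas the batch start simply advances past the dropped data. A short worked example of that arithmetic with made-up offsets:

    #include <algorithm>
    #include <climits>
    #include <cstdio>
    #include <vector>

    int main() {
        long long master_repl_offset = 1'000'000;     // bytes written so far
        long long len                = 4'096;         // bytes about to be appended
        long long hard_limit         = 256 * 1024;    // client-output-buffer-limit slave <hard>
        long long backlog_size       = 128 * 1024;

        // repl_curr_off of two replicas: one nearly caught up, one far behind.
        std::vector<long long> replica_offsets = {995'000, 700'000};

        long long min_offset = LLONG_MAX;
        int listening = 0;
        for (long long off : replica_offsets) {
            long long needed = master_repl_offset + len - off + 1;
            if (needed > hard_limit) {                // 304'097 > 262'144: close this replica
                std::printf("replica at %lld needs %lld bytes, closing it\n", off, needed);
                continue;
            }
            min_offset = std::min(min_offset, off);   // 995'000 survives (needs only 9'097 bytes)
            ++listening;
        }

        long long minimumsize = master_repl_offset + len - min_offset + 1;  // 9'097
        if (listening && minimumsize > backlog_size)
            std::printf("grow backlog to %lld\n", std::max(backlog_size * 2, minimumsize));
        else
            std::printf("%d replica(s) listening, %lld bytes fit in the %lld byte backlog\n",
                        listening, minimumsize, backlog_size);
    }
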
@@ -4318,6 +4357,8 @@ void replicationCron(void) {
     replicationStartPendingFork();
 
+    trimReplicationBacklog();
+
     /* Remove the RDB file used for replication if Redis is not running
      * with any persistence. */
     removeRDBUsedToSyncReplicas();
@@ -5076,3 +5117,17 @@ void updateFailoverStatus(void) {
             g_pserver->target_replica_port);
     }
 }
+
+// If we automatically grew the backlog we need to trim it back to
+// the config setting when possible
+void trimReplicationBacklog() {
+    serverAssert(GlobalLocksAcquired());
+    serverAssert(g_pserver->repl_batch_offStart < 0); // we shouldn't be in a batch
+    if (g_pserver->repl_backlog_size <= g_pserver->repl_backlog_config_size)
+        return; // We're already a good size
+    if (g_pserver->repl_lowest_off > 0 && (g_pserver->master_repl_offset - g_pserver->repl_lowest_off + 1) > g_pserver->repl_backlog_config_size)
+        return; // There is untransmitted data we can't truncate
+
+    serverLog(LL_NOTICE, "Reclaiming %lld replication backlog bytes", g_pserver->repl_backlog_size - g_pserver->repl_backlog_config_size);
+    resizeReplicationBacklog(g_pserver->repl_backlog_config_size);
+}
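
trimReplicationBacklog is the counterpart of the emergency grow in feedReplicationBacklog: once the backlog is merely oversized, and not still needed to cover untransmitted data, it is shrunk back to the configured value from replicationCron. A small worked example of the two guard conditions, with hypothetical numbers:

    #include <cstdio>

    // True when an automatically grown backlog can shrink back to the configured size:
    // it must actually be oversized, and no replica may still need more history than
    // the configured size can hold.
    bool canTrim(long long backlog_size, long long config_size,
                 long long master_repl_offset, long long repl_lowest_off) {
        if (backlog_size <= config_size)
            return false;                                     // already the right size
        if (repl_lowest_off > 0 &&
            (master_repl_offset - repl_lowest_off + 1) > config_size)
            return false;                                     // untransmitted data we can't drop
        return true;
    }

    int main() {
        // The backlog grew to 8 MB while a replica lagged; the operator configured 1 MB.
        std::printf("%d\n", canTrim(8 << 20, 1 << 20, 5'000'000, 4'600'000)); // 400'001 pending -> 1 (trim)
        std::printf("%d\n", canTrim(8 << 20, 1 << 20, 5'000'000, 2'000'000)); // 3'000'001 pending -> 0 (keep)
    }
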

src/server.cpp

@@ -2124,7 +2124,7 @@ void databasesCron(bool fMainThread) {
             ::dict *dict = g_pserver->db[rehash_db]->dictUnsafeKeyOnly();
             /* Are we async rehashing? And if so is it time to re-calibrate? */
             /* The recalibration limit is a prime number to ensure balancing across threads */
-            if (rehashes_per_ms > 0 && async_rehashes < 131 && !cserver.active_defrag_enabled && cserver.cthreads > 1) {
+            if (rehashes_per_ms > 0 && async_rehashes < 131 && !cserver.active_defrag_enabled && cserver.cthreads > 1 && dictSize(dict) > 2048 && dictIsRehashing(dict) && !g_pserver->loading) {
                 serverTL->rehashCtl = dictRehashAsyncStart(dict, rehashes_per_ms);
                 ++async_rehashes;
             }
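
The tightened condition keeps async rehashing to cases where it pays off: the dict must actually be rehashing and large enough to matter (more than 2048 entries), and it is skipped while loading; the recalibration limit of 131 runs is unchanged. A compact sketch of the same gate as a predicate, with illustrative field names:

    #include <cstddef>

    // Illustrative gate mirroring the condition above: only hand a dict to the async
    // rehash worker when rehashing is in progress, the table is big enough to matter,
    // and we are not loading a dump. Field names here are stand-ins.
    struct RehashGate {
        int  rehashes_per_ms;   // throughput measured by a prior synchronous calibration
        int  async_rehashes;    // async runs since the last recalibration
        bool active_defrag;     // async rehash and active defrag don't mix
        int  server_threads;
        bool loading;

        bool shouldStartAsync(std::size_t dict_size, bool is_rehashing) const {
            return rehashes_per_ms > 0
                && async_rehashes < 131        // prime limit so threads recalibrate out of phase
                && !active_defrag
                && server_threads > 1
                && dict_size > 2048
                && is_rehashing
                && !loading;
        }
    };

    int main() {
        RehashGate gate{50, 3, false, 4, false};
        return gate.shouldStartAsync(10000, true) ? 0 : 1;  // passes: large dict mid-rehash
    }
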
@@ -6112,10 +6112,11 @@ sds genRedisInfoString(const char *section) {
         if (sections++) info = sdscat(info,"\r\n");
         info = sdscatprintf(info, "# Keyspace\r\n");
 
         for (j = 0; j < cserver.dbnum; j++) {
-            long long keys, vkeys;
+            long long keys, vkeys, cachedKeys;
             keys = g_pserver->db[j]->size();
             vkeys = g_pserver->db[j]->expireSize();
+            cachedKeys = g_pserver->db[j]->size(true /* fCachedOnly */);
 
             // Adjust TTL by the current time
             mstime_t mstime;
@@ -6127,8 +6128,8 @@ sds genRedisInfoString(const char *section) {
             if (keys || vkeys) {
                 info = sdscatprintf(info,
-                    "db%d:keys=%lld,expires=%lld,avg_ttl=%lld\r\n",
-                    j, keys, vkeys, static_cast<long long>(g_pserver->db[j]->avg_ttl));
+                    "db%d:keys=%lld,expires=%lld,avg_ttl=%lld,cached_keys=%lld\r\n",
+                    j, keys, vkeys, static_cast<long long>(g_pserver->db[j]->avg_ttl), cachedKeys);
             }
         }
     }
 
@@ -7100,6 +7101,8 @@ static void validateConfiguration()
         serverLog(LL_WARNING, "\tKeyDB will now exit. Please update your configuration file.");
         exit(EXIT_FAILURE);
     }
+
+    g_pserver->repl_backlog_config_size = g_pserver->repl_backlog_size; // this is normally set in the update logic, but not on initial config
 }
 
 int iAmMaster(void) {

src/server.h

@@ -1095,7 +1095,7 @@ public:
     redisDbPersistentData(redisDbPersistentData &&) = default;
 
     size_t slots() const { return dictSlots(m_pdict); }
-    size_t size() const;
+    size_t size(bool fCachedOnly = false) const;
     void expand(uint64_t slots) { dictExpand(m_pdict, slots); }
 
     void trackkey(robj_roptr o, bool fUpdate)
@@ -2362,6 +2362,7 @@ struct redisServer {
     int repl_ping_slave_period;     /* Master pings the replica every N seconds */
     char *repl_backlog;             /* Replication backlog for partial syncs */
     long long repl_backlog_size;    /* Backlog circular buffer size */
+    long long repl_backlog_config_size; /* The repl backlog may grow but we want to know what the user set it to */
    long long repl_backlog_histlen; /* Backlog actual data length */
     long long repl_backlog_idx;     /* Backlog circular buffer current offset,
                                        that is the next byte will'll write to.*/
@@ -3028,6 +3029,8 @@ void clearFailoverState(void);
 void updateFailoverStatus(void);
 void abortFailover(redisMaster *mi, const char *err);
 const char *getFailoverStateString();
+int canFeedReplicaReplBuffer(client *replica);
+void trimReplicationBacklog();
 
 /* Generic persistence functions */
 void startLoadingFile(FILE* fp, const char * filename, int rdbflags);