Optimize async command snapshot creation, and make slip configurable

parent 0d917c8580
commit 4697923f41
@@ -2826,6 +2826,7 @@ standardConfig configs[] = {
     createLongLongConfig("stream-node-max-entries", NULL, MODIFIABLE_CONFIG, 0, LLONG_MAX, g_pserver->stream_node_max_entries, 100, INTEGER_CONFIG, NULL, NULL),
     createLongLongConfig("repl-backlog-size", NULL, MODIFIABLE_CONFIG, 1, LLONG_MAX, g_pserver->repl_backlog_size, 1024*1024, MEMORY_CONFIG, NULL, updateReplBacklogSize), /* Default: 1mb */
     createLongLongConfig("repl-backlog-disk-reserve", NULL, IMMUTABLE_CONFIG, 0, LLONG_MAX, cserver.repl_backlog_disk_size, 0, MEMORY_CONFIG, NULL, NULL),
+    createLongLongConfig("max-snapshot-slip", NULL, MODIFIABLE_CONFIG, 0, 5000, g_pserver->snapshot_slip, 400, 0, NULL, NULL),
 
     /* Unsigned Long Long configs */
     createULongLongConfig("maxmemory", NULL, MODIFIABLE_CONFIG, 0, LLONG_MAX, g_pserver->maxmemory, 0, MEMORY_CONFIG, NULL, updateMaxmemory),
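The new entry makes the slip a runtime tunable: max-snapshot-slip is MODIFIABLE_CONFIG, clamped to 0-5000 ms, defaults to 400 ms, and is bound to g_pserver->snapshot_slip. (The in-struct initializer of 500 added in server.h below is presumably overwritten when the config table's defaults are applied at startup.) A minimal standalone model of the bound-and-clamped pattern; the struct and function names here are illustrative, not KeyDB's:

#include <iostream>

// Standalone model of a bounded long long config entry. Names are
// illustrative; KeyDB's real framework lives in config.cpp.
struct LongLongConfig {
    const char *name;
    long long lower, upper, defval;
    long long *bound;  // points at the server field, e.g. g_pserver->snapshot_slip
};

long long g_snapshot_slip = 500;  // member initializer, as in server.h below

LongLongConfig g_slipConfig = {"max-snapshot-slip", 0, 5000, 400, &g_snapshot_slip};

// Apply the registered default at startup, then accept bounded updates.
void initConfig(LongLongConfig &c) { *c.bound = c.defval; }

bool setConfig(LongLongConfig &c, long long v) {
    if (v < c.lower || v > c.upper) return false;  // reject out-of-range values
    *c.bound = v;
    return true;
}

int main() {
    initConfig(g_slipConfig);              // default 400 overrides the 500 initializer
    std::cout << g_snapshot_slip << '\n';  // 400
    setConfig(g_slipConfig, 9999);         // rejected: above the 5000 ms cap
    std::cout << g_snapshot_slip << '\n';  // still 400
}

Rejecting rather than clamping out-of-range values matches how the standard config table treats numeric settings.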
@@ -208,7 +208,7 @@ client *createClient(connection *conn, int iel) {
     c->paused_list_node = NULL;
     c->client_tracking_redirection = 0;
     c->casyncOpsPending = 0;
-    c->mvccCheckpoint = getMvccTstamp();
+    c->mvccCheckpoint = 0;
     c->master_error = 0;
     memset(c->uuid, 0, UUID_BINARY_LEN);
 
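mvccCheckpoint now starts at 0 instead of the creation-time MVCC timestamp, so a client that has never written is no longer mistaken for a recent writer by the fWriteTooRecent test in the next hunk. A sketch of the effect; the 20-bit ms shift is an assumption standing in for MVCC_MS_SHIFT:

#include <cstdint>
#include <iostream>

// Assumption: the low bits of an MVCC timestamp hold a per-ms sequence
// counter; shifting right by kMvccMsShift yields milliseconds. The width 20
// is a placeholder, not taken from the diff.
constexpr int kMvccMsShift = 20;
uint64_t tstampFromMs(uint64_t ms) { return ms << kMvccMsShift; }

bool writeTooRecent(uint64_t now, uint64_t checkpoint, long long slipMs) {
    return ((now - checkpoint) >> kMvccMsShift)
            < static_cast<uint64_t>(slipMs) / 2;
}

int main() {
    const long long slip = 400;
    uint64_t now = tstampFromMs(10000);

    // Old behavior: checkpoint seeded at creation time, so a brand-new
    // client counts as a recent writer for the next slip/2 ms.
    std::cout << writeTooRecent(now, tstampFromMs(9950), slip) << '\n';  // 1

    // New behavior: checkpoint 0 makes the elapsed time huge, so a client
    // that has never written is immediately eligible for the fast path.
    std::cout << writeTooRecent(now, 0, slip) << '\n';                   // 0
}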
@@ -2754,7 +2754,7 @@ void readQueryFromClient(connection *conn) {
     // Frequent writers aren't good candidates for this optimization, they cause us to renew the snapshot too often
     // so we exclude them unless the snapshot we need already exists
     bool fSnapshotExists = c->db->mvccLastSnapshot >= c->mvccCheckpoint;
-    bool fWriteTooRecent = (((getMvccTstamp() - c->mvccCheckpoint) >> MVCC_MS_SHIFT) < redisDbPersistentDataSnapshot::msStaleThreshold/2);
+    bool fWriteTooRecent = (((getMvccTstamp() - c->mvccCheckpoint) >> MVCC_MS_SHIFT) < static_cast<uint64_t>(g_pserver->snapshot_slip)/2);
 
     // The check below avoids running async commands if this is a frequent writer unless a snapshot is already there to service it
     if (!fWriteTooRecent || fSnapshotExists) {
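The frequent-writer heuristic now reads the configured slip instead of the fixed msStaleThreshold: a client whose last write landed within slip/2 ms only takes the async path if a snapshot at or after its checkpoint already exists. A condensed standalone model of the two booleans, again with an assumed shift width:

#include <cstdint>
#include <iostream>

constexpr int kMvccMsShift = 20;  // placeholder shift width
uint64_t tstampFromMs(uint64_t ms) { return ms << kMvccMsShift; }

// Mirrors the decision in readQueryFromClient: take the async path when the
// client is not a frequent writer, or a usable snapshot already exists.
bool useAsyncPath(uint64_t now, uint64_t clientCheckpoint,
                  uint64_t dbLastSnapshot, long long slipMs) {
    bool fSnapshotExists = dbLastSnapshot >= clientCheckpoint;
    bool fWriteTooRecent = ((now - clientCheckpoint) >> kMvccMsShift)
                            < static_cast<uint64_t>(slipMs) / 2;
    return !fWriteTooRecent || fSnapshotExists;
}

int main() {
    const long long slip = 400;
    uint64_t now = tstampFromMs(10000);
    // Wrote 100 ms ago (inside slip/2 = 200 ms) and no snapshot covers the
    // write: fall back to the synchronous path.
    std::cout << useAsyncPath(now, tstampFromMs(9900), tstampFromMs(9800), slip) << '\n';  // 0
    // Same recent writer, but a snapshot at/after its checkpoint exists.
    std::cout << useAsyncPath(now, tstampFromMs(9900), tstampFromMs(9950), slip) << '\n';  // 1
}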
@@ -2776,7 +2776,7 @@ void beforeSleep(struct aeEventLoop *eventLoop) {
 
     /* end any snapshots created by fast async commands */
     for (int idb = 0; idb < cserver.dbnum; ++idb) {
-        if (serverTL->rgdbSnapshot[idb] != nullptr) {
+        if (serverTL->rgdbSnapshot[idb] != nullptr && serverTL->rgdbSnapshot[idb]->FStale()) {
             g_pserver->db[idb]->endSnapshot(serverTL->rgdbSnapshot[idb]);
             serverTL->rgdbSnapshot[idb] = nullptr;
         }
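This is the optimization half of the commit: per-thread cached snapshots in serverTL->rgdbSnapshot are no longer ended on every pass through beforeSleep, only once FStale() says they have drifted past the slip budget, so consecutive async commands can reuse one snapshot. A standalone sketch of the lifecycle; Snapshot, kDbNum, and the delete standing in for endSnapshot() are illustrative:

#include <array>
#include <cstdint>
#include <iostream>

constexpr int kMvccMsShift = 20;  // placeholder shift width
constexpr int kDbNum = 2;         // stand-in for cserver.dbnum

struct Snapshot {
    uint64_t mvccCheckpoint;
    bool stale(uint64_t now, long long slipMs) const {
        return ((now - mvccCheckpoint) >> kMvccMsShift)
                >= static_cast<uint64_t>(slipMs);
    }
};

// Per-thread snapshot cache, as in serverTL->rgdbSnapshot.
std::array<Snapshot*, kDbNum> rgdbSnapshot{};

// beforeSleep now ends a cached snapshot only once it is stale, letting
// back-to-back async commands reuse it instead of rebuilding it.
void beforeSleepSketch(uint64_t now, long long slipMs) {
    for (int idb = 0; idb < kDbNum; ++idb) {
        if (rgdbSnapshot[idb] != nullptr && rgdbSnapshot[idb]->stale(now, slipMs)) {
            delete rgdbSnapshot[idb];  // stands in for endSnapshot()
            rgdbSnapshot[idb] = nullptr;
        }
    }
}

int main() {
    rgdbSnapshot[0] = new Snapshot{300ull << kMvccMsShift};
    beforeSleepSketch(500ull << kMvccMsShift, 400);     // 200 ms old: kept
    std::cout << (rgdbSnapshot[0] != nullptr) << '\n';  // 1
    beforeSleepSketch(800ull << kMvccMsShift, 400);     // 500 ms old: ended
    std::cout << (rgdbSnapshot[0] != nullptr) << '\n';  // 0
}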
@@ -1286,8 +1286,6 @@ public:
     // These need to be fixed
     using redisDbPersistentData::size;
     using redisDbPersistentData::expireSize;
-
-    static const uint64_t msStaleThreshold = 500;
 };
 
 /* Redis database representation. There are multiple databases identified
@@ -2605,6 +2603,8 @@ struct redisServer {
     IStorageFactory *m_pstorageFactory = nullptr;
     int storage_flush_period; // The time between flushes in the CRON job
 
+    long long snapshot_slip = 500; // The amount of time in milliseconds we let a snapshot be behind the current database
+
     /* TLS Configuration */
     int tls_cluster;
     int tls_replication;
@@ -654,7 +654,7 @@ int redisDbPersistentDataSnapshot::snapshot_depth() const
 
 bool redisDbPersistentDataSnapshot::FStale() const
 {
-    return ((getMvccTstamp() - m_mvccCheckpoint) >> MVCC_MS_SHIFT) >= redisDbPersistentDataSnapshot::msStaleThreshold;
+    return ((getMvccTstamp() - m_mvccCheckpoint) >> MVCC_MS_SHIFT) >= static_cast<uint64_t>(g_pserver->snapshot_slip);
}
 
 void dictGCAsyncFree(dictAsyncRehashCtl *async) {
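FStale() itself now compares the snapshot's age against the full configured slip, while the admission test in readQueryFromClient uses slip/2. In sketch form, with the same assumed shift width:

#include <cstdint>
#include <iostream>

constexpr int kMvccMsShift = 20;  // placeholder shift width

// FStale() in one line: a snapshot is stale once it lags the current MVCC
// timestamp by at least the configured slip, in milliseconds.
bool fStale(uint64_t now, uint64_t snapCheckpoint, long long slipMs) {
    return ((now - snapCheckpoint) >> kMvccMsShift)
            >= static_cast<uint64_t>(slipMs);
}

int main() {
    uint64_t snap = 100ull << kMvccMsShift;
    std::cout << fStale(499ull << kMvccMsShift, snap, 400) << '\n';  // 0: 399 ms behind
    std::cout << fStale(500ull << kMvccMsShift, snap, 400) << '\n';  // 1: 400 ms behind
}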