Merge commit 'cbcd07777dc569618a34f59e5fd0de53178f4f1d' into unstable

Former-commit-id: 49784c8122e827bd9de86bbae4d88c313400e86e
John Sully 2020-05-21 18:48:49 -04:00
commit 3384af024d
9 changed files with 76 additions and 46 deletions

View File

@@ -943,13 +943,20 @@ replica-priority 100
 # In all the above cases the default is to delete objects in a blocking way,
 # like if DEL was called. However you can configure each case specifically
 # in order to instead release memory in a non-blocking way like if UNLINK
-# was called, using the following configuration directives:
+# was called, using the following configuration directives.
 
 lazyfree-lazy-eviction no
 lazyfree-lazy-expire no
 lazyfree-lazy-server-del no
 replica-lazy-flush no
 
+# It is also possible, for cases where replacing the user code DEL calls
+# with UNLINK calls is not easy, to modify the default behavior of the DEL
+# command to act exactly like UNLINK, using the following configuration
+# directive:
+
+lazyfree-lazy-user-del no
+
 ################################ THREADED I/O #################################
 
 # Redis is mostly single threaded, however there are certain threaded
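
Worth noting when tuning these directives: even with lazy freeing enabled, Redis and KeyDB still free small objects synchronously, because handing a tiny object to a background thread costs more than freeing it in place. The sketch below models that decision; the threshold value mirrors LAZYFREE_THRESHOLD from Redis's lazyfree.c, and every other name is a hypothetical stand-in, not the actual server code.

#include <stddef.h>
#include <stdio.h>

/* Mirrors LAZYFREE_THRESHOLD in Redis's lazyfree.c: objects whose free
 * effort (roughly, element count) is below it are freed inline, since a
 * background job would cost more than it saves. */
#define LAZYFREE_THRESHOLD 64

/* Hypothetical stand-in for the real object and job-queue machinery. */
typedef struct { size_t num_elements; } robj_sketch;

static void free_inline(robj_sketch *o)     { printf("freed %zu elems inline\n", o->num_elements); }
static void free_background(robj_sketch *o) { printf("queued %zu elems for a bio thread\n", o->num_elements); }

/* Sketch of the async-delete decision: even when a lazyfree directive
 * (or UNLINK) requests a non-blocking delete, small objects still take
 * the fast synchronous path. */
static void lazy_delete(robj_sketch *o) {
    if (o->num_elements > LAZYFREE_THRESHOLD)
        free_background(o);   /* worth the job-queue overhead */
    else
        free_inline(o);       /* cheaper to free right here */
}

int main(void) {
    robj_sketch small = { 3 }, big = { 1000000 };
    lazy_delete(&small);   /* inline: below the threshold */
    lazy_delete(&big);     /* offloaded to the background thread */
    return 0;
}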

View File

@@ -1849,18 +1849,18 @@ void aclCommand(client *c) {
         }
     } else if (!strcasecmp(sub,"help")) {
         const char *help[] = {
-"LOAD -- Reload users from the ACL file.",
-"SAVE -- Save the current config to the ACL file."
-"LIST -- Show user details in config file format.",
-"USERS -- List all the registered usernames.",
-"SETUSER <username> [attribs ...] -- Create or modify a user.",
-"GETUSER <username> -- Get the user details.",
-"DELUSER <username> [...] -- Delete a list of users.",
-"CAT -- List available categories.",
-"CAT <category> -- List commands inside category.",
-"GENPASS -- Generate a secure user password.",
-"WHOAMI -- Return the current connection username.",
-"LOG [<count> | RESET] -- Show the ACL log entries.",
+"LOAD -- Reload users from the ACL file.",
+"SAVE -- Save the current config to the ACL file.",
+"LIST -- Show user details in config file format.",
+"USERS -- List all the registered usernames.",
+"SETUSER <username> [attribs ...] -- Create or modify a user.",
+"GETUSER <username> -- Get the user details.",
+"DELUSER <username> [...] -- Delete a list of users.",
+"CAT -- List available categories.",
+"CAT <category> -- List commands inside category.",
+"GENPASS -- Generate a secure user password.",
+"WHOAMI -- Return the current connection username.",
+"LOG [<count> | RESET] -- Show the ACL log entries.",
 NULL
         };
         addReplyHelp(c,help);
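
The fix above is a single comma after the SAVE entry. In C and C++, adjacent string literals are concatenated at compile time, so the missing comma silently fused the SAVE and LIST lines into one help string and shifted every later entry, with no compiler diagnostic. A standalone demonstration of the failure mode:

#include <stdio.h>

int main(void) {
    /* Without the comma after "two", the two adjacent literals are
     * concatenated at compile time: the array ends up with three
     * entries instead of four, and no error is ever reported. */
    const char *help[] = {
        "one",
        "two"       /* missing comma, same bug as the hunk above */
        "three",
        "four",
        NULL
    };
    for (int i = 0; help[i] != NULL; i++)
        printf("%d: %s\n", i, help[i]);
    /* Output:
     *   0: one
     *   1: twothree
     *   2: four */
    return 0;
}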

View File

@@ -660,7 +660,7 @@ void delGenericCommand(client *c, int lazy) {
 }
 
 void delCommand(client *c) {
-    delGenericCommand(c,0);
+    delGenericCommand(c,g_pserver->lazyfree_lazy_user_del);
 }
 
 void unlinkCommand(client *c) {
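
This one-line change is the whole feature: DEL and UNLINK already share delGenericCommand() and differ only in the lazy flag, so wiring the flag to the new directive makes DEL's behavior configurable. A minimal sketch of the dispatch, using hypothetical stand-ins rather than the real KeyDB types:

#include <stdio.h>

/* Hypothetical stand-in for g_pserver->lazyfree_lazy_user_del. */
static int lazyfree_lazy_user_del = 0;

/* Sketch of delGenericCommand(): the single lazy flag selects between
 * a synchronous free and handing the object to a background thread. */
static void del_generic(const char *key, int lazy) {
    if (lazy)
        printf("%s: unlinked, freed asynchronously\n", key);
    else
        printf("%s: freed synchronously\n", key);
}

/* DEL now forwards the configured flag instead of a hard-coded 0... */
static void del_command(const char *key)    { del_generic(key, lazyfree_lazy_user_del); }
/* ...while UNLINK is always lazy. */
static void unlink_command(const char *key) { del_generic(key, 1); }

int main(void) {
    del_command("k1");              /* blocking: directive is off */
    lazyfree_lazy_user_del = 1;     /* i.e. "lazyfree-lazy-user-del yes" */
    del_command("k1");              /* now identical to UNLINK */
    unlink_command("k2");
    return 0;
}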

View File

@@ -178,6 +178,8 @@ client *createClient(connection *conn, int iel) {
     memset(c->uuid, 0, UUID_BINARY_LEN);
 
     c->client_tracking_prefixes = NULL;
+    c->client_cron_last_memory_usage = 0;
+    c->client_cron_last_memory_type = CLIENT_TYPE_NORMAL;
     c->auth_callback = NULL;
     c->auth_callback_privdata = NULL;
     c->auth_module = NULL;
@@ -1577,6 +1579,11 @@ bool freeClient(client *c) {
         listDelNode(g_pserver->clients_to_close,ln);
     }
 
+    /* Remove the contribution that this client gave to our
+     * incrementally computed memory usage. */
+    g_pserver->stat_clients_type_memory[c->client_cron_last_memory_type] -=
+        c->client_cron_last_memory_usage;
+
     /* Release other dynamically allocated client structure fields,
      * and finally release the client structure itself. */
     zfree(c->bufAsync);
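
The subtraction in freeClient() preserves the invariant that each per-type total equals the sum of the contributions of currently connected clients; without it, every disconnect would leak the client's last recorded usage into the aggregate forever. A miniature of that invariant, with hypothetical names:

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical miniature of the accounting state: one aggregate bucket
 * per client type, plus each client's last recorded contribution. */
enum { TYPE_NORMAL = 0, TYPE_SLAVE = 1, TYPE_COUNT = 2 };
static size_t type_memory[TYPE_COUNT];

typedef struct {
    size_t last_usage;  /* contribution currently included in a bucket */
    int    last_type;   /* which bucket it was added to */
} client_acct;

/* Mirror of the freeClient() hunk: undo the client's contribution so
 * "bucket == sum over live clients" keeps holding after the free. */
static void free_client(client_acct *c) {
    type_memory[c->last_type] -= c->last_usage;
    free(c);
}

int main(void) {
    client_acct *c = malloc(sizeof *c);
    if (!c) return 1;
    c->last_usage = 4096;
    c->last_type = TYPE_NORMAL;
    type_memory[c->last_type] += c->last_usage;  /* as the cron would do */
    free_client(c);
    printf("normal bucket after free: %zu\n", type_memory[TYPE_NORMAL]); /* 0 */
    return 0;
}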

View File

@@ -1025,32 +1025,15 @@ struct redisMemOverhead *getMemoryOverheadData(void) {
     mh->repl_backlog = mem;
     mem_total += mem;
 
-    mem = 0;
-    if (listLength(g_pserver->clients)) {
-        listIter li;
-        listNode *ln;
-        size_t mem_normal = 0, mem_slaves = 0;
-
-        listRewind(g_pserver->clients,&li);
-        while((ln = listNext(&li))) {
-            size_t mem_curr = 0;
-            client *c = (client*)listNodeValue(ln);
-            std::unique_lock<fastlock> ul(c->lock);
-            int type = getClientType(c);
-            mem_curr += getClientOutputBufferMemoryUsage(c);
-            mem_curr += sdsAllocSize(c->querybuf);
-            mem_curr += sizeof(client);
-            if (type == CLIENT_TYPE_SLAVE)
-                mem_slaves += mem_curr;
-            else
-                mem_normal += mem_curr;
-        }
-        mh->clients_slaves = mem_slaves;
-        mh->clients_normal = mem_normal;
-        mem = mem_slaves + mem_normal;
-    }
-    mem_total+=mem;
+    /* Computing the memory used by the clients would be O(N) if done
+     * here online. We use our values computed incrementally by
+     * clientsCronTrackClientsMemUsage(). */
+    mh->clients_slaves = g_pserver->stat_clients_type_memory[CLIENT_TYPE_SLAVE];
+    mh->clients_normal = g_pserver->stat_clients_type_memory[CLIENT_TYPE_MASTER]+
+                         g_pserver->stat_clients_type_memory[CLIENT_TYPE_PUBSUB]+
+                         g_pserver->stat_clients_type_memory[CLIENT_TYPE_NORMAL];
+    mem_total += mh->clients_slaves;
+    mem_total += mh->clients_normal;
 
     mem = 0;
     if (g_pserver->aof_state != AOF_OFF) {
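
The trade visible in this hunk: the deleted loop walked every connected client (taking each client's lock) on every INFO or MEMORY call, while the replacement reads a handful of counters that clientsCronTrackClientsMemUsage() keeps current, accepting figures that may be up to a second stale. A sketch of the two shapes, with hypothetical names:

#include <stddef.h>
#include <stdio.h>

enum { TYPE_NORMAL, TYPE_SLAVE, TYPE_PUBSUB, TYPE_MASTER, TYPE_COUNT };

typedef struct { size_t mem; } client_sketch;

/* Shape of the removed code: O(N) over all clients, plus one lock
 * acquisition each, every time memory stats are requested. */
static size_t clients_mem_online(const client_sketch *clients, size_t n) {
    size_t total = 0;
    for (size_t i = 0; i < n; i++) total += clients[i].mem;
    return total;
}

/* Shape of the replacement: O(1) reads of cron-maintained buckets;
 * "normal" is the same master+pubsub+normal sum the hunk computes. */
static size_t clients_mem_incremental(const size_t buckets[TYPE_COUNT]) {
    return buckets[TYPE_MASTER] + buckets[TYPE_PUBSUB] + buckets[TYPE_NORMAL];
}

int main(void) {
    client_sketch clients[3] = { {100}, {200}, {300} };
    size_t buckets[TYPE_COUNT] = { 600, 0, 0, 0 };  /* kept fresh by the cron */
    printf("online=%zu incremental=%zu\n",
           clients_mem_online(clients, 3),
           clients_mem_incremental(buckets));
    return 0;
}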

View File

@@ -1236,10 +1236,7 @@ int rdbSaveRio(rio *rdb, int *error, int rdbflags, rdbSaveInfo *rsi) {
         if (rdbSaveType(rdb,RDB_OPCODE_SELECTDB) == -1) goto werr;
         if (rdbSaveLen(rdb,j) == -1) goto werr;
 
-        /* Write the RESIZE DB opcode. We trim the size to UINT32_MAX, which
-         * is currently the largest type we are able to represent in RDB sizes.
-         * However this does not limit the actual size of the DB to load since
-         * these sizes are just hints to resize the hash tables. */
+        /* Write the RESIZE DB opcode. */
        uint64_t db_size, expires_size;
        db_size = dictSize(db->pdict);
        expires_size = db->setexpire->size();
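
The dropped comment dates from when RDB lengths were capped at 32 bits; newer RDB versions can encode full 64-bit lengths, so the trimming caveat no longer applies. Either way, the two lengths that follow the RESIZEDB opcode are only hints that let the loader presize its hash tables. A sketch of the save-side pattern, with simplified stand-ins for rdbSaveType()/rdbSaveLen() (the real encoder emits a variable-width length, not raw 8-byte writes):

#include <stdint.h>
#include <stdio.h>

/* Opcode value from the RDB format; the two length fields after it are
 * sizing hints only, so a loader that ignores them still loads the file
 * correctly, just with incremental hash-table rehashing. */
#define OPCODE_RESIZEDB 251

static int save_type(FILE *f, int op) {
    return fputc(op, f) == EOF ? -1 : 0;
}

/* Simplified: raw 8 bytes instead of the real variable-width encoding. */
static int save_len(FILE *f, uint64_t len) {
    return fwrite(&len, sizeof len, 1, f) == 1 ? 0 : -1;
}

static int save_resizedb(FILE *f, uint64_t db_size, uint64_t expires_size) {
    if (save_type(f, OPCODE_RESIZEDB) == -1) return -1;
    if (save_len(f, db_size) == -1) return -1;
    if (save_len(f, expires_size) == -1) return -1;
    return 0;
}

int main(void) {
    FILE *f = fopen("resizedb.bin", "wb");
    if (!f) return 1;
    int rc = save_resizedb(f, 1024, 128);
    fclose(f);
    return rc == 0 ? 0 : 1;
}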

View File

@@ -972,6 +972,7 @@ int luaLogCommand(lua_State *lua) {
         lua_pushstring(lua, "Invalid debug level.");
         return lua_error(lua);
     }
+    if (level < cserver.verbosity) return 0;
 
     /* Glue together all the arguments */
     log = sdsempty();
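
The added check makes redis.log() from Lua honor the server's loglevel the same way serverLog() does in C: messages below the configured verbosity return before any of the argument gluing and sds allocation that follows. A self-contained sketch of the pattern (the names are stand-ins, not the real API):

#include <stdarg.h>
#include <stdio.h>

/* Log levels as in Redis: DEBUG=0, VERBOSE=1, NOTICE=2, WARNING=3. */
enum { LL_DEBUG, LL_VERBOSE, LL_NOTICE, LL_WARNING };

/* Hypothetical stand-in for cserver.verbosity. */
static int server_verbosity = LL_NOTICE;

/* Sketch of the early-return filter the hunk adds: bail out before
 * doing any of the comparatively expensive message formatting. */
static void script_log(int level, const char *fmt, ...) {
    if (level < server_verbosity) return;  /* the added check */
    va_list ap;
    va_start(ap, fmt);
    vfprintf(stderr, fmt, ap);
    va_end(ap);
    fputc('\n', stderr);
}

int main(void) {
    script_log(LL_DEBUG, "dropped: below the notice level");   /* filtered */
    script_log(LL_WARNING, "printed: %d args glued", 1);
    return 0;
}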

View File

@@ -1632,6 +1632,28 @@ int clientsCronTrackExpansiveClients(client *c) {
     return 0; /* This function never terminates the client. */
 }
 
+/* Iterating all the clients in getMemoryOverheadData() is too slow and
+ * in turn would make the INFO command too slow. So we perform this
+ * computation incrementally and track the (not instantaneous but updated
+ * to the second) total memory used by clients using clientsCron() in
+ * a more incremental way (depending on server.hz). */
+int clientsCronTrackClientsMemUsage(client *c) {
+    size_t mem = 0;
+    int type = getClientType(c);
+    mem += getClientOutputBufferMemoryUsage(c);
+    mem += sdsAllocSize(c->querybuf);
+    mem += sizeof(client);
+    /* Now that we have the memory used by the client, remove the old
+     * value from the old category, and add it back. */
+    g_pserver->stat_clients_type_memory[c->client_cron_last_memory_type] -=
+        c->client_cron_last_memory_usage;
+    g_pserver->stat_clients_type_memory[type] += mem;
+    /* Remember what we added and where, to remove it next time. */
+    c->client_cron_last_memory_usage = mem;
+    c->client_cron_last_memory_type = type;
+    return 0;
+}
+
 /* Return the max samples in the memory usage of clients tracked by
  * the function clientsCronTrackExpansiveClients(). */
 void getExpansiveClientsInfo(size_t *in_usage, size_t *out_usage) {
@@ -1695,6 +1717,7 @@ void clientsCron(int iel) {
         if (clientsCronHandleTimeout(c,now)) continue; // Client free'd so don't release the lock
         if (clientsCronResizeQueryBuffer(c)) goto LContinue;
         if (clientsCronTrackExpansiveClients(c)) goto LContinue;
+        if (clientsCronTrackClientsMemUsage(c)) goto LContinue;
     LContinue:
         fastlock_unlock(&c->lock);
     }
@@ -3042,6 +3065,8 @@ void initServer(void) {
     g_pserver->stat_rdb_cow_bytes = 0;
     g_pserver->stat_aof_cow_bytes = 0;
     g_pserver->stat_module_cow_bytes = 0;
+    for (int j = 0; j < CLIENT_TYPE_COUNT; j++)
+        g_pserver->stat_clients_type_memory[j] = 0;
     g_pserver->cron_malloc_stats.zmalloc_used = 0;
     g_pserver->cron_malloc_stats.process_rss = 0;
     g_pserver->cron_malloc_stats.allocator_allocated = 0;
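
Putting the three server.cpp pieces together: the buckets are zeroed at startup, every cron tick moves each visited client's contribution from the bucket it was last counted in to its current one, and freeClient() subtracts the final contribution. A runnable miniature of the update step, hypothetical names throughout:

#include <stddef.h>
#include <stdio.h>

enum { TYPE_NORMAL, TYPE_SLAVE, TYPE_PUBSUB, TYPE_MASTER, TYPE_COUNT };
static size_t type_memory[TYPE_COUNT];  /* per-type totals, read in O(1) */

typedef struct {
    size_t mem_now;     /* stands in for output buffers + query buffer + sizeof(client) */
    int    type_now;    /* stands in for getClientType(c) */
    size_t last_usage;  /* what was last added to a bucket */
    int    last_type;   /* which bucket it was added to */
} client_sketch;

/* One cron tick for one client: subtract the stale contribution from
 * the old bucket, add the fresh one to the current bucket, and remember
 * both so the next tick (or freeClient) can undo it. */
static void cron_track_mem(client_sketch *c) {
    type_memory[c->last_type] -= c->last_usage;
    type_memory[c->type_now]  += c->mem_now;
    c->last_usage = c->mem_now;
    c->last_type  = c->type_now;
}

int main(void) {
    client_sketch c = { .mem_now = 100, .type_now = TYPE_NORMAL };
    cron_track_mem(&c);          /* counted as a normal client */
    c.mem_now = 250;
    c.type_now = TYPE_SLAVE;     /* the client became a replica */
    cron_track_mem(&c);          /* moved between buckets, no drift */
    printf("normal=%zu slave=%zu\n",
           type_memory[TYPE_NORMAL], type_memory[TYPE_SLAVE]); /* 0 and 250 */
    return 0;
}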

View File

@@ -450,6 +450,7 @@ public:
 #define CLIENT_TYPE_SLAVE 1 /* Slaves. */
 #define CLIENT_TYPE_PUBSUB 2 /* Clients subscribed to PubSub channels. */
 #define CLIENT_TYPE_MASTER 3 /* Master. */
+#define CLIENT_TYPE_COUNT 4 /* Total number of client types. */
 #define CLIENT_TYPE_OBUF_COUNT 3 /* Number of clients to expose to output
                                     buffer configuration. Just the first
                                     three: normal, replica, pubsub. */
@@ -1310,6 +1311,13 @@ typedef struct client {
     rax *client_tracking_prefixes; /* A dictionary of prefixes we are already
                                       subscribed to in BCAST mode, in the
                                       context of client side caching. */
+    /* In clientsCronTrackClientsMemUsage() we track the memory usage of
+     * each client and add it to the sum of all the clients of a given type,
+     * however we need to remember the old contribution of each client, and
+     * the category it was in, in order to remove that contribution before
+     * adding the new value. */
+    uint64_t client_cron_last_memory_usage;
+    int client_cron_last_memory_type;
     /* Response buffer */
     int bufpos;
     char buf[PROTO_REPLY_CHUNK_BYTES];
@@ -1709,6 +1717,7 @@ struct redisServer {
     size_t stat_rdb_cow_bytes; /* Copy on write bytes during RDB saving. */
     size_t stat_aof_cow_bytes; /* Copy on write bytes during AOF rewrite. */
     size_t stat_module_cow_bytes; /* Copy on write bytes during module fork. */
+    uint64_t stat_clients_type_memory[CLIENT_TYPE_COUNT]; /* Mem usage by type */
     long long stat_unexpected_error_replies; /* Number of unexpected (aof-loading, replica to master, etc.) error replies */
     /* The following two are used to track instantaneous metrics, like
      * number of operations per second, network traffic. */
@@ -1940,6 +1949,7 @@ struct redisServer {
     int lazyfree_lazy_eviction;
     int lazyfree_lazy_expire;
     int lazyfree_lazy_server_del;
+    int lazyfree_lazy_user_del;
     /* Latency monitor */
     long long latency_monitor_threshold;
     dict *latency_events;