From 795c807fed6e109c2b8706ffc2621643e0180e61 Mon Sep 17 00:00:00 2001 From: Paul Spooren Date: Tue, 9 Jun 2020 16:53:14 -0400 Subject: [PATCH 001/377] LRANK: Add command (the command will be renamed LPOS). The `LRANK` command returns the index (position) of a given element within a list. Using the `direction` argument it is possible to specify going from head to tail (acending, 1) or from tail to head (decending, -1). Only the first found index is returend. The complexity is O(N). When using lists as a queue it can be of interest at what position a given element is, for instance to monitor a job processing through a work queue. This came up within the Python `rq` project which is based on Redis[0]. [0]: https://github.com/rq/rq/issues/1197 Signed-off-by: Paul Spooren --- src/help.h | 5 +++++ src/server.c | 4 ++++ src/server.h | 1 + src/t_list.c | 34 ++++++++++++++++++++++++++++++++++ 4 files changed, 44 insertions(+) diff --git a/src/help.h b/src/help.h index 6d3eb33ed..9b4e90f9f 100644 --- a/src/help.h +++ b/src/help.h @@ -668,6 +668,11 @@ struct commandHelp { "Remove elements from a list", 2, "1.0.0" }, + { "LRANK", + "key direction element", + "Return first index of element in list based on direction", + 2, + "9.9.9" }, { "LSET", "key index element", "Set the value of an element in a list by its index", diff --git a/src/server.c b/src/server.c index e8e711240..767d3374f 100644 --- a/src/server.c +++ b/src/server.c @@ -326,6 +326,10 @@ struct redisCommand redisCommandTable[] = { "write @list", 0,NULL,1,1,1,0,0,0}, + {"lrank",lrankCommand,4, + "read-only fast @list", + 0,NULL,1,1,1,0,0,0}, + {"lrem",lremCommand,4, "write @list", 0,NULL,1,1,1,0,0,0}, diff --git a/src/server.h b/src/server.h index a08585292..68932f656 100644 --- a/src/server.h +++ b/src/server.h @@ -2269,6 +2269,7 @@ void flushdbCommand(client *c); void flushallCommand(client *c); void sortCommand(client *c); void lremCommand(client *c); +void lrankCommand(client *c); void rpoplpushCommand(client *c); void infoCommand(client *c); void mgetCommand(client *c); diff --git a/src/t_list.c b/src/t_list.c index 4770a2272..899a20f47 100644 --- a/src/t_list.c +++ b/src/t_list.c @@ -487,6 +487,40 @@ void ltrimCommand(client *c) { addReply(c,shared.ok); } +void lrankCommand(client *c) { + robj *subject, *obj; + obj = c->argv[3]; + long direction = 0; + long index = 0; + + if ((getLongFromObjectOrReply(c, c->argv[2], &direction, NULL) != C_OK)) + return; + + subject = lookupKeyWriteOrReply(c,c->argv[1],shared.czero); + if (subject == NULL || checkType(c,subject,OBJ_LIST)) return; + + listTypeIterator *li; + if (direction < 0) { + direction = -1; + li = listTypeInitIterator(subject,-1,LIST_HEAD); + } else { + direction = 1; + li = listTypeInitIterator(subject,0,LIST_TAIL); + } + + listTypeEntry entry; + while (listTypeNext(li,&entry)) { + if (listTypeEqual(&entry,obj)) { + break; + } + index++; + } + + listTypeReleaseIterator(li); + + addReplyLongLong(c,index * direction); +} + void lremCommand(client *c) { robj *subject, *obj; obj = c->argv[3]; From 00e400ed66ceafdca0856b5e473195dae217dbb1 Mon Sep 17 00:00:00 2001 From: antirez Date: Wed, 10 Jun 2020 12:40:24 +0200 Subject: [PATCH 002/377] LPOS: implement the final design. 
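
A sketch of the behavior implemented here, written in the style of the Tcl test
suite; the key name and its contents are arbitrary, and the replies follow from
the implementation below:

    r RPUSH mylist a b c 1 2 3 c c
    assert {[r LPOS mylist c] == 2}             ;# first match, scanning from head
    assert {[r LPOS mylist c 2] == 6}           ;# second match from head
    assert {[r LPOS mylist c -1] == 7}          ;# negative matchpos: scan from tail
    assert {[r LPOS mylist c 0 ALL] == {2 6 7}} ;# ALL with matchpos 0: every match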
--- src/server.c | 4 +- src/server.h | 2 +- src/t_list.c | 105 ++++++++++++++++++++++++++++++++++++++++----------- 3 files changed, 87 insertions(+), 24 deletions(-) diff --git a/src/server.c b/src/server.c index 767d3374f..53dccf875 100644 --- a/src/server.c +++ b/src/server.c @@ -326,8 +326,8 @@ struct redisCommand redisCommandTable[] = { "write @list", 0,NULL,1,1,1,0,0,0}, - {"lrank",lrankCommand,4, - "read-only fast @list", + {"lpos",lposCommand,-3, + "read-only @list", 0,NULL,1,1,1,0,0,0}, {"lrem",lremCommand,4, diff --git a/src/server.h b/src/server.h index 68932f656..841e1f941 100644 --- a/src/server.h +++ b/src/server.h @@ -2269,7 +2269,7 @@ void flushdbCommand(client *c); void flushallCommand(client *c); void sortCommand(client *c); void lremCommand(client *c); -void lrankCommand(client *c); +void lposCommand(client *c); void rpoplpushCommand(client *c); void infoCommand(client *c); void mgetCommand(client *c); diff --git a/src/t_list.c b/src/t_list.c index 899a20f47..3735280ff 100644 --- a/src/t_list.c +++ b/src/t_list.c @@ -487,38 +487,101 @@ void ltrimCommand(client *c) { addReply(c,shared.ok); } -void lrankCommand(client *c) { - robj *subject, *obj; - obj = c->argv[3]; - long direction = 0; - long index = 0; +/* LPOS key element [matchpos] [ALL] [RELATIVE] + * + * matchnum is the position of the match, so if it is 1, the first match + * is returned, if it is 2 the second match is returned and so forth. + * It is 1 by default. If negative has the same meaning but the search is + * performed starting from the end of the list. + * + * A matchnum of 0 is accepted only if ALL is given, and means to return + * all the elements. + * + * If ALL is given, instead of returning the single elmenet, a list of + * all the matching elements up to "matchnum" are returned. + * + * The returned elements indexes are always referring to what LINDEX + * would return. So first element from head is 0, and so forth. + * However if RELATIVE is given and a negative matchpos is given, the + * indexes are returned as if the last element of the list is the element 0, + * the penultimante is 1, and so forth. */ +void lposCommand(client *c) { + robj *o, *ele; + ele = c->argv[2]; + int all = 0, direction = LIST_TAIL; + long matchpos = 1; - if ((getLongFromObjectOrReply(c, c->argv[2], &direction, NULL) != C_OK)) + /* Parse the optional "matchpos" argument, and the ALL option. */ + if (c->argc >= 4 && + getLongFromObjectOrReply(c, c->argv[3], &matchpos, NULL) != C_OK) + { return; - - subject = lookupKeyWriteOrReply(c,c->argv[1],shared.czero); - if (subject == NULL || checkType(c,subject,OBJ_LIST)) return; - - listTypeIterator *li; - if (direction < 0) { - direction = -1; - li = listTypeInitIterator(subject,-1,LIST_HEAD); - } else { - direction = 1; - li = listTypeInitIterator(subject,0,LIST_TAIL); } + if (c->argc == 5) { + if (!strcasecmp(c->argv[4]->ptr,"all")) { + all = 1; + } else { + addReply(c,shared.syntaxerr); + return; + } + } + + /* Raise an error on incompatible options. */ + if (!all && matchpos == 0) { + addReplyError(c,"A match position of zero is valid only " + "when using the ALL option"); + return; + } + + /* A negative matchpos means start from the tail. */ + if (matchpos < 0) { + matchpos = -matchpos; + direction = LIST_HEAD; + } + + /* We return NULL or an empty array if there is no such key (or + * if we find no matches, depending on the presence of the ALL option. 
*/ + if ((o = lookupKeyWriteOrReply(c,c->argv[1],NULL)) == NULL) { + if (all) + addReply(c,shared.emptyarray); + else + addReply(c,shared.null[c->resp]); + return; + } + if (checkType(c,o,OBJ_LIST)) return; + + /* If we got the ALL option, prepare to emit an array. */ + void *arraylenptr = NULL; + if (all) arraylenptr = addReplyDeferredLen(c); + + /* Seek the element. */ + listTypeIterator *li; + li = listTypeInitIterator(o,direction == LIST_HEAD ? -1 : 0,direction); listTypeEntry entry; + long llen = listTypeLength(o); + long index = 0, matches = 0, matchindex = -1; while (listTypeNext(li,&entry)) { - if (listTypeEqual(&entry,obj)) { - break; + if (listTypeEqual(&entry,ele)) { + matches++; + matchindex = (direction == LIST_TAIL) ? index : llen - index - 1; + if (all) addReplyLongLong(c,matchindex); + if (matches == matchpos) break; } index++; } - listTypeReleaseIterator(li); - addReplyLongLong(c,index * direction); + /* Reply to the client. Note that arraylenptr is not NULL only if + * the ALL option was selected. */ + if (arraylenptr != NULL) { + setDeferredArrayLen(c,arraylenptr,matches); + } else { + if (matchindex != -1) + addReplyLongLong(c,matchindex); + else + addReply(c,shared.null[c->resp]); + } } void lremCommand(client *c) { From 010dd2b320d7f5642a2fbd821db7046830951c6a Mon Sep 17 00:00:00 2001 From: antirez Date: Thu, 11 Jun 2020 11:18:16 +0200 Subject: [PATCH 003/377] LPOS: update to latest proposal. See https://gist.github.com/antirez/3591c5096bc79cad8b5a992e08304f48 --- src/t_list.c | 99 +++++++++++++++++++++++++++++++--------------------- 1 file changed, 59 insertions(+), 40 deletions(-) diff --git a/src/t_list.c b/src/t_list.c index 3735280ff..653337d78 100644 --- a/src/t_list.c +++ b/src/t_list.c @@ -487,63 +487,75 @@ void ltrimCommand(client *c) { addReply(c,shared.ok); } -/* LPOS key element [matchpos] [ALL] [RELATIVE] +/* LPOS key element [FIRST rank] [COUNT num-matches] [MAXLEN len] * - * matchnum is the position of the match, so if it is 1, the first match + * FIRST "rank" is the position of the match, so if it is 1, the first match * is returned, if it is 2 the second match is returned and so forth. * It is 1 by default. If negative has the same meaning but the search is * performed starting from the end of the list. * - * A matchnum of 0 is accepted only if ALL is given, and means to return - * all the elements. + * If COUNT is given, instead of returning the single element, a list of + * all the matching elements up to "num-matches" are returned. COUNT can + * be combiled with FIRST in order to returning only the element starting + * from the Nth. If COUNT is zero, all the matching elements are returned. * - * If ALL is given, instead of returning the single elmenet, a list of - * all the matching elements up to "matchnum" are returned. + * MAXLEN tells the command to scan a max of len elements. If zero (the + * default), all the elements in the list are scanned if needed. * * The returned elements indexes are always referring to what LINDEX - * would return. So first element from head is 0, and so forth. - * However if RELATIVE is given and a negative matchpos is given, the - * indexes are returned as if the last element of the list is the element 0, - * the penultimante is 1, and so forth. */ + * would return. So first element from head is 0, and so forth. 
*/ void lposCommand(client *c) { robj *o, *ele; ele = c->argv[2]; - int all = 0, direction = LIST_TAIL; - long matchpos = 1; + int direction = LIST_TAIL; + long rank = 1, count = -1, maxlen = 0; /* Count -1: option not given. */ - /* Parse the optional "matchpos" argument, and the ALL option. */ - if (c->argc >= 4 && - getLongFromObjectOrReply(c, c->argv[3], &matchpos, NULL) != C_OK) - { - return; - } + /* Parse the optional arguments. */ + for (int j = 3; j < c->argc; j++) { + char *opt = c->argv[j]->ptr; + int moreargs = (c->argc-1)-j; - if (c->argc == 5) { - if (!strcasecmp(c->argv[4]->ptr,"all")) { - all = 1; + if (!strcasecmp(opt,"FIRST") && moreargs) { + j++; + if (getLongFromObjectOrReply(c, c->argv[j], &rank, NULL) != C_OK) + return; + if (rank == 0) { + addReplyError(c,"FIRST can't be zero: use 1 to start from " + "the first match, 2 from the second, ..."); + return; + } + } else if (!strcasecmp(opt,"COUNT") && moreargs) { + j++; + if (getLongFromObjectOrReply(c, c->argv[j], &count, NULL) != C_OK) + return; + if (count < 0) { + addReplyError(c,"COUNT can't be negative"); + return; + } + } else if (!strcasecmp(opt,"MAXLEN") && moreargs) { + j++; + if (getLongFromObjectOrReply(c, c->argv[j], &maxlen, NULL) != C_OK) + return; + if (maxlen < 0) { + addReplyError(c,"MAXLEN can't be negative"); + return; + } } else { addReply(c,shared.syntaxerr); return; } } - /* Raise an error on incompatible options. */ - if (!all && matchpos == 0) { - addReplyError(c,"A match position of zero is valid only " - "when using the ALL option"); - return; - } - - /* A negative matchpos means start from the tail. */ - if (matchpos < 0) { - matchpos = -matchpos; + /* A negative rank means start from the tail. */ + if (rank < 0) { + rank = -rank; direction = LIST_HEAD; } /* We return NULL or an empty array if there is no such key (or - * if we find no matches, depending on the presence of the ALL option. */ + * if we find no matches, depending on the presence of the COUNT option. */ if ((o = lookupKeyWriteOrReply(c,c->argv[1],NULL)) == NULL) { - if (all) + if (count != -1) addReply(c,shared.emptyarray); else addReply(c,shared.null[c->resp]); @@ -551,9 +563,9 @@ void lposCommand(client *c) { } if (checkType(c,o,OBJ_LIST)) return; - /* If we got the ALL option, prepare to emit an array. */ + /* If we got the COUNT option, prepare to emit an array. */ void *arraylenptr = NULL; - if (all) arraylenptr = addReplyDeferredLen(c); + if (count != -1) arraylenptr = addReplyDeferredLen(c); /* Seek the element. */ listTypeIterator *li; @@ -561,21 +573,28 @@ void lposCommand(client *c) { listTypeEntry entry; long llen = listTypeLength(o); long index = 0, matches = 0, matchindex = -1; - while (listTypeNext(li,&entry)) { + while (listTypeNext(li,&entry) && (maxlen == 0 || index < maxlen)) { if (listTypeEqual(&entry,ele)) { matches++; matchindex = (direction == LIST_TAIL) ? index : llen - index - 1; - if (all) addReplyLongLong(c,matchindex); - if (matches == matchpos) break; + if (matches >= rank) { + if (arraylenptr) { + addReplyLongLong(c,matchindex); + if (count && matches-rank+1 >= count) break; + } else { + break; + } + } } index++; + matchindex = -1; /* Remember if we exit the loop without a match. */ } listTypeReleaseIterator(li); /* Reply to the client. Note that arraylenptr is not NULL only if - * the ALL option was selected. */ + * the COUNT option was selected. 
*/ if (arraylenptr != NULL) { - setDeferredArrayLen(c,arraylenptr,matches); + setDeferredArrayLen(c,arraylenptr,matches-rank+1); } else { if (matchindex != -1) addReplyLongLong(c,matchindex); From 3dc6657ea86cd965695df566cfede140456f5c05 Mon Sep 17 00:00:00 2001 From: antirez Date: Thu, 11 Jun 2020 12:38:51 +0200 Subject: [PATCH 004/377] LPOS: tests + crash fix. --- src/t_list.c | 2 +- tests/unit/type/list.tcl | 44 ++++++++++++++++++++++++++++++++++++++++ 2 files changed, 45 insertions(+), 1 deletion(-) diff --git a/src/t_list.c b/src/t_list.c index 653337d78..e580139ab 100644 --- a/src/t_list.c +++ b/src/t_list.c @@ -554,7 +554,7 @@ void lposCommand(client *c) { /* We return NULL or an empty array if there is no such key (or * if we find no matches, depending on the presence of the COUNT option. */ - if ((o = lookupKeyWriteOrReply(c,c->argv[1],NULL)) == NULL) { + if ((o = lookupKeyRead(c->db,c->argv[1])) == NULL) { if (count != -1) addReply(c,shared.emptyarray); else diff --git a/tests/unit/type/list.tcl b/tests/unit/type/list.tcl index 676896a75..a0c04dcaa 100644 --- a/tests/unit/type/list.tcl +++ b/tests/unit/type/list.tcl @@ -6,6 +6,50 @@ start_server { } { source "tests/unit/type/list-common.tcl" + test {LPOS basic usage} { + r DEL mylist + r RPUSH mylist a b c 1 2 3 c c + assert {[r LPOS mylist a] == 0} + assert {[r LPOS mylist c] == 2} + } + + test {LPOS FIRST (positive and negative rank) option} { + assert {[r LPOS mylist c FIRST 1] == 2} + assert {[r LPOS mylist c FIRST 2] == 6} + assert {[r LPOS mylist c FIRST 4] eq ""} + assert {[r LPOS mylist c FIRST -1] == 7} + assert {[r LPOS mylist c FIRST -2] == 6} + } + + test {LPOS COUNT option} { + assert {[r LPOS mylist c COUNT 0] == {2 6 7}} + assert {[r LPOS mylist c COUNT 1] == {2}} + assert {[r LPOS mylist c COUNT 2] == {2 6}} + assert {[r LPOS mylist c COUNT 100] == {2 6 7}} + } + + test {LPOS COUNT + FIRST option} { + assert {[r LPOS mylist c COUNT 0 FIRST 2] == {6 7}} + assert {[r LPOS mylist c COUNT 2 FIRST -1] == {7 6}} + } + + test {LPOS non existing key} { + assert {[r LPOS mylistxxx c COUNT 0 FIRST 2] eq {}} + } + + test {LPOS no match} { + assert {[r LPOS mylist x COUNT 2 FIRST -1] eq {}} + assert {[r LPOS mylist x FIRST -1] eq {}} + } + + test {LPOS MAXLEN} { + assert {[r LPOS mylist a COUNT 0 MAXLEN 1] == {0}} + assert {[r LPOS mylist c COUNT 0 MAXLEN 1] == {}} + assert {[r LPOS mylist c COUNT 0 MAXLEN 3] == {2}} + assert {[r LPOS mylist c COUNT 0 MAXLEN 3 FIRST -1] == {7 6}} + assert {[r LPOS mylist c COUNT 0 MAXLEN 7 FIRST 2] == {6}} + } + test {LPUSH, RPUSH, LLENGTH, LINDEX, LPOP - ziplist} { # first lpush then rpush assert_equal 1 [r lpush myziplist1 aa] From d05cc51e4e5bcdfbdd0e65578a31b753881194e0 Mon Sep 17 00:00:00 2001 From: antirez Date: Fri, 12 Jun 2020 12:16:19 +0200 Subject: [PATCH 005/377] help.h updated. 
--- src/help.h | 30 ++++++++++++++++++++---------- 1 file changed, 20 insertions(+), 10 deletions(-) diff --git a/src/help.h b/src/help.h index 9b4e90f9f..1b1ac5e08 100644 --- a/src/help.h +++ b/src/help.h @@ -43,6 +43,16 @@ struct commandHelp { "Generate a pseudorandom secure password to use for ACL users", 9, "6.0.0" }, + { "ACL GETUSER", + "username", + "Get the rules for a specific ACL user", + 9, + "6.0.0" }, + { "ACL HELP", + "-", + "Show helpful text about the different subcommands", + 9, + "6.0.0" }, { "ACL LIST", "-", "List the current ACL rules in ACL config file format", @@ -64,7 +74,7 @@ struct commandHelp { 9, "6.0.0" }, { "ACL SETUSER", - "rule [rule ...]", + "username [rule [rule ...]]", "Modify or create the rules for a specific ACL user", 9, "6.0.0" }, @@ -164,7 +174,7 @@ struct commandHelp { 8, "5.0.0" }, { "CLIENT KILL", - "[ip:port] [ID client-id] [TYPE normal|master|slave|pubsub] [ADDR ip:port] [SKIPME yes/no]", + "[ip:port] [ID client-id] [TYPE normal|master|slave|pubsub] [USER username] [ADDR ip:port] [SKIPME yes/no]", "Kill the connection of a client", 8, "2.4.0" }, @@ -182,14 +192,14 @@ struct commandHelp { "ON|OFF|SKIP", "Instruct the server whether to reply to commands", 8, - "3.2" }, + "3.2.0" }, { "CLIENT SETNAME", "connection-name", "Set the current connection name", 8, "2.6.9" }, { "CLIENT TRACKING", - "ON|OFF [REDIRECT client-id] [PREFIX prefix] [BCAST] [OPTIN] [OPTOUT] [NOLOOP]", + "ON|OFF [REDIRECT client-id] [PREFIX prefix [PREFIX prefix ...]] [BCAST] [OPTIN] [OPTOUT] [NOLOOP]", "Enable or disable server assisted client side caching support", 8, "6.0.0" }, @@ -619,7 +629,7 @@ struct commandHelp { 9, "2.8.13" }, { "LATENCY RESET", - "[event]", + "[event [event ...]]", "Reset latency data for one or more events.", 9, "2.8.13" }, @@ -648,6 +658,11 @@ struct commandHelp { "Remove and get the first element in a list", 2, "1.0.0" }, + { "LPOS", + "key element [FIRST rank] [COUNT num-matches] [MAXLEN len]", + "Return the index of matching elements on a list", + 2, + "6.0.6" }, { "LPUSH", "key element [element ...]", "Prepend one or multiple elements to a list", @@ -668,11 +683,6 @@ struct commandHelp { "Remove elements from a list", 2, "1.0.0" }, - { "LRANK", - "key direction element", - "Return first index of element in list based on direction", - 2, - "9.9.9" }, { "LSET", "key index element", "Set the value of an element in a list by its index", From 333f763693ea4d2bf1909f5410621e2ed775ce50 Mon Sep 17 00:00:00 2001 From: Jamie Scott Date: Sun, 12 Apr 2020 17:56:58 -0700 Subject: [PATCH 006/377] minor fix (cherry picked from commit 3ba9724d1637aad9699d652b150ad7860b7016d0) --- redis.conf | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/redis.conf b/redis.conf index 1aa760599..5cea06a47 100644 --- a/redis.conf +++ b/redis.conf @@ -745,9 +745,8 @@ replica-priority 100 # # The ACL Log tracks failed commands and authentication events associated # with ACLs. The ACL Log is useful to troubleshoot failed commands blocked -# by ACLs. The ACL Log is stored in and consumes memory. There is no limit -# to its length.You can reclaim memory with ACL LOG RESET or set a maximum -# length below. +# by ACLs. The ACL Log is stored in memory. You can reclaim memory with +# ACL LOG RESET. Define the maximum entry length of the ACL Log below. 
acllog-max-len 128 # Using an external ACL file From 2f9da08ead73b1a9d9ccc02e88c6fbdc6b3e10f7 Mon Sep 17 00:00:00 2001 From: Benjamin Sergeant Date: Fri, 1 May 2020 20:57:51 -0700 Subject: [PATCH 007/377] Update redis-cli.c (cherry picked from commit 52a477c661beabb3308767a442178824579be912) --- src/redis-cli.c | 25 +++++++++++++++++++------ 1 file changed, 19 insertions(+), 6 deletions(-) diff --git a/src/redis-cli.c b/src/redis-cli.c index 96eb3c3dd..75845f346 100644 --- a/src/redis-cli.c +++ b/src/redis-cli.c @@ -3423,6 +3423,7 @@ static redisReply *clusterManagerMigrateKeysInReply(clusterManagerNode *source, size_t *argv_len = NULL; int c = (replace ? 8 : 7); if (config.auth) c += 2; + if (config.user) c += 1; size_t argc = c + reply->elements; size_t i, offset = 6; // Keys Offset argv = zcalloc(argc * sizeof(char *)); @@ -3449,12 +3450,24 @@ static redisReply *clusterManagerMigrateKeysInReply(clusterManagerNode *source, offset++; } if (config.auth) { - argv[offset] = "AUTH"; - argv_len[offset] = 4; - offset++; - argv[offset] = config.auth; - argv_len[offset] = strlen(config.auth); - offset++; + if (config.user) { + argv[offset] = "AUTH2"; + argv_len[offset] = 5; + offset++; + argv[offset] = config.user; + argv_len[offset] = strlen(config.user); + offset++; + argv[offset] = config.auth; + argv_len[offset] = strlen(config.auth); + offset++; + } else { + argv[offset] = "AUTH"; + argv_len[offset] = 4; + offset++; + argv[offset] = config.auth; + argv_len[offset] = strlen(config.auth); + offset++; + } } argv[offset] = "KEYS"; argv_len[offset] = 4; From 4a03e1e1f8a39623e7835e832c279ab820851333 Mon Sep 17 00:00:00 2001 From: hwware Date: Mon, 8 Jun 2020 23:36:01 -0400 Subject: [PATCH 008/377] fix server crash in STRALGO command (cherry picked from commit 44195a2047efbe4db1b37365bd4ed66ba0f9d306) --- src/t_string.c | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/src/t_string.c b/src/t_string.c index 5306069bf..d1a3e1b96 100644 --- a/src/t_string.c +++ b/src/t_string.c @@ -534,6 +534,13 @@ void stralgoLCS(client *c) { } obja = lookupKeyRead(c->db,c->argv[j+1]); objb = lookupKeyRead(c->db,c->argv[j+2]); + + if ( !(obja->type == OBJ_STRING) || !(objb->type == OBJ_STRING) ) { + addReplyError(c,"Object associate with KEYS option should only be string type"); + return; + + } + obja = obja ? getDecodedObject(obja) : createStringObject("",0); objb = objb ? 
getDecodedObject(objb) : createStringObject("",0); a = obja->ptr; From 3dbde34323e7357f0d86b43cc0b06174108fb120 Mon Sep 17 00:00:00 2001 From: hwware Date: Mon, 8 Jun 2020 23:48:51 -0400 Subject: [PATCH 009/377] fix memory leak (cherry picked from commit 8ab655bd7b92f4cc310cdfbf974c6c8627446628) --- src/t_string.c | 23 +++++++++++------------ 1 file changed, 11 insertions(+), 12 deletions(-) diff --git a/src/t_string.c b/src/t_string.c index d1a3e1b96..8e367ec80 100644 --- a/src/t_string.c +++ b/src/t_string.c @@ -516,13 +516,13 @@ void stralgoLCS(client *c) { withmatchlen = 1; } else if (!strcasecmp(opt,"MINMATCHLEN") && moreargs) { if (getLongLongFromObjectOrReply(c,c->argv[j+1],&minmatchlen,NULL) - != C_OK) return; + != C_OK) goto clean_up_obj; if (minmatchlen < 0) minmatchlen = 0; j++; } else if (!strcasecmp(opt,"STRINGS") && moreargs > 1) { if (a != NULL) { addReplyError(c,"Either use STRINGS or KEYS"); - return; + goto clean_up_obj; } a = c->argv[j+1]->ptr; b = c->argv[j+2]->ptr; @@ -530,17 +530,14 @@ void stralgoLCS(client *c) { } else if (!strcasecmp(opt,"KEYS") && moreargs > 1) { if (a != NULL) { addReplyError(c,"Either use STRINGS or KEYS"); - return; + goto clean_up_obj; } obja = lookupKeyRead(c->db,c->argv[j+1]); objb = lookupKeyRead(c->db,c->argv[j+2]); - if ( !(obja->type == OBJ_STRING) || !(objb->type == OBJ_STRING) ) { addReplyError(c,"Object associate with KEYS option should only be string type"); - return; - + goto clean_up_obj; } - obja = obja ? getDecodedObject(obja) : createStringObject("",0); objb = objb ? getDecodedObject(objb) : createStringObject("",0); a = obja->ptr; @@ -548,7 +545,7 @@ void stralgoLCS(client *c) { j += 2; } else { addReply(c,shared.syntaxerr); - return; + goto clean_up_obj; } } @@ -556,12 +553,12 @@ void stralgoLCS(client *c) { if (a == NULL) { addReplyError(c,"Please specify two strings: " "STRINGS or KEYS options are mandatory"); - return; + goto clean_up_obj; } else if (getlen && getidx) { addReplyError(c, "If you want both the length and indexes, please " "just use IDX."); - return; + goto clean_up_obj; } /* Compute the LCS using the vanilla dynamic programming technique of @@ -696,10 +693,12 @@ void stralgoLCS(client *c) { } /* Cleanup. */ - if (obja) decrRefCount(obja); - if (objb) decrRefCount(objb); sdsfree(result); zfree(lcs); + +clean_up_obj: + if (obja) decrRefCount(obja); + if (objb) decrRefCount(objb); return; } From 9d3e874179d89acc5bfb107f7dd3b995f5d72581 Mon Sep 17 00:00:00 2001 From: antirez Date: Fri, 12 Jun 2020 12:34:44 +0200 Subject: [PATCH 010/377] Fix LCS object type checking. Related to #7379. 
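
With this change a KEYS argument that points at a non-string value is rejected
cleanly. A minimal sketch in the test-suite style (the key name is arbitrary):

    r RPUSH notastring a b c
    catch {r STRALGO LCS KEYS notastring notastring} e
    assert_match {*must contain string values*} $e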
(cherry picked from commit 00e1f87a08f7e0f2e1706a8f937671b83dc63f12) --- src/t_string.c | 27 +++++++++++++++++---------- 1 file changed, 17 insertions(+), 10 deletions(-) diff --git a/src/t_string.c b/src/t_string.c index 8e367ec80..259f43142 100644 --- a/src/t_string.c +++ b/src/t_string.c @@ -516,13 +516,13 @@ void stralgoLCS(client *c) { withmatchlen = 1; } else if (!strcasecmp(opt,"MINMATCHLEN") && moreargs) { if (getLongLongFromObjectOrReply(c,c->argv[j+1],&minmatchlen,NULL) - != C_OK) goto clean_up_obj; + != C_OK) goto cleanup; if (minmatchlen < 0) minmatchlen = 0; j++; } else if (!strcasecmp(opt,"STRINGS") && moreargs > 1) { if (a != NULL) { addReplyError(c,"Either use STRINGS or KEYS"); - goto clean_up_obj; + goto cleanup; } a = c->argv[j+1]->ptr; b = c->argv[j+2]->ptr; @@ -530,13 +530,20 @@ void stralgoLCS(client *c) { } else if (!strcasecmp(opt,"KEYS") && moreargs > 1) { if (a != NULL) { addReplyError(c,"Either use STRINGS or KEYS"); - goto clean_up_obj; + goto cleanup; } obja = lookupKeyRead(c->db,c->argv[j+1]); objb = lookupKeyRead(c->db,c->argv[j+2]); - if ( !(obja->type == OBJ_STRING) || !(objb->type == OBJ_STRING) ) { - addReplyError(c,"Object associate with KEYS option should only be string type"); - goto clean_up_obj; + if ((obja && obja->type != OBJ_STRING) || + (objb && objb->type != OBJ_STRING)) + { + addReplyError(c, + "The specified keys must contain string values"); + /* Don't cleanup the objects, we need to do that + * only after callign getDecodedObject(). */ + obja = NULL; + objb = NULL; + goto cleanup; } obja = obja ? getDecodedObject(obja) : createStringObject("",0); objb = objb ? getDecodedObject(objb) : createStringObject("",0); @@ -545,7 +552,7 @@ void stralgoLCS(client *c) { j += 2; } else { addReply(c,shared.syntaxerr); - goto clean_up_obj; + goto cleanup; } } @@ -553,12 +560,12 @@ void stralgoLCS(client *c) { if (a == NULL) { addReplyError(c,"Please specify two strings: " "STRINGS or KEYS options are mandatory"); - goto clean_up_obj; + goto cleanup; } else if (getlen && getidx) { addReplyError(c, "If you want both the length and indexes, please " "just use IDX."); - goto clean_up_obj; + goto cleanup; } /* Compute the LCS using the vanilla dynamic programming technique of @@ -696,7 +703,7 @@ void stralgoLCS(client *c) { sdsfree(result); zfree(lcs); -clean_up_obj: +cleanup: if (obja) decrRefCount(obja); if (objb) decrRefCount(objb); return; From cb06a48ef3634992ad01c2d11eb65afbfbf98911 Mon Sep 17 00:00:00 2001 From: "meir@redislabs.com" Date: Sun, 14 Jun 2020 10:06:00 +0300 Subject: [PATCH 011/377] Fix RM_ScanKey module api not to return int encoded strings The scan key module API provides the scan callback with the current field name and value (if it exists). Those arguments are RedisModuleString* which means it supposes to point to robj which is encoded as a string. Using createStringObjectFromLongLong function might return robj that points to an integer and so break a module that tries for example to use RedisModule_StringPtrLen on the given field/value. The PR introduces a fix that uses the createObject function and sdsfromlonglong function. Using those function promise that the field and value pass to the to the scan callback will be Strings. The PR also changes the Scan test module to use RedisModule_StringPtrLen to catch the issue. without this, the issue is hidden because RedisModule_ReplyWithString knows to handle integer encoding of the given robj (RedisModuleString). The PR also introduces a new test to verify the issue is solved. 
(cherry picked from commit e37c16e42551a3a5c61e1f8a90cfc672d3e010e4) --- src/module.c | 6 +++--- tests/modules/scan.c | 20 ++++++++++++++++---- tests/unit/moduleapi/scan.tcl | 5 +++++ 3 files changed, 24 insertions(+), 7 deletions(-) diff --git a/src/module.c b/src/module.c index e3a338dad..226c60fd0 100644 --- a/src/module.c +++ b/src/module.c @@ -6708,7 +6708,7 @@ int RM_ScanKey(RedisModuleKey *key, RedisModuleScanCursor *cursor, RedisModuleSc int pos = 0; int64_t ll; while(intsetGet(o->ptr,pos++,&ll)) { - robj *field = createStringObjectFromLongLong(ll); + robj *field = createObject(OBJ_STRING,sdsfromlonglong(ll)); fn(key, field, NULL, privdata); decrRefCount(field); } @@ -6724,12 +6724,12 @@ int RM_ScanKey(RedisModuleKey *key, RedisModuleScanCursor *cursor, RedisModuleSc ziplistGet(p,&vstr,&vlen,&vll); robj *field = (vstr != NULL) ? createStringObject((char*)vstr,vlen) : - createStringObjectFromLongLong(vll); + createObject(OBJ_STRING,sdsfromlonglong(vll)); p = ziplistNext(o->ptr,p); ziplistGet(p,&vstr,&vlen,&vll); robj *value = (vstr != NULL) ? createStringObject((char*)vstr,vlen) : - createStringObjectFromLongLong(vll); + createObject(OBJ_STRING,sdsfromlonglong(vll)); fn(key, field, value, privdata); p = ziplistNext(o->ptr,p); decrRefCount(field); diff --git a/tests/modules/scan.c b/tests/modules/scan.c index afede244b..1576bae9e 100644 --- a/tests/modules/scan.c +++ b/tests/modules/scan.c @@ -55,11 +55,23 @@ void scan_key_callback(RedisModuleKey *key, RedisModuleString* field, RedisModul REDISMODULE_NOT_USED(key); scan_key_pd* pd = privdata; RedisModule_ReplyWithArray(pd->ctx, 2); - RedisModule_ReplyWithString(pd->ctx, field); - if (value) - RedisModule_ReplyWithString(pd->ctx, value); - else + size_t fieldCStrLen; + + // The implementation of RedisModuleString is robj with lots of encodings. + // We want to make sure the robj that passes to this callback in + // String encoded, this is why we use RedisModule_StringPtrLen and + // RedisModule_ReplyWithStringBuffer instead of directly use + // RedisModule_ReplyWithString. 
+ const char* fieldCStr = RedisModule_StringPtrLen(field, &fieldCStrLen); + RedisModule_ReplyWithStringBuffer(pd->ctx, fieldCStr, fieldCStrLen); + if(value){ + size_t valueCStrLen; + const char* valueCStr = RedisModule_StringPtrLen(value, &valueCStrLen); + RedisModule_ReplyWithStringBuffer(pd->ctx, valueCStr, valueCStrLen); + } else { RedisModule_ReplyWithNull(pd->ctx); + } + pd->nreplies++; } diff --git a/tests/unit/moduleapi/scan.tcl b/tests/unit/moduleapi/scan.tcl index de1672e0a..43a0c4d8a 100644 --- a/tests/unit/moduleapi/scan.tcl +++ b/tests/unit/moduleapi/scan.tcl @@ -16,6 +16,11 @@ start_server {tags {"modules"}} { r hmset hh f1 v1 f2 v2 lsort [r scan.scan_key hh] } {{f1 v1} {f2 v2}} + + test {Module scan hash dict with int value} { + r hmset hh1 f1 1 + lsort [r scan.scan_key hh1] + } {{f1 1}} test {Module scan hash dict} { r config set hash-max-ziplist-entries 2 From 09e960ff62b291165a4352394a621d0b9462d712 Mon Sep 17 00:00:00 2001 From: root Date: Mon, 15 Jun 2020 10:18:14 +0800 Subject: [PATCH 012/377] cluster.c remove if of clusterSendFail in markNodeAsFailingIfNeeded (cherry picked from commit 009a2d443a92f30e0f45ade08ce6fea275a5d71f) --- src/cluster.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/cluster.c b/src/cluster.c index 24b14d1dc..332d5a4ac 100644 --- a/src/cluster.c +++ b/src/cluster.c @@ -1255,7 +1255,7 @@ void markNodeAsFailingIfNeeded(clusterNode *node) { /* Broadcast the failing node name to everybody, forcing all the other * reachable nodes to flag the node as FAIL. */ - if (nodeIsMaster(myself)) clusterSendFail(node->name); + clusterSendFail(node->name); clusterDoBeforeSleep(CLUSTER_TODO_UPDATE_STATE|CLUSTER_TODO_SAVE_CONFIG); } From d5515331e24c02c802ed883f0be9d94154ff6b3d Mon Sep 17 00:00:00 2001 From: antirez Date: Tue, 16 Jun 2020 11:09:45 +0200 Subject: [PATCH 013/377] Tracking: fix enableBcastTrackingForPrefix() invalid sdslen() call. Related to #7387. (cherry picked from commit ae770c30349cb4c72393f3b70318371081f3cc65) --- src/tracking.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/tracking.c b/src/tracking.c index eb4113131..8c2dca7ba 100644 --- a/src/tracking.c +++ b/src/tracking.c @@ -102,7 +102,7 @@ void disableTracking(client *c) { /* Set the client 'c' to track the prefix 'prefix'. If the client 'c' is * already registered for the specified prefix, no operation is performed. */ void enableBcastTrackingForPrefix(client *c, char *prefix, size_t plen) { - bcastState *bs = raxFind(PrefixTable,(unsigned char*)prefix,sdslen(prefix)); + bcastState *bs = raxFind(PrefixTable,(unsigned char*)prefix,plen); /* If this is the first client subscribing to such prefix, create * the prefix in the table. */ if (bs == raxNotFound) { From 7412dbbef5e15f3630e43bea7f7385f4a2f318fd Mon Sep 17 00:00:00 2001 From: antirez Date: Tue, 16 Jun 2020 11:45:03 +0200 Subject: [PATCH 014/377] Use cluster connections too, to limit maxclients. See #7401. (cherry picked from commit fc08cafdb0e7f86ddd4ededfed87026b02f8447d) --- src/cluster.c | 7 +++++++ src/cluster.h | 1 + src/networking.c | 23 +++++++++++++++-------- 3 files changed, 23 insertions(+), 8 deletions(-) diff --git a/src/cluster.c b/src/cluster.c index 332d5a4ac..ccbc373ca 100644 --- a/src/cluster.c +++ b/src/cluster.c @@ -691,6 +691,13 @@ void clusterAcceptHandler(aeEventLoop *el, int fd, void *privdata, int mask) { } } +/* Return the approximated number of sockets we are using in order to + * take the cluster bus connections. 
*/ +unsigned long getClusterConnectionsCount(void) { + return server.cluster_enabled ? + (dictSize(server.cluster->nodes)*2) : 0; +} + /* ----------------------------------------------------------------------------- * Key space handling * -------------------------------------------------------------------------- */ diff --git a/src/cluster.h b/src/cluster.h index d3af4a355..596a4629a 100644 --- a/src/cluster.h +++ b/src/cluster.h @@ -283,5 +283,6 @@ typedef struct { clusterNode *getNodeByQuery(client *c, struct redisCommand *cmd, robj **argv, int argc, int *hashslot, int *ask); int clusterRedirectBlockedClientIfNeeded(client *c); void clusterRedirectClient(client *c, clusterNode *n, int hashslot, int error_code); +unsigned long getClusterConnectionsCount(void); #endif /* __CLUSTER_H */ diff --git a/src/networking.c b/src/networking.c index 77b9a6fcf..9d36ed3a2 100644 --- a/src/networking.c +++ b/src/networking.c @@ -892,17 +892,24 @@ static void acceptCommonHandler(connection *conn, int flags, char *ip) { client *c; UNUSED(ip); - /* Admission control will happen before a client is created and connAccept() + /* Limit the number of connections we take at the same time. + * + * Admission control will happen before a client is created and connAccept() * called, because we don't want to even start transport-level negotiation - * if rejected. - */ - if (listLength(server.clients) >= server.maxclients) { - char *err = "-ERR max number of clients reached\r\n"; + * if rejected. */ + if (listLength(server.clients) + getClusterConnectionsCount() + >= server.maxclients) + { + char *err; + if (server.cluster_enabled) + err = "-ERR max number of clients reached\r\n"; + else + err = "-ERR max number of clients + cluster " + "connections reached\r\n"; /* That's a best effort error message, don't check write errors. - * Note that for TLS connections, no handshake was done yet so nothing is written - * and the connection will just drop. - */ + * Note that for TLS connections, no handshake was done yet so nothing + * is written and the connection will just drop. */ if (connWrite(conn,err,strlen(err)) == -1) { /* Nothing to do, Just to avoid the warning... */ } From 1c733df3d87bde1d9f8228b03f078e4190d782de Mon Sep 17 00:00:00 2001 From: chenhui0212 Date: Tue, 16 Jun 2020 17:50:38 +0800 Subject: [PATCH 015/377] fix comments in listpack.c (cherry picked from commit 6b82471098a03babcd9cd9a2ff63ba0d138a4279) --- src/listpack.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/listpack.c b/src/listpack.c index e1f4d9a02..9e77ab12d 100644 --- a/src/listpack.c +++ b/src/listpack.c @@ -773,13 +773,13 @@ unsigned char *lpSeek(unsigned char *lp, long index) { * is past the half of the listpack. */ if (index > numele/2) { forward = 0; - /* Left to right scanning always expects a negative index. Convert + /* Right to left scanning always expects a negative index. Convert * our index to negative form. */ index -= numele; } } else { /* If the listpack length is unspecified, for negative indexes we - * want to always scan left-to-right. */ + * want to always scan right-to-left. 
*/ if (index < 0) forward = 0; } From f13381da22391c451b670de85766293015240ef1 Mon Sep 17 00:00:00 2001 From: Tomasz Poradowski Date: Wed, 17 Jun 2020 22:22:49 +0200 Subject: [PATCH 016/377] ensure SHUTDOWN_NOSAVE in Sentinel mode - enforcing of SHUTDOWN_NOSAVE flag in one place to make it consitent when running in Sentinel mode (cherry picked from commit 6782243054d7f8b5b861b5ae367db22b0f47a3b9) --- src/db.c | 8 -------- src/server.c | 9 +++++++++ 2 files changed, 9 insertions(+), 8 deletions(-) diff --git a/src/db.c b/src/db.c index dc4a0b63e..19b2c48e4 100644 --- a/src/db.c +++ b/src/db.c @@ -963,14 +963,6 @@ void shutdownCommand(client *c) { return; } } - /* When SHUTDOWN is called while the server is loading a dataset in - * memory we need to make sure no attempt is performed to save - * the dataset on shutdown (otherwise it could overwrite the current DB - * with half-read data). - * - * Also when in Sentinel mode clear the SAVE flag and force NOSAVE. */ - if (server.loading || server.sentinel_mode) - flags = (flags & ~SHUTDOWN_SAVE) | SHUTDOWN_NOSAVE; if (prepareForShutdown(flags) == C_OK) exit(0); addReplyError(c,"Errors trying to SHUTDOWN. Check logs."); } diff --git a/src/server.c b/src/server.c index 53dccf875..7c92c9244 100644 --- a/src/server.c +++ b/src/server.c @@ -3680,6 +3680,15 @@ void closeListeningSockets(int unlink_unix_socket) { } int prepareForShutdown(int flags) { + /* When SHUTDOWN is called while the server is loading a dataset in + * memory we need to make sure no attempt is performed to save + * the dataset on shutdown (otherwise it could overwrite the current DB + * with half-read data). + * + * Also when in Sentinel mode clear the SAVE flag and force NOSAVE. */ + if (server.loading || server.sentinel_mode) + flags = (flags & ~SHUTDOWN_SAVE) | SHUTDOWN_NOSAVE; + int save = flags & SHUTDOWN_SAVE; int nosave = flags & SHUTDOWN_NOSAVE; From ee992ea113dd26762137f4cdbada437771f24bba Mon Sep 17 00:00:00 2001 From: chenhui0212 Date: Thu, 18 Jun 2020 17:28:26 +0800 Subject: [PATCH 017/377] Fix comments in function raxLowWalk of listpack.c (cherry picked from commit 44235ac718e34576fa9d202bd86132f5b2976b77) --- src/rax.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/rax.c b/src/rax.c index 7dcf04582..c8a1fb6b4 100644 --- a/src/rax.c +++ b/src/rax.c @@ -487,8 +487,8 @@ static inline size_t raxLowWalk(rax *rax, unsigned char *s, size_t len, raxNode if (h->iscompr) j = 0; /* Compressed node only child is at index 0. */ memcpy(&h,children+j,sizeof(h)); parentlink = children+j; - j = 0; /* If the new node is compressed and we do not - iterate again (since i == l) set the split + j = 0; /* If the new node is non compressed and we do not + iterate again (since i == len) set the split position to 0 to signal this node represents the searched key. 
*/ } From e4e223ad798a65ca42e915330cfcd80c390a71f1 Mon Sep 17 00:00:00 2001 From: hwware Date: Sun, 21 Jun 2020 23:04:28 -0400 Subject: [PATCH 018/377] fix memory leak in sentinel connection sharing (cherry picked from commit bdac36b70e222e9457bad0c682f2f5bc2ba25238) --- src/sentinel.c | 1 + 1 file changed, 1 insertion(+) diff --git a/src/sentinel.c b/src/sentinel.c index fb504ae4d..5be4193dc 100644 --- a/src/sentinel.c +++ b/src/sentinel.c @@ -1076,6 +1076,7 @@ int sentinelTryConnectionSharing(sentinelRedisInstance *ri) { releaseInstanceLink(ri->link,NULL); ri->link = match->link; match->link->refcount++; + dictReleaseIterator(di); return C_OK; } dictReleaseIterator(di); From 93fa64f65a1d5ff944b7f1902d0c5c7eddc13851 Mon Sep 17 00:00:00 2001 From: antirez Date: Mon, 22 Jun 2020 11:21:21 +0200 Subject: [PATCH 019/377] Clarify maxclients and cluster in conf. Remove myself too. (cherry picked from commit c10a7a040f10e32f4a27288499c0c1e104754a1b) --- redis.conf | 5 +++++ src/cluster.c | 5 ++++- 2 files changed, 9 insertions(+), 1 deletion(-) diff --git a/redis.conf b/redis.conf index 5cea06a47..a51ef007d 100644 --- a/redis.conf +++ b/redis.conf @@ -805,6 +805,11 @@ acllog-max-len 128 # Once the limit is reached Redis will close all the new connections sending # an error 'max number of clients reached'. # +# IMPORTANT: When Redis Cluster is used, the max number of connections is also +# shared with the cluster bus: every node in the cluster will use two +# connections, one incoming and another outgoing. It is important to size the +# limit accordingly in case of very large clusters. +# # maxclients 10000 ############################## MEMORY MANAGEMENT ################################ diff --git a/src/cluster.c b/src/cluster.c index ccbc373ca..e15e59fda 100644 --- a/src/cluster.c +++ b/src/cluster.c @@ -694,8 +694,11 @@ void clusterAcceptHandler(aeEventLoop *el, int fd, void *privdata, int mask) { /* Return the approximated number of sockets we are using in order to * take the cluster bus connections. */ unsigned long getClusterConnectionsCount(void) { + /* We decrement the number of nodes by one, since there is the + * "myself" node too in the list. Each node uses two file descriptors, + * one incoming and one outgoing, thus the multiplication by 2. */ return server.cluster_enabled ? - (dictSize(server.cluster->nodes)*2) : 0; + ((dictSize(server.cluster->nodes)-1)*2) : 0; } /* ----------------------------------------------------------------------------- From 5f889001cf3c9a4555d1031714ed3d9be9fc8a84 Mon Sep 17 00:00:00 2001 From: antirez Date: Mon, 22 Jun 2020 11:41:19 +0200 Subject: [PATCH 020/377] Fix BITFIELD i64 type handling, see #7417. (cherry picked from commit 448a8bb0ed9e970a1c6db594a5dc88ded1036695) --- src/bitops.c | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/src/bitops.c b/src/bitops.c index f506a881b..b37bea2bf 100644 --- a/src/bitops.c +++ b/src/bitops.c @@ -257,7 +257,7 @@ int64_t getSignedBitfield(unsigned char *p, uint64_t offset, uint64_t bits) { /* If the top significant bit is 1, propagate it to all the * higher bits for two's complement representation of signed * integers. 
*/ - if (value & ((uint64_t)1 << (bits-1))) + if (bits < 64 && (value & ((uint64_t)1 << (bits-1)))) value |= ((uint64_t)-1) << bits; return value; } @@ -356,7 +356,6 @@ int checkSignedBitfieldOverflow(int64_t value, int64_t incr, uint64_t bits, int handle_wrap: { - uint64_t mask = ((uint64_t)-1) << bits; uint64_t msb = (uint64_t)1 << (bits-1); uint64_t a = value, b = incr, c; c = a+b; /* Perform addition as unsigned so that's defined. */ @@ -364,10 +363,13 @@ handle_wrap: /* If the sign bit is set, propagate to all the higher order * bits, to cap the negative value. If it's clear, mask to * the positive integer limit. */ - if (c & msb) { - c |= mask; - } else { - c &= ~mask; + if (bits < 64) { + uint64_t mask = ((uint64_t)-1) << bits; + if (c & msb) { + c |= mask; + } else { + c &= ~mask; + } } *limit = c; } From 63073e6a447bc804afec53ea16f921de947e1526 Mon Sep 17 00:00:00 2001 From: antirez Date: Mon, 22 Jun 2020 11:44:11 +0200 Subject: [PATCH 021/377] Include cluster.h for getClusterConnectionsCount(). (cherry picked from commit 79e15d1686462c750073ed12c1bfab7fa8b090c8) --- src/networking.c | 1 + 1 file changed, 1 insertion(+) diff --git a/src/networking.c b/src/networking.c index 9d36ed3a2..2795deafd 100644 --- a/src/networking.c +++ b/src/networking.c @@ -29,6 +29,7 @@ #include "server.h" #include "atomicvar.h" +#include "cluster.h" #include #include #include From 851058821319285c4c771c51f65f82740466bfb3 Mon Sep 17 00:00:00 2001 From: Oran Agra Date: Thu, 11 Jun 2020 21:09:35 +0300 Subject: [PATCH 022/377] EXEC always fails with EXECABORT and multi-state is cleared In order to support the use of multi-exec in pipeline, it is important that MULTI and EXEC are never rejected and it is easy for the client to know if the connection is still in multi state. It was easy to make sure MULTI and DISCARD never fail (done by previous commits) since these only change the client state and don't do any actual change in the server, but EXEC is a different story. Since in the past, it was possible for clients to handle some EXEC errors and retry the EXEC, we now can't affort to return any error on EXEC other than EXECABORT, which now carries with it the real reason for the abort too. Other fixes in this commit: - Some checks that where performed at the time of queuing need to be re- validated when EXEC runs, for instance if the transaction contains writes commands, it needs to be aborted. there was one check that was already done in execCommand (-READONLY), but other checks where missing: -OOM, -MISCONF, -NOREPLICAS, -MASTERDOWN - When a command is rejected by processCommand it was rejected with addReply, which was not recognized as an error in case the bad command came from the master. this will enable to count or MONITOR these errors in the future. - make it easier for tests to create additional (non deferred) clients. - add tests for the fixes of this commit. 
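
The client-visible effect, sketched in the style of the new tests (a second
connection, here $r1, changes server state while the transaction is queued;
the key name xx and the NOREPLICAS trigger mirror the added test):

    r set xx 1
    r multi
    r incr xx
    $r1 config set min-replicas-to-write 2
    catch {r exec} e                  ;# -> EXECABORT ... NOREPLICAS ...
    assert_match {*EXECABORT*NOREPLICAS*} $e
    assert_equal 1 [r get xx]         ;# nothing executed, multi state cleared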
(cherry picked from commit fe8d6fe74920798c146a587810ee91ff049a9093) --- src/multi.c | 43 +++++++-------- src/networking.c | 20 ++++--- src/server.c | 93 ++++++++++++++++++++------------- src/server.h | 5 ++ tests/test_helper.tcl | 15 ++++++ tests/unit/multi.tcl | 119 +++++++++++++++++++++++++++++++++--------- 6 files changed, 204 insertions(+), 91 deletions(-) diff --git a/src/multi.c b/src/multi.c index 60a07dfc7..35ddf92af 100644 --- a/src/multi.c +++ b/src/multi.c @@ -36,6 +36,7 @@ void initClientMultiState(client *c) { c->mstate.commands = NULL; c->mstate.count = 0; c->mstate.cmd_flags = 0; + c->mstate.cmd_inv_flags = 0; } /* Release all the resources associated with MULTI/EXEC state */ @@ -76,6 +77,7 @@ void queueMultiCommand(client *c) { incrRefCount(mc->argv[j]); c->mstate.count++; c->mstate.cmd_flags |= c->cmd->flags; + c->mstate.cmd_inv_flags |= ~c->cmd->flags; } void discardTransaction(client *c) { @@ -122,6 +124,23 @@ void execCommandPropagateExec(client *c) { PROPAGATE_AOF|PROPAGATE_REPL); } +/* Aborts a transaction, with a specific error message. + * The transaction is always aboarted with -EXECABORT so that the client knows + * the server exited the multi state, but the actual reason for the abort is + * included too. */ +void execCommandAbort(client *c, sds error) { + discardTransaction(c); + + if (error[0] == '-') error++; + addReplyErrorFormat(c, "-EXECABORT Transaction discarded because of: %s", error); + + /* Send EXEC to clients waiting data from MONITOR. We did send a MULTI + * already, and didn't send any of the queued commands, now we'll just send + * EXEC so it is clear that the transaction is over. */ + if (listLength(server.monitors) && !server.loading) + replicationFeedMonitors(c,server.monitors,c->db->id,c->argv,c->argc); +} + void execCommand(client *c) { int j; robj **orig_argv; @@ -135,15 +154,6 @@ void execCommand(client *c) { return; } - /* If we are in -BUSY state, flag the transaction and return the - * -BUSY error, like Redis <= 5. This is a temporary fix, may be changed - * ASAP, see issue #7353 on Github. */ - if (server.lua_timedout) { - flagTransaction(c); - addReply(c, shared.slowscripterr); - return; - } - /* Check if we need to abort the EXEC because: * 1) Some WATCHed key was touched. * 2) There was a previous error while queueing commands. @@ -157,21 +167,6 @@ void execCommand(client *c) { goto handle_monitor; } - /* If there are write commands inside the transaction, and this is a read - * only slave, we want to send an error. This happens when the transaction - * was initiated when the instance was a master or a writable replica and - * then the configuration changed (for example instance was turned into - * a replica). */ - if (!server.loading && server.masterhost && server.repl_slave_ro && - !(c->flags & CLIENT_MASTER) && c->mstate.cmd_flags & CMD_WRITE) - { - addReplyError(c, - "Transaction contains write commands but instance " - "is now a read-only replica. EXEC aborted."); - discardTransaction(c); - goto handle_monitor; - } - /* Exec all the queued commands */ unwatchAllKeys(c); /* Unwatch ASAP otherwise we'll waste CPU cycles */ orig_argv = c->argv; diff --git a/src/networking.c b/src/networking.c index 2795deafd..d35347991 100644 --- a/src/networking.c +++ b/src/networking.c @@ -407,19 +407,23 @@ void addReplyError(client *c, const char *err) { addReplyErrorLength(c,err,strlen(err)); } +/* See addReplyErrorLength. + * Makes sure there are no newlines in the string, otherwise invalid protocol + * is emitted. 
*/ +void addReplyErrorSafe(client *c, char *s, size_t len) { + size_t j; + for (j = 0; j < len; j++) { + if (s[j] == '\r' || s[j] == '\n') s[j] = ' '; + } + addReplyErrorLength(c,s,sdslen(s)); +} + void addReplyErrorFormat(client *c, const char *fmt, ...) { - size_t l, j; va_list ap; va_start(ap,fmt); sds s = sdscatvprintf(sdsempty(),fmt,ap); va_end(ap); - /* Make sure there are no newlines in the string, otherwise invalid protocol - * is emitted. */ - l = sdslen(s); - for (j = 0; j < l; j++) { - if (s[j] == '\r' || s[j] == '\n') s[j] = ' '; - } - addReplyErrorLength(c,s,sdslen(s)); + addReplyErrorSafe(c, s, sdslen(s)); sdsfree(s); } diff --git a/src/server.c b/src/server.c index 7c92c9244..1f794e4ed 100644 --- a/src/server.c +++ b/src/server.c @@ -3402,6 +3402,34 @@ void call(client *c, int flags) { server.stat_numcommands++; } +/* Used when a command that is ready for execution needs to be rejected, due to + * varios pre-execution checks. it returns the appropriate error to the client. + * If there's a transaction is flags it as dirty, and if the command is EXEC, + * it aborts the transaction. */ +void rejectCommand(client *c, robj *reply) { + flagTransaction(c); + if (c->cmd && c->cmd->proc == execCommand) { + execCommandAbort(c, reply->ptr); + } else { + /* using addReplyError* rather than addReply so that the error can be logged. */ + addReplyErrorSafe(c, reply->ptr, sdslen(reply->ptr)); + } +} + +void rejectCommandFormat(client *c, const char *fmt, ...) { + flagTransaction(c); + va_list ap; + va_start(ap,fmt); + sds s = sdscatvprintf(sdsempty(),fmt,ap); + va_end(ap); + if (c->cmd && c->cmd->proc == execCommand) { + execCommandAbort(c, s); + } else { + addReplyErrorSafe(c, s, sdslen(s)); + } + sdsfree(s); +} + /* If this function gets called we already read a whole * command, arguments are in the client argv/argc fields. * processCommand() execute the command or prepare the @@ -3427,23 +3455,30 @@ int processCommand(client *c) { * such as wrong arity, bad command name and so forth. */ c->cmd = c->lastcmd = lookupCommand(c->argv[0]->ptr); if (!c->cmd) { - flagTransaction(c); sds args = sdsempty(); int i; for (i=1; i < c->argc && sdslen(args) < 128; i++) args = sdscatprintf(args, "`%.*s`, ", 128-(int)sdslen(args), (char*)c->argv[i]->ptr); - addReplyErrorFormat(c,"unknown command `%s`, with args beginning with: %s", + rejectCommandFormat(c,"unknown command `%s`, with args beginning with: %s", (char*)c->argv[0]->ptr, args); sdsfree(args); return C_OK; } else if ((c->cmd->arity > 0 && c->cmd->arity != c->argc) || (c->argc < -c->cmd->arity)) { - flagTransaction(c); - addReplyErrorFormat(c,"wrong number of arguments for '%s' command", + rejectCommandFormat(c,"wrong number of arguments for '%s' command", c->cmd->name); return C_OK; } + int is_write_command = (c->cmd->flags & CMD_WRITE) || + (c->cmd->proc == execCommand && (c->mstate.cmd_flags & CMD_WRITE)); + int is_denyoom_command = (c->cmd->flags & CMD_DENYOOM) || + (c->cmd->proc == execCommand && (c->mstate.cmd_flags & CMD_DENYOOM)); + int is_denystale_command = !(c->cmd->flags & CMD_STALE) || + (c->cmd->proc == execCommand && (c->mstate.cmd_inv_flags & CMD_STALE)); + int is_denyloading_command = !(c->cmd->flags & CMD_LOADING) || + (c->cmd->proc == execCommand && (c->mstate.cmd_inv_flags & CMD_LOADING)); + /* Check if the user is authenticated. This check is skipped in case * the default user is flagged as "nopass" and is active. 
*/ int auth_required = (!(DefaultUser->flags & USER_FLAG_NOPASS) || @@ -3453,8 +3488,7 @@ int processCommand(client *c) { /* AUTH and HELLO and no auth modules are valid even in * non-authenticated state. */ if (!(c->cmd->flags & CMD_NO_AUTH)) { - flagTransaction(c); - addReply(c,shared.noautherr); + rejectCommand(c,shared.noautherr); return C_OK; } } @@ -3465,13 +3499,12 @@ int processCommand(client *c) { int acl_retval = ACLCheckCommandPerm(c,&acl_keypos); if (acl_retval != ACL_OK) { addACLLogEntry(c,acl_retval,acl_keypos,NULL); - flagTransaction(c); if (acl_retval == ACL_DENIED_CMD) - addReplyErrorFormat(c, + rejectCommandFormat(c, "-NOPERM this user has no permissions to run " "the '%s' command or its subcommand", c->cmd->name); else - addReplyErrorFormat(c, + rejectCommandFormat(c, "-NOPERM this user has no permissions to access " "one of the keys used as arguments"); return C_OK; @@ -3519,13 +3552,11 @@ int processCommand(client *c) { * is trying to execute is denied during OOM conditions or the client * is in MULTI/EXEC context? Error. */ if (out_of_memory && - (c->cmd->flags & CMD_DENYOOM || + (is_denyoom_command || (c->flags & CLIENT_MULTI && - c->cmd->proc != execCommand && c->cmd->proc != discardCommand))) { - flagTransaction(c); - addReply(c, shared.oomerr); + rejectCommand(c, shared.oomerr); return C_OK; } @@ -3546,17 +3577,14 @@ int processCommand(client *c) { int deny_write_type = writeCommandsDeniedByDiskError(); if (deny_write_type != DISK_ERROR_TYPE_NONE && server.masterhost == NULL && - (c->cmd->flags & CMD_WRITE || - c->cmd->proc == pingCommand)) + (is_write_command ||c->cmd->proc == pingCommand)) { - flagTransaction(c); if (deny_write_type == DISK_ERROR_TYPE_RDB) - addReply(c, shared.bgsaveerr); + rejectCommand(c, shared.bgsaveerr); else - addReplySds(c, - sdscatprintf(sdsempty(), + rejectCommandFormat(c, "-MISCONF Errors writing to the AOF file: %s\r\n", - strerror(server.aof_last_write_errno))); + strerror(server.aof_last_write_errno)); return C_OK; } @@ -3565,11 +3593,10 @@ int processCommand(client *c) { if (server.masterhost == NULL && server.repl_min_slaves_to_write && server.repl_min_slaves_max_lag && - c->cmd->flags & CMD_WRITE && + is_write_command && server.repl_good_slaves_count < server.repl_min_slaves_to_write) { - flagTransaction(c); - addReply(c, shared.noreplicaserr); + rejectCommand(c, shared.noreplicaserr); return C_OK; } @@ -3577,10 +3604,9 @@ int processCommand(client *c) { * accept write commands if this is our master. */ if (server.masterhost && server.repl_slave_ro && !(c->flags & CLIENT_MASTER) && - c->cmd->flags & CMD_WRITE) + is_write_command) { - flagTransaction(c); - addReply(c, shared.roslaveerr); + rejectCommand(c, shared.roslaveerr); return C_OK; } @@ -3592,7 +3618,7 @@ int processCommand(client *c) { c->cmd->proc != unsubscribeCommand && c->cmd->proc != psubscribeCommand && c->cmd->proc != punsubscribeCommand) { - addReplyErrorFormat(c, + rejectCommandFormat(c, "Can't execute '%s': only (P)SUBSCRIBE / " "(P)UNSUBSCRIBE / PING / QUIT are allowed in this context", c->cmd->name); @@ -3604,17 +3630,16 @@ int processCommand(client *c) { * link with master. */ if (server.masterhost && server.repl_state != REPL_STATE_CONNECTED && server.repl_serve_stale_data == 0 && - !(c->cmd->flags & CMD_STALE)) + is_denystale_command) { - flagTransaction(c); - addReply(c, shared.masterdownerr); + rejectCommand(c, shared.masterdownerr); return C_OK; } /* Loading DB? Return an error if the command has not the * CMD_LOADING flag. 
*/ - if (server.loading && !(c->cmd->flags & CMD_LOADING)) { - addReply(c, shared.loadingerr); + if (server.loading && is_denyloading_command) { + rejectCommand(c, shared.loadingerr); return C_OK; } @@ -3629,7 +3654,6 @@ int processCommand(client *c) { c->cmd->proc != helloCommand && c->cmd->proc != replconfCommand && c->cmd->proc != multiCommand && - c->cmd->proc != execCommand && c->cmd->proc != discardCommand && c->cmd->proc != watchCommand && c->cmd->proc != unwatchCommand && @@ -3640,8 +3664,7 @@ int processCommand(client *c) { c->argc == 2 && tolower(((char*)c->argv[1]->ptr)[0]) == 'k')) { - flagTransaction(c); - addReply(c, shared.slowscripterr); + rejectCommand(c, shared.slowscripterr); return C_OK; } diff --git a/src/server.h b/src/server.h index 841e1f941..6c36385e1 100644 --- a/src/server.h +++ b/src/server.h @@ -666,6 +666,9 @@ typedef struct multiState { int cmd_flags; /* The accumulated command flags OR-ed together. So if at least a command has a given flag, it will be set in this field. */ + int cmd_inv_flags; /* Same as cmd_flags, OR-ing the ~flags. so that it + is possible to know if all the commands have a + certain flag. */ int minreplicas; /* MINREPLICAS for synchronous replication */ time_t minreplicas_timeout; /* MINREPLICAS timeout as unixtime. */ } multiState; @@ -1626,6 +1629,7 @@ void addReplyBulkLongLong(client *c, long long ll); void addReply(client *c, robj *obj); void addReplySds(client *c, sds s); void addReplyBulkSds(client *c, sds s); +void addReplyErrorSafe(client *c, char *s, size_t len); void addReplyError(client *c, const char *err); void addReplyStatus(client *c, const char *status); void addReplyDouble(client *c, double d); @@ -1724,6 +1728,7 @@ void touchWatchedKey(redisDb *db, robj *key); void touchWatchedKeysOnFlush(int dbid); void discardTransaction(client *c); void flagTransaction(client *c); +void execCommandAbort(client *c, sds error); void execCommandPropagateMulti(client *c); void execCommandPropagateExec(client *c); diff --git a/tests/test_helper.tcl b/tests/test_helper.tcl index fba54acb5..ef9bf7fdf 100644 --- a/tests/test_helper.tcl +++ b/tests/test_helper.tcl @@ -196,6 +196,21 @@ proc redis_deferring_client {args} { return $client } +proc redis_client {args} { + set level 0 + if {[llength $args] > 0 && [string is integer [lindex $args 0]]} { + set level [lindex $args 0] + set args [lrange $args 1 end] + } + + # create client that defers reading reply + set client [redis [srv $level "host"] [srv $level "port"] 0 $::tls] + + # select the right db and read the response (OK) + $client select 9 + return $client +} + # Provide easy access to INFO properties. Same semantic as "proc r". proc s {args} { set level 0 diff --git a/tests/unit/multi.tcl b/tests/unit/multi.tcl index 0c70fbde7..44a822ba6 100644 --- a/tests/unit/multi.tcl +++ b/tests/unit/multi.tcl @@ -325,74 +325,145 @@ start_server {tags {"multi"}} { # check that if MULTI arrives during timeout, it is either refused, or # allowed to pass, and we don't end up executing half of the transaction set rd1 [redis_deferring_client] - set rd2 [redis_deferring_client] + set r2 [redis_client] r config set lua-time-limit 10 r set xx 1 $rd1 eval {while true do end} 0 after 200 - catch { $rd2 multi; $rd2 read } e - catch { $rd2 incr xx; $rd2 read } e + catch { $r2 multi; } e + catch { $r2 incr xx; } e r script kill after 200 ; # Give some time to Lua to call the hook again... 
- catch { $rd2 incr xx; $rd2 read } e - catch { $rd2 exec; $rd2 read } e + catch { $r2 incr xx; } e + catch { $r2 exec; } e + assert_match {EXECABORT*previous errors*} $e set xx [r get xx] # make sure that either the whole transcation passed or none of it (we actually expect none) assert { $xx == 1 || $xx == 3} # check that the connection is no longer in multi state - $rd2 ping asdf - set pong [$rd2 read] + set pong [$r2 ping asdf] assert_equal $pong "asdf" + $rd1 close; $r2 close } test {EXEC and script timeout} { # check that if EXEC arrives during timeout, we don't end up executing # half of the transaction, and also that we exit the multi state set rd1 [redis_deferring_client] - set rd2 [redis_deferring_client] + set r2 [redis_client] r config set lua-time-limit 10 r set xx 1 - catch { $rd2 multi; $rd2 read } e - catch { $rd2 incr xx; $rd2 read } e + catch { $r2 multi; } e + catch { $r2 incr xx; } e $rd1 eval {while true do end} 0 after 200 - catch { $rd2 incr xx; $rd2 read } e - catch { $rd2 exec; $rd2 read } e + catch { $r2 incr xx; } e + catch { $r2 exec; } e + assert_match {EXECABORT*BUSY*} $e r script kill after 200 ; # Give some time to Lua to call the hook again... set xx [r get xx] # make sure that either the whole transcation passed or none of it (we actually expect none) assert { $xx == 1 || $xx == 3} - # Discard the transaction since EXEC likely got -BUSY error - # so the client is still in MULTI state. - catch { $rd2 discard ;$rd2 read } e # check that the connection is no longer in multi state - $rd2 ping asdf - set pong [$rd2 read] + set pong [$r2 ping asdf] assert_equal $pong "asdf" + $rd1 close; $r2 close } test {MULTI-EXEC body and script timeout} { # check that we don't run an imcomplete transaction due to some commands # arriving during busy script set rd1 [redis_deferring_client] - set rd2 [redis_deferring_client] + set r2 [redis_client] r config set lua-time-limit 10 r set xx 1 - catch { $rd2 multi; $rd2 read } e - catch { $rd2 incr xx; $rd2 read } e + catch { $r2 multi; } e + catch { $r2 incr xx; } e $rd1 eval {while true do end} 0 after 200 - catch { $rd2 incr xx; $rd2 read } e + catch { $r2 incr xx; } e r script kill after 200 ; # Give some time to Lua to call the hook again... - catch { $rd2 exec; $rd2 read } e + catch { $r2 exec; } e + assert_match {EXECABORT*previous errors*} $e set xx [r get xx] # make sure that either the whole transcation passed or none of it (we actually expect none) assert { $xx == 1 || $xx == 3} # check that the connection is no longer in multi state - $rd2 ping asdf - set pong [$rd2 read] + set pong [$r2 ping asdf] assert_equal $pong "asdf" + $rd1 close; $r2 close + } + + test {just EXEC and script timeout} { + # check that if EXEC arrives during timeout, we don't end up executing + # actual commands during busy script, and also that we exit the multi state + set rd1 [redis_deferring_client] + set r2 [redis_client] + r config set lua-time-limit 10 + r set xx 1 + catch { $r2 multi; } e + catch { $r2 incr xx; } e + $rd1 eval {while true do end} 0 + after 200 + catch { $r2 exec; } e + assert_match {EXECABORT*BUSY*} $e + r script kill + after 200 ; # Give some time to Lua to call the hook again... 
+ set xx [r get xx] + # make we didn't execute the transaction + assert { $xx == 1} + # check that the connection is no longer in multi state + set pong [$r2 ping asdf] + assert_equal $pong "asdf" + $rd1 close; $r2 close + } + + test {exec with write commands and state change} { + # check that exec that contains write commands fails if server state changed since they were queued + set r1 [redis_client] + r set xx 1 + r multi + r incr xx + $r1 config set min-replicas-to-write 2 + catch {r exec} e + assert_match {*EXECABORT*NOREPLICAS*} $e + set xx [r get xx] + # make sure that the INCR wasn't executed + assert { $xx == 1} + $r1 config set min-replicas-to-write 0 + $r1 close; + } + + test {exec with read commands and stale replica state change} { + # check that exec that contains read commands fails if server state changed since they were queued + r config set replica-serve-stale-data no + set r1 [redis_client] + r set xx 1 + + # check that GET is disallowed on stale replica, even if the replica becomes stale only after queuing. + r multi + r get xx + $r1 replicaof localhsot 0 + catch {r exec} e + assert_match {*EXECABORT*MASTERDOWN*} $e + + # check that PING is allowed + r multi + r ping + $r1 replicaof localhsot 0 + set pong [r exec] + assert {$pong == "PONG"} + + # check that when replica is not stale, GET is allowed + # while we're at it, let's check that multi is allowed on stale replica too + r multi + $r1 replicaof no one + r get xx + set xx [r exec] + # make sure that the INCR was executed + assert { $xx == 1 } + $r1 close; } } From a88be2c6eb6587c4a61895bccfeba11c093b6c9d Mon Sep 17 00:00:00 2001 From: Dave Nielsen Date: Tue, 23 Jun 2020 09:51:12 -0700 Subject: [PATCH 023/377] updated copyright year Changed "2015" to "2020" (cherry picked from commit e54ea02c4366632fe546c509453edc91748dbe95) --- COPYING | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/COPYING b/COPYING index ac68e012b..a381681a1 100644 --- a/COPYING +++ b/COPYING @@ -1,4 +1,4 @@ -Copyright (c) 2006-2015, Salvatore Sanfilippo +Copyright (c) 2006-2020, Salvatore Sanfilippo All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: From ecb2743e8a68d9e68caf90b38946037c19a724f0 Mon Sep 17 00:00:00 2001 From: antirez Date: Wed, 24 Jun 2020 09:07:17 +0200 Subject: [PATCH 024/377] LPOS: option FIRST renamed RANK. (cherry picked from commit 5b16c2f1744abf2640549e36fa2744417d427b7c) --- src/t_list.c | 10 +++++----- tests/unit/type/list.tcl | 28 ++++++++++++++-------------- 2 files changed, 19 insertions(+), 19 deletions(-) diff --git a/src/t_list.c b/src/t_list.c index e580139ab..2c339888d 100644 --- a/src/t_list.c +++ b/src/t_list.c @@ -487,16 +487,16 @@ void ltrimCommand(client *c) { addReply(c,shared.ok); } -/* LPOS key element [FIRST rank] [COUNT num-matches] [MAXLEN len] +/* LPOS key element [RANK rank] [COUNT num-matches] [MAXLEN len] * - * FIRST "rank" is the position of the match, so if it is 1, the first match + * The "rank" is the position of the match, so if it is 1, the first match * is returned, if it is 2 the second match is returned and so forth. * It is 1 by default. If negative has the same meaning but the search is * performed starting from the end of the list. * * If COUNT is given, instead of returning the single element, a list of * all the matching elements up to "num-matches" are returned. 
COUNT can - * be combiled with FIRST in order to returning only the element starting + * be combiled with RANK in order to returning only the element starting * from the Nth. If COUNT is zero, all the matching elements are returned. * * MAXLEN tells the command to scan a max of len elements. If zero (the @@ -515,12 +515,12 @@ void lposCommand(client *c) { char *opt = c->argv[j]->ptr; int moreargs = (c->argc-1)-j; - if (!strcasecmp(opt,"FIRST") && moreargs) { + if (!strcasecmp(opt,"RANK") && moreargs) { j++; if (getLongFromObjectOrReply(c, c->argv[j], &rank, NULL) != C_OK) return; if (rank == 0) { - addReplyError(c,"FIRST can't be zero: use 1 to start from " + addReplyError(c,"RANK can't be zero: use 1 to start from " "the first match, 2 from the second, ..."); return; } diff --git a/tests/unit/type/list.tcl b/tests/unit/type/list.tcl index a0c04dcaa..0e39d7d95 100644 --- a/tests/unit/type/list.tcl +++ b/tests/unit/type/list.tcl @@ -13,12 +13,12 @@ start_server { assert {[r LPOS mylist c] == 2} } - test {LPOS FIRST (positive and negative rank) option} { - assert {[r LPOS mylist c FIRST 1] == 2} - assert {[r LPOS mylist c FIRST 2] == 6} - assert {[r LPOS mylist c FIRST 4] eq ""} - assert {[r LPOS mylist c FIRST -1] == 7} - assert {[r LPOS mylist c FIRST -2] == 6} + test {LPOS RANK (positive and negative rank) option} { + assert {[r LPOS mylist c RANK 1] == 2} + assert {[r LPOS mylist c RANK 2] == 6} + assert {[r LPOS mylist c RANK 4] eq ""} + assert {[r LPOS mylist c RANK -1] == 7} + assert {[r LPOS mylist c RANK -2] == 6} } test {LPOS COUNT option} { @@ -28,26 +28,26 @@ start_server { assert {[r LPOS mylist c COUNT 100] == {2 6 7}} } - test {LPOS COUNT + FIRST option} { - assert {[r LPOS mylist c COUNT 0 FIRST 2] == {6 7}} - assert {[r LPOS mylist c COUNT 2 FIRST -1] == {7 6}} + test {LPOS COUNT + RANK option} { + assert {[r LPOS mylist c COUNT 0 RANK 2] == {6 7}} + assert {[r LPOS mylist c COUNT 2 RANK -1] == {7 6}} } test {LPOS non existing key} { - assert {[r LPOS mylistxxx c COUNT 0 FIRST 2] eq {}} + assert {[r LPOS mylistxxx c COUNT 0 RANK 2] eq {}} } test {LPOS no match} { - assert {[r LPOS mylist x COUNT 2 FIRST -1] eq {}} - assert {[r LPOS mylist x FIRST -1] eq {}} + assert {[r LPOS mylist x COUNT 2 RANK -1] eq {}} + assert {[r LPOS mylist x RANK -1] eq {}} } test {LPOS MAXLEN} { assert {[r LPOS mylist a COUNT 0 MAXLEN 1] == {0}} assert {[r LPOS mylist c COUNT 0 MAXLEN 1] == {}} assert {[r LPOS mylist c COUNT 0 MAXLEN 3] == {2}} - assert {[r LPOS mylist c COUNT 0 MAXLEN 3 FIRST -1] == {7 6}} - assert {[r LPOS mylist c COUNT 0 MAXLEN 7 FIRST 2] == {6}} + assert {[r LPOS mylist c COUNT 0 MAXLEN 3 RANK -1] == {7 6}} + assert {[r LPOS mylist c COUNT 0 MAXLEN 7 RANK 2] == {6}} } test {LPUSH, RPUSH, LLENGTH, LINDEX, LPOP - ziplist} { From ae2bcd49db38cfd61c81b705c436ee6d7ca58114 Mon Sep 17 00:00:00 2001 From: antirez Date: Thu, 25 Jun 2020 12:58:21 +0200 Subject: [PATCH 025/377] Update comment to clarify change in #7398. (cherry picked from commit 7bb0342201aa317415da7b512fb69737581bf822) --- src/cluster.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/src/cluster.c b/src/cluster.c index e15e59fda..e7a32a9a2 100644 --- a/src/cluster.c +++ b/src/cluster.c @@ -1264,7 +1264,10 @@ void markNodeAsFailingIfNeeded(clusterNode *node) { node->fail_time = mstime(); /* Broadcast the failing node name to everybody, forcing all the other - * reachable nodes to flag the node as FAIL. */ + * reachable nodes to flag the node as FAIL. 
+ * We do that even if this node is a replica and not a master: anyway + * the failing state is triggered collecting failure reports from masters, + * so here the replica is only helping propagating this status. */ clusterSendFail(node->name); clusterDoBeforeSleep(CLUSTER_TODO_UPDATE_STATE|CLUSTER_TODO_SAVE_CONFIG); } From a17f059d215ddf2e6d8fe75326f9f1e4f8547149 Mon Sep 17 00:00:00 2001 From: "zhaozhao.zz" <276441700@qq.com> Date: Fri, 10 Jul 2020 13:20:27 +0800 Subject: [PATCH 026/377] BITOP: propagate only when it really SET or DEL targetkey (#5783) For example: BITOP not targetkey sourcekey If targetkey and sourcekey doesn't exist, BITOP has no effect, we do not propagate it, thus can save aof and replica flow. (cherry picked from commit 2cf11ce5ca6804df9ff65bc90dbd1dfc5e2e497c) --- src/bitops.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/bitops.c b/src/bitops.c index b37bea2bf..4b1a09aa4 100644 --- a/src/bitops.c +++ b/src/bitops.c @@ -759,11 +759,12 @@ void bitopCommand(client *c) { setKey(c,c->db,targetkey,o); notifyKeyspaceEvent(NOTIFY_STRING,"set",targetkey,c->db->id); decrRefCount(o); + server.dirty++; } else if (dbDelete(c->db,targetkey)) { signalModifiedKey(c,c->db,targetkey); notifyKeyspaceEvent(NOTIFY_GENERIC,"del",targetkey,c->db->id); + server.dirty++; } - server.dirty++; addReplyLongLong(c,maxlen); /* Return the output string length in bytes. */ } From b71330af50d4145c5b6178d5961c7e8bafe24596 Mon Sep 17 00:00:00 2001 From: Oran Agra Date: Fri, 10 Jul 2020 08:25:26 +0300 Subject: [PATCH 027/377] change references to the github repo location (#7479) (cherry picked from commit 8df988207500e0c76e2276a733be54ad443d78bb) --- BUGS | 2 +- README.md | 6 +++--- src/debug.c | 2 +- utils/generate-command-help.rb | 2 +- utils/releasetools/changelog.tcl | 2 +- 5 files changed, 7 insertions(+), 7 deletions(-) diff --git a/BUGS b/BUGS index a8e936892..7af259340 100644 --- a/BUGS +++ b/BUGS @@ -1 +1 @@ -Please check https://github.com/antirez/redis/issues +Please check https://github.com/redis/redis/issues diff --git a/README.md b/README.md index c08013416..55537e01f 100644 --- a/README.md +++ b/README.md @@ -205,8 +205,8 @@ source distribution. Please see the [CONTRIBUTING][2] file in this source distribution for more information. -[1]: https://github.com/antirez/redis/blob/unstable/COPYING -[2]: https://github.com/antirez/redis/blob/unstable/CONTRIBUTING +[1]: https://github.com/redis/redis/blob/unstable/COPYING +[2]: https://github.com/redis/redis/blob/unstable/CONTRIBUTING Redis internals === @@ -236,7 +236,7 @@ Inside the root are the following important directories: * `src`: contains the Redis implementation, written in C. * `tests`: contains the unit tests, implemented in Tcl. -* `deps`: contains libraries Redis uses. Everything needed to compile Redis is inside this directory; your system just needs to provide `libc`, a POSIX compatible interface and a C compiler. Notably `deps` contains a copy of `jemalloc`, which is the default allocator of Redis under Linux. Note that under `deps` there are also things which started with the Redis project, but for which the main repository is not `antirez/redis`. +* `deps`: contains libraries Redis uses. Everything needed to compile Redis is inside this directory; your system just needs to provide `libc`, a POSIX compatible interface and a C compiler. Notably `deps` contains a copy of `jemalloc`, which is the default allocator of Redis under Linux. 
Note that under `deps` there are also things which started with the Redis project, but for which the main repository is not `redis/redis`. There are a few more directories but they are not very important for our goals here. We'll focus mostly on `src`, where the Redis implementation is contained, diff --git a/src/debug.c b/src/debug.c index d79226bf2..ca113bcaa 100644 --- a/src/debug.c +++ b/src/debug.c @@ -1569,7 +1569,7 @@ void sigsegvHandler(int sig, siginfo_t *info, void *secret) { serverLogRaw(LL_WARNING|LL_RAW, "\n=== REDIS BUG REPORT END. Make sure to include from START to END. ===\n\n" " Please report the crash by opening an issue on github:\n\n" -" http://github.com/antirez/redis/issues\n\n" +" http://github.com/redis/redis/issues\n\n" " Suspect RAM error? Use redis-server --test-memory to verify it.\n\n" ); diff --git a/utils/generate-command-help.rb b/utils/generate-command-help.rb index 29acef69d..e57acf4b9 100755 --- a/utils/generate-command-help.rb +++ b/utils/generate-command-help.rb @@ -53,7 +53,7 @@ def commands require "json" require "uri" - url = URI.parse "https://raw.githubusercontent.com/antirez/redis-doc/master/commands.json" + url = URI.parse "https://raw.githubusercontent.com/redis/redis-doc/master/commands.json" client = Net::HTTP.new url.host, url.port client.use_ssl = true response = client.get url.path diff --git a/utils/releasetools/changelog.tcl b/utils/releasetools/changelog.tcl index 06e38ba99..2288794bb 100755 --- a/utils/releasetools/changelog.tcl +++ b/utils/releasetools/changelog.tcl @@ -30,6 +30,6 @@ append template [exec git log $branch~$count..$branch "--format=format:%an in co #Older, more verbose version. # -#append template [exec git log $branch~30..$branch "--format=format:+-------------------------------------------------------------------------------%n| %s%n| By %an, %ai%n+--------------------------------------------------------------------------------%nhttps://github.com/antirez/redis/commit/%H%n%n%b" --stat] +#append template [exec git log $branch~30..$branch "--format=format:+-------------------------------------------------------------------------------%n| %s%n| By %an, %ai%n+--------------------------------------------------------------------------------%nhttps://github.com/redis/redis/commit/%H%n%n%b" --stat] puts $template From 298e93c36015510fb9b865dae593af3ac1f513b1 Mon Sep 17 00:00:00 2001 From: Oran Agra Date: Fri, 10 Jul 2020 08:26:52 +0300 Subject: [PATCH 028/377] tests/valgrind: don't use debug restart (#7404) * tests/valgrind: don't use debug restart DEBUG REATART causes two issues: 1. it uses execve which replaces the original process and valgrind doesn't have a chance to check for errors, so leaks go unreported. 2. valgrind report invalid calls to close() which we're unable to resolve. So now the tests use restart_server mechanism in the tests, that terminates the old server and starts a new one, new PID, but same stdout, stderr. since the stderr can contain two or more valgrind report, it is not enough to just check for the absence of leaks, we also need to check for some known errors, we do both, and fail if we either find an error, or can't find a report saying there are no leaks. other changes: - when killing a server that was already terminated we check for leaks too. - adding DEBUG LEAK which was used to test it. - adding --trace-children to valgrind, although no longer needed. 
- since the stdout contains two or more runs, we need slightly different way of checking if the new process is up (explicitly looking for the new PID) - move the code that handles --wait-server to happen earlier (before watching the startup message in the log), and serve the restarted server too. * squashme - CR fixes (cherry picked from commit 8d4f055e43ab554adfce617c971f10c4b6423484) --- src/debug.c | 4 + tests/integration/psync2.tcl | 6 +- tests/integration/rdb.tcl | 14 +--- tests/support/server.tcl | 147 +++++++++++++++++++++++++---------- 4 files changed, 114 insertions(+), 57 deletions(-) diff --git a/src/debug.c b/src/debug.c index ca113bcaa..a74c22647 100644 --- a/src/debug.c +++ b/src/debug.c @@ -378,6 +378,7 @@ void debugCommand(client *c) { "DEBUG PROTOCOL [string|integer|double|bignum|null|array|set|map|attrib|push|verbatim|true|false]", "ERROR -- Return a Redis protocol error with as message. Useful for clients unit tests to simulate Redis errors.", "LOG -- write message to the server log.", +"LEAK -- Create a memory leak of the input string.", "HTSTATS -- Return hash table statistics of the specified Redis database.", "HTSTATS-KEY -- Like htstats but for the hash table stored as key's value.", "LOADAOF -- Flush the AOF buffers on disk and reload the AOF in memory.", @@ -430,6 +431,9 @@ NULL } else if (!strcasecmp(c->argv[1]->ptr,"log") && c->argc == 3) { serverLog(LL_WARNING, "DEBUG LOG: %s", (char*)c->argv[2]->ptr); addReply(c,shared.ok); + } else if (!strcasecmp(c->argv[1]->ptr,"leak") && c->argc == 3) { + sdsdup(c->argv[2]->ptr); + addReply(c,shared.ok); } else if (!strcasecmp(c->argv[1]->ptr,"reload")) { int flush = 1, save = 1; int flags = RDBFLAGS_NONE; diff --git a/tests/integration/psync2.tcl b/tests/integration/psync2.tcl index 3f636463a..1b996ffd4 100644 --- a/tests/integration/psync2.tcl +++ b/tests/integration/psync2.tcl @@ -280,7 +280,8 @@ start_server {} { set sync_partial_err [status $R($master_id) sync_partial_err] catch { $R($slave_id) config rewrite - $R($slave_id) debug restart + restart_server [expr {0-$slave_id}] true + set R($slave_id) [srv [expr {0-$slave_id}] client] } # note: just waiting for connected_slaves==4 has a race condition since # we might do the check before the master realized that the slave disconnected @@ -328,7 +329,8 @@ start_server {} { catch { $R($slave_id) config rewrite - $R($slave_id) debug restart + restart_server [expr {0-$slave_id}] true + set R($slave_id) [srv [expr {0-$slave_id}] client] } # Reconfigure the slave correctly again, when it's back online. 
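
A minimal usage sketch (hypothetical test body, not part of the patch itself, assuming a server already launched with start_server) of the pattern the reworked tests follow:

    # Instead of "$R debug restart", a test now asks the framework to restart
    # the server process: same stdout/stderr files, new PID, fresh connection.
    restart_server 0 true          ;# level 0 = most recent server, true = wait for readiness
    set R [srv 0 client]           ;# re-fetch the handle; restart_server reconnects it
    assert_equal {PONG} [$R ping]

The second argument maps to wait_ready in the helper added below: when true, the restart blocks until a new "Ready to accept" line appears in the shared stdout.
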
diff --git a/tests/integration/rdb.tcl b/tests/integration/rdb.tcl index 123e9c8b6..b176bf199 100644 --- a/tests/integration/rdb.tcl +++ b/tests/integration/rdb.tcl @@ -137,18 +137,8 @@ test {client freed during loading} { # 100mb of rdb, 100k keys will load in more than 1 second r debug populate 100000 key 1000 - catch { - r debug restart - } + restart_server 0 false - set stdout [srv 0 stdout] - while 1 { - # check that the new server actually started and is ready for connections - if {[exec grep -i "Server initialized" | wc -l < $stdout] > 1} { - break - } - after 10 - } # make sure it's still loading assert_equal [s loading] 1 @@ -180,4 +170,4 @@ test {client freed during loading} { # no need to keep waiting for loading to complete exec kill [srv 0 pid] } -} \ No newline at end of file +} diff --git a/tests/support/server.tcl b/tests/support/server.tcl index 146ebc72c..ea7d0b13c 100644 --- a/tests/support/server.tcl +++ b/tests/support/server.tcl @@ -17,7 +17,14 @@ proc check_valgrind_errors stderr { set buf [read $fd] close $fd + # look for stack trace and other errors, or the absense of a leak free summary if {[regexp -- { at 0x} $buf] || + [regexp -- {Warning} $buf] || + [regexp -- {Invalid} $buf] || + [regexp -- {Mismatched} $buf] || + [regexp -- {uninitialized} $buf] || + [regexp -- {has a fishy} $buf] || + [regexp -- {overlap} $buf] || (![regexp -- {definitely lost: 0 bytes} $buf] && ![regexp -- {no leaks are possible} $buf])} { send_data_packet $::test_server_fd err "Valgrind error: $buf\n" @@ -29,7 +36,13 @@ proc kill_server config { if {$::external} return # nevermind if its already dead - if {![is_alive $config]} { return } + if {![is_alive $config]} { + # Check valgrind errors if needed + if {$::valgrind} { + check_valgrind_errors [dict get $config stderr] + } + return + } set pid [dict get $config pid] # check for leaks @@ -153,6 +166,55 @@ proc create_server_config_file {filename config} { close $fp } +proc spawn_server {config_file stdout stderr} { + if {$::valgrind} { + set pid [exec valgrind --track-origins=yes --trace-children=yes --suppressions=[pwd]/src/valgrind.sup --show-reachable=no --show-possibly-lost=no --leak-check=full src/redis-server $config_file >> $stdout 2>> $stderr &] + } elseif ($::stack_logging) { + set pid [exec /usr/bin/env MallocStackLogging=1 MallocLogFile=/tmp/malloc_log.txt src/redis-server $config_file >> $stdout 2>> $stderr &] + } else { + set pid [exec src/redis-server $config_file >> $stdout 2>> $stderr &] + } + + if {$::wait_server} { + set msg "server started PID: $pid. press any key to continue..." + puts $msg + read stdin 1 + } + + # Tell the test server about this new instance. + send_data_packet $::test_server_fd server-spawned $pid + return $pid +} + +# Wait for actual startup, return 1 if port is busy, 0 otherwise +proc wait_server_started {config_file stdout pid} { + set checkperiod 100; # Milliseconds + set maxiter [expr {120*1000/$checkperiod}] ; # Wait up to 2 minutes. + set port_busy 0 + while 1 { + if {[regexp -- " PID: $pid" [exec cat $stdout]]} { + break + } + after $checkperiod + incr maxiter -1 + if {$maxiter == 0} { + start_server_error $config_file "No PID detected in log $stdout" + puts "--- LOG CONTENT ---" + puts [exec cat $stdout] + puts "-------------------" + break + } + + # Check if the port is actually busy and the server failed + # for this reason. 
+ if {[regexp {Could not create server TCP} [exec cat $stdout]]} { + set port_busy 1 + break + } + } + return $port_busy +} + proc start_server {options {code undefined}} { # If we are running against an external server, we just push the # host/port pair in the stack the first time @@ -248,44 +310,10 @@ proc start_server {options {code undefined}} { send_data_packet $::test_server_fd "server-spawning" "port $port" - if {$::valgrind} { - set pid [exec valgrind --track-origins=yes --suppressions=src/valgrind.sup --show-reachable=no --show-possibly-lost=no --leak-check=full src/redis-server $config_file > $stdout 2> $stderr &] - } elseif ($::stack_logging) { - set pid [exec /usr/bin/env MallocStackLogging=1 MallocLogFile=/tmp/malloc_log.txt src/redis-server $config_file > $stdout 2> $stderr &] - } else { - set pid [exec src/redis-server $config_file > $stdout 2> $stderr &] - } - - # Tell the test server about this new instance. - send_data_packet $::test_server_fd server-spawned $pid + set pid [spawn_server $config_file $stdout $stderr] # check that the server actually started - # ugly but tries to be as fast as possible... - if {$::valgrind} {set retrynum 1000} else {set retrynum 100} - - # Wait for actual startup - set checkperiod 100; # Milliseconds - set maxiter [expr {120*1000/100}] ; # Wait up to 2 minutes. - set port_busy 0 - while {![info exists _pid]} { - regexp {PID:\s(\d+)} [exec cat $stdout] _ _pid - after $checkperiod - incr maxiter -1 - if {$maxiter == 0} { - start_server_error $config_file "No PID detected in log $stdout" - puts "--- LOG CONTENT ---" - puts [exec cat $stdout] - puts "-------------------" - break - } - - # Check if the port is actually busy and the server failed - # for this reason. - if {[regexp {Could not create server TCP} [exec cat $stdout]]} { - set port_busy 1 - break - } - } + set port_busy [wait_server_started $config_file $stdout $pid] # Sometimes we have to try a different port, even if we checked # for availability. Other test clients may grab the port before we @@ -302,6 +330,7 @@ proc start_server {options {code undefined}} { continue; # Try again } + if {$::valgrind} {set retrynum 1000} else {set retrynum 100} if {$code ne "undefined"} { set serverisup [server_is_up $::host $port $retrynum] } else { @@ -345,12 +374,6 @@ proc start_server {options {code undefined}} { error_and_quit $config_file $line } - if {$::wait_server} { - set msg "server started PID: [dict get $srv "pid"]. press any key to continue..." 
- puts $msg - read stdin 1 - } - while 1 { # check that the server actually started and is ready for connections if {[exec grep -i "Ready to accept" | wc -l < $stdout] > 0} { @@ -370,6 +393,9 @@ proc start_server {options {code undefined}} { if {[catch { uplevel 1 $code } error]} { set backtrace $::errorInfo + # fetch srv back from the server list, in case it was restarted by restart_server (new PID) + set srv [lindex $::servers end] + # Kill the server without checking for leaks dict set srv "skipleaks" 1 kill_server $srv @@ -387,6 +413,9 @@ proc start_server {options {code undefined}} { error $error $backtrace } + # fetch srv back from the server list, in case it was restarted by restart_server (new PID) + set srv [lindex $::servers end] + # Don't do the leak check when no tests were run if {$num_tests == $::num_tests} { dict set srv "skipleaks" 1 @@ -402,3 +431,35 @@ proc start_server {options {code undefined}} { set _ $srv } } + +proc restart_server {level wait_ready} { + set srv [lindex $::servers end+$level] + kill_server $srv + + set stdout [dict get $srv "stdout"] + set stderr [dict get $srv "stderr"] + set config_file [dict get $srv "config_file"] + + set prev_ready_count [exec grep -i "Ready to accept" | wc -l < $stdout] + + set pid [spawn_server $config_file $stdout $stderr] + + # check that the server actually started + wait_server_started $config_file $stdout $pid + + # update the pid in the servers list + dict set srv "pid" $pid + # re-set $srv in the servers list + lset ::servers end+$level $srv + + if {$wait_ready} { + while 1 { + # check that the server actually started and is ready for connections + if {[exec grep -i "Ready to accept" | wc -l < $stdout] > $prev_ready_count + 1} { + break + } + after 10 + } + } + reconnect $level +} From c994e73c8e321cd754bbead5c851cb96d6823c4e Mon Sep 17 00:00:00 2001 From: Oran Agra Date: Fri, 10 Jul 2020 08:28:22 +0300 Subject: [PATCH 029/377] stabilize tests that look for log lines (#7367) tests were sensitive to additional log lines appearing in the log causing the search to come empty handed. instead of just looking for the n last log lines, capture the log lines before performing the action, and then search from that offset. (cherry picked from commit efc4189b6227a17f26ed9bd6bbac62bf4bf7ab66) --- tests/integration/replication.tcl | 16 ++++++++++------ tests/support/util.tcl | 22 +++++++++++++++++++--- tests/unit/moduleapi/testrdb.tcl | 6 ++++-- 3 files changed, 33 insertions(+), 11 deletions(-) diff --git a/tests/integration/replication.tcl b/tests/integration/replication.tcl index 7c03c4bc6..d47ec4fe4 100644 --- a/tests/integration/replication.tcl +++ b/tests/integration/replication.tcl @@ -430,6 +430,7 @@ test {diskless loading short read} { } # Start the replication process... 
+ set loglines [count_log_lines -1] $master config set repl-diskless-sync-delay 0 $replica replicaof $master_host $master_port @@ -439,7 +440,7 @@ test {diskless loading short read} { for {set i 0} {$i < $attempts} {incr i} { # wait for the replica to start reading the rdb # using the log file since the replica only responds to INFO once in 2mb - wait_for_log_message -1 "*Loading DB in memory*" 5 2000 1 + wait_for_log_message -1 "*Loading DB in memory*" $loglines 2000 1 # add some additional random sleep so that we kill the master on a different place each time after [expr {int(rand()*100)}] @@ -448,7 +449,7 @@ test {diskless loading short read} { set killed [$master client kill type replica] if {[catch { - set res [wait_for_log_message -1 "*Internal error in RDB*" 5 100 10] + set res [wait_for_log_message -1 "*Internal error in RDB*" $loglines 100 10] if {$::verbose} { puts $res } @@ -461,6 +462,7 @@ test {diskless loading short read} { $master config set repl-backlog-size [expr {16384 + $i}] } # wait for loading to stop (fail) + set loglines [count_log_lines -1] wait_for_condition 100 10 { [s -1 loading] eq 0 } else { @@ -535,6 +537,7 @@ start_server {tags {"repl"}} { # start replication # it's enough for just one replica to be slow, and have it's write handler enabled # so that the whole rdb generation process is bound to that + set loglines [count_log_lines -1] [lindex $replicas 0] config set repl-diskless-load swapdb [lindex $replicas 0] config set key-load-delay 100 [lindex $replicas 0] replicaof $master_host $master_port @@ -542,7 +545,7 @@ start_server {tags {"repl"}} { # wait for the replicas to start reading the rdb # using the log file since the replica only responds to INFO once in 2mb - wait_for_log_message -1 "*Loading DB in memory*" 8 800 10 + wait_for_log_message -1 "*Loading DB in memory*" $loglines 800 10 if {$measure_time} { set master_statfile "/proc/$master_pid/stat" @@ -558,6 +561,7 @@ start_server {tags {"repl"}} { $master incr $all_drop # disconnect replicas depending on the current test + set loglines [count_log_lines -2] if {$all_drop == "all" || $all_drop == "fast"} { exec kill [srv 0 pid] set replicas_alive [lreplace $replicas_alive 1 1] @@ -576,13 +580,13 @@ start_server {tags {"repl"}} { # make sure we got what we were aiming for, by looking for the message in the log file if {$all_drop == "all"} { - wait_for_log_message -2 "*Diskless rdb transfer, last replica dropped, killing fork child*" 12 1 1 + wait_for_log_message -2 "*Diskless rdb transfer, last replica dropped, killing fork child*" $loglines 1 1 } if {$all_drop == "no"} { - wait_for_log_message -2 "*Diskless rdb transfer, done reading from pipe, 2 replicas still up*" 12 1 1 + wait_for_log_message -2 "*Diskless rdb transfer, done reading from pipe, 2 replicas still up*" $loglines 1 1 } if {$all_drop == "slow" || $all_drop == "fast"} { - wait_for_log_message -2 "*Diskless rdb transfer, done reading from pipe, 1 replicas still up*" 12 1 1 + wait_for_log_message -2 "*Diskless rdb transfer, done reading from pipe, 1 replicas still up*" $loglines 1 1 } # make sure we don't have a busy loop going thought epoll_wait diff --git a/tests/support/util.tcl b/tests/support/util.tcl index 8bec95374..fce3ffd18 100644 --- a/tests/support/util.tcl +++ b/tests/support/util.tcl @@ -99,11 +99,27 @@ proc wait_for_ofs_sync {r1 r2} { } } -proc wait_for_log_message {srv_idx pattern last_lines maxtries delay} { +# count current log lines in server's stdout +proc count_log_lines {srv_idx} { + set _ [exec wc -l < [srv $srv_idx 
stdout]] +} + +# verify pattern exists in server's sdtout after a certain line number +proc verify_log_message {srv_idx pattern from_line} { + set lines_after [count_log_lines] + set lines [expr $lines_after - $from_line] + set result [exec tail -$lines < [srv $srv_idx stdout]] + if {![string match $pattern $result]} { + error "assertion:expected message not found in log file: $pattern" + } +} + +# wait for pattern to be found in server's stdout after certain line number +proc wait_for_log_message {srv_idx pattern from_line maxtries delay} { set retry $maxtries set stdout [srv $srv_idx stdout] while {$retry} { - set result [exec tail -$last_lines < $stdout] + set result [exec tail +$from_line < $stdout] set result [split $result "\n"] foreach line $result { if {[string match $pattern $line]} { @@ -114,7 +130,7 @@ proc wait_for_log_message {srv_idx pattern last_lines maxtries delay} { after $delay } if {$retry == 0} { - fail "log message of '$pattern' not found" + fail "log message of '$pattern' not found in $stdout after line: $from_line" } } diff --git a/tests/unit/moduleapi/testrdb.tcl b/tests/unit/moduleapi/testrdb.tcl index a93b34b69..98641ae0a 100644 --- a/tests/unit/moduleapi/testrdb.tcl +++ b/tests/unit/moduleapi/testrdb.tcl @@ -67,6 +67,7 @@ tags "modules" { } # Start the replication process... + set loglines [count_log_lines -1] $master config set repl-diskless-sync-delay 0 $replica replicaof $master_host $master_port @@ -76,7 +77,7 @@ tags "modules" { for {set i 0} {$i < $attempts} {incr i} { # wait for the replica to start reading the rdb # using the log file since the replica only responds to INFO once in 2mb - wait_for_log_message -1 "*Loading DB in memory*" 5 2000 1 + wait_for_log_message -1 "*Loading DB in memory*" $loglines 2000 1 # add some additional random sleep so that we kill the master on a different place each time after [expr {int(rand()*100)}] @@ -85,7 +86,7 @@ tags "modules" { set killed [$master client kill type replica] if {[catch { - set res [wait_for_log_message -1 "*Internal error in RDB*" 5 100 10] + set res [wait_for_log_message -1 "*Internal error in RDB*" $loglines 100 10] if {$::verbose} { puts $res } @@ -98,6 +99,7 @@ tags "modules" { $master config set repl-backlog-size [expr {16384 + $i}] } # wait for loading to stop (fail) + set loglines [count_log_lines -1] wait_for_condition 100 10 { [s -1 loading] eq 0 } else { From cf116e08cc56ffb7576aa4aa82da512ae08ab8c6 Mon Sep 17 00:00:00 2001 From: Oran Agra Date: Fri, 10 Jul 2020 08:29:02 +0300 Subject: [PATCH 030/377] skip a test that uses +inf on valgrind (#7440) On some platforms strtold("+inf") with valgrind returns a non-inf result [err]: INCRBYFLOAT does not allow NaN or Infinity in tests/unit/type/incr.tcl Expected 'ERR*would produce*' to equal or match '1189731495357231765085759.....' (cherry picked from commit 6b53c630d92c8fbeb9bea0c406c9454fb68b8467) --- tests/unit/type/incr.tcl | 21 ++++++++++++--------- 1 file changed, 12 insertions(+), 9 deletions(-) diff --git a/tests/unit/type/incr.tcl b/tests/unit/type/incr.tcl index b7a135203..dbf45e455 100644 --- a/tests/unit/type/incr.tcl +++ b/tests/unit/type/incr.tcl @@ -130,15 +130,18 @@ start_server {tags {"incr"}} { format $err } {WRONGTYPE*} - test {INCRBYFLOAT does not allow NaN or Infinity} { - r set foo 0 - set err {} - catch {r incrbyfloat foo +inf} err - set err - # p.s. no way I can force NaN to test it from the API because - # there is no way to increment / decrement by infinity nor to - # perform divisions. 
- } {ERR*would produce*} + # On some platforms strtold("+inf") with valgrind returns a non-inf result + if {!$::valgrind} { + test {INCRBYFLOAT does not allow NaN or Infinity} { + r set foo 0 + set err {} + catch {r incrbyfloat foo +inf} err + set err + # p.s. no way I can force NaN to test it from the API because + # there is no way to increment / decrement by infinity nor to + # perform divisions. + } {ERR*would produce*} + } test {INCRBYFLOAT decrement} { r set foo 1 From 5728ec50d3e552d930d56ce139bc06d750cd7ee0 Mon Sep 17 00:00:00 2001 From: huangzhw Date: Fri, 10 Jul 2020 13:29:44 +0800 Subject: [PATCH 031/377] defrag.c activeDefragSdsListAndDict when defrag sdsele, We can't use (#7492) it to calculate hash, we should use newsds. (cherry picked from commit 84bef6691bbd5c517bafdd09a3ce57fc17d1d66c) --- src/defrag.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/defrag.c b/src/defrag.c index 6e5296632..2d8db8ea5 100644 --- a/src/defrag.c +++ b/src/defrag.c @@ -348,7 +348,7 @@ long activeDefragSdsListAndDict(list *l, dict *d, int dict_val_type) { sdsele = ln->value; if ((newsds = activeDefragSds(sdsele))) { /* When defragging an sds value, we need to update the dict key */ - uint64_t hash = dictGetHash(d, sdsele); + uint64_t hash = dictGetHash(d, newsds); replaceSateliteDictKeyPtrAndOrDefragDictEntry(d, sdsele, newsds, hash, &defragged); ln->value = newsds; defragged++; From da96665c04bc95728b7048da1d11f9e5294cdab8 Mon Sep 17 00:00:00 2001 From: Oran Agra Date: Fri, 10 Jul 2020 10:02:37 +0300 Subject: [PATCH 032/377] RESTORE ABSTTL won't store expired keys into the db (#7472) Similarly to EXPIREAT with TTL in the past, which implicitly deletes the key and return success, RESTORE should not store key that are already expired into the db. When used together with REPLACE it should emit a DEL to keyspace notification and replication stream. (cherry picked from commit 1c35f8741baa0def2f87eeab17898c79f0147d11) --- src/cluster.c | 30 ++++++++++++++++++++++-------- src/expire.c | 18 +++++++++++------- src/server.h | 1 + tests/unit/dump.tcl | 13 ++++++++++++- 4 files changed, 46 insertions(+), 16 deletions(-) diff --git a/src/cluster.c b/src/cluster.c index e7a32a9a2..88b810d13 100644 --- a/src/cluster.c +++ b/src/cluster.c @@ -4988,7 +4988,8 @@ void restoreCommand(client *c) { } /* Make sure this key does not already exist here... */ - if (!replace && lookupKeyWrite(c->db,c->argv[1]) != NULL) { + robj *key = c->argv[1]; + if (!replace && lookupKeyWrite(c->db,key) != NULL) { addReply(c,shared.busykeyerr); return; } @@ -5010,24 +5011,37 @@ void restoreCommand(client *c) { rioInitWithBuffer(&payload,c->argv[3]->ptr); if (((type = rdbLoadObjectType(&payload)) == -1) || - ((obj = rdbLoadObject(type,&payload,c->argv[1]->ptr)) == NULL)) + ((obj = rdbLoadObject(type,&payload,key->ptr)) == NULL)) { addReplyError(c,"Bad data format"); return; } /* Remove the old key if needed. 
*/ - if (replace) dbDelete(c->db,c->argv[1]); + int deleted = 0; + if (replace) + deleted = dbDelete(c->db,key); + + if (ttl && !absttl) ttl+=mstime(); + if (ttl && checkAlreadyExpired(ttl)) { + if (deleted) { + rewriteClientCommandVector(c,2,shared.del,key); + signalModifiedKey(c,c->db,key); + notifyKeyspaceEvent(NOTIFY_GENERIC,"del",key,c->db->id); + server.dirty++; + } + addReply(c, shared.ok); + return; + } /* Create the key and set the TTL if any */ - dbAdd(c->db,c->argv[1],obj); + dbAdd(c->db,key,obj); if (ttl) { - if (!absttl) ttl+=mstime(); - setExpire(c,c->db,c->argv[1],ttl); + setExpire(c,c->db,key,ttl); } objectSetLRUOrLFU(obj,lfu_freq,lru_idle,lru_clock,1000); - signalModifiedKey(c,c->db,c->argv[1]); - notifyKeyspaceEvent(NOTIFY_GENERIC,"restore",c->argv[1],c->db->id); + signalModifiedKey(c,c->db,key); + notifyKeyspaceEvent(NOTIFY_GENERIC,"restore",key,c->db->id); addReply(c,shared.ok); server.dirty++; } diff --git a/src/expire.c b/src/expire.c index 30a27193d..f2d135e2b 100644 --- a/src/expire.c +++ b/src/expire.c @@ -475,6 +475,16 @@ void flushSlaveKeysWithExpireList(void) { } } +int checkAlreadyExpired(long long when) { + /* EXPIRE with negative TTL, or EXPIREAT with a timestamp into the past + * should never be executed as a DEL when load the AOF or in the context + * of a slave instance. + * + * Instead we add the already expired key to the database with expire time + * (possibly in the past) and wait for an explicit DEL from the master. */ + return (when <= mstime() && !server.loading && !server.masterhost); +} + /*----------------------------------------------------------------------------- * Expires Commands *----------------------------------------------------------------------------*/ @@ -502,13 +512,7 @@ void expireGenericCommand(client *c, long long basetime, int unit) { return; } - /* EXPIRE with negative TTL, or EXPIREAT with a timestamp into the past - * should never be executed as a DEL when load the AOF or in the context - * of a slave instance. - * - * Instead we take the other branch of the IF statement setting an expire - * (possibly in the past) and wait for an explicit DEL from the master. */ - if (when <= mstime() && !server.loading && !server.masterhost) { + if (checkAlreadyExpired(when)) { robj *aux; int deleted = server.lazyfree_lazy_expire ? 
dbAsyncDelete(c->db,key) : diff --git a/src/server.h b/src/server.h index 6c36385e1..8c0facd04 100644 --- a/src/server.h +++ b/src/server.h @@ -2070,6 +2070,7 @@ void propagateExpire(redisDb *db, robj *key, int lazy); int expireIfNeeded(redisDb *db, robj *key); long long getExpire(redisDb *db, robj *key); void setExpire(client *c, redisDb *db, robj *key, long long when); +int checkAlreadyExpired(long long when); robj *lookupKey(redisDb *db, robj *key, int flags); robj *lookupKeyRead(redisDb *db, robj *key); robj *lookupKeyWrite(redisDb *db, robj *key); diff --git a/tests/unit/dump.tcl b/tests/unit/dump.tcl index 062d803b5..a9def9206 100644 --- a/tests/unit/dump.tcl +++ b/tests/unit/dump.tcl @@ -36,7 +36,18 @@ start_server {tags {"dump"}} { assert {$ttl >= 2900 && $ttl <= 3100} r get foo } {bar} - + + test {RESTORE with ABSTTL in the past} { + r set foo bar + set encoded [r dump foo] + set now [clock milliseconds] + r debug set-active-expire 0 + r restore foo [expr $now-3000] $encoded absttl REPLACE + catch {r debug object foo} e + r debug set-active-expire 1 + set e + } {ERR no such key} + test {RESTORE can set LRU} { r set foo bar set encoded [r dump foo] From 86e45052c3d38929b820c56e1f78b3415a814a7a Mon Sep 17 00:00:00 2001 From: Oran Agra Date: Sun, 5 Jul 2020 12:36:25 +0300 Subject: [PATCH 033/377] redis-cli --bigkeys fixed to handle non-printable key names (cherry picked from commit 3d760eaea2a32282d0a7edb2ebe88ebf2c07d47a) --- src/redis-cli.c | 40 ++++++++++++++++++++++++---------------- 1 file changed, 24 insertions(+), 16 deletions(-) diff --git a/src/redis-cli.c b/src/redis-cli.c index 75845f346..e1f40373a 100644 --- a/src/redis-cli.c +++ b/src/redis-cli.c @@ -7246,7 +7246,9 @@ static void getKeyTypes(dict *types_dict, redisReply *keys, typeinfo **types) { /* Pipeline TYPE commands */ for(i=0;ielements;i++) { - redisAppendCommand(context, "TYPE %s", keys->element[i]->str); + const char* argv[] = {"TYPE", keys->element[i]->str}; + size_t lens[] = {4, keys->element[i]->len}; + redisAppendCommandArgv(context, 2, argv, lens); } /* Retrieve types */ @@ -7292,15 +7294,21 @@ static void getKeySizes(redisReply *keys, typeinfo **types, if(!types[i] || (!types[i]->sizecmd && !memkeys)) continue; - if (!memkeys) - redisAppendCommand(context, "%s %s", - types[i]->sizecmd, keys->element[i]->str); - else if (memkeys_samples==0) - redisAppendCommand(context, "%s %s %s", - "MEMORY", "USAGE", keys->element[i]->str); - else - redisAppendCommand(context, "%s %s %s SAMPLES %u", - "MEMORY", "USAGE", keys->element[i]->str, memkeys_samples); + if (!memkeys) { + const char* argv[] = {types[i]->sizecmd, keys->element[i]->str}; + size_t lens[] = {strlen(types[i]->sizecmd), keys->element[i]->len}; + redisAppendCommandArgv(context, 2, argv, lens); + } else if (memkeys_samples==0) { + const char* argv[] = {"MEMORY", "USAGE", keys->element[i]->str}; + size_t lens[] = {6, 5, keys->element[i]->len}; + redisAppendCommandArgv(context, 3, argv, lens); + } else { + sds samplesstr = sdsfromlonglong(memkeys_samples); + const char* argv[] = {"MEMORY", "USAGE", keys->element[i]->str, "SAMPLES", samplesstr}; + size_t lens[] = {6, 5, keys->element[i]->len, 7, sdslen(samplesstr)}; + redisAppendCommandArgv(context, 5, argv, lens); + sdsfree(samplesstr); + } } /* Retrieve sizes */ @@ -7396,20 +7404,20 @@ static void findBigKeys(int memkeys, unsigned memkeys_samples) { sampled++; if(type->biggestname, keys->element[i]->str, sizes[i], - !memkeys? 
type->sizeunit: "bytes"); - /* Keep track of biggest key name for this type */ if (type->biggest_key) sdsfree(type->biggest_key); - type->biggest_key = sdsnew(keys->element[i]->str); + type->biggest_key = sdscatrepr(sdsempty(), keys->element[i]->str, keys->element[i]->len); if(!type->biggest_key) { fprintf(stderr, "Failed to allocate memory for key!\n"); exit(1); } + printf( + "[%05.2f%%] Biggest %-6s found so far '%s' with %llu %s\n", + pct, type->name, type->biggest_key, sizes[i], + !memkeys? type->sizeunit: "bytes"); + /* Keep track of the biggest size for this type */ type->biggest = sizes[i]; } From 1f9793c996dabbe2ec6b85a61392feda29bf4f68 Mon Sep 17 00:00:00 2001 From: Oran Agra Date: Tue, 7 Jul 2020 16:15:44 +0300 Subject: [PATCH 034/377] redis-cli --hotkeys fixed to handle non-printable key names (cherry picked from commit a11940b112577b5290a3bf4ef7bea7e3028c7d76) --- src/redis-cli.c | 16 +++++++++++----- 1 file changed, 11 insertions(+), 5 deletions(-) diff --git a/src/redis-cli.c b/src/redis-cli.c index e1f40373a..c5ba48447 100644 --- a/src/redis-cli.c +++ b/src/redis-cli.c @@ -7481,21 +7481,27 @@ static void getKeyFreqs(redisReply *keys, unsigned long long *freqs) { /* Pipeline OBJECT freq commands */ for(i=0;ielements;i++) { - redisAppendCommand(context, "OBJECT freq %s", keys->element[i]->str); + const char* argv[] = {"OBJECT", "FREQ", keys->element[i]->str}; + size_t lens[] = {6, 4, keys->element[i]->len}; + redisAppendCommandArgv(context, 3, argv, lens); } /* Retrieve freqs */ for(i=0;ielements;i++) { if(redisGetReply(context, (void**)&reply)!=REDIS_OK) { + sds keyname = sdscatrepr(sdsempty(), keys->element[i]->str, keys->element[i]->len); fprintf(stderr, "Error getting freq for key '%s' (%d: %s)\n", - keys->element[i]->str, context->err, context->errstr); + keyname, context->err, context->errstr); + sdsfree(keyname); exit(1); } else if(reply->type != REDIS_REPLY_INTEGER) { if(reply->type == REDIS_REPLY_ERROR) { fprintf(stderr, "Error: %s\n", reply->str); exit(1); } else { - fprintf(stderr, "Warning: OBJECT freq on '%s' failed (may have been deleted)\n", keys->element[i]->str); + sds keyname = sdscatrepr(sdsempty(), keys->element[i]->str, keys->element[i]->len); + fprintf(stderr, "Warning: OBJECT freq on '%s' failed (may have been deleted)\n", keyname); + sdsfree(keyname); freqs[i] = 0; } } else { @@ -7566,10 +7572,10 @@ static void findHotKeys(void) { memmove(hotkeys,hotkeys+1,sizeof(hotkeys[0])*k); } counters[k] = freqs[i]; - hotkeys[k] = sdsnew(keys->element[i]->str); + hotkeys[k] = sdscatrepr(sdsempty(), keys->element[i]->str, keys->element[i]->len); printf( "[%05.2f%%] Hot key '%s' found so far with counter %llu\n", - pct, keys->element[i]->str, freqs[i]); + pct, hotkeys[k], freqs[i]); } /* Sleep if we've been directed to do so */ From 6af3d57beb0a9bedb2627ace15841b46121f3897 Mon Sep 17 00:00:00 2001 From: Yossi Gottlieb Date: Fri, 10 Jul 2020 10:25:55 +0300 Subject: [PATCH 035/377] TLS: Add missing redis-cli options. (#7456) * Tests: fix and reintroduce redis-cli tests. These tests have been broken and disabled for 10 years now! * TLS: add remaining redis-cli support. This adds support for the redis-cli --pipe, --rdb and --replica options previously unsupported in --tls mode. * Fix writeConn(). 
(cherry picked from commit 99b920534f7710d544c38b870fd10c6053283d99) --- src/redis-cli.c | 102 +++++++++++++++++++--------- tests/integration/redis-cli.tcl | 115 ++++++++++++++++++++++++++------ tests/test_helper.tcl | 1 + 3 files changed, 166 insertions(+), 52 deletions(-) diff --git a/src/redis-cli.c b/src/redis-cli.c index c5ba48447..0148964bf 100644 --- a/src/redis-cli.c +++ b/src/redis-cli.c @@ -1989,6 +1989,7 @@ static void repl(void) { if (argv == NULL) { printf("Invalid argument(s)\n"); + fflush(stdout); linenoiseFree(line); continue; } else if (argc > 0) { @@ -6784,10 +6785,53 @@ void sendCapa() { sendReplconf("capa", "eof"); } +/* Wrapper around hiredis to allow arbitrary reads and writes. + * + * We piggybacks on top of hiredis to achieve transparent TLS support, + * and use its internal buffers so it can co-exist with commands + * previously/later issued on the connection. + * + * Interface is close to enough to read()/write() so things should mostly + * work transparently. + */ + +/* Write a raw buffer through a redisContext. If we already have something + * in the buffer (leftovers from hiredis operations) it will be written + * as well. + */ +static ssize_t writeConn(redisContext *c, const char *buf, size_t buf_len) +{ + int done = 0; + + c->obuf = sdscatlen(c->obuf, buf, buf_len); + if (redisBufferWrite(c, &done) == REDIS_ERR) { + sdsrange(c->obuf, 0, -(buf_len+1)); + if (!(c->flags & REDIS_BLOCK)) + errno = EAGAIN; + return -1; + } + + size_t left = sdslen(c->obuf); + sdsclear(c->obuf); + if (!done) { + return buf_len - left; + } + + return buf_len; +} + +/* Read raw bytes through a redisContext. The read operation is not greedy + * and may not fill the buffer entirely. + */ +static ssize_t readConn(redisContext *c, char *buf, size_t len) +{ + return c->funcs->read(c, buf, len); +} + /* Sends SYNC and reads the number of bytes in the payload. Used both by * slaveMode() and getRDB(). * returns 0 in case an EOF marker is used. */ -unsigned long long sendSync(int fd, char *out_eof) { +unsigned long long sendSync(redisContext *c, char *out_eof) { /* To start we need to send the SYNC command and return the payload. * The hiredis client lib does not understand this part of the protocol * and we don't want to mess with its buffers, so everything is performed @@ -6796,7 +6840,7 @@ unsigned long long sendSync(int fd, char *out_eof) { ssize_t nread; /* Send the SYNC command. */ - if (write(fd,"SYNC\r\n",6) != 6) { + if (writeConn(c, "SYNC\r\n", 6) != 6) { fprintf(stderr,"Error writing to master\n"); exit(1); } @@ -6804,7 +6848,7 @@ unsigned long long sendSync(int fd, char *out_eof) { /* Read $\r\n, making sure to read just up to "\n" */ p = buf; while(1) { - nread = read(fd,p,1); + nread = readConn(c,p,1); if (nread <= 0) { fprintf(stderr,"Error reading bulk length while SYNCing\n"); exit(1); @@ -6825,11 +6869,10 @@ unsigned long long sendSync(int fd, char *out_eof) { } static void slaveMode(void) { - int fd = context->fd; static char eofmark[RDB_EOF_MARK_SIZE]; static char lastbytes[RDB_EOF_MARK_SIZE]; static int usemark = 0; - unsigned long long payload = sendSync(fd, eofmark); + unsigned long long payload = sendSync(context,eofmark); char buf[1024]; int original_output = config.output; @@ -6849,7 +6892,7 @@ static void slaveMode(void) { while(payload) { ssize_t nread; - nread = read(fd,buf,(payload > sizeof(buf)) ? sizeof(buf) : payload); + nread = readConn(context,buf,(payload > sizeof(buf)) ? 
sizeof(buf) : payload); if (nread <= 0) { fprintf(stderr,"Error reading RDB payload while SYNCing\n"); exit(1); @@ -6892,14 +6935,15 @@ static void slaveMode(void) { /* This function implements --rdb, so it uses the replication protocol in order * to fetch the RDB file from a remote server. */ static void getRDB(clusterManagerNode *node) { - int s, fd; + int fd; + redisContext *s; char *filename; if (node != NULL) { assert(node->context); - s = node->context->fd; + s = node->context; filename = clusterManagerGetNodeRDBFilename(node); } else { - s = context->fd; + s = context; filename = config.rdb_filename; } static char eofmark[RDB_EOF_MARK_SIZE]; @@ -6934,7 +6978,7 @@ static void getRDB(clusterManagerNode *node) { while(payload) { ssize_t nread, nwritten; - nread = read(s,buf,(payload > sizeof(buf)) ? sizeof(buf) : payload); + nread = readConn(s,buf,(payload > sizeof(buf)) ? sizeof(buf) : payload); if (nread <= 0) { fprintf(stderr,"I/O Error reading RDB payload from socket\n"); exit(1); @@ -6968,7 +7012,7 @@ static void getRDB(clusterManagerNode *node) { } else { fprintf(stderr,"Transfer finished with success.\n"); } - close(s); /* Close the file descriptor ASAP as fsync() may take time. */ + redisFree(s); /* Close the file descriptor ASAP as fsync() may take time. */ fsync(fd); close(fd); fprintf(stderr,"Transfer finished with success.\n"); @@ -6985,11 +7029,9 @@ static void getRDB(clusterManagerNode *node) { #define PIPEMODE_WRITE_LOOP_MAX_BYTES (128*1024) static void pipeMode(void) { - int fd = context->fd; long long errors = 0, replies = 0, obuf_len = 0, obuf_pos = 0; - char ibuf[1024*16], obuf[1024*16]; /* Input and output buffers */ + char obuf[1024*16]; /* Output buffer */ char aneterr[ANET_ERR_LEN]; - redisReader *reader = redisReaderCreate(); redisReply *reply; int eof = 0; /* True once we consumed all the standard input. */ int done = 0; @@ -6999,47 +7041,38 @@ static void pipeMode(void) { srand(time(NULL)); /* Use non blocking I/O. */ - if (anetNonBlock(aneterr,fd) == ANET_ERR) { + if (anetNonBlock(aneterr,context->fd) == ANET_ERR) { fprintf(stderr, "Can't set the socket in non blocking mode: %s\n", aneterr); exit(1); } + context->flags &= ~REDIS_BLOCK; + /* Transfer raw protocol and read replies from the server at the same * time. */ while(!done) { int mask = AE_READABLE; if (!eof || obuf_len != 0) mask |= AE_WRITABLE; - mask = aeWait(fd,mask,1000); + mask = aeWait(context->fd,mask,1000); /* Handle the readable state: we can read replies from the server. */ if (mask & AE_READABLE) { - ssize_t nread; int read_error = 0; - /* Read from socket and feed the hiredis reader. */ do { - nread = read(fd,ibuf,sizeof(ibuf)); - if (nread == -1 && errno != EAGAIN && errno != EINTR) { - fprintf(stderr, "Error reading from the server: %s\n", - strerror(errno)); + if (!read_error && redisBufferRead(context) == REDIS_ERR) { read_error = 1; - break; } - if (nread > 0) { - redisReaderFeed(reader,ibuf,nread); - last_read_time = time(NULL); - } - } while(nread > 0); - /* Consume replies. */ - do { - if (redisReaderGetReply(reader,(void**)&reply) == REDIS_ERR) { + reply = NULL; + if (redisGetReply(context, (void **) &reply) == REDIS_ERR) { fprintf(stderr, "Error reading replies from server\n"); exit(1); } if (reply) { + last_read_time = time(NULL); if (reply->type == REDIS_REPLY_ERROR) { fprintf(stderr,"%s\n", reply->str); errors++; @@ -7072,7 +7105,7 @@ static void pipeMode(void) { while(1) { /* Transfer current buffer to server. 
*/ if (obuf_len != 0) { - ssize_t nwritten = write(fd,obuf+obuf_pos,obuf_len); + ssize_t nwritten = writeConn(context,obuf+obuf_pos,obuf_len); if (nwritten == -1) { if (errno != EAGAIN && errno != EINTR) { @@ -7088,6 +7121,10 @@ static void pipeMode(void) { loop_nwritten += nwritten; if (obuf_len != 0) break; /* Can't accept more data. */ } + if (context->err) { + fprintf(stderr, "Server I/O Error: %s\n", context->errstr); + exit(1); + } /* If buffer is empty, load from stdin. */ if (obuf_len == 0 && !eof) { ssize_t nread = read(STDIN_FILENO,obuf,sizeof(obuf)); @@ -7138,7 +7175,6 @@ static void pipeMode(void) { break; } } - redisReaderFree(reader); printf("errors: %lld, replies: %lld\n", errors, replies); if (errors) exit(1); diff --git a/tests/integration/redis-cli.tcl b/tests/integration/redis-cli.tcl index 5d1635950..016e4915c 100644 --- a/tests/integration/redis-cli.tcl +++ b/tests/integration/redis-cli.tcl @@ -1,14 +1,13 @@ source tests/support/cli.tcl start_server {tags {"cli"}} { - proc open_cli {} { + proc open_cli {{opts "-n 9"}} { set ::env(TERM) dumb - set cmdline [rediscli [srv port] "-n 9"] + set cmdline [rediscli [srv port] $opts] set fd [open "|$cmdline" "r+"] fconfigure $fd -buffering none fconfigure $fd -blocking false fconfigure $fd -translation binary - assert_equal "redis> " [read_cli $fd] set _ $fd } @@ -32,11 +31,14 @@ start_server {tags {"cli"}} { } # Helpers to run tests in interactive mode + + proc format_output {output} { + set _ [string trimright [regsub -all "\r" $output ""] "\n"] + } + proc run_command {fd cmd} { write_cli $fd $cmd - set lines [split [read_cli $fd] "\n"] - assert_equal "redis> " [lindex $lines end] - join [lrange $lines 0 end-1] "\n" + set _ [format_output [read_cli $fd]] } proc test_interactive_cli {name code} { @@ -58,7 +60,7 @@ start_server {tags {"cli"}} { proc _run_cli {opts args} { set cmd [rediscli [srv port] [list -n 9 {*}$args]] - foreach {key value} $args { + foreach {key value} $opts { if {$key eq "pipe"} { set cmd "sh -c \"$value | $cmd\"" } @@ -72,7 +74,7 @@ start_server {tags {"cli"}} { fconfigure $fd -translation binary set resp [read $fd 1048576] close $fd - set _ $resp + set _ [format_output $resp] } proc run_cli {args} { @@ -80,11 +82,11 @@ start_server {tags {"cli"}} { } proc run_cli_with_input_pipe {cmd args} { - _run_cli [list pipe $cmd] {*}$args + _run_cli [list pipe $cmd] -x {*}$args } proc run_cli_with_input_file {path args} { - _run_cli [list path $path] {*}$args + _run_cli [list path $path] -x {*}$args } proc test_nontty_cli {name code} { @@ -101,7 +103,7 @@ start_server {tags {"cli"}} { test_interactive_cli "INFO response should be printed raw" { set lines [split [run_command $fd info] "\n"] foreach line $lines { - assert [regexp {^[a-z0-9_]+:[a-z0-9_]+} $line] + assert [regexp {^$|^#|^[a-z0-9_]+:.+} $line] } } @@ -121,7 +123,7 @@ start_server {tags {"cli"}} { test_interactive_cli "Multi-bulk reply" { r rpush list foo r rpush list bar - assert_equal "1. \"foo\"\n2. 
\"bar\"" [run_command $fd "lrange list 0 -1"] + assert_equal "1) \"foo\"\n2) \"bar\"" [run_command $fd "lrange list 0 -1"] } test_interactive_cli "Parsing quotes" { @@ -144,35 +146,35 @@ start_server {tags {"cli"}} { } test_tty_cli "Status reply" { - assert_equal "OK\n" [run_cli set key bar] + assert_equal "OK" [run_cli set key bar] assert_equal "bar" [r get key] } test_tty_cli "Integer reply" { r del counter - assert_equal "(integer) 1\n" [run_cli incr counter] + assert_equal "(integer) 1" [run_cli incr counter] } test_tty_cli "Bulk reply" { r set key "tab\tnewline\n" - assert_equal "\"tab\\tnewline\\n\"\n" [run_cli get key] + assert_equal "\"tab\\tnewline\\n\"" [run_cli get key] } test_tty_cli "Multi-bulk reply" { r del list r rpush list foo r rpush list bar - assert_equal "1. \"foo\"\n2. \"bar\"\n" [run_cli lrange list 0 -1] + assert_equal "1) \"foo\"\n2) \"bar\"" [run_cli lrange list 0 -1] } test_tty_cli "Read last argument from pipe" { - assert_equal "OK\n" [run_cli_with_input_pipe "echo foo" set key] + assert_equal "OK" [run_cli_with_input_pipe "echo foo" set key] assert_equal "foo\n" [r get key] } test_tty_cli "Read last argument from file" { set tmpfile [write_tmpfile "from file"] - assert_equal "OK\n" [run_cli_with_input_file $tmpfile set key] + assert_equal "OK" [run_cli_with_input_file $tmpfile set key] assert_equal "from file" [r get key] } @@ -188,7 +190,7 @@ start_server {tags {"cli"}} { test_nontty_cli "Bulk reply" { r set key "tab\tnewline\n" - assert_equal "tab\tnewline\n" [run_cli get key] + assert_equal "tab\tnewline" [run_cli get key] } test_nontty_cli "Multi-bulk reply" { @@ -208,4 +210,79 @@ start_server {tags {"cli"}} { assert_equal "OK" [run_cli_with_input_file $tmpfile set key] assert_equal "from file" [r get key] } + + proc test_redis_cli_rdb_dump {} { + r flushdb + + set dir [lindex [r config get dir] 1] + + assert_equal "OK" [r debug populate 100000 key 1000] + catch {run_cli --rdb "$dir/cli.rdb"} output + assert_match {*Transfer finished with success*} $output + + file delete "$dir/dump.rdb" + file rename "$dir/cli.rdb" "$dir/dump.rdb" + + assert_equal "OK" [r set should-not-exist 1] + assert_equal "OK" [r debug reload nosave] + assert_equal {} [r get should-not-exist] + } + + test_nontty_cli "Dumping an RDB" { + # Disk-based master + assert_match "OK" [r config set repl-diskless-sync no] + test_redis_cli_rdb_dump + + # Disk-less master + assert_match "OK" [r config set repl-diskless-sync yes] + assert_match "OK" [r config set repl-diskless-sync-delay 0] + test_redis_cli_rdb_dump + } + + test_nontty_cli "Connecting as a replica" { + set fd [open_cli "--replica"] + wait_for_condition 50 500 { + [string match {*slave0:*state=online*} [r info]] + } else { + fail "redis-cli --replica did not connect" + } + + for {set i 0} {$i < 100} {incr i} { + r set test-key test-value-$i + } + r client kill type slave + catch { + assert_match {*SET*key-a*} [read_cli $fd] + } + + close_cli $fd + } + + test_nontty_cli "Piping raw protocol" { + set fd [open_cli "--pipe"] + fconfigure $fd -blocking true + + # Create a new deferring client and overwrite its fd + set client [redis [srv 0 "host"] [srv 0 "port"] 1 0] + set ::redis::fd($::redis::id) $fd + $client select 9 + + r del test-counter + for {set i 0} {$i < 10000} {incr i} { + $client incr test-counter + $client set large-key [string repeat "x" 20000] + } + + for {set i 0} {$i < 1000} {incr i} { + $client set very-large-key [string repeat "x" 512000] + } + + close $fd write + set output [read_cli $fd] + + assert_equal {10000} [r 
get test-counter] + assert_match {*All data transferred*errors: 0*replies: 21001*} $output + + close_cli $fd + } } diff --git a/tests/test_helper.tcl b/tests/test_helper.tcl index ef9bf7fdf..1527aa1b5 100644 --- a/tests/test_helper.tcl +++ b/tests/test_helper.tcl @@ -48,6 +48,7 @@ set ::all_tests { integration/psync2 integration/psync2-reg integration/psync2-pingoff + integration/redis-cli unit/pubsub unit/slowlog unit/scripting From e76db7509d8256a034460da6e1cfdb28ba3c562b Mon Sep 17 00:00:00 2001 From: James Hilliard Date: Fri, 10 Jul 2020 01:30:09 -0600 Subject: [PATCH 036/377] Use pkg-config to properly detect libssl and libcrypto libraries (#7452) (cherry picked from commit 5f16b65a5fae28d2343dd002fcfa2dc802dd18f7) --- src/Makefile | 18 +++++++++++++++--- 1 file changed, 15 insertions(+), 3 deletions(-) diff --git a/src/Makefile b/src/Makefile index b8c05c32b..80c627c24 100644 --- a/src/Makefile +++ b/src/Makefile @@ -192,9 +192,21 @@ ifeq ($(MALLOC),jemalloc) endif ifeq ($(BUILD_TLS),yes) - FINAL_CFLAGS+=-DUSE_OPENSSL $(OPENSSL_CFLAGS) - FINAL_LDFLAGS+=$(OPENSSL_LDFLAGS) - FINAL_LIBS += ../deps/hiredis/libhiredis_ssl.a -lssl -lcrypto + FINAL_CFLAGS+=-DUSE_OPENSSL $(OPENSSL_CFLAGS) + FINAL_LDFLAGS+=$(OPENSSL_LDFLAGS) + LIBSSL_PKGCONFIG := $(shell $(PKG_CONFIG) --exists libssl && echo $$?) +ifeq ($(LIBSSL_PKGCONFIG),0) + LIBSSL_LIBS=$(shell $(PKG_CONFIG) --libs libssl) +else + LIBSSL_LIBS=-lssl +endif + LIBCRYPTO_PKGCONFIG := $(shell $(PKG_CONFIG) --exists libcrypto && echo $$?) +ifeq ($(LIBCRYPTO_PKGCONFIG),0) + LIBCRYPTO_LIBS=$(shell $(PKG_CONFIG) --libs libcrypto) +else + LIBCRYPTO_LIBS=-lcrypto +endif + FINAL_LIBS += ../deps/hiredis/libhiredis_ssl.a $(LIBSSL_LIBS) $(LIBCRYPTO_LIBS) endif REDIS_CC=$(QUIET_CC)$(CC) $(FINAL_CFLAGS) From 8bacdf8b48f7dc1db29b7ff50dbc9273dac191ac Mon Sep 17 00:00:00 2001 From: Yossi Gottlieb Date: Fri, 10 Jul 2020 10:32:21 +0300 Subject: [PATCH 037/377] TLS: Ignore client cert when tls-auth-clients off. (#7457) (cherry picked from commit 6dfbccc212b0977482fa1aba1dfe7a474ac34f2d) --- src/tls.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/src/tls.c b/src/tls.c index a62f2284e..4b9948195 100644 --- a/src/tls.c +++ b/src/tls.c @@ -337,9 +337,7 @@ connection *connCreateAcceptedTLS(int fd, int require_auth) { conn->c.state = CONN_STATE_ACCEPTING; if (!require_auth) { - /* We still verify certificates if provided, but don't require them. - */ - SSL_set_verify(conn->ssl, SSL_VERIFY_PEER, NULL); + SSL_set_verify(conn->ssl, SSL_VERIFY_NONE, NULL); } SSL_set_fd(conn->ssl, conn->c.fd); From 27d44fbf73dfa58b145459a2ebfc07a019c7cbf6 Mon Sep 17 00:00:00 2001 From: Yossi Gottlieb Date: Fri, 10 Jul 2020 11:33:47 +0300 Subject: [PATCH 038/377] TLS: Session caching configuration support. (#7420) * TLS: Session caching configuration support. * TLS: Remove redundant config initialization. (cherry picked from commit c611a836f630ecf358b5cfb0d3c5e21c9f0bc105) --- TLS.md | 2 -- redis.conf | 16 ++++++++++++++++ src/config.c | 11 ++++++++++- src/server.h | 3 +++ src/tls.c | 12 +++++++++--- tests/unit/introspection.tcl | 28 ++++++++++++++++++---------- 6 files changed, 56 insertions(+), 16 deletions(-) diff --git a/TLS.md b/TLS.md index e480c1e9d..2d020d0ce 100644 --- a/TLS.md +++ b/TLS.md @@ -68,8 +68,6 @@ but there are probably other good reasons to improve that part anyway. To-Do List ---------- -- [ ] Add session caching support. Check if/how it's handled by clients to - assess how useful/important it is. - [ ] redis-benchmark support. 
The current implementation is a mix of using hiredis for parsing and basic networking (establishing connections), but directly manipulating sockets for most actions. This will need to be cleaned diff --git a/redis.conf b/redis.conf index a51ef007d..8c53f015a 100644 --- a/redis.conf +++ b/redis.conf @@ -199,6 +199,22 @@ tcp-keepalive 300 # # tls-prefer-server-ciphers yes +# By default, TLS session caching is enabled to allow faster and less expensive +# reconnections by clients that support it. Use the following directive to disable +# caching. +# +# tls-session-caching no + +# Change the default number of TLS sessions cached. A zero value sets the cache +# to unlimited size. The default size is 20480. +# +# tls-session-cache-size 5000 + +# Change the default timeout of cached TLS sessions. The default timeout is 300 +# seconds. +# +# tls-session-cache-timeout 60 + ################################# GENERAL ##################################### # By default Redis does not run as a daemon. Use 'yes' if you need it. diff --git a/src/config.c b/src/config.c index 64854592c..acf1b069f 100644 --- a/src/config.c +++ b/src/config.c @@ -2071,7 +2071,7 @@ static int updateTlsCfg(char *val, char *prev, char **err) { UNUSED(prev); UNUSED(err); if (tlsConfigure(&server.tls_ctx_config) == C_ERR) { - *err = "Unable to configure tls-cert-file. Check server logs."; + *err = "Unable to update TLS configuration. Check server logs."; return 0; } return 1; @@ -2081,6 +2081,12 @@ static int updateTlsCfgBool(int val, int prev, char **err) { UNUSED(prev); return updateTlsCfg(NULL, NULL, err); } + +static int updateTlsCfgInt(long long val, long long prev, char **err) { + UNUSED(val); + UNUSED(prev); + return updateTlsCfg(NULL, NULL, err); +} #endif /* USE_OPENSSL */ standardConfig configs[] = { @@ -2216,10 +2222,13 @@ standardConfig configs[] = { #ifdef USE_OPENSSL createIntConfig("tls-port", NULL, IMMUTABLE_CONFIG, 0, 65535, server.tls_port, 0, INTEGER_CONFIG, NULL, NULL), /* TCP port. 
*/ + createIntConfig("tls-session-cache-size", NULL, MODIFIABLE_CONFIG, 0, INT_MAX, server.tls_ctx_config.session_cache_size, 20*1024, INTEGER_CONFIG, NULL, updateTlsCfgInt), + createIntConfig("tls-session-cache-timeout", NULL, MODIFIABLE_CONFIG, 0, INT_MAX, server.tls_ctx_config.session_cache_timeout, 300, INTEGER_CONFIG, NULL, updateTlsCfgInt), createBoolConfig("tls-cluster", NULL, MODIFIABLE_CONFIG, server.tls_cluster, 0, NULL, NULL), createBoolConfig("tls-replication", NULL, MODIFIABLE_CONFIG, server.tls_replication, 0, NULL, NULL), createBoolConfig("tls-auth-clients", NULL, MODIFIABLE_CONFIG, server.tls_auth_clients, 1, NULL, NULL), createBoolConfig("tls-prefer-server-ciphers", NULL, MODIFIABLE_CONFIG, server.tls_ctx_config.prefer_server_ciphers, 0, NULL, updateTlsCfgBool), + createBoolConfig("tls-session-caching", NULL, MODIFIABLE_CONFIG, server.tls_ctx_config.session_caching, 1, NULL, updateTlsCfgBool), createStringConfig("tls-cert-file", NULL, MODIFIABLE_CONFIG, EMPTY_STRING_IS_NULL, server.tls_ctx_config.cert_file, NULL, NULL, updateTlsCfg), createStringConfig("tls-key-file", NULL, MODIFIABLE_CONFIG, EMPTY_STRING_IS_NULL, server.tls_ctx_config.key_file, NULL, NULL, updateTlsCfg), createStringConfig("tls-dh-params-file", NULL, MODIFIABLE_CONFIG, EMPTY_STRING_IS_NULL, server.tls_ctx_config.dh_params_file, NULL, NULL, updateTlsCfg), diff --git a/src/server.h b/src/server.h index 8c0facd04..3f471efcb 100644 --- a/src/server.h +++ b/src/server.h @@ -1011,6 +1011,9 @@ typedef struct redisTLSContextConfig { char *ciphers; char *ciphersuites; int prefer_server_ciphers; + int session_caching; + int session_cache_size; + int session_cache_timeout; } redisTLSContextConfig; /*----------------------------------------------------------------------------- diff --git a/src/tls.c b/src/tls.c index 4b9948195..8b2bb58e1 100644 --- a/src/tls.c +++ b/src/tls.c @@ -148,9 +148,6 @@ void tlsInit(void) { } pending_list = listCreate(); - - /* Server configuration */ - server.tls_auth_clients = 1; /* Secure by default */ } /* Attempt to configure/reconfigure TLS. 
This operation is atomic and will @@ -184,6 +181,15 @@ int tlsConfigure(redisTLSContextConfig *ctx_config) { SSL_CTX_set_options(ctx, SSL_OP_DONT_INSERT_EMPTY_FRAGMENTS); #endif + if (ctx_config->session_caching) { + SSL_CTX_set_session_cache_mode(ctx, SSL_SESS_CACHE_SERVER); + SSL_CTX_sess_set_cache_size(ctx, ctx_config->session_cache_size); + SSL_CTX_set_timeout(ctx, ctx_config->session_cache_timeout); + SSL_CTX_set_session_id_context(ctx, (void *) "redis", 5); + } else { + SSL_CTX_set_session_cache_mode(ctx, SSL_SESS_CACHE_OFF); + } + int protocols = parseProtocolsConfig(ctx_config->protocols); if (protocols == -1) goto error; diff --git a/tests/unit/introspection.tcl b/tests/unit/introspection.tcl index b60ca0d48..d681e06d5 100644 --- a/tests/unit/introspection.tcl +++ b/tests/unit/introspection.tcl @@ -78,17 +78,8 @@ start_server {tags {"introspection"}} { syslog-facility databases port - io-threads tls-port - tls-prefer-server-ciphers - tls-cert-file - tls-key-file - tls-dh-params-file - tls-ca-cert-file - tls-ca-cert-dir - tls-protocols - tls-ciphers - tls-ciphersuites + io-threads logfile unixsocketperm slaveof @@ -100,6 +91,23 @@ start_server {tags {"introspection"}} { bgsave_cpulist } + if {!$::tls} { + append skip_configs { + tls-prefer-server-ciphers + tls-session-cache-timeout + tls-session-cache-size + tls-session-caching + tls-cert-file + tls-key-file + tls-dh-params-file + tls-ca-cert-file + tls-ca-cert-dir + tls-protocols + tls-ciphers + tls-ciphersuites + } + } + set configs {} foreach {k v} [r config get *] { if {[lsearch $skip_configs $k] != -1} { From b6fd631d046499ce8fdc0ca2eec00c298686fb24 Mon Sep 17 00:00:00 2001 From: WuYunlong Date: Fri, 10 Jul 2020 16:41:48 +0800 Subject: [PATCH 039/377] Add missing latency-monitor tcl test to test_helper.tcl. (#6782) (cherry picked from commit 136e5efeaba3296c79e67a6fe5dc796c0830b61e) --- tests/test_helper.tcl | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/test_helper.tcl b/tests/test_helper.tcl index 1527aa1b5..51c364601 100644 --- a/tests/test_helper.tcl +++ b/tests/test_helper.tcl @@ -35,6 +35,7 @@ set ::all_tests { unit/quit unit/aofrw unit/acl + unit/latency-monitor integration/block-repl integration/replication integration/replication-2 From b42a2bef01339453a252315d9e4338fae7217ef4 Mon Sep 17 00:00:00 2001 From: Jiayuan Chen Date: Fri, 10 Jul 2020 21:02:18 +0800 Subject: [PATCH 040/377] Fix typo in deps README (#7500) (cherry picked from commit f8c6c32178bafa55e13154cbcf4ef41e9ce7bad9) --- deps/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/deps/README.md b/deps/README.md index 685dbb40d..f923c06ad 100644 --- a/deps/README.md +++ b/deps/README.md @@ -47,7 +47,7 @@ Hiredis Hiredis uses the SDS string library, that must be the same version used inside Redis itself. Hiredis is also very critical for Sentinel. Historically Redis often used forked versions of hiredis in a way or the other. In order to upgrade it is advised to take a lot of care: 1. Check with diff if hiredis API changed and what impact it could have in Redis. -2. Make sure thet the SDS library inside Hiredis and inside Redis are compatible. +2. Make sure that the SDS library inside Hiredis and inside Redis are compatible. 3. After the upgrade, run the Redis Sentinel test. 4. Check manually that redis-cli and redis-benchmark behave as expecteed, since we have no tests for CLI utilities currently. 
From 650f4c538d1181b67a32c17358d2f1de6e8b251f Mon Sep 17 00:00:00 2001 From: Abhishek Soni Date: Fri, 10 Jul 2020 18:35:29 +0530 Subject: [PATCH 041/377] fix: typo in CI job name (#7466) (cherry picked from commit 6d8fa206bb6b6bc50cf485b597aaac1870aa8992) --- .github/workflows/ci.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 439e3f3df..730eaf0dd 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -31,7 +31,7 @@ jobs: - name: make run: make - biuld-32bit: + build-32bit: runs-on: ubuntu-latest steps: - uses: actions/checkout@v1 From 796cbb68c9de1c9dfc897c6a4d097e5a7c29939a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E9=A9=AC=E6=B0=B8=E6=B3=BD?= <1014057907@qq.com> Date: Fri, 10 Jul 2020 21:37:11 +0800 Subject: [PATCH 042/377] fix benchmark in cluster mode fails to authenticate (#7488) Co-authored-by: Oran Agra (styling) (cherry picked from commit 40e930dab5d9943f760c65021a2b195d402af3ca) --- src/redis-benchmark.c | 98 +++++++++++++++++++++++++------------------ 1 file changed, 57 insertions(+), 41 deletions(-) diff --git a/src/redis-benchmark.c b/src/redis-benchmark.c index 38d4ca51b..f47cbe333 100644 --- a/src/redis-benchmark.c +++ b/src/redis-benchmark.c @@ -1,4 +1,4 @@ -/* Redis benchmark utility. +/* Redis benchmark utility. * * Copyright (c) 2009-2012, Salvatore Sanfilippo * All rights reserved. @@ -183,6 +183,8 @@ static void *execBenchmarkThread(void *ptr); static clusterNode *createClusterNode(char *ip, int port); static redisConfig *getRedisConfig(const char *ip, int port, const char *hostsocket); +static redisContext *getRedisContext(const char *ip, int port, + const char *hostsocket); static void freeRedisConfig(redisConfig *cfg); static int fetchClusterSlotsConfiguration(client c); static void updateClusterSlotsConfiguration(); @@ -238,6 +240,52 @@ void _serverAssert(const char *estr, const char *file, int line) { *((char*)-1) = 'x'; } +static redisContext *getRedisContext(const char *ip, int port, + const char *hostsocket) +{ + redisContext *ctx = NULL; + redisReply *reply = NULL; + if (hostsocket == NULL) + ctx = redisConnect(ip, port); + else + ctx = redisConnectUnix(hostsocket); + if (ctx == NULL || ctx->err) { + fprintf(stderr,"Could not connect to Redis at "); + char *err = (ctx != NULL ? 
ctx->errstr : ""); + if (hostsocket == NULL) + fprintf(stderr,"%s:%d: %s\n",ip,port,err); + else + fprintf(stderr,"%s: %s\n",hostsocket,err); + goto cleanup; + } + if (config.auth == NULL) + return ctx; + if (config.user == NULL) + reply = redisCommand(ctx,"AUTH %s", config.auth); + else + reply = redisCommand(ctx,"AUTH %s %s", config.user, config.auth); + if (reply != NULL) { + if (reply->type == REDIS_REPLY_ERROR) { + if (hostsocket == NULL) + fprintf(stderr, "Node %s:%d replied with error:\n%s\n", ip, port, reply->str); + else + fprintf(stderr, "Node %s replied with error:\n%s\n", hostsocket, reply->str); + goto cleanup; + } + freeReplyObject(reply); + return ctx; + } + fprintf(stderr, "ERROR: failed to fetch reply from "); + if (hostsocket == NULL) + fprintf(stderr, "%s:%d\n", ip, port); + else + fprintf(stderr, "%s\n", hostsocket); +cleanup: + freeReplyObject(reply); + redisFree(ctx); + return NULL; +} + static redisConfig *getRedisConfig(const char *ip, int port, const char *hostsocket) { @@ -245,33 +293,11 @@ static redisConfig *getRedisConfig(const char *ip, int port, if (!cfg) return NULL; redisContext *c = NULL; redisReply *reply = NULL, *sub_reply = NULL; - if (hostsocket == NULL) - c = redisConnect(ip, port); - else - c = redisConnectUnix(hostsocket); - if (c == NULL || c->err) { - fprintf(stderr,"Could not connect to Redis at "); - char *err = (c != NULL ? c->errstr : ""); - if (hostsocket == NULL) fprintf(stderr,"%s:%d: %s\n",ip,port,err); - else fprintf(stderr,"%s: %s\n",hostsocket,err); - goto fail; + c = getRedisContext(ip, port, hostsocket); + if (c == NULL) { + freeRedisConfig(cfg); + return NULL; } - - if(config.auth) { - void *authReply = NULL; - if (config.user == NULL) - redisAppendCommand(c, "AUTH %s", config.auth); - else - redisAppendCommand(c, "AUTH %s %s", config.user, config.auth); - if (REDIS_OK != redisGetReply(c, &authReply)) goto fail; - if (reply) freeReplyObject(reply); - reply = ((redisReply *) authReply); - if (reply->type == REDIS_REPLY_ERROR) { - fprintf(stderr, "ERROR: %s\n", reply->str); - goto fail; - } - } - redisAppendCommand(c, "CONFIG GET %s", "save"); redisAppendCommand(c, "CONFIG GET %s", "appendonly"); int i = 0; @@ -994,16 +1020,8 @@ static int fetchClusterConfiguration() { int success = 1; redisContext *ctx = NULL; redisReply *reply = NULL; - if (config.hostsocket == NULL) - ctx = redisConnect(config.hostip,config.hostport); - else - ctx = redisConnectUnix(config.hostsocket); - if (ctx->err) { - fprintf(stderr,"Could not connect to Redis at "); - if (config.hostsocket == NULL) { - fprintf(stderr,"%s:%d: %s\n",config.hostip,config.hostport, - ctx->errstr); - } else fprintf(stderr,"%s: %s\n",config.hostsocket,ctx->errstr); + ctx = getRedisContext(config.hostip, config.hostport, config.hostsocket); + if (ctx == NULL) { exit(1); } clusterNode *firstNode = createClusterNode((char *) config.hostip, @@ -1199,11 +1217,9 @@ static int fetchClusterSlotsConfiguration(client c) { assert(node->port); /* Use first node as entry point to connect to. 
*/ if (ctx == NULL) { - ctx = redisConnect(node->ip, node->port); - if (!ctx || ctx->err) { + ctx = getRedisContext(node->ip, node->port, NULL); + if (!ctx) { success = 0; - if (ctx && ctx->err) - fprintf(stderr, "REDIS CONNECTION ERROR: %s\n", ctx->errstr); goto cleanup; } } From 8610a176960f2c8ebee0d95b9dc0a40511e4d284 Mon Sep 17 00:00:00 2001 From: jimgreen2013 Date: Sun, 12 Jul 2020 03:51:44 +0800 Subject: [PATCH 043/377] fix description about ziplist, the code is ok (#6318) * fix description about ZIP_BIG_PREVLEN(the code is ok), it's similar to antirez#4705 * fix description about ziplist entry encoding field (the code is ok), the max length should be 2^32 - 1 when encoding is 5 bytes (cherry picked from commit c05d8a0a31286688249de14e42c5a69b553563db) --- src/ziplist.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/ziplist.c b/src/ziplist.c index ddae0d96f..13881c117 100644 --- a/src/ziplist.c +++ b/src/ziplist.c @@ -86,7 +86,7 @@ * |10000000|qqqqqqqq|rrrrrrrr|ssssssss|tttttttt| - 5 bytes * String value with length greater than or equal to 16384 bytes. * Only the 4 bytes following the first byte represents the length - * up to 32^2-1. The 6 lower bits of the first byte are not used and + * up to 2^32-1. The 6 lower bits of the first byte are not used and * are set to zero. * IMPORTANT: The 32 bit number is stored in big endian. * |11000000| - 3 bytes @@ -194,7 +194,7 @@ #define ZIP_BIG_PREVLEN 254 /* Max number of bytes of the previous entry, for the "prevlen" field prefixing each entry, to be represented with just a single byte. Otherwise - it is represented as FF AA BB CC DD, where + it is represented as FE AA BB CC DD, where AA BB CC DD are a 4 bytes unsigned integer representing the previous entry len. */ From 7fff1d3500756e1a40fa78135f19f25b55633bfd Mon Sep 17 00:00:00 2001 From: Oran Agra Date: Sun, 12 Jul 2020 13:55:26 +0300 Subject: [PATCH 044/377] update release scripts for new hosts, and CI to run more tests (#7480) * update daily CI to include cluster and sentinel tests * update daily CI to run when creating a new release * update release scripts to work on the new redis.io hosts (cherry picked from commit 2ee300481f3250760ba0ef4253438f6282cf9596) --- .github/workflows/daily.yml | 26 ++++++++++++++++--- utils/releasetools/01_create_tarball.sh | 3 +-- utils/releasetools/02_upload_tarball.sh | 22 ++++++++++++++--- utils/releasetools/03_test_release.sh | 33 +++++++++++++------------ utils/releasetools/04_release_hash.sh | 10 ++++++-- 5 files changed, 68 insertions(+), 26 deletions(-) diff --git a/.github/workflows/daily.yml b/.github/workflows/daily.yml index acc4dd33a..4d54fbc42 100644 --- a/.github/workflows/daily.yml +++ b/.github/workflows/daily.yml @@ -1,14 +1,16 @@ name: Daily on: + release: + types: [created] schedule: - - cron: '0 7 * * *' + - cron: '0 0 * * *' jobs: test-jemalloc: runs-on: ubuntu-latest - timeout-minutes: 1200 + timeout-minutes: 14400 steps: - uses: actions/checkout@v1 - name: make @@ -19,10 +21,14 @@ jobs: ./runtest --accurate --verbose - name: module api test run: ./runtest-moduleapi --verbose + - name: sentinel tests + run: ./runtest-sentinel + - name: cluster tests + run: ./runtest-cluster test-libc-malloc: runs-on: ubuntu-latest - timeout-minutes: 1200 + timeout-minutes: 14400 steps: - uses: actions/checkout@v1 - name: make @@ -33,9 +39,14 @@ jobs: ./runtest --accurate --verbose - name: module api test run: ./runtest-moduleapi --verbose + - name: sentinel tests + run: ./runtest-sentinel + - name: cluster tests + run: 
./runtest-cluster test-32bit: runs-on: ubuntu-latest + timeout-minutes: 14400 steps: - uses: actions/checkout@v1 - name: make @@ -50,9 +61,14 @@ jobs: run: | make -C tests/modules 32bit # the script below doesn't have an argument, we must build manually ahead of time ./runtest-moduleapi --verbose + - name: sentinel tests + run: ./runtest-sentinel + - name: cluster tests + run: ./runtest-cluster test-tls: runs-on: ubuntu-latest + timeout-minutes: 14400 steps: - uses: actions/checkout@v1 - name: make @@ -65,6 +81,10 @@ jobs: ./runtest --accurate --verbose --tls - name: module api test run: ./runtest-moduleapi --verbose --tls + - name: sentinel tests + run: ./runtest-sentinel + - name: cluster tests + run: ./runtest-cluster test-valgrind: runs-on: ubuntu-latest diff --git a/utils/releasetools/01_create_tarball.sh b/utils/releasetools/01_create_tarball.sh index 54bca8c04..366a61e2c 100755 --- a/utils/releasetools/01_create_tarball.sh +++ b/utils/releasetools/01_create_tarball.sh @@ -1,14 +1,13 @@ #!/bin/sh if [ $# != "1" ] then - echo "Usage: ./mkrelease.sh " + echo "Usage: ./utils/releasetools/01_create_tarball.sh " exit 1 fi TAG=$1 TARNAME="redis-${TAG}.tar" echo "Generating /tmp/${TARNAME}" -cd ~/hack/redis git archive $TAG --prefix redis-${TAG}/ > /tmp/$TARNAME || exit 1 echo "Gizipping the archive" rm -f /tmp/$TARNAME.gz diff --git a/utils/releasetools/02_upload_tarball.sh b/utils/releasetools/02_upload_tarball.sh index ed7065388..6400efad7 100755 --- a/utils/releasetools/02_upload_tarball.sh +++ b/utils/releasetools/02_upload_tarball.sh @@ -1,6 +1,22 @@ #!/bin/bash +if [ $# != "1" ] +then + echo "Usage: ./utils/releasetools/02_upload_tarball.sh " + exit 1 +fi + echo "Uploading..." -scp /tmp/redis-${1}.tar.gz antirez@antirez.com:/var/virtual/download.redis.io/httpdocs/releases/ -echo "Updating web site... (press any key if it is a stable release, or Ctrl+C)" +scp /tmp/redis-${1}.tar.gz ubuntu@host.redis.io:/var/www/download/releases/ +echo "Updating web site... " +echo "Please check the github action tests for the release." 
+echo "Press any key if it is a stable release, or Ctrl+C to abort" read x -ssh antirez@antirez.com "cd /var/virtual/download.redis.io/httpdocs; ./update.sh ${1}" +ssh ubuntu@host.redis.io "cd /var/www/download; + rm -rf redis-${1}.tar.gz; + wget http://download.redis.io/releases/redis-${1}.tar.gz; + tar xvzf redis-${1}.tar.gz; + rm -rf redis-stable; + mv redis-${1} redis-stable; + tar cvzf redis-stable.tar.gz redis-stable; + rm -rf redis-${1}.tar.gz; + " diff --git a/utils/releasetools/03_test_release.sh b/utils/releasetools/03_test_release.sh index 3dfdcd6a3..169e965d5 100755 --- a/utils/releasetools/03_test_release.sh +++ b/utils/releasetools/03_test_release.sh @@ -1,7 +1,8 @@ #!/bin/sh +set -e if [ $# != "1" ] then - echo "Usage: ${0} " + echo "Usage: ./utils/releasetools/03_test_release.sh " exit 1 fi @@ -9,18 +10,18 @@ TAG=$1 TARNAME="redis-${TAG}.tar.gz" DOWNLOADURL="http://download.redis.io/releases/${TARNAME}" -ssh antirez@metal "export TERM=xterm; - cd /tmp; - rm -rf test_release_tmp_dir; - cd test_release_tmp_dir; - rm -f $TARNAME; - rm -rf redis-${TAG}; - wget $DOWNLOADURL; - tar xvzf $TARNAME; - cd redis-${TAG}; - make; - ./runtest; - ./runtest-sentinel; - if [ -x runtest-cluster ]; then - ./runtest-cluster; - fi" +echo "Doing sanity test on the actual tarball" + +cd /tmp +rm -rf test_release_tmp_dir +cd test_release_tmp_dir +rm -f $TARNAME +rm -rf redis-${TAG} +wget $DOWNLOADURL +tar xvzf $TARNAME +cd redis-${TAG} +make +./runtest +./runtest-sentinel +./runtest-cluster +./runtest-moduleapi diff --git a/utils/releasetools/04_release_hash.sh b/utils/releasetools/04_release_hash.sh index 9d5c6ad4b..bc1ebb66c 100755 --- a/utils/releasetools/04_release_hash.sh +++ b/utils/releasetools/04_release_hash.sh @@ -1,8 +1,14 @@ #!/bin/bash +if [ $# != "1" ] +then + echo "Usage: ./utils/releasetools/04_release_hash.sh " + exit 1 +fi + SHA=$(curl -s http://download.redis.io/releases/redis-${1}.tar.gz | shasum -a 256 | cut -f 1 -d' ') ENTRY="hash redis-${1}.tar.gz sha256 $SHA http://download.redis.io/releases/redis-${1}.tar.gz" echo $ENTRY >> ~/hack/redis-hashes/README -vi ~/hack/redis-hashes/README +vi ../redis-hashes/README echo "Press any key to commit, Ctrl-C to abort)." read yes -(cd ~/hack/redis-hashes; git commit -a -m "${1} hash."; git push) +(cd ../redis-hashes; git commit -a -m "${1} hash."; git push) From 905ffb72e91aa743257e44436196bff1e06e4243 Mon Sep 17 00:00:00 2001 From: Oran Agra Date: Mon, 13 Jul 2020 16:09:08 +0300 Subject: [PATCH 045/377] runtest --stop pause stops before terminating the redis server (#7513) in the majority of the cases (on this rarely used feature) we want to stop and be able to connect to the shard with redis-cli. since these are two different processes interracting with the tty we need to stop both, and we'll have to hit enter twice, but it's not that bad considering it is rarely used. (cherry picked from commit 3351549c22434337dfa8a262dce678679a35d7da) --- tests/support/test.tcl | 6 ++++++ tests/test_helper.tcl | 4 ++-- 2 files changed, 8 insertions(+), 2 deletions(-) diff --git a/tests/support/test.tcl b/tests/support/test.tcl index 5e8916236..a5573f583 100644 --- a/tests/support/test.tcl +++ b/tests/support/test.tcl @@ -153,6 +153,12 @@ proc test {name code {okpattern undefined}} { incr ::num_failed send_data_packet $::test_server_fd err [join $details "\n"] + + if {$::stop_on_failure} { + puts "Test error (last server port:[srv port], log:[srv stdout]), press enter to teardown the test." 
+ flush stdout + gets stdin + } } else { # Re-raise, let handler up the stack take care of this. error $error $::errorInfo diff --git a/tests/test_helper.tcl b/tests/test_helper.tcl index 51c364601..7ce0d545e 100644 --- a/tests/test_helper.tcl +++ b/tests/test_helper.tcl @@ -359,8 +359,8 @@ proc read_from_test_client fd { puts $err lappend ::failed_tests $err set ::active_clients_task($fd) "(ERR) $data" - if {$::stop_on_failure} { - puts -nonewline "(Test stopped, press enter to continue)" + if {$::stop_on_failure} { + puts -nonewline "(Test stopped, press enter to resume the tests)" flush stdout gets stdin } From dde79afbf7959d0bff2c78099e66b7b70d43a68c Mon Sep 17 00:00:00 2001 From: Oran Agra Date: Mon, 13 Jul 2020 16:40:03 +0300 Subject: [PATCH 046/377] fix recently added time sensitive tests failing with valgrind (#7512) interestingly the latency monitor test fails because valgrind is slow enough so that the time inside PEXPIREAT command from the moment of the first mstime() call to get the basetime until checkAlreadyExpired calls mstime() again is more than 1ms, and that test was too sensitive. using this opportunity to speed up the test (unrelated to the failure) the fix is just the longer time passed to PEXPIRE. (cherry picked from commit 663e637da87ee9385527fe3a37edb241a1f97cc6) --- tests/integration/redis-cli.tcl | 2 +- tests/unit/latency-monitor.tcl | 16 +++++++++++----- 2 files changed, 12 insertions(+), 6 deletions(-) diff --git a/tests/integration/redis-cli.tcl b/tests/integration/redis-cli.tcl index 016e4915c..aa8b92199 100644 --- a/tests/integration/redis-cli.tcl +++ b/tests/integration/redis-cli.tcl @@ -241,7 +241,7 @@ start_server {tags {"cli"}} { test_nontty_cli "Connecting as a replica" { set fd [open_cli "--replica"] - wait_for_condition 50 500 { + wait_for_condition 200 500 { [string match {*slave0:*state=online*} [r info]] } else { fail "redis-cli --replica did not connect" diff --git a/tests/unit/latency-monitor.tcl b/tests/unit/latency-monitor.tcl index 69da13f06..d76867cc6 100644 --- a/tests/unit/latency-monitor.tcl +++ b/tests/unit/latency-monitor.tcl @@ -50,15 +50,21 @@ start_server {tags {"latency-monitor"}} { test {LATENCY of expire events are correctly collected} { r config set latency-monitor-threshold 20 + r flushdb + if {$::valgrind} {set count 100000} else {set count 1000000} r eval { local i = 0 - while (i < 1000000) do - redis.call('sadd','mybigkey',i) + while (i < tonumber(ARGV[1])) do + redis.call('sadd',KEYS[1],i) i = i+1 end - } 0 - r pexpire mybigkey 1 - after 500 + } 1 mybigkey $count + r pexpire mybigkey 50 + wait_for_condition 5 100 { + [r dbsize] == 0 + } else { + fail "key wasn't expired" + } assert_match {*expire-cycle*} [r latency latest] } } From 90b6ed7a2c7cdfe4d9a3d91423948714e98e9ba7 Mon Sep 17 00:00:00 2001 From: Oran Agra Date: Mon, 13 Jul 2020 16:40:19 +0300 Subject: [PATCH 047/377] RESTORE ABSTTL skip expired keys - leak (#7511) (cherry picked from commit c10eabeeca9b79a6583ac2db75e7b1aed6c4dceb) --- src/cluster.c | 1 + 1 file changed, 1 insertion(+) diff --git a/src/cluster.c b/src/cluster.c index 88b810d13..45fb5552e 100644 --- a/src/cluster.c +++ b/src/cluster.c @@ -5030,6 +5030,7 @@ void restoreCommand(client *c) { notifyKeyspaceEvent(NOTIFY_GENERIC,"del",key,c->db->id); server.dirty++; } + decrRefCount(obj); addReply(c, shared.ok); return; } From 2c8ecc971324ab8b02fee3889943071f9626d607 Mon Sep 17 00:00:00 2001 From: Qu Chen Date: Mon, 13 Jul 2020 07:16:06 -0700 Subject: [PATCH 048/377] Replica always reports master's config 
epoch in CLUSTER NODES output. (#7235) (cherry picked from commit a517043c7acadbbc0e760092c1abce465636773e) --- src/cluster.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/src/cluster.c b/src/cluster.c index 45fb5552e..5dcd69ff8 100644 --- a/src/cluster.c +++ b/src/cluster.c @@ -4104,11 +4104,15 @@ sds clusterGenNodeDescription(clusterNode *node) { else ci = sdscatlen(ci," - ",3); + unsigned long long nodeEpoch = node->configEpoch; + if (nodeIsSlave(node) && node->slaveof) { + nodeEpoch = node->slaveof->configEpoch; + } /* Latency from the POV of this node, config epoch, link status */ ci = sdscatprintf(ci,"%lld %lld %llu %s", (long long) node->ping_sent, (long long) node->pong_received, - (unsigned long long) node->configEpoch, + nodeEpoch, (node->link || node->flags & CLUSTER_NODE_MYSELF) ? "connected" : "disconnected"); From b80f2ec8acacc9baf61e0686c16224900918198e Mon Sep 17 00:00:00 2001 From: WuYunlong Date: Tue, 14 Jul 2020 16:35:04 +0800 Subject: [PATCH 049/377] Fix out of update help info in tcl tests. (#7516) Before this commit, the output of "./runtest-cluster --help" is incorrect. After this commit, the format of the following 3 output is consistent: ./runtest --help ./runtest-cluster --help ./runtest-sentinel --help (cherry picked from commit 24b6f62741483f3f695838c0ad091f1931c36df5) --- tests/instances.tcl | 2 -- 1 file changed, 2 deletions(-) diff --git a/tests/instances.tcl b/tests/instances.tcl index 3a4fadca0..677af6427 100644 --- a/tests/instances.tcl +++ b/tests/instances.tcl @@ -170,8 +170,6 @@ proc parse_options {} { -keyfile "$::tlsdir/redis.key" set ::tls 1 } elseif {$opt eq "--help"} { - puts "Hello, I'm sentinel.tcl and I run Sentinel unit tests." - puts "\nOptions:" puts "--single Only runs tests specified by pattern." puts "--pause-on-error Pause for manual inspection on error." puts "--fail Simulate a test failure." From 6bdc5a4a084f9aa3aae240c1fd41c4817f636f29 Mon Sep 17 00:00:00 2001 From: Oran Agra Date: Tue, 14 Jul 2020 18:04:08 +0300 Subject: [PATCH 050/377] redis-cli tests, fix valgrind timing issue (#7519) this test when run with valgrind on github actions takes 160 seconds (cherry picked from commit 8a14ce8634c49d992aa929cf0f98e96f03bccba4) --- tests/integration/redis-cli.tcl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/integration/redis-cli.tcl b/tests/integration/redis-cli.tcl index aa8b92199..c70d14eeb 100644 --- a/tests/integration/redis-cli.tcl +++ b/tests/integration/redis-cli.tcl @@ -241,7 +241,7 @@ start_server {tags {"cli"}} { test_nontty_cli "Connecting as a replica" { set fd [open_cli "--replica"] - wait_for_condition 200 500 { + wait_for_condition 500 500 { [string match {*slave0:*state=online*} [r info]] } else { fail "redis-cli --replica did not connect" From b0f08a04e0782c3af4e2a96ac6fec2fd696b3af8 Mon Sep 17 00:00:00 2001 From: Oran Agra Date: Tue, 14 Jul 2020 20:21:59 +0300 Subject: [PATCH 051/377] diskless master disconnect replicas when rdb child failed (#7518) in case the rdb child failed, crashed or terminated unexpectedly redis would have marked the replica clients with repl_put_online_on_ack and then kill them only after a minute when no ack was received. it would not stream anything to these connections, so the only effect of this bug is a delay of 1 minute in the replicas attempt to re-connect. 
(cherry picked from commit a3df70923431bee4aaac0efc46004484a63cb167) --- src/replication.c | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/src/replication.c b/src/replication.c index d9bff79ad..8457150a0 100644 --- a/src/replication.c +++ b/src/replication.c @@ -1240,6 +1240,12 @@ void updateSlavesWaitingBgsave(int bgsaveerr, int type) { } else if (slave->replstate == SLAVE_STATE_WAIT_BGSAVE_END) { struct redis_stat buf; + if (bgsaveerr != C_OK) { + freeClient(slave); + serverLog(LL_WARNING,"SYNC failed. BGSAVE child returned an error"); + continue; + } + /* If this was an RDB on disk save, we have to prepare to send * the RDB from disk to the slave socket. Otherwise if this was * already an RDB -> Slaves socket transfer, used in the case of @@ -1278,11 +1284,6 @@ void updateSlavesWaitingBgsave(int bgsaveerr, int type) { slave->repl_put_online_on_ack = 1; slave->repl_ack_time = server.unixtime; /* Timeout otherwise. */ } else { - if (bgsaveerr != C_OK) { - freeClient(slave); - serverLog(LL_WARNING,"SYNC failed. BGSAVE child returned an error"); - continue; - } if ((slave->repldbfd = open(server.rdb_filename,O_RDONLY)) == -1 || redis_fstat(slave->repldbfd,&buf) == -1) { freeClient(slave); From bcf9ca362c98e63e9ad81912ed99c81f76e9a8bc Mon Sep 17 00:00:00 2001 From: WuYunlong Date: Wed, 15 Jul 2020 17:37:44 +0800 Subject: [PATCH 052/377] Refactor RM_KeyType() by using macro. (#7486) (cherry picked from commit 7da8c062d5ee8eef8663fe3740267561bf65086d) --- src/module.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/module.c b/src/module.c index 226c60fd0..acfaf2ac7 100644 --- a/src/module.c +++ b/src/module.c @@ -2044,7 +2044,7 @@ int RM_KeyType(RedisModuleKey *key) { case OBJ_HASH: return REDISMODULE_KEYTYPE_HASH; case OBJ_MODULE: return REDISMODULE_KEYTYPE_MODULE; case OBJ_STREAM: return REDISMODULE_KEYTYPE_STREAM; - default: return 0; + default: return REDISMODULE_KEYTYPE_EMPTY; } } From 8b20802a090fedaef7d7ac47bdc5d7f097f3dc55 Mon Sep 17 00:00:00 2001 From: WuYunlong Date: Wed, 15 Jul 2020 17:38:22 +0800 Subject: [PATCH 053/377] Fix command help for unexpected options (#7476) (cherry picked from commit e5166eccee3396a24dfd3a79d3211943e5a3d25e) --- src/acl.c | 2 +- src/latency.c | 2 +- src/t_stream.c | 2 +- tests/unit/acl.tcl | 5 +++++ tests/unit/latency-monitor.tcl | 5 +++++ tests/unit/type/stream.tcl | 7 +++++++ 6 files changed, 20 insertions(+), 3 deletions(-) diff --git a/src/acl.c b/src/acl.c index 6dd0f70ac..3ce45f03b 100644 --- a/src/acl.c +++ b/src/acl.c @@ -1911,7 +1911,7 @@ void aclCommand(client *c) { addReplyBulkCString(c,"client-info"); addReplyBulkCBuffer(c,le->cinfo,sdslen(le->cinfo)); } - } else if (!strcasecmp(sub,"help")) { + } else if (c->argc == 2 && !strcasecmp(sub,"help")) { const char *help[] = { "LOAD -- Reload users from the ACL file.", "SAVE -- Save the current config to the ACL file.", diff --git a/src/latency.c b/src/latency.c index 9a291ac9b..dfdc6668c 100644 --- a/src/latency.c +++ b/src/latency.c @@ -621,7 +621,7 @@ NULL resets += latencyResetEvent(c->argv[j]->ptr); addReplyLongLong(c,resets); } - } else if (!strcasecmp(c->argv[1]->ptr,"help") && c->argc >= 2) { + } else if (!strcasecmp(c->argv[1]->ptr,"help") && c->argc == 2) { addReplyHelp(c, help); } else { addReplySubcommandSyntaxError(c); diff --git a/src/t_stream.c b/src/t_stream.c index 676ddd9bb..f564b1ff9 100644 --- a/src/t_stream.c +++ b/src/t_stream.c @@ -1885,7 +1885,7 @@ NULL server.dirty++; notifyKeyspaceEvent(NOTIFY_STREAM,"xgroup-delconsumer", 
c->argv[2],c->db->id); - } else if (!strcasecmp(opt,"HELP")) { + } else if (c->argc == 2 && !strcasecmp(opt,"HELP")) { addReplyHelp(c, help); } else { addReplySubcommandSyntaxError(c); diff --git a/tests/unit/acl.tcl b/tests/unit/acl.tcl index 85c9b81a9..e81280995 100644 --- a/tests/unit/acl.tcl +++ b/tests/unit/acl.tcl @@ -255,4 +255,9 @@ start_server {tags {"acl"}} { r ACL setuser default on set e } {*NOAUTH*} + + test {ACL HELP should not have unexpected options} { + catch {r ACL help xxx} e + assert_match "*Unknown subcommand or wrong number of arguments*" $e + } } diff --git a/tests/unit/latency-monitor.tcl b/tests/unit/latency-monitor.tcl index d76867cc6..18b9ecebb 100644 --- a/tests/unit/latency-monitor.tcl +++ b/tests/unit/latency-monitor.tcl @@ -67,4 +67,9 @@ start_server {tags {"latency-monitor"}} { } assert_match {*expire-cycle*} [r latency latest] } + + test {LATENCY HELP should not have unexpected options} { + catch {r LATENCY help xxx} e + assert_match "*Unknown subcommand or wrong number of arguments*" $e + } } diff --git a/tests/unit/type/stream.tcl b/tests/unit/type/stream.tcl index c2b524d7f..0ff570cab 100644 --- a/tests/unit/type/stream.tcl +++ b/tests/unit/type/stream.tcl @@ -461,3 +461,10 @@ start_server {tags {"stream"} overrides {appendonly yes aof-use-rdb-preamble no} assert {[dict get [r xinfo stream mystream] last-generated-id] == "2-2"} } } + +start_server {tags {"stream"}} { + test {XGROUP HELP should not have unexpected options} { + catch {r XGROUP help xxx} e + assert_match "*Unknown subcommand or wrong number of arguments*" $e + } +} From b06b71a442753812b8d817a1c1929864075f502c Mon Sep 17 00:00:00 2001 From: Wen Hui Date: Wed, 15 Jul 2020 05:38:47 -0400 Subject: [PATCH 054/377] correct error msg for num connections reaching maxclients in cluster mode (#7444) (cherry picked from commit 6c5f98b24ba3cefa20fdfe17c530bcce20f8c151) --- src/networking.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/networking.c b/src/networking.c index d35347991..589a459d4 100644 --- a/src/networking.c +++ b/src/networking.c @@ -907,10 +907,10 @@ static void acceptCommonHandler(connection *conn, int flags, char *ip) { { char *err; if (server.cluster_enabled) - err = "-ERR max number of clients reached\r\n"; - else err = "-ERR max number of clients + cluster " "connections reached\r\n"; + else + err = "-ERR max number of clients reached\r\n"; /* That's a best effort error message, don't check write errors. * Note that for TLS connections, no handshake was done yet so nothing From 1e144396e01b9dae82d3fd01dd33d1974a736014 Mon Sep 17 00:00:00 2001 From: Developer-Ecosystem-Engineering <65677710+Developer-Ecosystem-Engineering@users.noreply.github.com> Date: Wed, 15 Jul 2020 02:44:03 -0700 Subject: [PATCH 055/377] Add registers dump support for Apple silicon (#7453) Export following environment variables before building on macOS on Apple silicon export ARCH_FLAGS="-arch arm64" export SDK_NAME=macosx export SDK_PATH=$(xcrun --show-sdk-path --sdk $SDK_NAME) export CFLAGS="$ARCH_FLAGS -isysroot $SDK_PATH -I$SDK_PATH/usr/include" export CXXFLAGS=$CFLAGS export LDFLAGS="$ARCH_FLAGS" export CC="$(xcrun -sdk $SDK_PATH --find clang) $CFLAGS" export CXX="$(xcrun -sdk $SDK_PATH --find clang++) $CXXFLAGS" export LD="$(xcrun -sdk $SDK_PATH --find ld) $LDFLAGS" make make test .. All tests passed without errors! 
Backtrack logging assumes x86 and required updating (cherry picked from commit 004479c184f62324cb495b75e222b84e5544311e) --- src/debug.c | 56 +++++++++++++++++++++++++++++++++++++++++++++++++++-- 1 file changed, 54 insertions(+), 2 deletions(-) diff --git a/src/debug.c b/src/debug.c index a74c22647..60cc2a1fa 100644 --- a/src/debug.c +++ b/src/debug.c @@ -928,8 +928,11 @@ static void *getMcontextEip(ucontext_t *uc) { /* OSX >= 10.6 */ #if defined(_STRUCT_X86_THREAD_STATE64) && !defined(__i386__) return (void*) uc->uc_mcontext->__ss.__rip; - #else + #elif defined(__i386__) return (void*) uc->uc_mcontext->__ss.__eip; + #else + /* OSX ARM64 */ + return (void*) arm_thread_state64_get_pc(uc->uc_mcontext->__ss); #endif #elif defined(__linux__) /* Linux */ @@ -1015,7 +1018,7 @@ void logRegisters(ucontext_t *uc) { (unsigned long) uc->uc_mcontext->__ss.__gs ); logStackContent((void**)uc->uc_mcontext->__ss.__rsp); - #else + #elif defined(__i386__) /* OSX x86 */ serverLog(LL_WARNING, "\n" @@ -1041,6 +1044,55 @@ void logRegisters(ucontext_t *uc) { (unsigned long) uc->uc_mcontext->__ss.__gs ); logStackContent((void**)uc->uc_mcontext->__ss.__esp); + #else + /* OSX ARM64 */ + serverLog(LL_WARNING, + "\n" + "x0:%016lx x1:%016lx x2:%016lx x3:%016lx\n" + "x4:%016lx x5:%016lx x6:%016lx x7:%016lx\n" + "x8:%016lx x9:%016lx x10:%016lx x11:%016lx\n" + "x12:%016lx x13:%016lx x14:%016lx x15:%016lx\n" + "x16:%016lx x17:%016lx x18:%016lx x19:%016lx\n" + "x20:%016lx x21:%016lx x22:%016lx x23:%016lx\n" + "x24:%016lx x25:%016lx x26:%016lx x27:%016lx\n" + "x28:%016lx fp:%016lx lr:%016lx\n" + "sp:%016lx pc:%016lx cpsr:%08lx\n", + (unsigned long) uc->uc_mcontext->__ss.__x[0], + (unsigned long) uc->uc_mcontext->__ss.__x[1], + (unsigned long) uc->uc_mcontext->__ss.__x[2], + (unsigned long) uc->uc_mcontext->__ss.__x[3], + (unsigned long) uc->uc_mcontext->__ss.__x[4], + (unsigned long) uc->uc_mcontext->__ss.__x[5], + (unsigned long) uc->uc_mcontext->__ss.__x[6], + (unsigned long) uc->uc_mcontext->__ss.__x[7], + (unsigned long) uc->uc_mcontext->__ss.__x[8], + (unsigned long) uc->uc_mcontext->__ss.__x[9], + (unsigned long) uc->uc_mcontext->__ss.__x[10], + (unsigned long) uc->uc_mcontext->__ss.__x[11], + (unsigned long) uc->uc_mcontext->__ss.__x[12], + (unsigned long) uc->uc_mcontext->__ss.__x[13], + (unsigned long) uc->uc_mcontext->__ss.__x[14], + (unsigned long) uc->uc_mcontext->__ss.__x[15], + (unsigned long) uc->uc_mcontext->__ss.__x[16], + (unsigned long) uc->uc_mcontext->__ss.__x[17], + (unsigned long) uc->uc_mcontext->__ss.__x[18], + (unsigned long) uc->uc_mcontext->__ss.__x[19], + (unsigned long) uc->uc_mcontext->__ss.__x[20], + (unsigned long) uc->uc_mcontext->__ss.__x[21], + (unsigned long) uc->uc_mcontext->__ss.__x[22], + (unsigned long) uc->uc_mcontext->__ss.__x[23], + (unsigned long) uc->uc_mcontext->__ss.__x[24], + (unsigned long) uc->uc_mcontext->__ss.__x[25], + (unsigned long) uc->uc_mcontext->__ss.__x[26], + (unsigned long) uc->uc_mcontext->__ss.__x[27], + (unsigned long) uc->uc_mcontext->__ss.__x[28], + (unsigned long) arm_thread_state64_get_fp(uc->uc_mcontext->__ss), + (unsigned long) arm_thread_state64_get_lr(uc->uc_mcontext->__ss), + (unsigned long) arm_thread_state64_get_sp(uc->uc_mcontext->__ss), + (unsigned long) arm_thread_state64_get_pc(uc->uc_mcontext->__ss), + (unsigned long) uc->uc_mcontext->__ss.__cpsr + ); + logStackContent((void**) arm_thread_state64_get_sp(uc->uc_mcontext->__ss)); #endif /* Linux */ #elif defined(__linux__) From 81d36bc6c8f6a60272d6019fe26f27cfe9c1eb60 Mon Sep 17 00:00:00 2001 
From: dmurnane Date: Wed, 15 Jul 2020 06:29:26 -0400 Subject: [PATCH 056/377] Notify systemd on sentinel startup (#7168) Co-authored-by: Daniel Murnane (cherry picked from commit c292d43fec492f555e6889b6f53b7f380f54bccc) --- src/server.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/src/server.c b/src/server.c index 1f794e4ed..9c2126bc0 100644 --- a/src/server.c +++ b/src/server.c @@ -5198,6 +5198,10 @@ int main(int argc, char **argv) { } else { InitServerLast(); sentinelIsRunning(); + if (server.supervised_mode == SUPERVISED_SYSTEMD) { + redisCommunicateSystemd("STATUS=Ready to accept connections\n"); + redisCommunicateSystemd("READY=1\n"); + } } /* Warning the user about suspicious maxmemory setting. */ From 2871eb287acdfdc6247d695bc757908586f0d27f Mon Sep 17 00:00:00 2001 From: Luke Palmer Date: Wed, 15 Jul 2020 13:53:41 -0400 Subject: [PATCH 057/377] Send null for invalidate on flush (#7469) (cherry picked from commit df4c74ef07139d51b06e3d250107c6f71264c33c) --- src/tracking.c | 24 ++++++++++++++---------- 1 file changed, 14 insertions(+), 10 deletions(-) diff --git a/src/tracking.c b/src/tracking.c index 8c2dca7ba..2721de32a 100644 --- a/src/tracking.c +++ b/src/tracking.c @@ -198,9 +198,11 @@ void trackingRememberKeys(client *c) { * * In case the 'proto' argument is non zero, the function will assume that * 'keyname' points to a buffer of 'keylen' bytes already expressed in the - * form of Redis RESP protocol, representing an array of keys to send - * to the client as value of the invalidation. This is used in BCAST mode - * in order to optimized the implementation to use less CPU time. */ + * form of Redis RESP protocol. This is used for: + * - In BCAST mode, to send an array of invalidated keys to all + * applicable clients + * - Following a flush command, to send a single RESP NULL to indicate + * that all keys are now invalid. */ void sendTrackingMessage(client *c, char *keyname, size_t keylen, int proto) { int using_redirection = 0; if (c->client_tracking_redirection) { @@ -342,17 +344,19 @@ void trackingInvalidateKey(client *c, robj *keyobj) { trackingInvalidateKeyRaw(c,keyobj->ptr,sdslen(keyobj->ptr),1); } -/* This function is called when one or all the Redis databases are flushed - * (dbid == -1 in case of FLUSHALL). Caching keys are not specific for - * each DB but are global: currently what we do is send a special - * notification to clients with tracking enabled, invalidating the caching - * key "", which means, "all the keys", in order to avoid flooding clients - * with many invalidation messages for all the keys they may hold. +/* This function is called when one or all the Redis databases are + * flushed (dbid == -1 in case of FLUSHALL). Caching keys are not + * specific for each DB but are global: currently what we do is send a + * special notification to clients with tracking enabled, sending a + * RESP NULL, which means, "all the keys", in order to avoid flooding + * clients with many invalidation messages for all the keys they may + * hold. 
*/ void freeTrackingRadixTree(void *rt) { raxFree(rt); } +/* A RESP NULL is sent to indicate that all keys are invalid */ void trackingInvalidateKeysOnFlush(int dbid) { if (server.tracking_clients) { listNode *ln; @@ -361,7 +365,7 @@ void trackingInvalidateKeysOnFlush(int dbid) { while ((ln = listNext(&li)) != NULL) { client *c = listNodeValue(ln); if (c->flags & CLIENT_TRACKING) { - sendTrackingMessage(c,"",1,0); + sendTrackingMessage(c,shared.null[c->resp]->ptr,sdslen(shared.null[c->resp]->ptr),1); } } } From 575ffeb8797521960674d3cd2009602d2420e450 Mon Sep 17 00:00:00 2001 From: yoav-steinberg Date: Thu, 16 Jul 2020 20:59:38 +0300 Subject: [PATCH 058/377] Support passing stack allocated module strings to moduleCreateArgvFromUserFormat (#7528) Specifically, the key passed to the module aof_rewrite callback is a stack allocated robj. When passing it to RedisModule_EmitAOF (with appropriate "s" fmt string) redis used to panic when trying to inc the ref count of the stack allocated robj. Now support such robjs by coying them to a new heap robj. This doesn't affect performance because using the alternative "c" or "b" format strings also copies the input to a new heap robj. (cherry picked from commit 8a2b0472a78c09398e4416c06b7c5f343348f96b) --- src/module.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/src/module.c b/src/module.c index acfaf2ac7..9316e004d 100644 --- a/src/module.c +++ b/src/module.c @@ -3189,8 +3189,11 @@ robj **moduleCreateArgvFromUserFormat(const char *cmdname, const char *fmt, int argv[argc++] = createStringObject(cstr,strlen(cstr)); } else if (*p == 's') { robj *obj = va_arg(ap,void*); + if (obj->refcount == OBJ_STATIC_REFCOUNT) + obj = createStringObject(obj->ptr,sdslen(obj->ptr)); + else + incrRefCount(obj); argv[argc++] = obj; - incrRefCount(obj); } else if (*p == 'b') { char *buf = va_arg(ap,char*); size_t len = va_arg(ap,size_t); From c662a4e01b3187248c20c8dec38dc8d487b779c8 Mon Sep 17 00:00:00 2001 From: Itamar Haber Date: Thu, 16 Jul 2020 21:31:36 +0300 Subject: [PATCH 059/377] Adds SHA256SUM to redis-stable tarball upload (cherry picked from commit efb04ab4a2981762aa4bd2f4fd26f032e9e5e7da) --- utils/releasetools/02_upload_tarball.sh | 1 + 1 file changed, 1 insertion(+) diff --git a/utils/releasetools/02_upload_tarball.sh b/utils/releasetools/02_upload_tarball.sh index 6400efad7..ef1e777cc 100755 --- a/utils/releasetools/02_upload_tarball.sh +++ b/utils/releasetools/02_upload_tarball.sh @@ -19,4 +19,5 @@ ssh ubuntu@host.redis.io "cd /var/www/download; mv redis-${1} redis-stable; tar cvzf redis-stable.tar.gz redis-stable; rm -rf redis-${1}.tar.gz; + shasum -a 256 redis-stable.tar.gz > redis-stable.tar.gz.SHA256SUM; " From 3da969eeaae3ed09741ca2d5d89b82bf3da3018a Mon Sep 17 00:00:00 2001 From: Oran Agra Date: Sun, 19 Jul 2020 15:33:21 +0300 Subject: [PATCH 060/377] Run daily CI on PRs to release a branch --- .github/workflows/daily.yml | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/.github/workflows/daily.yml b/.github/workflows/daily.yml index 4d54fbc42..5614aad1e 100644 --- a/.github/workflows/daily.yml +++ b/.github/workflows/daily.yml @@ -1,8 +1,10 @@ name: Daily on: - release: - types: [created] + pull_request: + branches: + # any PR to a release branch. + - '[0-9].[0-9]' schedule: - cron: '0 0 * * *' From 1c356d208490a7168b496179de1f4bee75e4d4a1 Mon Sep 17 00:00:00 2001 From: Oran Agra Date: Sun, 19 Jul 2020 14:00:20 +0300 Subject: [PATCH 061/377] Redis 6.0.6. 
--- 00-RELEASENOTES | 245 ++++++++++++++++++++++++++++++++++++++++++++++++ src/help.h | 4 +- src/version.h | 2 +- 3 files changed, 248 insertions(+), 3 deletions(-) diff --git a/00-RELEASENOTES b/00-RELEASENOTES index c6ee44246..484aeb621 100644 --- a/00-RELEASENOTES +++ b/00-RELEASENOTES @@ -11,6 +11,251 @@ CRITICAL: There is a critical bug affecting MOST USERS. Upgrade ASAP. SECURITY: There are security fixes in the release. -------------------------------------------------------------------------------- +================================================================================ +Redis 6.0.6 Released Mon Jul 20 09:31:30 IDT 2020 +================================================================================ + +Upgrade urgency MODERATE: several bugs with moderate impact are fixed here. + +The most important issues are listed here: + +* Fix crash when enabling CLIENT TRACKING with prefix +* EXEC always fails with EXECABORT and multi-state is cleared +* RESTORE ABSTTL won't store expired keys into the db +* redis-cli better handling of non-pritable key names +* TLS: Ignore client cert when tls-auth-clients off +* Tracking: fix invalidation message on flush +* Notify systemd on Sentinel startup +* Fix crash on a misuse of STRALGO +* Few fixes in module API +* Fix a few rare leaks (STRALGO error misuse, Sentinel) +* Fix a possible invalid access in defrag of scripts (unlikely to cause real harm) + +New features: + +* LPOS command to search in a list +* Use user+pass for MIGRATE in redis-cli and redis-benchmark in cluster mode +* redis-cli support TLS for --pipe, --rdb and --replica options +* TLS: Session caching configuration support + +And this is the full list of commits: + +Itamar Haber in commit 50548cafc: + Adds SHA256SUM to redis-stable tarball upload + 1 file changed, 1 insertion(+) + +yoav-steinberg in commit 3a4c6684f: + Support passing stack allocated module strings to moduleCreateArgvFromUserFormat (#7528) + 1 file changed, 4 insertions(+), 1 deletion(-) + +Luke Palmer in commit 2fd0b2bd6: + Send null for invalidate on flush (#7469) + 1 file changed, 14 insertions(+), 10 deletions(-) + +dmurnane in commit c3c81e1a8: + Notify systemd on sentinel startup (#7168) + 1 file changed, 4 insertions(+) + +Developer-Ecosystem-Engineering in commit e2770f29b: + Add registers dump support for Apple silicon (#7453) + 1 file changed, 54 insertions(+), 2 deletions(-) + +Wen Hui in commit b068eae97: + correct error msg for num connections reaching maxclients in cluster mode (#7444) + 1 file changed, 2 insertions(+), 2 deletions(-) + +WuYunlong in commit e6169ae5c: + Fix command help for unexpected options (#7476) + 6 files changed, 20 insertions(+), 3 deletions(-) + +WuYunlong in commit abf08fc02: + Refactor RM_KeyType() by using macro. (#7486) + 1 file changed, 1 insertion(+), 1 deletion(-) + +Oran Agra in commit 11b83076a: + diskless master disconnect replicas when rdb child failed (#7518) + 1 file changed, 6 insertions(+), 5 deletions(-) + +Oran Agra in commit 8f27f2f7d: + redis-cli tests, fix valgrind timing issue (#7519) + 1 file changed, 1 insertion(+), 1 deletion(-) + +WuYunlong in commit 180b588e8: + Fix out of update help info in tcl tests. (#7516) + 1 file changed, 2 deletions(-) + +Qu Chen in commit 417c60bdc: + Replica always reports master's config epoch in CLUSTER NODES output. 
(#7235) + 1 file changed, 5 insertions(+), 1 deletion(-) + +Oran Agra in commit 72a242419: + RESTORE ABSTTL skip expired keys - leak (#7511) + 1 file changed, 1 insertion(+) + +Oran Agra in commit 2ca45239f: + fix recently added time sensitive tests failing with valgrind (#7512) + 2 files changed, 12 insertions(+), 6 deletions(-) + +Oran Agra in commit 123dc8b21: + runtest --stop pause stops before terminating the redis server (#7513) + 2 files changed, 8 insertions(+), 2 deletions(-) + +Oran Agra in commit a6added45: + update release scripts for new hosts, and CI to run more tests (#7480) + 5 files changed, 68 insertions(+), 26 deletions(-) + +jimgreen2013 in commit cf4869f9e: + fix description about ziplist, the code is ok (#6318) + 1 file changed, 2 insertions(+), 2 deletions(-) + +马永泽 in commit d548f219b: + fix benchmark in cluster mode fails to authenticate (#7488) + 1 file changed, 56 insertions(+), 40 deletions(-) + +Abhishek Soni in commit e58eb7b89: + fix: typo in CI job name (#7466) + 1 file changed, 1 insertion(+), 1 deletion(-) + +Jiayuan Chen in commit 6def10a2b: + Fix typo in deps README (#7500) + 1 file changed, 1 insertion(+), 1 deletion(-) + +WuYunlong in commit 8af61afef: + Add missing latency-monitor tcl test to test_helper.tcl. (#6782) + 1 file changed, 1 insertion(+) + +Yossi Gottlieb in commit a419f400e: + TLS: Session caching configuration support. (#7420) + 6 files changed, 56 insertions(+), 16 deletions(-) + +Yossi Gottlieb in commit 2e4bb2667: + TLS: Ignore client cert when tls-auth-clients off. (#7457) + 1 file changed, 1 insertion(+), 3 deletions(-) + +James Hilliard in commit f0b1aee9e: + Use pkg-config to properly detect libssl and libcrypto libraries (#7452) + 1 file changed, 15 insertions(+), 3 deletions(-) + +Yossi Gottlieb in commit e92b99564: + TLS: Add missing redis-cli options. (#7456) + 3 files changed, 166 insertions(+), 52 deletions(-) + +Oran Agra in commit 1f3db5bf5: + redis-cli --hotkeys fixed to handle non-printable key names + 1 file changed, 11 insertions(+), 5 deletions(-) + +Oran Agra in commit c3044f369: + redis-cli --bigkeys fixed to handle non-printable key names + 1 file changed, 24 insertions(+), 16 deletions(-) + +Oran Agra in commit b3f75527b: + RESTORE ABSTTL won't store expired keys into the db (#7472) + 4 files changed, 46 insertions(+), 16 deletions(-) + +huangzhw in commit 6f87fc92f: + defrag.c activeDefragSdsListAndDict when defrag sdsele, We can't use (#7492) + 1 file changed, 1 insertion(+), 1 deletion(-) + +Oran Agra in commit d8e6a3e5b: + skip a test that uses +inf on valgrind (#7440) + 1 file changed, 12 insertions(+), 9 deletions(-) + +Oran Agra in commit 28fd1a110: + stabilize tests that look for log lines (#7367) + 3 files changed, 33 insertions(+), 11 deletions(-) + +Oran Agra in commit a513b4ed9: + tests/valgrind: don't use debug restart (#7404) + 4 files changed, 114 insertions(+), 57 deletions(-) + +Oran Agra in commit 70e72fc1b: + change references to the github repo location (#7479) + 5 files changed, 7 insertions(+), 7 deletions(-) + +zhaozhao.zz in commit c63e533cc: + BITOP: propagate only when it really SET or DEL targetkey (#5783) + 1 file changed, 2 insertions(+), 1 deletion(-) + +antirez in commit 31040ff54: + Update comment to clarify change in #7398. + 1 file changed, 4 insertions(+), 1 deletion(-) + +antirez in commit b605fe827: + LPOS: option FIRST renamed RANK. 
+ 2 files changed, 19 insertions(+), 19 deletions(-) + +Dave Nielsen in commit 8deb24954: + updated copyright year + 1 file changed, 1 insertion(+), 1 deletion(-) + +Oran Agra in commit a61c2930c: + EXEC always fails with EXECABORT and multi-state is cleared + 6 files changed, 204 insertions(+), 91 deletions(-) + +antirez in commit 3c8041637: + Include cluster.h for getClusterConnectionsCount(). + 1 file changed, 1 insertion(+) + +antirez in commit 5be673ee8: + Fix BITFIELD i64 type handling, see #7417. + 1 file changed, 8 insertions(+), 6 deletions(-) + +antirez in commit 5f289df9b: + Clarify maxclients and cluster in conf. Remove myself too. + 2 files changed, 9 insertions(+), 1 deletion(-) + +hwware in commit 000f928d6: + fix memory leak in sentinel connection sharing + 1 file changed, 1 insertion(+) + +chenhui0212 in commit d9a3c0171: + Fix comments in function raxLowWalk of listpack.c + 1 file changed, 2 insertions(+), 2 deletions(-) + +Tomasz Poradowski in commit 7526e4506: + ensure SHUTDOWN_NOSAVE in Sentinel mode + 2 files changed, 9 insertions(+), 8 deletions(-) + +chenhui0212 in commit 6487cbc33: + fix comments in listpack.c + 1 file changed, 2 insertions(+), 2 deletions(-) + +antirez in commit 69b66bfca: + Use cluster connections too, to limit maxclients. + 3 files changed, 23 insertions(+), 8 deletions(-) + +antirez in commit 5a960a033: + Tracking: fix enableBcastTrackingForPrefix() invalid sdslen() call. + 1 file changed, 1 insertion(+), 1 deletion(-) + +root in commit 1c2e50de3: + cluster.c remove if of clusterSendFail in markNodeAsFailingIfNeeded + 1 file changed, 1 insertion(+), 1 deletion(-) + +meir@redislabs.com in commit 040efb697: + Fix RM_ScanKey module api not to return int encoded strings + 3 files changed, 24 insertions(+), 7 deletions(-) + +antirez in commit 1b8b7941d: + Fix LCS object type checking. Related to #7379. + 1 file changed, 17 insertions(+), 10 deletions(-) + +hwware in commit 6b571b45a: + fix memory leak + 1 file changed, 11 insertions(+), 12 deletions(-) + +hwware in commit 674759062: + fix server crash in STRALGO command + 1 file changed, 7 insertions(+) + +Benjamin Sergeant in commit a05ffefdc: + Update redis-cli.c + 1 file changed, 19 insertions(+), 6 deletions(-) + +Jamie Scott in commit 870b63733: + minor fix + 1 file changed, 2 insertions(+), 3 deletions(-) + ================================================================================ Redis 6.0.5 Released Tue Jun 09 11:56:08 CEST 2020 ================================================================================ diff --git a/src/help.h b/src/help.h index 1b1ac5e08..64344aa63 100644 --- a/src/help.h +++ b/src/help.h @@ -1,4 +1,4 @@ -/* Automatically generated by generate-command-help.rb, do not edit. */ +/* Automatically generated by ./utils/generate-command-help.rb, do not edit. 
*/ #ifndef __REDIS_HELP_H #define __REDIS_HELP_H @@ -659,7 +659,7 @@ struct commandHelp { 2, "1.0.0" }, { "LPOS", - "key element [FIRST rank] [COUNT num-matches] [MAXLEN len]", + "key element [RANK rank] [COUNT num-matches] [MAXLEN len]", "Return the index of matching elements on a list", 2, "6.0.6" }, diff --git a/src/version.h b/src/version.h index e1eb096f3..9dfd8f274 100644 --- a/src/version.h +++ b/src/version.h @@ -1 +1 @@ -#define REDIS_VERSION "6.0.5" +#define REDIS_VERSION "6.0.6" From 925f1ce51e69c06ce96d97388c5edcd7d1fabc76 Mon Sep 17 00:00:00 2001 From: Scott Brenner Date: Sun, 19 Jul 2020 23:22:24 -0700 Subject: [PATCH 062/377] GitHub Actions workflows - use latest version of actions/checkout (#7534) (cherry picked from commit c7644eda713044545a135061e4863c2238ebd244) --- .github/workflows/ci.yml | 10 +++++----- .github/workflows/daily.yml | 10 +++++----- 2 files changed, 10 insertions(+), 10 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 730eaf0dd..4d6c1c14c 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -7,7 +7,7 @@ jobs: test-ubuntu-latest: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v1 + - uses: actions/checkout@v2 - name: make run: make - name: test @@ -20,21 +20,21 @@ jobs: build-ubuntu-old: runs-on: ubuntu-16.04 steps: - - uses: actions/checkout@v1 + - uses: actions/checkout@v2 - name: make run: make build-macos-latest: runs-on: macos-latest steps: - - uses: actions/checkout@v1 + - uses: actions/checkout@v2 - name: make run: make build-32bit: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v1 + - uses: actions/checkout@v2 - name: make run: | sudo apt-get update && sudo apt-get install libc6-dev-i386 @@ -43,7 +43,7 @@ jobs: build-libc-malloc: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v1 + - uses: actions/checkout@v2 - name: make run: make MALLOC=libc diff --git a/.github/workflows/daily.yml b/.github/workflows/daily.yml index 5614aad1e..5b5f3f7d4 100644 --- a/.github/workflows/daily.yml +++ b/.github/workflows/daily.yml @@ -14,7 +14,7 @@ jobs: runs-on: ubuntu-latest timeout-minutes: 14400 steps: - - uses: actions/checkout@v1 + - uses: actions/checkout@v2 - name: make run: make - name: test @@ -32,7 +32,7 @@ jobs: runs-on: ubuntu-latest timeout-minutes: 14400 steps: - - uses: actions/checkout@v1 + - uses: actions/checkout@v2 - name: make run: make MALLOC=libc - name: test @@ -50,7 +50,7 @@ jobs: runs-on: ubuntu-latest timeout-minutes: 14400 steps: - - uses: actions/checkout@v1 + - uses: actions/checkout@v2 - name: make run: | sudo apt-get update && sudo apt-get install libc6-dev-i386 @@ -72,7 +72,7 @@ jobs: runs-on: ubuntu-latest timeout-minutes: 14400 steps: - - uses: actions/checkout@v1 + - uses: actions/checkout@v2 - name: make run: | make BUILD_TLS=yes @@ -92,7 +92,7 @@ jobs: runs-on: ubuntu-latest timeout-minutes: 14400 steps: - - uses: actions/checkout@v1 + - uses: actions/checkout@v2 - name: make run: make valgrind - name: test From 8739408147a6440cb221ca678a9ebc5511d5c92c Mon Sep 17 00:00:00 2001 From: "zhaozhao.zz" <276441700@qq.com> Date: Mon, 20 Jul 2020 22:21:55 +0800 Subject: [PATCH 063/377] replication: need handle -NOPERM error after send ping (#7538) (cherry picked from commit 57fbe4cbafb07aa9f036d7e2f5cf88830c27a921) --- src/replication.c | 1 + 1 file changed, 1 insertion(+) diff --git a/src/replication.c b/src/replication.c index 8457150a0..197acde79 100644 --- a/src/replication.c +++ b/src/replication.c @@ -2158,6 +2158,7 @@ void syncWithMaster(connection 
*conn) { * both. */ if (err[0] != '+' && strncmp(err,"-NOAUTH",7) != 0 && + strncmp(err,"-NOPERM",7) != 0 && strncmp(err,"-ERR operation not permitted",28) != 0) { serverLog(LL_WARNING,"Error reply to PING from master: '%s'",err); From 25ab97cbe378be17f8e4c513b3d32823b9aa50df Mon Sep 17 00:00:00 2001 From: Wen Hui Date: Mon, 20 Jul 2020 21:53:03 -0400 Subject: [PATCH 064/377] add missing caching command in client help (#7399) (cherry picked from commit e035e5218f4245f5fa5eb0cafc56c572d9f4fa15) --- src/networking.c | 1 + 1 file changed, 1 insertion(+) diff --git a/src/networking.c b/src/networking.c index 589a459d4..e3b62f151 100644 --- a/src/networking.c +++ b/src/networking.c @@ -2147,6 +2147,7 @@ void clientCommand(client *c) { "SETNAME -- Assign the name to the current connection.", "UNBLOCK [TIMEOUT|ERROR] -- Unblock the specified blocked client.", "TRACKING (on|off) [REDIRECT ] [BCAST] [PREFIX first] [PREFIX second] [OPTIN] [OPTOUT]... -- Enable client keys tracking for client side caching.", +"CACHING (yes|no) -- Enable/Disable tracking of the keys for next command in OPTIN/OPTOUT mode.", "GETREDIR -- Return the client ID we are redirecting to when tracking is enabled.", NULL }; From fb9b6e0919aebfb173d9e4e727cbd673c7cff4a1 Mon Sep 17 00:00:00 2001 From: Wen Hui Date: Tue, 21 Jul 2020 01:13:05 -0400 Subject: [PATCH 065/377] Add missing calls to raxStop (#7532) Since the dynamic allocations in raxIterator are only used for deep walks, memory leak due to missing call to raxStop can only happen for rax with key names longer than 32 bytes. Out of all the missing calls, the only ones that may lead to a leak are the rax for consumer groups and consumers, and these were only in AOFRW and rdbSave, which normally only happen in fork or at shutdown. (cherry picked from commit 0b8d47a9857142203c03846fc4284746695d3dc3) --- src/aof.c | 19 +++++++++++----- src/defrag.c | 1 + src/rdb.c | 61 ++++++++++++++++++++++++++++++++++++++++----------- src/timeout.c | 1 + 4 files changed, 63 insertions(+), 19 deletions(-) diff --git a/src/aof.c b/src/aof.c index 6f8e53712..cbc0989d0 100644 --- a/src/aof.c +++ b/src/aof.c @@ -1244,12 +1244,16 @@ int rewriteStreamObject(rio *r, robj *key, robj *o) { while(raxNext(&ri)) { streamCG *group = ri.data; /* Emit the XGROUP CREATE in order to create the group. */ - if (rioWriteBulkCount(r,'*',5) == 0) return 0; - if (rioWriteBulkString(r,"XGROUP",6) == 0) return 0; - if (rioWriteBulkString(r,"CREATE",6) == 0) return 0; - if (rioWriteBulkObject(r,key) == 0) return 0; - if (rioWriteBulkString(r,(char*)ri.key,ri.key_len) == 0) return 0; - if (rioWriteBulkStreamID(r,&group->last_id) == 0) return 0; + if (!rioWriteBulkCount(r,'*',5) || + !rioWriteBulkString(r,"XGROUP",6) || + !rioWriteBulkString(r,"CREATE",6) || + !rioWriteBulkObject(r,key) || + !rioWriteBulkString(r,(char*)ri.key,ri.key_len) || + !rioWriteBulkStreamID(r,&group->last_id)) + { + raxStop(&ri); + return 0; + } /* Generate XCLAIMs for each consumer that happens to * have pending entries. 
Empty consumers have no semantical @@ -1270,6 +1274,9 @@ int rewriteStreamObject(rio *r, robj *key, robj *o) { ri.key_len,consumer, ri_pel.key,nack) == 0) { + raxStop(&ri_pel); + raxStop(&ri_cons); + raxStop(&ri); return 0; } } diff --git a/src/defrag.c b/src/defrag.c index 2d8db8ea5..07a16ca6c 100644 --- a/src/defrag.c +++ b/src/defrag.c @@ -662,6 +662,7 @@ int scanLaterStraemListpacks(robj *ob, unsigned long *cursor, long long endtime, /* if cursor is non-zero, we seek to the static 'last' */ if (!raxSeek(&ri,">", last, sizeof(last))) { *cursor = 0; + raxStop(&ri); return 0; } /* assign the iterator node callback after the seek, so that the diff --git a/src/rdb.c b/src/rdb.c index 5cec208c5..ac1985d24 100644 --- a/src/rdb.c +++ b/src/rdb.c @@ -697,15 +697,23 @@ ssize_t rdbSaveStreamPEL(rio *rdb, rax *pel, int nacks) { while(raxNext(&ri)) { /* We store IDs in raw form as 128 big big endian numbers, like * they are inside the radix tree key. */ - if ((n = rdbWriteRaw(rdb,ri.key,sizeof(streamID))) == -1) return -1; + if ((n = rdbWriteRaw(rdb,ri.key,sizeof(streamID))) == -1) { + raxStop(&ri); + return -1; + } nwritten += n; if (nacks) { streamNACK *nack = ri.data; - if ((n = rdbSaveMillisecondTime(rdb,nack->delivery_time)) == -1) + if ((n = rdbSaveMillisecondTime(rdb,nack->delivery_time)) == -1) { + raxStop(&ri); return -1; + } nwritten += n; - if ((n = rdbSaveLen(rdb,nack->delivery_count)) == -1) return -1; + if ((n = rdbSaveLen(rdb,nack->delivery_count)) == -1) { + raxStop(&ri); + return -1; + } nwritten += n; /* We don't save the consumer name: we'll save the pending IDs * for each consumer in the consumer PEL, and resolve the consumer @@ -734,20 +742,27 @@ size_t rdbSaveStreamConsumers(rio *rdb, streamCG *cg) { streamConsumer *consumer = ri.data; /* Consumer name. */ - if ((n = rdbSaveRawString(rdb,ri.key,ri.key_len)) == -1) return -1; + if ((n = rdbSaveRawString(rdb,ri.key,ri.key_len)) == -1) { + raxStop(&ri); + return -1; + } nwritten += n; /* Last seen time. */ - if ((n = rdbSaveMillisecondTime(rdb,consumer->seen_time)) == -1) + if ((n = rdbSaveMillisecondTime(rdb,consumer->seen_time)) == -1) { + raxStop(&ri); return -1; + } nwritten += n; /* Consumer PEL, without the ACKs (see last parameter of the function * passed with value of 0), at loading time we'll lookup the ID * in the consumer group global PEL and will put a reference in the * consumer local PEL. */ - if ((n = rdbSaveStreamPEL(rdb,consumer->pel,0)) == -1) + if ((n = rdbSaveStreamPEL(rdb,consumer->pel,0)) == -1) { + raxStop(&ri); return -1; + } nwritten += n; } raxStop(&ri); @@ -912,9 +927,15 @@ ssize_t rdbSaveObject(rio *rdb, robj *o, robj *key) { while (raxNext(&ri)) { unsigned char *lp = ri.data; size_t lp_bytes = lpBytes(lp); - if ((n = rdbSaveRawString(rdb,ri.key,ri.key_len)) == -1) return -1; + if ((n = rdbSaveRawString(rdb,ri.key,ri.key_len)) == -1) { + raxStop(&ri); + return -1; + } nwritten += n; - if ((n = rdbSaveRawString(rdb,lp,lp_bytes)) == -1) return -1; + if ((n = rdbSaveRawString(rdb,lp,lp_bytes)) == -1) { + raxStop(&ri); + return -1; + } nwritten += n; } raxStop(&ri); @@ -946,22 +967,36 @@ ssize_t rdbSaveObject(rio *rdb, robj *o, robj *key) { streamCG *cg = ri.data; /* Save the group name. */ - if ((n = rdbSaveRawString(rdb,ri.key,ri.key_len)) == -1) + if ((n = rdbSaveRawString(rdb,ri.key,ri.key_len)) == -1) { + raxStop(&ri); return -1; + } nwritten += n; /* Last ID. 
*/ - if ((n = rdbSaveLen(rdb,cg->last_id.ms)) == -1) return -1; + if ((n = rdbSaveLen(rdb,cg->last_id.ms)) == -1) { + raxStop(&ri); + return -1; + } nwritten += n; - if ((n = rdbSaveLen(rdb,cg->last_id.seq)) == -1) return -1; + if ((n = rdbSaveLen(rdb,cg->last_id.seq)) == -1) { + raxStop(&ri); + return -1; + } nwritten += n; /* Save the global PEL. */ - if ((n = rdbSaveStreamPEL(rdb,cg->pel,1)) == -1) return -1; + if ((n = rdbSaveStreamPEL(rdb,cg->pel,1)) == -1) { + raxStop(&ri); + return -1; + } nwritten += n; /* Save the consumers of this group. */ - if ((n = rdbSaveStreamConsumers(rdb,cg)) == -1) return -1; + if ((n = rdbSaveStreamConsumers(rdb,cg)) == -1) { + raxStop(&ri); + return -1; + } nwritten += n; } raxStop(&ri); diff --git a/src/timeout.c b/src/timeout.c index 7787a049f..d4c4690e5 100644 --- a/src/timeout.c +++ b/src/timeout.c @@ -150,6 +150,7 @@ void handleBlockedClientsTimeout(void) { raxRemove(server.clients_timeout_table,ri.key,ri.key_len,NULL); raxSeek(&ri,"^",NULL,0); } + raxStop(&ri); } /* Get a timeout value from an object and store it into 'timeout'. From 443e57b08ed6635042755824d2e0c792b2fb4410 Mon Sep 17 00:00:00 2001 From: Remi Collet Date: Tue, 21 Jul 2020 08:07:54 +0200 Subject: [PATCH 066/377] Fix deprecated tail syntax in tests (#7543) (cherry picked from commit 7853d8410b12c3ffac699c8a2e06f2a8e6df26b0) --- tests/support/util.tcl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/support/util.tcl b/tests/support/util.tcl index fce3ffd18..69ea675dc 100644 --- a/tests/support/util.tcl +++ b/tests/support/util.tcl @@ -119,7 +119,7 @@ proc wait_for_log_message {srv_idx pattern from_line maxtries delay} { set retry $maxtries set stdout [srv $srv_idx stdout] while {$retry} { - set result [exec tail +$from_line < $stdout] + set result [exec tail -n +$from_line < $stdout] set result [split $result "\n"] foreach line $result { if {[string match $pattern $line]} { From d2fac9bdb9784282254d74ea797b57c410dd5072 Mon Sep 17 00:00:00 2001 From: Oran Agra Date: Tue, 21 Jul 2020 14:07:06 +0300 Subject: [PATCH 067/377] Fixes to release scripts (#7547) (cherry picked from commit a472f35efd20eb9dfe0261badb33353bc062cc98) --- utils/releasetools/03_test_release.sh | 1 + utils/releasetools/04_release_hash.sh | 3 +-- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/utils/releasetools/03_test_release.sh b/utils/releasetools/03_test_release.sh index 169e965d5..493d0b74c 100755 --- a/utils/releasetools/03_test_release.sh +++ b/utils/releasetools/03_test_release.sh @@ -14,6 +14,7 @@ echo "Doing sanity test on the actual tarball" cd /tmp rm -rf test_release_tmp_dir +mkdir test_release_tmp_dir cd test_release_tmp_dir rm -f $TARNAME rm -rf redis-${TAG} diff --git a/utils/releasetools/04_release_hash.sh b/utils/releasetools/04_release_hash.sh index bc1ebb66c..d93292803 100755 --- a/utils/releasetools/04_release_hash.sh +++ b/utils/releasetools/04_release_hash.sh @@ -7,8 +7,7 @@ fi SHA=$(curl -s http://download.redis.io/releases/redis-${1}.tar.gz | shasum -a 256 | cut -f 1 -d' ') ENTRY="hash redis-${1}.tar.gz sha256 $SHA http://download.redis.io/releases/redis-${1}.tar.gz" -echo $ENTRY >> ~/hack/redis-hashes/README -vi ../redis-hashes/README +echo $ENTRY >> ../redis-hashes/README echo "Press any key to commit, Ctrl-C to abort)." 
read yes (cd ../redis-hashes; git commit -a -m "${1} hash."; git push) From 6d80011e73325a68e71597d017a3dbdb107a4945 Mon Sep 17 00:00:00 2001 From: Yossi Gottlieb Date: Tue, 21 Jul 2020 14:17:14 +0300 Subject: [PATCH 068/377] Tests: drop TCL 8.6 dependency. (#7548) This re-implements the redis-cli --pipe test so it no longer depends on a close feature available only in TCL 8.6. Basically what this test does is run redis-cli --pipe, generates a bunch of commands and pipes them through redis-cli, and inspects the result in both Redis and the redis-cli output. To do that, we need to close stdin for redis-cli to indicate we're done so it can flush its buffers and exit. TCL has bi-directional channels can only offers a way to "one-way close" a channel with TCL 8.6. To work around that, we now generate the commands into a file and feed that file to redis-cli directly. As we're writing to an actual file, the number of commands is now reduced. (cherry picked from commit dbc0a64843ccd07515ac41ca80497a9e5ffd107a) --- tests/integration/redis-cli.tcl | 51 ++++++++++++++++++--------------- 1 file changed, 28 insertions(+), 23 deletions(-) diff --git a/tests/integration/redis-cli.tcl b/tests/integration/redis-cli.tcl index c70d14eeb..44ff430e2 100644 --- a/tests/integration/redis-cli.tcl +++ b/tests/integration/redis-cli.tcl @@ -1,10 +1,16 @@ source tests/support/cli.tcl start_server {tags {"cli"}} { - proc open_cli {{opts "-n 9"}} { + proc open_cli {{opts "-n 9"} {infile ""}} { set ::env(TERM) dumb set cmdline [rediscli [srv port] $opts] - set fd [open "|$cmdline" "r+"] + if {$infile ne ""} { + set cmdline "$cmdline < $infile" + set mode "r" + } else { + set mode "r+" + } + set fd [open "|$cmdline" $mode] fconfigure $fd -buffering none fconfigure $fd -blocking false fconfigure $fd -translation binary @@ -228,7 +234,7 @@ start_server {tags {"cli"}} { assert_equal {} [r get should-not-exist] } - test_nontty_cli "Dumping an RDB" { + test "Dumping an RDB" { # Disk-based master assert_match "OK" [r config set repl-diskless-sync no] test_redis_cli_rdb_dump @@ -239,7 +245,7 @@ start_server {tags {"cli"}} { test_redis_cli_rdb_dump } - test_nontty_cli "Connecting as a replica" { + test "Connecting as a replica" { set fd [open_cli "--replica"] wait_for_condition 500 500 { [string match {*slave0:*state=online*} [r info]] @@ -258,31 +264,30 @@ start_server {tags {"cli"}} { close_cli $fd } - test_nontty_cli "Piping raw protocol" { - set fd [open_cli "--pipe"] - fconfigure $fd -blocking true + test "Piping raw protocol" { + set cmds [tmpfile "cli_cmds"] + set cmds_fd [open $cmds "w"] - # Create a new deferring client and overwrite its fd - set client [redis [srv 0 "host"] [srv 0 "port"] 1 0] - set ::redis::fd($::redis::id) $fd - $client select 9 - - r del test-counter - for {set i 0} {$i < 10000} {incr i} { - $client incr test-counter - $client set large-key [string repeat "x" 20000] - } + puts $cmds_fd [formatCommand select 9] + puts $cmds_fd [formatCommand del test-counter] for {set i 0} {$i < 1000} {incr i} { - $client set very-large-key [string repeat "x" 512000] + puts $cmds_fd [formatCommand incr test-counter] + puts $cmds_fd [formatCommand set large-key [string repeat "x" 20000]] } - close $fd write - set output [read_cli $fd] + for {set i 0} {$i < 100} {incr i} { + puts $cmds_fd [formatCommand set very-large-key [string repeat "x" 512000]] + } + close $cmds_fd - assert_equal {10000} [r get test-counter] - assert_match {*All data transferred*errors: 0*replies: 21001*} $output + set cli_fd [open_cli "--pipe" 
$cmds] + fconfigure $cli_fd -blocking true + set output [read_cli $cli_fd] - close_cli $fd + assert_equal {1000} [r get test-counter] + assert_match {*All data transferred*errors: 0*replies: 2102*} $output + + file delete $cmds } } From 2b45c88a6a5272b7fcf77a60d0c1e2fc6aba1f39 Mon Sep 17 00:00:00 2001 From: Oran Agra Date: Tue, 21 Jul 2020 16:56:19 +0300 Subject: [PATCH 069/377] testsuite may leave servers alive on error (#7549) in cases where you have test name { start_server { start_server { assert } } } the exception will be thrown to the test proc, and the servers are supposed to be killed on the way out. but it seems there was always a bug of not cleaning the server stack, and recently (#7404) we started relying on that stack in order to kill them, so with that bug sometimes we would have tried to kill the same server twice, and leave one alive. luckly, in most cases the pattern is: start_server { test name { } } (cherry picked from commit bb170fa06e5909dd816b6530121952d57c8209a0) --- tests/support/server.tcl | 3 +++ 1 file changed, 3 insertions(+) diff --git a/tests/support/server.tcl b/tests/support/server.tcl index ea7d0b13c..0afe89f7c 100644 --- a/tests/support/server.tcl +++ b/tests/support/server.tcl @@ -396,6 +396,9 @@ proc start_server {options {code undefined}} { # fetch srv back from the server list, in case it was restarted by restart_server (new PID) set srv [lindex $::servers end] + # pop the server object + set ::servers [lrange $::servers 0 end-1] + # Kill the server without checking for leaks dict set srv "skipleaks" 1 kill_server $srv From 32d225c6440c530c7de143edcf9d3863330d46a9 Mon Sep 17 00:00:00 2001 From: Madelyn Olson <34459052+madolson@users.noreply.github.com> Date: Tue, 21 Jul 2020 17:00:13 -0700 Subject: [PATCH 070/377] Properly reset errno for rdbLoad (#7542) (cherry picked from commit 9615c7480de53c920baadf6279b527b60de8f0d4) --- src/server.c | 1 + 1 file changed, 1 insertion(+) diff --git a/src/server.c b/src/server.c index 9c2126bc0..db32227bc 100644 --- a/src/server.c +++ b/src/server.c @@ -4876,6 +4876,7 @@ void loadDataFromDisk(void) { serverLog(LL_NOTICE,"DB loaded from append only file: %.3f seconds",(float)(ustime()-start)/1000000); } else { rdbSaveInfo rsi = RDB_SAVE_INFO_INIT; + errno = 0; /* Prevent a stale value from affecting error checking */ if (rdbLoad(server.rdb_filename,&rsi,RDBFLAGS_NONE) == C_OK) { serverLog(LL_NOTICE,"DB loaded from disk: %.3f seconds", (float)(ustime()-start)/1000000); From 558a343b3cea475e34284ef4e464a0925aa8d8b8 Mon Sep 17 00:00:00 2001 From: Oran Agra Date: Thu, 23 Jul 2020 13:06:24 +0300 Subject: [PATCH 071/377] Stabilize bgsave test that sometimes fails with valgrind (#7559) on ci.redis.io the test fails a lot, reporting that bgsave didn't end. increaseing the timeout we wait for that bgsave to get aborted. in addition to that, i also verify that it indeed got aborted by checking that the save counter wasn't reset. add another test to verify that a successful bgsave indeed resets the change counter. 
(cherry picked from commit 49d4aebce0a0b94cd2b302d276be95d1a1ce8610) --- tests/integration/rdb.tcl | 22 ++++++++++++++++++++-- 1 file changed, 20 insertions(+), 2 deletions(-) diff --git a/tests/integration/rdb.tcl b/tests/integration/rdb.tcl index b176bf199..6535cd089 100644 --- a/tests/integration/rdb.tcl +++ b/tests/integration/rdb.tcl @@ -118,16 +118,34 @@ start_server_and_kill_it [list "dir" $server_path] { start_server {} { test {Test FLUSHALL aborts bgsave} { + # 1000 keys with 1ms sleep per key shuld take 1 second r config set rdb-key-save-delay 1000 r debug populate 1000 r bgsave assert_equal [s rdb_bgsave_in_progress] 1 r flushall - after 200 - assert_equal [s rdb_bgsave_in_progress] 0 + # wait half a second max + wait_for_condition 5 100 { + [s rdb_bgsave_in_progress] == 0 + } else { + fail "bgsave not aborted" + } + # veirfy that bgsave failed, by checking that the change counter is still high + assert_lessthan 999 [s rdb_changes_since_last_save] # make sure the server is still writable r set x xx } + + test {bgsave resets the change counter} { + r config set rdb-key-save-delay 0 + r bgsave + wait_for_condition 5 100 { + [s rdb_bgsave_in_progress] == 0 + } else { + fail "bgsave not aborted" + } + assert_equal [s rdb_changes_since_last_save] 0 + } } test {client freed during loading} { From 53f36dc5b62743dd84798dbfc96500931f4b49d5 Mon Sep 17 00:00:00 2001 From: grishaf Date: Sun, 26 Jul 2020 08:27:30 +0300 Subject: [PATCH 072/377] Fix prepareForShutdown function declaration (#7566) (cherry picked from commit f8751d03ba9635064c89844d5915c2ec2dcdc827) --- src/server.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/server.h b/src/server.h index 3f471efcb..57047f55b 100644 --- a/src/server.h +++ b/src/server.h @@ -1976,7 +1976,7 @@ void forceCommandPropagation(client *c, int flags); void preventCommandPropagation(client *c); void preventCommandAOF(client *c); void preventCommandReplication(client *c); -int prepareForShutdown(); +int prepareForShutdown(int flags); #ifdef __GNUC__ void serverLog(int level, const char *fmt, ...) __attribute__((format(printf, 2, 3))); From ba559dc9036913f7c367d909f5db4b8fd58c6bb9 Mon Sep 17 00:00:00 2001 From: Yossi Gottlieb Date: Mon, 27 Jul 2020 12:52:13 +0300 Subject: [PATCH 073/377] TLS: support cluster/replication without tls-port. Initialize and configure OpenSSL even when tls-port is not used, because we may still have tls-cluster or tls-replication. Also, make sure to reconfigure OpenSSL when these parameters are changed as TLS could have been enabled for the first time. (cherry picked from commit b76a93c362091daafd8a8d15a45d527b7437d013) --- src/config.c | 6 +++--- src/server.c | 3 ++- 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/src/config.c b/src/config.c index acf1b069f..9b6dc459c 100644 --- a/src/config.c +++ b/src/config.c @@ -2221,11 +2221,11 @@ standardConfig configs[] = { createOffTConfig("auto-aof-rewrite-min-size", NULL, MODIFIABLE_CONFIG, 0, LLONG_MAX, server.aof_rewrite_min_size, 64*1024*1024, MEMORY_CONFIG, NULL, NULL), #ifdef USE_OPENSSL - createIntConfig("tls-port", NULL, IMMUTABLE_CONFIG, 0, 65535, server.tls_port, 0, INTEGER_CONFIG, NULL, NULL), /* TCP port. */ + createIntConfig("tls-port", NULL, IMMUTABLE_CONFIG, 0, 65535, server.tls_port, 0, INTEGER_CONFIG, NULL, updateTlsCfgInt), /* TCP port. 
*/ createIntConfig("tls-session-cache-size", NULL, MODIFIABLE_CONFIG, 0, INT_MAX, server.tls_ctx_config.session_cache_size, 20*1024, INTEGER_CONFIG, NULL, updateTlsCfgInt), createIntConfig("tls-session-cache-timeout", NULL, MODIFIABLE_CONFIG, 0, INT_MAX, server.tls_ctx_config.session_cache_timeout, 300, INTEGER_CONFIG, NULL, updateTlsCfgInt), - createBoolConfig("tls-cluster", NULL, MODIFIABLE_CONFIG, server.tls_cluster, 0, NULL, NULL), - createBoolConfig("tls-replication", NULL, MODIFIABLE_CONFIG, server.tls_replication, 0, NULL, NULL), + createBoolConfig("tls-cluster", NULL, MODIFIABLE_CONFIG, server.tls_cluster, 0, NULL, updateTlsCfgBool), + createBoolConfig("tls-replication", NULL, MODIFIABLE_CONFIG, server.tls_replication, 0, NULL, updateTlsCfgBool), createBoolConfig("tls-auth-clients", NULL, MODIFIABLE_CONFIG, server.tls_auth_clients, 1, NULL, NULL), createBoolConfig("tls-prefer-server-ciphers", NULL, MODIFIABLE_CONFIG, server.tls_ctx_config.prefer_server_ciphers, 0, NULL, updateTlsCfgBool), createBoolConfig("tls-session-caching", NULL, MODIFIABLE_CONFIG, server.tls_ctx_config.session_caching, 1, NULL, updateTlsCfgBool), diff --git a/src/server.c b/src/server.c index db32227bc..d624cb434 100644 --- a/src/server.c +++ b/src/server.c @@ -2774,7 +2774,8 @@ void initServer(void) { server.events_processed_while_blocked = 0; server.system_memory_size = zmalloc_get_memory_size(); - if (server.tls_port && tlsConfigure(&server.tls_ctx_config) == C_ERR) { + if ((server.tls_port || server.tls_replication || server.tls_cluster) + && tlsConfigure(&server.tls_ctx_config) == C_ERR) { serverLog(LL_WARNING, "Failed to configure TLS. Check logs for more info."); exit(1); } From d2ef4c0347a009f66cba0202998e5df9f64b154f Mon Sep 17 00:00:00 2001 From: Oran Agra Date: Mon, 27 Jul 2020 15:30:36 +0300 Subject: [PATCH 074/377] Daily github action: run cluster and sentinel tests with tls (#7575) (cherry picked from commit 62e84b42d25a716411b108290f2caaff4b837488) --- .github/workflows/daily.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/daily.yml b/.github/workflows/daily.yml index 5b5f3f7d4..acc811d3a 100644 --- a/.github/workflows/daily.yml +++ b/.github/workflows/daily.yml @@ -84,9 +84,9 @@ jobs: - name: module api test run: ./runtest-moduleapi --verbose --tls - name: sentinel tests - run: ./runtest-sentinel + run: ./runtest-sentinel --tls - name: cluster tests - run: ./runtest-cluster + run: ./runtest-cluster --tls test-valgrind: runs-on: ubuntu-latest From 7b2af983168e06ffb17b4d797259bd478adca491 Mon Sep 17 00:00:00 2001 From: Jiayuan Chen Date: Tue, 28 Jul 2020 15:45:21 +0800 Subject: [PATCH 075/377] Add optional tls verification (#7502) Adds an `optional` value to the previously boolean `tls-auth-clients` configuration keyword. Co-authored-by: Yossi Gottlieb (cherry picked from commit 198770751fdc4c46eb4971ead9b5787fd6ce39fd) --- redis.conf | 5 ++++- src/cluster.c | 3 ++- src/config.c | 8 +++++++- src/server.h | 5 +++++ src/tls.c | 12 ++++++++++-- tests/unit/tls.tcl | 12 ++++++++++++ 6 files changed, 40 insertions(+), 5 deletions(-) diff --git a/redis.conf b/redis.conf index 8c53f015a..d4e3e47f0 100644 --- a/redis.conf +++ b/redis.conf @@ -159,9 +159,12 @@ tcp-keepalive 300 # By default, clients (including replica servers) on a TLS port are required # to authenticate using valid client side certificates. # -# It is possible to disable authentication using this directive. +# If "no" is specified, client certificates are not required and not accepted. 
+# If "optional" is specified, client certificates are accepted and must be +# valid if provided, but are not required. # # tls-auth-clients no +# tls-auth-clients optional # By default, a Redis replica does not attempt to establish a TLS connection # with its master. diff --git a/src/cluster.c b/src/cluster.c index 5dcd69ff8..485683cf0 100644 --- a/src/cluster.c +++ b/src/cluster.c @@ -670,7 +670,8 @@ void clusterAcceptHandler(aeEventLoop *el, int fd, void *privdata, int mask) { return; } - connection *conn = server.tls_cluster ? connCreateAcceptedTLS(cfd,1) : connCreateAcceptedSocket(cfd); + connection *conn = server.tls_cluster ? + connCreateAcceptedTLS(cfd, TLS_CLIENT_AUTH_YES) : connCreateAcceptedSocket(cfd); connNonBlock(conn); connEnableTcpNoDelay(conn); diff --git a/src/config.c b/src/config.c index 9b6dc459c..10faa3bea 100644 --- a/src/config.c +++ b/src/config.c @@ -98,6 +98,12 @@ configEnum repl_diskless_load_enum[] = { {NULL, 0} }; +configEnum tls_auth_clients_enum[] = { + {"no", TLS_CLIENT_AUTH_NO}, + {"yes", TLS_CLIENT_AUTH_YES}, + {"optional", TLS_CLIENT_AUTH_OPTIONAL}, + {NULL, 0} +}; /* Output buffer limits presets. */ clientBufferLimitsConfig clientBufferLimitsDefaults[CLIENT_TYPE_OBUF_COUNT] = { {0, 0, 0}, /* normal */ @@ -2226,7 +2232,7 @@ standardConfig configs[] = { createIntConfig("tls-session-cache-timeout", NULL, MODIFIABLE_CONFIG, 0, INT_MAX, server.tls_ctx_config.session_cache_timeout, 300, INTEGER_CONFIG, NULL, updateTlsCfgInt), createBoolConfig("tls-cluster", NULL, MODIFIABLE_CONFIG, server.tls_cluster, 0, NULL, updateTlsCfgBool), createBoolConfig("tls-replication", NULL, MODIFIABLE_CONFIG, server.tls_replication, 0, NULL, updateTlsCfgBool), - createBoolConfig("tls-auth-clients", NULL, MODIFIABLE_CONFIG, server.tls_auth_clients, 1, NULL, NULL), + createEnumConfig("tls-auth-clients", NULL, MODIFIABLE_CONFIG, tls_auth_clients_enum, server.tls_auth_clients, TLS_CLIENT_AUTH_YES, NULL, NULL), createBoolConfig("tls-prefer-server-ciphers", NULL, MODIFIABLE_CONFIG, server.tls_ctx_config.prefer_server_ciphers, 0, NULL, updateTlsCfgBool), createBoolConfig("tls-session-caching", NULL, MODIFIABLE_CONFIG, server.tls_ctx_config.session_caching, 1, NULL, updateTlsCfgBool), createStringConfig("tls-cert-file", NULL, MODIFIABLE_CONFIG, EMPTY_STRING_IS_NULL, server.tls_ctx_config.cert_file, NULL, NULL, updateTlsCfg), diff --git a/src/server.h b/src/server.h index 57047f55b..1862e879e 100644 --- a/src/server.h +++ b/src/server.h @@ -358,6 +358,11 @@ typedef long long ustime_t; /* microsecond time type. 
*/ #define REPL_DISKLESS_LOAD_WHEN_DB_EMPTY 1 #define REPL_DISKLESS_LOAD_SWAPDB 2 +/* TLS Client Authentication */ +#define TLS_CLIENT_AUTH_NO 0 +#define TLS_CLIENT_AUTH_YES 1 +#define TLS_CLIENT_AUTH_OPTIONAL 2 + /* Sets operations codes */ #define SET_OP_UNION 0 #define SET_OP_DIFF 1 diff --git a/src/tls.c b/src/tls.c index 8b2bb58e1..07d1775f8 100644 --- a/src/tls.c +++ b/src/tls.c @@ -342,8 +342,16 @@ connection *connCreateAcceptedTLS(int fd, int require_auth) { conn->c.fd = fd; conn->c.state = CONN_STATE_ACCEPTING; - if (!require_auth) { - SSL_set_verify(conn->ssl, SSL_VERIFY_NONE, NULL); + switch (require_auth) { + case TLS_CLIENT_AUTH_NO: + SSL_set_verify(conn->ssl, SSL_VERIFY_NONE, NULL); + break; + case TLS_CLIENT_AUTH_OPTIONAL: + SSL_set_verify(conn->ssl, SSL_VERIFY_PEER, NULL); + break; + default: /* TLS_CLIENT_AUTH_YES, also fall-secure */ + SSL_set_verify(conn->ssl, SSL_VERIFY_PEER|SSL_VERIFY_FAIL_IF_NO_PEER_CERT, NULL); + break; } SSL_set_fd(conn->ssl, conn->c.fd); diff --git a/tests/unit/tls.tcl b/tests/unit/tls.tcl index 2b04590cd..bb5b6d034 100644 --- a/tests/unit/tls.tcl +++ b/tests/unit/tls.tcl @@ -21,7 +21,19 @@ start_server {tags {"tls"}} { catch {$s PING} e assert_match {PONG} $e + r CONFIG SET tls-auth-clients optional + + set s [redis [srv 0 host] [srv 0 port]] + ::tls::import [$s channel] + catch {$s PING} e + assert_match {PONG} $e + r CONFIG SET tls-auth-clients yes + + set s [redis [srv 0 host] [srv 0 port]] + ::tls::import [$s channel] + catch {$s PING} e + assert_match {*error*} $e } test {TLS: Verify tls-protocols behaves as expected} { From 10a8407a4fa16e89d7c42c1e4848712743036ddd Mon Sep 17 00:00:00 2001 From: Oran Agra Date: Tue, 28 Jul 2020 11:15:29 +0300 Subject: [PATCH 076/377] Fix failing tests due to issues with wait_for_log_message (#7572) - the test now waits for specific set of log messages rather than wait for timeout looking for just one message. - we don't wanna sample the current length of the log after an action, due to a race, we need to start the search from the line number of the last message we where waiting for. - when attempting to trigger a full sync, use multi-exec to avoid a race where the replica manages to re-connect before we completed the set of actions that should force a full sync. 
- fix verify_log_message which was broken and unused (cherry picked from commit 06aaeabaea9d9b248e8a790dde352cd14d66628a) --- tests/integration/replication.tcl | 30 +++++++++++++++--------------- tests/support/util.tcl | 20 ++++++++++++-------- tests/unit/moduleapi/testrdb.tcl | 22 +++++++++++----------- 3 files changed, 38 insertions(+), 34 deletions(-) diff --git a/tests/integration/replication.tcl b/tests/integration/replication.tcl index d47ec4fe4..1052fbdd5 100644 --- a/tests/integration/replication.tcl +++ b/tests/integration/replication.tcl @@ -440,30 +440,30 @@ test {diskless loading short read} { for {set i 0} {$i < $attempts} {incr i} { # wait for the replica to start reading the rdb # using the log file since the replica only responds to INFO once in 2mb - wait_for_log_message -1 "*Loading DB in memory*" $loglines 2000 1 + set res [wait_for_log_messages -1 {"*Loading DB in memory*"} $loglines 2000 1] + set loglines [lindex $res 1] # add some additional random sleep so that we kill the master on a different place each time - after [expr {int(rand()*100)}] + after [expr {int(rand()*50)}] # kill the replica connection on the master set killed [$master client kill type replica] - if {[catch { - set res [wait_for_log_message -1 "*Internal error in RDB*" $loglines 100 10] - if {$::verbose} { - puts $res - } - }]} { - puts "failed triggering short read" + set res [wait_for_log_messages -1 {"*Internal error in RDB*" "*Finished with success*" "*Successful partial resynchronization*"} $loglines 1000 1] + if {$::verbose} { puts $res } + set log_text [lindex $res 0] + set loglines [lindex $res 1] + if {![string match "*Internal error in RDB*" $log_text]} { # force the replica to try another full sync + $master multi $master client kill type replica $master set asdf asdf # the side effect of resizing the backlog is that it is flushed (16k is the min size) $master config set repl-backlog-size [expr {16384 + $i}] + $master exec } # wait for loading to stop (fail) - set loglines [count_log_lines -1] - wait_for_condition 100 10 { + wait_for_condition 1000 1 { [s -1 loading] eq 0 } else { fail "Replica didn't disconnect" @@ -545,7 +545,7 @@ start_server {tags {"repl"}} { # wait for the replicas to start reading the rdb # using the log file since the replica only responds to INFO once in 2mb - wait_for_log_message -1 "*Loading DB in memory*" $loglines 800 10 + wait_for_log_messages -1 {"*Loading DB in memory*"} $loglines 800 10 if {$measure_time} { set master_statfile "/proc/$master_pid/stat" @@ -580,13 +580,13 @@ start_server {tags {"repl"}} { # make sure we got what we were aiming for, by looking for the message in the log file if {$all_drop == "all"} { - wait_for_log_message -2 "*Diskless rdb transfer, last replica dropped, killing fork child*" $loglines 1 1 + wait_for_log_messages -2 {"*Diskless rdb transfer, last replica dropped, killing fork child*"} $loglines 1 1 } if {$all_drop == "no"} { - wait_for_log_message -2 "*Diskless rdb transfer, done reading from pipe, 2 replicas still up*" $loglines 1 1 + wait_for_log_messages -2 {"*Diskless rdb transfer, done reading from pipe, 2 replicas still up*"} $loglines 1 1 } if {$all_drop == "slow" || $all_drop == "fast"} { - wait_for_log_message -2 "*Diskless rdb transfer, done reading from pipe, 1 replicas still up*" $loglines 1 1 + wait_for_log_messages -2 {"*Diskless rdb transfer, done reading from pipe, 1 replicas still up*"} $loglines 1 1 } # make sure we don't have a busy loop going thought epoll_wait diff --git a/tests/support/util.tcl 
b/tests/support/util.tcl index 69ea675dc..8340ad207 100644 --- a/tests/support/util.tcl +++ b/tests/support/util.tcl @@ -106,31 +106,35 @@ proc count_log_lines {srv_idx} { # verify pattern exists in server's sdtout after a certain line number proc verify_log_message {srv_idx pattern from_line} { - set lines_after [count_log_lines] - set lines [expr $lines_after - $from_line] - set result [exec tail -$lines < [srv $srv_idx stdout]] + incr from_line + set result [exec tail -n +$from_line < [srv $srv_idx stdout]] if {![string match $pattern $result]} { error "assertion:expected message not found in log file: $pattern" } } # wait for pattern to be found in server's stdout after certain line number -proc wait_for_log_message {srv_idx pattern from_line maxtries delay} { +# return value is a list containing the line that matched the pattern and the line number +proc wait_for_log_messages {srv_idx patterns from_line maxtries delay} { set retry $maxtries + set next_line [expr $from_line + 1] ;# searching form the line after set stdout [srv $srv_idx stdout] while {$retry} { - set result [exec tail -n +$from_line < $stdout] + set result [exec tail -n +$next_line < $stdout] set result [split $result "\n"] foreach line $result { - if {[string match $pattern $line]} { - return $line + foreach pattern $patterns { + if {[string match $pattern $line]} { + return [list $line $next_line] + } } + incr next_line } incr retry -1 after $delay } if {$retry == 0} { - fail "log message of '$pattern' not found in $stdout after line: $from_line" + fail "log message of '$patterns' not found in $stdout after line: $from_line till line: [expr $next_line -1]" } } diff --git a/tests/unit/moduleapi/testrdb.tcl b/tests/unit/moduleapi/testrdb.tcl index 98641ae0a..02c82c7c3 100644 --- a/tests/unit/moduleapi/testrdb.tcl +++ b/tests/unit/moduleapi/testrdb.tcl @@ -77,30 +77,30 @@ tags "modules" { for {set i 0} {$i < $attempts} {incr i} { # wait for the replica to start reading the rdb # using the log file since the replica only responds to INFO once in 2mb - wait_for_log_message -1 "*Loading DB in memory*" $loglines 2000 1 + set res [wait_for_log_messages -1 {"*Loading DB in memory*"} $loglines 2000 1] + set loglines [lindex $res 1] # add some additional random sleep so that we kill the master on a different place each time - after [expr {int(rand()*100)}] + after [expr {int(rand()*50)}] # kill the replica connection on the master set killed [$master client kill type replica] - if {[catch { - set res [wait_for_log_message -1 "*Internal error in RDB*" $loglines 100 10] - if {$::verbose} { - puts $res - } - }]} { - puts "failed triggering short read" + set res [wait_for_log_messages -1 {"*Internal error in RDB*" "*Finished with success*" "*Successful partial resynchronization*"} $loglines 1000 1] + if {$::verbose} { puts $res } + set log_text [lindex $res 0] + set loglines [lindex $res 1] + if {![string match "*Internal error in RDB*" $log_text]} { # force the replica to try another full sync + $master multi $master client kill type replica $master set asdf asdf # the side effect of resizing the backlog is that it is flushed (16k is the min size) $master config set repl-backlog-size [expr {16384 + $i}] + $master exec } # wait for loading to stop (fail) - set loglines [count_log_lines -1] - wait_for_condition 100 10 { + wait_for_condition 1000 1 { [s -1 loading] eq 0 } else { fail "Replica didn't disconnect" From ebdfa715842e8712f1eed0ea893f9c7614ff4cbb Mon Sep 17 00:00:00 2001 From: Yossi Gottlieb Date: Tue, 28 Jul 2020 11:32:47 
+0300 Subject: [PATCH 077/377] TLS: Propagate and handle SSL_new() failures. (#7576) The connection API may create an accepted connection object in an error state, and callers are expected to check it before attempting to use it. Co-authored-by: mrpre (cherry picked from commit bc450c5f63d39d0f0b8c97fa91d15bb8d688b86d) --- src/cluster.c | 9 +++++++++ src/connection.c | 6 +++++- src/networking.c | 11 ++++++++++- src/tls.c | 28 ++++++++++++++++++++++++---- 4 files changed, 48 insertions(+), 6 deletions(-) diff --git a/src/cluster.c b/src/cluster.c index 485683cf0..350aa7b6a 100644 --- a/src/cluster.c +++ b/src/cluster.c @@ -672,6 +672,15 @@ void clusterAcceptHandler(aeEventLoop *el, int fd, void *privdata, int mask) { connection *conn = server.tls_cluster ? connCreateAcceptedTLS(cfd, TLS_CLIENT_AUTH_YES) : connCreateAcceptedSocket(cfd); + + /* Make sure connection is not in an error state */ + if (connGetState(conn) != CONN_STATE_ACCEPTING) { + serverLog(LL_VERBOSE, + "Error creating an accepting connection for cluster node: %s", + connGetLastError(conn)); + connClose(conn); + return; + } connNonBlock(conn); connEnableTcpNoDelay(conn); diff --git a/src/connection.c b/src/connection.c index 2015c9195..09fa12f2a 100644 --- a/src/connection.c +++ b/src/connection.c @@ -85,8 +85,12 @@ connection *connCreateSocket() { /* Create a new socket-type connection that is already associated with * an accepted connection. * - * The socket is not read for I/O until connAccept() was called and + * The socket is not ready for I/O until connAccept() was called and * invoked the connection-level accept handler. + * + * Callers should use connGetState() and verify the created connection + * is not in an error state (which is not possible for a socket connection, + * but could but possible with other protocols). */ connection *connCreateAcceptedSocket(int fd) { connection *conn = connCreateSocket(); diff --git a/src/networking.c b/src/networking.c index e3b62f151..a3c04efa6 100644 --- a/src/networking.c +++ b/src/networking.c @@ -895,8 +895,18 @@ void clientAcceptHandler(connection *conn) { #define MAX_ACCEPTS_PER_CALL 1000 static void acceptCommonHandler(connection *conn, int flags, char *ip) { client *c; + char conninfo[100]; UNUSED(ip); + if (connGetState(conn) != CONN_STATE_ACCEPTING) { + serverLog(LL_VERBOSE, + "Accepted client connection in error state: %s (conn: %s)", + connGetLastError(conn), + connGetInfo(conn, conninfo, sizeof(conninfo))); + connClose(conn); + return; + } + /* Limit the number of connections we take at the same time. * * Admission control will happen before a client is created and connAccept() @@ -925,7 +935,6 @@ static void acceptCommonHandler(connection *conn, int flags, char *ip) { /* Create connection and client */ if ((c = createClient(conn)) == NULL) { - char conninfo[100]; serverLog(LL_WARNING, "Error registering fd event for the new client: %s (conn: %s)", connGetLastError(conn), diff --git a/src/tls.c b/src/tls.c index 07d1775f8..4f0ea4d65 100644 --- a/src/tls.c +++ b/src/tls.c @@ -337,11 +337,34 @@ connection *connCreateTLS(void) { return (connection *) conn; } +/* Fetch the latest OpenSSL error and store it in the connection */ +static void updateTLSError(tls_connection *conn) { + conn->c.last_errno = 0; + if (conn->ssl_error) zfree(conn->ssl_error); + conn->ssl_error = zmalloc(512); + ERR_error_string_n(ERR_get_error(), conn->ssl_error, 512); +} + +/* Create a new TLS connection that is already associated with + * an accepted underlying file descriptor. 
+ * + * The socket is not ready for I/O until connAccept() was called and + * invoked the connection-level accept handler. + * + * Callers should use connGetState() and verify the created connection + * is not in an error state. + */ connection *connCreateAcceptedTLS(int fd, int require_auth) { tls_connection *conn = (tls_connection *) connCreateTLS(); conn->c.fd = fd; conn->c.state = CONN_STATE_ACCEPTING; + if (!conn->ssl) { + updateTLSError(conn); + conn->c.state = CONN_STATE_ERROR; + return (connection *) conn; + } + switch (require_auth) { case TLS_CLIENT_AUTH_NO: SSL_set_verify(conn->ssl, SSL_VERIFY_NONE, NULL); @@ -384,10 +407,7 @@ static int handleSSLReturnCode(tls_connection *conn, int ret_value, WantIOType * break; default: /* Error! */ - conn->c.last_errno = 0; - if (conn->ssl_error) zfree(conn->ssl_error); - conn->ssl_error = zmalloc(512); - ERR_error_string_n(ERR_get_error(), conn->ssl_error, 512); + updateTLSError(conn); break; } From 2e6563cdcf205778316231f4287c8f8a20d09ac9 Mon Sep 17 00:00:00 2001 From: Yossi Gottlieb Date: Tue, 28 Jul 2020 14:04:06 +0300 Subject: [PATCH 078/377] Fix TLS cluster tests. (#7578) Fix consistency test added in 0c9916d00 without considering TLS redis-cli configuration. (cherry picked from commit 675b00c7e0b7d68bafa11fcc7f66a394c3c3cd36) --- tests/cluster/tests/14-consistency-check.tcl | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/tests/cluster/tests/14-consistency-check.tcl b/tests/cluster/tests/14-consistency-check.tcl index a43725ebc..d3b3052f3 100644 --- a/tests/cluster/tests/14-consistency-check.tcl +++ b/tests/cluster/tests/14-consistency-check.tcl @@ -64,7 +64,10 @@ proc test_slave_load_expired_keys {aof} { kill_instance redis $replica_id set master_port [get_instance_attrib redis $master_id port] - exec ../../../src/redis-cli -h 127.0.0.1 -p $master_port debug sleep [expr $data_ttl+3] > /dev/null & + exec ../../../src/redis-cli \ + -h 127.0.0.1 -p $master_port \ + {*}[rediscli_tls_config "../../../tests"] \ + debug sleep [expr $data_ttl+3] > /dev/null & while {[clock seconds] <= $end_time} { #wait for $data_ttl seconds From 9f873c6c2a43ef0c8c4b7dcc043b206a0ca7c734 Mon Sep 17 00:00:00 2001 From: Wen Hui Date: Tue, 28 Jul 2020 15:05:48 -0400 Subject: [PATCH 079/377] fix leak in error handling of debug populate command (#7062) valsize was not modified during the for loop below instead of getting from c->argv[4], therefore there is no need to put inside the for loop.. Moreover, putting the check outside loop will also avoid memory leaking, decrRefCount(key) should be called in the original code if we put the check in for loop (cherry picked from commit 2afa308306fc641204f10a2bbe2fe35e28b6d259) --- src/debug.c | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/src/debug.c b/src/debug.c index 60cc2a1fa..0bea69876 100644 --- a/src/debug.c +++ b/src/debug.c @@ -591,14 +591,13 @@ NULL if (getLongFromObjectOrReply(c, c->argv[2], &keys, NULL) != C_OK) return; dictExpand(c->db->dict,keys); + long valsize = 0; + if ( c->argc == 5 && getLongFromObjectOrReply(c, c->argv[4], &valsize, NULL) != C_OK ) + return; for (j = 0; j < keys; j++) { - long valsize = 0; snprintf(buf,sizeof(buf),"%s:%lu", (c->argc == 3) ? 
"key" : (char*)c->argv[3]->ptr, j); key = createStringObject(buf,strlen(buf)); - if (c->argc == 5) - if (getLongFromObjectOrReply(c, c->argv[4], &valsize, NULL) != C_OK) - return; if (lookupKeyWrite(c->db,key) != NULL) { decrRefCount(key); continue; From 46686af7f5be3a55922bf6b3ced353c17b2571ed Mon Sep 17 00:00:00 2001 From: Wen Hui Date: Wed, 29 Jul 2020 01:22:54 -0400 Subject: [PATCH 080/377] Add SignalModifiedKey hook in XGROUP CREATE with MKSTREAM option (#7562) (cherry picked from commit 0a2b019b79831e6c593c2de943af61ceef4327e1) --- src/t_stream.c | 1 + 1 file changed, 1 insertion(+) diff --git a/src/t_stream.c b/src/t_stream.c index f564b1ff9..a54671938 100644 --- a/src/t_stream.c +++ b/src/t_stream.c @@ -1841,6 +1841,7 @@ NULL o = createStreamObject(); dbAdd(c->db,c->argv[2],o); s = o->ptr; + signalModifiedKey(c,c->db,c->argv[2]); } streamCG *cg = streamCreateCG(s,grpname,sdslen(grpname),&id); From 3d98418f4afe7af410bdf99201a3c943c367cff5 Mon Sep 17 00:00:00 2001 From: namtsui <384455+namtsui@users.noreply.github.com> Date: Tue, 28 Jul 2020 22:25:56 -0700 Subject: [PATCH 081/377] Avoid an out-of-bounds read in the redis-sentinel (#7443) The Redis sentinel would crash with a segfault after a few minutes because it tried to read from a page without read permissions. Check up front whether the sds is long enough to contain redis:slave or redis:master before memcmp() as is done everywhere else in sentinelRefreshInstanceInfo(). Bug report and commit message from Theo Buehler. Fix from Nam Nguyen. Co-authored-by: Nam Nguyen (cherry picked from commit 8c03eb90da3951ed92d8d2729fc6d2ce4feb9e45) --- src/sentinel.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/sentinel.c b/src/sentinel.c index 5be4193dc..5bd594955 100644 --- a/src/sentinel.c +++ b/src/sentinel.c @@ -2218,8 +2218,8 @@ void sentinelRefreshInstanceInfo(sentinelRedisInstance *ri, const char *info) { } /* role: */ - if (!memcmp(l,"role:master",11)) role = SRI_MASTER; - else if (!memcmp(l,"role:slave",10)) role = SRI_SLAVE; + if (sdslen(l) >= 11 && !memcmp(l,"role:master",11)) role = SRI_MASTER; + else if (sdslen(l) >= 10 && !memcmp(l,"role:slave",10)) role = SRI_SLAVE; if (role == SRI_SLAVE) { /* master_host: */ From 2428b64d5a665a937b21f086e673b6b54d403fe3 Mon Sep 17 00:00:00 2001 From: Yossi Gottlieb Date: Wed, 29 Jul 2020 17:03:38 +0300 Subject: [PATCH 082/377] Clarify RM_BlockClient() error condition. (#6093) (cherry picked from commit 342d9a642f2d21fe906bf1be24d841fa6f314601) --- src/module.c | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/src/module.c b/src/module.c index 9316e004d..b381b4f99 100644 --- a/src/module.c +++ b/src/module.c @@ -4347,6 +4347,7 @@ void unblockClientFromModule(client *c) { * Even when blocking on keys, RM_UnblockClient() can be called however, but * in that case the privdata argument is disregarded, because we pass the * reply callback the privdata that is set here while blocking. + * */ RedisModuleBlockedClient *moduleBlockClient(RedisModuleCtx *ctx, RedisModuleCmdFunc reply_callback, RedisModuleCmdFunc timeout_callback, void (*free_privdata)(RedisModuleCtx*,void*), long long timeout_ms, RedisModuleString **keys, int numkeys, void *privdata) { client *c = ctx->client; @@ -4439,6 +4440,14 @@ int moduleTryServeClientBlockedOnKey(client *c, robj *key) { * Note: RedisModule_UnblockClient should be called for every blocked client, * even if client was killed, timed-out or disconnected. Failing to do so * will result in memory leaks. 
+ * + * There are some cases where RedisModule_BlockClient() cannot be used: + * + * 1. If the client is a Lua script. + * 2. If the client is executing a MULTI block. + * + * In these cases, a call to RedisModule_BlockClient() will **not** block the + * client, but instead produce a specific error reply. */ RedisModuleBlockedClient *RM_BlockClient(RedisModuleCtx *ctx, RedisModuleCmdFunc reply_callback, RedisModuleCmdFunc timeout_callback, void (*free_privdata)(RedisModuleCtx*,void*), long long timeout_ms) { return moduleBlockClient(ctx,reply_callback,timeout_callback,free_privdata,timeout_ms, NULL,0,NULL); From 37fba8f4d805607706f34eb5e51602397fd64d1d Mon Sep 17 00:00:00 2001 From: WuYunlong Date: Thu, 30 Jul 2020 13:56:21 +0800 Subject: [PATCH 083/377] Fix running single test 14-consistency-check.tcl (#7587) (cherry picked from commit be11e1b5eaf0d6ab5e68f86c1346570531eee766) --- tests/cluster/tests/14-consistency-check.tcl | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/cluster/tests/14-consistency-check.tcl b/tests/cluster/tests/14-consistency-check.tcl index d3b3052f3..5a80dd0df 100644 --- a/tests/cluster/tests/14-consistency-check.tcl +++ b/tests/cluster/tests/14-consistency-check.tcl @@ -1,4 +1,5 @@ source "../tests/includes/init-tests.tcl" +source "../../../tests/support/cli.tcl" test "Create a 5 nodes cluster" { create_cluster 5 5 From 0dbf112caf45b102b6ebe5a6e3d851574565441e Mon Sep 17 00:00:00 2001 From: Yossi Gottlieb Date: Thu, 30 Jul 2020 13:25:10 +0300 Subject: [PATCH 084/377] CI: Add daily CentOS 7.x jobs. (#7582) (cherry picked from commit 92e089b1ab47897e170aab6e95341ae789ec77d5) --- .github/workflows/daily.yml | 54 ++++++++++++++++++++++++++++++++++--- 1 file changed, 50 insertions(+), 4 deletions(-) diff --git a/.github/workflows/daily.yml b/.github/workflows/daily.yml index acc811d3a..458f07c97 100644 --- a/.github/workflows/daily.yml +++ b/.github/workflows/daily.yml @@ -10,7 +10,7 @@ on: jobs: - test-jemalloc: + test-ubuntu-jemalloc: runs-on: ubuntu-latest timeout-minutes: 14400 steps: @@ -28,7 +28,7 @@ jobs: - name: cluster tests run: ./runtest-cluster - test-libc-malloc: + test-ubuntu-libc-malloc: runs-on: ubuntu-latest timeout-minutes: 14400 steps: @@ -46,7 +46,7 @@ jobs: - name: cluster tests run: ./runtest-cluster - test-32bit: + test-ubuntu-32bit: runs-on: ubuntu-latest timeout-minutes: 14400 steps: @@ -68,7 +68,7 @@ jobs: - name: cluster tests run: ./runtest-cluster - test-tls: + test-ubuntu-tls: runs-on: ubuntu-latest timeout-minutes: 14400 steps: @@ -101,3 +101,49 @@ jobs: ./runtest --valgrind --verbose --clients 1 - name: module api test run: ./runtest-moduleapi --valgrind --verbose --clients 1 + + test-centos7-jemalloc: + runs-on: ubuntu-latest + container: centos:7 + timeout-minutes: 14400 + steps: + - uses: actions/checkout@v2 + - name: make + run: | + yum -y install centos-release-scl + yum -y install devtoolset-7 + scl enable devtoolset-7 make + - name: test + run: | + yum -y install tcl + ./runtest --accurate --verbose + - name: module api test + run: ./runtest-moduleapi --verbose + - name: sentinel tests + run: ./runtest-sentinel + - name: cluster tests + run: ./runtest-cluster + + test-centos7-tls: + runs-on: ubuntu-latest + container: centos:7 + timeout-minutes: 14400 + steps: + - uses: actions/checkout@v2 + - name: make + run: | + yum -y install centos-release-scl epel-release + yum -y install devtoolset-7 openssl-devel openssl + scl enable devtoolset-7 make BUILD_TLS=yes + - name: test + run: | + yum -y install tcl tcltls + 
./utils/gen-test-certs.sh + ./runtest --accurate --verbose --tls + - name: module api test + run: ./runtest-moduleapi --verbose --tls + - name: sentinel tests + run: ./runtest-sentinel --tls + - name: cluster tests + run: ./runtest-cluster --tls + From 3a4ee4b6d61963a23304f0dd28935988cea9e33a Mon Sep 17 00:00:00 2001 From: Oran Agra Date: Fri, 31 Jul 2020 13:14:29 +0300 Subject: [PATCH 085/377] module hook for master link up missing on successful psync (#7584) besides, hooks test was time sensitive. when the replica managed to reconnect quickly after the client kill, the test would fail (cherry picked from commit c5d85c69c75438f98f84e549877c2999a2e450a8) --- src/replication.c | 5 +++++ tests/unit/moduleapi/hooks.tcl | 19 +++++++++++++++++-- 2 files changed, 22 insertions(+), 2 deletions(-) diff --git a/src/replication.c b/src/replication.c index 197acde79..a8f46be95 100644 --- a/src/replication.c +++ b/src/replication.c @@ -2824,6 +2824,11 @@ void replicationResurrectCachedMaster(connection *conn) { server.repl_state = REPL_STATE_CONNECTED; server.repl_down_since = 0; + /* Fire the master link modules event. */ + moduleFireServerEvent(REDISMODULE_EVENT_MASTER_LINK_CHANGE, + REDISMODULE_SUBEVENT_MASTER_LINK_UP, + NULL); + /* Re-add to the list of clients. */ linkClient(server.master); if (connSetReadHandler(server.master->conn, readQueryFromClient)) { diff --git a/tests/unit/moduleapi/hooks.tcl b/tests/unit/moduleapi/hooks.tcl index bf2b9010b..da0307ce6 100644 --- a/tests/unit/moduleapi/hooks.tcl +++ b/tests/unit/moduleapi/hooks.tcl @@ -114,6 +114,21 @@ tags "modules" { test {Test master link down hook} { r client kill type master assert_equal [r hooks.event_count masterlink-down] 1 + + wait_for_condition 50 100 { + [string match {*master_link_status:up*} [r info replication]] + } else { + fail "Replica didn't reconnect" + } + + assert_equal [r hooks.event_count masterlink-down] 1 + assert_equal [r hooks.event_count masterlink-up] 2 + } + + wait_for_condition 50 10 { + [string match {*master_link_status:up*} [r info replication]] + } else { + fail "Can't turn the instance into a replica" } $replica replicaof no one @@ -125,8 +140,8 @@ tags "modules" { } test {Test replica-offline hook} { - assert_equal [r -1 hooks.event_count replica-online] 1 - assert_equal [r -1 hooks.event_count replica-offline] 1 + assert_equal [r -1 hooks.event_count replica-online] 2 + assert_equal [r -1 hooks.event_count replica-offline] 2 } # get the replica stdout, to be used by the next test set replica_stdout [srv 0 stdout] From dd94ca7295b6c055e0c7b3bb664a681b2f42ee68 Mon Sep 17 00:00:00 2001 From: Yossi Gottlieb Date: Fri, 31 Jul 2020 13:55:57 +0300 Subject: [PATCH 086/377] Fix test-centos7-tls daily job. 
(#7598) (cherry picked from commit e97cec2f28ca37b2f4d1a56fa4ff0b69c8c8fe62) --- .github/workflows/daily.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/daily.yml b/.github/workflows/daily.yml index 458f07c97..986bd223b 100644 --- a/.github/workflows/daily.yml +++ b/.github/workflows/daily.yml @@ -112,7 +112,7 @@ jobs: run: | yum -y install centos-release-scl yum -y install devtoolset-7 - scl enable devtoolset-7 make + scl enable devtoolset-7 "make" - name: test run: | yum -y install tcl @@ -134,7 +134,7 @@ jobs: run: | yum -y install centos-release-scl epel-release yum -y install devtoolset-7 openssl-devel openssl - scl enable devtoolset-7 make BUILD_TLS=yes + scl enable devtoolset-7 "make BUILD_TLS=yes" - name: test run: | yum -y install tcl tcltls From b4a6b4f28ded50396fc1aba809ac3eedcde7d525 Mon Sep 17 00:00:00 2001 From: Oran Agra Date: Tue, 4 Aug 2020 08:53:50 +0300 Subject: [PATCH 087/377] fix new rdb test failing on timing issues (#7604) apparenlty on github actions sometimes 500ms is not enough (cherry picked from commit 191b1181023b0860ec60afde7a41bd4f03c55097) --- tests/integration/rdb.tcl | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/integration/rdb.tcl b/tests/integration/rdb.tcl index 6535cd089..9cd970350 100644 --- a/tests/integration/rdb.tcl +++ b/tests/integration/rdb.tcl @@ -139,10 +139,10 @@ start_server {} { test {bgsave resets the change counter} { r config set rdb-key-save-delay 0 r bgsave - wait_for_condition 5 100 { + wait_for_condition 50 100 { [s rdb_bgsave_in_progress] == 0 } else { - fail "bgsave not aborted" + fail "bgsave not done" } assert_equal [s rdb_changes_since_last_save] 0 } From 05e54ec9628eeb8aeb4d09b29b6ef38589af8806 Mon Sep 17 00:00:00 2001 From: Frank Meier <40453138+meierfra-ergon@users.noreply.github.com> Date: Wed, 5 Aug 2020 11:06:33 +0200 Subject: [PATCH 088/377] add force option to 'create-cluster create' script call (#7612) (cherry picked from commit c8f3182f378e069f8be0bbc333ae9c5e8682e804) --- utils/create-cluster/create-cluster | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/utils/create-cluster/create-cluster b/utils/create-cluster/create-cluster index 931f6f521..d9a3b8c35 100755 --- a/utils/create-cluster/create-cluster +++ b/utils/create-cluster/create-cluster @@ -38,7 +38,11 @@ then PORT=$((PORT+1)) HOSTS="$HOSTS $CLUSTER_HOST:$PORT" done - $BIN_PATH/redis-cli --cluster create $HOSTS --cluster-replicas $REPLICAS + OPT_ARG="" + if [ "$2" == "-f" ]; then + OPT_ARG="--cluster-yes" + fi + $BIN_PATH/redis-cli --cluster create $HOSTS --cluster-replicas $REPLICAS $OPT_ARG exit 0 fi @@ -104,7 +108,7 @@ fi echo "Usage: $0 [start|create|stop|watch|tail|clean|call]" echo "start -- Launch Redis Cluster instances." -echo "create -- Create a cluster using redis-cli --cluster create." +echo "create [-f] -- Create a cluster using redis-cli --cluster create." echo "stop -- Stop Redis Cluster instances." echo "watch -- Show CLUSTER NODES output (first 30 lines) of first node." echo "tail -- Run tail -f of instance at base port + ID." 
From d5f7ec90bc2c4e24eda1f9e502dfb621414581dd Mon Sep 17 00:00:00 2001 From: Frank Meier Date: Thu, 28 Feb 2019 10:59:06 +0100 Subject: [PATCH 089/377] reintroduce REDISCLI_CLUSTER_YES env variable in redis-cli the variable was introduced only in the 5.0 branch in #5879 bc6c1c40db (cherry picked from commit c6ac2588db27de9a71effa6dc21417f7c787ac69) --- src/redis-cli.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/src/redis-cli.c b/src/redis-cli.c index 0148964bf..ec7153d15 100644 --- a/src/redis-cli.c +++ b/src/redis-cli.c @@ -73,6 +73,7 @@ #define REDIS_CLI_RCFILE_ENV "REDISCLI_RCFILE" #define REDIS_CLI_RCFILE_DEFAULT ".redisclirc" #define REDIS_CLI_AUTH_ENV "REDISCLI_AUTH" +#define REDIS_CLI_CLUSTER_YES_ENV "REDISCLI_CLUSTER_YES" #define CLUSTER_MANAGER_SLOTS 16384 #define CLUSTER_MANAGER_MIGRATE_TIMEOUT 60000 @@ -1668,6 +1669,11 @@ static void parseEnv() { if (auth != NULL && config.auth == NULL) { config.auth = auth; } + + char *cluster_yes = getenv(REDIS_CLI_CLUSTER_YES_ENV); + if (cluster_yes != NULL && !strcmp(cluster_yes, "1")) { + config.cluster_manager_command.flags |= CLUSTER_MANAGER_CMD_FLAG_YES; + } } static sds readArgFromStdin(void) { From 6f6658ab605a7ea0d06fab3e3a11007c4b24d2ab Mon Sep 17 00:00:00 2001 From: Oran Agra Date: Wed, 5 Aug 2020 12:18:44 +0300 Subject: [PATCH 090/377] redis-cli --cluster-yes - negate force flag for clarity this internal flag is there so that some commands do not comply to `--cluster-yes` (cherry picked from commit f519dcb21626e5fd214960c7d83ee4fab7a3929d) --- src/redis-cli.c | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/src/redis-cli.c b/src/redis-cli.c index ec7153d15..de5d08149 100644 --- a/src/redis-cli.c +++ b/src/redis-cli.c @@ -1803,10 +1803,10 @@ static void usage(void) { exit(1); } -static int confirmWithYes(char *msg, int force) { - /* if force is true and --cluster-yes option is on, +static int confirmWithYes(char *msg, int ignore_force) { + /* if --cluster-yes option is set and ignore_force is false, * do not prompt for an answer */ - if (force && + if (!ignore_force && (config.cluster_manager_command.flags & CLUSTER_MANAGER_CMD_FLAG_YES)) { return 1; } @@ -4500,7 +4500,7 @@ static int clusterManagerFixSlotsCoverage(char *all_slots) { dictReleaseIterator(iter); /* we want explicit manual confirmation from users for all the fix cases */ - int force = 0; + int ignore_force = 1; /* Handle case "1": keys in no node. 
*/ if (listLength(none) > 0) { @@ -4508,7 +4508,7 @@ static int clusterManagerFixSlotsCoverage(char *all_slots) { "across the cluster:\n"); clusterManagerPrintSlotsList(none); if (confirmWithYes("Fix these slots by covering with a random node?", - force)) { + ignore_force)) { listIter li; listNode *ln; listRewind(none, &li); @@ -4535,7 +4535,7 @@ static int clusterManagerFixSlotsCoverage(char *all_slots) { printf("The following uncovered slots have keys in just one node:\n"); clusterManagerPrintSlotsList(single); if (confirmWithYes("Fix these slots by covering with those nodes?", - force)) { + ignore_force)) { listIter li; listNode *ln; listRewind(single, &li); @@ -4567,7 +4567,7 @@ static int clusterManagerFixSlotsCoverage(char *all_slots) { printf("The following uncovered slots have keys in multiple nodes:\n"); clusterManagerPrintSlotsList(multi); if (confirmWithYes("Fix these slots by moving keys " - "into a single node?", force)) { + "into a single node?", ignore_force)) { listIter li; listNode *ln; listRewind(multi, &li); @@ -5530,8 +5530,8 @@ assign_replicas: } clusterManagerOptimizeAntiAffinity(ip_nodes, ip_count); clusterManagerShowNodes(); - int force = 1; - if (confirmWithYes("Can I set the above configuration?", force)) { + int ignore_force = 0; + if (confirmWithYes("Can I set the above configuration?", ignore_force)) { listRewind(cluster_manager.nodes, &li); while ((ln = listNext(&li)) != NULL) { clusterManagerNode *node = ln->value; From 9b7da52c3b21a223c839fd726c641bb8eebe5ffb Mon Sep 17 00:00:00 2001 From: Wen Hui Date: Sat, 8 Aug 2020 07:42:32 -0400 Subject: [PATCH 091/377] fix memory leak in ACLLoadFromFile error handling (#7623) (cherry picked from commit 30ead1edaee1cea93ee5cdeb975880df12b41f5c) --- src/acl.c | 1 + 1 file changed, 1 insertion(+) diff --git a/src/acl.c b/src/acl.c index 3ce45f03b..e0432ba5c 100644 --- a/src/acl.c +++ b/src/acl.c @@ -1327,6 +1327,7 @@ sds ACLLoadFromFile(const char *filename) { errors = sdscatprintf(errors, "'%s:%d: username '%s' contains invalid characters. ", server.acl_filename, linenum, argv[1]); + sdsfreesplitres(argv,argc); continue; } From 753ff31fa3818687fa0215f2d0613805c8811155 Mon Sep 17 00:00:00 2001 From: Wang Yuan Date: Sun, 9 Aug 2020 03:03:56 +0800 Subject: [PATCH 092/377] Print error info if failed opening config file (#6943) (cherry picked from commit 514a1e223df5b62cd1fc6aff38126d4a74ebef7e) --- src/config.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/config.c b/src/config.c index 10faa3bea..cfe420552 100644 --- a/src/config.c +++ b/src/config.c @@ -556,7 +556,8 @@ void loadServerConfig(char *filename, char *options) { } else { if ((fp = fopen(filename,"r")) == NULL) { serverLog(LL_WARNING, - "Fatal error, can't open config file '%s'", filename); + "Fatal error, can't open config file '%s': %s", + filename, strerror(errno)); exit(1); } } From 5240ad73efc522b715490b3211c8c637b3d5c636 Mon Sep 17 00:00:00 2001 From: WuYunlong Date: Tue, 11 Aug 2020 10:18:21 +0800 Subject: [PATCH 093/377] see #7250, fix signature of RedisModule_DeauthenticateAndCloseClient (#7645) In redismodule.h, RedisModule_DeauthenticateAndCloseClient returns void `void REDISMODULE_API_FUNC(RedisModule_DeauthenticateAndCloseClient)(RedisModuleCtx *ctx, uint64_t client_id);` But in module.c, RM_DeauthenticateAndCloseClient returns int `int RM_DeauthenticateAndCloseClient(RedisModuleCtx *ctx, uint64_t client_id)` It it safe to change return value from `void` to `int` from the user's perspective. 
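For illustration only (this sketch is not part of the patch; the module name "kick" and the command "kick.client" are invented), a module can now act on the result of RedisModule_DeauthenticateAndCloseClient(), which the implementation in module.c appears to signal as REDISMODULE_ERR when the given client id does not resolve to a connected client:

```
/* kick.c -- hypothetical sketch, not shipped with Redis. */
#include "redismodule.h"

/* KICK.CLIENT <id> -- deauthenticate and close another client by id. */
int KickClient_RedisCommand(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
    if (argc != 2) return RedisModule_WrongArity(ctx);

    long long id;
    if (RedisModule_StringToLongLong(argv[1], &id) != REDISMODULE_OK || id < 0)
        return RedisModule_ReplyWithError(ctx, "ERR invalid client id");

    /* Before this fix the header declared a void return, so a failure
     * (e.g. an unknown client id) could not be detected by the caller. */
    if (RedisModule_DeauthenticateAndCloseClient(ctx, (uint64_t)id) == REDISMODULE_ERR)
        return RedisModule_ReplyWithError(ctx, "ERR no such client");

    return RedisModule_ReplyWithSimpleString(ctx, "OK");
}

int RedisModule_OnLoad(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
    REDISMODULE_NOT_USED(argv);
    REDISMODULE_NOT_USED(argc);
    if (RedisModule_Init(ctx, "kick", 1, REDISMODULE_APIVER_1) == REDISMODULE_ERR)
        return REDISMODULE_ERR;
    if (RedisModule_CreateCommand(ctx, "kick.client", KickClient_RedisCommand,
                                  "admin", 0, 0, 0) == REDISMODULE_ERR)
        return REDISMODULE_ERR;
    return REDISMODULE_OK;
}
```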
(cherry picked from commit dd08aec539f8a8b35ccb8b8c953a76c07c739d62) --- src/redismodule.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/redismodule.h b/src/redismodule.h index 23b4d26e0..d67b01f68 100644 --- a/src/redismodule.h +++ b/src/redismodule.h @@ -679,7 +679,7 @@ void REDISMODULE_API_FUNC(RedisModule_FreeModuleUser)(RedisModuleUser *user); int REDISMODULE_API_FUNC(RedisModule_SetModuleUserACL)(RedisModuleUser *user, const char* acl); int REDISMODULE_API_FUNC(RedisModule_AuthenticateClientWithACLUser)(RedisModuleCtx *ctx, const char *name, size_t len, RedisModuleUserChangedFunc callback, void *privdata, uint64_t *client_id); int REDISMODULE_API_FUNC(RedisModule_AuthenticateClientWithUser)(RedisModuleCtx *ctx, RedisModuleUser *user, RedisModuleUserChangedFunc callback, void *privdata, uint64_t *client_id); -void REDISMODULE_API_FUNC(RedisModule_DeauthenticateAndCloseClient)(RedisModuleCtx *ctx, uint64_t client_id); +int REDISMODULE_API_FUNC(RedisModule_DeauthenticateAndCloseClient)(RedisModuleCtx *ctx, uint64_t client_id); #endif #define RedisModule_IsAOFClient(id) ((id) == CLIENT_ID_AOF) From 9c7c0c511a4333d2e2b5e0c49d32309f79db3aff Mon Sep 17 00:00:00 2001 From: Yossi Gottlieb Date: Tue, 11 Aug 2020 10:59:47 +0300 Subject: [PATCH 094/377] Run daily workflow on main repo only (no forks). (#7646) (cherry picked from commit b8f0c2de4aa76a5ba86a6970b7ab550a35ff12c6) --- .github/workflows/daily.yml | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/.github/workflows/daily.yml b/.github/workflows/daily.yml index 986bd223b..010a32289 100644 --- a/.github/workflows/daily.yml +++ b/.github/workflows/daily.yml @@ -12,6 +12,7 @@ jobs: test-ubuntu-jemalloc: runs-on: ubuntu-latest + if: github.repository == 'redis/redis' timeout-minutes: 14400 steps: - uses: actions/checkout@v2 @@ -30,6 +31,7 @@ jobs: test-ubuntu-libc-malloc: runs-on: ubuntu-latest + if: github.repository == 'redis/redis' timeout-minutes: 14400 steps: - uses: actions/checkout@v2 @@ -48,6 +50,7 @@ jobs: test-ubuntu-32bit: runs-on: ubuntu-latest + if: github.repository == 'redis/redis' timeout-minutes: 14400 steps: - uses: actions/checkout@v2 @@ -70,6 +73,7 @@ jobs: test-ubuntu-tls: runs-on: ubuntu-latest + if: github.repository == 'redis/redis' timeout-minutes: 14400 steps: - uses: actions/checkout@v2 @@ -90,6 +94,7 @@ jobs: test-valgrind: runs-on: ubuntu-latest + if: github.repository == 'redis/redis' timeout-minutes: 14400 steps: - uses: actions/checkout@v2 @@ -104,6 +109,7 @@ jobs: test-centos7-jemalloc: runs-on: ubuntu-latest + if: github.repository == 'redis/redis' container: centos:7 timeout-minutes: 14400 steps: @@ -126,6 +132,7 @@ jobs: test-centos7-tls: runs-on: ubuntu-latest + if: github.repository == 'redis/redis' container: centos:7 timeout-minutes: 14400 steps: From 5e0a3e26268a912e8acfa095ae1dcf82496c2884 Mon Sep 17 00:00:00 2001 From: YoongHM Date: Tue, 11 Aug 2020 17:30:32 +0800 Subject: [PATCH 095/377] Start redis after network is online (#7639) The two lines allow systemd to start redis.service after the network is online. Only after the network is online that Redis could bind to IP address other than 127.0.0.1 during initial boot up process. 
(cherry picked from commit 1c6caaaef7d69e05b526626f284bb822fc82dd9f) --- utils/systemd-redis_server.service | 2 ++ 1 file changed, 2 insertions(+) diff --git a/utils/systemd-redis_server.service b/utils/systemd-redis_server.service index addee3498..cf158644a 100644 --- a/utils/systemd-redis_server.service +++ b/utils/systemd-redis_server.service @@ -20,6 +20,8 @@ Description=Redis data structure server Documentation=https://redis.io/documentation #Before=your_application.service another_example_application.service #AssertPathExists=/var/lib/redis +Wants=network-online.target +After=network-online.target [Service] ExecStart=/usr/local/bin/redis-server --supervised systemd --daemonize no From 0d78615532b6d3c12dd2e474ad6ee30cded47773 Mon Sep 17 00:00:00 2001 From: "zhaozhao.zz" Date: Tue, 30 Oct 2018 00:38:20 +0800 Subject: [PATCH 096/377] using proto-max-bulk-len in checkStringLength for SETRANGE and APPEND (cherry picked from commit 912b48e4b9d9a0738ab3010c4f30e47a3beec3fc) --- src/t_string.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/t_string.c b/src/t_string.c index 259f43142..4be758e65 100644 --- a/src/t_string.c +++ b/src/t_string.c @@ -35,8 +35,8 @@ *----------------------------------------------------------------------------*/ static int checkStringLength(client *c, long long size) { - if (size > 512*1024*1024) { - addReplyError(c,"string exceeds maximum allowed size (512MB)"); + if (!(c->flags & CLIENT_MASTER) && size > server.proto_max_bulk_len) { + addReplyError(c,"string exceeds maximum allowed size (proto-max-bulk-len)"); return C_ERR; } return C_OK; From f0f4ef442356d79498505f8d17feffa3f4f43904 Mon Sep 17 00:00:00 2001 From: "zhaozhao.zz" Date: Tue, 30 Oct 2018 00:47:49 +0800 Subject: [PATCH 097/377] config: proto-max-bulk-len must be 1mb or greater (cherry picked from commit 46323cec56793c66273005b72f1f29146c108840) --- redis.conf | 2 +- src/config.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/redis.conf b/redis.conf index d4e3e47f0..565c24eca 100644 --- a/redis.conf +++ b/redis.conf @@ -1641,7 +1641,7 @@ client-output-buffer-limit pubsub 32mb 8mb 60 # In the Redis protocol, bulk requests, that are, elements representing single # strings, are normally limited ot 512 mb. However you can change this limit -# here. 
+# here, but must be 1mb or greater # # proto-max-bulk-len 512mb diff --git a/src/config.c b/src/config.c index cfe420552..476956e18 100644 --- a/src/config.c +++ b/src/config.c @@ -2205,7 +2205,7 @@ standardConfig configs[] = { createLongLongConfig("cluster-node-timeout", NULL, MODIFIABLE_CONFIG, 0, LLONG_MAX, server.cluster_node_timeout, 15000, INTEGER_CONFIG, NULL, NULL), createLongLongConfig("slowlog-log-slower-than", NULL, MODIFIABLE_CONFIG, -1, LLONG_MAX, server.slowlog_log_slower_than, 10000, INTEGER_CONFIG, NULL, NULL), createLongLongConfig("latency-monitor-threshold", NULL, MODIFIABLE_CONFIG, 0, LLONG_MAX, server.latency_monitor_threshold, 0, INTEGER_CONFIG, NULL, NULL), - createLongLongConfig("proto-max-bulk-len", NULL, MODIFIABLE_CONFIG, 0, LLONG_MAX, server.proto_max_bulk_len, 512ll*1024*1024, MEMORY_CONFIG, NULL, NULL), /* Bulk request max size */ + createLongLongConfig("proto-max-bulk-len", NULL, MODIFIABLE_CONFIG, 1024*1024, LLONG_MAX, server.proto_max_bulk_len, 512ll*1024*1024, MEMORY_CONFIG, NULL, NULL), /* Bulk request max size */ createLongLongConfig("stream-node-max-entries", NULL, MODIFIABLE_CONFIG, 0, LLONG_MAX, server.stream_node_max_entries, 100, INTEGER_CONFIG, NULL, NULL), createLongLongConfig("repl-backlog-size", NULL, MODIFIABLE_CONFIG, 1, LLONG_MAX, server.repl_backlog_size, 1024*1024, MEMORY_CONFIG, NULL, updateReplBacklogSize), /* Default: 1mb */ From f573b23187fadb0d179607c09f4f5eeba3aa5559 Mon Sep 17 00:00:00 2001 From: "zhaozhao.zz" Date: Tue, 30 Oct 2018 00:50:36 +0800 Subject: [PATCH 098/377] CLIENT_MASTER should ignore server.proto_max_bulk_len (cherry picked from commit 0061dbba04918b3f62f943f469f41590d4200919) --- src/networking.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/networking.c b/src/networking.c index a3c04efa6..aa42c43e9 100644 --- a/src/networking.c +++ b/src/networking.c @@ -1691,7 +1691,8 @@ int processMultibulkBuffer(client *c) { } ok = string2ll(c->querybuf+c->qb_pos+1,newline-(c->querybuf+c->qb_pos+1),&ll); - if (!ok || ll < 0 || ll > server.proto_max_bulk_len) { + if (!ok || ll < 0 || + (!(c->flags & CLIENT_MASTER) && ll > server.proto_max_bulk_len)) { addReplyError(c,"Protocol error: invalid bulk length"); setProtocolError("invalid bulk length",c); return C_ERR; From a42e7f6876cf3767eb97dc4bdf39cf1b1222bdee Mon Sep 17 00:00:00 2001 From: "zhaozhao.zz" <276441700@qq.com> Date: Tue, 11 Aug 2020 20:51:27 +0800 Subject: [PATCH 099/377] redis-benchmark: fix wrong random key for hset (#4895) (cherry picked from commit b1a242b84a7ff4aadf8c47ea83a658db9cf0e98b) --- src/redis-benchmark.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/redis-benchmark.c b/src/redis-benchmark.c index f47cbe333..6385fa9b3 100644 --- a/src/redis-benchmark.c +++ b/src/redis-benchmark.c @@ -1722,7 +1722,7 @@ int main(int argc, const char **argv) { if (test_is_selected("hset")) { len = redisFormatCommand(&cmd, - "HSET myhash:{tag}:__rand_int__ element:__rand_int__ %s",data); + "HSET myhash:{tag} element:__rand_int__ %s",data); benchmark("HSET",cmd,len); free(cmd); } From 4772370a873d708d3ce54f31dde4790dd1bb8434 Mon Sep 17 00:00:00 2001 From: Wagner Francisco Mezaroba Date: Tue, 11 Aug 2020 19:57:21 +0100 Subject: [PATCH 100/377] allow --pattern to be used along with --bigkeys (#3586) Adds --pattern option to cli's --bigkeys, --hotkeys & --scan modes (cherry picked from commit 3ec7f8e915fb15634ea5babf217b8a8ecb3e3647) --- src/redis-cli.c | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git 
a/src/redis-cli.c b/src/redis-cli.c index de5d08149..0b44ec252 100644 --- a/src/redis-cli.c +++ b/src/redis-cli.c @@ -1764,7 +1764,8 @@ static void usage(void) { " --hotkeys Sample Redis keys looking for hot keys.\n" " only works when maxmemory-policy is *lfu.\n" " --scan List all keys using the SCAN command.\n" -" --pattern Useful with --scan to specify a SCAN pattern.\n" +" --pattern Keys pattern when using the --scan, --bigkeys or --hotkeys\n" +" options (default: *).\n" " --intrinsic-latency Run a test to measure intrinsic system latency.\n" " The test will run for the specified amount of seconds.\n" " --eval Send an EVAL command using the Lua script at .\n" @@ -7193,7 +7194,13 @@ static void pipeMode(void) { *--------------------------------------------------------------------------- */ static redisReply *sendScan(unsigned long long *it) { - redisReply *reply = redisCommand(context, "SCAN %llu", *it); + redisReply *reply; + + if (config.pattern) + reply = redisCommand(context,"SCAN %llu MATCH %s", + *it,config.pattern); + else + reply = redisCommand(context,"SCAN %llu",*it); /* Handle any error conditions */ if(reply == NULL) { From e0f6c0de337337d64e7071f4a0b4dd453dcd0d4b Mon Sep 17 00:00:00 2001 From: Madelyn Olson <34459052+madolson@users.noreply.github.com> Date: Tue, 11 Aug 2020 20:16:41 -0700 Subject: [PATCH 101/377] Fixed timer warning (#5953) (cherry picked from commit 46ce35c55e3c6d5c3b2f43194de7694c27ad1d5d) --- src/modules/hellotimer.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/modules/hellotimer.c b/src/modules/hellotimer.c index 57b111b7c..f6700df26 100644 --- a/src/modules/hellotimer.c +++ b/src/modules/hellotimer.c @@ -40,7 +40,7 @@ /* Timer callback. */ void timerHandler(RedisModuleCtx *ctx, void *data) { REDISMODULE_NOT_USED(ctx); - printf("Fired %s!\n", data); + printf("Fired %s!\n", (char *)data); RedisModule_Free(data); } From 3ed7d0af2034321be330649db9218e0b6ac831bc Mon Sep 17 00:00:00 2001 From: Yossi Gottlieb Date: Mon, 17 Aug 2020 17:36:50 +0300 Subject: [PATCH 102/377] TLS: relax verification on CONFIG SET. (#7665) Avoid re-configuring (and validating) SSL/TLS configuration on `CONFIG SET` when TLS is not actively enabled for incoming connections, cluster bus or replication. This fixes failures when tests run without `--tls` on binaries that were built with TLS support. An additional benefit is that it's now possible to perform a multi-step configuration process while TLS is disabled. The new configuration will be verified and applied only when TLS is effectively enabled. 
(cherry picked from commit 24efd22e894c90f380aa05a5fa77134bb9423ad3) --- .github/workflows/daily.yml | 26 ++++++++++++++++++++------ src/config.c | 5 ++++- 2 files changed, 24 insertions(+), 7 deletions(-) diff --git a/.github/workflows/daily.yml b/.github/workflows/daily.yml index 010a32289..5b395351b 100644 --- a/.github/workflows/daily.yml +++ b/.github/workflows/daily.yml @@ -85,12 +85,19 @@ jobs: sudo apt-get install tcl8.5 tcl-tls ./utils/gen-test-certs.sh ./runtest --accurate --verbose --tls + ./runtest --accurate --verbose - name: module api test - run: ./runtest-moduleapi --verbose --tls + run: | + ./runtest-moduleapi --verbose --tls + ./runtest-moduleapi --verbose - name: sentinel tests - run: ./runtest-sentinel --tls + run: | + ./runtest-sentinel --tls + ./runtest-sentinel - name: cluster tests - run: ./runtest-cluster --tls + run: | + ./runtest-cluster --tls + ./runtest-cluster test-valgrind: runs-on: ubuntu-latest @@ -147,10 +154,17 @@ jobs: yum -y install tcl tcltls ./utils/gen-test-certs.sh ./runtest --accurate --verbose --tls + ./runtest --accurate --verbose - name: module api test - run: ./runtest-moduleapi --verbose --tls + run: | + ./runtest-moduleapi --verbose --tls + ./runtest-moduleapi --verbose - name: sentinel tests - run: ./runtest-sentinel --tls + run: | + ./runtest-sentinel --tls + ./runtest-sentinel - name: cluster tests - run: ./runtest-cluster --tls + run: | + ./runtest-cluster --tls + ./runtest-cluster diff --git a/src/config.c b/src/config.c index 476956e18..b3c437d49 100644 --- a/src/config.c +++ b/src/config.c @@ -2077,7 +2077,10 @@ static int updateTlsCfg(char *val, char *prev, char **err) { UNUSED(val); UNUSED(prev); UNUSED(err); - if (tlsConfigure(&server.tls_ctx_config) == C_ERR) { + + /* If TLS is enabled, try to configure OpenSSL. */ + if ((server.tls_port || server.tls_replication || server.tls_cluster) + && tlsConfigure(&server.tls_ctx_config) == C_ERR) { *err = "Unable to update TLS configuration. Check server logs."; return 0; } From d13c44583c20e7e1a61756ccdfa6fd793f96dbe4 Mon Sep 17 00:00:00 2001 From: Yossi Gottlieb Date: Mon, 17 Aug 2020 17:46:54 +0300 Subject: [PATCH 103/377] Module API: fix missing RM_CLIENTINFO_FLAG_SSL. (#7666) The `REDISMODULE_CLIENTINFO_FLAG_SSL` flag was already a part of the `RedisModuleClientInfo` structure but was not implemented. 
(cherry picked from commit 2ec11f941ae41188e517670fc3224b12c7666541) --- src/connection.c | 8 +++++++- src/connection.h | 9 +++++++++ src/module.c | 2 ++ src/tls.c | 7 +++++++ tests/modules/misc.c | 38 +++++++++++++++++++++++++++++++++++ tests/unit/moduleapi/misc.tcl | 19 ++++++++++++++++++ 6 files changed, 82 insertions(+), 1 deletion(-) diff --git a/src/connection.c b/src/connection.c index 09fa12f2a..23b44a314 100644 --- a/src/connection.c +++ b/src/connection.c @@ -329,6 +329,11 @@ static ssize_t connSocketSyncReadLine(connection *conn, char *ptr, ssize_t size, return syncReadLine(conn->fd, ptr, size, timeout); } +static int connSocketGetType(connection *conn) { + (void) conn; + + return CONN_TYPE_SOCKET; +} ConnectionType CT_Socket = { .ae_handler = connSocketEventHandler, @@ -343,7 +348,8 @@ ConnectionType CT_Socket = { .blocking_connect = connSocketBlockingConnect, .sync_write = connSocketSyncWrite, .sync_read = connSocketSyncRead, - .sync_readline = connSocketSyncReadLine + .sync_readline = connSocketSyncReadLine, + .get_type = connSocketGetType }; diff --git a/src/connection.h b/src/connection.h index 0fd6c5f24..85585a3d0 100644 --- a/src/connection.h +++ b/src/connection.h @@ -48,6 +48,9 @@ typedef enum { #define CONN_FLAG_CLOSE_SCHEDULED (1<<0) /* Closed scheduled by a handler */ #define CONN_FLAG_WRITE_BARRIER (1<<1) /* Write barrier requested */ +#define CONN_TYPE_SOCKET 1 +#define CONN_TYPE_TLS 2 + typedef void (*ConnectionCallbackFunc)(struct connection *conn); typedef struct ConnectionType { @@ -64,6 +67,7 @@ typedef struct ConnectionType { ssize_t (*sync_write)(struct connection *conn, char *ptr, ssize_t size, long long timeout); ssize_t (*sync_read)(struct connection *conn, char *ptr, ssize_t size, long long timeout); ssize_t (*sync_readline)(struct connection *conn, char *ptr, ssize_t size, long long timeout); + int (*get_type)(struct connection *conn); } ConnectionType; struct connection { @@ -194,6 +198,11 @@ static inline ssize_t connSyncReadLine(connection *conn, char *ptr, ssize_t size return conn->type->sync_readline(conn, ptr, size, timeout); } +/* Return CONN_TYPE_* for the specified connection */ +static inline int connGetType(connection *conn) { + return conn->type->get_type(conn); +} + connection *connCreateSocket(); connection *connCreateAcceptedSocket(int fd); diff --git a/src/module.c b/src/module.c index b381b4f99..81824cd1e 100644 --- a/src/module.c +++ b/src/module.c @@ -1694,6 +1694,8 @@ int modulePopulateClientInfoStructure(void *ci, client *client, int structver) { ci1->flags |= REDISMODULE_CLIENTINFO_FLAG_TRACKING; if (client->flags & CLIENT_BLOCKED) ci1->flags |= REDISMODULE_CLIENTINFO_FLAG_BLOCKED; + if (connGetType(client->conn) == CONN_TYPE_TLS) + ci1->flags |= REDISMODULE_CLIENTINFO_FLAG_SSL; int port; connPeerToString(client->conn,ci1->addr,sizeof(ci1->addr),&port); diff --git a/src/tls.c b/src/tls.c index 4f0ea4d65..52887cd23 100644 --- a/src/tls.c +++ b/src/tls.c @@ -823,6 +823,12 @@ exit: return nread; } +static int connTLSGetType(connection *conn_) { + (void) conn_; + + return CONN_TYPE_TLS; +} + ConnectionType CT_TLS = { .ae_handler = tlsEventHandler, .accept = connTLSAccept, @@ -837,6 +843,7 @@ ConnectionType CT_TLS = { .sync_write = connTLSSyncWrite, .sync_read = connTLSSyncRead, .sync_readline = connTLSSyncReadLine, + .get_type = connTLSGetType }; int tlsHasPendingData() { diff --git a/tests/modules/misc.c b/tests/modules/misc.c index 1048d5065..1f9cb1932 100644 --- a/tests/modules/misc.c +++ b/tests/modules/misc.c @@ -195,6 +195,42 @@ 
int test_setlfu(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) return REDISMODULE_OK; } +int test_clientinfo(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) +{ + (void) argv; + (void) argc; + + RedisModuleClientInfo ci = { .version = REDISMODULE_CLIENTINFO_VERSION }; + + if (RedisModule_GetClientInfoById(&ci, RedisModule_GetClientId(ctx)) == REDISMODULE_ERR) { + RedisModule_ReplyWithError(ctx, "failed to get client info"); + return REDISMODULE_OK; + } + + RedisModule_ReplyWithArray(ctx, 10); + char flags[512]; + snprintf(flags, sizeof(flags) - 1, "%s:%s:%s:%s:%s:%s", + ci.flags & REDISMODULE_CLIENTINFO_FLAG_SSL ? "ssl" : "", + ci.flags & REDISMODULE_CLIENTINFO_FLAG_PUBSUB ? "pubsub" : "", + ci.flags & REDISMODULE_CLIENTINFO_FLAG_BLOCKED ? "blocked" : "", + ci.flags & REDISMODULE_CLIENTINFO_FLAG_TRACKING ? "tracking" : "", + ci.flags & REDISMODULE_CLIENTINFO_FLAG_UNIXSOCKET ? "unixsocket" : "", + ci.flags & REDISMODULE_CLIENTINFO_FLAG_MULTI ? "multi" : ""); + + RedisModule_ReplyWithCString(ctx, "flags"); + RedisModule_ReplyWithCString(ctx, flags); + RedisModule_ReplyWithCString(ctx, "id"); + RedisModule_ReplyWithLongLong(ctx, ci.id); + RedisModule_ReplyWithCString(ctx, "addr"); + RedisModule_ReplyWithCString(ctx, ci.addr); + RedisModule_ReplyWithCString(ctx, "port"); + RedisModule_ReplyWithLongLong(ctx, ci.port); + RedisModule_ReplyWithCString(ctx, "db"); + RedisModule_ReplyWithLongLong(ctx, ci.db); + + return REDISMODULE_OK; +} + int RedisModule_OnLoad(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) { REDISMODULE_NOT_USED(argv); REDISMODULE_NOT_USED(argc); @@ -221,6 +257,8 @@ int RedisModule_OnLoad(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) return REDISMODULE_ERR; if (RedisModule_CreateCommand(ctx,"test.getlfu", test_getlfu,"",0,0,0) == REDISMODULE_ERR) return REDISMODULE_ERR; + if (RedisModule_CreateCommand(ctx,"test.clientinfo", test_clientinfo,"",0,0,0) == REDISMODULE_ERR) + return REDISMODULE_ERR; return REDISMODULE_OK; } diff --git a/tests/unit/moduleapi/misc.tcl b/tests/unit/moduleapi/misc.tcl index 748016f1a..b57a94f6a 100644 --- a/tests/unit/moduleapi/misc.tcl +++ b/tests/unit/moduleapi/misc.tcl @@ -67,4 +67,23 @@ start_server {tags {"modules"}} { assert { $was_set == 0 } } + test {test module clientinfo api} { + # Test basic sanity and SSL flag + set info [r test.clientinfo] + set ssl_flag [expr $::tls ? {"ssl:"} : {":"}] + + assert { [dict get $info db] == 9 } + assert { [dict get $info flags] == "${ssl_flag}::::" } + + # Test MULTI flag + r multi + r test.clientinfo + set info [lindex [r exec] 0] + assert { [dict get $info flags] == "${ssl_flag}::::multi" } + + # Test TRACKING flag + r client tracking on + set info [r test.clientinfo] + assert { [dict get $info flags] == "${ssl_flag}::tracking::" } + } } From b091e0059dd5fdb250ec4201c6cc0ba7249342ba Mon Sep 17 00:00:00 2001 From: Oran Agra Date: Tue, 18 Aug 2020 08:28:43 +0300 Subject: [PATCH 104/377] Trim trailing spaces in error replies coming from rejectCommand (#7668) fe8d6fe749 added rejectCommand which takes an robj reply and passes it through addReplyErrorSafe to addReplyErrorLength. The robj contains newline at it's end, but addReplyErrorSafe converts it to spaces, and passes it to addReplyErrorLength which adds the protocol newlines. The result was that most error replies (like OOM) had extra two trailing spaces in them. 
(cherry picked from commit 05a4af3464b16e42b31dfb1ea62e2a66dc032fb2) --- src/networking.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/src/networking.c b/src/networking.c index aa42c43e9..31c6664c7 100644 --- a/src/networking.c +++ b/src/networking.c @@ -412,10 +412,14 @@ void addReplyError(client *c, const char *err) { * is emitted. */ void addReplyErrorSafe(client *c, char *s, size_t len) { size_t j; + /* Trim any newlines at the end (ones will be added by addReplyErrorLength) */ + while (s[len-1] == '\r' || s[len-1] == '\n') + len--; + /* Replace any newlines in the rest of the string with spaces. */ for (j = 0; j < len; j++) { if (s[j] == '\r' || s[j] == '\n') s[j] = ' '; } - addReplyErrorLength(c,s,sdslen(s)); + addReplyErrorLength(c,s,len); } void addReplyErrorFormat(client *c, const char *fmt, ...) { From 3e11f9d967fe049352909bf2a44607fa9b220545 Mon Sep 17 00:00:00 2001 From: Wen Hui Date: Tue, 18 Aug 2020 01:50:03 -0400 Subject: [PATCH 105/377] [module] using predefined REDISMODULE_NO_EXPIRE in RM_GetExpire (#7669) It was already defined in the API header and the documentation, but not used by the implementation. (cherry picked from commit b7236f0002bcaa15f3a487def9c5069b6c422e65) --- src/module.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/module.c b/src/module.c index 81824cd1e..2f14e1a50 100644 --- a/src/module.c +++ b/src/module.c @@ -2100,7 +2100,8 @@ int RM_UnlinkKey(RedisModuleKey *key) { * REDISMODULE_NO_EXPIRE is returned. */ mstime_t RM_GetExpire(RedisModuleKey *key) { mstime_t expire = getExpire(key->db,key->key); - if (expire == -1 || key->value == NULL) return -1; + if (expire == -1 || key->value == NULL) + return REDISMODULE_NO_EXPIRE; expire -= mstime(); return expire >= 0 ? expire : 0; } From 299c09207766c1ba53a8ba91d9fb2956bb2b8bbf Mon Sep 17 00:00:00 2001 From: Oran Agra Date: Tue, 18 Aug 2020 09:53:59 +0300 Subject: [PATCH 106/377] OOM Crash log include size of allocation attempt. (#7670) Since users often post just the crash log in github issues, the log print that's above it is missing. No reason not to include the size in the panic message itself. (cherry picked from commit 1b5cc94836d24b7b36cb6618644f9e2d60113c59) --- src/server.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/server.c b/src/server.c index d624cb434..38b35c9e1 100644 --- a/src/server.c +++ b/src/server.c @@ -4911,7 +4911,8 @@ void loadDataFromDisk(void) { void redisOutOfMemoryHandler(size_t allocation_size) { serverLog(LL_WARNING,"Out Of Memory allocating %zu bytes!", allocation_size); - serverPanic("Redis aborting for OUT OF MEMORY"); + serverPanic("Redis aborting for OUT OF MEMORY. 
Allocating %zu bytes!", + allocation_size); } void redisSetProcTitle(char *title) { From d74a98eaac7c7ae9812a3c4dc3120c499a0d1244 Mon Sep 17 00:00:00 2001 From: guybe7 Date: Tue, 18 Aug 2020 18:07:59 +0200 Subject: [PATCH 107/377] PERSIST should signalModifiedKey (Like EXPIRE does) (#7671) (cherry picked from commit 64cceb12ad5982393f9b93557a2c7d3c77ffc946) --- src/expire.c | 1 + 1 file changed, 1 insertion(+) diff --git a/src/expire.c b/src/expire.c index f2d135e2b..1c4f71df3 100644 --- a/src/expire.c +++ b/src/expire.c @@ -594,6 +594,7 @@ void pttlCommand(client *c) { void persistCommand(client *c) { if (lookupKeyWrite(c->db,c->argv[1])) { if (removeExpire(c->db,c->argv[1])) { + signalModifiedKey(c,c->db,c->argv[1]); notifyKeyspaceEvent(NOTIFY_GENERIC,"persist",c->argv[1],c->db->id); addReply(c,shared.cone); server.dirty++; From f80b75607680b9d90151219dc71251bebcd49dd8 Mon Sep 17 00:00:00 2001 From: Madelyn Olson <34459052+madolson@users.noreply.github.com> Date: Wed, 19 Aug 2020 19:07:43 -0700 Subject: [PATCH 108/377] Fixed hset error since it's shared with hmset (#7678) (cherry picked from commit 17c6b3c7eeb486a75d05ba7fb3c4490b7abbc597) --- src/t_hash.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/t_hash.c b/src/t_hash.c index 866bcd25b..4a03cfb25 100644 --- a/src/t_hash.c +++ b/src/t_hash.c @@ -532,7 +532,7 @@ void hsetCommand(client *c) { robj *o; if ((c->argc % 2) == 1) { - addReplyError(c,"wrong number of arguments for HMSET"); + addReplyErrorFormat(c,"wrong number of arguments for '%s' command",c->cmd->name); return; } From 3564aab1a53912dd87e682e5ef7aa6f78dbc22a9 Mon Sep 17 00:00:00 2001 From: Raghav Muddur Date: Wed, 19 Aug 2020 19:13:32 -0700 Subject: [PATCH 109/377] Update clusterMsgDataPublish to clusterMsgModule (#7682) Correcting the variable to clusterMsgModule. (cherry picked from commit f7cf24b5da539dbf4fafc404355a32bac06aa2f5) --- src/cluster.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/cluster.c b/src/cluster.c index 350aa7b6a..e54c249d9 100644 --- a/src/cluster.c +++ b/src/cluster.c @@ -1767,7 +1767,7 @@ int clusterProcessPacket(clusterLink *link) { } else if (type == CLUSTERMSG_TYPE_MODULE) { uint32_t explen = sizeof(clusterMsg)-sizeof(union clusterMsgData); - explen += sizeof(clusterMsgDataPublish) - + explen += sizeof(clusterMsgModule) - 3 + ntohl(hdr->data.module.msg.len); if (totlen != explen) return 1; } From b42976bd567a89a992c647d258494a11d872cf47 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E6=9D=A8=E5=8D=9A=E4=B8=9C?= Date: Thu, 20 Aug 2020 13:59:02 +0800 Subject: [PATCH 110/377] Fix flock cluster config may cause failure to restart after kill -9 (#7674) After fork, the child process(redis-aof-rewrite) will get the fd opened by the parent process(redis), when redis killed by kill -9, it will not graceful exit(call prepareForShutdown()), so redis-aof-rewrite thread may still alive, the fd(lock) will still be held by redis-aof-rewrite thread, and redis restart will fail to get lock, means fail to start. This issue was causing failures in the cluster tests in github actions. 
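A minimal standalone sketch of the flock()/fork() behaviour behind this fix (illustration only, not part of the patch; the lock file name and timings are made up). flock() locks belong to the open file description, so a child that inherits the descriptor keeps the lock alive even after the parent is gone, unless it closes the inherited fd as closeClildUnusedResourceAfterFork() now does:

```
/* flock_fork_demo.c -- hypothetical demo, not shipped with Redis. */
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/file.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void) {
    int fd = open("nodes.conf.lock", O_WRONLY|O_CREAT, 0644);
    if (fd == -1 || flock(fd, LOCK_EX|LOCK_NB) == -1) {
        perror("first lock");
        return 1;
    }

    pid_t pid = fork();
    if (pid == 0) {
        /* Child (stand-in for redis-aof-rewrite). Removing this close()
         * reproduces the bug: the inherited fd keeps the lock held. */
        close(fd);
        sleep(3);           /* pretend to do a long rewrite */
        _exit(0);
    }

    /* Parent goes away; a real kill -9 has the same effect, since the
     * kernel drops its descriptors for it. */
    close(fd);
    sleep(1);               /* give the child a moment to run its close() */

    /* "Restarted" parent tries to take the lock again. */
    int fd2 = open("nodes.conf.lock", O_WRONLY|O_CREAT, 0644);
    if (flock(fd2, LOCK_EX|LOCK_NB) == -1)
        perror("restart would fail");   /* seen when the child keeps fd open */
    else
        printf("restart can lock the file again\n");

    waitpid(pid, NULL, 0);
    return 0;
}
```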
Co-authored-by: Oran Agra (cherry picked from commit 5e6212e087c4696abc682b64079202c9ade8666c) --- src/cluster.c | 11 ++++++++++- src/server.c | 12 +++++++++++- src/server.h | 1 + tests/instances.tcl | 14 +++++++++----- 4 files changed, 31 insertions(+), 7 deletions(-) diff --git a/src/cluster.c b/src/cluster.c index e54c249d9..17d21df29 100644 --- a/src/cluster.c +++ b/src/cluster.c @@ -418,7 +418,15 @@ int clusterLockConfig(char *filename) { return C_ERR; } /* Lock acquired: leak the 'fd' by not closing it, so that we'll retain the - * lock to the file as long as the process exists. */ + * lock to the file as long as the process exists. + * + * After fork, the child process will get the fd opened by the parent process, + * we need save `fd` to `cluster_config_file_lock_fd`, so that in redisFork(), + * it will be closed in the child process. + * If it is not closed, when the main process is killed -9, but the child process + * (redis-aof-rewrite) is still alive, the fd(lock) will still be held by the + * child process, and the main process will fail to get lock, means fail to start. */ + server.cluster_config_file_lock_fd = fd; #endif /* __sun */ return C_OK; @@ -468,6 +476,7 @@ void clusterInit(void) { /* Lock the cluster config file to make sure every node uses * its own nodes.conf. */ + server.cluster_config_file_lock_fd = -1; if (clusterLockConfig(server.cluster_configfile) == C_ERR) exit(1); diff --git a/src/server.c b/src/server.c index 38b35c9e1..43b118759 100644 --- a/src/server.c +++ b/src/server.c @@ -4823,13 +4823,23 @@ void setupChildSignalHandlers(void) { return; } +/* After fork, the child process will inherit the resources + * of the parent process, e.g. fd(socket or flock) etc. + * should close the resources not used by the child process, so that if the + * parent restarts it can bind/lock despite the child possibly still running. */ +void closeClildUnusedResourceAfterFork() { + closeListeningSockets(0); + if (server.cluster_enabled && server.cluster_config_file_lock_fd != -1) + close(server.cluster_config_file_lock_fd); /* don't care if this fails */ +} + int redisFork() { int childpid; long long start = ustime(); if ((childpid = fork()) == 0) { /* Child */ - closeListeningSockets(0); setupChildSignalHandlers(); + closeClildUnusedResourceAfterFork(); } else { /* Parent */ server.stat_fork_time = ustime()-start; diff --git a/src/server.h b/src/server.h index 1862e879e..a6d4b58ca 100644 --- a/src/server.h +++ b/src/server.h @@ -1397,6 +1397,7 @@ struct redisServer { REDISMODULE_CLUSTER_FLAG_*. */ int cluster_allow_reads_when_down; /* Are reads allowed when the cluster is down? */ + int cluster_config_file_lock_fd; /* cluster config fd, will be flock */ /* Scripting */ lua_State *lua; /* The Lua interpreter. 
We use just one for all clients */ client *lua_client; /* The "fake client" to query Redis from Lua */ diff --git a/tests/instances.tcl b/tests/instances.tcl index 677af6427..691378b9b 100644 --- a/tests/instances.tcl +++ b/tests/instances.tcl @@ -98,7 +98,9 @@ proc spawn_instance {type base_port count {conf {}}} { # Check availability if {[server_is_up 127.0.0.1 $port 100] == 0} { - abort_sentinel_test "Problems starting $type #$j: ping timeout" + set logfile [file join $dirname log.txt] + puts [exec tail $logfile] + abort_sentinel_test "Problems starting $type #$j: ping timeout, maybe server start failed, check $logfile" } # Push the instance into the right list @@ -475,12 +477,12 @@ proc kill_instance {type id} { # Wait for the port it was using to be available again, so that's not # an issue to start a new server ASAP with the same port. - set retry 10 + set retry 100 while {[incr retry -1]} { - set port_is_free [catch {set s [socket 127.0.01 $port]}] + set port_is_free [catch {set s [socket 127.0.0.1 $port]}] if {$port_is_free} break catch {close $s} - after 1000 + after 100 } if {$retry == 0} { error "Port $port does not return available after killing instance." @@ -507,7 +509,9 @@ proc restart_instance {type id} { # Check that the instance is running if {[server_is_up 127.0.0.1 $port 100] == 0} { - abort_sentinel_test "Problems starting $type #$id: ping timeout" + set logfile [file join $dirname log.txt] + puts [exec tail $logfile] + abort_sentinel_test "Problems starting $type #$id: ping timeout, maybe server start failed, check $logfile" } # Connect with it with a fresh link From d0b8583167bc556c158b392961d7dc8cc311ea8c Mon Sep 17 00:00:00 2001 From: guybe7 Date: Thu, 20 Aug 2020 18:55:14 +0200 Subject: [PATCH 111/377] Modules: Invalidate saved_oparray after use (#7688) We wanna avoid a chance of someone using the pointer in it after it'll be freed / realloced. (cherry picked from commit 4de17eb032160c7ba94c505eab4b776a456e5117) --- src/module.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/module.c b/src/module.c index 2f14e1a50..2542a0e2a 100644 --- a/src/module.c +++ b/src/module.c @@ -609,6 +609,8 @@ void moduleHandlePropagationAfterCommandCallback(RedisModuleCtx *ctx) { redisOpArrayFree(&server.also_propagate); /* Restore the previous oparray in case of nexted use of the API. */ server.also_propagate = ctx->saved_oparray; + /* We're done with saved_oparray, let's invalidate it. */ + redisOpArrayInit(&ctx->saved_oparray); } } From 907b0124e82e7de4db762a189d4f73b102ea157a Mon Sep 17 00:00:00 2001 From: huangzhw Date: Fri, 21 Aug 2020 04:31:06 +0800 Subject: [PATCH 112/377] RedisModuleEvent_LoadingProgress always at 100% progress (#7685) It was also using the wrong struct, but luckily RedisModuleFlushInfo and RedisModuleLoadingProgress are identical. (cherry picked from commit b980e999293e9214a844712f9c88ca69acd20b1b) --- src/module.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/module.c b/src/module.c index 2542a0e2a..9755e282a 100644 --- a/src/module.c +++ b/src/module.c @@ -7200,8 +7200,8 @@ void processModuleLoadingProgressEvent(int is_aof) { /* Fire the loading progress modules end event. 
*/ int progress = -1; if (server.loading_total_bytes) - progress = (server.loading_total_bytes<<10) / server.loading_total_bytes; - RedisModuleFlushInfoV1 fi = {REDISMODULE_LOADING_PROGRESS_VERSION, + progress = (server.loading_loaded_bytes<<10) / server.loading_total_bytes; + RedisModuleLoadingProgressV1 fi = {REDISMODULE_LOADING_PROGRESS_VERSION, server.hz, progress}; moduleFireServerEvent(REDISMODULE_EVENT_LOADING_PROGRESS, From c5675c66bc063ee46b3b5c24c82ebff00c432b96 Mon Sep 17 00:00:00 2001 From: Yossi Gottlieb Date: Sun, 23 Aug 2020 10:17:43 +0300 Subject: [PATCH 113/377] Tests: fix redis-cli with remote hosts. (#7693) (cherry picked from commit 257f9f462f7782dcaecf7bbf35f4701b20b88a45) --- tests/integration/redis-cli.tcl | 4 ++-- tests/support/cli.tcl | 4 ++-- tests/unit/wait.tcl | 2 +- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/tests/integration/redis-cli.tcl b/tests/integration/redis-cli.tcl index 44ff430e2..2d4145ff0 100644 --- a/tests/integration/redis-cli.tcl +++ b/tests/integration/redis-cli.tcl @@ -3,7 +3,7 @@ source tests/support/cli.tcl start_server {tags {"cli"}} { proc open_cli {{opts "-n 9"} {infile ""}} { set ::env(TERM) dumb - set cmdline [rediscli [srv port] $opts] + set cmdline [rediscli [srv host] [srv port] $opts] if {$infile ne ""} { set cmdline "$cmdline < $infile" set mode "r" @@ -65,7 +65,7 @@ start_server {tags {"cli"}} { } proc _run_cli {opts args} { - set cmd [rediscli [srv port] [list -n 9 {*}$args]] + set cmd [rediscli [srv host] [srv port] [list -n 9 {*}$args]] foreach {key value} $opts { if {$key eq "pipe"} { set cmd "sh -c \"$value | $cmd\"" diff --git a/tests/support/cli.tcl b/tests/support/cli.tcl index 37c902a50..d55487931 100644 --- a/tests/support/cli.tcl +++ b/tests/support/cli.tcl @@ -11,8 +11,8 @@ proc rediscli_tls_config {testsdir} { } } -proc rediscli {port {opts {}}} { - set cmd [list src/redis-cli -p $port] +proc rediscli {host port {opts {}}} { + set cmd [list src/redis-cli -h $host -p $port] lappend cmd {*}[rediscli_tls_config "tests"] lappend cmd {*}$opts return $cmd diff --git a/tests/unit/wait.tcl b/tests/unit/wait.tcl index c9cfa6ed4..b1500cff8 100644 --- a/tests/unit/wait.tcl +++ b/tests/unit/wait.tcl @@ -33,7 +33,7 @@ start_server {} { } test {WAIT should not acknowledge 1 additional copy if slave is blocked} { - set cmd [rediscli $slave_port "-h $slave_host debug sleep 5"] + set cmd [rediscli $slave_host $slave_port "debug sleep 5"] exec {*}$cmd > /dev/null 2> /dev/null & after 1000 ;# Give redis-cli the time to execute the command. $master set foo 0 From 34124fff88fd9e26945b8c6d565c140a23b1513c Mon Sep 17 00:00:00 2001 From: Valentino Geron Date: Thu, 20 Aug 2020 18:48:09 +0300 Subject: [PATCH 114/377] Fix LPOS command when RANK is greater than matches When calling to LPOS command when RANK is higher than matches, the return value is non valid response. For example: ``` LPUSH l a :1 LPOS l b RANK 5 COUNT 10 *-4 ``` It may break client-side parser. Now, we count how many replies were replied in the array. ``` LPUSH l a :1 LPOS l b RANK 5 COUNT 10 *0 ``` (cherry picked from commit 7a555da64f56a4fb2f300d84a35778bee8f471ca) --- src/t_list.c | 5 +++-- tests/unit/type/list.tcl | 6 ++++++ 2 files changed, 9 insertions(+), 2 deletions(-) diff --git a/src/t_list.c b/src/t_list.c index 2c339888d..a751dde26 100644 --- a/src/t_list.c +++ b/src/t_list.c @@ -572,13 +572,14 @@ void lposCommand(client *c) { li = listTypeInitIterator(o,direction == LIST_HEAD ? 
-1 : 0,direction); listTypeEntry entry; long llen = listTypeLength(o); - long index = 0, matches = 0, matchindex = -1; + long index = 0, matches = 0, matchindex = -1, arraylen = 0; while (listTypeNext(li,&entry) && (maxlen == 0 || index < maxlen)) { if (listTypeEqual(&entry,ele)) { matches++; matchindex = (direction == LIST_TAIL) ? index : llen - index - 1; if (matches >= rank) { if (arraylenptr) { + arraylen++; addReplyLongLong(c,matchindex); if (count && matches-rank+1 >= count) break; } else { @@ -594,7 +595,7 @@ void lposCommand(client *c) { /* Reply to the client. Note that arraylenptr is not NULL only if * the COUNT option was selected. */ if (arraylenptr != NULL) { - setDeferredArrayLen(c,arraylenptr,matches-rank+1); + setDeferredArrayLen(c,arraylenptr,arraylen); } else { if (matchindex != -1) addReplyLongLong(c,matchindex); diff --git a/tests/unit/type/list.tcl b/tests/unit/type/list.tcl index 0e39d7d95..5ea2b9cd1 100644 --- a/tests/unit/type/list.tcl +++ b/tests/unit/type/list.tcl @@ -50,6 +50,12 @@ start_server { assert {[r LPOS mylist c COUNT 0 MAXLEN 7 RANK 2] == {6}} } + test {LPOS when RANK is greater than matches} { + r DEL mylist + r LPUSH l a + assert {[r LPOS mylist b COUNT 10 RANK 5] eq {}} + } + test {LPUSH, RPUSH, LLENGTH, LINDEX, LPOP - ziplist} { # first lpush then rpush assert_equal 1 [r lpush myziplist1 aa] From d727f527238f632d5aebb03dcde0440b4f65255b Mon Sep 17 00:00:00 2001 From: Valentino Geron Date: Sun, 23 Aug 2020 12:19:41 +0300 Subject: [PATCH 115/377] Assert that setDeferredAggregateLen isn't called with negative value In case the redis is about to return broken reply we want to crash with assert so that we are notified about the bug. see #7687. (cherry picked from commit 7e6c9ef8819a071679f8dd18035dbbe2455c7b12) --- src/networking.c | 1 + 1 file changed, 1 insertion(+) diff --git a/src/networking.c b/src/networking.c index 31c6664c7..e267565a6 100644 --- a/src/networking.c +++ b/src/networking.c @@ -492,6 +492,7 @@ void *addReplyDeferredLen(client *c) { /* Populate the length object and try gluing it to the next chunk. */ void setDeferredAggregateLen(client *c, void *node, long length, char prefix) { + serverAssert(length >= 0); listNode *ln = (listNode*)node; clientReplyBlock *next; char lenstr[128]; From fe6da3c36b2e51b285294fc631716accda2bd1e8 Mon Sep 17 00:00:00 2001 From: Yossi Gottlieb Date: Mon, 24 Aug 2020 12:54:56 +0300 Subject: [PATCH 116/377] Add language servers stuff, test/tls to gitignore. (#7698) (cherry picked from commit 74d9d9544969fed000bde5f8504b5b3f211e473f) --- .gitignore | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/.gitignore b/.gitignore index e445fd201..721f1d74e 100644 --- a/.gitignore +++ b/.gitignore @@ -27,9 +27,13 @@ src/nodes.conf deps/lua/src/lua deps/lua/src/luac deps/lua/src/liblua.a +tests/tls/* .make-* .prerequisites *.dSYM Makefile.dep .vscode/* .idea/* +.ccls +.ccls-cache/* +compile_commands.json From c77e8e2f1c8e550ae27c1331c209c371fbd80e3d Mon Sep 17 00:00:00 2001 From: Itamar Haber Date: Tue, 25 Aug 2020 15:58:50 +0300 Subject: [PATCH 117/377] Expands lazyfree's effort estimate to include Streams (#5794) Otherwise, it is treated as a single allocation and freed synchronously. The following logic is used for estimating the effort in constant-ish time complexity: 1. Check the number of nodes. 1. Add an allocation for each consumer group registered inside the stream. 1. Check the number of PELs in the first CG, and then add this count times the number of CGs. 1. 
Check the number of consumers in the first CG, and then add this count times the number of CGs. (cherry picked from commit cb504d7fddd09149655e91496588c610e89ca131) --- src/lazyfree.c | 24 ++++++++++++++++++++++++ 1 file changed, 24 insertions(+) diff --git a/src/lazyfree.c b/src/lazyfree.c index f01504e70..cbcc1c240 100644 --- a/src/lazyfree.c +++ b/src/lazyfree.c @@ -41,6 +41,30 @@ size_t lazyfreeGetFreeEffort(robj *obj) { } else if (obj->type == OBJ_HASH && obj->encoding == OBJ_ENCODING_HT) { dict *ht = obj->ptr; return dictSize(ht); + } else if (obj->type == OBJ_STREAM) { + size_t effort = 0; + stream *s = obj->ptr; + + /* Make a best effort estimate to maintain constant runtime. Every macro + * node in the Stream is one allocation. */ + effort += s->rax->numnodes; + + /* Every consumer group is an allocation and so are the entries in its + * PEL. We use size of the first group's PEL as an estimate for all + * others. */ + if (s->cgroups) { + raxIterator ri; + streamCG *cg; + raxStart(&ri,s->cgroups); + raxSeek(&ri,"^",NULL,0); + /* There must be at least one group so the following should always + * work. */ + serverAssert(raxNext(&ri)); + cg = ri.data; + effort += raxSize(s->cgroups)*(1+raxSize(cg->pel)); + raxStop(&ri); + } + return effort; } else { return 1; /* Everything else is a single allocation. */ } From 3c136a77774dcb875de3ad2e4cf726781bfd6112 Mon Sep 17 00:00:00 2001 From: valentinogeron Date: Thu, 27 Aug 2020 09:19:24 +0300 Subject: [PATCH 118/377] EXEC with only read commands should not be rejected when OOM (#7696) If the server gets MULTI command followed by only read commands, and right before it gets the EXEC it reaches OOM, the client will get OOM response. So, from now on, it will get OOM response only if there was at least one command that was tagged with `use-memory` flag (cherry picked from commit 0292720ccb0a189d3ed49d7bf912602360a4ecdd) --- src/server.c | 21 +++++++++++++-------- tests/unit/multi.tcl | 38 ++++++++++++++++++++++++++++++++++++++ 2 files changed, 51 insertions(+), 8 deletions(-) diff --git a/src/server.c b/src/server.c index 43b118759..dc0236cbd 100644 --- a/src/server.c +++ b/src/server.c @@ -3549,14 +3549,19 @@ int processCommand(client *c) { * into a slave, that may be the active client, to be freed. */ if (server.current_client == NULL) return C_ERR; - /* It was impossible to free enough memory, and the command the client - * is trying to execute is denied during OOM conditions or the client - * is in MULTI/EXEC context? Error. */ - if (out_of_memory && - (is_denyoom_command || - (c->flags & CLIENT_MULTI && - c->cmd->proc != discardCommand))) - { + int reject_cmd_on_oom = is_denyoom_command; + /* If client is in MULTI/EXEC context, queuing may consume an unlimited + * amount of memory, so we want to stop that. + * However, we never want to reject DISCARD, or even EXEC (unless it + * contains denied commands, in which case is_denyoom_command is already + * set. 
*/ + if (c->flags & CLIENT_MULTI && + c->cmd->proc != execCommand && + c->cmd->proc != discardCommand) { + reject_cmd_on_oom = 1; + } + + if (out_of_memory && reject_cmd_on_oom) { rejectCommand(c, shared.oomerr); return C_OK; } diff --git a/tests/unit/multi.tcl b/tests/unit/multi.tcl index 44a822ba6..817d509c5 100644 --- a/tests/unit/multi.tcl +++ b/tests/unit/multi.tcl @@ -466,4 +466,42 @@ start_server {tags {"multi"}} { assert { $xx == 1 } $r1 close; } + + test {EXEC with only read commands should not be rejected when OOM} { + set r2 [redis_client] + + r set x value + r multi + r get x + r ping + + # enforcing OOM + $r2 config set maxmemory 1 + + # finish the multi transaction with exec + assert { [r exec] == {value PONG} } + + # releasing OOM + $r2 config set maxmemory 0 + $r2 close + } + + test {EXEC with at least one use-memory command should fail} { + set r2 [redis_client] + + r multi + r set x 1 + r get x + + # enforcing OOM + $r2 config set maxmemory 1 + + # finish the multi transaction with exec + catch {r exec} e + assert_match {EXECABORT*OOM*} $e + + # releasing OOM + $r2 config set maxmemory 0 + $r2 close + } } From 888cbf6822ea293ffbfc11ff6c611b634d442206 Mon Sep 17 00:00:00 2001 From: Oran Agra Date: Thu, 27 Aug 2020 12:54:01 +0300 Subject: [PATCH 119/377] Fix rejectCommand trims newline in shared error objects, hung clients (#7714) fe8d6fe74 (released in 6.0.6) has a side effect, when processCommand rejects a command with pre-made shared object error string, it trims the newlines from the end of the string. if that string is later used with addReply, the newline will be missing, breaking the protocol, and leaving the client hung. It seems that the only scenario which this happens is when replying with -LOADING to some command, and later using that reply from the CONFIG SET command (still during loading). this will result in hung client. Refactoring the code in order to avoid trimming these newlines from shared string objects, and do the newline trimming only in other cases where it's needed. Co-authored-by: Guy Benoish (cherry picked from commit 2640897e3a01fbacb620c12e021c934e48eeccb9) --- src/multi.c | 3 ++- src/networking.c | 47 ++++++++++++++++++++++++++++++----------------- src/server.c | 12 ++++++++---- src/server.h | 3 ++- 4 files changed, 42 insertions(+), 23 deletions(-) diff --git a/src/multi.c b/src/multi.c index 35ddf92af..a99c308be 100644 --- a/src/multi.c +++ b/src/multi.c @@ -127,7 +127,8 @@ void execCommandPropagateExec(client *c) { /* Aborts a transaction, with a specific error message. * The transaction is always aboarted with -EXECABORT so that the client knows * the server exited the multi state, but the actual reason for the abort is - * included too. */ + * included too. + * Note: 'error' may or may not end with \r\n. see addReplyErrorFormat. */ void execCommandAbort(client *c, sds error) { discardTransaction(c); diff --git a/src/networking.c b/src/networking.c index e267565a6..8f06c6ba6 100644 --- a/src/networking.c +++ b/src/networking.c @@ -357,14 +357,18 @@ void addReplyProto(client *c, const char *s, size_t len) { * * If the error code is already passed in the string 's', the error * code provided is used, otherwise the string "-ERR " for the generic - * error code is automatically added. */ + * error code is automatically added. + * Note that 's' must NOT end with \r\n. */ void addReplyErrorLength(client *c, const char *s, size_t len) { /* If the string already starts with "-..." then the error code * is provided by the caller. 
Otherwise we use "-ERR". */ if (!len || s[0] != '-') addReplyProto(c,"-ERR ",5); addReplyProto(c,s,len); addReplyProto(c,"\r\n",2); +} +/* Do some actions after an error reply was sent (Log if needed, updates stats, etc.) */ +void afterErrorReply(client *c, const char *s, size_t len) { /* Sometimes it could be normal that a slave replies to a master with * an error and this function gets called. Actually the error will never * be sent because addReply*() against master clients has no effect... @@ -390,10 +394,11 @@ void addReplyErrorLength(client *c, const char *s, size_t len) { from = "master"; } + if (len > 4096) len = 4096; char *cmdname = c->lastcmd ? c->lastcmd->name : ""; serverLog(LL_WARNING,"== CRITICAL == This %s is sending an error " - "to its %s: '%s' after processing the command " - "'%s'", from, to, s, cmdname); + "to its %s: '%.*s' after processing the command " + "'%s'", from, to, (int)len, s, cmdname); if (ctype == CLIENT_TYPE_MASTER && server.repl_backlog && server.repl_backlog_histlen > 0) { @@ -403,31 +408,39 @@ void addReplyErrorLength(client *c, const char *s, size_t len) { } } +/* The 'err' object is expected to start with -ERRORCODE and end with \r\n. + * Unlike addReplyErrorSds and others alike which rely on addReplyErrorLength. */ +void addReplyErrorObject(client *c, robj *err) { + addReply(c, err); + afterErrorReply(c, err->ptr, sdslen(err->ptr)-2); /* Ignore trailing \r\n */ +} + +/* See addReplyErrorLength for expectations from the input string. */ void addReplyError(client *c, const char *err) { addReplyErrorLength(c,err,strlen(err)); + afterErrorReply(c,err,strlen(err)); } -/* See addReplyErrorLength. - * Makes sure there are no newlines in the string, otherwise invalid protocol - * is emitted. */ -void addReplyErrorSafe(client *c, char *s, size_t len) { - size_t j; - /* Trim any newlines at the end (ones will be added by addReplyErrorLength) */ - while (s[len-1] == '\r' || s[len-1] == '\n') - len--; - /* Replace any newlines in the rest of the string with spaces. */ - for (j = 0; j < len; j++) { - if (s[j] == '\r' || s[j] == '\n') s[j] = ' '; - } - addReplyErrorLength(c,s,len); +/* See addReplyErrorLength for expectations from the input string. */ +void addReplyErrorSds(client *c, sds err) { + addReplyErrorLength(c,err,sdslen(err)); + afterErrorReply(c,err,sdslen(err)); } +/* See addReplyErrorLength for expectations from the formatted string. + * The formatted string is safe to contain \r and \n anywhere. */ void addReplyErrorFormat(client *c, const char *fmt, ...) { va_list ap; va_start(ap,fmt); sds s = sdscatvprintf(sdsempty(),fmt,ap); va_end(ap); - addReplyErrorSafe(c, s, sdslen(s)); + /* Trim any newlines at the end (ones will be added by addReplyErrorLength) */ + s = sdstrim(s, "\r\n"); + /* Make sure there are no newlines in the middle of the string, otherwise + * invalid protocol is emitted. */ + s = sdsmapchars(s, "\r\n", " ", 2); + addReplyErrorLength(c,s,sdslen(s)); + afterErrorReply(c,s,sdslen(s)); sdsfree(s); } diff --git a/src/server.c b/src/server.c index dc0236cbd..9c3d71d6b 100644 --- a/src/server.c +++ b/src/server.c @@ -3406,14 +3406,15 @@ void call(client *c, int flags) { /* Used when a command that is ready for execution needs to be rejected, due to * varios pre-execution checks. it returns the appropriate error to the client. * If there's a transaction is flags it as dirty, and if the command is EXEC, - * it aborts the transaction. */ + * it aborts the transaction. 
+ * Note: 'reply' is expected to end with \r\n */ void rejectCommand(client *c, robj *reply) { flagTransaction(c); if (c->cmd && c->cmd->proc == execCommand) { execCommandAbort(c, reply->ptr); } else { /* using addReplyError* rather than addReply so that the error can be logged. */ - addReplyErrorSafe(c, reply->ptr, sdslen(reply->ptr)); + addReplyErrorObject(c, reply); } } @@ -3423,10 +3424,13 @@ void rejectCommandFormat(client *c, const char *fmt, ...) { va_start(ap,fmt); sds s = sdscatvprintf(sdsempty(),fmt,ap); va_end(ap); + /* Make sure there are no newlines in the string, otherwise invalid protocol + * is emitted (The args come from the user, they may contain any character). */ + sdsmapchars(s, "\r\n", " ", 2); if (c->cmd && c->cmd->proc == execCommand) { execCommandAbort(c, s); } else { - addReplyErrorSafe(c, s, sdslen(s)); + addReplyErrorSds(c, s); } sdsfree(s); } @@ -3589,7 +3593,7 @@ int processCommand(client *c) { rejectCommand(c, shared.bgsaveerr); else rejectCommandFormat(c, - "-MISCONF Errors writing to the AOF file: %s\r\n", + "-MISCONF Errors writing to the AOF file: %s", strerror(server.aof_last_write_errno)); return C_OK; } diff --git a/src/server.h b/src/server.h index a6d4b58ca..ba6dffcef 100644 --- a/src/server.h +++ b/src/server.h @@ -1638,7 +1638,8 @@ void addReplyBulkLongLong(client *c, long long ll); void addReply(client *c, robj *obj); void addReplySds(client *c, sds s); void addReplyBulkSds(client *c, sds s); -void addReplyErrorSafe(client *c, char *s, size_t len); +void addReplyErrorObject(client *c, robj *err); +void addReplyErrorSds(client *c, sds err); void addReplyError(client *c, const char *err); void addReplyStatus(client *c, const char *status); void addReplyDouble(client *c, double d); From f63e428e5b15752bc55425115a06ef7d6427db83 Mon Sep 17 00:00:00 2001 From: "Meir Shpilraien (Spielrein)" Date: Thu, 23 Jul 2020 12:38:51 +0300 Subject: [PATCH 120/377] This PR introduces a new loaded keyspace event (#7536) Co-authored-by: Oran Agra Co-authored-by: Itamar Haber (cherry picked from commit 73198c50194cbf0254afd4cc5245f9274a538d13) --- runtest-moduleapi | 1 + src/module.c | 5 ++ src/rdb.c | 6 +- src/redismodule.h | 2 +- src/server.h | 1 + tests/modules/Makefile | 3 +- tests/modules/keyspace_events.c | 99 ++++++++++++++++++++++++ tests/unit/moduleapi/keyspace_events.tcl | 22 ++++++ 8 files changed, 135 insertions(+), 4 deletions(-) create mode 100644 tests/modules/keyspace_events.c create mode 100644 tests/unit/moduleapi/keyspace_events.tcl diff --git a/runtest-moduleapi b/runtest-moduleapi index f6cc0a258..71db27e5e 100755 --- a/runtest-moduleapi +++ b/runtest-moduleapi @@ -25,4 +25,5 @@ $TCLSH tests/test_helper.tcl \ --single unit/moduleapi/scan \ --single unit/moduleapi/datatype \ --single unit/moduleapi/auth \ +--single unit/moduleapi/keyspace_events \ "${@}" diff --git a/src/module.c b/src/module.c index 9755e282a..b15bb7276 100644 --- a/src/module.c +++ b/src/module.c @@ -4864,6 +4864,11 @@ void moduleReleaseGIL(void) { * - REDISMODULE_NOTIFY_STREAM: Stream events * - REDISMODULE_NOTIFY_KEYMISS: Key-miss events * - REDISMODULE_NOTIFY_ALL: All events (Excluding REDISMODULE_NOTIFY_KEYMISS) + * - REDISMODULE_NOTIFY_LOADED: A special notification available only for modules, + * indicates that the key was loaded from persistence. + * Notice, when this event fires, the given key + * can not be retained, use RM_CreateStringFromString + * instead. 
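+ *                              A module can request these events with e.g.
+ *                              RedisModule_SubscribeToKeyspaceEvents(ctx,
+ *                              REDISMODULE_NOTIFY_LOADED, callback) and copy
+ *                              the key name inside the callback if it has to
+ *                              keep it after the callback returns.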
* * We do not distinguish between key events and keyspace events, and it is up * to the module to filter the actions taken based on the key. diff --git a/src/rdb.c b/src/rdb.c index ac1985d24..54a169cd8 100644 --- a/src/rdb.c +++ b/src/rdb.c @@ -2307,6 +2307,7 @@ int rdbLoadRio(rio *rdb, int rdbflags, rdbSaveInfo *rsi) { decrRefCount(val); } else { robj keyobj; + initStaticStringObject(keyobj,key); /* Add the new object in the hash table */ int added = dbAddRDBLoad(db,key,val); @@ -2315,7 +2316,6 @@ int rdbLoadRio(rio *rdb, int rdbflags, rdbSaveInfo *rsi) { /* This flag is useful for DEBUG RELOAD special modes. * When it's set we allow new keys to replace the current * keys with the same name. */ - initStaticStringObject(keyobj,key); dbSyncDelete(db,&keyobj); dbAddRDBLoad(db,key,val); } else { @@ -2327,12 +2327,14 @@ int rdbLoadRio(rio *rdb, int rdbflags, rdbSaveInfo *rsi) { /* Set the expire time if needed */ if (expiretime != -1) { - initStaticStringObject(keyobj,key); setExpire(NULL,db,&keyobj,expiretime); } /* Set usage information (for eviction). */ objectSetLRUOrLFU(val,lfu_freq,lru_idle,lru_clock,1000); + + /* call key space notification on key loaded for modules only */ + moduleNotifyKeyspaceEvent(NOTIFY_LOADED, "loaded", &keyobj, db->id); } /* Loading the database more slowly is useful in order to test diff --git a/src/redismodule.h b/src/redismodule.h index d67b01f68..ffc679ebc 100644 --- a/src/redismodule.h +++ b/src/redismodule.h @@ -128,9 +128,9 @@ #define REDISMODULE_NOTIFY_EVICTED (1<<9) /* e */ #define REDISMODULE_NOTIFY_STREAM (1<<10) /* t */ #define REDISMODULE_NOTIFY_KEY_MISS (1<<11) /* m (Note: This one is excluded from REDISMODULE_NOTIFY_ALL on purpose) */ +#define REDISMODULE_NOTIFY_LOADED (1<<12) /* module only key space notification, indicate a key loaded from rdb */ #define REDISMODULE_NOTIFY_ALL (REDISMODULE_NOTIFY_GENERIC | REDISMODULE_NOTIFY_STRING | REDISMODULE_NOTIFY_LIST | REDISMODULE_NOTIFY_SET | REDISMODULE_NOTIFY_HASH | REDISMODULE_NOTIFY_ZSET | REDISMODULE_NOTIFY_EXPIRED | REDISMODULE_NOTIFY_EVICTED | REDISMODULE_NOTIFY_STREAM) /* A */ - /* A special pointer that we can use between the core and the module to signal * field deletion, and that is impossible to be a valid pointer. */ #define REDISMODULE_HASH_DELETE ((RedisModuleString*)(long)1) diff --git a/src/server.h b/src/server.h index ba6dffcef..e9b4777ef 100644 --- a/src/server.h +++ b/src/server.h @@ -431,6 +431,7 @@ typedef long long ustime_t; /* microsecond time type. */ #define NOTIFY_EVICTED (1<<9) /* e */ #define NOTIFY_STREAM (1<<10) /* t */ #define NOTIFY_KEY_MISS (1<<11) /* m (Note: This one is excluded from NOTIFY_ALL on purpose) */ +#define NOTIFY_LOADED (1<<12) /* module only key space notification, indicate a key loaded from rdb */ #define NOTIFY_ALL (NOTIFY_GENERIC | NOTIFY_STRING | NOTIFY_LIST | NOTIFY_SET | NOTIFY_HASH | NOTIFY_ZSET | NOTIFY_EXPIRED | NOTIFY_EVICTED | NOTIFY_STREAM) /* A flag */ /* Get the first bind addr or NULL */ diff --git a/tests/modules/Makefile b/tests/modules/Makefile index 39b8e6efa..de7407a84 100644 --- a/tests/modules/Makefile +++ b/tests/modules/Makefile @@ -22,7 +22,8 @@ TEST_MODULES = \ blockonkeys.so \ scan.so \ datatype.so \ - auth.so + auth.so \ + keyspace_events.so .PHONY: all diff --git a/tests/modules/keyspace_events.c b/tests/modules/keyspace_events.c new file mode 100644 index 000000000..b2296c1cb --- /dev/null +++ b/tests/modules/keyspace_events.c @@ -0,0 +1,99 @@ +/* This module is used to test the server keyspace events API. 
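+ * It subscribes to the module-only 'loaded' notification and remembers every
+ * key name it is notified about, so the tests can ask which keys were loaded.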
+ * + * ----------------------------------------------------------------------------- + * + * Copyright (c) 2020, Meir Shpilraien + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Redis nor the names of its contributors may be used + * to endorse or promote products derived from this software without + * specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#define REDISMODULE_EXPERIMENTAL_API + +#include "redismodule.h" +#include +#include + +/** strores all the keys on which we got 'loaded' keyspace notification **/ +RedisModuleDict *loaded_event_log = NULL; + +static int KeySpace_Notification(RedisModuleCtx *ctx, int type, const char *event, RedisModuleString *key){ + REDISMODULE_NOT_USED(ctx); + REDISMODULE_NOT_USED(type); + + if(strcmp(event, "loaded") == 0){ + const char* keyName = RedisModule_StringPtrLen(key, NULL); + int nokey; + RedisModule_DictGetC(loaded_event_log, (void*)keyName, strlen(keyName), &nokey); + if(nokey){ + RedisModule_DictSetC(loaded_event_log, (void*)keyName, strlen(keyName), NULL); + } + } + + return REDISMODULE_OK; +} + +static int cmdIsKeyLoaded(RedisModuleCtx *ctx, RedisModuleString **argv, int argc){ + if(argc != 2){ + return RedisModule_WrongArity(ctx); + } + + const char* key = RedisModule_StringPtrLen(argv[1], NULL); + + int nokey; + RedisModule_DictGetC(loaded_event_log, (void*)key, strlen(key), &nokey); + + RedisModule_ReplyWithLongLong(ctx, !nokey); + return REDISMODULE_OK; +} + +/* This function must be present on each Redis module. It is used in order to + * register the commands into the Redis server. 
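+ * Here it also creates the 'loaded_event_log' dict and subscribes to the
+ * module-only REDISMODULE_NOTIFY_LOADED keyspace events.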
*/ +int RedisModule_OnLoad(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) { + REDISMODULE_NOT_USED(argv); + REDISMODULE_NOT_USED(argc); + + if (RedisModule_Init(ctx,"testkeyspace",1,REDISMODULE_APIVER_1) == REDISMODULE_ERR){ + return REDISMODULE_ERR; + } + + loaded_event_log = RedisModule_CreateDict(ctx); + + if(RedisModule_SubscribeToKeyspaceEvents(ctx, REDISMODULE_NOTIFY_LOADED, KeySpace_Notification) != REDISMODULE_OK){ + return REDISMODULE_ERR; + } + + if (RedisModule_CreateCommand(ctx,"keyspace.is_key_loaded", cmdIsKeyLoaded,"",0,0,0) == REDISMODULE_ERR){ + return REDISMODULE_ERR; + } + + return REDISMODULE_OK; +} + +int RedisModule_OnUnload(RedisModuleCtx *ctx) { + RedisModule_FreeDict(ctx, loaded_event_log); + loaded_event_log = NULL; + return REDISMODULE_OK; +} diff --git a/tests/unit/moduleapi/keyspace_events.tcl b/tests/unit/moduleapi/keyspace_events.tcl new file mode 100644 index 000000000..cb959ab52 --- /dev/null +++ b/tests/unit/moduleapi/keyspace_events.tcl @@ -0,0 +1,22 @@ +set testmodule [file normalize tests/modules/keyspace_events.so] + +tags "modules" { + start_server [list overrides [list loadmodule "$testmodule"]] { + + test {Test loaded key space event} { + r set x 1 + r hset y f v + r lpush z 1 2 3 + r sadd p 1 2 3 + r zadd t 1 f1 2 f2 + r xadd s * f v + r debug reload + assert_equal 1 [r keyspace.is_key_loaded x] + assert_equal 1 [r keyspace.is_key_loaded y] + assert_equal 1 [r keyspace.is_key_loaded z] + assert_equal 1 [r keyspace.is_key_loaded p] + assert_equal 1 [r keyspace.is_key_loaded t] + assert_equal 1 [r keyspace.is_key_loaded s] + } + } +} \ No newline at end of file From 57c6b0e718be80db69363106d022a2bd6d72e110 Mon Sep 17 00:00:00 2001 From: Arun Ranganathan Date: Wed, 29 Jul 2020 01:46:44 -0400 Subject: [PATCH 121/377] Show threading configuration in INFO output (#7446) Co-authored-by: Oran Agra (cherry picked from commit 444b53e6402dabc7c2bf52be6603c4c9bbfb9921) --- src/networking.c | 33 +++++++++++++++++++++++---------- src/server.c | 22 ++++++++++++++++++---- src/server.h | 5 +++++ 3 files changed, 46 insertions(+), 14 deletions(-) diff --git a/src/networking.c b/src/networking.c index 8f06c6ba6..495be0ece 100644 --- a/src/networking.c +++ b/src/networking.c @@ -1313,6 +1313,9 @@ client *lookupClientByID(uint64_t id) { * set to 0. So when handler_installed is set to 0 the function must be * thread safe. */ int writeToClient(client *c, int handler_installed) { + /* Update total number of writes on server */ + server.stat_total_writes_processed++; + ssize_t nwritten = 0, totwritten = 0; size_t objlen; clientReplyBlock *o; @@ -1929,6 +1932,9 @@ void readQueryFromClient(connection *conn) { * the event loop. This is the case if threaded I/O is enabled. */ if (postponeClientRead(c)) return; + /* Update total number of reads on server */ + server.stat_total_reads_processed++; + readlen = PROTO_IOBUF_LEN; /* If this is a multi bulk request, and we are processing a bulk reply * that is large enough, try to maximize the probability that the query @@ -2926,7 +2932,6 @@ int tio_debug = 0; pthread_t io_threads[IO_THREADS_MAX_NUM]; pthread_mutex_t io_threads_mutex[IO_THREADS_MAX_NUM]; _Atomic unsigned long io_threads_pending[IO_THREADS_MAX_NUM]; -int io_threads_active; /* Are the threads currently spinning waiting I/O? */ int io_threads_op; /* IO_THREADS_OP_WRITE or IO_THREADS_OP_READ. 
*/ /* This is the list of clients each thread will serve when threaded I/O is @@ -2985,7 +2990,7 @@ void *IOThreadMain(void *myid) { /* Initialize the data structures needed for threaded I/O. */ void initThreadedIO(void) { - io_threads_active = 0; /* We start with threads not active. */ + server.io_threads_active = 0; /* We start with threads not active. */ /* Don't spawn any thread if the user selected a single thread: * we'll handle I/O directly from the main thread. */ @@ -3019,10 +3024,10 @@ void initThreadedIO(void) { void startThreadedIO(void) { if (tio_debug) { printf("S"); fflush(stdout); } if (tio_debug) printf("--- STARTING THREADED IO ---\n"); - serverAssert(io_threads_active == 0); + serverAssert(server.io_threads_active == 0); for (int j = 1; j < server.io_threads_num; j++) pthread_mutex_unlock(&io_threads_mutex[j]); - io_threads_active = 1; + server.io_threads_active = 1; } void stopThreadedIO(void) { @@ -3033,10 +3038,10 @@ void stopThreadedIO(void) { if (tio_debug) printf("--- STOPPING THREADED IO [R%d] [W%d] ---\n", (int) listLength(server.clients_pending_read), (int) listLength(server.clients_pending_write)); - serverAssert(io_threads_active == 1); + serverAssert(server.io_threads_active == 1); for (int j = 1; j < server.io_threads_num; j++) pthread_mutex_lock(&io_threads_mutex[j]); - io_threads_active = 0; + server.io_threads_active = 0; } /* This function checks if there are not enough pending clients to justify @@ -3055,7 +3060,7 @@ int stopThreadedIOIfNeeded(void) { if (server.io_threads_num == 1) return 1; if (pending < (server.io_threads_num*2)) { - if (io_threads_active) stopThreadedIO(); + if (server.io_threads_active) stopThreadedIO(); return 1; } else { return 0; @@ -3073,7 +3078,7 @@ int handleClientsWithPendingWritesUsingThreads(void) { } /* Start threads if needed. */ - if (!io_threads_active) startThreadedIO(); + if (!server.io_threads_active) startThreadedIO(); if (tio_debug) printf("%d TOTAL WRITE pending clients\n", processed); @@ -3130,6 +3135,10 @@ int handleClientsWithPendingWritesUsingThreads(void) { } } listEmpty(server.clients_pending_write); + + /* Update processed count on server */ + server.stat_io_writes_processed += processed; + return processed; } @@ -3138,7 +3147,7 @@ int handleClientsWithPendingWritesUsingThreads(void) { * As a side effect of calling this function the client is put in the * pending read clients and flagged as such. */ int postponeClientRead(client *c) { - if (io_threads_active && + if (server.io_threads_active && server.io_threads_do_reads && !ProcessingEventsWhileBlocked && !(c->flags & (CLIENT_MASTER|CLIENT_SLAVE|CLIENT_PENDING_READ))) @@ -3158,7 +3167,7 @@ int postponeClientRead(client *c) { * the reads in the buffers, and also parse the first command available * rendering it in the client structures. 
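 * The number of clients processed is also added to the new
 * server.stat_io_reads_processed counter introduced by this commit.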
*/ int handleClientsWithPendingReadsUsingThreads(void) { - if (!io_threads_active || !server.io_threads_do_reads) return 0; + if (!server.io_threads_active || !server.io_threads_do_reads) return 0; int processed = listLength(server.clients_pending_read); if (processed == 0) return 0; @@ -3219,5 +3228,9 @@ int handleClientsWithPendingReadsUsingThreads(void) { } processInputBuffer(c); } + + /* Update processed count on server */ + server.stat_io_reads_processed += processed; + return processed; } diff --git a/src/server.c b/src/server.c index 9c3d71d6b..3381356ea 100644 --- a/src/server.c +++ b/src/server.c @@ -2726,6 +2726,10 @@ void resetServerStats(void) { server.stat_sync_full = 0; server.stat_sync_partial_ok = 0; server.stat_sync_partial_err = 0; + server.stat_io_reads_processed = 0; + server.stat_total_reads_processed = 0; + server.stat_io_writes_processed = 0; + server.stat_total_writes_processed = 0; for (j = 0; j < STATS_METRIC_COUNT; j++) { server.inst_metric[j].idx = 0; server.inst_metric[j].last_sample_time = mstime(); @@ -4075,7 +4079,8 @@ sds genRedisInfoString(const char *section) { "configured_hz:%i\r\n" "lru_clock:%u\r\n" "executable:%s\r\n" - "config_file:%s\r\n", + "config_file:%s\r\n" + "io_threads_active:%d\r\n", REDIS_VERSION, redisGitSHA1(), strtol(redisGitDirty(),NULL,10) > 0, @@ -4099,7 +4104,8 @@ sds genRedisInfoString(const char *section) { server.config_hz, server.lruclock, server.executable ? server.executable : "", - server.configfile ? server.configfile : ""); + server.configfile ? server.configfile : "", + server.io_threads_active); } /* Clients */ @@ -4371,7 +4377,11 @@ sds genRedisInfoString(const char *section) { "tracking_total_keys:%lld\r\n" "tracking_total_items:%lld\r\n" "tracking_total_prefixes:%lld\r\n" - "unexpected_error_replies:%lld\r\n", + "unexpected_error_replies:%lld\r\n" + "total_reads_processed:%lld\r\n" + "total_writes_processed:%lld\r\n" + "io_threaded_reads_processed:%lld\r\n" + "io_threaded_writes_processed:%lld\r\n", server.stat_numconnections, server.stat_numcommands, getInstantaneousMetric(STATS_METRIC_COMMAND), @@ -4402,7 +4412,11 @@ sds genRedisInfoString(const char *section) { (unsigned long long) trackingGetTotalKeys(), (unsigned long long) trackingGetTotalItems(), (unsigned long long) trackingGetTotalPrefixes(), - server.stat_unexpected_error_replies); + server.stat_unexpected_error_replies, + server.stat_total_reads_processed, + server.stat_total_writes_processed, + server.stat_io_reads_processed, + server.stat_io_writes_processed); } /* Replication */ diff --git a/src/server.h b/src/server.h index e9b4777ef..2d8279264 100644 --- a/src/server.h +++ b/src/server.h @@ -1108,6 +1108,7 @@ struct redisServer { queries. Will still serve RESP2 queries. */ int io_threads_num; /* Number of IO threads to use. */ int io_threads_do_reads; /* Read and parse from IO threads? */ + int io_threads_active; /* Is IO threads currently active? */ long long events_processed_while_blocked; /* processEventsWhileBlocked() */ /* RDB / AOF loading information */ @@ -1157,6 +1158,10 @@ struct redisServer { size_t stat_module_cow_bytes; /* Copy on write bytes during module fork. */ uint64_t stat_clients_type_memory[CLIENT_TYPE_COUNT];/* Mem usage by type */ long long stat_unexpected_error_replies; /* Number of unexpected (aof-loading, replica to master, etc.) 
error replies */ + long long stat_io_reads_processed; /* Number of read events processed by IO / Main threads */ + long long stat_io_writes_processed; /* Number of write events processed by IO / Main threads */ + _Atomic long long stat_total_reads_processed; /* Total number of read events processed */ + _Atomic long long stat_total_writes_processed; /* Total number of write events processed */ /* The following two are used to track instantaneous metrics, like * number of operations per second, network traffic. */ struct { From e7ce996d8c4d49717d9bedc0eb3b5ee00b8fcc1b Mon Sep 17 00:00:00 2001 From: ShooterIT Date: Thu, 6 Aug 2020 15:36:28 +0800 Subject: [PATCH 122/377] [Redis-benchmark] Support zset type (cherry picked from commit e6c811cd851cc1b37a0b626458258a26cea0bab3) --- src/redis-benchmark.c | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/src/redis-benchmark.c b/src/redis-benchmark.c index 6385fa9b3..89c84c278 100644 --- a/src/redis-benchmark.c +++ b/src/redis-benchmark.c @@ -1733,6 +1733,22 @@ int main(int argc, const char **argv) { free(cmd); } + if (test_is_selected("zadd")) { + char *score = "0"; + if (config.randomkeys) score = "__rand_int__"; + len = redisFormatCommand(&cmd, + "ZADD myzset:{tag} %s element:__rand_int__",score); + benchmark("ZADD",cmd,len); + free(cmd); + } + + if (test_is_selected("zrem")) { + len = redisFormatCommand(&cmd, + "ZREM myzset:{tag} element:__rand_int__"); + benchmark("ZREM",cmd,len); + free(cmd); + } + if (test_is_selected("lrange") || test_is_selected("lrange_100") || test_is_selected("lrange_300") || From d8ddc737960924bc778c0b06dfcb359c0f893680 Mon Sep 17 00:00:00 2001 From: ShooterIT Date: Sat, 8 Aug 2020 23:08:27 +0800 Subject: [PATCH 123/377] [Redis-benchmark] Remove zrem test, add zpopmin test (cherry picked from commit 8925fac39568295b7b6b2dbde0768d7275e663e9) --- src/redis-benchmark.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/src/redis-benchmark.c b/src/redis-benchmark.c index 89c84c278..fc24afbff 100644 --- a/src/redis-benchmark.c +++ b/src/redis-benchmark.c @@ -1433,7 +1433,8 @@ usage: " --cluster Enable cluster mode.\n" " --enable-tracking Send CLIENT TRACKING on before starting benchmark.\n" " -k 1=keep alive 0=reconnect (default 1)\n" -" -r Use random keys for SET/GET/INCR, random values for SADD\n" +" -r Use random keys for SET/GET/INCR, random values for SADD,\n" +" random members and scores for ZADD.\n" " Using this option the benchmark will expand the string __rand_int__\n" " inside an argument with a 12 digits number in the specified range\n" " from 0 to keyspacelen-1. The substitution changes every time a command\n" @@ -1742,10 +1743,9 @@ int main(int argc, const char **argv) { free(cmd); } - if (test_is_selected("zrem")) { - len = redisFormatCommand(&cmd, - "ZREM myzset:{tag} element:__rand_int__"); - benchmark("ZREM",cmd,len); + if (test_is_selected("zpopmin")) { + len = redisFormatCommand(&cmd,"ZPOPMIN myzset:{tag}"); + benchmark("ZPOPMIN",cmd,len); free(cmd); } From 63e3f1e4494243937ce19cdce87edbc5aae0ea90 Mon Sep 17 00:00:00 2001 From: "Meir Shpilraien (Spielrein)" Date: Sun, 9 Aug 2020 06:11:47 +0300 Subject: [PATCH 124/377] see #7544, added RedisModule_HoldString api. (#7577) Added RedisModule_HoldString that either returns a shallow copy of the given String (by increasing the String ref count) or a new deep copy of String in case its not possible to get a shallow copy. 
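A minimal usage sketch (illustrative only; the callback and variable names
below are not part of this change): a module that wants to keep a key name
received in a notification callback can hold it with a NULL context, so that
automatic memory management never frees it behind its back:

    static RedisModuleString *last_loaded = NULL;

    static int OnKeyLoaded(RedisModuleCtx *ctx, int type, const char *event,
                           RedisModuleString *key) {
        REDISMODULE_NOT_USED(ctx);
        REDISMODULE_NOT_USED(type);
        REDISMODULE_NOT_USED(event);
        /* HoldString always succeeds where RedisModule_RetainString() could
         * trip its assertion: for strings that cannot be retained it falls
         * back to a deep copy. A NULL context keeps the result out of any
         * auto memory pool. */
        if (last_loaded) RedisModule_FreeString(NULL, last_loaded);
        last_loaded = RedisModule_HoldString(NULL, key);
        return REDISMODULE_OK;
    }

The held string can later be released with RedisModule_FreeString(NULL,
last_loaded), for instance from the module's OnUnload function.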
Co-authored-by: Itamar Haber (cherry picked from commit 4f99b22118ca91e3a7fe9c1c68c19dd717dfdbb5) --- src/module.c | 60 ++++++++++++++++++++++++ src/redismodule.h | 2 + tests/modules/keyspace_events.c | 17 ++++++- tests/unit/moduleapi/keyspace_events.tcl | 12 ++--- 4 files changed, 83 insertions(+), 8 deletions(-) diff --git a/src/module.c b/src/module.c index b15bb7276..9b44d5ecc 100644 --- a/src/module.c +++ b/src/module.c @@ -1140,6 +1140,65 @@ void RM_RetainString(RedisModuleCtx *ctx, RedisModuleString *str) { } } +/** +* This function can be used instead of RedisModule_RetainString(). +* The main difference between the two is that this function will always +* succeed, whereas RedisModule_RetainString() may fail because of an +* assertion. +* +* The function returns a pointer to RedisModuleString, which is owned +* by the caller. It requires a call to RedisModule_FreeString() to free +* the string when automatic memory management is disabled for the context. +* When automatic memory management is enabled, you can either call +* RedisModule_FreeString() or let the automation free it. +* +* This function is more efficient than RedisModule_CreateStringFromString() +* because whenever possible, it avoids copying the underlying +* RedisModuleString. The disadvantage of using this function is that it +* might not be possible to use RedisModule_StringAppendBuffer() on the +* returned RedisModuleString. +* +* It is possible to call this function with a NULL context. + */ +RedisModuleString* RM_HoldString(RedisModuleCtx *ctx, RedisModuleString *str) { + if (str->refcount == OBJ_STATIC_REFCOUNT) { + return RM_CreateStringFromString(ctx, str); + } + + incrRefCount(str); + if (ctx != NULL) { + /* + * Put the str in the auto memory management of the ctx. +         * It might already be there, in this case, the ref count will +         * be 2 and we will decrease the ref count twice and free the +         * object in the auto memory free function. +         * +         * Why we can not do the same trick of just remove the object +         * from the auto memory (like in RM_RetainString)? +         * This code shows the issue: +         * +         * RM_AutoMemory(ctx); +         * str1 = RM_CreateString(ctx, "test", 4); +         * str2 = RM_HoldString(ctx, str1); +         * RM_FreeString(str1); +         * RM_FreeString(str2); +         * +         * If after the RM_HoldString we would just remove the string from +         * the auto memory, this example will cause access to a freed memory +         * on 'RM_FreeString(str2);' because the String will be free +         * on 'RM_FreeString(str1);'. +         * +         * So it's safer to just increase the ref count +         * and add the String to auto memory again. +         * +         * The limitation is that it is not possible to use RedisModule_StringAppendBuffer +         * on the String. + */ + autoMemoryAdd(ctx,REDISMODULE_AM_STRING,str); + } + return str; +} + /* Given a string module object, this function returns the string pointer * and length of the string. The returned pointer and length should only * be used for read only accesses and never modified. 
*/ @@ -7830,6 +7889,7 @@ void moduleRegisterCoreAPI(void) { REGISTER_API(LatencyAddSample); REGISTER_API(StringAppendBuffer); REGISTER_API(RetainString); + REGISTER_API(HoldString); REGISTER_API(StringCompare); REGISTER_API(GetContextFromIO); REGISTER_API(GetKeyNameFromIO); diff --git a/src/redismodule.h b/src/redismodule.h index ffc679ebc..5f828b9e3 100644 --- a/src/redismodule.h +++ b/src/redismodule.h @@ -569,6 +569,7 @@ void REDISMODULE_API_FUNC(RedisModule__Assert)(const char *estr, const char *fil void REDISMODULE_API_FUNC(RedisModule_LatencyAddSample)(const char *event, mstime_t latency); int REDISMODULE_API_FUNC(RedisModule_StringAppendBuffer)(RedisModuleCtx *ctx, RedisModuleString *str, const char *buf, size_t len); void REDISMODULE_API_FUNC(RedisModule_RetainString)(RedisModuleCtx *ctx, RedisModuleString *str); +RedisModuleString* REDISMODULE_API_FUNC(RedisModule_HoldString)(RedisModuleCtx *ctx, RedisModuleString *str); int REDISMODULE_API_FUNC(RedisModule_StringCompare)(RedisModuleString *a, RedisModuleString *b); RedisModuleCtx *REDISMODULE_API_FUNC(RedisModule_GetContextFromIO)(RedisModuleIO *io); const RedisModuleString *REDISMODULE_API_FUNC(RedisModule_GetKeyNameFromIO)(RedisModuleIO *io); @@ -807,6 +808,7 @@ static int RedisModule_Init(RedisModuleCtx *ctx, const char *name, int ver, int REDISMODULE_GET_API(LatencyAddSample); REDISMODULE_GET_API(StringAppendBuffer); REDISMODULE_GET_API(RetainString); + REDISMODULE_GET_API(HoldString); REDISMODULE_GET_API(StringCompare); REDISMODULE_GET_API(GetContextFromIO); REDISMODULE_GET_API(GetKeyNameFromIO); diff --git a/tests/modules/keyspace_events.c b/tests/modules/keyspace_events.c index b2296c1cb..db3977be1 100644 --- a/tests/modules/keyspace_events.c +++ b/tests/modules/keyspace_events.c @@ -48,7 +48,7 @@ static int KeySpace_Notification(RedisModuleCtx *ctx, int type, const char *even int nokey; RedisModule_DictGetC(loaded_event_log, (void*)keyName, strlen(keyName), &nokey); if(nokey){ - RedisModule_DictSetC(loaded_event_log, (void*)keyName, strlen(keyName), NULL); + RedisModule_DictSetC(loaded_event_log, (void*)keyName, strlen(keyName), RedisModule_HoldString(ctx, key)); } } @@ -63,9 +63,15 @@ static int cmdIsKeyLoaded(RedisModuleCtx *ctx, RedisModuleString **argv, int arg const char* key = RedisModule_StringPtrLen(argv[1], NULL); int nokey; - RedisModule_DictGetC(loaded_event_log, (void*)key, strlen(key), &nokey); + RedisModuleString* keyStr = RedisModule_DictGetC(loaded_event_log, (void*)key, strlen(key), &nokey); + RedisModule_ReplyWithArray(ctx, 2); RedisModule_ReplyWithLongLong(ctx, !nokey); + if(nokey){ + RedisModule_ReplyWithNull(ctx); + }else{ + RedisModule_ReplyWithString(ctx, keyStr); + } return REDISMODULE_OK; } @@ -93,6 +99,13 @@ int RedisModule_OnLoad(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) } int RedisModule_OnUnload(RedisModuleCtx *ctx) { + RedisModuleDictIter *iter = RedisModule_DictIteratorStartC(loaded_event_log, "^", NULL, 0); + char* key; + size_t keyLen; + RedisModuleString* val; + while((key = RedisModule_DictNextC(iter, &keyLen, (void**)&val))){ + RedisModule_FreeString(ctx, val); + } RedisModule_FreeDict(ctx, loaded_event_log); loaded_event_log = NULL; return REDISMODULE_OK; diff --git a/tests/unit/moduleapi/keyspace_events.tcl b/tests/unit/moduleapi/keyspace_events.tcl index cb959ab52..5b3db0c0a 100644 --- a/tests/unit/moduleapi/keyspace_events.tcl +++ b/tests/unit/moduleapi/keyspace_events.tcl @@ -11,12 +11,12 @@ tags "modules" { r zadd t 1 f1 2 f2 r xadd s * f v r debug reload - 
assert_equal 1 [r keyspace.is_key_loaded x] - assert_equal 1 [r keyspace.is_key_loaded y] - assert_equal 1 [r keyspace.is_key_loaded z] - assert_equal 1 [r keyspace.is_key_loaded p] - assert_equal 1 [r keyspace.is_key_loaded t] - assert_equal 1 [r keyspace.is_key_loaded s] + assert_equal {1 x} [r keyspace.is_key_loaded x] + assert_equal {1 y} [r keyspace.is_key_loaded y] + assert_equal {1 z} [r keyspace.is_key_loaded z] + assert_equal {1 p} [r keyspace.is_key_loaded p] + assert_equal {1 t} [r keyspace.is_key_loaded t] + assert_equal {1 s} [r keyspace.is_key_loaded s] } } } \ No newline at end of file From f0e28abc07b5907e9a94eef70541629ac110b88f Mon Sep 17 00:00:00 2001 From: Yossi Gottlieb Date: Wed, 12 Aug 2020 17:58:56 +0300 Subject: [PATCH 125/377] Add oom-score-adj configuration option to control Linux OOM killer. (#1690) Add Linux kernel OOM killer control option. This adds the ability to control the Linux OOM killer oom_score_adj parameter for all Redis processes, depending on the process role (i.e. master, replica, background child). A oom-score-adj global boolean flag control this feature. In addition, specific values can be configured using oom-score-adj-values if additional tuning is required. (cherry picked from commit 70c823a64e800f22ac68f0172acdd1da82d7be32) --- redis.conf | 26 ++++++++ src/config.c | 120 ++++++++++++++++++++++++++++++++++- src/config.h | 1 + src/replication.c | 6 ++ src/server.c | 60 ++++++++++++++++++ src/server.h | 12 ++++ tests/test_helper.tcl | 1 + tests/unit/oom-score-adj.tcl | 81 +++++++++++++++++++++++ 8 files changed, 306 insertions(+), 1 deletion(-) create mode 100644 tests/unit/oom-score-adj.tcl diff --git a/redis.conf b/redis.conf index 565c24eca..f2e7f1964 100644 --- a/redis.conf +++ b/redis.conf @@ -1039,6 +1039,32 @@ lazyfree-lazy-user-del no # --threads option to match the number of Redis theads, otherwise you'll not # be able to notice the improvements. +############################ KERNEL OOM CONTROL ############################## + +# On Linux, it is possible to hint the kernel OOM killer on what processes +# should be killed first when out of memory. +# +# Enabling this feature makes Redis actively control the oom_score_adj value +# for all its processes, depending on their role. The default scores will +# attempt to have background child processes killed before all others, and +# replicas killed before masters. + +oom-score-adj no + +# When oom-score-adj is used, this directive controls the specific values used +# for master, replica and background child processes. Values range -1000 to +# 1000 (higher means more likely to be killed). +# +# Unprivileged processes (not root, and without CAP_SYS_RESOURCE capabilities) +# can freely increase their value, but not decrease it below its initial +# settings. +# +# Values are used relative to the initial value of oom_score_adj when the server +# starts. Because typically the initial value is 0, they will often match the +# absolute values. + +oom-score-adj-values 0 200 800 + ############################## APPEND ONLY MODE ############################### # By default Redis asynchronously dumps the dataset on disk. 
This mode is diff --git a/src/config.c b/src/config.c index b3c437d49..52acb527b 100644 --- a/src/config.c +++ b/src/config.c @@ -111,6 +111,9 @@ clientBufferLimitsConfig clientBufferLimitsDefaults[CLIENT_TYPE_OBUF_COUNT] = { {1024*1024*32, 1024*1024*8, 60} /* pubsub */ }; +/* OOM Score defaults */ +int configOOMScoreAdjValuesDefaults[CONFIG_OOM_COUNT] = { 0, 200, 800 }; + /* Generic config infrastructure function pointers * int is_valid_fn(val, err) * Return 1 when val is valid, and 0 when invalid. @@ -286,6 +289,59 @@ void queueLoadModule(sds path, sds *argv, int argc) { listAddNodeTail(server.loadmodule_queue,loadmod); } +/* Parse an array of CONFIG_OOM_COUNT sds strings, validate and populate + * server.oom_score_adj_values if valid. + */ + +static int updateOOMScoreAdjValues(sds *args, char **err) { + int i; + int values[CONFIG_OOM_COUNT]; + + for (i = 0; i < CONFIG_OOM_COUNT; i++) { + char *eptr; + long long val = strtoll(args[i], &eptr, 10); + + if (*eptr != '\0' || val < -1000 || val > 1000) { + if (err) *err = "Invalid oom-score-adj-values, elements must be between -1000 and 1000."; + return C_ERR; + } + + values[i] = val; + } + + /* Verify that the values make sense. If they don't omit a warning but + * keep the configuration, which may still be valid for privileged processes. + */ + + if (values[CONFIG_OOM_REPLICA] < values[CONFIG_OOM_MASTER] || + values[CONFIG_OOM_BGCHILD] < values[CONFIG_OOM_REPLICA]) { + serverLog(LOG_WARNING, + "The oom-score-adj-values configuration may not work for non-privileged processes! " + "Please consult the documentation."); + } + + /* Store values, retain previous config for rollback in case we fail. */ + int old_values[CONFIG_OOM_COUNT]; + for (i = 0; i < CONFIG_OOM_COUNT; i++) { + old_values[i] = server.oom_score_adj_values[i]; + server.oom_score_adj_values[i] = values[i]; + } + + /* Update */ + if (setOOMScoreAdj(-1) == C_ERR) { + /* Roll back */ + for (i = 0; i < CONFIG_OOM_COUNT; i++) + server.oom_score_adj_values[i] = old_values[i]; + + if (err) + *err = "Failed to apply oom-score-adj-values configuration, check server logs."; + + return C_ERR; + } + + return C_OK; +} + void initConfigValues() { for (standardConfig *config = configs; config->name != NULL; config++) { config->interface.init(config->data); @@ -479,6 +535,8 @@ void loadServerConfigFromString(char *config) { server.client_obuf_limits[class].hard_limit_bytes = hard; server.client_obuf_limits[class].soft_limit_bytes = soft; server.client_obuf_limits[class].soft_limit_seconds = soft_seconds; + } else if (!strcasecmp(argv[0],"oom-score-adj-values") && argc == 1 + CONFIG_OOM_COUNT) { + if (updateOOMScoreAdjValues(&argv[1], &err) == C_ERR) goto loaderr; } else if (!strcasecmp(argv[0],"notify-keyspace-events") && argc == 2) { int flags = keyspaceEventsStringToFlags(argv[1]); @@ -728,6 +786,17 @@ void configSetCommand(client *c) { server.client_obuf_limits[class].soft_limit_seconds = soft_seconds; } sdsfreesplitres(v,vlen); + } config_set_special_field("oom-score-adj-values") { + int vlen; + int success = 1; + + sds *v = sdssplitlen(o->ptr, sdslen(o->ptr), " ", 1, &vlen); + if (vlen != CONFIG_OOM_COUNT || updateOOMScoreAdjValues(v, &errstr) == C_ERR) + success = 0; + + sdsfreesplitres(v, vlen); + if (!success) + goto badfmt; } config_set_special_field("notify-keyspace-events") { int flags = keyspaceEventsStringToFlags(o->ptr); @@ -923,6 +992,22 @@ void configGetCommand(client *c) { matches++; } + if (stringmatch(pattern,"oom-score-adj-values",0)) { + sds buf = sdsempty(); + int j; + + 
for (j = 0; j < CONFIG_OOM_COUNT; j++) { + buf = sdscatprintf(buf,"%d", server.oom_score_adj_values[j]); + if (j != CONFIG_OOM_COUNT-1) + buf = sdscatlen(buf," ",1); + } + + addReplyBulkCString(c,"oom-score-adj-values"); + addReplyBulkCString(c,buf); + sdsfree(buf); + matches++; + } + setDeferredMapLen(c,replylen,matches); } @@ -1330,6 +1415,25 @@ void rewriteConfigClientoutputbufferlimitOption(struct rewriteConfigState *state } } +/* Rewrite the oom-score-adj-values option. */ +void rewriteConfigOOMScoreAdjValuesOption(struct rewriteConfigState *state) { + int force = 0; + int j; + char *option = "oom-score-adj-values"; + sds line; + + line = sdsempty(); + for (j = 0; j < CONFIG_OOM_COUNT; j++) { + if (server.oom_score_adj_values[j] != configOOMScoreAdjValuesDefaults[j]) + force = 1; + + line = sdscatprintf(line, "%d", server.oom_score_adj_values[j]); + if (j+1 != CONFIG_OOM_COUNT) + line = sdscatlen(line, " ", 1); + } + rewriteConfigRewriteLine(state,option,line,force); +} + /* Rewrite the bind option. */ void rewriteConfigBindOption(struct rewriteConfigState *state) { int force = 1; @@ -1528,6 +1632,7 @@ int rewriteConfig(char *path) { rewriteConfigStringOption(state,"cluster-config-file",server.cluster_configfile,CONFIG_DEFAULT_CLUSTER_CONFIG_FILE); rewriteConfigNotifykeyspaceeventsOption(state); rewriteConfigClientoutputbufferlimitOption(state); + rewriteConfigOOMScoreAdjValuesOption(state); /* Rewrite Sentinel config if in Sentinel mode. */ if (server.sentinel_mode) rewriteConfigSentinelOption(state); @@ -2072,6 +2177,19 @@ static int updateMaxclients(long long val, long long prev, char **err) { return 1; } +static int updateOOMScoreAdj(int val, int prev, char **err) { + UNUSED(prev); + + if (val) { + if (setOOMScoreAdj(-1) == C_ERR) { + *err = "Failed to set current oom_score_adj. Check server logs."; + return 0; + } + } + + return 1; +} + #ifdef USE_OPENSSL static int updateTlsCfg(char *val, char *prev, char **err) { UNUSED(val); @@ -2136,7 +2254,7 @@ standardConfig configs[] = { createBoolConfig("cluster-enabled", NULL, IMMUTABLE_CONFIG, server.cluster_enabled, 0, NULL, NULL), createBoolConfig("appendonly", NULL, MODIFIABLE_CONFIG, server.aof_enabled, 0, NULL, updateAppendonly), createBoolConfig("cluster-allow-reads-when-down", NULL, MODIFIABLE_CONFIG, server.cluster_allow_reads_when_down, 0, NULL, NULL), - + createBoolConfig("oom-score-adj", NULL, MODIFIABLE_CONFIG, server.oom_score_adj, 0, NULL, updateOOMScoreAdj), /* String Configs */ createStringConfig("aclfile", NULL, IMMUTABLE_CONFIG, ALLOW_EMPTY_STRING, server.acl_filename, "", NULL, NULL), diff --git a/src/config.h b/src/config.h index 0fcc42972..e807b9330 100644 --- a/src/config.h +++ b/src/config.h @@ -54,6 +54,7 @@ #define HAVE_PROC_MAPS 1 #define HAVE_PROC_SMAPS 1 #define HAVE_PROC_SOMAXCONN 1 +#define HAVE_PROC_OOM_SCORE_ADJ 1 #endif /* Test for task_info() */ diff --git a/src/replication.c b/src/replication.c index a8f46be95..8f4ad2c92 100644 --- a/src/replication.c +++ b/src/replication.c @@ -2483,6 +2483,9 @@ void replicationSetMaster(char *ip, int port) { } disconnectAllBlockedClients(); /* Clients blocked in master, now slave. */ + /* Update oom_score_adj */ + setOOMScoreAdj(-1); + /* Force our slaves to resync with us as well. They may hopefully be able * to partially resync with us, but we can notify the replid change. */ disconnectSlaves(); @@ -2545,6 +2548,9 @@ void replicationUnsetMaster(void) { * master switch. 
*/ server.slaveseldb = -1; + /* Update oom_score_adj */ + setOOMScoreAdj(-1); + /* Once we turn from slave to master, we consider the starting time without * slaves (that is used to count the replication backlog time to live) as * starting from now. Otherwise the backlog will be freed after a diff --git a/src/server.c b/src/server.c index 3381356ea..5a986006e 100644 --- a/src/server.c +++ b/src/server.c @@ -2410,6 +2410,10 @@ void initServerConfig(void) { for (j = 0; j < CLIENT_TYPE_OBUF_COUNT; j++) server.client_obuf_limits[j] = clientBufferLimitsDefaults[j]; + /* Linux OOM Score config */ + for (j = 0; j < CONFIG_OOM_COUNT; j++) + server.oom_score_adj_values[j] = configOOMScoreAdjValuesDefaults[j]; + /* Double constants initialization */ R_Zero = 0.0; R_PosInf = 1.0/R_Zero; @@ -2519,6 +2523,58 @@ int restartServer(int flags, mstime_t delay) { return C_ERR; /* Never reached. */ } +static void readOOMScoreAdj(void) { +#ifdef HAVE_PROC_OOM_SCORE_ADJ + char buf[64]; + int fd = open("/proc/self/oom_score_adj", O_RDONLY); + + if (fd < 0) return; + if (read(fd, buf, sizeof(buf)) > 0) + server.oom_score_adj_base = atoi(buf); + close(fd); +#endif +} + +/* This function will configure the current process's oom_score_adj according + * to user specified configuration. This is currently implemented on Linux + * only. + * + * A process_class value of -1 implies OOM_CONFIG_MASTER or OOM_CONFIG_REPLICA, + * depending on current role. + */ +int setOOMScoreAdj(int process_class) { + int fd; + int val; + char buf[64]; + + if (!server.oom_score_adj) return C_OK; + if (process_class == -1) + process_class = (server.masterhost ? CONFIG_OOM_REPLICA : CONFIG_OOM_MASTER); + + serverAssert(process_class >= 0 && process_class < CONFIG_OOM_COUNT); + +#ifdef HAVE_PROC_OOM_SCORE_ADJ + val = server.oom_score_adj_base + server.oom_score_adj_values[process_class]; + if (val > 1000) val = 1000; + if (val < -1000) val = -1000; + + snprintf(buf, sizeof(buf) - 1, "%d\n", val); + + fd = open("/proc/self/oom_score_adj", O_WRONLY); + if (fd < 0 || write(fd, buf, strlen(buf)) < 0) { + serverLog(LOG_WARNING, "Unable to write oom_score_adj: %s", strerror(errno)); + if (fd != -1) close(fd); + return C_ERR; + } + + close(fd); + return C_OK; +#else + /* Unsupported */ + return C_ERR; +#endif +} + /* This function will try to raise the max number of open files accordingly to * the configured max number of clients. It also reserves a number of file * descriptors (CONFIG_MIN_RESERVED_FDS) for extra operations of @@ -4861,6 +4917,7 @@ int redisFork() { long long start = ustime(); if ((childpid = fork()) == 0) { /* Child */ + setOOMScoreAdj(CONFIG_OOM_BGCHILD); setupChildSignalHandlers(); closeClildUnusedResourceAfterFork(); } else { @@ -5194,6 +5251,7 @@ int main(int argc, char **argv) { server.supervised = redisIsSupervised(server.supervised_mode); int background = server.daemonize && !server.supervised; if (background) daemonize(); + readOOMScoreAdj(); initServer(); if (background || server.pidfile) createPidFile(); @@ -5246,6 +5304,8 @@ int main(int argc, char **argv) { } redisSetCpuAffinity(server.server_cpulist); + setOOMScoreAdj(-1); + aeMain(server.el); aeDeleteEventLoop(server.el); return 0; diff --git a/src/server.h b/src/server.h index 2d8279264..c42955b94 100644 --- a/src/server.h +++ b/src/server.h @@ -150,6 +150,14 @@ typedef long long ustime_t; /* microsecond time type. */ * in order to make sure of not over provisioning more than 128 fds. 
*/ #define CONFIG_FDSET_INCR (CONFIG_MIN_RESERVED_FDS+96) +/* OOM Score Adjustment classes. */ +#define CONFIG_OOM_MASTER 0 +#define CONFIG_OOM_REPLICA 1 +#define CONFIG_OOM_BGCHILD 2 +#define CONFIG_OOM_COUNT 3 + +extern int configOOMScoreAdjValuesDefaults[CONFIG_OOM_COUNT]; + /* Hash table parameters */ #define HASHTABLE_MIN_FILL 10 /* Minimal hash table fill 10% */ @@ -1345,6 +1353,9 @@ struct redisServer { int lfu_log_factor; /* LFU logarithmic counter factor. */ int lfu_decay_time; /* LFU counter decay factor. */ long long proto_max_bulk_len; /* Protocol bulk length maximum size. */ + int oom_score_adj_base; /* Base oom_score_adj value, as observed on startup */ + int oom_score_adj_values[CONFIG_OOM_COUNT]; /* Linux oom_score_adj configuration */ + int oom_score_adj; /* If true, oom_score_adj is managed */ /* Blocked clients */ unsigned int blocked_clients; /* # of clients executing a blocking cmd.*/ unsigned int blocked_clients_by_type[BLOCKED_NUM]; @@ -2014,6 +2025,7 @@ const char *evictPolicyToString(void); struct redisMemOverhead *getMemoryOverheadData(void); void freeMemoryOverheadData(struct redisMemOverhead *mh); void checkChildrenDone(void); +int setOOMScoreAdj(int process_class); #define RESTART_SERVER_NONE 0 #define RESTART_SERVER_GRACEFULLY (1<<0) /* Do proper shutdown. */ diff --git a/tests/test_helper.tcl b/tests/test_helper.tcl index 7ce0d545e..d0f962762 100644 --- a/tests/test_helper.tcl +++ b/tests/test_helper.tcl @@ -68,6 +68,7 @@ set ::all_tests { unit/pendingquerybuf unit/tls unit/tracking + unit/oom-score-adj } # Index to the next test to run in the ::all_tests list. set ::next_test 0 diff --git a/tests/unit/oom-score-adj.tcl b/tests/unit/oom-score-adj.tcl new file mode 100644 index 000000000..894a70fb2 --- /dev/null +++ b/tests/unit/oom-score-adj.tcl @@ -0,0 +1,81 @@ +set system_name [string tolower [exec uname -s]] +set user_id [exec id -u] + +if {$system_name eq {linux}} { + start_server {tags {"oom-score-adj"}} { + proc get_oom_score_adj {{pid ""}} { + if {$pid == ""} { + set pid [srv 0 pid] + } + set fd [open "/proc/$pid/oom_score_adj" "r"] + set val [gets $fd] + close $fd + + return $val + } + + proc get_child_pid {} { + set pid [srv 0 pid] + set fd [open "|ps --ppid $pid -o pid -h" "r"] + set child_pid [string trim [read $fd]] + close $fd + + return $child_pid + } + + test {CONFIG SET oom-score-adj works as expected} { + set base [get_oom_score_adj] + + # Enable oom-score-adj, check defaults + r config set oom-score-adj-values "10 20 30" + r config set oom-score-adj yes + + assert {[get_oom_score_adj] == [expr $base + 10]} + + # Modify current class + r config set oom-score-adj-values "15 20 30" + assert {[get_oom_score_adj] == [expr $base + 15]} + + # Check replica class + r replicaof localhost 1 + assert {[get_oom_score_adj] == [expr $base + 20]} + r replicaof no one + assert {[get_oom_score_adj] == [expr $base + 15]} + + # Check child process + r set key-a value-a + r config set rdb-key-save-delay 100000 + r bgsave + + set child_pid [get_child_pid] + assert {[get_oom_score_adj $child_pid] == [expr $base + 30]} + } + + # Failed oom-score-adj tests can only run unprivileged + if {$user_id != 0} { + test {CONFIG SET oom-score-adj handles configuration failures} { + # Bad config + r config set oom-score-adj no + r config set oom-score-adj-values "-1000 -1000 -1000" + + # Make sure it fails + catch {r config set oom-score-adj yes} e + assert_match {*Failed to set*} $e + + # Make sure it remains off + assert {[r config get oom-score-adj] == "oom-score-adj no"} 
+ + # Fix config + r config set oom-score-adj-values "0 100 100" + r config set oom-score-adj yes + + # Make sure it fails + catch {r config set oom-score-adj-values "-1000 -1000 -1000"} e + assert_match {*Failed*} $e + + # Make sure previous values remain + assert {[r config get oom-score-adj-values] == {oom-score-adj-values {0 100 100}}} + } + } + } +} From 7b04660be750572fefe22391daac27d59f51ed5a Mon Sep 17 00:00:00 2001 From: Nathan Scott Date: Fri, 14 Aug 2020 21:45:34 +1000 Subject: [PATCH 126/377] Annotate module API functions in redismodule.h for use with -fno-common (#6900) In order to keep the redismodule.h self-contained but still usable with gcc v10 and later, annotate each API function tentative definition with the __common__ attribute. This avoids the 'multiple definition' errors modules will otherwise see for all API functions at link time. Further details at gcc.gnu.org/gcc-10/porting_to.html Turn the existing __attribute__ ((unused)), ((__common__)) and ((print)) annotations into conditional macros for any compilers not accepting this syntax. These macros only expand to API annotations under gcc. Provide a pre- and post- macro for every API function, so that they can be defined differently by the file that includes redismodule.h. Removing REDISMODULE_API_FUNC in the interest of keeping the function declarations readable. Co-authored-by: Yossi Gottlieb Co-authored-by: Oran Agra (cherry picked from commit 9d4736b04441b609c17c414e0780882cf92c5e33) --- src/redismodule.h | 506 ++++++++++++++++++++++++---------------------- 1 file changed, 265 insertions(+), 241 deletions(-) diff --git a/src/redismodule.h b/src/redismodule.h index 5f828b9e3..460fdd480 100644 --- a/src/redismodule.h +++ b/src/redismodule.h @@ -380,6 +380,31 @@ typedef struct RedisModuleLoadingProgressInfo { typedef long long mstime_t; +/* Macro definitions specific to individual compilers */ +#ifndef REDISMODULE_ATTR_UNUSED +# ifdef __GNUC__ +# define REDISMODULE_ATTR_UNUSED __attribute__((unused)) +# else +# define REDISMODULE_ATTR_UNUSED +# endif +#endif + +#ifndef REDISMODULE_ATTR_PRINTF +# ifdef __GNUC__ +# define REDISMODULE_ATTR_PRINTF(idx,cnt) __attribute__((format(printf,idx,cnt))) +# else +# define REDISMODULE_ATTR_PRINTF(idx,cnt) +# endif +#endif + +#ifndef REDISMODULE_ATTR_COMMON +# if defined(__GNUC__) && !defined(__clang__) +# define REDISMODULE_ATTR_COMMON __attribute__((__common__)) +# else +# define REDISMODULE_ATTR_COMMON +# endif +#endif + /* Incomplete structures for compiler checks but opaque access. 
*/ typedef struct RedisModuleCtx RedisModuleCtx; typedef struct RedisModuleKey RedisModuleKey; @@ -436,257 +461,256 @@ typedef struct RedisModuleTypeMethods { #define REDISMODULE_GET_API(name) \ RedisModule_GetApi("RedisModule_" #name, ((void **)&RedisModule_ ## name)) -#define REDISMODULE_API_FUNC(x) (*x) - - -void *REDISMODULE_API_FUNC(RedisModule_Alloc)(size_t bytes); -void *REDISMODULE_API_FUNC(RedisModule_Realloc)(void *ptr, size_t bytes); -void REDISMODULE_API_FUNC(RedisModule_Free)(void *ptr); -void *REDISMODULE_API_FUNC(RedisModule_Calloc)(size_t nmemb, size_t size); -char *REDISMODULE_API_FUNC(RedisModule_Strdup)(const char *str); -int REDISMODULE_API_FUNC(RedisModule_GetApi)(const char *, void *); -int REDISMODULE_API_FUNC(RedisModule_CreateCommand)(RedisModuleCtx *ctx, const char *name, RedisModuleCmdFunc cmdfunc, const char *strflags, int firstkey, int lastkey, int keystep); -void REDISMODULE_API_FUNC(RedisModule_SetModuleAttribs)(RedisModuleCtx *ctx, const char *name, int ver, int apiver); -int REDISMODULE_API_FUNC(RedisModule_IsModuleNameBusy)(const char *name); -int REDISMODULE_API_FUNC(RedisModule_WrongArity)(RedisModuleCtx *ctx); -int REDISMODULE_API_FUNC(RedisModule_ReplyWithLongLong)(RedisModuleCtx *ctx, long long ll); -int REDISMODULE_API_FUNC(RedisModule_GetSelectedDb)(RedisModuleCtx *ctx); -int REDISMODULE_API_FUNC(RedisModule_SelectDb)(RedisModuleCtx *ctx, int newid); -void *REDISMODULE_API_FUNC(RedisModule_OpenKey)(RedisModuleCtx *ctx, RedisModuleString *keyname, int mode); -void REDISMODULE_API_FUNC(RedisModule_CloseKey)(RedisModuleKey *kp); -int REDISMODULE_API_FUNC(RedisModule_KeyType)(RedisModuleKey *kp); -size_t REDISMODULE_API_FUNC(RedisModule_ValueLength)(RedisModuleKey *kp); -int REDISMODULE_API_FUNC(RedisModule_ListPush)(RedisModuleKey *kp, int where, RedisModuleString *ele); -RedisModuleString *REDISMODULE_API_FUNC(RedisModule_ListPop)(RedisModuleKey *key, int where); -RedisModuleCallReply *REDISMODULE_API_FUNC(RedisModule_Call)(RedisModuleCtx *ctx, const char *cmdname, const char *fmt, ...); -const char *REDISMODULE_API_FUNC(RedisModule_CallReplyProto)(RedisModuleCallReply *reply, size_t *len); -void REDISMODULE_API_FUNC(RedisModule_FreeCallReply)(RedisModuleCallReply *reply); -int REDISMODULE_API_FUNC(RedisModule_CallReplyType)(RedisModuleCallReply *reply); -long long REDISMODULE_API_FUNC(RedisModule_CallReplyInteger)(RedisModuleCallReply *reply); -size_t REDISMODULE_API_FUNC(RedisModule_CallReplyLength)(RedisModuleCallReply *reply); -RedisModuleCallReply *REDISMODULE_API_FUNC(RedisModule_CallReplyArrayElement)(RedisModuleCallReply *reply, size_t idx); -RedisModuleString *REDISMODULE_API_FUNC(RedisModule_CreateString)(RedisModuleCtx *ctx, const char *ptr, size_t len); -RedisModuleString *REDISMODULE_API_FUNC(RedisModule_CreateStringFromLongLong)(RedisModuleCtx *ctx, long long ll); -RedisModuleString *REDISMODULE_API_FUNC(RedisModule_CreateStringFromDouble)(RedisModuleCtx *ctx, double d); -RedisModuleString *REDISMODULE_API_FUNC(RedisModule_CreateStringFromLongDouble)(RedisModuleCtx *ctx, long double ld, int humanfriendly); -RedisModuleString *REDISMODULE_API_FUNC(RedisModule_CreateStringFromString)(RedisModuleCtx *ctx, const RedisModuleString *str); -#ifdef __GNUC__ -RedisModuleString *REDISMODULE_API_FUNC(RedisModule_CreateStringPrintf)(RedisModuleCtx *ctx, const char *fmt, ...) 
__attribute__ ((format (printf, 2, 3))); -#else -RedisModuleString *REDISMODULE_API_FUNC(RedisModule_CreateStringPrintf)(RedisModuleCtx *ctx, const char *fmt, ...); +/* Default API declaration prefix (not 'extern' for backwards compatibility) */ +#ifndef REDISMODULE_API +#define REDISMODULE_API #endif -void REDISMODULE_API_FUNC(RedisModule_FreeString)(RedisModuleCtx *ctx, RedisModuleString *str); -const char *REDISMODULE_API_FUNC(RedisModule_StringPtrLen)(const RedisModuleString *str, size_t *len); -int REDISMODULE_API_FUNC(RedisModule_ReplyWithError)(RedisModuleCtx *ctx, const char *err); -int REDISMODULE_API_FUNC(RedisModule_ReplyWithSimpleString)(RedisModuleCtx *ctx, const char *msg); -int REDISMODULE_API_FUNC(RedisModule_ReplyWithArray)(RedisModuleCtx *ctx, long len); -int REDISMODULE_API_FUNC(RedisModule_ReplyWithNullArray)(RedisModuleCtx *ctx); -int REDISMODULE_API_FUNC(RedisModule_ReplyWithEmptyArray)(RedisModuleCtx *ctx); -void REDISMODULE_API_FUNC(RedisModule_ReplySetArrayLength)(RedisModuleCtx *ctx, long len); -int REDISMODULE_API_FUNC(RedisModule_ReplyWithStringBuffer)(RedisModuleCtx *ctx, const char *buf, size_t len); -int REDISMODULE_API_FUNC(RedisModule_ReplyWithCString)(RedisModuleCtx *ctx, const char *buf); -int REDISMODULE_API_FUNC(RedisModule_ReplyWithString)(RedisModuleCtx *ctx, RedisModuleString *str); -int REDISMODULE_API_FUNC(RedisModule_ReplyWithEmptyString)(RedisModuleCtx *ctx); -int REDISMODULE_API_FUNC(RedisModule_ReplyWithVerbatimString)(RedisModuleCtx *ctx, const char *buf, size_t len); -int REDISMODULE_API_FUNC(RedisModule_ReplyWithNull)(RedisModuleCtx *ctx); -int REDISMODULE_API_FUNC(RedisModule_ReplyWithDouble)(RedisModuleCtx *ctx, double d); -int REDISMODULE_API_FUNC(RedisModule_ReplyWithLongDouble)(RedisModuleCtx *ctx, long double d); -int REDISMODULE_API_FUNC(RedisModule_ReplyWithCallReply)(RedisModuleCtx *ctx, RedisModuleCallReply *reply); -int REDISMODULE_API_FUNC(RedisModule_StringToLongLong)(const RedisModuleString *str, long long *ll); -int REDISMODULE_API_FUNC(RedisModule_StringToDouble)(const RedisModuleString *str, double *d); -int REDISMODULE_API_FUNC(RedisModule_StringToLongDouble)(const RedisModuleString *str, long double *d); -void REDISMODULE_API_FUNC(RedisModule_AutoMemory)(RedisModuleCtx *ctx); -int REDISMODULE_API_FUNC(RedisModule_Replicate)(RedisModuleCtx *ctx, const char *cmdname, const char *fmt, ...); -int REDISMODULE_API_FUNC(RedisModule_ReplicateVerbatim)(RedisModuleCtx *ctx); -const char *REDISMODULE_API_FUNC(RedisModule_CallReplyStringPtr)(RedisModuleCallReply *reply, size_t *len); -RedisModuleString *REDISMODULE_API_FUNC(RedisModule_CreateStringFromCallReply)(RedisModuleCallReply *reply); -int REDISMODULE_API_FUNC(RedisModule_DeleteKey)(RedisModuleKey *key); -int REDISMODULE_API_FUNC(RedisModule_UnlinkKey)(RedisModuleKey *key); -int REDISMODULE_API_FUNC(RedisModule_StringSet)(RedisModuleKey *key, RedisModuleString *str); -char *REDISMODULE_API_FUNC(RedisModule_StringDMA)(RedisModuleKey *key, size_t *len, int mode); -int REDISMODULE_API_FUNC(RedisModule_StringTruncate)(RedisModuleKey *key, size_t newlen); -mstime_t REDISMODULE_API_FUNC(RedisModule_GetExpire)(RedisModuleKey *key); -int REDISMODULE_API_FUNC(RedisModule_SetExpire)(RedisModuleKey *key, mstime_t expire); -void REDISMODULE_API_FUNC(RedisModule_ResetDataset)(int restart_aof, int async); -unsigned long long REDISMODULE_API_FUNC(RedisModule_DbSize)(RedisModuleCtx *ctx); -RedisModuleString *REDISMODULE_API_FUNC(RedisModule_RandomKey)(RedisModuleCtx *ctx); -int 
REDISMODULE_API_FUNC(RedisModule_ZsetAdd)(RedisModuleKey *key, double score, RedisModuleString *ele, int *flagsptr); -int REDISMODULE_API_FUNC(RedisModule_ZsetIncrby)(RedisModuleKey *key, double score, RedisModuleString *ele, int *flagsptr, double *newscore); -int REDISMODULE_API_FUNC(RedisModule_ZsetScore)(RedisModuleKey *key, RedisModuleString *ele, double *score); -int REDISMODULE_API_FUNC(RedisModule_ZsetRem)(RedisModuleKey *key, RedisModuleString *ele, int *deleted); -void REDISMODULE_API_FUNC(RedisModule_ZsetRangeStop)(RedisModuleKey *key); -int REDISMODULE_API_FUNC(RedisModule_ZsetFirstInScoreRange)(RedisModuleKey *key, double min, double max, int minex, int maxex); -int REDISMODULE_API_FUNC(RedisModule_ZsetLastInScoreRange)(RedisModuleKey *key, double min, double max, int minex, int maxex); -int REDISMODULE_API_FUNC(RedisModule_ZsetFirstInLexRange)(RedisModuleKey *key, RedisModuleString *min, RedisModuleString *max); -int REDISMODULE_API_FUNC(RedisModule_ZsetLastInLexRange)(RedisModuleKey *key, RedisModuleString *min, RedisModuleString *max); -RedisModuleString *REDISMODULE_API_FUNC(RedisModule_ZsetRangeCurrentElement)(RedisModuleKey *key, double *score); -int REDISMODULE_API_FUNC(RedisModule_ZsetRangeNext)(RedisModuleKey *key); -int REDISMODULE_API_FUNC(RedisModule_ZsetRangePrev)(RedisModuleKey *key); -int REDISMODULE_API_FUNC(RedisModule_ZsetRangeEndReached)(RedisModuleKey *key); -int REDISMODULE_API_FUNC(RedisModule_HashSet)(RedisModuleKey *key, int flags, ...); -int REDISMODULE_API_FUNC(RedisModule_HashGet)(RedisModuleKey *key, int flags, ...); -int REDISMODULE_API_FUNC(RedisModule_IsKeysPositionRequest)(RedisModuleCtx *ctx); -void REDISMODULE_API_FUNC(RedisModule_KeyAtPos)(RedisModuleCtx *ctx, int pos); -unsigned long long REDISMODULE_API_FUNC(RedisModule_GetClientId)(RedisModuleCtx *ctx); -int REDISMODULE_API_FUNC(RedisModule_GetClientInfoById)(void *ci, uint64_t id); -int REDISMODULE_API_FUNC(RedisModule_PublishMessage)(RedisModuleCtx *ctx, RedisModuleString *channel, RedisModuleString *message); -int REDISMODULE_API_FUNC(RedisModule_GetContextFlags)(RedisModuleCtx *ctx); -int REDISMODULE_API_FUNC(RedisModule_AvoidReplicaTraffic)(); -void *REDISMODULE_API_FUNC(RedisModule_PoolAlloc)(RedisModuleCtx *ctx, size_t bytes); -RedisModuleType *REDISMODULE_API_FUNC(RedisModule_CreateDataType)(RedisModuleCtx *ctx, const char *name, int encver, RedisModuleTypeMethods *typemethods); -int REDISMODULE_API_FUNC(RedisModule_ModuleTypeSetValue)(RedisModuleKey *key, RedisModuleType *mt, void *value); -int REDISMODULE_API_FUNC(RedisModule_ModuleTypeReplaceValue)(RedisModuleKey *key, RedisModuleType *mt, void *new_value, void **old_value); -RedisModuleType *REDISMODULE_API_FUNC(RedisModule_ModuleTypeGetType)(RedisModuleKey *key); -void *REDISMODULE_API_FUNC(RedisModule_ModuleTypeGetValue)(RedisModuleKey *key); -int REDISMODULE_API_FUNC(RedisModule_IsIOError)(RedisModuleIO *io); -void REDISMODULE_API_FUNC(RedisModule_SetModuleOptions)(RedisModuleCtx *ctx, int options); -int REDISMODULE_API_FUNC(RedisModule_SignalModifiedKey)(RedisModuleCtx *ctx, RedisModuleString *keyname); -void REDISMODULE_API_FUNC(RedisModule_SaveUnsigned)(RedisModuleIO *io, uint64_t value); -uint64_t REDISMODULE_API_FUNC(RedisModule_LoadUnsigned)(RedisModuleIO *io); -void REDISMODULE_API_FUNC(RedisModule_SaveSigned)(RedisModuleIO *io, int64_t value); -int64_t REDISMODULE_API_FUNC(RedisModule_LoadSigned)(RedisModuleIO *io); -void REDISMODULE_API_FUNC(RedisModule_EmitAOF)(RedisModuleIO *io, const char *cmdname, const char 
*fmt, ...); -void REDISMODULE_API_FUNC(RedisModule_SaveString)(RedisModuleIO *io, RedisModuleString *s); -void REDISMODULE_API_FUNC(RedisModule_SaveStringBuffer)(RedisModuleIO *io, const char *str, size_t len); -RedisModuleString *REDISMODULE_API_FUNC(RedisModule_LoadString)(RedisModuleIO *io); -char *REDISMODULE_API_FUNC(RedisModule_LoadStringBuffer)(RedisModuleIO *io, size_t *lenptr); -void REDISMODULE_API_FUNC(RedisModule_SaveDouble)(RedisModuleIO *io, double value); -double REDISMODULE_API_FUNC(RedisModule_LoadDouble)(RedisModuleIO *io); -void REDISMODULE_API_FUNC(RedisModule_SaveFloat)(RedisModuleIO *io, float value); -float REDISMODULE_API_FUNC(RedisModule_LoadFloat)(RedisModuleIO *io); -void REDISMODULE_API_FUNC(RedisModule_SaveLongDouble)(RedisModuleIO *io, long double value); -long double REDISMODULE_API_FUNC(RedisModule_LoadLongDouble)(RedisModuleIO *io); -void *REDISMODULE_API_FUNC(RedisModule_LoadDataTypeFromString)(const RedisModuleString *str, const RedisModuleType *mt); -RedisModuleString *REDISMODULE_API_FUNC(RedisModule_SaveDataTypeToString)(RedisModuleCtx *ctx, void *data, const RedisModuleType *mt); -#ifdef __GNUC__ -void REDISMODULE_API_FUNC(RedisModule_Log)(RedisModuleCtx *ctx, const char *level, const char *fmt, ...) __attribute__ ((format (printf, 3, 4))); -void REDISMODULE_API_FUNC(RedisModule_LogIOError)(RedisModuleIO *io, const char *levelstr, const char *fmt, ...) __attribute__ ((format (printf, 3, 4))); -#else -void REDISMODULE_API_FUNC(RedisModule_Log)(RedisModuleCtx *ctx, const char *level, const char *fmt, ...); -void REDISMODULE_API_FUNC(RedisModule_LogIOError)(RedisModuleIO *io, const char *levelstr, const char *fmt, ...); + +/* Default API declaration suffix (compiler attributes) */ +#ifndef REDISMODULE_ATTR +#define REDISMODULE_ATTR REDISMODULE_ATTR_COMMON #endif -void REDISMODULE_API_FUNC(RedisModule__Assert)(const char *estr, const char *file, int line); -void REDISMODULE_API_FUNC(RedisModule_LatencyAddSample)(const char *event, mstime_t latency); -int REDISMODULE_API_FUNC(RedisModule_StringAppendBuffer)(RedisModuleCtx *ctx, RedisModuleString *str, const char *buf, size_t len); -void REDISMODULE_API_FUNC(RedisModule_RetainString)(RedisModuleCtx *ctx, RedisModuleString *str); -RedisModuleString* REDISMODULE_API_FUNC(RedisModule_HoldString)(RedisModuleCtx *ctx, RedisModuleString *str); -int REDISMODULE_API_FUNC(RedisModule_StringCompare)(RedisModuleString *a, RedisModuleString *b); -RedisModuleCtx *REDISMODULE_API_FUNC(RedisModule_GetContextFromIO)(RedisModuleIO *io); -const RedisModuleString *REDISMODULE_API_FUNC(RedisModule_GetKeyNameFromIO)(RedisModuleIO *io); -const RedisModuleString *REDISMODULE_API_FUNC(RedisModule_GetKeyNameFromModuleKey)(RedisModuleKey *key); -long long REDISMODULE_API_FUNC(RedisModule_Milliseconds)(void); -void REDISMODULE_API_FUNC(RedisModule_DigestAddStringBuffer)(RedisModuleDigest *md, unsigned char *ele, size_t len); -void REDISMODULE_API_FUNC(RedisModule_DigestAddLongLong)(RedisModuleDigest *md, long long ele); -void REDISMODULE_API_FUNC(RedisModule_DigestEndSequence)(RedisModuleDigest *md); -RedisModuleDict *REDISMODULE_API_FUNC(RedisModule_CreateDict)(RedisModuleCtx *ctx); -void REDISMODULE_API_FUNC(RedisModule_FreeDict)(RedisModuleCtx *ctx, RedisModuleDict *d); -uint64_t REDISMODULE_API_FUNC(RedisModule_DictSize)(RedisModuleDict *d); -int REDISMODULE_API_FUNC(RedisModule_DictSetC)(RedisModuleDict *d, void *key, size_t keylen, void *ptr); -int REDISMODULE_API_FUNC(RedisModule_DictReplaceC)(RedisModuleDict *d, void *key, 
size_t keylen, void *ptr); -int REDISMODULE_API_FUNC(RedisModule_DictSet)(RedisModuleDict *d, RedisModuleString *key, void *ptr); -int REDISMODULE_API_FUNC(RedisModule_DictReplace)(RedisModuleDict *d, RedisModuleString *key, void *ptr); -void *REDISMODULE_API_FUNC(RedisModule_DictGetC)(RedisModuleDict *d, void *key, size_t keylen, int *nokey); -void *REDISMODULE_API_FUNC(RedisModule_DictGet)(RedisModuleDict *d, RedisModuleString *key, int *nokey); -int REDISMODULE_API_FUNC(RedisModule_DictDelC)(RedisModuleDict *d, void *key, size_t keylen, void *oldval); -int REDISMODULE_API_FUNC(RedisModule_DictDel)(RedisModuleDict *d, RedisModuleString *key, void *oldval); -RedisModuleDictIter *REDISMODULE_API_FUNC(RedisModule_DictIteratorStartC)(RedisModuleDict *d, const char *op, void *key, size_t keylen); -RedisModuleDictIter *REDISMODULE_API_FUNC(RedisModule_DictIteratorStart)(RedisModuleDict *d, const char *op, RedisModuleString *key); -void REDISMODULE_API_FUNC(RedisModule_DictIteratorStop)(RedisModuleDictIter *di); -int REDISMODULE_API_FUNC(RedisModule_DictIteratorReseekC)(RedisModuleDictIter *di, const char *op, void *key, size_t keylen); -int REDISMODULE_API_FUNC(RedisModule_DictIteratorReseek)(RedisModuleDictIter *di, const char *op, RedisModuleString *key); -void *REDISMODULE_API_FUNC(RedisModule_DictNextC)(RedisModuleDictIter *di, size_t *keylen, void **dataptr); -void *REDISMODULE_API_FUNC(RedisModule_DictPrevC)(RedisModuleDictIter *di, size_t *keylen, void **dataptr); -RedisModuleString *REDISMODULE_API_FUNC(RedisModule_DictNext)(RedisModuleCtx *ctx, RedisModuleDictIter *di, void **dataptr); -RedisModuleString *REDISMODULE_API_FUNC(RedisModule_DictPrev)(RedisModuleCtx *ctx, RedisModuleDictIter *di, void **dataptr); -int REDISMODULE_API_FUNC(RedisModule_DictCompareC)(RedisModuleDictIter *di, const char *op, void *key, size_t keylen); -int REDISMODULE_API_FUNC(RedisModule_DictCompare)(RedisModuleDictIter *di, const char *op, RedisModuleString *key); -int REDISMODULE_API_FUNC(RedisModule_RegisterInfoFunc)(RedisModuleCtx *ctx, RedisModuleInfoFunc cb); -int REDISMODULE_API_FUNC(RedisModule_InfoAddSection)(RedisModuleInfoCtx *ctx, char *name); -int REDISMODULE_API_FUNC(RedisModule_InfoBeginDictField)(RedisModuleInfoCtx *ctx, char *name); -int REDISMODULE_API_FUNC(RedisModule_InfoEndDictField)(RedisModuleInfoCtx *ctx); -int REDISMODULE_API_FUNC(RedisModule_InfoAddFieldString)(RedisModuleInfoCtx *ctx, char *field, RedisModuleString *value); -int REDISMODULE_API_FUNC(RedisModule_InfoAddFieldCString)(RedisModuleInfoCtx *ctx, char *field, char *value); -int REDISMODULE_API_FUNC(RedisModule_InfoAddFieldDouble)(RedisModuleInfoCtx *ctx, char *field, double value); -int REDISMODULE_API_FUNC(RedisModule_InfoAddFieldLongLong)(RedisModuleInfoCtx *ctx, char *field, long long value); -int REDISMODULE_API_FUNC(RedisModule_InfoAddFieldULongLong)(RedisModuleInfoCtx *ctx, char *field, unsigned long long value); -RedisModuleServerInfoData *REDISMODULE_API_FUNC(RedisModule_GetServerInfo)(RedisModuleCtx *ctx, const char *section); -void REDISMODULE_API_FUNC(RedisModule_FreeServerInfo)(RedisModuleCtx *ctx, RedisModuleServerInfoData *data); -RedisModuleString *REDISMODULE_API_FUNC(RedisModule_ServerInfoGetField)(RedisModuleCtx *ctx, RedisModuleServerInfoData *data, const char* field); -const char *REDISMODULE_API_FUNC(RedisModule_ServerInfoGetFieldC)(RedisModuleServerInfoData *data, const char* field); -long long REDISMODULE_API_FUNC(RedisModule_ServerInfoGetFieldSigned)(RedisModuleServerInfoData *data, const char* 
field, int *out_err); -unsigned long long REDISMODULE_API_FUNC(RedisModule_ServerInfoGetFieldUnsigned)(RedisModuleServerInfoData *data, const char* field, int *out_err); -double REDISMODULE_API_FUNC(RedisModule_ServerInfoGetFieldDouble)(RedisModuleServerInfoData *data, const char* field, int *out_err); -int REDISMODULE_API_FUNC(RedisModule_SubscribeToServerEvent)(RedisModuleCtx *ctx, RedisModuleEvent event, RedisModuleEventCallback callback); -int REDISMODULE_API_FUNC(RedisModule_SetLRU)(RedisModuleKey *key, mstime_t lru_idle); -int REDISMODULE_API_FUNC(RedisModule_GetLRU)(RedisModuleKey *key, mstime_t *lru_idle); -int REDISMODULE_API_FUNC(RedisModule_SetLFU)(RedisModuleKey *key, long long lfu_freq); -int REDISMODULE_API_FUNC(RedisModule_GetLFU)(RedisModuleKey *key, long long *lfu_freq); -RedisModuleBlockedClient *REDISMODULE_API_FUNC(RedisModule_BlockClientOnKeys)(RedisModuleCtx *ctx, RedisModuleCmdFunc reply_callback, RedisModuleCmdFunc timeout_callback, void (*free_privdata)(RedisModuleCtx*,void*), long long timeout_ms, RedisModuleString **keys, int numkeys, void *privdata); -void REDISMODULE_API_FUNC(RedisModule_SignalKeyAsReady)(RedisModuleCtx *ctx, RedisModuleString *key); -RedisModuleString *REDISMODULE_API_FUNC(RedisModule_GetBlockedClientReadyKey)(RedisModuleCtx *ctx); -RedisModuleScanCursor *REDISMODULE_API_FUNC(RedisModule_ScanCursorCreate)(); -void REDISMODULE_API_FUNC(RedisModule_ScanCursorRestart)(RedisModuleScanCursor *cursor); -void REDISMODULE_API_FUNC(RedisModule_ScanCursorDestroy)(RedisModuleScanCursor *cursor); -int REDISMODULE_API_FUNC(RedisModule_Scan)(RedisModuleCtx *ctx, RedisModuleScanCursor *cursor, RedisModuleScanCB fn, void *privdata); -int REDISMODULE_API_FUNC(RedisModule_ScanKey)(RedisModuleKey *key, RedisModuleScanCursor *cursor, RedisModuleScanKeyCB fn, void *privdata); + +REDISMODULE_API void * (*RedisModule_Alloc)(size_t bytes) REDISMODULE_ATTR; +REDISMODULE_API void * (*RedisModule_Realloc)(void *ptr, size_t bytes) REDISMODULE_ATTR; +REDISMODULE_API void (*RedisModule_Free)(void *ptr) REDISMODULE_ATTR; +REDISMODULE_API void * (*RedisModule_Calloc)(size_t nmemb, size_t size) REDISMODULE_ATTR; +REDISMODULE_API char * (*RedisModule_Strdup)(const char *str) REDISMODULE_ATTR; +REDISMODULE_API int (*RedisModule_GetApi)(const char *, void *) REDISMODULE_ATTR; +REDISMODULE_API int (*RedisModule_CreateCommand)(RedisModuleCtx *ctx, const char *name, RedisModuleCmdFunc cmdfunc, const char *strflags, int firstkey, int lastkey, int keystep) REDISMODULE_ATTR; +REDISMODULE_API void (*RedisModule_SetModuleAttribs)(RedisModuleCtx *ctx, const char *name, int ver, int apiver) REDISMODULE_ATTR; +REDISMODULE_API int (*RedisModule_IsModuleNameBusy)(const char *name) REDISMODULE_ATTR; +REDISMODULE_API int (*RedisModule_WrongArity)(RedisModuleCtx *ctx) REDISMODULE_ATTR; +REDISMODULE_API int (*RedisModule_ReplyWithLongLong)(RedisModuleCtx *ctx, long long ll) REDISMODULE_ATTR; +REDISMODULE_API int (*RedisModule_GetSelectedDb)(RedisModuleCtx *ctx) REDISMODULE_ATTR; +REDISMODULE_API int (*RedisModule_SelectDb)(RedisModuleCtx *ctx, int newid) REDISMODULE_ATTR; +REDISMODULE_API void * (*RedisModule_OpenKey)(RedisModuleCtx *ctx, RedisModuleString *keyname, int mode) REDISMODULE_ATTR; +REDISMODULE_API void (*RedisModule_CloseKey)(RedisModuleKey *kp) REDISMODULE_ATTR; +REDISMODULE_API int (*RedisModule_KeyType)(RedisModuleKey *kp) REDISMODULE_ATTR; +REDISMODULE_API size_t (*RedisModule_ValueLength)(RedisModuleKey *kp) REDISMODULE_ATTR; +REDISMODULE_API int 
(*RedisModule_ListPush)(RedisModuleKey *kp, int where, RedisModuleString *ele) REDISMODULE_ATTR; +REDISMODULE_API RedisModuleString * (*RedisModule_ListPop)(RedisModuleKey *key, int where) REDISMODULE_ATTR; +REDISMODULE_API RedisModuleCallReply * (*RedisModule_Call)(RedisModuleCtx *ctx, const char *cmdname, const char *fmt, ...) REDISMODULE_ATTR; +REDISMODULE_API const char * (*RedisModule_CallReplyProto)(RedisModuleCallReply *reply, size_t *len) REDISMODULE_ATTR; +REDISMODULE_API void (*RedisModule_FreeCallReply)(RedisModuleCallReply *reply) REDISMODULE_ATTR; +REDISMODULE_API int (*RedisModule_CallReplyType)(RedisModuleCallReply *reply) REDISMODULE_ATTR; +REDISMODULE_API long long (*RedisModule_CallReplyInteger)(RedisModuleCallReply *reply) REDISMODULE_ATTR; +REDISMODULE_API size_t (*RedisModule_CallReplyLength)(RedisModuleCallReply *reply) REDISMODULE_ATTR; +REDISMODULE_API RedisModuleCallReply * (*RedisModule_CallReplyArrayElement)(RedisModuleCallReply *reply, size_t idx) REDISMODULE_ATTR; +REDISMODULE_API RedisModuleString * (*RedisModule_CreateString)(RedisModuleCtx *ctx, const char *ptr, size_t len) REDISMODULE_ATTR; +REDISMODULE_API RedisModuleString * (*RedisModule_CreateStringFromLongLong)(RedisModuleCtx *ctx, long long ll) REDISMODULE_ATTR; +REDISMODULE_API RedisModuleString * (*RedisModule_CreateStringFromDouble)(RedisModuleCtx *ctx, double d) REDISMODULE_ATTR; +REDISMODULE_API RedisModuleString * (*RedisModule_CreateStringFromLongDouble)(RedisModuleCtx *ctx, long double ld, int humanfriendly) REDISMODULE_ATTR; +REDISMODULE_API RedisModuleString * (*RedisModule_CreateStringFromString)(RedisModuleCtx *ctx, const RedisModuleString *str) REDISMODULE_ATTR; +REDISMODULE_API RedisModuleString * (*RedisModule_CreateStringPrintf)(RedisModuleCtx *ctx, const char *fmt, ...) 
REDISMODULE_ATTR_PRINTF(2,3) REDISMODULE_ATTR; +REDISMODULE_API void (*RedisModule_FreeString)(RedisModuleCtx *ctx, RedisModuleString *str) REDISMODULE_ATTR; +REDISMODULE_API const char * (*RedisModule_StringPtrLen)(const RedisModuleString *str, size_t *len) REDISMODULE_ATTR; +REDISMODULE_API int (*RedisModule_ReplyWithError)(RedisModuleCtx *ctx, const char *err) REDISMODULE_ATTR; +REDISMODULE_API int (*RedisModule_ReplyWithSimpleString)(RedisModuleCtx *ctx, const char *msg) REDISMODULE_ATTR; +REDISMODULE_API int (*RedisModule_ReplyWithArray)(RedisModuleCtx *ctx, long len) REDISMODULE_ATTR; +REDISMODULE_API int (*RedisModule_ReplyWithNullArray)(RedisModuleCtx *ctx) REDISMODULE_ATTR; +REDISMODULE_API int (*RedisModule_ReplyWithEmptyArray)(RedisModuleCtx *ctx) REDISMODULE_ATTR; +REDISMODULE_API void (*RedisModule_ReplySetArrayLength)(RedisModuleCtx *ctx, long len) REDISMODULE_ATTR; +REDISMODULE_API int (*RedisModule_ReplyWithStringBuffer)(RedisModuleCtx *ctx, const char *buf, size_t len) REDISMODULE_ATTR; +REDISMODULE_API int (*RedisModule_ReplyWithCString)(RedisModuleCtx *ctx, const char *buf) REDISMODULE_ATTR; +REDISMODULE_API int (*RedisModule_ReplyWithString)(RedisModuleCtx *ctx, RedisModuleString *str) REDISMODULE_ATTR; +REDISMODULE_API int (*RedisModule_ReplyWithEmptyString)(RedisModuleCtx *ctx) REDISMODULE_ATTR; +REDISMODULE_API int (*RedisModule_ReplyWithVerbatimString)(RedisModuleCtx *ctx, const char *buf, size_t len) REDISMODULE_ATTR; +REDISMODULE_API int (*RedisModule_ReplyWithNull)(RedisModuleCtx *ctx) REDISMODULE_ATTR; +REDISMODULE_API int (*RedisModule_ReplyWithDouble)(RedisModuleCtx *ctx, double d) REDISMODULE_ATTR; +REDISMODULE_API int (*RedisModule_ReplyWithLongDouble)(RedisModuleCtx *ctx, long double d) REDISMODULE_ATTR; +REDISMODULE_API int (*RedisModule_ReplyWithCallReply)(RedisModuleCtx *ctx, RedisModuleCallReply *reply) REDISMODULE_ATTR; +REDISMODULE_API int (*RedisModule_StringToLongLong)(const RedisModuleString *str, long long *ll) REDISMODULE_ATTR; +REDISMODULE_API int (*RedisModule_StringToDouble)(const RedisModuleString *str, double *d) REDISMODULE_ATTR; +REDISMODULE_API int (*RedisModule_StringToLongDouble)(const RedisModuleString *str, long double *d) REDISMODULE_ATTR; +REDISMODULE_API void (*RedisModule_AutoMemory)(RedisModuleCtx *ctx) REDISMODULE_ATTR; +REDISMODULE_API int (*RedisModule_Replicate)(RedisModuleCtx *ctx, const char *cmdname, const char *fmt, ...) 
REDISMODULE_ATTR; +REDISMODULE_API int (*RedisModule_ReplicateVerbatim)(RedisModuleCtx *ctx) REDISMODULE_ATTR; +REDISMODULE_API const char * (*RedisModule_CallReplyStringPtr)(RedisModuleCallReply *reply, size_t *len) REDISMODULE_ATTR; +REDISMODULE_API RedisModuleString * (*RedisModule_CreateStringFromCallReply)(RedisModuleCallReply *reply) REDISMODULE_ATTR; +REDISMODULE_API int (*RedisModule_DeleteKey)(RedisModuleKey *key) REDISMODULE_ATTR; +REDISMODULE_API int (*RedisModule_UnlinkKey)(RedisModuleKey *key) REDISMODULE_ATTR; +REDISMODULE_API int (*RedisModule_StringSet)(RedisModuleKey *key, RedisModuleString *str) REDISMODULE_ATTR; +REDISMODULE_API char * (*RedisModule_StringDMA)(RedisModuleKey *key, size_t *len, int mode) REDISMODULE_ATTR; +REDISMODULE_API int (*RedisModule_StringTruncate)(RedisModuleKey *key, size_t newlen) REDISMODULE_ATTR; +REDISMODULE_API mstime_t (*RedisModule_GetExpire)(RedisModuleKey *key) REDISMODULE_ATTR; +REDISMODULE_API int (*RedisModule_SetExpire)(RedisModuleKey *key, mstime_t expire) REDISMODULE_ATTR; +REDISMODULE_API void (*RedisModule_ResetDataset)(int restart_aof, int async) REDISMODULE_ATTR; +REDISMODULE_API unsigned long long (*RedisModule_DbSize)(RedisModuleCtx *ctx) REDISMODULE_ATTR; +REDISMODULE_API RedisModuleString * (*RedisModule_RandomKey)(RedisModuleCtx *ctx) REDISMODULE_ATTR; +REDISMODULE_API int (*RedisModule_ZsetAdd)(RedisModuleKey *key, double score, RedisModuleString *ele, int *flagsptr) REDISMODULE_ATTR; +REDISMODULE_API int (*RedisModule_ZsetIncrby)(RedisModuleKey *key, double score, RedisModuleString *ele, int *flagsptr, double *newscore) REDISMODULE_ATTR; +REDISMODULE_API int (*RedisModule_ZsetScore)(RedisModuleKey *key, RedisModuleString *ele, double *score) REDISMODULE_ATTR; +REDISMODULE_API int (*RedisModule_ZsetRem)(RedisModuleKey *key, RedisModuleString *ele, int *deleted) REDISMODULE_ATTR; +REDISMODULE_API void (*RedisModule_ZsetRangeStop)(RedisModuleKey *key) REDISMODULE_ATTR; +REDISMODULE_API int (*RedisModule_ZsetFirstInScoreRange)(RedisModuleKey *key, double min, double max, int minex, int maxex) REDISMODULE_ATTR; +REDISMODULE_API int (*RedisModule_ZsetLastInScoreRange)(RedisModuleKey *key, double min, double max, int minex, int maxex) REDISMODULE_ATTR; +REDISMODULE_API int (*RedisModule_ZsetFirstInLexRange)(RedisModuleKey *key, RedisModuleString *min, RedisModuleString *max) REDISMODULE_ATTR; +REDISMODULE_API int (*RedisModule_ZsetLastInLexRange)(RedisModuleKey *key, RedisModuleString *min, RedisModuleString *max) REDISMODULE_ATTR; +REDISMODULE_API RedisModuleString * (*RedisModule_ZsetRangeCurrentElement)(RedisModuleKey *key, double *score) REDISMODULE_ATTR; +REDISMODULE_API int (*RedisModule_ZsetRangeNext)(RedisModuleKey *key) REDISMODULE_ATTR; +REDISMODULE_API int (*RedisModule_ZsetRangePrev)(RedisModuleKey *key) REDISMODULE_ATTR; +REDISMODULE_API int (*RedisModule_ZsetRangeEndReached)(RedisModuleKey *key) REDISMODULE_ATTR; +REDISMODULE_API int (*RedisModule_HashSet)(RedisModuleKey *key, int flags, ...) REDISMODULE_ATTR; +REDISMODULE_API int (*RedisModule_HashGet)(RedisModuleKey *key, int flags, ...) 
REDISMODULE_ATTR; +REDISMODULE_API int (*RedisModule_IsKeysPositionRequest)(RedisModuleCtx *ctx) REDISMODULE_ATTR; +REDISMODULE_API void (*RedisModule_KeyAtPos)(RedisModuleCtx *ctx, int pos) REDISMODULE_ATTR; +REDISMODULE_API unsigned long long (*RedisModule_GetClientId)(RedisModuleCtx *ctx) REDISMODULE_ATTR; +REDISMODULE_API int (*RedisModule_GetClientInfoById)(void *ci, uint64_t id) REDISMODULE_ATTR; +REDISMODULE_API int (*RedisModule_PublishMessage)(RedisModuleCtx *ctx, RedisModuleString *channel, RedisModuleString *message) REDISMODULE_ATTR; +REDISMODULE_API int (*RedisModule_GetContextFlags)(RedisModuleCtx *ctx) REDISMODULE_ATTR; +REDISMODULE_API int (*RedisModule_AvoidReplicaTraffic)() REDISMODULE_ATTR; +REDISMODULE_API void * (*RedisModule_PoolAlloc)(RedisModuleCtx *ctx, size_t bytes) REDISMODULE_ATTR; +REDISMODULE_API RedisModuleType * (*RedisModule_CreateDataType)(RedisModuleCtx *ctx, const char *name, int encver, RedisModuleTypeMethods *typemethods) REDISMODULE_ATTR; +REDISMODULE_API int (*RedisModule_ModuleTypeSetValue)(RedisModuleKey *key, RedisModuleType *mt, void *value) REDISMODULE_ATTR; +REDISMODULE_API int (*RedisModule_ModuleTypeReplaceValue)(RedisModuleKey *key, RedisModuleType *mt, void *new_value, void **old_value) REDISMODULE_ATTR; +REDISMODULE_API RedisModuleType * (*RedisModule_ModuleTypeGetType)(RedisModuleKey *key) REDISMODULE_ATTR; +REDISMODULE_API void * (*RedisModule_ModuleTypeGetValue)(RedisModuleKey *key) REDISMODULE_ATTR; +REDISMODULE_API int (*RedisModule_IsIOError)(RedisModuleIO *io) REDISMODULE_ATTR; +REDISMODULE_API void (*RedisModule_SetModuleOptions)(RedisModuleCtx *ctx, int options) REDISMODULE_ATTR; +REDISMODULE_API int (*RedisModule_SignalModifiedKey)(RedisModuleCtx *ctx, RedisModuleString *keyname) REDISMODULE_ATTR; +REDISMODULE_API void (*RedisModule_SaveUnsigned)(RedisModuleIO *io, uint64_t value) REDISMODULE_ATTR; +REDISMODULE_API uint64_t (*RedisModule_LoadUnsigned)(RedisModuleIO *io) REDISMODULE_ATTR; +REDISMODULE_API void (*RedisModule_SaveSigned)(RedisModuleIO *io, int64_t value) REDISMODULE_ATTR; +REDISMODULE_API int64_t (*RedisModule_LoadSigned)(RedisModuleIO *io) REDISMODULE_ATTR; +REDISMODULE_API void (*RedisModule_EmitAOF)(RedisModuleIO *io, const char *cmdname, const char *fmt, ...) 
REDISMODULE_ATTR; +REDISMODULE_API void (*RedisModule_SaveString)(RedisModuleIO *io, RedisModuleString *s) REDISMODULE_ATTR; +REDISMODULE_API void (*RedisModule_SaveStringBuffer)(RedisModuleIO *io, const char *str, size_t len) REDISMODULE_ATTR; +REDISMODULE_API RedisModuleString * (*RedisModule_LoadString)(RedisModuleIO *io) REDISMODULE_ATTR; +REDISMODULE_API char * (*RedisModule_LoadStringBuffer)(RedisModuleIO *io, size_t *lenptr) REDISMODULE_ATTR; +REDISMODULE_API void (*RedisModule_SaveDouble)(RedisModuleIO *io, double value) REDISMODULE_ATTR; +REDISMODULE_API double (*RedisModule_LoadDouble)(RedisModuleIO *io) REDISMODULE_ATTR; +REDISMODULE_API void (*RedisModule_SaveFloat)(RedisModuleIO *io, float value) REDISMODULE_ATTR; +REDISMODULE_API float (*RedisModule_LoadFloat)(RedisModuleIO *io) REDISMODULE_ATTR; +REDISMODULE_API void (*RedisModule_SaveLongDouble)(RedisModuleIO *io, long double value) REDISMODULE_ATTR; +REDISMODULE_API long double (*RedisModule_LoadLongDouble)(RedisModuleIO *io) REDISMODULE_ATTR; +REDISMODULE_API void * (*RedisModule_LoadDataTypeFromString)(const RedisModuleString *str, const RedisModuleType *mt) REDISMODULE_ATTR; +REDISMODULE_API RedisModuleString * (*RedisModule_SaveDataTypeToString)(RedisModuleCtx *ctx, void *data, const RedisModuleType *mt) REDISMODULE_ATTR; +REDISMODULE_API void (*RedisModule_Log)(RedisModuleCtx *ctx, const char *level, const char *fmt, ...) REDISMODULE_ATTR REDISMODULE_ATTR_PRINTF(3,4); +REDISMODULE_API void (*RedisModule_LogIOError)(RedisModuleIO *io, const char *levelstr, const char *fmt, ...) REDISMODULE_ATTR REDISMODULE_ATTR_PRINTF(3,4); +REDISMODULE_API void (*RedisModule__Assert)(const char *estr, const char *file, int line) REDISMODULE_ATTR; +REDISMODULE_API void (*RedisModule_LatencyAddSample)(const char *event, mstime_t latency) REDISMODULE_ATTR; +REDISMODULE_API int (*RedisModule_StringAppendBuffer)(RedisModuleCtx *ctx, RedisModuleString *str, const char *buf, size_t len) REDISMODULE_ATTR; +REDISMODULE_API void (*RedisModule_RetainString)(RedisModuleCtx *ctx, RedisModuleString *str) REDISMODULE_ATTR; +REDISMODULE_API RedisModuleString * (*RedisModule_HoldString)(RedisModuleCtx *ctx, RedisModuleString *str) REDISMODULE_ATTR; +REDISMODULE_API int (*RedisModule_StringCompare)(RedisModuleString *a, RedisModuleString *b) REDISMODULE_ATTR; +REDISMODULE_API RedisModuleCtx * (*RedisModule_GetContextFromIO)(RedisModuleIO *io) REDISMODULE_ATTR; +REDISMODULE_API const RedisModuleString * (*RedisModule_GetKeyNameFromIO)(RedisModuleIO *io) REDISMODULE_ATTR; +REDISMODULE_API const RedisModuleString * (*RedisModule_GetKeyNameFromModuleKey)(RedisModuleKey *key) REDISMODULE_ATTR; +REDISMODULE_API long long (*RedisModule_Milliseconds)(void) REDISMODULE_ATTR; +REDISMODULE_API void (*RedisModule_DigestAddStringBuffer)(RedisModuleDigest *md, unsigned char *ele, size_t len) REDISMODULE_ATTR; +REDISMODULE_API void (*RedisModule_DigestAddLongLong)(RedisModuleDigest *md, long long ele) REDISMODULE_ATTR; +REDISMODULE_API void (*RedisModule_DigestEndSequence)(RedisModuleDigest *md) REDISMODULE_ATTR; +REDISMODULE_API RedisModuleDict * (*RedisModule_CreateDict)(RedisModuleCtx *ctx) REDISMODULE_ATTR; +REDISMODULE_API void (*RedisModule_FreeDict)(RedisModuleCtx *ctx, RedisModuleDict *d) REDISMODULE_ATTR; +REDISMODULE_API uint64_t (*RedisModule_DictSize)(RedisModuleDict *d) REDISMODULE_ATTR; +REDISMODULE_API int (*RedisModule_DictSetC)(RedisModuleDict *d, void *key, size_t keylen, void *ptr) REDISMODULE_ATTR; +REDISMODULE_API int 
(*RedisModule_DictReplaceC)(RedisModuleDict *d, void *key, size_t keylen, void *ptr) REDISMODULE_ATTR; +REDISMODULE_API int (*RedisModule_DictSet)(RedisModuleDict *d, RedisModuleString *key, void *ptr) REDISMODULE_ATTR; +REDISMODULE_API int (*RedisModule_DictReplace)(RedisModuleDict *d, RedisModuleString *key, void *ptr) REDISMODULE_ATTR; +REDISMODULE_API void * (*RedisModule_DictGetC)(RedisModuleDict *d, void *key, size_t keylen, int *nokey) REDISMODULE_ATTR; +REDISMODULE_API void * (*RedisModule_DictGet)(RedisModuleDict *d, RedisModuleString *key, int *nokey) REDISMODULE_ATTR; +REDISMODULE_API int (*RedisModule_DictDelC)(RedisModuleDict *d, void *key, size_t keylen, void *oldval) REDISMODULE_ATTR; +REDISMODULE_API int (*RedisModule_DictDel)(RedisModuleDict *d, RedisModuleString *key, void *oldval) REDISMODULE_ATTR; +REDISMODULE_API RedisModuleDictIter * (*RedisModule_DictIteratorStartC)(RedisModuleDict *d, const char *op, void *key, size_t keylen) REDISMODULE_ATTR; +REDISMODULE_API RedisModuleDictIter * (*RedisModule_DictIteratorStart)(RedisModuleDict *d, const char *op, RedisModuleString *key) REDISMODULE_ATTR; +REDISMODULE_API void (*RedisModule_DictIteratorStop)(RedisModuleDictIter *di) REDISMODULE_ATTR; +REDISMODULE_API int (*RedisModule_DictIteratorReseekC)(RedisModuleDictIter *di, const char *op, void *key, size_t keylen) REDISMODULE_ATTR; +REDISMODULE_API int (*RedisModule_DictIteratorReseek)(RedisModuleDictIter *di, const char *op, RedisModuleString *key) REDISMODULE_ATTR; +REDISMODULE_API void * (*RedisModule_DictNextC)(RedisModuleDictIter *di, size_t *keylen, void **dataptr) REDISMODULE_ATTR; +REDISMODULE_API void * (*RedisModule_DictPrevC)(RedisModuleDictIter *di, size_t *keylen, void **dataptr) REDISMODULE_ATTR; +REDISMODULE_API RedisModuleString * (*RedisModule_DictNext)(RedisModuleCtx *ctx, RedisModuleDictIter *di, void **dataptr) REDISMODULE_ATTR; +REDISMODULE_API RedisModuleString * (*RedisModule_DictPrev)(RedisModuleCtx *ctx, RedisModuleDictIter *di, void **dataptr) REDISMODULE_ATTR; +REDISMODULE_API int (*RedisModule_DictCompareC)(RedisModuleDictIter *di, const char *op, void *key, size_t keylen) REDISMODULE_ATTR; +REDISMODULE_API int (*RedisModule_DictCompare)(RedisModuleDictIter *di, const char *op, RedisModuleString *key) REDISMODULE_ATTR; +REDISMODULE_API int (*RedisModule_RegisterInfoFunc)(RedisModuleCtx *ctx, RedisModuleInfoFunc cb) REDISMODULE_ATTR; +REDISMODULE_API int (*RedisModule_InfoAddSection)(RedisModuleInfoCtx *ctx, char *name) REDISMODULE_ATTR; +REDISMODULE_API int (*RedisModule_InfoBeginDictField)(RedisModuleInfoCtx *ctx, char *name) REDISMODULE_ATTR; +REDISMODULE_API int (*RedisModule_InfoEndDictField)(RedisModuleInfoCtx *ctx) REDISMODULE_ATTR; +REDISMODULE_API int (*RedisModule_InfoAddFieldString)(RedisModuleInfoCtx *ctx, char *field, RedisModuleString *value) REDISMODULE_ATTR; +REDISMODULE_API int (*RedisModule_InfoAddFieldCString)(RedisModuleInfoCtx *ctx, char *field, char *value) REDISMODULE_ATTR; +REDISMODULE_API int (*RedisModule_InfoAddFieldDouble)(RedisModuleInfoCtx *ctx, char *field, double value) REDISMODULE_ATTR; +REDISMODULE_API int (*RedisModule_InfoAddFieldLongLong)(RedisModuleInfoCtx *ctx, char *field, long long value) REDISMODULE_ATTR; +REDISMODULE_API int (*RedisModule_InfoAddFieldULongLong)(RedisModuleInfoCtx *ctx, char *field, unsigned long long value) REDISMODULE_ATTR; +REDISMODULE_API RedisModuleServerInfoData * (*RedisModule_GetServerInfo)(RedisModuleCtx *ctx, const char *section) REDISMODULE_ATTR; +REDISMODULE_API void 
(*RedisModule_FreeServerInfo)(RedisModuleCtx *ctx, RedisModuleServerInfoData *data) REDISMODULE_ATTR; +REDISMODULE_API RedisModuleString * (*RedisModule_ServerInfoGetField)(RedisModuleCtx *ctx, RedisModuleServerInfoData *data, const char* field) REDISMODULE_ATTR; +REDISMODULE_API const char * (*RedisModule_ServerInfoGetFieldC)(RedisModuleServerInfoData *data, const char* field) REDISMODULE_ATTR; +REDISMODULE_API long long (*RedisModule_ServerInfoGetFieldSigned)(RedisModuleServerInfoData *data, const char* field, int *out_err) REDISMODULE_ATTR; +REDISMODULE_API unsigned long long (*RedisModule_ServerInfoGetFieldUnsigned)(RedisModuleServerInfoData *data, const char* field, int *out_err) REDISMODULE_ATTR; +REDISMODULE_API double (*RedisModule_ServerInfoGetFieldDouble)(RedisModuleServerInfoData *data, const char* field, int *out_err) REDISMODULE_ATTR; +REDISMODULE_API int (*RedisModule_SubscribeToServerEvent)(RedisModuleCtx *ctx, RedisModuleEvent event, RedisModuleEventCallback callback) REDISMODULE_ATTR; +REDISMODULE_API int (*RedisModule_SetLRU)(RedisModuleKey *key, mstime_t lru_idle) REDISMODULE_ATTR; +REDISMODULE_API int (*RedisModule_GetLRU)(RedisModuleKey *key, mstime_t *lru_idle) REDISMODULE_ATTR; +REDISMODULE_API int (*RedisModule_SetLFU)(RedisModuleKey *key, long long lfu_freq) REDISMODULE_ATTR; +REDISMODULE_API int (*RedisModule_GetLFU)(RedisModuleKey *key, long long *lfu_freq) REDISMODULE_ATTR; +REDISMODULE_API RedisModuleBlockedClient * (*RedisModule_BlockClientOnKeys)(RedisModuleCtx *ctx, RedisModuleCmdFunc reply_callback, RedisModuleCmdFunc timeout_callback, void (*free_privdata)(RedisModuleCtx*,void*), long long timeout_ms, RedisModuleString **keys, int numkeys, void *privdata) REDISMODULE_ATTR; +REDISMODULE_API void (*RedisModule_SignalKeyAsReady)(RedisModuleCtx *ctx, RedisModuleString *key) REDISMODULE_ATTR; +REDISMODULE_API RedisModuleString * (*RedisModule_GetBlockedClientReadyKey)(RedisModuleCtx *ctx) REDISMODULE_ATTR; +REDISMODULE_API RedisModuleScanCursor * (*RedisModule_ScanCursorCreate)() REDISMODULE_ATTR; +REDISMODULE_API void (*RedisModule_ScanCursorRestart)(RedisModuleScanCursor *cursor) REDISMODULE_ATTR; +REDISMODULE_API void (*RedisModule_ScanCursorDestroy)(RedisModuleScanCursor *cursor) REDISMODULE_ATTR; +REDISMODULE_API int (*RedisModule_Scan)(RedisModuleCtx *ctx, RedisModuleScanCursor *cursor, RedisModuleScanCB fn, void *privdata) REDISMODULE_ATTR; +REDISMODULE_API int (*RedisModule_ScanKey)(RedisModuleKey *key, RedisModuleScanCursor *cursor, RedisModuleScanKeyCB fn, void *privdata) REDISMODULE_ATTR; + /* Experimental APIs */ #ifdef REDISMODULE_EXPERIMENTAL_API #define REDISMODULE_EXPERIMENTAL_API_VERSION 3 -RedisModuleBlockedClient *REDISMODULE_API_FUNC(RedisModule_BlockClient)(RedisModuleCtx *ctx, RedisModuleCmdFunc reply_callback, RedisModuleCmdFunc timeout_callback, void (*free_privdata)(RedisModuleCtx*,void*), long long timeout_ms); -int REDISMODULE_API_FUNC(RedisModule_UnblockClient)(RedisModuleBlockedClient *bc, void *privdata); -int REDISMODULE_API_FUNC(RedisModule_IsBlockedReplyRequest)(RedisModuleCtx *ctx); -int REDISMODULE_API_FUNC(RedisModule_IsBlockedTimeoutRequest)(RedisModuleCtx *ctx); -void *REDISMODULE_API_FUNC(RedisModule_GetBlockedClientPrivateData)(RedisModuleCtx *ctx); -RedisModuleBlockedClient *REDISMODULE_API_FUNC(RedisModule_GetBlockedClientHandle)(RedisModuleCtx *ctx); -int REDISMODULE_API_FUNC(RedisModule_AbortBlock)(RedisModuleBlockedClient *bc); -RedisModuleCtx 
*REDISMODULE_API_FUNC(RedisModule_GetThreadSafeContext)(RedisModuleBlockedClient *bc); -void REDISMODULE_API_FUNC(RedisModule_FreeThreadSafeContext)(RedisModuleCtx *ctx); -void REDISMODULE_API_FUNC(RedisModule_ThreadSafeContextLock)(RedisModuleCtx *ctx); -void REDISMODULE_API_FUNC(RedisModule_ThreadSafeContextUnlock)(RedisModuleCtx *ctx); -int REDISMODULE_API_FUNC(RedisModule_SubscribeToKeyspaceEvents)(RedisModuleCtx *ctx, int types, RedisModuleNotificationFunc cb); -int REDISMODULE_API_FUNC(RedisModule_NotifyKeyspaceEvent)(RedisModuleCtx *ctx, int type, const char *event, RedisModuleString *key); -int REDISMODULE_API_FUNC(RedisModule_GetNotifyKeyspaceEvents)(); -int REDISMODULE_API_FUNC(RedisModule_BlockedClientDisconnected)(RedisModuleCtx *ctx); -void REDISMODULE_API_FUNC(RedisModule_RegisterClusterMessageReceiver)(RedisModuleCtx *ctx, uint8_t type, RedisModuleClusterMessageReceiver callback); -int REDISMODULE_API_FUNC(RedisModule_SendClusterMessage)(RedisModuleCtx *ctx, char *target_id, uint8_t type, unsigned char *msg, uint32_t len); -int REDISMODULE_API_FUNC(RedisModule_GetClusterNodeInfo)(RedisModuleCtx *ctx, const char *id, char *ip, char *master_id, int *port, int *flags); -char **REDISMODULE_API_FUNC(RedisModule_GetClusterNodesList)(RedisModuleCtx *ctx, size_t *numnodes); -void REDISMODULE_API_FUNC(RedisModule_FreeClusterNodesList)(char **ids); -RedisModuleTimerID REDISMODULE_API_FUNC(RedisModule_CreateTimer)(RedisModuleCtx *ctx, mstime_t period, RedisModuleTimerProc callback, void *data); -int REDISMODULE_API_FUNC(RedisModule_StopTimer)(RedisModuleCtx *ctx, RedisModuleTimerID id, void **data); -int REDISMODULE_API_FUNC(RedisModule_GetTimerInfo)(RedisModuleCtx *ctx, RedisModuleTimerID id, uint64_t *remaining, void **data); -const char *REDISMODULE_API_FUNC(RedisModule_GetMyClusterID)(void); -size_t REDISMODULE_API_FUNC(RedisModule_GetClusterSize)(void); -void REDISMODULE_API_FUNC(RedisModule_GetRandomBytes)(unsigned char *dst, size_t len); -void REDISMODULE_API_FUNC(RedisModule_GetRandomHexChars)(char *dst, size_t len); -void REDISMODULE_API_FUNC(RedisModule_SetDisconnectCallback)(RedisModuleBlockedClient *bc, RedisModuleDisconnectFunc callback); -void REDISMODULE_API_FUNC(RedisModule_SetClusterFlags)(RedisModuleCtx *ctx, uint64_t flags); -int REDISMODULE_API_FUNC(RedisModule_ExportSharedAPI)(RedisModuleCtx *ctx, const char *apiname, void *func); -void *REDISMODULE_API_FUNC(RedisModule_GetSharedAPI)(RedisModuleCtx *ctx, const char *apiname); -RedisModuleCommandFilter *REDISMODULE_API_FUNC(RedisModule_RegisterCommandFilter)(RedisModuleCtx *ctx, RedisModuleCommandFilterFunc cb, int flags); -int REDISMODULE_API_FUNC(RedisModule_UnregisterCommandFilter)(RedisModuleCtx *ctx, RedisModuleCommandFilter *filter); -int REDISMODULE_API_FUNC(RedisModule_CommandFilterArgsCount)(RedisModuleCommandFilterCtx *fctx); -const RedisModuleString *REDISMODULE_API_FUNC(RedisModule_CommandFilterArgGet)(RedisModuleCommandFilterCtx *fctx, int pos); -int REDISMODULE_API_FUNC(RedisModule_CommandFilterArgInsert)(RedisModuleCommandFilterCtx *fctx, int pos, RedisModuleString *arg); -int REDISMODULE_API_FUNC(RedisModule_CommandFilterArgReplace)(RedisModuleCommandFilterCtx *fctx, int pos, RedisModuleString *arg); -int REDISMODULE_API_FUNC(RedisModule_CommandFilterArgDelete)(RedisModuleCommandFilterCtx *fctx, int pos); -int REDISMODULE_API_FUNC(RedisModule_Fork)(RedisModuleForkDoneHandler cb, void *user_data); -int REDISMODULE_API_FUNC(RedisModule_ExitFromChild)(int retcode); -int 
REDISMODULE_API_FUNC(RedisModule_KillForkChild)(int child_pid); -float REDISMODULE_API_FUNC(RedisModule_GetUsedMemoryRatio)(); -size_t REDISMODULE_API_FUNC(RedisModule_MallocSize)(void* ptr); -RedisModuleUser *REDISMODULE_API_FUNC(RedisModule_CreateModuleUser)(const char *name); -void REDISMODULE_API_FUNC(RedisModule_FreeModuleUser)(RedisModuleUser *user); -int REDISMODULE_API_FUNC(RedisModule_SetModuleUserACL)(RedisModuleUser *user, const char* acl); -int REDISMODULE_API_FUNC(RedisModule_AuthenticateClientWithACLUser)(RedisModuleCtx *ctx, const char *name, size_t len, RedisModuleUserChangedFunc callback, void *privdata, uint64_t *client_id); -int REDISMODULE_API_FUNC(RedisModule_AuthenticateClientWithUser)(RedisModuleCtx *ctx, RedisModuleUser *user, RedisModuleUserChangedFunc callback, void *privdata, uint64_t *client_id); -int REDISMODULE_API_FUNC(RedisModule_DeauthenticateAndCloseClient)(RedisModuleCtx *ctx, uint64_t client_id); +REDISMODULE_API RedisModuleBlockedClient * (*RedisModule_BlockClient)(RedisModuleCtx *ctx, RedisModuleCmdFunc reply_callback, RedisModuleCmdFunc timeout_callback, void (*free_privdata)(RedisModuleCtx*,void*), long long timeout_ms) REDISMODULE_ATTR; +REDISMODULE_API int (*RedisModule_UnblockClient)(RedisModuleBlockedClient *bc, void *privdata) REDISMODULE_ATTR; +REDISMODULE_API int (*RedisModule_IsBlockedReplyRequest)(RedisModuleCtx *ctx) REDISMODULE_ATTR; +REDISMODULE_API int (*RedisModule_IsBlockedTimeoutRequest)(RedisModuleCtx *ctx) REDISMODULE_ATTR; +REDISMODULE_API void * (*RedisModule_GetBlockedClientPrivateData)(RedisModuleCtx *ctx) REDISMODULE_ATTR; +REDISMODULE_API RedisModuleBlockedClient * (*RedisModule_GetBlockedClientHandle)(RedisModuleCtx *ctx) REDISMODULE_ATTR; +REDISMODULE_API int (*RedisModule_AbortBlock)(RedisModuleBlockedClient *bc) REDISMODULE_ATTR; +REDISMODULE_API RedisModuleCtx * (*RedisModule_GetThreadSafeContext)(RedisModuleBlockedClient *bc) REDISMODULE_ATTR; +REDISMODULE_API void (*RedisModule_FreeThreadSafeContext)(RedisModuleCtx *ctx) REDISMODULE_ATTR; +REDISMODULE_API void (*RedisModule_ThreadSafeContextLock)(RedisModuleCtx *ctx) REDISMODULE_ATTR; +REDISMODULE_API void (*RedisModule_ThreadSafeContextUnlock)(RedisModuleCtx *ctx) REDISMODULE_ATTR; +REDISMODULE_API int (*RedisModule_SubscribeToKeyspaceEvents)(RedisModuleCtx *ctx, int types, RedisModuleNotificationFunc cb) REDISMODULE_ATTR; +REDISMODULE_API int (*RedisModule_NotifyKeyspaceEvent)(RedisModuleCtx *ctx, int type, const char *event, RedisModuleString *key) REDISMODULE_ATTR; +REDISMODULE_API int (*RedisModule_GetNotifyKeyspaceEvents)() REDISMODULE_ATTR; +REDISMODULE_API int (*RedisModule_BlockedClientDisconnected)(RedisModuleCtx *ctx) REDISMODULE_ATTR; +REDISMODULE_API void (*RedisModule_RegisterClusterMessageReceiver)(RedisModuleCtx *ctx, uint8_t type, RedisModuleClusterMessageReceiver callback) REDISMODULE_ATTR; +REDISMODULE_API int (*RedisModule_SendClusterMessage)(RedisModuleCtx *ctx, char *target_id, uint8_t type, unsigned char *msg, uint32_t len) REDISMODULE_ATTR; +REDISMODULE_API int (*RedisModule_GetClusterNodeInfo)(RedisModuleCtx *ctx, const char *id, char *ip, char *master_id, int *port, int *flags) REDISMODULE_ATTR; +REDISMODULE_API char ** (*RedisModule_GetClusterNodesList)(RedisModuleCtx *ctx, size_t *numnodes) REDISMODULE_ATTR; +REDISMODULE_API void (*RedisModule_FreeClusterNodesList)(char **ids) REDISMODULE_ATTR; +REDISMODULE_API RedisModuleTimerID (*RedisModule_CreateTimer)(RedisModuleCtx *ctx, mstime_t period, RedisModuleTimerProc callback, void *data) 
REDISMODULE_ATTR; +REDISMODULE_API int (*RedisModule_StopTimer)(RedisModuleCtx *ctx, RedisModuleTimerID id, void **data) REDISMODULE_ATTR; +REDISMODULE_API int (*RedisModule_GetTimerInfo)(RedisModuleCtx *ctx, RedisModuleTimerID id, uint64_t *remaining, void **data) REDISMODULE_ATTR; +REDISMODULE_API const char * (*RedisModule_GetMyClusterID)(void) REDISMODULE_ATTR; +REDISMODULE_API size_t (*RedisModule_GetClusterSize)(void) REDISMODULE_ATTR; +REDISMODULE_API void (*RedisModule_GetRandomBytes)(unsigned char *dst, size_t len) REDISMODULE_ATTR; +REDISMODULE_API void (*RedisModule_GetRandomHexChars)(char *dst, size_t len) REDISMODULE_ATTR; +REDISMODULE_API void (*RedisModule_SetDisconnectCallback)(RedisModuleBlockedClient *bc, RedisModuleDisconnectFunc callback) REDISMODULE_ATTR; +REDISMODULE_API void (*RedisModule_SetClusterFlags)(RedisModuleCtx *ctx, uint64_t flags) REDISMODULE_ATTR; +REDISMODULE_API int (*RedisModule_ExportSharedAPI)(RedisModuleCtx *ctx, const char *apiname, void *func) REDISMODULE_ATTR; +REDISMODULE_API void * (*RedisModule_GetSharedAPI)(RedisModuleCtx *ctx, const char *apiname) REDISMODULE_ATTR; +REDISMODULE_API RedisModuleCommandFilter * (*RedisModule_RegisterCommandFilter)(RedisModuleCtx *ctx, RedisModuleCommandFilterFunc cb, int flags) REDISMODULE_ATTR; +REDISMODULE_API int (*RedisModule_UnregisterCommandFilter)(RedisModuleCtx *ctx, RedisModuleCommandFilter *filter) REDISMODULE_ATTR; +REDISMODULE_API int (*RedisModule_CommandFilterArgsCount)(RedisModuleCommandFilterCtx *fctx) REDISMODULE_ATTR; +REDISMODULE_API const RedisModuleString * (*RedisModule_CommandFilterArgGet)(RedisModuleCommandFilterCtx *fctx, int pos) REDISMODULE_ATTR; +REDISMODULE_API int (*RedisModule_CommandFilterArgInsert)(RedisModuleCommandFilterCtx *fctx, int pos, RedisModuleString *arg) REDISMODULE_ATTR; +REDISMODULE_API int (*RedisModule_CommandFilterArgReplace)(RedisModuleCommandFilterCtx *fctx, int pos, RedisModuleString *arg) REDISMODULE_ATTR; +REDISMODULE_API int (*RedisModule_CommandFilterArgDelete)(RedisModuleCommandFilterCtx *fctx, int pos) REDISMODULE_ATTR; +REDISMODULE_API int (*RedisModule_Fork)(RedisModuleForkDoneHandler cb, void *user_data) REDISMODULE_ATTR; +REDISMODULE_API int (*RedisModule_ExitFromChild)(int retcode) REDISMODULE_ATTR; +REDISMODULE_API int (*RedisModule_KillForkChild)(int child_pid) REDISMODULE_ATTR; +REDISMODULE_API float (*RedisModule_GetUsedMemoryRatio)() REDISMODULE_ATTR; +REDISMODULE_API size_t (*RedisModule_MallocSize)(void* ptr) REDISMODULE_ATTR; +REDISMODULE_API RedisModuleUser * (*RedisModule_CreateModuleUser)(const char *name) REDISMODULE_ATTR; +REDISMODULE_API void (*RedisModule_FreeModuleUser)(RedisModuleUser *user) REDISMODULE_ATTR; +REDISMODULE_API int (*RedisModule_SetModuleUserACL)(RedisModuleUser *user, const char* acl) REDISMODULE_ATTR; +REDISMODULE_API int (*RedisModule_AuthenticateClientWithACLUser)(RedisModuleCtx *ctx, const char *name, size_t len, RedisModuleUserChangedFunc callback, void *privdata, uint64_t *client_id) REDISMODULE_ATTR; +REDISMODULE_API int (*RedisModule_AuthenticateClientWithUser)(RedisModuleCtx *ctx, RedisModuleUser *user, RedisModuleUserChangedFunc callback, void *privdata, uint64_t *client_id) REDISMODULE_ATTR; +REDISMODULE_API int (*RedisModule_DeauthenticateAndCloseClient)(RedisModuleCtx *ctx, uint64_t client_id) REDISMODULE_ATTR; #endif #define RedisModule_IsAOFClient(id) ((id) == CLIENT_ID_AOF) /* This is included inline inside each Redis module. 
 */
-static int RedisModule_Init(RedisModuleCtx *ctx, const char *name, int ver, int apiver) __attribute__((unused));
+static int RedisModule_Init(RedisModuleCtx *ctx, const char *name, int ver, int apiver) REDISMODULE_ATTR_UNUSED;
 static int RedisModule_Init(RedisModuleCtx *ctx, const char *name, int ver, int apiver) {
     void *getapifuncptr = ((void**)ctx)[0];
     RedisModule_GetApi = (int (*)(const char *, void *)) (unsigned long)getapifuncptr;

From edcc2032e4f42865c0ec41247a7c6bc24ef588af Mon Sep 17 00:00:00 2001
From: Wen Hui
Date: Fri, 21 Aug 2020 16:37:49 -0400
Subject: [PATCH 127/377] fix make warnings (#7692)

(cherry picked from commit 7386b998e80affe8696b89b750ba86c9d8b9f453)
---
 src/server.c | 7 ++++---
 1 file changed, 4 insertions(+), 3 deletions(-)

diff --git a/src/server.c b/src/server.c
index 5a986006e..a37fd137a 100644
--- a/src/server.c
+++ b/src/server.c
@@ -2543,9 +2543,6 @@ static void readOOMScoreAdj(void) {
  * depending on current role. */
 int setOOMScoreAdj(int process_class) {
-    int fd;
-    int val;
-    char buf[64];
 
     if (!server.oom_score_adj) return C_OK;
 
     if (process_class == -1)
@@ -2554,6 +2551,10 @@ int setOOMScoreAdj(int process_class) {
     serverAssert(process_class >= 0 && process_class < CONFIG_OOM_COUNT);
 
 #ifdef HAVE_PROC_OOM_SCORE_ADJ
+    int fd;
+    int val;
+    char buf[64];
+
     val = server.oom_score_adj_base + server.oom_score_adj_values[process_class];
     if (val > 1000) val = 1000;
     if (val < -1000) val = -1000;

From a399ca9bf7b8e4eb5612efac2af9c92dd87e21f4 Mon Sep 17 00:00:00 2001
From: Wang Yuan
Date: Tue, 25 Aug 2020 03:59:56 +0800
Subject: [PATCH 128/377] Fix wrong format specifiers of 'sdscatfmt' for the INFO command (#7706)

unlike printf, sdscatfmt doesn't take %d

(cherry picked from commit 48a00e6b99430d493ae8e4daa169f4a9ee9a8fa6)
---
 src/server.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/server.c b/src/server.c
index a37fd137a..f9154794b 100644
--- a/src/server.c
+++ b/src/server.c
@@ -4137,7 +4137,7 @@ sds genRedisInfoString(const char *section) {
             "lru_clock:%u\r\n"
             "executable:%s\r\n"
             "config_file:%s\r\n"
-            "io_threads_active:%d\r\n",
+            "io_threads_active:%i\r\n",
             REDIS_VERSION,
             redisGitSHA1(),
             strtol(redisGitDirty(),NULL,10) > 0,

From ba1da77a3d7a7614fdb3162ca86d10d5051bf4e0 Mon Sep 17 00:00:00 2001
From: Yossi Gottlieb
Date: Sun, 30 Aug 2020 12:23:47 +0300
Subject: [PATCH 129/377] Fix oom-score-adj on older distros. (#7724)

Don't assume `ps` handles `-h` to display output without headers and
manually trim headers line from output.
(cherry picked from commit ae8420298cacc2737e8e3ffa3c5acc038cd27849)
---
 tests/unit/oom-score-adj.tcl | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/tests/unit/oom-score-adj.tcl b/tests/unit/oom-score-adj.tcl
index 894a70fb2..993004602 100644
--- a/tests/unit/oom-score-adj.tcl
+++ b/tests/unit/oom-score-adj.tcl
@@ -16,8 +16,8 @@ if {$system_name eq {linux}} {
 
     proc get_child_pid {} {
         set pid [srv 0 pid]
-        set fd [open "|ps --ppid $pid -o pid -h" "r"]
-        set child_pid [string trim [read $fd]]
+        set fd [open "|ps --ppid $pid -o pid" "r"]
+        set child_pid [string trim [lindex [split [read $fd] \n] 1]]
         close $fd
 
         return $child_pid

From 6c68ac1d4c6af7a099e0331391029a9260a1bb9c Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Leo=C5=A1=20Liter=C3=A1k?=
Date: Mon, 31 Aug 2020 11:44:09 +0200
Subject: [PATCH 130/377] Update README.md with instructions how to build with systemd support (#7730)

#7728 - update instructions for systemd support

(cherry picked from commit 635d6ca6390ebab09bca3214777253910cb46547)
---
 README.md | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/README.md b/README.md
index 55537e01f..ca205f2a0 100644
--- a/README.md
+++ b/README.md
@@ -40,6 +40,11 @@ libssl-dev on Debian/Ubuntu) and run:
 
     % make BUILD_TLS=yes
 
+To build with systemd support, you'll need systemd development libraries (such
+as libsystemd-dev on Debian/Ubuntu or systemd-devel on CentOS) and run:
+
+    % make BUILD_WITH_SYSTEMD=yes USE_SYSTEMD=yes
+
 You can run a 32 bit Redis binary using:
 
     % make 32bit

From f2ab7ac5d7b819ad5b296815e30f4db57a43984b Mon Sep 17 00:00:00 2001
From: Yossi Gottlieb
Date: Mon, 31 Aug 2020 20:42:46 +0300
Subject: [PATCH 131/377] Backport Lua 5.2.2 stack overflow fix. (#7733)

This fixes the issue described in CVE-2014-5461. At this time we cannot
confirm that the original issue has a real impact on Redis, but it is
included as an extra safety measure.

(cherry picked from commit 374270d3a04e8b224a12655518c815497aeb497d)
---
 deps/lua/src/ldo.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/deps/lua/src/ldo.c b/deps/lua/src/ldo.c
index 514f7a2a3..939940a4c 100644
--- a/deps/lua/src/ldo.c
+++ b/deps/lua/src/ldo.c
@@ -274,7 +274,7 @@ int luaD_precall (lua_State *L, StkId func, int nresults) {
     CallInfo *ci;
     StkId st, base;
     Proto *p = cl->p;
-    luaD_checkstack(L, p->maxstacksize);
+    luaD_checkstack(L, p->maxstacksize + p->numparams);
     func = restorestack(L, funcr);
     if (!p->is_vararg) { /* no varargs? */
       base = func + 1;

From 6041fc99b5d1584078229325a77b855940cf7749 Mon Sep 17 00:00:00 2001
From: Oran Agra
Date: Sun, 9 Aug 2020 06:08:00 +0300
Subject: [PATCH 132/377] Reduce the probability of failure when start redis in runtest-cluster #7554 (#7635)

When running runtest-cluster, we first need to create a cluster with
spawn_instance. A port that is not in use is chosen, but sometimes the server
still cannot be started on that port, possibly because of a race with another
process taking it first (such as in redis/redis/runs/896537490) or because of
a machine problem. To reduce the probability of failure when starting Redis in
runtest-cluster, we now attempt another port when we find that the server did
not start up.
Co-authored-by: Oran Agra Co-authored-by: yanhui13 (cherry picked from commit 1deaad884c38e92e5b691f36b253ef4ee2201ca4) --- tests/instances.tcl | 28 +++++++++++++++++++++++----- 1 file changed, 23 insertions(+), 5 deletions(-) diff --git a/tests/instances.tcl b/tests/instances.tcl index 691378b9b..a43a4cc87 100644 --- a/tests/instances.tcl +++ b/tests/instances.tcl @@ -59,8 +59,6 @@ proc exec_instance {type cfgfile} { proc spawn_instance {type base_port count {conf {}}} { for {set j 0} {$j < $count} {incr j} { set port [find_available_port $base_port $::redis_port_count] - incr base_port - puts "Starting $type #$j at port $port" # Create a directory for this instance. set dirname "${type}_${j}" @@ -93,10 +91,30 @@ proc spawn_instance {type base_port count {conf {}}} { close $cfg # Finally exec it and remember the pid for later cleanup. - set pid [exec_instance $type $cfgfile] - lappend ::pids $pid + set retry 100 + while {$retry} { + set pid [exec_instance $type $cfgfile] - # Check availability + # Check availability + if {[server_is_up 127.0.0.1 $port 100] == 0} { + puts "Starting $type #$j at port $port failed, try another" + incr retry -1 + set port [find_available_port $base_port $::redis_port_count] + set cfg [open $cfgfile a+] + if {$::tls} { + puts $cfg "tls-port $port" + } else { + puts $cfg "port $port" + } + close $cfg + } else { + puts "Starting $type #$j at port $port" + lappend ::pids $pid + break + } + } + + # Check availability finally if {[server_is_up 127.0.0.1 $port 100] == 0} { set logfile [file join $dirname log.txt] puts [exec tail $logfile] From 6e7733c276c7708f8a470f0c62672b71f8216791 Mon Sep 17 00:00:00 2001 From: Oran Agra Date: Fri, 28 Aug 2020 11:29:53 +0300 Subject: [PATCH 133/377] Redis 6.0.7 --- 00-RELEASENOTES | 324 ++++++++++++++++++++++++++++++++++++++++++++++++ src/version.h | 2 +- 2 files changed, 325 insertions(+), 1 deletion(-) diff --git a/00-RELEASENOTES b/00-RELEASENOTES index 484aeb621..ce75e19c2 100644 --- a/00-RELEASENOTES +++ b/00-RELEASENOTES @@ -11,6 +11,330 @@ CRITICAL: There is a critical bug affecting MOST USERS. Upgrade ASAP. SECURITY: There are security fixes in the release. -------------------------------------------------------------------------------- +================================================================================ +Redis 6.0.7 Released Fri Aug 28 11:05:09 IDT 2020 +================================================================================ + +Upgrade urgency MODERATE: several bugs with moderate impact are fixed, +Specifically the first two listed below which cause protocol errors for clients. 
+
+Bug fixes:
+
+* CONFIG SET could hang the client when it arrives during RDB/AOF loading (when
+  processed after another command that was also rejected with -LOADING error)
+* LPOS command when RANK is greater than matches responded with broken protocol
+  (negative multi-bulk count)
+* UNLINK / Lazyfree for stream type key would never do async freeing
+* PERSIST should invalidate WATCH (like EXPIRE does)
+* EXEC with only read commands could have been rejected when OOM
+* TLS: relax verification on CONFIG SET (don't error if some configs are set
+  and tls isn't enabled)
+* TLS: support cluster/replication without tls-port
+* Systemd startup after network is online
+* Redis-benchmark improvements
+* Various small bug fixes
+
+New features:
+
+* Add oom-score-adj configuration option to control Linux OOM killer
+* Show IO threads statistics and status in INFO output
+* Add optional tls verification mode (see tls-auth-clients)
+
+Module API:
+
+* Add RedisModule_HoldString
+* Add loaded keyspace event
+* Fix RedisModuleEvent_LoadingProgress
+* Fix RedisModuleEvent_MasterLinkChange hook missing on successful psync
+* Fix missing RM_CLIENTINFO_FLAG_SSL
+* Refactor redismodule.h for use with -fno-common / extern
+
+Full list of commits:
+
+Oran Agra in commit c26394e4f:
+ Reduce the probability of failure when start redis in runtest-cluster #7554 (#7635)
+ 1 file changed, 23 insertions(+), 5 deletions(-)
+
+Leoš Literák in commit 745d5e802:
+ Update README.md with instructions how to build with systemd support (#7730)
+ 1 file changed, 5 insertions(+)
+
+Yossi Gottlieb in commit 03f1d208a:
+ Fix oom-score-adj on older distros. (#7724)
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+Yossi Gottlieb in commit 941174d9c:
+ Backport Lua 5.2.2 stack overflow fix. (#7733)
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+Wang Yuan in commit c897dba14:
+ Fix wrong format specifiers of 'sdscatfmt' for the INFO command (#7706)
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+Wen Hui in commit 5e3fab5e7:
+ fix make warnings (#7692)
+ 1 file changed, 4 insertions(+), 3 deletions(-)
+
+Nathan Scott in commit a2b09c13f:
+ Annotate module API functions in redismodule.h for use with -fno-common (#6900)
+ 1 file changed, 265 insertions(+), 241 deletions(-)
+
+Yossi Gottlieb in commit bf244273f:
+ Add oom-score-adj configuration option to control Linux OOM killer. (#1690)
+ 8 files changed, 306 insertions(+), 1 deletion(-)
+
+Meir Shpilraien (Spielrein) in commit b5a6ab98f:
+ see #7544, added RedisModule_HoldString api.
(#7577) + 4 files changed, 83 insertions(+), 8 deletions(-) + +ShooterIT in commit ff04cf62b: + [Redis-benchmark] Remove zrem test, add zpopmin test + 1 file changed, 5 insertions(+), 5 deletions(-) + +ShooterIT in commit 0f3260f31: + [Redis-benchmark] Support zset type + 1 file changed, 16 insertions(+) + +Arun Ranganathan in commit 45d0b94fc: + Show threading configuration in INFO output (#7446) + 3 files changed, 46 insertions(+), 14 deletions(-) + +Meir Shpilraien (Spielrein) in commit a22f61e12: + This PR introduces a new loaded keyspace event (#7536) + 8 files changed, 135 insertions(+), 4 deletions(-) + +Oran Agra in commit 1c9ca1030: + Fix rejectCommand trims newline in shared error objects, hung clients (#7714) + 4 files changed, 42 insertions(+), 23 deletions(-) + +valentinogeron in commit 217471795: + EXEC with only read commands should not be rejected when OOM (#7696) + 2 files changed, 51 insertions(+), 8 deletions(-) + +Itamar Haber in commit 6e6c47d16: + Expands lazyfree's effort estimate to include Streams (#5794) + 1 file changed, 24 insertions(+) + +Yossi Gottlieb in commit da6813623: + Add language servers stuff, test/tls to gitignore. (#7698) + 1 file changed, 4 insertions(+) + +Valentino Geron in commit de7fb126e: + Assert that setDeferredAggregateLen isn't called with negative value + 1 file changed, 1 insertion(+) + +Valentino Geron in commit 6cf27f25f: + Fix LPOS command when RANK is greater than matches + 2 files changed, 9 insertions(+), 2 deletions(-) + +Yossi Gottlieb in commit 9bba54ace: + Tests: fix redis-cli with remote hosts. (#7693) + 3 files changed, 5 insertions(+), 5 deletions(-) + +huangzhw in commit 0fec2cb81: + RedisModuleEvent_LoadingProgress always at 100% progress (#7685) + 1 file changed, 2 insertions(+), 2 deletions(-) + +guybe7 in commit 931e19aa6: + Modules: Invalidate saved_oparray after use (#7688) + 1 file changed, 2 insertions(+) + +杨博东 in commit 6f2065570: + Fix flock cluster config may cause failure to restart after kill -9 (#7674) + 4 files changed, 31 insertions(+), 7 deletions(-) + +Raghav Muddur in commit 200149a2a: + Update clusterMsgDataPublish to clusterMsgModule (#7682) + 1 file changed, 1 insertion(+), 1 deletion(-) + +Madelyn Olson in commit 72daa1b4e: + Fixed hset error since it's shared with hmset (#7678) + 1 file changed, 1 insertion(+), 1 deletion(-) + +guybe7 in commit 3bf9ac994: + PERSIST should signalModifiedKey (Like EXPIRE does) (#7671) + 1 file changed, 1 insertion(+) + +Oran Agra in commit b37501684: + OOM Crash log include size of allocation attempt. (#7670) + 1 file changed, 2 insertions(+), 1 deletion(-) + +Wen Hui in commit 2136cb68f: + [module] using predefined REDISMODULE_NO_EXPIRE in RM_GetExpire (#7669) + 1 file changed, 2 insertions(+), 1 deletion(-) + +Oran Agra in commit f56aee4bc: + Trim trailing spaces in error replies coming from rejectCommand (#7668) + 1 file changed, 5 insertions(+), 1 deletion(-) + +Yossi Gottlieb in commit 012d7506a: + Module API: fix missing RM_CLIENTINFO_FLAG_SSL. (#7666) + 6 files changed, 82 insertions(+), 1 deletion(-) + +Yossi Gottlieb in commit a0adbc857: + TLS: relax verification on CONFIG SET. 
(#7665) + 2 files changed, 24 insertions(+), 7 deletions(-) + +Madelyn Olson in commit 2ef29715b: + Fixed timer warning (#5953) + 1 file changed, 1 insertion(+), 1 deletion(-) + +Wagner Francisco Mezaroba in commit b76f171f5: + allow --pattern to be used along with --bigkeys (#3586) + 1 file changed, 9 insertions(+), 2 deletions(-) + +zhaozhao.zz in commit cc7b57765: + redis-benchmark: fix wrong random key for hset (#4895) + 1 file changed, 1 insertion(+), 1 deletion(-) + +zhaozhao.zz in commit 479c1ba77: + CLIENT_MASTER should ignore server.proto_max_bulk_len + 1 file changed, 2 insertions(+), 1 deletion(-) + +zhaozhao.zz in commit f61ce8a52: + config: proto-max-bulk-len must be 1mb or greater + 2 files changed, 2 insertions(+), 2 deletions(-) + +zhaozhao.zz in commit 0350f597a: + using proto-max-bulk-len in checkStringLength for SETRANGE and APPEND + 1 file changed, 2 insertions(+), 2 deletions(-) + +YoongHM in commit eea63548d: + Start redis after network is online (#7639) + 1 file changed, 2 insertions(+) + +Yossi Gottlieb in commit aef6d74fb: + Run daily workflow on main repo only (no forks). (#7646) + 1 file changed, 7 insertions(+) + +WuYunlong in commit 917b4d241: + see #7250, fix signature of RedisModule_DeauthenticateAndCloseClient (#7645) + 1 file changed, 1 insertion(+), 1 deletion(-) + +Wang Yuan in commit efab7fd54: + Print error info if failed opening config file (#6943) + 1 file changed, 2 insertions(+), 1 deletion(-) + +Wen Hui in commit 8c4468bcf: + fix memory leak in ACLLoadFromFile error handling (#7623) + 1 file changed, 1 insertion(+) + +Oran Agra in commit 89724e1d2: + redis-cli --cluster-yes - negate force flag for clarity + 1 file changed, 9 insertions(+), 9 deletions(-) + +Frank Meier in commit c813739af: + reintroduce REDISCLI_CLUSTER_YES env variable in redis-cli + 1 file changed, 6 insertions(+) + +Frank Meier in commit 7e3b86c18: + add force option to 'create-cluster create' script call (#7612) + 1 file changed, 6 insertions(+), 2 deletions(-) + +Oran Agra in commit 3f7fa4312: + fix new rdb test failing on timing issues (#7604) + 1 file changed, 2 insertions(+), 2 deletions(-) + +Yossi Gottlieb in commit 417976d7a: + Fix test-centos7-tls daily job. (#7598) + 1 file changed, 2 insertions(+), 2 deletions(-) + +Oran Agra in commit c41818c51: + module hook for master link up missing on successful psync (#7584) + 2 files changed, 22 insertions(+), 2 deletions(-) + +Yossi Gottlieb in commit 6ef3fc185: + CI: Add daily CentOS 7.x jobs. (#7582) + 1 file changed, 50 insertions(+), 4 deletions(-) + +WuYunlong in commit 002c37482: + Fix running single test 14-consistency-check.tcl (#7587) + 1 file changed, 1 insertion(+) + +Yossi Gottlieb in commit 66cbbb6ad: + Clarify RM_BlockClient() error condition. (#6093) + 1 file changed, 9 insertions(+) + +namtsui in commit 22aba2207: + Avoid an out-of-bounds read in the redis-sentinel (#7443) + 1 file changed, 2 insertions(+), 2 deletions(-) + +Wen Hui in commit af08887dc: + Add SignalModifiedKey hook in XGROUP CREATE with MKSTREAM option (#7562) + 1 file changed, 1 insertion(+) + +Wen Hui in commit a5e0a64b0: + fix leak in error handling of debug populate command (#7062) + 1 file changed, 3 insertions(+), 4 deletions(-) + +Yossi Gottlieb in commit cbfdfa231: + Fix TLS cluster tests. (#7578) + 1 file changed, 4 insertions(+), 1 deletion(-) + +Yossi Gottlieb in commit 6d5376d30: + TLS: Propagate and handle SSL_new() failures. 
(#7576) + 4 files changed, 48 insertions(+), 6 deletions(-) + +Oran Agra in commit a662cd577: + Fix failing tests due to issues with wait_for_log_message (#7572) + 3 files changed, 38 insertions(+), 34 deletions(-) + +Jiayuan Chen in commit 2786a4b5e: + Add optional tls verification (#7502) + 6 files changed, 40 insertions(+), 5 deletions(-) + +Oran Agra in commit 3ef3d3612: + Daily github action: run cluster and sentinel tests with tls (#7575) + 1 file changed, 2 insertions(+), 2 deletions(-) + +Yossi Gottlieb in commit f20f63322: + TLS: support cluster/replication without tls-port. + 2 files changed, 5 insertions(+), 4 deletions(-) + +grishaf in commit 3c9ae059d: + Fix prepareForShutdown function declaration (#7566) + 1 file changed, 1 insertion(+), 1 deletion(-) + +Oran Agra in commit 3f4803af9: + Stabilize bgsave test that sometimes fails with valgrind (#7559) + 1 file changed, 20 insertions(+), 2 deletions(-) + +Madelyn Olson in commit 1a3c51a1f: + Properly reset errno for rdbLoad (#7542) + 1 file changed, 1 insertion(+) + +Oran Agra in commit 92d80b13a: + testsuite may leave servers alive on error (#7549) + 1 file changed, 3 insertions(+) + +Yossi Gottlieb in commit 245582ba7: + Tests: drop TCL 8.6 dependency. (#7548) + 1 file changed, 27 insertions(+), 22 deletions(-) + +Oran Agra in commit f20e1ba2d: + Fixes to release scripts (#7547) + 2 files changed, 2 insertions(+), 2 deletions(-) + +Remi Collet in commit 60ff56993: + Fix deprecated tail syntax in tests (#7543) + 1 file changed, 1 insertion(+), 1 deletion(-) + +Wen Hui in commit 34e8541b9: + Add missing calls to raxStop (#7532) + 4 files changed, 63 insertions(+), 19 deletions(-) + +Wen Hui in commit 2f7bc5435: + add missing caching command in client help (#7399) + 1 file changed, 1 insertion(+) + +zhaozhao.zz in commit c15be9ffe: + replication: need handle -NOPERM error after send ping (#7538) + 1 file changed, 1 insertion(+) + +Scott Brenner in commit 1b29152c3: + GitHub Actions workflows - use latest version of actions/checkout (#7534) + 2 files changed, 10 insertions(+), 10 deletions(-) + ================================================================================ Redis 6.0.6 Released Mon Jul 20 09:31:30 IDT 2020 ================================================================================ diff --git a/src/version.h b/src/version.h index 9dfd8f274..71f998326 100644 --- a/src/version.h +++ b/src/version.h @@ -1 +1 @@ -#define REDIS_VERSION "6.0.6" +#define REDIS_VERSION "6.0.7" From 7aa207ad2c6fdd81ef3d7367b5baee9e87031949 Mon Sep 17 00:00:00 2001 From: Yossi Gottlieb Date: Tue, 1 Sep 2020 10:02:14 +0300 Subject: [PATCH 134/377] Fix double-make issue with make && make install. (#7734) All user-supplied variables that affect the build should be explicitly persisted. 
Fixes #7254 (cherry picked from commit d377b116bad2eab176fe5f5271302823da50c94c) --- src/Makefile | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/Makefile b/src/Makefile index 80c627c24..873797330 100644 --- a/src/Makefile +++ b/src/Makefile @@ -255,6 +255,8 @@ persist-settings: distclean echo WARN=$(WARN) >> .make-settings echo OPT=$(OPT) >> .make-settings echo MALLOC=$(MALLOC) >> .make-settings + echo BUILD_TLS=$(BUILD_TLS) >> .make-settings + echo USE_SYSTEMD=$(USE_SYSTEMD) >> .make-settings echo CFLAGS=$(CFLAGS) >> .make-settings echo LDFLAGS=$(LDFLAGS) >> .make-settings echo REDIS_CFLAGS=$(REDIS_CFLAGS) >> .make-settings From 9f60eb0a3e459106cd34231f50e978e7d638a11c Mon Sep 17 00:00:00 2001 From: Oran Agra Date: Tue, 1 Sep 2020 21:31:37 +0300 Subject: [PATCH 135/377] fix README about BUILD_WITH_SYSTEMD usage (#7739) BUILD_WITH_SYSTEMD is an internal variable. Users should use USE_SYSTEMD=yes. (cherry picked from commit 9b61917d7f749d29cd37d2ad6f6563683b0b0bf6) --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index ca205f2a0..a90b95cc1 100644 --- a/README.md +++ b/README.md @@ -43,7 +43,7 @@ libssl-dev on Debian/Ubuntu) and run: To build with systemd support, you'll need systemd development libraries (such as libsystemd-dev on Debian/Ubuntu or systemd-devel on CentOS) and run: - % make BUILD_WITH_SYSTEMD=yes USE_SYSTEMD=yes + % make USE_SYSTEMD=yes You can run a 32 bit Redis binary using: From f8d0d902a5967eadbf594ff602c58cbdc8f20708 Mon Sep 17 00:00:00 2001 From: Thandayuthapani Date: Wed, 2 Sep 2020 18:53:49 +0530 Subject: [PATCH 136/377] Add masters/replicas options to redis-cli --cluster call command (#6491) * Add master/slave option in --cluster call command * Update src/redis-cli.c * Update src/redis-cli.c Co-authored-by: Itamar Haber (cherry picked from commit 535222063951cc1f3c2c8f78fd84fb9e0d8acefd) --- src/redis-cli.c | 15 +++++++++++++-- 1 file changed, 13 insertions(+), 2 deletions(-) diff --git a/src/redis-cli.c b/src/redis-cli.c index 0b44ec252..6c8ce068a 100644 --- a/src/redis-cli.c +++ b/src/redis-cli.c @@ -125,6 +125,8 @@ #define CLUSTER_MANAGER_CMD_FLAG_COLOR 1 << 8 #define CLUSTER_MANAGER_CMD_FLAG_CHECK_OWNERS 1 << 9 #define CLUSTER_MANAGER_CMD_FLAG_FIX_WITH_UNREACHABLE_MASTERS 1 << 10 +#define CLUSTER_MANAGER_CMD_FLAG_MASTERS_ONLY 1 << 11 +#define CLUSTER_MANAGER_CMD_FLAG_SLAVES_ONLY 1 << 12 #define CLUSTER_MANAGER_OPT_GETFRIENDS 1 << 0 #define CLUSTER_MANAGER_OPT_COLD 1 << 1 @@ -1543,6 +1545,12 @@ static int parseOptions(int argc, char **argv) { i = j; } else if (!strcmp(argv[i],"--cluster") && lastarg) { usage(); + } else if ((!strcmp(argv[i],"--cluster-only-masters"))) { + config.cluster_manager_command.flags |= + CLUSTER_MANAGER_CMD_FLAG_MASTERS_ONLY; + } else if ((!strcmp(argv[i],"--cluster-only-replicas"))) { + config.cluster_manager_command.flags |= + CLUSTER_MANAGER_CMD_FLAG_SLAVES_ONLY; } else if (!strcmp(argv[i],"--cluster-replicas") && !lastarg) { config.cluster_manager_command.replicas = atoi(argv[++i]); } else if (!strcmp(argv[i],"--cluster-master-id") && !lastarg) { @@ -2320,7 +2328,7 @@ clusterManagerCommandDef clusterManagerCommands[] = { "new_host:new_port existing_host:existing_port", "slave,master-id "}, {"del-node", clusterManagerCommandDeleteNode, 2, "host:port node_id",NULL}, {"call", clusterManagerCommandCall, -2, - "host:port command arg arg .. arg", NULL}, + "host:port command arg arg .. 
arg", "only-masters,only-replicas"}, {"set-timeout", clusterManagerCommandSetTimeout, 2, "host:port milliseconds", NULL}, {"import", clusterManagerCommandImport, 1, "host:port", @@ -6423,6 +6431,10 @@ static int clusterManagerCommandCall(int argc, char **argv) { listRewind(cluster_manager.nodes, &li); while ((ln = listNext(&li)) != NULL) { clusterManagerNode *n = ln->value; + if ((config.cluster_manager_command.flags & CLUSTER_MANAGER_CMD_FLAG_MASTERS_ONLY) + && (n->replicate != NULL)) continue; // continue if node is slave + if ((config.cluster_manager_command.flags & CLUSTER_MANAGER_CMD_FLAG_SLAVES_ONLY) + && (n->replicate == NULL)) continue; // continue if node is master if (!n->context && !clusterManagerNodeConnect(n)) continue; redisReply *reply = NULL; redisAppendCommandArgv(n->context, argc, (const char **) argv, argvlen); @@ -8196,4 +8208,3 @@ int main(int argc, char **argv) { return noninteractive(argc,convertToSds(argc,argv)); } } - From 894016c396097da3d32a7148dfc9f42256b5145d Mon Sep 17 00:00:00 2001 From: Oran Agra Date: Wed, 2 Sep 2020 17:18:09 +0300 Subject: [PATCH 137/377] Print server startup messages after daemonization (#7743) When redis isn't configured to have a log file, having these prints before damonization puts them in the calling process stdout rather than /dev/null (cherry picked from commit 0db61f564991ad483e2a2014738f25628584476b) --- src/server.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/server.c b/src/server.c index f9154794b..990b23b7d 100644 --- a/src/server.c +++ b/src/server.c @@ -5234,6 +5234,10 @@ int main(int argc, char **argv) { sdsfree(options); } + server.supervised = redisIsSupervised(server.supervised_mode); + int background = server.daemonize && !server.supervised; + if (background) daemonize(); + serverLog(LL_WARNING, "oO0OoO0OoO0Oo Redis is starting oO0OoO0OoO0Oo"); serverLog(LL_WARNING, "Redis version=%s, bits=%d, commit=%s, modified=%d, pid=%d, just started", @@ -5249,11 +5253,7 @@ int main(int argc, char **argv) { serverLog(LL_WARNING, "Configuration loaded"); } - server.supervised = redisIsSupervised(server.supervised_mode); - int background = server.daemonize && !server.supervised; - if (background) daemonize(); readOOMScoreAdj(); - initServer(); if (background || server.pidfile) createPidFile(); redisSetProcTitle(argv[0]); From e5db52f19f968a52880de7f81626bfbe50b75480 Mon Sep 17 00:00:00 2001 From: Yossi Gottlieb Date: Thu, 3 Sep 2020 18:15:48 +0300 Subject: [PATCH 138/377] redis-cli: fix writeConn() buffer handling. (#7749) Fix issues with writeConn() which resulted with corruption of the stream by leaving an extra byte in the buffer. The trigger for this is partial writes or write errors which were not experienced on Linux but reported on macOS. (cherry picked from commit 94cd74e5deb18e0383bcad5c596c72980e5350b6) --- src/redis-cli.c | 43 +++++++++++++++++++++++++++++++++++++------ 1 file changed, 37 insertions(+), 6 deletions(-) diff --git a/src/redis-cli.c b/src/redis-cli.c index 6c8ce068a..ca949b8f0 100644 --- a/src/redis-cli.c +++ b/src/redis-cli.c @@ -6822,21 +6822,52 @@ static ssize_t writeConn(redisContext *c, const char *buf, size_t buf_len) { int done = 0; + /* Append data to buffer which is *usually* expected to be empty + * but we don't assume that, and write. 
+ */ c->obuf = sdscatlen(c->obuf, buf, buf_len); if (redisBufferWrite(c, &done) == REDIS_ERR) { - sdsrange(c->obuf, 0, -(buf_len+1)); if (!(c->flags & REDIS_BLOCK)) errno = EAGAIN; + + /* On error, we assume nothing was written and we roll back the + * buffer to its original state. + */ + if (sdslen(c->obuf) > buf_len) + sdsrange(c->obuf, 0, -(buf_len+1)); + else + sdsclear(c->obuf); + return -1; } - size_t left = sdslen(c->obuf); - sdsclear(c->obuf); - if (!done) { - return buf_len - left; + /* If we're done, free up everything. We may have written more than + * buf_len (if c->obuf was not initially empty) but we don't have to + * tell. + */ + if (done) { + sdsclear(c->obuf); + return buf_len; } - return buf_len; + /* Write was successful but we have some leftovers which we should + * remove from the buffer. + * + * Do we still have data that was there prior to our buf? If so, + * restore buffer to it's original state and report no new data was + * writen. + */ + if (sdslen(c->obuf) > buf_len) { + sdsrange(c->obuf, 0, -(buf_len+1)); + return 0; + } + + /* At this point we're sure no prior data is left. We flush the buffer + * and report how much we've written. + */ + size_t left = sdslen(c->obuf); + sdsclear(c->obuf); + return buf_len - left; } /* Read raw bytes through a redisContext. The read operation is not greedy From 60bec0c20cad71d8ca24e65bfd5dbdf447bf7120 Mon Sep 17 00:00:00 2001 From: Oran Agra Date: Mon, 31 Aug 2020 10:23:09 +0300 Subject: [PATCH 139/377] test infra - write test name to logfile (cherry picked from commit e783c03dd1828fbf67259ee037a4faf835c4700a) --- tests/support/server.tcl | 14 ++++++++++++++ tests/support/test.tcl | 13 +++++++++++++ tests/support/util.tcl | 8 ++++++++ 3 files changed, 35 insertions(+) diff --git a/tests/support/server.tcl b/tests/support/server.tcl index 0afe89f7c..14e59e55c 100644 --- a/tests/support/server.tcl +++ b/tests/support/server.tcl @@ -301,6 +301,13 @@ proc start_server {options {code undefined}} { set stdout [format "%s/%s" [dict get $config "dir"] "stdout"] set stderr [format "%s/%s" [dict get $config "dir"] "stderr"] + # if we're inside a test, write the test name to the server log file + if {[info exists ::cur_test]} { + set fd [open $stdout "a+"] + puts $fd "### Starting server for test $::cur_test" + close $fd + } + # We need a loop here to retry with different ports. 
set server_started 0 while {$server_started == 0} { @@ -443,6 +450,13 @@ proc restart_server {level wait_ready} { set stderr [dict get $srv "stderr"] set config_file [dict get $srv "config_file"] + # if we're inside a test, write the test name to the server log file + if {[info exists ::cur_test]} { + set fd [open $stdout "a+"] + puts $fd "### Restarting server for test $::cur_test" + close $fd + } + set prev_ready_count [exec grep -i "Ready to accept" | wc -l < $stdout] set pid [spawn_server $config_file $stdout $stderr] diff --git a/tests/support/test.tcl b/tests/support/test.tcl index a5573f583..d266eba41 100644 --- a/tests/support/test.tcl +++ b/tests/support/test.tcl @@ -143,6 +143,18 @@ proc test {name code {okpattern undefined}} { set details {} lappend details "$name in $::curfile" + # set a cur_test global to be logged into new servers that are spown + # and log the test name in all existing servers + set ::cur_test "$name in $::curfile" + if {!$::external} { + foreach srv $::servers { + set stdout [dict get $srv stdout] + set fd [open $stdout "a+"] + puts $fd "### Starting test $::cur_test" + close $fd + } + } + send_data_packet $::test_server_fd testing $name if {[catch {set retval [uplevel 1 $code]} error]} { @@ -183,4 +195,5 @@ proc test {name code {okpattern undefined}} { send_data_packet $::test_server_fd err "Detected a memory leak in test '$name': $output" } } + unset ::cur_test } diff --git a/tests/support/util.tcl b/tests/support/util.tcl index 8340ad207..979eccdf9 100644 --- a/tests/support/util.tcl +++ b/tests/support/util.tcl @@ -138,6 +138,14 @@ proc wait_for_log_messages {srv_idx patterns from_line maxtries delay} { } } +# write line to server log file +proc write_log_line {srv_idx msg} { + set logfile [srv $srv_idx stdout] + set fd [open $logfile "a+"] + puts $fd "### $msg" + close $fd +} + # Random integer between 0 and max (excluded). 
proc randomInt {max} { expr {int(rand()*$max)} From 7d3cec9686eda74415803747538eaf9fb338bf03 Mon Sep 17 00:00:00 2001 From: Oran Agra Date: Mon, 31 Aug 2020 10:44:43 +0300 Subject: [PATCH 140/377] test infra - reduce disk space usage this is important when running a test with --loop (cherry picked from commit fc18f16260d15b3584d92f73cebafa3a552e2686) --- tests/integration/rdb.tcl | 8 ++++---- tests/support/server.tcl | 30 ++++++++++++++++++++++++++---- tests/unit/moduleapi/testrdb.tcl | 6 +++--- 3 files changed, 33 insertions(+), 11 deletions(-) diff --git a/tests/integration/rdb.tcl b/tests/integration/rdb.tcl index 9cd970350..58dc6c968 100644 --- a/tests/integration/rdb.tcl +++ b/tests/integration/rdb.tcl @@ -25,7 +25,7 @@ start_server [list overrides [list "dir" $server_path "dbfilename" "encodings.rd set server_path [tmpdir "server.rdb-startup-test"] -start_server [list overrides [list "dir" $server_path]] { +start_server [list overrides [list "dir" $server_path] keep_persistence true] { test {Server started empty with non-existing RDB file} { r debug digest } {0000000000000000000000000000000000000000} @@ -33,13 +33,13 @@ start_server [list overrides [list "dir" $server_path]] { r save } -start_server [list overrides [list "dir" $server_path]] { +start_server [list overrides [list "dir" $server_path] keep_persistence true] { test {Server started empty with empty RDB file} { r debug digest } {0000000000000000000000000000000000000000} } -start_server [list overrides [list "dir" $server_path]] { +start_server [list overrides [list "dir" $server_path] keep_persistence true] { test {Test RDB stream encoding} { for {set j 0} {$j < 1000} {incr j} { if {rand() < 0.9} { @@ -64,7 +64,7 @@ set defaults {} proc start_server_and_kill_it {overrides code} { upvar defaults defaults srv srv server_path server_path set config [concat $defaults $overrides] - set srv [start_server [list overrides $config]] + set srv [start_server [list overrides $config keep_persistence true]] uplevel 1 $code kill_server $srv } diff --git a/tests/support/server.tcl b/tests/support/server.tcl index 14e59e55c..6775b125a 100644 --- a/tests/support/server.tcl +++ b/tests/support/server.tcl @@ -31,6 +31,16 @@ proc check_valgrind_errors stderr { } } +proc clean_persistence config { + # we may wanna keep the logs for later, but let's clean the persistence + # files right away, since they can accumulate and take up a lot of space + set config [dict get $config "config"] + set rdb [format "%s/%s" [dict get $config "dir"] "dump.rdb"] + set aof [format "%s/%s" [dict get $config "dir"] "appendonly.aof"] + catch {exec rm -rf $rdb} + catch {exec rm -rf $aof} +} + proc kill_server config { # nothing to kill when running against external server if {$::external} return @@ -238,19 +248,27 @@ proc start_server {options {code undefined}} { set baseconfig "default.conf" set overrides {} set tags {} + set keep_persistence false # parse options foreach {option value} $options { switch $option { "config" { - set baseconfig $value } + set baseconfig $value + } "overrides" { - set overrides $value } + set overrides $value + } "tags" { set tags $value - set ::tags [concat $::tags $value] } + set ::tags [concat $::tags $value] + } + "keep_persistence" { + set keep_persistence $value + } default { - error "Unknown option $option" } + error "Unknown option $option" + } } } @@ -436,6 +454,10 @@ proc start_server {options {code undefined}} { set ::tags [lrange $::tags 0 end-[llength $tags]] kill_server $srv + if {!$keep_persistence} { + 
clean_persistence $srv + } + set _ "" } else { set ::tags [lrange $::tags 0 end-[llength $tags]] set _ $srv diff --git a/tests/unit/moduleapi/testrdb.tcl b/tests/unit/moduleapi/testrdb.tcl index 02c82c7c3..2298a73c5 100644 --- a/tests/unit/moduleapi/testrdb.tcl +++ b/tests/unit/moduleapi/testrdb.tcl @@ -12,7 +12,7 @@ tags "modules" { test {modules global are lost without aux} { set server_path [tmpdir "server.module-testrdb"] - start_server [list overrides [list loadmodule "$testmodule" "dir" $server_path]] { + start_server [list overrides [list loadmodule "$testmodule" "dir" $server_path] keep_persistence true] { r testrdb.set.before global1 assert_equal "global1" [r testrdb.get.before] } @@ -23,7 +23,7 @@ tags "modules" { test {modules are able to persist globals before and after} { set server_path [tmpdir "server.module-testrdb"] - start_server [list overrides [list loadmodule "$testmodule 2" "dir" $server_path]] { + start_server [list overrides [list loadmodule "$testmodule 2" "dir" $server_path] keep_persistence true] { r testrdb.set.before global1 r testrdb.set.after global2 assert_equal "global1" [r testrdb.get.before] @@ -38,7 +38,7 @@ tags "modules" { test {modules are able to persist globals just after} { set server_path [tmpdir "server.module-testrdb"] - start_server [list overrides [list loadmodule "$testmodule 1" "dir" $server_path]] { + start_server [list overrides [list loadmodule "$testmodule 1" "dir" $server_path] keep_persistence true] { r testrdb.set.after global2 assert_equal "global2" [r testrdb.get.after] } From 575d07b7a87eb39925e43f9db54ab0f503a8c6c5 Mon Sep 17 00:00:00 2001 From: Oran Agra Date: Mon, 31 Aug 2020 11:05:30 +0300 Subject: [PATCH 141/377] test infra - improve test skipping ability - skip full units - skip a single test (not just a list of tests) - when skipping tag, skip spinning up servers, not just the tests - skip tags when running against an external server too - allow using multiple tags (split them) (cherry picked from commit 5c61f1a6ed876186b944e79f903354cd81077bb6) --- tests/support/server.tcl | 69 ++++++++++++++++++++++++++++------------ tests/support/test.tcl | 11 +------ tests/test_helper.tcl | 47 +++++++++++++++++++++++---- 3 files changed, 91 insertions(+), 36 deletions(-) diff --git a/tests/support/server.tcl b/tests/support/server.tcl index 6775b125a..8ab8cf66a 100644 --- a/tests/support/server.tcl +++ b/tests/support/server.tcl @@ -160,7 +160,19 @@ proc server_is_up {host port retrynum} { # doesn't really belong here, but highly coupled to code in start_server proc tags {tags code} { + # If we 'tags' contain multiple tags, quoted and seperated by spaces, + # we want to get rid of the quotes in order to have a proper list + set tags [string map { \" "" } $tags] set ::tags [concat $::tags $tags] + # We skip unwanted tags + foreach tag $::denytags { + if {[lsearch $::tags $tag] >= 0} { + incr ::num_aborted + send_data_packet $::test_server_fd ignore "Tag: $tag" + set ::tags [lrange $::tags 0 end-[llength $tags]] + return + } + } uplevel 1 $code set ::tags [lrange $::tags 0 end-[llength $tags]] } @@ -226,24 +238,6 @@ proc wait_server_started {config_file stdout pid} { } proc start_server {options {code undefined}} { - # If we are running against an external server, we just push the - # host/port pair in the stack the first time - if {$::external} { - if {[llength $::servers] == 0} { - set srv {} - dict set srv "host" $::host - dict set srv "port" $::port - set client [redis $::host $::port 0 $::tls] - dict set srv "client" $client - $client 
select 9 - - # append the server to the stack - lappend ::servers $srv - } - uplevel 1 $code - return - } - # setup defaults set baseconfig "default.conf" set overrides {} @@ -260,8 +254,10 @@ proc start_server {options {code undefined}} { set overrides $value } "tags" { - set tags $value - set ::tags [concat $::tags $value] + # If we 'tags' contain multiple tags, quoted and seperated by spaces, + # we want to get rid of the quotes in order to have a proper list + set tags [string map { \" "" } $value] + set ::tags [concat $::tags $tags] } "keep_persistence" { set keep_persistence $value @@ -272,6 +268,39 @@ proc start_server {options {code undefined}} { } } + # We skip unwanted tags + foreach tag $::denytags { + if {[lsearch $::tags $tag] >= 0} { + incr ::num_aborted + send_data_packet $::test_server_fd ignore "Tag: $tag" + set ::tags [lrange $::tags 0 end-[llength $tags]] + return + } + } + + # If we are running against an external server, we just push the + # host/port pair in the stack the first time + if {$::external} { + if {[llength $::servers] == 0} { + set srv {} + dict set srv "host" $::host + dict set srv "port" $::port + set client [redis $::host $::port 0 $::tls] + dict set srv "client" $client + $client select 9 + + set config {} + dict set config "port" $::port + dict set srv "config" $config + + # append the server to the stack + lappend ::servers $srv + } + uplevel 1 $code + set ::tags [lrange $::tags 0 end-[llength $tags]] + return + } + set data [split [exec cat "tests/assets/$baseconfig"] "\n"] set config {} if {$::tls} { diff --git a/tests/support/test.tcl b/tests/support/test.tcl index d266eba41..773461abb 100644 --- a/tests/support/test.tcl +++ b/tests/support/test.tcl @@ -99,16 +99,7 @@ proc wait_for_condition {maxtries delay e _else_ elsescript} { } } -proc test {name code {okpattern undefined}} { - # abort if tagged with a tag to deny - foreach tag $::denytags { - if {[lsearch $::tags $tag] >= 0} { - incr ::num_aborted - send_data_packet $::test_server_fd ignore $name - return - } - } - +proc test {name code {okpattern undefined} {options undefined}} { # abort if test name in skiptests if {[lsearch $::skiptests $name] >= 0} { incr ::num_skipped diff --git a/tests/test_helper.tcl b/tests/test_helper.tcl index d0f962762..4a470ec30 100644 --- a/tests/test_helper.tcl +++ b/tests/test_helper.tcl @@ -85,6 +85,7 @@ set ::verbose 0 set ::quiet 0 set ::denytags {} set ::skiptests {} +set ::skipunits {} set ::allowtags {} set ::only_tests {} set ::single_tests {} @@ -423,6 +424,12 @@ proc lpop {listVar {count 1}} { set ele } +proc lremove {listVar value} { + upvar 1 $listVar var + set idx [lsearch -exact $var $value] + set var [lreplace $var $idx $idx] +} + # A new client is idle. Remove it from the list of active clients and # if there are still test units to run, launch them. proc signal_idle_client fd { @@ -521,11 +528,13 @@ proc print_help_screen {} { "--list-tests List all the available test units." "--only Just execute the specified test by test name. this option can be repeated." "--skip-till Skip all units until (and including) the specified one." + "--skipunit Skip one unit." "--clients Number of test clients (default 16)." "--timeout Test timeout in seconds (default 10 min)." "--force-failure Force the execution of a test that always fails." "--config Extra config file argument." "--skipfile Name of a file containing test names that should be skipped (one per line)." + "--skiptest Name of a file containing test names that should be skipped (one per line)." 
"--dont-clean Don't delete redis log files after the run." "--stop Blocks once the first test fails." "--loop Execute the specified set of tests forever." @@ -563,6 +572,9 @@ for {set j 0} {$j < [llength $argv]} {incr j} { set file_data [read $fp] close $fp set ::skiptests [split $file_data "\n"] + } elseif {$opt eq {--skiptest}} { + lappend ::skiptests $arg + incr j } elseif {$opt eq {--valgrind}} { set ::valgrind 1 } elseif {$opt eq {--stack-logging}} { @@ -601,6 +613,9 @@ for {set j 0} {$j < [llength $argv]} {incr j} { } elseif {$opt eq {--only}} { lappend ::only_tests $arg incr j + } elseif {$opt eq {--skipunit}} { + lappend ::skipunits $arg + incr j } elseif {$opt eq {--skip-till}} { set ::skip_till $arg incr j @@ -638,13 +653,23 @@ for {set j 0} {$j < [llength $argv]} {incr j} { } } -# If --skil-till option was given, we populate the list of single tests +set filtered_tests {} + +# Set the filtered tests to be the short list (single_tests) if exists. +# Otherwise, we start filtering all_tests +if {[llength $::single_tests] > 0} { + set filtered_tests $::single_tests +} else { + set filtered_tests $::all_tests +} + +# If --skip-till option was given, we populate the list of single tests # to run with everything *after* the specified unit. if {$::skip_till != ""} { set skipping 1 foreach t $::all_tests { - if {$skipping == 0} { - lappend ::single_tests $t + if {$skipping == 1} { + lremove filtered_tests $t } if {$t == $::skip_till} { set skipping 0 @@ -656,10 +681,20 @@ if {$::skip_till != ""} { } } +# If --skipunits option was given, we populate the list of single tests +# to run with everything *not* in the skipunits list. +if {[llength $::skipunits] > 0} { + foreach t $::all_tests { + if {[lsearch $::skipunits $t] != -1} { + lremove filtered_tests $t + } + } +} + # Override the list of tests with the specific tests we want to run -# in case there was some filter, that is --single or --skip-till options. -if {[llength $::single_tests] > 0} { - set ::all_tests $::single_tests +# in case there was some filter, that is --single, -skipunit or --skip-till options. +if {[llength $filtered_tests] < [llength $::all_tests]} { + set ::all_tests $filtered_tests } proc attach_to_replication_stream {} { From f180326b65e5981755581ac2b2a71db6eb401f4c Mon Sep 17 00:00:00 2001 From: Oran Agra Date: Mon, 31 Aug 2020 11:16:06 +0300 Subject: [PATCH 142/377] test infra - flushall between tests in external mode (cherry picked from commit 2468c17a3229ae37825466a18dce9a5272eeef30) --- tests/support/server.tcl | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/support/server.tcl b/tests/support/server.tcl index 8ab8cf66a..9894e6f2a 100644 --- a/tests/support/server.tcl +++ b/tests/support/server.tcl @@ -296,6 +296,7 @@ proc start_server {options {code undefined}} { # append the server to the stack lappend ::servers $srv } + r flushall uplevel 1 $code set ::tags [lrange $::tags 0 end-[llength $tags]] return From e001152825e4ef50584854a9d6bdd2b236e9f504 Mon Sep 17 00:00:00 2001 From: Oran Agra Date: Mon, 31 Aug 2020 11:20:02 +0300 Subject: [PATCH 143/377] test infra - wait_done_loading reduce code duplication in aof.tcl. 
move creation of clients into the test so that it can be skipped (cherry picked from commit cc455a710cc68d0fd8243cd1f04c5ee7332e4fdb) --- tests/integration/aof.tcl | 44 +++++++-------------------------------- tests/support/util.tcl | 8 +++++++ 2 files changed, 16 insertions(+), 36 deletions(-) diff --git a/tests/integration/aof.tcl b/tests/integration/aof.tcl index b82c87d71..d81521374 100644 --- a/tests/integration/aof.tcl +++ b/tests/integration/aof.tcl @@ -52,15 +52,9 @@ tags {"aof"} { assert_equal 1 [is_alive $srv] } - set client [redis [dict get $srv host] [dict get $srv port] 0 $::tls] - - wait_for_condition 50 100 { - [catch {$client ping} e] == 0 - } else { - fail "Loading DB is taking too much time." - } - test "Truncated AOF loaded: we expect foo to be equal to 5" { + set client [redis [dict get $srv host] [dict get $srv port] 0 $::tls] + wait_done_loading $client assert {[$client get foo] eq "5"} } @@ -75,15 +69,9 @@ tags {"aof"} { assert_equal 1 [is_alive $srv] } - set client [redis [dict get $srv host] [dict get $srv port] 0 $::tls] - - wait_for_condition 50 100 { - [catch {$client ping} e] == 0 - } else { - fail "Loading DB is taking too much time." - } - test "Truncated AOF loaded: we expect foo to be equal to 6 now" { + set client [redis [dict get $srv host] [dict get $srv port] 0 $::tls] + wait_done_loading $client assert {[$client get foo] eq "6"} } } @@ -183,11 +171,7 @@ tags {"aof"} { test "Fixed AOF: Keyspace should contain values that were parseable" { set client [redis [dict get $srv host] [dict get $srv port] 0 $::tls] - wait_for_condition 50 100 { - [catch {$client ping} e] == 0 - } else { - fail "Loading DB is taking too much time." - } + wait_done_loading $client assert_equal "hello" [$client get foo] assert_equal "" [$client get bar] } @@ -207,11 +191,7 @@ tags {"aof"} { test "AOF+SPOP: Set should have 1 member" { set client [redis [dict get $srv host] [dict get $srv port] 0 $::tls] - wait_for_condition 50 100 { - [catch {$client ping} e] == 0 - } else { - fail "Loading DB is taking too much time." - } + wait_done_loading $client assert_equal 1 [$client scard set] } } @@ -231,11 +211,7 @@ tags {"aof"} { test "AOF+SPOP: Set should have 1 member" { set client [redis [dict get $srv host] [dict get $srv port] 0 $::tls] - wait_for_condition 50 100 { - [catch {$client ping} e] == 0 - } else { - fail "Loading DB is taking too much time." - } + wait_done_loading $client assert_equal 1 [$client scard set] } } @@ -254,11 +230,7 @@ tags {"aof"} { test "AOF+EXPIRE: List should be empty" { set client [redis [dict get $srv host] [dict get $srv port] 0 $::tls] - wait_for_condition 50 100 { - [catch {$client ping} e] == 0 - } else { - fail "Loading DB is taking too much time." - } + wait_done_loading $client assert_equal 0 [$client llen list] } } diff --git a/tests/support/util.tcl b/tests/support/util.tcl index 979eccdf9..c698c255f 100644 --- a/tests/support/util.tcl +++ b/tests/support/util.tcl @@ -99,6 +99,14 @@ proc wait_for_ofs_sync {r1 r2} { } } +proc wait_done_loading r { + wait_for_condition 50 100 { + [catch {$r ping} e] == 0 + } else { + fail "Loading DB is taking too much time." 
+ } +} + # count current log lines in server's stdout proc count_log_lines {srv_idx} { set _ [exec wc -l < [srv $srv_idx stdout]] From 81476c0cf7e9a60aed45f01cc2c2230972acff36 Mon Sep 17 00:00:00 2001 From: Oran Agra Date: Mon, 31 Aug 2020 11:24:17 +0300 Subject: [PATCH 144/377] test infra - add durable mode to work around test suite crashing in some cases a command that returns an error possibly due to a timing issue causes the tcl code to crash and thus prevents the rest of the tests from running. this adds an option to make the test proceed despite the crash. maybe it should be the default mode some day. (cherry picked from commit cf22e8eb91c2c1a769fda4c4de9eba3163dd7f05) --- tests/support/server.tcl | 28 ++++++++++++++++++++++++++-- tests/support/test.tcl | 6 +++++- tests/test_helper.tcl | 4 ++++ 3 files changed, 35 insertions(+), 3 deletions(-) diff --git a/tests/support/server.tcl b/tests/support/server.tcl index 9894e6f2a..f2f6ceece 100644 --- a/tests/support/server.tcl +++ b/tests/support/server.tcl @@ -297,7 +297,20 @@ proc start_server {options {code undefined}} { lappend ::servers $srv } r flushall - uplevel 1 $code + if {[catch {set retval [uplevel 1 $code]} error]} { + if {$::durable} { + set msg [string range $error 10 end] + lappend details $msg + lappend details $::errorInfo + lappend ::tests_failed $details + + incr ::num_failed + send_data_packet $::test_server_fd err [join $details "\n"] + } else { + # Re-raise, let handler up the stack take care of this. + error $error $::errorInfo + } + } set ::tags [lrange $::tags 0 end-[llength $tags]] return } @@ -468,7 +481,18 @@ proc start_server {options {code undefined}} { } puts "" - error $error $backtrace + if {$::durable} { + set msg [string range $error 10 end] + lappend details $msg + lappend details $backtrace + lappend ::tests_failed $details + + incr ::num_failed + send_data_packet $::test_server_fd err [join $details "\n"] + } else { + # Re-raise, let handler up the stack take care of this. + error $error $backtrace + } } # fetch srv back from the server list, in case it was restarted by restart_server (new PID) diff --git a/tests/support/test.tcl b/tests/support/test.tcl index 773461abb..f5b4c8bef 100644 --- a/tests/support/test.tcl +++ b/tests/support/test.tcl @@ -149,9 +149,13 @@ proc test {name code {okpattern undefined} {options undefined}} { send_data_packet $::test_server_fd testing $name if {[catch {set retval [uplevel 1 $code]} error]} { - if {[string match "assertion:*" $error]} { + set assertion [string match "assertion:*" $error] + if {$assertion || $::durable} { set msg [string range $error 10 end] lappend details $msg + if {!$assertion} { + lappend details $::errorInfo + } lappend ::tests_failed $details incr ::num_failed diff --git a/tests/test_helper.tcl b/tests/test_helper.tcl index 4a470ec30..fe2d484b8 100644 --- a/tests/test_helper.tcl +++ b/tests/test_helper.tcl @@ -79,6 +79,7 @@ set ::baseport 21111; # initial port for spawned redis servers set ::portcount 8000; # we don't wanna use more than 10000 to avoid collision with cluster bus ports set ::traceleaks 0 set ::valgrind 0 +set ::durable 0 set ::tls 0 set ::stack_logging 0 set ::verbose 0 @@ -521,6 +522,7 @@ proc send_data_packet {fd status data} { proc print_help_screen {} { puts [join { "--valgrind Run the test over valgrind." + "--durable suppress test crashes and keep running" "--stack-logging Enable OSX leaks/malloc stack logging." "--accurate Run slow randomized tests for more iterations." "--quiet Don't show individual tests." 
@@ -633,6 +635,8 @@ for {set j 0} {$j < [llength $argv]} {incr j} { } elseif {$opt eq {--clients}} { set ::numclients $arg incr j + } elseif {$opt eq {--durable}} { + set ::durable 1 } elseif {$opt eq {--dont-clean}} { set ::dont_clean 1 } elseif {$opt eq {--wait-server}} { From 540841d6f76dfdba588865caedce526fdc810d7b Mon Sep 17 00:00:00 2001 From: Oran Agra Date: Sun, 6 Sep 2020 11:11:49 +0300 Subject: [PATCH 145/377] Improve valgrind support for cluster tests (#7725) - redirect valgrind reports to a dedicated file rather than console - try to avoid killing instances with SIGKILL so that we get the memory leak report (killing with SIGTERM before resorting to SIGKILL) - search for valgrind reports when done, print them and fail the tests - add --dont-clean option to keep the logs on exit - fix exit error code when crash is found (would have exited with 0) changes that affect the normal redis test suite: - refactor check_valgrind_errors into two functions one to search and one to report - move the search half into util.tcl to serve the cluster tests too - ignore "address range perms" valgrind warnings which seem non relevant. (cherry picked from commit da723a917dec7f2514d821a615668e158bb4f60c) --- tests/instances.tcl | 65 +++++++++++++++++++++++++++++++++++----- tests/support/server.tcl | 18 ++--------- tests/support/util.tcl | 23 ++++++++++++++ 3 files changed, 83 insertions(+), 23 deletions(-) diff --git a/tests/instances.tcl b/tests/instances.tcl index a43a4cc87..2029bc5f5 100644 --- a/tests/instances.tcl +++ b/tests/instances.tcl @@ -19,6 +19,7 @@ set ::verbose 0 set ::valgrind 0 set ::tls 0 set ::pause_on_error 0 +set ::dont_clean 0 set ::simulate_error 0 set ::failed 0 set ::sentinel_instances {} @@ -38,7 +39,7 @@ if {[catch {cd tmp}]} { # Execute the specified instance of the server specified by 'type', using # the provided configuration file. Returns the PID of the process. -proc exec_instance {type cfgfile} { +proc exec_instance {type dirname cfgfile} { if {$type eq "redis"} { set prgname redis-server } elseif {$type eq "sentinel"} { @@ -47,8 +48,9 @@ proc exec_instance {type cfgfile} { error "Unknown instance type." } + set errfile [file join $dirname err.txt] if {$::valgrind} { - set pid [exec valgrind --track-origins=yes --suppressions=../../../src/valgrind.sup --show-reachable=no --show-possibly-lost=no --leak-check=full ../../../src/${prgname} $cfgfile &] + set pid [exec valgrind --track-origins=yes --suppressions=../../../src/valgrind.sup --show-reachable=no --show-possibly-lost=no --leak-check=full ../../../src/${prgname} $cfgfile 2>> $errfile &] } else { set pid [exec ../../../src/${prgname} $cfgfile &] } @@ -93,7 +95,7 @@ proc spawn_instance {type base_port count {conf {}}} { # Finally exec it and remember the pid for later cleanup. 
set retry 100 while {$retry} { - set pid [exec_instance $type $cfgfile] + set pid [exec_instance $type $dirname $cfgfile] # Check availability if {[server_is_up 127.0.0.1 $port 100] == 0} { @@ -144,16 +146,60 @@ proc log_crashes {} { puts "\n*** Crash report found in $log ***" set found 1 } - if {$found} {puts $line} + if {$found} { + puts $line + incr ::failed + } } } + + set logs [glob */err.txt] + foreach log $logs { + set res [find_valgrind_errors $log] + if {$res != ""} { + puts $res + incr ::failed + } + } +} + +proc is_alive pid { + if {[catch {exec ps -p $pid} err]} { + return 0 + } else { + return 1 + } +} + +proc stop_instance pid { + catch {exec kill $pid} + if {$::valgrind} { + set max_wait 60000 + } else { + set max_wait 10000 + } + while {[is_alive $pid]} { + incr wait 10 + + if {$wait >= $max_wait} { + puts "Forcing process $pid to exit..." + catch {exec kill -KILL $pid} + } elseif {$wait % 1000 == 0} { + puts "Waiting for process $pid to exit..." + } + after 10 + } } proc cleanup {} { puts "Cleaning up..." - log_crashes foreach pid $::pids { - catch {exec kill -9 $pid} + puts "killing stale instance $pid" + stop_instance $pid + } + log_crashes + if {$::dont_clean} { + return } foreach dir $::dirs { catch {exec rm -rf $dir} @@ -178,6 +224,8 @@ proc parse_options {} { set ::run_matching "*${val}*" } elseif {$opt eq "--pause-on-error"} { set ::pause_on_error 1 + } elseif {$opt eq {--dont-clean}} { + set ::dont_clean 1 } elseif {$opt eq "--fail"} { set ::simulate_error 1 } elseif {$opt eq {--valgrind}} { @@ -191,6 +239,7 @@ proc parse_options {} { set ::tls 1 } elseif {$opt eq "--help"} { puts "--single Only runs tests specified by pattern." + puts "--dont-clean Keep log files on exit." puts "--pause-on-error Pause for manual inspection on error." puts "--fail Simulate a test failure." puts "--valgrind Run with valgrind." @@ -486,7 +535,7 @@ proc kill_instance {type id} { error "You tried to kill $type $id twice." } - exec kill -9 $pid + stop_instance $pid set_instance_attrib $type $id pid -1 set_instance_attrib $type $id link you_tried_to_talk_with_killed_instance @@ -521,7 +570,7 @@ proc restart_instance {type id} { # Execute the instance with its old setup and append the new pid # file for cleanup. 
- set pid [exec_instance $type $cfgfile] + set pid [exec_instance $type $dirname $cfgfile] set_instance_attrib $type $id pid $pid lappend ::pids $pid diff --git a/tests/support/server.tcl b/tests/support/server.tcl index f2f6ceece..f74d839ad 100644 --- a/tests/support/server.tcl +++ b/tests/support/server.tcl @@ -13,21 +13,9 @@ proc start_server_error {config_file error} { } proc check_valgrind_errors stderr { - set fd [open $stderr] - set buf [read $fd] - close $fd - - # look for stack trace and other errors, or the absense of a leak free summary - if {[regexp -- { at 0x} $buf] || - [regexp -- {Warning} $buf] || - [regexp -- {Invalid} $buf] || - [regexp -- {Mismatched} $buf] || - [regexp -- {uninitialized} $buf] || - [regexp -- {has a fishy} $buf] || - [regexp -- {overlap} $buf] || - (![regexp -- {definitely lost: 0 bytes} $buf] && - ![regexp -- {no leaks are possible} $buf])} { - send_data_packet $::test_server_fd err "Valgrind error: $buf\n" + set res [find_valgrind_errors $stderr] + if {$res != ""} { + send_data_packet $::test_server_fd err "Valgrind error: $res\n" } } diff --git a/tests/support/util.tcl b/tests/support/util.tcl index c698c255f..ecf9f230f 100644 --- a/tests/support/util.tcl +++ b/tests/support/util.tcl @@ -432,6 +432,29 @@ proc colorstr {color str} { } } +proc find_valgrind_errors {stderr} { + set fd [open $stderr] + set buf [read $fd] + close $fd + + # Look for stack trace (" at 0x") and other errors (Invalid, Mismatched, etc). + # Look for "Warnings", but not the "set address range perms". These don't indicate any real concern. + # Look for the absense of a leak free summary (happens when redis isn't terminated properly). + if {[regexp -- { at 0x} $buf] || + [regexp -- {^(?=.*Warning)(?:(?!set address range perms).)*$} $buf] || + [regexp -- {Invalid} $buf] || + [regexp -- {Mismatched} $buf] || + [regexp -- {uninitialized} $buf] || + [regexp -- {has a fishy} $buf] || + [regexp -- {overlap} $buf] || + (![regexp -- {definitely lost: 0 bytes} $buf] && + ![regexp -- {no leaks are possible} $buf])} { + return $buf + } + + return "" +} + # Execute a background process writing random data for the specified number # of seconds to the specified Redis instance. proc start_write_load {host port seconds} { From d37b03432179acdfe42384174d111e19442de1bf Mon Sep 17 00:00:00 2001 From: Oran Agra Date: Mon, 7 Sep 2020 16:26:11 +0300 Subject: [PATCH 146/377] fix broken cluster/sentinel tests by recent commit (#7752) da723a917 added a file for stderr to keep valgrind log but i forgot to add a similar thing when valgrind isn't being used. the result is that `glob */err.txt` fails. (cherry picked from commit 470de9a516b0dcb92acb8cf2841ddac604bcbd3a) --- tests/instances.tcl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/instances.tcl b/tests/instances.tcl index 2029bc5f5..82c35854b 100644 --- a/tests/instances.tcl +++ b/tests/instances.tcl @@ -52,7 +52,7 @@ proc exec_instance {type dirname cfgfile} { if {$::valgrind} { set pid [exec valgrind --track-origins=yes --suppressions=../../../src/valgrind.sup --show-reachable=no --show-possibly-lost=no --leak-check=full ../../../src/${prgname} $cfgfile 2>> $errfile &] } else { - set pid [exec ../../../src/${prgname} $cfgfile &] + set pid [exec ../../../src/${prgname} $cfgfile 2>> $errfile &] } return $pid } From 9275c8b990530cf8e78d2ed687a635182b65c9c6 Mon Sep 17 00:00:00 2001 From: Yossi Gottlieb Date: Mon, 7 Sep 2020 17:30:36 +0300 Subject: [PATCH 147/377] Tests: fix unmonitored servers. 
(#7756) There is an inherent race condition in port allocation for spawned servers. If a server fails to start because a port is taken, a new port is allocated. This fixes a problem where the logs are not truncated and as a result a large number of unmonitored servers are started. (cherry picked from commit 871e85b8a75a53f90044ac04b0f5a9ba415c3bfa) --- tests/support/server.tcl | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/tests/support/server.tcl b/tests/support/server.tcl index f74d839ad..30d0d4045 100644 --- a/tests/support/server.tcl +++ b/tests/support/server.tcl @@ -383,6 +383,11 @@ proc start_server {options {code undefined}} { dict set config port $port } create_server_config_file $config_file $config + + # Truncate log so wait_server_started will not be looking at + # output of the failed server. + close [open $stdout "w"] + continue; # Try again } From 95966ceb24962538b74a7d1a982817cbe36558ec Mon Sep 17 00:00:00 2001 From: Oran Agra Date: Mon, 7 Sep 2020 18:06:25 +0300 Subject: [PATCH 148/377] Fix cluster consistency-check test (#7754) This test was failing from time to time see discussion at the bottom of #7635 This was probably due to timing, the DEBUG SLEEP executed by redis-cli didn't sleep for enough time. This commit changes: 1) use SET-ACTIVE-EXPIRE instead of DEBUG SLEEP 2) reduce many `after` sleeps with retry loops to speed up the test. 3) add many comment explaining the different steps of the test and it's purpose. 4) config appendonly before populating the volatile keys, so that they'll be part of the AOF command stream rather than the preamble RDB portion. other complications: recently kill_instance switched from SIGKILL to SIGTERM, and this would sometimes fail since there was an AOFRW running in the background. now we wait for it to end before attempting the kill. 
(cherry picked from commit 541d2709a0bd1a7f88681afa001c714b19df5dc1) --- tests/cluster/tests/14-consistency-check.tcl | 86 +++++++++++++------- 1 file changed, 56 insertions(+), 30 deletions(-) diff --git a/tests/cluster/tests/14-consistency-check.tcl b/tests/cluster/tests/14-consistency-check.tcl index 5a80dd0df..ddc0570e6 100644 --- a/tests/cluster/tests/14-consistency-check.tcl +++ b/tests/cluster/tests/14-consistency-check.tcl @@ -39,53 +39,79 @@ proc cluster_write_keys_with_expire {id ttl} { $cluster close } +# make sure that replica who restarts from persistence will load keys +# that have already expired, critical for correct execution of commands +# that arrive from the master proc test_slave_load_expired_keys {aof} { test "Slave expired keys is loaded when restarted: appendonly=$aof" { set master_id [find_non_empty_master] set replica_id [get_one_of_my_replica $master_id] - set master_dbsize [R $master_id dbsize] - set slave_dbsize [R $replica_id dbsize] - assert_equal $master_dbsize $slave_dbsize - - set data_ttl 5 - cluster_write_keys_with_expire $master_id $data_ttl - after 100 - set replica_dbsize_1 [R $replica_id dbsize] - assert {$replica_dbsize_1 > $slave_dbsize} + set master_dbsize_0 [R $master_id dbsize] + set replica_dbsize_0 [R $replica_id dbsize] + assert_equal $master_dbsize_0 $replica_dbsize_0 + # config the replica persistency and rewrite the config file to survive restart + # note that this needs to be done before populating the volatile keys since + # that triggers and AOFRW, and we rather the AOF file to have SETEX commands + # rather than an RDB with volatile keys R $replica_id config set appendonly $aof R $replica_id config rewrite - set start_time [clock seconds] - set end_time [expr $start_time+$data_ttl+2] - R $replica_id save - set replica_dbsize_2 [R $replica_id dbsize] - assert {$replica_dbsize_2 > $slave_dbsize} + # fill with 100 keys with 3 second TTL + set data_ttl 3 + cluster_write_keys_with_expire $master_id $data_ttl + + # wait for replica to be in sync with master + wait_for_condition 500 10 { + [R $replica_id dbsize] eq [R $master_id dbsize] + } else { + fail "replica didn't sync" + } + + set replica_dbsize_1 [R $replica_id dbsize] + assert {$replica_dbsize_1 > $replica_dbsize_0} + + # make replica create persistence file + if {$aof == "yes"} { + # we need to wait for the initial AOFRW to be done, otherwise + # kill_instance (which now uses SIGTERM will fail ("Writing initial AOF, can't exit") + wait_for_condition 100 10 { + [RI $replica_id aof_rewrite_in_progress] eq 0 + } else { + fail "keys didn't expire" + } + } else { + R $replica_id save + } + + # kill the replica (would stay down until re-started) kill_instance redis $replica_id - set master_port [get_instance_attrib redis $master_id port] - exec ../../../src/redis-cli \ - -h 127.0.0.1 -p $master_port \ - {*}[rediscli_tls_config "../../../tests"] \ - debug sleep [expr $data_ttl+3] > /dev/null & + # Make sure the master doesn't do active expire (sending DELs to the replica) + R $master_id DEBUG SET-ACTIVE-EXPIRE 0 - while {[clock seconds] <= $end_time} { - #wait for $data_ttl seconds - } + # wait for all the keys to get logically expired + after [expr $data_ttl*1000] + + # start the replica again (loading an RDB or AOF file) restart_instance redis $replica_id - wait_for_condition 200 50 { - [R $replica_id ping] eq {PONG} - } else { - fail "replica #$replica_id not started" - } - + # make sure the keys are still there set replica_dbsize_3 [R $replica_id dbsize] - assert {$replica_dbsize_3 > 
$slave_dbsize} + assert {$replica_dbsize_3 > $replica_dbsize_0} + + # restore settings + R $master_id DEBUG SET-ACTIVE-EXPIRE 1 + + # wait for the master to expire all keys and replica to get the DELs + wait_for_condition 500 10 { + [R $replica_id dbsize] eq $master_dbsize_0 + } else { + fail "keys didn't expire" + } } } test_slave_load_expired_keys no -after 5000 test_slave_load_expired_keys yes From cb4f96657b1d50338d274ef0efea22ca8acf4588 Mon Sep 17 00:00:00 2001 From: "bodong.ybd" Date: Tue, 8 Sep 2020 10:45:03 +0800 Subject: [PATCH 149/377] Tests: Some fixes for macOS 1) cur_test: when restart_server, "no such variable" error occurs ./runtest --single integration/rdb test {client freed during loading} SET ::cur_test restart_server kill_server test "Check for memory leaks (pid $pid)" SET ::cur_test UNSET ::cur_test UNSET ::cur_test // This global variable has been unset. 2) `ps --ppid` not available on macOS platform, can be replaced with `pgrep -P pid`. (cherry picked from commit e90385e2232d41fd7c40dc239279f9837e7bdf57) --- tests/support/test.tcl | 1 - tests/support/util.tcl | 25 +++++++++++++++++++++++++ tests/unit/oom-score-adj.tcl | 11 +---------- 3 files changed, 26 insertions(+), 11 deletions(-) diff --git a/tests/support/test.tcl b/tests/support/test.tcl index f5b4c8bef..54d323fa2 100644 --- a/tests/support/test.tcl +++ b/tests/support/test.tcl @@ -190,5 +190,4 @@ proc test {name code {okpattern undefined} {options undefined}} { send_data_packet $::test_server_fd err "Detected a memory leak in test '$name': $output" } } - unset ::cur_test } diff --git a/tests/support/util.tcl b/tests/support/util.tcl index ecf9f230f..b9a65358f 100644 --- a/tests/support/util.tcl +++ b/tests/support/util.tcl @@ -494,3 +494,28 @@ proc start_bg_complex_data {host port db ops} { proc stop_bg_complex_data {handle} { catch {exec /bin/kill -9 $handle} } + +proc populate {num prefix size} { + set rd [redis_deferring_client] + for {set j 0} {$j < $num} {incr j} { + $rd set $prefix$j [string repeat A $size] + } + for {set j 0} {$j < $num} {incr j} { + $rd read + } + $rd close +} + +proc get_child_pid {idx} { + set pid [srv $idx pid] + if {[string match {*Darwin*} [exec uname -a]]} { + set fd [open "|pgrep -P $pid" "r"] + set child_pid [string trim [lindex [split [read $fd] \n] 0]] + } else { + set fd [open "|ps --ppid $pid -o pid" "r"] + set child_pid [string trim [lindex [split [read $fd] \n] 1]] + } + close $fd + + return $child_pid +} diff --git a/tests/unit/oom-score-adj.tcl b/tests/unit/oom-score-adj.tcl index 993004602..8eb09a993 100644 --- a/tests/unit/oom-score-adj.tcl +++ b/tests/unit/oom-score-adj.tcl @@ -14,15 +14,6 @@ if {$system_name eq {linux}} { return $val } - proc get_child_pid {} { - set pid [srv 0 pid] - set fd [open "|ps --ppid $pid -o pid" "r"] - set child_pid [string trim [lindex [split [read $fd] \n] 1]] - close $fd - - return $child_pid - } - test {CONFIG SET oom-score-adj works as expected} { set base [get_oom_score_adj] @@ -47,7 +38,7 @@ if {$system_name eq {linux}} { r config set rdb-key-save-delay 100000 r bgsave - set child_pid [get_child_pid] + set child_pid [get_child_pid 0] assert {[get_oom_score_adj $child_pid] == [expr $base + 30]} } From 25efadabbe4d650110e0d27e4c050c35325776f0 Mon Sep 17 00:00:00 2001 From: Oran Agra Date: Tue, 8 Sep 2020 10:59:25 +0300 Subject: [PATCH 150/377] Add daily CI for MacOS (#7759) (cherry picked from commit 1701f23b1f8fa4a0f4808e450c50b779674a3e42) --- .github/workflows/daily.yml | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff 
--git a/.github/workflows/daily.yml b/.github/workflows/daily.yml index 5b395351b..07cd55c87 100644 --- a/.github/workflows/daily.yml +++ b/.github/workflows/daily.yml @@ -168,3 +168,21 @@ jobs: ./runtest-cluster --tls ./runtest-cluster + test-macos-latest: + runs-on: macos-latest + if: github.repository == 'redis/redis' + timeout-minutes: 14400 + steps: + - uses: actions/checkout@v2 + - name: make + run: make + - name: test + run: | + ./runtest --accurate --verbose + - name: module api test + run: ./runtest-moduleapi --verbose + - name: sentinel tests + run: ./runtest-sentinel + - name: cluster tests + run: ./runtest-cluster + From 826b49bcb49db2207bf6eff49d55f8f16a1e5224 Mon Sep 17 00:00:00 2001 From: Oran Agra Date: Tue, 8 Sep 2020 14:12:03 +0300 Subject: [PATCH 151/377] handle cur_test for nested tests if there are nested tests and nested servers, we need to restore the previous value of cur_test when a test exist. example: ``` test{test 1} { start_server { test{test 1.1 - master only} { } start_server { test{test 1.2 - with replication} { } } } } ``` when `test 1.1 - master only exists`, we're still inside `test 1` (cherry picked from commit 610b4ff16a62062338588c4508a73784fb962c0b) --- tests/support/test.tcl | 3 +++ 1 file changed, 3 insertions(+) diff --git a/tests/support/test.tcl b/tests/support/test.tcl index 54d323fa2..55937b8f4 100644 --- a/tests/support/test.tcl +++ b/tests/support/test.tcl @@ -4,6 +4,7 @@ set ::num_failed 0 set ::num_skipped 0 set ::num_aborted 0 set ::tests_failed {} +set ::cur_test "" proc fail {msg} { error "assertion:$msg" @@ -136,6 +137,7 @@ proc test {name code {okpattern undefined} {options undefined}} { # set a cur_test global to be logged into new servers that are spown # and log the test name in all existing servers + set prev_test $::cur_test set ::cur_test "$name in $::curfile" if {!$::external} { foreach srv $::servers { @@ -190,4 +192,5 @@ proc test {name code {okpattern undefined} {options undefined}} { send_data_packet $::test_server_fd err "Detected a memory leak in test '$name': $output" } } + set ::cur_test $prev_test } From aa54351c5bc4ae3ec98b67c805ef5b5b5f2ab424 Mon Sep 17 00:00:00 2001 From: Yossi Gottlieb Date: Tue, 8 Sep 2020 16:00:20 +0300 Subject: [PATCH 152/377] Fix CONFIG REWRITE of oom-score-adj-values. (#7761) (cherry picked from commit b3782098ae594a5457a9de74ce5e15f1629d077d) --- src/config.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/config.c b/src/config.c index 52acb527b..de1269e48 100644 --- a/src/config.c +++ b/src/config.c @@ -1422,7 +1422,8 @@ void rewriteConfigOOMScoreAdjValuesOption(struct rewriteConfigState *state) { char *option = "oom-score-adj-values"; sds line; - line = sdsempty(); + line = sdsnew(option); + line = sdscatlen(line, " ", 1); for (j = 0; j < CONFIG_OOM_COUNT; j++) { if (server.oom_score_adj_values[j] != configOOMScoreAdjValuesDefaults[j]) force = 1; From 7fa69e6394093fb76032fb52ed48ea56de566766 Mon Sep 17 00:00:00 2001 From: Eran Liberty Date: Wed, 9 Sep 2020 09:35:42 +0300 Subject: [PATCH 153/377] Allow exec with read commands on readonly replica in cluster (#7766) There was a bug. Although cluster replicas would allow read commands, they would not allow a MULTI-EXEC that's composed solely of read commands. Adds tests for coverage. 
Co-authored-by: Oran Agra Co-authored-by: Eran Liberty (cherry picked from commit 7bee51bb5b2cccbaae76f4721761880acf4d5a93) --- src/cluster.c | 4 +- .../tests/16-transactions-on-replica.tcl | 48 +++++++++++++++++++ tests/instances.tcl | 10 +++- 3 files changed, 59 insertions(+), 3 deletions(-) create mode 100644 tests/cluster/tests/16-transactions-on-replica.tcl diff --git a/src/cluster.c b/src/cluster.c index 17d21df29..8d8b61ab4 100644 --- a/src/cluster.c +++ b/src/cluster.c @@ -5765,8 +5765,10 @@ clusterNode *getNodeByQuery(client *c, struct redisCommand *cmd, robj **argv, in /* Handle the read-only client case reading from a slave: if this * node is a slave and the request is about an hash slot our master * is serving, we can reply without redirection. */ + int is_readonly_command = (c->cmd->flags & CMD_READONLY) || + (c->cmd->proc == execCommand && !(c->mstate.cmd_inv_flags & CMD_READONLY)); if (c->flags & CLIENT_READONLY && - (cmd->flags & CMD_READONLY || cmd->proc == evalCommand || + (is_readonly_command || cmd->proc == evalCommand || cmd->proc == evalShaCommand) && nodeIsSlave(myself) && myself->slaveof == n) diff --git a/tests/cluster/tests/16-transactions-on-replica.tcl b/tests/cluster/tests/16-transactions-on-replica.tcl new file mode 100644 index 000000000..da9dff1ca --- /dev/null +++ b/tests/cluster/tests/16-transactions-on-replica.tcl @@ -0,0 +1,48 @@ +# Check basic transactions on a replica. + +source "../tests/includes/init-tests.tcl" + +test "Create a primary with a replica" { + create_cluster 1 1 +} + +test "Cluster should start ok" { + assert_cluster_state ok +} + +set primary [Rn 0] +set replica [Rn 1] + +test "Cant read from replica without READONLY" { + $primary SET a 1 + catch {$replica GET a} err + assert {[string range $err 0 4] eq {MOVED}} +} + +test "Can read from replica after READONLY" { + $replica READONLY + assert {[$replica GET a] eq {1}} +} + +test "Can preform HSET primary and HGET from replica" { + $primary HSET h a 1 + $primary HSET h b 2 + $primary HSET h c 3 + assert {[$replica HGET h a] eq {1}} + assert {[$replica HGET h b] eq {2}} + assert {[$replica HGET h c] eq {3}} +} + +test "Can MULTI-EXEC transaction of HGET operations from replica" { + $replica MULTI + assert {[$replica HGET h a] eq {QUEUED}} + assert {[$replica HGET h b] eq {QUEUED}} + assert {[$replica HGET h c] eq {QUEUED}} + assert {[$replica EXEC] eq {1 2 3}} +} + +test "MULTI-EXEC with write operations is MOVED" { + $replica MULTI + catch {$replica HSET h b 4} err + assert {[string range $err 0 4] eq {MOVED}} +} diff --git a/tests/instances.tcl b/tests/instances.tcl index 82c35854b..2199cfcd4 100644 --- a/tests/instances.tcl +++ b/tests/instances.tcl @@ -422,10 +422,16 @@ proc S {n args} { [dict get $s link] {*}$args } +# Returns a Redis instance by index. +# Example: +# [Rn 0] info +proc Rn {n} { + return [dict get [lindex $::redis_instances $n] link] +} + # Like R but to chat with Redis instances. proc R {n args} { - set r [lindex $::redis_instances $n] - [dict get $r link] {*}$args + [Rn $n] {*}$args } proc get_info_field {info field} { From 82aee21c9281b46851150bd1432564681f9ec5a5 Mon Sep 17 00:00:00 2001 From: Yossi Gottlieb Date: Wed, 9 Sep 2020 12:30:43 +0300 Subject: [PATCH 154/377] Tests: clean up stale .cli files. 
(#7768) (cherry picked from commit e5b1ad413bdc05e6539dbaa23b5114e15103516e) --- tests/integration/redis-cli.tcl | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tests/integration/redis-cli.tcl b/tests/integration/redis-cli.tcl index 2d4145ff0..1e346a9a5 100644 --- a/tests/integration/redis-cli.tcl +++ b/tests/integration/redis-cli.tcl @@ -182,6 +182,7 @@ start_server {tags {"cli"}} { set tmpfile [write_tmpfile "from file"] assert_equal "OK" [run_cli_with_input_file $tmpfile set key] assert_equal "from file" [r get key] + file delete $tmpfile } test_nontty_cli "Status reply" { @@ -215,6 +216,7 @@ start_server {tags {"cli"}} { set tmpfile [write_tmpfile "from file"] assert_equal "OK" [run_cli_with_input_file $tmpfile set key] assert_equal "from file" [r get key] + file delete $tmpfile } proc test_redis_cli_rdb_dump {} { From b8dec46e0b5b63faf518017aa74d0ffc458fe992 Mon Sep 17 00:00:00 2001 From: Jan-Erik Rediger Date: Wed, 9 Sep 2020 14:06:04 +0200 Subject: [PATCH 155/377] Check that THP is not set to always (madvise is ok) (#4001) THP can also be set to madvise, in which case it shouldn't cause problems for Redis since redis (or the allocator) doesn't use madvise to activate it. (cherry picked from commit 60097d361d4096d3826c7580acffd4053f8a4835) --- src/latency.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/latency.c b/src/latency.c index dfdc6668c..b5ccc7cc6 100644 --- a/src/latency.c +++ b/src/latency.c @@ -71,7 +71,7 @@ int THPIsEnabled(void) { return 0; } fclose(fp); - return (strstr(buf,"[never]") == NULL) ? 1 : 0; + return (strstr(buf,"[always]") != NULL) ? 1 : 0; } #endif From dc11921961592b0b362674cd2e40605a056b2a23 Mon Sep 17 00:00:00 2001 From: Itamar Haber Date: Wed, 9 Sep 2020 15:09:41 +0300 Subject: [PATCH 156/377] Documents RM_Call's fmt (#5448) Improve RM_Call inline documentation about the fmt argument so that we don't completely depend on the web docs. Co-authored-by: Oran Agra (cherry picked from commit c13fa0aa3619c595f06e191a30710d85a109ad48) --- src/module.c | 25 +++++++++++++++++++++++++ 1 file changed, 25 insertions(+) diff --git a/src/module.c b/src/module.c index 9b44d5ecc..b7321f7d9 100644 --- a/src/module.c +++ b/src/module.c @@ -3303,6 +3303,23 @@ fmterr: } /* Exported API to call any Redis command from modules. + * + * * **cmdname**: The Redis command to call. + * * **fmt**: A format specifier string for the command's arguments. Each + * of the arguments should be specified by a valid type specification: + * b The argument is a buffer and is immediately followed by another + * argument that is the buffer's length. + * c The argument is a pointer to a plain C string (null-terminated). + * l The argument is long long integer. + * s The argument is a RedisModuleString. + * v The argument(s) is a vector of RedisModuleString. + * + * The format specifier can also include modifiers: + * ! Sends the Redis command and its arguments to replicas and AOF. + * A Suppress AOF propagation, send only to replicas (requires `!`). + * R Suppress replicas propagation, send only to AOF (requires `!`). + * * **...**: The actual arguments to the Redis command. + * * On success a RedisModuleCallReply object is returned, otherwise * NULL is returned and errno is set to the following values: * @@ -3314,6 +3331,14 @@ fmterr: * in a readonly state. * ENETDOWN: operation in Cluster instance when cluster is down. 
* + * Example code fragment: + * + * reply = RedisModule_Call(ctx,"INCRBY","sc",argv[1],"10"); + * if (RedisModule_CallReplyType(reply) == REDISMODULE_REPLY_INTEGER) { + * long long myval = RedisModule_CallReplyInteger(reply); + * // Do something with myval. + * } + * * This API is documented here: https://redis.io/topics/modules-intro */ RedisModuleCallReply *RM_Call(RedisModuleCtx *ctx, const char *cmdname, const char *fmt, ...) { From de8c720c74008e7b6c576f647fad592df6be2ce9 Mon Sep 17 00:00:00 2001 From: Oran Agra Date: Wed, 9 Sep 2020 15:39:57 +0300 Subject: [PATCH 157/377] Change THP warning to use madvise rather than never (#7771) completes 60097d361d4096d3826c7580acffd4053f8a4835 (cherry picked from commit 73e0cd5a7d7c1af90f58b6af260acca4b7eb795e) --- src/server.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/server.c b/src/server.c index 990b23b7d..6447c69e4 100644 --- a/src/server.c +++ b/src/server.c @@ -4728,7 +4728,7 @@ void linuxMemoryWarnings(void) { serverLog(LL_WARNING,"WARNING overcommit_memory is set to 0! Background save may fail under low memory condition. To fix this issue add 'vm.overcommit_memory = 1' to /etc/sysctl.conf and then reboot or run the command 'sysctl vm.overcommit_memory=1' for this to take effect."); } if (THPIsEnabled()) { - serverLog(LL_WARNING,"WARNING you have Transparent Huge Pages (THP) support enabled in your kernel. This will create latency and memory usage issues with Redis. To fix this issue run the command 'echo never > /sys/kernel/mm/transparent_hugepage/enabled' as root, and add it to your /etc/rc.local in order to retain the setting after a reboot. Redis must be restarted after THP is disabled."); + serverLog(LL_WARNING,"WARNING you have Transparent Huge Pages (THP) support enabled in your kernel. This will create latency and memory usage issues with Redis. To fix this issue run the command 'echo madvise > /sys/kernel/mm/transparent_hugepage/enabled' as root, and add it to your /etc/rc.local in order to retain the setting after a reboot. Redis must be restarted after THP is disabled (set to 'madvise' or 'never')."); } } #endif /* __linux__ */ From 8fb8c237465da375175dcce87fbd34b1f34da65c Mon Sep 17 00:00:00 2001 From: Yossi Gottlieb Date: Wed, 9 Sep 2020 15:43:11 +0300 Subject: [PATCH 158/377] Tests: validate CONFIG REWRITE for all params. (#7764) This is a catch-all test to confirm that that rewrite produces a valid output for all parameters and that this process does not introduce undesired configuration changes. (cherry picked from commit 995f1fc53f7daf3d289d5d70d7b45cdd486dc6cc) --- src/config.c | 12 +++++++++--- src/debug.c | 7 +++++++ src/sentinel.c | 2 +- src/server.c | 2 +- src/server.h | 2 +- tests/unit/introspection.tcl | 24 ++++++++++++++++++++++++ 6 files changed, 43 insertions(+), 6 deletions(-) diff --git a/src/config.c b/src/config.c index de1269e48..2c69540db 100644 --- a/src/config.c +++ b/src/config.c @@ -1055,6 +1055,8 @@ struct rewriteConfigState { sds *lines; /* Current lines as an array of sds strings */ int has_tail; /* True if we already added directives that were not present in the original config file. */ + int force_all; /* True if we want all keywords to be force + written. Currently only used for testing. */ }; /* Append the new line to the current configuration state. 
*/ @@ -1101,6 +1103,7 @@ struct rewriteConfigState *rewriteConfigReadOldFile(char *path) { state->numlines = 0; state->lines = NULL; state->has_tail = 0; + state->force_all = 0; if (fp == NULL) return state; /* Read the old file line by line, populate the state. */ @@ -1179,7 +1182,7 @@ void rewriteConfigRewriteLine(struct rewriteConfigState *state, const char *opti rewriteConfigMarkAsProcessed(state,option); - if (!l && !force) { + if (!l && !force && !state->force_all) { /* Option not used previously, and we are not forced to use it. */ sdsfree(line); sdsfree(o); @@ -1603,15 +1606,18 @@ cleanup: * * Configuration parameters that are at their default value, unless already * explicitly included in the old configuration file, are not rewritten. + * The force_all flag overrides this behavior and forces everything to be + * written. This is currently only used for testing purposes. * * On error -1 is returned and errno is set accordingly, otherwise 0. */ -int rewriteConfig(char *path) { +int rewriteConfig(char *path, int force_all) { struct rewriteConfigState *state; sds newcontent; int retval; /* Step 1: read the old config into our rewrite state. */ if ((state = rewriteConfigReadOldFile(path)) == NULL) return -1; + if (force_all) state->force_all = 1; /* Step 2: rewrite every single option, replacing or appending it inside * the rewrite state. */ @@ -2405,7 +2411,7 @@ NULL addReplyError(c,"The server is running without a config file"); return; } - if (rewriteConfig(server.configfile) == -1) { + if (rewriteConfig(server.configfile, 0) == -1) { serverLog(LL_WARNING,"CONFIG REWRITE failed: %s", strerror(errno)); addReplyErrorFormat(c,"Rewriting config file: %s", strerror(errno)); } else { diff --git a/src/debug.c b/src/debug.c index 0bea69876..4831c4d74 100644 --- a/src/debug.c +++ b/src/debug.c @@ -397,6 +397,7 @@ void debugCommand(client *c) { "STRUCTSIZE -- Return the size of different Redis core C structures.", "ZIPLIST -- Show low level info about the ziplist encoding.", "STRINGMATCH-TEST -- Run a fuzz tester against the stringmatchlen() function.", +"CONFIG-REWRITE-FORCE-ALL -- Like CONFIG REWRITE but writes all configuration options, including keywords not listed in original configuration file or default values.", #ifdef USE_JEMALLOC "MALLCTL [] -- Get or set a malloc tunning integer.", "MALLCTL-STR [] -- Get or set a malloc tunning string.", @@ -794,6 +795,12 @@ NULL { stringmatchlen_fuzz_test(); addReplyStatus(c,"Apparently Redis did not crash: test passed"); + } else if (!strcasecmp(c->argv[1]->ptr,"config-rewrite-force-all") && c->argc == 2) + { + if (rewriteConfig(server.configfile, 1) == -1) + addReplyError(c, "CONFIG-REWRITE-FORCE-ALL failed"); + else + addReply(c, shared.ok); #ifdef USE_JEMALLOC } else if(!strcasecmp(c->argv[1]->ptr,"mallctl") && c->argc >= 3) { mallctl_int(c, c->argv+2, c->argc-2); diff --git a/src/sentinel.c b/src/sentinel.c index 5bd594955..bdc339674 100644 --- a/src/sentinel.c +++ b/src/sentinel.c @@ -1954,7 +1954,7 @@ void sentinelFlushConfig(void) { int rewrite_status; server.hz = CONFIG_DEFAULT_HZ; - rewrite_status = rewriteConfig(server.configfile); + rewrite_status = rewriteConfig(server.configfile, 0); server.hz = saved_hz; if (rewrite_status == -1) goto werr; diff --git a/src/server.c b/src/server.c index 6447c69e4..a7a36df13 100644 --- a/src/server.c +++ b/src/server.c @@ -2488,7 +2488,7 @@ int restartServer(int flags, mstime_t delay) { /* Config rewriting. 
*/ if (flags & RESTART_SERVER_CONFIG_REWRITE && server.configfile && - rewriteConfig(server.configfile) == -1) + rewriteConfig(server.configfile, 0) == -1) { serverLog(LL_WARNING,"Can't restart: configuration rewrite process " "failed"); diff --git a/src/server.h b/src/server.h index c42955b94..d77df93b5 100644 --- a/src/server.h +++ b/src/server.h @@ -2089,7 +2089,7 @@ void appendServerSaveParams(time_t seconds, int changes); void resetServerSaveParams(void); struct rewriteConfigState; /* Forward declaration to export API. */ void rewriteConfigRewriteLine(struct rewriteConfigState *state, const char *option, sds line, int force); -int rewriteConfig(char *path); +int rewriteConfig(char *path, int force_all); void initConfigValues(); /* db.c -- Keyspace access API */ diff --git a/tests/unit/introspection.tcl b/tests/unit/introspection.tcl index d681e06d5..37470c068 100644 --- a/tests/unit/introspection.tcl +++ b/tests/unit/introspection.tcl @@ -134,4 +134,28 @@ start_server {tags {"introspection"}} { } } + + # Do a force-all config rewrite and make sure we're able to parse + # it. + test {CONFIG REWRITE sanity} { + # Capture state of config before + set configs {} + foreach {k v} [r config get *] { + dict set configs $k $v + } + + # Rewrite entire configuration, restart and confirm the + # server is able to parse it and start. + assert_equal [r debug config-rewrite-force-all] "OK" + restart_server 0 0 + assert_equal [r ping] "PONG" + + # Verify no changes were introduced + dict for {k v} $configs { + assert_equal $v [lindex [r config get $k] 1] + } + } + + # Config file at this point is at a wierd state, and includes all + # known keywords. Might be a good idea to avoid adding tests here. } From ee3e45ac6e35e0fef47939f7786e3a35d1f9a531 Mon Sep 17 00:00:00 2001 From: Roi Lipman Date: Wed, 9 Sep 2020 16:01:16 +0300 Subject: [PATCH 159/377] RM_ThreadSafeContextTryLock a non-blocking method for acquiring GIL (#7738) Co-authored-by: Yossi Gottlieb Co-authored-by: Oran Agra (cherry picked from commit b1de173ec0f6a03d6083b87f1505fbf843708685) --- runtest-moduleapi | 1 + src/module.c | 22 +++++++ src/redismodule.h | 2 + src/server.h | 1 + tests/modules/Makefile | 4 +- tests/modules/blockedclient.c | 82 ++++++++++++++++++++++++++ tests/unit/moduleapi/blockedclient.tcl | 11 ++++ 7 files changed, 122 insertions(+), 1 deletion(-) create mode 100644 tests/modules/blockedclient.c create mode 100644 tests/unit/moduleapi/blockedclient.tcl diff --git a/runtest-moduleapi b/runtest-moduleapi index 71db27e5e..f3abde740 100755 --- a/runtest-moduleapi +++ b/runtest-moduleapi @@ -26,4 +26,5 @@ $TCLSH tests/test_helper.tcl \ --single unit/moduleapi/datatype \ --single unit/moduleapi/auth \ --single unit/moduleapi/keyspace_events \ +--single unit/moduleapi/blockedclient \ "${@}" diff --git a/src/module.c b/src/module.c index b7321f7d9..f293d6a6c 100644 --- a/src/module.c +++ b/src/module.c @@ -4906,6 +4906,23 @@ void RM_ThreadSafeContextLock(RedisModuleCtx *ctx) { moduleAcquireGIL(); } +/* Similar to RM_ThreadSafeContextLock but this function + * would not block if the server lock is already acquired. + * + * If successful (lock acquired) REDISMODULE_OK is returned, + * otherwise REDISMODULE_ERR is returned and errno is set + * accordingly. */ +int RM_ThreadSafeContextTryLock(RedisModuleCtx *ctx) { + UNUSED(ctx); + + int res = moduleTryAcquireGIL(); + if(res != 0) { + errno = res; + return REDISMODULE_ERR; + } + return REDISMODULE_OK; +} + /* Release the server lock after a thread safe API call was executed. 
*/ void RM_ThreadSafeContextUnlock(RedisModuleCtx *ctx) { UNUSED(ctx); @@ -4916,6 +4933,10 @@ void moduleAcquireGIL(void) { pthread_mutex_lock(&moduleGIL); } +int moduleTryAcquireGIL(void) { + return pthread_mutex_trylock(&moduleGIL); +} + void moduleReleaseGIL(void) { pthread_mutex_unlock(&moduleGIL); } @@ -7929,6 +7950,7 @@ void moduleRegisterCoreAPI(void) { REGISTER_API(GetThreadSafeContext); REGISTER_API(FreeThreadSafeContext); REGISTER_API(ThreadSafeContextLock); + REGISTER_API(ThreadSafeContextTryLock); REGISTER_API(ThreadSafeContextUnlock); REGISTER_API(DigestAddStringBuffer); REGISTER_API(DigestAddLongLong); diff --git a/src/redismodule.h b/src/redismodule.h index 460fdd480..4bfc14cc7 100644 --- a/src/redismodule.h +++ b/src/redismodule.h @@ -666,6 +666,7 @@ REDISMODULE_API int (*RedisModule_AbortBlock)(RedisModuleBlockedClient *bc) REDI REDISMODULE_API RedisModuleCtx * (*RedisModule_GetThreadSafeContext)(RedisModuleBlockedClient *bc) REDISMODULE_ATTR; REDISMODULE_API void (*RedisModule_FreeThreadSafeContext)(RedisModuleCtx *ctx) REDISMODULE_ATTR; REDISMODULE_API void (*RedisModule_ThreadSafeContextLock)(RedisModuleCtx *ctx) REDISMODULE_ATTR; +REDISMODULE_API int (*RedisModule_ThreadSafeContextTryLock)(RedisModuleCtx *ctx) REDISMODULE_ATTR; REDISMODULE_API void (*RedisModule_ThreadSafeContextUnlock)(RedisModuleCtx *ctx) REDISMODULE_ATTR; REDISMODULE_API int (*RedisModule_SubscribeToKeyspaceEvents)(RedisModuleCtx *ctx, int types, RedisModuleNotificationFunc cb) REDISMODULE_ATTR; REDISMODULE_API int (*RedisModule_NotifyKeyspaceEvent)(RedisModuleCtx *ctx, int type, const char *event, RedisModuleString *key) REDISMODULE_ATTR; @@ -899,6 +900,7 @@ static int RedisModule_Init(RedisModuleCtx *ctx, const char *name, int ver, int REDISMODULE_GET_API(GetThreadSafeContext); REDISMODULE_GET_API(FreeThreadSafeContext); REDISMODULE_GET_API(ThreadSafeContextLock); + REDISMODULE_GET_API(ThreadSafeContextTryLock); REDISMODULE_GET_API(ThreadSafeContextUnlock); REDISMODULE_GET_API(BlockClient); REDISMODULE_GET_API(UnblockClient); diff --git a/src/server.h b/src/server.h index d77df93b5..980011938 100644 --- a/src/server.h +++ b/src/server.h @@ -1595,6 +1595,7 @@ void moduleBlockedClientTimedOut(client *c); void moduleBlockedClientPipeReadable(aeEventLoop *el, int fd, void *privdata, int mask); size_t moduleCount(void); void moduleAcquireGIL(void); +int moduleTryAcquireGIL(void); void moduleReleaseGIL(void); void moduleNotifyKeyspaceEvent(int type, const char *event, robj *key, int dbid); void moduleCallCommandFilters(client *c); diff --git a/tests/modules/Makefile b/tests/modules/Makefile index de7407a84..fad6e55d8 100644 --- a/tests/modules/Makefile +++ b/tests/modules/Makefile @@ -23,7 +23,9 @@ TEST_MODULES = \ scan.so \ datatype.so \ auth.so \ - keyspace_events.so + keyspace_events.so \ + blockedclient.so + .PHONY: all diff --git a/tests/modules/blockedclient.c b/tests/modules/blockedclient.c new file mode 100644 index 000000000..9d59114a1 --- /dev/null +++ b/tests/modules/blockedclient.c @@ -0,0 +1,82 @@ +#define REDISMODULE_EXPERIMENTAL_API +#include "redismodule.h" +#include +#include +#include + +#define UNUSED(V) ((void) V) + +void *sub_worker(void *arg) { + // Get Redis module context + RedisModuleCtx *ctx = (RedisModuleCtx *)arg; + + // Try acquiring GIL + int res = RedisModule_ThreadSafeContextTryLock(ctx); + + // GIL is already taken by the calling thread expecting to fail. 
+ assert(res != REDISMODULE_OK); + + return NULL; +} + +void *worker(void *arg) { + // Retrieve blocked client + RedisModuleBlockedClient *bc = (RedisModuleBlockedClient *)arg; + + // Get Redis module context + RedisModuleCtx *ctx = RedisModule_GetThreadSafeContext(bc); + + // Acquire GIL + RedisModule_ThreadSafeContextLock(ctx); + + // Create another thread which will try to acquire the GIL + pthread_t tid; + int res = pthread_create(&tid, NULL, sub_worker, ctx); + assert(res == 0); + + // Wait for thread + pthread_join(tid, NULL); + + // Release GIL + RedisModule_ThreadSafeContextUnlock(ctx); + + // Reply to client + RedisModule_ReplyWithSimpleString(ctx, "OK"); + + // Unblock client + RedisModule_UnblockClient(bc, NULL); + + return NULL; +} + +int acquire_gil(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) +{ + UNUSED(argv); + UNUSED(argc); + + /* This command handler tries to acquire the GIL twice + * once in the worker thread using "RedisModule_ThreadSafeContextLock" + * second in the sub-worker thread + * using "RedisModule_ThreadSafeContextTryLock" + * as the GIL is already locked. */ + RedisModuleBlockedClient *bc = RedisModule_BlockClient(ctx, NULL, NULL, NULL, 0); + + pthread_t tid; + int res = pthread_create(&tid, NULL, worker, bc); + assert(res == 0); + + return REDISMODULE_OK; +} + +int RedisModule_OnLoad(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) { + REDISMODULE_NOT_USED(argv); + REDISMODULE_NOT_USED(argc); + + if (RedisModule_Init(ctx, "blockedclient", 1, REDISMODULE_APIVER_1)== REDISMODULE_ERR) + return REDISMODULE_ERR; + + if (RedisModule_CreateCommand(ctx, "acquire_gil", acquire_gil, "", 0, 0, 0) == REDISMODULE_ERR) + return REDISMODULE_ERR; + + return REDISMODULE_OK; +} diff --git a/tests/unit/moduleapi/blockedclient.tcl b/tests/unit/moduleapi/blockedclient.tcl new file mode 100644 index 000000000..d093a0297 --- /dev/null +++ b/tests/unit/moduleapi/blockedclient.tcl @@ -0,0 +1,11 @@ +# source tests/support/util.tcl + +set testmodule [file normalize tests/modules/blockedclient.so] + +start_server {tags {"modules"}} { + r module load $testmodule + + test {Locked GIL acquisition} { + assert_match "OK" [r acquire_gil] + } +} From d12e141780b36da2fd0e0be9a55059ed1aeb7334 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E6=9D=A8=E5=8D=9A=E4=B8=9C?= Date: Wed, 9 Sep 2020 22:13:35 +0800 Subject: [PATCH 160/377] Tests: Add aclfile load and save tests (#7765) improves test coverage (cherry picked from commit ce1466831686b617f72ffbdc51dde137ce5cf9ff) --- tests/assets/user.acl | 2 ++ tests/unit/acl.tcl | 39 +++++++++++++++++++++++++++++++++++++++ 2 files changed, 41 insertions(+) create mode 100644 tests/assets/user.acl diff --git a/tests/assets/user.acl b/tests/assets/user.acl new file mode 100644 index 000000000..2f065dab6 --- /dev/null +++ b/tests/assets/user.acl @@ -0,0 +1,2 @@ +user alice on allcommands allkeys >alice +user bob on -@all +@set +acl ~set* >bob \ No newline at end of file diff --git a/tests/unit/acl.tcl b/tests/unit/acl.tcl index e81280995..381f2f95f 100644 --- a/tests/unit/acl.tcl +++ b/tests/unit/acl.tcl @@ -261,3 +261,42 @@ start_server {tags {"acl"}} { assert_match "*Unknown subcommand or wrong number of arguments*" $e } } + +set server_path [tmpdir "server.acl"] +exec cp -f tests/assets/user.acl $server_path +start_server [list overrides [list "dir" $server_path "aclfile" "user.acl"]] { + # user alice on allcommands allkeys >alice + # user bob on -@all +@set +acl ~set* >bob + + test "Alice: can excute all command" { + r AUTH alice alice + 
assert_equal "alice" [r acl whoami] + r SET key value + } + + test "Bob: just excute @set and acl command" { + r AUTH bob bob + assert_equal "bob" [r acl whoami] + assert_equal "3" [r sadd set 1 2 3] + catch {r SET key value} e + set e + } {*NOPERM*} + + test "ACL load and save" { + r ACL setuser eve +get allkeys >eve on + r ACL save + + # ACL load will free user and kill clients + r ACL load + catch {r ACL LIST} e + assert_match {*I/O error*} $e + + reconnect + r AUTH alice alice + r SET key value + r AUTH eve eve + r GET key + catch {r SET key value} e + set e + } {*NOPERM*} +} From d3945c636d9b438b91a43a7ed56e40d51f161957 Mon Sep 17 00:00:00 2001 From: Yossi Gottlieb Date: Wed, 9 Sep 2020 18:58:06 +0300 Subject: [PATCH 161/377] Tests: fix oom-score-adj false positives. (#7772) The key save delay is too short and on certain systems the child process is gone before we have a chance to inspect it. (cherry picked from commit 1abc94155a26356f7fcaf5d20b80f031a55a3e82) --- tests/unit/oom-score-adj.tcl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/unit/oom-score-adj.tcl b/tests/unit/oom-score-adj.tcl index 8eb09a993..cf671fe6a 100644 --- a/tests/unit/oom-score-adj.tcl +++ b/tests/unit/oom-score-adj.tcl @@ -35,7 +35,7 @@ if {$system_name eq {linux}} { # Check child process r set key-a value-a - r config set rdb-key-save-delay 100000 + r config set rdb-key-save-delay 1000000 r bgsave set child_pid [get_child_pid 0] From 4de93718a759800d82182ff9577549011d353a42 Mon Sep 17 00:00:00 2001 From: Oran Agra Date: Thu, 10 Sep 2020 08:18:20 +0300 Subject: [PATCH 162/377] Fix leak in new blockedclient module API test (cherry picked from commit 0e20ad14a6a857cb168b808f94721df19b23dc0c) --- tests/modules/blockedclient.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/tests/modules/blockedclient.c b/tests/modules/blockedclient.c index 9d59114a1..ca98281a4 100644 --- a/tests/modules/blockedclient.c +++ b/tests/modules/blockedclient.c @@ -46,6 +46,9 @@ void *worker(void *arg) { // Unblock client RedisModule_UnblockClient(bc, NULL); + // Free the Redis module context + RedisModule_FreeThreadSafeContext(ctx); + return NULL; } From e4a7840d349c4e18a192b5e31c328be39db8a493 Mon Sep 17 00:00:00 2001 From: Oran Agra Date: Thu, 10 Sep 2020 09:01:10 +0300 Subject: [PATCH 163/377] Fix RESP3 response for HKEYS/HVALS on non-existing key --- src/t_hash.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/t_hash.c b/src/t_hash.c index 4a03cfb25..240e11c91 100644 --- a/src/t_hash.c +++ b/src/t_hash.c @@ -772,7 +772,9 @@ void genericHgetallCommand(client *c, int flags) { hashTypeIterator *hi; int length, count = 0; - if ((o = lookupKeyReadOrReply(c,c->argv[1],shared.emptymap[c->resp])) + robj *emptyResp = (flags & OBJ_HASH_KEY && flags & OBJ_HASH_VALUE) ? 
+ shared.emptymap[c->resp] : shared.emptyarray; + if ((o = lookupKeyReadOrReply(c,c->argv[1],emptyResp)) == NULL || checkType(c,o,OBJ_HASH)) return; /* We return a map if the user requested keys and values, like in the From 40f2c892e5613154981682674e0a4a592fa47f1a Mon Sep 17 00:00:00 2001 From: Oran Agra Date: Thu, 10 Sep 2020 00:04:21 +0300 Subject: [PATCH 164/377] 6.0.8 --- 00-RELEASENOTES | 148 ++++++++++++++++++++++++++++++++++++++++++++++++ src/help.h | 2 +- src/version.h | 2 +- 3 files changed, 150 insertions(+), 2 deletions(-) diff --git a/00-RELEASENOTES b/00-RELEASENOTES index ce75e19c2..c9c09f76e 100644 --- a/00-RELEASENOTES +++ b/00-RELEASENOTES @@ -11,6 +11,154 @@ CRITICAL: There is a critical bug affecting MOST USERS. Upgrade ASAP. SECURITY: There are security fixes in the release. -------------------------------------------------------------------------------- +================================================================================ +Redis 6.0.8 Released Wed Sep 09 23:34:17 IDT 2020 +================================================================================ + +Upgrade urgency HIGH: Anyone who's using Redis 6.0.7 with Sentinel or +CONFIG REWRITE command is affected and should upgrade ASAP, see #7760. + +Bug fixes: + +* CONFIG REWRITE after setting oom-score-adj-values either via CONFIG SET or + loading it from a config file, will generate a corrupt config file that will + cause Redis to fail to start +* Fix issue with redis-cli --pipe on MacOS +* Fix RESP3 response for HKEYS/HVALS on non-existing key +* Various small bug fixes + +New features / Changes: + +* Remove THP warning when set to madvise +* Allow EXEC with read commands on readonly replica in cluster +* Add masters/replicas options to redis-cli --cluster call command + +Module API: + +* Add RedisModule_ThreadSafeContextTryLock + +Full list of commits: + +Oran Agra in commit cdabf696a: + Fix RESP3 response for HKEYS/HVALS on non-existing key + 1 file changed, 3 insertions(+), 1 deletion(-) + +Oran Agra in commit ec633c716: + Fix leak in new blockedclient module API test + 1 file changed, 3 insertions(+) + +Yossi Gottlieb in commit 6bac07c5c: + Tests: fix oom-score-adj false positives. (#7772) + 1 file changed, 1 insertion(+), 1 deletion(-) + +杨博东 in commit 6043dc614: + Tests: Add aclfile load and save tests (#7765) + 2 files changed, 41 insertions(+) + +Roi Lipman in commit c0b5f9bf0: + RM_ThreadSafeContextTryLock a non-blocking method for acquiring GIL (#7738) + 7 files changed, 122 insertions(+), 1 deletion(-) + +Yossi Gottlieb in commit 5780a1599: + Tests: validate CONFIG REWRITE for all params. (#7764) + 6 files changed, 43 insertions(+), 6 deletions(-) + +Oran Agra in commit e3c14b25d: + Change THP warning to use madvise rather than never (#7771) + 1 file changed, 1 insertion(+), 1 deletion(-) + +Itamar Haber in commit 28929917b: + Documents RM_Call's fmt (#5448) + 1 file changed, 25 insertions(+) + +Jan-Erik Rediger in commit 9146402c2: + Check that THP is not set to always (madvise is ok) (#4001) + 1 file changed, 1 insertion(+), 1 deletion(-) + +Yossi Gottlieb in commit d05089429: + Tests: clean up stale .cli files. (#7768) + 1 file changed, 2 insertions(+) + +Eran Liberty in commit 8861c1bae: + Allow exec with read commands on readonly replica in cluster (#7766) + 3 files changed, 59 insertions(+), 3 deletions(-) + +Yossi Gottlieb in commit 2cf2ff2f6: + Fix CONFIG REWRITE of oom-score-adj-values. 
(#7761) + 1 file changed, 2 insertions(+), 1 deletion(-) + +Oran Agra in commit 1386c80f7: + handle cur_test for nested tests + 1 file changed, 3 insertions(+) + +Oran Agra in commit c7d4945f0: + Add daily CI for MacOS (#7759) + 1 file changed, 18 insertions(+) + +bodong.ybd in commit 32548264c: + Tests: Some fixes for macOS + 3 files changed, 26 insertions(+), 11 deletions(-) + +Oran Agra in commit 1e17f9812: + Fix cluster consistency-check test (#7754) + 1 file changed, 55 insertions(+), 29 deletions(-) + +Yossi Gottlieb in commit f4ecdf86a: + Tests: fix unmonitored servers. (#7756) + 1 file changed, 5 insertions(+) + +Oran Agra in commit 9f020050d: + fix broken cluster/sentinel tests by recent commit (#7752) + 1 file changed, 1 insertion(+), 1 deletion(-) + +Oran Agra in commit fdbabb496: + Improve valgrind support for cluster tests (#7725) + 3 files changed, 83 insertions(+), 23 deletions(-) + +Oran Agra in commit 35a6a0bbc: + test infra - add durable mode to work around test suite crashing + 3 files changed, 35 insertions(+), 3 deletions(-) + +Oran Agra in commit e3136b13f: + test infra - wait_done_loading + 2 files changed, 16 insertions(+), 36 deletions(-) + +Oran Agra in commit 83c75dbd9: + test infra - flushall between tests in external mode + 1 file changed, 1 insertion(+) + +Oran Agra in commit 265f5d3cf: + test infra - improve test skipping ability + 3 files changed, 91 insertions(+), 36 deletions(-) + +Oran Agra in commit fcd3a9908: + test infra - reduce disk space usage + 3 files changed, 33 insertions(+), 11 deletions(-) + +Oran Agra in commit b6ea4699f: + test infra - write test name to logfile + 3 files changed, 35 insertions(+) + +Yossi Gottlieb in commit 4a4b07fc6: + redis-cli: fix writeConn() buffer handling. (#7749) + 1 file changed, 37 insertions(+), 6 deletions(-) + +Oran Agra in commit f2d08de2e: + Print server startup messages after daemonization (#7743) + 1 file changed, 4 insertions(+), 4 deletions(-) + +Thandayuthapani in commit 77541d555: + Add masters/replicas options to redis-cli --cluster call command (#6491) + 1 file changed, 13 insertions(+), 2 deletions(-) + +Oran Agra in commit 91d13a854: + fix README about BUILD_WITH_SYSTEMD usage (#7739) + 1 file changed, 1 insertion(+), 1 deletion(-) + +Yossi Gottlieb in commit 88d03d965: + Fix double-make issue with make && make install. 
(#7734) + 1 file changed, 2 insertions(+) + ================================================================================ Redis 6.0.7 Released Fri Aug 28 11:05:09 IDT 2020 ================================================================================ diff --git a/src/help.h b/src/help.h index 64344aa63..5a8af427e 100644 --- a/src/help.h +++ b/src/help.h @@ -974,7 +974,7 @@ struct commandHelp { 8, "1.0.0" }, { "SET", - "key value [EX seconds|PX milliseconds] [NX|XX] [KEEPTTL]", + "key value [EX seconds|PX milliseconds|KEEPTTL] [NX|XX]", "Set the string value of a key", 1, "1.0.0" }, diff --git a/src/version.h b/src/version.h index 71f998326..e09e5c4c4 100644 --- a/src/version.h +++ b/src/version.h @@ -1 +1 @@ -#define REDIS_VERSION "6.0.7" +#define REDIS_VERSION "6.0.8" From 59e86a111bd2b53a00c5d6a0585370df960141cb Mon Sep 17 00:00:00 2001 From: John Sully Date: Mon, 28 Sep 2020 19:17:36 +0000 Subject: [PATCH 165/377] Drop min-clients-per-thread to a more reasonable number Former-commit-id: a0abc1eddd071f984950ad8918fad0259c495184 --- src/config.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/config.cpp b/src/config.cpp index fe1d5cd41..ea8d7a507 100644 --- a/src/config.cpp +++ b/src/config.cpp @@ -2294,7 +2294,7 @@ standardConfig configs[] = { createIntConfig("hz", NULL, MODIFIABLE_CONFIG, 0, INT_MAX, g_pserver->config_hz, CONFIG_DEFAULT_HZ, INTEGER_CONFIG, NULL, updateHZ), createIntConfig("min-replicas-to-write", "min-slaves-to-write", MODIFIABLE_CONFIG, 0, INT_MAX, g_pserver->repl_min_slaves_to_write, 0, INTEGER_CONFIG, NULL, updateGoodSlaves), createIntConfig("min-replicas-max-lag", "min-slaves-max-lag", MODIFIABLE_CONFIG, 0, INT_MAX, g_pserver->repl_min_slaves_max_lag, 10, INTEGER_CONFIG, NULL, updateGoodSlaves), - createIntConfig("min-clients-per-thread", NULL, MODIFIABLE_CONFIG, 0, 400, cserver.thread_min_client_threshold, 50, INTEGER_CONFIG, NULL, NULL), + createIntConfig("min-clients-per-thread", NULL, MODIFIABLE_CONFIG, 0, 400, cserver.thread_min_client_threshold, 20, INTEGER_CONFIG, NULL, NULL), createIntConfig("replica-quorum", NULL, MODIFIABLE_CONFIG, -1, INT_MAX, g_pserver->repl_quorum, -1, INTEGER_CONFIG, NULL, NULL), /* Unsigned int configs */ createUIntConfig("maxclients", NULL, MODIFIABLE_CONFIG, 1, UINT_MAX, g_pserver->maxclients, 10000, INTEGER_CONFIG, NULL, updateMaxclients), From c0df1ac1738c683d156bc500b7104705997dad26 Mon Sep 17 00:00:00 2001 From: John Sully Date: Tue, 29 Sep 2020 03:26:06 +0000 Subject: [PATCH 166/377] Test RDB merge on load with active replication Former-commit-id: 28183f4b66fc4c865048080b61e599eeb1d2293b --- tests/integration/replication-active.tcl | 30 +++++++++++++++++++++--- 1 file changed, 27 insertions(+), 3 deletions(-) diff --git a/tests/integration/replication-active.tcl b/tests/integration/replication-active.tcl index 6c3c6d674..d6501fbeb 100644 --- a/tests/integration/replication-active.tcl +++ b/tests/integration/replication-active.tcl @@ -215,14 +215,38 @@ start_server {tags {"active-repl"} overrides {active-replica yes}} { assert_equal {1} [$slave wait 1 500] { "value should propogate within 0.5 seconds" } exec kill -SIGSTOP $slave_pid - after 3000 + after 3000 # Ensure testkey1 is gone. Note, we can't do this directly as the normal commands lie to us # about what is actually in the dict. 
The only way to know is with a count from info - assert_equal {1} [expr [string first {keys=1} [$master info keyspace]] >= 0] {"slave expired"} + assert_equal {1} [expr [string first {keys=1} [$master info keyspace]] >= 0] {"slave expired"} } - + exec kill -SIGCONT $slave_pid + test {Active replica merge works when reconnecting} { + $slave flushall + $slave set testkey foo + wait_for_condition 50 1000 { + [string match *foo* [$master get testkey]] + } else { + fail "Replication failed to propogate" + } + $slave replicaof no one + $master replicaof no one + after 100 + $master set testkey baz + after 100 + $slave set testkey bar + after 100 + $slave replicaof $master_host $master_port + after 1000 + $master replicaof $slave_host $slave_port + after 1000 + + assert_equal {bar} [$slave get testkey] + assert_equal {bar} [$master get testkey] + } + test {Active replica different databases} { $master select 3 $master set testkey abcd From 6448de44614b6bd72f36df6f007e05a502713153 Mon Sep 17 00:00:00 2001 From: John Sully Date: Wed, 30 Sep 2020 20:12:54 +0000 Subject: [PATCH 167/377] Fix stream replication failure with active replication, issue #238 Former-commit-id: a41366cc3a6568c0249a5ee022e517add55e286d --- src/t_stream.cpp | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/src/t_stream.cpp b/src/t_stream.cpp index b54df9c71..fd7a1329b 100644 --- a/src/t_stream.cpp +++ b/src/t_stream.cpp @@ -43,6 +43,7 @@ void streamFreeCG(streamCG *cg); void streamFreeNACK(streamNACK *na); size_t streamReplyWithRangeFromConsumerPEL(client *c, stream *s, streamID *start, streamID *end, size_t count, streamConsumer *consumer); +bool FInReplicaReplay(); /* ----------------------------------------------------------------------- * Low level stream encoding: a radix tree of listpacks. @@ -838,6 +839,9 @@ void streamPropagateXCLAIM(client *c, robj *key, streamCG *group, robj *groupnam * * Note that JUSTID is useful in order to avoid that XCLAIM will do * useless work in the replica side, trying to fetch the stream item. 
*/ + if (FInReplicaReplay()) + return; + robj *argv[14]; argv[0] = createStringObject("XCLAIM",6); argv[1] = key; From 5fd3c39c05a080a96f3c8da69025df62702d40e0 Mon Sep 17 00:00:00 2001 From: John Sully Date: Wed, 30 Sep 2020 20:17:02 +0000 Subject: [PATCH 168/377] Mac build break fix Former-commit-id: 5f3543921b4123c2216d9294c5eb7bfed007cbf5 --- src/server.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/server.h b/src/server.h index 197b9bf88..3705b199a 100644 --- a/src/server.h +++ b/src/server.h @@ -819,7 +819,7 @@ struct redisObjectExtended { uint64_t mvcc_tstamp; }; -typedef class redisObject { +typedef struct redisObject { protected: redisObject() {} From 13d472b160272dbf9054652c7863d5c788480839 Mon Sep 17 00:00:00 2001 From: John Sully Date: Wed, 30 Sep 2020 20:17:51 +0000 Subject: [PATCH 169/377] Keep redis-cli.c C89 compatible Former-commit-id: 9798f2d711939b03e972207638d18dcaa3b2e473 --- src/redis-cli.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/redis-cli.c b/src/redis-cli.c index 2b736b3e3..5dfcd6fad 100644 --- a/src/redis-cli.c +++ b/src/redis-cli.c @@ -1442,7 +1442,7 @@ static int parseOptions(int argc, char **argv) { sdsfree(version); exit(0); } else if (!strcmp(argv[i],"--no-motd")) { - config.disable_motd = true; + config.disable_motd = 1; } else if (!strcmp(argv[i],"-3")) { config.resp3 = 1; } else if (CLUSTER_MANAGER_MODE() && argv[i][0] != '-') { From 3529a1564b11a99462e43c0b4ba1e49aa40dd8cf Mon Sep 17 00:00:00 2001 From: John Sully Date: Wed, 30 Sep 2020 20:21:40 +0000 Subject: [PATCH 170/377] Fix TLS test failure due to bad merge Former-commit-id: 41466a4147bf675f69670016135f88589cc02a5c --- src/config.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/config.cpp b/src/config.cpp index 6158e7a87..d58bbbade 100644 --- a/src/config.cpp +++ b/src/config.cpp @@ -2477,7 +2477,7 @@ standardConfig configs[] = { createIntConfig("tls-session-cache-timeout", NULL, MODIFIABLE_CONFIG, 0, INT_MAX, g_pserver->tls_ctx_config.session_cache_timeout, 300, INTEGER_CONFIG, NULL, updateTlsCfgInt), createBoolConfig("tls-cluster", NULL, MODIFIABLE_CONFIG, g_pserver->tls_cluster, 0, NULL, NULL), createBoolConfig("tls-replication", NULL, MODIFIABLE_CONFIG, g_pserver->tls_replication, 0, NULL, NULL), - createBoolConfig("tls-auth-clients", NULL, MODIFIABLE_CONFIG, g_pserver->tls_auth_clients, 1, NULL, NULL), + createEnumConfig("tls-auth-clients", NULL, MODIFIABLE_CONFIG, tls_auth_clients_enum, g_pserver->tls_auth_clients, TLS_CLIENT_AUTH_YES, NULL, NULL), createBoolConfig("tls-prefer-server-ciphers", NULL, MODIFIABLE_CONFIG, g_pserver->tls_ctx_config.prefer_server_ciphers, 0, NULL, updateTlsCfgBool), createBoolConfig("tls-session-caching", NULL, MODIFIABLE_CONFIG, g_pserver->tls_ctx_config.session_caching, 1, NULL, updateTlsCfgBool), createStringConfig("tls-cert-file", NULL, MODIFIABLE_CONFIG, EMPTY_STRING_IS_NULL, g_pserver->tls_ctx_config.cert_file, NULL, NULL, updateTlsCfg), From 04f834037a263e051cdb7d15e9c8148f685e28f7 Mon Sep 17 00:00:00 2001 From: John Sully Date: Thu, 1 Oct 2020 00:08:54 +0000 Subject: [PATCH 171/377] Fix module test failures due to locking Former-commit-id: 420ccdfbaebc452b9b374b54c6ebeec4a3ffea36 --- src/module.cpp | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/src/module.cpp b/src/module.cpp index c37071f87..73e176e7a 100644 --- a/src/module.cpp +++ b/src/module.cpp @@ -329,6 +329,7 @@ static int s_cAcquisitionsServer = 0; static int s_cAcquisitionsModule = 0; static std::mutex s_mutex; 
static std::condition_variable s_cv; +static std::recursive_mutex s_mutexModule; typedef void (*RedisModuleForkDoneHandler) (int exitcode, int bysignal, void *user_data); @@ -5055,6 +5056,7 @@ void moduleAcquireGIL(int fServerThread) { } else { + s_mutexModule.lock(); ++s_cAcquisitionsModule; fModuleGILWlocked++; } @@ -5079,6 +5081,8 @@ int moduleTryAcquireGIL(bool fServerThread) { } else { + if (!s_mutexModule.try_lock()) + return 1; ++s_cAcquisitionsModule; fModuleGILWlocked++; } @@ -5098,6 +5102,7 @@ void moduleReleaseGIL(int fServerThread) { } else { + s_mutexModule.unlock(); --s_cAcquisitionsModule; fModuleGILWlocked--; } From 9d8897c50e2f2eeef080c92b129fab2cf33b4ebf Mon Sep 17 00:00:00 2001 From: John Sully Date: Thu, 1 Oct 2020 11:17:56 -0400 Subject: [PATCH 172/377] Delete CONTRIBUTING Former-commit-id: b042495f37f3d22f7759a4c59044af40273c3a1a --- CONTRIBUTING | 50 -------------------------------------------------- 1 file changed, 50 deletions(-) delete mode 100644 CONTRIBUTING diff --git a/CONTRIBUTING b/CONTRIBUTING deleted file mode 100644 index 000edbeaf..000000000 --- a/CONTRIBUTING +++ /dev/null @@ -1,50 +0,0 @@ -Note: by contributing code to the Redis project in any form, including sending -a pull request via Github, a code fragment or patch via private email or -public discussion groups, you agree to release your code under the terms -of the BSD license that you can find in the COPYING file included in the Redis -source distribution. You will include BSD license in the COPYING file within -each source file that you contribute. - -# IMPORTANT: HOW TO USE REDIS GITHUB ISSUES - -* Github issues SHOULD ONLY BE USED to report bugs, and for DETAILED feature - requests. Everything else belongs to the Redis Google Group: - - https://groups.google.com/forum/m/#!forum/Redis-db - - PLEASE DO NOT POST GENERAL QUESTIONS that are not about bugs or suspected - bugs in the Github issues system. We'll be very happy to help you and provide - all the support in the mailing list. - - There is also an active community of Redis users at Stack Overflow: - - http://stackoverflow.com/questions/tagged/redis - -# How to provide a patch for a new feature - -1. If it is a major feature or a semantical change, please don't start coding -straight away: if your feature is not a conceptual fit you'll lose a lot of -time writing the code without any reason. Start by posting in the mailing list -and creating an issue at Github with the description of, exactly, what you want -to accomplish and why. Use cases are important for features to be accepted. -Here you'll see if there is consensus about your idea. - -2. If in step 1 you get an acknowledgment from the project leaders, use the - following procedure to submit a patch: - - a. Fork Redis on github ( http://help.github.com/fork-a-repo/ ) - b. Create a topic branch (git checkout -b my_branch) - c. Push to your branch (git push origin my_branch) - d. Initiate a pull request on github ( https://help.github.com/articles/creating-a-pull-request/ ) - e. Done :) - -3. Keep in mind that we are very overloaded, so issues and PRs sometimes wait -for a *very* long time. However this is not lack of interest, as the project -gets more and more users, we find ourselves in a constant need to prioritize -certain issues/PRs over others. If you think your issue/PR is very important -try to popularize it, have other users commenting and sharing their point of -view and so forth. This helps. - -4. For minor fixes just open a pull request on Github. - -Thanks! 
From 4b63a5202b98da666e82fd16ea378668a9372b41 Mon Sep 17 00:00:00 2001 From: Hanif Bin Ariffin Date: Sat, 3 Oct 2020 23:11:03 +0800 Subject: [PATCH 173/377] Removed dead code from a macro in zmalloc.cpp I think the compiler would have removed this no-op anyways but it definitely wasted me some 30 minutes on this :( I was hoping I could remove the branch through some bit-hacking but apparently its dead code :). Oh well, its 30 minutes of refreshing bit hacking. Signed-off-by: Hanif Bin Ariffin Former-commit-id: 8171e6de13311e3ad2e87c32d63060dcf3bd6055 --- src/zmalloc.cpp | 13 ++----------- 1 file changed, 2 insertions(+), 11 deletions(-) diff --git a/src/zmalloc.cpp b/src/zmalloc.cpp index e3d364a97..f88fee638 100644 --- a/src/zmalloc.cpp +++ b/src/zmalloc.cpp @@ -83,17 +83,8 @@ static_assert((PREFIX_SIZE % 16) == 0, "Our prefix must be modulo 16-bytes or ou #define realloc(ptr,size,type) realloc(ptr,size) #endif -#define update_zmalloc_stat_alloc(__n) do { \ - size_t _n = (__n); \ - if (_n&(sizeof(long)-1)) _n += sizeof(long)-(_n&(sizeof(long)-1)); \ - atomicIncr(used_memory,__n); \ -} while(0) - -#define update_zmalloc_stat_free(__n) do { \ - size_t _n = (__n); \ - if (_n&(sizeof(long)-1)) _n += sizeof(long)-(_n&(sizeof(long)-1)); \ - atomicDecr(used_memory,__n); \ -} while(0) +#define update_zmalloc_stat_alloc(__n) atomicIncr(used_memory,(__n)) +#define update_zmalloc_stat_free(__n) atomicDecr(used_memory,(__n)) static size_t used_memory = 0; pthread_mutex_t used_memory_mutex = PTHREAD_MUTEX_INITIALIZER; From b238eca1ce17483c45e0c98e7aafab43014ebdf0 Mon Sep 17 00:00:00 2001 From: Hanif Bin Ariffin Date: Mon, 5 Oct 2020 21:57:42 +0800 Subject: [PATCH 174/377] Fixed non-empty check in src/Makefile Per [GNU Make Manual](https://www.gnu.org/software/make/manual/html_node/Conditional-Syntax.html). To properly check for non-empty variable, one must strip whitespaces. Signed-off-by: Hanif Bin Ariffin Former-commit-id: 05e0f323456b8e667d10dabfa804757a2fc81b04 --- src/Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Makefile b/src/Makefile index c0aa26737..9db3e6934 100644 --- a/src/Makefile +++ b/src/Makefile @@ -47,7 +47,7 @@ endif USEASM?=true -ifneq ($(SANITIZE),) +ifneq ($(strip $(SANITIZE)),) CFLAGS+= -fsanitize=$(SANITIZE) -DSANITIZE CXXFLAGS+= -fsanitize=$(SANITIZE) -DSANITIZE LDFLAGS+= -fsanitize=$(SANITIZE) From 27e17220c201c41b79b404c9f358d00f0e97ebec Mon Sep 17 00:00:00 2001 From: John Sully Date: Fri, 9 Oct 2020 21:02:09 +0000 Subject: [PATCH 175/377] Remove dead code Former-commit-id: c6f13892e04607700e27ec963fa1da695e784c11 --- src/server.cpp | 31 ------------------------------- 1 file changed, 31 deletions(-) diff --git a/src/server.cpp b/src/server.cpp index 02b68f3da..baf53f8fe 100644 --- a/src/server.cpp +++ b/src/server.cpp @@ -2302,37 +2302,6 @@ void beforeSleep(struct aeEventLoop *eventLoop) { if (moduleCount()) moduleReleaseGIL(TRUE /*fServerThread*/); } -void beforeSleepLite(struct aeEventLoop *eventLoop) -{ - int iel = ielFromEventLoop(eventLoop); - - /* Try to process pending commands for clients that were just unblocked. */ - aeAcquireLock(); - processClients(); - if (listLength(g_pserver->rgthreadvar[iel].unblocked_clients)) { - processUnblockedClients(iel); - } - - /* Check if there are clients unblocked by modules that implement - * blocking commands. */ - if (moduleCount()) moduleHandleBlockedClients(ielFromEventLoop(eventLoop)); - int aof_state = g_pserver->aof_state; - aeReleaseLock(); - - /* Handle writes with pending output buffers. 
*/ - handleClientsWithPendingWrites(iel, aof_state); - - aeAcquireLock(); - /* Close clients that need to be closed asynchronous */ - freeClientsInAsyncFreeQueue(iel); - aeReleaseLock(); - - /* Before we are going to sleep, let the threads access the dataset by - * releasing the GIL. Redis main thread will not touch anything at this - * time. */ - if (moduleCount()) moduleReleaseGIL(TRUE /*fServerThread*/); -} - /* This function is called immadiately after the event loop multiplexing * API returned, and the control is going to soon return to Redis by invoking * the different events callbacks. */ From 6de6515bfcc1fa30b01d95f81965c12ca131d303 Mon Sep 17 00:00:00 2001 From: John Sully Date: Mon, 12 Oct 2020 04:28:39 +0000 Subject: [PATCH 176/377] Perf: remove unnecessary vector operations Former-commit-id: 1b46d4f09ab73e08a1e77fd7f73d18e98dbdce7c --- src/networking.cpp | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/src/networking.cpp b/src/networking.cpp index 719cfcc1b..f117839b2 100644 --- a/src/networking.cpp +++ b/src/networking.cpp @@ -1920,8 +1920,7 @@ void ProcessPendingAsyncWrites() * get it called, and so forth. */ int handleClientsWithPendingWrites(int iel, int aof_state) { std::unique_lock lockf(g_pserver->rgthreadvar[iel].lockPendingWrite); - auto &vec = g_pserver->rgthreadvar[iel].clients_pending_write; - int processed = (int)vec.size(); + int processed = 0; serverAssert(iel == (serverTL - g_pserver->rgthreadvar)); int ae_flags = AE_WRITABLE|AE_WRITE_THREADSAFE; @@ -1936,16 +1935,17 @@ int handleClientsWithPendingWrites(int iel, int aof_state) { ae_flags |= AE_BARRIER; } - while(!vec.empty()) { - client *c = vec.back(); + auto vec = std::move(g_pserver->rgthreadvar[iel].clients_pending_write); + processed += (int)vec.size(); + + for (client *c : vec) { AssertCorrectThread(c); - c->flags &= ~CLIENT_PENDING_WRITE; - vec.pop_back(); + uint64_t flags = c->flags.fetch_and(~CLIENT_PENDING_WRITE, std::memory_order_relaxed); /* If a client is protected, don't do anything, - * that may trigger write error or recreate handler. */ - if (c->flags & CLIENT_PROTECTED) continue; + * that may trigger write error or recreate handler. */ + if (flags & CLIENT_PROTECTED) continue; std::unique_locklock)> lock(c->lock); @@ -1964,7 +1964,7 @@ int handleClientsWithPendingWrites(int iel, int aof_state) { } /* If after the synchronous writes above we still have data to - * output to the client, we need to install the writable handler. */ + * output to the client, we need to install the writable handler. */ if (clientHasPendingReplies(c)) { if (connSetWriteHandlerWithBarrier(c->conn, sendReplyToClient, ae_flags, true) == C_ERR) freeClientAsync(c); From 6496ea5145cbf88d8a17c41f2523d42246f2606c Mon Sep 17 00:00:00 2001 From: John Sully Date: Mon, 12 Oct 2020 05:50:58 +0000 Subject: [PATCH 177/377] Avoid excess locking, seeing up to 8% performance improvements Former-commit-id: 69a74a567bd381a84e71f954d4cb35eb878f6d3c --- src/server.cpp | 31 +++++++++++++++++++++++-------- 1 file changed, 23 insertions(+), 8 deletions(-) diff --git a/src/server.cpp b/src/server.cpp index baf53f8fe..7ad913809 100644 --- a/src/server.cpp +++ b/src/server.cpp @@ -2223,15 +2223,19 @@ void processClients(); void beforeSleep(struct aeEventLoop *eventLoop) { UNUSED(eventLoop); int iel = ielFromEventLoop(eventLoop); + + aeAcquireLock(); processClients(); /* Handle precise timeouts of blocked clients. */ handleBlockedClientsTimeout(); /* Handle TLS pending data. 
(must be done before flushAppendOnlyFile) */ - aeReleaseLock(); - tlsProcessPendingData(); - aeAcquireLock(); + if (tlsHasPendingData()) { + aeReleaseLock(); + tlsProcessPendingData(); + aeAcquireLock(); + } /* If tls still has pending unread data don't sleep at all. */ aeSetDontWait(eventLoop, tlsHasPendingData()); @@ -2289,9 +2293,17 @@ void beforeSleep(struct aeEventLoop *eventLoop) { /* Handle writes with pending output buffers. */ int aof_state = g_pserver->aof_state; - aeReleaseLock(); - handleClientsWithPendingWrites(iel, aof_state); - aeAcquireLock(); + + /* We try to handle writes at the end so we don't have to reacquire the lock, + but if there is a pending async close we need to ensure the writes happen + first so perform it here */ + bool fSentReplies = false; + if (listLength(g_pserver->clients_to_close)) { + aeReleaseLock(); + handleClientsWithPendingWrites(iel, aof_state); + aeAcquireLock(); + fSentReplies = true; + } /* Close clients that need to be closed asynchronous */ freeClientsInAsyncFreeQueue(iel); @@ -2299,6 +2311,9 @@ void beforeSleep(struct aeEventLoop *eventLoop) { /* Before we are going to sleep, let the threads access the dataset by * releasing the GIL. Redis main thread will not touch anything at this * time. */ + aeReleaseLock(); + if (!fSentReplies) + handleClientsWithPendingWrites(iel, aof_state); if (moduleCount()) moduleReleaseGIL(TRUE /*fServerThread*/); } @@ -3018,7 +3033,7 @@ static void initServerThread(struct redisServerThreadVars *pvar, int fMain) pvar->tlsfd_count = 0; pvar->cclients = 0; pvar->el = aeCreateEventLoop(g_pserver->maxclients+CONFIG_FDSET_INCR); - aeSetBeforeSleepProc(pvar->el, beforeSleep, 0); + aeSetBeforeSleepProc(pvar->el, beforeSleep, AE_SLEEP_THREADSAFE); aeSetAfterSleepProc(pvar->el, afterSleep, AE_SLEEP_THREADSAFE); pvar->current_client = nullptr; pvar->clients_paused = 0; @@ -3519,7 +3534,7 @@ void call(client *c, int flags) { /* Send the command to clients in MONITOR mode if applicable. * Administrative commands are considered too dangerous to be shown. 
*/ if (listLength(g_pserver->monitors) && - !g_pserver->loading && + !g_pserver->loading.load(std::memory_order_relaxed) && !(c->cmd->flags & (CMD_SKIP_MONITOR|CMD_ADMIN))) { replicationFeedMonitors(c,g_pserver->monitors,c->db->id,c->argv,c->argc); From 8c33983e57bbc1c2e72a41842da625e70b9cc7e5 Mon Sep 17 00:00:00 2001 From: John Sully Date: Mon, 12 Oct 2020 15:27:03 +0000 Subject: [PATCH 178/377] Significantly improve perf in replication scenarios Former-commit-id: ae8a94d6158cada41b7497d55fe12f5d776f0c75 --- src/server.cpp | 2 -- 1 file changed, 2 deletions(-) diff --git a/src/server.cpp b/src/server.cpp index 7ad913809..32610a974 100644 --- a/src/server.cpp +++ b/src/server.cpp @@ -3669,8 +3669,6 @@ void call(client *c, int flags) { } redisOpArrayFree(&g_pserver->also_propagate); } - - ProcessPendingAsyncWrites(); g_pserver->also_propagate = prev_also_propagate; From f275456439250ac45bb50b4aa6b211075f4c873a Mon Sep 17 00:00:00 2001 From: John Sully Date: Tue, 13 Oct 2020 06:35:16 +0000 Subject: [PATCH 179/377] Relax memory order where possible Former-commit-id: 3e996035ea1d5a40d02f84e916837a1d350b844b --- src/networking.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/networking.cpp b/src/networking.cpp index f117839b2..14b6a4428 100644 --- a/src/networking.cpp +++ b/src/networking.cpp @@ -293,7 +293,7 @@ int prepareClientToWrite(client *c, bool fAsync) { * -------------------------------------------------------------------------- */ int _addReplyToBuffer(client *c, const char *s, size_t len, bool fAsync) { - if (c->flags & CLIENT_CLOSE_AFTER_REPLY) return C_OK; + if (c->flags.load(std::memory_order_relaxed) & CLIENT_CLOSE_AFTER_REPLY) return C_OK; fAsync = fAsync && !FCorrectThread(c); // Not async if we're on the right thread if (fAsync) @@ -327,7 +327,7 @@ int _addReplyToBuffer(client *c, const char *s, size_t len, bool fAsync) { } void _addReplyProtoToList(client *c, const char *s, size_t len) { - if (c->flags & CLIENT_CLOSE_AFTER_REPLY) return; + if (c->flags.load(std::memory_order_relaxed) & CLIENT_CLOSE_AFTER_REPLY) return; AssertCorrectThread(c); listNode *ln = listLast(c->reply); From 654379bd0cf4dcd83e85787c87be7fc1641d624f Mon Sep 17 00:00:00 2001 From: John Sully Date: Thu, 15 Oct 2020 20:15:35 +0000 Subject: [PATCH 180/377] Fix branding Former-commit-id: fb32bb12b1b070d230f89845b0415b1e7d8bc669 --- src/config.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/config.cpp b/src/config.cpp index d58bbbade..839a8b91e 100644 --- a/src/config.cpp +++ b/src/config.cpp @@ -641,7 +641,7 @@ void loadServerConfigFromString(char *config) { return; loaderr: - fprintf(stderr, "\n*** FATAL CONFIG FILE ERROR (Redis %s) ***\n", + fprintf(stderr, "\n*** FATAL CONFIG FILE ERROR (KeyDB %s) ***\n", KEYDB_REAL_VERSION); fprintf(stderr, "Reading the configuration file, at line %d\n", linenum); fprintf(stderr, ">>> '%s'\n", lines[i]); From 85492ccee87ab83eeb8ab09235938c6154dd80e2 Mon Sep 17 00:00:00 2001 From: John Sully Date: Thu, 15 Oct 2020 23:10:17 +0000 Subject: [PATCH 181/377] Reduce async write copies Former-commit-id: ed369d722335ed4105748ef2bed5d64f3f32c433 --- src/networking.cpp | 87 ++++++++++++++++++++++++++-------------------- src/server.h | 5 ++- 2 files changed, 51 insertions(+), 41 deletions(-) diff --git a/src/networking.cpp b/src/networking.cpp index 14b6a4428..94f1c21c9 100644 --- a/src/networking.cpp +++ b/src/networking.cpp @@ -172,9 +172,7 @@ client *createClient(connection *conn, int iel) { c->pubsub_patterns = listCreate(); 
c->peerid = NULL; c->client_list_node = NULL; - c->bufAsync = NULL; - c->buflenAsync = 0; - c->bufposAsync = 0; + c->replyAsync = NULL; c->client_tracking_redirection = 0; c->casyncOpsPending = 0; c->master_error = 0; @@ -299,15 +297,27 @@ int _addReplyToBuffer(client *c, const char *s, size_t len, bool fAsync) { if (fAsync) { serverAssert(GlobalLocksAcquired()); - if ((c->buflenAsync - c->bufposAsync) < (int)len) + if (c->replyAsync == nullptr || (c->replyAsync->size - c->replyAsync->used) < len) { - int minsize = len + c->bufposAsync; - c->buflenAsync = std::max(minsize, c->buflenAsync*2 - c->buflenAsync); - c->bufAsync = (char*)zrealloc(c->bufAsync, c->buflenAsync, MALLOC_LOCAL); - c->buflenAsync = zmalloc_usable(c->bufAsync); + if (c->replyAsync == nullptr) { + size_t newsize = std::max(len, (size_t)PROTO_ASYNC_REPLY_CHUNK_BYTES); + + clientReplyBlock *replyNew = (clientReplyBlock*)zmalloc(sizeof(clientReplyBlock) + newsize); + replyNew->size = zmalloc_usable(replyNew) - sizeof(clientReplyBlock); + replyNew->used = 0; + c->replyAsync = replyNew; + } else { + size_t newsize = std::max(c->replyAsync->used + len, c->replyAsync->size*2); + clientReplyBlock *replyNew = (clientReplyBlock*)zmalloc(sizeof(clientReplyBlock) + newsize); + replyNew->size = zmalloc_usable(replyNew) - sizeof(clientReplyBlock); + replyNew->used = c->replyAsync->used; + memcpy(replyNew->buf(), c->replyAsync->buf(), c->replyAsync->used); + zfree(c->replyAsync); + c->replyAsync = replyNew; + } } - memcpy(c->bufAsync+c->bufposAsync,s,len); - c->bufposAsync += len; + memcpy(c->replyAsync->buf() + c->replyAsync->used,s,len); + c->replyAsync->used += len; } else { @@ -633,7 +643,7 @@ void *addReplyDeferredLenAsync(client *c) { if (FCorrectThread(c)) return addReplyDeferredLen(c); - return (void*)((ssize_t)c->bufposAsync); + return (void*)((ssize_t)(c->replyAsync ? c->replyAsync->used : 0)); } /* Populate the length object and try gluing it to the next chunk. 
*/ @@ -689,17 +699,22 @@ void setDeferredAggregateLenAsync(client *c, void *node, long length, char prefi char lenstr[128]; int lenstr_len = sprintf(lenstr, "%c%ld\r\n", prefix, length); - ssize_t idxSplice = (ssize_t)node; - serverAssert(idxSplice <= c->bufposAsync); - if (c->buflenAsync < (c->bufposAsync + lenstr_len)) + size_t idxSplice = (size_t)node; + serverAssert(idxSplice <= c->replyAsync->used); + if (c->replyAsync->size < (c->replyAsync->used + lenstr_len)) { - c->buflenAsync = std::max((int)(c->bufposAsync+lenstr_len), c->buflenAsync*2 - c->buflenAsync); - c->bufAsync = (char*)zrealloc(c->bufAsync, c->buflenAsync, MALLOC_LOCAL); + int newsize = std::max(c->replyAsync->used + lenstr_len, c->replyAsync->size*2); + clientReplyBlock *replyNew = (clientReplyBlock*)zmalloc(sizeof(clientReplyBlock) + newsize); + replyNew->size = zmalloc_usable(replyNew) - sizeof(clientReplyBlock); + replyNew->used = c->replyAsync->used; + memcpy(replyNew->buf(), c->replyAsync->buf(), c->replyAsync->used); + zfree(c->replyAsync); + c->replyAsync = replyNew; } - memmove(c->bufAsync + idxSplice + lenstr_len, c->bufAsync + idxSplice, c->bufposAsync - idxSplice); - memcpy(c->bufAsync + idxSplice, lenstr, lenstr_len); - c->bufposAsync += lenstr_len; + memmove(c->replyAsync->buf() + idxSplice + lenstr_len, c->replyAsync->buf() + idxSplice, c->replyAsync->used - idxSplice); + memcpy(c->replyAsync->buf() + idxSplice, lenstr, lenstr_len); + c->replyAsync->used += lenstr_len; } void setDeferredArrayLen(client *c, void *node, long length) { @@ -1640,7 +1655,7 @@ bool freeClient(client *c) { /* Release other dynamically allocated client structure fields, * and finally release the client structure itself. */ - zfree(c->bufAsync); + zfree(c->replyAsync); if (c->name) decrRefCount(c->name); zfree(c->argv); freeClientMultiState(c); @@ -1846,29 +1861,25 @@ void ProcessPendingAsyncWrites() serverAssert(c->fPendingAsyncWrite); if (c->flags & (CLIENT_CLOSE_ASAP | CLIENT_CLOSE_AFTER_REPLY)) { - c->bufposAsync = 0; - c->buflenAsync = 0; - zfree(c->bufAsync); - c->bufAsync = nullptr; + zfree(c->replyAsync); + c->replyAsync = nullptr; c->fPendingAsyncWrite = FALSE; continue; } - // TODO: Append to end of reply block? + int size = c->replyAsync->used; - size_t size = c->bufposAsync; - clientReplyBlock *reply = (clientReplyBlock*)zmalloc(size + sizeof(clientReplyBlock), MALLOC_LOCAL); - /* take over the allocation's internal fragmentation */ - reply->size = zmalloc_usable(reply) - sizeof(clientReplyBlock); - reply->used = c->bufposAsync; - memcpy(reply->buf(), c->bufAsync, c->bufposAsync); - listAddNodeTail(c->reply, reply); - c->reply_bytes += reply->size; + if (listLength(c->reply) == 0 && size <= (PROTO_REPLY_CHUNK_BYTES - c->bufpos)) { + memcpy(c->buf + c->bufpos, c->replyAsync->buf(), size); + c->bufpos += size; + } else { + c->reply_bytes += c->replyAsync->size; + listAddNodeTail(c->reply, c->replyAsync); + c->replyAsync = nullptr; + } - c->bufposAsync = 0; - c->buflenAsync = 0; - zfree(c->bufAsync); - c->bufAsync = nullptr; + zfree(c->replyAsync); + c->replyAsync = nullptr; c->fPendingAsyncWrite = FALSE; // Now install the write event handler @@ -3241,7 +3252,7 @@ void rewriteClientCommandArgument(client *c, int i, robj *newval) { * enforcing the client output length limits. 
*/ unsigned long getClientOutputBufferMemoryUsage(client *c) { unsigned long list_item_size = sizeof(listNode) + sizeof(clientReplyBlock); - return c->reply_bytes + (list_item_size*listLength(c->reply)) + c->buflenAsync; + return c->reply_bytes + (list_item_size*listLength(c->reply)) + (c->replyAsync ? c->replyAsync->size : 0); } /* Get the class of a client, used in order to enforce limits to different diff --git a/src/server.h b/src/server.h index 3705b199a..3e0c48f77 100644 --- a/src/server.h +++ b/src/server.h @@ -327,6 +327,7 @@ inline bool operator!=(const void *p, const robj_sharedptr &rhs) #define PROTO_MAX_QUERYBUF_LEN (1024*1024*1024) /* 1GB max query buffer. */ #define PROTO_IOBUF_LEN (1024*16) /* Generic I/O buffer size */ #define PROTO_REPLY_CHUNK_BYTES (16*1024) /* 16k output buffer */ +#define PROTO_ASYNC_REPLY_CHUNK_BYTES (1024) #define PROTO_INLINE_MAX_SIZE (1024*64) /* Max size of inline reads */ #define PROTO_MBULK_BIG_ARG (1024*32) #define LONG_STR_SIZE 21 /* Bytes needed for long -> str + '\0' */ @@ -1145,9 +1146,7 @@ typedef struct client { char buf[PROTO_REPLY_CHUNK_BYTES]; /* Async Response Buffer - other threads write here */ - int bufposAsync; - int buflenAsync; - char *bufAsync; + clientReplyBlock *replyAsync; int iel; /* the event loop index we're registered with */ struct fastlock lock; From 9c65bd8f3dec77d4923bdc42cb31d1bee3c93437 Mon Sep 17 00:00:00 2001 From: John Sully Date: Fri, 16 Oct 2020 06:19:52 +0000 Subject: [PATCH 182/377] Avoid locking if we won't run a time event Former-commit-id: 33b05c859afd6665feae43c47d19f7a0a764c36b --- src/ae.cpp | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/src/ae.cpp b/src/ae.cpp index 789a6888b..cb500d7b6 100644 --- a/src/ae.cpp +++ b/src/ae.cpp @@ -589,7 +589,7 @@ static aeTimeEvent *aeSearchNearestTimer(aeEventLoop *eventLoop) /* Process time events */ static int processTimeEvents(aeEventLoop *eventLoop) { - std::unique_lock ulock(g_lock); + std::unique_lock ulock(g_lock, std::defer_lock); int processed = 0; aeTimeEvent *te; long long maxId; @@ -634,8 +634,10 @@ static int processTimeEvents(aeEventLoop *eventLoop) { eventLoop->timeEventHead = te->next; if (te->next) te->next->prev = te->prev; - if (te->finalizerProc) + if (te->finalizerProc) { + if (!ulock.owns_lock()) ulock.lock(); te->finalizerProc(eventLoop, te->clientData); + } zfree(te); te = next; continue; @@ -654,6 +656,7 @@ static int processTimeEvents(aeEventLoop *eventLoop) { if (now_sec > te->when_sec || (now_sec == te->when_sec && now_ms >= te->when_ms)) { + if (!ulock.owns_lock()) ulock.lock(); int retval; id = te->id; From 91388bd42d5d1a0c71bb97c3ffea3393ab9d3ee9 Mon Sep 17 00:00:00 2001 From: John Sully Date: Fri, 16 Oct 2020 06:47:40 +0000 Subject: [PATCH 183/377] Fix replica buffer overflows Former-commit-id: 738c782f02517744662991091beb3f724661317e --- src/networking.cpp | 9 +++++---- src/server.cpp | 4 ++-- src/server.h | 4 ++-- 3 files changed, 9 insertions(+), 8 deletions(-) diff --git a/src/networking.cpp b/src/networking.cpp index 94f1c21c9..f5698282b 100644 --- a/src/networking.cpp +++ b/src/networking.cpp @@ -1911,12 +1911,13 @@ void ProcessPendingAsyncWrites() } else { - if (!c->fPendingAsyncWriteHandler) { - c->fPendingAsyncWriteHandler = true; + bool expected = false; + if (c->fPendingAsyncWriteHandler.compare_exchange_strong(expected, true)) { bool fResult = c->postFunction([](client *c) { c->fPendingAsyncWriteHandler = false; - connSetWriteHandler(c->conn, sendReplyToClient, true); - }); + 
clientInstallWriteHandler(c); + handleClientsWithPendingWrites(c->iel, g_pserver->aof_state); + }, false); if (!fResult) c->fPendingAsyncWriteHandler = false; // if we failed to set the handler then prevent this from never being reset diff --git a/src/server.cpp b/src/server.cpp index 32610a974..3cf0b0189 100644 --- a/src/server.cpp +++ b/src/server.cpp @@ -3992,13 +3992,13 @@ int processCommand(client *c, int callFlags) { return C_OK; } -bool client::postFunction(std::function fn) { +bool client::postFunction(std::function fn, bool fLock) { this->casyncOpsPending++; return aePostFunction(g_pserver->rgthreadvar[this->iel].el, [this, fn]{ std::lock_guardlock)> lock(this->lock); --casyncOpsPending; fn(this); - }) == AE_OK; + }, false, fLock) == AE_OK; } /*================================== Shutdown =============================== */ diff --git a/src/server.h b/src/server.h index 3e0c48f77..198633f90 100644 --- a/src/server.h +++ b/src/server.h @@ -1083,7 +1083,7 @@ typedef struct client { std::atomic flags; /* Client flags: CLIENT_* macros. */ int casyncOpsPending; int fPendingAsyncWrite; /* NOTE: Not a flag because it is written to outside of the client lock (locked by the global lock instead) */ - int fPendingAsyncWriteHandler; + std::atomic fPendingAsyncWriteHandler; int authenticated; /* Needed when the default user requires auth. */ int replstate; /* Replication state if this is a replica. */ int repl_put_online_on_ack; /* Install replica write handler on ACK. */ @@ -1153,7 +1153,7 @@ typedef struct client { int master_error; // post a function from a non-client thread to run on its client thread - bool postFunction(std::function fn); + bool postFunction(std::function fn, bool fLock = true); } client; struct saveparam { From 4c4a8da208f1eece4664b8d982b7d8fb01e4f882 Mon Sep 17 00:00:00 2001 From: John Sully Date: Fri, 16 Oct 2020 07:00:03 +0000 Subject: [PATCH 184/377] run when necessary Former-commit-id: d724e7226c690fdce0e6ee6cbf8afdb9481f51c5 --- src/networking.cpp | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/src/networking.cpp b/src/networking.cpp index f5698282b..9c4266df4 100644 --- a/src/networking.cpp +++ b/src/networking.cpp @@ -1915,8 +1915,10 @@ void ProcessPendingAsyncWrites() if (c->fPendingAsyncWriteHandler.compare_exchange_strong(expected, true)) { bool fResult = c->postFunction([](client *c) { c->fPendingAsyncWriteHandler = false; - clientInstallWriteHandler(c); - handleClientsWithPendingWrites(c->iel, g_pserver->aof_state); + if (c->bufpos || listLength(c->reply) || (c->flags & CLIENT_PENDING_WRITE)) { + clientInstallWriteHandler(c); + handleClientsWithPendingWrites(c->iel, g_pserver->aof_state); + } }, false); if (!fResult) From 5fb0c880a87ad7fc3c95106da3459e122f518414 Mon Sep 17 00:00:00 2001 From: John Sully Date: Wed, 21 Oct 2020 21:16:08 +0000 Subject: [PATCH 185/377] Remove leak warning with ASAN in the CLI Former-commit-id: 05cd92f7661ba1a96cdcac278720b7da0b6ba60e --- src/redis-cli.c | 18 ++++++++++++------ 1 file changed, 12 insertions(+), 6 deletions(-) diff --git a/src/redis-cli.c b/src/redis-cli.c index 5dfcd6fad..412760278 100644 --- a/src/redis-cli.c +++ b/src/redis-cli.c @@ -1878,14 +1878,15 @@ static void repl(void) { exit(0); } -static int noninteractive(int argc, char **argv) { +static int noninteractive(int argc, char ***argv) { int retval = 0; if (config.stdinarg) { - argv = zrealloc(argv, (argc+1)*sizeof(char*), MALLOC_LOCAL); - argv[argc] = readArgFromStdin(); - retval = issueCommand(argc+1, argv); + *argv = zrealloc(*argv, 
(argc+1)*sizeof(char*), MALLOC_LOCAL); + (*argv)[argc] = readArgFromStdin(); + retval = issueCommand(argc+1, *argv); + sdsfree((*argv)[argc]); } else { - retval = issueCommand(argc, argv); + retval = issueCommand(argc, *argv); } return retval; } @@ -7134,6 +7135,11 @@ int main(int argc, char **argv) { if (config.eval) { return evalMode(argc,argv); } else { - return noninteractive(argc,convertToSds(argc,argv)); + sds *sdsArgs = convertToSds(argc,argv); + int rval = noninteractive(argc,&sdsArgs); + for (int i = 0; i < argc; ++i) + sdsfree(sdsArgs[i]); + zfree(sdsArgs); + return rval; } } From 0691608ba029a3bf888806fe97b03c6b1ea0508a Mon Sep 17 00:00:00 2001 From: John Sully Date: Fri, 23 Oct 2020 15:57:39 +0000 Subject: [PATCH 186/377] Fix multithread test failures Former-commit-id: 7c39a9b0e193f5c0b327ff21cd49210037142642 --- src/networking.cpp | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/src/networking.cpp b/src/networking.cpp index 9c4266df4..f5698282b 100644 --- a/src/networking.cpp +++ b/src/networking.cpp @@ -1915,10 +1915,8 @@ void ProcessPendingAsyncWrites() if (c->fPendingAsyncWriteHandler.compare_exchange_strong(expected, true)) { bool fResult = c->postFunction([](client *c) { c->fPendingAsyncWriteHandler = false; - if (c->bufpos || listLength(c->reply) || (c->flags & CLIENT_PENDING_WRITE)) { - clientInstallWriteHandler(c); - handleClientsWithPendingWrites(c->iel, g_pserver->aof_state); - } + clientInstallWriteHandler(c); + handleClientsWithPendingWrites(c->iel, g_pserver->aof_state); }, false); if (!fResult) From 7a9fbad132e755fbf776975d8570784488e87e8e Mon Sep 17 00:00:00 2001 From: John Sully Date: Fri, 23 Oct 2020 20:21:49 +0000 Subject: [PATCH 187/377] Allow the locker to be used even without a client Former-commit-id: 041446005b03121adf7ac061bd0fd2ec70d9418e --- src/aelocker.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/aelocker.h b/src/aelocker.h index e854f907b..777be4832 100644 --- a/src/aelocker.h +++ b/src/aelocker.h @@ -9,7 +9,7 @@ public: { } - void arm(client *c, bool fIfNeeded = false) // if a client is passed, then the client is already locked + void arm(client *c = nullptr, bool fIfNeeded = false) // if a client is passed, then the client is already locked { if (m_fArmed) return; From 19a4d2d358e941c9d2de7dba59304c2f5bfa28af Mon Sep 17 00:00:00 2001 From: John Sully Date: Sat, 24 Oct 2020 02:18:03 +0000 Subject: [PATCH 188/377] Remove addReply*Async methods since we already know if its async or not. 
This is just a source of bugs Former-commit-id: df22cdf6e91a1b9c390b69c4209c719ecf1e44f1 --- src/blocked.cpp | 16 +- src/module.cpp | 36 ++--- src/networking.cpp | 348 ++++++++++++-------------------------------- src/object.cpp | 2 +- src/pubsub.cpp | 22 +-- src/replication.cpp | 30 ++-- src/server.h | 25 +--- src/t_list.cpp | 8 +- src/t_stream.cpp | 14 +- src/t_zset.cpp | 10 +- src/timeout.cpp | 2 +- src/tracking.cpp | 16 +- 12 files changed, 172 insertions(+), 357 deletions(-) diff --git a/src/blocked.cpp b/src/blocked.cpp index 7f96fcfec..63cce0996 100644 --- a/src/blocked.cpp +++ b/src/blocked.cpp @@ -188,9 +188,9 @@ void replyToBlockedClientTimedOut(client *c) { if (c->btype == BLOCKED_LIST || c->btype == BLOCKED_ZSET || c->btype == BLOCKED_STREAM) { - addReplyNullArrayAsync(c); + addReplyNullArray(c); } else if (c->btype == BLOCKED_WAIT) { - addReplyLongLongAsync(c,replicationCountAcksByOffset(c->bpop.reploffset)); + addReplyLongLong(c,replicationCountAcksByOffset(c->bpop.reploffset)); } else if (c->btype == BLOCKED_MODULE) { moduleBlockedClientTimedOut(c); } else { @@ -216,7 +216,7 @@ void disconnectAllBlockedClients(void) { fastlock_lock(&c->lock); if (c->flags & CLIENT_BLOCKED) { - addReplySdsAsync(c,sdsnew( + addReplySds(c,sdsnew( "-UNBLOCKED force unblock from blocking operation, " "instance state changed (master -> replica?)\r\n")); unblockClient(c); @@ -373,7 +373,7 @@ void serveClientsBlockedOnStreamKey(robj *o, readyList *rl) { /* If the group was not found, send an error * to the consumer. */ if (!group) { - addReplyErrorAsync(receiver, + addReplyError(receiver, "-NOGROUP the consumer group this client " "was blocked on no longer exists"); unblockClient(receiver); @@ -404,12 +404,12 @@ void serveClientsBlockedOnStreamKey(robj *o, readyList *rl) { * extracted from it. Wrapped in a single-item * array, since we have just one key. 
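 * (The receiver is frequently owned by another thread; the plain addReply
 * calls below now handle that routing themselves, which is why the
 * dedicated Async variants could be dropped from this path.)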
*/ if (receiver->resp == 2) { - addReplyArrayLenAsync(receiver,1); - addReplyArrayLenAsync(receiver,2); + addReplyArrayLen(receiver,1); + addReplyArrayLen(receiver,2); } else { - addReplyMapLenAsync(receiver,1); + addReplyMapLen(receiver,1); } - addReplyBulkAsync(receiver,rl->key); + addReplyBulk(receiver,rl->key); streamPropInfo pi = { rl->key, diff --git a/src/module.cpp b/src/module.cpp index 73e176e7a..028ffd4cc 100644 --- a/src/module.cpp +++ b/src/module.cpp @@ -1358,7 +1358,7 @@ int RM_ReplyWithLongLong(RedisModuleCtx *ctx, long long ll) { AeLocker locker; std::unique_lock lock(c->lock); locker.arm(c); - addReplyLongLongAsync(c,ll); + addReplyLongLong(c,ll); return REDISMODULE_OK; } @@ -1371,9 +1371,9 @@ int replyWithStatus(RedisModuleCtx *ctx, const char *msg, const char *prefix) { AeLocker locker; std::unique_lock lock(c->lock); locker.arm(c); - addReplyProtoAsync(c,prefix,strlen(prefix)); - addReplyProtoAsync(c,msg,strlen(msg)); - addReplyProtoAsync(c,"\r\n",2); + addReplyProto(c,prefix,strlen(prefix)); + addReplyProto(c,msg,strlen(msg)); + addReplyProto(c,"\r\n",2); return REDISMODULE_OK; } @@ -1426,10 +1426,10 @@ int RM_ReplyWithArray(RedisModuleCtx *ctx, long len) { ctx->postponed_arrays = (void**)zrealloc(ctx->postponed_arrays,sizeof(void*)* (ctx->postponed_arrays_count+1), MALLOC_LOCAL); ctx->postponed_arrays[ctx->postponed_arrays_count] = - addReplyDeferredLenAsync(c); + addReplyDeferredLen(c); ctx->postponed_arrays_count++; } else { - addReplyArrayLenAsync(c,len); + addReplyArrayLen(c,len); } return REDISMODULE_OK; } @@ -1444,7 +1444,7 @@ int RM_ReplyWithNullArray(RedisModuleCtx *ctx) { AeLocker locker; std::unique_lock lock(c->lock); locker.arm(c); - addReplyNullArrayAsync(c); + addReplyNullArray(c); return REDISMODULE_OK; } @@ -1457,7 +1457,7 @@ int RM_ReplyWithEmptyArray(RedisModuleCtx *ctx) { AeLocker locker; std::unique_lock lock(c->lock); locker.arm(c); - addReplyAsync(c,shared.emptyarray); + addReply(c,shared.emptyarray); return REDISMODULE_OK; } @@ -1502,7 +1502,7 @@ void RM_ReplySetArrayLength(RedisModuleCtx *ctx, long len) { return; } ctx->postponed_arrays_count--; - setDeferredArrayLenAsync(c, + setDeferredArrayLen(c, ctx->postponed_arrays[ctx->postponed_arrays_count], len); if (ctx->postponed_arrays_count == 0) { @@ -1520,7 +1520,7 @@ int RM_ReplyWithStringBuffer(RedisModuleCtx *ctx, const char *buf, size_t len) { AeLocker locker; std::unique_lock lock(c->lock); locker.arm(c); - addReplyBulkCBufferAsync(c,(char*)buf,len); + addReplyBulkCBuffer(c,(char*)buf,len); return REDISMODULE_OK; } @@ -1534,7 +1534,7 @@ int RM_ReplyWithCString(RedisModuleCtx *ctx, const char *buf) { AeLocker locker; std::unique_lock lock(c->lock); locker.arm(c); - addReplyBulkCStringAsync(c,(char*)buf); + addReplyBulkCString(c,(char*)buf); return REDISMODULE_OK; } @@ -1547,7 +1547,7 @@ int RM_ReplyWithString(RedisModuleCtx *ctx, RedisModuleString *str) { AeLocker locker; std::unique_lock lock(c->lock); locker.arm(c); - addReplyBulkAsync(c,str); + addReplyBulk(c,str); return REDISMODULE_OK; } @@ -1560,7 +1560,7 @@ int RM_ReplyWithEmptyString(RedisModuleCtx *ctx) { AeLocker locker; std::unique_lock lock(c->lock); locker.arm(c); - addReplyAsync(c,shared.emptybulk); + addReply(c,shared.emptybulk); return REDISMODULE_OK; } @@ -1574,7 +1574,7 @@ int RM_ReplyWithVerbatimString(RedisModuleCtx *ctx, const char *buf, size_t len) AeLocker locker; std::unique_lock lock(c->lock); locker.arm(c); - addReplyVerbatimAsync(c, buf, len, "txt"); + addReplyVerbatim(c, buf, len, "txt"); return REDISMODULE_OK; } 
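/* Every hunk in this file converges on the same shape once the Async
 * variants are gone: take the client lock, arm the AeLocker, then call the
 * plain reply API, which routes the bytes itself based on the calling
 * thread. A minimal sketch of that pattern, assuming the module.cpp context
 * (the function name is hypothetical; moduleGetReplyClient(), AeLocker and
 * the plain addReply helpers are the ones already used above): */
int RM_ReplyWithExampleStatus(RedisModuleCtx *ctx, const char *msg) {
    client *c = moduleGetReplyClient(ctx);
    if (c == NULL) return REDISMODULE_OK;
    AeLocker locker;
    std::unique_lock lock(c->lock);   /* per-client lock first ... */
    locker.arm(c);                    /* ... then the event-loop lock */
    addReplyProto(c, "+", 1);         /* a status reply is "+<msg>\r\n" */
    addReplyProto(c, msg, strlen(msg));
    addReplyProto(c, "\r\n", 2);
    return REDISMODULE_OK;
}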
@@ -1587,7 +1587,7 @@ int RM_ReplyWithNull(RedisModuleCtx *ctx) { AeLocker locker; std::unique_lock lock(c->lock); locker.arm(c); - addReplyNullAsync(c); + addReplyNull(c); return REDISMODULE_OK; } @@ -1604,7 +1604,7 @@ int RM_ReplyWithCallReply(RedisModuleCtx *ctx, RedisModuleCallReply *reply) { std::unique_lock lock(c->lock); locker.arm(c); sds proto = sdsnewlen(reply->proto, reply->protolen); - addReplySdsAsync(c,proto); + addReplySds(c,proto); return REDISMODULE_OK; } @@ -1620,7 +1620,7 @@ int RM_ReplyWithDouble(RedisModuleCtx *ctx, double d) { AeLocker locker; std::unique_lock lock(c->lock); locker.arm(c); - addReplyDoubleAsync(c,d); + addReplyDouble(c,d); return REDISMODULE_OK; } @@ -1638,7 +1638,7 @@ int RM_ReplyWithLongDouble(RedisModuleCtx *ctx, long double ld) { AeLocker locker; std::unique_lock lock(c->lock); locker.arm(c); - addReplyHumanLongDoubleAsync(c, ld); + addReplyHumanLongDouble(c, ld); return REDISMODULE_OK; } diff --git a/src/networking.cpp b/src/networking.cpp index f5698282b..1c3e528c5 100644 --- a/src/networking.cpp +++ b/src/networking.cpp @@ -40,8 +40,6 @@ #include "aelocker.h" static void setProtocolError(const char *errstr, client *c); -void addReplyLongLongWithPrefixCore(client *c, long long ll, char prefix, bool fAsync); -void addReplyBulkCStringCore(client *c, const char *s, bool fAsync); /* Return the size consumed from the allocator, for the specified SDS string, * including internal fragmentation. This function is used in order to compute @@ -251,10 +249,10 @@ void clientInstallAsyncWriteHandler(client *c) { * Typically gets called every time a reply is built, before adding more * data to the clients output buffers. If the function returns C_ERR no * data should be appended to the output buffers. */ -int prepareClientToWrite(client *c, bool fAsync) { - fAsync = fAsync && !FCorrectThread(c); // Not async if we're on the right thread - serverAssert(FCorrectThread(c) || fAsync); - if (FCorrectThread(c)) { +int prepareClientToWrite(client *c) { + bool fAsync = !FCorrectThread(c); // Not async if we're on the right thread + + if (!fAsync) { serverAssert(c->conn == nullptr || c->lock.fOwnLock()); } else { serverAssert(GlobalLocksAcquired()); @@ -290,10 +288,10 @@ int prepareClientToWrite(client *c, bool fAsync) { * Low level functions to add more data to output buffers. * -------------------------------------------------------------------------- */ -int _addReplyToBuffer(client *c, const char *s, size_t len, bool fAsync) { +int _addReplyToBuffer(client *c, const char *s, size_t len) { if (c->flags.load(std::memory_order_relaxed) & CLIENT_CLOSE_AFTER_REPLY) return C_OK; - fAsync = fAsync && !FCorrectThread(c); // Not async if we're on the right thread + bool fAsync = !FCorrectThread(c); if (fAsync) { serverAssert(GlobalLocksAcquired()); @@ -377,11 +375,12 @@ void _addReplyProtoToList(client *c, const char *s, size_t len) { * Higher level functions to queue data on the client output buffer. * The following functions are the ones that commands implementations will call. * -------------------------------------------------------------------------- */ -void addReplyCore(client *c, robj_roptr obj, bool fAsync) { - if (prepareClientToWrite(c, fAsync) != C_OK) return; +/* Add the object 'obj' string representation to the client output buffer. 
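 * If the calling thread does not own the client (FCorrectThread() is
 * false), _addReplyToBuffer() appends the data to the thread-safe
 * replyAsync block instead of the regular output buffers.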
*/ +void addReply(client *c, robj_roptr obj) { + if (prepareClientToWrite(c) != C_OK) return; if (sdsEncodedObject(obj)) { - if (_addReplyToBuffer(c,(const char*)ptrFromObj(obj),sdslen((sds)ptrFromObj(obj)),fAsync) != C_OK) + if (_addReplyToBuffer(c,(const char*)ptrFromObj(obj),sdslen((sds)ptrFromObj(obj))) != C_OK) _addReplyProtoToList(c,(const char*)ptrFromObj(obj),sdslen((sds)ptrFromObj(obj))); } else if (obj->encoding == OBJ_ENCODING_INT) { /* For integer encoded strings we just convert it into a string @@ -389,44 +388,26 @@ void addReplyCore(client *c, robj_roptr obj, bool fAsync) { * to the output buffer. */ char buf[32]; size_t len = ll2string(buf,sizeof(buf),(long)ptrFromObj(obj)); - if (_addReplyToBuffer(c,buf,len,fAsync) != C_OK) + if (_addReplyToBuffer(c,buf,len) != C_OK) _addReplyProtoToList(c,buf,len); } else { serverPanic("Wrong obj->encoding in addReply()"); } } -/* Add the object 'obj' string representation to the client output buffer. */ -void addReply(client *c, robj_roptr obj) -{ - addReplyCore(c, obj, false); -} -void addReplyAsync(client *c, robj_roptr obj) -{ - addReplyCore(c, obj, true); -} - /* Add the SDS 's' string to the client output buffer, as a side effect * the SDS string is freed. */ -void addReplySdsCore(client *c, sds s, bool fAsync) { - if (prepareClientToWrite(c, fAsync) != C_OK) { +void addReplySds(client *c, sds s) { + if (prepareClientToWrite(c) != C_OK) { /* The caller expects the sds to be free'd. */ sdsfree(s); return; } - if (_addReplyToBuffer(c,s,sdslen(s), fAsync) != C_OK) + if (_addReplyToBuffer(c,s,sdslen(s)) != C_OK) _addReplyProtoToList(c,s,sdslen(s)); sdsfree(s); } -void addReplySds(client *c, sds s) { - addReplySdsCore(c, s, false); -} - -void addReplySdsAsync(client *c, sds s) { - addReplySdsCore(c, s, true); -} - /* This low level function just adds whatever protocol you send it to the * client buffer, trying the static buffer initially, and using the string * of objects if not possible. @@ -435,18 +416,10 @@ void addReplySdsAsync(client *c, sds s) { * if not needed. The object will only be created by calling * _addReplyProtoToList() if we fail to extend the existing tail object * in the list of objects. */ -void addReplyProtoCore(client *c, const char *s, size_t len, bool fAsync) { - if (prepareClientToWrite(c, fAsync) != C_OK) return; - if (_addReplyToBuffer(c,s,len,fAsync) != C_OK) - _addReplyProtoToList(c,s,len); -} - void addReplyProto(client *c, const char *s, size_t len) { - addReplyProtoCore(c, s, len, false); -} - -void addReplyProtoAsync(client *c, const char *s, size_t len) { - addReplyProtoCore(c, s, len, true); + if (prepareClientToWrite(c) != C_OK) return; + if (_addReplyToBuffer(c,s,len) != C_OK) + _addReplyProtoToList(c,s,len); } std::string escapeString(sds str) @@ -486,12 +459,12 @@ std::string escapeString(sds str) * code provided is used, otherwise the string "-ERR " for the generic * error code is automatically added. * Note that 's' must NOT end with \r\n. */ -void addReplyErrorLengthCore(client *c, const char *s, size_t len, bool fAsync) { +void addReplyErrorLength(client *c, const char *s, size_t len) { /* If the string already starts with "-..." then the error code * is provided by the caller. Otherwise we use "-ERR". 
*/ - if (!len || s[0] != '-') addReplyProtoCore(c,"-ERR ",5,fAsync); - addReplyProtoCore(c,s,len,fAsync); - addReplyProtoCore(c,"\r\n",2,fAsync); + if (!len || s[0] != '-') addReplyProto(c,"-ERR ",5); + addReplyProto(c,s,len); + addReplyProto(c,"\r\n",2); } /* Do some actions after an error reply was sent (Log if needed, updates stats, etc.) */ @@ -535,11 +508,6 @@ void afterErrorReply(client *c, const char *s, size_t len) { } } -void addReplyErrorLength(client *c, const char *s, size_t len) -{ - addReplyErrorLengthCore(c, s, len, false); -} - /* The 'err' object is expected to start with -ERRORCODE and end with \r\n. * Unlike addReplyErrorSds and others alike which rely on addReplyErrorLength. */ void addReplyErrorObject(client *c, robj *err) { @@ -547,13 +515,8 @@ void addReplyErrorObject(client *c, robj *err) { afterErrorReply(c, szFromObj(err), sdslen(szFromObj(err))-2); /* Ignore trailing \r\n */ } -/* See addReplyErrorLength for expectations from the input string. */ void addReplyError(client *c, const char *err) { - addReplyErrorLengthCore(c,err,strlen(err), false); -} - -void addReplyErrorAsync(client *c, const char *err) { - addReplyErrorLengthCore(c, err, strlen(err), true); + addReplyErrorLength(c, err, strlen(err)); afterErrorReply(c,err,strlen(err)); } @@ -629,19 +592,19 @@ void trimReplyUnusedTailSpace(client *c) { /* Adds an empty object to the reply list that will contain the multi bulk * length, which is not known when this function is called. */ -void *addReplyDeferredLen(client *c) { +void *addReplyDeferredLenCore(client *c) { /* Note that we install the write event here even if the object is not * ready to be sent, since we are sure that before returning to the * event loop setDeferredAggregateLen() will be called. */ - if (prepareClientToWrite(c, false) != C_OK) return NULL; + if (prepareClientToWrite(c) != C_OK) return NULL; trimReplyUnusedTailSpace(c); listAddNodeTail(c->reply,NULL); /* NULL is our placeholder. */ return listLast(c->reply); } -void *addReplyDeferredLenAsync(client *c) { +void *addReplyDeferredLen(client *c) { if (FCorrectThread(c)) - return addReplyDeferredLen(c); + return addReplyDeferredLenCore(c); return (void*)((ssize_t)(c->replyAsync ? c->replyAsync->used : 0)); } @@ -718,11 +681,10 @@ void setDeferredAggregateLenAsync(client *c, void *node, long length, char prefi } void setDeferredArrayLen(client *c, void *node, long length) { - setDeferredAggregateLen(c,node,length,'*'); -} - -void setDeferredArrayLenAsync(client *c, void *node, long length) { - setDeferredAggregateLenAsync(c, node, length, '*'); + if (FCorrectThread(c)) + setDeferredAggregateLen(c,node,length,'*'); + else + setDeferredAggregateLenAsync(c, node, length, '*'); } void setDeferredMapLen(client *c, void *node, long length) { @@ -748,15 +710,15 @@ void setDeferredPushLen(client *c, void *node, long length) { } /* Add a double as a bulk reply */ -void addReplyDoubleCore(client *c, double d, bool fAsync) { +void addReplyDouble(client *c, double d) { if (std::isinf(d)) { /* Libc in odd systems (Hi Solaris!) will format infinite in a * different way, so better to handle it in an explicit way. */ if (c->resp == 2) { - addReplyBulkCStringCore(c, d > 0 ? "inf" : "-inf", fAsync); + addReplyBulkCString(c, d > 0 ? "inf" : "-inf"); } else { - addReplyProtoCore(c, d > 0 ? ",inf\r\n" : ",-inf\r\n", - d > 0 ? 6 : 7, fAsync); + addReplyProto(c, d > 0 ? ",inf\r\n" : ",-inf\r\n", + d > 0 ? 
6 : 7); } } else { char dbuf[MAX_LONG_DOUBLE_CHARS+3], @@ -765,52 +727,34 @@ void addReplyDoubleCore(client *c, double d, bool fAsync) { if (c->resp == 2) { dlen = snprintf(dbuf,sizeof(dbuf),"%.17g",d); slen = snprintf(sbuf,sizeof(sbuf),"$%d\r\n%s\r\n",dlen,dbuf); - addReplyProtoCore(c,sbuf,slen,fAsync); + addReplyProto(c,sbuf,slen); } else { dlen = snprintf(dbuf,sizeof(dbuf),",%.17g\r\n",d); - addReplyProtoCore(c,dbuf,dlen,fAsync); + addReplyProto(c,dbuf,dlen); } } } -void addReplyDouble(client *c, double d) { - addReplyDoubleCore(c, d, false); -} - -void addReplyDoubleAsync(client *c, double d) { - addReplyDoubleCore(c, d, true); -} - -void addReplyBulkCore(client *c, robj_roptr obj, bool fAsync); - /* Add a long double as a bulk reply, but uses a human readable formatting * of the double instead of exposing the crude behavior of doubles to the * dear user. */ -void addReplyHumanLongDoubleCore(client *c, long double d, bool fAsync) { +void addReplyHumanLongDouble(client *c, long double d) { if (c->resp == 2) { robj *o = createStringObjectFromLongDouble(d,1); - addReplyBulkCore(c,o,fAsync); + addReplyBulk(c,o); decrRefCount(o); } else { char buf[MAX_LONG_DOUBLE_CHARS]; int len = ld2string(buf,sizeof(buf),d,LD_STR_HUMAN); - addReplyProtoCore(c,",",1,fAsync); - addReplyProtoCore(c,buf,len,fAsync); - addReplyProtoCore(c,"\r\n",2,fAsync); + addReplyProto(c,",",1); + addReplyProto(c,buf,len); + addReplyProto(c,"\r\n",2); } } -void addReplyHumanLongDouble(client *c, long double d) { - addReplyHumanLongDoubleCore(c, d, false); -} - -void addReplyHumanLongDoubleAsync(client *c, long double d) { - addReplyHumanLongDoubleCore(c, d, true); -} - /* Add a long long as integer reply or bulk len / multi bulk count. * Basically this is used to output . */ -void addReplyLongLongWithPrefixCore(client *c, long long ll, char prefix, bool fAsync) { +void addReplyLongLongWithPrefix(client *c, long long ll, char prefix) { char buf[128]; int len; @@ -818,10 +762,10 @@ void addReplyLongLongWithPrefixCore(client *c, long long ll, char prefix, bool f * so we have a few shared objects to use if the integer is small * like it is most of the times. 
*/ if (prefix == '*' && ll < OBJ_SHARED_BULKHDR_LEN && ll >= 0) { - addReplyCore(c,shared.mbulkhdr[ll], fAsync); + addReply(c,shared.mbulkhdr[ll]); return; } else if (prefix == '$' && ll < OBJ_SHARED_BULKHDR_LEN && ll >= 0) { - addReplyCore(c,shared.bulkhdr[ll], fAsync); + addReply(c,shared.bulkhdr[ll]); return; } @@ -829,65 +773,33 @@ void addReplyLongLongWithPrefixCore(client *c, long long ll, char prefix, bool f len = ll2string(buf+1,sizeof(buf)-1,ll); buf[len+1] = '\r'; buf[len+2] = '\n'; - addReplyProtoCore(c,buf,len+3, fAsync); -} - -void addReplyLongLongWithPrefix(client *c, long long ll, char prefix) { - addReplyLongLongWithPrefixCore(c, ll, prefix, false); -} - -void addReplyLongLongCore(client *c, long long ll, bool fAsync) { - if (ll == 0) - addReplyCore(c,shared.czero, fAsync); - else if (ll == 1) - addReplyCore(c,shared.cone, fAsync); - else - addReplyLongLongWithPrefixCore(c,ll,':', fAsync); + addReplyProto(c,buf,len+3); } void addReplyLongLong(client *c, long long ll) { - addReplyLongLongCore(c, ll, false); -} - -void addReplyLongLongAsync(client *c, long long ll) { - addReplyLongLongCore(c, ll, true); -} - -void addReplyAggregateLenCore(client *c, long length, int prefix, bool fAsync) { - if (prefix == '*' && length < OBJ_SHARED_BULKHDR_LEN) - addReplyCore(c,shared.mbulkhdr[length], fAsync); + if (ll == 0) + addReply(c,shared.czero); + else if (ll == 1) + addReply(c,shared.cone); else - addReplyLongLongWithPrefixCore(c,length,prefix, fAsync); + addReplyLongLongWithPrefix(c,ll,':'); } void addReplyAggregateLen(client *c, long length, int prefix) { - addReplyAggregateLenCore(c, length, prefix, false); -} - -void addReplyArrayLenCore(client *c, long length, bool fAsync) { - addReplyAggregateLenCore(c,length,'*', fAsync); + if (prefix == '*' && length < OBJ_SHARED_BULKHDR_LEN) + addReply(c,shared.mbulkhdr[length]); + else + addReplyLongLongWithPrefix(c,length,prefix); } void addReplyArrayLen(client *c, long length) { - addReplyArrayLenCore(c, length, false); -} - -void addReplyArrayLenAsync(client *c, long length) { - addReplyArrayLenCore(c, length, true); -} - -void addReplyMapLenCore(client *c, long length, bool fAsync) { - int prefix = c->resp == 2 ? '*' : '%'; - if (c->resp == 2) length *= 2; - addReplyAggregateLenCore(c,length,prefix,fAsync); + addReplyAggregateLen(c,length,'*'); } void addReplyMapLen(client *c, long length) { - addReplyMapLenCore(c, length, false); -} - -void addReplyMapLenAsync(client *c, long length) { - addReplyMapLenCore(c, length, true); + int prefix = c->resp == 2 ? '*' : '%'; + if (c->resp == 2) length *= 2; + addReplyAggregateLen(c,length,prefix); } void addReplySetLen(client *c, long length) { @@ -901,38 +813,19 @@ void addReplyAttributeLen(client *c, long length) { addReplyAggregateLen(c,length,prefix); } -void addReplyPushLenCore(client *c, long length, bool fAsync) { - int prefix = c->resp == 2 ? '*' : '>'; - addReplyAggregateLenCore(c,length,prefix, fAsync); -} - void addReplyPushLen(client *c, long length) { - addReplyPushLenCore(c, length, false); + int prefix = c->resp == 2 ? 
'*' : '>'; + addReplyAggregateLen(c,length,prefix); } -void addReplyPushLenAsync(client *c, long length) { - addReplyPushLenCore(c, length, true); -} - -void addReplyNullCore(client *c, bool fAsync) { +void addReplyNull(client *c) { if (c->resp == 2) { - addReplyProtoCore(c,"$-1\r\n",5,fAsync); + addReplyProto(c,"$-1\r\n",5); } else { - addReplyProtoCore(c,"_\r\n",3,fAsync); + addReplyProto(c,"_\r\n",3); } } -void addReplyNull(client *c, robj_roptr objOldProtocol) { - if (c->resp < 3 && objOldProtocol != nullptr) - addReply(c, objOldProtocol); - else - addReplyNullCore(c, false); -} - -void addReplyNullAsync(client *c) { - addReplyNullCore(c, true); -} - void addReplyBool(client *c, int b) { if (c->resp == 2) { addReply(c, b ? shared.cone : shared.czero); @@ -945,107 +838,58 @@ void addReplyBool(client *c, int b) { * RESP2 had it, so API-wise we have this call, that will emit the correct * RESP2 protocol, however for RESP3 the reply will always be just the * Null type "_\r\n". */ -void addReplyNullArrayCore(client *c, bool fAsync) +void addReplyNullArray(client *c) { if (c->resp == 2) { - addReplyProtoCore(c,"*-1\r\n",5,fAsync); + addReplyProto(c,"*-1\r\n",5); } else { - addReplyProtoCore(c,"_\r\n",3,fAsync); + addReplyProto(c,"_\r\n",3); } } -void addReplyNullArray(client *c) -{ - addReplyNullArrayCore(c, false); -} - -void addReplyNullArrayAsync(client *c) -{ - addReplyNullArrayCore(c, true); -} - /* Create the length prefix of a bulk reply, example: $2234 */ -void addReplyBulkLenCore(client *c, robj_roptr obj, bool fAsync) { +void addReplyBulkLen(client *c, robj_roptr obj) { size_t len = stringObjectLen(obj); if (len < OBJ_SHARED_BULKHDR_LEN) - addReplyCore(c,shared.bulkhdr[len], fAsync); + addReply(c,shared.bulkhdr[len]); else - addReplyLongLongWithPrefixCore(c,len,'$', fAsync); -} - -void addReplyBulkLen(client *c, robj *obj) -{ - addReplyBulkLenCore(c, obj, false); + addReplyLongLongWithPrefix(c,len,'$'); } /* Add a Redis Object as a bulk reply */ -void addReplyBulkCore(client *c, robj_roptr obj, bool fAsync) { - addReplyBulkLenCore(c,obj,fAsync); - addReplyCore(c,obj,fAsync); - addReplyCore(c,shared.crlf,fAsync); -} - -void addReplyBulk(client *c, robj_roptr obj) -{ - addReplyBulkCore(c, obj, false); -} - -void addReplyBulkAsync(client *c, robj_roptr obj) -{ - addReplyBulkCore(c, obj, true); +void addReplyBulk(client *c, robj_roptr obj) { + addReplyBulkLen(c,obj); + addReply(c,obj); + addReply(c,shared.crlf); } /* Add a C buffer as bulk reply */ -void addReplyBulkCBufferCore(client *c, const void *p, size_t len, bool fAsync) { - addReplyLongLongWithPrefixCore(c,len,'$',fAsync); - addReplyProtoCore(c,(const char*)p,len,fAsync); - addReplyCore(c,shared.crlf,fAsync); -} - void addReplyBulkCBuffer(client *c, const void *p, size_t len) { - addReplyBulkCBufferCore(c, p, len, false); -} - -void addReplyBulkCBufferAsync(client *c, const void *p, size_t len) { - addReplyBulkCBufferCore(c, p, len, true); + addReplyLongLongWithPrefix(c,len,'$'); + addReplyProto(c,(const char*)p,len); + addReply(c,shared.crlf); } /* Add sds to reply (takes ownership of sds and frees it) */ -void addReplyBulkSdsCore(client *c, sds s, bool fAsync) { - addReplyLongLongWithPrefixCore(c,sdslen(s),'$', fAsync); - addReplySdsCore(c,s,fAsync); - addReplyCore(c,shared.crlf,fAsync); -} - -void addReplyBulkSds(client *c, sds s) { - addReplyBulkSdsCore(c, s, false); -} - -void addReplyBulkSdsAsync(client *c, sds s) { - addReplyBulkSdsCore(c, s, true); +void addReplyBulkSds(client *c, sds s) { + 
addReplyLongLongWithPrefix(c,sdslen(s),'$'); + addReplySds(c,s); + addReply(c,shared.crlf); } /* Add a C null term string as bulk reply */ -void addReplyBulkCStringCore(client *c, const char *s, bool fAsync) { +void addReplyBulkCString(client *c, const char *s) { if (s == NULL) { if (c->resp < 3) - addReplyCore(c,shared.nullbulk, fAsync); + addReply(c,shared.nullbulk); else - addReplyNullCore(c,fAsync); + addReplyNull(c); } else { - addReplyBulkCBufferCore(c,s,strlen(s),fAsync); + addReplyBulkCBuffer(c,s,strlen(s)); } } -void addReplyBulkCString(client *c, const char *s) { - addReplyBulkCStringCore(c, s, false); -} - -void addReplyBulkCStringAsync(client *c, const char *s) { - addReplyBulkCStringCore(c, s, true); -} - /* Add a long long as a bulk reply */ void addReplyBulkLongLong(client *c, long long ll) { char buf[64]; @@ -1064,9 +908,9 @@ void addReplyBulkLongLong(client *c, long long ll) { * three first characters of the extension are used, and if the * provided one is shorter than that, the remaining is filled with * spaces. */ -void addReplyVerbatimCore(client *c, const char *s, size_t len, const char *ext, bool fAsync) { +void addReplyVerbatim(client *c, const char *s, size_t len, const char *ext) { if (c->resp == 2) { - addReplyBulkCBufferCore(c,s,len,fAsync); + addReplyBulkCBuffer(c,s,len); } else { char buf[32]; size_t preflen = snprintf(buf,sizeof(buf),"=%zu\r\nxxx:",len+4); @@ -1078,20 +922,12 @@ void addReplyVerbatimCore(client *c, const char *s, size_t len, const char *ext, p[i] = *ext++; } } - addReplyProtoCore(c,buf,preflen,fAsync); - addReplyProtoCore(c,s,len,fAsync); - addReplyProtoCore(c,"\r\n",2,fAsync); + addReplyProto(c,buf,preflen); + addReplyProto(c,s,len); + addReplyProto(c,"\r\n",2); } } -void addReplyVerbatim(client *c, const char *s, size_t len, const char *ext) { - addReplyVerbatimCore(c, s, len, ext, false); -} - -void addReplyVerbatimAsync(client *c, const char *s, size_t len, const char *ext) { - addReplyVerbatimCore(c, s, len, ext, true); -} - /* Add an array of C strings as status replies with a heading. * This function is typically invoked by from commands that support * subcommands in response to the 'help' subcommand. The help array @@ -1127,7 +963,7 @@ void addReplySubcommandSyntaxError(client *c) { /* Append 'src' client output buffers into 'dst' client output buffers. 
* This function clears the output buffers of 'src' */ void AddReplyFromClient(client *dst, client *src) { - if (prepareClientToWrite(dst, false) != C_OK) + if (prepareClientToWrite(dst) != C_OK) return; addReplyProto(dst,src->buf, src->bufpos); if (listLength(src->reply)) @@ -1907,7 +1743,7 @@ void ProcessPendingAsyncWrites() if (FCorrectThread(c)) { - prepareClientToWrite(c, false); // queue an event + prepareClientToWrite(c); // queue an event } else { @@ -2898,7 +2734,7 @@ NULL if (target && target->flags & CLIENT_BLOCKED) { std::unique_lock ul(target->lock); if (unblock_error) - addReplyErrorAsync(target, + addReplyError(target, "-UNBLOCKED client unblocked via CLIENT UNBLOCK"); else replyToBlockedClientTimedOut(target); diff --git a/src/object.cpp b/src/object.cpp index 4b32c5a4d..4988fa0bf 100644 --- a/src/object.cpp +++ b/src/object.cpp @@ -433,7 +433,7 @@ robj *resetRefCount(robj *obj) { int checkType(client *c, robj_roptr o, int type) { if (o->type != type) { - addReplyAsync(c,shared.wrongtypeerr); + addReply(c,shared.wrongtypeerr); return 1; } return 0; diff --git a/src/pubsub.cpp b/src/pubsub.cpp index 3ccbb6a66..176a8271e 100644 --- a/src/pubsub.cpp +++ b/src/pubsub.cpp @@ -43,12 +43,12 @@ int clientSubscriptionsCount(client *c); * addReply*() API family. */ void addReplyPubsubMessage(client *c, robj *channel, robj *msg) { if (c->resp == 2) - addReplyAsync(c,shared.mbulkhdr[3]); + addReply(c,shared.mbulkhdr[3]); else - addReplyPushLenAsync(c,3); - addReplyAsync(c,shared.messagebulk); - addReplyBulkAsync(c,channel); - if (msg) addReplyBulkAsync(c,msg); + addReplyPushLen(c,3); + addReply(c,shared.messagebulk); + addReplyBulk(c,channel); + if (msg) addReplyBulk(c,msg); } /* Send a pubsub message of type "pmessage" to the client. The difference @@ -56,13 +56,13 @@ void addReplyPubsubMessage(client *c, robj *channel, robj *msg) { * this message format also includes the pattern that matched the message. */ void addReplyPubsubPatMessage(client *c, robj *pat, robj *channel, robj *msg) { if (c->resp == 2) - addReplyAsync(c,shared.mbulkhdr[4]); + addReply(c,shared.mbulkhdr[4]); else - addReplyPushLenAsync(c,4); - addReplyAsync(c,shared.pmessagebulk); - addReplyBulkAsync(c,pat); - addReplyBulkAsync(c,channel); - addReplyBulkAsync(c,msg); + addReplyPushLen(c,4); + addReply(c,shared.pmessagebulk); + addReplyBulk(c,pat); + addReplyBulk(c,channel); + addReplyBulk(c,msg); } /* Send the pubsub subscription notification to the client. */ diff --git a/src/replication.cpp b/src/replication.cpp index cf63c6e81..b9de0bce5 100644 --- a/src/replication.cpp +++ b/src/replication.cpp @@ -315,7 +315,7 @@ void replicationFeedSlave(client *replica, int dictid, robj **argv, int argc, bo if (g_pserver->repl_backlog && fSendRaw) feedReplicationBacklogWithObject(selectcmd); /* Send it to slaves */ - addReplyAsync(replica,selectcmd); + addReply(replica,selectcmd); if (dictid < 0 || dictid >= PROTO_SHARED_SELECT_CMDS) decrRefCount(selectcmd); @@ -329,18 +329,18 @@ void replicationFeedSlave(client *replica, int dictid, robj **argv, int argc, bo if (fSendRaw) { /* Add the multi bulk length. */ - addReplyArrayLenAsync(replica,argc); + addReplyArrayLen(replica,argc); /* Finally any additional argument that was not stored inside the * static buffer if any (from j to argc). 
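 * (A replica is often owned by a different thread than the one feeding it;
 * the addReplyBulk / addReplyProto calls here then buffer into the
 * replica's replyAsync block, which ProcessPendingAsyncWrites() flushes
 * later.)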
*/ for (int j = 0; j < argc; j++) - addReplyBulkAsync(replica,argv[j]); + addReplyBulk(replica,argv[j]); } else { struct redisCommand *cmd = lookupCommand(szFromObj(argv[0])); sds buf = catCommandForAofAndActiveReplication(sdsempty(), cmd, argv, argc); - addReplyProtoAsync(replica, buf, sdslen(buf)); + addReplyProto(replica, buf, sdslen(buf)); sdsfree(buf); } } @@ -516,21 +516,21 @@ void replicationFeedSlaves(list *slaves, int dictid, robj **argv, int argc) { * or are already in sync with the master. */ if (!fSendRaw) - addReplyProtoAsync(replica, proto, cchProto); + addReplyProto(replica, proto, cchProto); - addReplyProtoAsync(replica,fake->buf,fake->bufpos); + addReplyProto(replica,fake->buf,fake->bufpos); listRewind(fake->reply, &liReply); while ((lnReply = listNext(&liReply))) { clientReplyBlock* reply = (clientReplyBlock*)listNodeValue(lnReply); - addReplyProtoAsync(replica, reply->buf(), reply->used); + addReplyProto(replica, reply->buf(), reply->used); } if (!fSendRaw) { - addReplyAsync(replica,shared.crlf); - addReplyProtoAsync(replica, szDbNum, cchDbNum); - addReplyProtoAsync(replica, szMvcc, cchMvcc); + addReply(replica,shared.crlf); + addReplyProto(replica, szDbNum, cchDbNum); + addReplyProto(replica, szMvcc, cchMvcc); } } @@ -605,7 +605,7 @@ void replicationFeedSlavesFromMasterStream(list *slaves, char *buf, size_t bufle /* Don't feed slaves that are still waiting for BGSAVE to start */ if (replica->replstate == SLAVE_STATE_WAIT_BGSAVE_START) continue; - addReplyProtoAsync(replica,buf,buflen); + addReplyProto(replica,buf,buflen); } if (listLength(slaves)) @@ -651,7 +651,7 @@ void replicationFeedMonitors(client *c, list *monitors, int dictid, robj **argv, // When writing to clients on other threads the global lock is sufficient provided we only use AddReply*Async() if (FCorrectThread(c)) lock.lock(); - addReplyAsync(monitor,cmdobj); + addReply(monitor,cmdobj); } decrRefCount(cmdobj); } @@ -3267,7 +3267,7 @@ void replicaofCommand(client *c) { miNew->masterhost, miNew->masterport, client); sdsfree(client); } - addReplyAsync(c,shared.ok); + addReply(c,shared.ok); } /* ROLE command: provide information about the role of the instance @@ -3747,7 +3747,7 @@ void processClientsWaitingReplicas(void) { last_numreplicas > c->bpop.numreplicas) { unblockClient(c); - addReplyLongLongAsync(c,last_numreplicas); + addReplyLongLong(c,last_numreplicas); } else { int numreplicas = replicationCountAcksByOffset(c->bpop.reploffset); @@ -3755,7 +3755,7 @@ void processClientsWaitingReplicas(void) { last_offset = c->bpop.reploffset; last_numreplicas = numreplicas; unblockClient(c); - addReplyLongLongAsync(c,numreplicas); + addReplyLongLong(c,numreplicas); } } fastlock_unlock(&c->lock); diff --git a/src/server.h b/src/server.h index 198633f90..9f7d8f22c 100644 --- a/src/server.h +++ b/src/server.h @@ -2004,17 +2004,14 @@ void acceptTcpHandler(aeEventLoop *el, int fd, void *privdata, int mask); void acceptTLSHandler(aeEventLoop *el, int fd, void *privdata, int mask); void acceptUnixHandler(aeEventLoop *el, int fd, void *privdata, int mask); void readQueryFromClient(connection *conn); -void addReplyNull(client *c, robj_roptr objOldProtocol = nullptr); +void addReplyNull(client *c); void addReplyNullArray(client *c); -void addReplyNullArrayAsync(client *c); void addReplyBool(client *c, int b); void addReplyVerbatim(client *c, const char *s, size_t len, const char *ext); -void addReplyVerbatimAsync(client *c, const char *s, size_t len, const char *ext); void addReplyProto(client *c, const char *s, size_t 
len); void addReplyBulk(client *c, robj_roptr obj); void AddReplyFromClient(client *c, client *src); void addReplyBulkCString(client *c, const char *s); -void addReplyBulkCStringAsync(client *c, const char *s); void addReplyBulkCBuffer(client *c, const void *p, size_t len); void addReplyBulkLongLong(client *c, long long ll); void addReply(client *c, robj_roptr obj); @@ -2026,10 +2023,9 @@ void addReplyError(client *c, const char *err); void addReplyStatus(client *c, const char *status); void addReplyDouble(client *c, double d); void addReplyHumanLongDouble(client *c, long double d); -void addReplyHumanLongDoubleAsync(client *c, long double d); void addReplyLongLong(client *c, long long ll); #ifdef __cplusplus -void addReplyLongLongWithPrefixCore(client *c, long long ll, char prefix, bool fAsync); +void addReplyLongLongWithPrefixCore(client *c, long long ll, char prefix); #endif void addReplyArrayLen(client *c, long length); void addReplyMapLen(client *c, long length); @@ -2074,23 +2070,6 @@ void linkClient(client *c); void protectClient(client *c); void unprotectClient(client *c); -// Special Thread-safe addReply() commands for posting messages to clients from a different thread -void addReplyAsync(client *c, robj_roptr obj); -void addReplyArrayLenAsync(client *c, long length); -void addReplyProtoAsync(client *c, const char *s, size_t len); -void addReplyBulkAsync(client *c, robj_roptr obj); -void addReplyBulkCBufferAsync(client *c, const void *p, size_t len); -void addReplyErrorAsync(client *c, const char *err); -void addReplyMapLenAsync(client *c, long length); -void addReplyNullAsync(client *c); -void addReplyDoubleAsync(client *c, double d); -void *addReplyDeferredLenAsync(client *c); -void setDeferredArrayLenAsync(client *c, void *node, long length); -void addReplySdsAsync(client *c, sds s); -void addReplyBulkSdsAsync(client *c, sds s); -void addReplyPushLenAsync(client *c, long length); -void addReplyLongLongAsync(client *c, long long ll); - void ProcessPendingAsyncWrites(void); client *lookupClientByID(uint64_t id); diff --git a/src/t_list.cpp b/src/t_list.cpp index 74634f91d..e81d94f1a 100644 --- a/src/t_list.cpp +++ b/src/t_list.cpp @@ -677,7 +677,7 @@ static void rpoplpushHandlePush(client *c, robj *dstkey, robj *dstobj, robj *val listTypePush(dstobj,value,LIST_HEAD); notifyKeyspaceEvent(NOTIFY_LIST,"lpush",dstkey,c->db->id); /* Always send the pushed value to the client. */ - addReplyBulkAsync(c,value); + addReplyBulk(c,value); } void rpoplpushCommand(client *c) { @@ -758,9 +758,9 @@ int serveClientBlockedOnList(client *receiver, robj *key, robj *dstkey, redisDb db->id,argv,2,PROPAGATE_AOF|PROPAGATE_REPL); /* BRPOP/BLPOP */ - addReplyArrayLenAsync(receiver,2); - addReplyBulkAsync(receiver,key); - addReplyBulkAsync(receiver,value); + addReplyArrayLen(receiver,2); + addReplyBulk(receiver,key); + addReplyBulk(receiver,value); /* Notify event. */ const char *event = (where == LIST_HEAD) ? 
"lpop" : "rpop"; diff --git a/src/t_stream.cpp b/src/t_stream.cpp index fd7a1329b..66157dbb1 100644 --- a/src/t_stream.cpp +++ b/src/t_stream.cpp @@ -818,7 +818,7 @@ static void addReplyStreamID(client *c, streamID *id) { static void addReplyStreamIDAsync(client *c, streamID *id) { sds replyid = sdscatfmt(sdsempty(),"%U-%U",id->ms,id->seq); - addReplyBulkSdsAsync(c,replyid); + addReplyBulkSds(c,replyid); } /* Similar to the above function, but just creates an object, usually useful @@ -968,7 +968,7 @@ size_t streamReplyWithRange(client *c, stream *s, streamID *start, streamID *end } if (!(flags & STREAM_RWR_RAWENTRIES)) - arraylen_ptr = addReplyDeferredLenAsync(c); + arraylen_ptr = addReplyDeferredLen(c); streamIteratorStart(&si,s,start,end,rev); while(streamIteratorGetID(&si,&id,&numfields)) { /* Update the group last_id if needed. */ @@ -982,18 +982,18 @@ size_t streamReplyWithRange(client *c, stream *s, streamID *start, streamID *end /* Emit a two elements array for each item. The first is * the ID, the second is an array of field-value pairs. */ - addReplyArrayLenAsync(c,2); + addReplyArrayLen(c,2); addReplyStreamIDAsync(c,&id); - addReplyArrayLenAsync(c,numfields*2); + addReplyArrayLen(c,numfields*2); /* Emit the field-value pairs. */ while(numfields--) { unsigned char *key, *value; int64_t key_len, value_len; streamIteratorGetField(&si,&key,&value,&key_len,&value_len); - addReplyBulkCBufferAsync(c,key,key_len); - addReplyBulkCBufferAsync(c,value,value_len); + addReplyBulkCBuffer(c,key,key_len); + addReplyBulkCBuffer(c,value,value_len); } /* If a group is passed, we need to create an entry in the @@ -1052,7 +1052,7 @@ size_t streamReplyWithRange(client *c, stream *s, streamID *start, streamID *end streamPropagateGroupID(c,spi->keyname,group,spi->groupname); streamIteratorStop(&si); - if (arraylen_ptr) setDeferredArrayLenAsync(c,arraylen_ptr,arraylen); + if (arraylen_ptr) setDeferredArrayLen(c,arraylen_ptr,arraylen); return arraylen; } diff --git a/src/t_zset.cpp b/src/t_zset.cpp index a2118b348..973e22ce6 100644 --- a/src/t_zset.cpp +++ b/src/t_zset.cpp @@ -3165,11 +3165,11 @@ void genericZpopCommand(client *c, robj **keyv, int keyc, int where, int emitkey return; } - void *arraylen_ptr = addReplyDeferredLenAsync(c); + void *arraylen_ptr = addReplyDeferredLen(c); long arraylen = 0; /* We emit the key only for the blocking variant. */ - if (emitkey) addReplyBulkAsync(c,key); + if (emitkey) addReplyBulk(c,key); /* Remove the element. 
*/ do { @@ -3219,8 +3219,8 @@ void genericZpopCommand(client *c, robj **keyv, int keyc, int where, int emitkey signalModifiedKey(c,c->db,key); } - addReplyBulkCBufferAsync(c,ele,sdslen(ele)); - addReplyDoubleAsync(c,score); + addReplyBulkCBuffer(c,ele,sdslen(ele)); + addReplyDouble(c,score); sdsfree(ele); arraylen += 2; @@ -3232,7 +3232,7 @@ void genericZpopCommand(client *c, robj **keyv, int keyc, int where, int emitkey } } while(--count); - setDeferredArrayLenAsync(c,arraylen_ptr,arraylen + (emitkey != 0)); + setDeferredArrayLen(c,arraylen_ptr,arraylen + (emitkey != 0)); } /* ZPOPMIN key [] */ diff --git a/src/timeout.cpp b/src/timeout.cpp index d59bc44e6..18a553211 100644 --- a/src/timeout.cpp +++ b/src/timeout.cpp @@ -179,7 +179,7 @@ int getTimeoutFromObjectOrReply(client *c, robj *object, mstime_t *timeout, int } if (tval < 0) { - addReplyErrorAsync(c,"timeout is negative"); + addReplyError(c,"timeout is negative"); return C_ERR; } diff --git a/src/tracking.cpp b/src/tracking.cpp index ad10c3a57..58c675096 100644 --- a/src/tracking.cpp +++ b/src/tracking.cpp @@ -215,9 +215,9 @@ void sendTrackingMessage(client *c, char *keyname, size_t keylen, int proto) { * are unable to send invalidation messages to the redirected * connection, because the client no longer exist. */ if (c->resp > 2) { - addReplyPushLenAsync(c,3); - addReplyBulkCBufferAsync(c,"tracking-redir-broken",21); - addReplyLongLongAsync(c,c->client_tracking_redirection); + addReplyPushLen(c,3); + addReplyBulkCBuffer(c,"tracking-redir-broken",21); + addReplyLongLong(c,c->client_tracking_redirection); } return; } @@ -232,8 +232,8 @@ void sendTrackingMessage(client *c, char *keyname, size_t keylen, int proto) { * in Pub/Sub mode, we can support the feature with RESP 2 as well, * by sending Pub/Sub messages in the __redis__:invalidate channel. */ if (c->resp > 2) { - addReplyPushLenAsync(c,2); - addReplyBulkCBufferAsync(c,"invalidate",10); + addReplyPushLen(c,2); + addReplyBulkCBuffer(c,"invalidate",10); } else if (using_redirection && c->flags & CLIENT_PUBSUB) { /* We use a static object to speedup things, however we assume * that addReplyPubsubMessage() will not take a reference. */ @@ -248,10 +248,10 @@ void sendTrackingMessage(client *c, char *keyname, size_t keylen, int proto) { /* Send the "value" part, which is the array of keys. */ if (proto) { - addReplyProtoAsync(c,keyname,keylen); + addReplyProto(c,keyname,keylen); } else { - addReplyArrayLenAsync(c,1); - addReplyBulkCBufferAsync(c,keyname,keylen); + addReplyArrayLen(c,1); + addReplyBulkCBuffer(c,keyname,keylen); } } From 418b6206ea9b16c9d16be1be31347115a9a1aaae Mon Sep 17 00:00:00 2001 From: John Sully Date: Mon, 26 Oct 2020 02:28:38 +0000 Subject: [PATCH 189/377] Avoid unnecessary memory fences Former-commit-id: 88962af8b2fe835fb5d542013062cec8c771c6c6 --- src/networking.cpp | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/src/networking.cpp b/src/networking.cpp index 1c3e528c5..d843e8d99 100644 --- a/src/networking.cpp +++ b/src/networking.cpp @@ -258,20 +258,22 @@ int prepareClientToWrite(client *c) { serverAssert(GlobalLocksAcquired()); } - if (c->flags & CLIENT_FORCE_REPLY) return C_OK; // FORCE REPLY means we're doing something else with the buffer. + auto flags = c->flags.load(std::memory_order_relaxed); + + if (flags & CLIENT_FORCE_REPLY) return C_OK; // FORCE REPLY means we're doing something else with the buffer. 
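/* c->flags is a std::atomic; loading it once above with memory_order_relaxed
 * and testing the local copy avoids a separate atomic load, and whatever
 * fence it implies, at each of the flag checks that follow. */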
// do not install a write handler /* If it's the Lua client we always return ok without installing any * handler since there is no socket at all. */ - if (c->flags & (CLIENT_LUA|CLIENT_MODULE)) return C_OK; + if (flags & (CLIENT_LUA|CLIENT_MODULE)) return C_OK; /* CLIENT REPLY OFF / SKIP handling: don't send replies. */ - if (c->flags & (CLIENT_REPLY_OFF|CLIENT_REPLY_SKIP)) return C_ERR; + if (flags & (CLIENT_REPLY_OFF|CLIENT_REPLY_SKIP)) return C_ERR; /* Masters don't receive replies, unless CLIENT_MASTER_FORCE_REPLY flag * is set. */ - if ((c->flags & CLIENT_MASTER) && - !(c->flags & CLIENT_MASTER_FORCE_REPLY)) return C_ERR; + if ((flags & CLIENT_MASTER) && + !(flags & CLIENT_MASTER_FORCE_REPLY)) return C_ERR; if (!c->conn) return C_ERR; /* Fake client for AOF loading. */ From a3323d2b5bad3a4064ba8babb0ad2c0a2cc75800 Mon Sep 17 00:00:00 2001 From: John Sully Date: Tue, 27 Oct 2020 01:54:13 +0000 Subject: [PATCH 190/377] Fix multithreaded test failure Former-commit-id: 1840601f8efb27174efa0a66f78de8c490b5bba4 --- src/server.cpp | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/src/server.cpp b/src/server.cpp index 3cf0b0189..13086037d 100644 --- a/src/server.cpp +++ b/src/server.cpp @@ -3540,6 +3540,12 @@ void call(client *c, int flags) { replicationFeedMonitors(c,g_pserver->monitors,c->db->id,c->argv,c->argc); } + /* We need to transfer async writes before a client's repl state gets changed. Otherwise + we won't be able to propogate them correctly. */ + if (c->cmd->flags & CMD_CATEGORY_REPLICATION) { + ProcessPendingAsyncWrites(); + } + /* Initialization: clear the flags that must be set by the command on * demand, and initialize the array for additional commands propagation. */ c->flags &= ~(CLIENT_FORCE_AOF|CLIENT_FORCE_REPL|CLIENT_PREVENT_PROP); From 366ef70f6effee6b33ba95c6c3821a69069f7891 Mon Sep 17 00:00:00 2001 From: John Sully Date: Tue, 27 Oct 2020 04:52:35 +0000 Subject: [PATCH 191/377] Ensure MVCC timestamp is incremented after the real time is updated Former-commit-id: 9c5b59ac010fa1e948d5e51d1cd408c7e743d66d --- src/server.cpp | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/src/server.cpp b/src/server.cpp index 13086037d..e5ddd7a83 100644 --- a/src/server.cpp +++ b/src/server.cpp @@ -3555,6 +3555,7 @@ void call(client *c, int flags) { /* Call the command. */ dirty = g_pserver->dirty; updateCachedTime(0); + incrementMvccTstamp(); start = g_pserver->ustime; c->cmd->proc(c); serverTL->commandsExecuted++; @@ -3838,8 +3839,6 @@ int processCommand(client *c, int callFlags) { } } - incrementMvccTstamp(); - /* Handle the maxmemory directive. * * Note that we do not want to reclaim memory if we are here re-entering From 2ae013910b0fbb0625237e4221465f906a2a52fa Mon Sep 17 00:00:00 2001 From: John Sully Date: Tue, 27 Oct 2020 04:53:19 +0000 Subject: [PATCH 192/377] Prevent crashes on shutdown due to lock being held Former-commit-id: 6a74f524e558100dfb3e54779020cd3407706e08 --- src/server.cpp | 13 +++++++------ tests/unit/maxmemory.tcl | 2 +- 2 files changed, 8 insertions(+), 7 deletions(-) diff --git a/src/server.cpp b/src/server.cpp index e5ddd7a83..492937e40 100644 --- a/src/server.cpp +++ b/src/server.cpp @@ -2221,10 +2221,11 @@ void processClients(); * The most important is freeClientsInAsyncFreeQueue but we also * call some other low-risk functions. 
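 * (The global lock is now managed through an RAII AeLocker here, so an
 * early return during shutdown can no longer leave the lock held.)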
*/ void beforeSleep(struct aeEventLoop *eventLoop) { + AeLocker locker; UNUSED(eventLoop); int iel = ielFromEventLoop(eventLoop); - aeAcquireLock(); + locker.arm(); processClients(); /* Handle precise timeouts of blocked clients. */ @@ -2232,9 +2233,9 @@ void beforeSleep(struct aeEventLoop *eventLoop) { /* Handle TLS pending data. (must be done before flushAppendOnlyFile) */ if (tlsHasPendingData()) { - aeReleaseLock(); + locker.release(); tlsProcessPendingData(); - aeAcquireLock(); + locker.arm(); } /* If tls still has pending unread data don't sleep at all. */ @@ -2299,9 +2300,9 @@ void beforeSleep(struct aeEventLoop *eventLoop) { first so perform it here */ bool fSentReplies = false; if (listLength(g_pserver->clients_to_close)) { - aeReleaseLock(); + locker.disarm(); handleClientsWithPendingWrites(iel, aof_state); - aeAcquireLock(); + locker.arm(); fSentReplies = true; } @@ -2311,7 +2312,7 @@ void beforeSleep(struct aeEventLoop *eventLoop) { /* Before we are going to sleep, let the threads access the dataset by * releasing the GIL. Redis main thread will not touch anything at this * time. */ - aeReleaseLock(); + locker.disarm(); if (!fSentReplies) handleClientsWithPendingWrites(iel, aof_state); if (moduleCount()) moduleReleaseGIL(TRUE /*fServerThread*/); diff --git a/tests/unit/maxmemory.tcl b/tests/unit/maxmemory.tcl index e12fedc91..b399e9b06 100644 --- a/tests/unit/maxmemory.tcl +++ b/tests/unit/maxmemory.tcl @@ -215,7 +215,7 @@ proc test_slave_buffers {test_name cmd_count payload_len limit_memory pipeline} set used_no_repl [expr {$new_used - $mem_not_counted_for_evict}] set delta [expr {($used_no_repl - $client_buf) - ($orig_used_no_repl - $orig_client_buf)}] - assert {[$master dbsize] == 100} + assert_equal [$master dbsize] 100 assert {$slave_buf > 2*1024*1024} ;# some of the data may have been pushed to the OS buffers set delta_max [expr {$cmd_count / 2}] ;# 1 byte unaccounted for, with 1M commands will consume some 1MB assert {$delta < $delta_max && $delta > -$delta_max} From c3ef2f00bb0ac08e3e3b70a2c7d8a3ef662dbb87 Mon Sep 17 00:00:00 2001 From: John Sully Date: Tue, 27 Oct 2020 06:23:14 +0000 Subject: [PATCH 193/377] Active replica test reliability enhancements Former-commit-id: 444555d3e4ec6e9469dae847dc631f2be263fb5e --- tests/integration/replication-active.tcl | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/tests/integration/replication-active.tcl b/tests/integration/replication-active.tcl index d6501fbeb..5515c3c0f 100644 --- a/tests/integration/replication-active.tcl +++ b/tests/integration/replication-active.tcl @@ -235,7 +235,7 @@ start_server {tags {"active-repl"} overrides {active-replica yes}} { $master replicaof no one after 100 $master set testkey baz - after 100 + after 200 $slave set testkey bar after 100 $slave replicaof $master_host $master_port @@ -243,8 +243,8 @@ start_server {tags {"active-repl"} overrides {active-replica yes}} { $master replicaof $slave_host $slave_port after 1000 - assert_equal {bar} [$slave get testkey] - assert_equal {bar} [$master get testkey] + assert_equal {bar} [$slave get testkey] {replica is correct} + assert_equal {bar} [$master get testkey] {master is correct} } test {Active replica different databases} { @@ -271,6 +271,11 @@ start_server {tags {"active-repl"} overrides {active-replica yes}} { test {Active Replica Merges Database On Sync} { $slave set testkeyA foo r replicaof $slave_host $slave_port + wait_for_condition 50 1000 { + [string match *active-replica* [r role]] + } else { + fail [$slave 
role] + } after 1000 assert_equal 2 [r dbsize] } From 67003ec7147a88cd3b2fea194f19007f2e04bac0 Mon Sep 17 00:00:00 2001 From: John Sully Date: Tue, 27 Oct 2020 06:41:33 +0000 Subject: [PATCH 194/377] Disarm not release D'oh Former-commit-id: 3e33fbcd351a719126d30405c4dc9209ad381954 --- src/server.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/server.cpp b/src/server.cpp index 492937e40..a162a8c1c 100644 --- a/src/server.cpp +++ b/src/server.cpp @@ -2233,7 +2233,7 @@ void beforeSleep(struct aeEventLoop *eventLoop) { /* Handle TLS pending data. (must be done before flushAppendOnlyFile) */ if (tlsHasPendingData()) { - locker.release(); + locker.disarm(); tlsProcessPendingData(); locker.arm(); } From f264678ccf0dc77c7f577214c9922866a241cccf Mon Sep 17 00:00:00 2001 From: Oran Agra Date: Thu, 10 Sep 2020 13:43:38 +0300 Subject: [PATCH 195/377] Squash merging 125 typo/grammar/comment/doc PRs (#7773) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit List of squashed commits or PRs =============================== commit 66801ea Author: hwware Date: Mon Jan 13 00:54:31 2020 -0500 typo fix in acl.c commit 46f55db Author: Itamar Haber Date: Sun Sep 6 18:24:11 2020 +0300 Updates a couple of comments Specifically: * RM_AutoMemory completed instead of pointing to docs * Updated link to custom type doc commit 61a2aa0 Author: xindoo Date: Tue Sep 1 19:24:59 2020 +0800 Correct errors in code comments commit a5871d1 Author: yz1509 Date: Tue Sep 1 18:36:06 2020 +0800 fix typos in module.c commit 41eede7 Author: bookug Date: Sat Aug 15 01:11:33 2020 +0800 docs: fix typos in comments commit c303c84 Author: lazy-snail Date: Fri Aug 7 11:15:44 2020 +0800 fix spelling in redis.conf commit 1eb76bf Author: zhujian Date: Thu Aug 6 15:22:10 2020 +0800 add a missing 'n' in comment commit 1530ec2 Author: Daniel Dai <764122422@qq.com> Date: Mon Jul 27 00:46:35 2020 -0400 fix spelling in tracking.c commit e517b31 Author: Hunter-Chen Date: Fri Jul 17 22:33:32 2020 +0800 Update redis.conf Co-authored-by: Itamar Haber commit c300eff Author: Hunter-Chen Date: Fri Jul 17 22:33:23 2020 +0800 Update redis.conf Co-authored-by: Itamar Haber commit 4c058a8 Author: 陈浩鹏 Date: Thu Jun 25 19:00:56 2020 +0800 Grammar fix and clarification commit 5fcaa81 Author: bodong.ybd Date: Fri Jun 19 10:09:00 2020 +0800 Fix typos commit 4caca9a Author: Pruthvi P Date: Fri May 22 00:33:22 2020 +0530 Fix typo eviciton => eviction commit b2a25f6 Author: Brad Dunbar Date: Sun May 17 12:39:59 2020 -0400 Fix a typo. commit 12842ae Author: hwware Date: Sun May 3 17:16:59 2020 -0400 fix spelling in redis conf commit ddba07c Author: Chris Lamb Date: Sat May 2 23:25:34 2020 +0100 Correct a "conflicts" spelling error. commit 8fc7bf2 Author: Nao YONASHIRO Date: Thu Apr 30 10:25:27 2020 +0900 docs: fix EXPIRE_FAST_CYCLE_DURATION to ACTIVE_EXPIRE_CYCLE_FAST_DURATION commit 9b2b67a Author: Brad Dunbar Date: Fri Apr 24 11:46:22 2020 -0400 Fix a typo. 
commit 0746f10 Author: devilinrust <63737265+devilinrust@users.noreply.github.com> Date: Thu Apr 16 00:17:53 2020 +0200 Fix typos in server.c commit 92b588d Author: benjessop12 <56115861+benjessop12@users.noreply.github.com> Date: Mon Apr 13 13:43:55 2020 +0100 Fix spelling mistake in lazyfree.c commit 1da37aa Merge: 2d4ba28 c90b2a4 Author: hwware Date: Thu Mar 5 22:41:31 2020 -0500 Merge remote-tracking branch 'upstream/unstable' into expiretypofix commit 2d4ba28 Author: hwware Date: Mon Mar 2 00:09:40 2020 -0500 fix typo in expire.c commit 1a746f7 Author: SennoYuki Date: Thu Feb 27 16:54:32 2020 +0800 fix typo commit 8599b1a Author: dongheejeong Date: Sun Feb 16 20:31:43 2020 +0000 Fix typo in server.c commit f38d4e8 Author: hwware Date: Sun Feb 2 22:58:38 2020 -0500 fix typo in evict.c commit fe143fc Author: Leo Murillo Date: Sun Feb 2 01:57:22 2020 -0600 Fix a few typos in redis.conf commit 1ab4d21 Author: viraja1 Date: Fri Dec 27 17:15:58 2019 +0530 Fix typo in Latency API docstring commit ca1f70e Author: gosth Date: Wed Dec 18 15:18:02 2019 +0800 fix typo in sort.c commit a57c06b Author: ZYunH Date: Mon Dec 16 22:28:46 2019 +0800 fix-zset-typo commit b8c92b5 Author: git-hulk Date: Mon Dec 16 15:51:42 2019 +0800 FIX: typo in cluster.c, onformation->information commit 9dd981c Author: wujm2007 Date: Mon Dec 16 09:37:52 2019 +0800 Fix typo commit e132d7a Author: Sebastien Williams-Wynn Date: Fri Nov 15 00:14:07 2019 +0000 Minor typo change commit 47f44d5 Author: happynote3966 <01ssrmikururudevice01@gmail.com> Date: Mon Nov 11 22:08:48 2019 +0900 fix comment typo in redis-cli.c commit b8bdb0d Author: fulei Date: Wed Oct 16 18:00:17 2019 +0800 Fix a spelling mistake of comments in defragDictBucketCallback commit 0def46a Author: fulei Date: Wed Oct 16 13:09:27 2019 +0800 fix some spelling mistakes of comments in defrag.c commit f3596fd Author: Phil Rajchgot Date: Sun Oct 13 02:02:32 2019 -0400 Typo and grammar fixes Redis and its documentation are great -- just wanted to submit a few corrections in the spirit of Hacktoberfest. Thanks for all your work on this project. I use it all the time and it works beautifully. commit 2b928cd Author: KangZhiDong Date: Sun Sep 1 07:03:11 2019 +0800 fix typos commit 33aea14 Author: Axlgrep Date: Tue Aug 27 11:02:18 2019 +0800 Fixed eviction spelling issues commit e282a80 Author: Simen Flatby Date: Tue Aug 20 15:25:51 2019 +0200 Update comments to reflect prop name In the comments the prop is referenced as replica-validity-factor, but it is really named cluster-replica-validity-factor. 
commit 74d1f9a Author: Jim Green Date: Tue Aug 20 20:00:31 2019 +0800 fix comment error, the code is ok commit eea1407 Author: Liao Tonglang Date: Fri May 31 10:16:18 2019 +0800 typo fix fix cna't to can't commit 0da553c Author: KAWACHI Takashi Date: Wed Jul 17 00:38:16 2019 +0900 Fix typo commit 7fc8fb6 Author: Michael Prokop Date: Tue May 28 17:58:42 2019 +0200 Typo fixes s/familar/familiar/ s/compatiblity/compatibility/ s/ ot / to / s/itsef/itself/ commit 5f46c9d Author: zhumoing <34539422+zhumoing@users.noreply.github.com> Date: Tue May 21 21:16:50 2019 +0800 typo-fixes typo-fixes commit 321dfe1 Author: wxisme <850885154@qq.com> Date: Sat Mar 16 15:10:55 2019 +0800 typo fix commit b4fb131 Merge: 267e0e6 4842305 Author: Nikitas Bastas Date: Fri Feb 8 22:55:45 2019 +0200 Merge branch 'unstable' of antirez/redis into unstable commit 267e0e6 Author: Nikitas Bastas Date: Wed Jan 30 21:26:04 2019 +0200 Minor typo fix commit 30544e7 Author: inshal96 <39904558+inshal96@users.noreply.github.com> Date: Fri Jan 4 16:54:50 2019 +0500 remove an extra 'a' in the comments commit 337969d Author: BrotherGao Date: Sat Dec 29 12:37:29 2018 +0800 fix typo in redis.conf commit 9f4b121 Merge: 423a030 19d0ece Author: BrotherGao Date: Sat Dec 29 11:41:12 2018 +0800 Merge branch 'unstable' of antirez/redis into unstable commit 423a030 Merge: 42b02b7 0423081 Author: 杨东衡 Date: Tue Dec 4 23:56:11 2018 +0800 Merge branch 'unstable' of antirez/redis into unstable commit 42b02b7 Merge: 8c7dcff efa96f0 Author: Dongheng Yang Date: Sun Oct 28 15:54:23 2018 +0800 Merge pull request #1 from antirez/unstable update local data commit 714b589 Author: Christian Date: Fri Dec 28 01:17:26 2018 +0100 fix typo "resulution" commit e23259d Author: garenchan <1412950785@qq.com> Date: Wed Dec 26 09:58:35 2018 +0800 fix typo: segfauls -> segfault commit a9359f8 Author: xjp Date: Tue Dec 18 17:31:44 2018 +0800 Fixed REDISMODULE_H spell bug commit a12c3e4 Author: jdiaz Date: Sat Dec 15 23:39:52 2018 -0600 Fixes hyperloglog hash function comment block description commit 770eb11 Author: 林上耀 <1210tom@163.com> Date: Sun Nov 25 17:16:10 2018 +0800 fix typo commit fd97fbb Author: Chris Lamb Date: Fri Nov 23 17:14:01 2018 +0100 Correct "unsupported" typo. commit a85522d Author: Jungnam Lee Date: Thu Nov 8 23:01:29 2018 +0900 fix typo in test comments commit ade8007 Author: Arun Kumar Date: Tue Oct 23 16:56:35 2018 +0530 Fixed grammatical typo Fixed typo for word 'dictionary' commit 869ee39 Author: Hamid Alaei Date: Sun Aug 12 16:40:02 2018 +0430 fix documentations: (ThreadSafeContextStart/Stop -> ThreadSafeContextLock/Unlock), minor typo commit f89d158 Author: Mayank Jain Date: Tue Jul 31 23:01:21 2018 +0530 Updated README.md with some spelling corrections. Made correction in spelling of some misspelled words. commit 892198e Author: dsomeshwar Date: Sat Jul 21 23:23:04 2018 +0530 typo fix commit 8a4d780 Author: Itamar Haber Date: Mon Apr 30 02:06:52 2018 +0300 Fixes some typos commit e3acef6 Author: Noah Rosamilia Date: Sat Mar 3 23:41:21 2018 -0500 Fix typo in /deps/README.md commit 04442fb Author: WuYunlong Date: Sat Mar 3 10:32:42 2018 +0800 Fix typo in readSyncBulkPayload() comment. commit 9f36880 Author: WuYunlong Date: Sat Mar 3 10:20:37 2018 +0800 replication.c comment: run_id -> replid. 
commit f866b4a Author: Francesco 'makevoid' Canessa Date: Thu Feb 22 22:01:56 2018 +0000 fix comment typo in server.c commit 0ebc69b Author: 줍 Date: Mon Feb 12 16:38:48 2018 +0900 Fix typo in redis.conf Fix `five behaviors` to `eight behaviors` in [this sentence ](antirez/redis@unstable/redis.conf#L564) commit b50a620 Author: martinbroadhurst Date: Thu Dec 28 12:07:30 2017 +0000 Fix typo in valgrind.sup commit 7d8f349 Author: Peter Boughton Date: Mon Nov 27 19:52:19 2017 +0000 Update CONTRIBUTING; refer doc updates to redis-doc repo. commit 02dec7e Author: Klauswk Date: Tue Oct 24 16:18:38 2017 -0200 Fix typo in comment commit e1efbc8 Author: chenshi Date: Tue Oct 3 18:26:30 2017 +0800 Correct two spelling errors of comments commit 93327d8 Author: spacewander Date: Wed Sep 13 16:47:24 2017 +0800 Update the comment for OBJ_ENCODING_EMBSTR_SIZE_LIMIT's value The value of OBJ_ENCODING_EMBSTR_SIZE_LIMIT is 44 now instead of 39. commit 63d361f Author: spacewander Date: Tue Sep 12 15:06:42 2017 +0800 Fix related doc in ziplist.c According to the definition of ZIP_BIG_PREVLEN and other related code, the guard of single byte should be 254 instead of 255. commit ebe228d Author: hanael80 Date: Tue Aug 15 09:09:40 2017 +0900 Fix typo commit 6b696e6 Author: Matt Robenolt Date: Mon Aug 14 14:50:47 2017 -0700 Fix typo in LATENCY DOCTOR output commit a2ec6ae Author: caosiyang Date: Tue Aug 15 14:15:16 2017 +0800 Fix a typo: form => from commit 3ab7699 Author: caosiyang Date: Thu Aug 10 18:40:33 2017 +0800 Fix a typo: replicationFeedSlavesFromMaster() => replicationFeedSlavesFromMasterStream() commit 72d43ef Author: caosiyang Date: Tue Aug 8 15:57:25 2017 +0800 fix a typo: servewr => server commit 707c958 Author: Bo Cai Date: Wed Jul 26 21:49:42 2017 +0800 redis-cli.c typo: conut -> count. Signed-off-by: Bo Cai commit b9385b2 Author: JackDrogon Date: Fri Jun 30 14:22:31 2017 +0800 Fix some spell problems commit 20d9230 Author: akosel Date: Sun Jun 4 19:35:13 2017 -0500 Fix typo commit b167bfc Author: Krzysiek Witkowicz Date: Mon May 22 21:32:27 2017 +0100 Fix #4008 small typo in comment commit 2b78ac8 Author: Jake Clarkson Date: Wed Apr 26 15:49:50 2017 +0100 Correct typo in tests/unit/hyperloglog.tcl commit b0f1cdb Author: Qi Luo Date: Wed Apr 19 14:25:18 2017 -0700 Fix typo commit a90b0f9 Author: charsyam Date: Thu Mar 16 18:19:53 2017 +0900 fix typos fix typos fix typos commit 8430a79 Author: Richard Hart Date: Mon Mar 13 22:17:41 2017 -0400 Fixed log message typo in listenToPort. commit 481a1c2 Author: Vinod Kumar Date: Sun Jan 15 23:04:51 2017 +0530 src/db.c: Correct "save" -> "safe" typo commit 586b4d3 Author: wangshaonan Date: Wed Dec 21 20:28:27 2016 +0800 Fix typo they->the in helloworld.c commit c1c4b5e Author: Jenner Date: Mon Dec 19 16:39:46 2016 +0800 typo error commit 1ee1a3f Author: tielei <43289893@qq.com> Date: Mon Jul 18 13:52:25 2016 +0800 fix some comments commit 11a41fb Author: Otto Kekäläinen Date: Sun Jul 3 10:23:55 2016 +0100 Fix spelling in documentation and comments commit 5fb5d82 Author: francischan Date: Tue Jun 28 00:19:33 2016 +0800 Fix outdated comments about redis.c file. It should now refer to server.c file. 
commit 6b254bc Author: lmatt-bit Date: Thu Apr 21 21:45:58 2016 +0800 Refine the comment of dictRehashMilliseconds func SLAVECONF->REPLCONF in comment - by andyli029 commit ee9869f Author: clark.kang Date: Tue Mar 22 11:09:51 2016 +0900 fix typos commit f7b3b11 Author: Harisankar H Date: Wed Mar 9 11:49:42 2016 +0530 Typo correction: "faield" --> "failed" Typo correction: "faield" --> "failed" commit 3fd40fc Author: Itamar Haber Date: Thu Feb 25 10:31:51 2016 +0200 Fixes a typo in comments commit 621c160 Author: Prayag Verma Date: Mon Feb 1 12:36:20 2016 +0530 Fix typo in Readme.md Spelling mistakes - `eviciton` > `eviction` `familar` > `familiar` commit d7d07d6 Author: WonCheol Lee Date: Wed Dec 30 15:11:34 2015 +0900 Typo fixed commit a4dade7 Author: Felix Bünemann Date: Mon Dec 28 11:02:55 2015 +0100 [ci skip] Improve supervised upstart config docs This mentions that "expect stop" is required for supervised upstart to work correctly. See http://upstart.ubuntu.com/cookbook/#expect-stop for an explanation. commit d9caba9 Author: daurnimator Date: Mon Dec 21 18:30:03 2015 +1100 README: Remove trailing whitespace commit 72d42e5 Author: daurnimator Date: Mon Dec 21 18:29:32 2015 +1100 README: Fix typo. th => the commit dd6e957 Author: daurnimator Date: Mon Dec 21 18:29:20 2015 +1100 README: Fix typo. familar => familiar commit 3a12b23 Author: daurnimator Date: Mon Dec 21 18:28:54 2015 +1100 README: Fix typo. eviciton => eviction commit 2d1d03b Author: daurnimator Date: Mon Dec 21 18:21:45 2015 +1100 README: Fix typo. sever => server commit 3973b06 Author: Itamar Haber Date: Sat Dec 19 17:01:20 2015 +0200 Typo fix commit 4f2e460 Author: Steve Gao Date: Fri Dec 4 10:22:05 2015 +0800 Update README - fix typos commit b21667c Author: binyan Date: Wed Dec 2 22:48:37 2015 +0800 delete redundancy color judge in sdscatcolor commit 88894c7 Author: binyan Date: Wed Dec 2 22:14:42 2015 +0800 the example output shoule be HelloWorld commit 2763470 Author: binyan Date: Wed Dec 2 17:41:39 2015 +0800 modify error word keyevente Signed-off-by: binyan commit 0847b3d Author: Bruno Martins Date: Wed Nov 4 11:37:01 2015 +0000 typo commit bbb9e9e Author: dawedawe Date: Fri Mar 27 00:46:41 2015 +0100 typo: zimap -> zipmap commit 5ed297e Author: Axel Advento Date: Tue Mar 3 15:58:29 2015 +0800 Fix 'salve' typos to 'slave' commit edec9d6 Author: LudwikJaniuk Date: Wed Jun 12 14:12:47 2019 +0200 Update README.md Co-Authored-By: Qix commit 692a7af Author: LudwikJaniuk Date: Tue May 28 14:32:04 2019 +0200 grammar commit d962b0a Author: Nick Frost Date: Wed Jul 20 15:17:12 2016 -0700 Minor grammar fix commit 24fff01aaccaf5956973ada8c50ceb1462e211c6 (typos) Author: Chad Miller Date: Tue Sep 8 13:46:11 2020 -0400 Fix faulty comment about operation of unlink() commit 3cd5c1f3326c52aa552ada7ec797c6bb16452355 Author: Kevin Date: Wed Nov 20 00:13:50 2019 +0800 Fix typo in server.c. 
From a83af59 Mon Sep 17 00:00:00 2001 From: wuwo Date: Fri, 17 Mar 2017 20:37:45 +0800 Subject: [PATCH] falure to failure From c961896 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E5=B7=A6=E6=87=B6?= Date: Sat, 27 May 2017 15:33:04 +0800 Subject: [PATCH] fix typo From e600ef2 Mon Sep 17 00:00:00 2001 From: "rui.zou" Date: Sat, 30 Sep 2017 12:38:15 +0800 Subject: [PATCH] fix a typo From c7d07fa Mon Sep 17 00:00:00 2001 From: Alexandre Perrin Date: Thu, 16 Aug 2018 10:35:31 +0200 Subject: [PATCH] deps README.md typo From b25cb67 Mon Sep 17 00:00:00 2001 From: Guy Korland Date: Wed, 26 Sep 2018 10:55:37 +0300 Subject: [PATCH 1/2] fix typos in header From ad28ca6 Mon Sep 17 00:00:00 2001 From: Guy Korland Date: Wed, 26 Sep 2018 11:02:36 +0300 Subject: [PATCH 2/2] fix typos commit 34924cdedd8552466fc22c1168d49236cb7ee915 Author: Adrian Lynch Date: Sat Apr 4 21:59:15 2015 +0100 Typos fixed commit fd2a1e7 Author: Jan Date: Sat Oct 27 19:13:01 2018 +0200 Fix typos Fix typos commit e14e47c1a234b53b0e103c5f6a1c61481cbcbb02 Author: Andy Lester Date: Fri Aug 2 22:30:07 2019 -0500 Fix multiple misspellings of "following" commit 79b948ce2dac6b453fe80995abbcaac04c213d5a Author: Andy Lester Date: Fri Aug 2 22:24:28 2019 -0500 Fix misspelling of create-cluster commit 1fffde52666dc99ab35efbd31071a4c008cb5a71 Author: Andy Lester Date: Wed Jul 31 17:57:56 2019 -0500 Fix typos commit 204c9ba9651e9e05fd73936b452b9a30be456cfe Author: Xiaobo Zhu Date: Tue Aug 13 22:19:25 2019 +0800 fix typos Squashed commit of the following: commit 1d9aaf8 Author: danmedani Date: Sun Aug 2 11:40:26 2015 -0700 README typo fix. Squashed commit of the following: commit 32bfa7c Author: Erik Dubbelboer Date: Mon Jul 6 21:15:08 2015 +0200 Fixed grammer Squashed commit of the following: commit b24f69c Author: Sisir Koppaka Date: Mon Mar 2 22:38:45 2015 -0500 utils/hashtable/rehashing.c: Fix typos Squashed commit of the following: commit 4e04082 Author: Erik Dubbelboer Date: Mon Mar 23 08:22:21 2015 +0000 Small config file documentation improvements Squashed commit of the following: commit acb8773 Author: ctd1500 Date: Fri May 8 01:52:48 2015 -0700 Typo and grammar fixes in readme commit 2eb75b6 Author: ctd1500 Date: Fri May 8 01:36:18 2015 -0700 fixed redis.conf comment Squashed commit of the following: commit a8249a2 Author: Masahiko Sawada Date: Fri Dec 11 11:39:52 2015 +0530 Revise correction of typos. 
Squashed commit of the following: commit 3c02028 Author: zhaojun11 Date: Wed Jan 17 19:05:28 2018 +0800 Fix typos include two code typos in cluster.c and latency.c Squashed commit of the following: commit 9dba47c Author: q191201771 <191201771@qq.com> Date: Sat Jan 4 11:31:04 2020 +0800 fix function listCreate comment in adlist.c Update src/server.c commit 2c7c2cb536e78dd211b1ac6f7bda00f0f54faaeb Author: charpty Date: Tue May 1 23:16:59 2018 +0800 server.c typo: modules system dictionary type comment Signed-off-by: charpty commit a8395323fb63cb59cb3591cb0f0c8edb7c29a680 Author: Itamar Haber Date: Sun May 6 00:25:18 2018 +0300 Updates test_helper.tcl's help with undocumented options Specifically: * Host * Port * Client commit bde6f9ced15755cd6407b4af7d601b030f36d60b Author: wxisme <850885154@qq.com> Date: Wed Aug 8 15:19:19 2018 +0800 fix comments in deps files commit 3172474ba991532ab799ee1873439f3402412331 Author: wxisme <850885154@qq.com> Date: Wed Aug 8 14:33:49 2018 +0800 fix some comments commit 01b6f2b6858b5cf2ce4ad5092d2c746e755f53f0 Author: Thor Juhasz Date: Sun Nov 18 14:37:41 2018 +0100 Minor fixes to comments Found some parts a little unclear on a first read, which prompted me to have a better look at the file and fix some minor things I noticed. Fixing minor typos and grammar. There are no changes to configuration options. These changes are only meant to help the user better understand the explanations to the various configuration options (cherry picked from commit 285ef446b05e09013556e7a490677494a9b4bb3e) --- CONTRIBUTING | 4 + README.md | 66 +++++++------- deps/README.md | 8 +- deps/linenoise/linenoise.c | 4 +- redis.conf | 124 +++++++++++++------------- sentinel.conf | 2 +- src/acl.c | 2 +- src/adlist.c | 9 +- src/ae.c | 4 +- src/ae_evport.c | 2 +- src/aof.c | 4 +- src/atomicvar.h | 2 +- src/bitops.c | 6 +- src/blocked.c | 4 +- src/cluster.c | 66 +++++++------- src/cluster.h | 8 +- src/config.c | 6 +- src/config.h | 2 +- src/connection.h | 4 +- src/db.c | 8 +- src/debug.c | 6 +- src/defrag.c | 30 +++---- src/dict.c | 8 +- src/endianconv.c | 2 +- src/evict.c | 8 +- src/expire.c | 8 +- src/geo.c | 6 +- src/geohash_helper.c | 4 +- src/hyperloglog.c | 16 ++-- src/latency.c | 12 +-- src/lazyfree.c | 6 +- src/listpack.c | 2 +- src/lolwut.c | 4 +- src/lolwut5.c | 2 +- src/lzfP.h | 2 +- src/module.c | 93 ++++++++++--------- src/modules/hellodict.c | 2 +- src/modules/helloworld.c | 4 +- src/multi.c | 2 +- src/networking.c | 12 +-- src/notify.c | 2 +- src/object.c | 4 +- src/quicklist.h | 4 +- src/rax.c | 16 ++-- src/rax.h | 4 +- src/rdb.c | 4 +- src/redis-check-rdb.c | 2 +- src/redis-cli.c | 12 +-- src/redismodule.h | 2 +- src/replication.c | 40 ++++----- src/scripting.c | 26 +++--- src/sds.c | 6 +- src/sentinel.c | 26 +++--- src/server.c | 35 ++++---- src/server.h | 12 +-- src/siphash.c | 6 +- src/slowlog.c | 2 +- src/sort.c | 2 +- src/sparkline.c | 2 +- src/stream.h | 2 +- src/t_hash.c | 2 +- src/t_list.c | 6 +- src/t_set.c | 6 +- src/t_stream.c | 6 +- src/t_string.c | 2 +- src/t_zset.c | 2 +- src/tracking.c | 2 +- src/valgrind.sup | 6 +- src/ziplist.c | 16 ++-- src/zipmap.c | 2 +- tests/cluster/tests/04-resharding.tcl | 6 +- tests/instances.tcl | 2 +- tests/integration/replication-4.tcl | 2 +- tests/support/test.tcl | 2 +- tests/test_helper.tcl | 7 +- tests/unit/expire.tcl | 2 +- tests/unit/hyperloglog.tcl | 2 +- tests/unit/scripting.tcl | 2 +- utils/create-cluster/README | 2 +- utils/hashtable/README | 2 +- 80 files changed, 436 insertions(+), 416 deletions(-) diff --git 
a/CONTRIBUTING b/CONTRIBUTING index 000edbeaf..82064afa3 100644 --- a/CONTRIBUTING +++ b/CONTRIBUTING @@ -20,6 +20,10 @@ each source file that you contribute. http://stackoverflow.com/questions/tagged/redis + Issues and pull requests for documentation belong on the redis-doc repo: + + https://github.com/redis/redis-doc + # How to provide a patch for a new feature 1. If it is a major feature or a semantical change, please don't start coding diff --git a/README.md b/README.md index a90b95cc1..80c2c9178 100644 --- a/README.md +++ b/README.md @@ -3,22 +3,22 @@ This README is just a fast *quick start* document. You can find more detailed do What is Redis? -------------- -Redis is often referred as a *data structures* server. What this means is that Redis provides access to mutable data structures via a set of commands, which are sent using a *server-client* model with TCP sockets and a simple protocol. So different processes can query and modify the same data structures in a shared way. +Redis is often referred to as a *data structures* server. What this means is that Redis provides access to mutable data structures via a set of commands, which are sent using a *server-client* model with TCP sockets and a simple protocol. So different processes can query and modify the same data structures in a shared way. Data structures implemented into Redis have a few special properties: -* Redis cares to store them on disk, even if they are always served and modified into the server memory. This means that Redis is fast, but that is also non-volatile. -* Implementation of data structures stress on memory efficiency, so data structures inside Redis will likely use less memory compared to the same data structure modeled using an high level programming language. -* Redis offers a number of features that are natural to find in a database, like replication, tunable levels of durability, cluster, high availability. +* Redis cares to store them on disk, even if they are always served and modified into the server memory. This means that Redis is fast, but that it is also non-volatile. +* The implementation of data structures emphasizes memory efficiency, so data structures inside Redis will likely use less memory compared to the same data structure modelled using a high-level programming language. +* Redis offers a number of features that are natural to find in a database, like replication, tunable levels of durability, clustering, and high availability. -Another good example is to think of Redis as a more complex version of memcached, where the operations are not just SETs and GETs, but operations to work with complex data types like Lists, Sets, ordered data structures, and so forth. +Another good example is to think of Redis as a more complex version of memcached, where the operations are not just SETs and GETs, but operations that work with complex data types like Lists, Sets, ordered data structures, and so forth. If you want to know more, this is a list of selected starting points: * Introduction to Redis data types. http://redis.io/topics/data-types-intro * Try Redis directly inside your browser. http://try.redis.io * The full list of Redis commands. http://redis.io/commands -* There is much more inside the Redis official documentation. http://redis.io/documentation +* There is much more inside the official Redis documentation. http://redis.io/documentation Building Redis -------------- @@ -29,7 +29,7 @@ and 64 bit systems. 
It may compile on Solaris derived systems (for instance SmartOS) but our support for this platform is *best effort* and Redis is not guaranteed to -work as well as in Linux, OSX, and \*BSD there. +work as well as in Linux, OSX, and \*BSD. It is as simple as: @@ -63,7 +63,7 @@ installed): Fixing build problems with dependencies or cached build options --------- -Redis has some dependencies which are included into the `deps` directory. +Redis has some dependencies which are included in the `deps` directory. `make` does not automatically rebuild dependencies even if something in the source code of dependencies changes. @@ -90,7 +90,7 @@ with a 64 bit target, or the other way around, you need to perform a In case of build errors when trying to build a 32 bit binary of Redis, try the following steps: -* Install the packages libc6-dev-i386 (also try g++-multilib). +* Install the package libc6-dev-i386 (also try g++-multilib). * Try using the following command line instead of `make 32bit`: `make CFLAGS="-m32 -march=native" LDFLAGS="-m32"` @@ -114,15 +114,15 @@ To compile against jemalloc on Mac OS X systems, use: Verbose build ------------- -Redis will build with a user friendly colorized output by default. -If you want to see a more verbose output use the following: +Redis will build with a user-friendly colorized output by default. +If you want to see a more verbose output, use the following: % make V=1 Running Redis ------------- -To run Redis with the default configuration just type: +To run Redis with the default configuration, just type: % cd src % ./redis-server @@ -173,7 +173,7 @@ You can find the list of all the available commands at http://redis.io/commands. Installing Redis ----------------- -In order to install Redis binaries into /usr/local/bin just use: +In order to install Redis binaries into /usr/local/bin, just use: % make install @@ -182,8 +182,8 @@ different destination. Make install will just install binaries in your system, but will not configure init scripts and configuration files in the appropriate place. This is not -needed if you want just to play a bit with Redis, but if you are installing -it the proper way for a production system, we have a script doing this +needed if you just want to play a bit with Redis, but if you are installing +it the proper way for a production system, we have a script that does this for Ubuntu and Debian systems: % cd utils @@ -201,7 +201,7 @@ You'll be able to stop and start Redis using the script named Code contributions ----------------- -Note: by contributing code to the Redis project in any form, including sending +Note: By contributing code to the Redis project in any form, including sending a pull request via Github, a code fragment or patch via private email or public discussion groups, you agree to release your code under the terms of the BSD license that you can find in the [COPYING][1] file included in the Redis @@ -251,7 +251,7 @@ of complexity incrementally. Note: lately Redis was refactored quite a bit. Function names and file names have been changed, so you may find that this documentation reflects the -`unstable` branch more closely. For instance in Redis 3.0 the `server.c` +`unstable` branch more closely. For instance, in Redis 3.0 the `server.c` and `server.h` files were named `redis.c` and `redis.h`. However the overall structure is the same. Keep in mind that all the new developments and pull requests should be performed against the `unstable` branch. 
@@ -296,7 +296,7 @@ The client structure defines a *connected client*: * The `fd` field is the client socket file descriptor. * `argc` and `argv` are populated with the command the client is executing, so that functions implementing a given Redis command can read the arguments. * `querybuf` accumulates the requests from the client, which are parsed by the Redis server according to the Redis protocol and executed by calling the implementations of the commands the client is executing. -* `reply` and `buf` are dynamic and static buffers that accumulate the replies the server sends to the client. These buffers are incrementally written to the socket as soon as the file descriptor is writable. +* `reply` and `buf` are dynamic and static buffers that accumulate the replies the server sends to the client. These buffers are incrementally written to the socket as soon as the file descriptor is writeable. As you can see in the client structure above, arguments in a command are described as `robj` structures. The following is the full `robj` @@ -329,13 +329,13 @@ This is the entry point of the Redis server, where the `main()` function is defined. The following are the most important steps in order to startup the Redis server. -* `initServerConfig()` setups the default values of the `server` structure. +* `initServerConfig()` sets up the default values of the `server` structure. * `initServer()` allocates the data structures needed to operate, setup the listening socket, and so forth. * `aeMain()` starts the event loop which listens for new connections. There are two special functions called periodically by the event loop: -1. `serverCron()` is called periodically (according to `server.hz` frequency), and performs tasks that must be performed from time to time, like checking for timedout clients. +1. `serverCron()` is called periodically (according to `server.hz` frequency), and performs tasks that must be performed from time to time, like checking for timed out clients. 2. `beforeSleep()` is called every time the event loop fired, Redis served a few requests, and is returning back into the event loop. Inside server.c you can find code that handles other vital things of the Redis server: @@ -352,16 +352,16 @@ This file defines all the I/O functions with clients, masters and replicas (which in Redis are just special clients): * `createClient()` allocates and initializes a new client. -* the `addReply*()` family of functions are used by commands implementations in order to append data to the client structure, that will be transmitted to the client as a reply for a given command executed. +* the `addReply*()` family of functions are used by command implementations in order to append data to the client structure, that will be transmitted to the client as a reply for a given command executed. * `writeToClient()` transmits the data pending in the output buffers to the client and is called by the *writable event handler* `sendReplyToClient()`. -* `readQueryFromClient()` is the *readable event handler* and accumulates data from read from the client into the query buffer. +* `readQueryFromClient()` is the *readable event handler* and accumulates data read from the client into the query buffer. * `processInputBuffer()` is the entry point in order to parse the client query buffer according to the Redis protocol. Once commands are ready to be processed, it calls `processCommand()` which is defined inside `server.c` in order to actually execute the command. 
* `freeClient()` deallocates, disconnects and removes a client. aof.c and rdb.c --- -As you can guess from the names these files implement the RDB and AOF +As you can guess from the names, these files implement the RDB and AOF persistence for Redis. Redis uses a persistence model based on the `fork()` system call in order to create a thread with the same (shared) memory content of the main Redis thread. This secondary thread dumps the content @@ -373,13 +373,13 @@ The implementation inside `aof.c` has additional functions in order to implement an API that allows commands to append new commands into the AOF file as clients execute them. -The `call()` function defined inside `server.c` is responsible to call +The `call()` function defined inside `server.c` is responsible for calling the functions that in turn will write the commands into the AOF. db.c --- -Certain Redis commands operate on specific data types, others are general. +Certain Redis commands operate on specific data types; others are general. Examples of generic commands are `DEL` and `EXPIRE`. They operate on keys and not on their values specifically. All those generic commands are defined inside `db.c`. @@ -387,7 +387,7 @@ defined inside `db.c`. Moreover `db.c` implements an API in order to perform certain operations on the Redis dataset without directly accessing the internal data structures. -The most important functions inside `db.c` which are used in many commands +The most important functions inside `db.c` which are used in many command implementations are the following: * `lookupKeyRead()` and `lookupKeyWrite()` are used in order to get a pointer to the value associated to a given key, or `NULL` if the key does not exist. @@ -405,7 +405,7 @@ The `robj` structure defining Redis objects was already described. Inside a basic level, like functions to allocate new objects, handle the reference counting and so forth. Notable functions inside this file: -* `incrRefcount()` and `decrRefCount()` are used in order to increment or decrement an object reference count. When it drops to 0 the object is finally freed. +* `incrRefCount()` and `decrRefCount()` are used in order to increment or decrement an object reference count. When it drops to 0 the object is finally freed. * `createObject()` allocates a new object. There are also specialized functions to allocate string objects having a specific content, like `createStringObjectFromLongLong()` and similar functions. This file also implements the `OBJECT` command. @@ -429,12 +429,12 @@ replicas, or to continue the replication after a disconnection. Other C files --- -* `t_hash.c`, `t_list.c`, `t_set.c`, `t_string.c`, `t_zset.c` and `t_stream.c` contains the implementation of the Redis data types. They implement both an API to access a given data type, and the client commands implementations for these data types. +* `t_hash.c`, `t_list.c`, `t_set.c`, `t_string.c`, `t_zset.c` and `t_stream.c` contains the implementation of the Redis data types. They implement both an API to access a given data type, and the client command implementations for these data types. * `ae.c` implements the Redis event loop, it's a self contained library which is simple to read and understand. * `sds.c` is the Redis string library, check http://github.com/antirez/sds for more information. * `anet.c` is a library to use POSIX networking in a simpler way compared to the raw interface exposed by the kernel. * `dict.c` is an implementation of a non-blocking hash table which rehashes incrementally. 
-* `scripting.c` implements Lua scripting. It is completely self contained from the rest of the Redis implementation and is simple enough to understand if you are familar with the Lua API. +* `scripting.c` implements Lua scripting. It is completely self-contained and isolated from the rest of the Redis implementation and is simple enough to understand if you are familiar with the Lua API. * `cluster.c` implements the Redis Cluster. Probably a good read only after being very familiar with the rest of the Redis code base. If you want to read `cluster.c` make sure to read the [Redis Cluster specification][3]. [3]: http://redis.io/topics/cluster-spec @@ -460,12 +460,12 @@ top comment inside `server.c`. After the command operates in some way, it returns a reply to the client, usually using `addReply()` or a similar function defined inside `networking.c`. -There are tons of commands implementations inside the Redis source code -that can serve as examples of actual commands implementations. To write -a few toy commands can be a good exercise to familiarize with the code base. +There are tons of command implementations inside the Redis source code +that can serve as examples of actual commands implementations. Writing +a few toy commands can be a good exercise to get familiar with the code base. There are also many other files not described here, but it is useless to -cover everything. We want to just help you with the first steps. +cover everything. We just want to help you with the first steps. Eventually you'll find your way inside the Redis code base :-) Enjoy! diff --git a/deps/README.md b/deps/README.md index f923c06ad..02c99052f 100644 --- a/deps/README.md +++ b/deps/README.md @@ -21,7 +21,7 @@ just following tose steps: 1. Remove the jemalloc directory. 2. Substitute it with the new jemalloc source tree. -3. Edit the Makefile localted in the same directory as the README you are +3. Edit the Makefile located in the same directory as the README you are reading, and change the --with-version in the Jemalloc configure script options with the version you are using. This is required because otherwise Jemalloc configuration script is broken and will not work nested in another @@ -33,7 +33,7 @@ If you want to upgrade Jemalloc while also providing support for active defragmentation, in addition to the above steps you need to perform the following additional steps: -5. In Jemalloc three, file `include/jemalloc/jemalloc_macros.h.in`, make sure +5. In Jemalloc tree, file `include/jemalloc/jemalloc_macros.h.in`, make sure to add `#define JEMALLOC_FRAG_HINT`. 6. Implement the function `je_get_defrag_hint()` inside `src/jemalloc.c`. You can see how it is implemented in the current Jemalloc source tree shipped @@ -49,7 +49,7 @@ Hiredis uses the SDS string library, that must be the same version used inside R 1. Check with diff if hiredis API changed and what impact it could have in Redis. 2. Make sure that the SDS library inside Hiredis and inside Redis are compatible. 3. After the upgrade, run the Redis Sentinel test. -4. Check manually that redis-cli and redis-benchmark behave as expecteed, since we have no tests for CLI utilities currently. +4. Check manually that redis-cli and redis-benchmark behave as expected, since we have no tests for CLI utilities currently. Linenoise --- @@ -77,6 +77,6 @@ and our version: 1. Makefile is modified to allow a different compiler than GCC. 2. 
We have the implementation source code, and directly link to the following external libraries: `lua_cjson.o`, `lua_struct.o`, `lua_cmsgpack.o` and `lua_bit.o`. -3. There is a security fix in `ldo.c`, line 498: The check for `LUA_SIGNATURE[0]` is removed in order toa void direct bytecode execution. +3. There is a security fix in `ldo.c`, line 498: The check for `LUA_SIGNATURE[0]` is removed in order to avoid direct bytecode execution. diff --git a/deps/linenoise/linenoise.c b/deps/linenoise/linenoise.c index cfe51e768..ccf5c5548 100644 --- a/deps/linenoise/linenoise.c +++ b/deps/linenoise/linenoise.c @@ -625,7 +625,7 @@ static void refreshMultiLine(struct linenoiseState *l) { rpos2 = (plen+l->pos+l->cols)/l->cols; /* current cursor relative row. */ lndebug("rpos2 %d", rpos2); - /* Go up till we reach the expected positon. */ + /* Go up till we reach the expected position. */ if (rows-rpos2 > 0) { lndebug("go-up %d", rows-rpos2); snprintf(seq,64,"\x1b[%dA", rows-rpos2); @@ -767,7 +767,7 @@ void linenoiseEditBackspace(struct linenoiseState *l) { } } -/* Delete the previosu word, maintaining the cursor at the start of the +/* Delete the previous word, maintaining the cursor at the start of the * current word. */ void linenoiseEditDeletePrevWord(struct linenoiseState *l) { size_t old_pos = l->pos; diff --git a/redis.conf b/redis.conf index f2e7f1964..38499b276 100644 --- a/redis.conf +++ b/redis.conf @@ -24,7 +24,7 @@ # to customize a few per-server settings. Include files can include # other files, so use this wisely. # -# Notice option "include" won't be rewritten by command "CONFIG REWRITE" +# Note that option "include" won't be rewritten by command "CONFIG REWRITE" # from admin or Redis Sentinel. Since Redis always uses the last processed # line as value of a configuration directive, you'd better put includes # at the beginning of this file to avoid overwriting config change at runtime. @@ -46,7 +46,7 @@ ################################## NETWORK ##################################### # By default, if no "bind" configuration directive is specified, Redis listens -# for connections from all the network interfaces available on the server. +# for connections from all available network interfaces on the host machine. # It is possible to listen to just one or multiple selected interfaces using # the "bind" configuration directive, followed by one or more IP addresses. # @@ -58,13 +58,12 @@ # ~~~ WARNING ~~~ If the computer running Redis is directly exposed to the # internet, binding to all the interfaces is dangerous and will expose the # instance to everybody on the internet. So by default we uncomment the -# following bind directive, that will force Redis to listen only into -# the IPv4 loopback interface address (this means Redis will be able to -# accept connections only from clients running into the same computer it -# is running). +# following bind directive, that will force Redis to listen only on the +# IPv4 loopback interface address (this means Redis will only be able to +# accept client connections from the same host that it is running on). # # IF YOU ARE SURE YOU WANT YOUR INSTANCE TO LISTEN TO ALL THE INTERFACES -# JUST COMMENT THE FOLLOWING LINE. +# JUST COMMENT OUT THE FOLLOWING LINE. # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ bind 127.0.0.1 @@ -93,8 +92,8 @@ port 6379 # TCP listen() backlog. # -# In high requests-per-second environments you need an high backlog in order -# to avoid slow clients connections issues. 
Note that the Linux kernel +# In high requests-per-second environments you need a high backlog in order +# to avoid slow clients connection issues. Note that the Linux kernel # will silently truncate it to the value of /proc/sys/net/core/somaxconn so # make sure to raise both the value of somaxconn and tcp_max_syn_backlog # in order to get the desired effect. @@ -118,8 +117,8 @@ timeout 0 # of communication. This is useful for two reasons: # # 1) Detect dead peers. -# 2) Take the connection alive from the point of view of network -# equipment in the middle. +# 2) Force network equipment in the middle to consider the connection to be +# alive. # # On Linux, the specified value (in seconds) is the period used to send ACKs. # Note that to close the connection the double of the time is needed. @@ -228,11 +227,12 @@ daemonize no # supervision tree. Options: # supervised no - no supervision interaction # supervised upstart - signal upstart by putting Redis into SIGSTOP mode +# requires "expect stop" in your upstart job config # supervised systemd - signal systemd by writing READY=1 to $NOTIFY_SOCKET # supervised auto - detect upstart or systemd method based on # UPSTART_JOB or NOTIFY_SOCKET environment variables # Note: these supervision methods only signal "process is ready." -# They do not enable continuous liveness pings back to your supervisor. +# They do not enable continuous pings back to your supervisor. supervised no # If a pid file is specified, Redis writes it where specified at startup @@ -291,7 +291,7 @@ always-show-logo yes # Will save the DB if both the given number of seconds and the given # number of write operations against the DB occurred. # -# In the example below the behaviour will be to save: +# In the example below the behavior will be to save: # after 900 sec (15 min) if at least 1 key changed # after 300 sec (5 min) if at least 10 keys changed # after 60 sec if at least 10000 keys changed @@ -324,7 +324,7 @@ save 60 10000 stop-writes-on-bgsave-error yes # Compress string objects using LZF when dump .rdb databases? -# For default that's set to 'yes' as it's almost always a win. +# By default compression is enabled as it's almost always a win. # If you want to save some CPU in the saving child set it to 'no' but # the dataset will likely be bigger if you have compressible values or keys. rdbcompression yes @@ -412,11 +412,11 @@ dir ./ # still reply to client requests, possibly with out of date data, or the # data set may just be empty if this is the first synchronization. # -# 2) if replica-serve-stale-data is set to 'no' the replica will reply with -# an error "SYNC with master in progress" to all the kind of commands -# but to INFO, replicaOF, AUTH, PING, SHUTDOWN, REPLCONF, ROLE, CONFIG, -# SUBSCRIBE, UNSUBSCRIBE, PSUBSCRIBE, PUNSUBSCRIBE, PUBLISH, PUBSUB, -# COMMAND, POST, HOST: and LATENCY. +# 2) If replica-serve-stale-data is set to 'no' the replica will reply with +# an error "SYNC with master in progress" to all commands except: +# INFO, REPLICAOF, AUTH, PING, SHUTDOWN, REPLCONF, ROLE, CONFIG, SUBSCRIBE, +# UNSUBSCRIBE, PSUBSCRIBE, PUNSUBSCRIBE, PUBLISH, PUBSUB, COMMAND, POST, +# HOST and LATENCY. # replica-serve-stale-data yes @@ -487,7 +487,7 @@ repl-diskless-sync-delay 5 # # Replica can load the RDB it reads from the replication link directly from the # socket, or store the RDB to a file and read that file after it was completely -# recived from the master. +# received from the master. 
# # In many cases the disk is slower than the network, and storing and loading # the RDB file may increase replication time (and even increase the master's @@ -517,7 +517,8 @@ repl-diskless-load disabled # # It is important to make sure that this value is greater than the value # specified for repl-ping-replica-period otherwise a timeout will be detected -# every time there is low traffic between the master and the replica. +# every time there is low traffic between the master and the replica. The default +# value is 60 seconds. # # repl-timeout 60 @@ -542,21 +543,21 @@ repl-disable-tcp-nodelay no # partial resync is enough, just passing the portion of data the replica # missed while disconnected. # -# The bigger the replication backlog, the longer the time the replica can be -# disconnected and later be able to perform a partial resynchronization. +# The bigger the replication backlog, the longer the replica can endure the +# disconnect and later be able to perform a partial resynchronization. # -# The backlog is only allocated once there is at least a replica connected. +# The backlog is only allocated if there is at least one replica connected. # # repl-backlog-size 1mb -# After a master has no longer connected replicas for some time, the backlog -# will be freed. The following option configures the amount of seconds that -# need to elapse, starting from the time the last replica disconnected, for -# the backlog buffer to be freed. +# After a master has no connected replicas for some time, the backlog will be +# freed. The following option configures the amount of seconds that need to +# elapse, starting from the time the last replica disconnected, for the backlog +# buffer to be freed. # # Note that replicas never free the backlog for timeout, since they may be # promoted to masters later, and should be able to correctly "partially -# resynchronize" with the replicas: hence they should always accumulate backlog. +# resynchronize" with other replicas: hence they should always accumulate backlog. # # A value of 0 means to never release the backlog. # @@ -606,8 +607,8 @@ replica-priority 100 # Another place where this info is available is in the output of the # "ROLE" command of a master. # -# The listed IP and address normally reported by a replica is obtained -# in the following way: +# The listed IP address and port normally reported by a replica is +# obtained in the following way: # # IP: The address is auto detected by checking the peer address # of the socket used by the replica to connect with the master. @@ -617,7 +618,7 @@ replica-priority 100 # listen for connections. # # However when port forwarding or Network Address Translation (NAT) is -# used, the replica may be actually reachable via different IP and port +# used, the replica may actually be reachable via different IP and port # pairs. The following two options can be used by a replica in order to # report to its master a specific set of IP and port, so that both INFO # and ROLE will report those values. @@ -634,7 +635,7 @@ replica-priority 100 # This is implemented using an invalidation table that remembers, using # 16 millions of slots, what clients may have certain subsets of keys. In turn # this is used in order to send invalidation messages to clients. 
Please -# to understand more about the feature check this page: +# check this page to understand more about the feature: # # https://redis.io/topics/client-side-caching # @@ -666,7 +667,7 @@ replica-priority 100 ################################## SECURITY ################################### -# Warning: since Redis is pretty fast an outside user can try up to +# Warning: since Redis is pretty fast, an outside user can try up to # 1 million passwords per second against a modern box. This means that you # should use very strong passwords, otherwise they will be very easy to break. # Note that because the password is really a shared secret between the client @@ -690,7 +691,7 @@ replica-priority 100 # AUTH (or the HELLO command AUTH option) in order to be authenticated and # start to work. # -# The ACL rules that describe what an user can do are the following: +# The ACL rules that describe what a user can do are the following: # # on Enable the user: it is possible to authenticate as this user. # off Disable the user: it's no longer possible to authenticate @@ -718,7 +719,7 @@ replica-priority 100 # It is possible to specify multiple patterns. # allkeys Alias for ~* # resetkeys Flush the list of allowed keys patterns. -# > Add this passowrd to the list of valid password for the user. +# > Add this password to the list of valid password for the user. # For example >mypass will add "mypass" to the list. # This directive clears the "nopass" flag (see later). # < Remove this password from the list of valid passwords. @@ -772,7 +773,7 @@ acllog-max-len 128 # # Instead of configuring users here in this file, it is possible to use # a stand-alone file just listing users. The two methods cannot be mixed: -# if you configure users here and at the same time you activate the exteranl +# if you configure users here and at the same time you activate the external # ACL file, the server will refuse to start. # # The format of the external ACL user file is exactly the same as the @@ -780,7 +781,7 @@ acllog-max-len 128 # # aclfile /etc/redis/users.acl -# IMPORTANT NOTE: starting with Redis 6 "requirepass" is just a compatiblity +# IMPORTANT NOTE: starting with Redis 6 "requirepass" is just a compatibility # layer on top of the new ACL system. The option effect will be just setting # the password for the default user. Clients will still authenticate using # AUTH as usually, or more explicitly with AUTH default @@ -891,8 +892,8 @@ acllog-max-len 128 # LRU, LFU and minimal TTL algorithms are not precise algorithms but approximated # algorithms (in order to save memory), so you can tune it for speed or -# accuracy. For default Redis will check five keys and pick the one that was -# used less recently, you can change the sample size using the following +# accuracy. By default Redis will check five keys and pick the one that was +# used least recently, you can change the sample size using the following # configuration directive. # # The default of 5 produces good enough results. 10 Approximates very closely @@ -932,8 +933,8 @@ acllog-max-len 128 # it is possible to increase the expire "effort" that is normally set to # "1", to a greater value, up to the value "10". At its maximum value the # system will use more CPU, longer cycles (and technically may introduce -# more latency), and will tollerate less already expired keys still present -# in the system. It's a tradeoff betweeen memory, CPU and latecy. +# more latency), and will tolerate less already expired keys still present +# in the system. 
It's a tradeoff between memory, CPU and latency. # # active-expire-effort 1 @@ -1001,7 +1002,7 @@ lazyfree-lazy-user-del no # # Now it is also possible to handle Redis clients socket reads and writes # in different I/O threads. Since especially writing is so slow, normally -# Redis users use pipelining in order to speedup the Redis performances per +# Redis users use pipelining in order to speed up the Redis performances per # core, and spawn multiple instances in order to scale more. Using I/O # threads it is possible to easily speedup two times Redis without resorting # to pipelining nor sharding of the instance. @@ -1019,7 +1020,7 @@ lazyfree-lazy-user-del no # # io-threads 4 # -# Setting io-threads to 1 will just use the main thread as usually. +# Setting io-threads to 1 will just use the main thread as usual. # When I/O threads are enabled, we only use threads for writes, that is # to thread the write(2) syscall and transfer the client buffers to the # socket. However it is also possible to enable threading of reads and @@ -1036,7 +1037,7 @@ lazyfree-lazy-user-del no # # NOTE 2: If you want to test the Redis speedup using redis-benchmark, make # sure you also run the benchmark itself in threaded mode, using the -# --threads option to match the number of Redis theads, otherwise you'll not +# --threads option to match the number of Redis threads, otherwise you'll not # be able to notice the improvements. ############################ KERNEL OOM CONTROL ############################## @@ -1189,8 +1190,8 @@ aof-load-truncated yes # # [RDB file][AOF tail] # -# When loading Redis recognizes that the AOF file starts with the "REDIS" -# string and loads the prefixed RDB file, and continues loading the AOF +# When loading, Redis recognizes that the AOF file starts with the "REDIS" +# string and loads the prefixed RDB file, then continues loading the AOF # tail. aof-use-rdb-preamble yes @@ -1204,7 +1205,7 @@ aof-use-rdb-preamble yes # # When a long running script exceeds the maximum execution time only the # SCRIPT KILL and SHUTDOWN NOSAVE commands are available. The first can be -# used to stop a script that did not yet called write commands. The second +# used to stop a script that did not yet call any write commands. The second # is the only way to shut down the server in the case a write command was # already issued by the script but the user doesn't want to wait for the natural # termination of the script. @@ -1230,7 +1231,7 @@ lua-time-limit 5000 # Cluster node timeout is the amount of milliseconds a node must be unreachable # for it to be considered in failure state. -# Most other internal time limits are multiple of the node timeout. +# Most other internal time limits are a multiple of the node timeout. # # cluster-node-timeout 15000 @@ -1257,18 +1258,18 @@ lua-time-limit 5000 # the failover if, since the last interaction with the master, the time # elapsed is greater than: # -# (node-timeout * replica-validity-factor) + repl-ping-replica-period +# (node-timeout * cluster-replica-validity-factor) + repl-ping-replica-period # -# So for example if node-timeout is 30 seconds, and the replica-validity-factor +# So for example if node-timeout is 30 seconds, and the cluster-replica-validity-factor # is 10, and assuming a default repl-ping-replica-period of 10 seconds, the # replica will not try to failover if it was not able to talk with the master # for longer than 310 seconds. 
# -# A large replica-validity-factor may allow replicas with too old data to failover +# A large cluster-replica-validity-factor may allow replicas with too old data to failover # a master, while a too small value may prevent the cluster from being able to # elect a replica at all. # -# For maximum availability, it is possible to set the replica-validity-factor +# For maximum availability, it is possible to set the cluster-replica-validity-factor # to a value of 0, which means, that replicas will always try to failover the # master regardless of the last time they interacted with the master. # (However they'll always try to apply a delay proportional to their @@ -1299,7 +1300,7 @@ lua-time-limit 5000 # cluster-migration-barrier 1 # By default Redis Cluster nodes stop accepting queries if they detect there -# is at least an hash slot uncovered (no available node is serving it). +# is at least a hash slot uncovered (no available node is serving it). # This way if the cluster is partially down (for example a range of hash slots # are no longer covered) all the cluster becomes, eventually, unavailable. # It automatically returns available as soon as all the slots are covered again. @@ -1354,7 +1355,7 @@ lua-time-limit 5000 # * cluster-announce-port # * cluster-announce-bus-port # -# Each instruct the node about its address, client port, and cluster message +# Each instructs the node about its address, client port, and cluster message # bus port. The information is then published in the header of the bus packets # so that other nodes will be able to correctly map the address of the node # publishing the information. @@ -1365,7 +1366,7 @@ lua-time-limit 5000 # Note that when remapped, the bus port may not be at the fixed offset of # clients port + 10000, so you can specify any port and bus-port depending # on how they get remapped. If the bus-port is not set, a fixed offset of -# 10000 will be used as usually. +# 10000 will be used as usual. # # Example: # @@ -1494,7 +1495,7 @@ notify-keyspace-events "" # two kind of inline requests that were anyway illegal: an empty request # or any request that starts with "/" (there are no Redis commands starting # with such a slash). Normal RESP2/RESP3 requests are completely out of the -# path of the Gopher protocol implementation and are served as usually as well. +# path of the Gopher protocol implementation and are served as usual as well. # # If you open a connection to Redis when Gopher is enabled and send it # a string like "/foo", if there is a key named "/foo" it is served via the @@ -1666,7 +1667,7 @@ client-output-buffer-limit pubsub 32mb 8mb 60 # client-query-buffer-limit 1gb # In the Redis protocol, bulk requests, that are, elements representing single -# strings, are normally limited ot 512 mb. However you can change this limit +# strings, are normally limited to 512 mb. However you can change this limit # here, but must be 1mb or greater # # proto-max-bulk-len 512mb @@ -1695,7 +1696,7 @@ hz 10 # # Since the default HZ value by default is conservatively set to 10, Redis # offers, and enables by default, the ability to use an adaptive HZ value -# which will temporary raise when there are many connected clients. +# which will temporarily raise when there are many connected clients. 
# # When dynamic HZ is enabled, the actual configured HZ will be used # as a baseline, but multiples of the configured HZ value will be actually @@ -1762,7 +1763,7 @@ rdb-save-incremental-fsync yes # for the key counter to be divided by two (or decremented if it has a value # less <= 10). # -# The default value for the lfu-decay-time is 1. A Special value of 0 means to +# The default value for the lfu-decay-time is 1. A special value of 0 means to # decay the counter every time it happens to be scanned. # # lfu-log-factor 10 @@ -1782,7 +1783,7 @@ rdb-save-incremental-fsync yes # restart is needed in order to lower the fragmentation, or at least to flush # away all the data and create it again. However thanks to this feature # implemented by Oran Agra for Redis 4.0 this process can happen at runtime -# in an "hot" way, while the server is running. +# in a "hot" way, while the server is running. # # Basically when the fragmentation is over a certain level (see the # configuration options below) Redis will start to create new copies of the @@ -1859,3 +1860,4 @@ jemalloc-bg-thread yes # # Set bgsave child process to cpu affinity 1,10,11 # bgsave_cpulist 1,10-11 + diff --git a/sentinel.conf b/sentinel.conf index 4ca5e5f8f..b6ff05f25 100644 --- a/sentinel.conf +++ b/sentinel.conf @@ -259,6 +259,6 @@ sentinel deny-scripts-reconfig yes # SENTINEL SET can also be used in order to perform this configuration at runtime. # # In order to set a command back to its original name (undo the renaming), it -# is possible to just rename a command to itsef: +# is possible to just rename a command to itself: # # SENTINEL rename-command mymaster CONFIG CONFIG diff --git a/src/acl.c b/src/acl.c index e0432ba5c..74768aa27 100644 --- a/src/acl.c +++ b/src/acl.c @@ -289,7 +289,7 @@ void ACLFreeUserAndKillClients(user *u) { while ((ln = listNext(&li)) != NULL) { client *c = listNodeValue(ln); if (c->user == u) { - /* We'll free the conenction asynchronously, so + /* We'll free the connection asynchronously, so * in theory to set a different user is not needed. * However if there are bugs in Redis, soon or later * this may result in some security hole: it's much diff --git a/src/adlist.c b/src/adlist.c index 0fedc0729..bc06ffc8f 100644 --- a/src/adlist.c +++ b/src/adlist.c @@ -34,8 +34,9 @@ #include "zmalloc.h" /* Create a new list. The created list can be freed with - * AlFreeList(), but private value of every node need to be freed - * by the user before to call AlFreeList(). + * listRelease(), but private value of every node need to be freed + * by the user before to call listRelease(), or by setting a free method using + * listSetFreeMethod. * * On error, NULL is returned. Otherwise the pointer to the new list. */ list *listCreate(void) @@ -217,8 +218,8 @@ void listRewindTail(list *list, listIter *li) { * listDelNode(), but not to remove other elements. * * The function returns a pointer to the next element of the list, - * or NULL if there are no more elements, so the classical usage patter - * is: + * or NULL if there are no more elements, so the classical usage + * pattern is: * * iter = listGetIterator(list,); * while ((node = listNext(iter)) != NULL) { diff --git a/src/ae.c b/src/ae.c index 689a27d16..c51666562 100644 --- a/src/ae.c +++ b/src/ae.c @@ -457,7 +457,7 @@ int aeProcessEvents(aeEventLoop *eventLoop, int flags) int fired = 0; /* Number of events fired for current fd. */ /* Normally we execute the readable event first, and the writable - * event laster. 
This is useful as sometimes we may be able + * event later. This is useful as sometimes we may be able * to serve the reply of a query immediately after processing the * query. * @@ -465,7 +465,7 @@ int aeProcessEvents(aeEventLoop *eventLoop, int flags) * asking us to do the reverse: never fire the writable event * after the readable. In such a case, we invert the calls. * This is useful when, for instance, we want to do things - * in the beforeSleep() hook, like fsynching a file to disk, + * in the beforeSleep() hook, like fsyncing a file to disk, * before replying to a client. */ int invert = fe->mask & AE_BARRIER; diff --git a/src/ae_evport.c b/src/ae_evport.c index 5c317becb..b79ed9bc7 100644 --- a/src/ae_evport.c +++ b/src/ae_evport.c @@ -232,7 +232,7 @@ static void aeApiDelEvent(aeEventLoop *eventLoop, int fd, int mask) { /* * ENOMEM is a potentially transient condition, but the kernel won't * generally return it unless things are really bad. EAGAIN indicates - * we've reached an resource limit, for which it doesn't make sense to + * we've reached a resource limit, for which it doesn't make sense to * retry (counter-intuitively). All other errors indicate a bug. In any * of these cases, the best we can do is to abort. */ diff --git a/src/aof.c b/src/aof.c index cbc0989d0..dc50e2228 100644 --- a/src/aof.c +++ b/src/aof.c @@ -544,7 +544,7 @@ sds catAppendOnlyGenericCommand(sds dst, int argc, robj **argv) { return dst; } -/* Create the sds representation of an PEXPIREAT command, using +/* Create the sds representation of a PEXPIREAT command, using * 'seconds' as time to live and 'cmd' to understand what command * we are translating into a PEXPIREAT. * @@ -1818,7 +1818,7 @@ void backgroundRewriteDoneHandler(int exitcode, int bysignal) { "Background AOF rewrite terminated with error"); } else { /* SIGUSR1 is whitelisted, so we have a way to kill a child without - * tirggering an error condition. */ + * triggering an error condition. */ if (bysignal != SIGUSR1) server.aof_lastbgrewrite_status = C_ERR; diff --git a/src/atomicvar.h b/src/atomicvar.h index 160056cd7..ecd26ad70 100644 --- a/src/atomicvar.h +++ b/src/atomicvar.h @@ -21,7 +21,7 @@ * * Never use return value from the macros, instead use the AtomicGetIncr() * if you need to get the current value and increment it atomically, like - * in the followign example: + * in the following example: * * long oldvalue; * atomicGetIncr(myvar,oldvalue,1); diff --git a/src/bitops.c b/src/bitops.c index 4b1a09aa4..eb3a9bb1f 100644 --- a/src/bitops.c +++ b/src/bitops.c @@ -36,7 +36,7 @@ /* Count number of bits set in the binary array pointed by 's' and long * 'count' bytes. The implementation of this function is required to - * work with a input string length up to 512 MB. */ + * work with an input string length up to 512 MB. */ size_t redisPopcount(void *s, long count) { size_t bits = 0; unsigned char *p = s; @@ -107,7 +107,7 @@ long redisBitpos(void *s, unsigned long count, int bit) { int found; /* Process whole words first, seeking for first word that is not - * all ones or all zeros respectively if we are lookig for zeros + * all ones or all zeros respectively if we are looking for zeros * or ones. This is much faster with large strings having contiguous * blocks of 1 or 0 bits compared to the vanilla bit per bit processing. * @@ -496,7 +496,7 @@ robj *lookupStringForBitCommand(client *c, size_t maxbit) { * in 'len'. The user is required to pass (likely stack allocated) buffer * 'llbuf' of at least LONG_STR_SIZE bytes. 
Such a buffer is used in the case * the object is integer encoded in order to provide the representation - * without usign heap allocation. + * without using heap allocation. * * The function returns the pointer to the object array of bytes representing * the string it contains, that may be a pointer to 'llbuf' or to the diff --git a/src/blocked.c b/src/blocked.c index 92f1cee65..ea20f5923 100644 --- a/src/blocked.c +++ b/src/blocked.c @@ -53,7 +53,7 @@ * to 0, no timeout is processed). * It usually just needs to send a reply to the client. * - * When implementing a new type of blocking opeation, the implementation + * When implementing a new type of blocking operation, the implementation * should modify unblockClient() and replyToBlockedClientTimedOut() in order * to handle the btype-specific behavior of this two functions. * If the blocking operation waits for certain keys to change state, the @@ -118,7 +118,7 @@ void processUnblockedClients(void) { /* This function will schedule the client for reprocessing at a safe time. * - * This is useful when a client was blocked for some reason (blocking opeation, + * This is useful when a client was blocked for some reason (blocking operation, * CLIENT PAUSE, or whatever), because it may end with some accumulated query * buffer that needs to be processed ASAP: * diff --git a/src/cluster.c b/src/cluster.c index 8d8b61ab4..e8db4050d 100644 --- a/src/cluster.c +++ b/src/cluster.c @@ -377,7 +377,7 @@ void clusterSaveConfigOrDie(int do_fsync) { } } -/* Lock the cluster config using flock(), and leaks the file descritor used to +/* Lock the cluster config using flock(), and leaks the file descriptor used to * acquire the lock so that the file will be locked forever. * * This works because we always update nodes.conf with a new version @@ -544,13 +544,13 @@ void clusterInit(void) { /* Reset a node performing a soft or hard reset: * - * 1) All other nodes are forget. + * 1) All other nodes are forgotten. * 2) All the assigned / open slots are released. * 3) If the node is a slave, it turns into a master. - * 5) Only for hard reset: a new Node ID is generated. - * 6) Only for hard reset: currentEpoch and configEpoch are set to 0. - * 7) The new configuration is saved and the cluster state updated. - * 8) If the node was a slave, the whole data set is flushed away. */ + * 4) Only for hard reset: a new Node ID is generated. + * 5) Only for hard reset: currentEpoch and configEpoch are set to 0. + * 6) The new configuration is saved and the cluster state updated. + * 7) If the node was a slave, the whole data set is flushed away. */ void clusterReset(int hard) { dictIterator *di; dictEntry *de; @@ -646,7 +646,7 @@ static void clusterConnAcceptHandler(connection *conn) { /* Create a link object we use to handle the connection. * It gets passed to the readable handler when data is available. - * Initiallly the link->node pointer is set to NULL as we don't know + * Initially the link->node pointer is set to NULL as we don't know * which node is, but the right node is references once we know the * node identity. */ link = createClusterLink(NULL); @@ -1060,7 +1060,7 @@ uint64_t clusterGetMaxEpoch(void) { * 3) Persist the configuration on disk before sending packets with the * new configuration. * - * If the new config epoch is generated and assigend, C_OK is returned, + * If the new config epoch is generated and assigned, C_OK is returned, * otherwise C_ERR is returned (since the node has already the greatest * configuration around) and no operation is performed. 
* @@ -1133,7 +1133,7 @@ int clusterBumpConfigEpochWithoutConsensus(void) { * * In general we want a system that eventually always ends with different * masters having different configuration epochs whatever happened, since - * nothign is worse than a split-brain condition in a distributed system. + * nothing is worse than a split-brain condition in a distributed system. * * BEHAVIOR * @@ -1192,7 +1192,7 @@ void clusterHandleConfigEpochCollision(clusterNode *sender) { * entries from the black list. This is an O(N) operation but it is not a * problem since add / exists operations are called very infrequently and * the hash table is supposed to contain very little elements at max. - * However without the cleanup during long uptimes and with some automated + * However without the cleanup during long uptime and with some automated * node add/removal procedures, entries could accumulate. */ void clusterBlacklistCleanup(void) { dictIterator *di; @@ -1346,12 +1346,12 @@ int clusterHandshakeInProgress(char *ip, int port, int cport) { return de != NULL; } -/* Start an handshake with the specified address if there is not one +/* Start a handshake with the specified address if there is not one * already in progress. Returns non-zero if the handshake was actually * started. On error zero is returned and errno is set to one of the * following values: * - * EAGAIN - There is already an handshake in progress for this address. + * EAGAIN - There is already a handshake in progress for this address. * EINVAL - IP or port are not valid. */ int clusterStartHandshake(char *ip, int port, int cport) { clusterNode *n; @@ -1793,7 +1793,7 @@ int clusterProcessPacket(clusterLink *link) { if (sender) sender->data_received = now; if (sender && !nodeInHandshake(sender)) { - /* Update our curretEpoch if we see a newer epoch in the cluster. */ + /* Update our currentEpoch if we see a newer epoch in the cluster. */ senderCurrentEpoch = ntohu64(hdr->currentEpoch); senderConfigEpoch = ntohu64(hdr->configEpoch); if (senderCurrentEpoch > server.cluster->currentEpoch) @@ -2480,7 +2480,7 @@ void clusterSetGossipEntry(clusterMsg *hdr, int i, clusterNode *n) { } /* Send a PING or PONG packet to the specified node, making sure to add enough - * gossip informations. */ + * gossip information. */ void clusterSendPing(clusterLink *link, int type) { unsigned char *buf; clusterMsg *hdr; @@ -2500,7 +2500,7 @@ void clusterSendPing(clusterLink *link, int type) { * node_timeout we exchange with each other node at least 4 packets * (we ping in the worst case in node_timeout/2 time, and we also * receive two pings from the host), we have a total of 8 packets - * in the node_timeout*2 falure reports validity time. So we have + * in the node_timeout*2 failure reports validity time. So we have * that, for a single PFAIL node, we can expect to receive the following * number of failure reports (in the specified window of time): * @@ -2527,7 +2527,7 @@ void clusterSendPing(clusterLink *link, int type) { * faster to propagate to go from PFAIL to FAIL state. */ int pfail_wanted = server.cluster->stats_pfail_nodes; - /* Compute the maxium totlen to allocate our buffer. We'll fix the totlen + /* Compute the maximum totlen to allocate our buffer. We'll fix the totlen * later according to the number of gossip sections we really were able * to put inside the packet. 
*/ totlen = sizeof(clusterMsg)-sizeof(union clusterMsgData); @@ -2564,7 +2564,7 @@ void clusterSendPing(clusterLink *link, int type) { if (this->flags & (CLUSTER_NODE_HANDSHAKE|CLUSTER_NODE_NOADDR) || (this->link == NULL && this->numslots == 0)) { - freshnodes--; /* Tecnically not correct, but saves CPU. */ + freshnodes--; /* Technically not correct, but saves CPU. */ continue; } @@ -3149,7 +3149,7 @@ void clusterHandleSlaveFailover(void) { } } - /* If the previous failover attempt timedout and the retry time has + /* If the previous failover attempt timeout and the retry time has * elapsed, we can setup a new one. */ if (auth_age > auth_retry_time) { server.cluster->failover_auth_time = mstime() + @@ -3255,7 +3255,7 @@ void clusterHandleSlaveFailover(void) { * * Slave migration is the process that allows a slave of a master that is * already covered by at least another slave, to "migrate" to a master that - * is orpaned, that is, left with no working slaves. + * is orphaned, that is, left with no working slaves. * ------------------------------------------------------------------------- */ /* This function is responsible to decide if this replica should be migrated @@ -3272,7 +3272,7 @@ void clusterHandleSlaveFailover(void) { * the nodes anyway, so we spend time into clusterHandleSlaveMigration() * if definitely needed. * - * The fuction is called with a pre-computed max_slaves, that is the max + * The function is called with a pre-computed max_slaves, that is the max * number of working (not in FAIL state) slaves for a single master. * * Additional conditions for migration are examined inside the function. @@ -3391,7 +3391,7 @@ void clusterHandleSlaveMigration(int max_slaves) { * data loss due to the asynchronous master-slave replication. * -------------------------------------------------------------------------- */ -/* Reset the manual failover state. This works for both masters and slavesa +/* Reset the manual failover state. This works for both masters and slaves * as all the state about manual failover is cleared. * * The function can be used both to initialize the manual failover state at @@ -3683,7 +3683,7 @@ void clusterCron(void) { replicationSetMaster(myself->slaveof->ip, myself->slaveof->port); } - /* Abourt a manual failover if the timeout is reached. */ + /* Abort a manual failover if the timeout is reached. */ manualFailoverCheckTimeout(); if (nodeIsSlave(myself)) { @@ -3788,12 +3788,12 @@ int clusterNodeSetSlotBit(clusterNode *n, int slot) { * target for replicas migration, if and only if at least one of * the other masters has slaves right now. * - * Normally masters are valid targerts of replica migration if: + * Normally masters are valid targets of replica migration if: * 1. The used to have slaves (but no longer have). * 2. They are slaves failing over a master that used to have slaves. * * However new masters with slots assigned are considered valid - * migration tagets if the rest of the cluster is not a slave-less. + * migration targets if the rest of the cluster is not a slave-less. * * See https://github.com/antirez/redis/issues/3043 for more info. */ if (n->numslots == 1 && clusterMastersHaveSlaves()) @@ -3977,7 +3977,7 @@ void clusterUpdateState(void) { * A) If no other node is in charge according to the current cluster * configuration, we add these slots to our node. 
* B) If according to our config other nodes are already in charge for - * this lots, we set the slots as IMPORTING from our point of view + * this slots, we set the slots as IMPORTING from our point of view * in order to justify we have those slots, and in order to make * redis-trib aware of the issue, so that it can try to fix it. * 2) If we find data in a DB different than DB0 we return C_ERR to @@ -4507,7 +4507,7 @@ NULL } /* If this slot is in migrating status but we have no keys * for it assigning the slot to another node will clear - * the migratig status. */ + * the migrating status. */ if (countKeysInSlot(slot) == 0 && server.cluster->migrating_slots_to[slot]) server.cluster->migrating_slots_to[slot] = NULL; @@ -4852,7 +4852,7 @@ NULL server.cluster->currentEpoch = epoch; /* No need to fsync the config here since in the unlucky event * of a failure to persist the config, the conflict resolution code - * will assign an unique config to this node. */ + * will assign a unique config to this node. */ clusterDoBeforeSleep(CLUSTER_TODO_UPDATE_STATE| CLUSTER_TODO_SAVE_CONFIG); addReply(c,shared.ok); @@ -4900,7 +4900,7 @@ void createDumpPayload(rio *payload, robj *o, robj *key) { unsigned char buf[2]; uint64_t crc; - /* Serialize the object in a RDB-like format. It consist of an object type + /* Serialize the object in an RDB-like format. It consist of an object type * byte followed by the serialized object. This is understood by RESTORE. */ rioInitWithBuffer(payload,sdsempty()); serverAssert(rdbSaveObjectType(payload,o)); @@ -5567,7 +5567,7 @@ void readwriteCommand(client *c) { * resharding in progress). * * On success the function returns the node that is able to serve the request. - * If the node is not 'myself' a redirection must be perfomed. The kind of + * If the node is not 'myself' a redirection must be performed. The kind of * redirection is specified setting the integer passed by reference * 'error_code', which will be set to CLUSTER_REDIR_ASK or * CLUSTER_REDIR_MOVED. @@ -5694,7 +5694,7 @@ clusterNode *getNodeByQuery(client *c, struct redisCommand *cmd, robj **argv, in } } - /* Migarting / Improrting slot? Count keys we don't have. */ + /* Migrating / Importing slot? Count keys we don't have. */ if ((migrating_slot || importing_slot) && lookupKeyRead(&server.db[0],thiskey) == NULL) { @@ -5763,7 +5763,7 @@ clusterNode *getNodeByQuery(client *c, struct redisCommand *cmd, robj **argv, in } /* Handle the read-only client case reading from a slave: if this - * node is a slave and the request is about an hash slot our master + * node is a slave and the request is about a hash slot our master * is serving, we can reply without redirection. */ int is_readonly_command = (c->cmd->flags & CMD_READONLY) || (c->cmd->proc == execCommand && !(c->mstate.cmd_inv_flags & CMD_READONLY)); @@ -5777,7 +5777,7 @@ clusterNode *getNodeByQuery(client *c, struct redisCommand *cmd, robj **argv, in } /* Base case: just return the right node. However if this node is not - * myself, set error_code to MOVED since we need to issue a rediretion. */ + * myself, set error_code to MOVED since we need to issue a redirection. */ if (n != myself && error_code) *error_code = CLUSTER_REDIR_MOVED; return n; } @@ -5823,7 +5823,7 @@ void clusterRedirectClient(client *c, clusterNode *n, int hashslot, int error_co * 3) The client may remain blocked forever (or up to the max timeout time) * waiting for a key change that will never happen. 
* - * If the client is found to be blocked into an hash slot this node no + * If the client is found to be blocked into a hash slot this node no * longer handles, the client is sent a redirection error, and the function * returns 1. Otherwise 0 is returned and no operation is performed. */ int clusterRedirectBlockedClientIfNeeded(client *c) { diff --git a/src/cluster.h b/src/cluster.h index 596a4629a..48a111764 100644 --- a/src/cluster.h +++ b/src/cluster.h @@ -51,8 +51,8 @@ typedef struct clusterLink { #define CLUSTER_NODE_HANDSHAKE 32 /* We have still to exchange the first ping */ #define CLUSTER_NODE_NOADDR 64 /* We don't know the address of this node */ #define CLUSTER_NODE_MEET 128 /* Send a MEET message to this node */ -#define CLUSTER_NODE_MIGRATE_TO 256 /* Master elegible for replica migration. */ -#define CLUSTER_NODE_NOFAILOVER 512 /* Slave will not try to failver. */ +#define CLUSTER_NODE_MIGRATE_TO 256 /* Master eligible for replica migration. */ +#define CLUSTER_NODE_NOFAILOVER 512 /* Slave will not try to failover. */ #define CLUSTER_NODE_NULL_NAME "\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000" #define nodeIsMaster(n) ((n)->flags & CLUSTER_NODE_MASTER) @@ -164,10 +164,10 @@ typedef struct clusterState { clusterNode *mf_slave; /* Slave performing the manual failover. */ /* Manual failover state of slave. */ long long mf_master_offset; /* Master offset the slave needs to start MF - or zero if stil not received. */ + or zero if still not received. */ int mf_can_start; /* If non-zero signal that the manual failover can start requesting masters vote. */ - /* The followign fields are used by masters to take state on elections. */ + /* The following fields are used by masters to take state on elections. */ uint64_t lastVoteEpoch; /* Epoch of the last vote granted. */ int todo_before_sleep; /* Things to do in clusterBeforeSleep(). */ /* Messages received and sent by type. */ diff --git a/src/config.c b/src/config.c index 2c69540db..63852ff4f 100644 --- a/src/config.c +++ b/src/config.c @@ -1279,7 +1279,7 @@ void rewriteConfigNumericalOption(struct rewriteConfigState *state, const char * rewriteConfigRewriteLine(state,option,line,force); } -/* Rewrite a octal option. */ +/* Rewrite an octal option. */ void rewriteConfigOctalOption(struct rewriteConfigState *state, char *option, int value, int defvalue) { int force = value != defvalue; sds line = sdscatprintf(sdsempty(),"%s %o",option,value); @@ -2097,7 +2097,7 @@ static int isValidAOFfilename(char *val, char **err) { static int updateHZ(long long val, long long prev, char **err) { UNUSED(prev); UNUSED(err); - /* Hz is more an hint from the user, so we accept values out of range + /* Hz is more a hint from the user, so we accept values out of range * but cap them to reasonable values. */ server.config_hz = val; if (server.config_hz < CONFIG_MIN_HZ) server.config_hz = CONFIG_MIN_HZ; @@ -2115,7 +2115,7 @@ static int updateJemallocBgThread(int val, int prev, char **err) { static int updateReplBacklogSize(long long val, long long prev, char **err) { /* resizeReplicationBacklog sets server.repl_backlog_size, and relies on - * being able to tell when the size changes, so restore prev becore calling it. */ + * being able to tell when the size changes, so restore prev before calling it. 
*/ UNUSED(err); server.repl_backlog_size = prev; resizeReplicationBacklog(val); diff --git a/src/config.h b/src/config.h index e807b9330..f9ec7e44a 100644 --- a/src/config.h +++ b/src/config.h @@ -166,7 +166,7 @@ void setproctitle(const char *fmt, ...); #endif /* BYTE_ORDER */ /* Sometimes after including an OS-specific header that defines the - * endianess we end with __BYTE_ORDER but not with BYTE_ORDER that is what + * endianness we end with __BYTE_ORDER but not with BYTE_ORDER that is what * the Redis code uses. In this case let's define everything without the * underscores. */ #ifndef BYTE_ORDER diff --git a/src/connection.h b/src/connection.h index 85585a3d0..e00d2ea17 100644 --- a/src/connection.h +++ b/src/connection.h @@ -106,7 +106,7 @@ static inline int connAccept(connection *conn, ConnectionCallbackFunc accept_han } /* Establish a connection. The connect_handler will be called when the connection - * is established, or if an error has occured. + * is established, or if an error has occurred. * * The connection handler will be responsible to set up any read/write handlers * as needed. @@ -168,7 +168,7 @@ static inline int connSetReadHandler(connection *conn, ConnectionCallbackFunc fu /* Set a write handler, and possibly enable a write barrier, this flag is * cleared when write handler is changed or removed. - * With barroer enabled, we never fire the event if the read handler already + * With barrier enabled, we never fire the event if the read handler already * fired in the same event loop iteration. Useful when you want to persist * things to disk before sending replies, and want to do that in a group fashion. */ static inline int connSetWriteHandlerWithBarrier(connection *conn, ConnectionCallbackFunc func, int barrier) { diff --git a/src/db.c b/src/db.c index 19b2c48e4..9efda0907 100644 --- a/src/db.c +++ b/src/db.c @@ -116,7 +116,7 @@ robj *lookupKeyReadWithFlags(redisDb *db, robj *key, int flags) { * However, if the command caller is not the master, and as additional * safety measure, the command invoked is a read-only command, we can * safely return NULL here, and provide a more consistent behavior - * to clients accessign expired values in a read-only fashion, that + * to clients accessing expired values in a read-only fashion, that * will say the key as non existing. * * Notably this covers GETs when slaves are used to scale reads. */ @@ -374,7 +374,7 @@ robj *dbUnshareStringValue(redisDb *db, robj *key, robj *o) { * firing module events. * and the function to return ASAP. * - * On success the fuction returns the number of keys removed from the + * On success the function returns the number of keys removed from the * database(s). Otherwise -1 is returned in the specific case the * DB number is out of range, and errno is set to EINVAL. */ long long emptyDbGeneric(redisDb *dbarray, int dbnum, int flags, void(callback)(void*)) { @@ -866,7 +866,7 @@ void scanGenericCommand(client *c, robj *o, unsigned long cursor) { /* Filter element if it is an expired key. */ if (!filter && o == NULL && expireIfNeeded(c->db, kobj)) filter = 1; - /* Remove the element and its associted value if needed. */ + /* Remove the element and its associated value if needed. */ if (filter) { decrRefCount(kobj); listDelNode(keys, node); @@ -1367,7 +1367,7 @@ int *getKeysUsingCommandTable(struct redisCommand *cmd,robj **argv, int argc, in /* Return all the arguments that are keys in the command passed via argc / argv. 
* * The command returns the positions of all the key arguments inside the array, - * so the actual return value is an heap allocated array of integers. The + * so the actual return value is a heap allocated array of integers. The * length of the array is returned by reference into *numkeys. * * 'cmd' must be point to the corresponding entry into the redisCommand diff --git a/src/debug.c b/src/debug.c index 4831c4d74..921c681a5 100644 --- a/src/debug.c +++ b/src/debug.c @@ -387,7 +387,7 @@ void debugCommand(client *c) { "OOM -- Crash the server simulating an out-of-memory error.", "PANIC -- Crash the server simulating a panic.", "POPULATE [prefix] [size] -- Create string keys named key:. If a prefix is specified is used instead of the 'key' prefix.", -"RELOAD [MERGE] [NOFLUSH] [NOSAVE] -- Save the RDB on disk and reload it back in memory. By default it will save the RDB file and load it back. With the NOFLUSH option the current database is not removed before loading the new one, but conficts in keys will kill the server with an exception. When MERGE is used, conflicting keys will be loaded (the key in the loaded RDB file will win). When NOSAVE is used, the server will not save the current dataset in the RDB file before loading. Use DEBUG RELOAD NOSAVE when you want just to load the RDB file you placed in the Redis working directory in order to replace the current dataset in memory. Use DEBUG RELOAD NOSAVE NOFLUSH MERGE when you want to add what is in the current RDB file placed in the Redis current directory, with the current memory content. Use DEBUG RELOAD when you want to verify Redis is able to persist the current dataset in the RDB file, flush the memory content, and load it back.", +"RELOAD [MERGE] [NOFLUSH] [NOSAVE] -- Save the RDB on disk and reload it back in memory. By default it will save the RDB file and load it back. With the NOFLUSH option the current database is not removed before loading the new one, but conflicts in keys will kill the server with an exception. When MERGE is used, conflicting keys will be loaded (the key in the loaded RDB file will win). When NOSAVE is used, the server will not save the current dataset in the RDB file before loading. Use DEBUG RELOAD NOSAVE when you want just to load the RDB file you placed in the Redis working directory in order to replace the current dataset in memory. Use DEBUG RELOAD NOSAVE NOFLUSH MERGE when you want to add what is in the current RDB file placed in the Redis current directory, with the current memory content. Use DEBUG RELOAD when you want to verify Redis is able to persist the current dataset in the RDB file, flush the memory content, and load it back.", "RESTART -- Graceful restart: save config, db, restart.", "SDSLEN -- Show low level SDS string info representing key and value.", "SEGFAULT -- Crash the server with sigsegv.", @@ -456,7 +456,7 @@ NULL } } - /* The default beahvior is to save the RDB file before loading + /* The default behavior is to save the RDB file before loading * it back. */ if (save) { rdbSaveInfo rsi, *rsiptr; @@ -1449,7 +1449,7 @@ void logCurrentClient(void) { #define MEMTEST_MAX_REGIONS 128 -/* A non destructive memory test executed during segfauls. */ +/* A non destructive memory test executed during segfault. 
*/ int memtest_test_linux_anonymous_maps(void) { FILE *fp; char line[1024]; diff --git a/src/defrag.c b/src/defrag.c index 07a16ca6c..04ade30ea 100644 --- a/src/defrag.c +++ b/src/defrag.c @@ -47,11 +47,11 @@ int je_get_defrag_hint(void* ptr); /* forward declarations*/ void defragDictBucketCallback(void *privdata, dictEntry **bucketref); -dictEntry* replaceSateliteDictKeyPtrAndOrDefragDictEntry(dict *d, sds oldkey, sds newkey, uint64_t hash, long *defragged); +dictEntry* replaceSatelliteDictKeyPtrAndOrDefragDictEntry(dict *d, sds oldkey, sds newkey, uint64_t hash, long *defragged); /* Defrag helper for generic allocations. * - * returns NULL in case the allocatoin wasn't moved. + * returns NULL in case the allocation wasn't moved. * when it returns a non-null value, the old pointer was already released * and should NOT be accessed. */ void* activeDefragAlloc(void *ptr) { @@ -74,7 +74,7 @@ void* activeDefragAlloc(void *ptr) { /*Defrag helper for sds strings * - * returns NULL in case the allocatoin wasn't moved. + * returns NULL in case the allocation wasn't moved. * when it returns a non-null value, the old pointer was already released * and should NOT be accessed. */ sds activeDefragSds(sds sdsptr) { @@ -90,7 +90,7 @@ sds activeDefragSds(sds sdsptr) { /* Defrag helper for robj and/or string objects * - * returns NULL in case the allocatoin wasn't moved. + * returns NULL in case the allocation wasn't moved. * when it returns a non-null value, the old pointer was already released * and should NOT be accessed. */ robj *activeDefragStringOb(robj* ob, long *defragged) { @@ -130,11 +130,11 @@ robj *activeDefragStringOb(robj* ob, long *defragged) { } /* Defrag helper for dictEntries to be used during dict iteration (called on - * each step). Teturns a stat of how many pointers were moved. */ + * each step). Returns a stat of how many pointers were moved. */ long dictIterDefragEntry(dictIterator *iter) { /* This function is a little bit dirty since it messes with the internals * of the dict and it's iterator, but the benefit is that it is very easy - * to use, and require no other chagnes in the dict. */ + * to use, and require no other changes in the dict. */ long defragged = 0; dictht *ht; /* Handle the next entry (if there is one), and update the pointer in the @@ -238,7 +238,7 @@ double *zslDefrag(zskiplist *zsl, double score, sds oldele, sds newele) { return NULL; } -/* Defrag helpler for sorted set. +/* Defrag helper for sorted set. * Defrag a single dict entry key name, and corresponding skiplist struct */ long activeDefragZsetEntry(zset *zs, dictEntry *de) { sds newsds; @@ -349,7 +349,7 @@ long activeDefragSdsListAndDict(list *l, dict *d, int dict_val_type) { if ((newsds = activeDefragSds(sdsele))) { /* When defragging an sds value, we need to update the dict key */ uint64_t hash = dictGetHash(d, newsds); - replaceSateliteDictKeyPtrAndOrDefragDictEntry(d, sdsele, newsds, hash, &defragged); + replaceSatelliteDictKeyPtrAndOrDefragDictEntry(d, sdsele, newsds, hash, &defragged); ln->value = newsds; defragged++; } @@ -385,7 +385,7 @@ long activeDefragSdsListAndDict(list *l, dict *d, int dict_val_type) { * moved. Return value is the the dictEntry if found, or NULL if not found. * NOTE: this is very ugly code, but it let's us avoid the complication of * doing a scan on another dict. 
 */ -dictEntry* replaceSateliteDictKeyPtrAndOrDefragDictEntry(dict *d, sds oldkey, sds newkey, uint64_t hash, long *defragged) { +dictEntry* replaceSatelliteDictKeyPtrAndOrDefragDictEntry(dict *d, sds oldkey, sds newkey, uint64_t hash, long *defragged) { dictEntry **deref = dictFindEntryRefByPtrAndHash(d, oldkey, hash); if (deref) { dictEntry *de = *deref; @@ -433,7 +433,7 @@ long activeDefragQuickListNodes(quicklist *ql) { } /* when the value has lots of elements, we want to handle it later and not as - * oart of the main dictionary scan. this is needed in order to prevent latency + * part of the main dictionary scan. this is needed in order to prevent latency * spikes when handling large items */ void defragLater(redisDb *db, dictEntry *kde) { sds key = sdsdup(dictGetKey(kde)); @@ -814,7 +814,7 @@ long defragKey(redisDb *db, dictEntry *de) { * I can't search in db->expires for that key after i already released * the pointer it holds it won't be able to do the string compare */ uint64_t hash = dictGetHash(db->dict, de->key); - replaceSateliteDictKeyPtrAndOrDefragDictEntry(db->expires, keysds, newsds, hash, &defragged); + replaceSatelliteDictKeyPtrAndOrDefragDictEntry(db->expires, keysds, newsds, hash, &defragged); } /* Try to defrag robj and / or string value. */ @@ -885,7 +885,7 @@ void defragScanCallback(void *privdata, const dictEntry *de) { server.stat_active_defrag_scanned++; } -/* Defrag scan callback for each hash table bicket, +/* Defrag scan callback for each hash table bucket, * used in order to defrag the dictEntry allocations. */ void defragDictBucketCallback(void *privdata, dictEntry **bucketref) { UNUSED(privdata); /* NOTE: this function is also used by both activeDefragCycle and scanLaterHash, etc. don't use privdata */ @@ -919,7 +919,7 @@ float getAllocatorFragmentation(size_t *out_frag_bytes) { return frag_pct; } -/* We may need to defrag other globals, one small allcation can hold a full allocator run. +/* We may need to defrag other globals, one small allocation can hold a full allocator run. * so although small, it is still important to defrag these */ long defragOtherGlobals() { long defragged = 0; @@ -1090,7 +1090,7 @@ void activeDefragCycle(void) { if (hasActiveChildProcess()) return; /* Defragging memory while there's a fork will just do damage. */ - /* Once a second, check if we the fragmentation justfies starting a scan + /* Once a second, check if the fragmentation justifies starting a scan * or making it more aggressive. */ run_with_period(1000) { computeDefragCycles(); @@ -1160,7 +1160,7 @@ void activeDefragCycle(void) { * (if we have a lot of pointers in one hash bucket or rehasing), * check if we reached the time limit. * But regardless, don't start a new db in this loop, this is because after - * the last db we call defragOtherGlobals, which must be done in once cycle */ + * the last db we call defragOtherGlobals, which must be done in one cycle */ if (!cursor || (++iterations > 16 || server.stat_active_defrag_hits - prev_defragged > 512 || server.stat_active_defrag_scanned - prev_scanned > 64)) { diff --git a/src/dict.c b/src/dict.c index 45aab66f9..6ebabbf8e 100644 --- a/src/dict.c +++ b/src/dict.c @@ -237,7 +237,9 @@ long long timeInMilliseconds(void) { return (((long long)tv.tv_sec)*1000)+(tv.tv_usec/1000); } -/* Rehash for an amount of time between ms milliseconds and ms+1 milliseconds */ +/* Rehash in ms+"delta" milliseconds. The value of "delta" is larger + * than 0, and is smaller than 1 in most cases.
The exact upper bound + * depends on the running time of dictRehash(d,100).*/ int dictRehashMilliseconds(dict *d, int ms) { long long start = timeInMilliseconds(); int rehashes = 0; @@ -749,7 +751,7 @@ unsigned int dictGetSomeKeys(dict *d, dictEntry **des, unsigned int count) { * this function instead what we do is to consider a "linear" range of the table * that may be constituted of N buckets with chains of different lengths * appearing one after the other. Then we report a random element in the range. - * In this way we smooth away the problem of different chain lenghts. */ + * In this way we smooth away the problem of different chain lengths. */ #define GETFAIR_NUM_ENTRIES 15 dictEntry *dictGetFairRandomKey(dict *d) { dictEntry *entries[GETFAIR_NUM_ENTRIES]; @@ -1119,7 +1121,7 @@ size_t _dictGetStatsHt(char *buf, size_t bufsize, dictht *ht, int tableid) { i, clvector[i], ((float)clvector[i]/ht->size)*100); } - /* Unlike snprintf(), teturn the number of characters actually written. */ + /* Unlike snprintf(), return the number of characters actually written. */ if (bufsize) buf[bufsize-1] = '\0'; return strlen(buf); } diff --git a/src/endianconv.c b/src/endianconv.c index f3b0b4730..918844e25 100644 --- a/src/endianconv.c +++ b/src/endianconv.c @@ -8,7 +8,7 @@ * to be backward compatible are still in big endian) because most of the * production environments are little endian, and we have a lot of conversions * in a few places because ziplists, intsets, zipmaps, need to be endian-neutral - * even in memory, since they are serialied on RDB files directly with a single + * even in memory, since they are serialized on RDB files directly with a single * write(2) without other additional steps. * * ---------------------------------------------------------------------------- diff --git a/src/evict.c b/src/evict.c index 0755acc0e..5d398c6c9 100644 --- a/src/evict.c +++ b/src/evict.c @@ -41,7 +41,7 @@ /* To improve the quality of the LRU approximation we take a set of keys * that are good candidate for eviction across freeMemoryIfNeeded() calls. * - * Entries inside the eviciton pool are taken ordered by idle time, putting + * Entries inside the eviction pool are taken ordered by idle time, putting * greater idle times to the right (ascending order). * * When an LFU policy is used instead, a reverse frequency indication is used @@ -242,7 +242,7 @@ void evictionPoolPopulate(int dbid, dict *sampledict, dict *keydict, struct evic /* Try to reuse the cached SDS string allocated in the pool entry, * because allocating and deallocating this object is costly * (according to the profiler, not my fantasy. Remember: - * premature optimizbla bla bla bla. */ + * premature optimization bla bla bla. */ int klen = sdslen(key); if (klen > EVPOOL_CACHED_SDS_SIZE) { pool[k].key = sdsdup(key); @@ -342,7 +342,7 @@ unsigned long LFUDecrAndReturn(robj *o) { } /* ---------------------------------------------------------------------------- - * The external API for eviction: freeMemroyIfNeeded() is called by the + * The external API for eviction: freeMemoryIfNeeded() is called by the * server when there is data to add in order to make space if needed. * --------------------------------------------------------------------------*/ @@ -441,7 +441,7 @@ int getMaxmemoryState(size_t *total, size_t *logical, size_t *tofree, float *lev * * The function returns C_OK if we are under the memory limit or if we * were over the limit, but the attempt to free memory was successful. 
- * Otehrwise if we are over the memory limit, but not enough memory + * Otherwise if we are over the memory limit, but not enough memory * was freed to return back under the limit, the function returns C_ERR. */ int freeMemoryIfNeeded(void) { int keys_freed = 0; diff --git a/src/expire.c b/src/expire.c index 1c4f71df3..85fd59fe2 100644 --- a/src/expire.c +++ b/src/expire.c @@ -97,7 +97,7 @@ int activeExpireCycleTryExpire(redisDb *db, dictEntry *de, long long now) { * conditions: * * If type is ACTIVE_EXPIRE_CYCLE_FAST the function will try to run a - * "fast" expire cycle that takes no longer than EXPIRE_FAST_CYCLE_DURATION + * "fast" expire cycle that takes no longer than ACTIVE_EXPIRE_CYCLE_FAST_DURATION * microseconds, and is not repeated again before the same amount of time. * The cycle will also refuse to run at all if the latest slow cycle did not * terminate because of a time limit condition. @@ -414,7 +414,7 @@ void expireSlaveKeys(void) { else dictDelete(slaveKeysWithExpire,keyname); - /* Stop conditions: found 3 keys we cna't expire in a row or + /* Stop conditions: found 3 keys we can't expire in a row or * time limit was reached. */ cycles++; if (noexpire > 3) break; @@ -466,7 +466,7 @@ size_t getSlaveKeyWithExpireCount(void) { * * Note: technically we should handle the case of a single DB being flushed * but it is not worth it since anyway race conditions using the same set - * of key names in a wriatable slave and in its master will lead to + * of key names in a writable slave and in its master will lead to * inconsistencies. This is just a best-effort thing we do. */ void flushSlaveKeysWithExpireList(void) { if (slaveKeysWithExpire) { @@ -490,7 +490,7 @@ int checkAlreadyExpired(long long when) { *----------------------------------------------------------------------------*/ /* This is the generic command implementation for EXPIRE, PEXPIRE, EXPIREAT - * and PEXPIREAT. Because the commad second argument may be relative or absolute + * and PEXPIREAT. Because the command second argument may be relative or absolute * the "basetime" argument is used to signal what the base time is (either 0 * for *AT variants of the command, or the current time for relative expires). * diff --git a/src/geo.c b/src/geo.c index 3e5d5f606..5c5054414 100644 --- a/src/geo.c +++ b/src/geo.c @@ -143,8 +143,8 @@ double extractUnitOrReply(client *c, robj *unit) { } /* Input Argument Helper. - * Extract the dinstance from the specified two arguments starting at 'argv' - * that shouldbe in the form: and return the dinstance in the + * Extract the distance from the specified two arguments starting at 'argv' + * that should be in the form: , and return the distance in the * specified unit on success. *conversions is populated with the coefficient * to use in order to convert meters to the unit. * @@ -788,7 +788,7 @@ void geoposCommand(client *c) { /* GEODIST key ele1 ele2 [unit] * - * Return the distance, in meters by default, otherwise accordig to "unit", + * Return the distance, in meters by default, otherwise according to "unit", * between points ele1 and ele2. If one or more elements are missing NULL * is returned. */ void geodistCommand(client *c) { diff --git a/src/geohash_helper.c b/src/geohash_helper.c index e23f17b4e..01fb2cb88 100644 --- a/src/geohash_helper.c +++ b/src/geohash_helper.c @@ -68,7 +68,7 @@ uint8_t geohashEstimateStepsByRadius(double range_meters, double lat) { } step -= 2; /* Make sure range is included in most of the base cases. */ - /* Wider range torwards the poles... 
Note: it is possible to do better + /* Wider range towards the poles... Note: it is possible to do better * than this approximation by computing the distance between meridians * at this latitude, but this does the trick for now. */ if (lat > 66 || lat < -66) { @@ -84,7 +84,7 @@ uint8_t geohashEstimateStepsByRadius(double range_meters, double lat) { /* Return the bounding box of the search area centered at latitude,longitude * having a radius of radius_meter. bounds[0] - bounds[2] is the minimum - * and maxium longitude, while bounds[1] - bounds[3] is the minimum and + * and maximum longitude, while bounds[1] - bounds[3] is the minimum and * maximum latitude. * * This function does not behave correctly with very large radius values, for diff --git a/src/hyperloglog.c b/src/hyperloglog.c index 721f492a1..d018e975e 100644 --- a/src/hyperloglog.c +++ b/src/hyperloglog.c @@ -36,9 +36,9 @@ /* The Redis HyperLogLog implementation is based on the following ideas: * - * * The use of a 64 bit hash function as proposed in [1], in order to don't - * limited to cardinalities up to 10^9, at the cost of just 1 additional - * bit per register. + * * The use of a 64 bit hash function as proposed in [1], in order to estimate + * cardinalities larger than 10^9, at the cost of just 1 additional bit per + * register. * * The use of 16384 6-bit registers for a great level of accuracy, using * a total of 12k per key. * * The use of the Redis string data type. No new type is introduced. @@ -279,7 +279,7 @@ static char *invalid_hll_err = "-INVALIDOBJ Corrupted HLL object detected\r\n"; * So we right shift of 0 bits (no shift in practice) and * left shift the next byte of 8 bits, even if we don't use it, * but this has the effect of clearing the bits so the result - * will not be affacted after the OR. + * will not be affected after the OR. * * ------------------------------------------------------------------------- * @@ -297,7 +297,7 @@ static char *invalid_hll_err = "-INVALIDOBJ Corrupted HLL object detected\r\n"; * |11000000| <- Our byte at b0 * +--------+ * - * To create a AND-mask to clear the bits about this position, we just + * To create an AND-mask to clear the bits about this position, we just * initialize the mask with the value 63, left shift it of "fs" bits, * and finally invert the result. * @@ -766,7 +766,7 @@ int hllSparseSet(robj *o, long index, uint8_t count) { * by a ZERO opcode with len > 1, or by an XZERO opcode. * * In those cases the original opcode must be split into multiple - * opcodes. The worst case is an XZERO split in the middle resuling into + * opcodes. The worst case is an XZERO split in the middle resulting into * XZERO - VAL - XZERO, so the resulting sequence max length is * 5 bytes. * @@ -899,7 +899,7 @@ promote: /* Promote to dense representation. */ * the element belongs to is incremented if needed. * * This function is actually a wrapper for hllSparseSet(), it only performs - * the hashshing of the elmenet to obtain the index and zeros run length. */ + * the hashshing of the element to obtain the index and zeros run length. 
 */ int hllSparseAdd(robj *o, unsigned char *ele, size_t elesize) { long index; uint8_t count = hllPatLen(ele,elesize,&index); @@ -1014,7 +1014,7 @@ uint64_t hllCount(struct hllhdr *hdr, int *invalid) { double m = HLL_REGISTERS; double E; int j; - /* Note that reghisto size could be just HLL_Q+2, becuase HLL_Q+1 is + /* Note that reghisto size could be just HLL_Q+2, because HLL_Q+1 is * the maximum frequency of the "000...1" sequence the hash function is * able to return. However it is slow to check for sanity of the * input: instead we history array at a safe size: overflows will diff --git a/src/latency.c b/src/latency.c index b5ccc7cc6..6148543c8 100644 --- a/src/latency.c +++ b/src/latency.c @@ -85,7 +85,7 @@ int THPGetAnonHugePagesSize(void) { /* ---------------------------- Latency API --------------------------------- */ /* Latency monitor initialization. We just need to create the dictionary - * of time series, each time serie is created on demand in order to avoid + * of time series, each time series is created on demand in order to avoid * having a fixed list to maintain. */ void latencyMonitorInit(void) { server.latency_events = dictCreate(&latencyTimeSeriesDictType,NULL); @@ -154,7 +154,7 @@ int latencyResetEvent(char *event_to_reset) { /* Analyze the samples available for a given event and return a structure * populate with different metrics, average, MAD, min, max, and so forth. - * Check latency.h definition of struct latenctStat for more info. + * Check latency.h definition of struct latencyStats for more info. * If the specified event has no elements the structure is populate with * zero values. */ void analyzeLatencyForEvent(char *event, struct latencyStats *ls) { @@ -343,7 +343,7 @@ sds createLatencyReport(void) { } if (!strcasecmp(event,"aof-fstat") || - !strcasecmp(event,"rdb-unlik-temp-file")) { + !strcasecmp(event,"rdb-unlink-temp-file")) { advise_disk_contention = 1; advise_local_disk = 1; advices += 2; @@ -396,7 +396,7 @@ sds createLatencyReport(void) { /* Better VM. */ report = sdscat(report,"\nI have a few advices for you:\n\n"); if (advise_better_vm) { - report = sdscat(report,"- If you are using a virtual machine, consider upgrading it with a faster one using an hypervisior that provides less latency during fork() calls. Xen is known to have poor fork() performance. Even in the context of the same VM provider, certain kinds of instances can execute fork faster than others.\n"); + report = sdscat(report,"- If you are using a virtual machine, consider upgrading it with a faster one using a hypervisor that provides less latency during fork() calls. Xen is known to have poor fork() performance. Even in the context of the same VM provider, certain kinds of instances can execute fork faster than others.\n"); } /* Slow log. */ @@ -416,7 +416,7 @@ sds createLatencyReport(void) { if (advise_scheduler) { report = sdscat(report,"- The system is slow to execute Redis code paths not containing system calls. This usually means the system does not provide Redis CPU time to run for long periods.
You should try to:\n" " 1) Lower the system load.\n" - " 2) Use a computer / VM just for Redis if you are running other softawre in the same system.\n" + " 2) Use a computer / VM just for Redis if you are running other software in the same system.\n" " 3) Check if you have a \"noisy neighbour\" problem.\n" " 4) Check with 'redis-cli --intrinsic-latency 100' what is the intrinsic latency in your system.\n" " 5) Check if the problem is allocator-related by recompiling Redis with MALLOC=libc, if you are using Jemalloc. However this may create fragmentation problems.\n"); @@ -432,7 +432,7 @@ sds createLatencyReport(void) { } if (advise_data_writeback) { - report = sdscat(report,"- Mounting ext3/4 filesystems with data=writeback can provide a performance boost compared to data=ordered, however this mode of operation provides less guarantees, and sometimes it can happen that after a hard crash the AOF file will have an half-written command at the end and will require to be repaired before Redis restarts.\n"); + report = sdscat(report,"- Mounting ext3/4 filesystems with data=writeback can provide a performance boost compared to data=ordered, however this mode of operation provides less guarantees, and sometimes it can happen that after a hard crash the AOF file will have a half-written command at the end and will require to be repaired before Redis restarts.\n"); } if (advise_disk_contention) { diff --git a/src/lazyfree.c b/src/lazyfree.c index cbcc1c240..821dc50df 100644 --- a/src/lazyfree.c +++ b/src/lazyfree.c @@ -15,7 +15,7 @@ size_t lazyfreeGetPendingObjectsCount(void) { /* Return the amount of work needed in order to free an object. * The return value is not always the actual number of allocations the - * object is compoesd of, but a number proportional to it. + * object is composed of, but a number proportional to it. * * For strings the function always returns 1. * @@ -137,7 +137,7 @@ void emptyDbAsync(redisDb *db) { } /* Empty the slots-keys map of Redis CLuster by creating a new empty one - * and scheduiling the old for lazy freeing. */ + * and scheduling the old for lazy freeing. */ void slotToKeyFlushAsync(void) { rax *old = server.cluster->slots_to_keys; @@ -156,7 +156,7 @@ void lazyfreeFreeObjectFromBioThread(robj *o) { } /* Release a database from the lazyfree thread. The 'db' pointer is the - * database which was substitutied with a fresh one in the main thread + * database which was substituted with a fresh one in the main thread * when the database was logically deleted. 'sl' is a skiplist used by * Redis Cluster in order to take the hash slots -> keys mapping. This * may be NULL if Redis Cluster is disabled. */ diff --git a/src/listpack.c b/src/listpack.c index 9e77ab12d..075552ccb 100644 --- a/src/listpack.c +++ b/src/listpack.c @@ -405,7 +405,7 @@ unsigned char *lpNext(unsigned char *lp, unsigned char *p) { } /* If 'p' points to an element of the listpack, calling lpPrev() will return - * the pointer to the preivous element (the one on the left), or NULL if 'p' + * the pointer to the previous element (the one on the left), or NULL if 'p' * already pointed to the first element of the listpack. 
*/ unsigned char *lpPrev(unsigned char *lp, unsigned char *p) { if (p-lp == LP_HDR_SIZE) return NULL; diff --git a/src/lolwut.c b/src/lolwut.c index 0e1552ba0..eebd5da6a 100644 --- a/src/lolwut.c +++ b/src/lolwut.c @@ -85,7 +85,7 @@ void lolwutCommand(client *c) { } /* ========================== LOLWUT Canvase =============================== - * Many LOWUT versions will likely print some computer art to the screen. + * Many LOLWUT versions will likely print some computer art to the screen. * This is the case with LOLWUT 5 and LOLWUT 6, so here there is a generic * canvas implementation that can be reused. */ @@ -106,7 +106,7 @@ void lwFreeCanvas(lwCanvas *canvas) { } /* Set a pixel to the specified color. Color is 0 or 1, where zero means no - * dot will be displyed, and 1 means dot will be displayed. + * dot will be displayed, and 1 means dot will be displayed. * Coordinates are arranged so that left-top corner is 0,0. You can write * out of the size of the canvas without issues. */ void lwDrawPixel(lwCanvas *canvas, int x, int y, int color) { diff --git a/src/lolwut5.c b/src/lolwut5.c index 5a9348800..d64e0bb27 100644 --- a/src/lolwut5.c +++ b/src/lolwut5.c @@ -156,7 +156,7 @@ void lolwut5Command(client *c) { return; /* Limits. We want LOLWUT to be always reasonably fast and cheap to execute - * so we have maximum number of columns, rows, and output resulution. */ + * so we have maximum number of columns, rows, and output resolution. */ if (cols < 1) cols = 1; if (cols > 1000) cols = 1000; if (squares_per_row < 1) squares_per_row = 1; diff --git a/src/lzfP.h b/src/lzfP.h index 93c27b42d..78c858fad 100644 --- a/src/lzfP.h +++ b/src/lzfP.h @@ -127,7 +127,7 @@ /* * Whether to store pointers or offsets inside the hash table. On - * 64 bit architetcures, pointers take up twice as much space, + * 64 bit architectures, pointers take up twice as much space, * and might also be slower. Default is to autodetect. */ /*#define LZF_USER_OFFSETS autodetect */ diff --git a/src/module.c b/src/module.c index f293d6a6c..bd75c8f92 100644 --- a/src/module.c +++ b/src/module.c @@ -46,7 +46,7 @@ typedef struct RedisModuleInfoCtx { sds info; /* info string we collected so far */ int sections; /* number of sections we collected so far */ int in_section; /* indication if we're in an active section or not */ - int in_dict_field; /* indication that we're curreintly appending to a dict */ + int in_dict_field; /* indication that we're currently appending to a dict */ } RedisModuleInfoCtx; typedef void (*RedisModuleInfoFunc)(RedisModuleInfoCtx *ctx, int for_crash_report); @@ -906,10 +906,21 @@ int RM_SignalModifiedKey(RedisModuleCtx *ctx, RedisModuleString *keyname) { * Automatic memory management for modules * -------------------------------------------------------------------------- */ -/* Enable automatic memory management. See API.md for more information. +/* Enable automatic memory management. * * The function must be called as the first function of a command implementation - * that wants to use automatic memory. */ + * that wants to use automatic memory. + * + * When enabled, automatic memory management tracks and automatically frees + * keys, call replies and Redis string objects once the command returns. 
In most + * cases this eliminates the need of calling the following functions: + * + * 1) RedisModule_CloseKey() + * 2) RedisModule_FreeCallReply() + * 3) RedisModule_FreeString() + * + * These functions can still be used with automatic memory management enabled, + * to optimize loops that make numerous allocations for example. */ void RM_AutoMemory(RedisModuleCtx *ctx) { ctx->flags |= REDISMODULE_CTX_AUTO_MEMORY; } @@ -1045,7 +1056,7 @@ RedisModuleString *RM_CreateStringFromLongLong(RedisModuleCtx *ctx, long long ll } /* Like RedisModule_CreatString(), but creates a string starting from a double - * integer instead of taking a buffer and its length. + * instead of taking a buffer and its length. * * The returned string must be released with RedisModule_FreeString() or by * enabling automatic memory management. */ @@ -1922,7 +1933,7 @@ int RM_GetContextFlags(RedisModuleCtx *ctx) { flags |= REDISMODULE_CTX_FLAGS_LUA; if (ctx->client->flags & CLIENT_MULTI) flags |= REDISMODULE_CTX_FLAGS_MULTI; - /* Module command recieved from MASTER, is replicated. */ + /* Module command received from MASTER, is replicated. */ if (ctx->client->flags & CLIENT_MASTER) flags |= REDISMODULE_CTX_FLAGS_REPLICATED; } @@ -2921,7 +2932,7 @@ int RM_HashSet(RedisModuleKey *key, int flags, ...) { /* Get fields from an hash value. This function is called using a variable * number of arguments, alternating a field name (as a StringRedisModule * pointer) with a pointer to a StringRedisModule pointer, that is set to the - * value of the field if the field exist, or NULL if the field did not exist. + * value of the field if the field exists, or NULL if the field does not exist. * At the end of the field/value-ptr pairs, NULL must be specified as last * argument to signal the end of the arguments in the variadic function. * @@ -3040,7 +3051,7 @@ void moduleParseCallReply_SimpleString(RedisModuleCallReply *reply); void moduleParseCallReply_Array(RedisModuleCallReply *reply); /* Do nothing if REDISMODULE_REPLYFLAG_TOPARSE is false, otherwise - * use the protcol of the reply in reply->proto in order to fill the + * use the protocol of the reply in reply->proto in order to fill the * reply with parsed data according to the reply type. */ void moduleParseCallReply(RedisModuleCallReply *reply) { if (!(reply->flags & REDISMODULE_REPLYFLAG_TOPARSE)) return; @@ -3599,7 +3610,7 @@ void moduleTypeNameByID(char *name, uint64_t moduleid) { /* Register a new data type exported by the module. The parameters are the * following. Please for in depth documentation check the modules API - * documentation, especially the TYPES.md file. + * documentation, especially https://redis.io/topics/modules-native-types. * * * **name**: A 9 characters data type name that MUST be unique in the Redis * Modules ecosystem. Be creative... and there will be no collisions. Use @@ -3646,7 +3657,7 @@ void moduleTypeNameByID(char *name, uint64_t moduleid) { * * **aux_load**: A callback function pointer that loads out of keyspace data from RDB files. * Similar to aux_save, returns REDISMODULE_OK on success, and ERR otherwise. * - * The **digest* and **mem_usage** methods should currently be omitted since + * The **digest** and **mem_usage** methods should currently be omitted since * they are not yet implemented inside the Redis modules core. * * Note: the module name "AAAAAAAAA" is reserved and produces an error, it @@ -3656,7 +3667,7 @@ void moduleTypeNameByID(char *name, uint64_t moduleid) { * and if the module name or encver is invalid, NULL is returned. 
* Otherwise the new type is registered into Redis, and a reference of * type RedisModuleType is returned: the caller of the function should store - * this reference into a gobal variable to make future use of it in the + * this reference into a global variable to make future use of it in the * modules type API, since a single module may register multiple types. * Example code fragment: * @@ -3738,7 +3749,7 @@ moduleType *RM_ModuleTypeGetType(RedisModuleKey *key) { /* Assuming RedisModule_KeyType() returned REDISMODULE_KEYTYPE_MODULE on * the key, returns the module type low-level value stored at key, as - * it was set by the user via RedisModule_ModuleTypeSet(). + * it was set by the user via RedisModule_ModuleTypeSetValue(). * * If the key is NULL, is not associated with a module type, or is empty, * then NULL is returned instead. */ @@ -3795,7 +3806,7 @@ int moduleAllDatatypesHandleErrors() { /* Returns true if any previous IO API failed. * for Load* APIs the REDISMODULE_OPTIONS_HANDLE_IO_ERRORS flag must be set with - * RediModule_SetModuleOptions first. */ + * RedisModule_SetModuleOptions first. */ int RM_IsIOError(RedisModuleIO *io) { return io->error; } @@ -3928,7 +3939,7 @@ RedisModuleString *RM_LoadString(RedisModuleIO *io) { * * The size of the string is stored at '*lenptr' if not NULL. * The returned string is not automatically NULL terminated, it is loaded - * exactly as it was stored inisde the RDB file. */ + * exactly as it was stored inside the RDB file. */ char *RM_LoadStringBuffer(RedisModuleIO *io, size_t *lenptr) { return moduleLoadString(io,1,lenptr); } @@ -4517,14 +4528,14 @@ int moduleTryServeClientBlockedOnKey(client *c, robj *key) { * * The callbacks are called in the following contexts: * - * reply_callback: called after a successful RedisModule_UnblockClient() - * call in order to reply to the client and unblock it. + * reply_callback: called after a successful RedisModule_UnblockClient() + * call in order to reply to the client and unblock it. * - * reply_timeout: called when the timeout is reached in order to send an - * error to the client. + * timeout_callback: called when the timeout is reached in order to send an + * error to the client. * - * free_privdata: called in order to free the private data that is passed - * by RedisModule_UnblockClient() call. + * free_privdata: called in order to free the private data that is passed + * by RedisModule_UnblockClient() call. * * Note: RedisModule_UnblockClient should be called for every blocked client, * even if client was killed, timed-out or disconnected. Failing to do so @@ -4547,13 +4558,13 @@ RedisModuleBlockedClient *RM_BlockClient(RedisModuleCtx *ctx, RedisModuleCmdFunc * once certain keys become "ready", that is, contain more data. * * Basically this is similar to what a typical Redis command usually does, - * like BLPOP or ZPOPMAX: the client blocks if it cannot be served ASAP, + * like BLPOP or BZPOPMAX: the client blocks if it cannot be served ASAP, * and later when the key receives new data (a list push for instance), the * client is unblocked and served. * * However in the case of this module API, when the client is unblocked? * - * 1. If you block ok a key of a type that has blocking operations associated, + * 1. If you block on a key of a type that has blocking operations associated, * like a list, a sorted set, a stream, and so forth, the client may be * unblocked once the relevant key is targeted by an operation that normally * unblocks the native blocking operations for that type. 
So if we block @@ -4948,7 +4959,7 @@ void moduleReleaseGIL(void) { /* Subscribe to keyspace notifications. This is a low-level version of the * keyspace-notifications API. A module can register callbacks to be notified - * when keyspce events occur. + * when keyspace events occur. * * Notification events are filtered by their type (string events, set events, * etc), and the subscriber callback receives only events that match a specific @@ -5659,7 +5670,7 @@ int RM_AuthenticateClientWithACLUser(RedisModuleCtx *ctx, const char *name, size /* Deauthenticate and close the client. The client resources will not be * be immediately freed, but will be cleaned up in a background job. This is * the recommended way to deauthenicate a client since most clients can't - * handle users becomming deauthenticated. Returns REDISMODULE_ERR when the + * handle users becoming deauthenticated. Returns REDISMODULE_ERR when the * client doesn't exist and REDISMODULE_OK when the operation was successful. * * The client ID is returned from the RM_AuthenticateClientWithUser and @@ -5779,14 +5790,14 @@ int RM_DictDel(RedisModuleDict *d, RedisModuleString *key, void *oldval) { return RM_DictDelC(d,key->ptr,sdslen(key->ptr),oldval); } -/* Return an interator, setup in order to start iterating from the specified +/* Return an iterator, setup in order to start iterating from the specified * key by applying the operator 'op', which is just a string specifying the * comparison operator to use in order to seek the first element. The - * operators avalable are: + * operators available are: * * "^" -- Seek the first (lexicographically smaller) key. * "$" -- Seek the last (lexicographically biffer) key. - * ">" -- Seek the first element greter than the specified key. + * ">" -- Seek the first element greater than the specified key. * ">=" -- Seek the first element greater or equal than the specified key. * "<" -- Seek the first element smaller than the specified key. * "<=" -- Seek the first element smaller or equal than the specified key. @@ -5913,7 +5924,7 @@ RedisModuleString *RM_DictPrev(RedisModuleCtx *ctx, RedisModuleDictIter *di, voi * in the loop, as we iterate elements, we can also check if we are still * on range. * - * The function returne REDISMODULE_ERR if the iterator reached the + * The function return REDISMODULE_ERR if the iterator reached the * end of elements condition as well. */ int RM_DictCompareC(RedisModuleDictIter *di, const char *op, void *key, size_t keylen) { if (raxEOF(&di->ri)) return REDISMODULE_ERR; @@ -6294,7 +6305,7 @@ int RM_ExportSharedAPI(RedisModuleCtx *ctx, const char *apiname, void *func) { * command that requires external APIs: if some API cannot be resolved, the * command should return an error. * - * Here is an exmaple: + * Here is an example: * * int ... myCommandImplementation() { * if (getExternalAPIs() == 0) { @@ -6680,7 +6691,7 @@ void RM_ScanCursorDestroy(RedisModuleScanCursor *cursor) { * RedisModule_ScanCursorDestroy(c); * * It is also possible to use this API from another thread while the lock - * is acquired durring the actuall call to RM_Scan: + * is acquired during the actuall call to RM_Scan: * * RedisModuleCursor *c = RedisModule_ScanCursorCreate(); * RedisModule_ThreadSafeContextLock(ctx); @@ -6694,7 +6705,7 @@ void RM_ScanCursorDestroy(RedisModuleScanCursor *cursor) { * The function will return 1 if there are more elements to scan and * 0 otherwise, possibly setting errno if the call failed. 
* - * It is also possible to restart and existing cursor using RM_CursorRestart. + * It is also possible to restart an existing cursor using RM_ScanCursorRestart. * * IMPORTANT: This API is very similar to the Redis SCAN command from the * point of view of the guarantees it provides. This means that the API @@ -6708,7 +6719,7 @@ void RM_ScanCursorDestroy(RedisModuleScanCursor *cursor) { * Moreover playing with the Redis keyspace while iterating may have the * effect of returning more duplicates. A safe pattern is to store the keys * names you want to modify elsewhere, and perform the actions on the keys - * later when the iteration is complete. Howerver this can cost a lot of + * later when the iteration is complete. However this can cost a lot of * memory, so it may make sense to just operate on the current key when * possible during the iteration, given that this is safe. */ int RM_Scan(RedisModuleCtx *ctx, RedisModuleScanCursor *cursor, RedisModuleScanCB fn, void *privdata) { @@ -6773,8 +6784,8 @@ static void moduleScanKeyCallback(void *privdata, const dictEntry *de) { * RedisModule_CloseKey(key); * RedisModule_ScanCursorDestroy(c); * - * It is also possible to use this API from another thread while the lock is acquired durring - * the actuall call to RM_Scan, and re-opening the key each time: + * It is also possible to use this API from another thread while the lock is acquired during + * the actuall call to RM_ScanKey, and re-opening the key each time: * RedisModuleCursor *c = RedisModule_ScanCursorCreate(); * RedisModule_ThreadSafeContextLock(ctx); * RedisModuleKey *key = RedisModule_OpenKey(...) @@ -6790,7 +6801,7 @@ static void moduleScanKeyCallback(void *privdata, const dictEntry *de) { * * The function will return 1 if there are more elements to scan and 0 otherwise, * possibly setting errno if the call failed. - * It is also possible to restart and existing cursor using RM_CursorRestart. + * It is also possible to restart an existing cursor using RM_ScanCursorRestart. * * NOTE: Certain operations are unsafe while iterating the object. For instance * while the API guarantees to return at least one time all the elements that @@ -6943,7 +6954,7 @@ int TerminateModuleForkChild(int child_pid, int wait) { } /* Can be used to kill the forked child process from the parent process. - * child_pid whould be the return value of RedisModule_Fork. */ + * child_pid would be the return value of RedisModule_Fork. */ int RM_KillForkChild(int child_pid) { /* Kill module child, wait for child exit. */ if (TerminateModuleForkChild(child_pid,1) == C_OK) @@ -7081,7 +7092,7 @@ void ModuleForkDoneHandler(int exitcode, int bysignal) { * REDISMODULE_SUBEVENT_LOADING_FAILED * * Note that AOF loading may start with an RDB data in case of - * rdb-preamble, in which case you'll only recieve an AOF_START event. + * rdb-preamble, in which case you'll only receive an AOF_START event. * * * RedisModuleEvent_ClientChange @@ -7103,7 +7114,7 @@ void ModuleForkDoneHandler(int exitcode, int bysignal) { * This event is called when the instance (that can be both a * master or a replica) get a new online replica, or lose a * replica since it gets disconnected. - * The following sub events are availble: + * The following sub events are available: * * REDISMODULE_SUBEVENT_REPLICA_CHANGE_ONLINE * REDISMODULE_SUBEVENT_REPLICA_CHANGE_OFFLINE @@ -7141,7 +7152,7 @@ void ModuleForkDoneHandler(int exitcode, int bysignal) { * RedisModuleEvent_ModuleChange * * This event is called when a new module is loaded or one is unloaded. 
- * The following sub events are availble: + * The following sub events are available: * * REDISMODULE_SUBEVENT_MODULE_LOADED * REDISMODULE_SUBEVENT_MODULE_UNLOADED @@ -7168,7 +7179,7 @@ void ModuleForkDoneHandler(int exitcode, int bysignal) { * int32_t progress; // Approximate progress between 0 and 1024, * or -1 if unknown. * - * The function returns REDISMODULE_OK if the module was successfully subscrived + * The function returns REDISMODULE_OK if the module was successfully subscribed * for the specified event. If the API is called from a wrong context then * REDISMODULE_ERR is returned. */ int RM_SubscribeToServerEvent(RedisModuleCtx *ctx, RedisModuleEvent event, RedisModuleEventCallback callback) { @@ -7364,7 +7375,7 @@ void moduleInitModulesSystem(void) { server.loadmodule_queue = listCreate(); modules = dictCreate(&modulesDictType,NULL); - /* Set up the keyspace notification susbscriber list and static client */ + /* Set up the keyspace notification subscriber list and static client */ moduleKeyspaceSubscribers = listCreate(); moduleFreeContextReusedClient = createClient(NULL); moduleFreeContextReusedClient->flags |= CLIENT_MODULE; @@ -7728,7 +7739,7 @@ size_t moduleCount(void) { return dictSize(modules); } -/* Set the key last access time for LRU based eviction. not relevent if the +/* Set the key last access time for LRU based eviction. not relevant if the * servers's maxmemory policy is LFU based. Value is idle time in milliseconds. * returns REDISMODULE_OK if the LRU was updated, REDISMODULE_ERR otherwise. */ int RM_SetLRU(RedisModuleKey *key, mstime_t lru_idle) { diff --git a/src/modules/hellodict.c b/src/modules/hellodict.c index 651615b03..1428a1381 100644 --- a/src/modules/hellodict.c +++ b/src/modules/hellodict.c @@ -125,7 +125,7 @@ int RedisModule_OnLoad(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) cmd_KEYRANGE,"readonly",1,1,0) == REDISMODULE_ERR) return REDISMODULE_ERR; - /* Create our global dictionray. Here we'll set our keys and values. */ + /* Create our global dictionary. Here we'll set our keys and values. */ Keyspace = RedisModule_CreateDict(NULL); return REDISMODULE_OK; diff --git a/src/modules/helloworld.c b/src/modules/helloworld.c index 3b00dea77..043f5be32 100644 --- a/src/modules/helloworld.c +++ b/src/modules/helloworld.c @@ -91,7 +91,7 @@ int HelloPushCall_RedisCommand(RedisModuleCtx *ctx, RedisModuleString **argv, in } /* HELLO.PUSH.CALL2 - * This is exaxctly as HELLO.PUSH.CALL, but shows how we can reply to the + * This is exactly as HELLO.PUSH.CALL, but shows how we can reply to the * client using directly a reply object that Call() returned. */ int HelloPushCall2_RedisCommand(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) { @@ -345,7 +345,7 @@ int HelloToggleCase_RedisCommand(RedisModuleCtx *ctx, RedisModuleString **argv, /* HELLO.MORE.EXPIRE key milliseconds. * - * If they key has already an associated TTL, extends it by "milliseconds" + * If the key has already an associated TTL, extends it by "milliseconds" * milliseconds. Otherwise no operation is performed. */ int HelloMoreExpire_RedisCommand(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) { RedisModule_AutoMemory(ctx); /* Use automatic memory management. */ diff --git a/src/multi.c b/src/multi.c index a99c308be..3ce6d60ec 100644 --- a/src/multi.c +++ b/src/multi.c @@ -87,7 +87,7 @@ void discardTransaction(client *c) { unwatchAllKeys(c); } -/* Flag the transacation as DIRTY_EXEC so that EXEC will fail. +/* Flag the transaction as DIRTY_EXEC so that EXEC will fail. 
* Should be called every time there is an error while queueing a command. */ void flagTransaction(client *c) { if (c->flags & CLIENT_MULTI) diff --git a/src/networking.c b/src/networking.c index 495be0ece..0d290e169 100644 --- a/src/networking.c +++ b/src/networking.c @@ -170,7 +170,7 @@ client *createClient(connection *conn) { return c; } -/* This funciton puts the client in the queue of clients that should write +/* This function puts the client in the queue of clients that should write * their output buffers to the socket. Note that it does not *yet* install * the write handler, to start clients are put in a queue of clients that need * to write, so we try to do that before returning in the event loop (see the @@ -268,7 +268,7 @@ void _addReplyProtoToList(client *c, const char *s, size_t len) { listNode *ln = listLast(c->reply); clientReplyBlock *tail = ln? listNodeValue(ln): NULL; - /* Note that 'tail' may be NULL even if we have a tail node, becuase when + /* Note that 'tail' may be NULL even if we have a tail node, because when * addReplyDeferredLen() is used, it sets a dummy node to NULL just * fo fill it later, when the size of the bulk length is set. */ @@ -1161,7 +1161,7 @@ void freeClient(client *c) { listDelNode(server.clients_to_close,ln); } - /* If it is our master that's beging disconnected we should make sure + /* If it is our master that's being disconnected we should make sure * to cache the state to try a partial resynchronization later. * * Note that before doing this we make sure that the client is not in @@ -1491,7 +1491,7 @@ void resetClient(client *c) { } } -/* This funciton is used when we want to re-enter the event loop but there +/* This function is used when we want to re-enter the event loop but there * is the risk that the client we are dealing with will be freed in some * way. This happens for instance in: * @@ -2050,7 +2050,7 @@ char *getClientPeerId(client *c) { return c->peerid; } -/* Concatenate a string representing the state of a client in an human +/* Concatenate a string representing the state of a client in a human * readable format, into the sds string 's'. */ sds catClientInfoString(sds s, client *client) { char flags[16], events[3], conninfo[CONN_INFO_LEN], *p; @@ -3050,7 +3050,7 @@ void stopThreadedIO(void) { * we need to handle in parallel, however the I/O threading is disabled * globally for reads as well if we have too little pending clients. * - * The function returns 0 if the I/O threading should be used becuase there + * The function returns 0 if the I/O threading should be used because there * are enough active threads, otherwise 1 is returned and the I/O threads * could be possibly stopped (if already active) as a side effect. */ int stopThreadedIOIfNeeded(void) { diff --git a/src/notify.c b/src/notify.c index bb1055724..5c7634bce 100644 --- a/src/notify.c +++ b/src/notify.c @@ -62,7 +62,7 @@ int keyspaceEventsStringToFlags(char *classes) { return flags; } -/* This function does exactly the revese of the function above: it gets +/* This function does exactly the reverse of the function above: it gets * as input an integer with the xored flags and returns a string representing * the selected classes. The string returned is an sds string that needs to * be released with sdsfree(). */ diff --git a/src/object.c b/src/object.c index 1bc400e85..f8775ea97 100644 --- a/src/object.c +++ b/src/object.c @@ -126,7 +126,7 @@ robj *createStringObject(const char *ptr, size_t len) { /* Create a string object from a long long value. 
When possible returns a * shared integer object, or at least an integer encoded one. * - * If valueobj is non zero, the function avoids returning a a shared + * If valueobj is non zero, the function avoids returning a shared * integer, because the object is going to be used as value in the Redis key * space (for instance when the INCR command is used), so we want LFU/LRU * values specific for each key. */ @@ -1223,7 +1223,7 @@ robj *objectCommandLookupOrReply(client *c, robj *key, robj *reply) { return o; } -/* Object command allows to inspect the internals of an Redis Object. +/* Object command allows to inspect the internals of a Redis Object. * Usage: OBJECT */ void objectCommand(client *c) { robj *o; diff --git a/src/quicklist.h b/src/quicklist.h index 8b553c119..fd9878af0 100644 --- a/src/quicklist.h +++ b/src/quicklist.h @@ -40,7 +40,7 @@ * count: 16 bits, max 65536 (max zl bytes is 65k, so max count actually < 32k). * encoding: 2 bits, RAW=1, LZF=2. * container: 2 bits, NONE=1, ZIPLIST=2. - * recompress: 1 bit, bool, true if node is temporarry decompressed for usage. + * recompress: 1 bit, bool, true if node is temporary decompressed for usage. * attempted_compress: 1 bit, boolean, used for verifying during testing. * extra: 10 bits, free for future use; pads out the remainder of 32 bits */ typedef struct quicklistNode { @@ -97,7 +97,7 @@ typedef struct quicklistBookmark { /* quicklist is a 40 byte struct (on 64-bit systems) describing a quicklist. * 'count' is the number of total entries. * 'len' is the number of quicklist nodes. - * 'compress' is: -1 if compression disabled, otherwise it's the number + * 'compress' is: 0 if compression disabled, otherwise it's the number * of quicklistNodes to leave uncompressed at ends of quicklist. * 'fill' is the user-requested (or default) fill factor. * 'bookmakrs are an optional feature that is used by realloc this struct, diff --git a/src/rax.c b/src/rax.c index c8a1fb6b4..5768071c0 100644 --- a/src/rax.c +++ b/src/rax.c @@ -628,7 +628,7 @@ int raxGenericInsert(rax *rax, unsigned char *s, size_t len, void *data, void ** * * 3b. IF $SPLITPOS != 0: * Trim the compressed node (reallocating it as well) in order to - * contain $splitpos characters. Change chilid pointer in order to link + * contain $splitpos characters. Change child pointer in order to link * to the split node. If new compressed node len is just 1, set * iscompr to 0 (layout is the same). Fix parent's reference. * @@ -1082,7 +1082,7 @@ int raxRemove(rax *rax, unsigned char *s, size_t len, void **old) { } } else if (h->size == 1) { /* If the node had just one child, after the removal of the key - * further compression with adjacent nodes is pontentially possible. */ + * further compression with adjacent nodes is potentially possible. */ trycompress = 1; } @@ -1329,7 +1329,7 @@ int raxIteratorNextStep(raxIterator *it, int noup) { if (!noup && children) { debugf("GO DEEPER\n"); /* Seek the lexicographically smaller key in this subtree, which - * is the first one found always going torwards the first child + * is the first one found always going towards the first child * of every successive node. 
*/ if (!raxStackPush(&it->stack,it->node)) return 0; raxNode **cp = raxNodeFirstChildPtr(it->node); @@ -1348,7 +1348,7 @@ int raxIteratorNextStep(raxIterator *it, int noup) { return 1; } } else { - /* If we finished exporing the previous sub-tree, switch to the + /* If we finished exploring the previous sub-tree, switch to the * new one: go upper until a node is found where there are * children representing keys lexicographically greater than the * current key. */ @@ -1510,7 +1510,7 @@ int raxIteratorPrevStep(raxIterator *it, int noup) { int raxSeek(raxIterator *it, const char *op, unsigned char *ele, size_t len) { int eq = 0, lt = 0, gt = 0, first = 0, last = 0; - it->stack.items = 0; /* Just resetting. Intialized by raxStart(). */ + it->stack.items = 0; /* Just resetting. Initialized by raxStart(). */ it->flags |= RAX_ITER_JUST_SEEKED; it->flags &= ~RAX_ITER_EOF; it->key_len = 0; @@ -1731,7 +1731,7 @@ int raxPrev(raxIterator *it) { * tree, expect a disappointing distribution. A random walk produces good * random elements if the tree is not sparse, however in the case of a radix * tree certain keys will be reported much more often than others. At least - * this function should be able to expore every possible element eventually. */ + * this function should be able to explore every possible element eventually. */ int raxRandomWalk(raxIterator *it, size_t steps) { if (it->rt->numele == 0) { it->flags |= RAX_ITER_EOF; @@ -1825,7 +1825,7 @@ uint64_t raxSize(rax *rax) { /* ----------------------------- Introspection ------------------------------ */ /* This function is mostly used for debugging and learning purposes. - * It shows an ASCII representation of a tree on standard output, outling + * It shows an ASCII representation of a tree on standard output, outline * all the nodes and the contained keys. * * The representation is as follow: @@ -1835,7 +1835,7 @@ uint64_t raxSize(rax *rax) { * [abc]=0x12345678 (node is a key, pointing to value 0x12345678) * [] (a normal empty node) * - * Children are represented in new idented lines, each children prefixed by + * Children are represented in new indented lines, each children prefixed by * the "`-(x)" string, where "x" is the edge byte. * * [abc] diff --git a/src/rax.h b/src/rax.h index f2521d14a..6b1fd4188 100644 --- a/src/rax.h +++ b/src/rax.h @@ -58,7 +58,7 @@ * successive nodes having a single child are "compressed" into the node * itself as a string of characters, each representing a next-level child, * and only the link to the node representing the last character node is - * provided inside the representation. So the above representation is turend + * provided inside the representation. So the above representation is turned * into: * * ["foo"] "" @@ -123,7 +123,7 @@ typedef struct raxNode { * nodes). * * If the node has an associated key (iskey=1) and is not NULL - * (isnull=0), then after the raxNode pointers poiting to the + * (isnull=0), then after the raxNode pointers pointing to the * children, an additional value pointer is present (as you can see * in the representation above as "value-ptr" field). */ diff --git a/src/rdb.c b/src/rdb.c index 54a169cd8..4bcf96038 100644 --- a/src/rdb.c +++ b/src/rdb.c @@ -2172,7 +2172,7 @@ int rdbLoadRio(rio *rdb, int rdbflags, rdbSaveInfo *rsi) { } else if (type == RDB_OPCODE_AUX) { /* AUX: generic string-string fields. Use to add state to RDB * which is backward compatible. Implementations of RDB loading - * are requierd to skip AUX fields they don't understand. 
+ * are required to skip AUX fields they don't understand. * * An AUX field is composed of two strings: key and value. */ robj *auxkey, *auxval; @@ -2421,7 +2421,7 @@ void backgroundSaveDoneHandlerDisk(int exitcode, int bysignal) { latencyEndMonitor(latency); latencyAddSampleIfNeeded("rdb-unlink-temp-file",latency); /* SIGUSR1 is whitelisted, so we have a way to kill a child without - * tirggering an error condition. */ + * triggering an error condition. */ if (bysignal != SIGUSR1) server.lastbgsave_status = C_ERR; } diff --git a/src/redis-check-rdb.c b/src/redis-check-rdb.c index 17ec656ce..592feaf42 100644 --- a/src/redis-check-rdb.c +++ b/src/redis-check-rdb.c @@ -331,7 +331,7 @@ err: return 1; } -/* RDB check main: called form redis.c when Redis is executed with the +/* RDB check main: called form server.c when Redis is executed with the * redis-check-rdb alias, on during RDB loading errors. * * The function works in two ways: can be called with argc/argv as a diff --git a/src/redis-cli.c b/src/redis-cli.c index ca949b8f0..2f4609661 100644 --- a/src/redis-cli.c +++ b/src/redis-cli.c @@ -309,7 +309,7 @@ static void cliRefreshPrompt(void) { /* Return the name of the dotfile for the specified 'dotfilename'. * Normally it just concatenates user $HOME to the file specified - * in 'dotfilename'. However if the environment varialbe 'envoverride' + * in 'dotfilename'. However if the environment variable 'envoverride' * is set, its value is taken as the path. * * The function returns NULL (if the file is /dev/null or cannot be @@ -1713,7 +1713,7 @@ static void usage(void) { " -a Password to use when connecting to the server.\n" " You can also use the " REDIS_CLI_AUTH_ENV " environment\n" " variable to pass this password more safely\n" -" (if both are used, this argument takes predecence).\n" +" (if both are used, this argument takes precedence).\n" " --user Used to send ACL style 'AUTH username pass'. Needs -a.\n" " --pass Alias of -a for consistency with the new --user option.\n" " --askpass Force user to input password with mask from STDIN.\n" @@ -2142,7 +2142,7 @@ static int evalMode(int argc, char **argv) { argv2[2] = sdscatprintf(sdsempty(),"%d",keys); /* Call it */ - int eval_ldb = config.eval_ldb; /* Save it, may be reverteed. */ + int eval_ldb = config.eval_ldb; /* Save it, may be reverted. */ retval = issueCommand(argc+3-got_comma, argv2); if (eval_ldb) { if (!config.eval_ldb) { @@ -6658,13 +6658,13 @@ struct distsamples { * samples greater than the previous one, and is also the stop sentinel. * * "tot' is the total number of samples in the different buckets, so it - * is the SUM(samples[i].conut) for i to 0 up to the max sample. + * is the SUM(samples[i].count) for i to 0 up to the max sample. * * As a side effect the function sets all the buckets count to 0. */ void showLatencyDistSamples(struct distsamples *samples, long long tot) { int j; - /* We convert samples into a index inside the palette + /* We convert samples into an index inside the palette * proportional to the percentage a given bucket represents. * This way intensity of the different parts of the spectrum * don't change relative to the number of requests, which avoids to @@ -7971,7 +7971,7 @@ static void LRUTestMode(void) { * Intrisic latency mode. * * Measure max latency of a running process that does not result from - * syscalls. Basically this software should provide an hint about how much + * syscalls. 
Basically this software should provide a hint about how much * time the kernel leaves the process without a chance to run. *--------------------------------------------------------------------------- */ diff --git a/src/redismodule.h b/src/redismodule.h index 4bfc14cc7..4a0e5bf15 100644 --- a/src/redismodule.h +++ b/src/redismodule.h @@ -963,4 +963,4 @@ static int RedisModule_Init(RedisModuleCtx *ctx, const char *name, int ver, int #define RedisModuleString robj #endif /* REDISMODULE_CORE */ -#endif /* REDISMOUDLE_H */ +#endif /* REDISMODULE_H */ diff --git a/src/replication.c b/src/replication.c index 8f4ad2c92..6feb9ab6c 100644 --- a/src/replication.c +++ b/src/replication.c @@ -83,16 +83,16 @@ char *replicationGetSlaveName(client *c) { * the file deletion to the filesystem. This call removes the file in a * background thread instead. We actually just do close() in the thread, * by using the fact that if there is another instance of the same file open, - * the foreground unlink() will not really do anything, and deleting the - * file will only happen once the last reference is lost. */ + * the foreground unlink() will only remove the fs name, and deleting the + * file's storage space will only happen once the last reference is lost. */ int bg_unlink(const char *filename) { int fd = open(filename,O_RDONLY|O_NONBLOCK); if (fd == -1) { /* Can't open the file? Fall back to unlinking in the main thread. */ return unlink(filename); } else { - /* The following unlink() will not do anything since file - * is still open. */ + /* The following unlink() removes the name but doesn't free the + * file contents because a process still has it open. */ int retval = unlink(filename); if (retval == -1) { /* If we got an unlink error, we just return it, closing the @@ -204,7 +204,7 @@ void feedReplicationBacklogWithObject(robj *o) { * as well. This function is used if the instance is a master: we use * the commands received by our clients in order to create the replication * stream. Instead if the instance is a slave and has sub-slaves attached, - * we use replicationFeedSlavesFromMaster() */ + * we use replicationFeedSlavesFromMasterStream() */ void replicationFeedSlaves(list *slaves, int dictid, robj **argv, int argc) { listNode *ln; listIter li; @@ -535,7 +535,7 @@ int masterTryPartialResynchronization(client *c) { (strcasecmp(master_replid, server.replid2) || psync_offset > server.second_replid_offset)) { - /* Run id "?" is used by slaves that want to force a full resync. */ + /* Replid "?" is used by slaves that want to force a full resync. */ if (master_replid[0] != '?') { if (strcasecmp(master_replid, server.replid) && strcasecmp(master_replid, server.replid2)) @@ -707,7 +707,7 @@ int startBgsaveForReplication(int mincapa) { return retval; } -/* SYNC and PSYNC command implemenation. */ +/* SYNC and PSYNC command implementation. */ void syncCommand(client *c) { /* ignore SYNC if already slave or in monitor mode */ if (c->flags & CLIENT_SLAVE) return; @@ -1377,7 +1377,7 @@ void replicationEmptyDbCallback(void *privdata) { replicationSendNewlineToMaster(); } -/* Once we have a link with the master and the synchroniziation was +/* Once we have a link with the master and the synchronization was * performed, this function materializes the master client we store * at server.master, starting from the specified file descriptor. 
*/ void replicationCreateMasterClient(connection *conn, int dbid) { @@ -1454,7 +1454,7 @@ redisDb *disklessLoadMakeBackups(void) { * the 'restore' argument (the number of DBs to replace) is non-zero. * * When instead the loading succeeded we want just to free our old backups, - * in that case the funciton will do just that when 'restore' is 0. */ + * in that case the function will do just that when 'restore' is 0. */ void disklessLoadRestoreBackups(redisDb *backup, int restore, int empty_db_flags) { if (restore) { @@ -1488,7 +1488,7 @@ void readSyncBulkPayload(connection *conn) { off_t left; /* Static vars used to hold the EOF mark, and the last bytes received - * form the server: when they match, we reached the end of the transfer. */ + * from the server: when they match, we reached the end of the transfer. */ static char eofmark[CONFIG_RUN_ID_SIZE]; static char lastbytes[CONFIG_RUN_ID_SIZE]; static int usemark = 0; @@ -1805,7 +1805,7 @@ void readSyncBulkPayload(connection *conn) { REDISMODULE_SUBEVENT_MASTER_LINK_UP, NULL); - /* After a full resynchroniziation we use the replication ID and + /* After a full resynchronization we use the replication ID and * offset of the master. The secondary ID / offset are cleared since * we are starting a new history. */ memcpy(server.replid,server.master->replid,sizeof(server.replid)); @@ -1901,7 +1901,7 @@ char *sendSynchronousCommand(int flags, connection *conn, ...) { /* Try a partial resynchronization with the master if we are about to reconnect. * If there is no cached master structure, at least try to issue a * "PSYNC ? -1" command in order to trigger a full resync using the PSYNC - * command in order to obtain the master run id and the master replication + * command in order to obtain the master replid and the master replication * global offset. * * This function is designed to be called from syncWithMaster(), so the @@ -1929,7 +1929,7 @@ char *sendSynchronousCommand(int flags, connection *conn, ...) { * * PSYNC_CONTINUE: If the PSYNC command succeeded and we can continue. * PSYNC_FULLRESYNC: If PSYNC is supported but a full resync is needed. - * In this case the master run_id and global replication + * In this case the master replid and global replication * offset is saved. * PSYNC_NOT_SUPPORTED: If the server does not understand PSYNC at all and * the caller should fall back to SYNC. @@ -1960,7 +1960,7 @@ int slaveTryPartialResynchronization(connection *conn, int read_reply) { /* Writing half */ if (!read_reply) { /* Initially set master_initial_offset to -1 to mark the current - * master run_id and offset as not valid. Later if we'll be able to do + * master replid and offset as not valid. Later if we'll be able to do * a FULL resync using the PSYNC command we'll set the offset at the * right value, so that this information will be propagated to the * client structure representing the master into server.master. */ @@ -2001,7 +2001,7 @@ int slaveTryPartialResynchronization(connection *conn, int read_reply) { if (!strncmp(reply,"+FULLRESYNC",11)) { char *replid = NULL, *offset = NULL; - /* FULL RESYNC, parse the reply in order to extract the run id + /* FULL RESYNC, parse the reply in order to extract the replid * and the replication offset. */ replid = strchr(reply,' '); if (replid) { @@ -2293,7 +2293,7 @@ void syncWithMaster(connection *conn) { /* Try a partial resynchonization. 
If we don't have a cached master * slaveTryPartialResynchronization() will at least try to use PSYNC - * to start a full resynchronization so that we get the master run id + * to start a full resynchronization so that we get the master replid * and the global offset, to try a partial resync at the next * reconnection attempt. */ if (server.repl_state == REPL_STATE_SEND_PSYNC) { @@ -2455,7 +2455,7 @@ void replicationAbortSyncTransfer(void) { * If there was a replication handshake in progress 1 is returned and * the replication state (server.repl_state) set to REPL_STATE_CONNECT. * - * Otherwise zero is returned and no operation is perforemd at all. */ + * Otherwise zero is returned and no operation is performed at all. */ int cancelReplicationHandshake(void) { if (server.repl_state == REPL_STATE_TRANSFER) { replicationAbortSyncTransfer(); @@ -2887,7 +2887,7 @@ void refreshGoodSlavesCount(void) { * * We don't care about taking a different cache for every different slave * since to fill the cache again is not very costly, the goal of this code - * is to avoid that the same big script is trasmitted a big number of times + * is to avoid that the same big script is transmitted a big number of times * per second wasting bandwidth and processor speed, but it is not a problem * if we need to rebuild the cache from scratch from time to time, every used * script will need to be transmitted a single time to reappear in the cache. @@ -2897,7 +2897,7 @@ void refreshGoodSlavesCount(void) { * 1) Every time a new slave connects, we flush the whole script cache. * 2) We only send as EVALSHA what was sent to the master as EVALSHA, without * trying to convert EVAL into EVALSHA specifically for slaves. - * 3) Every time we trasmit a script as EVAL to the slaves, we also add the + * 3) Every time we transmit a script as EVAL to the slaves, we also add the * corresponding SHA1 of the script into the cache as we are sure every * slave knows about the script starting from now. * 4) On SCRIPT FLUSH command, we replicate the command to all the slaves @@ -2988,7 +2988,7 @@ int replicationScriptCacheExists(sds sha1) { /* This just set a flag so that we broadcast a REPLCONF GETACK command * to all the slaves in the beforeSleep() function. Note that this way - * we "group" all the clients that want to wait for synchronouns replication + * we "group" all the clients that want to wait for synchronous replication * in a given event loop iteration, and send a single GETACK for them all. */ void replicationRequestAckFromSlaves(void) { server.get_ack_from_slaves = 1; diff --git a/src/scripting.c b/src/scripting.c index bccbcf637..e43472b3a 100644 --- a/src/scripting.c +++ b/src/scripting.c @@ -69,7 +69,7 @@ struct ldbState { list *children; /* All forked debugging sessions pids. */ int bp[LDB_BREAKPOINTS_MAX]; /* An array of breakpoints line numbers. */ int bpcount; /* Number of valid entries inside bp. */ - int step; /* Stop at next line ragardless of breakpoints. */ + int step; /* Stop at next line regardless of breakpoints. */ int luabp; /* Stop at next line because redis.breakpoint() was called. */ sds *src; /* Lua script source code split by line. */ int lines; /* Number of lines in 'src'. 
*/ @@ -886,7 +886,7 @@ int luaRedisReplicateCommandsCommand(lua_State *lua) { /* redis.breakpoint() * - * Allows to stop execution during a debuggign session from within + * Allows to stop execution during a debugging session from within * the Lua code implementation, like if a breakpoint was set in the code * immediately after the function. */ int luaRedisBreakpointCommand(lua_State *lua) { @@ -1499,7 +1499,7 @@ void evalGenericCommand(client *c, int evalsha) { /* Hash the code if this is an EVAL call */ sha1hex(funcname+2,c->argv[1]->ptr,sdslen(c->argv[1]->ptr)); } else { - /* We already have the SHA if it is a EVALSHA */ + /* We already have the SHA if it is an EVALSHA */ int j; char *sha = c->argv[1]->ptr; @@ -1628,7 +1628,7 @@ void evalGenericCommand(client *c, int evalsha) { * To do so we use a cache of SHA1s of scripts that we already propagated * as full EVAL, that's called the Replication Script Cache. * - * For repliation, everytime a new slave attaches to the master, we need to + * For replication, everytime a new slave attaches to the master, we need to * flush our cache of scripts that can be replicated as EVALSHA, while * for AOF we need to do so every time we rewrite the AOF file. */ if (evalsha && !server.lua_replicate_commands) { @@ -1801,7 +1801,7 @@ void ldbLog(sds entry) { } /* A version of ldbLog() which prevents producing logs greater than - * ldb.maxlen. The first time the limit is reached an hint is generated + * ldb.maxlen. The first time the limit is reached a hint is generated * to inform the user that reply trimming can be disabled using the * debugger "maxlen" command. */ void ldbLogWithMaxLen(sds entry) { @@ -1842,7 +1842,7 @@ void ldbSendLogs(void) { } /* Start a debugging session before calling EVAL implementation. - * The techique we use is to capture the client socket file descriptor, + * The technique we use is to capture the client socket file descriptor, * in order to perform direct I/O with it from within Lua hooks. This * way we don't have to re-enter Redis in order to handle I/O. * @@ -1925,7 +1925,7 @@ void ldbEndSession(client *c) { connNonBlock(ldb.conn); connSendTimeout(ldb.conn,0); - /* Close the client connectin after sending the final EVAL reply + /* Close the client connection after sending the final EVAL reply * in order to signal the end of the debugging session. */ c->flags |= CLIENT_CLOSE_AFTER_REPLY; @@ -2094,7 +2094,7 @@ void ldbLogSourceLine(int lnum) { /* Implement the "list" command of the Lua debugger. If around is 0 * the whole file is listed, otherwise only a small portion of the file * around the specified line is shown. When a line number is specified - * the amonut of context (lines before/after) is specified via the + * the amount of context (lines before/after) is specified via the * 'context' argument. */ void ldbList(int around, int context) { int j; @@ -2105,7 +2105,7 @@ void ldbList(int around, int context) { } } -/* Append an human readable representation of the Lua value at position 'idx' +/* Append a human readable representation of the Lua value at position 'idx' * on the stack of the 'lua' state, to the SDS string passed as argument. * The new SDS string with the represented value attached is returned. * Used in order to implement ldbLogStackValue(). @@ -2349,7 +2349,7 @@ char *ldbRedisProtocolToHuman_Double(sds *o, char *reply) { return p+2; } -/* Log a Redis reply as debugger output, in an human readable format. +/* Log a Redis reply as debugger output, in a human readable format. 
* If the resulting string is longer than 'len' plus a few more chars * used as prefix, it gets truncated. */ void ldbLogRedisReply(char *reply) { @@ -2533,7 +2533,7 @@ void ldbTrace(lua_State *lua) { } } -/* Impleemnts the debugger "maxlen" command. It just queries or sets the +/* Implements the debugger "maxlen" command. It just queries or sets the * ldb.maxlen variable. */ void ldbMaxlen(sds *argv, int argc) { if (argc == 2) { @@ -2606,8 +2606,8 @@ ldbLog(sdsnew(" mode dataset changes will be retained.")); ldbLog(sdsnew("")); ldbLog(sdsnew("Debugger functions you can call from Lua scripts:")); ldbLog(sdsnew("redis.debug() Produce logs in the debugger console.")); -ldbLog(sdsnew("redis.breakpoint() Stop execution like if there was a breakpoing.")); -ldbLog(sdsnew(" in the next line of code.")); +ldbLog(sdsnew("redis.breakpoint() Stop execution like if there was a breakpoint in the")); +ldbLog(sdsnew(" next line of code.")); ldbSendLogs(); } else if (!strcasecmp(argv[0],"s") || !strcasecmp(argv[0],"step") || !strcasecmp(argv[0],"n") || !strcasecmp(argv[0],"next")) { diff --git a/src/sds.c b/src/sds.c index 118971621..a723a42c3 100644 --- a/src/sds.c +++ b/src/sds.c @@ -405,7 +405,7 @@ sds sdscatlen(sds s, const void *t, size_t len) { return s; } -/* Append the specified null termianted C string to the sds string 's'. +/* Append the specified null terminated C string to the sds string 's'. * * After the call, the passed sds string is no longer valid and all the * references must be substituted with the new pointer returned by the call. */ @@ -453,7 +453,7 @@ int sdsll2str(char *s, long long value) { size_t l; /* Generate the string representation, this method produces - * an reversed string. */ + * a reversed string. */ v = (value < 0) ? -value : value; p = s; do { @@ -484,7 +484,7 @@ int sdsull2str(char *s, unsigned long long v) { size_t l; /* Generate the string representation, this method produces - * an reversed string. */ + * a reversed string. */ p = s; do { *p++ = '0'+(v%10); diff --git a/src/sentinel.c b/src/sentinel.c index bdc339674..1bd82453f 100644 --- a/src/sentinel.c +++ b/src/sentinel.c @@ -131,13 +131,13 @@ typedef struct sentinelAddr { /* The link to a sentinelRedisInstance. When we have the same set of Sentinels * monitoring many masters, we have different instances representing the * same Sentinels, one per master, and we need to share the hiredis connections - * among them. Oherwise if 5 Sentinels are monitoring 100 masters we create + * among them. Otherwise if 5 Sentinels are monitoring 100 masters we create * 500 outgoing connections instead of 5. * * So this structure represents a reference counted link in terms of the two * hiredis connections for commands and Pub/Sub, and the fields needed for * failure detection, since the ping/pong time are now local to the link: if - * the link is available, the instance is avaialbe. This way we don't just + * the link is available, the instance is available. This way we don't just * have 5 connections instead of 500, we also send 5 pings instead of 500. * * Links are shared only for Sentinels: master and slave instances have @@ -986,7 +986,7 @@ instanceLink *createInstanceLink(void) { return link; } -/* Disconnect an hiredis connection in the context of an instance link. */ +/* Disconnect a hiredis connection in the context of an instance link. 
*/ void instanceLinkCloseConnection(instanceLink *link, redisAsyncContext *c) { if (c == NULL) return; @@ -1125,7 +1125,7 @@ int sentinelUpdateSentinelAddressInAllMasters(sentinelRedisInstance *ri) { return reconfigured; } -/* This function is called when an hiredis connection reported an error. +/* This function is called when a hiredis connection reported an error. * We set it to NULL and mark the link as disconnected so that it will be * reconnected again. * @@ -2015,7 +2015,7 @@ void sentinelSendAuthIfNeeded(sentinelRedisInstance *ri, redisAsyncContext *c) { * The connection type is "cmd" or "pubsub" as specified by 'type'. * * This makes it possible to list all the sentinel instances connected - * to a Redis servewr with CLIENT LIST, grepping for a specific name format. */ + * to a Redis server with CLIENT LIST, grepping for a specific name format. */ void sentinelSetClientName(sentinelRedisInstance *ri, redisAsyncContext *c, char *type) { char name[64]; @@ -2470,7 +2470,7 @@ void sentinelPublishReplyCallback(redisAsyncContext *c, void *reply, void *privd ri->last_pub_time = mstime(); } -/* Process an hello message received via Pub/Sub in master or slave instance, +/* Process a hello message received via Pub/Sub in master or slave instance, * or sent directly to this sentinel via the (fake) PUBLISH command of Sentinel. * * If the master name specified in the message is not known, the message is @@ -2607,7 +2607,7 @@ void sentinelReceiveHelloMessages(redisAsyncContext *c, void *reply, void *privd sentinelProcessHelloMessage(r->element[2]->str, r->element[2]->len); } -/* Send an "Hello" message via Pub/Sub to the specified 'ri' Redis +/* Send a "Hello" message via Pub/Sub to the specified 'ri' Redis * instance in order to broadcast the current configuration for this * master, and to advertise the existence of this Sentinel at the same time. * @@ -2661,7 +2661,7 @@ int sentinelSendHello(sentinelRedisInstance *ri) { } /* Reset last_pub_time in all the instances in the specified dictionary - * in order to force the delivery of an Hello update ASAP. */ + * in order to force the delivery of a Hello update ASAP. */ void sentinelForceHelloUpdateDictOfRedisInstances(dict *instances) { dictIterator *di; dictEntry *de; @@ -2675,13 +2675,13 @@ void sentinelForceHelloUpdateDictOfRedisInstances(dict *instances) { dictReleaseIterator(di); } -/* This function forces the delivery of an "Hello" message (see +/* This function forces the delivery of a "Hello" message (see * sentinelSendHello() top comment for further information) to all the Redis * and Sentinel instances related to the specified 'master'. * * It is technically not needed since we send an update to every instance * with a period of SENTINEL_PUBLISH_PERIOD milliseconds, however when a - * Sentinel upgrades a configuration it is a good idea to deliever an update + * Sentinel upgrades a configuration it is a good idea to deliver an update * to the other Sentinels ASAP. */ int sentinelForceHelloUpdateForMaster(sentinelRedisInstance *master) { if (!(master->flags & SRI_MASTER)) return C_ERR; @@ -3082,7 +3082,7 @@ void sentinelCommand(client *c) { * ip and port are the ip and port of the master we want to be * checked by Sentinel. Note that the command will not check by * name but just by master, in theory different Sentinels may monitor - * differnet masters with the same name. + * different masters with the same name. * * current-epoch is needed in order to understand if we are allowed * to vote for a failover leader or not. 
Each Sentinel can vote just @@ -3995,7 +3995,7 @@ int sentinelSendSlaveOf(sentinelRedisInstance *ri, char *host, int port) { * the following tasks: * 1) Reconfigure the instance according to the specified host/port params. * 2) Rewrite the configuration. - * 3) Disconnect all clients (but this one sending the commnad) in order + * 3) Disconnect all clients (but this one sending the command) in order * to trigger the ask-master-on-reconnection protocol for connected * clients. * @@ -4547,7 +4547,7 @@ void sentinelHandleDictOfRedisInstances(dict *instances) { * difference bigger than SENTINEL_TILT_TRIGGER milliseconds if one of the * following conditions happen: * - * 1) The Sentiel process for some time is blocked, for every kind of + * 1) The Sentinel process for some time is blocked, for every kind of * random reason: the load is huge, the computer was frozen for some time * in I/O or alike, the process was stopped by a signal. Everything. * 2) The system clock was altered significantly. diff --git a/src/server.c b/src/server.c index a7a36df13..cf9dc83ea 100644 --- a/src/server.c +++ b/src/server.c @@ -115,7 +115,7 @@ volatile unsigned long lru_clock; /* Server global current LRU time. */ * write: Write command (may modify the key space). * * read-only: All the non special commands just reading from keys without - * changing the content, or returning other informations like + * changing the content, or returning other information like * the TIME command. Special commands such administrative commands * or transaction related commands (multi, exec, discard, ...) * are not flagged as read-only commands, since they affect the @@ -1280,7 +1280,7 @@ dictType objectKeyHeapPointerValueDictType = { dictVanillaFree /* val destructor */ }; -/* Set dictionary type. Keys are SDS strings, values are ot used. */ +/* Set dictionary type. Keys are SDS strings, values are not used. */ dictType setDictType = { dictSdsHash, /* hash function */ NULL, /* key dup */ @@ -1385,9 +1385,8 @@ dictType clusterNodesBlackListDictType = { NULL /* val destructor */ }; -/* Cluster re-addition blacklist. This maps node IDs to the time - * we can re-add this node. The goal is to avoid readding a removed - * node for some time. */ +/* Modules system dictionary type. Keys are module name, + * values are pointer to RedisModule struct. */ dictType modulesDictType = { dictSdsCaseHash, /* hash function */ NULL, /* key dup */ @@ -1440,7 +1439,7 @@ void tryResizeHashTables(int dbid) { /* Our hash table implementation performs rehashing incrementally while * we write/read from the hash table. Still if the server is idle, the hash * table will use two tables for a long time. So we try to use 1 millisecond - * of CPU time at every call of this function to perform some rehahsing. + * of CPU time at every call of this function to perform some rehashing. * * The function returns 1 if some rehashing was performed, otherwise 0 * is returned. */ @@ -1462,8 +1461,8 @@ int incrementallyRehash(int dbid) { * as we want to avoid resizing the hash tables when there is a child in order * to play well with copy-on-write (otherwise when a resize happens lots of * memory pages are copied). The goal of this function is to update the ability - * for dict.c to resize the hash tables accordingly to the fact we have o not - * running childs. */ + * for dict.c to resize the hash tables accordingly to the fact we have an + * active fork child running. 
*/ void updateDictResizePolicy(void) { if (!hasActiveChildProcess()) dictEnableResize(); @@ -1613,7 +1612,7 @@ int clientsCronTrackClientsMemUsage(client *c) { mem += sdsAllocSize(c->querybuf); mem += sizeof(client); /* Now that we have the memory used by the client, remove the old - * value from the old categoty, and add it back. */ + * value from the old category, and add it back. */ server.stat_clients_type_memory[c->client_cron_last_memory_type] -= c->client_cron_last_memory_usage; server.stat_clients_type_memory[type] += mem; @@ -2028,7 +2027,7 @@ int serverCron(struct aeEventLoop *eventLoop, long long id, void *clientData) { /* AOF write errors: in this case we have a buffer to flush as well and * clear the AOF error in case of success to make the DB writable again, * however to try every second is enough in case of 'hz' is set to - * an higher frequency. */ + * a higher frequency. */ run_with_period(1000) { if (server.aof_last_write_status == C_ERR) flushAppendOnlyFile(0); @@ -2198,7 +2197,7 @@ void beforeSleep(struct aeEventLoop *eventLoop) { if (moduleCount()) moduleReleaseGIL(); } -/* This function is called immadiately after the event loop multiplexing +/* This function is called immediately after the event loop multiplexing * API returned, and the control is going to soon return to Redis by invoking * the different events callbacks. */ void afterSleep(struct aeEventLoop *eventLoop) { @@ -2420,7 +2419,7 @@ void initServerConfig(void) { R_NegInf = -1.0/R_Zero; R_Nan = R_Zero/R_Zero; - /* Command table -- we initiialize it here as it is part of the + /* Command table -- we initialize it here as it is part of the * initial configuration, since command names may be changed via * redis.conf using the rename-command directive. */ server.commands = dictCreate(&commandTableDictType,NULL); @@ -3085,7 +3084,7 @@ int populateCommandTableParseFlags(struct redisCommand *c, char *strflags) { } /* Populates the Redis Command Table starting from the hard coded list - * we have on top of redis.c file. */ + * we have on top of server.c file. */ void populateCommandTable(void) { int j; int numcommands = sizeof(redisCommandTable)/sizeof(struct redisCommand); @@ -3219,12 +3218,12 @@ void propagate(struct redisCommand *cmd, int dbid, robj **argv, int argc, * * 'cmd' must be a pointer to the Redis command to replicate, dbid is the * database ID the command should be propagated into. - * Arguments of the command to propagte are passed as an array of redis + * Arguments of the command to propagate are passed as an array of redis * objects pointers of len 'argc', using the 'argv' vector. * * The function does not take a reference to the passed 'argv' vector, * so it is up to the caller to release the passed argv (but it is usually - * stack allocated). The function autoamtically increments ref count of + * stack allocated). The function automatically increments ref count of * passed objects, so the caller does not need to. */ void alsoPropagate(struct redisCommand *cmd, int dbid, robj **argv, int argc, int target) @@ -3384,7 +3383,7 @@ void call(client *c, int flags) { if (c->flags & CLIENT_FORCE_AOF) propagate_flags |= PROPAGATE_AOF; /* However prevent AOF / replication propagation if the command - * implementations called preventCommandPropagation() or similar, + * implementation called preventCommandPropagation() or similar, * or if we don't have the call() flags to do so. 
*/ if (c->flags & CLIENT_PREVENT_REPL_PROP || !(flags & CMD_CALL_PROPAGATE_REPL)) @@ -3632,7 +3631,7 @@ int processCommand(client *c) { } /* Save out_of_memory result at script start, otherwise if we check OOM - * untill first write within script, memory used by lua stack and + * until first write within script, memory used by lua stack and * arguments might interfere. */ if (c->cmd->proc == evalCommand || c->cmd->proc == evalShaCommand) { server.lua_oom = out_of_memory; @@ -3870,7 +3869,7 @@ int prepareForShutdown(int flags) { /*================================== Commands =============================== */ -/* Sometimes Redis cannot accept write commands because there is a perstence +/* Sometimes Redis cannot accept write commands because there is a persistence * error with the RDB or AOF file, and Redis is configured in order to stop * accepting writes in such situation. This function returns if such a * condition is active, and the type of the condition. diff --git a/src/server.h b/src/server.h index 980011938..6a130879e 100644 --- a/src/server.h +++ b/src/server.h @@ -161,7 +161,7 @@ extern int configOOMScoreAdjValuesDefaults[CONFIG_OOM_COUNT]; /* Hash table parameters */ #define HASHTABLE_MIN_FILL 10 /* Minimal hash table fill 10% */ -/* Command flags. Please check the command table defined in the redis.c file +/* Command flags. Please check the command table defined in the server.c file * for more information about the meaning of every flag. */ #define CMD_WRITE (1ULL<<0) /* "write" flag */ #define CMD_READONLY (1ULL<<1) /* "read-only" flag */ @@ -827,7 +827,7 @@ typedef struct client { copying this slave output buffer should use. */ char replid[CONFIG_RUN_ID_SIZE+1]; /* Master replication ID (if master). */ - int slave_listening_port; /* As configured with: SLAVECONF listening-port */ + int slave_listening_port; /* As configured with: REPLCONF listening-port */ char slave_ip[NET_IP_STR_LEN]; /* Optionally given by REPLCONF ip-address */ int slave_capa; /* Slave capabilities: SLAVE_CAPA_* bitwise OR. */ multiState mstate; /* MULTI/EXEC state */ @@ -939,7 +939,7 @@ typedef struct redisOp { } redisOp; /* Defines an array of Redis operations. There is an API to add to this - * structure in a easy way. + * structure in an easy way. * * redisOpArrayInit(); * redisOpArrayAppend(); @@ -1349,7 +1349,7 @@ struct redisServer { unsigned int maxclients; /* Max number of simultaneous clients */ unsigned long long maxmemory; /* Max number of memory bytes to use */ int maxmemory_policy; /* Policy for key eviction */ - int maxmemory_samples; /* Pricision of random sampling */ + int maxmemory_samples; /* Precision of random sampling */ int lfu_log_factor; /* LFU logarithmic counter factor. */ int lfu_decay_time; /* LFU counter decay factor. */ long long proto_max_bulk_len; /* Protocol bulk length maximum size. */ @@ -1429,7 +1429,7 @@ struct redisServer { int lua_random_dirty; /* True if a random command was called during the execution of the current script. */ int lua_replicate_commands; /* True if we are doing single commands repl. */ - int lua_multi_emitted;/* True if we already proagated MULTI. */ + int lua_multi_emitted;/* True if we already propagated MULTI. */ int lua_repl; /* Script replication flags for redis.set_repl(). */ int lua_timedout; /* True if we reached the time limit for script execution. 
*/ @@ -1935,7 +1935,7 @@ void addACLLogEntry(client *c, int reason, int keypos, sds username); /* Flags only used by the ZADD command but not by zsetAdd() API: */ #define ZADD_CH (1<<16) /* Return num of elements added or updated. */ -/* Struct to hold a inclusive/exclusive range spec by score comparison. */ +/* Struct to hold an inclusive/exclusive range spec by score comparison. */ typedef struct { double min, max; int minex, maxex; /* are min or max exclusive? */ diff --git a/src/siphash.c b/src/siphash.c index 357741132..30c15c04e 100644 --- a/src/siphash.c +++ b/src/siphash.c @@ -22,7 +22,7 @@ 1. We use SipHash 1-2. This is not believed to be as strong as the suggested 2-4 variant, but AFAIK there are not trivial attacks against this reduced-rounds version, and it runs at the same speed - as Murmurhash2 that we used previously, why the 2-4 variant slowed + as Murmurhash2 that we used previously, while the 2-4 variant slowed down Redis by a 4% figure more or less. 2. Hard-code rounds in the hope the compiler can optimize it more in this raw from. Anyway we always want the standard 2-4 variant. @@ -36,7 +36,7 @@ perform a text transformation in some temporary buffer, which is costly. 5. Remove debugging code. 6. Modified the original test.c file to be a stand-alone function testing - the function in the new form (returing an uint64_t) using just the + the function in the new form (returning an uint64_t) using just the relevant test vector. */ #include @@ -46,7 +46,7 @@ #include /* Fast tolower() alike function that does not care about locale - * but just returns a-z insetad of A-Z. */ + * but just returns a-z instead of A-Z. */ int siptlw(int c) { if (c >= 'A' && c <= 'Z') { return c+('a'-'A'); diff --git a/src/slowlog.c b/src/slowlog.c index 1d715e39b..408456b14 100644 --- a/src/slowlog.c +++ b/src/slowlog.c @@ -75,7 +75,7 @@ slowlogEntry *slowlogCreateEntry(client *c, robj **argv, int argc, long long dur } else if (argv[j]->refcount == OBJ_SHARED_REFCOUNT) { se->argv[j] = argv[j]; } else { - /* Here we need to dupliacate the string objects composing the + /* Here we need to duplicate the string objects composing the * argument vector of the command, because those may otherwise * end shared with string objects stored into keys. Having * shared objects between any part of Redis, and the data diff --git a/src/sort.c b/src/sort.c index f269a7731..aeef53e6a 100644 --- a/src/sort.c +++ b/src/sort.c @@ -115,7 +115,7 @@ robj *lookupKeyByPattern(redisDb *db, robj *pattern, robj *subst, int writeflag) if (fieldobj) { if (o->type != OBJ_HASH) goto noobj; - /* Retrieve value from hash by the field name. The returend object + /* Retrieve value from hash by the field name. The returned object * is a new object with refcount already incremented. */ o = hashTypeGetValueObject(o, fieldobj->ptr); } else { diff --git a/src/sparkline.c b/src/sparkline.c index 0a986883d..67482c774 100644 --- a/src/sparkline.c +++ b/src/sparkline.c @@ -92,7 +92,7 @@ void freeSparklineSequence(struct sequence *seq) { * ------------------------------------------------------------------------- */ /* Render part of a sequence, so that render_sequence() call call this function - * with differnent parts in order to create the full output without overflowing + * with different parts in order to create the full output without overflowing * the current terminal columns. 
*/ sds sparklineRenderRange(sds output, struct sequence *seq, int rows, int offset, int len, int flags) { int j; diff --git a/src/stream.h b/src/stream.h index 0d3bf63fc..e4c5ff78d 100644 --- a/src/stream.h +++ b/src/stream.h @@ -74,7 +74,7 @@ typedef struct streamConsumer { consumer not yet acknowledged. Keys are big endian message IDs, while values are the same streamNACK structure referenced - in the "pel" of the conumser group structure + in the "pel" of the consumer group structure itself, so the value is shared. */ } streamConsumer; diff --git a/src/t_hash.c b/src/t_hash.c index 240e11c91..8e79432a4 100644 --- a/src/t_hash.c +++ b/src/t_hash.c @@ -630,7 +630,7 @@ void hincrbyfloatCommand(client *c) { server.dirty++; /* Always replicate HINCRBYFLOAT as an HSET command with the final value - * in order to make sure that differences in float pricision or formatting + * in order to make sure that differences in float precision or formatting * will not create differences in replicas or after an AOF restart. */ robj *aux, *newobj; aux = createStringObject("HSET",4); diff --git a/src/t_list.c b/src/t_list.c index a751dde26..4f0bd7b81 100644 --- a/src/t_list.c +++ b/src/t_list.c @@ -723,7 +723,7 @@ void rpoplpushCommand(client *c) { * Blocking POP operations *----------------------------------------------------------------------------*/ -/* This is a helper function for handleClientsBlockedOnKeys(). It's work +/* This is a helper function for handleClientsBlockedOnKeys(). Its work * is to serve a specific client (receiver) that is blocked on 'key' * in the context of the specified 'db', doing the following: * @@ -809,7 +809,7 @@ void blockingPopGenericCommand(client *c, int where) { return; } else { if (listTypeLength(o) != 0) { - /* Non empty list, this is like a non normal [LR]POP. */ + /* Non empty list, this is like a normal [LR]POP. */ char *event = (where == LIST_HEAD) ? "lpop" : "rpop"; robj *value = listTypePop(o,where); serverAssert(value != NULL); @@ -845,7 +845,7 @@ void blockingPopGenericCommand(client *c, int where) { return; } - /* If the list is empty or the key does not exists we must block */ + /* If the keys do not exist we must block */ blockForKeys(c,BLOCKED_LIST,c->argv + 1,c->argc - 2,timeout,NULL,NULL); } diff --git a/src/t_set.c b/src/t_set.c index c2e73a6e6..837337ba7 100644 --- a/src/t_set.c +++ b/src/t_set.c @@ -193,7 +193,7 @@ sds setTypeNextObject(setTypeIterator *si) { } /* Return random element from a non empty set. - * The returned element can be a int64_t value if the set is encoded + * The returned element can be an int64_t value if the set is encoded * as an "intset" blob of integers, or an SDS string if the set * is a regular set. * @@ -442,7 +442,7 @@ void spopWithCountCommand(client *c) { dbDelete(c->db,c->argv[1]); notifyKeyspaceEvent(NOTIFY_GENERIC,"del",c->argv[1],c->db->id); - /* Propagate this command as an DEL operation */ + /* Propagate this command as a DEL operation */ rewriteClientCommandVector(c,2,shared.del,c->argv[1]); signalModifiedKey(c,c->db,c->argv[1]); server.dirty++; @@ -676,7 +676,7 @@ void srandmemberWithCountCommand(client *c) { * In this case we create a set from scratch with all the elements, and * subtract random elements to reach the requested number of elements. * - * This is done because if the number of requsted elements is just + * This is done because if the number of requested elements is just * a bit less than the number of elements in the set, the natural approach * used into CASE 3 is highly inefficient. 
*/ if (count*SRANDMEMBER_SUB_STRATEGY_MUL > size) { diff --git a/src/t_stream.c b/src/t_stream.c index a54671938..357975079 100644 --- a/src/t_stream.c +++ b/src/t_stream.c @@ -1197,7 +1197,7 @@ void xaddCommand(client *c) { int id_given = 0; /* Was an ID different than "*" specified? */ long long maxlen = -1; /* If left to -1 no trimming is performed. */ int approx_maxlen = 0; /* If 1 only delete whole radix tree nodes, so - the maxium length is not applied verbatim. */ + the maximum length is not applied verbatim. */ int maxlen_arg_idx = 0; /* Index of the count in MAXLEN, for rewriting. */ /* Parse options. */ @@ -1893,7 +1893,7 @@ NULL } } -/* XSETID +/* XSETID * * Set the internal "last ID" of a stream. */ void xsetidCommand(client *c) { @@ -1982,7 +1982,7 @@ void xackCommand(client *c) { * * If start and stop are omitted, the command just outputs information about * the amount of pending messages for the key/group pair, together with - * the minimum and maxium ID of pending messages. + * the minimum and maximum ID of pending messages. * * If start and stop are provided instead, the pending messages are returned * with informations about the current owner, number of deliveries and last diff --git a/src/t_string.c b/src/t_string.c index 4be758e65..4886f7e44 100644 --- a/src/t_string.c +++ b/src/t_string.c @@ -316,7 +316,7 @@ void msetGenericCommand(client *c, int nx) { } /* Handle the NX flag. The MSETNX semantic is to return zero and don't - * set anything if at least one key alerady exists. */ + * set anything if at least one key already exists. */ if (nx) { for (j = 1; j < c->argc; j += 2) { if (lookupKeyWrite(c->db,c->argv[j]) != NULL) { diff --git a/src/t_zset.c b/src/t_zset.c index 9c409cd96..cf2d7f972 100644 --- a/src/t_zset.c +++ b/src/t_zset.c @@ -245,7 +245,7 @@ int zslDelete(zskiplist *zsl, double score, sds ele, zskiplistNode **node) { return 0; /* not found */ } -/* Update the score of an elmenent inside the sorted set skiplist. +/* Update the score of an element inside the sorted set skiplist. * Note that the element must exist and must match 'score'. * This function does not update the score in the hash table side, the * caller should take care of it. diff --git a/src/tracking.c b/src/tracking.c index 2721de32a..3737f6859 100644 --- a/src/tracking.c +++ b/src/tracking.c @@ -134,7 +134,7 @@ void enableTracking(client *c, uint64_t redirect_to, uint64_t options, robj **pr CLIENT_TRACKING_NOLOOP); c->client_tracking_redirection = redirect_to; - /* This may be the first client we ever enable. Crete the tracking + /* This may be the first client we ever enable. Create the tracking * table if it does not exist. */ if (TrackingTable == NULL) { TrackingTable = raxNew(); diff --git a/src/valgrind.sup b/src/valgrind.sup index 3024d63bc..b05843d8c 100644 --- a/src/valgrind.sup +++ b/src/valgrind.sup @@ -1,17 +1,17 @@ { - + Memcheck:Cond fun:lzf_compress } { - + Memcheck:Value4 fun:lzf_compress } { - + Memcheck:Value8 fun:lzf_compress } diff --git a/src/ziplist.c b/src/ziplist.c index 13881c117..e27875f6e 100644 --- a/src/ziplist.c +++ b/src/ziplist.c @@ -99,7 +99,7 @@ * Integer encoded as 24 bit signed (3 bytes). * |11111110| - 2 bytes * Integer encoded as 8 bit signed (1 byte). - * |1111xxxx| - (with xxxx between 0000 and 1101) immediate 4 bit integer. + * |1111xxxx| - (with xxxx between 0001 and 1101) immediate 4 bit integer. * Unsigned integer from 0 to 12. 
The encoded value is actually from * 1 to 13 because 0000 and 1111 can not be used, so 1 should be * subtracted from the encoded 4 bit value to obtain the right value. @@ -191,10 +191,10 @@ #include "redisassert.h" #define ZIP_END 255 /* Special "end of ziplist" entry. */ -#define ZIP_BIG_PREVLEN 254 /* Max number of bytes of the previous entry, for - the "prevlen" field prefixing each entry, to be - represented with just a single byte. Otherwise - it is represented as FE AA BB CC DD, where +#define ZIP_BIG_PREVLEN 254 /* ZIP_BIG_PREVLEN - 1 is the max number of bytes of + the previous entry, for the "prevlen" field prefixing + each entry, to be represented with just a single byte. + Otherwise it is represented as FE AA BB CC DD, where AA BB CC DD are a 4 bytes unsigned integer representing the previous entry len. */ @@ -317,7 +317,7 @@ unsigned int zipIntSize(unsigned char encoding) { return 0; } -/* Write the encoidng header of the entry in 'p'. If p is NULL it just returns +/* Write the encoding header of the entry in 'p'. If p is NULL it just returns * the amount of bytes required to encode such a length. Arguments: * * 'encoding' is the encoding we are using for the entry. It could be @@ -325,7 +325,7 @@ unsigned int zipIntSize(unsigned char encoding) { * for single-byte small immediate integers. * * 'rawlen' is only used for ZIP_STR_* encodings and is the length of the - * srting that this entry represents. + * string that this entry represents. * * The function returns the number of bytes used by the encoding/length * header stored in 'p'. */ @@ -914,7 +914,7 @@ unsigned char *ziplistMerge(unsigned char **first, unsigned char **second) { } else { /* !append == prepending to target */ /* Move target *contents* exactly size of (source - [END]), - * then copy source into vacataed space (source - [END]): + * then copy source into vacated space (source - [END]): * [SOURCE - END, TARGET - HEADER] */ memmove(target + source_bytes - ZIPLIST_END_SIZE, target + ZIPLIST_HEADER_SIZE, diff --git a/src/zipmap.c b/src/zipmap.c index 22bfa1a46..365c4aea4 100644 --- a/src/zipmap.c +++ b/src/zipmap.c @@ -133,7 +133,7 @@ static unsigned int zipmapEncodeLength(unsigned char *p, unsigned int len) { * zipmap. Returns NULL if the key is not found. * * If NULL is returned, and totlen is not NULL, it is set to the entire - * size of the zimap, so that the calling function will be able to + * size of the zipmap, so that the calling function will be able to * reallocate the original zipmap to make room for more entries. */ static unsigned char *zipmapLookupRaw(unsigned char *zm, unsigned char *key, unsigned int klen, unsigned int *totlen) { unsigned char *p = zm+1, *k = NULL; diff --git a/tests/cluster/tests/04-resharding.tcl b/tests/cluster/tests/04-resharding.tcl index 33f861dc5..cee2ec5ba 100644 --- a/tests/cluster/tests/04-resharding.tcl +++ b/tests/cluster/tests/04-resharding.tcl @@ -1,7 +1,7 @@ # Failover stress test. # In this test a different node is killed in a loop for N # iterations. The test checks that certain properties -# are preseved across iterations. +# are preserved across iterations. source "../tests/includes/init-tests.tcl" source "../../../tests/support/cli.tcl" @@ -32,7 +32,7 @@ test "Enable AOF in all the instances" { } } -# Return nno-zero if the specified PID is about a process still in execution, +# Return non-zero if the specified PID is about a process still in execution, # otherwise 0 is returned. 
proc process_is_running {pid} { # PS should return with an error if PID is non existing, @@ -45,7 +45,7 @@ proc process_is_running {pid} { # # - N commands are sent to the cluster in the course of the test. # - Every command selects a random key from key:0 to key:MAX-1. -# - The operation RPUSH key is perforemd. +# - The operation RPUSH key is performed. # - Tcl remembers into an array all the values pushed to each list. # - After N/2 commands, the resharding process is started in background. # - The test continues while the resharding is in progress. diff --git a/tests/instances.tcl b/tests/instances.tcl index 2199cfcd4..275db45c3 100644 --- a/tests/instances.tcl +++ b/tests/instances.tcl @@ -322,7 +322,7 @@ proc pause_on_error {} { puts "S cmd ... arg Call command in Sentinel ." puts "R cmd ... arg Call command in Redis ." puts "SI Show Sentinel INFO ." - puts "RI Show Sentinel INFO ." + puts "RI Show Redis INFO ." puts "continue Resume test." } else { set errcode [catch {eval $line} retval] diff --git a/tests/integration/replication-4.tcl b/tests/integration/replication-4.tcl index 54891151b..8071c4f97 100644 --- a/tests/integration/replication-4.tcl +++ b/tests/integration/replication-4.tcl @@ -16,7 +16,7 @@ start_server {tags {"repl"}} { s 0 role } {slave} - test {Test replication with parallel clients writing in differnet DBs} { + test {Test replication with parallel clients writing in different DBs} { after 5000 stop_bg_complex_data $load_handle0 stop_bg_complex_data $load_handle1 diff --git a/tests/support/test.tcl b/tests/support/test.tcl index 55937b8f4..23015b3a7 100644 --- a/tests/support/test.tcl +++ b/tests/support/test.tcl @@ -108,7 +108,7 @@ proc test {name code {okpattern undefined} {options undefined}} { return } - # abort if test name in skiptests + # abort if only_tests was set but test name is not included if {[llength $::only_tests] > 0 && [lsearch $::only_tests $name] < 0} { incr ::num_skipped send_data_packet $::test_server_fd skip $name diff --git a/tests/test_helper.tcl b/tests/test_helper.tcl index fe2d484b8..7e1c5c88f 100644 --- a/tests/test_helper.tcl +++ b/tests/test_helper.tcl @@ -471,7 +471,7 @@ proc signal_idle_client fd { # The the_end function gets called when all the test units were already # executed, so the test finished. proc the_end {} { - # TODO: print the status, exit with the rigth exit code. + # TODO: print the status, exit with the right exit code. puts "\n The End\n" puts "Execution time of different units:" foreach {time name} $::clients_time_history { @@ -526,9 +526,10 @@ proc print_help_screen {} { "--stack-logging Enable OSX leaks/malloc stack logging." "--accurate Run slow randomized tests for more iterations." "--quiet Don't show individual tests." - "--single Just execute the specified unit (see next option). this option can be repeated." + "--single Just execute the specified unit (see next option). This option can be repeated." + "--verbose Increases verbosity." "--list-tests List all the available test units." - "--only Just execute the specified test by test name. this option can be repeated." + "--only Just execute the specified test by test name. This option can be repeated." "--skip-till Skip all units until (and including) the specified one." "--skipunit Skip one unit." "--clients Number of test clients (default 16)." 
diff --git a/tests/unit/expire.tcl b/tests/unit/expire.tcl index 52d174d75..444525f36 100644 --- a/tests/unit/expire.tcl +++ b/tests/unit/expire.tcl @@ -72,7 +72,7 @@ start_server {tags {"expire"}} { list [r persist foo] [r persist nokeyatall] } {0 0} - test {EXPIRE pricision is now the millisecond} { + test {EXPIRE precision is now the millisecond} { # This test is very likely to do a false positive if the # server is under pressure, so if it does not work give it a few more # chances. diff --git a/tests/unit/hyperloglog.tcl b/tests/unit/hyperloglog.tcl index 712fcc641..db26a2e75 100644 --- a/tests/unit/hyperloglog.tcl +++ b/tests/unit/hyperloglog.tcl @@ -79,7 +79,7 @@ start_server {tags {"hll"}} { } } - test {Corrupted sparse HyperLogLogs are detected: Additionl at tail} { + test {Corrupted sparse HyperLogLogs are detected: Additional at tail} { r del hll r pfadd hll a b c r append hll "hello" diff --git a/tests/unit/scripting.tcl b/tests/unit/scripting.tcl index 8b364b287..3283edc66 100644 --- a/tests/unit/scripting.tcl +++ b/tests/unit/scripting.tcl @@ -533,7 +533,7 @@ start_server {tags {"scripting"}} { # Note: keep this test at the end of this server stanza because it # kills the server. test {SHUTDOWN NOSAVE can kill a timedout script anyway} { - # The server could be still unresponding to normal commands. + # The server should be still unresponding to normal commands. catch {r ping} e assert_match {BUSY*} $e catch {r shutdown nosave} diff --git a/utils/create-cluster/README b/utils/create-cluster/README index 37a3080db..bcd745977 100644 --- a/utils/create-cluster/README +++ b/utils/create-cluster/README @@ -1,4 +1,4 @@ -Create-custer is a small script used to easily start a big number of Redis +create-cluster is a small script used to easily start a big number of Redis instances configured to run in cluster mode. Its main goal is to allow manual testing in a condition which is not easy to replicate with the Redis cluster unit tests, for example when a lot of instances are needed in order to trigger diff --git a/utils/hashtable/README b/utils/hashtable/README index 87a76c9a5..87ffc2f08 100644 --- a/utils/hashtable/README +++ b/utils/hashtable/README @@ -5,7 +5,7 @@ rehashing.c Visually show buckets in the two hash tables between rehashings. Also stress test getRandomKeys() implementation, that may actually disappear from -Redis soon, however visualization some code is reusable in new bugs +Redis soon, However the visualization code is reusable in new bugs investigation. Compile with: From 410c101439d895b7354fad327e590154451da086 Mon Sep 17 00:00:00 2001 From: Wang Yuan Date: Thu, 10 Sep 2020 22:02:27 +0800 Subject: [PATCH 196/377] Remove dead global variable 'lru_clock' (#7782) (cherry picked from commit 95595d0636d4a993201ca9034fba6e91527b3337) --- src/server.c | 1 - 1 file changed, 1 deletion(-) diff --git a/src/server.c b/src/server.c index cf9dc83ea..06dcc5d9e 100644 --- a/src/server.c +++ b/src/server.c @@ -70,7 +70,6 @@ double R_Zero, R_PosInf, R_NegInf, R_Nan; /* Global vars */ struct redisServer server; /* Server global state */ -volatile unsigned long lru_clock; /* Server global current LRU time. */ /* Our command table. * From c769bc81b685253a91d5a2d38bbb8e2dfa38fd0c Mon Sep 17 00:00:00 2001 From: Oran Agra Date: Sun, 13 Sep 2020 13:50:23 +0300 Subject: [PATCH 197/377] fix broken PEXPIREAT test (#7791) This test was nearly always failing on MacOS github actions. 
This is because of bugs in the test that caused it to nearly always run all 3 attempts and just look at the last one as the pass/fail criteria, i.e. the test was nearly always running all 3 attempts and still sometimes succeeded. This is because the break condition was different from the test completion condition. The reason the test succeeded is that the break condition tested the results of all 3 tests (PSETEX/PEXPIRE/PEXPIREAT), but the success check at the end was only testing the result of PSETEX. The reason the PEXPIREAT test nearly always failed is that it was getting the current time wrong: it took the current second and lost the sub-second part, so the only chance for it to succeed was if it ran right when a certain second started. Because I now get the time from Redis, adding another round trip, I added another 100ms to the PEXPIRE test to make it less fragile, and also added many more attempts before failure, to account for slow platforms, GitHub actions and valgrind (cherry picked from commit 1fd56bb75a9afa5469b3ecb70d394b2adaf9baac) --- tests/unit/expire.tcl | 16 ++++++++++------ 1 file changed, 10 insertions(+), 6 deletions(-) diff --git a/tests/unit/expire.tcl b/tests/unit/expire.tcl index 444525f36..8bcdc16b7 100644 --- a/tests/unit/expire.tcl +++ b/tests/unit/expire.tcl @@ -92,7 +92,7 @@ start_server {tags {"expire"}} { # This test is very likely to do a false positive if the # server is under pressure, so if it does not work give it a few more # chances. - for {set j 0} {$j < 3} {incr j} { + for {set j 0} {$j < 30} {incr j} { r del x y z r psetex x 100 somevalue after 80 @@ -108,18 +108,22 @@ start_server {tags {"expire"}} { set d [r get x] r set x somevalue - r pexpireat x [expr ([clock seconds]*1000)+100] - after 80 + set now [r time] + r pexpireat x [expr ([lindex $now 0]*1000)+([lindex $now 1]/1000)+200] + after 20 set e [r get x] - after 120 + after 220 set f [r get x] if {$a eq {somevalue} && $b eq {} && $c eq {somevalue} && $d eq {} && $e eq {somevalue} && $f eq {}} break } - list $a $b - } {somevalue {}} + if {$::verbose} { + puts "sub-second expire test attempts: $j" + } + list $a $b $c $d $e $f + } {somevalue {} somevalue {} somevalue {}} test {TTL returns time to live in seconds} { r del x From 5097434dee085aae16a70c55dd2ddefcefc9aa10 Mon Sep 17 00:00:00 2001 From: Oran Agra Date: Sun, 13 Sep 2020 13:51:21 +0300 Subject: [PATCH 198/377] Fix failing valgrind installation in github actions (#7792) These tests started failing every day on http 404 (not being able to install valgrind) (cherry picked from commit 9428c1a591472fc87775781e5955aa527c6f1ff0) --- .github/workflows/daily.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/daily.yml b/.github/workflows/daily.yml index 07cd55c87..087b9f2ef 100644 --- a/.github/workflows/daily.yml +++ b/.github/workflows/daily.yml @@ -109,6 +109,7 @@ jobs: run: make valgrind - name: test run: | + sudo apt-get update sudo apt-get install tcl8.5 valgrind -y ./runtest --valgrind --verbose --clients 1 - name: module api test From b4299b0629359e89a34f4c41c33016e37fb0fbf8 Mon Sep 17 00:00:00 2001 From: Mykhailo Pylyp Date: Sun, 13 Sep 2020 18:39:59 +0300 Subject: [PATCH 199/377] Recalculate hardcoded variables from $::instances_count in sentinel tests (#7561) Co-authored-by: MemuraiUser (cherry picked from commit c0a41896dad0396cb6e09ed0bbe72d90cdbf25e0) --- tests/sentinel/tests/06-ckquorum.tcl | 13 +++++++------ tests/sentinel/tests/07-down-conditions.tcl | 13 +++++++------
tests/sentinel/tests/includes/init-tests.tcl | 2 +- 3 files changed, 15 insertions(+), 13 deletions(-) diff --git a/tests/sentinel/tests/06-ckquorum.tcl b/tests/sentinel/tests/06-ckquorum.tcl index 31e5fa2f8..4ea4e55d8 100644 --- a/tests/sentinel/tests/06-ckquorum.tcl +++ b/tests/sentinel/tests/06-ckquorum.tcl @@ -20,15 +20,16 @@ test "CKQUORUM detects quorum cannot be reached" { test "CKQUORUM detects failover authorization cannot be reached" { set orig_quorum [expr {$num_sentinels/2+1}] S 0 SENTINEL SET mymaster quorum 1 - kill_instance sentinel 1 - kill_instance sentinel 2 - kill_instance sentinel 3 + for {set i 0} {$i < $orig_quorum} {incr i} { + kill_instance sentinel [expr {$i + 1}] + } + after 5000 catch {[S 0 SENTINEL CKQUORUM mymaster]} err assert_match "*NOQUORUM*" $err S 0 SENTINEL SET mymaster quorum $orig_quorum - restart_instance sentinel 1 - restart_instance sentinel 2 - restart_instance sentinel 3 + for {set i 0} {$i < $orig_quorum} {incr i} { + restart_instance sentinel [expr {$i + 1}] + } } diff --git a/tests/sentinel/tests/07-down-conditions.tcl b/tests/sentinel/tests/07-down-conditions.tcl index a12ea3151..0a696fa6b 100644 --- a/tests/sentinel/tests/07-down-conditions.tcl +++ b/tests/sentinel/tests/07-down-conditions.tcl @@ -3,9 +3,10 @@ source "../tests/includes/init-tests.tcl" source "../../../tests/support/cli.tcl" +set ::alive_sentinel [expr {$::instances_count/2+2}] proc ensure_master_up {} { wait_for_condition 1000 50 { - [dict get [S 4 sentinel master mymaster] flags] eq "master" + [dict get [S $::alive_sentinel sentinel master mymaster] flags] eq "master" } else { fail "Master flags are not just 'master'" } @@ -14,7 +15,7 @@ proc ensure_master_up {} { proc ensure_master_down {} { wait_for_condition 1000 50 { [string match *down* \ - [dict get [S 4 sentinel master mymaster] flags]] + [dict get [S $::alive_sentinel sentinel master mymaster] flags]] } else { fail "Master is not flagged SDOWN" } @@ -27,7 +28,7 @@ test "Crash the majority of Sentinels to prevent failovers for this unit" { } test "SDOWN is triggered by non-responding but not crashed instance" { - lassign [S 4 SENTINEL GET-MASTER-ADDR-BY-NAME mymaster] host port + lassign [S $::alive_sentinel SENTINEL GET-MASTER-ADDR-BY-NAME mymaster] host port ensure_master_up exec ../../../src/redis-cli -h $host -p $port {*}[rediscli_tls_config "../../../tests"] debug sleep 10 > /dev/null & ensure_master_down @@ -35,7 +36,7 @@ test "SDOWN is triggered by non-responding but not crashed instance" { } test "SDOWN is triggered by crashed instance" { - lassign [S 4 SENTINEL GET-MASTER-ADDR-BY-NAME mymaster] host port + lassign [S $::alive_sentinel SENTINEL GET-MASTER-ADDR-BY-NAME mymaster] host port ensure_master_up kill_instance redis 0 ensure_master_down @@ -72,8 +73,8 @@ test "SDOWN is triggered by misconfigured instance repling with errors" { # effect of the master going down if we send PONG instead of PING test "SDOWN is triggered if we rename PING to PONG" { ensure_master_up - S 4 SENTINEL SET mymaster rename-command PING PONG + S $::alive_sentinel SENTINEL SET mymaster rename-command PING PONG ensure_master_down - S 4 SENTINEL SET mymaster rename-command PING PING + S $::alive_sentinel SENTINEL SET mymaster rename-command PING PING ensure_master_up } diff --git a/tests/sentinel/tests/includes/init-tests.tcl b/tests/sentinel/tests/includes/init-tests.tcl index c8165dcfa..234f9c589 100644 --- a/tests/sentinel/tests/includes/init-tests.tcl +++ b/tests/sentinel/tests/includes/init-tests.tcl @@ -18,7 +18,7 @@ test 
"(init) Remove old master entry from sentinels" { } } -set redis_slaves 4 +set redis_slaves [expr $::instances_count - 1] test "(init) Create a master-slaves cluster of [expr $redis_slaves+1] instances" { create_redis_master_slave_cluster [expr {$redis_slaves+1}] } From 7e8f233e8b95abdc8ef0a17bdd55f64c5c40b303 Mon Sep 17 00:00:00 2001 From: WuYunlong Date: Tue, 15 Sep 2020 13:27:42 +0800 Subject: [PATCH 200/377] Clarify help text of tcl scripts. (#7798) Before this commit, following command did not show --tls option: ./runtest-cluster --help ./runtest-sentinel --help (cherry picked from commit e4a1280a0e6c33d03ec6b622b8159b2b26f0f9c3) --- tests/instances.tcl | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/instances.tcl b/tests/instances.tcl index 275db45c3..5c4b665db 100644 --- a/tests/instances.tcl +++ b/tests/instances.tcl @@ -243,6 +243,7 @@ proc parse_options {} { puts "--pause-on-error Pause for manual inspection on error." puts "--fail Simulate a test failure." puts "--valgrind Run with valgrind." + puts "--tls Run tests in TLS mode." puts "--help Shows this help." exit 0 } else { From 385e3596b50ce3bb6991dbbac71da064030b88dc Mon Sep 17 00:00:00 2001 From: Wen Hui Date: Tue, 15 Sep 2020 01:58:21 -0400 Subject: [PATCH 201/377] correct OBJECT ENCODING response for stream type (#7797) This commit makes stream object returning "stream" as encoding type in OBJECT ENCODING subcommand and DEBUG OBJECT command. Till now, it would return "unknown" (cherry picked from commit 2a8803f534728a6fd1b7c29a2d7e195f6a928f50) --- src/object.c | 1 + 1 file changed, 1 insertion(+) diff --git a/src/object.c b/src/object.c index f8775ea97..6caa71bb9 100644 --- a/src/object.c +++ b/src/object.c @@ -739,6 +739,7 @@ char *strEncoding(int encoding) { case OBJ_ENCODING_INTSET: return "intset"; case OBJ_ENCODING_SKIPLIST: return "skiplist"; case OBJ_ENCODING_EMBSTR: return "embstr"; + case OBJ_ENCODING_STREAM: return "stream"; default: return "unknown"; } } From 4aef590a633fabf6a5bc1f5e03e2373c402ba390 Mon Sep 17 00:00:00 2001 From: WuYunlong Date: Tue, 15 Sep 2020 13:06:47 +0800 Subject: [PATCH 202/377] bio: fix doFastMemoryTest. If one thread got SIGSEGV, function sigsegvHandler() would be triggered, it would call bioKillThreads(). But call pthread_cancel() to cancel itself would make it block. Also note that if SIGSEGV is caught by bio thread, it should kill the main thread in order to give a positive report. 
(cherry picked from commit cf8a6e3c7a0448851f0c00ff1a726701a2be9f1a) --- src/bio.c | 3 ++- src/debug.c | 22 +++++++++++++++++++++- src/server.c | 2 +- src/server.h | 1 + 4 files changed, 25 insertions(+), 3 deletions(-) diff --git a/src/bio.c b/src/bio.c index 69c62fc6f..33465a166 100644 --- a/src/bio.c +++ b/src/bio.c @@ -268,10 +268,11 @@ void bioKillThreads(void) { int err, j; for (j = 0; j < BIO_NUM_OPS; j++) { + if (bio_threads[j] == pthread_self()) continue; if (bio_threads[j] && pthread_cancel(bio_threads[j]) == 0) { if ((err = pthread_join(bio_threads[j],NULL)) != 0) { serverLog(LL_WARNING, - "Bio thread for job type #%d can be joined: %s", + "Bio thread for job type #%d can not be joined: %s", j, strerror(err)); } else { serverLog(LL_WARNING, diff --git a/src/debug.c b/src/debug.c index 921c681a5..178893bae 100644 --- a/src/debug.c +++ b/src/debug.c @@ -1512,6 +1512,26 @@ int memtest_test_linux_anonymous_maps(void) { } #endif +static void killMainThread(void) { + int err; + if (pthread_self() != server.main_thread_id && pthread_cancel(server.main_thread_id) == 0) { + if ((err = pthread_join(server.main_thread_id,NULL)) != 0) { + serverLog(LL_WARNING, "main thread can not be joined: %s", strerror(err)); + } else { + serverLog(LL_WARNING, "main thread terminated"); + } + } +} + +/* Kill the running threads (other than current) in an unclean way. This function + * should be used only when it's critical to stop the threads for some reason. + * Currently Redis does this only on crash (for instance on SIGSEGV) in order + * to perform a fast memory check without other threads messing with memory. */ +static void killThreads(void) { + killMainThread(); + bioKillThreads(); +} + /* Scans the (assumed) x86 code starting at addr, for a max of `len` * bytes, searching for E8 (callq) opcodes, and dumping the symbols * and the call offset if they appear to be valid. */ @@ -1589,7 +1609,7 @@ void sigsegvHandler(int sig, siginfo_t *info, void *secret) { #if defined(HAVE_PROC_MAPS) /* Test memory */ serverLogRaw(LL_WARNING|LL_RAW, "\n------ FAST MEMORY TEST ------\n"); - bioKillThreads(); + killThreads(); if (memtest_test_linux_anonymous_maps()) { serverLogRaw(LL_WARNING|LL_RAW, "!!! MEMORY ERROR DETECTED! Check your memory ASAP !!!\n"); diff --git a/src/server.c b/src/server.c index 06dcc5d9e..37e3c37df 100644 --- a/src/server.c +++ b/src/server.c @@ -2814,6 +2814,7 @@ void initServer(void) { server.aof_state = server.aof_enabled ? AOF_ON : AOF_OFF; server.hz = server.config_hz; server.pid = getpid(); + server.main_thread_id = pthread_self(); server.current_client = NULL; server.fixed_time_expire = 0; server.clients = listCreate(); @@ -5091,7 +5092,6 @@ int iAmMaster(void) { (server.cluster_enabled && nodeIsMaster(server.cluster->myself))); } - int main(int argc, char **argv) { struct timeval tv; int j; diff --git a/src/server.h b/src/server.h index 6a130879e..a1ce26cc2 100644 --- a/src/server.h +++ b/src/server.h @@ -1050,6 +1050,7 @@ struct clusterState; struct redisServer { /* General */ pid_t pid; /* Main process pid. */ + pthread_t main_thread_id; /* Main thread id */ char *configfile; /* Absolute config file path, or NULL */ char *executable; /* Absolute executable file path. */ char **exec_argv; /* Executable argv vector (copy). */ From 201b993840292b920c06658f7df4a5ef94d645f0 Mon Sep 17 00:00:00 2001 From: WuYunlong Date: Wed, 16 Sep 2020 09:58:24 +0800 Subject: [PATCH 203/377] bio: doFastMemoryTest should try to kill io threads as well. 
(cherry picked from commit e9b6077ac798e4d30c9401a3687ffe61568b6eae) --- src/debug.c | 1 + src/networking.c | 17 +++++++++++++++++ src/server.h | 1 + 3 files changed, 19 insertions(+) diff --git a/src/debug.c b/src/debug.c index 178893bae..ae62c0216 100644 --- a/src/debug.c +++ b/src/debug.c @@ -1530,6 +1530,7 @@ static void killMainThread(void) { static void killThreads(void) { killMainThread(); bioKillThreads(); + killIOThreads(); } /* Scans the (assumed) x86 code starting at addr, for a max of `len` diff --git a/src/networking.c b/src/networking.c index 0d290e169..b7d6d6211 100644 --- a/src/networking.c +++ b/src/networking.c @@ -3021,6 +3021,23 @@ void initThreadedIO(void) { } } +void killIOThreads(void) { + int err, j; + for (j = 0; j < server.io_threads_num; j++) { + if (io_threads[j] == pthread_self()) continue; + if (io_threads[j] && pthread_cancel(io_threads[j]) == 0) { + if ((err = pthread_join(io_threads[j],NULL)) != 0) { + serverLog(LL_WARNING, + "IO thread(tid:%lu) can not be joined: %s", + (unsigned long)io_threads[j], strerror(err)); + } else { + serverLog(LL_WARNING, + "IO thread(tid:%lu) terminated",(unsigned long)io_threads[j]); + } + } + } +} + void startThreadedIO(void) { if (tio_debug) { printf("S"); fflush(stdout); } if (tio_debug) printf("--- STARTING THREADED IO ---\n"); diff --git a/src/server.h b/src/server.h index a1ce26cc2..8cf29820d 100644 --- a/src/server.h +++ b/src/server.h @@ -2460,6 +2460,7 @@ int memtest_preserving_test(unsigned long *m, size_t bytes, int passes); void mixDigest(unsigned char *digest, void *ptr, size_t len); void xorDigest(unsigned char *digest, void *ptr, size_t len); int populateCommandTableParseFlags(struct redisCommand *c, char *strflags); +void killIOThreads(void); /* TLS stuff */ void tlsInit(void); From 1cbdafc9804352ae37cb9645cda582d7be0b3d84 Mon Sep 17 00:00:00 2001 From: Oran Agra Date: Wed, 16 Sep 2020 20:21:04 +0300 Subject: [PATCH 204/377] Add printf attribute and fix warnings and a minor bug (#7803) The fix in error handling of rdbGenericLoadStringObject is an actual bugfix (cherry picked from commit 622b57e9eea44e069ad973597bed40107cfbeff0) --- src/rdb.c | 11 +++++++---- src/server.h | 5 +++++ 2 files changed, 12 insertions(+), 4 deletions(-) diff --git a/src/rdb.c b/src/rdb.c index 4bcf96038..a9b262eac 100644 --- a/src/rdb.c +++ b/src/rdb.c @@ -52,6 +52,9 @@ extern int rdbCheckMode; void rdbCheckError(const char *fmt, ...); void rdbCheckSetError(const char *fmt, ...); +#ifdef __GNUC__ +void rdbReportError(int corruption_error, int linenum, char *reason, ...) __attribute__ ((format (printf, 3, 4))); +#endif void rdbReportError(int corruption_error, int linenum, char *reason, ...) { va_list ap; char msg[1024]; @@ -487,7 +490,7 @@ void *rdbGenericLoadStringObject(rio *rdb, int flags, size_t *lenptr) { int plain = flags & RDB_LOAD_PLAIN; int sds = flags & RDB_LOAD_SDS; int isencoded; - uint64_t len; + unsigned long long len; len = rdbLoadLen(rdb,&isencoded); if (isencoded) { @@ -499,8 +502,8 @@ void *rdbGenericLoadStringObject(rio *rdb, int flags, size_t *lenptr) { case RDB_ENC_LZF: return rdbLoadLzfStringObject(rdb,flags,lenptr); default: - rdbExitReportCorruptRDB("Unknown RDB string encoding type %d",len); - return NULL; /* Never reached. 
*/ + rdbExitReportCorruptRDB("Unknown RDB string encoding type %llu",len); + return NULL; } } @@ -2200,7 +2203,7 @@ int rdbLoadRio(rio *rdb, int rdbflags, rdbSaveInfo *rsi) { if (luaCreateFunction(NULL,server.lua,auxval) == NULL) { rdbExitReportCorruptRDB( "Can't load Lua script from RDB file! " - "BODY: %s", auxval->ptr); + "BODY: %s", (char*)auxval->ptr); } } else if (!strcasecmp(auxkey->ptr,"redis-ver")) { serverLog(LL_NOTICE,"Loading RDB produced by version %s", diff --git a/src/server.h b/src/server.h index 8cf29820d..21d506d39 100644 --- a/src/server.h +++ b/src/server.h @@ -2446,7 +2446,12 @@ void *realloc(void *ptr, size_t size) __attribute__ ((deprecated)); /* Debugging stuff */ void _serverAssertWithInfo(const client *c, const robj *o, const char *estr, const char *file, int line); void _serverAssert(const char *estr, const char *file, int line); +#ifdef __GNUC__ +void _serverPanic(const char *file, int line, const char *msg, ...) + __attribute__ ((format (printf, 3, 4))); +#else void _serverPanic(const char *file, int line, const char *msg, ...); +#endif void bugReportStart(void); void serverLogObjectDebugInfo(const robj *o); void sigsegvHandler(int sig, siginfo_t *info, void *secret); From 048816bf27b09bf95c5bc6126bce373288e395f3 Mon Sep 17 00:00:00 2001 From: Wang Yuan Date: Thu, 17 Sep 2020 23:20:10 +0800 Subject: [PATCH 205/377] Remove tmp rdb file in background thread (#7762) We're already using bg_unlink in several places to delete the rdb file in the background, and avoid paying the cost of the deletion from our main thread. This commit uses bg_unlink to remove the temporary rdb file in the background too. However, in case we delete that rdb file just before exiting, we don't actually wait for the background thread or the main thread to delete it, and just let the OS clean up after us. i.e. we open the file, unlink it and exit with the fd still open. Furthermore, rdbRemoveTempFile can be called from a thread and was using snprintf which is not async-signal-safe, we now use ll2string instead. (cherry picked from commit 6638f6129553d0f19c60944e70fe619a4217658c) --- src/rdb.c | 29 +++++++++++++++++++----- src/rdb.h | 2 +- src/server.c | 6 ++++- src/server.h | 1 + tests/test_helper.tcl | 1 + tests/unit/shutdown.tcl | 49 +++++++++++++++++++++++++++++++++++++++++ 6 files changed, 81 insertions(+), 7 deletions(-) create mode 100644 tests/unit/shutdown.tcl diff --git a/src/rdb.c b/src/rdb.c index a9b262eac..7e0d33565 100644 --- a/src/rdb.c +++ b/src/rdb.c @@ -34,6 +34,7 @@ #include "stream.h" #include +#include #include #include #include @@ -1413,11 +1414,29 @@ int rdbSaveBackground(char *filename, rdbSaveInfo *rsi) { return C_OK; /* unreached */ } -void rdbRemoveTempFile(pid_t childpid) { +/* Note that we may call this function in signal handle 'sigShutdownHandler', + * so we need guarantee all functions we call are async-signal-safe. + * If we call this function from signal handle, we won't call bg_unlik that + * is not async-signal-safe. */ +void rdbRemoveTempFile(pid_t childpid, int from_signal) { char tmpfile[256]; + char pid[32]; - snprintf(tmpfile,sizeof(tmpfile),"temp-%d.rdb", (int) childpid); - unlink(tmpfile); + /* Generate temp rdb file name using aync-signal safe functions. 
*/ + int pid_len = ll2string(pid, sizeof(pid), childpid); + strcpy(tmpfile, "temp-"); + strncpy(tmpfile+5, pid, pid_len); + strcpy(tmpfile+5+pid_len, ".rdb"); + + if (from_signal) { + /* bg_unlink is not async-signal-safe, but in this case we don't really + * need to close the fd, it'll be released when the process exists. */ + int fd = open(tmpfile, O_RDONLY|O_NONBLOCK); + UNUSED(fd); + unlink(tmpfile); + } else { + bg_unlink(tmpfile); + } } /* This function is called by rdbLoadObject() when the code is in RDB-check @@ -2420,7 +2439,7 @@ void backgroundSaveDoneHandlerDisk(int exitcode, int bysignal) { serverLog(LL_WARNING, "Background saving terminated by signal %d", bysignal); latencyStartMonitor(latency); - rdbRemoveTempFile(server.rdb_child_pid); + rdbRemoveTempFile(server.rdb_child_pid, 0); latencyEndMonitor(latency); latencyAddSampleIfNeeded("rdb-unlink-temp-file",latency); /* SIGUSR1 is whitelisted, so we have a way to kill a child without @@ -2477,7 +2496,7 @@ void backgroundSaveDoneHandler(int exitcode, int bysignal) { * the cleanup needed. */ void killRDBChild(void) { kill(server.rdb_child_pid,SIGUSR1); - rdbRemoveTempFile(server.rdb_child_pid); + rdbRemoveTempFile(server.rdb_child_pid, 0); closeChildInfoPipe(); updateDictResizePolicy(); } diff --git a/src/rdb.h b/src/rdb.h index aae682dbc..885cf49c6 100644 --- a/src/rdb.h +++ b/src/rdb.h @@ -141,7 +141,7 @@ int rdbLoadObjectType(rio *rdb); int rdbLoad(char *filename, rdbSaveInfo *rsi, int rdbflags); int rdbSaveBackground(char *filename, rdbSaveInfo *rsi); int rdbSaveToSlavesSockets(rdbSaveInfo *rsi); -void rdbRemoveTempFile(pid_t childpid); +void rdbRemoveTempFile(pid_t childpid, int from_signal); int rdbSave(char *filename, rdbSaveInfo *rsi); ssize_t rdbSaveObject(rio *rdb, robj *o, robj *key); size_t rdbSavedObjectLen(robj *o, robj *key); diff --git a/src/server.c b/src/server.c index 37e3c37df..a88ffc6ee 100644 --- a/src/server.c +++ b/src/server.c @@ -3797,6 +3797,10 @@ int prepareForShutdown(int flags) { overwrite the synchronous saving did by SHUTDOWN. */ if (server.rdb_child_pid != -1) { serverLog(LL_WARNING,"There is a child saving an .rdb. Killing it!"); + /* Note that, in killRDBChild, we call rdbRemoveTempFile that will + * do close fd(in order to unlink file actully) in background thread. + * The temp rdb file fd may won't be closed when redis exits quickly, + * but OS will close this fd when process exits. */ killRDBChild(); } @@ -4846,7 +4850,7 @@ static void sigShutdownHandler(int sig) { * on disk. */ if (server.shutdown_asap && sig == SIGINT) { serverLogFromHandler(LL_WARNING, "You insist... exiting now."); - rdbRemoveTempFile(getpid()); + rdbRemoveTempFile(getpid(), 1); exit(1); /* Exit with an error since this was not a clean shutdown. */ } else if (server.loading) { serverLogFromHandler(LL_WARNING, "Received shutdown signal during loading, exiting now."); diff --git a/src/server.h b/src/server.h index 21d506d39..4bbe59703 100644 --- a/src/server.h +++ b/src/server.h @@ -1865,6 +1865,7 @@ int writeCommandsDeniedByDiskError(void); /* RDB persistence */ #include "rdb.h" void killRDBChild(void); +int bg_unlink(const char *filename); /* AOF persistence */ void flushAppendOnlyFile(int force); diff --git a/tests/test_helper.tcl b/tests/test_helper.tcl index 7e1c5c88f..b60adb881 100644 --- a/tests/test_helper.tcl +++ b/tests/test_helper.tcl @@ -69,6 +69,7 @@ set ::all_tests { unit/tls unit/tracking unit/oom-score-adj + unit/shutdown } # Index to the next test to run in the ::all_tests list. 
set ::next_test 0 diff --git a/tests/unit/shutdown.tcl b/tests/unit/shutdown.tcl new file mode 100644 index 000000000..21ea8545d --- /dev/null +++ b/tests/unit/shutdown.tcl @@ -0,0 +1,49 @@ +start_server {tags {"shutdown"}} { + test {Temp rdb will be deleted if we use bg_unlink when shutdown} { + for {set i 0} {$i < 20} {incr i} { + r set $i $i + } + # It will cost 2s(20 * 100ms) to dump rdb + r config set rdb-key-save-delay 100000 + + # Child is dumping rdb + r bgsave + after 100 + set dir [lindex [r config get dir] 1] + set child_pid [get_child_pid 0] + set temp_rdb [file join [lindex [r config get dir] 1] temp-${child_pid}.rdb] + # Temp rdb must be existed + assert {[file exists $temp_rdb]} + + catch {r shutdown nosave} + # Make sure the server was killed + catch {set rd [redis_deferring_client]} e + assert_match {*connection refused*} $e + + # Temp rdb file must be deleted + assert {![file exists $temp_rdb]} + } +} + +start_server {tags {"shutdown"}} { + test {Temp rdb will be deleted in signal handle} { + for {set i 0} {$i < 20} {incr i} { + r set $i $i + } + # It will cost 2s(20 * 100ms) to dump rdb + r config set rdb-key-save-delay 100000 + + set pid [s process_id] + set temp_rdb [file join [lindex [r config get dir] 1] temp-${pid}.rdb] + + exec kill -SIGINT $pid + after 100 + # Temp rdb must be existed + assert {[file exists $temp_rdb]} + + # Temp rdb file must be deleted + exec kill -SIGINT $pid + after 100 + assert {![file exists $temp_rdb]} + } +} From 57e1dbff57c11d0c2144ae4688d2beb45fc9ebb5 Mon Sep 17 00:00:00 2001 From: David CARLIER Date: Sat, 19 Sep 2020 10:24:40 +0100 Subject: [PATCH 206/377] debug.c: NetBSD build warning fix. (#7810) The symbol base address is a const on this system. (cherry picked from commit c9edb477921d2fbf80c8ffef0882fbd0281675fa) --- src/debug.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/src/debug.c b/src/debug.c index ae62c0216..0f8760ef6 100644 --- a/src/debug.c +++ b/src/debug.c @@ -1638,13 +1638,14 @@ void sigsegvHandler(int sig, siginfo_t *info, void *secret) { /* Find the address of the next page, which is our "safety" * limit when dumping. Then try to dump just 128 bytes more * than EIP if there is room, or stop sooner. */ + void *base = (void *)info.dli_saddr; unsigned long next = ((unsigned long)eip + sz) & ~(sz-1); unsigned long end = (unsigned long)eip + 128; if (end > next) end = next; - len = end - (unsigned long)info.dli_saddr; + len = end - (unsigned long)base; serverLogHexDump(LL_WARNING, "dump of function", - info.dli_saddr ,len); - dumpX86Calls(info.dli_saddr,len); + base ,len); + dumpX86Calls(base,len); } } } From 064992af62b92c74735f599c50c0b9aba688c39a Mon Sep 17 00:00:00 2001 From: Daniel Dai <764122422@qq.com> Date: Sun, 20 Sep 2020 05:06:17 -0400 Subject: [PATCH 207/377] fix make warnings in debug.c MacOS (#7805) Co-authored-by: Oran Agra (cherry picked from commit 6d46a8e2163750f707f9d36889d5fdf514132a69) --- src/debug.c | 4 ++-- src/server.h | 1 + 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/src/debug.c b/src/debug.c index 0f8760ef6..e64ec1b78 100644 --- a/src/debug.c +++ b/src/debug.c @@ -1510,7 +1510,7 @@ int memtest_test_linux_anonymous_maps(void) { closeDirectLogFiledes(fd); return errors; } -#endif +#endif /* HAVE_PROC_MAPS */ static void killMainThread(void) { int err; @@ -1527,7 +1527,7 @@ static void killMainThread(void) { * should be used only when it's critical to stop the threads for some reason. 
* Currently Redis does this only on crash (for instance on SIGSEGV) in order * to perform a fast memory check without other threads messing with memory. */ -static void killThreads(void) { +void killThreads(void) { killMainThread(); bioKillThreads(); killIOThreads(); diff --git a/src/server.h b/src/server.h index 4bbe59703..ba470c303 100644 --- a/src/server.h +++ b/src/server.h @@ -2467,6 +2467,7 @@ void mixDigest(unsigned char *digest, void *ptr, size_t len); void xorDigest(unsigned char *digest, void *ptr, size_t len); int populateCommandTableParseFlags(struct redisCommand *c, char *strflags); void killIOThreads(void); +void killThreads(void); /* TLS stuff */ void tlsInit(void); From d3f36e93a97879896ec923686b855ce3d976b075 Mon Sep 17 00:00:00 2001 From: Wen Hui Date: Sun, 20 Sep 2020 06:36:20 -0400 Subject: [PATCH 208/377] Add Swapdb Module Event (#7804) (cherry picked from commit 0db3223bc6090556c920912d9c92dd42878e316c) --- src/db.c | 2 ++ src/module.c | 16 ++++++++++++++++ src/redismodule.h | 16 ++++++++++++++++ tests/modules/hooks.c | 12 ++++++++++++ tests/unit/moduleapi/hooks.tcl | 6 ++++++ 5 files changed, 52 insertions(+) diff --git a/src/db.c b/src/db.c index 9efda0907..7ed746f9a 100644 --- a/src/db.c +++ b/src/db.c @@ -1163,6 +1163,8 @@ void swapdbCommand(client *c) { addReplyError(c,"DB index is out of range"); return; } else { + RedisModuleSwapDbInfo si = {REDISMODULE_SWAPDBINFO_VERSION,id1,id2}; + moduleFireServerEvent(REDISMODULE_EVENT_SWAPDB,0,&si); server.dirty++; addReply(c,shared.ok); } diff --git a/src/module.c b/src/module.c index bd75c8f92..5dd845de0 100644 --- a/src/module.c +++ b/src/module.c @@ -7179,6 +7179,20 @@ void ModuleForkDoneHandler(int exitcode, int bysignal) { * int32_t progress; // Approximate progress between 0 and 1024, * or -1 if unknown. * + * RedisModuleEvent_SwapDB + * + * This event is called when a swap db command has been successfully + * Executed. + * For this event call currently there is no subevents available. + * + * The data pointer can be casted to a RedisModuleSwapDbInfo + * structure with the following fields: + * + * int32_t dbnum_first; // Swap Db first dbnum + * int32_t dbnum_second; // Swap Db second dbnum + * + * + * * The function returns REDISMODULE_OK if the module was successfully subscribed * for the specified event. If the API is called from a wrong context then * REDISMODULE_ERR is returned. */ @@ -7283,6 +7297,8 @@ void moduleFireServerEvent(uint64_t eid, int subid, void *data) { moduledata = data; } else if (eid == REDISMODULE_EVENT_CRON_LOOP) { moduledata = data; + } else if (eid == REDISMODULE_EVENT_SWAPDB) { + moduledata = data; } ModulesInHooks++; diff --git a/src/redismodule.h b/src/redismodule.h index 4a0e5bf15..56011fae0 100644 --- a/src/redismodule.h +++ b/src/redismodule.h @@ -190,6 +190,7 @@ typedef uint64_t RedisModuleTimerID; #define REDISMODULE_EVENT_CRON_LOOP 8 #define REDISMODULE_EVENT_MODULE_CHANGE 9 #define REDISMODULE_EVENT_LOADING_PROGRESS 10 +#define REDISMODULE_EVENT_SWAPDB 11 typedef struct RedisModuleEvent { uint64_t id; /* REDISMODULE_EVENT_... defines. */ @@ -243,6 +244,10 @@ static const RedisModuleEvent RedisModuleEvent_LoadingProgress = { REDISMODULE_EVENT_LOADING_PROGRESS, 1 + }, + RedisModuleEvent_SwapDB = { + REDISMODULE_EVENT_SWAPDB, + 1 }; /* Those are values that are used for the 'subevent' callback argument. 
*/ @@ -374,6 +379,17 @@ typedef struct RedisModuleLoadingProgressInfo { #define RedisModuleLoadingProgress RedisModuleLoadingProgressV1 +#define REDISMODULE_SWAPDBINFO_VERSION 1 +typedef struct RedisModuleSwapDbInfo { + uint64_t version; /* Not used since this structure is never passed + from the module to the core right now. Here + for future compatibility. */ + int32_t dbnum_first; /* Swap Db first dbnum */ + int32_t dbnum_second; /* Swap Db second dbnum */ +} RedisModuleSwapDbInfoV1; + +#define RedisModuleSwapDbInfo RedisModuleSwapDbInfoV1 + /* ------------------------- End of common defines ------------------------ */ #ifndef REDISMODULE_CORE diff --git a/tests/modules/hooks.c b/tests/modules/hooks.c index 665a20481..54f84aa23 100644 --- a/tests/modules/hooks.c +++ b/tests/modules/hooks.c @@ -253,6 +253,16 @@ void moduleChangeCallback(RedisModuleCtx *ctx, RedisModuleEvent e, uint64_t sub, LogStringEvent(ctx, keyname, ei->module_name); } +void swapDbCallback(RedisModuleCtx *ctx, RedisModuleEvent e, uint64_t sub, void *data) +{ + REDISMODULE_NOT_USED(e); + REDISMODULE_NOT_USED(sub); + + RedisModuleSwapDbInfo *ei = data; + LogNumericEvent(ctx, "swapdb-first", ei->dbnum_first); + LogNumericEvent(ctx, "swapdb-second", ei->dbnum_second); +} + /* This function must be present on each Redis module. It is used in order to * register the commands into the Redis server. */ int RedisModule_OnLoad(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) { @@ -289,6 +299,8 @@ int RedisModule_OnLoad(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) RedisModuleEvent_CronLoop, cronLoopCallback); RedisModule_SubscribeToServerEvent(ctx, RedisModuleEvent_ModuleChange, moduleChangeCallback); + RedisModule_SubscribeToServerEvent(ctx, + RedisModuleEvent_SwapDB, swapDbCallback); event_log = RedisModule_CreateDict(ctx); diff --git a/tests/unit/moduleapi/hooks.tcl b/tests/unit/moduleapi/hooks.tcl index da0307ce6..c4af59bd2 100644 --- a/tests/unit/moduleapi/hooks.tcl +++ b/tests/unit/moduleapi/hooks.tcl @@ -147,6 +147,12 @@ tags "modules" { set replica_stdout [srv 0 stdout] } + test {Test swapdb hooks} { + r swapdb 0 10 + assert_equal [r hooks.event_last swapdb-first] 0 + assert_equal [r hooks.event_last swapdb-second] 10 + + } # look into the log file of the server that just exited test {Test shutdown hook} { From f216bf312ae70dd62b447448e9c003df3b940dcd Mon Sep 17 00:00:00 2001 From: Oran Agra Date: Sun, 20 Sep 2020 13:43:28 +0300 Subject: [PATCH 209/377] RM_GetContextFlags provides indication that we're in a fork child (#7783) (cherry picked from commit 0b476b591d8b92e88ec56675e747de23968eeae0) --- src/aof.c | 4 ++-- src/childinfo.c | 6 +++--- src/module.c | 5 +++-- src/rdb.c | 8 ++++---- src/redismodule.h | 2 ++ src/scripting.c | 2 +- src/server.c | 8 ++++++-- src/server.h | 11 +++++++---- 8 files changed, 28 insertions(+), 18 deletions(-) diff --git a/src/aof.c b/src/aof.c index dc50e2228..757c68807 100644 --- a/src/aof.c +++ b/src/aof.c @@ -1603,7 +1603,7 @@ int rewriteAppendOnlyFileBackground(void) { if (hasActiveChildProcess()) return C_ERR; if (aofCreatePipes() != C_OK) return C_ERR; openChildInfoPipe(); - if ((childpid = redisFork()) == 0) { + if ((childpid = redisFork(CHILD_TYPE_AOF)) == 0) { char tmpfile[256]; /* Child */ @@ -1611,7 +1611,7 @@ int rewriteAppendOnlyFileBackground(void) { redisSetCpuAffinity(server.aof_rewrite_cpulist); snprintf(tmpfile,256,"temp-rewriteaof-bg-%d.aof", (int) getpid()); if (rewriteAppendOnlyFile(tmpfile) == C_OK) { - sendChildCOWInfo(CHILD_INFO_TYPE_AOF, "AOF 
rewrite"); + sendChildCOWInfo(CHILD_TYPE_AOF, "AOF rewrite"); exitFromChild(0); } else { exitFromChild(1); diff --git a/src/childinfo.c b/src/childinfo.c index fa0600552..f95ae9647 100644 --- a/src/childinfo.c +++ b/src/childinfo.c @@ -76,11 +76,11 @@ void receiveChildInfo(void) { if (read(server.child_info_pipe[0],&server.child_info_data,wlen) == wlen && server.child_info_data.magic == CHILD_INFO_MAGIC) { - if (server.child_info_data.process_type == CHILD_INFO_TYPE_RDB) { + if (server.child_info_data.process_type == CHILD_TYPE_RDB) { server.stat_rdb_cow_bytes = server.child_info_data.cow_size; - } else if (server.child_info_data.process_type == CHILD_INFO_TYPE_AOF) { + } else if (server.child_info_data.process_type == CHILD_TYPE_AOF) { server.stat_aof_cow_bytes = server.child_info_data.cow_size; - } else if (server.child_info_data.process_type == CHILD_INFO_TYPE_MODULE) { + } else if (server.child_info_data.process_type == CHILD_TYPE_MODULE) { server.stat_module_cow_bytes = server.child_info_data.cow_size; } } diff --git a/src/module.c b/src/module.c index 5dd845de0..4cb4cfe20 100644 --- a/src/module.c +++ b/src/module.c @@ -1996,6 +1996,7 @@ int RM_GetContextFlags(RedisModuleCtx *ctx) { /* Presence of children processes. */ if (hasActiveChildProcess()) flags |= REDISMODULE_CTX_FLAGS_ACTIVE_CHILD; + if (server.in_fork_child) flags |= REDISMODULE_CTX_FLAGS_IS_CHILD; return flags; } @@ -6904,7 +6905,7 @@ int RM_Fork(RedisModuleForkDoneHandler cb, void *user_data) { } openChildInfoPipe(); - if ((childpid = redisFork()) == 0) { + if ((childpid = redisFork(CHILD_TYPE_MODULE)) == 0) { /* Child */ redisSetProcTitle("redis-module-fork"); } else if (childpid == -1) { @@ -6924,7 +6925,7 @@ int RM_Fork(RedisModuleForkDoneHandler cb, void *user_data) { * retcode will be provided to the done handler executed on the parent process. */ int RM_ExitFromChild(int retcode) { - sendChildCOWInfo(CHILD_INFO_TYPE_MODULE, "Module fork"); + sendChildCOWInfo(CHILD_TYPE_MODULE, "Module fork"); exitFromChild(retcode); return REDISMODULE_OK; } diff --git a/src/rdb.c b/src/rdb.c index 7e0d33565..fe9397624 100644 --- a/src/rdb.c +++ b/src/rdb.c @@ -1385,7 +1385,7 @@ int rdbSaveBackground(char *filename, rdbSaveInfo *rsi) { server.lastbgsave_try = time(NULL); openChildInfoPipe(); - if ((childpid = redisFork()) == 0) { + if ((childpid = redisFork(CHILD_TYPE_RDB)) == 0) { int retval; /* Child */ @@ -1393,7 +1393,7 @@ int rdbSaveBackground(char *filename, rdbSaveInfo *rsi) { redisSetCpuAffinity(server.bgsave_cpulist); retval = rdbSave(filename,rsi); if (retval == C_OK) { - sendChildCOWInfo(CHILD_INFO_TYPE_RDB, "RDB"); + sendChildCOWInfo(CHILD_TYPE_RDB, "RDB"); } exitFromChild((retval == C_OK) ? 0 : 1); } else { @@ -2540,7 +2540,7 @@ int rdbSaveToSlavesSockets(rdbSaveInfo *rsi) { /* Create the child process. */ openChildInfoPipe(); - if ((childpid = redisFork()) == 0) { + if ((childpid = redisFork(CHILD_TYPE_RDB)) == 0) { /* Child */ int retval; rio rdb; @@ -2555,7 +2555,7 @@ int rdbSaveToSlavesSockets(rdbSaveInfo *rsi) { retval = C_ERR; if (retval == C_OK) { - sendChildCOWInfo(CHILD_INFO_TYPE_RDB, "RDB"); + sendChildCOWInfo(CHILD_TYPE_RDB, "RDB"); } rioFreeFd(&rdb); diff --git a/src/redismodule.h b/src/redismodule.h index 56011fae0..c0eedc221 100644 --- a/src/redismodule.h +++ b/src/redismodule.h @@ -112,6 +112,8 @@ #define REDISMODULE_CTX_FLAGS_ACTIVE_CHILD (1<<18) /* The next EXEC will fail due to dirty CAS (touched keys). 
*/ #define REDISMODULE_CTX_FLAGS_MULTI_DIRTY (1<<19) +/* Redis is currently running inside background child process. */ +#define REDISMODULE_CTX_FLAGS_IS_CHILD (1<<20) /* Keyspace changes notification classes. Every class is associated with a * character for configuration purposes. diff --git a/src/scripting.c b/src/scripting.c index e43472b3a..6beb6cdbf 100644 --- a/src/scripting.c +++ b/src/scripting.c @@ -1856,7 +1856,7 @@ void ldbSendLogs(void) { int ldbStartSession(client *c) { ldb.forked = (c->flags & CLIENT_LUA_DEBUG_SYNC) == 0; if (ldb.forked) { - pid_t cp = redisFork(); + pid_t cp = redisFork(CHILD_TYPE_LDB); if (cp == -1) { addReplyError(c,"Fork() failed: can't run EVAL in debugging mode."); return 0; diff --git a/src/server.c b/src/server.c index a88ffc6ee..ed416fb4c 100644 --- a/src/server.c +++ b/src/server.c @@ -2814,6 +2814,7 @@ void initServer(void) { server.aof_state = server.aof_enabled ? AOF_ON : AOF_OFF; server.hz = server.config_hz; server.pid = getpid(); + server.in_fork_child = CHILD_TYPE_NONE; server.main_thread_id = pthread_self(); server.current_client = NULL; server.fixed_time_expire = 0; @@ -4890,7 +4891,8 @@ void setupSignalHandlers(void) { * accepting writes because of a write error condition. */ static void sigKillChildHandler(int sig) { UNUSED(sig); - serverLogFromHandler(LL_WARNING, "Received SIGUSR1 in child, exiting now."); + int level = server.in_fork_child == CHILD_TYPE_MODULE? LL_VERBOSE: LL_WARNING; + serverLogFromHandler(level, "Received SIGUSR1 in child, exiting now."); exitFromChild(SERVER_CHILD_NOERROR_RETVAL); } @@ -4916,11 +4918,13 @@ void closeClildUnusedResourceAfterFork() { close(server.cluster_config_file_lock_fd); /* don't care if this fails */ } -int redisFork() { +/* purpose is one of CHILD_TYPE_ types */ +int redisFork(int purpose) { int childpid; long long start = ustime(); if ((childpid = fork()) == 0) { /* Child */ + server.in_fork_child = purpose; setOOMScoreAdj(CONFIG_OOM_BGCHILD); setupChildSignalHandlers(); closeClildUnusedResourceAfterFork(); diff --git a/src/server.h b/src/server.h index ba470c303..66d373944 100644 --- a/src/server.h +++ b/src/server.h @@ -1043,9 +1043,11 @@ struct clusterState; #endif #define CHILD_INFO_MAGIC 0xC17DDA7A12345678LL -#define CHILD_INFO_TYPE_RDB 0 -#define CHILD_INFO_TYPE_AOF 1 -#define CHILD_INFO_TYPE_MODULE 3 +#define CHILD_TYPE_NONE 0 +#define CHILD_TYPE_RDB 1 +#define CHILD_TYPE_AOF 2 +#define CHILD_TYPE_LDB 3 +#define CHILD_TYPE_MODULE 4 struct redisServer { /* General */ @@ -1059,6 +1061,7 @@ struct redisServer { the actual 'hz' field value if dynamic-hz is enabled. */ int hz; /* serverCron() calls frequency in hertz */ + int in_fork_child; /* indication that this is a fork child */ redisDb *db; dict *commands; /* Command table */ dict *orig_commands; /* Command table before command renaming. */ @@ -1889,7 +1892,7 @@ void sendChildInfo(int process_type); void receiveChildInfo(void); /* Fork helpers */ -int redisFork(); +int redisFork(int type); int hasActiveChildProcess(); void sendChildCOWInfo(int ptype, char *pname); From 4a3330c9412d98310c8312f46d56fa55623bbffb Mon Sep 17 00:00:00 2001 From: WuYunlong Date: Fri, 18 Sep 2020 16:08:52 +0800 Subject: [PATCH 210/377] Make main thread killable so that it can be canceled at any time. Refine comment of makeThreadKillable(). This commit can be backported to 5.0, only if we also backport cf8a6e3. 
Co-authored-by: Oran Agra (cherry picked from commit d2291627305d606a5d3b1e3b3bfa17ab10a3ef32) --- src/bio.c | 5 +---- src/server.c | 9 +++++++++ src/server.h | 1 + 3 files changed, 11 insertions(+), 4 deletions(-) diff --git a/src/bio.c b/src/bio.c index 33465a166..ff1108799 100644 --- a/src/bio.c +++ b/src/bio.c @@ -168,10 +168,7 @@ void *bioProcessBackgroundJobs(void *arg) { redisSetCpuAffinity(server.bio_cpulist); - /* Make the thread killable at any time, so that bioKillThreads() - * can work reliably. */ - pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, NULL); - pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, NULL); + makeThreadKillable(); pthread_mutex_lock(&bio_mutex[type]); /* Block SIGALRM so we are sure that only the main thread will diff --git a/src/server.c b/src/server.c index ed416fb4c..5face48bb 100644 --- a/src/server.c +++ b/src/server.c @@ -2798,12 +2798,21 @@ void resetServerStats(void) { server.aof_delayed_fsync = 0; } +/* Make the thread killable at any time, so that kill threads functions + * can work reliably (default cancelability type is PTHREAD_CANCEL_DEFERRED). + * Needed for pthread_cancel used by the fast memory test used by the crash report. */ +void makeThreadKillable(void) { + pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, NULL); + pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, NULL); +} + void initServer(void) { int j; signal(SIGHUP, SIG_IGN); signal(SIGPIPE, SIG_IGN); setupSignalHandlers(); + makeThreadKillable(); if (server.syslog_enabled) { openlog(server.syslog_ident, LOG_PID | LOG_NDELAY | LOG_NOWAIT, diff --git a/src/server.h b/src/server.h index 66d373944..3317092a0 100644 --- a/src/server.h +++ b/src/server.h @@ -2471,6 +2471,7 @@ void xorDigest(unsigned char *digest, void *ptr, size_t len); int populateCommandTableParseFlags(struct redisCommand *c, char *strflags); void killIOThreads(void); void killThreads(void); +void makeThreadKillable(void); /* TLS stuff */ void tlsInit(void); From 5021ed7c458fa8d995050e2480835f2cfac1de10 Mon Sep 17 00:00:00 2001 From: WuYunlong Date: Fri, 18 Sep 2020 16:10:54 +0800 Subject: [PATCH 211/377] Make IO threads killable so that they can be canceled at any time. This commit can be cherry picked to 6.0 only if we also cherry pick e9b6077. (cherry picked from commit 6c6ab16e5a31c09a6ea09f1b4638c121e610566a) --- src/networking.c | 1 + 1 file changed, 1 insertion(+) diff --git a/src/networking.c b/src/networking.c index b7d6d6211..e738d1c92 100644 --- a/src/networking.c +++ b/src/networking.c @@ -2948,6 +2948,7 @@ void *IOThreadMain(void *myid) { snprintf(thdname, sizeof(thdname), "io_thd_%ld", id); redis_set_thread_title(thdname); redisSetCpuAffinity(server.server_cpulist); + makeThreadKillable(); while(1) { /* Wait for start */ From 8f4ad687afaf56de7324ec6ee7fff533c8da4062 Mon Sep 17 00:00:00 2001 From: Wen Hui Date: Tue, 22 Sep 2020 02:05:47 -0400 Subject: [PATCH 212/377] refactor rewriteStreamObject code for adding missing streamIteratorStop call (#7829) This commit adds streamIteratorStop call in rewriteStreamObject function in some of the return statement. Although currently this will not cause memory leak since stream id is only 16 bytes long. 
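For illustration, the shape of the fix (full diff below) is the usual early-return cleanup pattern: every exit path taken after streamIteratorStart() must call streamIteratorStop() first. A schematic sketch, not the literal patched code:

    int rewriteStreamSketch(rio *r, stream *s) {
        streamIterator si;
        streamIteratorStart(&si,s,NULL,NULL,0);
        streamID id;
        int64_t numfields;
        while (streamIteratorGetID(&si,&id,&numfields)) {
            if (rioWriteBulkStreamID(r,&id) == 0) {
                streamIteratorStop(&si);  /* release before bailing out */
                return 0;
            }
        }
        streamIteratorStop(&si);
        return 1;
    }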
(cherry picked from commit 7934f163b4b6c1c0c0fc55710d3c7e49f56281f1) --- src/aof.c | 54 ++++++++++++++++++++++++++++++++++++------------------ 1 file changed, 36 insertions(+), 18 deletions(-) diff --git a/src/aof.c b/src/aof.c index 757c68807..2114a17e4 100644 --- a/src/aof.c +++ b/src/aof.c @@ -1201,16 +1201,24 @@ int rewriteStreamObject(rio *r, robj *key, robj *o) { * the ID, the second is an array of field-value pairs. */ /* Emit the XADD ...fields... command. */ - if (rioWriteBulkCount(r,'*',3+numfields*2) == 0) return 0; - if (rioWriteBulkString(r,"XADD",4) == 0) return 0; - if (rioWriteBulkObject(r,key) == 0) return 0; - if (rioWriteBulkStreamID(r,&id) == 0) return 0; + if (!rioWriteBulkCount(r,'*',3+numfields*2) || + !rioWriteBulkString(r,"XADD",4) || + !rioWriteBulkObject(r,key) || + !rioWriteBulkStreamID(r,&id)) + { + streamIteratorStop(&si); + return 0; + } while(numfields--) { unsigned char *field, *value; int64_t field_len, value_len; streamIteratorGetField(&si,&field,&value,&field_len,&value_len); - if (rioWriteBulkString(r,(char*)field,field_len) == 0) return 0; - if (rioWriteBulkString(r,(char*)value,value_len) == 0) return 0; + if (!rioWriteBulkString(r,(char*)field,field_len) || + !rioWriteBulkString(r,(char*)value,value_len)) + { + streamIteratorStop(&si); + return 0; + } } } } else { @@ -1218,22 +1226,30 @@ int rewriteStreamObject(rio *r, robj *key, robj *o) { * the key we are serializing is an empty string, which is possible * for the Stream type. */ id.ms = 0; id.seq = 1; - if (rioWriteBulkCount(r,'*',7) == 0) return 0; - if (rioWriteBulkString(r,"XADD",4) == 0) return 0; - if (rioWriteBulkObject(r,key) == 0) return 0; - if (rioWriteBulkString(r,"MAXLEN",6) == 0) return 0; - if (rioWriteBulkString(r,"0",1) == 0) return 0; - if (rioWriteBulkStreamID(r,&id) == 0) return 0; - if (rioWriteBulkString(r,"x",1) == 0) return 0; - if (rioWriteBulkString(r,"y",1) == 0) return 0; + if (!rioWriteBulkCount(r,'*',7) || + !rioWriteBulkString(r,"XADD",4) || + !rioWriteBulkObject(r,key) || + !rioWriteBulkString(r,"MAXLEN",6) || + !rioWriteBulkString(r,"0",1) || + !rioWriteBulkStreamID(r,&id) || + !rioWriteBulkString(r,"x",1) || + !rioWriteBulkString(r,"y",1)) + { + streamIteratorStop(&si); + return 0; + } } /* Append XSETID after XADD, make sure lastid is correct, * in case of XDEL lastid. */ - if (rioWriteBulkCount(r,'*',3) == 0) return 0; - if (rioWriteBulkString(r,"XSETID",6) == 0) return 0; - if (rioWriteBulkObject(r,key) == 0) return 0; - if (rioWriteBulkStreamID(r,&s->last_id) == 0) return 0; + if (!rioWriteBulkCount(r,'*',3) || + !rioWriteBulkString(r,"XSETID",6) || + !rioWriteBulkObject(r,key) || + !rioWriteBulkStreamID(r,&s->last_id)) + { + streamIteratorStop(&si); + return 0; + } /* Create all the stream consumer groups. */ @@ -1252,6 +1268,7 @@ int rewriteStreamObject(rio *r, robj *key, robj *o) { !rioWriteBulkStreamID(r,&group->last_id)) { raxStop(&ri); + streamIteratorStop(&si); return 0; } @@ -1277,6 +1294,7 @@ int rewriteStreamObject(rio *r, robj *key, robj *o) { raxStop(&ri_pel); raxStop(&ri_cons); raxStop(&ri); + streamIteratorStop(&si); return 0; } } From 29f6e9fe9593a237ddf82e695db37dff90ecc6b8 Mon Sep 17 00:00:00 2001 From: Ariel Shtul Date: Tue, 22 Sep 2020 10:18:07 +0300 Subject: [PATCH 213/377] Fix redis-check-rdb support for modules aux data (#7826) redis-check-rdb was unable to parse rdb files containing module aux data. 
Co-authored-by: Oran Agra (cherry picked from commit b914d4fc4825cc20cebca43431af5029ee077d09) --- src/rdb.c | 2 ++ src/rdb.h | 1 + src/redis-check-rdb.c | 19 ++++++++++++++++++- 3 files changed, 21 insertions(+), 1 deletion(-) diff --git a/src/rdb.c b/src/rdb.c index fe9397624..5a6be6e38 100644 --- a/src/rdb.c +++ b/src/rdb.c @@ -1146,6 +1146,8 @@ ssize_t rdbSaveSingleModuleAux(rio *rdb, int when, moduleType *mt) { /* Save a module-specific aux value. */ RedisModuleIO io; int retval = rdbSaveType(rdb, RDB_OPCODE_MODULE_AUX); + if (retval == -1) return -1; + io.bytes += retval; /* Write the "module" identifier as prefix, so that we'll be able * to call the right module during loading. */ diff --git a/src/rdb.h b/src/rdb.h index 885cf49c6..f22fbecd1 100644 --- a/src/rdb.h +++ b/src/rdb.h @@ -149,6 +149,7 @@ robj *rdbLoadObject(int type, rio *rdb, sds key); void backgroundSaveDoneHandler(int exitcode, int bysignal); int rdbSaveKeyValuePair(rio *rdb, robj *key, robj *val, long long expiretime); ssize_t rdbSaveSingleModuleAux(rio *rdb, int when, moduleType *mt); +robj *rdbLoadCheckModuleValue(rio *rdb, char *modulename); robj *rdbLoadStringObject(rio *rdb); ssize_t rdbSaveStringObject(rio *rdb, robj *obj); ssize_t rdbSaveRawString(rio *rdb, unsigned char *s, size_t len); diff --git a/src/redis-check-rdb.c b/src/redis-check-rdb.c index 592feaf42..a9d110aa8 100644 --- a/src/redis-check-rdb.c +++ b/src/redis-check-rdb.c @@ -58,6 +58,7 @@ struct { #define RDB_CHECK_DOING_CHECK_SUM 5 #define RDB_CHECK_DOING_READ_LEN 6 #define RDB_CHECK_DOING_READ_AUX 7 +#define RDB_CHECK_DOING_READ_MODULE_AUX 8 char *rdb_check_doing_string[] = { "start", @@ -67,7 +68,8 @@ char *rdb_check_doing_string[] = { "read-object-value", "check-sum", "read-len", - "read-aux" + "read-aux", + "read-module-aux" }; char *rdb_type_string[] = { @@ -272,6 +274,21 @@ int redis_check_rdb(char *rdbfilename, FILE *fp) { decrRefCount(auxkey); decrRefCount(auxval); continue; /* Read type again. */ + } else if (type == RDB_OPCODE_MODULE_AUX) { + /* AUX: Auxiliary data for modules. */ + uint64_t moduleid, when_opcode, when; + rdbstate.doing = RDB_CHECK_DOING_READ_MODULE_AUX; + if ((moduleid = rdbLoadLen(&rdb,NULL)) == RDB_LENERR) goto eoferr; + if ((when_opcode = rdbLoadLen(&rdb,NULL)) == RDB_LENERR) goto eoferr; + if ((when = rdbLoadLen(&rdb,NULL)) == RDB_LENERR) goto eoferr; + + char name[10]; + moduleTypeNameByID(name,moduleid); + rdbCheckInfo("MODULE AUX for: %s", name); + + robj *o = rdbLoadCheckModuleValue(&rdb,name); + decrRefCount(o); + continue; /* Read type again. */ } else { if (!rdbIsObjectType(type)) { rdbCheckError("Invalid object type: %d", type); From 24f258e39cf6573720704b2b37c905ded77f759e Mon Sep 17 00:00:00 2001 From: Yossi Gottlieb Date: Tue, 22 Sep 2020 11:38:52 +0300 Subject: [PATCH 214/377] Fix occasional hangs on replication reconnection. (#7830) This happens only on diskless replicas when attempting to reconnect after failing to load an RDB file. It is more likely to occur with larger datasets. After reconnection is initiated, replicationEmptyDbCallback() may get called and try to write to an unconnected socket. This triggered another issue where the connection is put into an error state and the connect handler never gets called. The problem is a regression introduced by commit cad93ed. 
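The fix shown in the diff below boils down to one rule: a read/write error must not clobber the state of a connection that is not yet established. A minimal sketch of that guard, using the state names from src/connection.h (not the literal patched code):

    static void markSocketError(connection *conn, int err) {
        conn->last_errno = err;
        /* Only an established connection may move to the ERROR state here;
         * overwriting CONN_STATE_CONNECTING would keep the connect handler
         * from ever firing, leaving the reconnecting replica hung. */
        if (conn->state == CONN_STATE_CONNECTED)
            conn->state = CONN_STATE_ERROR;
    }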
(cherry picked from commit ecd86283ec292c1062f377f5707be57a8a77adb4) --- src/connection.c | 14 ++++++++++++-- src/replication.c | 3 ++- 2 files changed, 14 insertions(+), 3 deletions(-) diff --git a/src/connection.c b/src/connection.c index 23b44a314..415cbdf78 100644 --- a/src/connection.c +++ b/src/connection.c @@ -168,7 +168,12 @@ static int connSocketWrite(connection *conn, const void *data, size_t data_len) int ret = write(conn->fd, data, data_len); if (ret < 0 && errno != EAGAIN) { conn->last_errno = errno; - conn->state = CONN_STATE_ERROR; + + /* Don't overwrite the state of a connection that is not already + * connected, not to mess with handler callbacks. + */ + if (conn->state == CONN_STATE_CONNECTED) + conn->state = CONN_STATE_ERROR; } return ret; @@ -180,7 +185,12 @@ static int connSocketRead(connection *conn, void *buf, size_t buf_len) { conn->state = CONN_STATE_CLOSED; } else if (ret < 0 && errno != EAGAIN) { conn->last_errno = errno; - conn->state = CONN_STATE_ERROR; + + /* Don't overwrite the state of a connection that is not already + * connected, not to mess with handler callbacks. + */ + if (conn->state == CONN_STATE_CONNECTED) + conn->state = CONN_STATE_ERROR; } return ret; diff --git a/src/replication.c b/src/replication.c index 6feb9ab6c..047449c4b 100644 --- a/src/replication.c +++ b/src/replication.c @@ -1374,7 +1374,8 @@ void replicationSendNewlineToMaster(void) { * the new dataset received by the master. */ void replicationEmptyDbCallback(void *privdata) { UNUSED(privdata); - replicationSendNewlineToMaster(); + if (server.repl_state == REPL_STATE_TRANSFER) + replicationSendNewlineToMaster(); } /* Once we have a link with the master and the synchronization was From 90e8da536e919a99a14790a057acb3853a668591 Mon Sep 17 00:00:00 2001 From: Oran Agra Date: Tue, 22 Sep 2020 12:11:19 +0300 Subject: [PATCH 215/377] RM_GetContextFlags - document missing flags (#7821) (cherry picked from commit 78c80b3f8c4d37884ee387ef44abdd83664ee448) --- src/module.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/src/module.c b/src/module.c index 4cb4cfe20..feaff0223 100644 --- a/src/module.c +++ b/src/module.c @@ -1922,6 +1922,12 @@ int RM_GetSelectedDb(RedisModuleCtx *ctx) { * * * REDISMODULE_CTX_FLAGS_ACTIVE_CHILD: There is currently some background * process active (RDB, AUX or module). + * + * * REDISMODULE_CTX_FLAGS_MULTI_DIRTY: The next EXEC will fail due to dirty + * CAS (touched keys). + * + * * REDISMODULE_CTX_FLAGS_IS_CHILD: Redis is currently running inside + * background child process. 
*/ int RM_GetContextFlags(RedisModuleCtx *ctx) { From f478a1b6fb146671712086eec9187fa84bb5380e Mon Sep 17 00:00:00 2001 From: yixiang Date: Tue, 22 Sep 2020 17:53:36 +0800 Subject: [PATCH 216/377] Fix connGetSocketError usage (#7811) (cherry picked from commit 4e70e49d2bdaa477d9436a394f8626a1cc6e94af) --- src/connection.c | 5 +++-- src/tls.c | 5 +++-- 2 files changed, 6 insertions(+), 4 deletions(-) diff --git a/src/connection.c b/src/connection.c index 415cbdf78..83fb84d6d 100644 --- a/src/connection.c +++ b/src/connection.c @@ -261,8 +261,9 @@ static void connSocketEventHandler(struct aeEventLoop *el, int fd, void *clientD if (conn->state == CONN_STATE_CONNECTING && (mask & AE_WRITABLE) && conn->conn_handler) { - if (connGetSocketError(conn)) { - conn->last_errno = errno; + int conn_error = connGetSocketError(conn); + if (conn_error) { + conn->last_errno = conn_error; conn->state = CONN_STATE_ERROR; } else { conn->state = CONN_STATE_CONNECTED; diff --git a/src/tls.c b/src/tls.c index 52887cd23..f55d25c78 100644 --- a/src/tls.c +++ b/src/tls.c @@ -464,8 +464,9 @@ static void tlsHandleEvent(tls_connection *conn, int mask) { switch (conn->c.state) { case CONN_STATE_CONNECTING: - if (connGetSocketError((connection *) conn)) { - conn->c.last_errno = errno; + int conn_error = connGetSocketError((connection *) conn); + if (conn_error) { + conn->c.last_errno = conn_error; conn->c.state = CONN_STATE_ERROR; } else { if (!(conn->flags & TLS_CONN_FLAG_FD_SET)) { From d3fc73612360e0c8818c3f4d43f4862c5767f579 Mon Sep 17 00:00:00 2001 From: WuYunlong Date: Wed, 23 Sep 2020 14:09:48 +0800 Subject: [PATCH 217/377] Fix redundancy use of semicolon in do-while macros in ziplist.c. (#7832) this is very dangerous bug, but it looks like it didn't cause any harm. (cherry picked from commit 00668f782f0d8e987fc2c049c34e100567c0a5c6) --- src/ziplist.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/ziplist.c b/src/ziplist.c index e27875f6e..8721ace39 100644 --- a/src/ziplist.c +++ b/src/ziplist.c @@ -390,7 +390,7 @@ unsigned int zipStoreEntryEncoding(unsigned char *p, unsigned char encoding, uns (lensize) = 1; \ (len) = zipIntSize(encoding); \ } \ -} while(0); +} while(0) /* Encode the length of the previous entry and write it to "p". This only * uses the larger encoding (required in __ziplistCascadeUpdate). */ @@ -426,7 +426,7 @@ unsigned int zipStorePrevEntryLength(unsigned char *p, unsigned int len) { } else { \ (prevlensize) = 5; \ } \ -} while(0); +} while(0) /* Return the length of the previous element, and the number of bytes that * are used in order to encode the previous element length. @@ -444,7 +444,7 @@ unsigned int zipStorePrevEntryLength(unsigned char *p, unsigned int len) { memcpy(&(prevlen), ((char*)(ptr)) + 1, 4); \ memrev32ifbe(&prevlen); \ } \ -} while(0); +} while(0) /* Given a pointer 'p' to the prevlen info that prefixes an entry, this * function returns the difference in number of bytes needed to encode From 06c8f03ba1dd8ed5b4cd84ef117cd8b3f7d87d22 Mon Sep 17 00:00:00 2001 From: David CARLIER Date: Wed, 23 Sep 2020 08:00:31 +0100 Subject: [PATCH 218/377] Further NetBSD update and build fixes. (#7831) mainly backtrace and register dump support. 
(cherry picked from commit 6bc28d99a3a24c31c44e134b12a502441266e8bc) --- src/Makefile | 12 ++++++++++- src/config.h | 4 ++-- src/debug.c | 59 ++++++++++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 72 insertions(+), 3 deletions(-) diff --git a/src/Makefile b/src/Makefile index 873797330..e59089811 100644 --- a/src/Makefile +++ b/src/Makefile @@ -121,12 +121,21 @@ ifeq ($(uname_S),OpenBSD) endif else +ifeq ($(uname_S),NetBSD) + # NetBSD + FINAL_LIBS+= -lpthread + ifeq ($(USE_BACKTRACE),yes) + FINAL_CFLAGS+= -DUSE_BACKTRACE -I/usr/pkg/include + FINAL_LDFLAGS+= -L/usr/pkg/lib + FINAL_LIBS+= -lexecinfo + endif +else ifeq ($(uname_S),FreeBSD) # FreeBSD FINAL_LIBS+= -lpthread -lexecinfo else ifeq ($(uname_S),DragonFly) - # FreeBSD + # DragonFly FINAL_LIBS+= -lpthread -lexecinfo else ifeq ($(uname_S),OpenBSD) @@ -148,6 +157,7 @@ endif endif endif endif +endif # Include paths to dependencies FINAL_CFLAGS+= -I../deps/hiredis -I../deps/linenoise -I../deps/lua/src diff --git a/src/config.h b/src/config.h index f9ec7e44a..d391508fa 100644 --- a/src/config.h +++ b/src/config.h @@ -64,7 +64,7 @@ /* Test for backtrace() */ #if defined(__APPLE__) || (defined(__linux__) && defined(__GLIBC__)) || \ - defined(__FreeBSD__) || (defined(__OpenBSD__) && defined(USE_BACKTRACE))\ + defined(__FreeBSD__) || ((defined(__OpenBSD__) || defined(__NetBSD__)) && defined(USE_BACKTRACE))\ || defined(__DragonFly__) #define HAVE_BACKTRACE 1 #endif @@ -236,7 +236,7 @@ void setproctitle(const char *fmt, ...); #define redis_set_thread_title(name) pthread_set_name_np(pthread_self(), name) #elif defined __NetBSD__ #include -#define redis_set_thread_title(name) pthread_setname_np(pthread_self(), name, NULL) +#define redis_set_thread_title(name) pthread_setname_np(pthread_self(), "%s", name) #else #if (defined __APPLE__ && defined(MAC_OS_X_VERSION_10_7)) int pthread_setname_np(const char *name); diff --git a/src/debug.c b/src/debug.c index e64ec1b78..1a41574e4 100644 --- a/src/debug.c +++ b/src/debug.c @@ -967,6 +967,12 @@ static void *getMcontextEip(ucontext_t *uc) { #elif defined(__x86_64__) return (void*) uc->sc_rip; #endif +#elif defined(__NetBSD__) + #if defined(__i386__) + return (void*) uc->uc_mcontext.__gregs[_REG_EIP]; + #elif defined(__x86_64__) + return (void*) uc->uc_mcontext.__gregs[_REG_RIP]; + #endif #elif defined(__DragonFly__) return (void*) uc->uc_mcontext.mc_rip; #else @@ -1324,6 +1330,59 @@ void logRegisters(ucontext_t *uc) { ); logStackContent((void**)uc->sc_esp); #endif +#elif defined(__NetBSD__) + #if defined(__x86_64__) + serverLog(LL_WARNING, + "\n" + "RAX:%016lx RBX:%016lx\nRCX:%016lx RDX:%016lx\n" + "RDI:%016lx RSI:%016lx\nRBP:%016lx RSP:%016lx\n" + "R8 :%016lx R9 :%016lx\nR10:%016lx R11:%016lx\n" + "R12:%016lx R13:%016lx\nR14:%016lx R15:%016lx\n" + "RIP:%016lx EFL:%016lx\nCSGSFS:%016lx", + (unsigned long) uc->uc_mcontext.__gregs[_REG_RAX], + (unsigned long) uc->uc_mcontext.__gregs[_REG_RBX], + (unsigned long) uc->uc_mcontext.__gregs[_REG_RCX], + (unsigned long) uc->uc_mcontext.__gregs[_REG_RDX], + (unsigned long) uc->uc_mcontext.__gregs[_REG_RDI], + (unsigned long) uc->uc_mcontext.__gregs[_REG_RSI], + (unsigned long) uc->uc_mcontext.__gregs[_REG_RBP], + (unsigned long) uc->uc_mcontext.__gregs[_REG_RSP], + (unsigned long) uc->uc_mcontext.__gregs[_REG_R8], + (unsigned long) uc->uc_mcontext.__gregs[_REG_R9], + (unsigned long) uc->uc_mcontext.__gregs[_REG_R10], + (unsigned long) uc->uc_mcontext.__gregs[_REG_R11], + (unsigned long) uc->uc_mcontext.__gregs[_REG_R12], + (unsigned long) 
uc->uc_mcontext.__gregs[_REG_R13], + (unsigned long) uc->uc_mcontext.__gregs[_REG_R14], + (unsigned long) uc->uc_mcontext.__gregs[_REG_R15], + (unsigned long) uc->uc_mcontext.__gregs[_REG_RIP], + (unsigned long) uc->uc_mcontext.__gregs[_REG_RFLAGS], + (unsigned long) uc->uc_mcontext.__gregs[_REG_CS] + ); + logStackContent((void**)uc->uc_mcontext.__gregs[_REG_RSP]); + #elif defined(__i386__) + serverLog(LL_WARNING, + "\n" + "EAX:%08lx EBX:%08lx ECX:%08lx EDX:%08lx\n" + "EDI:%08lx ESI:%08lx EBP:%08lx ESP:%08lx\n" + "SS :%08lx EFL:%08lx EIP:%08lx CS:%08lx\n" + "DS :%08lx ES :%08lx FS :%08lx GS:%08lx", + (unsigned long) uc->uc_mcontext.__gregs[_REG_EAX], + (unsigned long) uc->uc_mcontext.__gregs[_REG_EBX], + (unsigned long) uc->uc_mcontext.__gregs[_REG_EDX], + (unsigned long) uc->uc_mcontext.__gregs[_REG_EDI], + (unsigned long) uc->uc_mcontext.__gregs[_REG_ESI], + (unsigned long) uc->uc_mcontext.__gregs[_REG_EBP], + (unsigned long) uc->uc_mcontext.__gregs[_REG_ESP], + (unsigned long) uc->uc_mcontext.__gregs[_REG_SS], + (unsigned long) uc->uc_mcontext.__gregs[_REG_EFLAGS], + (unsigned long) uc->uc_mcontext.__gregs[_REG_EIP], + (unsigned long) uc->uc_mcontext.__gregs[_REG_CS], + (unsigned long) uc->uc_mcontext.__gregs[_REG_ES], + (unsigned long) uc->uc_mcontext.__gregs[_REG_FS], + (unsigned long) uc->uc_mcontext.__gregs[_REG_GS] + ); + #endif #elif defined(__DragonFly__) serverLog(LL_WARNING, "\n" From 7ab8961c6d588f1d34be7b9e2fa8413cd6117392 Mon Sep 17 00:00:00 2001 From: Oran Agra Date: Wed, 23 Sep 2020 11:30:24 +0300 Subject: [PATCH 219/377] fix recently broken TLS build error, and add coverage for CI (#7833) (cherry picked from commit 270fcb80bf8c5d8458d60d3a494f422d12e1dfaf) --- .github/workflows/ci.yml | 3 ++- src/tls.c | 4 ++-- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 4d6c1c14c..70aebfc87 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -9,7 +9,8 @@ jobs: steps: - uses: actions/checkout@v2 - name: make - run: make + # build with TLS just for compilatoin coverage + run: make BUILD_TLS=yes - name: test run: | sudo apt-get install tcl8.5 diff --git a/src/tls.c b/src/tls.c index f55d25c78..d3173fab0 100644 --- a/src/tls.c +++ b/src/tls.c @@ -454,7 +454,7 @@ void updateSSLEvent(tls_connection *conn) { } static void tlsHandleEvent(tls_connection *conn, int mask) { - int ret; + int ret, conn_error; TLSCONN_DEBUG("tlsEventHandler(): fd=%d, state=%d, mask=%d, r=%d, w=%d, flags=%d", fd, conn->c.state, mask, conn->c.read_handler != NULL, conn->c.write_handler != NULL, @@ -464,7 +464,7 @@ static void tlsHandleEvent(tls_connection *conn, int mask) { switch (conn->c.state) { case CONN_STATE_CONNECTING: - int conn_error = connGetSocketError((connection *) conn); + conn_error = connGetSocketError((connection *) conn); if (conn_error) { conn->c.last_errno = conn_error; conn->c.state = CONN_STATE_ERROR; From 66a13267c7b734526de3959511eb14f59132bd46 Mon Sep 17 00:00:00 2001 From: Guy Korland Date: Thu, 24 Sep 2020 12:45:30 +0300 Subject: [PATCH 220/377] Fix RedisModule_HashGet examples (#6697) (cherry picked from commit 04945e0e6d5aadd9fb5a7b47d947d759073af51a) --- src/module.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/module.c b/src/module.c index feaff0223..9c7d74c50 100644 --- a/src/module.c +++ b/src/module.c @@ -2952,22 +2952,22 @@ int RM_HashSet(RedisModuleKey *key, int flags, ...) 
{ * As with RedisModule_HashSet() the behavior of the command can be specified * passing flags different than REDISMODULE_HASH_NONE: * - * REDISMODULE_HASH_CFIELD: field names as null terminated C strings. + * REDISMODULE_HASH_CFIELDS: field names as null terminated C strings. * * REDISMODULE_HASH_EXISTS: instead of setting the value of the field * expecting a RedisModuleString pointer to pointer, the function just * reports if the field exists or not and expects an integer pointer * as the second element of each pair. * - * Example of REDISMODULE_HASH_CFIELD: + * Example of REDISMODULE_HASH_CFIELDS: * * RedisModuleString *username, *hashedpass; - * RedisModule_HashGet(mykey,"username",&username,"hp",&hashedpass, NULL); + * RedisModule_HashGet(mykey,REDISMODULE_HASH_CFIELDS,"username",&username,"hp",&hashedpass, NULL); * * Example of REDISMODULE_HASH_EXISTS: * * int exists; - * RedisModule_HashGet(mykey,argv[1],&exists,NULL); + * RedisModule_HashGet(mykey,REDISMODULE_HASH_EXISTS,argv[1],&exists,NULL); * * The function returns REDISMODULE_OK on success and REDISMODULE_ERR if * the key is not an hash value. From 1c4a99c9ec61c68f31ccf121c44fd7108be61f68 Mon Sep 17 00:00:00 2001 From: Wang Yuan Date: Thu, 24 Sep 2020 21:01:41 +0800 Subject: [PATCH 221/377] Don't write replies if close the client ASAP (#7202) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Before this commit, we would have continued to add replies to the reply buffer even if client output buffer limit is reached, so the used memory would keep increasing over the configured limit. What's more, we shouldn’t write any reply to the client if it is set 'CLIENT_CLOSE_ASAP' flag because that doesn't conform to its definition and we will close all clients flagged with 'CLIENT_CLOSE_ASAP' in ‘beforeSleep’. Because of code execution order, before this, we may firstly write to part of the replies to the socket before disconnecting it, but in fact, we may can’t send the full replies to clients since OS socket buffer is limited. But this unexpected behavior makes some commands work well, for instance ACL DELUSER, if the client deletes the current user, we need to send reply to client and close the connection, but before, we close the client firstly and write the reply to reply buffer. secondly, we shouldn't do this despite the fact it works well in most cases. We add a flag 'CLIENT_CLOSE_AFTER_COMMAND' to mark clients, this flag means we will close the client after executing commands and send all entire replies, so that we can write replies to reply buffer during executing commands, send replies to clients, and close them later. We also fix some implicit problems. If client output buffer limit is enforced in 'multi/exec', all commands will be executed completely in redis and clients will not read any reply instead of partial replies. Even more, if the client executes 'ACL deluser' the using user in 'multi/exec', it will not read the replies after 'ACL deluser' just like before executing 'client kill' itself in 'multi/exec'. We added some tests for output buffer limit breach during multi-exec and using a pipeline of many small commands rather than one with big response. 
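A condensed sketch of the pattern described above, assuming the CLIENT_CLOSE_AFTER_COMMAND / CLIENT_CLOSE_AFTER_REPLY flags introduced by this patch (the real changes are in the diff below):

    /* Called while a command is executing and client 'c' must be
     * terminated, e.g. because its ACL user was just deleted. */
    void deferClientClose(client *c) {
        if (c == server.current_client) {
            /* We still owe this client a reply: mark it now, and after the
             * command finishes call() turns the mark into
             * CLIENT_CLOSE_AFTER_REPLY so the full reply is written before
             * the connection is closed. */
            c->flags |= CLIENT_CLOSE_AFTER_COMMAND;
        } else {
            /* Not the client running the command: free it asynchronously. */
            freeClientAsync(c);
        }
    }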
Co-authored-by: Oran Agra (cherry picked from commit 3085577c095a0f3b1261f6dbf016d7701aadab46) --- src/acl.c | 8 +++- src/module.c | 8 +++- src/networking.c | 14 ++++++ src/server.c | 7 +++ src/server.h | 2 + tests/unit/acl.tcl | 17 +++++++ tests/unit/obuf-limits.tcl | 90 ++++++++++++++++++++++++++++++++++++++ 7 files changed, 144 insertions(+), 2 deletions(-) diff --git a/src/acl.c b/src/acl.c index 74768aa27..5d484a742 100644 --- a/src/acl.c +++ b/src/acl.c @@ -297,7 +297,13 @@ void ACLFreeUserAndKillClients(user *u) { * it in non authenticated mode. */ c->user = DefaultUser; c->authenticated = 0; - freeClientAsync(c); + /* We will write replies to this client later, so we can't + * close it directly even if async. */ + if (c == server.current_client) { + c->flags |= CLIENT_CLOSE_AFTER_COMMAND; + } else { + freeClientAsync(c); + } } } ACLFreeUser(u); diff --git a/src/module.c b/src/module.c index 9c7d74c50..0655272ff 100644 --- a/src/module.c +++ b/src/module.c @@ -5537,7 +5537,13 @@ void revokeClientAuthentication(client *c) { c->user = DefaultUser; c->authenticated = 0; - freeClientAsync(c); + /* We will write replies to this client later, so we can't close it + * directly even if async. */ + if (c == server.current_client) { + c->flags |= CLIENT_CLOSE_AFTER_COMMAND; + } else { + freeClientAsync(c); + } } /* Cleanup all clients that have been authenticated with this module. This diff --git a/src/networking.c b/src/networking.c index e738d1c92..9b744eb0c 100644 --- a/src/networking.c +++ b/src/networking.c @@ -223,6 +223,9 @@ int prepareClientToWrite(client *c) { * handler since there is no socket at all. */ if (c->flags & (CLIENT_LUA|CLIENT_MODULE)) return C_OK; + /* If CLIENT_CLOSE_ASAP flag is set, we need not write anything. */ + if (c->flags & CLIENT_CLOSE_ASAP) return C_ERR; + /* CLIENT REPLY OFF / SKIP handling: don't send replies. */ if (c->flags & (CLIENT_REPLY_OFF|CLIENT_REPLY_SKIP)) return C_ERR; @@ -1436,6 +1439,9 @@ int handleClientsWithPendingWrites(void) { * that may trigger write error or recreate handler. */ if (c->flags & CLIENT_PROTECTED) continue; + /* Don't write to clients that are going to be closed anyway. */ + if (c->flags & CLIENT_CLOSE_ASAP) continue; + /* Try to write buffers to the client socket. */ if (writeToClient(c,0) == C_ERR) continue; @@ -3108,6 +3114,14 @@ int handleClientsWithPendingWritesUsingThreads(void) { while((ln = listNext(&li))) { client *c = listNodeValue(ln); c->flags &= ~CLIENT_PENDING_WRITE; + + /* Remove clients from the list of pending writes since + * they are going to be closed ASAP. */ + if (c->flags & CLIENT_CLOSE_ASAP) { + listDelNode(server.clients_pending_write, ln); + continue; + } + int target_id = item_id % server.io_threads_num; listAddNodeTail(io_threads_list[target_id],c); item_id++; diff --git a/src/server.c b/src/server.c index 5face48bb..1f20efe94 100644 --- a/src/server.c +++ b/src/server.c @@ -3345,6 +3345,13 @@ void call(client *c, int flags) { dirty = server.dirty-dirty; if (dirty < 0) dirty = 0; + /* After executing command, we will close the client after writing entire + * reply if it is set 'CLIENT_CLOSE_AFTER_COMMAND' flag. */ + if (c->flags & CLIENT_CLOSE_AFTER_COMMAND) { + c->flags &= ~CLIENT_CLOSE_AFTER_COMMAND; + c->flags |= CLIENT_CLOSE_AFTER_REPLY; + } + /* When EVAL is called loading the AOF we don't want commands called * from Lua to go into the slowlog or to populate statistics. 
*/ if (server.loading && c->flags & CLIENT_LUA) diff --git a/src/server.h b/src/server.h index 3317092a0..48dcee631 100644 --- a/src/server.h +++ b/src/server.h @@ -264,6 +264,8 @@ extern int configOOMScoreAdjValuesDefaults[CONFIG_OOM_COUNT]; about writes performed by myself.*/ #define CLIENT_IN_TO_TABLE (1ULL<<38) /* This client is in the timeout table. */ #define CLIENT_PROTOCOL_ERROR (1ULL<<39) /* Protocol error chatting with it. */ +#define CLIENT_CLOSE_AFTER_COMMAND (1ULL<<40) /* Close after executing commands + * and writing entire reply. */ /* Client block type (btype field in client structure) * if CLIENT_BLOCKED flag is set. */ diff --git a/tests/unit/acl.tcl b/tests/unit/acl.tcl index 381f2f95f..f015f75a0 100644 --- a/tests/unit/acl.tcl +++ b/tests/unit/acl.tcl @@ -260,6 +260,23 @@ start_server {tags {"acl"}} { catch {r ACL help xxx} e assert_match "*Unknown subcommand or wrong number of arguments*" $e } + + test {Delete a user that the client doesn't use} { + r ACL setuser not_used on >passwd + assert {[r ACL deluser not_used] == 1} + # The client is not closed + assert {[r ping] eq {PONG}} + } + + test {Delete a user that the client is using} { + r ACL setuser using on +acl >passwd + r AUTH using passwd + # The client will receive reply normally + assert {[r ACL deluser using] == 1} + # The client is closed + catch {[r ping]} e + assert_match "*I/O error*" $e + } } set server_path [tmpdir "server.acl"] diff --git a/tests/unit/obuf-limits.tcl b/tests/unit/obuf-limits.tcl index c45bf8e86..20ba32fd5 100644 --- a/tests/unit/obuf-limits.tcl +++ b/tests/unit/obuf-limits.tcl @@ -70,4 +70,94 @@ start_server {tags {"obuf-limits"}} { assert {$omem >= 100000 && $time_elapsed < 6} $rd1 close } + + test {No response for single command if client output buffer hard limit is enforced} { + r config set client-output-buffer-limit {normal 100000 0 0} + # Total size of all items must be more than 100k + set item [string repeat "x" 1000] + for {set i 0} {$i < 150} {incr i} { + r lpush mylist $item + } + set orig_mem [s used_memory] + # Set client name and get all items + set rd [redis_deferring_client] + $rd client setname mybiglist + assert {[$rd read] eq "OK"} + $rd lrange mylist 0 -1 + $rd flush + after 100 + + # Before we read reply, redis will close this client. + set clients [r client list] + assert_no_match "*name=mybiglist*" $clients + set cur_mem [s used_memory] + # 10k just is a deviation threshold + assert {$cur_mem < 10000 + $orig_mem} + + # Read nothing + set fd [$rd channel] + assert_equal {} [read $fd] + } + + test {No response for multi commands in pipeline if client output buffer limit is enforced} { + r config set client-output-buffer-limit {normal 100000 0 0} + set value [string repeat "x" 10000] + r set bigkey $value + set rd1 [redis_deferring_client] + set rd2 [redis_deferring_client] + $rd2 client setname multicommands + assert_equal "OK" [$rd2 read] + # Let redis sleep 2s firstly + $rd1 debug sleep 2 + $rd1 flush + after 100 + + # Total size should be less than OS socket buffer, redis can + # execute all commands in this pipeline when it wakes up. 
+ for {set i 0} {$i < 15} {incr i} { + $rd2 set $i $i + $rd2 get $i + $rd2 del $i + # One bigkey is 10k, total response size must be more than 100k + $rd2 get bigkey + } + $rd2 flush + after 100 + + # Reds must wake up if it can send reply + assert_equal "PONG" [r ping] + set clients [r client list] + assert_no_match "*name=multicommands*" $clients + set fd [$rd2 channel] + assert_equal {} [read $fd] + } + + test {Execute transactions completely even if client output buffer limit is enforced} { + r config set client-output-buffer-limit {normal 100000 0 0} + # Total size of all items must be more than 100k + set item [string repeat "x" 1000] + for {set i 0} {$i < 150} {incr i} { + r lpush mylist2 $item + } + + # Output buffer limit is enforced during executing transaction + r client setname transactionclient + r set k1 v1 + r multi + r set k2 v2 + r get k2 + r lrange mylist2 0 -1 + r set k3 v3 + r del k1 + catch {[r exec]} e + assert_match "*I/O error*" $e + reconnect + set clients [r client list] + assert_no_match "*name=transactionclient*" $clients + + # Transactions should be executed completely + assert_equal {} [r get k1] + assert_equal "v2" [r get k2] + assert_equal "v3" [r get k3] + } } From ac867bfb6a348dd792ef681fe7d5121cfe28cdcb Mon Sep 17 00:00:00 2001 From: Wen Hui Date: Thu, 24 Sep 2020 11:17:53 -0400 Subject: [PATCH 222/377] rdb.c: handle fclose error case differently to avoid double fclose (#7307) When fclose would fail, the previous implementation would have attempted to do fclose again this can in theory lead to segfault. other changes: check for non-zero return value as failure rather than a specific error code. this doesn't fix a real bug, just a minor cleanup. (cherry picked from commit c67656fa3541376590fe9a9b146ad5641cb861aa) --- src/rdb.c | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/src/rdb.c b/src/rdb.c index 5a6be6e38..fa3934027 100644 --- a/src/rdb.c +++ b/src/rdb.c @@ -1314,7 +1314,7 @@ werr: /* Write error. */ int rdbSave(char *filename, rdbSaveInfo *rsi) { char tmpfile[256]; char cwd[MAXPATHLEN]; /* Current working dir path for error messages. */ - FILE *fp; + FILE *fp = NULL; rio rdb; int error = 0; @@ -1343,10 +1343,11 @@ int rdbSave(char *filename, rdbSaveInfo *rsi) { } /* Make sure data will not remain on the OS's output buffers */ - if (fflush(fp) == EOF) goto werr; - if (fsync(fileno(fp)) == -1) goto werr; - if (fclose(fp) == EOF) goto werr; - + if (fflush(fp)) goto werr; + if (fsync(fileno(fp))) goto werr; + if (fclose(fp)) { fp = NULL; goto werr; } + fp = NULL; + /* Use RENAME to make sure the DB file is changed atomically only * if the generate DB file is ok. */ if (rename(tmpfile,filename) == -1) { @@ -1372,7 +1373,7 @@ int rdbSave(char *filename, rdbSaveInfo *rsi) { werr: serverLog(LL_WARNING,"Write error saving DB on disk: %s", strerror(errno)); - fclose(fp); + if (fp) fclose(fp); unlink(tmpfile); stopSaving(0); return C_ERR; From 39e1f2e2bd761d1226353369b92d542205cd845b Mon Sep 17 00:00:00 2001 From: WuYunlong Date: Fri, 25 Sep 2020 13:08:06 +0800 Subject: [PATCH 223/377] Add fsync to readSyncBulkPayload(). (#7839) We should sync temp DB file before renaming as rdb_fsync_range does not use flag `SYNC_FILE_RANGE_WAIT_AFTER`. Refer to `Linux Programmer's Manual`: SYNC_FILE_RANGE_WAIT_AFTER Wait upon write-out of all pages in the range after performing any write. 
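In other words, the earlier rdb_fsync_range() calls initiate write-out but do not wait for it to complete, so the temp file must be explicitly fsync()ed before rename() makes it visible as the final RDB. A generic sketch of the durable-replace rule being applied here (not Redis-specific code):

    #include <stdio.h>
    #include <unistd.h>

    /* Promote the fully written temp file 'tmp' (still open as 'tmpfd')
     * to 'final' only after its data is known to be on disk. */
    int replace_file_durably(int tmpfd, const char *tmp, const char *final) {
        if (fsync(tmpfd) == -1) return -1;   /* flush file data first */
        if (rename(tmp, final) == -1) return -1;
        return 0;
    }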
(cherry picked from commit d119448881655a1529eb6d7d7e78af5f15132536) --- src/replication.c | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/src/replication.c b/src/replication.c index 047449c4b..acc0befbb 100644 --- a/src/replication.c +++ b/src/replication.c @@ -1752,6 +1752,17 @@ void readSyncBulkPayload(connection *conn) { killRDBChild(); } + /* Make sure the new file (also used for persistence) is fully synced + * (not covered by earlier calls to rdb_fsync_range). */ + if (fsync(server.repl_transfer_fd) == -1) { + serverLog(LL_WARNING, + "Failed trying to sync the temp DB to disk in " + "MASTER <-> REPLICA synchronization: %s", + strerror(errno)); + cancelReplicationHandshake(); + return; + } + /* Rename rdb like renaming rewrite aof asynchronously. */ int old_rdb_fd = open(server.rdb_filename,O_RDONLY|O_NONBLOCK); if (rename(server.repl_transfer_tmpfile,server.rdb_filename) == -1) { From 10be3d96d872770ab3d8df644290d765a59c47b9 Mon Sep 17 00:00:00 2001 From: Uri Shachar Date: Fri, 25 Sep 2020 12:55:45 +0300 Subject: [PATCH 224/377] Fix config rewrite file handling to make it really atomic (#7824) Make sure we handle short writes correctly, sync to disk after writing and use rename to make sure the replacement is actually atomic. In any case of failure old configuration will remain in place. Also, add some additional logging to make it easier to diagnose rewrite problems. (cherry picked from commit 8dbe91f0316f08d785bad1e8e28f1c13ddfbef2c) --- src/config.c | 82 +++++++++++++++++++++++++++------------------------- 1 file changed, 42 insertions(+), 40 deletions(-) diff --git a/src/config.c b/src/config.c index 63852ff4f..2902758fa 100644 --- a/src/config.c +++ b/src/config.c @@ -1543,60 +1543,62 @@ void rewriteConfigRemoveOrphaned(struct rewriteConfigState *state) { dictReleaseIterator(di); } -/* This function overwrites the old configuration file with the new content. - * - * 1) The old file length is obtained. - * 2) If the new content is smaller, padding is added. - * 3) A single write(2) call is used to replace the content of the file. - * 4) Later the file is truncated to the length of the new content. - * - * This way we are sure the file is left in a consistent state even if the - * process is stopped between any of the four operations. +/* This function replaces the old configuration file with the new content + * in an atomic manner. * * The function returns 0 on success, otherwise -1 is returned and errno - * set accordingly. */ + * is set accordingly. */ int rewriteConfigOverwriteFile(char *configfile, sds content) { - int retval = 0; - int fd = open(configfile,O_RDWR|O_CREAT,0644); - int content_size = sdslen(content), padding = 0; - struct stat sb; - sds content_padded; + int fd = -1; + int retval = -1; + char tmp_conffile[PATH_MAX]; + const char *tmp_suffix = ".XXXXXX"; + size_t offset = 0; + ssize_t written_bytes = 0; - /* 1) Open the old file (or create a new one if it does not - * exist), get the size. */ - if (fd == -1) return -1; /* errno set by open(). */ - if (fstat(fd,&sb) == -1) { - close(fd); - return -1; /* errno set by fstat(). */ + int tmp_path_len = snprintf(tmp_conffile, sizeof(tmp_conffile), "%s%s", configfile, tmp_suffix); + if (tmp_path_len <= 0 || (unsigned int)tmp_path_len >= sizeof(tmp_conffile)) { + serverLog(LL_WARNING, "Config file full path is too long"); + errno = ENAMETOOLONG; + return retval; } - /* 2) Pad the content at least match the old file size. 
*/ - content_padded = sdsdup(content); - if (content_size < sb.st_size) { - /* If the old file was bigger, pad the content with - * a newline plus as many "#" chars as required. */ - padding = sb.st_size - content_size; - content_padded = sdsgrowzero(content_padded,sb.st_size); - content_padded[content_size] = '\n'; - memset(content_padded+content_size+1,'#',padding-1); +#ifdef _GNU_SOURCE + fd = mkostemp(tmp_conffile, O_CLOEXEC); +#else + /* There's a theoretical chance here to leak the FD if a module thread forks & execv in the middle */ + fd = mkstemp(tmp_conffile); +#endif + + if (fd == -1) { + serverLog(LL_WARNING, "Could not create tmp config file (%s)", strerror(errno)); + return retval; } - /* 3) Write the new content using a single write(2). */ - if (write(fd,content_padded,strlen(content_padded)) == -1) { - retval = -1; - goto cleanup; + while (offset < sdslen(content)) { + written_bytes = write(fd, content + offset, sdslen(content) - offset); + if (written_bytes <= 0) { + if (errno == EINTR) continue; /* FD is blocking, no other retryable errors */ + serverLog(LL_WARNING, "Failed after writing (%ld) bytes to tmp config file (%s)", offset, strerror(errno)); + goto cleanup; + } + offset+=written_bytes; } - /* 4) Truncate the file to the right length if we used padding. */ - if (padding) { - if (ftruncate(fd,content_size) == -1) { - /* Non critical error... */ - } + if (fsync(fd)) + serverLog(LL_WARNING, "Could not sync tmp config file to disk (%s)", strerror(errno)); + else if (fchmod(fd, 0644) == -1) + serverLog(LL_WARNING, "Could not chmod config file (%s)", strerror(errno)); + else if (rename(tmp_conffile, configfile) == -1) + serverLog(LL_WARNING, "Could not rename tmp config file (%s)", strerror(errno)); + else { + retval = 0; + serverLog(LL_DEBUG, "Rewritten config file (%s) successfully", configfile); } cleanup: - sdsfree(content_padded); close(fd); + if (retval) unlink(tmp_conffile); return retval; } From 9042852ea1e2e09a59180e7726955f0a1eac19db Mon Sep 17 00:00:00 2001 From: Wang Yuan Date: Fri, 25 Sep 2020 21:25:47 +0800 Subject: [PATCH 225/377] Set 'loading' and 'shutdown_asap' to volatile sig_atomic_t type (#7845) We may access and modify these two variables in signal handler function, to guarantee them async-signal-safe, so we should set them to volatile sig_atomic_t type. It doesn't look like this could have caused any real issue, and it seems that signals are handled in main thread on most platforms. But we want to follow C and POSIX standard in signal handler function. (cherry picked from commit 917043fa438d9bbe9a80fb838fcfd33a7e390952) --- src/server.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/server.h b/src/server.h index 48dcee631..b12e4587d 100644 --- a/src/server.h +++ b/src/server.h @@ -1069,7 +1069,7 @@ struct redisServer { dict *orig_commands; /* Command table before command renaming. 
*/ aeEventLoop *el; _Atomic unsigned int lruclock; /* Clock for LRU eviction */ - int shutdown_asap; /* SHUTDOWN needed ASAP */ + volatile sig_atomic_t shutdown_asap; /* SHUTDOWN needed ASAP */ int activerehashing; /* Incremental rehash in serverCron() */ int active_defrag_running; /* Active defragmentation running (holds current scan aggressiveness) */ char *pidfile; /* PID file path */ @@ -1126,7 +1126,7 @@ struct redisServer { long long events_processed_while_blocked; /* processEventsWhileBlocked() */ /* RDB / AOF loading information */ - int loading; /* We are loading data from disk if true */ + volatile sig_atomic_t loading; /* We are loading data from disk if true */ off_t loading_total_bytes; off_t loading_loaded_bytes; time_t loading_start_time; From a6f8745127915dfb8ba3ee9cfff6cc34bce07e82 Mon Sep 17 00:00:00 2001 From: Wang Yuan Date: Sun, 27 Sep 2020 17:35:16 +0800 Subject: [PATCH 226/377] Don't support Gopher if enable io threads to read queries (#7851) There's currently an issue with IO threads and gopher (issuing lookupKey from within the thread). simply fix is to just not support it for now. (cherry picked from commit 9bdef76f8e3bbfaacf0962ab1ceded1bafa80bda) --- redis.conf | 8 +++++--- src/networking.c | 5 +++-- 2 files changed, 8 insertions(+), 5 deletions(-) diff --git a/redis.conf b/redis.conf index 38499b276..ab0a30cbd 100644 --- a/redis.conf +++ b/redis.conf @@ -1519,8 +1519,11 @@ notify-keyspace-events "" # # So use the 'requirepass' option to protect your instance. # -# To enable Gopher support uncomment the following line and set -# the option from no (the default) to yes. +# Note that Gopher is not currently supported when 'io-threads-do-reads' +# is enabled. +# +# To enable Gopher support, uncomment the following line and set the option +# from no (the default) to yes. # # gopher-enabled no @@ -1860,4 +1863,3 @@ jemalloc-bg-thread yes # # Set bgsave child process to cpu affinity 1,10,11 # bgsave_cpulist 1,10-11 - diff --git a/src/networking.c b/src/networking.c index 9b744eb0c..445150ab3 100644 --- a/src/networking.c +++ b/src/networking.c @@ -1884,8 +1884,9 @@ void processInputBuffer(client *c) { if (c->reqtype == PROTO_REQ_INLINE) { if (processInlineBuffer(c) != C_OK) break; /* If the Gopher mode and we got zero or one argument, process - * the request in Gopher mode. */ - if (server.gopher_enabled && + * the request in Gopher mode. To avoid data race, Redis won't + * support Gopher if enable io threads to read queries. */ + if (server.gopher_enabled && !server.io_threads_do_reads && ((c->argc == 1 && ((char*)(c->argv[0]->ptr))[0] == '/') || c->argc == 0)) { From 5ed795f025ef6f4955df4012aafeed9df94811e4 Mon Sep 17 00:00:00 2001 From: caozb <1162650653@qq.com> Date: Sun, 27 Sep 2020 20:40:07 +0800 Subject: [PATCH 227/377] ignore slaveof no one in redis.conf (#7842) when slaveof config is "no one", reset any pre-existing config and resume. also solve a memory leak if slaveof appears twice. and fail loading if port number is out of range or not an integer. 
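A minimal sketch of the strict validation described above; the actual change to loadServerConfigFromString() follows in the diff below:

    #include <stdlib.h>

    /* Accept only a complete decimal integer in the valid TCP port range,
     * rejecting empty strings and trailing garbage. */
    static int parsePort(const char *s, int *port) {
        char *end;
        long v = strtol(s, &end, 10);
        if (end == s || *end != '\0' || v < 0 || v > 65535) return -1;
        *port = (int)v;
        return 0;
    }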
Co-authored-by: caozhengbin Co-authored-by: Oran Agra (cherry picked from commit 01694608cb4e39a6ec7970d24b21ab33b7347e31) --- src/config.c | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/src/config.c b/src/config.c index 2902758fa..d8915343e 100644 --- a/src/config.c +++ b/src/config.c @@ -464,8 +464,17 @@ void loadServerConfigFromString(char *config) { } else if ((!strcasecmp(argv[0],"slaveof") || !strcasecmp(argv[0],"replicaof")) && argc == 3) { slaveof_linenum = linenum; + sdsfree(server.masterhost); + if (!strcasecmp(argv[1], "no") && !strcasecmp(argv[2], "one")) { + server.masterhost = NULL; + continue; + } server.masterhost = sdsnew(argv[1]); - server.masterport = atoi(argv[2]); + char *ptr; + server.masterport = strtol(argv[2], &ptr, 10); + if (server.masterport < 0 || server.masterport > 65535 || *ptr != '\0') { + err = "Invalid master port"; goto loaderr; + } server.repl_state = REPL_STATE_CONNECT; } else if (!strcasecmp(argv[0],"requirepass") && argc == 2) { if (strlen(argv[1]) > CONFIG_AUTHPASS_MAX_LEN) { From 0fc601b3dd9d571a72551bf41826de6f73a62ec7 Mon Sep 17 00:00:00 2001 From: Oran Agra Date: Sun, 27 Sep 2020 17:13:33 +0300 Subject: [PATCH 228/377] Fix new obuf-limits tests to work with TLS (#7848) Also stabilize new shutdown tests on slow machines (valgrind) (cherry picked from commit d89ae2d7ab3f6d181689b2546f2784b574d9b80e) --- tests/unit/obuf-limits.tcl | 20 ++++++++++++++------ tests/unit/shutdown.tcl | 22 +++++++++++++++------- 2 files changed, 29 insertions(+), 13 deletions(-) diff --git a/tests/unit/obuf-limits.tcl b/tests/unit/obuf-limits.tcl index 20ba32fd5..456d3ac82 100644 --- a/tests/unit/obuf-limits.tcl +++ b/tests/unit/obuf-limits.tcl @@ -99,6 +99,8 @@ start_server {tags {"obuf-limits"}} { assert_equal {} [read $fd] } + # Note: This test assumes that what's written with one write, will be read by redis in one read. + # this assumption is wrong, but seem to work empirically (for now) test {No response for multi commands in pipeline if client output buffer limit is enforced} { r config set client-output-buffer-limit {normal 100000 0 0} set value [string repeat "x" 10000] @@ -107,20 +109,26 @@ start_server {tags {"obuf-limits"}} { set rd2 [redis_deferring_client] $rd2 client setname multicommands assert_equal "OK" [$rd2 read] - # Let redis sleep 2s firstly - $rd1 debug sleep 2 + + # Let redis sleep 1s firstly + $rd1 debug sleep 1 $rd1 flush after 100 + # Create a pipeline of commands that will be processed in one socket read. + # It is important to use one write, in TLS mode independant writes seem + # to wait for response from the server. # Total size should be less than OS socket buffer, redis can # execute all commands in this pipeline when it wakes up. 
+ set buf "" for {set i 0} {$i < 15} {incr i} { - $rd2 set $i $i - $rd2 get $i - $rd2 del $i + append buf "set $i $i\r\n" + append buf "get $i\r\n" + append buf "del $i\r\n" # One bigkey is 10k, total response size must be more than 100k - $rd2 get bigkey + append buf "get bigkey\r\n" } + $rd2 write $buf $rd2 flush after 100 diff --git a/tests/unit/shutdown.tcl b/tests/unit/shutdown.tcl index 21ea8545d..f48eadc50 100644 --- a/tests/unit/shutdown.tcl +++ b/tests/unit/shutdown.tcl @@ -30,20 +30,28 @@ start_server {tags {"shutdown"}} { for {set i 0} {$i < 20} {incr i} { r set $i $i } - # It will cost 2s(20 * 100ms) to dump rdb + # It will cost 2s (20 * 100ms) to dump rdb r config set rdb-key-save-delay 100000 set pid [s process_id] set temp_rdb [file join [lindex [r config get dir] 1] temp-${pid}.rdb] + # trigger a shutdown which will save an rdb exec kill -SIGINT $pid - after 100 - # Temp rdb must be existed - assert {[file exists $temp_rdb]} + # Wait for creation of temp rdb + wait_for_condition 50 10 { + [file exists $temp_rdb] + } else { + fail "Can't trigger rdb save on shutdown" + } - # Temp rdb file must be deleted + # Insist on immediate shutdown, temp rdb file must be deleted exec kill -SIGINT $pid - after 100 - assert {![file exists $temp_rdb]} + # wait for the rdb file to be deleted + wait_for_condition 50 10 { + ![file exists $temp_rdb] + } else { + fail "Can't trigger rdb save on shutdown" + } } } From 58a7774ca4acbfc1079cc2d68a972ce42cdffe50 Mon Sep 17 00:00:00 2001 From: David CARLIER Date: Tue, 29 Sep 2020 06:49:35 +0100 Subject: [PATCH 229/377] getting rss size implementation for netbsd (#7293) (cherry picked from commit 520c3b26c3fce1c86cf0c70961acd0515c8cb498) --- src/zmalloc.c | 20 ++++++++++++++++++++ 1 file changed, 20 insertions(+) diff --git a/src/zmalloc.c b/src/zmalloc.c index 639a5fe2b..2645432b6 100644 --- a/src/zmalloc.c +++ b/src/zmalloc.c @@ -314,6 +314,26 @@ size_t zmalloc_get_rss(void) { return 0L; } +#elif defined(__NetBSD__) +#include +#include +#include + +size_t zmalloc_get_rss(void) { + struct kinfo_proc2 info; + size_t infolen = sizeof(info); + int mib[6]; + mib[0] = CTL_KERN; + mib[1] = KERN_PROC; + mib[2] = KERN_PROC_PID; + mib[3] = getpid(); + mib[4] = sizeof(info); + mib[5] = 1; + if (sysctl(mib, 4, &info, &infolen, NULL, 0) == 0) + return (size_t)info.p_vm_rssize; + + return 0L; +} #else size_t zmalloc_get_rss(void) { /* If we can't get the RSS in an OS-specific way for this system just From c6d664656721f8950050a5ac41b6de0c9f2c71f5 Mon Sep 17 00:00:00 2001 From: Gavrie Philipson Date: Tue, 29 Sep 2020 13:10:08 +0300 Subject: [PATCH 230/377] Fix typo in module API docs (#7861) (cherry picked from commit ce5efb444b203536335ca6dd5d34cb57425b55be) --- src/module.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/module.c b/src/module.c index 0655272ff..bfe7d6886 100644 --- a/src/module.c +++ b/src/module.c @@ -2937,8 +2937,8 @@ int RM_HashSet(RedisModuleKey *key, int flags, ...) { } /* Get fields from an hash value. This function is called using a variable - * number of arguments, alternating a field name (as a StringRedisModule - * pointer) with a pointer to a StringRedisModule pointer, that is set to the + * number of arguments, alternating a field name (as a RedisModuleString + * pointer) with a pointer to a RedisModuleString pointer, that is set to the * value of the field if the field exists, or NULL if the field does not exist. 
* At the end of the field/value-ptr pairs, NULL must be specified as last * argument to signal the end of the arguments in the variadic function. From 4da82110634828ca999217e7c7bd3f0529d117c2 Mon Sep 17 00:00:00 2001 From: David CARLIER Date: Tue, 29 Sep 2020 13:52:13 +0100 Subject: [PATCH 231/377] Add support for Haiku OS (#7435) (cherry picked from commit d535a5061ccd561d0c132b2e97b56a3bd252fde9) --- src/Makefile | 7 +++++++ src/config.h | 4 ++++ src/memtest.c | 5 +++++ 3 files changed, 16 insertions(+) diff --git a/src/Makefile b/src/Makefile index e59089811..a91d457f4 100644 --- a/src/Makefile +++ b/src/Makefile @@ -145,6 +145,12 @@ else ifeq ($(uname_S),NetBSD) # NetBSD FINAL_LIBS+= -lpthread -lexecinfo +else +ifeq ($(uname_S),Haiku) + # Haiku + FINAL_CFLAGS+= -DBSD_SOURCE + FINAL_LDFLAGS+= -lbsd -lnetwork + FINAL_LIBS+= -lpthread else # All the other OSes (notably Linux) FINAL_LDFLAGS+= -rdynamic @@ -158,6 +164,7 @@ endif endif endif endif +endif # Include paths to dependencies FINAL_CFLAGS+= -I../deps/hiredis -I../deps/linenoise -I../deps/lua/src diff --git a/src/config.h b/src/config.h index d391508fa..320837b7e 100644 --- a/src/config.h +++ b/src/config.h @@ -124,6 +124,10 @@ #define USE_SETPROCTITLE #endif +#if defined(__HAIKU__) +#define ESOCKTNOSUPPORT 0 +#endif + #if ((defined __linux && defined(__GLIBC__)) || defined __APPLE__) #define USE_SETPROCTITLE #define INIT_SETPROCTITLE_REPLACEMENT diff --git a/src/memtest.c b/src/memtest.c index a455430f5..cb4d35e83 100644 --- a/src/memtest.c +++ b/src/memtest.c @@ -347,10 +347,15 @@ void memtest_alloc_and_test(size_t megabytes, int passes) { } void memtest(size_t megabytes, int passes) { +#if !defined(__HAIKU__) if (ioctl(1, TIOCGWINSZ, &ws) == -1) { ws.ws_col = 80; ws.ws_row = 20; } +#else + ws.ws_col = 80; + ws.ws_row = 20; +#endif memtest_alloc_and_test(megabytes,passes); printf("\nYour memory passed this test.\n"); printf("Please if you are still in doubt use the following two tools:\n"); From c7cae0df77f5d7cb3861ae418c9b726cc13c5ef0 Mon Sep 17 00:00:00 2001 From: Oran Agra Date: Tue, 29 Sep 2020 17:03:47 +0300 Subject: [PATCH 232/377] warning: comparison between signed and unsigned integer in 32bit build (#7838) (cherry picked from commit c11bda25fd2959523cb1e87af5b366cc451dbd04) --- src/listpack.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/listpack.c b/src/listpack.c index 075552ccb..7e2da9b74 100644 --- a/src/listpack.c +++ b/src/listpack.c @@ -768,10 +768,10 @@ unsigned char *lpSeek(unsigned char *lp, long index) { if (numele != LP_HDR_NUMELE_UNKNOWN) { if (index < 0) index = (long)numele+index; if (index < 0) return NULL; /* Index still < 0 means out of range. */ - if (index >= numele) return NULL; /* Out of range the other side. */ + if ((long)index >= numele) return NULL; /* Out of range the other side. */ /* We want to scan right-to-left if the element we are looking for * is past the half of the listpack. */ - if (index > numele/2) { + if ((long)index > numele/2) { forward = 0; /* Right to left scanning always expects a negative index. Convert * our index to negative form. */ From 51a6e1e61a7461c4b1181e3a38e189a833c5d546 Mon Sep 17 00:00:00 2001 From: Yossi Gottlieb Date: Tue, 29 Sep 2020 20:48:21 +0300 Subject: [PATCH 233/377] TLS: Do not require CA config if not used. (#7862) The tls-ca-cert or tls-ca-cert-dir configuration parameters are only used when Redis needs to authenticate peer certificates, in one of these scenarios: 1. 
Incoming clients or replicas, with `tls-auth-clients` enabled. 2. A replica authenticating the master's peer certificate. 3. Cluster nodes authenticating other nodes when establishing the bus protocol connection. (cherry picked from commit 3bd9d0cc85d4ef8f4cc2789e9ab27e5557471409) --- src/tls.c | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/src/tls.c b/src/tls.c index d3173fab0..0a9e07895 100644 --- a/src/tls.c +++ b/src/tls.c @@ -167,8 +167,9 @@ int tlsConfigure(redisTLSContextConfig *ctx_config) { goto error; } - if (!ctx_config->ca_cert_file && !ctx_config->ca_cert_dir) { - serverLog(LL_WARNING, "Either tls-ca-cert-file or tls-ca-cert-dir must be configured!"); + if (((server.tls_auth_clients != TLS_CLIENT_AUTH_NO) || server.tls_cluster || server.tls_replication) && + !ctx_config->ca_cert_file && !ctx_config->ca_cert_dir) { + serverLog(LL_WARNING, "Either tls-ca-cert-file or tls-ca-cert-dir must be specified when tls-cluster, tls-replication or tls-auth-clients are enabled!"); goto error; } @@ -235,7 +236,8 @@ int tlsConfigure(redisTLSContextConfig *ctx_config) { goto error; } - if (SSL_CTX_load_verify_locations(ctx, ctx_config->ca_cert_file, ctx_config->ca_cert_dir) <= 0) { + if ((ctx_config->ca_cert_file || ctx_config->ca_cert_dir) && + SSL_CTX_load_verify_locations(ctx, ctx_config->ca_cert_file, ctx_config->ca_cert_dir) <= 0) { ERR_error_string_n(ERR_get_error(), errbuf, sizeof(errbuf)); serverLog(LL_WARNING, "Failed to configure CA certificate(s) file/directory: %s", errbuf); goto error; From 0816b8fadd3cba0e49418452842f0d5c97a2040f Mon Sep 17 00:00:00 2001 From: Yossi Gottlieb Date: Tue, 18 Aug 2020 17:13:09 +0300 Subject: [PATCH 234/377] Module API: Fail ineffective auth calls. The client pointed to by the module context may in some cases be a fake client. RM_Authenticate*() calls in this case would be ineffective but appear to succeed, and this change fails them to make it easier to catch such cases. (cherry picked from commit 82866776d0c26f17043f9c1b0f0f5f48660e6848) --- src/module.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/src/module.c b/src/module.c index bfe7d6886..65d22b713 100644 --- a/src/module.c +++ b/src/module.c @@ -5638,6 +5638,11 @@ static int authenticateClientWithUser(RedisModuleCtx *ctx, user *user, RedisModu return REDISMODULE_ERR; } + /* Avoid settings which are meaningless and will be lost */ + if (!ctx->client || (ctx->client->flags & CLIENT_MODULE)) { + return REDISMODULE_ERR; + } + moduleNotifyUserChanged(ctx->client); ctx->client->user = user; From 38853fd48771ab95797d1201723e6f463839323d Mon Sep 17 00:00:00 2001 From: Yossi Gottlieb Date: Tue, 18 Aug 2020 17:16:08 +0300 Subject: [PATCH 235/377] Modules: expose real client on conn events. When REDISMODULE_EVENT_CLIENT_CHANGE events are delivered, modules may want to mutate the client state (e.g. perform authentication). This change links the module context with the real client rather than a fake client for these events. (cherry picked from commit 4aca4e5f392ad6030150a92a9ef82412072f9622) --- src/module.c | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/src/module.c b/src/module.c index 65d22b713..afb525dbe 100644 --- a/src/module.c +++ b/src/module.c @@ -7263,6 +7263,7 @@ void moduleFireServerEvent(uint64_t eid, int subid, void *data) { * cheap if there are no registered modules. 
*/ if (listLength(RedisModule_EventListeners) == 0) return; + int real_client_used = 0; listIter li; listNode *ln; listRewind(RedisModule_EventListeners,&li); @@ -7272,7 +7273,15 @@ void moduleFireServerEvent(uint64_t eid, int subid, void *data) { RedisModuleCtx ctx = REDISMODULE_CTX_INIT; ctx.module = el->module; - if (ModulesInHooks == 0) { + if (eid == REDISMODULE_EVENT_CLIENT_CHANGE) { + /* In the case of client changes, we're pushing the real client + * so the event handler can mutate it if needed. For example, + * to change its authentication state in a way that does not + * depend on specific commands executed later. + */ + ctx.client = (client *) data; + real_client_used = 1; + } else if (ModulesInHooks == 0) { ctx.client = moduleFreeContextReusedClient; } else { ctx.client = createClient(NULL); @@ -7325,7 +7334,7 @@ void moduleFireServerEvent(uint64_t eid, int subid, void *data) { el->module->in_hook--; ModulesInHooks--; - if (ModulesInHooks != 0) freeClient(ctx.client); + if (ModulesInHooks != 0 && !real_client_used) freeClient(ctx.client); moduleFreeContext(&ctx); } } From e7bae8839249ec8d49136929e4644e5187da5336 Mon Sep 17 00:00:00 2001 From: nitaicaro <42576749+nitaicaro@users.noreply.github.com> Date: Wed, 30 Sep 2020 19:52:01 +0300 Subject: [PATCH 236/377] =?UTF-8?q?Fixed=20Tracking=20test=20=E2=80=9CThe?= =?UTF-8?q?=20other=20connection=20is=20able=20to=20get=20invalidations?= =?UTF-8?q?=E2=80=9D=20(#7871)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit PROBLEM: [$rd1 read] reads invalidation messages one by one, so it's never going to see the second invalidation message produced after INCR b, whether or not it exists. Adding another read will block incase no invalidation message is produced. FIX: We switch the order of "INCR a" and "INCR b" - now "INCR b" comes first. We still only read the first invalidation message produces. If an invalidation message is wrongly produces for b - then it will be produced before that of a, since "INCR b" comes before "INCR a". Co-authored-by: Nitai Caro (cherry picked from commit 94e9b0124e8582912c3771f9828842348490bc38) --- tests/unit/tracking.tcl | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/tests/unit/tracking.tcl b/tests/unit/tracking.tcl index 0332fa726..839b894ea 100644 --- a/tests/unit/tracking.tcl +++ b/tests/unit/tracking.tcl @@ -16,9 +16,10 @@ start_server {tags {"tracking"}} { test {The other connection is able to get invalidations} { r SET a 1 + r SET b 1 r GET a - r INCR a - r INCR b ; # This key should not be notified, since it wasn't fetched. + r INCR b ; # This key should not be notified, since it wasn't fetched. 
+ r INCR a set keys [lindex [$rd1 read] 2] assert {[llength $keys] == 1} assert {[lindex $keys 0] eq {a}} From 57027f0c55193e561fff4d56b7c87691c9f16933 Mon Sep 17 00:00:00 2001 From: Rafi Einstein Date: Thu, 1 Oct 2020 10:56:23 +0300 Subject: [PATCH 237/377] Makefile: enable program suffixes via PROG_SUFFIX (#7868) (cherry picked from commit 2636b760fb062a763ae528800fd998d2913c7cb1) --- README.md | 4 ++++ src/Makefile | 12 ++++++------ 2 files changed, 10 insertions(+), 6 deletions(-) diff --git a/README.md b/README.md index 80c2c9178..b6cfbbc5c 100644 --- a/README.md +++ b/README.md @@ -45,6 +45,10 @@ as libsystemd-dev on Debian/Ubuntu or systemd-devel on CentOS) and run: % make USE_SYSTEMD=yes +To append a suffix to Redis program names, use: + + % make PROG_SUFFIX="-alt" + You can run a 32 bit Redis binary using: % make 32bit diff --git a/src/Makefile b/src/Makefile index a91d457f4..3a09ccd3f 100644 --- a/src/Makefile +++ b/src/Makefile @@ -243,15 +243,15 @@ QUIET_LINK = @printf ' %b %b\n' $(LINKCOLOR)LINK$(ENDCOLOR) $(BINCOLOR)$@$(EN QUIET_INSTALL = @printf ' %b %b\n' $(LINKCOLOR)INSTALL$(ENDCOLOR) $(BINCOLOR)$@$(ENDCOLOR) 1>&2; endif -REDIS_SERVER_NAME=redis-server -REDIS_SENTINEL_NAME=redis-sentinel +REDIS_SERVER_NAME=redis-server$(PROG_SUFFIX) +REDIS_SENTINEL_NAME=redis-sentinel$(PROG_SUFFIX) REDIS_SERVER_OBJ=adlist.o quicklist.o ae.o anet.o dict.o server.o sds.o zmalloc.o lzf_c.o lzf_d.o pqsort.o zipmap.o sha1.o ziplist.o release.o networking.o util.o object.o db.o replication.o rdb.o t_string.o t_list.o t_set.o t_zset.o t_hash.o config.o aof.o pubsub.o multi.o debug.o sort.o intset.o syncio.o cluster.o crc16.o endianconv.o slowlog.o scripting.o bio.o rio.o rand.o memtest.o crcspeed.o crc64.o bitops.o sentinel.o notify.o setproctitle.o blocked.o hyperloglog.o latency.o sparkline.o redis-check-rdb.o redis-check-aof.o geo.o lazyfree.o module.o evict.o expire.o geohash.o geohash_helper.o childinfo.o defrag.o siphash.o rax.o t_stream.o listpack.o localtime.o lolwut.o lolwut5.o lolwut6.o acl.o gopher.o tracking.o connection.o tls.o sha256.o timeout.o setcpuaffinity.o -REDIS_CLI_NAME=redis-cli +REDIS_CLI_NAME=redis-cli$(PROG_SUFFIX) REDIS_CLI_OBJ=anet.o adlist.o dict.o redis-cli.o zmalloc.o release.o ae.o crcspeed.o crc64.o siphash.o crc16.o -REDIS_BENCHMARK_NAME=redis-benchmark +REDIS_BENCHMARK_NAME=redis-benchmark$(PROG_SUFFIX) REDIS_BENCHMARK_OBJ=ae.o anet.o redis-benchmark.o adlist.o dict.o zmalloc.o siphash.o -REDIS_CHECK_RDB_NAME=redis-check-rdb -REDIS_CHECK_AOF_NAME=redis-check-aof +REDIS_CHECK_RDB_NAME=redis-check-rdb$(PROG_SUFFIX) +REDIS_CHECK_AOF_NAME=redis-check-aof$(PROG_SUFFIX) all: $(REDIS_SERVER_NAME) $(REDIS_SENTINEL_NAME) $(REDIS_CLI_NAME) $(REDIS_BENCHMARK_NAME) $(REDIS_CHECK_RDB_NAME) $(REDIS_CHECK_AOF_NAME) @echo "" From 51dd4677c9f8d2003c80ca71bf6dfda8ce2a8dfe Mon Sep 17 00:00:00 2001 From: Oran Agra Date: Thu, 1 Oct 2020 11:27:45 +0300 Subject: [PATCH 238/377] Fix crash in script timeout during AOF loading (#7870) (cherry picked from commit 8cff3e03520bb08cb7dfdbd11f98827a3cb1d3a5) --- src/networking.c | 12 ++++++++---- tests/unit/scripting.tcl | 39 +++++++++++++++++++++++++++++++++++++++ 2 files changed, 47 insertions(+), 4 deletions(-) diff --git a/src/networking.c b/src/networking.c index 445150ab3..e05c8e1af 100644 --- a/src/networking.c +++ b/src/networking.c @@ -1512,16 +1512,20 @@ void resetClient(client *c) { * path, it is not really released, but only marked for later release. 
*/ void protectClient(client *c) { c->flags |= CLIENT_PROTECTED; - connSetReadHandler(c->conn,NULL); - connSetWriteHandler(c->conn,NULL); + if (c->conn) { + connSetReadHandler(c->conn,NULL); + connSetWriteHandler(c->conn,NULL); + } } /* This will undo the client protection done by protectClient() */ void unprotectClient(client *c) { if (c->flags & CLIENT_PROTECTED) { c->flags &= ~CLIENT_PROTECTED; - connSetReadHandler(c->conn,readQueryFromClient); - if (clientHasPendingReplies(c)) clientInstallWriteHandler(c); + if (c->conn) { + connSetReadHandler(c->conn,readQueryFromClient); + if (clientHasPendingReplies(c)) clientInstallWriteHandler(c); + } } } diff --git a/tests/unit/scripting.tcl b/tests/unit/scripting.tcl index 3283edc66..6bcba4c3f 100644 --- a/tests/unit/scripting.tcl +++ b/tests/unit/scripting.tcl @@ -430,6 +430,45 @@ start_server {tags {"scripting"}} { set res } {102} + test {EVAL timeout from AOF} { + # generate a long running script that is propagated to the AOF as script + # make sure that the script times out during loading + r config set appendonly no + r config set aof-use-rdb-preamble no + r config set lua-replicate-commands no + r flushall + r config set appendonly yes + wait_for_condition 50 100 { + [s aof_rewrite_in_progress] == 0 + } else { + fail "AOF rewrite can't complete after CONFIG SET appendonly yes." + } + r config set lua-time-limit 1 + set rd [redis_deferring_client] + set start [clock clicks -milliseconds] + $rd eval {redis.call('set',KEYS[1],'y'); for i=1,1500000 do redis.call('ping') end return 'ok'} 1 x + $rd flush + after 100 + catch {r ping} err + assert_match {BUSY*} $err + $rd read + set elapsed [expr [clock clicks -milliseconds]-$start] + if {$::verbose} { puts "script took $elapsed milliseconds" } + set start [clock clicks -milliseconds] + $rd debug loadaof + $rd flush + after 100 + catch {r ping} err + assert_match {LOADING*} $err + $rd read + set elapsed [expr [clock clicks -milliseconds]-$start] + if {$::verbose} { puts "loading took $elapsed milliseconds" } + $rd close + r get x + } {y} + r config set aof-use-rdb-preamble yes + r config set lua-replicate-commands yes + test {We can call scripts rewriting client->argv from Lua} { r del myset r sadd myset a b c From 0d7a1d1d373a04f28ef71f165178debbffc39177 Mon Sep 17 00:00:00 2001 From: Oran Agra Date: Thu, 1 Oct 2020 11:30:22 +0300 Subject: [PATCH 239/377] Include internal sds fragmentation in MEMORY reporting (#7864) The MEMORY command is used for debugging memory usage, so it should include internal fragmentation, same as used_memory (cherry picked from commit 86483e795262c6e2efdffe92c1642a72ef0dd6a0) --- src/object.c | 12 ++++++------ src/server.c | 2 +- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/src/object.c b/src/object.c index 6caa71bb9..92eebb556 100644 --- a/src/object.c +++ b/src/object.c @@ -786,7 +786,7 @@ size_t objectComputeSize(robj *o, size_t sample_size) { if(o->encoding == OBJ_ENCODING_INT) { asize = sizeof(*o); } else if(o->encoding == OBJ_ENCODING_RAW) { - asize = sdsAllocSize(o->ptr)+sizeof(*o); + asize = sdsZmallocSize(o->ptr)+sizeof(*o); } else if(o->encoding == OBJ_ENCODING_EMBSTR) { asize = sdslen(o->ptr)+2+sizeof(*o); } else { @@ -814,7 +814,7 @@ size_t objectComputeSize(robj *o, size_t sample_size) { asize = sizeof(*o)+sizeof(dict)+(sizeof(struct dictEntry*)*dictSlots(d)); while((de = dictNext(di)) != NULL && samples < sample_size) { ele = dictGetKey(de); - elesize += sizeof(struct dictEntry) + sdsAllocSize(ele); + elesize += sizeof(struct dictEntry) + 
sdsZmallocSize(ele); samples++; } dictReleaseIterator(di); @@ -836,7 +836,7 @@ size_t objectComputeSize(robj *o, size_t sample_size) { (sizeof(struct dictEntry*)*dictSlots(d))+ zmalloc_size(zsl->header); while(znode != NULL && samples < sample_size) { - elesize += sdsAllocSize(znode->ele); + elesize += sdsZmallocSize(znode->ele); elesize += sizeof(struct dictEntry) + zmalloc_size(znode); samples++; znode = znode->level[0].forward; @@ -855,7 +855,7 @@ size_t objectComputeSize(robj *o, size_t sample_size) { while((de = dictNext(di)) != NULL && samples < sample_size) { ele = dictGetKey(de); ele2 = dictGetVal(de); - elesize += sdsAllocSize(ele) + sdsAllocSize(ele2); + elesize += sdsZmallocSize(ele) + sdsZmallocSize(ele2); elesize += sizeof(struct dictEntry); samples++; } @@ -995,7 +995,7 @@ struct redisMemOverhead *getMemoryOverheadData(void) { mem = 0; if (server.aof_state != AOF_OFF) { - mem += sdsalloc(server.aof_buf); + mem += sdsZmallocSize(server.aof_buf); mem += aofRewriteBufferSize(); } mh->aof_buffer = mem; @@ -1311,7 +1311,7 @@ NULL return; } size_t usage = objectComputeSize(dictGetVal(de),samples); - usage += sdsAllocSize(dictGetKey(de)); + usage += sdsZmallocSize(dictGetKey(de)); usage += sizeof(dictEntry); addReplyLongLong(c,usage); } else if (!strcasecmp(c->argv[1]->ptr,"stats") && c->argc == 2) { diff --git a/src/server.c b/src/server.c index 1f20efe94..cc9fe7d2f 100644 --- a/src/server.c +++ b/src/server.c @@ -1608,7 +1608,7 @@ int clientsCronTrackClientsMemUsage(client *c) { size_t mem = 0; int type = getClientType(c); mem += getClientOutputBufferMemoryUsage(c); - mem += sdsAllocSize(c->querybuf); + mem += sdsZmallocSize(c->querybuf); mem += sizeof(client); /* Now that we have the memory used by the client, remove the old * value from the old category, and add it back. */ From 7a8a268ac40a25db699757748e285a84b1fd9a04 Mon Sep 17 00:00:00 2001 From: DvirDukhan Date: Sun, 4 Oct 2020 17:18:17 +0300 Subject: [PATCH 240/377] redis-cli add control on raw format line delimiter (#7841) Adding -D option for redis-cli to control newline between command responses in raw mode. 
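A minimal sketch of how the two delimiter flags differ (the key names and output file here are hypothetical, for illustration only):

    % redis-cli --raw -d ',' LRANGE mylist 0 -1    # elements inside a single reply joined by ','
    % redis-cli --raw -D '' GET mykey > value.bin  # nothing appended after the reply itself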
Also removing cleanup code before calling exit, just in order to avoid adding more adding more cleanup code (redis doesn't bother to release allocations before exit anyway) Co-authored-by: Oran Agra (cherry picked from commit f0f8e9c824b819c8aec996ec8c8851773a6f9432) --- src/redis-cli.c | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/src/redis-cli.c b/src/redis-cli.c index 2f4609661..73d4abff2 100644 --- a/src/redis-cli.c +++ b/src/redis-cli.c @@ -237,6 +237,7 @@ static struct config { char *user; int output; /* output mode, see OUTPUT_* defines */ sds mb_delim; + sds cmd_delim; char prompt[128]; char *eval; int eval_ldb; @@ -1251,7 +1252,7 @@ static int cliReadReply(int output_raw_strings) { } else { if (config.output == OUTPUT_RAW) { out = cliFormatReplyRaw(reply); - out = sdscat(out,"\n"); + out = sdscatsds(out, config.cmd_delim); } else if (config.output == OUTPUT_STANDARD) { out = cliFormatReplyTTY(reply,""); } else if (config.output == OUTPUT_CSV) { @@ -1533,6 +1534,9 @@ static int parseOptions(int argc, char **argv) { } else if (!strcmp(argv[i],"-d") && !lastarg) { sdsfree(config.mb_delim); config.mb_delim = sdsnew(argv[++i]); + } else if (!strcmp(argv[i],"-D") && !lastarg) { + sdsfree(config.cmd_delim); + config.cmd_delim = sdsnew(argv[++i]); } else if (!strcmp(argv[i],"--verbose")) { config.verbose = 1; } else if (!strcmp(argv[i],"--cluster") && !lastarg) { @@ -1726,7 +1730,8 @@ static void usage(void) { " -n Database number.\n" " -3 Start session in RESP3 protocol mode.\n" " -x Read last argument from STDIN.\n" -" -d Multi-bulk delimiter in for raw formatting (default: \\n).\n" +" -d Delimiter between response bulks for raw formatting (default: \\n).\n" +" -D Delimiter between responses for raw formatting (default: \\n).\n" " -c Enable cluster mode (follow -ASK and -MOVED redirections).\n" #ifdef USE_OPENSSL " --tls Establish a secure TLS connection.\n" @@ -5360,8 +5365,6 @@ static void clusterManagerMode(clusterManagerCommandProc *proc) { exit(0); cluster_manager_err: freeClusterManager(); - sdsfree(config.hostip); - sdsfree(config.mb_delim); exit(1); } @@ -8118,6 +8121,7 @@ int main(int argc, char **argv) { else config.output = OUTPUT_STANDARD; config.mb_delim = sdsnew("\n"); + config.cmd_delim = sdsnew("\n"); firstarg = parseOptions(argc,argv); argc -= firstarg; @@ -8141,8 +8145,6 @@ int main(int argc, char **argv) { if (CLUSTER_MANAGER_MODE()) { clusterManagerCommandProc *proc = validateClusterManagerCommand(); if (!proc) { - sdsfree(config.hostip); - sdsfree(config.mb_delim); exit(1); } clusterManagerMode(proc); From a5302a8c21ec0dfbe9894ec12e3fd19f0fce1c49 Mon Sep 17 00:00:00 2001 From: Oran Agra Date: Mon, 5 Oct 2020 11:15:36 +0300 Subject: [PATCH 241/377] memory reporting of clients argv (#7874) track and report memory used by clients argv. this is very usaful in case clients started sending a command and didn't complete it. in which case the first args of the command are already trimmed from the query buffer. in an effort to avoid cache misses and overheads while keeping track of these, i avoid calling sdsZmallocSize and instead use the sdslen / bulk-len which can at least give some insight into the problem. This memory is now added to the total clients memory usage, as well as the client list. 
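As a rough illustration of the resulting CLIENT LIST output (all numbers below are made up; the point is the two new fields, argv-mem and tot-mem, next to the existing buffer stats):

    % redis-cli CLIENT LIST
    id=7 addr=127.0.0.1:51234 fd=8 name= age=13 idle=0 flags=N db=0 sub=0 psub=0 multi=-1 qbuf=26 qbuf-free=32742 argv-mem=10 obl=0 oll=0 omem=0 tot-mem=61466 events=r cmd=client user=default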
(cherry picked from commit 7481e513f0507d01381a87046d8d1366c718f94e) --- src/aof.c | 2 ++ src/networking.c | 51 +++++++++++++++++++++++++++++++++--- src/server.c | 4 ++- src/server.h | 1 + tests/unit/introspection.tcl | 2 +- 5 files changed, 55 insertions(+), 5 deletions(-) diff --git a/src/aof.c b/src/aof.c index 2114a17e4..b8ba31c19 100644 --- a/src/aof.c +++ b/src/aof.c @@ -669,6 +669,7 @@ struct client *createAOFClient(void) { c->querybuf_peak = 0; c->argc = 0; c->argv = NULL; + c->argv_len_sum = 0; c->bufpos = 0; c->flags = 0; c->btype = BLOCKED_NONE; @@ -694,6 +695,7 @@ void freeFakeClientArgv(struct client *c) { for (j = 0; j < c->argc; j++) decrRefCount(c->argv[j]); zfree(c->argv); + c->argv_len_sum = 0; } void freeFakeClient(struct client *c) { diff --git a/src/networking.c b/src/networking.c index e05c8e1af..54de8ac54 100644 --- a/src/networking.c +++ b/src/networking.c @@ -48,7 +48,7 @@ size_t sdsZmallocSize(sds s) { } /* Return the amount of memory used by the sds string at object->ptr - * for a string object. */ + * for a string object. This includes internal fragmentation. */ size_t getStringObjectSdsUsedMemory(robj *o) { serverAssertWithInfo(NULL,o,o->type == OBJ_STRING); switch(o->encoding) { @@ -58,6 +58,17 @@ size_t getStringObjectSdsUsedMemory(robj *o) { } } +/* Return the length of a string object. + * This does NOT includes internal fragmentation or sds unused space. */ +size_t getStringObjectLen(robj *o) { + serverAssertWithInfo(NULL,o,o->type == OBJ_STRING); + switch(o->encoding) { + case OBJ_ENCODING_RAW: return sdslen(o->ptr); + case OBJ_ENCODING_EMBSTR: return sdslen(o->ptr); + default: return 0; /* Just integer encoding for now. */ + } +} + /* Client.reply list dup and free methods. */ void *dupClientReplyValue(void *o) { clientReplyBlock *old = o; @@ -116,6 +127,7 @@ client *createClient(connection *conn) { c->reqtype = 0; c->argc = 0; c->argv = NULL; + c->argv_len_sum = 0; c->cmd = c->lastcmd = NULL; c->user = DefaultUser; c->multibulklen = 0; @@ -1051,6 +1063,7 @@ static void freeClientArgv(client *c) { decrRefCount(c->argv[j]); c->argc = 0; c->cmd = NULL; + c->argv_len_sum = 0; } /* Close all the slaves connections. This is useful in chained replication @@ -1249,6 +1262,7 @@ void freeClient(client *c) { * and finally release the client structure itself. */ if (c->name) decrRefCount(c->name); zfree(c->argv); + c->argv_len_sum = 0; freeClientMultiState(c); sdsfree(c->peerid); zfree(c); @@ -1595,12 +1609,14 @@ int processInlineBuffer(client *c) { if (argc) { if (c->argv) zfree(c->argv); c->argv = zmalloc(sizeof(robj*)*argc); + c->argv_len_sum = 0; } /* Create redis objects for all arguments. */ for (c->argc = 0, j = 0; j < argc; j++) { c->argv[c->argc] = createObject(OBJ_STRING,argv[j]); c->argc++; + c->argv_len_sum += sdslen(argv[j]); } zfree(argv); return C_OK; @@ -1692,6 +1708,7 @@ int processMultibulkBuffer(client *c) { /* Setup argv array on client structure */ if (c->argv) zfree(c->argv); c->argv = zmalloc(sizeof(robj*)*c->multibulklen); + c->argv_len_sum = 0; } serverAssertWithInfo(c,NULL,c->multibulklen > 0); @@ -1764,6 +1781,7 @@ int processMultibulkBuffer(client *c) { sdslen(c->querybuf) == (size_t)(c->bulklen+2)) { c->argv[c->argc++] = createObject(OBJ_STRING,c->querybuf); + c->argv_len_sum += c->bulklen; sdsIncrLen(c->querybuf,-2); /* remove CRLF */ /* Assume that if we saw a fat argument we'll see another one * likely... 
*/ @@ -1772,6 +1790,7 @@ int processMultibulkBuffer(client *c) { } else { c->argv[c->argc++] = createStringObject(c->querybuf+c->qb_pos,c->bulklen); + c->argv_len_sum += c->bulklen; c->qb_pos += c->bulklen+2; } c->bulklen = -1; @@ -2094,8 +2113,21 @@ sds catClientInfoString(sds s, client *client) { if (connHasWriteHandler(client->conn)) *p++ = 'w'; } *p = '\0'; + + /* Compute the total memory consumed by this client. */ + size_t obufmem = getClientOutputBufferMemoryUsage(client); + size_t total_mem = obufmem; + total_mem += zmalloc_size(client); /* includes client->buf */ + total_mem += sdsZmallocSize(client->querybuf); + /* For efficiency (less work keeping track of the argv memory), it doesn't include the used memory + * i.e. unused sds space and internal fragmentation, just the string length. but this is enough to + * spot problematic clients. */ + total_mem += client->argv_len_sum; + if (client->argv) + total_mem += zmalloc_size(client->argv); + return sdscatfmt(s, - "id=%U addr=%s %s name=%s age=%I idle=%I flags=%s db=%i sub=%i psub=%i multi=%i qbuf=%U qbuf-free=%U obl=%U oll=%U omem=%U events=%s cmd=%s user=%s", + "id=%U addr=%s %s name=%s age=%I idle=%I flags=%s db=%i sub=%i psub=%i multi=%i qbuf=%U qbuf-free=%U argv-mem=%U obl=%U oll=%U omem=%U tot-mem=%U events=%s cmd=%s user=%s", (unsigned long long) client->id, getClientPeerId(client), connGetInfo(client->conn, conninfo, sizeof(conninfo)), @@ -2109,9 +2141,11 @@ sds catClientInfoString(sds s, client *client) { (client->flags & CLIENT_MULTI) ? client->mstate.count : -1, (unsigned long long) sdslen(client->querybuf), (unsigned long long) sdsavail(client->querybuf), + (unsigned long long) client->argv_len_sum, (unsigned long long) client->bufpos, (unsigned long long) listLength(client->reply), - (unsigned long long) getClientOutputBufferMemoryUsage(client), + (unsigned long long) obufmem, /* should not include client->buf since we want to see 0 for static clients. */ + (unsigned long long) total_mem, events, client->lastcmd ? client->lastcmd->name : "NULL", client->user ? client->user->name : "(superuser)"); @@ -2649,6 +2683,10 @@ void rewriteClientCommandVector(client *c, int argc, ...) { /* Replace argv and argc with our new versions. */ c->argv = argv; c->argc = argc; + c->argv_len_sum = 0; + for (j = 0; j < c->argc; j++) + if (c->argv[j]) + c->argv_len_sum += getStringObjectLen(c->argv[j]); c->cmd = lookupCommandOrOriginal(c->argv[0]->ptr); serverAssertWithInfo(c,NULL,c->cmd != NULL); va_end(ap); @@ -2656,10 +2694,15 @@ void rewriteClientCommandVector(client *c, int argc, ...) { /* Completely replace the client command vector with the provided one. 
*/ void replaceClientCommandVector(client *c, int argc, robj **argv) { + int j; freeClientArgv(c); zfree(c->argv); c->argv = argv; c->argc = argc; + c->argv_len_sum = 0; + for (j = 0; j < c->argc; j++) + if (c->argv[j]) + c->argv_len_sum += getStringObjectLen(c->argv[j]); c->cmd = lookupCommandOrOriginal(c->argv[0]->ptr); serverAssertWithInfo(c,NULL,c->cmd != NULL); } @@ -2684,6 +2727,8 @@ void rewriteClientCommandArgument(client *c, int i, robj *newval) { c->argv[i] = NULL; } oldval = c->argv[i]; + if (oldval) c->argv_len_sum -= getStringObjectLen(oldval); + if (newval) c->argv_len_sum += getStringObjectLen(newval); c->argv[i] = newval; incrRefCount(newval); if (oldval) decrRefCount(oldval); diff --git a/src/server.c b/src/server.c index cc9fe7d2f..c9f92702f 100644 --- a/src/server.c +++ b/src/server.c @@ -1609,7 +1609,9 @@ int clientsCronTrackClientsMemUsage(client *c) { int type = getClientType(c); mem += getClientOutputBufferMemoryUsage(c); mem += sdsZmallocSize(c->querybuf); - mem += sizeof(client); + mem += zmalloc_size(c); + mem += c->argv_len_sum; + if (c->argv) mem += zmalloc_size(c->argv); /* Now that we have the memory used by the client, remove the old * value from the old category, and add it back. */ server.stat_clients_type_memory[c->client_cron_last_memory_type] -= diff --git a/src/server.h b/src/server.h index b12e4587d..9f2c50af1 100644 --- a/src/server.h +++ b/src/server.h @@ -799,6 +799,7 @@ typedef struct client { size_t querybuf_peak; /* Recent (100ms or more) peak of querybuf size. */ int argc; /* Num of arguments of current command. */ robj **argv; /* Arguments of current command. */ + size_t argv_len_sum; /* Sum of lengths of objects in argv list. */ struct redisCommand *cmd, *lastcmd; /* Last command executed. */ user *user; /* User associated with this connection. If the user is set to NULL the connection can do diff --git a/tests/unit/introspection.tcl b/tests/unit/introspection.tcl index 37470c068..32215868c 100644 --- a/tests/unit/introspection.tcl +++ b/tests/unit/introspection.tcl @@ -1,7 +1,7 @@ start_server {tags {"introspection"}} { test {CLIENT LIST} { r client list - } {*addr=*:* fd=* age=* idle=* flags=N db=9 sub=0 psub=0 multi=-1 qbuf=26 qbuf-free=* obl=0 oll=0 omem=0 events=r cmd=client*} + } {*addr=*:* fd=* age=* idle=* flags=N db=9 sub=0 psub=0 multi=-1 qbuf=26 qbuf-free=* argv-mem=* obl=0 oll=0 omem=0 tot-mem=* events=r cmd=client*} test {MONITOR can log executed commands} { set rd [redis_deferring_client] From 6e03b388ce91f2bd89c7a100690af3c79635c184 Mon Sep 17 00:00:00 2001 From: Oran Agra Date: Tue, 6 Oct 2020 21:43:30 +0300 Subject: [PATCH 242/377] Allow blocked XREAD on a cluster replica (#7881) I suppose that it was overlooked, since till recently none of the blocked commands were readonly. other changes: - add test for the above. - add better support for additional (and deferring) clients for cluster tests - improve a test which left the client in MULTI state. 
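A minimal sketch of the behavior this enables on a cluster replica, as exercised by the new test (port, key and prompt are hypothetical):

    % redis-cli -p 7005                      # 7005 is a replica node in the cluster
    127.0.0.1:7005> READONLY
    OK
    127.0.0.1:7005> XREAD BLOCK 0 STREAMS mystream $
    (the client now blocks on the replica until a matching entry is replicated,
     instead of being redirected with -MOVED as before)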
(cherry picked from commit ba61700db24628451212c5875e0ca7e5d83ea743) --- src/cluster.c | 9 ++++++++ .../tests/16-transactions-on-replica.tcl | 21 +++++++++++++++++++ tests/instances.tcl | 13 ++++++++++++ 3 files changed, 43 insertions(+) diff --git a/src/cluster.c b/src/cluster.c index e8db4050d..43bea155b 100644 --- a/src/cluster.c +++ b/src/cluster.c @@ -5851,6 +5851,15 @@ int clusterRedirectBlockedClientIfNeeded(client *c) { int slot = keyHashSlot((char*)key->ptr, sdslen(key->ptr)); clusterNode *node = server.cluster->slots[slot]; + /* if the client is read-only and attempting to access key that our + * replica can handle, allow it. */ + if ((c->flags & CLIENT_READONLY) && + (c->lastcmd->flags & CMD_READONLY) && + nodeIsSlave(myself) && myself->slaveof == node) + { + node = myself; + } + /* We send an error and unblock the client if: * 1) The slot is unassigned, emitting a cluster down error. * 2) The slot is not handled by this node, nor being imported. */ diff --git a/tests/cluster/tests/16-transactions-on-replica.tcl b/tests/cluster/tests/16-transactions-on-replica.tcl index da9dff1ca..41083f421 100644 --- a/tests/cluster/tests/16-transactions-on-replica.tcl +++ b/tests/cluster/tests/16-transactions-on-replica.tcl @@ -45,4 +45,25 @@ test "MULTI-EXEC with write operations is MOVED" { $replica MULTI catch {$replica HSET h b 4} err assert {[string range $err 0 4] eq {MOVED}} + catch {$replica exec} err + assert {[string range $err 0 8] eq {EXECABORT}} +} + +test "read-only blocking operations from replica" { + set rd [redis_deferring_client redis 1] + $rd readonly + $rd read + $rd XREAD BLOCK 0 STREAMS k 0 + + wait_for_condition 1000 50 { + [RI 1 blocked_clients] eq {1} + } else { + fail "client wasn't blocked" + } + + $primary XADD k * foo bar + set res [$rd read] + set res [lindex [lindex [lindex [lindex $res 0] 1] 0] 1] + assert {$res eq {foo bar}} + $rd close } diff --git a/tests/instances.tcl b/tests/instances.tcl index 5c4b665db..d3b1b50cd 100644 --- a/tests/instances.tcl +++ b/tests/instances.tcl @@ -606,3 +606,16 @@ proc restart_instance {type id} { } } +proc redis_deferring_client {type id} { + set port [get_instance_attrib $type $id port] + set host [get_instance_attrib $type $id host] + set client [redis $host $port 1 $::tls] + return $client +} + +proc redis_client {type id} { + set port [get_instance_attrib $type $id port] + set host [get_instance_attrib $type $id host] + set client [redis $host $port 0 $::tls] + return $client +} From 9cf7292ef7ca2668a799a7e4b82a1381553aadc1 Mon Sep 17 00:00:00 2001 From: Oran Agra Date: Wed, 7 Oct 2020 20:28:57 +0300 Subject: [PATCH 243/377] Add some additional signal info to the crash log (#7891) - si_code can be very useful info some day. 
- a clear indication that redis was killed by an external user (cherry picked from commit 38c7c62d2270b4921219953aaabfcdc721154b88) --- src/debug.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/src/debug.c b/src/debug.c index 1a41574e4..7112ff535 100644 --- a/src/debug.c +++ b/src/debug.c @@ -1627,7 +1627,7 @@ void sigsegvHandler(int sig, siginfo_t *info, void *secret) { bugReportStart(); serverLog(LL_WARNING, - "Redis %s crashed by signal: %d", REDIS_VERSION, sig); + "Redis %s crashed by signal: %d, si_code: %d", REDIS_VERSION, sig, info->si_code); if (eip != NULL) { serverLog(LL_WARNING, "Crashed running the instruction at: %p", eip); @@ -1636,6 +1636,9 @@ void sigsegvHandler(int sig, siginfo_t *info, void *secret) { serverLog(LL_WARNING, "Accessing address: %p", (void*)info->si_addr); } + if (info->si_pid != -1) { + serverLog(LL_WARNING, "Killed by PID: %d, UID: %d", info->si_pid, info->si_uid); + } serverLog(LL_WARNING, "Failed assertion: %s (%s:%d)", server.assert_failed, server.assert_file, server.assert_line); From ec9b1cca59fdadb56ad4d541d2c9572207847974 Mon Sep 17 00:00:00 2001 From: Madelyn Olson <34459052+madolson@users.noreply.github.com> Date: Wed, 7 Oct 2020 22:09:09 -0700 Subject: [PATCH 244/377] Fixed excessive categories being displayed from acls (#7889) (cherry picked from commit abe416c5f251b7b151440b38829a719c4846b8b8) --- src/acl.c | 17 +++++++++++++++-- tests/unit/acl.tcl | 14 ++++++++++++++ 2 files changed, 29 insertions(+), 2 deletions(-) diff --git a/src/acl.c b/src/acl.c index 5d484a742..e781c9211 100644 --- a/src/acl.c +++ b/src/acl.c @@ -478,17 +478,30 @@ sds ACLDescribeUserCommandRules(user *u) { /* Try to add or subtract each category one after the other. Often a * single category will not perfectly match the set of commands into * it, so at the end we do a final pass adding/removing the single commands - * needed to make the bitmap exactly match. */ + * needed to make the bitmap exactly match. A temp user is maintained to + * keep track of categories already applied. */ + user tu = {0}; + user *tempuser = &tu; + memcpy(tempuser->allowed_commands, + u->allowed_commands, + sizeof(u->allowed_commands)); + for (int j = 0; ACLCommandCategories[j].flag != 0; j++) { unsigned long on, off; - ACLCountCategoryBitsForUser(u,&on,&off,ACLCommandCategories[j].name); + ACLCountCategoryBitsForUser(tempuser,&on,&off,ACLCommandCategories[j].name); if ((additive && on > off) || (!additive && off > on)) { sds op = sdsnewlen(additive ? "+@" : "-@", 2); op = sdscat(op,ACLCommandCategories[j].name); ACLSetUser(fakeuser,op,-1); + + sds invop = sdsnewlen(additive ? "-@" : "+@", 2); + invop = sdscat(invop,ACLCommandCategories[j].name); + ACLSetUser(tempuser,invop,-1); + rules = sdscatsds(rules,op); rules = sdscatlen(rules," ",1); sdsfree(op); + sdsfree(invop); } } diff --git a/tests/unit/acl.tcl b/tests/unit/acl.tcl index f015f75a0..12f59e749 100644 --- a/tests/unit/acl.tcl +++ b/tests/unit/acl.tcl @@ -135,6 +135,20 @@ start_server {tags {"acl"}} { assert_match {*+acl*} $cmdstr } + # A regression test make sure that as long as there is a simple + # category defining the commands, that it will be used as is. 
+ test {ACL GETUSER provides reasonable results} { + # Test for future commands where allowed + r ACL setuser additive reset +@all -@write + set cmdstr [dict get [r ACL getuser additive] commands] + assert_match {+@all -@write} $cmdstr + + # Test for future commands are disallowed + r ACL setuser subtractive reset -@all +@read + set cmdstr [dict get [r ACL getuser subtractive] commands] + assert_match {-@all +@read} $cmdstr + } + test {ACL #5998 regression: memory leaks adding / removing subcommands} { r AUTH default "" r ACL setuser newuser reset -debug +debug|a +debug|b +debug|c From ad1ed7dcd0fcf299e9560b4d74194a885723f7c2 Mon Sep 17 00:00:00 2001 From: Yossi Gottlieb Date: Mon, 5 Oct 2020 17:03:17 +0300 Subject: [PATCH 245/377] Introduce getKeysResult for getKeysFromCommand. Avoid using a static buffer for short key index responses, and make it caller's responsibility to stack-allocate a result type. Responses that don't fit are still allocated on the heap. (cherry picked from commit bf5beab64a196214c3c741d9ef67d0446c6480c3) --- src/acl.c | 9 +-- src/cluster.c | 11 ++-- src/db.c | 171 ++++++++++++++++++++++++++----------------------- src/module.c | 33 +++++++--- src/server.c | 14 ++-- src/server.h | 40 ++++++++---- src/tracking.c | 13 ++-- 7 files changed, 170 insertions(+), 121 deletions(-) diff --git a/src/acl.c b/src/acl.c index e781c9211..e0fd3f728 100644 --- a/src/acl.c +++ b/src/acl.c @@ -1115,8 +1115,9 @@ int ACLCheckCommandPerm(client *c, int *keyidxptr) { if (!(c->user->flags & USER_FLAG_ALLKEYS) && (c->cmd->getkeys_proc || c->cmd->firstkey)) { - int numkeys; - int *keyidx = getKeysFromCommand(c->cmd,c->argv,c->argc,&numkeys); + getKeysResult result = GETKEYS_RESULT_INIT; + int numkeys = getKeysFromCommand(c->cmd,c->argv,c->argc,&result); + int *keyidx = result.keys; for (int j = 0; j < numkeys; j++) { listIter li; listNode *ln; @@ -1137,11 +1138,11 @@ int ACLCheckCommandPerm(client *c, int *keyidxptr) { } if (!match) { if (keyidxptr) *keyidxptr = keyidx[j]; - getKeysFreeResult(keyidx); + getKeysFreeResult(&result); return ACL_DENIED_KEY; } } - getKeysFreeResult(keyidx); + getKeysFreeResult(&result); } /* If we survived all the above checks, the user can execute the diff --git a/src/cluster.c b/src/cluster.c index 43bea155b..7d690e863 100644 --- a/src/cluster.c +++ b/src/cluster.c @@ -5640,7 +5640,10 @@ clusterNode *getNodeByQuery(client *c, struct redisCommand *cmd, robj **argv, in margc = ms->commands[i].argc; margv = ms->commands[i].argv; - keyindex = getKeysFromCommand(mcmd,margv,margc,&numkeys); + getKeysResult result = GETKEYS_RESULT_INIT; + numkeys = getKeysFromCommand(mcmd,margv,margc,&result); + keyindex = result.keys; + for (j = 0; j < numkeys; j++) { robj *thiskey = margv[keyindex[j]]; int thisslot = keyHashSlot((char*)thiskey->ptr, @@ -5658,7 +5661,7 @@ clusterNode *getNodeByQuery(client *c, struct redisCommand *cmd, robj **argv, in * not trapped earlier in processCommand(). Report the same * error to the client. */ if (n == NULL) { - getKeysFreeResult(keyindex); + getKeysFreeResult(&result); if (error_code) *error_code = CLUSTER_REDIR_DOWN_UNBOUND; return NULL; @@ -5682,7 +5685,7 @@ clusterNode *getNodeByQuery(client *c, struct redisCommand *cmd, robj **argv, in if (!equalStringObjects(firstkey,thiskey)) { if (slot != thisslot) { /* Error: multiple keys from different slots. 
*/ - getKeysFreeResult(keyindex); + getKeysFreeResult(&result); if (error_code) *error_code = CLUSTER_REDIR_CROSS_SLOT; return NULL; @@ -5701,7 +5704,7 @@ clusterNode *getNodeByQuery(client *c, struct redisCommand *cmd, robj **argv, in missing_keys++; } } - getKeysFreeResult(keyindex); + getKeysFreeResult(&result); } /* No key at all in command? then we can serve the request diff --git a/src/db.c b/src/db.c index 7ed746f9a..eb87cebc5 100644 --- a/src/db.c +++ b/src/db.c @@ -1322,27 +1322,54 @@ int expireIfNeeded(redisDb *db, robj *key) { /* ----------------------------------------------------------------------------- * API to get key arguments from commands * ---------------------------------------------------------------------------*/ -#define MAX_KEYS_BUFFER 256 -static int getKeysTempBuffer[MAX_KEYS_BUFFER]; + +/* Prepare the getKeysResult struct to hold numkeys, either by using the + * pre-allocated keysbuf or by allocating a new array on the heap. + * + * This function must be called at least once before starting to populate + * the result, and can be called repeatedly to enlarge the result array. + */ +int *getKeysPrepareResult(getKeysResult *result, int numkeys) { + /* GETKEYS_RESULT_INIT initializes keys to NULL, point it to the pre-allocated stack + * buffer here. */ + if (!result->keys) { + serverAssert(!result->numkeys); + result->keys = result->keysbuf; + } + + /* Resize if necessary */ + if (numkeys > result->size) { + if (result->keys != result->keysbuf) { + /* We're not using a static buffer, just (re)alloc */ + result->keys = zrealloc(result->keys, numkeys * sizeof(int)); + } else { + /* We are using a static buffer, copy its contents */ + result->keys = zmalloc(numkeys * sizeof(int)); + if (result->numkeys) + memcpy(result->keys, result->keysbuf, result->numkeys * sizeof(int)); + } + result->size = numkeys; + } + + return result->keys; +} /* The base case is to use the keys position as given in the command table * (firstkey, lastkey, step). */ -int *getKeysUsingCommandTable(struct redisCommand *cmd,robj **argv, int argc, int *numkeys) { +int getKeysUsingCommandTable(struct redisCommand *cmd,robj **argv, int argc, getKeysResult *result) { int j, i = 0, last, *keys; UNUSED(argv); if (cmd->firstkey == 0) { - *numkeys = 0; - return NULL; + result->numkeys = 0; + return 0; } last = cmd->lastkey; if (last < 0) last = argc+last; int count = ((last - cmd->firstkey)+1); - keys = getKeysTempBuffer; - if (count > MAX_KEYS_BUFFER) - keys = zmalloc(sizeof(int)*count); + keys = getKeysPrepareResult(result, count); for (j = cmd->firstkey; j <= last; j += cmd->keystep) { if (j >= argc) { @@ -1353,17 +1380,17 @@ int *getKeysUsingCommandTable(struct redisCommand *cmd,robj **argv, int argc, in * return no keys and expect the command implementation to report * an arity or syntax error. */ if (cmd->flags & CMD_MODULE || cmd->arity < 0) { - getKeysFreeResult(keys); - *numkeys = 0; - return NULL; + getKeysFreeResult(result); + result->numkeys = 0; + return 0; } else { serverPanic("Redis built-in command declared keys positions not matching the arity requirements."); } } keys[i++] = j; } - *numkeys = i; - return keys; + result->numkeys = i; + return i; } /* Return all the arguments that are keys in the command passed via argc / argv. @@ -1377,26 +1404,26 @@ int *getKeysUsingCommandTable(struct redisCommand *cmd,robj **argv, int argc, in * * This function uses the command table if a command-specific helper function * is not required, otherwise it calls the command-specific function. 
*/ -int *getKeysFromCommand(struct redisCommand *cmd, robj **argv, int argc, int *numkeys) { +int getKeysFromCommand(struct redisCommand *cmd, robj **argv, int argc, getKeysResult *result) { if (cmd->flags & CMD_MODULE_GETKEYS) { - return moduleGetCommandKeysViaAPI(cmd,argv,argc,numkeys); + return moduleGetCommandKeysViaAPI(cmd,argv,argc,result); } else if (!(cmd->flags & CMD_MODULE) && cmd->getkeys_proc) { - return cmd->getkeys_proc(cmd,argv,argc,numkeys); + return cmd->getkeys_proc(cmd,argv,argc,result); } else { - return getKeysUsingCommandTable(cmd,argv,argc,numkeys); + return getKeysUsingCommandTable(cmd,argv,argc,result); } } /* Free the result of getKeysFromCommand. */ -void getKeysFreeResult(int *result) { - if (result != getKeysTempBuffer) - zfree(result); +void getKeysFreeResult(getKeysResult *result) { + if (result && result->keys != result->keysbuf) + zfree(result->keys); } /* Helper function to extract keys from following commands: * ZUNIONSTORE ... * ZINTERSTORE ... */ -int *zunionInterGetKeys(struct redisCommand *cmd, robj **argv, int argc, int *numkeys) { +int zunionInterGetKeys(struct redisCommand *cmd, robj **argv, int argc, getKeysResult *result) { int i, num, *keys; UNUSED(cmd); @@ -1404,30 +1431,30 @@ int *zunionInterGetKeys(struct redisCommand *cmd, robj **argv, int argc, int *nu /* Sanity check. Don't return any key if the command is going to * reply with syntax error. */ if (num < 1 || num > (argc-3)) { - *numkeys = 0; - return NULL; + result->numkeys = 0; + return 0; } /* Keys in z{union,inter}store come from two places: * argv[1] = storage key, * argv[3...n] = keys to intersect */ - keys = getKeysTempBuffer; - if (num+1>MAX_KEYS_BUFFER) - keys = zmalloc(sizeof(int)*(num+1)); + /* Total keys = {union,inter} keys + storage key */ + keys = getKeysPrepareResult(result, num+1); + result->numkeys = num+1; /* Add all key positions for argv[3...n] to keys[] */ for (i = 0; i < num; i++) keys[i] = 3+i; /* Finally add the argv[1] key position (the storage key target). */ keys[num] = 1; - *numkeys = num+1; /* Total keys = {union,inter} keys + storage key */ - return keys; + + return result->numkeys; } /* Helper function to extract keys from the following commands: * EVAL