From 31c9cd86040b4da9368a57148aa17588f290f262 Mon Sep 17 00:00:00 2001
From: Paul Spooren
Date: Tue, 9 Jun 2020 16:53:14 -0400
Subject: [PATCH 001/215] LRANK: Add command (the command will be renamed LPOS).

The `LRANK` command returns the index (position) of a given element
within a list. Using the `direction` argument it is possible to specify
going from head to tail (ascending, 1) or from tail to head
(descending, -1). Only the first index found is returned. The
complexity is O(N).

When using lists as a queue it can be useful to know at what position a
given element is, for instance to monitor a job progressing through a
work queue. This came up within the Python `rq` project which is based
on Redis[0].

[0]: https://github.com/rq/rq/issues/1197

Signed-off-by: Paul Spooren
---
 src/help.h   |  5 +++++
 src/server.c |  4 ++++
 src/server.h |  1 +
 src/t_list.c | 34 ++++++++++++++++++++++++++++++++++
 4 files changed, 44 insertions(+)

diff --git a/src/help.h b/src/help.h
index 6d3eb33ed..9b4e90f9f 100644
--- a/src/help.h
+++ b/src/help.h
@@ -668,6 +668,11 @@ struct commandHelp {
     "Remove elements from a list",
     2,
     "1.0.0" },
+    { "LRANK",
+    "key direction element",
+    "Return first index of element in list based on direction",
+    2,
+    "9.9.9" },
     { "LSET",
     "key index element",
     "Set the value of an element in a list by its index",
diff --git a/src/server.c b/src/server.c
index e8e711240..767d3374f 100644
--- a/src/server.c
+++ b/src/server.c
@@ -326,6 +326,10 @@ struct redisCommand redisCommandTable[] = {
     "write @list",
     0,NULL,1,1,1,0,0,0},

+    {"lrank",lrankCommand,4,
+     "read-only fast @list",
+     0,NULL,1,1,1,0,0,0},
+
     {"lrem",lremCommand,4,
     "write @list",
     0,NULL,1,1,1,0,0,0},
diff --git a/src/server.h b/src/server.h
index a08585292..68932f656 100644
--- a/src/server.h
+++ b/src/server.h
@@ -2269,6 +2269,7 @@ void flushdbCommand(client *c);
 void flushallCommand(client *c);
 void sortCommand(client *c);
 void lremCommand(client *c);
+void lrankCommand(client *c);
 void rpoplpushCommand(client *c);
 void infoCommand(client *c);
 void mgetCommand(client *c);
diff --git a/src/t_list.c b/src/t_list.c
index 4770a2272..899a20f47 100644
--- a/src/t_list.c
+++ b/src/t_list.c
@@ -487,6 +487,40 @@ void ltrimCommand(client *c) {
     addReply(c,shared.ok);
 }

+void lrankCommand(client *c) {
+    robj *subject, *obj;
+    obj = c->argv[3];
+    long direction = 0;
+    long index = 0;
+
+    if ((getLongFromObjectOrReply(c, c->argv[2], &direction, NULL) != C_OK))
+        return;
+
+    subject = lookupKeyWriteOrReply(c,c->argv[1],shared.czero);
+    if (subject == NULL || checkType(c,subject,OBJ_LIST)) return;
+
+    listTypeIterator *li;
+    if (direction < 0) {
+        direction = -1;
+        li = listTypeInitIterator(subject,-1,LIST_HEAD);
+    } else {
+        direction = 1;
+        li = listTypeInitIterator(subject,0,LIST_TAIL);
+    }
+
+    listTypeEntry entry;
+    while (listTypeNext(li,&entry)) {
+        if (listTypeEqual(&entry,obj)) {
+            break;
+        }
+        index++;
+    }
+
+    listTypeReleaseIterator(li);
+
+    addReplyLongLong(c,index * direction);
+}
+
 void lremCommand(client *c) {
     robj *subject, *obj;
     obj = c->argv[3];
From afb8b3c030e4dd4a717937bd8e34ac64fd52a843 Mon Sep 17 00:00:00 2001
From: antirez
Date: Wed, 10 Jun 2020 12:40:24 +0200
Subject: [PATCH 002/215] LPOS: implement the final design.
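As an illustrative sketch of the semantics implemented below (assuming a list
built with RPUSH mylist a b c 1 2 3 c c, so that the first "c" sits at index 2):

    LPOS mylist c          -> 2        (first match; matchpos defaults to 1)
    LPOS mylist c 2        -> 6        (second match, scanning head to tail)
    LPOS mylist c -1       -> 7        (first match, scanning from the tail)
    LPOS mylist c 0 ALL    -> 2 6 7    (all the matches, as an array)

In every case the returned indexes are the ones LINDEX would accept, regardless
of the scan direction. The example list and replies are only for illustration;
the authoritative description is the comment in the diff below.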
--- src/server.c | 4 +- src/server.h | 2 +- src/t_list.c | 105 ++++++++++++++++++++++++++++++++++++++++----------- 3 files changed, 87 insertions(+), 24 deletions(-) diff --git a/src/server.c b/src/server.c index 767d3374f..53dccf875 100644 --- a/src/server.c +++ b/src/server.c @@ -326,8 +326,8 @@ struct redisCommand redisCommandTable[] = { "write @list", 0,NULL,1,1,1,0,0,0}, - {"lrank",lrankCommand,4, - "read-only fast @list", + {"lpos",lposCommand,-3, + "read-only @list", 0,NULL,1,1,1,0,0,0}, {"lrem",lremCommand,4, diff --git a/src/server.h b/src/server.h index 68932f656..841e1f941 100644 --- a/src/server.h +++ b/src/server.h @@ -2269,7 +2269,7 @@ void flushdbCommand(client *c); void flushallCommand(client *c); void sortCommand(client *c); void lremCommand(client *c); -void lrankCommand(client *c); +void lposCommand(client *c); void rpoplpushCommand(client *c); void infoCommand(client *c); void mgetCommand(client *c); diff --git a/src/t_list.c b/src/t_list.c index 899a20f47..3735280ff 100644 --- a/src/t_list.c +++ b/src/t_list.c @@ -487,38 +487,101 @@ void ltrimCommand(client *c) { addReply(c,shared.ok); } -void lrankCommand(client *c) { - robj *subject, *obj; - obj = c->argv[3]; - long direction = 0; - long index = 0; +/* LPOS key element [matchpos] [ALL] [RELATIVE] + * + * matchnum is the position of the match, so if it is 1, the first match + * is returned, if it is 2 the second match is returned and so forth. + * It is 1 by default. If negative has the same meaning but the search is + * performed starting from the end of the list. + * + * A matchnum of 0 is accepted only if ALL is given, and means to return + * all the elements. + * + * If ALL is given, instead of returning the single elmenet, a list of + * all the matching elements up to "matchnum" are returned. + * + * The returned elements indexes are always referring to what LINDEX + * would return. So first element from head is 0, and so forth. + * However if RELATIVE is given and a negative matchpos is given, the + * indexes are returned as if the last element of the list is the element 0, + * the penultimante is 1, and so forth. */ +void lposCommand(client *c) { + robj *o, *ele; + ele = c->argv[2]; + int all = 0, direction = LIST_TAIL; + long matchpos = 1; - if ((getLongFromObjectOrReply(c, c->argv[2], &direction, NULL) != C_OK)) + /* Parse the optional "matchpos" argument, and the ALL option. */ + if (c->argc >= 4 && + getLongFromObjectOrReply(c, c->argv[3], &matchpos, NULL) != C_OK) + { return; - - subject = lookupKeyWriteOrReply(c,c->argv[1],shared.czero); - if (subject == NULL || checkType(c,subject,OBJ_LIST)) return; - - listTypeIterator *li; - if (direction < 0) { - direction = -1; - li = listTypeInitIterator(subject,-1,LIST_HEAD); - } else { - direction = 1; - li = listTypeInitIterator(subject,0,LIST_TAIL); } + if (c->argc == 5) { + if (!strcasecmp(c->argv[4]->ptr,"all")) { + all = 1; + } else { + addReply(c,shared.syntaxerr); + return; + } + } + + /* Raise an error on incompatible options. */ + if (!all && matchpos == 0) { + addReplyError(c,"A match position of zero is valid only " + "when using the ALL option"); + return; + } + + /* A negative matchpos means start from the tail. */ + if (matchpos < 0) { + matchpos = -matchpos; + direction = LIST_HEAD; + } + + /* We return NULL or an empty array if there is no such key (or + * if we find no matches, depending on the presence of the ALL option. 
*/ + if ((o = lookupKeyWriteOrReply(c,c->argv[1],NULL)) == NULL) { + if (all) + addReply(c,shared.emptyarray); + else + addReply(c,shared.null[c->resp]); + return; + } + if (checkType(c,o,OBJ_LIST)) return; + + /* If we got the ALL option, prepare to emit an array. */ + void *arraylenptr = NULL; + if (all) arraylenptr = addReplyDeferredLen(c); + + /* Seek the element. */ + listTypeIterator *li; + li = listTypeInitIterator(o,direction == LIST_HEAD ? -1 : 0,direction); listTypeEntry entry; + long llen = listTypeLength(o); + long index = 0, matches = 0, matchindex = -1; while (listTypeNext(li,&entry)) { - if (listTypeEqual(&entry,obj)) { - break; + if (listTypeEqual(&entry,ele)) { + matches++; + matchindex = (direction == LIST_TAIL) ? index : llen - index - 1; + if (all) addReplyLongLong(c,matchindex); + if (matches == matchpos) break; } index++; } - listTypeReleaseIterator(li); - addReplyLongLong(c,index * direction); + /* Reply to the client. Note that arraylenptr is not NULL only if + * the ALL option was selected. */ + if (arraylenptr != NULL) { + setDeferredArrayLen(c,arraylenptr,matches); + } else { + if (matchindex != -1) + addReplyLongLong(c,matchindex); + else + addReply(c,shared.null[c->resp]); + } } void lremCommand(client *c) { From 495327c0206d8000be98172b6ee54382f6afc972 Mon Sep 17 00:00:00 2001 From: antirez Date: Thu, 11 Jun 2020 11:18:16 +0200 Subject: [PATCH 003/215] LPOS: update to latest proposal. See https://gist.github.com/antirez/3591c5096bc79cad8b5a992e08304f48 --- src/t_list.c | 99 +++++++++++++++++++++++++++++++--------------------- 1 file changed, 59 insertions(+), 40 deletions(-) diff --git a/src/t_list.c b/src/t_list.c index 3735280ff..653337d78 100644 --- a/src/t_list.c +++ b/src/t_list.c @@ -487,63 +487,75 @@ void ltrimCommand(client *c) { addReply(c,shared.ok); } -/* LPOS key element [matchpos] [ALL] [RELATIVE] +/* LPOS key element [FIRST rank] [COUNT num-matches] [MAXLEN len] * - * matchnum is the position of the match, so if it is 1, the first match + * FIRST "rank" is the position of the match, so if it is 1, the first match * is returned, if it is 2 the second match is returned and so forth. * It is 1 by default. If negative has the same meaning but the search is * performed starting from the end of the list. * - * A matchnum of 0 is accepted only if ALL is given, and means to return - * all the elements. + * If COUNT is given, instead of returning the single element, a list of + * all the matching elements up to "num-matches" are returned. COUNT can + * be combiled with FIRST in order to returning only the element starting + * from the Nth. If COUNT is zero, all the matching elements are returned. * - * If ALL is given, instead of returning the single elmenet, a list of - * all the matching elements up to "matchnum" are returned. + * MAXLEN tells the command to scan a max of len elements. If zero (the + * default), all the elements in the list are scanned if needed. * * The returned elements indexes are always referring to what LINDEX - * would return. So first element from head is 0, and so forth. - * However if RELATIVE is given and a negative matchpos is given, the - * indexes are returned as if the last element of the list is the element 0, - * the penultimante is 1, and so forth. */ + * would return. So first element from head is 0, and so forth. 
*/ void lposCommand(client *c) { robj *o, *ele; ele = c->argv[2]; - int all = 0, direction = LIST_TAIL; - long matchpos = 1; + int direction = LIST_TAIL; + long rank = 1, count = -1, maxlen = 0; /* Count -1: option not given. */ - /* Parse the optional "matchpos" argument, and the ALL option. */ - if (c->argc >= 4 && - getLongFromObjectOrReply(c, c->argv[3], &matchpos, NULL) != C_OK) - { - return; - } + /* Parse the optional arguments. */ + for (int j = 3; j < c->argc; j++) { + char *opt = c->argv[j]->ptr; + int moreargs = (c->argc-1)-j; - if (c->argc == 5) { - if (!strcasecmp(c->argv[4]->ptr,"all")) { - all = 1; + if (!strcasecmp(opt,"FIRST") && moreargs) { + j++; + if (getLongFromObjectOrReply(c, c->argv[j], &rank, NULL) != C_OK) + return; + if (rank == 0) { + addReplyError(c,"FIRST can't be zero: use 1 to start from " + "the first match, 2 from the second, ..."); + return; + } + } else if (!strcasecmp(opt,"COUNT") && moreargs) { + j++; + if (getLongFromObjectOrReply(c, c->argv[j], &count, NULL) != C_OK) + return; + if (count < 0) { + addReplyError(c,"COUNT can't be negative"); + return; + } + } else if (!strcasecmp(opt,"MAXLEN") && moreargs) { + j++; + if (getLongFromObjectOrReply(c, c->argv[j], &maxlen, NULL) != C_OK) + return; + if (maxlen < 0) { + addReplyError(c,"MAXLEN can't be negative"); + return; + } } else { addReply(c,shared.syntaxerr); return; } } - /* Raise an error on incompatible options. */ - if (!all && matchpos == 0) { - addReplyError(c,"A match position of zero is valid only " - "when using the ALL option"); - return; - } - - /* A negative matchpos means start from the tail. */ - if (matchpos < 0) { - matchpos = -matchpos; + /* A negative rank means start from the tail. */ + if (rank < 0) { + rank = -rank; direction = LIST_HEAD; } /* We return NULL or an empty array if there is no such key (or - * if we find no matches, depending on the presence of the ALL option. */ + * if we find no matches, depending on the presence of the COUNT option. */ if ((o = lookupKeyWriteOrReply(c,c->argv[1],NULL)) == NULL) { - if (all) + if (count != -1) addReply(c,shared.emptyarray); else addReply(c,shared.null[c->resp]); @@ -551,9 +563,9 @@ void lposCommand(client *c) { } if (checkType(c,o,OBJ_LIST)) return; - /* If we got the ALL option, prepare to emit an array. */ + /* If we got the COUNT option, prepare to emit an array. */ void *arraylenptr = NULL; - if (all) arraylenptr = addReplyDeferredLen(c); + if (count != -1) arraylenptr = addReplyDeferredLen(c); /* Seek the element. */ listTypeIterator *li; @@ -561,21 +573,28 @@ void lposCommand(client *c) { listTypeEntry entry; long llen = listTypeLength(o); long index = 0, matches = 0, matchindex = -1; - while (listTypeNext(li,&entry)) { + while (listTypeNext(li,&entry) && (maxlen == 0 || index < maxlen)) { if (listTypeEqual(&entry,ele)) { matches++; matchindex = (direction == LIST_TAIL) ? index : llen - index - 1; - if (all) addReplyLongLong(c,matchindex); - if (matches == matchpos) break; + if (matches >= rank) { + if (arraylenptr) { + addReplyLongLong(c,matchindex); + if (count && matches-rank+1 >= count) break; + } else { + break; + } + } } index++; + matchindex = -1; /* Remember if we exit the loop without a match. */ } listTypeReleaseIterator(li); /* Reply to the client. Note that arraylenptr is not NULL only if - * the ALL option was selected. */ + * the COUNT option was selected. 
*/ if (arraylenptr != NULL) { - setDeferredArrayLen(c,arraylenptr,matches); + setDeferredArrayLen(c,arraylenptr,matches-rank+1); } else { if (matchindex != -1) addReplyLongLong(c,matchindex); From e0022d8cfe098b60a05fd6b8fc5bdf66d51cbe89 Mon Sep 17 00:00:00 2001 From: antirez Date: Thu, 11 Jun 2020 12:38:51 +0200 Subject: [PATCH 004/215] LPOS: tests + crash fix. --- src/t_list.c | 2 +- tests/unit/type/list.tcl | 44 ++++++++++++++++++++++++++++++++++++++++ 2 files changed, 45 insertions(+), 1 deletion(-) diff --git a/src/t_list.c b/src/t_list.c index 653337d78..e580139ab 100644 --- a/src/t_list.c +++ b/src/t_list.c @@ -554,7 +554,7 @@ void lposCommand(client *c) { /* We return NULL or an empty array if there is no such key (or * if we find no matches, depending on the presence of the COUNT option. */ - if ((o = lookupKeyWriteOrReply(c,c->argv[1],NULL)) == NULL) { + if ((o = lookupKeyRead(c->db,c->argv[1])) == NULL) { if (count != -1) addReply(c,shared.emptyarray); else diff --git a/tests/unit/type/list.tcl b/tests/unit/type/list.tcl index 676896a75..a0c04dcaa 100644 --- a/tests/unit/type/list.tcl +++ b/tests/unit/type/list.tcl @@ -6,6 +6,50 @@ start_server { } { source "tests/unit/type/list-common.tcl" + test {LPOS basic usage} { + r DEL mylist + r RPUSH mylist a b c 1 2 3 c c + assert {[r LPOS mylist a] == 0} + assert {[r LPOS mylist c] == 2} + } + + test {LPOS FIRST (positive and negative rank) option} { + assert {[r LPOS mylist c FIRST 1] == 2} + assert {[r LPOS mylist c FIRST 2] == 6} + assert {[r LPOS mylist c FIRST 4] eq ""} + assert {[r LPOS mylist c FIRST -1] == 7} + assert {[r LPOS mylist c FIRST -2] == 6} + } + + test {LPOS COUNT option} { + assert {[r LPOS mylist c COUNT 0] == {2 6 7}} + assert {[r LPOS mylist c COUNT 1] == {2}} + assert {[r LPOS mylist c COUNT 2] == {2 6}} + assert {[r LPOS mylist c COUNT 100] == {2 6 7}} + } + + test {LPOS COUNT + FIRST option} { + assert {[r LPOS mylist c COUNT 0 FIRST 2] == {6 7}} + assert {[r LPOS mylist c COUNT 2 FIRST -1] == {7 6}} + } + + test {LPOS non existing key} { + assert {[r LPOS mylistxxx c COUNT 0 FIRST 2] eq {}} + } + + test {LPOS no match} { + assert {[r LPOS mylist x COUNT 2 FIRST -1] eq {}} + assert {[r LPOS mylist x FIRST -1] eq {}} + } + + test {LPOS MAXLEN} { + assert {[r LPOS mylist a COUNT 0 MAXLEN 1] == {0}} + assert {[r LPOS mylist c COUNT 0 MAXLEN 1] == {}} + assert {[r LPOS mylist c COUNT 0 MAXLEN 3] == {2}} + assert {[r LPOS mylist c COUNT 0 MAXLEN 3 FIRST -1] == {7 6}} + assert {[r LPOS mylist c COUNT 0 MAXLEN 7 FIRST 2] == {6}} + } + test {LPUSH, RPUSH, LLENGTH, LINDEX, LPOP - ziplist} { # first lpush then rpush assert_equal 1 [r lpush myziplist1 aa] From 0cae0900cfda433c445b5230c1bafb98700d2c2f Mon Sep 17 00:00:00 2001 From: antirez Date: Fri, 12 Jun 2020 12:16:19 +0200 Subject: [PATCH 005/215] help.h updated. 
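This keeps redis-cli's interactive help in sync with the new command. For
illustration only (the exact formatting may differ), `help lpos` should now
print something like:

    LPOS key element [FIRST rank] [COUNT num-matches] [MAXLEN len]
    summary: Return the index of matching elements on a list
    since: 6.0.6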
--- src/help.h | 30 ++++++++++++++++++++---------- 1 file changed, 20 insertions(+), 10 deletions(-) diff --git a/src/help.h b/src/help.h index 9b4e90f9f..1b1ac5e08 100644 --- a/src/help.h +++ b/src/help.h @@ -43,6 +43,16 @@ struct commandHelp { "Generate a pseudorandom secure password to use for ACL users", 9, "6.0.0" }, + { "ACL GETUSER", + "username", + "Get the rules for a specific ACL user", + 9, + "6.0.0" }, + { "ACL HELP", + "-", + "Show helpful text about the different subcommands", + 9, + "6.0.0" }, { "ACL LIST", "-", "List the current ACL rules in ACL config file format", @@ -64,7 +74,7 @@ struct commandHelp { 9, "6.0.0" }, { "ACL SETUSER", - "rule [rule ...]", + "username [rule [rule ...]]", "Modify or create the rules for a specific ACL user", 9, "6.0.0" }, @@ -164,7 +174,7 @@ struct commandHelp { 8, "5.0.0" }, { "CLIENT KILL", - "[ip:port] [ID client-id] [TYPE normal|master|slave|pubsub] [ADDR ip:port] [SKIPME yes/no]", + "[ip:port] [ID client-id] [TYPE normal|master|slave|pubsub] [USER username] [ADDR ip:port] [SKIPME yes/no]", "Kill the connection of a client", 8, "2.4.0" }, @@ -182,14 +192,14 @@ struct commandHelp { "ON|OFF|SKIP", "Instruct the server whether to reply to commands", 8, - "3.2" }, + "3.2.0" }, { "CLIENT SETNAME", "connection-name", "Set the current connection name", 8, "2.6.9" }, { "CLIENT TRACKING", - "ON|OFF [REDIRECT client-id] [PREFIX prefix] [BCAST] [OPTIN] [OPTOUT] [NOLOOP]", + "ON|OFF [REDIRECT client-id] [PREFIX prefix [PREFIX prefix ...]] [BCAST] [OPTIN] [OPTOUT] [NOLOOP]", "Enable or disable server assisted client side caching support", 8, "6.0.0" }, @@ -619,7 +629,7 @@ struct commandHelp { 9, "2.8.13" }, { "LATENCY RESET", - "[event]", + "[event [event ...]]", "Reset latency data for one or more events.", 9, "2.8.13" }, @@ -648,6 +658,11 @@ struct commandHelp { "Remove and get the first element in a list", 2, "1.0.0" }, + { "LPOS", + "key element [FIRST rank] [COUNT num-matches] [MAXLEN len]", + "Return the index of matching elements on a list", + 2, + "6.0.6" }, { "LPUSH", "key element [element ...]", "Prepend one or multiple elements to a list", @@ -668,11 +683,6 @@ struct commandHelp { "Remove elements from a list", 2, "1.0.0" }, - { "LRANK", - "key direction element", - "Return first index of element in list based on direction", - 2, - "9.9.9" }, { "LSET", "key index element", "Set the value of an element in a list by its index", From 930fac805833fe46d8ee41e1c904dd6ea90e10b2 Mon Sep 17 00:00:00 2001 From: Jamie Scott Date: Sun, 12 Apr 2020 17:56:58 -0700 Subject: [PATCH 006/215] minor fix (cherry picked from commit 4f3b15e6ea2d72debeb2f0533e2ba11884257021) --- redis.conf | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/redis.conf b/redis.conf index 1aa760599..5cea06a47 100644 --- a/redis.conf +++ b/redis.conf @@ -745,9 +745,8 @@ replica-priority 100 # # The ACL Log tracks failed commands and authentication events associated # with ACLs. The ACL Log is useful to troubleshoot failed commands blocked -# by ACLs. The ACL Log is stored in and consumes memory. There is no limit -# to its length.You can reclaim memory with ACL LOG RESET or set a maximum -# length below. +# by ACLs. The ACL Log is stored in memory. You can reclaim memory with +# ACL LOG RESET. Define the maximum entry length of the ACL Log below. 
acllog-max-len 128 # Using an external ACL file From 845fb2d1c191bf968830f2bffb08336e6654d20b Mon Sep 17 00:00:00 2001 From: Benjamin Sergeant Date: Fri, 1 May 2020 20:57:51 -0700 Subject: [PATCH 007/215] Update redis-cli.c (cherry picked from commit 93021da221f71cf71fe874fd881ea59f325b82f2) --- src/redis-cli.c | 25 +++++++++++++++++++------ 1 file changed, 19 insertions(+), 6 deletions(-) diff --git a/src/redis-cli.c b/src/redis-cli.c index 96eb3c3dd..75845f346 100644 --- a/src/redis-cli.c +++ b/src/redis-cli.c @@ -3423,6 +3423,7 @@ static redisReply *clusterManagerMigrateKeysInReply(clusterManagerNode *source, size_t *argv_len = NULL; int c = (replace ? 8 : 7); if (config.auth) c += 2; + if (config.user) c += 1; size_t argc = c + reply->elements; size_t i, offset = 6; // Keys Offset argv = zcalloc(argc * sizeof(char *)); @@ -3449,12 +3450,24 @@ static redisReply *clusterManagerMigrateKeysInReply(clusterManagerNode *source, offset++; } if (config.auth) { - argv[offset] = "AUTH"; - argv_len[offset] = 4; - offset++; - argv[offset] = config.auth; - argv_len[offset] = strlen(config.auth); - offset++; + if (config.user) { + argv[offset] = "AUTH2"; + argv_len[offset] = 5; + offset++; + argv[offset] = config.user; + argv_len[offset] = strlen(config.user); + offset++; + argv[offset] = config.auth; + argv_len[offset] = strlen(config.auth); + offset++; + } else { + argv[offset] = "AUTH"; + argv_len[offset] = 4; + offset++; + argv[offset] = config.auth; + argv_len[offset] = strlen(config.auth); + offset++; + } } argv[offset] = "KEYS"; argv_len[offset] = 4; From c1326d7b1059c0087c7b9124d79dff345cdfcb71 Mon Sep 17 00:00:00 2001 From: hwware Date: Mon, 8 Jun 2020 23:36:01 -0400 Subject: [PATCH 008/215] fix server crash in STRALGO command (cherry picked from commit 2a05fa0d481d12d3747465c4f14470bdca100c5d) --- src/t_string.c | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/src/t_string.c b/src/t_string.c index 5306069bf..d1a3e1b96 100644 --- a/src/t_string.c +++ b/src/t_string.c @@ -534,6 +534,13 @@ void stralgoLCS(client *c) { } obja = lookupKeyRead(c->db,c->argv[j+1]); objb = lookupKeyRead(c->db,c->argv[j+2]); + + if ( !(obja->type == OBJ_STRING) || !(objb->type == OBJ_STRING) ) { + addReplyError(c,"Object associate with KEYS option should only be string type"); + return; + + } + obja = obja ? getDecodedObject(obja) : createStringObject("",0); objb = objb ? 
getDecodedObject(objb) : createStringObject("",0); a = obja->ptr; From ec1faeec726ff75d042a5c36f3c2951d2395ec1a Mon Sep 17 00:00:00 2001 From: hwware Date: Mon, 8 Jun 2020 23:48:51 -0400 Subject: [PATCH 009/215] fix memory leak (cherry picked from commit 7008a0ba66fe13af0d584071eaa5fe3f34c56512) --- src/t_string.c | 23 +++++++++++------------ 1 file changed, 11 insertions(+), 12 deletions(-) diff --git a/src/t_string.c b/src/t_string.c index d1a3e1b96..8e367ec80 100644 --- a/src/t_string.c +++ b/src/t_string.c @@ -516,13 +516,13 @@ void stralgoLCS(client *c) { withmatchlen = 1; } else if (!strcasecmp(opt,"MINMATCHLEN") && moreargs) { if (getLongLongFromObjectOrReply(c,c->argv[j+1],&minmatchlen,NULL) - != C_OK) return; + != C_OK) goto clean_up_obj; if (minmatchlen < 0) minmatchlen = 0; j++; } else if (!strcasecmp(opt,"STRINGS") && moreargs > 1) { if (a != NULL) { addReplyError(c,"Either use STRINGS or KEYS"); - return; + goto clean_up_obj; } a = c->argv[j+1]->ptr; b = c->argv[j+2]->ptr; @@ -530,17 +530,14 @@ void stralgoLCS(client *c) { } else if (!strcasecmp(opt,"KEYS") && moreargs > 1) { if (a != NULL) { addReplyError(c,"Either use STRINGS or KEYS"); - return; + goto clean_up_obj; } obja = lookupKeyRead(c->db,c->argv[j+1]); objb = lookupKeyRead(c->db,c->argv[j+2]); - if ( !(obja->type == OBJ_STRING) || !(objb->type == OBJ_STRING) ) { addReplyError(c,"Object associate with KEYS option should only be string type"); - return; - + goto clean_up_obj; } - obja = obja ? getDecodedObject(obja) : createStringObject("",0); objb = objb ? getDecodedObject(objb) : createStringObject("",0); a = obja->ptr; @@ -548,7 +545,7 @@ void stralgoLCS(client *c) { j += 2; } else { addReply(c,shared.syntaxerr); - return; + goto clean_up_obj; } } @@ -556,12 +553,12 @@ void stralgoLCS(client *c) { if (a == NULL) { addReplyError(c,"Please specify two strings: " "STRINGS or KEYS options are mandatory"); - return; + goto clean_up_obj; } else if (getlen && getidx) { addReplyError(c, "If you want both the length and indexes, please " "just use IDX."); - return; + goto clean_up_obj; } /* Compute the LCS using the vanilla dynamic programming technique of @@ -696,10 +693,12 @@ void stralgoLCS(client *c) { } /* Cleanup. */ - if (obja) decrRefCount(obja); - if (objb) decrRefCount(objb); sdsfree(result); zfree(lcs); + +clean_up_obj: + if (obja) decrRefCount(obja); + if (objb) decrRefCount(objb); return; } From 82b2bfd20b615b337bdad3f33ca97cebcf9b26bc Mon Sep 17 00:00:00 2001 From: antirez Date: Fri, 12 Jun 2020 12:34:44 +0200 Subject: [PATCH 010/215] Fix LCS object type checking. Related to #7379. 
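With the corrected check, keys holding non-string values are rejected with a
clean error instead of being passed to getDecodedObject(). An illustrative
session (key names are arbitrary; the error text comes from the code below):

    > SET key1 ohmytext
    OK
    > LPUSH mylist x
    (integer) 1
    > STRALGO LCS KEYS key1 mylist
    (error) ERR The specified keys must contain string values

Missing keys keep their previous behavior and are treated as empty strings.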
(cherry picked from commit 10553988498acea1d772af69092b67fd5b56d529)
---
 src/t_string.c | 27 +++++++++++++++++----------
 1 file changed, 17 insertions(+), 10 deletions(-)

diff --git a/src/t_string.c b/src/t_string.c
index 8e367ec80..259f43142 100644
--- a/src/t_string.c
+++ b/src/t_string.c
@@ -516,13 +516,13 @@ void stralgoLCS(client *c) {
             withmatchlen = 1;
         } else if (!strcasecmp(opt,"MINMATCHLEN") && moreargs) {
             if (getLongLongFromObjectOrReply(c,c->argv[j+1],&minmatchlen,NULL)
-                != C_OK) goto clean_up_obj;
+                != C_OK) goto cleanup;
             if (minmatchlen < 0) minmatchlen = 0;
             j++;
         } else if (!strcasecmp(opt,"STRINGS") && moreargs > 1) {
             if (a != NULL) {
                 addReplyError(c,"Either use STRINGS or KEYS");
-                goto clean_up_obj;
+                goto cleanup;
             }
             a = c->argv[j+1]->ptr;
             b = c->argv[j+2]->ptr;
@@ -530,13 +530,20 @@ void stralgoLCS(client *c) {
         } else if (!strcasecmp(opt,"KEYS") && moreargs > 1) {
             if (a != NULL) {
                 addReplyError(c,"Either use STRINGS or KEYS");
-                goto clean_up_obj;
+                goto cleanup;
             }
             obja = lookupKeyRead(c->db,c->argv[j+1]);
             objb = lookupKeyRead(c->db,c->argv[j+2]);
-            if ( !(obja->type == OBJ_STRING) || !(objb->type == OBJ_STRING) ) {
-                addReplyError(c,"Object associate with KEYS option should only be string type");
-                goto clean_up_obj;
+            if ((obja && obja->type != OBJ_STRING) ||
+                (objb && objb->type != OBJ_STRING))
+            {
+                addReplyError(c,
+                    "The specified keys must contain string values");
+                /* Don't cleanup the objects, we need to do that
+                 * only after calling getDecodedObject(). */
+                obja = NULL;
+                objb = NULL;
+                goto cleanup;
             }
             obja = obja ? getDecodedObject(obja) : createStringObject("",0);
             objb = objb ? getDecodedObject(objb) : createStringObject("",0);
@@ -545,7 +552,7 @@ void stralgoLCS(client *c) {
             j += 2;
         } else {
             addReply(c,shared.syntaxerr);
-            goto clean_up_obj;
+            goto cleanup;
         }
     }

@@ -553,12 +560,12 @@ void stralgoLCS(client *c) {
     if (a == NULL) {
         addReplyError(c,"Please specify two strings: "
                         "STRINGS or KEYS options are mandatory");
-        goto clean_up_obj;
+        goto cleanup;
     } else if (getlen && getidx) {
         addReplyError(c,
            "If you want both the length and indexes, please "
            "just use IDX.");
-        goto clean_up_obj;
+        goto cleanup;
     }

     /* Compute the LCS using the vanilla dynamic programming technique of
@@ -696,7 +703,7 @@ void stralgoLCS(client *c) {
     sdsfree(result);
     zfree(lcs);

-clean_up_obj:
+cleanup:
     if (obja) decrRefCount(obja);
     if (objb) decrRefCount(objb);
     return;
From 51e178454d03a7cd9f0f3d185e47d69a02223f3a Mon Sep 17 00:00:00 2001
From: "meir@redislabs.com"
Date: Sun, 14 Jun 2020 10:06:00 +0300
Subject: [PATCH 011/215] Fix RM_ScanKey module api not to return int encoded strings

The scan key module API provides the scan callback with the current
field name and value (if it exists). Those arguments are
RedisModuleString*, which means they are supposed to point to a robj
encoded as a string. Using the createStringObjectFromLongLong function
might return a robj that points to an integer, and so break a module
that tries, for example, to use RedisModule_StringPtrLen on the given
field/value.

The PR introduces a fix that uses the createObject function and the
sdsfromlonglong function. Using those functions guarantees that the
field and value passed to the scan callback will be strings.

The PR also changes the Scan test module to use RedisModule_StringPtrLen
to catch the issue. Without this, the issue is hidden because
RedisModule_ReplyWithString knows how to handle the integer encoding of
the given robj (RedisModuleString).

The PR also introduces a new test to verify the issue is solved.
(cherry picked from commit a89bf734a933e45b9dd3ae85ef4c3b62bd6891d8) --- src/module.c | 6 +++--- tests/modules/scan.c | 20 ++++++++++++++++---- tests/unit/moduleapi/scan.tcl | 5 +++++ 3 files changed, 24 insertions(+), 7 deletions(-) diff --git a/src/module.c b/src/module.c index e3a338dad..226c60fd0 100644 --- a/src/module.c +++ b/src/module.c @@ -6708,7 +6708,7 @@ int RM_ScanKey(RedisModuleKey *key, RedisModuleScanCursor *cursor, RedisModuleSc int pos = 0; int64_t ll; while(intsetGet(o->ptr,pos++,&ll)) { - robj *field = createStringObjectFromLongLong(ll); + robj *field = createObject(OBJ_STRING,sdsfromlonglong(ll)); fn(key, field, NULL, privdata); decrRefCount(field); } @@ -6724,12 +6724,12 @@ int RM_ScanKey(RedisModuleKey *key, RedisModuleScanCursor *cursor, RedisModuleSc ziplistGet(p,&vstr,&vlen,&vll); robj *field = (vstr != NULL) ? createStringObject((char*)vstr,vlen) : - createStringObjectFromLongLong(vll); + createObject(OBJ_STRING,sdsfromlonglong(vll)); p = ziplistNext(o->ptr,p); ziplistGet(p,&vstr,&vlen,&vll); robj *value = (vstr != NULL) ? createStringObject((char*)vstr,vlen) : - createStringObjectFromLongLong(vll); + createObject(OBJ_STRING,sdsfromlonglong(vll)); fn(key, field, value, privdata); p = ziplistNext(o->ptr,p); decrRefCount(field); diff --git a/tests/modules/scan.c b/tests/modules/scan.c index afede244b..1576bae9e 100644 --- a/tests/modules/scan.c +++ b/tests/modules/scan.c @@ -55,11 +55,23 @@ void scan_key_callback(RedisModuleKey *key, RedisModuleString* field, RedisModul REDISMODULE_NOT_USED(key); scan_key_pd* pd = privdata; RedisModule_ReplyWithArray(pd->ctx, 2); - RedisModule_ReplyWithString(pd->ctx, field); - if (value) - RedisModule_ReplyWithString(pd->ctx, value); - else + size_t fieldCStrLen; + + // The implementation of RedisModuleString is robj with lots of encodings. + // We want to make sure the robj that passes to this callback in + // String encoded, this is why we use RedisModule_StringPtrLen and + // RedisModule_ReplyWithStringBuffer instead of directly use + // RedisModule_ReplyWithString. 
+ const char* fieldCStr = RedisModule_StringPtrLen(field, &fieldCStrLen); + RedisModule_ReplyWithStringBuffer(pd->ctx, fieldCStr, fieldCStrLen); + if(value){ + size_t valueCStrLen; + const char* valueCStr = RedisModule_StringPtrLen(value, &valueCStrLen); + RedisModule_ReplyWithStringBuffer(pd->ctx, valueCStr, valueCStrLen); + } else { RedisModule_ReplyWithNull(pd->ctx); + } + pd->nreplies++; } diff --git a/tests/unit/moduleapi/scan.tcl b/tests/unit/moduleapi/scan.tcl index de1672e0a..43a0c4d8a 100644 --- a/tests/unit/moduleapi/scan.tcl +++ b/tests/unit/moduleapi/scan.tcl @@ -16,6 +16,11 @@ start_server {tags {"modules"}} { r hmset hh f1 v1 f2 v2 lsort [r scan.scan_key hh] } {{f1 v1} {f2 v2}} + + test {Module scan hash dict with int value} { + r hmset hh1 f1 1 + lsort [r scan.scan_key hh1] + } {{f1 1}} test {Module scan hash dict} { r config set hash-max-ziplist-entries 2 From 8095daea4a60b15d8797ae8bc0bfc2bd4a376356 Mon Sep 17 00:00:00 2001 From: root Date: Mon, 15 Jun 2020 10:18:14 +0800 Subject: [PATCH 012/215] cluster.c remove if of clusterSendFail in markNodeAsFailingIfNeeded (cherry picked from commit c92464db694172dac8b0f9eeedd366c494d6db8a) --- src/cluster.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/cluster.c b/src/cluster.c index 24b14d1dc..332d5a4ac 100644 --- a/src/cluster.c +++ b/src/cluster.c @@ -1255,7 +1255,7 @@ void markNodeAsFailingIfNeeded(clusterNode *node) { /* Broadcast the failing node name to everybody, forcing all the other * reachable nodes to flag the node as FAIL. */ - if (nodeIsMaster(myself)) clusterSendFail(node->name); + clusterSendFail(node->name); clusterDoBeforeSleep(CLUSTER_TODO_UPDATE_STATE|CLUSTER_TODO_SAVE_CONFIG); } From 43ed3c3589f43a872e00f7085529d611b345ceeb Mon Sep 17 00:00:00 2001 From: antirez Date: Tue, 16 Jun 2020 11:09:45 +0200 Subject: [PATCH 013/215] Tracking: fix enableBcastTrackingForPrefix() invalid sdslen() call. Related to #7387. (cherry picked from commit 784479939d9e560835a9eb7a410304b46047d5f5) --- src/tracking.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/tracking.c b/src/tracking.c index eb4113131..8c2dca7ba 100644 --- a/src/tracking.c +++ b/src/tracking.c @@ -102,7 +102,7 @@ void disableTracking(client *c) { /* Set the client 'c' to track the prefix 'prefix'. If the client 'c' is * already registered for the specified prefix, no operation is performed. */ void enableBcastTrackingForPrefix(client *c, char *prefix, size_t plen) { - bcastState *bs = raxFind(PrefixTable,(unsigned char*)prefix,sdslen(prefix)); + bcastState *bs = raxFind(PrefixTable,(unsigned char*)prefix,plen); /* If this is the first client subscribing to such prefix, create * the prefix in the table. */ if (bs == raxNotFound) { From 0ebbc36059f95650d2e81efbec4861b0c8fc01b9 Mon Sep 17 00:00:00 2001 From: antirez Date: Tue, 16 Jun 2020 11:45:03 +0200 Subject: [PATCH 014/215] Use cluster connections too, to limit maxclients. See #7401. (cherry picked from commit 4b8d8826afa0f240b26977e9d128144ebf8d5d7a) --- src/cluster.c | 7 +++++++ src/cluster.h | 1 + src/networking.c | 23 +++++++++++++++-------- 3 files changed, 23 insertions(+), 8 deletions(-) diff --git a/src/cluster.c b/src/cluster.c index 332d5a4ac..ccbc373ca 100644 --- a/src/cluster.c +++ b/src/cluster.c @@ -691,6 +691,13 @@ void clusterAcceptHandler(aeEventLoop *el, int fd, void *privdata, int mask) { } } +/* Return the approximated number of sockets we are using in order to + * take the cluster bus connections. 
*/ +unsigned long getClusterConnectionsCount(void) { + return server.cluster_enabled ? + (dictSize(server.cluster->nodes)*2) : 0; +} + /* ----------------------------------------------------------------------------- * Key space handling * -------------------------------------------------------------------------- */ diff --git a/src/cluster.h b/src/cluster.h index d3af4a355..596a4629a 100644 --- a/src/cluster.h +++ b/src/cluster.h @@ -283,5 +283,6 @@ typedef struct { clusterNode *getNodeByQuery(client *c, struct redisCommand *cmd, robj **argv, int argc, int *hashslot, int *ask); int clusterRedirectBlockedClientIfNeeded(client *c); void clusterRedirectClient(client *c, clusterNode *n, int hashslot, int error_code); +unsigned long getClusterConnectionsCount(void); #endif /* __CLUSTER_H */ diff --git a/src/networking.c b/src/networking.c index 77b9a6fcf..9d36ed3a2 100644 --- a/src/networking.c +++ b/src/networking.c @@ -892,17 +892,24 @@ static void acceptCommonHandler(connection *conn, int flags, char *ip) { client *c; UNUSED(ip); - /* Admission control will happen before a client is created and connAccept() + /* Limit the number of connections we take at the same time. + * + * Admission control will happen before a client is created and connAccept() * called, because we don't want to even start transport-level negotiation - * if rejected. - */ - if (listLength(server.clients) >= server.maxclients) { - char *err = "-ERR max number of clients reached\r\n"; + * if rejected. */ + if (listLength(server.clients) + getClusterConnectionsCount() + >= server.maxclients) + { + char *err; + if (server.cluster_enabled) + err = "-ERR max number of clients reached\r\n"; + else + err = "-ERR max number of clients + cluster " + "connections reached\r\n"; /* That's a best effort error message, don't check write errors. - * Note that for TLS connections, no handshake was done yet so nothing is written - * and the connection will just drop. - */ + * Note that for TLS connections, no handshake was done yet so nothing + * is written and the connection will just drop. */ if (connWrite(conn,err,strlen(err)) == -1) { /* Nothing to do, Just to avoid the warning... */ } From 88e1f80eeedfd3201ce4acba2bdf8dde27b0c4cd Mon Sep 17 00:00:00 2001 From: chenhui0212 Date: Tue, 16 Jun 2020 17:50:38 +0800 Subject: [PATCH 015/215] fix comments in listpack.c (cherry picked from commit 71fafd761ad5f939f485259eb3856ed3766c98be) --- src/listpack.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/listpack.c b/src/listpack.c index e1f4d9a02..9e77ab12d 100644 --- a/src/listpack.c +++ b/src/listpack.c @@ -773,13 +773,13 @@ unsigned char *lpSeek(unsigned char *lp, long index) { * is past the half of the listpack. */ if (index > numele/2) { forward = 0; - /* Left to right scanning always expects a negative index. Convert + /* Right to left scanning always expects a negative index. Convert * our index to negative form. */ index -= numele; } } else { /* If the listpack length is unspecified, for negative indexes we - * want to always scan left-to-right. */ + * want to always scan right-to-left. 
*/ if (index < 0) forward = 0; } From 3328b7a514e0ae46b440c674d267a0b6a90251e7 Mon Sep 17 00:00:00 2001 From: Tomasz Poradowski Date: Wed, 17 Jun 2020 22:22:49 +0200 Subject: [PATCH 016/215] ensure SHUTDOWN_NOSAVE in Sentinel mode - enforcing of SHUTDOWN_NOSAVE flag in one place to make it consitent when running in Sentinel mode (cherry picked from commit 4ee011adb5f0d56085ecd3e5d643ab192ef77ce6) --- src/db.c | 8 -------- src/server.c | 9 +++++++++ 2 files changed, 9 insertions(+), 8 deletions(-) diff --git a/src/db.c b/src/db.c index dc4a0b63e..19b2c48e4 100644 --- a/src/db.c +++ b/src/db.c @@ -963,14 +963,6 @@ void shutdownCommand(client *c) { return; } } - /* When SHUTDOWN is called while the server is loading a dataset in - * memory we need to make sure no attempt is performed to save - * the dataset on shutdown (otherwise it could overwrite the current DB - * with half-read data). - * - * Also when in Sentinel mode clear the SAVE flag and force NOSAVE. */ - if (server.loading || server.sentinel_mode) - flags = (flags & ~SHUTDOWN_SAVE) | SHUTDOWN_NOSAVE; if (prepareForShutdown(flags) == C_OK) exit(0); addReplyError(c,"Errors trying to SHUTDOWN. Check logs."); } diff --git a/src/server.c b/src/server.c index 53dccf875..7c92c9244 100644 --- a/src/server.c +++ b/src/server.c @@ -3680,6 +3680,15 @@ void closeListeningSockets(int unlink_unix_socket) { } int prepareForShutdown(int flags) { + /* When SHUTDOWN is called while the server is loading a dataset in + * memory we need to make sure no attempt is performed to save + * the dataset on shutdown (otherwise it could overwrite the current DB + * with half-read data). + * + * Also when in Sentinel mode clear the SAVE flag and force NOSAVE. */ + if (server.loading || server.sentinel_mode) + flags = (flags & ~SHUTDOWN_SAVE) | SHUTDOWN_NOSAVE; + int save = flags & SHUTDOWN_SAVE; int nosave = flags & SHUTDOWN_NOSAVE; From 2b189f098e6fff3a66fb99b7f77f2fdce889b4ba Mon Sep 17 00:00:00 2001 From: chenhui0212 Date: Thu, 18 Jun 2020 17:28:26 +0800 Subject: [PATCH 017/215] Fix comments in function raxLowWalk of listpack.c (cherry picked from commit f800b52172622b84aa8bdf09aa6cd522aade93be) --- src/rax.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/rax.c b/src/rax.c index 7dcf04582..c8a1fb6b4 100644 --- a/src/rax.c +++ b/src/rax.c @@ -487,8 +487,8 @@ static inline size_t raxLowWalk(rax *rax, unsigned char *s, size_t len, raxNode if (h->iscompr) j = 0; /* Compressed node only child is at index 0. */ memcpy(&h,children+j,sizeof(h)); parentlink = children+j; - j = 0; /* If the new node is compressed and we do not - iterate again (since i == l) set the split + j = 0; /* If the new node is non compressed and we do not + iterate again (since i == len) set the split position to 0 to signal this node represents the searched key. 
*/ } From d3aa3791fef71f68fd633d71408674de067ba52e Mon Sep 17 00:00:00 2001 From: hwware Date: Sun, 21 Jun 2020 23:04:28 -0400 Subject: [PATCH 018/215] fix memory leak in sentinel connection sharing (cherry picked from commit 1bfa2d27a637119226ee3244d2d219c7e5a7ff33) --- src/sentinel.c | 1 + 1 file changed, 1 insertion(+) diff --git a/src/sentinel.c b/src/sentinel.c index fb504ae4d..5be4193dc 100644 --- a/src/sentinel.c +++ b/src/sentinel.c @@ -1076,6 +1076,7 @@ int sentinelTryConnectionSharing(sentinelRedisInstance *ri) { releaseInstanceLink(ri->link,NULL); ri->link = match->link; match->link->refcount++; + dictReleaseIterator(di); return C_OK; } dictReleaseIterator(di); From 8312aa27d47c0befcf69eb74d0a5dc19745ffd32 Mon Sep 17 00:00:00 2001 From: antirez Date: Mon, 22 Jun 2020 11:21:21 +0200 Subject: [PATCH 019/215] Clarify maxclients and cluster in conf. Remove myself too. (cherry picked from commit 59fd178014c7cca1b0c668b30ab0d991dd3030f3) --- redis.conf | 5 +++++ src/cluster.c | 5 ++++- 2 files changed, 9 insertions(+), 1 deletion(-) diff --git a/redis.conf b/redis.conf index 5cea06a47..a51ef007d 100644 --- a/redis.conf +++ b/redis.conf @@ -805,6 +805,11 @@ acllog-max-len 128 # Once the limit is reached Redis will close all the new connections sending # an error 'max number of clients reached'. # +# IMPORTANT: When Redis Cluster is used, the max number of connections is also +# shared with the cluster bus: every node in the cluster will use two +# connections, one incoming and another outgoing. It is important to size the +# limit accordingly in case of very large clusters. +# # maxclients 10000 ############################## MEMORY MANAGEMENT ################################ diff --git a/src/cluster.c b/src/cluster.c index ccbc373ca..e15e59fda 100644 --- a/src/cluster.c +++ b/src/cluster.c @@ -694,8 +694,11 @@ void clusterAcceptHandler(aeEventLoop *el, int fd, void *privdata, int mask) { /* Return the approximated number of sockets we are using in order to * take the cluster bus connections. */ unsigned long getClusterConnectionsCount(void) { + /* We decrement the number of nodes by one, since there is the + * "myself" node too in the list. Each node uses two file descriptors, + * one incoming and one outgoing, thus the multiplication by 2. */ return server.cluster_enabled ? - (dictSize(server.cluster->nodes)*2) : 0; + ((dictSize(server.cluster->nodes)-1)*2) : 0; } /* ----------------------------------------------------------------------------- From 9412094f2574a9d635625f2ef21c02628d3f4cc0 Mon Sep 17 00:00:00 2001 From: antirez Date: Mon, 22 Jun 2020 11:41:19 +0200 Subject: [PATCH 020/215] Fix BITFIELD i64 type handling, see #7417. (cherry picked from commit 746297314f3c198a90ceab8462a40abd9fc69f26) --- src/bitops.c | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/src/bitops.c b/src/bitops.c index f506a881b..b37bea2bf 100644 --- a/src/bitops.c +++ b/src/bitops.c @@ -257,7 +257,7 @@ int64_t getSignedBitfield(unsigned char *p, uint64_t offset, uint64_t bits) { /* If the top significant bit is 1, propagate it to all the * higher bits for two's complement representation of signed * integers. 
*/
-    if (value & ((uint64_t)1 << (bits-1)))
+    if (bits < 64 && (value & ((uint64_t)1 << (bits-1))))
         value |= ((uint64_t)-1) << bits;
     return value;
 }
@@ -356,7 +356,6 @@ int checkSignedBitfieldOverflow(int64_t value, int64_t incr, uint64_t bits, int

 handle_wrap:
     {
-        uint64_t mask = ((uint64_t)-1) << bits;
         uint64_t msb = (uint64_t)1 << (bits-1);
         uint64_t a = value, b = incr, c;
         c = a+b; /* Perform addition as unsigned so that's defined. */
@@ -364,10 +363,13 @@ handle_wrap:
         /* If the sign bit is set, propagate to all the higher order
          * bits, to cap the negative value. If it's clear, mask to
          * the positive integer limit. */
-        if (c & msb) {
-            c |= mask;
-        } else {
-            c &= ~mask;
+        if (bits < 64) {
+            uint64_t mask = ((uint64_t)-1) << bits;
+            if (c & msb) {
+                c |= mask;
+            } else {
+                c &= ~mask;
+            }
         }
         *limit = c;
     }
From c8f250f8ddd37e919a4c39128109df9507623f41 Mon Sep 17 00:00:00 2001
From: antirez
Date: Mon, 22 Jun 2020 11:44:11 +0200
Subject: [PATCH 021/215] Include cluster.h for getClusterConnectionsCount().

(cherry picked from commit 21f62c3346ab22ac37f81110e59dae9372c9e4d1)
---
 src/networking.c | 1 +
 1 file changed, 1 insertion(+)

diff --git a/src/networking.c b/src/networking.c
index 9d36ed3a2..2795deafd 100644
--- a/src/networking.c
+++ b/src/networking.c
@@ -29,6 +29,7 @@

 #include "server.h"
 #include "atomicvar.h"
+#include "cluster.h"
 #include 
 #include 
 #include 
From 05e483cbb32875a90e9bf4ae8b5d359bf16bc250 Mon Sep 17 00:00:00 2001
From: Oran Agra
Date: Thu, 11 Jun 2020 21:09:35 +0300
Subject: [PATCH 022/215] EXEC always fails with EXECABORT and multi-state is cleared

In order to support the use of MULTI/EXEC in pipelines, it is important
that MULTI and EXEC are never rejected and that it is easy for the client
to know if the connection is still in multi state.

It was easy to make sure MULTI and DISCARD never fail (done by previous
commits) since these only change the client state and don't do any actual
change in the server, but EXEC is a different story.

Since in the past it was possible for clients to handle some EXEC errors
and retry the EXEC, we now can't afford to return any error on EXEC other
than EXECABORT, which now carries with it the real reason for the abort too.

Other fixes in this commit:
- Some checks that were performed at the time of queuing need to be
  re-validated when EXEC runs. For instance, if the transaction contains
  write commands, it needs to be aborted. There was one check that was
  already done in execCommand (-READONLY), but other checks were missing:
  -OOM, -MISCONF, -NOREPLICAS, -MASTERDOWN.
- When a command was rejected by processCommand it was rejected with
  addReply, which was not recognized as an error in case the bad command
  came from the master. This will make it possible to count or MONITOR
  these errors in the future.
- Make it easier for tests to create additional (non-deferred) clients.
- Add tests for the fixes of this commit.
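For illustration, after this change a transaction whose queued commands can no
longer be served is aborted as a whole at EXEC time. A sketched session based
on the new tests (exact error strings may differ):

    > MULTI
    OK
    > INCR xx
    QUEUED
    ... another client runs: CONFIG SET min-replicas-to-write 2 ...
    > EXEC
    (error) EXECABORT Transaction discarded because of: NOREPLICAS Not enough good replicas to write.

Either way, after an EXECABORT reply the connection is guaranteed to be out of
the multi state.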
(cherry picked from commit 65a3307bc95aadbc91d85cdf9dfbe1b3493222ca) --- src/multi.c | 43 +++++++-------- src/networking.c | 20 ++++--- src/server.c | 93 ++++++++++++++++++++------------- src/server.h | 5 ++ tests/test_helper.tcl | 15 ++++++ tests/unit/multi.tcl | 119 +++++++++++++++++++++++++++++++++--------- 6 files changed, 204 insertions(+), 91 deletions(-) diff --git a/src/multi.c b/src/multi.c index 60a07dfc7..35ddf92af 100644 --- a/src/multi.c +++ b/src/multi.c @@ -36,6 +36,7 @@ void initClientMultiState(client *c) { c->mstate.commands = NULL; c->mstate.count = 0; c->mstate.cmd_flags = 0; + c->mstate.cmd_inv_flags = 0; } /* Release all the resources associated with MULTI/EXEC state */ @@ -76,6 +77,7 @@ void queueMultiCommand(client *c) { incrRefCount(mc->argv[j]); c->mstate.count++; c->mstate.cmd_flags |= c->cmd->flags; + c->mstate.cmd_inv_flags |= ~c->cmd->flags; } void discardTransaction(client *c) { @@ -122,6 +124,23 @@ void execCommandPropagateExec(client *c) { PROPAGATE_AOF|PROPAGATE_REPL); } +/* Aborts a transaction, with a specific error message. + * The transaction is always aboarted with -EXECABORT so that the client knows + * the server exited the multi state, but the actual reason for the abort is + * included too. */ +void execCommandAbort(client *c, sds error) { + discardTransaction(c); + + if (error[0] == '-') error++; + addReplyErrorFormat(c, "-EXECABORT Transaction discarded because of: %s", error); + + /* Send EXEC to clients waiting data from MONITOR. We did send a MULTI + * already, and didn't send any of the queued commands, now we'll just send + * EXEC so it is clear that the transaction is over. */ + if (listLength(server.monitors) && !server.loading) + replicationFeedMonitors(c,server.monitors,c->db->id,c->argv,c->argc); +} + void execCommand(client *c) { int j; robj **orig_argv; @@ -135,15 +154,6 @@ void execCommand(client *c) { return; } - /* If we are in -BUSY state, flag the transaction and return the - * -BUSY error, like Redis <= 5. This is a temporary fix, may be changed - * ASAP, see issue #7353 on Github. */ - if (server.lua_timedout) { - flagTransaction(c); - addReply(c, shared.slowscripterr); - return; - } - /* Check if we need to abort the EXEC because: * 1) Some WATCHed key was touched. * 2) There was a previous error while queueing commands. @@ -157,21 +167,6 @@ void execCommand(client *c) { goto handle_monitor; } - /* If there are write commands inside the transaction, and this is a read - * only slave, we want to send an error. This happens when the transaction - * was initiated when the instance was a master or a writable replica and - * then the configuration changed (for example instance was turned into - * a replica). */ - if (!server.loading && server.masterhost && server.repl_slave_ro && - !(c->flags & CLIENT_MASTER) && c->mstate.cmd_flags & CMD_WRITE) - { - addReplyError(c, - "Transaction contains write commands but instance " - "is now a read-only replica. EXEC aborted."); - discardTransaction(c); - goto handle_monitor; - } - /* Exec all the queued commands */ unwatchAllKeys(c); /* Unwatch ASAP otherwise we'll waste CPU cycles */ orig_argv = c->argv; diff --git a/src/networking.c b/src/networking.c index 2795deafd..d35347991 100644 --- a/src/networking.c +++ b/src/networking.c @@ -407,19 +407,23 @@ void addReplyError(client *c, const char *err) { addReplyErrorLength(c,err,strlen(err)); } +/* See addReplyErrorLength. + * Makes sure there are no newlines in the string, otherwise invalid protocol + * is emitted. 
*/ +void addReplyErrorSafe(client *c, char *s, size_t len) { + size_t j; + for (j = 0; j < len; j++) { + if (s[j] == '\r' || s[j] == '\n') s[j] = ' '; + } + addReplyErrorLength(c,s,sdslen(s)); +} + void addReplyErrorFormat(client *c, const char *fmt, ...) { - size_t l, j; va_list ap; va_start(ap,fmt); sds s = sdscatvprintf(sdsempty(),fmt,ap); va_end(ap); - /* Make sure there are no newlines in the string, otherwise invalid protocol - * is emitted. */ - l = sdslen(s); - for (j = 0; j < l; j++) { - if (s[j] == '\r' || s[j] == '\n') s[j] = ' '; - } - addReplyErrorLength(c,s,sdslen(s)); + addReplyErrorSafe(c, s, sdslen(s)); sdsfree(s); } diff --git a/src/server.c b/src/server.c index 7c92c9244..1f794e4ed 100644 --- a/src/server.c +++ b/src/server.c @@ -3402,6 +3402,34 @@ void call(client *c, int flags) { server.stat_numcommands++; } +/* Used when a command that is ready for execution needs to be rejected, due to + * varios pre-execution checks. it returns the appropriate error to the client. + * If there's a transaction is flags it as dirty, and if the command is EXEC, + * it aborts the transaction. */ +void rejectCommand(client *c, robj *reply) { + flagTransaction(c); + if (c->cmd && c->cmd->proc == execCommand) { + execCommandAbort(c, reply->ptr); + } else { + /* using addReplyError* rather than addReply so that the error can be logged. */ + addReplyErrorSafe(c, reply->ptr, sdslen(reply->ptr)); + } +} + +void rejectCommandFormat(client *c, const char *fmt, ...) { + flagTransaction(c); + va_list ap; + va_start(ap,fmt); + sds s = sdscatvprintf(sdsempty(),fmt,ap); + va_end(ap); + if (c->cmd && c->cmd->proc == execCommand) { + execCommandAbort(c, s); + } else { + addReplyErrorSafe(c, s, sdslen(s)); + } + sdsfree(s); +} + /* If this function gets called we already read a whole * command, arguments are in the client argv/argc fields. * processCommand() execute the command or prepare the @@ -3427,23 +3455,30 @@ int processCommand(client *c) { * such as wrong arity, bad command name and so forth. */ c->cmd = c->lastcmd = lookupCommand(c->argv[0]->ptr); if (!c->cmd) { - flagTransaction(c); sds args = sdsempty(); int i; for (i=1; i < c->argc && sdslen(args) < 128; i++) args = sdscatprintf(args, "`%.*s`, ", 128-(int)sdslen(args), (char*)c->argv[i]->ptr); - addReplyErrorFormat(c,"unknown command `%s`, with args beginning with: %s", + rejectCommandFormat(c,"unknown command `%s`, with args beginning with: %s", (char*)c->argv[0]->ptr, args); sdsfree(args); return C_OK; } else if ((c->cmd->arity > 0 && c->cmd->arity != c->argc) || (c->argc < -c->cmd->arity)) { - flagTransaction(c); - addReplyErrorFormat(c,"wrong number of arguments for '%s' command", + rejectCommandFormat(c,"wrong number of arguments for '%s' command", c->cmd->name); return C_OK; } + int is_write_command = (c->cmd->flags & CMD_WRITE) || + (c->cmd->proc == execCommand && (c->mstate.cmd_flags & CMD_WRITE)); + int is_denyoom_command = (c->cmd->flags & CMD_DENYOOM) || + (c->cmd->proc == execCommand && (c->mstate.cmd_flags & CMD_DENYOOM)); + int is_denystale_command = !(c->cmd->flags & CMD_STALE) || + (c->cmd->proc == execCommand && (c->mstate.cmd_inv_flags & CMD_STALE)); + int is_denyloading_command = !(c->cmd->flags & CMD_LOADING) || + (c->cmd->proc == execCommand && (c->mstate.cmd_inv_flags & CMD_LOADING)); + /* Check if the user is authenticated. This check is skipped in case * the default user is flagged as "nopass" and is active. 
*/ int auth_required = (!(DefaultUser->flags & USER_FLAG_NOPASS) || @@ -3453,8 +3488,7 @@ int processCommand(client *c) { /* AUTH and HELLO and no auth modules are valid even in * non-authenticated state. */ if (!(c->cmd->flags & CMD_NO_AUTH)) { - flagTransaction(c); - addReply(c,shared.noautherr); + rejectCommand(c,shared.noautherr); return C_OK; } } @@ -3465,13 +3499,12 @@ int processCommand(client *c) { int acl_retval = ACLCheckCommandPerm(c,&acl_keypos); if (acl_retval != ACL_OK) { addACLLogEntry(c,acl_retval,acl_keypos,NULL); - flagTransaction(c); if (acl_retval == ACL_DENIED_CMD) - addReplyErrorFormat(c, + rejectCommandFormat(c, "-NOPERM this user has no permissions to run " "the '%s' command or its subcommand", c->cmd->name); else - addReplyErrorFormat(c, + rejectCommandFormat(c, "-NOPERM this user has no permissions to access " "one of the keys used as arguments"); return C_OK; @@ -3519,13 +3552,11 @@ int processCommand(client *c) { * is trying to execute is denied during OOM conditions or the client * is in MULTI/EXEC context? Error. */ if (out_of_memory && - (c->cmd->flags & CMD_DENYOOM || + (is_denyoom_command || (c->flags & CLIENT_MULTI && - c->cmd->proc != execCommand && c->cmd->proc != discardCommand))) { - flagTransaction(c); - addReply(c, shared.oomerr); + rejectCommand(c, shared.oomerr); return C_OK; } @@ -3546,17 +3577,14 @@ int processCommand(client *c) { int deny_write_type = writeCommandsDeniedByDiskError(); if (deny_write_type != DISK_ERROR_TYPE_NONE && server.masterhost == NULL && - (c->cmd->flags & CMD_WRITE || - c->cmd->proc == pingCommand)) + (is_write_command ||c->cmd->proc == pingCommand)) { - flagTransaction(c); if (deny_write_type == DISK_ERROR_TYPE_RDB) - addReply(c, shared.bgsaveerr); + rejectCommand(c, shared.bgsaveerr); else - addReplySds(c, - sdscatprintf(sdsempty(), + rejectCommandFormat(c, "-MISCONF Errors writing to the AOF file: %s\r\n", - strerror(server.aof_last_write_errno))); + strerror(server.aof_last_write_errno)); return C_OK; } @@ -3565,11 +3593,10 @@ int processCommand(client *c) { if (server.masterhost == NULL && server.repl_min_slaves_to_write && server.repl_min_slaves_max_lag && - c->cmd->flags & CMD_WRITE && + is_write_command && server.repl_good_slaves_count < server.repl_min_slaves_to_write) { - flagTransaction(c); - addReply(c, shared.noreplicaserr); + rejectCommand(c, shared.noreplicaserr); return C_OK; } @@ -3577,10 +3604,9 @@ int processCommand(client *c) { * accept write commands if this is our master. */ if (server.masterhost && server.repl_slave_ro && !(c->flags & CLIENT_MASTER) && - c->cmd->flags & CMD_WRITE) + is_write_command) { - flagTransaction(c); - addReply(c, shared.roslaveerr); + rejectCommand(c, shared.roslaveerr); return C_OK; } @@ -3592,7 +3618,7 @@ int processCommand(client *c) { c->cmd->proc != unsubscribeCommand && c->cmd->proc != psubscribeCommand && c->cmd->proc != punsubscribeCommand) { - addReplyErrorFormat(c, + rejectCommandFormat(c, "Can't execute '%s': only (P)SUBSCRIBE / " "(P)UNSUBSCRIBE / PING / QUIT are allowed in this context", c->cmd->name); @@ -3604,17 +3630,16 @@ int processCommand(client *c) { * link with master. */ if (server.masterhost && server.repl_state != REPL_STATE_CONNECTED && server.repl_serve_stale_data == 0 && - !(c->cmd->flags & CMD_STALE)) + is_denystale_command) { - flagTransaction(c); - addReply(c, shared.masterdownerr); + rejectCommand(c, shared.masterdownerr); return C_OK; } /* Loading DB? Return an error if the command has not the * CMD_LOADING flag. 
*/ - if (server.loading && !(c->cmd->flags & CMD_LOADING)) { - addReply(c, shared.loadingerr); + if (server.loading && is_denyloading_command) { + rejectCommand(c, shared.loadingerr); return C_OK; } @@ -3629,7 +3654,6 @@ int processCommand(client *c) { c->cmd->proc != helloCommand && c->cmd->proc != replconfCommand && c->cmd->proc != multiCommand && - c->cmd->proc != execCommand && c->cmd->proc != discardCommand && c->cmd->proc != watchCommand && c->cmd->proc != unwatchCommand && @@ -3640,8 +3664,7 @@ int processCommand(client *c) { c->argc == 2 && tolower(((char*)c->argv[1]->ptr)[0]) == 'k')) { - flagTransaction(c); - addReply(c, shared.slowscripterr); + rejectCommand(c, shared.slowscripterr); return C_OK; } diff --git a/src/server.h b/src/server.h index 841e1f941..6c36385e1 100644 --- a/src/server.h +++ b/src/server.h @@ -666,6 +666,9 @@ typedef struct multiState { int cmd_flags; /* The accumulated command flags OR-ed together. So if at least a command has a given flag, it will be set in this field. */ + int cmd_inv_flags; /* Same as cmd_flags, OR-ing the ~flags. so that it + is possible to know if all the commands have a + certain flag. */ int minreplicas; /* MINREPLICAS for synchronous replication */ time_t minreplicas_timeout; /* MINREPLICAS timeout as unixtime. */ } multiState; @@ -1626,6 +1629,7 @@ void addReplyBulkLongLong(client *c, long long ll); void addReply(client *c, robj *obj); void addReplySds(client *c, sds s); void addReplyBulkSds(client *c, sds s); +void addReplyErrorSafe(client *c, char *s, size_t len); void addReplyError(client *c, const char *err); void addReplyStatus(client *c, const char *status); void addReplyDouble(client *c, double d); @@ -1724,6 +1728,7 @@ void touchWatchedKey(redisDb *db, robj *key); void touchWatchedKeysOnFlush(int dbid); void discardTransaction(client *c); void flagTransaction(client *c); +void execCommandAbort(client *c, sds error); void execCommandPropagateMulti(client *c); void execCommandPropagateExec(client *c); diff --git a/tests/test_helper.tcl b/tests/test_helper.tcl index fba54acb5..ef9bf7fdf 100644 --- a/tests/test_helper.tcl +++ b/tests/test_helper.tcl @@ -196,6 +196,21 @@ proc redis_deferring_client {args} { return $client } +proc redis_client {args} { + set level 0 + if {[llength $args] > 0 && [string is integer [lindex $args 0]]} { + set level [lindex $args 0] + set args [lrange $args 1 end] + } + + # create client that defers reading reply + set client [redis [srv $level "host"] [srv $level "port"] 0 $::tls] + + # select the right db and read the response (OK) + $client select 9 + return $client +} + # Provide easy access to INFO properties. Same semantic as "proc r". proc s {args} { set level 0 diff --git a/tests/unit/multi.tcl b/tests/unit/multi.tcl index 0c70fbde7..44a822ba6 100644 --- a/tests/unit/multi.tcl +++ b/tests/unit/multi.tcl @@ -325,74 +325,145 @@ start_server {tags {"multi"}} { # check that if MULTI arrives during timeout, it is either refused, or # allowed to pass, and we don't end up executing half of the transaction set rd1 [redis_deferring_client] - set rd2 [redis_deferring_client] + set r2 [redis_client] r config set lua-time-limit 10 r set xx 1 $rd1 eval {while true do end} 0 after 200 - catch { $rd2 multi; $rd2 read } e - catch { $rd2 incr xx; $rd2 read } e + catch { $r2 multi; } e + catch { $r2 incr xx; } e r script kill after 200 ; # Give some time to Lua to call the hook again... 
- catch { $rd2 incr xx; $rd2 read } e - catch { $rd2 exec; $rd2 read } e + catch { $r2 incr xx; } e + catch { $r2 exec; } e + assert_match {EXECABORT*previous errors*} $e set xx [r get xx] # make sure that either the whole transcation passed or none of it (we actually expect none) assert { $xx == 1 || $xx == 3} # check that the connection is no longer in multi state - $rd2 ping asdf - set pong [$rd2 read] + set pong [$r2 ping asdf] assert_equal $pong "asdf" + $rd1 close; $r2 close } test {EXEC and script timeout} { # check that if EXEC arrives during timeout, we don't end up executing # half of the transaction, and also that we exit the multi state set rd1 [redis_deferring_client] - set rd2 [redis_deferring_client] + set r2 [redis_client] r config set lua-time-limit 10 r set xx 1 - catch { $rd2 multi; $rd2 read } e - catch { $rd2 incr xx; $rd2 read } e + catch { $r2 multi; } e + catch { $r2 incr xx; } e $rd1 eval {while true do end} 0 after 200 - catch { $rd2 incr xx; $rd2 read } e - catch { $rd2 exec; $rd2 read } e + catch { $r2 incr xx; } e + catch { $r2 exec; } e + assert_match {EXECABORT*BUSY*} $e r script kill after 200 ; # Give some time to Lua to call the hook again... set xx [r get xx] # make sure that either the whole transcation passed or none of it (we actually expect none) assert { $xx == 1 || $xx == 3} - # Discard the transaction since EXEC likely got -BUSY error - # so the client is still in MULTI state. - catch { $rd2 discard ;$rd2 read } e # check that the connection is no longer in multi state - $rd2 ping asdf - set pong [$rd2 read] + set pong [$r2 ping asdf] assert_equal $pong "asdf" + $rd1 close; $r2 close } test {MULTI-EXEC body and script timeout} { # check that we don't run an imcomplete transaction due to some commands # arriving during busy script set rd1 [redis_deferring_client] - set rd2 [redis_deferring_client] + set r2 [redis_client] r config set lua-time-limit 10 r set xx 1 - catch { $rd2 multi; $rd2 read } e - catch { $rd2 incr xx; $rd2 read } e + catch { $r2 multi; } e + catch { $r2 incr xx; } e $rd1 eval {while true do end} 0 after 200 - catch { $rd2 incr xx; $rd2 read } e + catch { $r2 incr xx; } e r script kill after 200 ; # Give some time to Lua to call the hook again... - catch { $rd2 exec; $rd2 read } e + catch { $r2 exec; } e + assert_match {EXECABORT*previous errors*} $e set xx [r get xx] # make sure that either the whole transcation passed or none of it (we actually expect none) assert { $xx == 1 || $xx == 3} # check that the connection is no longer in multi state - $rd2 ping asdf - set pong [$rd2 read] + set pong [$r2 ping asdf] assert_equal $pong "asdf" + $rd1 close; $r2 close + } + + test {just EXEC and script timeout} { + # check that if EXEC arrives during timeout, we don't end up executing + # actual commands during busy script, and also that we exit the multi state + set rd1 [redis_deferring_client] + set r2 [redis_client] + r config set lua-time-limit 10 + r set xx 1 + catch { $r2 multi; } e + catch { $r2 incr xx; } e + $rd1 eval {while true do end} 0 + after 200 + catch { $r2 exec; } e + assert_match {EXECABORT*BUSY*} $e + r script kill + after 200 ; # Give some time to Lua to call the hook again... 
+ set xx [r get xx] + # make we didn't execute the transaction + assert { $xx == 1} + # check that the connection is no longer in multi state + set pong [$r2 ping asdf] + assert_equal $pong "asdf" + $rd1 close; $r2 close + } + + test {exec with write commands and state change} { + # check that exec that contains write commands fails if server state changed since they were queued + set r1 [redis_client] + r set xx 1 + r multi + r incr xx + $r1 config set min-replicas-to-write 2 + catch {r exec} e + assert_match {*EXECABORT*NOREPLICAS*} $e + set xx [r get xx] + # make sure that the INCR wasn't executed + assert { $xx == 1} + $r1 config set min-replicas-to-write 0 + $r1 close; + } + + test {exec with read commands and stale replica state change} { + # check that exec that contains read commands fails if server state changed since they were queued + r config set replica-serve-stale-data no + set r1 [redis_client] + r set xx 1 + + # check that GET is disallowed on stale replica, even if the replica becomes stale only after queuing. + r multi + r get xx + $r1 replicaof localhsot 0 + catch {r exec} e + assert_match {*EXECABORT*MASTERDOWN*} $e + + # check that PING is allowed + r multi + r ping + $r1 replicaof localhsot 0 + set pong [r exec] + assert {$pong == "PONG"} + + # check that when replica is not stale, GET is allowed + # while we're at it, let's check that multi is allowed on stale replica too + r multi + $r1 replicaof no one + r get xx + set xx [r exec] + # make sure that the INCR was executed + assert { $xx == 1 } + $r1 close; } } From 3f5cf24da9bfc92baefa5a79f47390f09ea491be Mon Sep 17 00:00:00 2001 From: Dave Nielsen Date: Tue, 23 Jun 2020 09:51:12 -0700 Subject: [PATCH 023/215] updated copyright year Changed "2015" to "2020" (cherry picked from commit 2d6d9f75524f280b9bc02613ebe9727b92c1429e) --- COPYING | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/COPYING b/COPYING index ac68e012b..a381681a1 100644 --- a/COPYING +++ b/COPYING @@ -1,4 +1,4 @@ -Copyright (c) 2006-2015, Salvatore Sanfilippo +Copyright (c) 2006-2020, Salvatore Sanfilippo All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: From 17aaf5ec979e0db5c09a0dd4cce0daf48f93ec2d Mon Sep 17 00:00:00 2001 From: antirez Date: Wed, 24 Jun 2020 09:07:17 +0200 Subject: [PATCH 024/215] LPOS: option FIRST renamed RANK. (cherry picked from commit a5a3a7bbc61203398ecc1d5b52c76214f5672776) --- src/t_list.c | 10 +++++----- tests/unit/type/list.tcl | 28 ++++++++++++++-------------- 2 files changed, 19 insertions(+), 19 deletions(-) diff --git a/src/t_list.c b/src/t_list.c index e580139ab..2c339888d 100644 --- a/src/t_list.c +++ b/src/t_list.c @@ -487,16 +487,16 @@ void ltrimCommand(client *c) { addReply(c,shared.ok); } -/* LPOS key element [FIRST rank] [COUNT num-matches] [MAXLEN len] +/* LPOS key element [RANK rank] [COUNT num-matches] [MAXLEN len] * - * FIRST "rank" is the position of the match, so if it is 1, the first match + * The "rank" is the position of the match, so if it is 1, the first match * is returned, if it is 2 the second match is returned and so forth. * It is 1 by default. If negative has the same meaning but the search is * performed starting from the end of the list. * * If COUNT is given, instead of returning the single element, a list of * all the matching elements up to "num-matches" are returned. 
COUNT can - * be combiled with FIRST in order to returning only the element starting + * be combiled with RANK in order to returning only the element starting * from the Nth. If COUNT is zero, all the matching elements are returned. * * MAXLEN tells the command to scan a max of len elements. If zero (the @@ -515,12 +515,12 @@ void lposCommand(client *c) { char *opt = c->argv[j]->ptr; int moreargs = (c->argc-1)-j; - if (!strcasecmp(opt,"FIRST") && moreargs) { + if (!strcasecmp(opt,"RANK") && moreargs) { j++; if (getLongFromObjectOrReply(c, c->argv[j], &rank, NULL) != C_OK) return; if (rank == 0) { - addReplyError(c,"FIRST can't be zero: use 1 to start from " + addReplyError(c,"RANK can't be zero: use 1 to start from " "the first match, 2 from the second, ..."); return; } diff --git a/tests/unit/type/list.tcl b/tests/unit/type/list.tcl index a0c04dcaa..0e39d7d95 100644 --- a/tests/unit/type/list.tcl +++ b/tests/unit/type/list.tcl @@ -13,12 +13,12 @@ start_server { assert {[r LPOS mylist c] == 2} } - test {LPOS FIRST (positive and negative rank) option} { - assert {[r LPOS mylist c FIRST 1] == 2} - assert {[r LPOS mylist c FIRST 2] == 6} - assert {[r LPOS mylist c FIRST 4] eq ""} - assert {[r LPOS mylist c FIRST -1] == 7} - assert {[r LPOS mylist c FIRST -2] == 6} + test {LPOS RANK (positive and negative rank) option} { + assert {[r LPOS mylist c RANK 1] == 2} + assert {[r LPOS mylist c RANK 2] == 6} + assert {[r LPOS mylist c RANK 4] eq ""} + assert {[r LPOS mylist c RANK -1] == 7} + assert {[r LPOS mylist c RANK -2] == 6} } test {LPOS COUNT option} { @@ -28,26 +28,26 @@ start_server { assert {[r LPOS mylist c COUNT 100] == {2 6 7}} } - test {LPOS COUNT + FIRST option} { - assert {[r LPOS mylist c COUNT 0 FIRST 2] == {6 7}} - assert {[r LPOS mylist c COUNT 2 FIRST -1] == {7 6}} + test {LPOS COUNT + RANK option} { + assert {[r LPOS mylist c COUNT 0 RANK 2] == {6 7}} + assert {[r LPOS mylist c COUNT 2 RANK -1] == {7 6}} } test {LPOS non existing key} { - assert {[r LPOS mylistxxx c COUNT 0 FIRST 2] eq {}} + assert {[r LPOS mylistxxx c COUNT 0 RANK 2] eq {}} } test {LPOS no match} { - assert {[r LPOS mylist x COUNT 2 FIRST -1] eq {}} - assert {[r LPOS mylist x FIRST -1] eq {}} + assert {[r LPOS mylist x COUNT 2 RANK -1] eq {}} + assert {[r LPOS mylist x RANK -1] eq {}} } test {LPOS MAXLEN} { assert {[r LPOS mylist a COUNT 0 MAXLEN 1] == {0}} assert {[r LPOS mylist c COUNT 0 MAXLEN 1] == {}} assert {[r LPOS mylist c COUNT 0 MAXLEN 3] == {2}} - assert {[r LPOS mylist c COUNT 0 MAXLEN 3 FIRST -1] == {7 6}} - assert {[r LPOS mylist c COUNT 0 MAXLEN 7 FIRST 2] == {6}} + assert {[r LPOS mylist c COUNT 0 MAXLEN 3 RANK -1] == {7 6}} + assert {[r LPOS mylist c COUNT 0 MAXLEN 7 RANK 2] == {6}} } test {LPUSH, RPUSH, LLENGTH, LINDEX, LPOP - ziplist} { From 14a59d4ce7e266f9220e26dd2824a6f1d5c10186 Mon Sep 17 00:00:00 2001 From: antirez Date: Thu, 25 Jun 2020 12:58:21 +0200 Subject: [PATCH 025/215] Update comment to clarify change in #7398. (cherry picked from commit ad0a9df77a2ccf3fdf309dcdd1b54cf350fcbe3c) --- src/cluster.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/src/cluster.c b/src/cluster.c index e15e59fda..e7a32a9a2 100644 --- a/src/cluster.c +++ b/src/cluster.c @@ -1264,7 +1264,10 @@ void markNodeAsFailingIfNeeded(clusterNode *node) { node->fail_time = mstime(); /* Broadcast the failing node name to everybody, forcing all the other - * reachable nodes to flag the node as FAIL. */ + * reachable nodes to flag the node as FAIL. 
+ * We do that even if this node is a replica and not a master: anyway + * the failing state is triggered collecting failure reports from masters, + * so here the replica is only helping propagating this status. */ clusterSendFail(node->name); clusterDoBeforeSleep(CLUSTER_TODO_UPDATE_STATE|CLUSTER_TODO_SAVE_CONFIG); } From 3155adb299bfb8bf6b66658073f1641f623de9ef Mon Sep 17 00:00:00 2001 From: "zhaozhao.zz" <276441700@qq.com> Date: Fri, 10 Jul 2020 13:20:27 +0800 Subject: [PATCH 026/215] BITOP: propagate only when it really SET or DEL targetkey (#5783) For example: BITOP not targetkey sourcekey If targetkey and sourcekey doesn't exist, BITOP has no effect, we do not propagate it, thus can save aof and replica flow. (cherry picked from commit 1978f996d8b13db112d5d2fdf4a4ce2baf636729) --- src/bitops.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/bitops.c b/src/bitops.c index b37bea2bf..4b1a09aa4 100644 --- a/src/bitops.c +++ b/src/bitops.c @@ -759,11 +759,12 @@ void bitopCommand(client *c) { setKey(c,c->db,targetkey,o); notifyKeyspaceEvent(NOTIFY_STRING,"set",targetkey,c->db->id); decrRefCount(o); + server.dirty++; } else if (dbDelete(c->db,targetkey)) { signalModifiedKey(c,c->db,targetkey); notifyKeyspaceEvent(NOTIFY_GENERIC,"del",targetkey,c->db->id); + server.dirty++; } - server.dirty++; addReplyLongLong(c,maxlen); /* Return the output string length in bytes. */ } From 62f85834451def17070d88fc4f09cf65a889eb46 Mon Sep 17 00:00:00 2001 From: Oran Agra Date: Fri, 10 Jul 2020 08:25:26 +0300 Subject: [PATCH 027/215] change references to the github repo location (#7479) (cherry picked from commit 9bbf768d3ceaa882c7dcc0033fc3cb4be0973248) --- BUGS | 2 +- README.md | 6 +++--- src/debug.c | 2 +- utils/generate-command-help.rb | 2 +- utils/releasetools/changelog.tcl | 2 +- 5 files changed, 7 insertions(+), 7 deletions(-) diff --git a/BUGS b/BUGS index a8e936892..7af259340 100644 --- a/BUGS +++ b/BUGS @@ -1 +1 @@ -Please check https://github.com/antirez/redis/issues +Please check https://github.com/redis/redis/issues diff --git a/README.md b/README.md index c08013416..55537e01f 100644 --- a/README.md +++ b/README.md @@ -205,8 +205,8 @@ source distribution. Please see the [CONTRIBUTING][2] file in this source distribution for more information. -[1]: https://github.com/antirez/redis/blob/unstable/COPYING -[2]: https://github.com/antirez/redis/blob/unstable/CONTRIBUTING +[1]: https://github.com/redis/redis/blob/unstable/COPYING +[2]: https://github.com/redis/redis/blob/unstable/CONTRIBUTING Redis internals === @@ -236,7 +236,7 @@ Inside the root are the following important directories: * `src`: contains the Redis implementation, written in C. * `tests`: contains the unit tests, implemented in Tcl. -* `deps`: contains libraries Redis uses. Everything needed to compile Redis is inside this directory; your system just needs to provide `libc`, a POSIX compatible interface and a C compiler. Notably `deps` contains a copy of `jemalloc`, which is the default allocator of Redis under Linux. Note that under `deps` there are also things which started with the Redis project, but for which the main repository is not `antirez/redis`. +* `deps`: contains libraries Redis uses. Everything needed to compile Redis is inside this directory; your system just needs to provide `libc`, a POSIX compatible interface and a C compiler. Notably `deps` contains a copy of `jemalloc`, which is the default allocator of Redis under Linux. 
Note that under `deps` there are also things which started with the Redis project, but for which the main repository is not `redis/redis`. There are a few more directories but they are not very important for our goals here. We'll focus mostly on `src`, where the Redis implementation is contained, diff --git a/src/debug.c b/src/debug.c index d79226bf2..ca113bcaa 100644 --- a/src/debug.c +++ b/src/debug.c @@ -1569,7 +1569,7 @@ void sigsegvHandler(int sig, siginfo_t *info, void *secret) { serverLogRaw(LL_WARNING|LL_RAW, "\n=== REDIS BUG REPORT END. Make sure to include from START to END. ===\n\n" " Please report the crash by opening an issue on github:\n\n" -" http://github.com/antirez/redis/issues\n\n" +" http://github.com/redis/redis/issues\n\n" " Suspect RAM error? Use redis-server --test-memory to verify it.\n\n" ); diff --git a/utils/generate-command-help.rb b/utils/generate-command-help.rb index 29acef69d..e57acf4b9 100755 --- a/utils/generate-command-help.rb +++ b/utils/generate-command-help.rb @@ -53,7 +53,7 @@ def commands require "json" require "uri" - url = URI.parse "https://raw.githubusercontent.com/antirez/redis-doc/master/commands.json" + url = URI.parse "https://raw.githubusercontent.com/redis/redis-doc/master/commands.json" client = Net::HTTP.new url.host, url.port client.use_ssl = true response = client.get url.path diff --git a/utils/releasetools/changelog.tcl b/utils/releasetools/changelog.tcl index 06e38ba99..2288794bb 100755 --- a/utils/releasetools/changelog.tcl +++ b/utils/releasetools/changelog.tcl @@ -30,6 +30,6 @@ append template [exec git log $branch~$count..$branch "--format=format:%an in co #Older, more verbose version. # -#append template [exec git log $branch~30..$branch "--format=format:+-------------------------------------------------------------------------------%n| %s%n| By %an, %ai%n+--------------------------------------------------------------------------------%nhttps://github.com/antirez/redis/commit/%H%n%n%b" --stat] +#append template [exec git log $branch~30..$branch "--format=format:+-------------------------------------------------------------------------------%n| %s%n| By %an, %ai%n+--------------------------------------------------------------------------------%nhttps://github.com/redis/redis/commit/%H%n%n%b" --stat] puts $template From 1104113c07f64ea392135d632a9bc2235921dcdd Mon Sep 17 00:00:00 2001 From: Oran Agra Date: Fri, 10 Jul 2020 08:26:52 +0300 Subject: [PATCH 028/215] tests/valgrind: don't use debug restart (#7404) * tests/valgrind: don't use debug restart DEBUG REATART causes two issues: 1. it uses execve which replaces the original process and valgrind doesn't have a chance to check for errors, so leaks go unreported. 2. valgrind report invalid calls to close() which we're unable to resolve. So now the tests use restart_server mechanism in the tests, that terminates the old server and starts a new one, new PID, but same stdout, stderr. since the stderr can contain two or more valgrind report, it is not enough to just check for the absence of leaks, we also need to check for some known errors, we do both, and fail if we either find an error, or can't find a report saying there are no leaks. other changes: - when killing a server that was already terminated we check for leaks too. - adding DEBUG LEAK which was used to test it. - adding --trace-children to valgrind, although no longer needed. 
- since the stdout contains two or more runs, we need slightly different way of checking if the new process is up (explicitly looking for the new PID) - move the code that handles --wait-server to happen earlier (before watching the startup message in the log), and serve the restarted server too. * squashme - CR fixes (cherry picked from commit 69ade87325eedebdb44760af9a8c28e15381888e) --- src/debug.c | 4 + tests/integration/psync2.tcl | 6 +- tests/integration/rdb.tcl | 14 +--- tests/support/server.tcl | 147 +++++++++++++++++++++++++---------- 4 files changed, 114 insertions(+), 57 deletions(-) diff --git a/src/debug.c b/src/debug.c index ca113bcaa..a74c22647 100644 --- a/src/debug.c +++ b/src/debug.c @@ -378,6 +378,7 @@ void debugCommand(client *c) { "DEBUG PROTOCOL [string|integer|double|bignum|null|array|set|map|attrib|push|verbatim|true|false]", "ERROR -- Return a Redis protocol error with as message. Useful for clients unit tests to simulate Redis errors.", "LOG -- write message to the server log.", +"LEAK -- Create a memory leak of the input string.", "HTSTATS -- Return hash table statistics of the specified Redis database.", "HTSTATS-KEY -- Like htstats but for the hash table stored as key's value.", "LOADAOF -- Flush the AOF buffers on disk and reload the AOF in memory.", @@ -430,6 +431,9 @@ NULL } else if (!strcasecmp(c->argv[1]->ptr,"log") && c->argc == 3) { serverLog(LL_WARNING, "DEBUG LOG: %s", (char*)c->argv[2]->ptr); addReply(c,shared.ok); + } else if (!strcasecmp(c->argv[1]->ptr,"leak") && c->argc == 3) { + sdsdup(c->argv[2]->ptr); + addReply(c,shared.ok); } else if (!strcasecmp(c->argv[1]->ptr,"reload")) { int flush = 1, save = 1; int flags = RDBFLAGS_NONE; diff --git a/tests/integration/psync2.tcl b/tests/integration/psync2.tcl index 3f636463a..1b996ffd4 100644 --- a/tests/integration/psync2.tcl +++ b/tests/integration/psync2.tcl @@ -280,7 +280,8 @@ start_server {} { set sync_partial_err [status $R($master_id) sync_partial_err] catch { $R($slave_id) config rewrite - $R($slave_id) debug restart + restart_server [expr {0-$slave_id}] true + set R($slave_id) [srv [expr {0-$slave_id}] client] } # note: just waiting for connected_slaves==4 has a race condition since # we might do the check before the master realized that the slave disconnected @@ -328,7 +329,8 @@ start_server {} { catch { $R($slave_id) config rewrite - $R($slave_id) debug restart + restart_server [expr {0-$slave_id}] true + set R($slave_id) [srv [expr {0-$slave_id}] client] } # Reconfigure the slave correctly again, when it's back online. 
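The new DEBUG LEAK subcommand above duplicates an sds string and never frees it, which gives the test suite a deterministic way to confirm that the stricter check_valgrind_errors really fails when the leak-free summary is missing. As a rough standalone illustration of what valgrind reports for such a deliberate leak (a demo program, not part of the patch):

    #include <stdlib.h>
    #include <string.h>

    int main(void) {
        char *p = malloc(32);             /* intentionally never freed */
        if (p) strcpy(p, "leaked on purpose");
        return 0;
    }

Run under `valgrind --leak-check=full`, the summary ends with a line like "definitely lost: 32 bytes in 1 blocks" rather than "definitely lost: 0 bytes", which is precisely the condition the updated check_valgrind_errors now treats as a failure.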
diff --git a/tests/integration/rdb.tcl b/tests/integration/rdb.tcl index 123e9c8b6..b176bf199 100644 --- a/tests/integration/rdb.tcl +++ b/tests/integration/rdb.tcl @@ -137,18 +137,8 @@ test {client freed during loading} { # 100mb of rdb, 100k keys will load in more than 1 second r debug populate 100000 key 1000 - catch { - r debug restart - } + restart_server 0 false - set stdout [srv 0 stdout] - while 1 { - # check that the new server actually started and is ready for connections - if {[exec grep -i "Server initialized" | wc -l < $stdout] > 1} { - break - } - after 10 - } # make sure it's still loading assert_equal [s loading] 1 @@ -180,4 +170,4 @@ test {client freed during loading} { # no need to keep waiting for loading to complete exec kill [srv 0 pid] } -} \ No newline at end of file +} diff --git a/tests/support/server.tcl b/tests/support/server.tcl index 146ebc72c..ea7d0b13c 100644 --- a/tests/support/server.tcl +++ b/tests/support/server.tcl @@ -17,7 +17,14 @@ proc check_valgrind_errors stderr { set buf [read $fd] close $fd + # look for stack trace and other errors, or the absense of a leak free summary if {[regexp -- { at 0x} $buf] || + [regexp -- {Warning} $buf] || + [regexp -- {Invalid} $buf] || + [regexp -- {Mismatched} $buf] || + [regexp -- {uninitialized} $buf] || + [regexp -- {has a fishy} $buf] || + [regexp -- {overlap} $buf] || (![regexp -- {definitely lost: 0 bytes} $buf] && ![regexp -- {no leaks are possible} $buf])} { send_data_packet $::test_server_fd err "Valgrind error: $buf\n" @@ -29,7 +36,13 @@ proc kill_server config { if {$::external} return # nevermind if its already dead - if {![is_alive $config]} { return } + if {![is_alive $config]} { + # Check valgrind errors if needed + if {$::valgrind} { + check_valgrind_errors [dict get $config stderr] + } + return + } set pid [dict get $config pid] # check for leaks @@ -153,6 +166,55 @@ proc create_server_config_file {filename config} { close $fp } +proc spawn_server {config_file stdout stderr} { + if {$::valgrind} { + set pid [exec valgrind --track-origins=yes --trace-children=yes --suppressions=[pwd]/src/valgrind.sup --show-reachable=no --show-possibly-lost=no --leak-check=full src/redis-server $config_file >> $stdout 2>> $stderr &] + } elseif ($::stack_logging) { + set pid [exec /usr/bin/env MallocStackLogging=1 MallocLogFile=/tmp/malloc_log.txt src/redis-server $config_file >> $stdout 2>> $stderr &] + } else { + set pid [exec src/redis-server $config_file >> $stdout 2>> $stderr &] + } + + if {$::wait_server} { + set msg "server started PID: $pid. press any key to continue..." + puts $msg + read stdin 1 + } + + # Tell the test server about this new instance. + send_data_packet $::test_server_fd server-spawned $pid + return $pid +} + +# Wait for actual startup, return 1 if port is busy, 0 otherwise +proc wait_server_started {config_file stdout pid} { + set checkperiod 100; # Milliseconds + set maxiter [expr {120*1000/$checkperiod}] ; # Wait up to 2 minutes. + set port_busy 0 + while 1 { + if {[regexp -- " PID: $pid" [exec cat $stdout]]} { + break + } + after $checkperiod + incr maxiter -1 + if {$maxiter == 0} { + start_server_error $config_file "No PID detected in log $stdout" + puts "--- LOG CONTENT ---" + puts [exec cat $stdout] + puts "-------------------" + break + } + + # Check if the port is actually busy and the server failed + # for this reason. 
+ if {[regexp {Could not create server TCP} [exec cat $stdout]]} { + set port_busy 1 + break + } + } + return $port_busy +} + proc start_server {options {code undefined}} { # If we are running against an external server, we just push the # host/port pair in the stack the first time @@ -248,44 +310,10 @@ proc start_server {options {code undefined}} { send_data_packet $::test_server_fd "server-spawning" "port $port" - if {$::valgrind} { - set pid [exec valgrind --track-origins=yes --suppressions=src/valgrind.sup --show-reachable=no --show-possibly-lost=no --leak-check=full src/redis-server $config_file > $stdout 2> $stderr &] - } elseif ($::stack_logging) { - set pid [exec /usr/bin/env MallocStackLogging=1 MallocLogFile=/tmp/malloc_log.txt src/redis-server $config_file > $stdout 2> $stderr &] - } else { - set pid [exec src/redis-server $config_file > $stdout 2> $stderr &] - } - - # Tell the test server about this new instance. - send_data_packet $::test_server_fd server-spawned $pid + set pid [spawn_server $config_file $stdout $stderr] # check that the server actually started - # ugly but tries to be as fast as possible... - if {$::valgrind} {set retrynum 1000} else {set retrynum 100} - - # Wait for actual startup - set checkperiod 100; # Milliseconds - set maxiter [expr {120*1000/100}] ; # Wait up to 2 minutes. - set port_busy 0 - while {![info exists _pid]} { - regexp {PID:\s(\d+)} [exec cat $stdout] _ _pid - after $checkperiod - incr maxiter -1 - if {$maxiter == 0} { - start_server_error $config_file "No PID detected in log $stdout" - puts "--- LOG CONTENT ---" - puts [exec cat $stdout] - puts "-------------------" - break - } - - # Check if the port is actually busy and the server failed - # for this reason. - if {[regexp {Could not create server TCP} [exec cat $stdout]]} { - set port_busy 1 - break - } - } + set port_busy [wait_server_started $config_file $stdout $pid] # Sometimes we have to try a different port, even if we checked # for availability. Other test clients may grab the port before we @@ -302,6 +330,7 @@ proc start_server {options {code undefined}} { continue; # Try again } + if {$::valgrind} {set retrynum 1000} else {set retrynum 100} if {$code ne "undefined"} { set serverisup [server_is_up $::host $port $retrynum] } else { @@ -345,12 +374,6 @@ proc start_server {options {code undefined}} { error_and_quit $config_file $line } - if {$::wait_server} { - set msg "server started PID: [dict get $srv "pid"]. press any key to continue..." 
- puts $msg - read stdin 1 - } - while 1 { # check that the server actually started and is ready for connections if {[exec grep -i "Ready to accept" | wc -l < $stdout] > 0} { @@ -370,6 +393,9 @@ proc start_server {options {code undefined}} { if {[catch { uplevel 1 $code } error]} { set backtrace $::errorInfo + # fetch srv back from the server list, in case it was restarted by restart_server (new PID) + set srv [lindex $::servers end] + # Kill the server without checking for leaks dict set srv "skipleaks" 1 kill_server $srv @@ -387,6 +413,9 @@ proc start_server {options {code undefined}} { error $error $backtrace } + # fetch srv back from the server list, in case it was restarted by restart_server (new PID) + set srv [lindex $::servers end] + # Don't do the leak check when no tests were run if {$num_tests == $::num_tests} { dict set srv "skipleaks" 1 @@ -402,3 +431,35 @@ proc start_server {options {code undefined}} { set _ $srv } } + +proc restart_server {level wait_ready} { + set srv [lindex $::servers end+$level] + kill_server $srv + + set stdout [dict get $srv "stdout"] + set stderr [dict get $srv "stderr"] + set config_file [dict get $srv "config_file"] + + set prev_ready_count [exec grep -i "Ready to accept" | wc -l < $stdout] + + set pid [spawn_server $config_file $stdout $stderr] + + # check that the server actually started + wait_server_started $config_file $stdout $pid + + # update the pid in the servers list + dict set srv "pid" $pid + # re-set $srv in the servers list + lset ::servers end+$level $srv + + if {$wait_ready} { + while 1 { + # check that the server actually started and is ready for connections + if {[exec grep -i "Ready to accept" | wc -l < $stdout] > $prev_ready_count + 1} { + break + } + after 10 + } + } + reconnect $level +} From 2b5f23197cf6bc81a948017197e6cbd186f366ec Mon Sep 17 00:00:00 2001 From: Oran Agra Date: Fri, 10 Jul 2020 08:28:22 +0300 Subject: [PATCH 029/215] stabilize tests that look for log lines (#7367) tests were sensitive to additional log lines appearing in the log causing the search to come empty handed. instead of just looking for the n last log lines, capture the log lines before performing the action, and then search from that offset. (cherry picked from commit 8e76e13472b7d277af78691775c2cf845f68ab90) --- tests/integration/replication.tcl | 16 ++++++++++------ tests/support/util.tcl | 22 +++++++++++++++++++--- tests/unit/moduleapi/testrdb.tcl | 6 ++++-- 3 files changed, 33 insertions(+), 11 deletions(-) diff --git a/tests/integration/replication.tcl b/tests/integration/replication.tcl index 7c03c4bc6..d47ec4fe4 100644 --- a/tests/integration/replication.tcl +++ b/tests/integration/replication.tcl @@ -430,6 +430,7 @@ test {diskless loading short read} { } # Start the replication process... 
+ set loglines [count_log_lines -1] $master config set repl-diskless-sync-delay 0 $replica replicaof $master_host $master_port @@ -439,7 +440,7 @@ test {diskless loading short read} { for {set i 0} {$i < $attempts} {incr i} { # wait for the replica to start reading the rdb # using the log file since the replica only responds to INFO once in 2mb - wait_for_log_message -1 "*Loading DB in memory*" 5 2000 1 + wait_for_log_message -1 "*Loading DB in memory*" $loglines 2000 1 # add some additional random sleep so that we kill the master on a different place each time after [expr {int(rand()*100)}] @@ -448,7 +449,7 @@ test {diskless loading short read} { set killed [$master client kill type replica] if {[catch { - set res [wait_for_log_message -1 "*Internal error in RDB*" 5 100 10] + set res [wait_for_log_message -1 "*Internal error in RDB*" $loglines 100 10] if {$::verbose} { puts $res } @@ -461,6 +462,7 @@ test {diskless loading short read} { $master config set repl-backlog-size [expr {16384 + $i}] } # wait for loading to stop (fail) + set loglines [count_log_lines -1] wait_for_condition 100 10 { [s -1 loading] eq 0 } else { @@ -535,6 +537,7 @@ start_server {tags {"repl"}} { # start replication # it's enough for just one replica to be slow, and have it's write handler enabled # so that the whole rdb generation process is bound to that + set loglines [count_log_lines -1] [lindex $replicas 0] config set repl-diskless-load swapdb [lindex $replicas 0] config set key-load-delay 100 [lindex $replicas 0] replicaof $master_host $master_port @@ -542,7 +545,7 @@ start_server {tags {"repl"}} { # wait for the replicas to start reading the rdb # using the log file since the replica only responds to INFO once in 2mb - wait_for_log_message -1 "*Loading DB in memory*" 8 800 10 + wait_for_log_message -1 "*Loading DB in memory*" $loglines 800 10 if {$measure_time} { set master_statfile "/proc/$master_pid/stat" @@ -558,6 +561,7 @@ start_server {tags {"repl"}} { $master incr $all_drop # disconnect replicas depending on the current test + set loglines [count_log_lines -2] if {$all_drop == "all" || $all_drop == "fast"} { exec kill [srv 0 pid] set replicas_alive [lreplace $replicas_alive 1 1] @@ -576,13 +580,13 @@ start_server {tags {"repl"}} { # make sure we got what we were aiming for, by looking for the message in the log file if {$all_drop == "all"} { - wait_for_log_message -2 "*Diskless rdb transfer, last replica dropped, killing fork child*" 12 1 1 + wait_for_log_message -2 "*Diskless rdb transfer, last replica dropped, killing fork child*" $loglines 1 1 } if {$all_drop == "no"} { - wait_for_log_message -2 "*Diskless rdb transfer, done reading from pipe, 2 replicas still up*" 12 1 1 + wait_for_log_message -2 "*Diskless rdb transfer, done reading from pipe, 2 replicas still up*" $loglines 1 1 } if {$all_drop == "slow" || $all_drop == "fast"} { - wait_for_log_message -2 "*Diskless rdb transfer, done reading from pipe, 1 replicas still up*" 12 1 1 + wait_for_log_message -2 "*Diskless rdb transfer, done reading from pipe, 1 replicas still up*" $loglines 1 1 } # make sure we don't have a busy loop going thought epoll_wait diff --git a/tests/support/util.tcl b/tests/support/util.tcl index 8bec95374..fce3ffd18 100644 --- a/tests/support/util.tcl +++ b/tests/support/util.tcl @@ -99,11 +99,27 @@ proc wait_for_ofs_sync {r1 r2} { } } -proc wait_for_log_message {srv_idx pattern last_lines maxtries delay} { +# count current log lines in server's stdout +proc count_log_lines {srv_idx} { + set _ [exec wc -l < [srv $srv_idx 
stdout]] +} + +# verify pattern exists in server's sdtout after a certain line number +proc verify_log_message {srv_idx pattern from_line} { + set lines_after [count_log_lines] + set lines [expr $lines_after - $from_line] + set result [exec tail -$lines < [srv $srv_idx stdout]] + if {![string match $pattern $result]} { + error "assertion:expected message not found in log file: $pattern" + } +} + +# wait for pattern to be found in server's stdout after certain line number +proc wait_for_log_message {srv_idx pattern from_line maxtries delay} { set retry $maxtries set stdout [srv $srv_idx stdout] while {$retry} { - set result [exec tail -$last_lines < $stdout] + set result [exec tail +$from_line < $stdout] set result [split $result "\n"] foreach line $result { if {[string match $pattern $line]} { @@ -114,7 +130,7 @@ proc wait_for_log_message {srv_idx pattern last_lines maxtries delay} { after $delay } if {$retry == 0} { - fail "log message of '$pattern' not found" + fail "log message of '$pattern' not found in $stdout after line: $from_line" } } diff --git a/tests/unit/moduleapi/testrdb.tcl b/tests/unit/moduleapi/testrdb.tcl index a93b34b69..98641ae0a 100644 --- a/tests/unit/moduleapi/testrdb.tcl +++ b/tests/unit/moduleapi/testrdb.tcl @@ -67,6 +67,7 @@ tags "modules" { } # Start the replication process... + set loglines [count_log_lines -1] $master config set repl-diskless-sync-delay 0 $replica replicaof $master_host $master_port @@ -76,7 +77,7 @@ tags "modules" { for {set i 0} {$i < $attempts} {incr i} { # wait for the replica to start reading the rdb # using the log file since the replica only responds to INFO once in 2mb - wait_for_log_message -1 "*Loading DB in memory*" 5 2000 1 + wait_for_log_message -1 "*Loading DB in memory*" $loglines 2000 1 # add some additional random sleep so that we kill the master on a different place each time after [expr {int(rand()*100)}] @@ -85,7 +86,7 @@ tags "modules" { set killed [$master client kill type replica] if {[catch { - set res [wait_for_log_message -1 "*Internal error in RDB*" 5 100 10] + set res [wait_for_log_message -1 "*Internal error in RDB*" $loglines 100 10] if {$::verbose} { puts $res } @@ -98,6 +99,7 @@ tags "modules" { $master config set repl-backlog-size [expr {16384 + $i}] } # wait for loading to stop (fail) + set loglines [count_log_lines -1] wait_for_condition 100 10 { [s -1 loading] eq 0 } else { From 33ca884cc5cce678f35e98412acab80482f7bd6c Mon Sep 17 00:00:00 2001 From: Oran Agra Date: Fri, 10 Jul 2020 08:29:02 +0300 Subject: [PATCH 030/215] skip a test that uses +inf on valgrind (#7440) On some platforms strtold("+inf") with valgrind returns a non-inf result [err]: INCRBYFLOAT does not allow NaN or Infinity in tests/unit/type/incr.tcl Expected 'ERR*would produce*' to equal or match '1189731495357231765085759.....' (cherry picked from commit 909bc97c526db757a3d022b29911ff6d08eba59c) --- tests/unit/type/incr.tcl | 21 ++++++++++++--------- 1 file changed, 12 insertions(+), 9 deletions(-) diff --git a/tests/unit/type/incr.tcl b/tests/unit/type/incr.tcl index b7a135203..dbf45e455 100644 --- a/tests/unit/type/incr.tcl +++ b/tests/unit/type/incr.tcl @@ -130,15 +130,18 @@ start_server {tags {"incr"}} { format $err } {WRONGTYPE*} - test {INCRBYFLOAT does not allow NaN or Infinity} { - r set foo 0 - set err {} - catch {r incrbyfloat foo +inf} err - set err - # p.s. no way I can force NaN to test it from the API because - # there is no way to increment / decrement by infinity nor to - # perform divisions. 
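The INCRBYFLOAT test above is skipped under valgrind because, as the commit message notes, valgrind's emulation of strtold() can return a non-infinite value for "+inf", so the server-side rejection never fires. A tiny standalone program showing the kind of validation involved (an illustration of the principle, not the actual incrbyfloatCommand code):

    #include <stdio.h>
    #include <stdlib.h>
    #include <math.h>

    int main(void) {
        const char *input = "+inf";
        char *end;
        long double v = strtold(input, &end);
        /* INCRBYFLOAT-style check: a value that parses to NaN or Infinity
         * must be refused, otherwise the key would hold a non-numeric string. */
        if (end == input || *end != '\0' || isnan(v) || isinf(v))
            printf("ERR increment would produce NaN or Infinity\n");
        else
            printf("accepted: %.17Lg\n", v);
        return 0;
    }

On a normal platform this prints the error, matching the `ERR*would produce*` pattern the test expects; under valgrind the isinf() branch may not trigger, which is why the test is now conditional.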
- } {ERR*would produce*} + # On some platforms strtold("+inf") with valgrind returns a non-inf result + if {!$::valgrind} { + test {INCRBYFLOAT does not allow NaN or Infinity} { + r set foo 0 + set err {} + catch {r incrbyfloat foo +inf} err + set err + # p.s. no way I can force NaN to test it from the API because + # there is no way to increment / decrement by infinity nor to + # perform divisions. + } {ERR*would produce*} + } test {INCRBYFLOAT decrement} { r set foo 1 From 7b21b8c3fbf288f379f489c3861fa05e04fe770c Mon Sep 17 00:00:00 2001 From: huangzhw Date: Fri, 10 Jul 2020 13:29:44 +0800 Subject: [PATCH 031/215] defrag.c activeDefragSdsListAndDict when defrag sdsele, We can't use (#7492) it to calculate hash, we should use newsds. (cherry picked from commit d6180c8c8674ffdae3d6efa5f946d85fe9163464) --- src/defrag.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/defrag.c b/src/defrag.c index 6e5296632..2d8db8ea5 100644 --- a/src/defrag.c +++ b/src/defrag.c @@ -348,7 +348,7 @@ long activeDefragSdsListAndDict(list *l, dict *d, int dict_val_type) { sdsele = ln->value; if ((newsds = activeDefragSds(sdsele))) { /* When defragging an sds value, we need to update the dict key */ - uint64_t hash = dictGetHash(d, sdsele); + uint64_t hash = dictGetHash(d, newsds); replaceSateliteDictKeyPtrAndOrDefragDictEntry(d, sdsele, newsds, hash, &defragged); ln->value = newsds; defragged++; From 95ba01b53849d56e5699aebb5db532563f976491 Mon Sep 17 00:00:00 2001 From: Oran Agra Date: Fri, 10 Jul 2020 10:02:37 +0300 Subject: [PATCH 032/215] RESTORE ABSTTL won't store expired keys into the db (#7472) Similarly to EXPIREAT with TTL in the past, which implicitly deletes the key and return success, RESTORE should not store key that are already expired into the db. When used together with REPLACE it should emit a DEL to keyspace notification and replication stream. (cherry picked from commit 5977a94842a25140520297fe4bfda15e0e4de711) --- src/cluster.c | 30 ++++++++++++++++++++++-------- src/expire.c | 18 +++++++++++------- src/server.h | 1 + tests/unit/dump.tcl | 13 ++++++++++++- 4 files changed, 46 insertions(+), 16 deletions(-) diff --git a/src/cluster.c b/src/cluster.c index e7a32a9a2..88b810d13 100644 --- a/src/cluster.c +++ b/src/cluster.c @@ -4988,7 +4988,8 @@ void restoreCommand(client *c) { } /* Make sure this key does not already exist here... */ - if (!replace && lookupKeyWrite(c->db,c->argv[1]) != NULL) { + robj *key = c->argv[1]; + if (!replace && lookupKeyWrite(c->db,key) != NULL) { addReply(c,shared.busykeyerr); return; } @@ -5010,24 +5011,37 @@ void restoreCommand(client *c) { rioInitWithBuffer(&payload,c->argv[3]->ptr); if (((type = rdbLoadObjectType(&payload)) == -1) || - ((obj = rdbLoadObject(type,&payload,c->argv[1]->ptr)) == NULL)) + ((obj = rdbLoadObject(type,&payload,key->ptr)) == NULL)) { addReplyError(c,"Bad data format"); return; } /* Remove the old key if needed. 
*/ - if (replace) dbDelete(c->db,c->argv[1]); + int deleted = 0; + if (replace) + deleted = dbDelete(c->db,key); + + if (ttl && !absttl) ttl+=mstime(); + if (ttl && checkAlreadyExpired(ttl)) { + if (deleted) { + rewriteClientCommandVector(c,2,shared.del,key); + signalModifiedKey(c,c->db,key); + notifyKeyspaceEvent(NOTIFY_GENERIC,"del",key,c->db->id); + server.dirty++; + } + addReply(c, shared.ok); + return; + } /* Create the key and set the TTL if any */ - dbAdd(c->db,c->argv[1],obj); + dbAdd(c->db,key,obj); if (ttl) { - if (!absttl) ttl+=mstime(); - setExpire(c,c->db,c->argv[1],ttl); + setExpire(c,c->db,key,ttl); } objectSetLRUOrLFU(obj,lfu_freq,lru_idle,lru_clock,1000); - signalModifiedKey(c,c->db,c->argv[1]); - notifyKeyspaceEvent(NOTIFY_GENERIC,"restore",c->argv[1],c->db->id); + signalModifiedKey(c,c->db,key); + notifyKeyspaceEvent(NOTIFY_GENERIC,"restore",key,c->db->id); addReply(c,shared.ok); server.dirty++; } diff --git a/src/expire.c b/src/expire.c index 30a27193d..f2d135e2b 100644 --- a/src/expire.c +++ b/src/expire.c @@ -475,6 +475,16 @@ void flushSlaveKeysWithExpireList(void) { } } +int checkAlreadyExpired(long long when) { + /* EXPIRE with negative TTL, or EXPIREAT with a timestamp into the past + * should never be executed as a DEL when load the AOF or in the context + * of a slave instance. + * + * Instead we add the already expired key to the database with expire time + * (possibly in the past) and wait for an explicit DEL from the master. */ + return (when <= mstime() && !server.loading && !server.masterhost); +} + /*----------------------------------------------------------------------------- * Expires Commands *----------------------------------------------------------------------------*/ @@ -502,13 +512,7 @@ void expireGenericCommand(client *c, long long basetime, int unit) { return; } - /* EXPIRE with negative TTL, or EXPIREAT with a timestamp into the past - * should never be executed as a DEL when load the AOF or in the context - * of a slave instance. - * - * Instead we take the other branch of the IF statement setting an expire - * (possibly in the past) and wait for an explicit DEL from the master. */ - if (when <= mstime() && !server.loading && !server.masterhost) { + if (checkAlreadyExpired(when)) { robj *aux; int deleted = server.lazyfree_lazy_expire ? 
dbAsyncDelete(c->db,key) : diff --git a/src/server.h b/src/server.h index 6c36385e1..8c0facd04 100644 --- a/src/server.h +++ b/src/server.h @@ -2070,6 +2070,7 @@ void propagateExpire(redisDb *db, robj *key, int lazy); int expireIfNeeded(redisDb *db, robj *key); long long getExpire(redisDb *db, robj *key); void setExpire(client *c, redisDb *db, robj *key, long long when); +int checkAlreadyExpired(long long when); robj *lookupKey(redisDb *db, robj *key, int flags); robj *lookupKeyRead(redisDb *db, robj *key); robj *lookupKeyWrite(redisDb *db, robj *key); diff --git a/tests/unit/dump.tcl b/tests/unit/dump.tcl index 062d803b5..a9def9206 100644 --- a/tests/unit/dump.tcl +++ b/tests/unit/dump.tcl @@ -36,7 +36,18 @@ start_server {tags {"dump"}} { assert {$ttl >= 2900 && $ttl <= 3100} r get foo } {bar} - + + test {RESTORE with ABSTTL in the past} { + r set foo bar + set encoded [r dump foo] + set now [clock milliseconds] + r debug set-active-expire 0 + r restore foo [expr $now-3000] $encoded absttl REPLACE + catch {r debug object foo} e + r debug set-active-expire 1 + set e + } {ERR no such key} + test {RESTORE can set LRU} { r set foo bar set encoded [r dump foo] From 183bdc11c0d3a99eefee1871a567a1911ee0d4f6 Mon Sep 17 00:00:00 2001 From: Oran Agra Date: Sun, 5 Jul 2020 12:36:25 +0300 Subject: [PATCH 033/215] redis-cli --bigkeys fixed to handle non-printable key names (cherry picked from commit 6f8a8647de5b16ff937dc25046151619a2eafabb) --- src/redis-cli.c | 40 ++++++++++++++++++++++++---------------- 1 file changed, 24 insertions(+), 16 deletions(-) diff --git a/src/redis-cli.c b/src/redis-cli.c index 75845f346..e1f40373a 100644 --- a/src/redis-cli.c +++ b/src/redis-cli.c @@ -7246,7 +7246,9 @@ static void getKeyTypes(dict *types_dict, redisReply *keys, typeinfo **types) { /* Pipeline TYPE commands */ for(i=0;ielements;i++) { - redisAppendCommand(context, "TYPE %s", keys->element[i]->str); + const char* argv[] = {"TYPE", keys->element[i]->str}; + size_t lens[] = {4, keys->element[i]->len}; + redisAppendCommandArgv(context, 2, argv, lens); } /* Retrieve types */ @@ -7292,15 +7294,21 @@ static void getKeySizes(redisReply *keys, typeinfo **types, if(!types[i] || (!types[i]->sizecmd && !memkeys)) continue; - if (!memkeys) - redisAppendCommand(context, "%s %s", - types[i]->sizecmd, keys->element[i]->str); - else if (memkeys_samples==0) - redisAppendCommand(context, "%s %s %s", - "MEMORY", "USAGE", keys->element[i]->str); - else - redisAppendCommand(context, "%s %s %s SAMPLES %u", - "MEMORY", "USAGE", keys->element[i]->str, memkeys_samples); + if (!memkeys) { + const char* argv[] = {types[i]->sizecmd, keys->element[i]->str}; + size_t lens[] = {strlen(types[i]->sizecmd), keys->element[i]->len}; + redisAppendCommandArgv(context, 2, argv, lens); + } else if (memkeys_samples==0) { + const char* argv[] = {"MEMORY", "USAGE", keys->element[i]->str}; + size_t lens[] = {6, 5, keys->element[i]->len}; + redisAppendCommandArgv(context, 3, argv, lens); + } else { + sds samplesstr = sdsfromlonglong(memkeys_samples); + const char* argv[] = {"MEMORY", "USAGE", keys->element[i]->str, "SAMPLES", samplesstr}; + size_t lens[] = {6, 5, keys->element[i]->len, 7, sdslen(samplesstr)}; + redisAppendCommandArgv(context, 5, argv, lens); + sdsfree(samplesstr); + } } /* Retrieve sizes */ @@ -7396,20 +7404,20 @@ static void findBigKeys(int memkeys, unsigned memkeys_samples) { sampled++; if(type->biggestname, keys->element[i]->str, sizes[i], - !memkeys? 
type->sizeunit: "bytes"); - /* Keep track of biggest key name for this type */ if (type->biggest_key) sdsfree(type->biggest_key); - type->biggest_key = sdsnew(keys->element[i]->str); + type->biggest_key = sdscatrepr(sdsempty(), keys->element[i]->str, keys->element[i]->len); if(!type->biggest_key) { fprintf(stderr, "Failed to allocate memory for key!\n"); exit(1); } + printf( + "[%05.2f%%] Biggest %-6s found so far '%s' with %llu %s\n", + pct, type->name, type->biggest_key, sizes[i], + !memkeys? type->sizeunit: "bytes"); + /* Keep track of the biggest size for this type */ type->biggest = sizes[i]; } From a9d1014fb28600ee6e569739546a3a4a0b8a1763 Mon Sep 17 00:00:00 2001 From: Oran Agra Date: Tue, 7 Jul 2020 16:15:44 +0300 Subject: [PATCH 034/215] redis-cli --hotkeys fixed to handle non-printable key names (cherry picked from commit b23e2510364bd13d9988921235c400c5657a8438) --- src/redis-cli.c | 16 +++++++++++----- 1 file changed, 11 insertions(+), 5 deletions(-) diff --git a/src/redis-cli.c b/src/redis-cli.c index e1f40373a..c5ba48447 100644 --- a/src/redis-cli.c +++ b/src/redis-cli.c @@ -7481,21 +7481,27 @@ static void getKeyFreqs(redisReply *keys, unsigned long long *freqs) { /* Pipeline OBJECT freq commands */ for(i=0;ielements;i++) { - redisAppendCommand(context, "OBJECT freq %s", keys->element[i]->str); + const char* argv[] = {"OBJECT", "FREQ", keys->element[i]->str}; + size_t lens[] = {6, 4, keys->element[i]->len}; + redisAppendCommandArgv(context, 3, argv, lens); } /* Retrieve freqs */ for(i=0;ielements;i++) { if(redisGetReply(context, (void**)&reply)!=REDIS_OK) { + sds keyname = sdscatrepr(sdsempty(), keys->element[i]->str, keys->element[i]->len); fprintf(stderr, "Error getting freq for key '%s' (%d: %s)\n", - keys->element[i]->str, context->err, context->errstr); + keyname, context->err, context->errstr); + sdsfree(keyname); exit(1); } else if(reply->type != REDIS_REPLY_INTEGER) { if(reply->type == REDIS_REPLY_ERROR) { fprintf(stderr, "Error: %s\n", reply->str); exit(1); } else { - fprintf(stderr, "Warning: OBJECT freq on '%s' failed (may have been deleted)\n", keys->element[i]->str); + sds keyname = sdscatrepr(sdsempty(), keys->element[i]->str, keys->element[i]->len); + fprintf(stderr, "Warning: OBJECT freq on '%s' failed (may have been deleted)\n", keyname); + sdsfree(keyname); freqs[i] = 0; } } else { @@ -7566,10 +7572,10 @@ static void findHotKeys(void) { memmove(hotkeys,hotkeys+1,sizeof(hotkeys[0])*k); } counters[k] = freqs[i]; - hotkeys[k] = sdsnew(keys->element[i]->str); + hotkeys[k] = sdscatrepr(sdsempty(), keys->element[i]->str, keys->element[i]->len); printf( "[%05.2f%%] Hot key '%s' found so far with counter %llu\n", - pct, keys->element[i]->str, freqs[i]); + pct, hotkeys[k], freqs[i]); } /* Sleep if we've been directed to do so */ From b057ff81ee4792436c98d0d852bf1670473d341e Mon Sep 17 00:00:00 2001 From: Yossi Gottlieb Date: Fri, 10 Jul 2020 10:25:55 +0300 Subject: [PATCH 035/215] TLS: Add missing redis-cli options. (#7456) * Tests: fix and reintroduce redis-cli tests. These tests have been broken and disabled for 10 years now! * TLS: add remaining redis-cli support. This adds support for the redis-cli --pipe, --rdb and --replica options previously unsupported in --tls mode. * Fix writeConn(). 
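The two preceding redis-cli patches (033 and 034) stop formatting key names with "%s": a key is an arbitrary byte string, so commands are now built from explicit argument/length pairs via redisAppendCommandArgv() and printed names go through sdscatrepr() so control bytes appear escaped. A tiny standalone demo of the underlying problem (the key value is invented for the example):

    #include <stdio.h>
    #include <string.h>

    int main(void) {
        /* A perfectly valid Redis key that happens to contain a NUL byte. */
        const char key[] = "user\0session";
        size_t keylen = sizeof(key) - 1;  /* 12 bytes, as the server reports it */

        printf("printf-style \"%%s\" would send %zu bytes (\"%s\")\n",
               strlen(key), key);
        printf("an explicit (ptr,len) pair preserves all %zu bytes\n", keylen);
        return 0;
    }

This is why getKeyTypes(), getKeySizes() and getKeyFreqs() switch to argv[]/lens[] arrays, and why the biggest/hottest key names are escaped before being printed instead of being written raw to the terminal.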
(cherry picked from commit d9f970d8d3f0b694f1e8915cab6d4eab9cfb2ef1) --- src/redis-cli.c | 102 +++++++++++++++++++--------- tests/integration/redis-cli.tcl | 115 ++++++++++++++++++++++++++------ tests/test_helper.tcl | 1 + 3 files changed, 166 insertions(+), 52 deletions(-) diff --git a/src/redis-cli.c b/src/redis-cli.c index c5ba48447..0148964bf 100644 --- a/src/redis-cli.c +++ b/src/redis-cli.c @@ -1989,6 +1989,7 @@ static void repl(void) { if (argv == NULL) { printf("Invalid argument(s)\n"); + fflush(stdout); linenoiseFree(line); continue; } else if (argc > 0) { @@ -6784,10 +6785,53 @@ void sendCapa() { sendReplconf("capa", "eof"); } +/* Wrapper around hiredis to allow arbitrary reads and writes. + * + * We piggybacks on top of hiredis to achieve transparent TLS support, + * and use its internal buffers so it can co-exist with commands + * previously/later issued on the connection. + * + * Interface is close to enough to read()/write() so things should mostly + * work transparently. + */ + +/* Write a raw buffer through a redisContext. If we already have something + * in the buffer (leftovers from hiredis operations) it will be written + * as well. + */ +static ssize_t writeConn(redisContext *c, const char *buf, size_t buf_len) +{ + int done = 0; + + c->obuf = sdscatlen(c->obuf, buf, buf_len); + if (redisBufferWrite(c, &done) == REDIS_ERR) { + sdsrange(c->obuf, 0, -(buf_len+1)); + if (!(c->flags & REDIS_BLOCK)) + errno = EAGAIN; + return -1; + } + + size_t left = sdslen(c->obuf); + sdsclear(c->obuf); + if (!done) { + return buf_len - left; + } + + return buf_len; +} + +/* Read raw bytes through a redisContext. The read operation is not greedy + * and may not fill the buffer entirely. + */ +static ssize_t readConn(redisContext *c, char *buf, size_t len) +{ + return c->funcs->read(c, buf, len); +} + /* Sends SYNC and reads the number of bytes in the payload. Used both by * slaveMode() and getRDB(). * returns 0 in case an EOF marker is used. */ -unsigned long long sendSync(int fd, char *out_eof) { +unsigned long long sendSync(redisContext *c, char *out_eof) { /* To start we need to send the SYNC command and return the payload. * The hiredis client lib does not understand this part of the protocol * and we don't want to mess with its buffers, so everything is performed @@ -6796,7 +6840,7 @@ unsigned long long sendSync(int fd, char *out_eof) { ssize_t nread; /* Send the SYNC command. */ - if (write(fd,"SYNC\r\n",6) != 6) { + if (writeConn(c, "SYNC\r\n", 6) != 6) { fprintf(stderr,"Error writing to master\n"); exit(1); } @@ -6804,7 +6848,7 @@ unsigned long long sendSync(int fd, char *out_eof) { /* Read $\r\n, making sure to read just up to "\n" */ p = buf; while(1) { - nread = read(fd,p,1); + nread = readConn(c,p,1); if (nread <= 0) { fprintf(stderr,"Error reading bulk length while SYNCing\n"); exit(1); @@ -6825,11 +6869,10 @@ unsigned long long sendSync(int fd, char *out_eof) { } static void slaveMode(void) { - int fd = context->fd; static char eofmark[RDB_EOF_MARK_SIZE]; static char lastbytes[RDB_EOF_MARK_SIZE]; static int usemark = 0; - unsigned long long payload = sendSync(fd, eofmark); + unsigned long long payload = sendSync(context,eofmark); char buf[1024]; int original_output = config.output; @@ -6849,7 +6892,7 @@ static void slaveMode(void) { while(payload) { ssize_t nread; - nread = read(fd,buf,(payload > sizeof(buf)) ? sizeof(buf) : payload); + nread = readConn(context,buf,(payload > sizeof(buf)) ? 
sizeof(buf) : payload); if (nread <= 0) { fprintf(stderr,"Error reading RDB payload while SYNCing\n"); exit(1); @@ -6892,14 +6935,15 @@ static void slaveMode(void) { /* This function implements --rdb, so it uses the replication protocol in order * to fetch the RDB file from a remote server. */ static void getRDB(clusterManagerNode *node) { - int s, fd; + int fd; + redisContext *s; char *filename; if (node != NULL) { assert(node->context); - s = node->context->fd; + s = node->context; filename = clusterManagerGetNodeRDBFilename(node); } else { - s = context->fd; + s = context; filename = config.rdb_filename; } static char eofmark[RDB_EOF_MARK_SIZE]; @@ -6934,7 +6978,7 @@ static void getRDB(clusterManagerNode *node) { while(payload) { ssize_t nread, nwritten; - nread = read(s,buf,(payload > sizeof(buf)) ? sizeof(buf) : payload); + nread = readConn(s,buf,(payload > sizeof(buf)) ? sizeof(buf) : payload); if (nread <= 0) { fprintf(stderr,"I/O Error reading RDB payload from socket\n"); exit(1); @@ -6968,7 +7012,7 @@ static void getRDB(clusterManagerNode *node) { } else { fprintf(stderr,"Transfer finished with success.\n"); } - close(s); /* Close the file descriptor ASAP as fsync() may take time. */ + redisFree(s); /* Close the file descriptor ASAP as fsync() may take time. */ fsync(fd); close(fd); fprintf(stderr,"Transfer finished with success.\n"); @@ -6985,11 +7029,9 @@ static void getRDB(clusterManagerNode *node) { #define PIPEMODE_WRITE_LOOP_MAX_BYTES (128*1024) static void pipeMode(void) { - int fd = context->fd; long long errors = 0, replies = 0, obuf_len = 0, obuf_pos = 0; - char ibuf[1024*16], obuf[1024*16]; /* Input and output buffers */ + char obuf[1024*16]; /* Output buffer */ char aneterr[ANET_ERR_LEN]; - redisReader *reader = redisReaderCreate(); redisReply *reply; int eof = 0; /* True once we consumed all the standard input. */ int done = 0; @@ -6999,47 +7041,38 @@ static void pipeMode(void) { srand(time(NULL)); /* Use non blocking I/O. */ - if (anetNonBlock(aneterr,fd) == ANET_ERR) { + if (anetNonBlock(aneterr,context->fd) == ANET_ERR) { fprintf(stderr, "Can't set the socket in non blocking mode: %s\n", aneterr); exit(1); } + context->flags &= ~REDIS_BLOCK; + /* Transfer raw protocol and read replies from the server at the same * time. */ while(!done) { int mask = AE_READABLE; if (!eof || obuf_len != 0) mask |= AE_WRITABLE; - mask = aeWait(fd,mask,1000); + mask = aeWait(context->fd,mask,1000); /* Handle the readable state: we can read replies from the server. */ if (mask & AE_READABLE) { - ssize_t nread; int read_error = 0; - /* Read from socket and feed the hiredis reader. */ do { - nread = read(fd,ibuf,sizeof(ibuf)); - if (nread == -1 && errno != EAGAIN && errno != EINTR) { - fprintf(stderr, "Error reading from the server: %s\n", - strerror(errno)); + if (!read_error && redisBufferRead(context) == REDIS_ERR) { read_error = 1; - break; } - if (nread > 0) { - redisReaderFeed(reader,ibuf,nread); - last_read_time = time(NULL); - } - } while(nread > 0); - /* Consume replies. */ - do { - if (redisReaderGetReply(reader,(void**)&reply) == REDIS_ERR) { + reply = NULL; + if (redisGetReply(context, (void **) &reply) == REDIS_ERR) { fprintf(stderr, "Error reading replies from server\n"); exit(1); } if (reply) { + last_read_time = time(NULL); if (reply->type == REDIS_REPLY_ERROR) { fprintf(stderr,"%s\n", reply->str); errors++; @@ -7072,7 +7105,7 @@ static void pipeMode(void) { while(1) { /* Transfer current buffer to server. 
*/ if (obuf_len != 0) { - ssize_t nwritten = write(fd,obuf+obuf_pos,obuf_len); + ssize_t nwritten = writeConn(context,obuf+obuf_pos,obuf_len); if (nwritten == -1) { if (errno != EAGAIN && errno != EINTR) { @@ -7088,6 +7121,10 @@ static void pipeMode(void) { loop_nwritten += nwritten; if (obuf_len != 0) break; /* Can't accept more data. */ } + if (context->err) { + fprintf(stderr, "Server I/O Error: %s\n", context->errstr); + exit(1); + } /* If buffer is empty, load from stdin. */ if (obuf_len == 0 && !eof) { ssize_t nread = read(STDIN_FILENO,obuf,sizeof(obuf)); @@ -7138,7 +7175,6 @@ static void pipeMode(void) { break; } } - redisReaderFree(reader); printf("errors: %lld, replies: %lld\n", errors, replies); if (errors) exit(1); diff --git a/tests/integration/redis-cli.tcl b/tests/integration/redis-cli.tcl index 5d1635950..016e4915c 100644 --- a/tests/integration/redis-cli.tcl +++ b/tests/integration/redis-cli.tcl @@ -1,14 +1,13 @@ source tests/support/cli.tcl start_server {tags {"cli"}} { - proc open_cli {} { + proc open_cli {{opts "-n 9"}} { set ::env(TERM) dumb - set cmdline [rediscli [srv port] "-n 9"] + set cmdline [rediscli [srv port] $opts] set fd [open "|$cmdline" "r+"] fconfigure $fd -buffering none fconfigure $fd -blocking false fconfigure $fd -translation binary - assert_equal "redis> " [read_cli $fd] set _ $fd } @@ -32,11 +31,14 @@ start_server {tags {"cli"}} { } # Helpers to run tests in interactive mode + + proc format_output {output} { + set _ [string trimright [regsub -all "\r" $output ""] "\n"] + } + proc run_command {fd cmd} { write_cli $fd $cmd - set lines [split [read_cli $fd] "\n"] - assert_equal "redis> " [lindex $lines end] - join [lrange $lines 0 end-1] "\n" + set _ [format_output [read_cli $fd]] } proc test_interactive_cli {name code} { @@ -58,7 +60,7 @@ start_server {tags {"cli"}} { proc _run_cli {opts args} { set cmd [rediscli [srv port] [list -n 9 {*}$args]] - foreach {key value} $args { + foreach {key value} $opts { if {$key eq "pipe"} { set cmd "sh -c \"$value | $cmd\"" } @@ -72,7 +74,7 @@ start_server {tags {"cli"}} { fconfigure $fd -translation binary set resp [read $fd 1048576] close $fd - set _ $resp + set _ [format_output $resp] } proc run_cli {args} { @@ -80,11 +82,11 @@ start_server {tags {"cli"}} { } proc run_cli_with_input_pipe {cmd args} { - _run_cli [list pipe $cmd] {*}$args + _run_cli [list pipe $cmd] -x {*}$args } proc run_cli_with_input_file {path args} { - _run_cli [list path $path] {*}$args + _run_cli [list path $path] -x {*}$args } proc test_nontty_cli {name code} { @@ -101,7 +103,7 @@ start_server {tags {"cli"}} { test_interactive_cli "INFO response should be printed raw" { set lines [split [run_command $fd info] "\n"] foreach line $lines { - assert [regexp {^[a-z0-9_]+:[a-z0-9_]+} $line] + assert [regexp {^$|^#|^[a-z0-9_]+:.+} $line] } } @@ -121,7 +123,7 @@ start_server {tags {"cli"}} { test_interactive_cli "Multi-bulk reply" { r rpush list foo r rpush list bar - assert_equal "1. \"foo\"\n2. 
\"bar\"" [run_command $fd "lrange list 0 -1"] + assert_equal "1) \"foo\"\n2) \"bar\"" [run_command $fd "lrange list 0 -1"] } test_interactive_cli "Parsing quotes" { @@ -144,35 +146,35 @@ start_server {tags {"cli"}} { } test_tty_cli "Status reply" { - assert_equal "OK\n" [run_cli set key bar] + assert_equal "OK" [run_cli set key bar] assert_equal "bar" [r get key] } test_tty_cli "Integer reply" { r del counter - assert_equal "(integer) 1\n" [run_cli incr counter] + assert_equal "(integer) 1" [run_cli incr counter] } test_tty_cli "Bulk reply" { r set key "tab\tnewline\n" - assert_equal "\"tab\\tnewline\\n\"\n" [run_cli get key] + assert_equal "\"tab\\tnewline\\n\"" [run_cli get key] } test_tty_cli "Multi-bulk reply" { r del list r rpush list foo r rpush list bar - assert_equal "1. \"foo\"\n2. \"bar\"\n" [run_cli lrange list 0 -1] + assert_equal "1) \"foo\"\n2) \"bar\"" [run_cli lrange list 0 -1] } test_tty_cli "Read last argument from pipe" { - assert_equal "OK\n" [run_cli_with_input_pipe "echo foo" set key] + assert_equal "OK" [run_cli_with_input_pipe "echo foo" set key] assert_equal "foo\n" [r get key] } test_tty_cli "Read last argument from file" { set tmpfile [write_tmpfile "from file"] - assert_equal "OK\n" [run_cli_with_input_file $tmpfile set key] + assert_equal "OK" [run_cli_with_input_file $tmpfile set key] assert_equal "from file" [r get key] } @@ -188,7 +190,7 @@ start_server {tags {"cli"}} { test_nontty_cli "Bulk reply" { r set key "tab\tnewline\n" - assert_equal "tab\tnewline\n" [run_cli get key] + assert_equal "tab\tnewline" [run_cli get key] } test_nontty_cli "Multi-bulk reply" { @@ -208,4 +210,79 @@ start_server {tags {"cli"}} { assert_equal "OK" [run_cli_with_input_file $tmpfile set key] assert_equal "from file" [r get key] } + + proc test_redis_cli_rdb_dump {} { + r flushdb + + set dir [lindex [r config get dir] 1] + + assert_equal "OK" [r debug populate 100000 key 1000] + catch {run_cli --rdb "$dir/cli.rdb"} output + assert_match {*Transfer finished with success*} $output + + file delete "$dir/dump.rdb" + file rename "$dir/cli.rdb" "$dir/dump.rdb" + + assert_equal "OK" [r set should-not-exist 1] + assert_equal "OK" [r debug reload nosave] + assert_equal {} [r get should-not-exist] + } + + test_nontty_cli "Dumping an RDB" { + # Disk-based master + assert_match "OK" [r config set repl-diskless-sync no] + test_redis_cli_rdb_dump + + # Disk-less master + assert_match "OK" [r config set repl-diskless-sync yes] + assert_match "OK" [r config set repl-diskless-sync-delay 0] + test_redis_cli_rdb_dump + } + + test_nontty_cli "Connecting as a replica" { + set fd [open_cli "--replica"] + wait_for_condition 50 500 { + [string match {*slave0:*state=online*} [r info]] + } else { + fail "redis-cli --replica did not connect" + } + + for {set i 0} {$i < 100} {incr i} { + r set test-key test-value-$i + } + r client kill type slave + catch { + assert_match {*SET*key-a*} [read_cli $fd] + } + + close_cli $fd + } + + test_nontty_cli "Piping raw protocol" { + set fd [open_cli "--pipe"] + fconfigure $fd -blocking true + + # Create a new deferring client and overwrite its fd + set client [redis [srv 0 "host"] [srv 0 "port"] 1 0] + set ::redis::fd($::redis::id) $fd + $client select 9 + + r del test-counter + for {set i 0} {$i < 10000} {incr i} { + $client incr test-counter + $client set large-key [string repeat "x" 20000] + } + + for {set i 0} {$i < 1000} {incr i} { + $client set very-large-key [string repeat "x" 512000] + } + + close $fd write + set output [read_cli $fd] + + assert_equal {10000} [r 
get test-counter] + assert_match {*All data transferred*errors: 0*replies: 21001*} $output + + close_cli $fd + } } diff --git a/tests/test_helper.tcl b/tests/test_helper.tcl index ef9bf7fdf..1527aa1b5 100644 --- a/tests/test_helper.tcl +++ b/tests/test_helper.tcl @@ -48,6 +48,7 @@ set ::all_tests { integration/psync2 integration/psync2-reg integration/psync2-pingoff + integration/redis-cli unit/pubsub unit/slowlog unit/scripting From 2d3687655b8bad2748b1c266ef5104bf171c5ee4 Mon Sep 17 00:00:00 2001 From: James Hilliard Date: Fri, 10 Jul 2020 01:30:09 -0600 Subject: [PATCH 036/215] Use pkg-config to properly detect libssl and libcrypto libraries (#7452) (cherry picked from commit 6a014af79a9cbafc0be946047f216544f116b598) --- src/Makefile | 18 +++++++++++++++--- 1 file changed, 15 insertions(+), 3 deletions(-) diff --git a/src/Makefile b/src/Makefile index b8c05c32b..80c627c24 100644 --- a/src/Makefile +++ b/src/Makefile @@ -192,9 +192,21 @@ ifeq ($(MALLOC),jemalloc) endif ifeq ($(BUILD_TLS),yes) - FINAL_CFLAGS+=-DUSE_OPENSSL $(OPENSSL_CFLAGS) - FINAL_LDFLAGS+=$(OPENSSL_LDFLAGS) - FINAL_LIBS += ../deps/hiredis/libhiredis_ssl.a -lssl -lcrypto + FINAL_CFLAGS+=-DUSE_OPENSSL $(OPENSSL_CFLAGS) + FINAL_LDFLAGS+=$(OPENSSL_LDFLAGS) + LIBSSL_PKGCONFIG := $(shell $(PKG_CONFIG) --exists libssl && echo $$?) +ifeq ($(LIBSSL_PKGCONFIG),0) + LIBSSL_LIBS=$(shell $(PKG_CONFIG) --libs libssl) +else + LIBSSL_LIBS=-lssl +endif + LIBCRYPTO_PKGCONFIG := $(shell $(PKG_CONFIG) --exists libcrypto && echo $$?) +ifeq ($(LIBCRYPTO_PKGCONFIG),0) + LIBCRYPTO_LIBS=$(shell $(PKG_CONFIG) --libs libcrypto) +else + LIBCRYPTO_LIBS=-lcrypto +endif + FINAL_LIBS += ../deps/hiredis/libhiredis_ssl.a $(LIBSSL_LIBS) $(LIBCRYPTO_LIBS) endif REDIS_CC=$(QUIET_CC)$(CC) $(FINAL_CFLAGS) From 6f1e828454c6cda25f39d841c1909028ce97ba31 Mon Sep 17 00:00:00 2001 From: Yossi Gottlieb Date: Fri, 10 Jul 2020 10:32:21 +0300 Subject: [PATCH 037/215] TLS: Ignore client cert when tls-auth-clients off. (#7457) (cherry picked from commit 5266293a0fdee57fe6bb8a408a2e2ff0c66f0259) --- src/tls.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/src/tls.c b/src/tls.c index a62f2284e..4b9948195 100644 --- a/src/tls.c +++ b/src/tls.c @@ -337,9 +337,7 @@ connection *connCreateAcceptedTLS(int fd, int require_auth) { conn->c.state = CONN_STATE_ACCEPTING; if (!require_auth) { - /* We still verify certificates if provided, but don't require them. - */ - SSL_set_verify(conn->ssl, SSL_VERIFY_PEER, NULL); + SSL_set_verify(conn->ssl, SSL_VERIFY_NONE, NULL); } SSL_set_fd(conn->ssl, conn->c.fd); From 7a536c2912be1fd9f62b26b7022a00644c88ef8b Mon Sep 17 00:00:00 2001 From: Yossi Gottlieb Date: Fri, 10 Jul 2020 11:33:47 +0300 Subject: [PATCH 038/215] TLS: Session caching configuration support. (#7420) * TLS: Session caching configuration support. * TLS: Remove redundant config initialization. (cherry picked from commit 3e6f2b1a45176ac3d81b95cb6025f30d7aaa1393) --- TLS.md | 2 -- redis.conf | 16 ++++++++++++++++ src/config.c | 11 ++++++++++- src/server.h | 3 +++ src/tls.c | 12 +++++++++--- tests/unit/introspection.tcl | 28 ++++++++++++++++++---------- 6 files changed, 56 insertions(+), 16 deletions(-) diff --git a/TLS.md b/TLS.md index e480c1e9d..2d020d0ce 100644 --- a/TLS.md +++ b/TLS.md @@ -68,8 +68,6 @@ but there are probably other good reasons to improve that part anyway. To-Do List ---------- -- [ ] Add session caching support. Check if/how it's handled by clients to - assess how useful/important it is. - [ ] redis-benchmark support. 
The current implementation is a mix of using hiredis for parsing and basic networking (establishing connections), but directly manipulating sockets for most actions. This will need to be cleaned diff --git a/redis.conf b/redis.conf index a51ef007d..8c53f015a 100644 --- a/redis.conf +++ b/redis.conf @@ -199,6 +199,22 @@ tcp-keepalive 300 # # tls-prefer-server-ciphers yes +# By default, TLS session caching is enabled to allow faster and less expensive +# reconnections by clients that support it. Use the following directive to disable +# caching. +# +# tls-session-caching no + +# Change the default number of TLS sessions cached. A zero value sets the cache +# to unlimited size. The default size is 20480. +# +# tls-session-cache-size 5000 + +# Change the default timeout of cached TLS sessions. The default timeout is 300 +# seconds. +# +# tls-session-cache-timeout 60 + ################################# GENERAL ##################################### # By default Redis does not run as a daemon. Use 'yes' if you need it. diff --git a/src/config.c b/src/config.c index 64854592c..acf1b069f 100644 --- a/src/config.c +++ b/src/config.c @@ -2071,7 +2071,7 @@ static int updateTlsCfg(char *val, char *prev, char **err) { UNUSED(prev); UNUSED(err); if (tlsConfigure(&server.tls_ctx_config) == C_ERR) { - *err = "Unable to configure tls-cert-file. Check server logs."; + *err = "Unable to update TLS configuration. Check server logs."; return 0; } return 1; @@ -2081,6 +2081,12 @@ static int updateTlsCfgBool(int val, int prev, char **err) { UNUSED(prev); return updateTlsCfg(NULL, NULL, err); } + +static int updateTlsCfgInt(long long val, long long prev, char **err) { + UNUSED(val); + UNUSED(prev); + return updateTlsCfg(NULL, NULL, err); +} #endif /* USE_OPENSSL */ standardConfig configs[] = { @@ -2216,10 +2222,13 @@ standardConfig configs[] = { #ifdef USE_OPENSSL createIntConfig("tls-port", NULL, IMMUTABLE_CONFIG, 0, 65535, server.tls_port, 0, INTEGER_CONFIG, NULL, NULL), /* TCP port. 
*/ + createIntConfig("tls-session-cache-size", NULL, MODIFIABLE_CONFIG, 0, INT_MAX, server.tls_ctx_config.session_cache_size, 20*1024, INTEGER_CONFIG, NULL, updateTlsCfgInt), + createIntConfig("tls-session-cache-timeout", NULL, MODIFIABLE_CONFIG, 0, INT_MAX, server.tls_ctx_config.session_cache_timeout, 300, INTEGER_CONFIG, NULL, updateTlsCfgInt), createBoolConfig("tls-cluster", NULL, MODIFIABLE_CONFIG, server.tls_cluster, 0, NULL, NULL), createBoolConfig("tls-replication", NULL, MODIFIABLE_CONFIG, server.tls_replication, 0, NULL, NULL), createBoolConfig("tls-auth-clients", NULL, MODIFIABLE_CONFIG, server.tls_auth_clients, 1, NULL, NULL), createBoolConfig("tls-prefer-server-ciphers", NULL, MODIFIABLE_CONFIG, server.tls_ctx_config.prefer_server_ciphers, 0, NULL, updateTlsCfgBool), + createBoolConfig("tls-session-caching", NULL, MODIFIABLE_CONFIG, server.tls_ctx_config.session_caching, 1, NULL, updateTlsCfgBool), createStringConfig("tls-cert-file", NULL, MODIFIABLE_CONFIG, EMPTY_STRING_IS_NULL, server.tls_ctx_config.cert_file, NULL, NULL, updateTlsCfg), createStringConfig("tls-key-file", NULL, MODIFIABLE_CONFIG, EMPTY_STRING_IS_NULL, server.tls_ctx_config.key_file, NULL, NULL, updateTlsCfg), createStringConfig("tls-dh-params-file", NULL, MODIFIABLE_CONFIG, EMPTY_STRING_IS_NULL, server.tls_ctx_config.dh_params_file, NULL, NULL, updateTlsCfg), diff --git a/src/server.h b/src/server.h index 8c0facd04..3f471efcb 100644 --- a/src/server.h +++ b/src/server.h @@ -1011,6 +1011,9 @@ typedef struct redisTLSContextConfig { char *ciphers; char *ciphersuites; int prefer_server_ciphers; + int session_caching; + int session_cache_size; + int session_cache_timeout; } redisTLSContextConfig; /*----------------------------------------------------------------------------- diff --git a/src/tls.c b/src/tls.c index 4b9948195..8b2bb58e1 100644 --- a/src/tls.c +++ b/src/tls.c @@ -148,9 +148,6 @@ void tlsInit(void) { } pending_list = listCreate(); - - /* Server configuration */ - server.tls_auth_clients = 1; /* Secure by default */ } /* Attempt to configure/reconfigure TLS. 
This operation is atomic and will @@ -184,6 +181,15 @@ int tlsConfigure(redisTLSContextConfig *ctx_config) { SSL_CTX_set_options(ctx, SSL_OP_DONT_INSERT_EMPTY_FRAGMENTS); #endif + if (ctx_config->session_caching) { + SSL_CTX_set_session_cache_mode(ctx, SSL_SESS_CACHE_SERVER); + SSL_CTX_sess_set_cache_size(ctx, ctx_config->session_cache_size); + SSL_CTX_set_timeout(ctx, ctx_config->session_cache_timeout); + SSL_CTX_set_session_id_context(ctx, (void *) "redis", 5); + } else { + SSL_CTX_set_session_cache_mode(ctx, SSL_SESS_CACHE_OFF); + } + int protocols = parseProtocolsConfig(ctx_config->protocols); if (protocols == -1) goto error; diff --git a/tests/unit/introspection.tcl b/tests/unit/introspection.tcl index b60ca0d48..d681e06d5 100644 --- a/tests/unit/introspection.tcl +++ b/tests/unit/introspection.tcl @@ -78,17 +78,8 @@ start_server {tags {"introspection"}} { syslog-facility databases port - io-threads tls-port - tls-prefer-server-ciphers - tls-cert-file - tls-key-file - tls-dh-params-file - tls-ca-cert-file - tls-ca-cert-dir - tls-protocols - tls-ciphers - tls-ciphersuites + io-threads logfile unixsocketperm slaveof @@ -100,6 +91,23 @@ start_server {tags {"introspection"}} { bgsave_cpulist } + if {!$::tls} { + append skip_configs { + tls-prefer-server-ciphers + tls-session-cache-timeout + tls-session-cache-size + tls-session-caching + tls-cert-file + tls-key-file + tls-dh-params-file + tls-ca-cert-file + tls-ca-cert-dir + tls-protocols + tls-ciphers + tls-ciphersuites + } + } + set configs {} foreach {k v} [r config get *] { if {[lsearch $skip_configs $k] != -1} { From f838df92d22839e3c3e7d4b788a68a0b90c9317b Mon Sep 17 00:00:00 2001 From: WuYunlong Date: Fri, 10 Jul 2020 16:41:48 +0800 Subject: [PATCH 039/215] Add missing latency-monitor tcl test to test_helper.tcl. (#6782) (cherry picked from commit d792db7948c9de9c4ac3b7669fac2dbc9eb7b173) --- tests/test_helper.tcl | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/test_helper.tcl b/tests/test_helper.tcl index 1527aa1b5..51c364601 100644 --- a/tests/test_helper.tcl +++ b/tests/test_helper.tcl @@ -35,6 +35,7 @@ set ::all_tests { unit/quit unit/aofrw unit/acl + unit/latency-monitor integration/block-repl integration/replication integration/replication-2 From b0faf6113ed2809e1cc2abc039bc5714e4ef46fc Mon Sep 17 00:00:00 2001 From: Jiayuan Chen Date: Fri, 10 Jul 2020 21:02:18 +0800 Subject: [PATCH 040/215] Fix typo in deps README (#7500) (cherry picked from commit af39d750cd6af99774b606899794bef405c32429) --- deps/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/deps/README.md b/deps/README.md index 685dbb40d..f923c06ad 100644 --- a/deps/README.md +++ b/deps/README.md @@ -47,7 +47,7 @@ Hiredis Hiredis uses the SDS string library, that must be the same version used inside Redis itself. Hiredis is also very critical for Sentinel. Historically Redis often used forked versions of hiredis in a way or the other. In order to upgrade it is advised to take a lot of care: 1. Check with diff if hiredis API changed and what impact it could have in Redis. -2. Make sure thet the SDS library inside Hiredis and inside Redis are compatible. +2. Make sure that the SDS library inside Hiredis and inside Redis are compatible. 3. After the upgrade, run the Redis Sentinel test. 4. Check manually that redis-cli and redis-benchmark behave as expecteed, since we have no tests for CLI utilities currently. 
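A minimal manual check for the session caching support added in PATCH 038/215 above, written as a shell sketch rather than part of the patch series. It assumes a server built with BUILD_TLS=yes, test certificates generated by ./utils/gen-test-certs.sh (the tests/tls/ paths below), and a TLS 1.2 handshake so plain session resumption can be exercised with openssl s_client; the port number and the /tmp session file are arbitrary. "Reused" on the second handshake indicates the server-side cache accepted the offered session, while starting the server with tls-session-caching no should make it report "New" again.

    # Start a TLS-only server with the default session caching settings.
    ./src/redis-server --port 0 --tls-port 6390 \
        --tls-cert-file ./tests/tls/redis.crt \
        --tls-key-file ./tests/tls/redis.key \
        --tls-ca-cert-file ./tests/tls/ca.crt &

    # First handshake: store the negotiated session.
    echo | openssl s_client -tls1_2 -connect 127.0.0.1:6390 \
        -cert ./tests/tls/redis.crt -key ./tests/tls/redis.key \
        -CAfile ./tests/tls/ca.crt -sess_out /tmp/redis_tls_sess \
        2>/dev/null | grep -E '^(New|Reused),'

    # Second handshake: offer the stored session and expect "Reused".
    echo | openssl s_client -tls1_2 -connect 127.0.0.1:6390 \
        -cert ./tests/tls/redis.crt -key ./tests/tls/redis.key \
        -CAfile ./tests/tls/ca.crt -sess_in /tmp/redis_tls_sess \
        2>/dev/null | grep -E '^(New|Reused),'
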
From 52929b61f81859a1d597072907fca345797f384f Mon Sep 17 00:00:00 2001 From: Abhishek Soni Date: Fri, 10 Jul 2020 18:35:29 +0530 Subject: [PATCH 041/215] fix: typo in CI job name (#7466) (cherry picked from commit d5648d617e1ed5b9cfa575ad412bc9d450b16afd) --- .github/workflows/ci.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 439e3f3df..730eaf0dd 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -31,7 +31,7 @@ jobs: - name: make run: make - biuld-32bit: + build-32bit: runs-on: ubuntu-latest steps: - uses: actions/checkout@v1 From 1acdc84d107ac9ebe411a573e222f3479d07edb7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E9=A9=AC=E6=B0=B8=E6=B3=BD?= <1014057907@qq.com> Date: Fri, 10 Jul 2020 21:37:11 +0800 Subject: [PATCH 042/215] fix benchmark in cluster mode fails to authenticate (#7488) Co-authored-by: Oran Agra (styling) (cherry picked from commit 279b4a14643f247365637da13439fcb407072a9d) --- src/redis-benchmark.c | 98 +++++++++++++++++++++++++------------------ 1 file changed, 57 insertions(+), 41 deletions(-) diff --git a/src/redis-benchmark.c b/src/redis-benchmark.c index 38d4ca51b..f47cbe333 100644 --- a/src/redis-benchmark.c +++ b/src/redis-benchmark.c @@ -1,4 +1,4 @@ -/* Redis benchmark utility. +/* Redis benchmark utility. * * Copyright (c) 2009-2012, Salvatore Sanfilippo * All rights reserved. @@ -183,6 +183,8 @@ static void *execBenchmarkThread(void *ptr); static clusterNode *createClusterNode(char *ip, int port); static redisConfig *getRedisConfig(const char *ip, int port, const char *hostsocket); +static redisContext *getRedisContext(const char *ip, int port, + const char *hostsocket); static void freeRedisConfig(redisConfig *cfg); static int fetchClusterSlotsConfiguration(client c); static void updateClusterSlotsConfiguration(); @@ -238,6 +240,52 @@ void _serverAssert(const char *estr, const char *file, int line) { *((char*)-1) = 'x'; } +static redisContext *getRedisContext(const char *ip, int port, + const char *hostsocket) +{ + redisContext *ctx = NULL; + redisReply *reply = NULL; + if (hostsocket == NULL) + ctx = redisConnect(ip, port); + else + ctx = redisConnectUnix(hostsocket); + if (ctx == NULL || ctx->err) { + fprintf(stderr,"Could not connect to Redis at "); + char *err = (ctx != NULL ? 
ctx->errstr : ""); + if (hostsocket == NULL) + fprintf(stderr,"%s:%d: %s\n",ip,port,err); + else + fprintf(stderr,"%s: %s\n",hostsocket,err); + goto cleanup; + } + if (config.auth == NULL) + return ctx; + if (config.user == NULL) + reply = redisCommand(ctx,"AUTH %s", config.auth); + else + reply = redisCommand(ctx,"AUTH %s %s", config.user, config.auth); + if (reply != NULL) { + if (reply->type == REDIS_REPLY_ERROR) { + if (hostsocket == NULL) + fprintf(stderr, "Node %s:%d replied with error:\n%s\n", ip, port, reply->str); + else + fprintf(stderr, "Node %s replied with error:\n%s\n", hostsocket, reply->str); + goto cleanup; + } + freeReplyObject(reply); + return ctx; + } + fprintf(stderr, "ERROR: failed to fetch reply from "); + if (hostsocket == NULL) + fprintf(stderr, "%s:%d\n", ip, port); + else + fprintf(stderr, "%s\n", hostsocket); +cleanup: + freeReplyObject(reply); + redisFree(ctx); + return NULL; +} + static redisConfig *getRedisConfig(const char *ip, int port, const char *hostsocket) { @@ -245,33 +293,11 @@ static redisConfig *getRedisConfig(const char *ip, int port, if (!cfg) return NULL; redisContext *c = NULL; redisReply *reply = NULL, *sub_reply = NULL; - if (hostsocket == NULL) - c = redisConnect(ip, port); - else - c = redisConnectUnix(hostsocket); - if (c == NULL || c->err) { - fprintf(stderr,"Could not connect to Redis at "); - char *err = (c != NULL ? c->errstr : ""); - if (hostsocket == NULL) fprintf(stderr,"%s:%d: %s\n",ip,port,err); - else fprintf(stderr,"%s: %s\n",hostsocket,err); - goto fail; + c = getRedisContext(ip, port, hostsocket); + if (c == NULL) { + freeRedisConfig(cfg); + return NULL; } - - if(config.auth) { - void *authReply = NULL; - if (config.user == NULL) - redisAppendCommand(c, "AUTH %s", config.auth); - else - redisAppendCommand(c, "AUTH %s %s", config.user, config.auth); - if (REDIS_OK != redisGetReply(c, &authReply)) goto fail; - if (reply) freeReplyObject(reply); - reply = ((redisReply *) authReply); - if (reply->type == REDIS_REPLY_ERROR) { - fprintf(stderr, "ERROR: %s\n", reply->str); - goto fail; - } - } - redisAppendCommand(c, "CONFIG GET %s", "save"); redisAppendCommand(c, "CONFIG GET %s", "appendonly"); int i = 0; @@ -994,16 +1020,8 @@ static int fetchClusterConfiguration() { int success = 1; redisContext *ctx = NULL; redisReply *reply = NULL; - if (config.hostsocket == NULL) - ctx = redisConnect(config.hostip,config.hostport); - else - ctx = redisConnectUnix(config.hostsocket); - if (ctx->err) { - fprintf(stderr,"Could not connect to Redis at "); - if (config.hostsocket == NULL) { - fprintf(stderr,"%s:%d: %s\n",config.hostip,config.hostport, - ctx->errstr); - } else fprintf(stderr,"%s: %s\n",config.hostsocket,ctx->errstr); + ctx = getRedisContext(config.hostip, config.hostport, config.hostsocket); + if (ctx == NULL) { exit(1); } clusterNode *firstNode = createClusterNode((char *) config.hostip, @@ -1199,11 +1217,9 @@ static int fetchClusterSlotsConfiguration(client c) { assert(node->port); /* Use first node as entry point to connect to. 
*/ if (ctx == NULL) { - ctx = redisConnect(node->ip, node->port); - if (!ctx || ctx->err) { + ctx = getRedisContext(node->ip, node->port, NULL); + if (!ctx) { success = 0; - if (ctx && ctx->err) - fprintf(stderr, "REDIS CONNECTION ERROR: %s\n", ctx->errstr); goto cleanup; } } From 1be6dbbdf6ea2a7a5aba3d4e44c597e37489aaaa Mon Sep 17 00:00:00 2001 From: jimgreen2013 Date: Sun, 12 Jul 2020 03:51:44 +0800 Subject: [PATCH 043/215] fix description about ziplist, the code is ok (#6318) * fix description about ZIP_BIG_PREVLEN(the code is ok), it's similar to antirez#4705 * fix description about ziplist entry encoding field (the code is ok), the max length should be 2^32 - 1 when encoding is 5 bytes (cherry picked from commit 67660881ed5b19a979425c37b3e8beea3349043c) --- src/ziplist.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/ziplist.c b/src/ziplist.c index ddae0d96f..13881c117 100644 --- a/src/ziplist.c +++ b/src/ziplist.c @@ -86,7 +86,7 @@ * |10000000|qqqqqqqq|rrrrrrrr|ssssssss|tttttttt| - 5 bytes * String value with length greater than or equal to 16384 bytes. * Only the 4 bytes following the first byte represents the length - * up to 32^2-1. The 6 lower bits of the first byte are not used and + * up to 2^32-1. The 6 lower bits of the first byte are not used and * are set to zero. * IMPORTANT: The 32 bit number is stored in big endian. * |11000000| - 3 bytes @@ -194,7 +194,7 @@ #define ZIP_BIG_PREVLEN 254 /* Max number of bytes of the previous entry, for the "prevlen" field prefixing each entry, to be represented with just a single byte. Otherwise - it is represented as FF AA BB CC DD, where + it is represented as FE AA BB CC DD, where AA BB CC DD are a 4 bytes unsigned integer representing the previous entry len. */ From 23bf3d1cb990cb408629bca1898ea30750e9b813 Mon Sep 17 00:00:00 2001 From: Oran Agra Date: Sun, 12 Jul 2020 13:55:26 +0300 Subject: [PATCH 044/215] update release scripts for new hosts, and CI to run more tests (#7480) * update daily CI to include cluster and sentinel tests * update daily CI to run when creating a new release * update release scripts to work on the new redis.io hosts (cherry picked from commit 7f19a04f0f049720ff5f84f3ab1aa81014f2f4ed) --- .github/workflows/daily.yml | 26 ++++++++++++++++--- utils/releasetools/01_create_tarball.sh | 3 +-- utils/releasetools/02_upload_tarball.sh | 22 ++++++++++++++--- utils/releasetools/03_test_release.sh | 33 +++++++++++++------------ utils/releasetools/04_release_hash.sh | 10 ++++++-- 5 files changed, 68 insertions(+), 26 deletions(-) diff --git a/.github/workflows/daily.yml b/.github/workflows/daily.yml index acc4dd33a..4d54fbc42 100644 --- a/.github/workflows/daily.yml +++ b/.github/workflows/daily.yml @@ -1,14 +1,16 @@ name: Daily on: + release: + types: [created] schedule: - - cron: '0 7 * * *' + - cron: '0 0 * * *' jobs: test-jemalloc: runs-on: ubuntu-latest - timeout-minutes: 1200 + timeout-minutes: 14400 steps: - uses: actions/checkout@v1 - name: make @@ -19,10 +21,14 @@ jobs: ./runtest --accurate --verbose - name: module api test run: ./runtest-moduleapi --verbose + - name: sentinel tests + run: ./runtest-sentinel + - name: cluster tests + run: ./runtest-cluster test-libc-malloc: runs-on: ubuntu-latest - timeout-minutes: 1200 + timeout-minutes: 14400 steps: - uses: actions/checkout@v1 - name: make @@ -33,9 +39,14 @@ jobs: ./runtest --accurate --verbose - name: module api test run: ./runtest-moduleapi --verbose + - name: sentinel tests + run: ./runtest-sentinel + - name: cluster tests + run: 
./runtest-cluster test-32bit: runs-on: ubuntu-latest + timeout-minutes: 14400 steps: - uses: actions/checkout@v1 - name: make @@ -50,9 +61,14 @@ jobs: run: | make -C tests/modules 32bit # the script below doesn't have an argument, we must build manually ahead of time ./runtest-moduleapi --verbose + - name: sentinel tests + run: ./runtest-sentinel + - name: cluster tests + run: ./runtest-cluster test-tls: runs-on: ubuntu-latest + timeout-minutes: 14400 steps: - uses: actions/checkout@v1 - name: make @@ -65,6 +81,10 @@ jobs: ./runtest --accurate --verbose --tls - name: module api test run: ./runtest-moduleapi --verbose --tls + - name: sentinel tests + run: ./runtest-sentinel + - name: cluster tests + run: ./runtest-cluster test-valgrind: runs-on: ubuntu-latest diff --git a/utils/releasetools/01_create_tarball.sh b/utils/releasetools/01_create_tarball.sh index 54bca8c04..366a61e2c 100755 --- a/utils/releasetools/01_create_tarball.sh +++ b/utils/releasetools/01_create_tarball.sh @@ -1,14 +1,13 @@ #!/bin/sh if [ $# != "1" ] then - echo "Usage: ./mkrelease.sh " + echo "Usage: ./utils/releasetools/01_create_tarball.sh " exit 1 fi TAG=$1 TARNAME="redis-${TAG}.tar" echo "Generating /tmp/${TARNAME}" -cd ~/hack/redis git archive $TAG --prefix redis-${TAG}/ > /tmp/$TARNAME || exit 1 echo "Gizipping the archive" rm -f /tmp/$TARNAME.gz diff --git a/utils/releasetools/02_upload_tarball.sh b/utils/releasetools/02_upload_tarball.sh index ed7065388..6400efad7 100755 --- a/utils/releasetools/02_upload_tarball.sh +++ b/utils/releasetools/02_upload_tarball.sh @@ -1,6 +1,22 @@ #!/bin/bash +if [ $# != "1" ] +then + echo "Usage: ./utils/releasetools/02_upload_tarball.sh " + exit 1 +fi + echo "Uploading..." -scp /tmp/redis-${1}.tar.gz antirez@antirez.com:/var/virtual/download.redis.io/httpdocs/releases/ -echo "Updating web site... (press any key if it is a stable release, or Ctrl+C)" +scp /tmp/redis-${1}.tar.gz ubuntu@host.redis.io:/var/www/download/releases/ +echo "Updating web site... " +echo "Please check the github action tests for the release." 
+echo "Press any key if it is a stable release, or Ctrl+C to abort" read x -ssh antirez@antirez.com "cd /var/virtual/download.redis.io/httpdocs; ./update.sh ${1}" +ssh ubuntu@host.redis.io "cd /var/www/download; + rm -rf redis-${1}.tar.gz; + wget http://download.redis.io/releases/redis-${1}.tar.gz; + tar xvzf redis-${1}.tar.gz; + rm -rf redis-stable; + mv redis-${1} redis-stable; + tar cvzf redis-stable.tar.gz redis-stable; + rm -rf redis-${1}.tar.gz; + " diff --git a/utils/releasetools/03_test_release.sh b/utils/releasetools/03_test_release.sh index 3dfdcd6a3..169e965d5 100755 --- a/utils/releasetools/03_test_release.sh +++ b/utils/releasetools/03_test_release.sh @@ -1,7 +1,8 @@ #!/bin/sh +set -e if [ $# != "1" ] then - echo "Usage: ${0} " + echo "Usage: ./utils/releasetools/03_test_release.sh " exit 1 fi @@ -9,18 +10,18 @@ TAG=$1 TARNAME="redis-${TAG}.tar.gz" DOWNLOADURL="http://download.redis.io/releases/${TARNAME}" -ssh antirez@metal "export TERM=xterm; - cd /tmp; - rm -rf test_release_tmp_dir; - cd test_release_tmp_dir; - rm -f $TARNAME; - rm -rf redis-${TAG}; - wget $DOWNLOADURL; - tar xvzf $TARNAME; - cd redis-${TAG}; - make; - ./runtest; - ./runtest-sentinel; - if [ -x runtest-cluster ]; then - ./runtest-cluster; - fi" +echo "Doing sanity test on the actual tarball" + +cd /tmp +rm -rf test_release_tmp_dir +cd test_release_tmp_dir +rm -f $TARNAME +rm -rf redis-${TAG} +wget $DOWNLOADURL +tar xvzf $TARNAME +cd redis-${TAG} +make +./runtest +./runtest-sentinel +./runtest-cluster +./runtest-moduleapi diff --git a/utils/releasetools/04_release_hash.sh b/utils/releasetools/04_release_hash.sh index 9d5c6ad4b..bc1ebb66c 100755 --- a/utils/releasetools/04_release_hash.sh +++ b/utils/releasetools/04_release_hash.sh @@ -1,8 +1,14 @@ #!/bin/bash +if [ $# != "1" ] +then + echo "Usage: ./utils/releasetools/04_release_hash.sh " + exit 1 +fi + SHA=$(curl -s http://download.redis.io/releases/redis-${1}.tar.gz | shasum -a 256 | cut -f 1 -d' ') ENTRY="hash redis-${1}.tar.gz sha256 $SHA http://download.redis.io/releases/redis-${1}.tar.gz" echo $ENTRY >> ~/hack/redis-hashes/README -vi ~/hack/redis-hashes/README +vi ../redis-hashes/README echo "Press any key to commit, Ctrl-C to abort)." read yes -(cd ~/hack/redis-hashes; git commit -a -m "${1} hash."; git push) +(cd ../redis-hashes; git commit -a -m "${1} hash."; git push) From b5c5f870a4e528669bcc29c257b955f747532f34 Mon Sep 17 00:00:00 2001 From: Oran Agra Date: Mon, 13 Jul 2020 16:09:08 +0300 Subject: [PATCH 045/215] runtest --stop pause stops before terminating the redis server (#7513) in the majority of the cases (on this rarely used feature) we want to stop and be able to connect to the shard with redis-cli. since these are two different processes interracting with the tty we need to stop both, and we'll have to hit enter twice, but it's not that bad considering it is rarely used. (cherry picked from commit 02ef355f98691adba4126bbdab0d4d2bfe475701) --- tests/support/test.tcl | 6 ++++++ tests/test_helper.tcl | 4 ++-- 2 files changed, 8 insertions(+), 2 deletions(-) diff --git a/tests/support/test.tcl b/tests/support/test.tcl index 5e8916236..a5573f583 100644 --- a/tests/support/test.tcl +++ b/tests/support/test.tcl @@ -153,6 +153,12 @@ proc test {name code {okpattern undefined}} { incr ::num_failed send_data_packet $::test_server_fd err [join $details "\n"] + + if {$::stop_on_failure} { + puts "Test error (last server port:[srv port], log:[srv stdout]), press enter to teardown the test." 
+ flush stdout + gets stdin + } } else { # Re-raise, let handler up the stack take care of this. error $error $::errorInfo diff --git a/tests/test_helper.tcl b/tests/test_helper.tcl index 51c364601..7ce0d545e 100644 --- a/tests/test_helper.tcl +++ b/tests/test_helper.tcl @@ -359,8 +359,8 @@ proc read_from_test_client fd { puts $err lappend ::failed_tests $err set ::active_clients_task($fd) "(ERR) $data" - if {$::stop_on_failure} { - puts -nonewline "(Test stopped, press enter to continue)" + if {$::stop_on_failure} { + puts -nonewline "(Test stopped, press enter to resume the tests)" flush stdout gets stdin } From aea4db2f5a4f52c1143be6eb2188e7e99493ff9b Mon Sep 17 00:00:00 2001 From: Oran Agra Date: Mon, 13 Jul 2020 16:40:03 +0300 Subject: [PATCH 046/215] fix recently added time sensitive tests failing with valgrind (#7512) interestingly the latency monitor test fails because valgrind is slow enough so that the time inside PEXPIREAT command from the moment of the first mstime() call to get the basetime until checkAlreadyExpired calls mstime() again is more than 1ms, and that test was too sensitive. using this opportunity to speed up the test (unrelated to the failure) the fix is just the longer time passed to PEXPIRE. (cherry picked from commit e5227aab899628653285478a9d1083e8e8f51b57) --- tests/integration/redis-cli.tcl | 2 +- tests/unit/latency-monitor.tcl | 16 +++++++++++----- 2 files changed, 12 insertions(+), 6 deletions(-) diff --git a/tests/integration/redis-cli.tcl b/tests/integration/redis-cli.tcl index 016e4915c..aa8b92199 100644 --- a/tests/integration/redis-cli.tcl +++ b/tests/integration/redis-cli.tcl @@ -241,7 +241,7 @@ start_server {tags {"cli"}} { test_nontty_cli "Connecting as a replica" { set fd [open_cli "--replica"] - wait_for_condition 50 500 { + wait_for_condition 200 500 { [string match {*slave0:*state=online*} [r info]] } else { fail "redis-cli --replica did not connect" diff --git a/tests/unit/latency-monitor.tcl b/tests/unit/latency-monitor.tcl index 69da13f06..d76867cc6 100644 --- a/tests/unit/latency-monitor.tcl +++ b/tests/unit/latency-monitor.tcl @@ -50,15 +50,21 @@ start_server {tags {"latency-monitor"}} { test {LATENCY of expire events are correctly collected} { r config set latency-monitor-threshold 20 + r flushdb + if {$::valgrind} {set count 100000} else {set count 1000000} r eval { local i = 0 - while (i < 1000000) do - redis.call('sadd','mybigkey',i) + while (i < tonumber(ARGV[1])) do + redis.call('sadd',KEYS[1],i) i = i+1 end - } 0 - r pexpire mybigkey 1 - after 500 + } 1 mybigkey $count + r pexpire mybigkey 50 + wait_for_condition 5 100 { + [r dbsize] == 0 + } else { + fail "key wasn't expired" + } assert_match {*expire-cycle*} [r latency latest] } } From c4b428a388597a783a8689e8560bfc3bd2d1f352 Mon Sep 17 00:00:00 2001 From: Oran Agra Date: Mon, 13 Jul 2020 16:40:19 +0300 Subject: [PATCH 047/215] RESTORE ABSTTL skip expired keys - leak (#7511) (cherry picked from commit 6a814501448c5673d05eb2cfe4fe650883da8f0e) --- src/cluster.c | 1 + 1 file changed, 1 insertion(+) diff --git a/src/cluster.c b/src/cluster.c index 88b810d13..45fb5552e 100644 --- a/src/cluster.c +++ b/src/cluster.c @@ -5030,6 +5030,7 @@ void restoreCommand(client *c) { notifyKeyspaceEvent(NOTIFY_GENERIC,"del",key,c->db->id); server.dirty++; } + decrRefCount(obj); addReply(c, shared.ok); return; } From 9bc6af24497a4b228cf83f1072e71019ecee5fa8 Mon Sep 17 00:00:00 2001 From: Qu Chen Date: Mon, 13 Jul 2020 07:16:06 -0700 Subject: [PATCH 048/215] Replica always reports master's config 
epoch in CLUSTER NODES output. (#7235) (cherry picked from commit 938c35302fb618ec15e62443ef3bb3d00d10f5b9) --- src/cluster.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/src/cluster.c b/src/cluster.c index 45fb5552e..5dcd69ff8 100644 --- a/src/cluster.c +++ b/src/cluster.c @@ -4104,11 +4104,15 @@ sds clusterGenNodeDescription(clusterNode *node) { else ci = sdscatlen(ci," - ",3); + unsigned long long nodeEpoch = node->configEpoch; + if (nodeIsSlave(node) && node->slaveof) { + nodeEpoch = node->slaveof->configEpoch; + } /* Latency from the POV of this node, config epoch, link status */ ci = sdscatprintf(ci,"%lld %lld %llu %s", (long long) node->ping_sent, (long long) node->pong_received, - (unsigned long long) node->configEpoch, + nodeEpoch, (node->link || node->flags & CLUSTER_NODE_MYSELF) ? "connected" : "disconnected"); From 4780cc5efa60c04d1e76595c891cc1ffdd771060 Mon Sep 17 00:00:00 2001 From: WuYunlong Date: Tue, 14 Jul 2020 16:35:04 +0800 Subject: [PATCH 049/215] Fix out of update help info in tcl tests. (#7516) Before this commit, the output of "./runtest-cluster --help" is incorrect. After this commit, the format of the following 3 output is consistent: ./runtest --help ./runtest-cluster --help ./runtest-sentinel --help (cherry picked from commit 8128d39737adaf7092c9a367f71fbe9e0a2b33a2) --- tests/instances.tcl | 2 -- 1 file changed, 2 deletions(-) diff --git a/tests/instances.tcl b/tests/instances.tcl index 3a4fadca0..677af6427 100644 --- a/tests/instances.tcl +++ b/tests/instances.tcl @@ -170,8 +170,6 @@ proc parse_options {} { -keyfile "$::tlsdir/redis.key" set ::tls 1 } elseif {$opt eq "--help"} { - puts "Hello, I'm sentinel.tcl and I run Sentinel unit tests." - puts "\nOptions:" puts "--single Only runs tests specified by pattern." puts "--pause-on-error Pause for manual inspection on error." puts "--fail Simulate a test failure." From 05f8975d21f8f7d74b13072ff2a757502d8f7291 Mon Sep 17 00:00:00 2001 From: Oran Agra Date: Tue, 14 Jul 2020 18:04:08 +0300 Subject: [PATCH 050/215] redis-cli tests, fix valgrind timing issue (#7519) this test when run with valgrind on github actions takes 160 seconds (cherry picked from commit 254c96255420e950bcad1a46bc4f8617b4373797) --- tests/integration/redis-cli.tcl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/integration/redis-cli.tcl b/tests/integration/redis-cli.tcl index aa8b92199..c70d14eeb 100644 --- a/tests/integration/redis-cli.tcl +++ b/tests/integration/redis-cli.tcl @@ -241,7 +241,7 @@ start_server {tags {"cli"}} { test_nontty_cli "Connecting as a replica" { set fd [open_cli "--replica"] - wait_for_condition 200 500 { + wait_for_condition 500 500 { [string match {*slave0:*state=online*} [r info]] } else { fail "redis-cli --replica did not connect" From f89f50dbd06247677b8cb3927cbb88c1b5384061 Mon Sep 17 00:00:00 2001 From: Oran Agra Date: Tue, 14 Jul 2020 20:21:59 +0300 Subject: [PATCH 051/215] diskless master disconnect replicas when rdb child failed (#7518) in case the rdb child failed, crashed or terminated unexpectedly redis would have marked the replica clients with repl_put_online_on_ack and then kill them only after a minute when no ack was received. it would not stream anything to these connections, so the only effect of this bug is a delay of 1 minute in the replicas attempt to re-connect. 
(cherry picked from commit a176cb56a3c0235adddde33fcbaee2369a5af73e) --- src/replication.c | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/src/replication.c b/src/replication.c index d9bff79ad..8457150a0 100644 --- a/src/replication.c +++ b/src/replication.c @@ -1240,6 +1240,12 @@ void updateSlavesWaitingBgsave(int bgsaveerr, int type) { } else if (slave->replstate == SLAVE_STATE_WAIT_BGSAVE_END) { struct redis_stat buf; + if (bgsaveerr != C_OK) { + freeClient(slave); + serverLog(LL_WARNING,"SYNC failed. BGSAVE child returned an error"); + continue; + } + /* If this was an RDB on disk save, we have to prepare to send * the RDB from disk to the slave socket. Otherwise if this was * already an RDB -> Slaves socket transfer, used in the case of @@ -1278,11 +1284,6 @@ void updateSlavesWaitingBgsave(int bgsaveerr, int type) { slave->repl_put_online_on_ack = 1; slave->repl_ack_time = server.unixtime; /* Timeout otherwise. */ } else { - if (bgsaveerr != C_OK) { - freeClient(slave); - serverLog(LL_WARNING,"SYNC failed. BGSAVE child returned an error"); - continue; - } if ((slave->repldbfd = open(server.rdb_filename,O_RDONLY)) == -1 || redis_fstat(slave->repldbfd,&buf) == -1) { freeClient(slave); From 83f55f61a6ac53a82c72c9f16e0a15c71bb5ec30 Mon Sep 17 00:00:00 2001 From: WuYunlong Date: Wed, 15 Jul 2020 17:37:44 +0800 Subject: [PATCH 052/215] Refactor RM_KeyType() by using macro. (#7486) (cherry picked from commit dc690161d5652d86d51bd209821bfb0e9c5f7ec2) --- src/module.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/module.c b/src/module.c index 226c60fd0..acfaf2ac7 100644 --- a/src/module.c +++ b/src/module.c @@ -2044,7 +2044,7 @@ int RM_KeyType(RedisModuleKey *key) { case OBJ_HASH: return REDISMODULE_KEYTYPE_HASH; case OBJ_MODULE: return REDISMODULE_KEYTYPE_MODULE; case OBJ_STREAM: return REDISMODULE_KEYTYPE_STREAM; - default: return 0; + default: return REDISMODULE_KEYTYPE_EMPTY; } } From b1a01fda91693be8e3692ff2124ac90ae2b9eec2 Mon Sep 17 00:00:00 2001 From: WuYunlong Date: Wed, 15 Jul 2020 17:38:22 +0800 Subject: [PATCH 053/215] Fix command help for unexpected options (#7476) (cherry picked from commit 93bdbf5aa4857ede0816cf790f951da8e2fa2ae9) --- src/acl.c | 2 +- src/latency.c | 2 +- src/t_stream.c | 2 +- tests/unit/acl.tcl | 5 +++++ tests/unit/latency-monitor.tcl | 5 +++++ tests/unit/type/stream.tcl | 7 +++++++ 6 files changed, 20 insertions(+), 3 deletions(-) diff --git a/src/acl.c b/src/acl.c index 6dd0f70ac..3ce45f03b 100644 --- a/src/acl.c +++ b/src/acl.c @@ -1911,7 +1911,7 @@ void aclCommand(client *c) { addReplyBulkCString(c,"client-info"); addReplyBulkCBuffer(c,le->cinfo,sdslen(le->cinfo)); } - } else if (!strcasecmp(sub,"help")) { + } else if (c->argc == 2 && !strcasecmp(sub,"help")) { const char *help[] = { "LOAD -- Reload users from the ACL file.", "SAVE -- Save the current config to the ACL file.", diff --git a/src/latency.c b/src/latency.c index 9a291ac9b..dfdc6668c 100644 --- a/src/latency.c +++ b/src/latency.c @@ -621,7 +621,7 @@ NULL resets += latencyResetEvent(c->argv[j]->ptr); addReplyLongLong(c,resets); } - } else if (!strcasecmp(c->argv[1]->ptr,"help") && c->argc >= 2) { + } else if (!strcasecmp(c->argv[1]->ptr,"help") && c->argc == 2) { addReplyHelp(c, help); } else { addReplySubcommandSyntaxError(c); diff --git a/src/t_stream.c b/src/t_stream.c index 676ddd9bb..f564b1ff9 100644 --- a/src/t_stream.c +++ b/src/t_stream.c @@ -1885,7 +1885,7 @@ NULL server.dirty++; notifyKeyspaceEvent(NOTIFY_STREAM,"xgroup-delconsumer", 
c->argv[2],c->db->id); - } else if (!strcasecmp(opt,"HELP")) { + } else if (c->argc == 2 && !strcasecmp(opt,"HELP")) { addReplyHelp(c, help); } else { addReplySubcommandSyntaxError(c); diff --git a/tests/unit/acl.tcl b/tests/unit/acl.tcl index 85c9b81a9..e81280995 100644 --- a/tests/unit/acl.tcl +++ b/tests/unit/acl.tcl @@ -255,4 +255,9 @@ start_server {tags {"acl"}} { r ACL setuser default on set e } {*NOAUTH*} + + test {ACL HELP should not have unexpected options} { + catch {r ACL help xxx} e + assert_match "*Unknown subcommand or wrong number of arguments*" $e + } } diff --git a/tests/unit/latency-monitor.tcl b/tests/unit/latency-monitor.tcl index d76867cc6..18b9ecebb 100644 --- a/tests/unit/latency-monitor.tcl +++ b/tests/unit/latency-monitor.tcl @@ -67,4 +67,9 @@ start_server {tags {"latency-monitor"}} { } assert_match {*expire-cycle*} [r latency latest] } + + test {LATENCY HELP should not have unexpected options} { + catch {r LATENCY help xxx} e + assert_match "*Unknown subcommand or wrong number of arguments*" $e + } } diff --git a/tests/unit/type/stream.tcl b/tests/unit/type/stream.tcl index c2b524d7f..0ff570cab 100644 --- a/tests/unit/type/stream.tcl +++ b/tests/unit/type/stream.tcl @@ -461,3 +461,10 @@ start_server {tags {"stream"} overrides {appendonly yes aof-use-rdb-preamble no} assert {[dict get [r xinfo stream mystream] last-generated-id] == "2-2"} } } + +start_server {tags {"stream"}} { + test {XGROUP HELP should not have unexpected options} { + catch {r XGROUP help xxx} e + assert_match "*Unknown subcommand or wrong number of arguments*" $e + } +} From 0f75036c07db48dfcf605e090216a4447edc38fc Mon Sep 17 00:00:00 2001 From: Wen Hui Date: Wed, 15 Jul 2020 05:38:47 -0400 Subject: [PATCH 054/215] correct error msg for num connections reaching maxclients in cluster mode (#7444) (cherry picked from commit d85af4d6f5fbe9cb9787b81583627cd74b47f838) --- src/networking.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/networking.c b/src/networking.c index d35347991..589a459d4 100644 --- a/src/networking.c +++ b/src/networking.c @@ -907,10 +907,10 @@ static void acceptCommonHandler(connection *conn, int flags, char *ip) { { char *err; if (server.cluster_enabled) - err = "-ERR max number of clients reached\r\n"; - else err = "-ERR max number of clients + cluster " "connections reached\r\n"; + else + err = "-ERR max number of clients reached\r\n"; /* That's a best effort error message, don't check write errors. * Note that for TLS connections, no handshake was done yet so nothing From 5b36681213b2d764f6192b0187bd805f8222345b Mon Sep 17 00:00:00 2001 From: Developer-Ecosystem-Engineering <65677710+Developer-Ecosystem-Engineering@users.noreply.github.com> Date: Wed, 15 Jul 2020 02:44:03 -0700 Subject: [PATCH 055/215] Add registers dump support for Apple silicon (#7453) Export following environment variables before building on macOS on Apple silicon export ARCH_FLAGS="-arch arm64" export SDK_NAME=macosx export SDK_PATH=$(xcrun --show-sdk-path --sdk $SDK_NAME) export CFLAGS="$ARCH_FLAGS -isysroot $SDK_PATH -I$SDK_PATH/usr/include" export CXXFLAGS=$CFLAGS export LDFLAGS="$ARCH_FLAGS" export CC="$(xcrun -sdk $SDK_PATH --find clang) $CFLAGS" export CXX="$(xcrun -sdk $SDK_PATH --find clang++) $CXXFLAGS" export LD="$(xcrun -sdk $SDK_PATH --find ld) $LDFLAGS" make make test .. All tests passed without errors! 
Backtrack logging assumes x86 and required updating (cherry picked from commit c2b5f1c15bbc5363f92f8e697538759c5d929934) --- src/debug.c | 56 +++++++++++++++++++++++++++++++++++++++++++++++++++-- 1 file changed, 54 insertions(+), 2 deletions(-) diff --git a/src/debug.c b/src/debug.c index a74c22647..60cc2a1fa 100644 --- a/src/debug.c +++ b/src/debug.c @@ -928,8 +928,11 @@ static void *getMcontextEip(ucontext_t *uc) { /* OSX >= 10.6 */ #if defined(_STRUCT_X86_THREAD_STATE64) && !defined(__i386__) return (void*) uc->uc_mcontext->__ss.__rip; - #else + #elif defined(__i386__) return (void*) uc->uc_mcontext->__ss.__eip; + #else + /* OSX ARM64 */ + return (void*) arm_thread_state64_get_pc(uc->uc_mcontext->__ss); #endif #elif defined(__linux__) /* Linux */ @@ -1015,7 +1018,7 @@ void logRegisters(ucontext_t *uc) { (unsigned long) uc->uc_mcontext->__ss.__gs ); logStackContent((void**)uc->uc_mcontext->__ss.__rsp); - #else + #elif defined(__i386__) /* OSX x86 */ serverLog(LL_WARNING, "\n" @@ -1041,6 +1044,55 @@ void logRegisters(ucontext_t *uc) { (unsigned long) uc->uc_mcontext->__ss.__gs ); logStackContent((void**)uc->uc_mcontext->__ss.__esp); + #else + /* OSX ARM64 */ + serverLog(LL_WARNING, + "\n" + "x0:%016lx x1:%016lx x2:%016lx x3:%016lx\n" + "x4:%016lx x5:%016lx x6:%016lx x7:%016lx\n" + "x8:%016lx x9:%016lx x10:%016lx x11:%016lx\n" + "x12:%016lx x13:%016lx x14:%016lx x15:%016lx\n" + "x16:%016lx x17:%016lx x18:%016lx x19:%016lx\n" + "x20:%016lx x21:%016lx x22:%016lx x23:%016lx\n" + "x24:%016lx x25:%016lx x26:%016lx x27:%016lx\n" + "x28:%016lx fp:%016lx lr:%016lx\n" + "sp:%016lx pc:%016lx cpsr:%08lx\n", + (unsigned long) uc->uc_mcontext->__ss.__x[0], + (unsigned long) uc->uc_mcontext->__ss.__x[1], + (unsigned long) uc->uc_mcontext->__ss.__x[2], + (unsigned long) uc->uc_mcontext->__ss.__x[3], + (unsigned long) uc->uc_mcontext->__ss.__x[4], + (unsigned long) uc->uc_mcontext->__ss.__x[5], + (unsigned long) uc->uc_mcontext->__ss.__x[6], + (unsigned long) uc->uc_mcontext->__ss.__x[7], + (unsigned long) uc->uc_mcontext->__ss.__x[8], + (unsigned long) uc->uc_mcontext->__ss.__x[9], + (unsigned long) uc->uc_mcontext->__ss.__x[10], + (unsigned long) uc->uc_mcontext->__ss.__x[11], + (unsigned long) uc->uc_mcontext->__ss.__x[12], + (unsigned long) uc->uc_mcontext->__ss.__x[13], + (unsigned long) uc->uc_mcontext->__ss.__x[14], + (unsigned long) uc->uc_mcontext->__ss.__x[15], + (unsigned long) uc->uc_mcontext->__ss.__x[16], + (unsigned long) uc->uc_mcontext->__ss.__x[17], + (unsigned long) uc->uc_mcontext->__ss.__x[18], + (unsigned long) uc->uc_mcontext->__ss.__x[19], + (unsigned long) uc->uc_mcontext->__ss.__x[20], + (unsigned long) uc->uc_mcontext->__ss.__x[21], + (unsigned long) uc->uc_mcontext->__ss.__x[22], + (unsigned long) uc->uc_mcontext->__ss.__x[23], + (unsigned long) uc->uc_mcontext->__ss.__x[24], + (unsigned long) uc->uc_mcontext->__ss.__x[25], + (unsigned long) uc->uc_mcontext->__ss.__x[26], + (unsigned long) uc->uc_mcontext->__ss.__x[27], + (unsigned long) uc->uc_mcontext->__ss.__x[28], + (unsigned long) arm_thread_state64_get_fp(uc->uc_mcontext->__ss), + (unsigned long) arm_thread_state64_get_lr(uc->uc_mcontext->__ss), + (unsigned long) arm_thread_state64_get_sp(uc->uc_mcontext->__ss), + (unsigned long) arm_thread_state64_get_pc(uc->uc_mcontext->__ss), + (unsigned long) uc->uc_mcontext->__ss.__cpsr + ); + logStackContent((void**) arm_thread_state64_get_sp(uc->uc_mcontext->__ss)); #endif /* Linux */ #elif defined(__linux__) From 29b20fd528856c3bf17bb79440143c454f9012fb Mon Sep 17 00:00:00 2001 
From: dmurnane Date: Wed, 15 Jul 2020 06:29:26 -0400 Subject: [PATCH 056/215] Notify systemd on sentinel startup (#7168) Co-authored-by: Daniel Murnane (cherry picked from commit 9242ccf238cbed018eb3a7fa3a437618345dd52b) --- src/server.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/src/server.c b/src/server.c index 1f794e4ed..9c2126bc0 100644 --- a/src/server.c +++ b/src/server.c @@ -5198,6 +5198,10 @@ int main(int argc, char **argv) { } else { InitServerLast(); sentinelIsRunning(); + if (server.supervised_mode == SUPERVISED_SYSTEMD) { + redisCommunicateSystemd("STATUS=Ready to accept connections\n"); + redisCommunicateSystemd("READY=1\n"); + } } /* Warning the user about suspicious maxmemory setting. */ From 3051430040ebf5408cbe923753dfe14606ee7fc2 Mon Sep 17 00:00:00 2001 From: Luke Palmer Date: Wed, 15 Jul 2020 13:53:41 -0400 Subject: [PATCH 057/215] Send null for invalidate on flush (#7469) (cherry picked from commit 5f716ea467d29059a89f90b6ccbdee5a60443200) --- src/tracking.c | 24 ++++++++++++++---------- 1 file changed, 14 insertions(+), 10 deletions(-) diff --git a/src/tracking.c b/src/tracking.c index 8c2dca7ba..2721de32a 100644 --- a/src/tracking.c +++ b/src/tracking.c @@ -198,9 +198,11 @@ void trackingRememberKeys(client *c) { * * In case the 'proto' argument is non zero, the function will assume that * 'keyname' points to a buffer of 'keylen' bytes already expressed in the - * form of Redis RESP protocol, representing an array of keys to send - * to the client as value of the invalidation. This is used in BCAST mode - * in order to optimized the implementation to use less CPU time. */ + * form of Redis RESP protocol. This is used for: + * - In BCAST mode, to send an array of invalidated keys to all + * applicable clients + * - Following a flush command, to send a single RESP NULL to indicate + * that all keys are now invalid. */ void sendTrackingMessage(client *c, char *keyname, size_t keylen, int proto) { int using_redirection = 0; if (c->client_tracking_redirection) { @@ -342,17 +344,19 @@ void trackingInvalidateKey(client *c, robj *keyobj) { trackingInvalidateKeyRaw(c,keyobj->ptr,sdslen(keyobj->ptr),1); } -/* This function is called when one or all the Redis databases are flushed - * (dbid == -1 in case of FLUSHALL). Caching keys are not specific for - * each DB but are global: currently what we do is send a special - * notification to clients with tracking enabled, invalidating the caching - * key "", which means, "all the keys", in order to avoid flooding clients - * with many invalidation messages for all the keys they may hold. +/* This function is called when one or all the Redis databases are + * flushed (dbid == -1 in case of FLUSHALL). Caching keys are not + * specific for each DB but are global: currently what we do is send a + * special notification to clients with tracking enabled, sending a + * RESP NULL, which means, "all the keys", in order to avoid flooding + * clients with many invalidation messages for all the keys they may + * hold. 
*/ void freeTrackingRadixTree(void *rt) { raxFree(rt); } +/* A RESP NULL is sent to indicate that all keys are invalid */ void trackingInvalidateKeysOnFlush(int dbid) { if (server.tracking_clients) { listNode *ln; @@ -361,7 +365,7 @@ void trackingInvalidateKeysOnFlush(int dbid) { while ((ln = listNext(&li)) != NULL) { client *c = listNodeValue(ln); if (c->flags & CLIENT_TRACKING) { - sendTrackingMessage(c,"",1,0); + sendTrackingMessage(c,shared.null[c->resp]->ptr,sdslen(shared.null[c->resp]->ptr),1); } } } From e28aa99af1f47f03d32a7a51644b48a79796f2b5 Mon Sep 17 00:00:00 2001 From: yoav-steinberg Date: Thu, 16 Jul 2020 20:59:38 +0300 Subject: [PATCH 058/215] Support passing stack allocated module strings to moduleCreateArgvFromUserFormat (#7528) Specifically, the key passed to the module aof_rewrite callback is a stack allocated robj. When passing it to RedisModule_EmitAOF (with appropriate "s" fmt string) redis used to panic when trying to inc the ref count of the stack allocated robj. Now support such robjs by coying them to a new heap robj. This doesn't affect performance because using the alternative "c" or "b" format strings also copies the input to a new heap robj. (cherry picked from commit d484b8a04ed67e79030fcb060e88641acb6e4f98) --- src/module.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/src/module.c b/src/module.c index acfaf2ac7..9316e004d 100644 --- a/src/module.c +++ b/src/module.c @@ -3189,8 +3189,11 @@ robj **moduleCreateArgvFromUserFormat(const char *cmdname, const char *fmt, int argv[argc++] = createStringObject(cstr,strlen(cstr)); } else if (*p == 's') { robj *obj = va_arg(ap,void*); + if (obj->refcount == OBJ_STATIC_REFCOUNT) + obj = createStringObject(obj->ptr,sdslen(obj->ptr)); + else + incrRefCount(obj); argv[argc++] = obj; - incrRefCount(obj); } else if (*p == 'b') { char *buf = va_arg(ap,char*); size_t len = va_arg(ap,size_t); From e15528bf1da1f1232fd08801ad382c915be94662 Mon Sep 17 00:00:00 2001 From: Itamar Haber Date: Thu, 16 Jul 2020 21:31:36 +0300 Subject: [PATCH 059/215] Adds SHA256SUM to redis-stable tarball upload (cherry picked from commit 5df0a64d30e7815c0a4a75a80f165fdee0bd1db6) --- utils/releasetools/02_upload_tarball.sh | 1 + 1 file changed, 1 insertion(+) diff --git a/utils/releasetools/02_upload_tarball.sh b/utils/releasetools/02_upload_tarball.sh index 6400efad7..ef1e777cc 100755 --- a/utils/releasetools/02_upload_tarball.sh +++ b/utils/releasetools/02_upload_tarball.sh @@ -19,4 +19,5 @@ ssh ubuntu@host.redis.io "cd /var/www/download; mv redis-${1} redis-stable; tar cvzf redis-stable.tar.gz redis-stable; rm -rf redis-${1}.tar.gz; + shasum -a 256 redis-stable.tar.gz > redis-stable.tar.gz.SHA256SUM; " From a5696bdf4f2687ab45f633ccb7cdc4ee9c2f957d Mon Sep 17 00:00:00 2001 From: Oran Agra Date: Sun, 19 Jul 2020 15:33:21 +0300 Subject: [PATCH 060/215] Run daily CI on PRs to release a branch --- .github/workflows/daily.yml | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/.github/workflows/daily.yml b/.github/workflows/daily.yml index 4d54fbc42..5614aad1e 100644 --- a/.github/workflows/daily.yml +++ b/.github/workflows/daily.yml @@ -1,8 +1,10 @@ name: Daily on: - release: - types: [created] + pull_request: + branches: + # any PR to a release branch. + - '[0-9].[0-9]' schedule: - cron: '0 0 * * *' From 7bf665f125a4771db095c83a7ad6ed46692cd314 Mon Sep 17 00:00:00 2001 From: Oran Agra Date: Sun, 19 Jul 2020 14:00:20 +0300 Subject: [PATCH 061/215] Redis 6.0.6. 
--- 00-RELEASENOTES | 245 ++++++++++++++++++++++++++++++++++++++++++++++++ src/help.h | 4 +- src/version.h | 2 +- 3 files changed, 248 insertions(+), 3 deletions(-) diff --git a/00-RELEASENOTES b/00-RELEASENOTES index c6ee44246..484aeb621 100644 --- a/00-RELEASENOTES +++ b/00-RELEASENOTES @@ -11,6 +11,251 @@ CRITICAL: There is a critical bug affecting MOST USERS. Upgrade ASAP. SECURITY: There are security fixes in the release. -------------------------------------------------------------------------------- +================================================================================ +Redis 6.0.6 Released Mon Jul 20 09:31:30 IDT 2020 +================================================================================ + +Upgrade urgency MODERATE: several bugs with moderate impact are fixed here. + +The most important issues are listed here: + +* Fix crash when enabling CLIENT TRACKING with prefix +* EXEC always fails with EXECABORT and multi-state is cleared +* RESTORE ABSTTL won't store expired keys into the db +* redis-cli better handling of non-pritable key names +* TLS: Ignore client cert when tls-auth-clients off +* Tracking: fix invalidation message on flush +* Notify systemd on Sentinel startup +* Fix crash on a misuse of STRALGO +* Few fixes in module API +* Fix a few rare leaks (STRALGO error misuse, Sentinel) +* Fix a possible invalid access in defrag of scripts (unlikely to cause real harm) + +New features: + +* LPOS command to search in a list +* Use user+pass for MIGRATE in redis-cli and redis-benchmark in cluster mode +* redis-cli support TLS for --pipe, --rdb and --replica options +* TLS: Session caching configuration support + +And this is the full list of commits: + +Itamar Haber in commit 50548cafc: + Adds SHA256SUM to redis-stable tarball upload + 1 file changed, 1 insertion(+) + +yoav-steinberg in commit 3a4c6684f: + Support passing stack allocated module strings to moduleCreateArgvFromUserFormat (#7528) + 1 file changed, 4 insertions(+), 1 deletion(-) + +Luke Palmer in commit 2fd0b2bd6: + Send null for invalidate on flush (#7469) + 1 file changed, 14 insertions(+), 10 deletions(-) + +dmurnane in commit c3c81e1a8: + Notify systemd on sentinel startup (#7168) + 1 file changed, 4 insertions(+) + +Developer-Ecosystem-Engineering in commit e2770f29b: + Add registers dump support for Apple silicon (#7453) + 1 file changed, 54 insertions(+), 2 deletions(-) + +Wen Hui in commit b068eae97: + correct error msg for num connections reaching maxclients in cluster mode (#7444) + 1 file changed, 2 insertions(+), 2 deletions(-) + +WuYunlong in commit e6169ae5c: + Fix command help for unexpected options (#7476) + 6 files changed, 20 insertions(+), 3 deletions(-) + +WuYunlong in commit abf08fc02: + Refactor RM_KeyType() by using macro. (#7486) + 1 file changed, 1 insertion(+), 1 deletion(-) + +Oran Agra in commit 11b83076a: + diskless master disconnect replicas when rdb child failed (#7518) + 1 file changed, 6 insertions(+), 5 deletions(-) + +Oran Agra in commit 8f27f2f7d: + redis-cli tests, fix valgrind timing issue (#7519) + 1 file changed, 1 insertion(+), 1 deletion(-) + +WuYunlong in commit 180b588e8: + Fix out of update help info in tcl tests. (#7516) + 1 file changed, 2 deletions(-) + +Qu Chen in commit 417c60bdc: + Replica always reports master's config epoch in CLUSTER NODES output. 
(#7235) + 1 file changed, 5 insertions(+), 1 deletion(-) + +Oran Agra in commit 72a242419: + RESTORE ABSTTL skip expired keys - leak (#7511) + 1 file changed, 1 insertion(+) + +Oran Agra in commit 2ca45239f: + fix recently added time sensitive tests failing with valgrind (#7512) + 2 files changed, 12 insertions(+), 6 deletions(-) + +Oran Agra in commit 123dc8b21: + runtest --stop pause stops before terminating the redis server (#7513) + 2 files changed, 8 insertions(+), 2 deletions(-) + +Oran Agra in commit a6added45: + update release scripts for new hosts, and CI to run more tests (#7480) + 5 files changed, 68 insertions(+), 26 deletions(-) + +jimgreen2013 in commit cf4869f9e: + fix description about ziplist, the code is ok (#6318) + 1 file changed, 2 insertions(+), 2 deletions(-) + +马永泽 in commit d548f219b: + fix benchmark in cluster mode fails to authenticate (#7488) + 1 file changed, 56 insertions(+), 40 deletions(-) + +Abhishek Soni in commit e58eb7b89: + fix: typo in CI job name (#7466) + 1 file changed, 1 insertion(+), 1 deletion(-) + +Jiayuan Chen in commit 6def10a2b: + Fix typo in deps README (#7500) + 1 file changed, 1 insertion(+), 1 deletion(-) + +WuYunlong in commit 8af61afef: + Add missing latency-monitor tcl test to test_helper.tcl. (#6782) + 1 file changed, 1 insertion(+) + +Yossi Gottlieb in commit a419f400e: + TLS: Session caching configuration support. (#7420) + 6 files changed, 56 insertions(+), 16 deletions(-) + +Yossi Gottlieb in commit 2e4bb2667: + TLS: Ignore client cert when tls-auth-clients off. (#7457) + 1 file changed, 1 insertion(+), 3 deletions(-) + +James Hilliard in commit f0b1aee9e: + Use pkg-config to properly detect libssl and libcrypto libraries (#7452) + 1 file changed, 15 insertions(+), 3 deletions(-) + +Yossi Gottlieb in commit e92b99564: + TLS: Add missing redis-cli options. (#7456) + 3 files changed, 166 insertions(+), 52 deletions(-) + +Oran Agra in commit 1f3db5bf5: + redis-cli --hotkeys fixed to handle non-printable key names + 1 file changed, 11 insertions(+), 5 deletions(-) + +Oran Agra in commit c3044f369: + redis-cli --bigkeys fixed to handle non-printable key names + 1 file changed, 24 insertions(+), 16 deletions(-) + +Oran Agra in commit b3f75527b: + RESTORE ABSTTL won't store expired keys into the db (#7472) + 4 files changed, 46 insertions(+), 16 deletions(-) + +huangzhw in commit 6f87fc92f: + defrag.c activeDefragSdsListAndDict when defrag sdsele, We can't use (#7492) + 1 file changed, 1 insertion(+), 1 deletion(-) + +Oran Agra in commit d8e6a3e5b: + skip a test that uses +inf on valgrind (#7440) + 1 file changed, 12 insertions(+), 9 deletions(-) + +Oran Agra in commit 28fd1a110: + stabilize tests that look for log lines (#7367) + 3 files changed, 33 insertions(+), 11 deletions(-) + +Oran Agra in commit a513b4ed9: + tests/valgrind: don't use debug restart (#7404) + 4 files changed, 114 insertions(+), 57 deletions(-) + +Oran Agra in commit 70e72fc1b: + change references to the github repo location (#7479) + 5 files changed, 7 insertions(+), 7 deletions(-) + +zhaozhao.zz in commit c63e533cc: + BITOP: propagate only when it really SET or DEL targetkey (#5783) + 1 file changed, 2 insertions(+), 1 deletion(-) + +antirez in commit 31040ff54: + Update comment to clarify change in #7398. + 1 file changed, 4 insertions(+), 1 deletion(-) + +antirez in commit b605fe827: + LPOS: option FIRST renamed RANK. 
+ 2 files changed, 19 insertions(+), 19 deletions(-) + +Dave Nielsen in commit 8deb24954: + updated copyright year + 1 file changed, 1 insertion(+), 1 deletion(-) + +Oran Agra in commit a61c2930c: + EXEC always fails with EXECABORT and multi-state is cleared + 6 files changed, 204 insertions(+), 91 deletions(-) + +antirez in commit 3c8041637: + Include cluster.h for getClusterConnectionsCount(). + 1 file changed, 1 insertion(+) + +antirez in commit 5be673ee8: + Fix BITFIELD i64 type handling, see #7417. + 1 file changed, 8 insertions(+), 6 deletions(-) + +antirez in commit 5f289df9b: + Clarify maxclients and cluster in conf. Remove myself too. + 2 files changed, 9 insertions(+), 1 deletion(-) + +hwware in commit 000f928d6: + fix memory leak in sentinel connection sharing + 1 file changed, 1 insertion(+) + +chenhui0212 in commit d9a3c0171: + Fix comments in function raxLowWalk of listpack.c + 1 file changed, 2 insertions(+), 2 deletions(-) + +Tomasz Poradowski in commit 7526e4506: + ensure SHUTDOWN_NOSAVE in Sentinel mode + 2 files changed, 9 insertions(+), 8 deletions(-) + +chenhui0212 in commit 6487cbc33: + fix comments in listpack.c + 1 file changed, 2 insertions(+), 2 deletions(-) + +antirez in commit 69b66bfca: + Use cluster connections too, to limit maxclients. + 3 files changed, 23 insertions(+), 8 deletions(-) + +antirez in commit 5a960a033: + Tracking: fix enableBcastTrackingForPrefix() invalid sdslen() call. + 1 file changed, 1 insertion(+), 1 deletion(-) + +root in commit 1c2e50de3: + cluster.c remove if of clusterSendFail in markNodeAsFailingIfNeeded + 1 file changed, 1 insertion(+), 1 deletion(-) + +meir@redislabs.com in commit 040efb697: + Fix RM_ScanKey module api not to return int encoded strings + 3 files changed, 24 insertions(+), 7 deletions(-) + +antirez in commit 1b8b7941d: + Fix LCS object type checking. Related to #7379. + 1 file changed, 17 insertions(+), 10 deletions(-) + +hwware in commit 6b571b45a: + fix memory leak + 1 file changed, 11 insertions(+), 12 deletions(-) + +hwware in commit 674759062: + fix server crash in STRALGO command + 1 file changed, 7 insertions(+) + +Benjamin Sergeant in commit a05ffefdc: + Update redis-cli.c + 1 file changed, 19 insertions(+), 6 deletions(-) + +Jamie Scott in commit 870b63733: + minor fix + 1 file changed, 2 insertions(+), 3 deletions(-) + ================================================================================ Redis 6.0.5 Released Tue Jun 09 11:56:08 CEST 2020 ================================================================================ diff --git a/src/help.h b/src/help.h index 1b1ac5e08..64344aa63 100644 --- a/src/help.h +++ b/src/help.h @@ -1,4 +1,4 @@ -/* Automatically generated by generate-command-help.rb, do not edit. */ +/* Automatically generated by ./utils/generate-command-help.rb, do not edit. 
*/ #ifndef __REDIS_HELP_H #define __REDIS_HELP_H @@ -659,7 +659,7 @@ struct commandHelp { 2, "1.0.0" }, { "LPOS", - "key element [FIRST rank] [COUNT num-matches] [MAXLEN len]", + "key element [RANK rank] [COUNT num-matches] [MAXLEN len]", "Return the index of matching elements on a list", 2, "6.0.6" }, diff --git a/src/version.h b/src/version.h index e1eb096f3..9dfd8f274 100644 --- a/src/version.h +++ b/src/version.h @@ -1 +1 @@ -#define REDIS_VERSION "6.0.5" +#define REDIS_VERSION "6.0.6" From 171aa22b0b68a2ae2511b0e27f9290231a3a7947 Mon Sep 17 00:00:00 2001 From: Scott Brenner Date: Sun, 19 Jul 2020 23:22:24 -0700 Subject: [PATCH 062/215] GitHub Actions workflows - use latest version of actions/checkout (#7534) (cherry picked from commit 2f4e9c3f9f38e196fe4a03232c97782cbf8b3702) --- .github/workflows/ci.yml | 10 +++++----- .github/workflows/daily.yml | 10 +++++----- 2 files changed, 10 insertions(+), 10 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 730eaf0dd..4d6c1c14c 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -7,7 +7,7 @@ jobs: test-ubuntu-latest: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v1 + - uses: actions/checkout@v2 - name: make run: make - name: test @@ -20,21 +20,21 @@ jobs: build-ubuntu-old: runs-on: ubuntu-16.04 steps: - - uses: actions/checkout@v1 + - uses: actions/checkout@v2 - name: make run: make build-macos-latest: runs-on: macos-latest steps: - - uses: actions/checkout@v1 + - uses: actions/checkout@v2 - name: make run: make build-32bit: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v1 + - uses: actions/checkout@v2 - name: make run: | sudo apt-get update && sudo apt-get install libc6-dev-i386 @@ -43,7 +43,7 @@ jobs: build-libc-malloc: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v1 + - uses: actions/checkout@v2 - name: make run: make MALLOC=libc diff --git a/.github/workflows/daily.yml b/.github/workflows/daily.yml index 5614aad1e..5b5f3f7d4 100644 --- a/.github/workflows/daily.yml +++ b/.github/workflows/daily.yml @@ -14,7 +14,7 @@ jobs: runs-on: ubuntu-latest timeout-minutes: 14400 steps: - - uses: actions/checkout@v1 + - uses: actions/checkout@v2 - name: make run: make - name: test @@ -32,7 +32,7 @@ jobs: runs-on: ubuntu-latest timeout-minutes: 14400 steps: - - uses: actions/checkout@v1 + - uses: actions/checkout@v2 - name: make run: make MALLOC=libc - name: test @@ -50,7 +50,7 @@ jobs: runs-on: ubuntu-latest timeout-minutes: 14400 steps: - - uses: actions/checkout@v1 + - uses: actions/checkout@v2 - name: make run: | sudo apt-get update && sudo apt-get install libc6-dev-i386 @@ -72,7 +72,7 @@ jobs: runs-on: ubuntu-latest timeout-minutes: 14400 steps: - - uses: actions/checkout@v1 + - uses: actions/checkout@v2 - name: make run: | make BUILD_TLS=yes @@ -92,7 +92,7 @@ jobs: runs-on: ubuntu-latest timeout-minutes: 14400 steps: - - uses: actions/checkout@v1 + - uses: actions/checkout@v2 - name: make run: make valgrind - name: test From 9e997cd0d117cb4ae6ba27b4eef960a6d295e8d8 Mon Sep 17 00:00:00 2001 From: "zhaozhao.zz" <276441700@qq.com> Date: Mon, 20 Jul 2020 22:21:55 +0800 Subject: [PATCH 063/215] replication: need handle -NOPERM error after send ping (#7538) (cherry picked from commit 13e50935a84c319763a0d3b2be6ce64962092541) --- src/replication.c | 1 + 1 file changed, 1 insertion(+) diff --git a/src/replication.c b/src/replication.c index 8457150a0..197acde79 100644 --- a/src/replication.c +++ b/src/replication.c @@ -2158,6 +2158,7 @@ void syncWithMaster(connection 
*conn) { * both. */ if (err[0] != '+' && strncmp(err,"-NOAUTH",7) != 0 && + strncmp(err,"-NOPERM",7) != 0 && strncmp(err,"-ERR operation not permitted",28) != 0) { serverLog(LL_WARNING,"Error reply to PING from master: '%s'",err); From cef46edebc9781b012896fcf3f71629e4ed6af46 Mon Sep 17 00:00:00 2001 From: Wen Hui Date: Mon, 20 Jul 2020 21:53:03 -0400 Subject: [PATCH 064/215] add missing caching command in client help (#7399) (cherry picked from commit 2fbd0271f6fc0a93b9323b6478ec1a7a4ca77614) --- src/networking.c | 1 + 1 file changed, 1 insertion(+) diff --git a/src/networking.c b/src/networking.c index 589a459d4..e3b62f151 100644 --- a/src/networking.c +++ b/src/networking.c @@ -2147,6 +2147,7 @@ void clientCommand(client *c) { "SETNAME -- Assign the name to the current connection.", "UNBLOCK [TIMEOUT|ERROR] -- Unblock the specified blocked client.", "TRACKING (on|off) [REDIRECT ] [BCAST] [PREFIX first] [PREFIX second] [OPTIN] [OPTOUT]... -- Enable client keys tracking for client side caching.", +"CACHING (yes|no) -- Enable/Disable tracking of the keys for next command in OPTIN/OPTOUT mode.", "GETREDIR -- Return the client ID we are redirecting to when tracking is enabled.", NULL }; From d2f09a054ca2d4eccd8f484445da87887c4b92c3 Mon Sep 17 00:00:00 2001 From: Wen Hui Date: Tue, 21 Jul 2020 01:13:05 -0400 Subject: [PATCH 065/215] Add missing calls to raxStop (#7532) Since the dynamic allocations in raxIterator are only used for deep walks, memory leak due to missing call to raxStop can only happen for rax with key names longer than 32 bytes. Out of all the missing calls, the only ones that may lead to a leak are the rax for consumer groups and consumers, and these were only in AOFRW and rdbSave, which normally only happen in fork or at shutdown. (cherry picked from commit 4e8f2d6881a38397bfbf0d7d161959163a5f6e88) --- src/aof.c | 19 +++++++++++----- src/defrag.c | 1 + src/rdb.c | 61 ++++++++++++++++++++++++++++++++++++++++----------- src/timeout.c | 1 + 4 files changed, 63 insertions(+), 19 deletions(-) diff --git a/src/aof.c b/src/aof.c index 6f8e53712..cbc0989d0 100644 --- a/src/aof.c +++ b/src/aof.c @@ -1244,12 +1244,16 @@ int rewriteStreamObject(rio *r, robj *key, robj *o) { while(raxNext(&ri)) { streamCG *group = ri.data; /* Emit the XGROUP CREATE in order to create the group. */ - if (rioWriteBulkCount(r,'*',5) == 0) return 0; - if (rioWriteBulkString(r,"XGROUP",6) == 0) return 0; - if (rioWriteBulkString(r,"CREATE",6) == 0) return 0; - if (rioWriteBulkObject(r,key) == 0) return 0; - if (rioWriteBulkString(r,(char*)ri.key,ri.key_len) == 0) return 0; - if (rioWriteBulkStreamID(r,&group->last_id) == 0) return 0; + if (!rioWriteBulkCount(r,'*',5) || + !rioWriteBulkString(r,"XGROUP",6) || + !rioWriteBulkString(r,"CREATE",6) || + !rioWriteBulkObject(r,key) || + !rioWriteBulkString(r,(char*)ri.key,ri.key_len) || + !rioWriteBulkStreamID(r,&group->last_id)) + { + raxStop(&ri); + return 0; + } /* Generate XCLAIMs for each consumer that happens to * have pending entries. 
Empty consumers have no semantical @@ -1270,6 +1274,9 @@ int rewriteStreamObject(rio *r, robj *key, robj *o) { ri.key_len,consumer, ri_pel.key,nack) == 0) { + raxStop(&ri_pel); + raxStop(&ri_cons); + raxStop(&ri); return 0; } } diff --git a/src/defrag.c b/src/defrag.c index 2d8db8ea5..07a16ca6c 100644 --- a/src/defrag.c +++ b/src/defrag.c @@ -662,6 +662,7 @@ int scanLaterStraemListpacks(robj *ob, unsigned long *cursor, long long endtime, /* if cursor is non-zero, we seek to the static 'last' */ if (!raxSeek(&ri,">", last, sizeof(last))) { *cursor = 0; + raxStop(&ri); return 0; } /* assign the iterator node callback after the seek, so that the diff --git a/src/rdb.c b/src/rdb.c index 5cec208c5..ac1985d24 100644 --- a/src/rdb.c +++ b/src/rdb.c @@ -697,15 +697,23 @@ ssize_t rdbSaveStreamPEL(rio *rdb, rax *pel, int nacks) { while(raxNext(&ri)) { /* We store IDs in raw form as 128 big big endian numbers, like * they are inside the radix tree key. */ - if ((n = rdbWriteRaw(rdb,ri.key,sizeof(streamID))) == -1) return -1; + if ((n = rdbWriteRaw(rdb,ri.key,sizeof(streamID))) == -1) { + raxStop(&ri); + return -1; + } nwritten += n; if (nacks) { streamNACK *nack = ri.data; - if ((n = rdbSaveMillisecondTime(rdb,nack->delivery_time)) == -1) + if ((n = rdbSaveMillisecondTime(rdb,nack->delivery_time)) == -1) { + raxStop(&ri); return -1; + } nwritten += n; - if ((n = rdbSaveLen(rdb,nack->delivery_count)) == -1) return -1; + if ((n = rdbSaveLen(rdb,nack->delivery_count)) == -1) { + raxStop(&ri); + return -1; + } nwritten += n; /* We don't save the consumer name: we'll save the pending IDs * for each consumer in the consumer PEL, and resolve the consumer @@ -734,20 +742,27 @@ size_t rdbSaveStreamConsumers(rio *rdb, streamCG *cg) { streamConsumer *consumer = ri.data; /* Consumer name. */ - if ((n = rdbSaveRawString(rdb,ri.key,ri.key_len)) == -1) return -1; + if ((n = rdbSaveRawString(rdb,ri.key,ri.key_len)) == -1) { + raxStop(&ri); + return -1; + } nwritten += n; /* Last seen time. */ - if ((n = rdbSaveMillisecondTime(rdb,consumer->seen_time)) == -1) + if ((n = rdbSaveMillisecondTime(rdb,consumer->seen_time)) == -1) { + raxStop(&ri); return -1; + } nwritten += n; /* Consumer PEL, without the ACKs (see last parameter of the function * passed with value of 0), at loading time we'll lookup the ID * in the consumer group global PEL and will put a reference in the * consumer local PEL. */ - if ((n = rdbSaveStreamPEL(rdb,consumer->pel,0)) == -1) + if ((n = rdbSaveStreamPEL(rdb,consumer->pel,0)) == -1) { + raxStop(&ri); return -1; + } nwritten += n; } raxStop(&ri); @@ -912,9 +927,15 @@ ssize_t rdbSaveObject(rio *rdb, robj *o, robj *key) { while (raxNext(&ri)) { unsigned char *lp = ri.data; size_t lp_bytes = lpBytes(lp); - if ((n = rdbSaveRawString(rdb,ri.key,ri.key_len)) == -1) return -1; + if ((n = rdbSaveRawString(rdb,ri.key,ri.key_len)) == -1) { + raxStop(&ri); + return -1; + } nwritten += n; - if ((n = rdbSaveRawString(rdb,lp,lp_bytes)) == -1) return -1; + if ((n = rdbSaveRawString(rdb,lp,lp_bytes)) == -1) { + raxStop(&ri); + return -1; + } nwritten += n; } raxStop(&ri); @@ -946,22 +967,36 @@ ssize_t rdbSaveObject(rio *rdb, robj *o, robj *key) { streamCG *cg = ri.data; /* Save the group name. */ - if ((n = rdbSaveRawString(rdb,ri.key,ri.key_len)) == -1) + if ((n = rdbSaveRawString(rdb,ri.key,ri.key_len)) == -1) { + raxStop(&ri); return -1; + } nwritten += n; /* Last ID. 
*/ - if ((n = rdbSaveLen(rdb,cg->last_id.ms)) == -1) return -1; + if ((n = rdbSaveLen(rdb,cg->last_id.ms)) == -1) { + raxStop(&ri); + return -1; + } nwritten += n; - if ((n = rdbSaveLen(rdb,cg->last_id.seq)) == -1) return -1; + if ((n = rdbSaveLen(rdb,cg->last_id.seq)) == -1) { + raxStop(&ri); + return -1; + } nwritten += n; /* Save the global PEL. */ - if ((n = rdbSaveStreamPEL(rdb,cg->pel,1)) == -1) return -1; + if ((n = rdbSaveStreamPEL(rdb,cg->pel,1)) == -1) { + raxStop(&ri); + return -1; + } nwritten += n; /* Save the consumers of this group. */ - if ((n = rdbSaveStreamConsumers(rdb,cg)) == -1) return -1; + if ((n = rdbSaveStreamConsumers(rdb,cg)) == -1) { + raxStop(&ri); + return -1; + } nwritten += n; } raxStop(&ri); diff --git a/src/timeout.c b/src/timeout.c index 7787a049f..d4c4690e5 100644 --- a/src/timeout.c +++ b/src/timeout.c @@ -150,6 +150,7 @@ void handleBlockedClientsTimeout(void) { raxRemove(server.clients_timeout_table,ri.key,ri.key_len,NULL); raxSeek(&ri,"^",NULL,0); } + raxStop(&ri); } /* Get a timeout value from an object and store it into 'timeout'. From af907e4b6d70f5f44340910dce15cecd3c8e7672 Mon Sep 17 00:00:00 2001 From: Remi Collet Date: Tue, 21 Jul 2020 08:07:54 +0200 Subject: [PATCH 066/215] Fix deprecated tail syntax in tests (#7543) (cherry picked from commit 3f2fbc4c614ff718dce7d55fd971d7ed36062c24) --- tests/support/util.tcl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/support/util.tcl b/tests/support/util.tcl index fce3ffd18..69ea675dc 100644 --- a/tests/support/util.tcl +++ b/tests/support/util.tcl @@ -119,7 +119,7 @@ proc wait_for_log_message {srv_idx pattern from_line maxtries delay} { set retry $maxtries set stdout [srv $srv_idx stdout] while {$retry} { - set result [exec tail +$from_line < $stdout] + set result [exec tail -n +$from_line < $stdout] set result [split $result "\n"] foreach line $result { if {[string match $pattern $line]} { From cc7f4ba67e87d64a06f42f7fe4ff90e973b4eb20 Mon Sep 17 00:00:00 2001 From: Oran Agra Date: Tue, 21 Jul 2020 14:07:06 +0300 Subject: [PATCH 067/215] Fixes to release scripts (#7547) (cherry picked from commit 343dd9bcce0ce28d131510e28bac411599da90b0) --- utils/releasetools/03_test_release.sh | 1 + utils/releasetools/04_release_hash.sh | 3 +-- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/utils/releasetools/03_test_release.sh b/utils/releasetools/03_test_release.sh index 169e965d5..493d0b74c 100755 --- a/utils/releasetools/03_test_release.sh +++ b/utils/releasetools/03_test_release.sh @@ -14,6 +14,7 @@ echo "Doing sanity test on the actual tarball" cd /tmp rm -rf test_release_tmp_dir +mkdir test_release_tmp_dir cd test_release_tmp_dir rm -f $TARNAME rm -rf redis-${TAG} diff --git a/utils/releasetools/04_release_hash.sh b/utils/releasetools/04_release_hash.sh index bc1ebb66c..d93292803 100755 --- a/utils/releasetools/04_release_hash.sh +++ b/utils/releasetools/04_release_hash.sh @@ -7,8 +7,7 @@ fi SHA=$(curl -s http://download.redis.io/releases/redis-${1}.tar.gz | shasum -a 256 | cut -f 1 -d' ') ENTRY="hash redis-${1}.tar.gz sha256 $SHA http://download.redis.io/releases/redis-${1}.tar.gz" -echo $ENTRY >> ~/hack/redis-hashes/README -vi ../redis-hashes/README +echo $ENTRY >> ../redis-hashes/README echo "Press any key to commit, Ctrl-C to abort)." 
read yes (cd ../redis-hashes; git commit -a -m "${1} hash."; git push) From f1d5d5d28eef0eda6e1973d40cde3499627e7c71 Mon Sep 17 00:00:00 2001 From: Yossi Gottlieb Date: Tue, 21 Jul 2020 14:17:14 +0300 Subject: [PATCH 068/215] Tests: drop TCL 8.6 dependency. (#7548) This re-implements the redis-cli --pipe test so it no longer depends on a close feature available only in TCL 8.6. Basically what this test does is run redis-cli --pipe, generates a bunch of commands and pipes them through redis-cli, and inspects the result in both Redis and the redis-cli output. To do that, we need to close stdin for redis-cli to indicate we're done so it can flush its buffers and exit. TCL has bi-directional channels can only offers a way to "one-way close" a channel with TCL 8.6. To work around that, we now generate the commands into a file and feed that file to redis-cli directly. As we're writing to an actual file, the number of commands is now reduced. (cherry picked from commit f57e844b2edbb86a5df2f3436045814812c0a3ae) --- tests/integration/redis-cli.tcl | 51 ++++++++++++++++++--------------- 1 file changed, 28 insertions(+), 23 deletions(-) diff --git a/tests/integration/redis-cli.tcl b/tests/integration/redis-cli.tcl index c70d14eeb..44ff430e2 100644 --- a/tests/integration/redis-cli.tcl +++ b/tests/integration/redis-cli.tcl @@ -1,10 +1,16 @@ source tests/support/cli.tcl start_server {tags {"cli"}} { - proc open_cli {{opts "-n 9"}} { + proc open_cli {{opts "-n 9"} {infile ""}} { set ::env(TERM) dumb set cmdline [rediscli [srv port] $opts] - set fd [open "|$cmdline" "r+"] + if {$infile ne ""} { + set cmdline "$cmdline < $infile" + set mode "r" + } else { + set mode "r+" + } + set fd [open "|$cmdline" $mode] fconfigure $fd -buffering none fconfigure $fd -blocking false fconfigure $fd -translation binary @@ -228,7 +234,7 @@ start_server {tags {"cli"}} { assert_equal {} [r get should-not-exist] } - test_nontty_cli "Dumping an RDB" { + test "Dumping an RDB" { # Disk-based master assert_match "OK" [r config set repl-diskless-sync no] test_redis_cli_rdb_dump @@ -239,7 +245,7 @@ start_server {tags {"cli"}} { test_redis_cli_rdb_dump } - test_nontty_cli "Connecting as a replica" { + test "Connecting as a replica" { set fd [open_cli "--replica"] wait_for_condition 500 500 { [string match {*slave0:*state=online*} [r info]] @@ -258,31 +264,30 @@ start_server {tags {"cli"}} { close_cli $fd } - test_nontty_cli "Piping raw protocol" { - set fd [open_cli "--pipe"] - fconfigure $fd -blocking true + test "Piping raw protocol" { + set cmds [tmpfile "cli_cmds"] + set cmds_fd [open $cmds "w"] - # Create a new deferring client and overwrite its fd - set client [redis [srv 0 "host"] [srv 0 "port"] 1 0] - set ::redis::fd($::redis::id) $fd - $client select 9 - - r del test-counter - for {set i 0} {$i < 10000} {incr i} { - $client incr test-counter - $client set large-key [string repeat "x" 20000] - } + puts $cmds_fd [formatCommand select 9] + puts $cmds_fd [formatCommand del test-counter] for {set i 0} {$i < 1000} {incr i} { - $client set very-large-key [string repeat "x" 512000] + puts $cmds_fd [formatCommand incr test-counter] + puts $cmds_fd [formatCommand set large-key [string repeat "x" 20000]] } - close $fd write - set output [read_cli $fd] + for {set i 0} {$i < 100} {incr i} { + puts $cmds_fd [formatCommand set very-large-key [string repeat "x" 512000]] + } + close $cmds_fd - assert_equal {10000} [r get test-counter] - assert_match {*All data transferred*errors: 0*replies: 21001*} $output + set cli_fd [open_cli "--pipe" 
$cmds] + fconfigure $cli_fd -blocking true + set output [read_cli $cli_fd] - close_cli $fd + assert_equal {1000} [r get test-counter] + assert_match {*All data transferred*errors: 0*replies: 2102*} $output + + file delete $cmds } } From e8aa5583d0687bd1e07d6b8eebd358c6410df135 Mon Sep 17 00:00:00 2001 From: Oran Agra Date: Tue, 21 Jul 2020 16:56:19 +0300 Subject: [PATCH 069/215] testsuite may leave servers alive on error (#7549) in cases where you have test name { start_server { start_server { assert } } } the exception will be thrown to the test proc, and the servers are supposed to be killed on the way out. but it seems there was always a bug of not cleaning the server stack, and recently (#7404) we started relying on that stack in order to kill them, so with that bug sometimes we would have tried to kill the same server twice, and leave one alive. luckly, in most cases the pattern is: start_server { test name { } } (cherry picked from commit 36b949438547eb5bf8555fcac2c5040528fd7854) --- tests/support/server.tcl | 3 +++ 1 file changed, 3 insertions(+) diff --git a/tests/support/server.tcl b/tests/support/server.tcl index ea7d0b13c..0afe89f7c 100644 --- a/tests/support/server.tcl +++ b/tests/support/server.tcl @@ -396,6 +396,9 @@ proc start_server {options {code undefined}} { # fetch srv back from the server list, in case it was restarted by restart_server (new PID) set srv [lindex $::servers end] + # pop the server object + set ::servers [lrange $::servers 0 end-1] + # Kill the server without checking for leaks dict set srv "skipleaks" 1 kill_server $srv From e00b6248c93c0b26be9c54ef6678ee0bcf248467 Mon Sep 17 00:00:00 2001 From: Madelyn Olson <34459052+madolson@users.noreply.github.com> Date: Tue, 21 Jul 2020 17:00:13 -0700 Subject: [PATCH 070/215] Properly reset errno for rdbLoad (#7542) (cherry picked from commit 818dc3a0898125d621288df671994eed34f849f2) --- src/server.c | 1 + 1 file changed, 1 insertion(+) diff --git a/src/server.c b/src/server.c index 9c2126bc0..db32227bc 100644 --- a/src/server.c +++ b/src/server.c @@ -4876,6 +4876,7 @@ void loadDataFromDisk(void) { serverLog(LL_NOTICE,"DB loaded from append only file: %.3f seconds",(float)(ustime()-start)/1000000); } else { rdbSaveInfo rsi = RDB_SAVE_INFO_INIT; + errno = 0; /* Prevent a stale value from affecting error checking */ if (rdbLoad(server.rdb_filename,&rsi,RDBFLAGS_NONE) == C_OK) { serverLog(LL_NOTICE,"DB loaded from disk: %.3f seconds", (float)(ustime()-start)/1000000); From 6daa8b9adba6ea082a0836d03d95e381189a462e Mon Sep 17 00:00:00 2001 From: Oran Agra Date: Thu, 23 Jul 2020 13:06:24 +0300 Subject: [PATCH 071/215] Stabilize bgsave test that sometimes fails with valgrind (#7559) on ci.redis.io the test fails a lot, reporting that bgsave didn't end. increaseing the timeout we wait for that bgsave to get aborted. in addition to that, i also verify that it indeed got aborted by checking that the save counter wasn't reset. add another test to verify that a successful bgsave indeed resets the change counter. 
(cherry picked from commit 8a57969fd75db01b881d438200911d95bdead293) --- tests/integration/rdb.tcl | 22 ++++++++++++++++++++-- 1 file changed, 20 insertions(+), 2 deletions(-) diff --git a/tests/integration/rdb.tcl b/tests/integration/rdb.tcl index b176bf199..6535cd089 100644 --- a/tests/integration/rdb.tcl +++ b/tests/integration/rdb.tcl @@ -118,16 +118,34 @@ start_server_and_kill_it [list "dir" $server_path] { start_server {} { test {Test FLUSHALL aborts bgsave} { + # 1000 keys with 1ms sleep per key shuld take 1 second r config set rdb-key-save-delay 1000 r debug populate 1000 r bgsave assert_equal [s rdb_bgsave_in_progress] 1 r flushall - after 200 - assert_equal [s rdb_bgsave_in_progress] 0 + # wait half a second max + wait_for_condition 5 100 { + [s rdb_bgsave_in_progress] == 0 + } else { + fail "bgsave not aborted" + } + # veirfy that bgsave failed, by checking that the change counter is still high + assert_lessthan 999 [s rdb_changes_since_last_save] # make sure the server is still writable r set x xx } + + test {bgsave resets the change counter} { + r config set rdb-key-save-delay 0 + r bgsave + wait_for_condition 5 100 { + [s rdb_bgsave_in_progress] == 0 + } else { + fail "bgsave not aborted" + } + assert_equal [s rdb_changes_since_last_save] 0 + } } test {client freed during loading} { From 395d3392342111b2f04dd151c68cc3c5c707842b Mon Sep 17 00:00:00 2001 From: grishaf Date: Sun, 26 Jul 2020 08:27:30 +0300 Subject: [PATCH 072/215] Fix prepareForShutdown function declaration (#7566) (cherry picked from commit 4126ca466fb9fbf0d84a66e31c9f59f28e2fbff6) --- src/server.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/server.h b/src/server.h index 3f471efcb..57047f55b 100644 --- a/src/server.h +++ b/src/server.h @@ -1976,7 +1976,7 @@ void forceCommandPropagation(client *c, int flags); void preventCommandPropagation(client *c); void preventCommandAOF(client *c); void preventCommandReplication(client *c); -int prepareForShutdown(); +int prepareForShutdown(int flags); #ifdef __GNUC__ void serverLog(int level, const char *fmt, ...) __attribute__((format(printf, 2, 3))); From 8a4240b1fa9fb54458cf1bf6ef8121dc44304d86 Mon Sep 17 00:00:00 2001 From: Yossi Gottlieb Date: Mon, 27 Jul 2020 12:52:13 +0300 Subject: [PATCH 073/215] TLS: support cluster/replication without tls-port. Initialize and configure OpenSSL even when tls-port is not used, because we may still have tls-cluster or tls-replication. Also, make sure to reconfigure OpenSSL when these parameters are changed as TLS could have been enabled for the first time. (cherry picked from commit c75512d89d1697cf782cdc826acffab5b6adc1c7) --- src/config.c | 6 +++--- src/server.c | 3 ++- 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/src/config.c b/src/config.c index acf1b069f..9b6dc459c 100644 --- a/src/config.c +++ b/src/config.c @@ -2221,11 +2221,11 @@ standardConfig configs[] = { createOffTConfig("auto-aof-rewrite-min-size", NULL, MODIFIABLE_CONFIG, 0, LLONG_MAX, server.aof_rewrite_min_size, 64*1024*1024, MEMORY_CONFIG, NULL, NULL), #ifdef USE_OPENSSL - createIntConfig("tls-port", NULL, IMMUTABLE_CONFIG, 0, 65535, server.tls_port, 0, INTEGER_CONFIG, NULL, NULL), /* TCP port. */ + createIntConfig("tls-port", NULL, IMMUTABLE_CONFIG, 0, 65535, server.tls_port, 0, INTEGER_CONFIG, NULL, updateTlsCfgInt), /* TCP port. 
*/ createIntConfig("tls-session-cache-size", NULL, MODIFIABLE_CONFIG, 0, INT_MAX, server.tls_ctx_config.session_cache_size, 20*1024, INTEGER_CONFIG, NULL, updateTlsCfgInt), createIntConfig("tls-session-cache-timeout", NULL, MODIFIABLE_CONFIG, 0, INT_MAX, server.tls_ctx_config.session_cache_timeout, 300, INTEGER_CONFIG, NULL, updateTlsCfgInt), - createBoolConfig("tls-cluster", NULL, MODIFIABLE_CONFIG, server.tls_cluster, 0, NULL, NULL), - createBoolConfig("tls-replication", NULL, MODIFIABLE_CONFIG, server.tls_replication, 0, NULL, NULL), + createBoolConfig("tls-cluster", NULL, MODIFIABLE_CONFIG, server.tls_cluster, 0, NULL, updateTlsCfgBool), + createBoolConfig("tls-replication", NULL, MODIFIABLE_CONFIG, server.tls_replication, 0, NULL, updateTlsCfgBool), createBoolConfig("tls-auth-clients", NULL, MODIFIABLE_CONFIG, server.tls_auth_clients, 1, NULL, NULL), createBoolConfig("tls-prefer-server-ciphers", NULL, MODIFIABLE_CONFIG, server.tls_ctx_config.prefer_server_ciphers, 0, NULL, updateTlsCfgBool), createBoolConfig("tls-session-caching", NULL, MODIFIABLE_CONFIG, server.tls_ctx_config.session_caching, 1, NULL, updateTlsCfgBool), diff --git a/src/server.c b/src/server.c index db32227bc..d624cb434 100644 --- a/src/server.c +++ b/src/server.c @@ -2774,7 +2774,8 @@ void initServer(void) { server.events_processed_while_blocked = 0; server.system_memory_size = zmalloc_get_memory_size(); - if (server.tls_port && tlsConfigure(&server.tls_ctx_config) == C_ERR) { + if ((server.tls_port || server.tls_replication || server.tls_cluster) + && tlsConfigure(&server.tls_ctx_config) == C_ERR) { serverLog(LL_WARNING, "Failed to configure TLS. Check logs for more info."); exit(1); } From 0b2f7c6407c2a6fbe3e7a8323cdb42192c50dc76 Mon Sep 17 00:00:00 2001 From: Oran Agra Date: Mon, 27 Jul 2020 15:30:36 +0300 Subject: [PATCH 074/215] Daily github action: run cluster and sentinel tests with tls (#7575) (cherry picked from commit 6d92eee69b8e693b9bf0a34320dd108b5a81a81a) --- .github/workflows/daily.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/daily.yml b/.github/workflows/daily.yml index 5b5f3f7d4..acc811d3a 100644 --- a/.github/workflows/daily.yml +++ b/.github/workflows/daily.yml @@ -84,9 +84,9 @@ jobs: - name: module api test run: ./runtest-moduleapi --verbose --tls - name: sentinel tests - run: ./runtest-sentinel + run: ./runtest-sentinel --tls - name: cluster tests - run: ./runtest-cluster + run: ./runtest-cluster --tls test-valgrind: runs-on: ubuntu-latest From 096285ab64445a6e80ee89eeb08b731860e1a1e8 Mon Sep 17 00:00:00 2001 From: Jiayuan Chen Date: Tue, 28 Jul 2020 15:45:21 +0800 Subject: [PATCH 075/215] Add optional tls verification (#7502) Adds an `optional` value to the previously boolean `tls-auth-clients` configuration keyword. Co-authored-by: Yossi Gottlieb (cherry picked from commit f31260b0445f5649449da41555e1272a40ae4af7) --- redis.conf | 5 ++++- src/cluster.c | 3 ++- src/config.c | 8 +++++++- src/server.h | 5 +++++ src/tls.c | 12 ++++++++++-- tests/unit/tls.tcl | 12 ++++++++++++ 6 files changed, 40 insertions(+), 5 deletions(-) diff --git a/redis.conf b/redis.conf index 8c53f015a..d4e3e47f0 100644 --- a/redis.conf +++ b/redis.conf @@ -159,9 +159,12 @@ tcp-keepalive 300 # By default, clients (including replica servers) on a TLS port are required # to authenticate using valid client side certificates. # -# It is possible to disable authentication using this directive. +# If "no" is specified, client certificates are not required and not accepted. 
+# If "optional" is specified, client certificates are accepted and must be +# valid if provided, but are not required. # # tls-auth-clients no +# tls-auth-clients optional # By default, a Redis replica does not attempt to establish a TLS connection # with its master. diff --git a/src/cluster.c b/src/cluster.c index 5dcd69ff8..485683cf0 100644 --- a/src/cluster.c +++ b/src/cluster.c @@ -670,7 +670,8 @@ void clusterAcceptHandler(aeEventLoop *el, int fd, void *privdata, int mask) { return; } - connection *conn = server.tls_cluster ? connCreateAcceptedTLS(cfd,1) : connCreateAcceptedSocket(cfd); + connection *conn = server.tls_cluster ? + connCreateAcceptedTLS(cfd, TLS_CLIENT_AUTH_YES) : connCreateAcceptedSocket(cfd); connNonBlock(conn); connEnableTcpNoDelay(conn); diff --git a/src/config.c b/src/config.c index 9b6dc459c..10faa3bea 100644 --- a/src/config.c +++ b/src/config.c @@ -98,6 +98,12 @@ configEnum repl_diskless_load_enum[] = { {NULL, 0} }; +configEnum tls_auth_clients_enum[] = { + {"no", TLS_CLIENT_AUTH_NO}, + {"yes", TLS_CLIENT_AUTH_YES}, + {"optional", TLS_CLIENT_AUTH_OPTIONAL}, + {NULL, 0} +}; /* Output buffer limits presets. */ clientBufferLimitsConfig clientBufferLimitsDefaults[CLIENT_TYPE_OBUF_COUNT] = { {0, 0, 0}, /* normal */ @@ -2226,7 +2232,7 @@ standardConfig configs[] = { createIntConfig("tls-session-cache-timeout", NULL, MODIFIABLE_CONFIG, 0, INT_MAX, server.tls_ctx_config.session_cache_timeout, 300, INTEGER_CONFIG, NULL, updateTlsCfgInt), createBoolConfig("tls-cluster", NULL, MODIFIABLE_CONFIG, server.tls_cluster, 0, NULL, updateTlsCfgBool), createBoolConfig("tls-replication", NULL, MODIFIABLE_CONFIG, server.tls_replication, 0, NULL, updateTlsCfgBool), - createBoolConfig("tls-auth-clients", NULL, MODIFIABLE_CONFIG, server.tls_auth_clients, 1, NULL, NULL), + createEnumConfig("tls-auth-clients", NULL, MODIFIABLE_CONFIG, tls_auth_clients_enum, server.tls_auth_clients, TLS_CLIENT_AUTH_YES, NULL, NULL), createBoolConfig("tls-prefer-server-ciphers", NULL, MODIFIABLE_CONFIG, server.tls_ctx_config.prefer_server_ciphers, 0, NULL, updateTlsCfgBool), createBoolConfig("tls-session-caching", NULL, MODIFIABLE_CONFIG, server.tls_ctx_config.session_caching, 1, NULL, updateTlsCfgBool), createStringConfig("tls-cert-file", NULL, MODIFIABLE_CONFIG, EMPTY_STRING_IS_NULL, server.tls_ctx_config.cert_file, NULL, NULL, updateTlsCfg), diff --git a/src/server.h b/src/server.h index 57047f55b..1862e879e 100644 --- a/src/server.h +++ b/src/server.h @@ -358,6 +358,11 @@ typedef long long ustime_t; /* microsecond time type. 
*/ #define REPL_DISKLESS_LOAD_WHEN_DB_EMPTY 1 #define REPL_DISKLESS_LOAD_SWAPDB 2 +/* TLS Client Authentication */ +#define TLS_CLIENT_AUTH_NO 0 +#define TLS_CLIENT_AUTH_YES 1 +#define TLS_CLIENT_AUTH_OPTIONAL 2 + /* Sets operations codes */ #define SET_OP_UNION 0 #define SET_OP_DIFF 1 diff --git a/src/tls.c b/src/tls.c index 8b2bb58e1..07d1775f8 100644 --- a/src/tls.c +++ b/src/tls.c @@ -342,8 +342,16 @@ connection *connCreateAcceptedTLS(int fd, int require_auth) { conn->c.fd = fd; conn->c.state = CONN_STATE_ACCEPTING; - if (!require_auth) { - SSL_set_verify(conn->ssl, SSL_VERIFY_NONE, NULL); + switch (require_auth) { + case TLS_CLIENT_AUTH_NO: + SSL_set_verify(conn->ssl, SSL_VERIFY_NONE, NULL); + break; + case TLS_CLIENT_AUTH_OPTIONAL: + SSL_set_verify(conn->ssl, SSL_VERIFY_PEER, NULL); + break; + default: /* TLS_CLIENT_AUTH_YES, also fall-secure */ + SSL_set_verify(conn->ssl, SSL_VERIFY_PEER|SSL_VERIFY_FAIL_IF_NO_PEER_CERT, NULL); + break; } SSL_set_fd(conn->ssl, conn->c.fd); diff --git a/tests/unit/tls.tcl b/tests/unit/tls.tcl index 2b04590cd..bb5b6d034 100644 --- a/tests/unit/tls.tcl +++ b/tests/unit/tls.tcl @@ -21,7 +21,19 @@ start_server {tags {"tls"}} { catch {$s PING} e assert_match {PONG} $e + r CONFIG SET tls-auth-clients optional + + set s [redis [srv 0 host] [srv 0 port]] + ::tls::import [$s channel] + catch {$s PING} e + assert_match {PONG} $e + r CONFIG SET tls-auth-clients yes + + set s [redis [srv 0 host] [srv 0 port]] + ::tls::import [$s channel] + catch {$s PING} e + assert_match {*error*} $e } test {TLS: Verify tls-protocols behaves as expected} { From 67750ce3b3ec8855ee698c80e3271b9cf27f17a4 Mon Sep 17 00:00:00 2001 From: Oran Agra Date: Tue, 28 Jul 2020 11:15:29 +0300 Subject: [PATCH 076/215] Fix failing tests due to issues with wait_for_log_message (#7572) - the test now waits for specific set of log messages rather than wait for timeout looking for just one message. - we don't wanna sample the current length of the log after an action, due to a race, we need to start the search from the line number of the last message we where waiting for. - when attempting to trigger a full sync, use multi-exec to avoid a race where the replica manages to re-connect before we completed the set of actions that should force a full sync. 
- fix verify_log_message which was broken and unused (cherry picked from commit 109b5ccdcd6e6b8cecdaeb13a246bc49ce7a61f4) --- tests/integration/replication.tcl | 30 +++++++++++++++--------------- tests/support/util.tcl | 20 ++++++++++++-------- tests/unit/moduleapi/testrdb.tcl | 22 +++++++++++----------- 3 files changed, 38 insertions(+), 34 deletions(-) diff --git a/tests/integration/replication.tcl b/tests/integration/replication.tcl index d47ec4fe4..1052fbdd5 100644 --- a/tests/integration/replication.tcl +++ b/tests/integration/replication.tcl @@ -440,30 +440,30 @@ test {diskless loading short read} { for {set i 0} {$i < $attempts} {incr i} { # wait for the replica to start reading the rdb # using the log file since the replica only responds to INFO once in 2mb - wait_for_log_message -1 "*Loading DB in memory*" $loglines 2000 1 + set res [wait_for_log_messages -1 {"*Loading DB in memory*"} $loglines 2000 1] + set loglines [lindex $res 1] # add some additional random sleep so that we kill the master on a different place each time - after [expr {int(rand()*100)}] + after [expr {int(rand()*50)}] # kill the replica connection on the master set killed [$master client kill type replica] - if {[catch { - set res [wait_for_log_message -1 "*Internal error in RDB*" $loglines 100 10] - if {$::verbose} { - puts $res - } - }]} { - puts "failed triggering short read" + set res [wait_for_log_messages -1 {"*Internal error in RDB*" "*Finished with success*" "*Successful partial resynchronization*"} $loglines 1000 1] + if {$::verbose} { puts $res } + set log_text [lindex $res 0] + set loglines [lindex $res 1] + if {![string match "*Internal error in RDB*" $log_text]} { # force the replica to try another full sync + $master multi $master client kill type replica $master set asdf asdf # the side effect of resizing the backlog is that it is flushed (16k is the min size) $master config set repl-backlog-size [expr {16384 + $i}] + $master exec } # wait for loading to stop (fail) - set loglines [count_log_lines -1] - wait_for_condition 100 10 { + wait_for_condition 1000 1 { [s -1 loading] eq 0 } else { fail "Replica didn't disconnect" @@ -545,7 +545,7 @@ start_server {tags {"repl"}} { # wait for the replicas to start reading the rdb # using the log file since the replica only responds to INFO once in 2mb - wait_for_log_message -1 "*Loading DB in memory*" $loglines 800 10 + wait_for_log_messages -1 {"*Loading DB in memory*"} $loglines 800 10 if {$measure_time} { set master_statfile "/proc/$master_pid/stat" @@ -580,13 +580,13 @@ start_server {tags {"repl"}} { # make sure we got what we were aiming for, by looking for the message in the log file if {$all_drop == "all"} { - wait_for_log_message -2 "*Diskless rdb transfer, last replica dropped, killing fork child*" $loglines 1 1 + wait_for_log_messages -2 {"*Diskless rdb transfer, last replica dropped, killing fork child*"} $loglines 1 1 } if {$all_drop == "no"} { - wait_for_log_message -2 "*Diskless rdb transfer, done reading from pipe, 2 replicas still up*" $loglines 1 1 + wait_for_log_messages -2 {"*Diskless rdb transfer, done reading from pipe, 2 replicas still up*"} $loglines 1 1 } if {$all_drop == "slow" || $all_drop == "fast"} { - wait_for_log_message -2 "*Diskless rdb transfer, done reading from pipe, 1 replicas still up*" $loglines 1 1 + wait_for_log_messages -2 {"*Diskless rdb transfer, done reading from pipe, 1 replicas still up*"} $loglines 1 1 } # make sure we don't have a busy loop going thought epoll_wait diff --git a/tests/support/util.tcl 
b/tests/support/util.tcl index 69ea675dc..8340ad207 100644 --- a/tests/support/util.tcl +++ b/tests/support/util.tcl @@ -106,31 +106,35 @@ proc count_log_lines {srv_idx} { # verify pattern exists in server's sdtout after a certain line number proc verify_log_message {srv_idx pattern from_line} { - set lines_after [count_log_lines] - set lines [expr $lines_after - $from_line] - set result [exec tail -$lines < [srv $srv_idx stdout]] + incr from_line + set result [exec tail -n +$from_line < [srv $srv_idx stdout]] if {![string match $pattern $result]} { error "assertion:expected message not found in log file: $pattern" } } # wait for pattern to be found in server's stdout after certain line number -proc wait_for_log_message {srv_idx pattern from_line maxtries delay} { +# return value is a list containing the line that matched the pattern and the line number +proc wait_for_log_messages {srv_idx patterns from_line maxtries delay} { set retry $maxtries + set next_line [expr $from_line + 1] ;# searching form the line after set stdout [srv $srv_idx stdout] while {$retry} { - set result [exec tail -n +$from_line < $stdout] + set result [exec tail -n +$next_line < $stdout] set result [split $result "\n"] foreach line $result { - if {[string match $pattern $line]} { - return $line + foreach pattern $patterns { + if {[string match $pattern $line]} { + return [list $line $next_line] + } } + incr next_line } incr retry -1 after $delay } if {$retry == 0} { - fail "log message of '$pattern' not found in $stdout after line: $from_line" + fail "log message of '$patterns' not found in $stdout after line: $from_line till line: [expr $next_line -1]" } } diff --git a/tests/unit/moduleapi/testrdb.tcl b/tests/unit/moduleapi/testrdb.tcl index 98641ae0a..02c82c7c3 100644 --- a/tests/unit/moduleapi/testrdb.tcl +++ b/tests/unit/moduleapi/testrdb.tcl @@ -77,30 +77,30 @@ tags "modules" { for {set i 0} {$i < $attempts} {incr i} { # wait for the replica to start reading the rdb # using the log file since the replica only responds to INFO once in 2mb - wait_for_log_message -1 "*Loading DB in memory*" $loglines 2000 1 + set res [wait_for_log_messages -1 {"*Loading DB in memory*"} $loglines 2000 1] + set loglines [lindex $res 1] # add some additional random sleep so that we kill the master on a different place each time - after [expr {int(rand()*100)}] + after [expr {int(rand()*50)}] # kill the replica connection on the master set killed [$master client kill type replica] - if {[catch { - set res [wait_for_log_message -1 "*Internal error in RDB*" $loglines 100 10] - if {$::verbose} { - puts $res - } - }]} { - puts "failed triggering short read" + set res [wait_for_log_messages -1 {"*Internal error in RDB*" "*Finished with success*" "*Successful partial resynchronization*"} $loglines 1000 1] + if {$::verbose} { puts $res } + set log_text [lindex $res 0] + set loglines [lindex $res 1] + if {![string match "*Internal error in RDB*" $log_text]} { # force the replica to try another full sync + $master multi $master client kill type replica $master set asdf asdf # the side effect of resizing the backlog is that it is flushed (16k is the min size) $master config set repl-backlog-size [expr {16384 + $i}] + $master exec } # wait for loading to stop (fail) - set loglines [count_log_lines -1] - wait_for_condition 100 10 { + wait_for_condition 1000 1 { [s -1 loading] eq 0 } else { fail "Replica didn't disconnect" From 1c05b87e304ea1016c91858bac94990e48a1c72d Mon Sep 17 00:00:00 2001 From: Yossi Gottlieb Date: Tue, 28 Jul 2020 11:32:47 
+0300 Subject: [PATCH 077/215] TLS: Propagate and handle SSL_new() failures. (#7576) The connection API may create an accepted connection object in an error state, and callers are expected to check it before attempting to use it. Co-authored-by: mrpre (cherry picked from commit 784ceeb90d84bbc49fc2f2e2e6c7b9fae2524bd5) --- src/cluster.c | 9 +++++++++ src/connection.c | 6 +++++- src/networking.c | 11 ++++++++++- src/tls.c | 28 ++++++++++++++++++++++++---- 4 files changed, 48 insertions(+), 6 deletions(-) diff --git a/src/cluster.c b/src/cluster.c index 485683cf0..350aa7b6a 100644 --- a/src/cluster.c +++ b/src/cluster.c @@ -672,6 +672,15 @@ void clusterAcceptHandler(aeEventLoop *el, int fd, void *privdata, int mask) { connection *conn = server.tls_cluster ? connCreateAcceptedTLS(cfd, TLS_CLIENT_AUTH_YES) : connCreateAcceptedSocket(cfd); + + /* Make sure connection is not in an error state */ + if (connGetState(conn) != CONN_STATE_ACCEPTING) { + serverLog(LL_VERBOSE, + "Error creating an accepting connection for cluster node: %s", + connGetLastError(conn)); + connClose(conn); + return; + } connNonBlock(conn); connEnableTcpNoDelay(conn); diff --git a/src/connection.c b/src/connection.c index 2015c9195..09fa12f2a 100644 --- a/src/connection.c +++ b/src/connection.c @@ -85,8 +85,12 @@ connection *connCreateSocket() { /* Create a new socket-type connection that is already associated with * an accepted connection. * - * The socket is not read for I/O until connAccept() was called and + * The socket is not ready for I/O until connAccept() was called and * invoked the connection-level accept handler. + * + * Callers should use connGetState() and verify the created connection + * is not in an error state (which is not possible for a socket connection, + * but could but possible with other protocols). */ connection *connCreateAcceptedSocket(int fd) { connection *conn = connCreateSocket(); diff --git a/src/networking.c b/src/networking.c index e3b62f151..a3c04efa6 100644 --- a/src/networking.c +++ b/src/networking.c @@ -895,8 +895,18 @@ void clientAcceptHandler(connection *conn) { #define MAX_ACCEPTS_PER_CALL 1000 static void acceptCommonHandler(connection *conn, int flags, char *ip) { client *c; + char conninfo[100]; UNUSED(ip); + if (connGetState(conn) != CONN_STATE_ACCEPTING) { + serverLog(LL_VERBOSE, + "Accepted client connection in error state: %s (conn: %s)", + connGetLastError(conn), + connGetInfo(conn, conninfo, sizeof(conninfo))); + connClose(conn); + return; + } + /* Limit the number of connections we take at the same time. * * Admission control will happen before a client is created and connAccept() @@ -925,7 +935,6 @@ static void acceptCommonHandler(connection *conn, int flags, char *ip) { /* Create connection and client */ if ((c = createClient(conn)) == NULL) { - char conninfo[100]; serverLog(LL_WARNING, "Error registering fd event for the new client: %s (conn: %s)", connGetLastError(conn), diff --git a/src/tls.c b/src/tls.c index 07d1775f8..4f0ea4d65 100644 --- a/src/tls.c +++ b/src/tls.c @@ -337,11 +337,34 @@ connection *connCreateTLS(void) { return (connection *) conn; } +/* Fetch the latest OpenSSL error and store it in the connection */ +static void updateTLSError(tls_connection *conn) { + conn->c.last_errno = 0; + if (conn->ssl_error) zfree(conn->ssl_error); + conn->ssl_error = zmalloc(512); + ERR_error_string_n(ERR_get_error(), conn->ssl_error, 512); +} + +/* Create a new TLS connection that is already associated with + * an accepted underlying file descriptor. 
+ * + * The socket is not ready for I/O until connAccept() was called and + * invoked the connection-level accept handler. + * + * Callers should use connGetState() and verify the created connection + * is not in an error state. + */ connection *connCreateAcceptedTLS(int fd, int require_auth) { tls_connection *conn = (tls_connection *) connCreateTLS(); conn->c.fd = fd; conn->c.state = CONN_STATE_ACCEPTING; + if (!conn->ssl) { + updateTLSError(conn); + conn->c.state = CONN_STATE_ERROR; + return (connection *) conn; + } + switch (require_auth) { case TLS_CLIENT_AUTH_NO: SSL_set_verify(conn->ssl, SSL_VERIFY_NONE, NULL); @@ -384,10 +407,7 @@ static int handleSSLReturnCode(tls_connection *conn, int ret_value, WantIOType * break; default: /* Error! */ - conn->c.last_errno = 0; - if (conn->ssl_error) zfree(conn->ssl_error); - conn->ssl_error = zmalloc(512); - ERR_error_string_n(ERR_get_error(), conn->ssl_error, 512); + updateTLSError(conn); break; } From c68b3908a6fbd8bb0780f8e126dbf1cfc4a7428a Mon Sep 17 00:00:00 2001 From: Yossi Gottlieb Date: Tue, 28 Jul 2020 14:04:06 +0300 Subject: [PATCH 078/215] Fix TLS cluster tests. (#7578) Fix consistency test added in af5167b7f without considering TLS redis-cli configuration. (cherry picked from commit bedf1b21269dfb8afb584bc24585023af8b2a208) --- tests/cluster/tests/14-consistency-check.tcl | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/tests/cluster/tests/14-consistency-check.tcl b/tests/cluster/tests/14-consistency-check.tcl index a43725ebc..d3b3052f3 100644 --- a/tests/cluster/tests/14-consistency-check.tcl +++ b/tests/cluster/tests/14-consistency-check.tcl @@ -64,7 +64,10 @@ proc test_slave_load_expired_keys {aof} { kill_instance redis $replica_id set master_port [get_instance_attrib redis $master_id port] - exec ../../../src/redis-cli -h 127.0.0.1 -p $master_port debug sleep [expr $data_ttl+3] > /dev/null & + exec ../../../src/redis-cli \ + -h 127.0.0.1 -p $master_port \ + {*}[rediscli_tls_config "../../../tests"] \ + debug sleep [expr $data_ttl+3] > /dev/null & while {[clock seconds] <= $end_time} { #wait for $data_ttl seconds From 4238df54366fdcdf35f4b2ae668d0833372cd3f1 Mon Sep 17 00:00:00 2001 From: Wen Hui Date: Tue, 28 Jul 2020 15:05:48 -0400 Subject: [PATCH 079/215] fix leak in error handling of debug populate command (#7062) valsize was not modified during the for loop below instead of getting from c->argv[4], therefore there is no need to put inside the for loop.. Moreover, putting the check outside loop will also avoid memory leaking, decrRefCount(key) should be called in the original code if we put the check in for loop (cherry picked from commit c69a9b2f61e0747fabb8120f03c9e2a29b43b472) --- src/debug.c | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/src/debug.c b/src/debug.c index 60cc2a1fa..0bea69876 100644 --- a/src/debug.c +++ b/src/debug.c @@ -591,14 +591,13 @@ NULL if (getLongFromObjectOrReply(c, c->argv[2], &keys, NULL) != C_OK) return; dictExpand(c->db->dict,keys); + long valsize = 0; + if ( c->argc == 5 && getLongFromObjectOrReply(c, c->argv[4], &valsize, NULL) != C_OK ) + return; for (j = 0; j < keys; j++) { - long valsize = 0; snprintf(buf,sizeof(buf),"%s:%lu", (c->argc == 3) ? 
"key" : (char*)c->argv[3]->ptr, j); key = createStringObject(buf,strlen(buf)); - if (c->argc == 5) - if (getLongFromObjectOrReply(c, c->argv[4], &valsize, NULL) != C_OK) - return; if (lookupKeyWrite(c->db,key) != NULL) { decrRefCount(key); continue; From b2bd3c0653ce19c4e22b69df920324248094e2ae Mon Sep 17 00:00:00 2001 From: Wen Hui Date: Wed, 29 Jul 2020 01:22:54 -0400 Subject: [PATCH 080/215] Add SignalModifiedKey hook in XGROUP CREATE with MKSTREAM option (#7562) (cherry picked from commit f33acb3f029d7f5aa68e22e612036e04417e3e91) --- src/t_stream.c | 1 + 1 file changed, 1 insertion(+) diff --git a/src/t_stream.c b/src/t_stream.c index f564b1ff9..a54671938 100644 --- a/src/t_stream.c +++ b/src/t_stream.c @@ -1841,6 +1841,7 @@ NULL o = createStreamObject(); dbAdd(c->db,c->argv[2],o); s = o->ptr; + signalModifiedKey(c,c->db,c->argv[2]); } streamCG *cg = streamCreateCG(s,grpname,sdslen(grpname),&id); From 2bcc056a41a43d97530e021fa48f101b78dfecb2 Mon Sep 17 00:00:00 2001 From: namtsui <384455+namtsui@users.noreply.github.com> Date: Tue, 28 Jul 2020 22:25:56 -0700 Subject: [PATCH 081/215] Avoid an out-of-bounds read in the redis-sentinel (#7443) The Redis sentinel would crash with a segfault after a few minutes because it tried to read from a page without read permissions. Check up front whether the sds is long enough to contain redis:slave or redis:master before memcmp() as is done everywhere else in sentinelRefreshInstanceInfo(). Bug report and commit message from Theo Buehler. Fix from Nam Nguyen. Co-authored-by: Nam Nguyen (cherry picked from commit 63dae5232415d216dfc1acce8b5335e20aa3b178) --- src/sentinel.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/sentinel.c b/src/sentinel.c index 5be4193dc..5bd594955 100644 --- a/src/sentinel.c +++ b/src/sentinel.c @@ -2218,8 +2218,8 @@ void sentinelRefreshInstanceInfo(sentinelRedisInstance *ri, const char *info) { } /* role: */ - if (!memcmp(l,"role:master",11)) role = SRI_MASTER; - else if (!memcmp(l,"role:slave",10)) role = SRI_SLAVE; + if (sdslen(l) >= 11 && !memcmp(l,"role:master",11)) role = SRI_MASTER; + else if (sdslen(l) >= 10 && !memcmp(l,"role:slave",10)) role = SRI_SLAVE; if (role == SRI_SLAVE) { /* master_host: */ From e8efcdee0a96ff9950fa3bd545ceb91af1492bcf Mon Sep 17 00:00:00 2001 From: Yossi Gottlieb Date: Wed, 29 Jul 2020 17:03:38 +0300 Subject: [PATCH 082/215] Clarify RM_BlockClient() error condition. (#6093) (cherry picked from commit 7af05f07ff72a4246ddaf986a1e815eadd0cbfef) --- src/module.c | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/src/module.c b/src/module.c index 9316e004d..b381b4f99 100644 --- a/src/module.c +++ b/src/module.c @@ -4347,6 +4347,7 @@ void unblockClientFromModule(client *c) { * Even when blocking on keys, RM_UnblockClient() can be called however, but * in that case the privdata argument is disregarded, because we pass the * reply callback the privdata that is set here while blocking. + * */ RedisModuleBlockedClient *moduleBlockClient(RedisModuleCtx *ctx, RedisModuleCmdFunc reply_callback, RedisModuleCmdFunc timeout_callback, void (*free_privdata)(RedisModuleCtx*,void*), long long timeout_ms, RedisModuleString **keys, int numkeys, void *privdata) { client *c = ctx->client; @@ -4439,6 +4440,14 @@ int moduleTryServeClientBlockedOnKey(client *c, robj *key) { * Note: RedisModule_UnblockClient should be called for every blocked client, * even if client was killed, timed-out or disconnected. Failing to do so * will result in memory leaks. 
+ * + * There are some cases where RedisModule_BlockClient() cannot be used: + * + * 1. If the client is a Lua script. + * 2. If the client is executing a MULTI block. + * + * In these cases, a call to RedisModule_BlockClient() will **not** block the + * client, but instead produce a specific error reply. */ RedisModuleBlockedClient *RM_BlockClient(RedisModuleCtx *ctx, RedisModuleCmdFunc reply_callback, RedisModuleCmdFunc timeout_callback, void (*free_privdata)(RedisModuleCtx*,void*), long long timeout_ms) { return moduleBlockClient(ctx,reply_callback,timeout_callback,free_privdata,timeout_ms, NULL,0,NULL); From 7200b3aa0f0a74ab38e2f2d1d418c7de693806a2 Mon Sep 17 00:00:00 2001 From: WuYunlong Date: Thu, 30 Jul 2020 13:56:21 +0800 Subject: [PATCH 083/215] Fix running single test 14-consistency-check.tcl (#7587) (cherry picked from commit f3352daf4f4826e1cad4c163fd6e35b81a72e21b) --- tests/cluster/tests/14-consistency-check.tcl | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/cluster/tests/14-consistency-check.tcl b/tests/cluster/tests/14-consistency-check.tcl index d3b3052f3..5a80dd0df 100644 --- a/tests/cluster/tests/14-consistency-check.tcl +++ b/tests/cluster/tests/14-consistency-check.tcl @@ -1,4 +1,5 @@ source "../tests/includes/init-tests.tcl" +source "../../../tests/support/cli.tcl" test "Create a 5 nodes cluster" { create_cluster 5 5 From e511ab2cc6662100420adc17098cfd27f052c3e6 Mon Sep 17 00:00:00 2001 From: Yossi Gottlieb Date: Thu, 30 Jul 2020 13:25:10 +0300 Subject: [PATCH 084/215] CI: Add daily CentOS 7.x jobs. (#7582) (cherry picked from commit 4ac1f9ac55b841094861e84cdec5da0f4a294a86) --- .github/workflows/daily.yml | 54 ++++++++++++++++++++++++++++++++++--- 1 file changed, 50 insertions(+), 4 deletions(-) diff --git a/.github/workflows/daily.yml b/.github/workflows/daily.yml index acc811d3a..458f07c97 100644 --- a/.github/workflows/daily.yml +++ b/.github/workflows/daily.yml @@ -10,7 +10,7 @@ on: jobs: - test-jemalloc: + test-ubuntu-jemalloc: runs-on: ubuntu-latest timeout-minutes: 14400 steps: @@ -28,7 +28,7 @@ jobs: - name: cluster tests run: ./runtest-cluster - test-libc-malloc: + test-ubuntu-libc-malloc: runs-on: ubuntu-latest timeout-minutes: 14400 steps: @@ -46,7 +46,7 @@ jobs: - name: cluster tests run: ./runtest-cluster - test-32bit: + test-ubuntu-32bit: runs-on: ubuntu-latest timeout-minutes: 14400 steps: @@ -68,7 +68,7 @@ jobs: - name: cluster tests run: ./runtest-cluster - test-tls: + test-ubuntu-tls: runs-on: ubuntu-latest timeout-minutes: 14400 steps: @@ -101,3 +101,49 @@ jobs: ./runtest --valgrind --verbose --clients 1 - name: module api test run: ./runtest-moduleapi --valgrind --verbose --clients 1 + + test-centos7-jemalloc: + runs-on: ubuntu-latest + container: centos:7 + timeout-minutes: 14400 + steps: + - uses: actions/checkout@v2 + - name: make + run: | + yum -y install centos-release-scl + yum -y install devtoolset-7 + scl enable devtoolset-7 make + - name: test + run: | + yum -y install tcl + ./runtest --accurate --verbose + - name: module api test + run: ./runtest-moduleapi --verbose + - name: sentinel tests + run: ./runtest-sentinel + - name: cluster tests + run: ./runtest-cluster + + test-centos7-tls: + runs-on: ubuntu-latest + container: centos:7 + timeout-minutes: 14400 + steps: + - uses: actions/checkout@v2 + - name: make + run: | + yum -y install centos-release-scl epel-release + yum -y install devtoolset-7 openssl-devel openssl + scl enable devtoolset-7 make BUILD_TLS=yes + - name: test + run: | + yum -y install tcl tcltls + 
./utils/gen-test-certs.sh + ./runtest --accurate --verbose --tls + - name: module api test + run: ./runtest-moduleapi --verbose --tls + - name: sentinel tests + run: ./runtest-sentinel --tls + - name: cluster tests + run: ./runtest-cluster --tls + From a5294c4e525e2328946b9317e5542c6a874ce358 Mon Sep 17 00:00:00 2001 From: Oran Agra Date: Fri, 31 Jul 2020 13:14:29 +0300 Subject: [PATCH 085/215] module hook for master link up missing on successful psync (#7584) besides, hooks test was time sensitive. when the replica managed to reconnect quickly after the client kill, the test would fail (cherry picked from commit f7e77759902aa19cfa537ed454e6bc987498e8c5) --- src/replication.c | 5 +++++ tests/unit/moduleapi/hooks.tcl | 19 +++++++++++++++++-- 2 files changed, 22 insertions(+), 2 deletions(-) diff --git a/src/replication.c b/src/replication.c index 197acde79..a8f46be95 100644 --- a/src/replication.c +++ b/src/replication.c @@ -2824,6 +2824,11 @@ void replicationResurrectCachedMaster(connection *conn) { server.repl_state = REPL_STATE_CONNECTED; server.repl_down_since = 0; + /* Fire the master link modules event. */ + moduleFireServerEvent(REDISMODULE_EVENT_MASTER_LINK_CHANGE, + REDISMODULE_SUBEVENT_MASTER_LINK_UP, + NULL); + /* Re-add to the list of clients. */ linkClient(server.master); if (connSetReadHandler(server.master->conn, readQueryFromClient)) { diff --git a/tests/unit/moduleapi/hooks.tcl b/tests/unit/moduleapi/hooks.tcl index bf2b9010b..da0307ce6 100644 --- a/tests/unit/moduleapi/hooks.tcl +++ b/tests/unit/moduleapi/hooks.tcl @@ -114,6 +114,21 @@ tags "modules" { test {Test master link down hook} { r client kill type master assert_equal [r hooks.event_count masterlink-down] 1 + + wait_for_condition 50 100 { + [string match {*master_link_status:up*} [r info replication]] + } else { + fail "Replica didn't reconnect" + } + + assert_equal [r hooks.event_count masterlink-down] 1 + assert_equal [r hooks.event_count masterlink-up] 2 + } + + wait_for_condition 50 10 { + [string match {*master_link_status:up*} [r info replication]] + } else { + fail "Can't turn the instance into a replica" } $replica replicaof no one @@ -125,8 +140,8 @@ tags "modules" { } test {Test replica-offline hook} { - assert_equal [r -1 hooks.event_count replica-online] 1 - assert_equal [r -1 hooks.event_count replica-offline] 1 + assert_equal [r -1 hooks.event_count replica-online] 2 + assert_equal [r -1 hooks.event_count replica-offline] 2 } # get the replica stdout, to be used by the next test set replica_stdout [srv 0 stdout] From 9f0e8f7849843a6d559b2c0f687b52fcd012ebbb Mon Sep 17 00:00:00 2001 From: Yossi Gottlieb Date: Fri, 31 Jul 2020 13:55:57 +0300 Subject: [PATCH 086/215] Fix test-centos7-tls daily job. 
(#7598) (cherry picked from commit f2da3efc78fe39c52fd46a2b9b5ed05e29647119) --- .github/workflows/daily.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/daily.yml b/.github/workflows/daily.yml index 458f07c97..986bd223b 100644 --- a/.github/workflows/daily.yml +++ b/.github/workflows/daily.yml @@ -112,7 +112,7 @@ jobs: run: | yum -y install centos-release-scl yum -y install devtoolset-7 - scl enable devtoolset-7 make + scl enable devtoolset-7 "make" - name: test run: | yum -y install tcl @@ -134,7 +134,7 @@ jobs: run: | yum -y install centos-release-scl epel-release yum -y install devtoolset-7 openssl-devel openssl - scl enable devtoolset-7 make BUILD_TLS=yes + scl enable devtoolset-7 "make BUILD_TLS=yes" - name: test run: | yum -y install tcl tcltls From 916b215fc5e772e6487881b0bf44d996cc89f84f Mon Sep 17 00:00:00 2001 From: Oran Agra Date: Tue, 4 Aug 2020 08:53:50 +0300 Subject: [PATCH 087/215] fix new rdb test failing on timing issues (#7604) apparenlty on github actions sometimes 500ms is not enough (cherry picked from commit 824bd2ac11472b7a3fce9fcf3189a8e6c6048115) --- tests/integration/rdb.tcl | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/integration/rdb.tcl b/tests/integration/rdb.tcl index 6535cd089..9cd970350 100644 --- a/tests/integration/rdb.tcl +++ b/tests/integration/rdb.tcl @@ -139,10 +139,10 @@ start_server {} { test {bgsave resets the change counter} { r config set rdb-key-save-delay 0 r bgsave - wait_for_condition 5 100 { + wait_for_condition 50 100 { [s rdb_bgsave_in_progress] == 0 } else { - fail "bgsave not aborted" + fail "bgsave not done" } assert_equal [s rdb_changes_since_last_save] 0 } From dc46f6f0416e4d20f9f96008a82f08cb6bcbe392 Mon Sep 17 00:00:00 2001 From: Frank Meier <40453138+meierfra-ergon@users.noreply.github.com> Date: Wed, 5 Aug 2020 11:06:33 +0200 Subject: [PATCH 088/215] add force option to 'create-cluster create' script call (#7612) (cherry picked from commit 3244fae7d4f2e77f6d0d3f8280e852bd63753462) --- utils/create-cluster/create-cluster | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/utils/create-cluster/create-cluster b/utils/create-cluster/create-cluster index 931f6f521..d9a3b8c35 100755 --- a/utils/create-cluster/create-cluster +++ b/utils/create-cluster/create-cluster @@ -38,7 +38,11 @@ then PORT=$((PORT+1)) HOSTS="$HOSTS $CLUSTER_HOST:$PORT" done - $BIN_PATH/redis-cli --cluster create $HOSTS --cluster-replicas $REPLICAS + OPT_ARG="" + if [ "$2" == "-f" ]; then + OPT_ARG="--cluster-yes" + fi + $BIN_PATH/redis-cli --cluster create $HOSTS --cluster-replicas $REPLICAS $OPT_ARG exit 0 fi @@ -104,7 +108,7 @@ fi echo "Usage: $0 [start|create|stop|watch|tail|clean|call]" echo "start -- Launch Redis Cluster instances." -echo "create -- Create a cluster using redis-cli --cluster create." +echo "create [-f] -- Create a cluster using redis-cli --cluster create." echo "stop -- Stop Redis Cluster instances." echo "watch -- Show CLUSTER NODES output (first 30 lines) of first node." echo "tail -- Run tail -f of instance at base port + ID." 
From 3a677b45ac118a3826b2cbadda7f80d5fe6ebb57 Mon Sep 17 00:00:00 2001 From: Frank Meier Date: Thu, 28 Feb 2019 10:59:06 +0100 Subject: [PATCH 089/215] reintroduce REDISCLI_CLUSTER_YES env variable in redis-cli the variable was introduced only in the 5.0 branch in #5879 bc6c1c40db (cherry picked from commit 51077c821236eba5223b1e624f2462214a0e5062) --- src/redis-cli.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/src/redis-cli.c b/src/redis-cli.c index 0148964bf..ec7153d15 100644 --- a/src/redis-cli.c +++ b/src/redis-cli.c @@ -73,6 +73,7 @@ #define REDIS_CLI_RCFILE_ENV "REDISCLI_RCFILE" #define REDIS_CLI_RCFILE_DEFAULT ".redisclirc" #define REDIS_CLI_AUTH_ENV "REDISCLI_AUTH" +#define REDIS_CLI_CLUSTER_YES_ENV "REDISCLI_CLUSTER_YES" #define CLUSTER_MANAGER_SLOTS 16384 #define CLUSTER_MANAGER_MIGRATE_TIMEOUT 60000 @@ -1668,6 +1669,11 @@ static void parseEnv() { if (auth != NULL && config.auth == NULL) { config.auth = auth; } + + char *cluster_yes = getenv(REDIS_CLI_CLUSTER_YES_ENV); + if (cluster_yes != NULL && !strcmp(cluster_yes, "1")) { + config.cluster_manager_command.flags |= CLUSTER_MANAGER_CMD_FLAG_YES; + } } static sds readArgFromStdin(void) { From e8801f7eaa51a5af0bcb02e09a3e03b536fe754a Mon Sep 17 00:00:00 2001 From: Oran Agra Date: Wed, 5 Aug 2020 12:18:44 +0300 Subject: [PATCH 090/215] redis-cli --cluster-yes - negate force flag for clarity this internal flag is there so that some commands do not comply to `--cluster-yes` (cherry picked from commit 1aa31e4da9a2daf9058dd923738e331bc5c91e31) --- src/redis-cli.c | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/src/redis-cli.c b/src/redis-cli.c index ec7153d15..de5d08149 100644 --- a/src/redis-cli.c +++ b/src/redis-cli.c @@ -1803,10 +1803,10 @@ static void usage(void) { exit(1); } -static int confirmWithYes(char *msg, int force) { - /* if force is true and --cluster-yes option is on, +static int confirmWithYes(char *msg, int ignore_force) { + /* if --cluster-yes option is set and ignore_force is false, * do not prompt for an answer */ - if (force && + if (!ignore_force && (config.cluster_manager_command.flags & CLUSTER_MANAGER_CMD_FLAG_YES)) { return 1; } @@ -4500,7 +4500,7 @@ static int clusterManagerFixSlotsCoverage(char *all_slots) { dictReleaseIterator(iter); /* we want explicit manual confirmation from users for all the fix cases */ - int force = 0; + int ignore_force = 1; /* Handle case "1": keys in no node. 
*/ if (listLength(none) > 0) { @@ -4508,7 +4508,7 @@ static int clusterManagerFixSlotsCoverage(char *all_slots) { "across the cluster:\n"); clusterManagerPrintSlotsList(none); if (confirmWithYes("Fix these slots by covering with a random node?", - force)) { + ignore_force)) { listIter li; listNode *ln; listRewind(none, &li); @@ -4535,7 +4535,7 @@ static int clusterManagerFixSlotsCoverage(char *all_slots) { printf("The following uncovered slots have keys in just one node:\n"); clusterManagerPrintSlotsList(single); if (confirmWithYes("Fix these slots by covering with those nodes?", - force)) { + ignore_force)) { listIter li; listNode *ln; listRewind(single, &li); @@ -4567,7 +4567,7 @@ static int clusterManagerFixSlotsCoverage(char *all_slots) { printf("The following uncovered slots have keys in multiple nodes:\n"); clusterManagerPrintSlotsList(multi); if (confirmWithYes("Fix these slots by moving keys " - "into a single node?", force)) { + "into a single node?", ignore_force)) { listIter li; listNode *ln; listRewind(multi, &li); @@ -5530,8 +5530,8 @@ assign_replicas: } clusterManagerOptimizeAntiAffinity(ip_nodes, ip_count); clusterManagerShowNodes(); - int force = 1; - if (confirmWithYes("Can I set the above configuration?", force)) { + int ignore_force = 0; + if (confirmWithYes("Can I set the above configuration?", ignore_force)) { listRewind(cluster_manager.nodes, &li); while ((ln = listNext(&li)) != NULL) { clusterManagerNode *node = ln->value; From 0e581e0044ed0ca2319819a68fd4bb2b88cf5f59 Mon Sep 17 00:00:00 2001 From: Wen Hui Date: Sat, 8 Aug 2020 07:42:32 -0400 Subject: [PATCH 091/215] fix memory leak in ACLLoadFromFile error handling (#7623) (cherry picked from commit 3f67b0337833cbd2a2746dad7cd8411a31b75f70) --- src/acl.c | 1 + 1 file changed, 1 insertion(+) diff --git a/src/acl.c b/src/acl.c index 3ce45f03b..e0432ba5c 100644 --- a/src/acl.c +++ b/src/acl.c @@ -1327,6 +1327,7 @@ sds ACLLoadFromFile(const char *filename) { errors = sdscatprintf(errors, "'%s:%d: username '%s' contains invalid characters. ", server.acl_filename, linenum, argv[1]); + sdsfreesplitres(argv,argc); continue; } From 9fd0c027e78f86611ea02e14925b72c130882e33 Mon Sep 17 00:00:00 2001 From: Wang Yuan Date: Sun, 9 Aug 2020 03:03:56 +0800 Subject: [PATCH 092/215] Print error info if failed opening config file (#6943) (cherry picked from commit ea7eeb2fd2e7a73d951f394d5d14a5ba5a75d4b2) --- src/config.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/config.c b/src/config.c index 10faa3bea..cfe420552 100644 --- a/src/config.c +++ b/src/config.c @@ -556,7 +556,8 @@ void loadServerConfig(char *filename, char *options) { } else { if ((fp = fopen(filename,"r")) == NULL) { serverLog(LL_WARNING, - "Fatal error, can't open config file '%s'", filename); + "Fatal error, can't open config file '%s': %s", + filename, strerror(errno)); exit(1); } } From 086707315fd58235a0ed956685e8b422d77006da Mon Sep 17 00:00:00 2001 From: WuYunlong Date: Tue, 11 Aug 2020 10:18:21 +0800 Subject: [PATCH 093/215] see #7250, fix signature of RedisModule_DeauthenticateAndCloseClient (#7645) In redismodule.h, RedisModule_DeauthenticateAndCloseClient returns void `void REDISMODULE_API_FUNC(RedisModule_DeauthenticateAndCloseClient)(RedisModuleCtx *ctx, uint64_t client_id);` But in module.c, RM_DeauthenticateAndCloseClient returns int `int RM_DeauthenticateAndCloseClient(RedisModuleCtx *ctx, uint64_t client_id)` It it safe to change return value from `void` to `int` from the user's perspective. 
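For illustration only (not part of this patch), a minimal module sketch that relies on the corrected `int` signature. The module name `kickme`, the command name `kickme.self`, and the assumption that the call reports success as `REDISMODULE_OK` are mine, not taken from the patch:

```
#include "redismodule.h"

/* Hypothetical command (sketch): deauthenticate and close the calling client,
 * checking the int return value that the corrected prototype exposes. */
int KickSelf_RedisCommand(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
    REDISMODULE_NOT_USED(argv);
    REDISMODULE_NOT_USED(argc);
    uint64_t id = RedisModule_GetClientId(ctx);
    if (RedisModule_DeauthenticateAndCloseClient(ctx, id) == REDISMODULE_OK)
        return RedisModule_ReplyWithSimpleString(ctx, "OK");
    return RedisModule_ReplyWithError(ctx, "ERR could not close client");
}

int RedisModule_OnLoad(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
    REDISMODULE_NOT_USED(argv);
    REDISMODULE_NOT_USED(argc);
    if (RedisModule_Init(ctx,"kickme",1,REDISMODULE_APIVER_1) == REDISMODULE_ERR)
        return REDISMODULE_ERR;
    if (RedisModule_CreateCommand(ctx,"kickme.self",KickSelf_RedisCommand,
                                  "",0,0,0) == REDISMODULE_ERR)
        return REDISMODULE_ERR;
    return REDISMODULE_OK;
}
```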
(cherry picked from commit d6220f12a9fafeb07ad86fa91e88383ca2a524c8) --- src/redismodule.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/redismodule.h b/src/redismodule.h index 23b4d26e0..d67b01f68 100644 --- a/src/redismodule.h +++ b/src/redismodule.h @@ -679,7 +679,7 @@ void REDISMODULE_API_FUNC(RedisModule_FreeModuleUser)(RedisModuleUser *user); int REDISMODULE_API_FUNC(RedisModule_SetModuleUserACL)(RedisModuleUser *user, const char* acl); int REDISMODULE_API_FUNC(RedisModule_AuthenticateClientWithACLUser)(RedisModuleCtx *ctx, const char *name, size_t len, RedisModuleUserChangedFunc callback, void *privdata, uint64_t *client_id); int REDISMODULE_API_FUNC(RedisModule_AuthenticateClientWithUser)(RedisModuleCtx *ctx, RedisModuleUser *user, RedisModuleUserChangedFunc callback, void *privdata, uint64_t *client_id); -void REDISMODULE_API_FUNC(RedisModule_DeauthenticateAndCloseClient)(RedisModuleCtx *ctx, uint64_t client_id); +int REDISMODULE_API_FUNC(RedisModule_DeauthenticateAndCloseClient)(RedisModuleCtx *ctx, uint64_t client_id); #endif #define RedisModule_IsAOFClient(id) ((id) == CLIENT_ID_AOF) From 0314429c8f3325b437cfd2a8e6dc8d03ed265e26 Mon Sep 17 00:00:00 2001 From: Yossi Gottlieb Date: Tue, 11 Aug 2020 10:59:47 +0300 Subject: [PATCH 094/215] Run daily workflow on main repo only (no forks). (#7646) (cherry picked from commit 944cca98c84efcf0365b0525f88124c53dbe1c53) --- .github/workflows/daily.yml | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/.github/workflows/daily.yml b/.github/workflows/daily.yml index 986bd223b..010a32289 100644 --- a/.github/workflows/daily.yml +++ b/.github/workflows/daily.yml @@ -12,6 +12,7 @@ jobs: test-ubuntu-jemalloc: runs-on: ubuntu-latest + if: github.repository == 'redis/redis' timeout-minutes: 14400 steps: - uses: actions/checkout@v2 @@ -30,6 +31,7 @@ jobs: test-ubuntu-libc-malloc: runs-on: ubuntu-latest + if: github.repository == 'redis/redis' timeout-minutes: 14400 steps: - uses: actions/checkout@v2 @@ -48,6 +50,7 @@ jobs: test-ubuntu-32bit: runs-on: ubuntu-latest + if: github.repository == 'redis/redis' timeout-minutes: 14400 steps: - uses: actions/checkout@v2 @@ -70,6 +73,7 @@ jobs: test-ubuntu-tls: runs-on: ubuntu-latest + if: github.repository == 'redis/redis' timeout-minutes: 14400 steps: - uses: actions/checkout@v2 @@ -90,6 +94,7 @@ jobs: test-valgrind: runs-on: ubuntu-latest + if: github.repository == 'redis/redis' timeout-minutes: 14400 steps: - uses: actions/checkout@v2 @@ -104,6 +109,7 @@ jobs: test-centos7-jemalloc: runs-on: ubuntu-latest + if: github.repository == 'redis/redis' container: centos:7 timeout-minutes: 14400 steps: @@ -126,6 +132,7 @@ jobs: test-centos7-tls: runs-on: ubuntu-latest + if: github.repository == 'redis/redis' container: centos:7 timeout-minutes: 14400 steps: From 171f83306edff4a2e86f3fcd73c0aada141a11ff Mon Sep 17 00:00:00 2001 From: YoongHM Date: Tue, 11 Aug 2020 17:30:32 +0800 Subject: [PATCH 095/215] Start redis after network is online (#7639) The two lines allow systemd to start redis.service after the network is online. Only after the network is online that Redis could bind to IP address other than 127.0.0.1 during initial boot up process. 
(cherry picked from commit 8e937ce4cc1462d996bae6a45e8c0a66f71e7ee6) --- utils/systemd-redis_server.service | 2 ++ 1 file changed, 2 insertions(+) diff --git a/utils/systemd-redis_server.service b/utils/systemd-redis_server.service index addee3498..cf158644a 100644 --- a/utils/systemd-redis_server.service +++ b/utils/systemd-redis_server.service @@ -20,6 +20,8 @@ Description=Redis data structure server Documentation=https://redis.io/documentation #Before=your_application.service another_example_application.service #AssertPathExists=/var/lib/redis +Wants=network-online.target +After=network-online.target [Service] ExecStart=/usr/local/bin/redis-server --supervised systemd --daemonize no From 44ef19743e79f6dde31c162184e80981c2902af9 Mon Sep 17 00:00:00 2001 From: "zhaozhao.zz" Date: Tue, 30 Oct 2018 00:38:20 +0800 Subject: [PATCH 096/215] using proto-max-bulk-len in checkStringLength for SETRANGE and APPEND (cherry picked from commit 2e69bfe44d154d6739da48ff87f1c66045f68e93) --- src/t_string.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/t_string.c b/src/t_string.c index 259f43142..4be758e65 100644 --- a/src/t_string.c +++ b/src/t_string.c @@ -35,8 +35,8 @@ *----------------------------------------------------------------------------*/ static int checkStringLength(client *c, long long size) { - if (size > 512*1024*1024) { - addReplyError(c,"string exceeds maximum allowed size (512MB)"); + if (!(c->flags & CLIENT_MASTER) && size > server.proto_max_bulk_len) { + addReplyError(c,"string exceeds maximum allowed size (proto-max-bulk-len)"); return C_ERR; } return C_OK; From d3831559cf1da869f1b6b3697de8b914fe78beab Mon Sep 17 00:00:00 2001 From: "zhaozhao.zz" Date: Tue, 30 Oct 2018 00:47:49 +0800 Subject: [PATCH 097/215] config: proto-max-bulk-len must be 1mb or greater (cherry picked from commit bd4b33d7a2b00fddcaaccee9705d21b92d97311e) --- redis.conf | 2 +- src/config.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/redis.conf b/redis.conf index d4e3e47f0..565c24eca 100644 --- a/redis.conf +++ b/redis.conf @@ -1641,7 +1641,7 @@ client-output-buffer-limit pubsub 32mb 8mb 60 # In the Redis protocol, bulk requests, that are, elements representing single # strings, are normally limited ot 512 mb. However you can change this limit -# here. 
+# here, but must be 1mb or greater # # proto-max-bulk-len 512mb diff --git a/src/config.c b/src/config.c index cfe420552..476956e18 100644 --- a/src/config.c +++ b/src/config.c @@ -2205,7 +2205,7 @@ standardConfig configs[] = { createLongLongConfig("cluster-node-timeout", NULL, MODIFIABLE_CONFIG, 0, LLONG_MAX, server.cluster_node_timeout, 15000, INTEGER_CONFIG, NULL, NULL), createLongLongConfig("slowlog-log-slower-than", NULL, MODIFIABLE_CONFIG, -1, LLONG_MAX, server.slowlog_log_slower_than, 10000, INTEGER_CONFIG, NULL, NULL), createLongLongConfig("latency-monitor-threshold", NULL, MODIFIABLE_CONFIG, 0, LLONG_MAX, server.latency_monitor_threshold, 0, INTEGER_CONFIG, NULL, NULL), - createLongLongConfig("proto-max-bulk-len", NULL, MODIFIABLE_CONFIG, 0, LLONG_MAX, server.proto_max_bulk_len, 512ll*1024*1024, MEMORY_CONFIG, NULL, NULL), /* Bulk request max size */ + createLongLongConfig("proto-max-bulk-len", NULL, MODIFIABLE_CONFIG, 1024*1024, LLONG_MAX, server.proto_max_bulk_len, 512ll*1024*1024, MEMORY_CONFIG, NULL, NULL), /* Bulk request max size */ createLongLongConfig("stream-node-max-entries", NULL, MODIFIABLE_CONFIG, 0, LLONG_MAX, server.stream_node_max_entries, 100, INTEGER_CONFIG, NULL, NULL), createLongLongConfig("repl-backlog-size", NULL, MODIFIABLE_CONFIG, 1, LLONG_MAX, server.repl_backlog_size, 1024*1024, MEMORY_CONFIG, NULL, updateReplBacklogSize), /* Default: 1mb */ From 3c87d572b9951d040fe7aa94af189291de3813bf Mon Sep 17 00:00:00 2001 From: "zhaozhao.zz" Date: Tue, 30 Oct 2018 00:50:36 +0800 Subject: [PATCH 098/215] CLIENT_MASTER should ignore server.proto_max_bulk_len (cherry picked from commit 589e610ebc62ced9c5392b7ece122c68c9990988) --- src/networking.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/networking.c b/src/networking.c index a3c04efa6..aa42c43e9 100644 --- a/src/networking.c +++ b/src/networking.c @@ -1691,7 +1691,8 @@ int processMultibulkBuffer(client *c) { } ok = string2ll(c->querybuf+c->qb_pos+1,newline-(c->querybuf+c->qb_pos+1),&ll); - if (!ok || ll < 0 || ll > server.proto_max_bulk_len) { + if (!ok || ll < 0 || + (!(c->flags & CLIENT_MASTER) && ll > server.proto_max_bulk_len)) { addReplyError(c,"Protocol error: invalid bulk length"); setProtocolError("invalid bulk length",c); return C_ERR; From 840a9951b548846752658e9b00135c395663f26f Mon Sep 17 00:00:00 2001 From: "zhaozhao.zz" <276441700@qq.com> Date: Tue, 11 Aug 2020 20:51:27 +0800 Subject: [PATCH 099/215] redis-benchmark: fix wrong random key for hset (#4895) (cherry picked from commit ff1e4a7063c4de96bba3661f5f282ed61b6dfe8e) --- src/redis-benchmark.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/redis-benchmark.c b/src/redis-benchmark.c index f47cbe333..6385fa9b3 100644 --- a/src/redis-benchmark.c +++ b/src/redis-benchmark.c @@ -1722,7 +1722,7 @@ int main(int argc, const char **argv) { if (test_is_selected("hset")) { len = redisFormatCommand(&cmd, - "HSET myhash:{tag}:__rand_int__ element:__rand_int__ %s",data); + "HSET myhash:{tag} element:__rand_int__ %s",data); benchmark("HSET",cmd,len); free(cmd); } From 31415cb27ac6a8c6a383d16c09a45801f97e8b1f Mon Sep 17 00:00:00 2001 From: Wagner Francisco Mezaroba Date: Tue, 11 Aug 2020 19:57:21 +0100 Subject: [PATCH 100/215] allow --pattern to be used along with --bigkeys (#3586) Adds --pattern option to cli's --bigkeys, --hotkeys & --scan modes (cherry picked from commit e2a71338ebfb719cba94742f1c75ce755cd6074d) --- src/redis-cli.c | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git 
a/src/redis-cli.c b/src/redis-cli.c index de5d08149..0b44ec252 100644 --- a/src/redis-cli.c +++ b/src/redis-cli.c @@ -1764,7 +1764,8 @@ static void usage(void) { " --hotkeys Sample Redis keys looking for hot keys.\n" " only works when maxmemory-policy is *lfu.\n" " --scan List all keys using the SCAN command.\n" -" --pattern Useful with --scan to specify a SCAN pattern.\n" +" --pattern Keys pattern when using the --scan, --bigkeys or --hotkeys\n" +" options (default: *).\n" " --intrinsic-latency Run a test to measure intrinsic system latency.\n" " The test will run for the specified amount of seconds.\n" " --eval Send an EVAL command using the Lua script at .\n" @@ -7193,7 +7194,13 @@ static void pipeMode(void) { *--------------------------------------------------------------------------- */ static redisReply *sendScan(unsigned long long *it) { - redisReply *reply = redisCommand(context, "SCAN %llu", *it); + redisReply *reply; + + if (config.pattern) + reply = redisCommand(context,"SCAN %llu MATCH %s", + *it,config.pattern); + else + reply = redisCommand(context,"SCAN %llu",*it); /* Handle any error conditions */ if(reply == NULL) { From 39ffc3702fa173de39c1158742b14a23a8432ea6 Mon Sep 17 00:00:00 2001 From: Madelyn Olson <34459052+madolson@users.noreply.github.com> Date: Tue, 11 Aug 2020 20:16:41 -0700 Subject: [PATCH 101/215] Fixed timer warning (#5953) (cherry picked from commit 79c506ebf07db32bbcf84ede9d72dd9eb04e9812) --- src/modules/hellotimer.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/modules/hellotimer.c b/src/modules/hellotimer.c index 57b111b7c..f6700df26 100644 --- a/src/modules/hellotimer.c +++ b/src/modules/hellotimer.c @@ -40,7 +40,7 @@ /* Timer callback. */ void timerHandler(RedisModuleCtx *ctx, void *data) { REDISMODULE_NOT_USED(ctx); - printf("Fired %s!\n", data); + printf("Fired %s!\n", (char *)data); RedisModule_Free(data); } From 2e7ad5891863dd71bb96ff17d3d6a11c1d924794 Mon Sep 17 00:00:00 2001 From: Yossi Gottlieb Date: Mon, 17 Aug 2020 17:36:50 +0300 Subject: [PATCH 102/215] TLS: relax verification on CONFIG SET. (#7665) Avoid re-configuring (and validating) SSL/TLS configuration on `CONFIG SET` when TLS is not actively enabled for incoming connections, cluster bus or replication. This fixes failures when tests run without `--tls` on binaries that were built with TLS support. An additional benefit is that it's now possible to perform a multi-step configuration process while TLS is disabled. The new configuration will be verified and applied only when TLS is effectively enabled. 
(cherry picked from commit fb2a94af3fbb3f3cf8b26b8bd89387669cb111a1) --- .github/workflows/daily.yml | 26 ++++++++++++++++++++------ src/config.c | 5 ++++- 2 files changed, 24 insertions(+), 7 deletions(-) diff --git a/.github/workflows/daily.yml b/.github/workflows/daily.yml index 010a32289..5b395351b 100644 --- a/.github/workflows/daily.yml +++ b/.github/workflows/daily.yml @@ -85,12 +85,19 @@ jobs: sudo apt-get install tcl8.5 tcl-tls ./utils/gen-test-certs.sh ./runtest --accurate --verbose --tls + ./runtest --accurate --verbose - name: module api test - run: ./runtest-moduleapi --verbose --tls + run: | + ./runtest-moduleapi --verbose --tls + ./runtest-moduleapi --verbose - name: sentinel tests - run: ./runtest-sentinel --tls + run: | + ./runtest-sentinel --tls + ./runtest-sentinel - name: cluster tests - run: ./runtest-cluster --tls + run: | + ./runtest-cluster --tls + ./runtest-cluster test-valgrind: runs-on: ubuntu-latest @@ -147,10 +154,17 @@ jobs: yum -y install tcl tcltls ./utils/gen-test-certs.sh ./runtest --accurate --verbose --tls + ./runtest --accurate --verbose - name: module api test - run: ./runtest-moduleapi --verbose --tls + run: | + ./runtest-moduleapi --verbose --tls + ./runtest-moduleapi --verbose - name: sentinel tests - run: ./runtest-sentinel --tls + run: | + ./runtest-sentinel --tls + ./runtest-sentinel - name: cluster tests - run: ./runtest-cluster --tls + run: | + ./runtest-cluster --tls + ./runtest-cluster diff --git a/src/config.c b/src/config.c index 476956e18..b3c437d49 100644 --- a/src/config.c +++ b/src/config.c @@ -2077,7 +2077,10 @@ static int updateTlsCfg(char *val, char *prev, char **err) { UNUSED(val); UNUSED(prev); UNUSED(err); - if (tlsConfigure(&server.tls_ctx_config) == C_ERR) { + + /* If TLS is enabled, try to configure OpenSSL. */ + if ((server.tls_port || server.tls_replication || server.tls_cluster) + && tlsConfigure(&server.tls_ctx_config) == C_ERR) { *err = "Unable to update TLS configuration. Check server logs."; return 0; } From 11805375890ef5e21b6276c827b29d6a931caf01 Mon Sep 17 00:00:00 2001 From: Yossi Gottlieb Date: Mon, 17 Aug 2020 17:46:54 +0300 Subject: [PATCH 103/215] Module API: fix missing RM_CLIENTINFO_FLAG_SSL. (#7666) The `REDISMODULE_CLIENTINFO_FLAG_SSL` flag was already a part of the `RedisModuleClientInfo` structure but was not implemented. 
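For illustration only (not part of this patch), a minimal module sketch that reads the now-implemented flag through `RedisModule_GetClientInfoById()`, along the lines of the test module added below. The module name `conninfo` and the command name `conninfo.ssl` are made up:

```
#include "redismodule.h"

/* Hypothetical command (sketch): reply "ssl" or "plain" depending on whether
 * the calling client is connected over TLS. */
int ConnInfoSsl_RedisCommand(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
    REDISMODULE_NOT_USED(argv);
    REDISMODULE_NOT_USED(argc);
    RedisModuleClientInfo ci = { .version = REDISMODULE_CLIENTINFO_VERSION };
    if (RedisModule_GetClientInfoById(&ci, RedisModule_GetClientId(ctx)) != REDISMODULE_OK)
        return RedisModule_ReplyWithError(ctx, "ERR failed to get client info");
    if (ci.flags & REDISMODULE_CLIENTINFO_FLAG_SSL)
        return RedisModule_ReplyWithSimpleString(ctx, "ssl");
    return RedisModule_ReplyWithSimpleString(ctx, "plain");
}

int RedisModule_OnLoad(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
    REDISMODULE_NOT_USED(argv);
    REDISMODULE_NOT_USED(argc);
    if (RedisModule_Init(ctx,"conninfo",1,REDISMODULE_APIVER_1) == REDISMODULE_ERR)
        return REDISMODULE_ERR;
    if (RedisModule_CreateCommand(ctx,"conninfo.ssl",ConnInfoSsl_RedisCommand,
                                  "",0,0,0) == REDISMODULE_ERR)
        return REDISMODULE_ERR;
    return REDISMODULE_OK;
}
```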
(cherry picked from commit 64c360c5156ca6ee6d1eb52bfeb3fa48f3b25da5) --- src/connection.c | 8 +++++++- src/connection.h | 9 +++++++++ src/module.c | 2 ++ src/tls.c | 7 +++++++ tests/modules/misc.c | 38 +++++++++++++++++++++++++++++++++++ tests/unit/moduleapi/misc.tcl | 19 ++++++++++++++++++ 6 files changed, 82 insertions(+), 1 deletion(-) diff --git a/src/connection.c b/src/connection.c index 09fa12f2a..23b44a314 100644 --- a/src/connection.c +++ b/src/connection.c @@ -329,6 +329,11 @@ static ssize_t connSocketSyncReadLine(connection *conn, char *ptr, ssize_t size, return syncReadLine(conn->fd, ptr, size, timeout); } +static int connSocketGetType(connection *conn) { + (void) conn; + + return CONN_TYPE_SOCKET; +} ConnectionType CT_Socket = { .ae_handler = connSocketEventHandler, @@ -343,7 +348,8 @@ ConnectionType CT_Socket = { .blocking_connect = connSocketBlockingConnect, .sync_write = connSocketSyncWrite, .sync_read = connSocketSyncRead, - .sync_readline = connSocketSyncReadLine + .sync_readline = connSocketSyncReadLine, + .get_type = connSocketGetType }; diff --git a/src/connection.h b/src/connection.h index 0fd6c5f24..85585a3d0 100644 --- a/src/connection.h +++ b/src/connection.h @@ -48,6 +48,9 @@ typedef enum { #define CONN_FLAG_CLOSE_SCHEDULED (1<<0) /* Closed scheduled by a handler */ #define CONN_FLAG_WRITE_BARRIER (1<<1) /* Write barrier requested */ +#define CONN_TYPE_SOCKET 1 +#define CONN_TYPE_TLS 2 + typedef void (*ConnectionCallbackFunc)(struct connection *conn); typedef struct ConnectionType { @@ -64,6 +67,7 @@ typedef struct ConnectionType { ssize_t (*sync_write)(struct connection *conn, char *ptr, ssize_t size, long long timeout); ssize_t (*sync_read)(struct connection *conn, char *ptr, ssize_t size, long long timeout); ssize_t (*sync_readline)(struct connection *conn, char *ptr, ssize_t size, long long timeout); + int (*get_type)(struct connection *conn); } ConnectionType; struct connection { @@ -194,6 +198,11 @@ static inline ssize_t connSyncReadLine(connection *conn, char *ptr, ssize_t size return conn->type->sync_readline(conn, ptr, size, timeout); } +/* Return CONN_TYPE_* for the specified connection */ +static inline int connGetType(connection *conn) { + return conn->type->get_type(conn); +} + connection *connCreateSocket(); connection *connCreateAcceptedSocket(int fd); diff --git a/src/module.c b/src/module.c index b381b4f99..81824cd1e 100644 --- a/src/module.c +++ b/src/module.c @@ -1694,6 +1694,8 @@ int modulePopulateClientInfoStructure(void *ci, client *client, int structver) { ci1->flags |= REDISMODULE_CLIENTINFO_FLAG_TRACKING; if (client->flags & CLIENT_BLOCKED) ci1->flags |= REDISMODULE_CLIENTINFO_FLAG_BLOCKED; + if (connGetType(client->conn) == CONN_TYPE_TLS) + ci1->flags |= REDISMODULE_CLIENTINFO_FLAG_SSL; int port; connPeerToString(client->conn,ci1->addr,sizeof(ci1->addr),&port); diff --git a/src/tls.c b/src/tls.c index 4f0ea4d65..52887cd23 100644 --- a/src/tls.c +++ b/src/tls.c @@ -823,6 +823,12 @@ exit: return nread; } +static int connTLSGetType(connection *conn_) { + (void) conn_; + + return CONN_TYPE_TLS; +} + ConnectionType CT_TLS = { .ae_handler = tlsEventHandler, .accept = connTLSAccept, @@ -837,6 +843,7 @@ ConnectionType CT_TLS = { .sync_write = connTLSSyncWrite, .sync_read = connTLSSyncRead, .sync_readline = connTLSSyncReadLine, + .get_type = connTLSGetType }; int tlsHasPendingData() { diff --git a/tests/modules/misc.c b/tests/modules/misc.c index 1048d5065..1f9cb1932 100644 --- a/tests/modules/misc.c +++ b/tests/modules/misc.c @@ -195,6 +195,42 @@ 
int test_setlfu(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) return REDISMODULE_OK; } +int test_clientinfo(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) +{ + (void) argv; + (void) argc; + + RedisModuleClientInfo ci = { .version = REDISMODULE_CLIENTINFO_VERSION }; + + if (RedisModule_GetClientInfoById(&ci, RedisModule_GetClientId(ctx)) == REDISMODULE_ERR) { + RedisModule_ReplyWithError(ctx, "failed to get client info"); + return REDISMODULE_OK; + } + + RedisModule_ReplyWithArray(ctx, 10); + char flags[512]; + snprintf(flags, sizeof(flags) - 1, "%s:%s:%s:%s:%s:%s", + ci.flags & REDISMODULE_CLIENTINFO_FLAG_SSL ? "ssl" : "", + ci.flags & REDISMODULE_CLIENTINFO_FLAG_PUBSUB ? "pubsub" : "", + ci.flags & REDISMODULE_CLIENTINFO_FLAG_BLOCKED ? "blocked" : "", + ci.flags & REDISMODULE_CLIENTINFO_FLAG_TRACKING ? "tracking" : "", + ci.flags & REDISMODULE_CLIENTINFO_FLAG_UNIXSOCKET ? "unixsocket" : "", + ci.flags & REDISMODULE_CLIENTINFO_FLAG_MULTI ? "multi" : ""); + + RedisModule_ReplyWithCString(ctx, "flags"); + RedisModule_ReplyWithCString(ctx, flags); + RedisModule_ReplyWithCString(ctx, "id"); + RedisModule_ReplyWithLongLong(ctx, ci.id); + RedisModule_ReplyWithCString(ctx, "addr"); + RedisModule_ReplyWithCString(ctx, ci.addr); + RedisModule_ReplyWithCString(ctx, "port"); + RedisModule_ReplyWithLongLong(ctx, ci.port); + RedisModule_ReplyWithCString(ctx, "db"); + RedisModule_ReplyWithLongLong(ctx, ci.db); + + return REDISMODULE_OK; +} + int RedisModule_OnLoad(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) { REDISMODULE_NOT_USED(argv); REDISMODULE_NOT_USED(argc); @@ -221,6 +257,8 @@ int RedisModule_OnLoad(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) return REDISMODULE_ERR; if (RedisModule_CreateCommand(ctx,"test.getlfu", test_getlfu,"",0,0,0) == REDISMODULE_ERR) return REDISMODULE_ERR; + if (RedisModule_CreateCommand(ctx,"test.clientinfo", test_clientinfo,"",0,0,0) == REDISMODULE_ERR) + return REDISMODULE_ERR; return REDISMODULE_OK; } diff --git a/tests/unit/moduleapi/misc.tcl b/tests/unit/moduleapi/misc.tcl index 748016f1a..b57a94f6a 100644 --- a/tests/unit/moduleapi/misc.tcl +++ b/tests/unit/moduleapi/misc.tcl @@ -67,4 +67,23 @@ start_server {tags {"modules"}} { assert { $was_set == 0 } } + test {test module clientinfo api} { + # Test basic sanity and SSL flag + set info [r test.clientinfo] + set ssl_flag [expr $::tls ? {"ssl:"} : {":"}] + + assert { [dict get $info db] == 9 } + assert { [dict get $info flags] == "${ssl_flag}::::" } + + # Test MULTI flag + r multi + r test.clientinfo + set info [lindex [r exec] 0] + assert { [dict get $info flags] == "${ssl_flag}::::multi" } + + # Test TRACKING flag + r client tracking on + set info [r test.clientinfo] + assert { [dict get $info flags] == "${ssl_flag}::tracking::" } + } } From 6dcd641681caa3addfe1b14c547e84e420c5e69d Mon Sep 17 00:00:00 2001 From: Oran Agra Date: Tue, 18 Aug 2020 08:28:43 +0300 Subject: [PATCH 104/215] Trim trailing spaces in error replies coming from rejectCommand (#7668) 65a3307bc9 added rejectCommand which takes an robj reply and passes it through addReplyErrorSafe to addReplyErrorLength. The robj contains newline at it's end, but addReplyErrorSafe converts it to spaces, and passes it to addReplyErrorLength which adds the protocol newlines. The result was that most error replies (like OOM) had extra two trailing spaces in them. 
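A standalone sketch (plain C, not Redis code) of the mechanism described above; the exact wording of the shared OOM reply is approximate, but the point is that it ends in `\r\n`:

```
#include <stdio.h>
#include <string.h>

/* Sketch: mapping the trailing "\r\n" of a pre-made error reply to spaces and
 * then letting the reply path append its own "\r\n" yields two extra trailing
 * spaces, which is what this patch trims away. */
int main(void) {
    char reply[] = "-OOM command not allowed when used memory > 'maxmemory'.\r\n";
    size_t len = strlen(reply);
    for (size_t j = 0; j < len; j++)
        if (reply[j] == '\r' || reply[j] == '\n') reply[j] = ' ';
    printf("[%s]\n", reply); /* note the two trailing spaces before ']' */
    return 0;
}
```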
(cherry picked from commit cdd925b2898ac270afdf3d72f065410a96980f80) --- src/networking.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/src/networking.c b/src/networking.c index aa42c43e9..31c6664c7 100644 --- a/src/networking.c +++ b/src/networking.c @@ -412,10 +412,14 @@ void addReplyError(client *c, const char *err) { * is emitted. */ void addReplyErrorSafe(client *c, char *s, size_t len) { size_t j; + /* Trim any newlines at the end (ones will be added by addReplyErrorLength) */ + while (s[len-1] == '\r' || s[len-1] == '\n') + len--; + /* Replace any newlines in the rest of the string with spaces. */ for (j = 0; j < len; j++) { if (s[j] == '\r' || s[j] == '\n') s[j] = ' '; } - addReplyErrorLength(c,s,sdslen(s)); + addReplyErrorLength(c,s,len); } void addReplyErrorFormat(client *c, const char *fmt, ...) { From 3842bfd899fa672141874cbf01b9b076472f85f1 Mon Sep 17 00:00:00 2001 From: Wen Hui Date: Tue, 18 Aug 2020 01:50:03 -0400 Subject: [PATCH 105/215] [module] using predefined REDISMODULE_NO_EXPIRE in RM_GetExpire (#7669) It was already defined in the API header and the documentation, but not used by the implementation. (cherry picked from commit 93d87d6d4cd2aed9a45c4307b4c7b0b19a47b2e9) --- src/module.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/module.c b/src/module.c index 81824cd1e..2f14e1a50 100644 --- a/src/module.c +++ b/src/module.c @@ -2100,7 +2100,8 @@ int RM_UnlinkKey(RedisModuleKey *key) { * REDISMODULE_NO_EXPIRE is returned. */ mstime_t RM_GetExpire(RedisModuleKey *key) { mstime_t expire = getExpire(key->db,key->key); - if (expire == -1 || key->value == NULL) return -1; + if (expire == -1 || key->value == NULL) + return REDISMODULE_NO_EXPIRE; expire -= mstime(); return expire >= 0 ? expire : 0; } From 43bc1ea0659d853de17747953c86089f220cc853 Mon Sep 17 00:00:00 2001 From: Oran Agra Date: Tue, 18 Aug 2020 09:53:59 +0300 Subject: [PATCH 106/215] OOM Crash log include size of allocation attempt. (#7670) Since users often post just the crash log in github issues, the log print that's above it is missing. No reason not to include the size in the panic message itself. (cherry picked from commit 0f741a9e2de355211667c0f2ad38f7bead686c28) --- src/server.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/server.c b/src/server.c index d624cb434..38b35c9e1 100644 --- a/src/server.c +++ b/src/server.c @@ -4911,7 +4911,8 @@ void loadDataFromDisk(void) { void redisOutOfMemoryHandler(size_t allocation_size) { serverLog(LL_WARNING,"Out Of Memory allocating %zu bytes!", allocation_size); - serverPanic("Redis aborting for OUT OF MEMORY"); + serverPanic("Redis aborting for OUT OF MEMORY. 
Allocating %zu bytes!", + allocation_size); } void redisSetProcTitle(char *title) { From 4e19358d65208783ea263fd4d76e8a33380d0622 Mon Sep 17 00:00:00 2001 From: guybe7 Date: Tue, 18 Aug 2020 18:07:59 +0200 Subject: [PATCH 107/215] PERSIST should signalModifiedKey (Like EXPIRE does) (#7671) (cherry picked from commit b87c288016c68716cbb97534b7748045ca58c953) --- src/expire.c | 1 + 1 file changed, 1 insertion(+) diff --git a/src/expire.c b/src/expire.c index f2d135e2b..1c4f71df3 100644 --- a/src/expire.c +++ b/src/expire.c @@ -594,6 +594,7 @@ void pttlCommand(client *c) { void persistCommand(client *c) { if (lookupKeyWrite(c->db,c->argv[1])) { if (removeExpire(c->db,c->argv[1])) { + signalModifiedKey(c,c->db,c->argv[1]); notifyKeyspaceEvent(NOTIFY_GENERIC,"persist",c->argv[1],c->db->id); addReply(c,shared.cone); server.dirty++; From ba48e7e9cdbbf05cd313dde265e96566f5349f81 Mon Sep 17 00:00:00 2001 From: Madelyn Olson <34459052+madolson@users.noreply.github.com> Date: Wed, 19 Aug 2020 19:07:43 -0700 Subject: [PATCH 108/215] Fixed hset error since it's shared with hmset (#7678) (cherry picked from commit cbd9af85838a73c9c9654829cf8e511ecc5853de) --- src/t_hash.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/t_hash.c b/src/t_hash.c index 866bcd25b..4a03cfb25 100644 --- a/src/t_hash.c +++ b/src/t_hash.c @@ -532,7 +532,7 @@ void hsetCommand(client *c) { robj *o; if ((c->argc % 2) == 1) { - addReplyError(c,"wrong number of arguments for HMSET"); + addReplyErrorFormat(c,"wrong number of arguments for '%s' command",c->cmd->name); return; } From 1bd75aafdb8350a6e24c50a54aaab092d121f45e Mon Sep 17 00:00:00 2001 From: Raghav Muddur Date: Wed, 19 Aug 2020 19:13:32 -0700 Subject: [PATCH 109/215] Update clusterMsgDataPublish to clusterMsgModule (#7682) Correcting the variable to clusterMsgModule. (cherry picked from commit 34c3be365a3a8bc74f7a99b3e35cb211afaa58ca) --- src/cluster.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/cluster.c b/src/cluster.c index 350aa7b6a..e54c249d9 100644 --- a/src/cluster.c +++ b/src/cluster.c @@ -1767,7 +1767,7 @@ int clusterProcessPacket(clusterLink *link) { } else if (type == CLUSTERMSG_TYPE_MODULE) { uint32_t explen = sizeof(clusterMsg)-sizeof(union clusterMsgData); - explen += sizeof(clusterMsgDataPublish) - + explen += sizeof(clusterMsgModule) - 3 + ntohl(hdr->data.module.msg.len); if (totlen != explen) return 1; } From 113d5ae872646a099ccbcc8ea084b491dc08326d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E6=9D=A8=E5=8D=9A=E4=B8=9C?= Date: Thu, 20 Aug 2020 13:59:02 +0800 Subject: [PATCH 110/215] Fix flock cluster config may cause failure to restart after kill -9 (#7674) After fork, the child process(redis-aof-rewrite) will get the fd opened by the parent process(redis), when redis killed by kill -9, it will not graceful exit(call prepareForShutdown()), so redis-aof-rewrite thread may still alive, the fd(lock) will still be held by redis-aof-rewrite thread, and redis restart will fail to get lock, means fail to start. This issue was causing failures in the cluster tests in github actions. 
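A standalone sketch (not Redis code) of the underlying behaviour: a flock() taken before fork() remains held through the fd inherited by the child, so a restarted parent cannot re-acquire it. The lock file name and the sleep duration are arbitrary:

```
#include <fcntl.h>
#include <stdio.h>
#include <sys/file.h>
#include <unistd.h>

int main(void) {
    int fd = open("nodes.conf.lock", O_CREAT | O_RDWR, 0644);
    if (fd == -1 || flock(fd, LOCK_EX | LOCK_NB) == -1) {
        perror("lock");   /* a restarted parent fails here while the lock is held */
        return 1;
    }
    if (fork() == 0) {    /* child inherits fd, and with it the flock() lock */
        sleep(60);        /* stands in for a still-running redis-aof-rewrite */
        _exit(0);
    }
    /* Killing the parent with -9 at this point does not release the lock:
     * the child's inherited fd keeps it, which is why redisFork() now closes
     * cluster_config_file_lock_fd in the child process. */
    pause();
    return 0;
}
```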
Co-authored-by: Oran Agra (cherry picked from commit cbaf3c5bbafd43e009a2d6b38dd0e9fc450a3e12) --- src/cluster.c | 11 ++++++++++- src/server.c | 12 +++++++++++- src/server.h | 1 + tests/instances.tcl | 14 +++++++++----- 4 files changed, 31 insertions(+), 7 deletions(-) diff --git a/src/cluster.c b/src/cluster.c index e54c249d9..17d21df29 100644 --- a/src/cluster.c +++ b/src/cluster.c @@ -418,7 +418,15 @@ int clusterLockConfig(char *filename) { return C_ERR; } /* Lock acquired: leak the 'fd' by not closing it, so that we'll retain the - * lock to the file as long as the process exists. */ + * lock to the file as long as the process exists. + * + * After fork, the child process will get the fd opened by the parent process, + * we need save `fd` to `cluster_config_file_lock_fd`, so that in redisFork(), + * it will be closed in the child process. + * If it is not closed, when the main process is killed -9, but the child process + * (redis-aof-rewrite) is still alive, the fd(lock) will still be held by the + * child process, and the main process will fail to get lock, means fail to start. */ + server.cluster_config_file_lock_fd = fd; #endif /* __sun */ return C_OK; @@ -468,6 +476,7 @@ void clusterInit(void) { /* Lock the cluster config file to make sure every node uses * its own nodes.conf. */ + server.cluster_config_file_lock_fd = -1; if (clusterLockConfig(server.cluster_configfile) == C_ERR) exit(1); diff --git a/src/server.c b/src/server.c index 38b35c9e1..43b118759 100644 --- a/src/server.c +++ b/src/server.c @@ -4823,13 +4823,23 @@ void setupChildSignalHandlers(void) { return; } +/* After fork, the child process will inherit the resources + * of the parent process, e.g. fd(socket or flock) etc. + * should close the resources not used by the child process, so that if the + * parent restarts it can bind/lock despite the child possibly still running. */ +void closeClildUnusedResourceAfterFork() { + closeListeningSockets(0); + if (server.cluster_enabled && server.cluster_config_file_lock_fd != -1) + close(server.cluster_config_file_lock_fd); /* don't care if this fails */ +} + int redisFork() { int childpid; long long start = ustime(); if ((childpid = fork()) == 0) { /* Child */ - closeListeningSockets(0); setupChildSignalHandlers(); + closeClildUnusedResourceAfterFork(); } else { /* Parent */ server.stat_fork_time = ustime()-start; diff --git a/src/server.h b/src/server.h index 1862e879e..a6d4b58ca 100644 --- a/src/server.h +++ b/src/server.h @@ -1397,6 +1397,7 @@ struct redisServer { REDISMODULE_CLUSTER_FLAG_*. */ int cluster_allow_reads_when_down; /* Are reads allowed when the cluster is down? */ + int cluster_config_file_lock_fd; /* cluster config fd, will be flock */ /* Scripting */ lua_State *lua; /* The Lua interpreter. 
We use just one for all clients */ client *lua_client; /* The "fake client" to query Redis from Lua */ diff --git a/tests/instances.tcl b/tests/instances.tcl index 677af6427..691378b9b 100644 --- a/tests/instances.tcl +++ b/tests/instances.tcl @@ -98,7 +98,9 @@ proc spawn_instance {type base_port count {conf {}}} { # Check availability if {[server_is_up 127.0.0.1 $port 100] == 0} { - abort_sentinel_test "Problems starting $type #$j: ping timeout" + set logfile [file join $dirname log.txt] + puts [exec tail $logfile] + abort_sentinel_test "Problems starting $type #$j: ping timeout, maybe server start failed, check $logfile" } # Push the instance into the right list @@ -475,12 +477,12 @@ proc kill_instance {type id} { # Wait for the port it was using to be available again, so that's not # an issue to start a new server ASAP with the same port. - set retry 10 + set retry 100 while {[incr retry -1]} { - set port_is_free [catch {set s [socket 127.0.01 $port]}] + set port_is_free [catch {set s [socket 127.0.0.1 $port]}] if {$port_is_free} break catch {close $s} - after 1000 + after 100 } if {$retry == 0} { error "Port $port does not return available after killing instance." @@ -507,7 +509,9 @@ proc restart_instance {type id} { # Check that the instance is running if {[server_is_up 127.0.0.1 $port 100] == 0} { - abort_sentinel_test "Problems starting $type #$id: ping timeout" + set logfile [file join $dirname log.txt] + puts [exec tail $logfile] + abort_sentinel_test "Problems starting $type #$id: ping timeout, maybe server start failed, check $logfile" } # Connect with it with a fresh link From c9478a3b4e758bb96090817f484b53c39ff13222 Mon Sep 17 00:00:00 2001 From: guybe7 Date: Thu, 20 Aug 2020 18:55:14 +0200 Subject: [PATCH 111/215] Modules: Invalidate saved_oparray after use (#7688) We wanna avoid a chance of someone using the pointer in it after it'll be freed / realloced. (cherry picked from commit 65c24bd3d436a08a680fa80bf5b3f4f9cf8ef395) --- src/module.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/module.c b/src/module.c index 2f14e1a50..2542a0e2a 100644 --- a/src/module.c +++ b/src/module.c @@ -609,6 +609,8 @@ void moduleHandlePropagationAfterCommandCallback(RedisModuleCtx *ctx) { redisOpArrayFree(&server.also_propagate); /* Restore the previous oparray in case of nexted use of the API. */ server.also_propagate = ctx->saved_oparray; + /* We're done with saved_oparray, let's invalidate it. */ + redisOpArrayInit(&ctx->saved_oparray); } } From 6cfcf76e0b91e758c9278712fcb5fff73e689ac3 Mon Sep 17 00:00:00 2001 From: huangzhw Date: Fri, 21 Aug 2020 04:31:06 +0800 Subject: [PATCH 112/215] RedisModuleEvent_LoadingProgress always at 100% progress (#7685) It was also using the wrong struct, but luckily RedisModuleFlushInfo and RedisModuleLoadingProgress are identical. (cherry picked from commit a3d4d7bf68bf825584b34785ed2b117dd24c1754) --- src/module.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/module.c b/src/module.c index 2542a0e2a..9755e282a 100644 --- a/src/module.c +++ b/src/module.c @@ -7200,8 +7200,8 @@ void processModuleLoadingProgressEvent(int is_aof) { /* Fire the loading progress modules end event. 
*/ int progress = -1; if (server.loading_total_bytes) - progress = (server.loading_total_bytes<<10) / server.loading_total_bytes; - RedisModuleFlushInfoV1 fi = {REDISMODULE_LOADING_PROGRESS_VERSION, + progress = (server.loading_loaded_bytes<<10) / server.loading_total_bytes; + RedisModuleLoadingProgressV1 fi = {REDISMODULE_LOADING_PROGRESS_VERSION, server.hz, progress}; moduleFireServerEvent(REDISMODULE_EVENT_LOADING_PROGRESS, From 8d79702d8a6f5fb96e61308614064e59b7c55ad1 Mon Sep 17 00:00:00 2001 From: Yossi Gottlieb Date: Sun, 23 Aug 2020 10:17:43 +0300 Subject: [PATCH 113/215] Tests: fix redis-cli with remote hosts. (#7693) (cherry picked from commit f80f3f492a0ca56e163899eeca7ad40d67d903be) --- tests/integration/redis-cli.tcl | 4 ++-- tests/support/cli.tcl | 4 ++-- tests/unit/wait.tcl | 2 +- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/tests/integration/redis-cli.tcl b/tests/integration/redis-cli.tcl index 44ff430e2..2d4145ff0 100644 --- a/tests/integration/redis-cli.tcl +++ b/tests/integration/redis-cli.tcl @@ -3,7 +3,7 @@ source tests/support/cli.tcl start_server {tags {"cli"}} { proc open_cli {{opts "-n 9"} {infile ""}} { set ::env(TERM) dumb - set cmdline [rediscli [srv port] $opts] + set cmdline [rediscli [srv host] [srv port] $opts] if {$infile ne ""} { set cmdline "$cmdline < $infile" set mode "r" @@ -65,7 +65,7 @@ start_server {tags {"cli"}} { } proc _run_cli {opts args} { - set cmd [rediscli [srv port] [list -n 9 {*}$args]] + set cmd [rediscli [srv host] [srv port] [list -n 9 {*}$args]] foreach {key value} $opts { if {$key eq "pipe"} { set cmd "sh -c \"$value | $cmd\"" diff --git a/tests/support/cli.tcl b/tests/support/cli.tcl index 37c902a50..d55487931 100644 --- a/tests/support/cli.tcl +++ b/tests/support/cli.tcl @@ -11,8 +11,8 @@ proc rediscli_tls_config {testsdir} { } } -proc rediscli {port {opts {}}} { - set cmd [list src/redis-cli -p $port] +proc rediscli {host port {opts {}}} { + set cmd [list src/redis-cli -h $host -p $port] lappend cmd {*}[rediscli_tls_config "tests"] lappend cmd {*}$opts return $cmd diff --git a/tests/unit/wait.tcl b/tests/unit/wait.tcl index c9cfa6ed4..b1500cff8 100644 --- a/tests/unit/wait.tcl +++ b/tests/unit/wait.tcl @@ -33,7 +33,7 @@ start_server {} { } test {WAIT should not acknowledge 1 additional copy if slave is blocked} { - set cmd [rediscli $slave_port "-h $slave_host debug sleep 5"] + set cmd [rediscli $slave_host $slave_port "debug sleep 5"] exec {*}$cmd > /dev/null 2> /dev/null & after 1000 ;# Give redis-cli the time to execute the command. $master set foo 0 From 19ef1f371d6624913fae06803d37dd6693f2c8f5 Mon Sep 17 00:00:00 2001 From: Valentino Geron Date: Thu, 20 Aug 2020 18:48:09 +0300 Subject: [PATCH 114/215] Fix LPOS command when RANK is greater than matches When calling to LPOS command when RANK is higher than matches, the return value is non valid response. For example: ``` LPUSH l a :1 LPOS l b RANK 5 COUNT 10 *-4 ``` It may break client-side parser. Now, we count how many replies were replied in the array. ``` LPUSH l a :1 LPOS l b RANK 5 COUNT 10 *0 ``` (cherry picked from commit 9204a9b2c2f6eb59767ab0bddcde62c75e8c20b0) --- src/t_list.c | 5 +++-- tests/unit/type/list.tcl | 6 ++++++ 2 files changed, 9 insertions(+), 2 deletions(-) diff --git a/src/t_list.c b/src/t_list.c index 2c339888d..a751dde26 100644 --- a/src/t_list.c +++ b/src/t_list.c @@ -572,13 +572,14 @@ void lposCommand(client *c) { li = listTypeInitIterator(o,direction == LIST_HEAD ? 
-1 : 0,direction); listTypeEntry entry; long llen = listTypeLength(o); - long index = 0, matches = 0, matchindex = -1; + long index = 0, matches = 0, matchindex = -1, arraylen = 0; while (listTypeNext(li,&entry) && (maxlen == 0 || index < maxlen)) { if (listTypeEqual(&entry,ele)) { matches++; matchindex = (direction == LIST_TAIL) ? index : llen - index - 1; if (matches >= rank) { if (arraylenptr) { + arraylen++; addReplyLongLong(c,matchindex); if (count && matches-rank+1 >= count) break; } else { @@ -594,7 +595,7 @@ void lposCommand(client *c) { /* Reply to the client. Note that arraylenptr is not NULL only if * the COUNT option was selected. */ if (arraylenptr != NULL) { - setDeferredArrayLen(c,arraylenptr,matches-rank+1); + setDeferredArrayLen(c,arraylenptr,arraylen); } else { if (matchindex != -1) addReplyLongLong(c,matchindex); diff --git a/tests/unit/type/list.tcl b/tests/unit/type/list.tcl index 0e39d7d95..5ea2b9cd1 100644 --- a/tests/unit/type/list.tcl +++ b/tests/unit/type/list.tcl @@ -50,6 +50,12 @@ start_server { assert {[r LPOS mylist c COUNT 0 MAXLEN 7 RANK 2] == {6}} } + test {LPOS when RANK is greater than matches} { + r DEL mylist + r LPUSH l a + assert {[r LPOS mylist b COUNT 10 RANK 5] eq {}} + } + test {LPUSH, RPUSH, LLENGTH, LINDEX, LPOP - ziplist} { # first lpush then rpush assert_equal 1 [r lpush myziplist1 aa] From 3e2ed279fda35428e0a845e01be589e79ae49b81 Mon Sep 17 00:00:00 2001 From: Valentino Geron Date: Sun, 23 Aug 2020 12:19:41 +0300 Subject: [PATCH 115/215] Assert that setDeferredAggregateLen isn't called with negative value In case the redis is about to return broken reply we want to crash with assert so that we are notified about the bug. see #7687. (cherry picked from commit 8b428cf0f7ce5489fbdf4640a1dd36357f3dc5f0) --- src/networking.c | 1 + 1 file changed, 1 insertion(+) diff --git a/src/networking.c b/src/networking.c index 31c6664c7..e267565a6 100644 --- a/src/networking.c +++ b/src/networking.c @@ -492,6 +492,7 @@ void *addReplyDeferredLen(client *c) { /* Populate the length object and try gluing it to the next chunk. */ void setDeferredAggregateLen(client *c, void *node, long length, char prefix) { + serverAssert(length >= 0); listNode *ln = (listNode*)node; clientReplyBlock *next; char lenstr[128]; From ff0846cbb72a35187e1438b4daa7d1421cd6c2bf Mon Sep 17 00:00:00 2001 From: Yossi Gottlieb Date: Mon, 24 Aug 2020 12:54:56 +0300 Subject: [PATCH 116/215] Add language servers stuff, test/tls to gitignore. (#7698) (cherry picked from commit 5449a2a8b54db6a57b564e453c9f7075ebb81f79) --- .gitignore | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/.gitignore b/.gitignore index e445fd201..721f1d74e 100644 --- a/.gitignore +++ b/.gitignore @@ -27,9 +27,13 @@ src/nodes.conf deps/lua/src/lua deps/lua/src/luac deps/lua/src/liblua.a +tests/tls/* .make-* .prerequisites *.dSYM Makefile.dep .vscode/* .idea/* +.ccls +.ccls-cache/* +compile_commands.json From 5a9a653f3edebed5514d94ddc3aa4bf228fcecda Mon Sep 17 00:00:00 2001 From: Itamar Haber Date: Tue, 25 Aug 2020 15:58:50 +0300 Subject: [PATCH 117/215] Expands lazyfree's effort estimate to include Streams (#5794) Otherwise, it is treated as a single allocation and freed synchronously. The following logic is used for estimating the effort in constant-ish time complexity: 1. Check the number of nodes. 1. Add an allocation for each consumer group registered inside the stream. 1. Check the number of PELs in the first CG, and then add this count times the number of CGs. 1. 
Check the number of consumers in the first CG, and then add this count times the number of CGs. (cherry picked from commit 5b0a06af48997794af60dabb58ce4336ef56f73d) --- src/lazyfree.c | 24 ++++++++++++++++++++++++ 1 file changed, 24 insertions(+) diff --git a/src/lazyfree.c b/src/lazyfree.c index f01504e70..cbcc1c240 100644 --- a/src/lazyfree.c +++ b/src/lazyfree.c @@ -41,6 +41,30 @@ size_t lazyfreeGetFreeEffort(robj *obj) { } else if (obj->type == OBJ_HASH && obj->encoding == OBJ_ENCODING_HT) { dict *ht = obj->ptr; return dictSize(ht); + } else if (obj->type == OBJ_STREAM) { + size_t effort = 0; + stream *s = obj->ptr; + + /* Make a best effort estimate to maintain constant runtime. Every macro + * node in the Stream is one allocation. */ + effort += s->rax->numnodes; + + /* Every consumer group is an allocation and so are the entries in its + * PEL. We use size of the first group's PEL as an estimate for all + * others. */ + if (s->cgroups) { + raxIterator ri; + streamCG *cg; + raxStart(&ri,s->cgroups); + raxSeek(&ri,"^",NULL,0); + /* There must be at least one group so the following should always + * work. */ + serverAssert(raxNext(&ri)); + cg = ri.data; + effort += raxSize(s->cgroups)*(1+raxSize(cg->pel)); + raxStop(&ri); + } + return effort; } else { return 1; /* Everything else is a single allocation. */ } From e6f6731c66acc36025f9c72e4792d96223fba4cc Mon Sep 17 00:00:00 2001 From: valentinogeron Date: Thu, 27 Aug 2020 09:19:24 +0300 Subject: [PATCH 118/215] EXEC with only read commands should not be rejected when OOM (#7696) If the server gets MULTI command followed by only read commands, and right before it gets the EXEC it reaches OOM, the client will get OOM response. So, from now on, it will get OOM response only if there was at least one command that was tagged with `use-memory` flag (cherry picked from commit b7289e912cbe1a011a5569cd67929e83731b9660) --- src/server.c | 21 +++++++++++++-------- tests/unit/multi.tcl | 38 ++++++++++++++++++++++++++++++++++++++ 2 files changed, 51 insertions(+), 8 deletions(-) diff --git a/src/server.c b/src/server.c index 43b118759..dc0236cbd 100644 --- a/src/server.c +++ b/src/server.c @@ -3549,14 +3549,19 @@ int processCommand(client *c) { * into a slave, that may be the active client, to be freed. */ if (server.current_client == NULL) return C_ERR; - /* It was impossible to free enough memory, and the command the client - * is trying to execute is denied during OOM conditions or the client - * is in MULTI/EXEC context? Error. */ - if (out_of_memory && - (is_denyoom_command || - (c->flags & CLIENT_MULTI && - c->cmd->proc != discardCommand))) - { + int reject_cmd_on_oom = is_denyoom_command; + /* If client is in MULTI/EXEC context, queuing may consume an unlimited + * amount of memory, so we want to stop that. + * However, we never want to reject DISCARD, or even EXEC (unless it + * contains denied commands, in which case is_denyoom_command is already + * set. 
*/ + if (c->flags & CLIENT_MULTI && + c->cmd->proc != execCommand && + c->cmd->proc != discardCommand) { + reject_cmd_on_oom = 1; + } + + if (out_of_memory && reject_cmd_on_oom) { rejectCommand(c, shared.oomerr); return C_OK; } diff --git a/tests/unit/multi.tcl b/tests/unit/multi.tcl index 44a822ba6..817d509c5 100644 --- a/tests/unit/multi.tcl +++ b/tests/unit/multi.tcl @@ -466,4 +466,42 @@ start_server {tags {"multi"}} { assert { $xx == 1 } $r1 close; } + + test {EXEC with only read commands should not be rejected when OOM} { + set r2 [redis_client] + + r set x value + r multi + r get x + r ping + + # enforcing OOM + $r2 config set maxmemory 1 + + # finish the multi transaction with exec + assert { [r exec] == {value PONG} } + + # releasing OOM + $r2 config set maxmemory 0 + $r2 close + } + + test {EXEC with at least one use-memory command should fail} { + set r2 [redis_client] + + r multi + r set x 1 + r get x + + # enforcing OOM + $r2 config set maxmemory 1 + + # finish the multi transaction with exec + catch {r exec} e + assert_match {EXECABORT*OOM*} $e + + # releasing OOM + $r2 config set maxmemory 0 + $r2 close + } } From 2165a78d1074cba5ee590782f17281114228bcba Mon Sep 17 00:00:00 2001 From: Oran Agra Date: Thu, 27 Aug 2020 12:54:01 +0300 Subject: [PATCH 119/215] Fix rejectCommand trims newline in shared error objects, hung clients (#7714) 65a3307bc (released in 6.0.6) has a side effect, when processCommand rejects a command with pre-made shared object error string, it trims the newlines from the end of the string. if that string is later used with addReply, the newline will be missing, breaking the protocol, and leaving the client hung. It seems that the only scenario which this happens is when replying with -LOADING to some command, and later using that reply from the CONFIG SET command (still during loading). this will result in hung client. Refactoring the code in order to avoid trimming these newlines from shared string objects, and do the newline trimming only in other cases where it's needed. Co-authored-by: Guy Benoish (cherry picked from commit 9fcd9e191e6f54276688fb7c74e1d5c3c4be9a75) --- src/multi.c | 3 ++- src/networking.c | 47 ++++++++++++++++++++++++++++++----------------- src/server.c | 12 ++++++++---- src/server.h | 3 ++- 4 files changed, 42 insertions(+), 23 deletions(-) diff --git a/src/multi.c b/src/multi.c index 35ddf92af..a99c308be 100644 --- a/src/multi.c +++ b/src/multi.c @@ -127,7 +127,8 @@ void execCommandPropagateExec(client *c) { /* Aborts a transaction, with a specific error message. * The transaction is always aboarted with -EXECABORT so that the client knows * the server exited the multi state, but the actual reason for the abort is - * included too. */ + * included too. + * Note: 'error' may or may not end with \r\n. see addReplyErrorFormat. */ void execCommandAbort(client *c, sds error) { discardTransaction(c); diff --git a/src/networking.c b/src/networking.c index e267565a6..8f06c6ba6 100644 --- a/src/networking.c +++ b/src/networking.c @@ -357,14 +357,18 @@ void addReplyProto(client *c, const char *s, size_t len) { * * If the error code is already passed in the string 's', the error * code provided is used, otherwise the string "-ERR " for the generic - * error code is automatically added. */ + * error code is automatically added. + * Note that 's' must NOT end with \r\n. */ void addReplyErrorLength(client *c, const char *s, size_t len) { /* If the string already starts with "-..." then the error code * is provided by the caller. 
Otherwise we use "-ERR". */ if (!len || s[0] != '-') addReplyProto(c,"-ERR ",5); addReplyProto(c,s,len); addReplyProto(c,"\r\n",2); +} +/* Do some actions after an error reply was sent (Log if needed, updates stats, etc.) */ +void afterErrorReply(client *c, const char *s, size_t len) { /* Sometimes it could be normal that a slave replies to a master with * an error and this function gets called. Actually the error will never * be sent because addReply*() against master clients has no effect... @@ -390,10 +394,11 @@ void addReplyErrorLength(client *c, const char *s, size_t len) { from = "master"; } + if (len > 4096) len = 4096; char *cmdname = c->lastcmd ? c->lastcmd->name : ""; serverLog(LL_WARNING,"== CRITICAL == This %s is sending an error " - "to its %s: '%s' after processing the command " - "'%s'", from, to, s, cmdname); + "to its %s: '%.*s' after processing the command " + "'%s'", from, to, (int)len, s, cmdname); if (ctype == CLIENT_TYPE_MASTER && server.repl_backlog && server.repl_backlog_histlen > 0) { @@ -403,31 +408,39 @@ void addReplyErrorLength(client *c, const char *s, size_t len) { } } +/* The 'err' object is expected to start with -ERRORCODE and end with \r\n. + * Unlike addReplyErrorSds and others alike which rely on addReplyErrorLength. */ +void addReplyErrorObject(client *c, robj *err) { + addReply(c, err); + afterErrorReply(c, err->ptr, sdslen(err->ptr)-2); /* Ignore trailing \r\n */ +} + +/* See addReplyErrorLength for expectations from the input string. */ void addReplyError(client *c, const char *err) { addReplyErrorLength(c,err,strlen(err)); + afterErrorReply(c,err,strlen(err)); } -/* See addReplyErrorLength. - * Makes sure there are no newlines in the string, otherwise invalid protocol - * is emitted. */ -void addReplyErrorSafe(client *c, char *s, size_t len) { - size_t j; - /* Trim any newlines at the end (ones will be added by addReplyErrorLength) */ - while (s[len-1] == '\r' || s[len-1] == '\n') - len--; - /* Replace any newlines in the rest of the string with spaces. */ - for (j = 0; j < len; j++) { - if (s[j] == '\r' || s[j] == '\n') s[j] = ' '; - } - addReplyErrorLength(c,s,len); +/* See addReplyErrorLength for expectations from the input string. */ +void addReplyErrorSds(client *c, sds err) { + addReplyErrorLength(c,err,sdslen(err)); + afterErrorReply(c,err,sdslen(err)); } +/* See addReplyErrorLength for expectations from the formatted string. + * The formatted string is safe to contain \r and \n anywhere. */ void addReplyErrorFormat(client *c, const char *fmt, ...) { va_list ap; va_start(ap,fmt); sds s = sdscatvprintf(sdsempty(),fmt,ap); va_end(ap); - addReplyErrorSafe(c, s, sdslen(s)); + /* Trim any newlines at the end (ones will be added by addReplyErrorLength) */ + s = sdstrim(s, "\r\n"); + /* Make sure there are no newlines in the middle of the string, otherwise + * invalid protocol is emitted. */ + s = sdsmapchars(s, "\r\n", " ", 2); + addReplyErrorLength(c,s,sdslen(s)); + afterErrorReply(c,s,sdslen(s)); sdsfree(s); } diff --git a/src/server.c b/src/server.c index dc0236cbd..9c3d71d6b 100644 --- a/src/server.c +++ b/src/server.c @@ -3406,14 +3406,15 @@ void call(client *c, int flags) { /* Used when a command that is ready for execution needs to be rejected, due to * varios pre-execution checks. it returns the appropriate error to the client. * If there's a transaction is flags it as dirty, and if the command is EXEC, - * it aborts the transaction. */ + * it aborts the transaction. 
+ * Note: 'reply' is expected to end with \r\n */ void rejectCommand(client *c, robj *reply) { flagTransaction(c); if (c->cmd && c->cmd->proc == execCommand) { execCommandAbort(c, reply->ptr); } else { /* using addReplyError* rather than addReply so that the error can be logged. */ - addReplyErrorSafe(c, reply->ptr, sdslen(reply->ptr)); + addReplyErrorObject(c, reply); } } @@ -3423,10 +3424,13 @@ void rejectCommandFormat(client *c, const char *fmt, ...) { va_start(ap,fmt); sds s = sdscatvprintf(sdsempty(),fmt,ap); va_end(ap); + /* Make sure there are no newlines in the string, otherwise invalid protocol + * is emitted (The args come from the user, they may contain any character). */ + sdsmapchars(s, "\r\n", " ", 2); if (c->cmd && c->cmd->proc == execCommand) { execCommandAbort(c, s); } else { - addReplyErrorSafe(c, s, sdslen(s)); + addReplyErrorSds(c, s); } sdsfree(s); } @@ -3589,7 +3593,7 @@ int processCommand(client *c) { rejectCommand(c, shared.bgsaveerr); else rejectCommandFormat(c, - "-MISCONF Errors writing to the AOF file: %s\r\n", + "-MISCONF Errors writing to the AOF file: %s", strerror(server.aof_last_write_errno)); return C_OK; } diff --git a/src/server.h b/src/server.h index a6d4b58ca..ba6dffcef 100644 --- a/src/server.h +++ b/src/server.h @@ -1638,7 +1638,8 @@ void addReplyBulkLongLong(client *c, long long ll); void addReply(client *c, robj *obj); void addReplySds(client *c, sds s); void addReplyBulkSds(client *c, sds s); -void addReplyErrorSafe(client *c, char *s, size_t len); +void addReplyErrorObject(client *c, robj *err); +void addReplyErrorSds(client *c, sds err); void addReplyError(client *c, const char *err); void addReplyStatus(client *c, const char *status); void addReplyDouble(client *c, double d); From 2257f38b68826576044814283fe8a6cfb162c2e1 Mon Sep 17 00:00:00 2001 From: "Meir Shpilraien (Spielrein)" Date: Thu, 23 Jul 2020 12:38:51 +0300 Subject: [PATCH 120/215] This PR introduces a new loaded keyspace event (#7536) Co-authored-by: Oran Agra Co-authored-by: Itamar Haber (cherry picked from commit 8d826393191399e132bd9e56fb51ed83223cc5ca) --- runtest-moduleapi | 1 + src/module.c | 5 ++ src/rdb.c | 6 +- src/redismodule.h | 2 +- src/server.h | 1 + tests/modules/Makefile | 3 +- tests/modules/keyspace_events.c | 99 ++++++++++++++++++++++++ tests/unit/moduleapi/keyspace_events.tcl | 22 ++++++ 8 files changed, 135 insertions(+), 4 deletions(-) create mode 100644 tests/modules/keyspace_events.c create mode 100644 tests/unit/moduleapi/keyspace_events.tcl diff --git a/runtest-moduleapi b/runtest-moduleapi index f6cc0a258..71db27e5e 100755 --- a/runtest-moduleapi +++ b/runtest-moduleapi @@ -25,4 +25,5 @@ $TCLSH tests/test_helper.tcl \ --single unit/moduleapi/scan \ --single unit/moduleapi/datatype \ --single unit/moduleapi/auth \ +--single unit/moduleapi/keyspace_events \ "${@}" diff --git a/src/module.c b/src/module.c index 9755e282a..b15bb7276 100644 --- a/src/module.c +++ b/src/module.c @@ -4864,6 +4864,11 @@ void moduleReleaseGIL(void) { * - REDISMODULE_NOTIFY_STREAM: Stream events * - REDISMODULE_NOTIFY_KEYMISS: Key-miss events * - REDISMODULE_NOTIFY_ALL: All events (Excluding REDISMODULE_NOTIFY_KEYMISS) + * - REDISMODULE_NOTIFY_LOADED: A special notification available only for modules, + * indicates that the key was loaded from persistence. + * Notice, when this event fires, the given key + * can not be retained, use RM_CreateStringFromString + * instead. 
* * We do not distinguish between key events and keyspace events, and it is up * to the module to filter the actions taken based on the key. diff --git a/src/rdb.c b/src/rdb.c index ac1985d24..54a169cd8 100644 --- a/src/rdb.c +++ b/src/rdb.c @@ -2307,6 +2307,7 @@ int rdbLoadRio(rio *rdb, int rdbflags, rdbSaveInfo *rsi) { decrRefCount(val); } else { robj keyobj; + initStaticStringObject(keyobj,key); /* Add the new object in the hash table */ int added = dbAddRDBLoad(db,key,val); @@ -2315,7 +2316,6 @@ int rdbLoadRio(rio *rdb, int rdbflags, rdbSaveInfo *rsi) { /* This flag is useful for DEBUG RELOAD special modes. * When it's set we allow new keys to replace the current * keys with the same name. */ - initStaticStringObject(keyobj,key); dbSyncDelete(db,&keyobj); dbAddRDBLoad(db,key,val); } else { @@ -2327,12 +2327,14 @@ int rdbLoadRio(rio *rdb, int rdbflags, rdbSaveInfo *rsi) { /* Set the expire time if needed */ if (expiretime != -1) { - initStaticStringObject(keyobj,key); setExpire(NULL,db,&keyobj,expiretime); } /* Set usage information (for eviction). */ objectSetLRUOrLFU(val,lfu_freq,lru_idle,lru_clock,1000); + + /* call key space notification on key loaded for modules only */ + moduleNotifyKeyspaceEvent(NOTIFY_LOADED, "loaded", &keyobj, db->id); } /* Loading the database more slowly is useful in order to test diff --git a/src/redismodule.h b/src/redismodule.h index d67b01f68..ffc679ebc 100644 --- a/src/redismodule.h +++ b/src/redismodule.h @@ -128,9 +128,9 @@ #define REDISMODULE_NOTIFY_EVICTED (1<<9) /* e */ #define REDISMODULE_NOTIFY_STREAM (1<<10) /* t */ #define REDISMODULE_NOTIFY_KEY_MISS (1<<11) /* m (Note: This one is excluded from REDISMODULE_NOTIFY_ALL on purpose) */ +#define REDISMODULE_NOTIFY_LOADED (1<<12) /* module only key space notification, indicate a key loaded from rdb */ #define REDISMODULE_NOTIFY_ALL (REDISMODULE_NOTIFY_GENERIC | REDISMODULE_NOTIFY_STRING | REDISMODULE_NOTIFY_LIST | REDISMODULE_NOTIFY_SET | REDISMODULE_NOTIFY_HASH | REDISMODULE_NOTIFY_ZSET | REDISMODULE_NOTIFY_EXPIRED | REDISMODULE_NOTIFY_EVICTED | REDISMODULE_NOTIFY_STREAM) /* A */ - /* A special pointer that we can use between the core and the module to signal * field deletion, and that is impossible to be a valid pointer. */ #define REDISMODULE_HASH_DELETE ((RedisModuleString*)(long)1) diff --git a/src/server.h b/src/server.h index ba6dffcef..e9b4777ef 100644 --- a/src/server.h +++ b/src/server.h @@ -431,6 +431,7 @@ typedef long long ustime_t; /* microsecond time type. */ #define NOTIFY_EVICTED (1<<9) /* e */ #define NOTIFY_STREAM (1<<10) /* t */ #define NOTIFY_KEY_MISS (1<<11) /* m (Note: This one is excluded from NOTIFY_ALL on purpose) */ +#define NOTIFY_LOADED (1<<12) /* module only key space notification, indicate a key loaded from rdb */ #define NOTIFY_ALL (NOTIFY_GENERIC | NOTIFY_STRING | NOTIFY_LIST | NOTIFY_SET | NOTIFY_HASH | NOTIFY_ZSET | NOTIFY_EXPIRED | NOTIFY_EVICTED | NOTIFY_STREAM) /* A flag */ /* Get the first bind addr or NULL */ diff --git a/tests/modules/Makefile b/tests/modules/Makefile index 39b8e6efa..de7407a84 100644 --- a/tests/modules/Makefile +++ b/tests/modules/Makefile @@ -22,7 +22,8 @@ TEST_MODULES = \ blockonkeys.so \ scan.so \ datatype.so \ - auth.so + auth.so \ + keyspace_events.so .PHONY: all diff --git a/tests/modules/keyspace_events.c b/tests/modules/keyspace_events.c new file mode 100644 index 000000000..b2296c1cb --- /dev/null +++ b/tests/modules/keyspace_events.c @@ -0,0 +1,99 @@ +/* This module is used to test the server keyspace events API. 
+ * + * ----------------------------------------------------------------------------- + * + * Copyright (c) 2020, Meir Shpilraien + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Redis nor the names of its contributors may be used + * to endorse or promote products derived from this software without + * specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#define REDISMODULE_EXPERIMENTAL_API + +#include "redismodule.h" +#include +#include + +/** strores all the keys on which we got 'loaded' keyspace notification **/ +RedisModuleDict *loaded_event_log = NULL; + +static int KeySpace_Notification(RedisModuleCtx *ctx, int type, const char *event, RedisModuleString *key){ + REDISMODULE_NOT_USED(ctx); + REDISMODULE_NOT_USED(type); + + if(strcmp(event, "loaded") == 0){ + const char* keyName = RedisModule_StringPtrLen(key, NULL); + int nokey; + RedisModule_DictGetC(loaded_event_log, (void*)keyName, strlen(keyName), &nokey); + if(nokey){ + RedisModule_DictSetC(loaded_event_log, (void*)keyName, strlen(keyName), NULL); + } + } + + return REDISMODULE_OK; +} + +static int cmdIsKeyLoaded(RedisModuleCtx *ctx, RedisModuleString **argv, int argc){ + if(argc != 2){ + return RedisModule_WrongArity(ctx); + } + + const char* key = RedisModule_StringPtrLen(argv[1], NULL); + + int nokey; + RedisModule_DictGetC(loaded_event_log, (void*)key, strlen(key), &nokey); + + RedisModule_ReplyWithLongLong(ctx, !nokey); + return REDISMODULE_OK; +} + +/* This function must be present on each Redis module. It is used in order to + * register the commands into the Redis server. 
*/ +int RedisModule_OnLoad(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) { + REDISMODULE_NOT_USED(argv); + REDISMODULE_NOT_USED(argc); + + if (RedisModule_Init(ctx,"testkeyspace",1,REDISMODULE_APIVER_1) == REDISMODULE_ERR){ + return REDISMODULE_ERR; + } + + loaded_event_log = RedisModule_CreateDict(ctx); + + if(RedisModule_SubscribeToKeyspaceEvents(ctx, REDISMODULE_NOTIFY_LOADED, KeySpace_Notification) != REDISMODULE_OK){ + return REDISMODULE_ERR; + } + + if (RedisModule_CreateCommand(ctx,"keyspace.is_key_loaded", cmdIsKeyLoaded,"",0,0,0) == REDISMODULE_ERR){ + return REDISMODULE_ERR; + } + + return REDISMODULE_OK; +} + +int RedisModule_OnUnload(RedisModuleCtx *ctx) { + RedisModule_FreeDict(ctx, loaded_event_log); + loaded_event_log = NULL; + return REDISMODULE_OK; +} diff --git a/tests/unit/moduleapi/keyspace_events.tcl b/tests/unit/moduleapi/keyspace_events.tcl new file mode 100644 index 000000000..cb959ab52 --- /dev/null +++ b/tests/unit/moduleapi/keyspace_events.tcl @@ -0,0 +1,22 @@ +set testmodule [file normalize tests/modules/keyspace_events.so] + +tags "modules" { + start_server [list overrides [list loadmodule "$testmodule"]] { + + test {Test loaded key space event} { + r set x 1 + r hset y f v + r lpush z 1 2 3 + r sadd p 1 2 3 + r zadd t 1 f1 2 f2 + r xadd s * f v + r debug reload + assert_equal 1 [r keyspace.is_key_loaded x] + assert_equal 1 [r keyspace.is_key_loaded y] + assert_equal 1 [r keyspace.is_key_loaded z] + assert_equal 1 [r keyspace.is_key_loaded p] + assert_equal 1 [r keyspace.is_key_loaded t] + assert_equal 1 [r keyspace.is_key_loaded s] + } + } +} \ No newline at end of file From e81bac32fd0ec7dc729752ba68fb1230b0d2eefa Mon Sep 17 00:00:00 2001 From: Arun Ranganathan Date: Wed, 29 Jul 2020 01:46:44 -0400 Subject: [PATCH 121/215] Show threading configuration in INFO output (#7446) Co-authored-by: Oran Agra (cherry picked from commit f6cad30bb69b2ad35bb0a870077fac2d4605d727) --- src/networking.c | 33 +++++++++++++++++++++++---------- src/server.c | 22 ++++++++++++++++++---- src/server.h | 5 +++++ 3 files changed, 46 insertions(+), 14 deletions(-) diff --git a/src/networking.c b/src/networking.c index 8f06c6ba6..495be0ece 100644 --- a/src/networking.c +++ b/src/networking.c @@ -1313,6 +1313,9 @@ client *lookupClientByID(uint64_t id) { * set to 0. So when handler_installed is set to 0 the function must be * thread safe. */ int writeToClient(client *c, int handler_installed) { + /* Update total number of writes on server */ + server.stat_total_writes_processed++; + ssize_t nwritten = 0, totwritten = 0; size_t objlen; clientReplyBlock *o; @@ -1929,6 +1932,9 @@ void readQueryFromClient(connection *conn) { * the event loop. This is the case if threaded I/O is enabled. */ if (postponeClientRead(c)) return; + /* Update total number of reads on server */ + server.stat_total_reads_processed++; + readlen = PROTO_IOBUF_LEN; /* If this is a multi bulk request, and we are processing a bulk reply * that is large enough, try to maximize the probability that the query @@ -2926,7 +2932,6 @@ int tio_debug = 0; pthread_t io_threads[IO_THREADS_MAX_NUM]; pthread_mutex_t io_threads_mutex[IO_THREADS_MAX_NUM]; _Atomic unsigned long io_threads_pending[IO_THREADS_MAX_NUM]; -int io_threads_active; /* Are the threads currently spinning waiting I/O? */ int io_threads_op; /* IO_THREADS_OP_WRITE or IO_THREADS_OP_READ. 
*/ /* This is the list of clients each thread will serve when threaded I/O is @@ -2985,7 +2990,7 @@ void *IOThreadMain(void *myid) { /* Initialize the data structures needed for threaded I/O. */ void initThreadedIO(void) { - io_threads_active = 0; /* We start with threads not active. */ + server.io_threads_active = 0; /* We start with threads not active. */ /* Don't spawn any thread if the user selected a single thread: * we'll handle I/O directly from the main thread. */ @@ -3019,10 +3024,10 @@ void initThreadedIO(void) { void startThreadedIO(void) { if (tio_debug) { printf("S"); fflush(stdout); } if (tio_debug) printf("--- STARTING THREADED IO ---\n"); - serverAssert(io_threads_active == 0); + serverAssert(server.io_threads_active == 0); for (int j = 1; j < server.io_threads_num; j++) pthread_mutex_unlock(&io_threads_mutex[j]); - io_threads_active = 1; + server.io_threads_active = 1; } void stopThreadedIO(void) { @@ -3033,10 +3038,10 @@ void stopThreadedIO(void) { if (tio_debug) printf("--- STOPPING THREADED IO [R%d] [W%d] ---\n", (int) listLength(server.clients_pending_read), (int) listLength(server.clients_pending_write)); - serverAssert(io_threads_active == 1); + serverAssert(server.io_threads_active == 1); for (int j = 1; j < server.io_threads_num; j++) pthread_mutex_lock(&io_threads_mutex[j]); - io_threads_active = 0; + server.io_threads_active = 0; } /* This function checks if there are not enough pending clients to justify @@ -3055,7 +3060,7 @@ int stopThreadedIOIfNeeded(void) { if (server.io_threads_num == 1) return 1; if (pending < (server.io_threads_num*2)) { - if (io_threads_active) stopThreadedIO(); + if (server.io_threads_active) stopThreadedIO(); return 1; } else { return 0; @@ -3073,7 +3078,7 @@ int handleClientsWithPendingWritesUsingThreads(void) { } /* Start threads if needed. */ - if (!io_threads_active) startThreadedIO(); + if (!server.io_threads_active) startThreadedIO(); if (tio_debug) printf("%d TOTAL WRITE pending clients\n", processed); @@ -3130,6 +3135,10 @@ int handleClientsWithPendingWritesUsingThreads(void) { } } listEmpty(server.clients_pending_write); + + /* Update processed count on server */ + server.stat_io_writes_processed += processed; + return processed; } @@ -3138,7 +3147,7 @@ int handleClientsWithPendingWritesUsingThreads(void) { * As a side effect of calling this function the client is put in the * pending read clients and flagged as such. */ int postponeClientRead(client *c) { - if (io_threads_active && + if (server.io_threads_active && server.io_threads_do_reads && !ProcessingEventsWhileBlocked && !(c->flags & (CLIENT_MASTER|CLIENT_SLAVE|CLIENT_PENDING_READ))) @@ -3158,7 +3167,7 @@ int postponeClientRead(client *c) { * the reads in the buffers, and also parse the first command available * rendering it in the client structures. 
*/ int handleClientsWithPendingReadsUsingThreads(void) { - if (!io_threads_active || !server.io_threads_do_reads) return 0; + if (!server.io_threads_active || !server.io_threads_do_reads) return 0; int processed = listLength(server.clients_pending_read); if (processed == 0) return 0; @@ -3219,5 +3228,9 @@ int handleClientsWithPendingReadsUsingThreads(void) { } processInputBuffer(c); } + + /* Update processed count on server */ + server.stat_io_reads_processed += processed; + return processed; } diff --git a/src/server.c b/src/server.c index 9c3d71d6b..3381356ea 100644 --- a/src/server.c +++ b/src/server.c @@ -2726,6 +2726,10 @@ void resetServerStats(void) { server.stat_sync_full = 0; server.stat_sync_partial_ok = 0; server.stat_sync_partial_err = 0; + server.stat_io_reads_processed = 0; + server.stat_total_reads_processed = 0; + server.stat_io_writes_processed = 0; + server.stat_total_writes_processed = 0; for (j = 0; j < STATS_METRIC_COUNT; j++) { server.inst_metric[j].idx = 0; server.inst_metric[j].last_sample_time = mstime(); @@ -4075,7 +4079,8 @@ sds genRedisInfoString(const char *section) { "configured_hz:%i\r\n" "lru_clock:%u\r\n" "executable:%s\r\n" - "config_file:%s\r\n", + "config_file:%s\r\n" + "io_threads_active:%d\r\n", REDIS_VERSION, redisGitSHA1(), strtol(redisGitDirty(),NULL,10) > 0, @@ -4099,7 +4104,8 @@ sds genRedisInfoString(const char *section) { server.config_hz, server.lruclock, server.executable ? server.executable : "", - server.configfile ? server.configfile : ""); + server.configfile ? server.configfile : "", + server.io_threads_active); } /* Clients */ @@ -4371,7 +4377,11 @@ sds genRedisInfoString(const char *section) { "tracking_total_keys:%lld\r\n" "tracking_total_items:%lld\r\n" "tracking_total_prefixes:%lld\r\n" - "unexpected_error_replies:%lld\r\n", + "unexpected_error_replies:%lld\r\n" + "total_reads_processed:%lld\r\n" + "total_writes_processed:%lld\r\n" + "io_threaded_reads_processed:%lld\r\n" + "io_threaded_writes_processed:%lld\r\n", server.stat_numconnections, server.stat_numcommands, getInstantaneousMetric(STATS_METRIC_COMMAND), @@ -4402,7 +4412,11 @@ sds genRedisInfoString(const char *section) { (unsigned long long) trackingGetTotalKeys(), (unsigned long long) trackingGetTotalItems(), (unsigned long long) trackingGetTotalPrefixes(), - server.stat_unexpected_error_replies); + server.stat_unexpected_error_replies, + server.stat_total_reads_processed, + server.stat_total_writes_processed, + server.stat_io_reads_processed, + server.stat_io_writes_processed); } /* Replication */ diff --git a/src/server.h b/src/server.h index e9b4777ef..2d8279264 100644 --- a/src/server.h +++ b/src/server.h @@ -1108,6 +1108,7 @@ struct redisServer { queries. Will still serve RESP2 queries. */ int io_threads_num; /* Number of IO threads to use. */ int io_threads_do_reads; /* Read and parse from IO threads? */ + int io_threads_active; /* Is IO threads currently active? */ long long events_processed_while_blocked; /* processEventsWhileBlocked() */ /* RDB / AOF loading information */ @@ -1157,6 +1158,10 @@ struct redisServer { size_t stat_module_cow_bytes; /* Copy on write bytes during module fork. */ uint64_t stat_clients_type_memory[CLIENT_TYPE_COUNT];/* Mem usage by type */ long long stat_unexpected_error_replies; /* Number of unexpected (aof-loading, replica to master, etc.) 
error replies */ + long long stat_io_reads_processed; /* Number of read events processed by IO / Main threads */ + long long stat_io_writes_processed; /* Number of write events processed by IO / Main threads */ + _Atomic long long stat_total_reads_processed; /* Total number of read events processed */ + _Atomic long long stat_total_writes_processed; /* Total number of write events processed */ /* The following two are used to track instantaneous metrics, like * number of operations per second, network traffic. */ struct { From 7fcf29e6152d80a6ad6b5e17872475306bd6a456 Mon Sep 17 00:00:00 2001 From: ShooterIT Date: Thu, 6 Aug 2020 15:36:28 +0800 Subject: [PATCH 122/215] [Redis-benchmark] Support zset type (cherry picked from commit e5a50ed3c49c211b413d92b1f45f950c830c58c0) --- src/redis-benchmark.c | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/src/redis-benchmark.c b/src/redis-benchmark.c index 6385fa9b3..89c84c278 100644 --- a/src/redis-benchmark.c +++ b/src/redis-benchmark.c @@ -1733,6 +1733,22 @@ int main(int argc, const char **argv) { free(cmd); } + if (test_is_selected("zadd")) { + char *score = "0"; + if (config.randomkeys) score = "__rand_int__"; + len = redisFormatCommand(&cmd, + "ZADD myzset:{tag} %s element:__rand_int__",score); + benchmark("ZADD",cmd,len); + free(cmd); + } + + if (test_is_selected("zrem")) { + len = redisFormatCommand(&cmd, + "ZREM myzset:{tag} element:__rand_int__"); + benchmark("ZREM",cmd,len); + free(cmd); + } + if (test_is_selected("lrange") || test_is_selected("lrange_100") || test_is_selected("lrange_300") || From e3cfc921803bdf6e35dd3987c7c572d5a8772b32 Mon Sep 17 00:00:00 2001 From: ShooterIT Date: Sat, 8 Aug 2020 23:08:27 +0800 Subject: [PATCH 123/215] [Redis-benchmark] Remove zrem test, add zpopmin test (cherry picked from commit 6a06a5a597b349be4cf587e59613bbf4cd2f2c43) --- src/redis-benchmark.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/src/redis-benchmark.c b/src/redis-benchmark.c index 89c84c278..fc24afbff 100644 --- a/src/redis-benchmark.c +++ b/src/redis-benchmark.c @@ -1433,7 +1433,8 @@ usage: " --cluster Enable cluster mode.\n" " --enable-tracking Send CLIENT TRACKING on before starting benchmark.\n" " -k 1=keep alive 0=reconnect (default 1)\n" -" -r Use random keys for SET/GET/INCR, random values for SADD\n" +" -r Use random keys for SET/GET/INCR, random values for SADD,\n" +" random members and scores for ZADD.\n" " Using this option the benchmark will expand the string __rand_int__\n" " inside an argument with a 12 digits number in the specified range\n" " from 0 to keyspacelen-1. The substitution changes every time a command\n" @@ -1742,10 +1743,9 @@ int main(int argc, const char **argv) { free(cmd); } - if (test_is_selected("zrem")) { - len = redisFormatCommand(&cmd, - "ZREM myzset:{tag} element:__rand_int__"); - benchmark("ZREM",cmd,len); + if (test_is_selected("zpopmin")) { + len = redisFormatCommand(&cmd,"ZPOPMIN myzset:{tag}"); + benchmark("ZPOPMIN",cmd,len); free(cmd); } From 2fc915f509800a89b7b39f227fb6971550a232f7 Mon Sep 17 00:00:00 2001 From: "Meir Shpilraien (Spielrein)" Date: Sun, 9 Aug 2020 06:11:47 +0300 Subject: [PATCH 124/215] see #7544, added RedisModule_HoldString api. (#7577) Added RedisModule_HoldString that either returns a shallow copy of the given String (by increasing the String ref count) or a new deep copy of String in case its not possible to get a shallow copy. 
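For illustration only, a minimal sketch (not code from this patch, mirroring what the updated tests/modules/keyspace_events.c below does) of a module that uses RedisModule_HoldString() to keep the key name received in a 'loaded' keyspace notification alive after the callback returns. The module name "holdexample", the callback name OnLoadedKey and the dict name loaded_keys are made up for the example; the API calls are the ones added or used by this series:

    #define REDISMODULE_EXPERIMENTAL_API
    #include <string.h>
    #include "redismodule.h"

    static RedisModuleDict *loaded_keys; /* keys seen in 'loaded' notifications */

    /* RedisModule_HoldString() returns a string owned by the caller: either the
     * same string with its refcount increased, or a new copy when a shallow copy
     * is not possible, so the reference stored in the dict stays valid after the
     * callback returns. */
    static int OnLoadedKey(RedisModuleCtx *ctx, int type, const char *event,
                           RedisModuleString *key) {
        REDISMODULE_NOT_USED(type);
        if (strcmp(event, "loaded") == 0) {
            size_t len;
            const char *name = RedisModule_StringPtrLen(key, &len);
            int nokey;
            RedisModule_DictGetC(loaded_keys, (void *)name, len, &nokey);
            if (nokey)
                RedisModule_DictSetC(loaded_keys, (void *)name, len,
                                     RedisModule_HoldString(ctx, key));
        }
        return REDISMODULE_OK;
    }

    int RedisModule_OnLoad(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
        REDISMODULE_NOT_USED(argv);
        REDISMODULE_NOT_USED(argc);
        if (RedisModule_Init(ctx, "holdexample", 1, REDISMODULE_APIVER_1) == REDISMODULE_ERR)
            return REDISMODULE_ERR;
        loaded_keys = RedisModule_CreateDict(ctx);
        return RedisModule_SubscribeToKeyspaceEvents(ctx, REDISMODULE_NOTIFY_LOADED,
                                                     OnLoadedKey);
    }

A real module would also release the held strings when it is unloaded, as the updated test module does in RedisModule_OnUnload.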
Co-authored-by: Itamar Haber (cherry picked from commit 3f494cc49d25929f27fa75a78d9921a9dee771f2) --- src/module.c | 60 ++++++++++++++++++++++++ src/redismodule.h | 2 + tests/modules/keyspace_events.c | 17 ++++++- tests/unit/moduleapi/keyspace_events.tcl | 12 ++--- 4 files changed, 83 insertions(+), 8 deletions(-) diff --git a/src/module.c b/src/module.c index b15bb7276..9b44d5ecc 100644 --- a/src/module.c +++ b/src/module.c @@ -1140,6 +1140,65 @@ void RM_RetainString(RedisModuleCtx *ctx, RedisModuleString *str) { } } +/** +* This function can be used instead of RedisModule_RetainString(). +* The main difference between the two is that this function will always +* succeed, whereas RedisModule_RetainString() may fail because of an +* assertion. +* +* The function returns a pointer to RedisModuleString, which is owned +* by the caller. It requires a call to RedisModule_FreeString() to free +* the string when automatic memory management is disabled for the context. +* When automatic memory management is enabled, you can either call +* RedisModule_FreeString() or let the automation free it. +* +* This function is more efficient than RedisModule_CreateStringFromString() +* because whenever possible, it avoids copying the underlying +* RedisModuleString. The disadvantage of using this function is that it +* might not be possible to use RedisModule_StringAppendBuffer() on the +* returned RedisModuleString. +* +* It is possible to call this function with a NULL context. + */ +RedisModuleString* RM_HoldString(RedisModuleCtx *ctx, RedisModuleString *str) { + if (str->refcount == OBJ_STATIC_REFCOUNT) { + return RM_CreateStringFromString(ctx, str); + } + + incrRefCount(str); + if (ctx != NULL) { + /* + * Put the str in the auto memory management of the ctx. +         * It might already be there, in this case, the ref count will +         * be 2 and we will decrease the ref count twice and free the +         * object in the auto memory free function. +         * +         * Why we can not do the same trick of just remove the object +         * from the auto memory (like in RM_RetainString)? +         * This code shows the issue: +         * +         * RM_AutoMemory(ctx); +         * str1 = RM_CreateString(ctx, "test", 4); +         * str2 = RM_HoldString(ctx, str1); +         * RM_FreeString(str1); +         * RM_FreeString(str2); +         * +         * If after the RM_HoldString we would just remove the string from +         * the auto memory, this example will cause access to a freed memory +         * on 'RM_FreeString(str2);' because the String will be free +         * on 'RM_FreeString(str1);'. +         * +         * So it's safer to just increase the ref count +         * and add the String to auto memory again. +         * +         * The limitation is that it is not possible to use RedisModule_StringAppendBuffer +         * on the String. + */ + autoMemoryAdd(ctx,REDISMODULE_AM_STRING,str); + } + return str; +} + /* Given a string module object, this function returns the string pointer * and length of the string. The returned pointer and length should only * be used for read only accesses and never modified. 
*/ @@ -7830,6 +7889,7 @@ void moduleRegisterCoreAPI(void) { REGISTER_API(LatencyAddSample); REGISTER_API(StringAppendBuffer); REGISTER_API(RetainString); + REGISTER_API(HoldString); REGISTER_API(StringCompare); REGISTER_API(GetContextFromIO); REGISTER_API(GetKeyNameFromIO); diff --git a/src/redismodule.h b/src/redismodule.h index ffc679ebc..5f828b9e3 100644 --- a/src/redismodule.h +++ b/src/redismodule.h @@ -569,6 +569,7 @@ void REDISMODULE_API_FUNC(RedisModule__Assert)(const char *estr, const char *fil void REDISMODULE_API_FUNC(RedisModule_LatencyAddSample)(const char *event, mstime_t latency); int REDISMODULE_API_FUNC(RedisModule_StringAppendBuffer)(RedisModuleCtx *ctx, RedisModuleString *str, const char *buf, size_t len); void REDISMODULE_API_FUNC(RedisModule_RetainString)(RedisModuleCtx *ctx, RedisModuleString *str); +RedisModuleString* REDISMODULE_API_FUNC(RedisModule_HoldString)(RedisModuleCtx *ctx, RedisModuleString *str); int REDISMODULE_API_FUNC(RedisModule_StringCompare)(RedisModuleString *a, RedisModuleString *b); RedisModuleCtx *REDISMODULE_API_FUNC(RedisModule_GetContextFromIO)(RedisModuleIO *io); const RedisModuleString *REDISMODULE_API_FUNC(RedisModule_GetKeyNameFromIO)(RedisModuleIO *io); @@ -807,6 +808,7 @@ static int RedisModule_Init(RedisModuleCtx *ctx, const char *name, int ver, int REDISMODULE_GET_API(LatencyAddSample); REDISMODULE_GET_API(StringAppendBuffer); REDISMODULE_GET_API(RetainString); + REDISMODULE_GET_API(HoldString); REDISMODULE_GET_API(StringCompare); REDISMODULE_GET_API(GetContextFromIO); REDISMODULE_GET_API(GetKeyNameFromIO); diff --git a/tests/modules/keyspace_events.c b/tests/modules/keyspace_events.c index b2296c1cb..db3977be1 100644 --- a/tests/modules/keyspace_events.c +++ b/tests/modules/keyspace_events.c @@ -48,7 +48,7 @@ static int KeySpace_Notification(RedisModuleCtx *ctx, int type, const char *even int nokey; RedisModule_DictGetC(loaded_event_log, (void*)keyName, strlen(keyName), &nokey); if(nokey){ - RedisModule_DictSetC(loaded_event_log, (void*)keyName, strlen(keyName), NULL); + RedisModule_DictSetC(loaded_event_log, (void*)keyName, strlen(keyName), RedisModule_HoldString(ctx, key)); } } @@ -63,9 +63,15 @@ static int cmdIsKeyLoaded(RedisModuleCtx *ctx, RedisModuleString **argv, int arg const char* key = RedisModule_StringPtrLen(argv[1], NULL); int nokey; - RedisModule_DictGetC(loaded_event_log, (void*)key, strlen(key), &nokey); + RedisModuleString* keyStr = RedisModule_DictGetC(loaded_event_log, (void*)key, strlen(key), &nokey); + RedisModule_ReplyWithArray(ctx, 2); RedisModule_ReplyWithLongLong(ctx, !nokey); + if(nokey){ + RedisModule_ReplyWithNull(ctx); + }else{ + RedisModule_ReplyWithString(ctx, keyStr); + } return REDISMODULE_OK; } @@ -93,6 +99,13 @@ int RedisModule_OnLoad(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) } int RedisModule_OnUnload(RedisModuleCtx *ctx) { + RedisModuleDictIter *iter = RedisModule_DictIteratorStartC(loaded_event_log, "^", NULL, 0); + char* key; + size_t keyLen; + RedisModuleString* val; + while((key = RedisModule_DictNextC(iter, &keyLen, (void**)&val))){ + RedisModule_FreeString(ctx, val); + } RedisModule_FreeDict(ctx, loaded_event_log); loaded_event_log = NULL; return REDISMODULE_OK; diff --git a/tests/unit/moduleapi/keyspace_events.tcl b/tests/unit/moduleapi/keyspace_events.tcl index cb959ab52..5b3db0c0a 100644 --- a/tests/unit/moduleapi/keyspace_events.tcl +++ b/tests/unit/moduleapi/keyspace_events.tcl @@ -11,12 +11,12 @@ tags "modules" { r zadd t 1 f1 2 f2 r xadd s * f v r debug reload - 
assert_equal 1 [r keyspace.is_key_loaded x] - assert_equal 1 [r keyspace.is_key_loaded y] - assert_equal 1 [r keyspace.is_key_loaded z] - assert_equal 1 [r keyspace.is_key_loaded p] - assert_equal 1 [r keyspace.is_key_loaded t] - assert_equal 1 [r keyspace.is_key_loaded s] + assert_equal {1 x} [r keyspace.is_key_loaded x] + assert_equal {1 y} [r keyspace.is_key_loaded y] + assert_equal {1 z} [r keyspace.is_key_loaded z] + assert_equal {1 p} [r keyspace.is_key_loaded p] + assert_equal {1 t} [r keyspace.is_key_loaded t] + assert_equal {1 s} [r keyspace.is_key_loaded s] } } } \ No newline at end of file From f6d04d01b900194d6f0c7b60142720555874051f Mon Sep 17 00:00:00 2001 From: Yossi Gottlieb Date: Wed, 12 Aug 2020 17:58:56 +0300 Subject: [PATCH 125/215] Add oom-score-adj configuration option to control Linux OOM killer. (#1690) Add Linux kernel OOM killer control option. This adds the ability to control the Linux OOM killer oom_score_adj parameter for all Redis processes, depending on the process role (i.e. master, replica, background child). A oom-score-adj global boolean flag control this feature. In addition, specific values can be configured using oom-score-adj-values if additional tuning is required. (cherry picked from commit 2530dc0ebd8be8d792f4673073401377cd5bdc42) --- redis.conf | 26 ++++++++ src/config.c | 120 ++++++++++++++++++++++++++++++++++- src/config.h | 1 + src/replication.c | 6 ++ src/server.c | 60 ++++++++++++++++++ src/server.h | 12 ++++ tests/test_helper.tcl | 1 + tests/unit/oom-score-adj.tcl | 81 +++++++++++++++++++++++ 8 files changed, 306 insertions(+), 1 deletion(-) create mode 100644 tests/unit/oom-score-adj.tcl diff --git a/redis.conf b/redis.conf index 565c24eca..f2e7f1964 100644 --- a/redis.conf +++ b/redis.conf @@ -1039,6 +1039,32 @@ lazyfree-lazy-user-del no # --threads option to match the number of Redis theads, otherwise you'll not # be able to notice the improvements. +############################ KERNEL OOM CONTROL ############################## + +# On Linux, it is possible to hint the kernel OOM killer on what processes +# should be killed first when out of memory. +# +# Enabling this feature makes Redis actively control the oom_score_adj value +# for all its processes, depending on their role. The default scores will +# attempt to have background child processes killed before all others, and +# replicas killed before masters. + +oom-score-adj no + +# When oom-score-adj is used, this directive controls the specific values used +# for master, replica and background child processes. Values range -1000 to +# 1000 (higher means more likely to be killed). +# +# Unprivileged processes (not root, and without CAP_SYS_RESOURCE capabilities) +# can freely increase their value, but not decrease it below its initial +# settings. +# +# Values are used relative to the initial value of oom_score_adj when the server +# starts. Because typically the initial value is 0, they will often match the +# absolute values. + +oom-score-adj-values 0 200 800 + ############################## APPEND ONLY MODE ############################### # By default Redis asynchronously dumps the dataset on disk. 
This mode is diff --git a/src/config.c b/src/config.c index b3c437d49..52acb527b 100644 --- a/src/config.c +++ b/src/config.c @@ -111,6 +111,9 @@ clientBufferLimitsConfig clientBufferLimitsDefaults[CLIENT_TYPE_OBUF_COUNT] = { {1024*1024*32, 1024*1024*8, 60} /* pubsub */ }; +/* OOM Score defaults */ +int configOOMScoreAdjValuesDefaults[CONFIG_OOM_COUNT] = { 0, 200, 800 }; + /* Generic config infrastructure function pointers * int is_valid_fn(val, err) * Return 1 when val is valid, and 0 when invalid. @@ -286,6 +289,59 @@ void queueLoadModule(sds path, sds *argv, int argc) { listAddNodeTail(server.loadmodule_queue,loadmod); } +/* Parse an array of CONFIG_OOM_COUNT sds strings, validate and populate + * server.oom_score_adj_values if valid. + */ + +static int updateOOMScoreAdjValues(sds *args, char **err) { + int i; + int values[CONFIG_OOM_COUNT]; + + for (i = 0; i < CONFIG_OOM_COUNT; i++) { + char *eptr; + long long val = strtoll(args[i], &eptr, 10); + + if (*eptr != '\0' || val < -1000 || val > 1000) { + if (err) *err = "Invalid oom-score-adj-values, elements must be between -1000 and 1000."; + return C_ERR; + } + + values[i] = val; + } + + /* Verify that the values make sense. If they don't omit a warning but + * keep the configuration, which may still be valid for privileged processes. + */ + + if (values[CONFIG_OOM_REPLICA] < values[CONFIG_OOM_MASTER] || + values[CONFIG_OOM_BGCHILD] < values[CONFIG_OOM_REPLICA]) { + serverLog(LOG_WARNING, + "The oom-score-adj-values configuration may not work for non-privileged processes! " + "Please consult the documentation."); + } + + /* Store values, retain previous config for rollback in case we fail. */ + int old_values[CONFIG_OOM_COUNT]; + for (i = 0; i < CONFIG_OOM_COUNT; i++) { + old_values[i] = server.oom_score_adj_values[i]; + server.oom_score_adj_values[i] = values[i]; + } + + /* Update */ + if (setOOMScoreAdj(-1) == C_ERR) { + /* Roll back */ + for (i = 0; i < CONFIG_OOM_COUNT; i++) + server.oom_score_adj_values[i] = old_values[i]; + + if (err) + *err = "Failed to apply oom-score-adj-values configuration, check server logs."; + + return C_ERR; + } + + return C_OK; +} + void initConfigValues() { for (standardConfig *config = configs; config->name != NULL; config++) { config->interface.init(config->data); @@ -479,6 +535,8 @@ void loadServerConfigFromString(char *config) { server.client_obuf_limits[class].hard_limit_bytes = hard; server.client_obuf_limits[class].soft_limit_bytes = soft; server.client_obuf_limits[class].soft_limit_seconds = soft_seconds; + } else if (!strcasecmp(argv[0],"oom-score-adj-values") && argc == 1 + CONFIG_OOM_COUNT) { + if (updateOOMScoreAdjValues(&argv[1], &err) == C_ERR) goto loaderr; } else if (!strcasecmp(argv[0],"notify-keyspace-events") && argc == 2) { int flags = keyspaceEventsStringToFlags(argv[1]); @@ -728,6 +786,17 @@ void configSetCommand(client *c) { server.client_obuf_limits[class].soft_limit_seconds = soft_seconds; } sdsfreesplitres(v,vlen); + } config_set_special_field("oom-score-adj-values") { + int vlen; + int success = 1; + + sds *v = sdssplitlen(o->ptr, sdslen(o->ptr), " ", 1, &vlen); + if (vlen != CONFIG_OOM_COUNT || updateOOMScoreAdjValues(v, &errstr) == C_ERR) + success = 0; + + sdsfreesplitres(v, vlen); + if (!success) + goto badfmt; } config_set_special_field("notify-keyspace-events") { int flags = keyspaceEventsStringToFlags(o->ptr); @@ -923,6 +992,22 @@ void configGetCommand(client *c) { matches++; } + if (stringmatch(pattern,"oom-score-adj-values",0)) { + sds buf = sdsempty(); + int j; + + 
for (j = 0; j < CONFIG_OOM_COUNT; j++) { + buf = sdscatprintf(buf,"%d", server.oom_score_adj_values[j]); + if (j != CONFIG_OOM_COUNT-1) + buf = sdscatlen(buf," ",1); + } + + addReplyBulkCString(c,"oom-score-adj-values"); + addReplyBulkCString(c,buf); + sdsfree(buf); + matches++; + } + setDeferredMapLen(c,replylen,matches); } @@ -1330,6 +1415,25 @@ void rewriteConfigClientoutputbufferlimitOption(struct rewriteConfigState *state } } +/* Rewrite the oom-score-adj-values option. */ +void rewriteConfigOOMScoreAdjValuesOption(struct rewriteConfigState *state) { + int force = 0; + int j; + char *option = "oom-score-adj-values"; + sds line; + + line = sdsempty(); + for (j = 0; j < CONFIG_OOM_COUNT; j++) { + if (server.oom_score_adj_values[j] != configOOMScoreAdjValuesDefaults[j]) + force = 1; + + line = sdscatprintf(line, "%d", server.oom_score_adj_values[j]); + if (j+1 != CONFIG_OOM_COUNT) + line = sdscatlen(line, " ", 1); + } + rewriteConfigRewriteLine(state,option,line,force); +} + /* Rewrite the bind option. */ void rewriteConfigBindOption(struct rewriteConfigState *state) { int force = 1; @@ -1528,6 +1632,7 @@ int rewriteConfig(char *path) { rewriteConfigStringOption(state,"cluster-config-file",server.cluster_configfile,CONFIG_DEFAULT_CLUSTER_CONFIG_FILE); rewriteConfigNotifykeyspaceeventsOption(state); rewriteConfigClientoutputbufferlimitOption(state); + rewriteConfigOOMScoreAdjValuesOption(state); /* Rewrite Sentinel config if in Sentinel mode. */ if (server.sentinel_mode) rewriteConfigSentinelOption(state); @@ -2072,6 +2177,19 @@ static int updateMaxclients(long long val, long long prev, char **err) { return 1; } +static int updateOOMScoreAdj(int val, int prev, char **err) { + UNUSED(prev); + + if (val) { + if (setOOMScoreAdj(-1) == C_ERR) { + *err = "Failed to set current oom_score_adj. Check server logs."; + return 0; + } + } + + return 1; +} + #ifdef USE_OPENSSL static int updateTlsCfg(char *val, char *prev, char **err) { UNUSED(val); @@ -2136,7 +2254,7 @@ standardConfig configs[] = { createBoolConfig("cluster-enabled", NULL, IMMUTABLE_CONFIG, server.cluster_enabled, 0, NULL, NULL), createBoolConfig("appendonly", NULL, MODIFIABLE_CONFIG, server.aof_enabled, 0, NULL, updateAppendonly), createBoolConfig("cluster-allow-reads-when-down", NULL, MODIFIABLE_CONFIG, server.cluster_allow_reads_when_down, 0, NULL, NULL), - + createBoolConfig("oom-score-adj", NULL, MODIFIABLE_CONFIG, server.oom_score_adj, 0, NULL, updateOOMScoreAdj), /* String Configs */ createStringConfig("aclfile", NULL, IMMUTABLE_CONFIG, ALLOW_EMPTY_STRING, server.acl_filename, "", NULL, NULL), diff --git a/src/config.h b/src/config.h index 0fcc42972..e807b9330 100644 --- a/src/config.h +++ b/src/config.h @@ -54,6 +54,7 @@ #define HAVE_PROC_MAPS 1 #define HAVE_PROC_SMAPS 1 #define HAVE_PROC_SOMAXCONN 1 +#define HAVE_PROC_OOM_SCORE_ADJ 1 #endif /* Test for task_info() */ diff --git a/src/replication.c b/src/replication.c index a8f46be95..8f4ad2c92 100644 --- a/src/replication.c +++ b/src/replication.c @@ -2483,6 +2483,9 @@ void replicationSetMaster(char *ip, int port) { } disconnectAllBlockedClients(); /* Clients blocked in master, now slave. */ + /* Update oom_score_adj */ + setOOMScoreAdj(-1); + /* Force our slaves to resync with us as well. They may hopefully be able * to partially resync with us, but we can notify the replid change. */ disconnectSlaves(); @@ -2545,6 +2548,9 @@ void replicationUnsetMaster(void) { * master switch. 
*/ server.slaveseldb = -1; + /* Update oom_score_adj */ + setOOMScoreAdj(-1); + /* Once we turn from slave to master, we consider the starting time without * slaves (that is used to count the replication backlog time to live) as * starting from now. Otherwise the backlog will be freed after a diff --git a/src/server.c b/src/server.c index 3381356ea..5a986006e 100644 --- a/src/server.c +++ b/src/server.c @@ -2410,6 +2410,10 @@ void initServerConfig(void) { for (j = 0; j < CLIENT_TYPE_OBUF_COUNT; j++) server.client_obuf_limits[j] = clientBufferLimitsDefaults[j]; + /* Linux OOM Score config */ + for (j = 0; j < CONFIG_OOM_COUNT; j++) + server.oom_score_adj_values[j] = configOOMScoreAdjValuesDefaults[j]; + /* Double constants initialization */ R_Zero = 0.0; R_PosInf = 1.0/R_Zero; @@ -2519,6 +2523,58 @@ int restartServer(int flags, mstime_t delay) { return C_ERR; /* Never reached. */ } +static void readOOMScoreAdj(void) { +#ifdef HAVE_PROC_OOM_SCORE_ADJ + char buf[64]; + int fd = open("/proc/self/oom_score_adj", O_RDONLY); + + if (fd < 0) return; + if (read(fd, buf, sizeof(buf)) > 0) + server.oom_score_adj_base = atoi(buf); + close(fd); +#endif +} + +/* This function will configure the current process's oom_score_adj according + * to user specified configuration. This is currently implemented on Linux + * only. + * + * A process_class value of -1 implies OOM_CONFIG_MASTER or OOM_CONFIG_REPLICA, + * depending on current role. + */ +int setOOMScoreAdj(int process_class) { + int fd; + int val; + char buf[64]; + + if (!server.oom_score_adj) return C_OK; + if (process_class == -1) + process_class = (server.masterhost ? CONFIG_OOM_REPLICA : CONFIG_OOM_MASTER); + + serverAssert(process_class >= 0 && process_class < CONFIG_OOM_COUNT); + +#ifdef HAVE_PROC_OOM_SCORE_ADJ + val = server.oom_score_adj_base + server.oom_score_adj_values[process_class]; + if (val > 1000) val = 1000; + if (val < -1000) val = -1000; + + snprintf(buf, sizeof(buf) - 1, "%d\n", val); + + fd = open("/proc/self/oom_score_adj", O_WRONLY); + if (fd < 0 || write(fd, buf, strlen(buf)) < 0) { + serverLog(LOG_WARNING, "Unable to write oom_score_adj: %s", strerror(errno)); + if (fd != -1) close(fd); + return C_ERR; + } + + close(fd); + return C_OK; +#else + /* Unsupported */ + return C_ERR; +#endif +} + /* This function will try to raise the max number of open files accordingly to * the configured max number of clients. It also reserves a number of file * descriptors (CONFIG_MIN_RESERVED_FDS) for extra operations of @@ -4861,6 +4917,7 @@ int redisFork() { long long start = ustime(); if ((childpid = fork()) == 0) { /* Child */ + setOOMScoreAdj(CONFIG_OOM_BGCHILD); setupChildSignalHandlers(); closeClildUnusedResourceAfterFork(); } else { @@ -5194,6 +5251,7 @@ int main(int argc, char **argv) { server.supervised = redisIsSupervised(server.supervised_mode); int background = server.daemonize && !server.supervised; if (background) daemonize(); + readOOMScoreAdj(); initServer(); if (background || server.pidfile) createPidFile(); @@ -5246,6 +5304,8 @@ int main(int argc, char **argv) { } redisSetCpuAffinity(server.server_cpulist); + setOOMScoreAdj(-1); + aeMain(server.el); aeDeleteEventLoop(server.el); return 0; diff --git a/src/server.h b/src/server.h index 2d8279264..c42955b94 100644 --- a/src/server.h +++ b/src/server.h @@ -150,6 +150,14 @@ typedef long long ustime_t; /* microsecond time type. */ * in order to make sure of not over provisioning more than 128 fds. 
*/ #define CONFIG_FDSET_INCR (CONFIG_MIN_RESERVED_FDS+96) +/* OOM Score Adjustment classes. */ +#define CONFIG_OOM_MASTER 0 +#define CONFIG_OOM_REPLICA 1 +#define CONFIG_OOM_BGCHILD 2 +#define CONFIG_OOM_COUNT 3 + +extern int configOOMScoreAdjValuesDefaults[CONFIG_OOM_COUNT]; + /* Hash table parameters */ #define HASHTABLE_MIN_FILL 10 /* Minimal hash table fill 10% */ @@ -1345,6 +1353,9 @@ struct redisServer { int lfu_log_factor; /* LFU logarithmic counter factor. */ int lfu_decay_time; /* LFU counter decay factor. */ long long proto_max_bulk_len; /* Protocol bulk length maximum size. */ + int oom_score_adj_base; /* Base oom_score_adj value, as observed on startup */ + int oom_score_adj_values[CONFIG_OOM_COUNT]; /* Linux oom_score_adj configuration */ + int oom_score_adj; /* If true, oom_score_adj is managed */ /* Blocked clients */ unsigned int blocked_clients; /* # of clients executing a blocking cmd.*/ unsigned int blocked_clients_by_type[BLOCKED_NUM]; @@ -2014,6 +2025,7 @@ const char *evictPolicyToString(void); struct redisMemOverhead *getMemoryOverheadData(void); void freeMemoryOverheadData(struct redisMemOverhead *mh); void checkChildrenDone(void); +int setOOMScoreAdj(int process_class); #define RESTART_SERVER_NONE 0 #define RESTART_SERVER_GRACEFULLY (1<<0) /* Do proper shutdown. */ diff --git a/tests/test_helper.tcl b/tests/test_helper.tcl index 7ce0d545e..d0f962762 100644 --- a/tests/test_helper.tcl +++ b/tests/test_helper.tcl @@ -68,6 +68,7 @@ set ::all_tests { unit/pendingquerybuf unit/tls unit/tracking + unit/oom-score-adj } # Index to the next test to run in the ::all_tests list. set ::next_test 0 diff --git a/tests/unit/oom-score-adj.tcl b/tests/unit/oom-score-adj.tcl new file mode 100644 index 000000000..894a70fb2 --- /dev/null +++ b/tests/unit/oom-score-adj.tcl @@ -0,0 +1,81 @@ +set system_name [string tolower [exec uname -s]] +set user_id [exec id -u] + +if {$system_name eq {linux}} { + start_server {tags {"oom-score-adj"}} { + proc get_oom_score_adj {{pid ""}} { + if {$pid == ""} { + set pid [srv 0 pid] + } + set fd [open "/proc/$pid/oom_score_adj" "r"] + set val [gets $fd] + close $fd + + return $val + } + + proc get_child_pid {} { + set pid [srv 0 pid] + set fd [open "|ps --ppid $pid -o pid -h" "r"] + set child_pid [string trim [read $fd]] + close $fd + + return $child_pid + } + + test {CONFIG SET oom-score-adj works as expected} { + set base [get_oom_score_adj] + + # Enable oom-score-adj, check defaults + r config set oom-score-adj-values "10 20 30" + r config set oom-score-adj yes + + assert {[get_oom_score_adj] == [expr $base + 10]} + + # Modify current class + r config set oom-score-adj-values "15 20 30" + assert {[get_oom_score_adj] == [expr $base + 15]} + + # Check replica class + r replicaof localhost 1 + assert {[get_oom_score_adj] == [expr $base + 20]} + r replicaof no one + assert {[get_oom_score_adj] == [expr $base + 15]} + + # Check child process + r set key-a value-a + r config set rdb-key-save-delay 100000 + r bgsave + + set child_pid [get_child_pid] + assert {[get_oom_score_adj $child_pid] == [expr $base + 30]} + } + + # Failed oom-score-adj tests can only run unprivileged + if {$user_id != 0} { + test {CONFIG SET oom-score-adj handles configuration failures} { + # Bad config + r config set oom-score-adj no + r config set oom-score-adj-values "-1000 -1000 -1000" + + # Make sure it fails + catch {r config set oom-score-adj yes} e + assert_match {*Failed to set*} $e + + # Make sure it remains off + assert {[r config get oom-score-adj] == "oom-score-adj no"} 
+ + # Fix config + r config set oom-score-adj-values "0 100 100" + r config set oom-score-adj yes + + # Make sure it fails + catch {r config set oom-score-adj-values "-1000 -1000 -1000"} e + assert_match {*Failed*} $e + + # Make sure previous values remain + assert {[r config get oom-score-adj-values] == {oom-score-adj-values {0 100 100}}} + } + } + } +} From 2c0c5a809c413fe909dde11926b398235a2ea28f Mon Sep 17 00:00:00 2001 From: Nathan Scott Date: Fri, 14 Aug 2020 21:45:34 +1000 Subject: [PATCH 126/215] Annotate module API functions in redismodule.h for use with -fno-common (#6900) In order to keep the redismodule.h self-contained but still usable with gcc v10 and later, annotate each API function tentative definition with the __common__ attribute. This avoids the 'multiple definition' errors modules will otherwise see for all API functions at link time. Further details at gcc.gnu.org/gcc-10/porting_to.html Turn the existing __attribute__ ((unused)), ((__common__)) and ((print)) annotations into conditional macros for any compilers not accepting this syntax. These macros only expand to API annotations under gcc. Provide a pre- and post- macro for every API function, so that they can be defined differently by the file that includes redismodule.h. Removing REDISMODULE_API_FUNC in the interest of keeping the function declarations readable. Co-authored-by: Yossi Gottlieb Co-authored-by: Oran Agra (cherry picked from commit 11cd983d58199b6ac7fa54049734457bd767a0b5) --- src/redismodule.h | 506 ++++++++++++++++++++++++---------------------- 1 file changed, 265 insertions(+), 241 deletions(-) diff --git a/src/redismodule.h b/src/redismodule.h index 5f828b9e3..460fdd480 100644 --- a/src/redismodule.h +++ b/src/redismodule.h @@ -380,6 +380,31 @@ typedef struct RedisModuleLoadingProgressInfo { typedef long long mstime_t; +/* Macro definitions specific to individual compilers */ +#ifndef REDISMODULE_ATTR_UNUSED +# ifdef __GNUC__ +# define REDISMODULE_ATTR_UNUSED __attribute__((unused)) +# else +# define REDISMODULE_ATTR_UNUSED +# endif +#endif + +#ifndef REDISMODULE_ATTR_PRINTF +# ifdef __GNUC__ +# define REDISMODULE_ATTR_PRINTF(idx,cnt) __attribute__((format(printf,idx,cnt))) +# else +# define REDISMODULE_ATTR_PRINTF(idx,cnt) +# endif +#endif + +#ifndef REDISMODULE_ATTR_COMMON +# if defined(__GNUC__) && !defined(__clang__) +# define REDISMODULE_ATTR_COMMON __attribute__((__common__)) +# else +# define REDISMODULE_ATTR_COMMON +# endif +#endif + /* Incomplete structures for compiler checks but opaque access. 
*/ typedef struct RedisModuleCtx RedisModuleCtx; typedef struct RedisModuleKey RedisModuleKey; @@ -436,257 +461,256 @@ typedef struct RedisModuleTypeMethods { #define REDISMODULE_GET_API(name) \ RedisModule_GetApi("RedisModule_" #name, ((void **)&RedisModule_ ## name)) -#define REDISMODULE_API_FUNC(x) (*x) - - -void *REDISMODULE_API_FUNC(RedisModule_Alloc)(size_t bytes); -void *REDISMODULE_API_FUNC(RedisModule_Realloc)(void *ptr, size_t bytes); -void REDISMODULE_API_FUNC(RedisModule_Free)(void *ptr); -void *REDISMODULE_API_FUNC(RedisModule_Calloc)(size_t nmemb, size_t size); -char *REDISMODULE_API_FUNC(RedisModule_Strdup)(const char *str); -int REDISMODULE_API_FUNC(RedisModule_GetApi)(const char *, void *); -int REDISMODULE_API_FUNC(RedisModule_CreateCommand)(RedisModuleCtx *ctx, const char *name, RedisModuleCmdFunc cmdfunc, const char *strflags, int firstkey, int lastkey, int keystep); -void REDISMODULE_API_FUNC(RedisModule_SetModuleAttribs)(RedisModuleCtx *ctx, const char *name, int ver, int apiver); -int REDISMODULE_API_FUNC(RedisModule_IsModuleNameBusy)(const char *name); -int REDISMODULE_API_FUNC(RedisModule_WrongArity)(RedisModuleCtx *ctx); -int REDISMODULE_API_FUNC(RedisModule_ReplyWithLongLong)(RedisModuleCtx *ctx, long long ll); -int REDISMODULE_API_FUNC(RedisModule_GetSelectedDb)(RedisModuleCtx *ctx); -int REDISMODULE_API_FUNC(RedisModule_SelectDb)(RedisModuleCtx *ctx, int newid); -void *REDISMODULE_API_FUNC(RedisModule_OpenKey)(RedisModuleCtx *ctx, RedisModuleString *keyname, int mode); -void REDISMODULE_API_FUNC(RedisModule_CloseKey)(RedisModuleKey *kp); -int REDISMODULE_API_FUNC(RedisModule_KeyType)(RedisModuleKey *kp); -size_t REDISMODULE_API_FUNC(RedisModule_ValueLength)(RedisModuleKey *kp); -int REDISMODULE_API_FUNC(RedisModule_ListPush)(RedisModuleKey *kp, int where, RedisModuleString *ele); -RedisModuleString *REDISMODULE_API_FUNC(RedisModule_ListPop)(RedisModuleKey *key, int where); -RedisModuleCallReply *REDISMODULE_API_FUNC(RedisModule_Call)(RedisModuleCtx *ctx, const char *cmdname, const char *fmt, ...); -const char *REDISMODULE_API_FUNC(RedisModule_CallReplyProto)(RedisModuleCallReply *reply, size_t *len); -void REDISMODULE_API_FUNC(RedisModule_FreeCallReply)(RedisModuleCallReply *reply); -int REDISMODULE_API_FUNC(RedisModule_CallReplyType)(RedisModuleCallReply *reply); -long long REDISMODULE_API_FUNC(RedisModule_CallReplyInteger)(RedisModuleCallReply *reply); -size_t REDISMODULE_API_FUNC(RedisModule_CallReplyLength)(RedisModuleCallReply *reply); -RedisModuleCallReply *REDISMODULE_API_FUNC(RedisModule_CallReplyArrayElement)(RedisModuleCallReply *reply, size_t idx); -RedisModuleString *REDISMODULE_API_FUNC(RedisModule_CreateString)(RedisModuleCtx *ctx, const char *ptr, size_t len); -RedisModuleString *REDISMODULE_API_FUNC(RedisModule_CreateStringFromLongLong)(RedisModuleCtx *ctx, long long ll); -RedisModuleString *REDISMODULE_API_FUNC(RedisModule_CreateStringFromDouble)(RedisModuleCtx *ctx, double d); -RedisModuleString *REDISMODULE_API_FUNC(RedisModule_CreateStringFromLongDouble)(RedisModuleCtx *ctx, long double ld, int humanfriendly); -RedisModuleString *REDISMODULE_API_FUNC(RedisModule_CreateStringFromString)(RedisModuleCtx *ctx, const RedisModuleString *str); -#ifdef __GNUC__ -RedisModuleString *REDISMODULE_API_FUNC(RedisModule_CreateStringPrintf)(RedisModuleCtx *ctx, const char *fmt, ...) 
__attribute__ ((format (printf, 2, 3))); -#else -RedisModuleString *REDISMODULE_API_FUNC(RedisModule_CreateStringPrintf)(RedisModuleCtx *ctx, const char *fmt, ...); +/* Default API declaration prefix (not 'extern' for backwards compatibility) */ +#ifndef REDISMODULE_API +#define REDISMODULE_API #endif -void REDISMODULE_API_FUNC(RedisModule_FreeString)(RedisModuleCtx *ctx, RedisModuleString *str); -const char *REDISMODULE_API_FUNC(RedisModule_StringPtrLen)(const RedisModuleString *str, size_t *len); -int REDISMODULE_API_FUNC(RedisModule_ReplyWithError)(RedisModuleCtx *ctx, const char *err); -int REDISMODULE_API_FUNC(RedisModule_ReplyWithSimpleString)(RedisModuleCtx *ctx, const char *msg); -int REDISMODULE_API_FUNC(RedisModule_ReplyWithArray)(RedisModuleCtx *ctx, long len); -int REDISMODULE_API_FUNC(RedisModule_ReplyWithNullArray)(RedisModuleCtx *ctx); -int REDISMODULE_API_FUNC(RedisModule_ReplyWithEmptyArray)(RedisModuleCtx *ctx); -void REDISMODULE_API_FUNC(RedisModule_ReplySetArrayLength)(RedisModuleCtx *ctx, long len); -int REDISMODULE_API_FUNC(RedisModule_ReplyWithStringBuffer)(RedisModuleCtx *ctx, const char *buf, size_t len); -int REDISMODULE_API_FUNC(RedisModule_ReplyWithCString)(RedisModuleCtx *ctx, const char *buf); -int REDISMODULE_API_FUNC(RedisModule_ReplyWithString)(RedisModuleCtx *ctx, RedisModuleString *str); -int REDISMODULE_API_FUNC(RedisModule_ReplyWithEmptyString)(RedisModuleCtx *ctx); -int REDISMODULE_API_FUNC(RedisModule_ReplyWithVerbatimString)(RedisModuleCtx *ctx, const char *buf, size_t len); -int REDISMODULE_API_FUNC(RedisModule_ReplyWithNull)(RedisModuleCtx *ctx); -int REDISMODULE_API_FUNC(RedisModule_ReplyWithDouble)(RedisModuleCtx *ctx, double d); -int REDISMODULE_API_FUNC(RedisModule_ReplyWithLongDouble)(RedisModuleCtx *ctx, long double d); -int REDISMODULE_API_FUNC(RedisModule_ReplyWithCallReply)(RedisModuleCtx *ctx, RedisModuleCallReply *reply); -int REDISMODULE_API_FUNC(RedisModule_StringToLongLong)(const RedisModuleString *str, long long *ll); -int REDISMODULE_API_FUNC(RedisModule_StringToDouble)(const RedisModuleString *str, double *d); -int REDISMODULE_API_FUNC(RedisModule_StringToLongDouble)(const RedisModuleString *str, long double *d); -void REDISMODULE_API_FUNC(RedisModule_AutoMemory)(RedisModuleCtx *ctx); -int REDISMODULE_API_FUNC(RedisModule_Replicate)(RedisModuleCtx *ctx, const char *cmdname, const char *fmt, ...); -int REDISMODULE_API_FUNC(RedisModule_ReplicateVerbatim)(RedisModuleCtx *ctx); -const char *REDISMODULE_API_FUNC(RedisModule_CallReplyStringPtr)(RedisModuleCallReply *reply, size_t *len); -RedisModuleString *REDISMODULE_API_FUNC(RedisModule_CreateStringFromCallReply)(RedisModuleCallReply *reply); -int REDISMODULE_API_FUNC(RedisModule_DeleteKey)(RedisModuleKey *key); -int REDISMODULE_API_FUNC(RedisModule_UnlinkKey)(RedisModuleKey *key); -int REDISMODULE_API_FUNC(RedisModule_StringSet)(RedisModuleKey *key, RedisModuleString *str); -char *REDISMODULE_API_FUNC(RedisModule_StringDMA)(RedisModuleKey *key, size_t *len, int mode); -int REDISMODULE_API_FUNC(RedisModule_StringTruncate)(RedisModuleKey *key, size_t newlen); -mstime_t REDISMODULE_API_FUNC(RedisModule_GetExpire)(RedisModuleKey *key); -int REDISMODULE_API_FUNC(RedisModule_SetExpire)(RedisModuleKey *key, mstime_t expire); -void REDISMODULE_API_FUNC(RedisModule_ResetDataset)(int restart_aof, int async); -unsigned long long REDISMODULE_API_FUNC(RedisModule_DbSize)(RedisModuleCtx *ctx); -RedisModuleString *REDISMODULE_API_FUNC(RedisModule_RandomKey)(RedisModuleCtx *ctx); -int 
REDISMODULE_API_FUNC(RedisModule_ZsetAdd)(RedisModuleKey *key, double score, RedisModuleString *ele, int *flagsptr); -int REDISMODULE_API_FUNC(RedisModule_ZsetIncrby)(RedisModuleKey *key, double score, RedisModuleString *ele, int *flagsptr, double *newscore); -int REDISMODULE_API_FUNC(RedisModule_ZsetScore)(RedisModuleKey *key, RedisModuleString *ele, double *score); -int REDISMODULE_API_FUNC(RedisModule_ZsetRem)(RedisModuleKey *key, RedisModuleString *ele, int *deleted); -void REDISMODULE_API_FUNC(RedisModule_ZsetRangeStop)(RedisModuleKey *key); -int REDISMODULE_API_FUNC(RedisModule_ZsetFirstInScoreRange)(RedisModuleKey *key, double min, double max, int minex, int maxex); -int REDISMODULE_API_FUNC(RedisModule_ZsetLastInScoreRange)(RedisModuleKey *key, double min, double max, int minex, int maxex); -int REDISMODULE_API_FUNC(RedisModule_ZsetFirstInLexRange)(RedisModuleKey *key, RedisModuleString *min, RedisModuleString *max); -int REDISMODULE_API_FUNC(RedisModule_ZsetLastInLexRange)(RedisModuleKey *key, RedisModuleString *min, RedisModuleString *max); -RedisModuleString *REDISMODULE_API_FUNC(RedisModule_ZsetRangeCurrentElement)(RedisModuleKey *key, double *score); -int REDISMODULE_API_FUNC(RedisModule_ZsetRangeNext)(RedisModuleKey *key); -int REDISMODULE_API_FUNC(RedisModule_ZsetRangePrev)(RedisModuleKey *key); -int REDISMODULE_API_FUNC(RedisModule_ZsetRangeEndReached)(RedisModuleKey *key); -int REDISMODULE_API_FUNC(RedisModule_HashSet)(RedisModuleKey *key, int flags, ...); -int REDISMODULE_API_FUNC(RedisModule_HashGet)(RedisModuleKey *key, int flags, ...); -int REDISMODULE_API_FUNC(RedisModule_IsKeysPositionRequest)(RedisModuleCtx *ctx); -void REDISMODULE_API_FUNC(RedisModule_KeyAtPos)(RedisModuleCtx *ctx, int pos); -unsigned long long REDISMODULE_API_FUNC(RedisModule_GetClientId)(RedisModuleCtx *ctx); -int REDISMODULE_API_FUNC(RedisModule_GetClientInfoById)(void *ci, uint64_t id); -int REDISMODULE_API_FUNC(RedisModule_PublishMessage)(RedisModuleCtx *ctx, RedisModuleString *channel, RedisModuleString *message); -int REDISMODULE_API_FUNC(RedisModule_GetContextFlags)(RedisModuleCtx *ctx); -int REDISMODULE_API_FUNC(RedisModule_AvoidReplicaTraffic)(); -void *REDISMODULE_API_FUNC(RedisModule_PoolAlloc)(RedisModuleCtx *ctx, size_t bytes); -RedisModuleType *REDISMODULE_API_FUNC(RedisModule_CreateDataType)(RedisModuleCtx *ctx, const char *name, int encver, RedisModuleTypeMethods *typemethods); -int REDISMODULE_API_FUNC(RedisModule_ModuleTypeSetValue)(RedisModuleKey *key, RedisModuleType *mt, void *value); -int REDISMODULE_API_FUNC(RedisModule_ModuleTypeReplaceValue)(RedisModuleKey *key, RedisModuleType *mt, void *new_value, void **old_value); -RedisModuleType *REDISMODULE_API_FUNC(RedisModule_ModuleTypeGetType)(RedisModuleKey *key); -void *REDISMODULE_API_FUNC(RedisModule_ModuleTypeGetValue)(RedisModuleKey *key); -int REDISMODULE_API_FUNC(RedisModule_IsIOError)(RedisModuleIO *io); -void REDISMODULE_API_FUNC(RedisModule_SetModuleOptions)(RedisModuleCtx *ctx, int options); -int REDISMODULE_API_FUNC(RedisModule_SignalModifiedKey)(RedisModuleCtx *ctx, RedisModuleString *keyname); -void REDISMODULE_API_FUNC(RedisModule_SaveUnsigned)(RedisModuleIO *io, uint64_t value); -uint64_t REDISMODULE_API_FUNC(RedisModule_LoadUnsigned)(RedisModuleIO *io); -void REDISMODULE_API_FUNC(RedisModule_SaveSigned)(RedisModuleIO *io, int64_t value); -int64_t REDISMODULE_API_FUNC(RedisModule_LoadSigned)(RedisModuleIO *io); -void REDISMODULE_API_FUNC(RedisModule_EmitAOF)(RedisModuleIO *io, const char *cmdname, const char 
*fmt, ...); -void REDISMODULE_API_FUNC(RedisModule_SaveString)(RedisModuleIO *io, RedisModuleString *s); -void REDISMODULE_API_FUNC(RedisModule_SaveStringBuffer)(RedisModuleIO *io, const char *str, size_t len); -RedisModuleString *REDISMODULE_API_FUNC(RedisModule_LoadString)(RedisModuleIO *io); -char *REDISMODULE_API_FUNC(RedisModule_LoadStringBuffer)(RedisModuleIO *io, size_t *lenptr); -void REDISMODULE_API_FUNC(RedisModule_SaveDouble)(RedisModuleIO *io, double value); -double REDISMODULE_API_FUNC(RedisModule_LoadDouble)(RedisModuleIO *io); -void REDISMODULE_API_FUNC(RedisModule_SaveFloat)(RedisModuleIO *io, float value); -float REDISMODULE_API_FUNC(RedisModule_LoadFloat)(RedisModuleIO *io); -void REDISMODULE_API_FUNC(RedisModule_SaveLongDouble)(RedisModuleIO *io, long double value); -long double REDISMODULE_API_FUNC(RedisModule_LoadLongDouble)(RedisModuleIO *io); -void *REDISMODULE_API_FUNC(RedisModule_LoadDataTypeFromString)(const RedisModuleString *str, const RedisModuleType *mt); -RedisModuleString *REDISMODULE_API_FUNC(RedisModule_SaveDataTypeToString)(RedisModuleCtx *ctx, void *data, const RedisModuleType *mt); -#ifdef __GNUC__ -void REDISMODULE_API_FUNC(RedisModule_Log)(RedisModuleCtx *ctx, const char *level, const char *fmt, ...) __attribute__ ((format (printf, 3, 4))); -void REDISMODULE_API_FUNC(RedisModule_LogIOError)(RedisModuleIO *io, const char *levelstr, const char *fmt, ...) __attribute__ ((format (printf, 3, 4))); -#else -void REDISMODULE_API_FUNC(RedisModule_Log)(RedisModuleCtx *ctx, const char *level, const char *fmt, ...); -void REDISMODULE_API_FUNC(RedisModule_LogIOError)(RedisModuleIO *io, const char *levelstr, const char *fmt, ...); + +/* Default API declaration suffix (compiler attributes) */ +#ifndef REDISMODULE_ATTR +#define REDISMODULE_ATTR REDISMODULE_ATTR_COMMON #endif -void REDISMODULE_API_FUNC(RedisModule__Assert)(const char *estr, const char *file, int line); -void REDISMODULE_API_FUNC(RedisModule_LatencyAddSample)(const char *event, mstime_t latency); -int REDISMODULE_API_FUNC(RedisModule_StringAppendBuffer)(RedisModuleCtx *ctx, RedisModuleString *str, const char *buf, size_t len); -void REDISMODULE_API_FUNC(RedisModule_RetainString)(RedisModuleCtx *ctx, RedisModuleString *str); -RedisModuleString* REDISMODULE_API_FUNC(RedisModule_HoldString)(RedisModuleCtx *ctx, RedisModuleString *str); -int REDISMODULE_API_FUNC(RedisModule_StringCompare)(RedisModuleString *a, RedisModuleString *b); -RedisModuleCtx *REDISMODULE_API_FUNC(RedisModule_GetContextFromIO)(RedisModuleIO *io); -const RedisModuleString *REDISMODULE_API_FUNC(RedisModule_GetKeyNameFromIO)(RedisModuleIO *io); -const RedisModuleString *REDISMODULE_API_FUNC(RedisModule_GetKeyNameFromModuleKey)(RedisModuleKey *key); -long long REDISMODULE_API_FUNC(RedisModule_Milliseconds)(void); -void REDISMODULE_API_FUNC(RedisModule_DigestAddStringBuffer)(RedisModuleDigest *md, unsigned char *ele, size_t len); -void REDISMODULE_API_FUNC(RedisModule_DigestAddLongLong)(RedisModuleDigest *md, long long ele); -void REDISMODULE_API_FUNC(RedisModule_DigestEndSequence)(RedisModuleDigest *md); -RedisModuleDict *REDISMODULE_API_FUNC(RedisModule_CreateDict)(RedisModuleCtx *ctx); -void REDISMODULE_API_FUNC(RedisModule_FreeDict)(RedisModuleCtx *ctx, RedisModuleDict *d); -uint64_t REDISMODULE_API_FUNC(RedisModule_DictSize)(RedisModuleDict *d); -int REDISMODULE_API_FUNC(RedisModule_DictSetC)(RedisModuleDict *d, void *key, size_t keylen, void *ptr); -int REDISMODULE_API_FUNC(RedisModule_DictReplaceC)(RedisModuleDict *d, void *key, 
size_t keylen, void *ptr); -int REDISMODULE_API_FUNC(RedisModule_DictSet)(RedisModuleDict *d, RedisModuleString *key, void *ptr); -int REDISMODULE_API_FUNC(RedisModule_DictReplace)(RedisModuleDict *d, RedisModuleString *key, void *ptr); -void *REDISMODULE_API_FUNC(RedisModule_DictGetC)(RedisModuleDict *d, void *key, size_t keylen, int *nokey); -void *REDISMODULE_API_FUNC(RedisModule_DictGet)(RedisModuleDict *d, RedisModuleString *key, int *nokey); -int REDISMODULE_API_FUNC(RedisModule_DictDelC)(RedisModuleDict *d, void *key, size_t keylen, void *oldval); -int REDISMODULE_API_FUNC(RedisModule_DictDel)(RedisModuleDict *d, RedisModuleString *key, void *oldval); -RedisModuleDictIter *REDISMODULE_API_FUNC(RedisModule_DictIteratorStartC)(RedisModuleDict *d, const char *op, void *key, size_t keylen); -RedisModuleDictIter *REDISMODULE_API_FUNC(RedisModule_DictIteratorStart)(RedisModuleDict *d, const char *op, RedisModuleString *key); -void REDISMODULE_API_FUNC(RedisModule_DictIteratorStop)(RedisModuleDictIter *di); -int REDISMODULE_API_FUNC(RedisModule_DictIteratorReseekC)(RedisModuleDictIter *di, const char *op, void *key, size_t keylen); -int REDISMODULE_API_FUNC(RedisModule_DictIteratorReseek)(RedisModuleDictIter *di, const char *op, RedisModuleString *key); -void *REDISMODULE_API_FUNC(RedisModule_DictNextC)(RedisModuleDictIter *di, size_t *keylen, void **dataptr); -void *REDISMODULE_API_FUNC(RedisModule_DictPrevC)(RedisModuleDictIter *di, size_t *keylen, void **dataptr); -RedisModuleString *REDISMODULE_API_FUNC(RedisModule_DictNext)(RedisModuleCtx *ctx, RedisModuleDictIter *di, void **dataptr); -RedisModuleString *REDISMODULE_API_FUNC(RedisModule_DictPrev)(RedisModuleCtx *ctx, RedisModuleDictIter *di, void **dataptr); -int REDISMODULE_API_FUNC(RedisModule_DictCompareC)(RedisModuleDictIter *di, const char *op, void *key, size_t keylen); -int REDISMODULE_API_FUNC(RedisModule_DictCompare)(RedisModuleDictIter *di, const char *op, RedisModuleString *key); -int REDISMODULE_API_FUNC(RedisModule_RegisterInfoFunc)(RedisModuleCtx *ctx, RedisModuleInfoFunc cb); -int REDISMODULE_API_FUNC(RedisModule_InfoAddSection)(RedisModuleInfoCtx *ctx, char *name); -int REDISMODULE_API_FUNC(RedisModule_InfoBeginDictField)(RedisModuleInfoCtx *ctx, char *name); -int REDISMODULE_API_FUNC(RedisModule_InfoEndDictField)(RedisModuleInfoCtx *ctx); -int REDISMODULE_API_FUNC(RedisModule_InfoAddFieldString)(RedisModuleInfoCtx *ctx, char *field, RedisModuleString *value); -int REDISMODULE_API_FUNC(RedisModule_InfoAddFieldCString)(RedisModuleInfoCtx *ctx, char *field, char *value); -int REDISMODULE_API_FUNC(RedisModule_InfoAddFieldDouble)(RedisModuleInfoCtx *ctx, char *field, double value); -int REDISMODULE_API_FUNC(RedisModule_InfoAddFieldLongLong)(RedisModuleInfoCtx *ctx, char *field, long long value); -int REDISMODULE_API_FUNC(RedisModule_InfoAddFieldULongLong)(RedisModuleInfoCtx *ctx, char *field, unsigned long long value); -RedisModuleServerInfoData *REDISMODULE_API_FUNC(RedisModule_GetServerInfo)(RedisModuleCtx *ctx, const char *section); -void REDISMODULE_API_FUNC(RedisModule_FreeServerInfo)(RedisModuleCtx *ctx, RedisModuleServerInfoData *data); -RedisModuleString *REDISMODULE_API_FUNC(RedisModule_ServerInfoGetField)(RedisModuleCtx *ctx, RedisModuleServerInfoData *data, const char* field); -const char *REDISMODULE_API_FUNC(RedisModule_ServerInfoGetFieldC)(RedisModuleServerInfoData *data, const char* field); -long long REDISMODULE_API_FUNC(RedisModule_ServerInfoGetFieldSigned)(RedisModuleServerInfoData *data, const char* 
field, int *out_err); -unsigned long long REDISMODULE_API_FUNC(RedisModule_ServerInfoGetFieldUnsigned)(RedisModuleServerInfoData *data, const char* field, int *out_err); -double REDISMODULE_API_FUNC(RedisModule_ServerInfoGetFieldDouble)(RedisModuleServerInfoData *data, const char* field, int *out_err); -int REDISMODULE_API_FUNC(RedisModule_SubscribeToServerEvent)(RedisModuleCtx *ctx, RedisModuleEvent event, RedisModuleEventCallback callback); -int REDISMODULE_API_FUNC(RedisModule_SetLRU)(RedisModuleKey *key, mstime_t lru_idle); -int REDISMODULE_API_FUNC(RedisModule_GetLRU)(RedisModuleKey *key, mstime_t *lru_idle); -int REDISMODULE_API_FUNC(RedisModule_SetLFU)(RedisModuleKey *key, long long lfu_freq); -int REDISMODULE_API_FUNC(RedisModule_GetLFU)(RedisModuleKey *key, long long *lfu_freq); -RedisModuleBlockedClient *REDISMODULE_API_FUNC(RedisModule_BlockClientOnKeys)(RedisModuleCtx *ctx, RedisModuleCmdFunc reply_callback, RedisModuleCmdFunc timeout_callback, void (*free_privdata)(RedisModuleCtx*,void*), long long timeout_ms, RedisModuleString **keys, int numkeys, void *privdata); -void REDISMODULE_API_FUNC(RedisModule_SignalKeyAsReady)(RedisModuleCtx *ctx, RedisModuleString *key); -RedisModuleString *REDISMODULE_API_FUNC(RedisModule_GetBlockedClientReadyKey)(RedisModuleCtx *ctx); -RedisModuleScanCursor *REDISMODULE_API_FUNC(RedisModule_ScanCursorCreate)(); -void REDISMODULE_API_FUNC(RedisModule_ScanCursorRestart)(RedisModuleScanCursor *cursor); -void REDISMODULE_API_FUNC(RedisModule_ScanCursorDestroy)(RedisModuleScanCursor *cursor); -int REDISMODULE_API_FUNC(RedisModule_Scan)(RedisModuleCtx *ctx, RedisModuleScanCursor *cursor, RedisModuleScanCB fn, void *privdata); -int REDISMODULE_API_FUNC(RedisModule_ScanKey)(RedisModuleKey *key, RedisModuleScanCursor *cursor, RedisModuleScanKeyCB fn, void *privdata); + +REDISMODULE_API void * (*RedisModule_Alloc)(size_t bytes) REDISMODULE_ATTR; +REDISMODULE_API void * (*RedisModule_Realloc)(void *ptr, size_t bytes) REDISMODULE_ATTR; +REDISMODULE_API void (*RedisModule_Free)(void *ptr) REDISMODULE_ATTR; +REDISMODULE_API void * (*RedisModule_Calloc)(size_t nmemb, size_t size) REDISMODULE_ATTR; +REDISMODULE_API char * (*RedisModule_Strdup)(const char *str) REDISMODULE_ATTR; +REDISMODULE_API int (*RedisModule_GetApi)(const char *, void *) REDISMODULE_ATTR; +REDISMODULE_API int (*RedisModule_CreateCommand)(RedisModuleCtx *ctx, const char *name, RedisModuleCmdFunc cmdfunc, const char *strflags, int firstkey, int lastkey, int keystep) REDISMODULE_ATTR; +REDISMODULE_API void (*RedisModule_SetModuleAttribs)(RedisModuleCtx *ctx, const char *name, int ver, int apiver) REDISMODULE_ATTR; +REDISMODULE_API int (*RedisModule_IsModuleNameBusy)(const char *name) REDISMODULE_ATTR; +REDISMODULE_API int (*RedisModule_WrongArity)(RedisModuleCtx *ctx) REDISMODULE_ATTR; +REDISMODULE_API int (*RedisModule_ReplyWithLongLong)(RedisModuleCtx *ctx, long long ll) REDISMODULE_ATTR; +REDISMODULE_API int (*RedisModule_GetSelectedDb)(RedisModuleCtx *ctx) REDISMODULE_ATTR; +REDISMODULE_API int (*RedisModule_SelectDb)(RedisModuleCtx *ctx, int newid) REDISMODULE_ATTR; +REDISMODULE_API void * (*RedisModule_OpenKey)(RedisModuleCtx *ctx, RedisModuleString *keyname, int mode) REDISMODULE_ATTR; +REDISMODULE_API void (*RedisModule_CloseKey)(RedisModuleKey *kp) REDISMODULE_ATTR; +REDISMODULE_API int (*RedisModule_KeyType)(RedisModuleKey *kp) REDISMODULE_ATTR; +REDISMODULE_API size_t (*RedisModule_ValueLength)(RedisModuleKey *kp) REDISMODULE_ATTR; +REDISMODULE_API int 
(*RedisModule_ListPush)(RedisModuleKey *kp, int where, RedisModuleString *ele) REDISMODULE_ATTR; +REDISMODULE_API RedisModuleString * (*RedisModule_ListPop)(RedisModuleKey *key, int where) REDISMODULE_ATTR; +REDISMODULE_API RedisModuleCallReply * (*RedisModule_Call)(RedisModuleCtx *ctx, const char *cmdname, const char *fmt, ...) REDISMODULE_ATTR; +REDISMODULE_API const char * (*RedisModule_CallReplyProto)(RedisModuleCallReply *reply, size_t *len) REDISMODULE_ATTR; +REDISMODULE_API void (*RedisModule_FreeCallReply)(RedisModuleCallReply *reply) REDISMODULE_ATTR; +REDISMODULE_API int (*RedisModule_CallReplyType)(RedisModuleCallReply *reply) REDISMODULE_ATTR; +REDISMODULE_API long long (*RedisModule_CallReplyInteger)(RedisModuleCallReply *reply) REDISMODULE_ATTR; +REDISMODULE_API size_t (*RedisModule_CallReplyLength)(RedisModuleCallReply *reply) REDISMODULE_ATTR; +REDISMODULE_API RedisModuleCallReply * (*RedisModule_CallReplyArrayElement)(RedisModuleCallReply *reply, size_t idx) REDISMODULE_ATTR; +REDISMODULE_API RedisModuleString * (*RedisModule_CreateString)(RedisModuleCtx *ctx, const char *ptr, size_t len) REDISMODULE_ATTR; +REDISMODULE_API RedisModuleString * (*RedisModule_CreateStringFromLongLong)(RedisModuleCtx *ctx, long long ll) REDISMODULE_ATTR; +REDISMODULE_API RedisModuleString * (*RedisModule_CreateStringFromDouble)(RedisModuleCtx *ctx, double d) REDISMODULE_ATTR; +REDISMODULE_API RedisModuleString * (*RedisModule_CreateStringFromLongDouble)(RedisModuleCtx *ctx, long double ld, int humanfriendly) REDISMODULE_ATTR; +REDISMODULE_API RedisModuleString * (*RedisModule_CreateStringFromString)(RedisModuleCtx *ctx, const RedisModuleString *str) REDISMODULE_ATTR; +REDISMODULE_API RedisModuleString * (*RedisModule_CreateStringPrintf)(RedisModuleCtx *ctx, const char *fmt, ...) 
REDISMODULE_ATTR_PRINTF(2,3) REDISMODULE_ATTR; +REDISMODULE_API void (*RedisModule_FreeString)(RedisModuleCtx *ctx, RedisModuleString *str) REDISMODULE_ATTR; +REDISMODULE_API const char * (*RedisModule_StringPtrLen)(const RedisModuleString *str, size_t *len) REDISMODULE_ATTR; +REDISMODULE_API int (*RedisModule_ReplyWithError)(RedisModuleCtx *ctx, const char *err) REDISMODULE_ATTR; +REDISMODULE_API int (*RedisModule_ReplyWithSimpleString)(RedisModuleCtx *ctx, const char *msg) REDISMODULE_ATTR; +REDISMODULE_API int (*RedisModule_ReplyWithArray)(RedisModuleCtx *ctx, long len) REDISMODULE_ATTR; +REDISMODULE_API int (*RedisModule_ReplyWithNullArray)(RedisModuleCtx *ctx) REDISMODULE_ATTR; +REDISMODULE_API int (*RedisModule_ReplyWithEmptyArray)(RedisModuleCtx *ctx) REDISMODULE_ATTR; +REDISMODULE_API void (*RedisModule_ReplySetArrayLength)(RedisModuleCtx *ctx, long len) REDISMODULE_ATTR; +REDISMODULE_API int (*RedisModule_ReplyWithStringBuffer)(RedisModuleCtx *ctx, const char *buf, size_t len) REDISMODULE_ATTR; +REDISMODULE_API int (*RedisModule_ReplyWithCString)(RedisModuleCtx *ctx, const char *buf) REDISMODULE_ATTR; +REDISMODULE_API int (*RedisModule_ReplyWithString)(RedisModuleCtx *ctx, RedisModuleString *str) REDISMODULE_ATTR; +REDISMODULE_API int (*RedisModule_ReplyWithEmptyString)(RedisModuleCtx *ctx) REDISMODULE_ATTR; +REDISMODULE_API int (*RedisModule_ReplyWithVerbatimString)(RedisModuleCtx *ctx, const char *buf, size_t len) REDISMODULE_ATTR; +REDISMODULE_API int (*RedisModule_ReplyWithNull)(RedisModuleCtx *ctx) REDISMODULE_ATTR; +REDISMODULE_API int (*RedisModule_ReplyWithDouble)(RedisModuleCtx *ctx, double d) REDISMODULE_ATTR; +REDISMODULE_API int (*RedisModule_ReplyWithLongDouble)(RedisModuleCtx *ctx, long double d) REDISMODULE_ATTR; +REDISMODULE_API int (*RedisModule_ReplyWithCallReply)(RedisModuleCtx *ctx, RedisModuleCallReply *reply) REDISMODULE_ATTR; +REDISMODULE_API int (*RedisModule_StringToLongLong)(const RedisModuleString *str, long long *ll) REDISMODULE_ATTR; +REDISMODULE_API int (*RedisModule_StringToDouble)(const RedisModuleString *str, double *d) REDISMODULE_ATTR; +REDISMODULE_API int (*RedisModule_StringToLongDouble)(const RedisModuleString *str, long double *d) REDISMODULE_ATTR; +REDISMODULE_API void (*RedisModule_AutoMemory)(RedisModuleCtx *ctx) REDISMODULE_ATTR; +REDISMODULE_API int (*RedisModule_Replicate)(RedisModuleCtx *ctx, const char *cmdname, const char *fmt, ...) 
REDISMODULE_ATTR; +REDISMODULE_API int (*RedisModule_ReplicateVerbatim)(RedisModuleCtx *ctx) REDISMODULE_ATTR; +REDISMODULE_API const char * (*RedisModule_CallReplyStringPtr)(RedisModuleCallReply *reply, size_t *len) REDISMODULE_ATTR; +REDISMODULE_API RedisModuleString * (*RedisModule_CreateStringFromCallReply)(RedisModuleCallReply *reply) REDISMODULE_ATTR; +REDISMODULE_API int (*RedisModule_DeleteKey)(RedisModuleKey *key) REDISMODULE_ATTR; +REDISMODULE_API int (*RedisModule_UnlinkKey)(RedisModuleKey *key) REDISMODULE_ATTR; +REDISMODULE_API int (*RedisModule_StringSet)(RedisModuleKey *key, RedisModuleString *str) REDISMODULE_ATTR; +REDISMODULE_API char * (*RedisModule_StringDMA)(RedisModuleKey *key, size_t *len, int mode) REDISMODULE_ATTR; +REDISMODULE_API int (*RedisModule_StringTruncate)(RedisModuleKey *key, size_t newlen) REDISMODULE_ATTR; +REDISMODULE_API mstime_t (*RedisModule_GetExpire)(RedisModuleKey *key) REDISMODULE_ATTR; +REDISMODULE_API int (*RedisModule_SetExpire)(RedisModuleKey *key, mstime_t expire) REDISMODULE_ATTR; +REDISMODULE_API void (*RedisModule_ResetDataset)(int restart_aof, int async) REDISMODULE_ATTR; +REDISMODULE_API unsigned long long (*RedisModule_DbSize)(RedisModuleCtx *ctx) REDISMODULE_ATTR; +REDISMODULE_API RedisModuleString * (*RedisModule_RandomKey)(RedisModuleCtx *ctx) REDISMODULE_ATTR; +REDISMODULE_API int (*RedisModule_ZsetAdd)(RedisModuleKey *key, double score, RedisModuleString *ele, int *flagsptr) REDISMODULE_ATTR; +REDISMODULE_API int (*RedisModule_ZsetIncrby)(RedisModuleKey *key, double score, RedisModuleString *ele, int *flagsptr, double *newscore) REDISMODULE_ATTR; +REDISMODULE_API int (*RedisModule_ZsetScore)(RedisModuleKey *key, RedisModuleString *ele, double *score) REDISMODULE_ATTR; +REDISMODULE_API int (*RedisModule_ZsetRem)(RedisModuleKey *key, RedisModuleString *ele, int *deleted) REDISMODULE_ATTR; +REDISMODULE_API void (*RedisModule_ZsetRangeStop)(RedisModuleKey *key) REDISMODULE_ATTR; +REDISMODULE_API int (*RedisModule_ZsetFirstInScoreRange)(RedisModuleKey *key, double min, double max, int minex, int maxex) REDISMODULE_ATTR; +REDISMODULE_API int (*RedisModule_ZsetLastInScoreRange)(RedisModuleKey *key, double min, double max, int minex, int maxex) REDISMODULE_ATTR; +REDISMODULE_API int (*RedisModule_ZsetFirstInLexRange)(RedisModuleKey *key, RedisModuleString *min, RedisModuleString *max) REDISMODULE_ATTR; +REDISMODULE_API int (*RedisModule_ZsetLastInLexRange)(RedisModuleKey *key, RedisModuleString *min, RedisModuleString *max) REDISMODULE_ATTR; +REDISMODULE_API RedisModuleString * (*RedisModule_ZsetRangeCurrentElement)(RedisModuleKey *key, double *score) REDISMODULE_ATTR; +REDISMODULE_API int (*RedisModule_ZsetRangeNext)(RedisModuleKey *key) REDISMODULE_ATTR; +REDISMODULE_API int (*RedisModule_ZsetRangePrev)(RedisModuleKey *key) REDISMODULE_ATTR; +REDISMODULE_API int (*RedisModule_ZsetRangeEndReached)(RedisModuleKey *key) REDISMODULE_ATTR; +REDISMODULE_API int (*RedisModule_HashSet)(RedisModuleKey *key, int flags, ...) REDISMODULE_ATTR; +REDISMODULE_API int (*RedisModule_HashGet)(RedisModuleKey *key, int flags, ...) 
REDISMODULE_ATTR; +REDISMODULE_API int (*RedisModule_IsKeysPositionRequest)(RedisModuleCtx *ctx) REDISMODULE_ATTR; +REDISMODULE_API void (*RedisModule_KeyAtPos)(RedisModuleCtx *ctx, int pos) REDISMODULE_ATTR; +REDISMODULE_API unsigned long long (*RedisModule_GetClientId)(RedisModuleCtx *ctx) REDISMODULE_ATTR; +REDISMODULE_API int (*RedisModule_GetClientInfoById)(void *ci, uint64_t id) REDISMODULE_ATTR; +REDISMODULE_API int (*RedisModule_PublishMessage)(RedisModuleCtx *ctx, RedisModuleString *channel, RedisModuleString *message) REDISMODULE_ATTR; +REDISMODULE_API int (*RedisModule_GetContextFlags)(RedisModuleCtx *ctx) REDISMODULE_ATTR; +REDISMODULE_API int (*RedisModule_AvoidReplicaTraffic)() REDISMODULE_ATTR; +REDISMODULE_API void * (*RedisModule_PoolAlloc)(RedisModuleCtx *ctx, size_t bytes) REDISMODULE_ATTR; +REDISMODULE_API RedisModuleType * (*RedisModule_CreateDataType)(RedisModuleCtx *ctx, const char *name, int encver, RedisModuleTypeMethods *typemethods) REDISMODULE_ATTR; +REDISMODULE_API int (*RedisModule_ModuleTypeSetValue)(RedisModuleKey *key, RedisModuleType *mt, void *value) REDISMODULE_ATTR; +REDISMODULE_API int (*RedisModule_ModuleTypeReplaceValue)(RedisModuleKey *key, RedisModuleType *mt, void *new_value, void **old_value) REDISMODULE_ATTR; +REDISMODULE_API RedisModuleType * (*RedisModule_ModuleTypeGetType)(RedisModuleKey *key) REDISMODULE_ATTR; +REDISMODULE_API void * (*RedisModule_ModuleTypeGetValue)(RedisModuleKey *key) REDISMODULE_ATTR; +REDISMODULE_API int (*RedisModule_IsIOError)(RedisModuleIO *io) REDISMODULE_ATTR; +REDISMODULE_API void (*RedisModule_SetModuleOptions)(RedisModuleCtx *ctx, int options) REDISMODULE_ATTR; +REDISMODULE_API int (*RedisModule_SignalModifiedKey)(RedisModuleCtx *ctx, RedisModuleString *keyname) REDISMODULE_ATTR; +REDISMODULE_API void (*RedisModule_SaveUnsigned)(RedisModuleIO *io, uint64_t value) REDISMODULE_ATTR; +REDISMODULE_API uint64_t (*RedisModule_LoadUnsigned)(RedisModuleIO *io) REDISMODULE_ATTR; +REDISMODULE_API void (*RedisModule_SaveSigned)(RedisModuleIO *io, int64_t value) REDISMODULE_ATTR; +REDISMODULE_API int64_t (*RedisModule_LoadSigned)(RedisModuleIO *io) REDISMODULE_ATTR; +REDISMODULE_API void (*RedisModule_EmitAOF)(RedisModuleIO *io, const char *cmdname, const char *fmt, ...) 
REDISMODULE_ATTR; +REDISMODULE_API void (*RedisModule_SaveString)(RedisModuleIO *io, RedisModuleString *s) REDISMODULE_ATTR; +REDISMODULE_API void (*RedisModule_SaveStringBuffer)(RedisModuleIO *io, const char *str, size_t len) REDISMODULE_ATTR; +REDISMODULE_API RedisModuleString * (*RedisModule_LoadString)(RedisModuleIO *io) REDISMODULE_ATTR; +REDISMODULE_API char * (*RedisModule_LoadStringBuffer)(RedisModuleIO *io, size_t *lenptr) REDISMODULE_ATTR; +REDISMODULE_API void (*RedisModule_SaveDouble)(RedisModuleIO *io, double value) REDISMODULE_ATTR; +REDISMODULE_API double (*RedisModule_LoadDouble)(RedisModuleIO *io) REDISMODULE_ATTR; +REDISMODULE_API void (*RedisModule_SaveFloat)(RedisModuleIO *io, float value) REDISMODULE_ATTR; +REDISMODULE_API float (*RedisModule_LoadFloat)(RedisModuleIO *io) REDISMODULE_ATTR; +REDISMODULE_API void (*RedisModule_SaveLongDouble)(RedisModuleIO *io, long double value) REDISMODULE_ATTR; +REDISMODULE_API long double (*RedisModule_LoadLongDouble)(RedisModuleIO *io) REDISMODULE_ATTR; +REDISMODULE_API void * (*RedisModule_LoadDataTypeFromString)(const RedisModuleString *str, const RedisModuleType *mt) REDISMODULE_ATTR; +REDISMODULE_API RedisModuleString * (*RedisModule_SaveDataTypeToString)(RedisModuleCtx *ctx, void *data, const RedisModuleType *mt) REDISMODULE_ATTR; +REDISMODULE_API void (*RedisModule_Log)(RedisModuleCtx *ctx, const char *level, const char *fmt, ...) REDISMODULE_ATTR REDISMODULE_ATTR_PRINTF(3,4); +REDISMODULE_API void (*RedisModule_LogIOError)(RedisModuleIO *io, const char *levelstr, const char *fmt, ...) REDISMODULE_ATTR REDISMODULE_ATTR_PRINTF(3,4); +REDISMODULE_API void (*RedisModule__Assert)(const char *estr, const char *file, int line) REDISMODULE_ATTR; +REDISMODULE_API void (*RedisModule_LatencyAddSample)(const char *event, mstime_t latency) REDISMODULE_ATTR; +REDISMODULE_API int (*RedisModule_StringAppendBuffer)(RedisModuleCtx *ctx, RedisModuleString *str, const char *buf, size_t len) REDISMODULE_ATTR; +REDISMODULE_API void (*RedisModule_RetainString)(RedisModuleCtx *ctx, RedisModuleString *str) REDISMODULE_ATTR; +REDISMODULE_API RedisModuleString * (*RedisModule_HoldString)(RedisModuleCtx *ctx, RedisModuleString *str) REDISMODULE_ATTR; +REDISMODULE_API int (*RedisModule_StringCompare)(RedisModuleString *a, RedisModuleString *b) REDISMODULE_ATTR; +REDISMODULE_API RedisModuleCtx * (*RedisModule_GetContextFromIO)(RedisModuleIO *io) REDISMODULE_ATTR; +REDISMODULE_API const RedisModuleString * (*RedisModule_GetKeyNameFromIO)(RedisModuleIO *io) REDISMODULE_ATTR; +REDISMODULE_API const RedisModuleString * (*RedisModule_GetKeyNameFromModuleKey)(RedisModuleKey *key) REDISMODULE_ATTR; +REDISMODULE_API long long (*RedisModule_Milliseconds)(void) REDISMODULE_ATTR; +REDISMODULE_API void (*RedisModule_DigestAddStringBuffer)(RedisModuleDigest *md, unsigned char *ele, size_t len) REDISMODULE_ATTR; +REDISMODULE_API void (*RedisModule_DigestAddLongLong)(RedisModuleDigest *md, long long ele) REDISMODULE_ATTR; +REDISMODULE_API void (*RedisModule_DigestEndSequence)(RedisModuleDigest *md) REDISMODULE_ATTR; +REDISMODULE_API RedisModuleDict * (*RedisModule_CreateDict)(RedisModuleCtx *ctx) REDISMODULE_ATTR; +REDISMODULE_API void (*RedisModule_FreeDict)(RedisModuleCtx *ctx, RedisModuleDict *d) REDISMODULE_ATTR; +REDISMODULE_API uint64_t (*RedisModule_DictSize)(RedisModuleDict *d) REDISMODULE_ATTR; +REDISMODULE_API int (*RedisModule_DictSetC)(RedisModuleDict *d, void *key, size_t keylen, void *ptr) REDISMODULE_ATTR; +REDISMODULE_API int 
(*RedisModule_DictReplaceC)(RedisModuleDict *d, void *key, size_t keylen, void *ptr) REDISMODULE_ATTR; +REDISMODULE_API int (*RedisModule_DictSet)(RedisModuleDict *d, RedisModuleString *key, void *ptr) REDISMODULE_ATTR; +REDISMODULE_API int (*RedisModule_DictReplace)(RedisModuleDict *d, RedisModuleString *key, void *ptr) REDISMODULE_ATTR; +REDISMODULE_API void * (*RedisModule_DictGetC)(RedisModuleDict *d, void *key, size_t keylen, int *nokey) REDISMODULE_ATTR; +REDISMODULE_API void * (*RedisModule_DictGet)(RedisModuleDict *d, RedisModuleString *key, int *nokey) REDISMODULE_ATTR; +REDISMODULE_API int (*RedisModule_DictDelC)(RedisModuleDict *d, void *key, size_t keylen, void *oldval) REDISMODULE_ATTR; +REDISMODULE_API int (*RedisModule_DictDel)(RedisModuleDict *d, RedisModuleString *key, void *oldval) REDISMODULE_ATTR; +REDISMODULE_API RedisModuleDictIter * (*RedisModule_DictIteratorStartC)(RedisModuleDict *d, const char *op, void *key, size_t keylen) REDISMODULE_ATTR; +REDISMODULE_API RedisModuleDictIter * (*RedisModule_DictIteratorStart)(RedisModuleDict *d, const char *op, RedisModuleString *key) REDISMODULE_ATTR; +REDISMODULE_API void (*RedisModule_DictIteratorStop)(RedisModuleDictIter *di) REDISMODULE_ATTR; +REDISMODULE_API int (*RedisModule_DictIteratorReseekC)(RedisModuleDictIter *di, const char *op, void *key, size_t keylen) REDISMODULE_ATTR; +REDISMODULE_API int (*RedisModule_DictIteratorReseek)(RedisModuleDictIter *di, const char *op, RedisModuleString *key) REDISMODULE_ATTR; +REDISMODULE_API void * (*RedisModule_DictNextC)(RedisModuleDictIter *di, size_t *keylen, void **dataptr) REDISMODULE_ATTR; +REDISMODULE_API void * (*RedisModule_DictPrevC)(RedisModuleDictIter *di, size_t *keylen, void **dataptr) REDISMODULE_ATTR; +REDISMODULE_API RedisModuleString * (*RedisModule_DictNext)(RedisModuleCtx *ctx, RedisModuleDictIter *di, void **dataptr) REDISMODULE_ATTR; +REDISMODULE_API RedisModuleString * (*RedisModule_DictPrev)(RedisModuleCtx *ctx, RedisModuleDictIter *di, void **dataptr) REDISMODULE_ATTR; +REDISMODULE_API int (*RedisModule_DictCompareC)(RedisModuleDictIter *di, const char *op, void *key, size_t keylen) REDISMODULE_ATTR; +REDISMODULE_API int (*RedisModule_DictCompare)(RedisModuleDictIter *di, const char *op, RedisModuleString *key) REDISMODULE_ATTR; +REDISMODULE_API int (*RedisModule_RegisterInfoFunc)(RedisModuleCtx *ctx, RedisModuleInfoFunc cb) REDISMODULE_ATTR; +REDISMODULE_API int (*RedisModule_InfoAddSection)(RedisModuleInfoCtx *ctx, char *name) REDISMODULE_ATTR; +REDISMODULE_API int (*RedisModule_InfoBeginDictField)(RedisModuleInfoCtx *ctx, char *name) REDISMODULE_ATTR; +REDISMODULE_API int (*RedisModule_InfoEndDictField)(RedisModuleInfoCtx *ctx) REDISMODULE_ATTR; +REDISMODULE_API int (*RedisModule_InfoAddFieldString)(RedisModuleInfoCtx *ctx, char *field, RedisModuleString *value) REDISMODULE_ATTR; +REDISMODULE_API int (*RedisModule_InfoAddFieldCString)(RedisModuleInfoCtx *ctx, char *field, char *value) REDISMODULE_ATTR; +REDISMODULE_API int (*RedisModule_InfoAddFieldDouble)(RedisModuleInfoCtx *ctx, char *field, double value) REDISMODULE_ATTR; +REDISMODULE_API int (*RedisModule_InfoAddFieldLongLong)(RedisModuleInfoCtx *ctx, char *field, long long value) REDISMODULE_ATTR; +REDISMODULE_API int (*RedisModule_InfoAddFieldULongLong)(RedisModuleInfoCtx *ctx, char *field, unsigned long long value) REDISMODULE_ATTR; +REDISMODULE_API RedisModuleServerInfoData * (*RedisModule_GetServerInfo)(RedisModuleCtx *ctx, const char *section) REDISMODULE_ATTR; +REDISMODULE_API void 
(*RedisModule_FreeServerInfo)(RedisModuleCtx *ctx, RedisModuleServerInfoData *data) REDISMODULE_ATTR; +REDISMODULE_API RedisModuleString * (*RedisModule_ServerInfoGetField)(RedisModuleCtx *ctx, RedisModuleServerInfoData *data, const char* field) REDISMODULE_ATTR; +REDISMODULE_API const char * (*RedisModule_ServerInfoGetFieldC)(RedisModuleServerInfoData *data, const char* field) REDISMODULE_ATTR; +REDISMODULE_API long long (*RedisModule_ServerInfoGetFieldSigned)(RedisModuleServerInfoData *data, const char* field, int *out_err) REDISMODULE_ATTR; +REDISMODULE_API unsigned long long (*RedisModule_ServerInfoGetFieldUnsigned)(RedisModuleServerInfoData *data, const char* field, int *out_err) REDISMODULE_ATTR; +REDISMODULE_API double (*RedisModule_ServerInfoGetFieldDouble)(RedisModuleServerInfoData *data, const char* field, int *out_err) REDISMODULE_ATTR; +REDISMODULE_API int (*RedisModule_SubscribeToServerEvent)(RedisModuleCtx *ctx, RedisModuleEvent event, RedisModuleEventCallback callback) REDISMODULE_ATTR; +REDISMODULE_API int (*RedisModule_SetLRU)(RedisModuleKey *key, mstime_t lru_idle) REDISMODULE_ATTR; +REDISMODULE_API int (*RedisModule_GetLRU)(RedisModuleKey *key, mstime_t *lru_idle) REDISMODULE_ATTR; +REDISMODULE_API int (*RedisModule_SetLFU)(RedisModuleKey *key, long long lfu_freq) REDISMODULE_ATTR; +REDISMODULE_API int (*RedisModule_GetLFU)(RedisModuleKey *key, long long *lfu_freq) REDISMODULE_ATTR; +REDISMODULE_API RedisModuleBlockedClient * (*RedisModule_BlockClientOnKeys)(RedisModuleCtx *ctx, RedisModuleCmdFunc reply_callback, RedisModuleCmdFunc timeout_callback, void (*free_privdata)(RedisModuleCtx*,void*), long long timeout_ms, RedisModuleString **keys, int numkeys, void *privdata) REDISMODULE_ATTR; +REDISMODULE_API void (*RedisModule_SignalKeyAsReady)(RedisModuleCtx *ctx, RedisModuleString *key) REDISMODULE_ATTR; +REDISMODULE_API RedisModuleString * (*RedisModule_GetBlockedClientReadyKey)(RedisModuleCtx *ctx) REDISMODULE_ATTR; +REDISMODULE_API RedisModuleScanCursor * (*RedisModule_ScanCursorCreate)() REDISMODULE_ATTR; +REDISMODULE_API void (*RedisModule_ScanCursorRestart)(RedisModuleScanCursor *cursor) REDISMODULE_ATTR; +REDISMODULE_API void (*RedisModule_ScanCursorDestroy)(RedisModuleScanCursor *cursor) REDISMODULE_ATTR; +REDISMODULE_API int (*RedisModule_Scan)(RedisModuleCtx *ctx, RedisModuleScanCursor *cursor, RedisModuleScanCB fn, void *privdata) REDISMODULE_ATTR; +REDISMODULE_API int (*RedisModule_ScanKey)(RedisModuleKey *key, RedisModuleScanCursor *cursor, RedisModuleScanKeyCB fn, void *privdata) REDISMODULE_ATTR; + /* Experimental APIs */ #ifdef REDISMODULE_EXPERIMENTAL_API #define REDISMODULE_EXPERIMENTAL_API_VERSION 3 -RedisModuleBlockedClient *REDISMODULE_API_FUNC(RedisModule_BlockClient)(RedisModuleCtx *ctx, RedisModuleCmdFunc reply_callback, RedisModuleCmdFunc timeout_callback, void (*free_privdata)(RedisModuleCtx*,void*), long long timeout_ms); -int REDISMODULE_API_FUNC(RedisModule_UnblockClient)(RedisModuleBlockedClient *bc, void *privdata); -int REDISMODULE_API_FUNC(RedisModule_IsBlockedReplyRequest)(RedisModuleCtx *ctx); -int REDISMODULE_API_FUNC(RedisModule_IsBlockedTimeoutRequest)(RedisModuleCtx *ctx); -void *REDISMODULE_API_FUNC(RedisModule_GetBlockedClientPrivateData)(RedisModuleCtx *ctx); -RedisModuleBlockedClient *REDISMODULE_API_FUNC(RedisModule_GetBlockedClientHandle)(RedisModuleCtx *ctx); -int REDISMODULE_API_FUNC(RedisModule_AbortBlock)(RedisModuleBlockedClient *bc); -RedisModuleCtx 
*REDISMODULE_API_FUNC(RedisModule_GetThreadSafeContext)(RedisModuleBlockedClient *bc); -void REDISMODULE_API_FUNC(RedisModule_FreeThreadSafeContext)(RedisModuleCtx *ctx); -void REDISMODULE_API_FUNC(RedisModule_ThreadSafeContextLock)(RedisModuleCtx *ctx); -void REDISMODULE_API_FUNC(RedisModule_ThreadSafeContextUnlock)(RedisModuleCtx *ctx); -int REDISMODULE_API_FUNC(RedisModule_SubscribeToKeyspaceEvents)(RedisModuleCtx *ctx, int types, RedisModuleNotificationFunc cb); -int REDISMODULE_API_FUNC(RedisModule_NotifyKeyspaceEvent)(RedisModuleCtx *ctx, int type, const char *event, RedisModuleString *key); -int REDISMODULE_API_FUNC(RedisModule_GetNotifyKeyspaceEvents)(); -int REDISMODULE_API_FUNC(RedisModule_BlockedClientDisconnected)(RedisModuleCtx *ctx); -void REDISMODULE_API_FUNC(RedisModule_RegisterClusterMessageReceiver)(RedisModuleCtx *ctx, uint8_t type, RedisModuleClusterMessageReceiver callback); -int REDISMODULE_API_FUNC(RedisModule_SendClusterMessage)(RedisModuleCtx *ctx, char *target_id, uint8_t type, unsigned char *msg, uint32_t len); -int REDISMODULE_API_FUNC(RedisModule_GetClusterNodeInfo)(RedisModuleCtx *ctx, const char *id, char *ip, char *master_id, int *port, int *flags); -char **REDISMODULE_API_FUNC(RedisModule_GetClusterNodesList)(RedisModuleCtx *ctx, size_t *numnodes); -void REDISMODULE_API_FUNC(RedisModule_FreeClusterNodesList)(char **ids); -RedisModuleTimerID REDISMODULE_API_FUNC(RedisModule_CreateTimer)(RedisModuleCtx *ctx, mstime_t period, RedisModuleTimerProc callback, void *data); -int REDISMODULE_API_FUNC(RedisModule_StopTimer)(RedisModuleCtx *ctx, RedisModuleTimerID id, void **data); -int REDISMODULE_API_FUNC(RedisModule_GetTimerInfo)(RedisModuleCtx *ctx, RedisModuleTimerID id, uint64_t *remaining, void **data); -const char *REDISMODULE_API_FUNC(RedisModule_GetMyClusterID)(void); -size_t REDISMODULE_API_FUNC(RedisModule_GetClusterSize)(void); -void REDISMODULE_API_FUNC(RedisModule_GetRandomBytes)(unsigned char *dst, size_t len); -void REDISMODULE_API_FUNC(RedisModule_GetRandomHexChars)(char *dst, size_t len); -void REDISMODULE_API_FUNC(RedisModule_SetDisconnectCallback)(RedisModuleBlockedClient *bc, RedisModuleDisconnectFunc callback); -void REDISMODULE_API_FUNC(RedisModule_SetClusterFlags)(RedisModuleCtx *ctx, uint64_t flags); -int REDISMODULE_API_FUNC(RedisModule_ExportSharedAPI)(RedisModuleCtx *ctx, const char *apiname, void *func); -void *REDISMODULE_API_FUNC(RedisModule_GetSharedAPI)(RedisModuleCtx *ctx, const char *apiname); -RedisModuleCommandFilter *REDISMODULE_API_FUNC(RedisModule_RegisterCommandFilter)(RedisModuleCtx *ctx, RedisModuleCommandFilterFunc cb, int flags); -int REDISMODULE_API_FUNC(RedisModule_UnregisterCommandFilter)(RedisModuleCtx *ctx, RedisModuleCommandFilter *filter); -int REDISMODULE_API_FUNC(RedisModule_CommandFilterArgsCount)(RedisModuleCommandFilterCtx *fctx); -const RedisModuleString *REDISMODULE_API_FUNC(RedisModule_CommandFilterArgGet)(RedisModuleCommandFilterCtx *fctx, int pos); -int REDISMODULE_API_FUNC(RedisModule_CommandFilterArgInsert)(RedisModuleCommandFilterCtx *fctx, int pos, RedisModuleString *arg); -int REDISMODULE_API_FUNC(RedisModule_CommandFilterArgReplace)(RedisModuleCommandFilterCtx *fctx, int pos, RedisModuleString *arg); -int REDISMODULE_API_FUNC(RedisModule_CommandFilterArgDelete)(RedisModuleCommandFilterCtx *fctx, int pos); -int REDISMODULE_API_FUNC(RedisModule_Fork)(RedisModuleForkDoneHandler cb, void *user_data); -int REDISMODULE_API_FUNC(RedisModule_ExitFromChild)(int retcode); -int 
REDISMODULE_API_FUNC(RedisModule_KillForkChild)(int child_pid); -float REDISMODULE_API_FUNC(RedisModule_GetUsedMemoryRatio)(); -size_t REDISMODULE_API_FUNC(RedisModule_MallocSize)(void* ptr); -RedisModuleUser *REDISMODULE_API_FUNC(RedisModule_CreateModuleUser)(const char *name); -void REDISMODULE_API_FUNC(RedisModule_FreeModuleUser)(RedisModuleUser *user); -int REDISMODULE_API_FUNC(RedisModule_SetModuleUserACL)(RedisModuleUser *user, const char* acl); -int REDISMODULE_API_FUNC(RedisModule_AuthenticateClientWithACLUser)(RedisModuleCtx *ctx, const char *name, size_t len, RedisModuleUserChangedFunc callback, void *privdata, uint64_t *client_id); -int REDISMODULE_API_FUNC(RedisModule_AuthenticateClientWithUser)(RedisModuleCtx *ctx, RedisModuleUser *user, RedisModuleUserChangedFunc callback, void *privdata, uint64_t *client_id); -int REDISMODULE_API_FUNC(RedisModule_DeauthenticateAndCloseClient)(RedisModuleCtx *ctx, uint64_t client_id); +REDISMODULE_API RedisModuleBlockedClient * (*RedisModule_BlockClient)(RedisModuleCtx *ctx, RedisModuleCmdFunc reply_callback, RedisModuleCmdFunc timeout_callback, void (*free_privdata)(RedisModuleCtx*,void*), long long timeout_ms) REDISMODULE_ATTR; +REDISMODULE_API int (*RedisModule_UnblockClient)(RedisModuleBlockedClient *bc, void *privdata) REDISMODULE_ATTR; +REDISMODULE_API int (*RedisModule_IsBlockedReplyRequest)(RedisModuleCtx *ctx) REDISMODULE_ATTR; +REDISMODULE_API int (*RedisModule_IsBlockedTimeoutRequest)(RedisModuleCtx *ctx) REDISMODULE_ATTR; +REDISMODULE_API void * (*RedisModule_GetBlockedClientPrivateData)(RedisModuleCtx *ctx) REDISMODULE_ATTR; +REDISMODULE_API RedisModuleBlockedClient * (*RedisModule_GetBlockedClientHandle)(RedisModuleCtx *ctx) REDISMODULE_ATTR; +REDISMODULE_API int (*RedisModule_AbortBlock)(RedisModuleBlockedClient *bc) REDISMODULE_ATTR; +REDISMODULE_API RedisModuleCtx * (*RedisModule_GetThreadSafeContext)(RedisModuleBlockedClient *bc) REDISMODULE_ATTR; +REDISMODULE_API void (*RedisModule_FreeThreadSafeContext)(RedisModuleCtx *ctx) REDISMODULE_ATTR; +REDISMODULE_API void (*RedisModule_ThreadSafeContextLock)(RedisModuleCtx *ctx) REDISMODULE_ATTR; +REDISMODULE_API void (*RedisModule_ThreadSafeContextUnlock)(RedisModuleCtx *ctx) REDISMODULE_ATTR; +REDISMODULE_API int (*RedisModule_SubscribeToKeyspaceEvents)(RedisModuleCtx *ctx, int types, RedisModuleNotificationFunc cb) REDISMODULE_ATTR; +REDISMODULE_API int (*RedisModule_NotifyKeyspaceEvent)(RedisModuleCtx *ctx, int type, const char *event, RedisModuleString *key) REDISMODULE_ATTR; +REDISMODULE_API int (*RedisModule_GetNotifyKeyspaceEvents)() REDISMODULE_ATTR; +REDISMODULE_API int (*RedisModule_BlockedClientDisconnected)(RedisModuleCtx *ctx) REDISMODULE_ATTR; +REDISMODULE_API void (*RedisModule_RegisterClusterMessageReceiver)(RedisModuleCtx *ctx, uint8_t type, RedisModuleClusterMessageReceiver callback) REDISMODULE_ATTR; +REDISMODULE_API int (*RedisModule_SendClusterMessage)(RedisModuleCtx *ctx, char *target_id, uint8_t type, unsigned char *msg, uint32_t len) REDISMODULE_ATTR; +REDISMODULE_API int (*RedisModule_GetClusterNodeInfo)(RedisModuleCtx *ctx, const char *id, char *ip, char *master_id, int *port, int *flags) REDISMODULE_ATTR; +REDISMODULE_API char ** (*RedisModule_GetClusterNodesList)(RedisModuleCtx *ctx, size_t *numnodes) REDISMODULE_ATTR; +REDISMODULE_API void (*RedisModule_FreeClusterNodesList)(char **ids) REDISMODULE_ATTR; +REDISMODULE_API RedisModuleTimerID (*RedisModule_CreateTimer)(RedisModuleCtx *ctx, mstime_t period, RedisModuleTimerProc callback, void *data) 
REDISMODULE_ATTR; +REDISMODULE_API int (*RedisModule_StopTimer)(RedisModuleCtx *ctx, RedisModuleTimerID id, void **data) REDISMODULE_ATTR; +REDISMODULE_API int (*RedisModule_GetTimerInfo)(RedisModuleCtx *ctx, RedisModuleTimerID id, uint64_t *remaining, void **data) REDISMODULE_ATTR; +REDISMODULE_API const char * (*RedisModule_GetMyClusterID)(void) REDISMODULE_ATTR; +REDISMODULE_API size_t (*RedisModule_GetClusterSize)(void) REDISMODULE_ATTR; +REDISMODULE_API void (*RedisModule_GetRandomBytes)(unsigned char *dst, size_t len) REDISMODULE_ATTR; +REDISMODULE_API void (*RedisModule_GetRandomHexChars)(char *dst, size_t len) REDISMODULE_ATTR; +REDISMODULE_API void (*RedisModule_SetDisconnectCallback)(RedisModuleBlockedClient *bc, RedisModuleDisconnectFunc callback) REDISMODULE_ATTR; +REDISMODULE_API void (*RedisModule_SetClusterFlags)(RedisModuleCtx *ctx, uint64_t flags) REDISMODULE_ATTR; +REDISMODULE_API int (*RedisModule_ExportSharedAPI)(RedisModuleCtx *ctx, const char *apiname, void *func) REDISMODULE_ATTR; +REDISMODULE_API void * (*RedisModule_GetSharedAPI)(RedisModuleCtx *ctx, const char *apiname) REDISMODULE_ATTR; +REDISMODULE_API RedisModuleCommandFilter * (*RedisModule_RegisterCommandFilter)(RedisModuleCtx *ctx, RedisModuleCommandFilterFunc cb, int flags) REDISMODULE_ATTR; +REDISMODULE_API int (*RedisModule_UnregisterCommandFilter)(RedisModuleCtx *ctx, RedisModuleCommandFilter *filter) REDISMODULE_ATTR; +REDISMODULE_API int (*RedisModule_CommandFilterArgsCount)(RedisModuleCommandFilterCtx *fctx) REDISMODULE_ATTR; +REDISMODULE_API const RedisModuleString * (*RedisModule_CommandFilterArgGet)(RedisModuleCommandFilterCtx *fctx, int pos) REDISMODULE_ATTR; +REDISMODULE_API int (*RedisModule_CommandFilterArgInsert)(RedisModuleCommandFilterCtx *fctx, int pos, RedisModuleString *arg) REDISMODULE_ATTR; +REDISMODULE_API int (*RedisModule_CommandFilterArgReplace)(RedisModuleCommandFilterCtx *fctx, int pos, RedisModuleString *arg) REDISMODULE_ATTR; +REDISMODULE_API int (*RedisModule_CommandFilterArgDelete)(RedisModuleCommandFilterCtx *fctx, int pos) REDISMODULE_ATTR; +REDISMODULE_API int (*RedisModule_Fork)(RedisModuleForkDoneHandler cb, void *user_data) REDISMODULE_ATTR; +REDISMODULE_API int (*RedisModule_ExitFromChild)(int retcode) REDISMODULE_ATTR; +REDISMODULE_API int (*RedisModule_KillForkChild)(int child_pid) REDISMODULE_ATTR; +REDISMODULE_API float (*RedisModule_GetUsedMemoryRatio)() REDISMODULE_ATTR; +REDISMODULE_API size_t (*RedisModule_MallocSize)(void* ptr) REDISMODULE_ATTR; +REDISMODULE_API RedisModuleUser * (*RedisModule_CreateModuleUser)(const char *name) REDISMODULE_ATTR; +REDISMODULE_API void (*RedisModule_FreeModuleUser)(RedisModuleUser *user) REDISMODULE_ATTR; +REDISMODULE_API int (*RedisModule_SetModuleUserACL)(RedisModuleUser *user, const char* acl) REDISMODULE_ATTR; +REDISMODULE_API int (*RedisModule_AuthenticateClientWithACLUser)(RedisModuleCtx *ctx, const char *name, size_t len, RedisModuleUserChangedFunc callback, void *privdata, uint64_t *client_id) REDISMODULE_ATTR; +REDISMODULE_API int (*RedisModule_AuthenticateClientWithUser)(RedisModuleCtx *ctx, RedisModuleUser *user, RedisModuleUserChangedFunc callback, void *privdata, uint64_t *client_id) REDISMODULE_ATTR; +REDISMODULE_API int (*RedisModule_DeauthenticateAndCloseClient)(RedisModuleCtx *ctx, uint64_t client_id) REDISMODULE_ATTR; #endif #define RedisModule_IsAOFClient(id) ((id) == CLIENT_ID_AOF) /* This is included inline inside each Redis module. 
*/ -static int RedisModule_Init(RedisModuleCtx *ctx, const char *name, int ver, int apiver) __attribute__((unused)); +static int RedisModule_Init(RedisModuleCtx *ctx, const char *name, int ver, int apiver) REDISMODULE_ATTR_UNUSED; static int RedisModule_Init(RedisModuleCtx *ctx, const char *name, int ver, int apiver) { void *getapifuncptr = ((void**)ctx)[0]; RedisModule_GetApi = (int (*)(const char *, void *)) (unsigned long)getapifuncptr; From d4e1c8805217c856271db50407d37888d6bdb586 Mon Sep 17 00:00:00 2001 From: Wen Hui Date: Fri, 21 Aug 2020 16:37:49 -0400 Subject: [PATCH 127/215] fix make warnings (#7692) (cherry picked from commit e61adc0d897074d8c2ca8e0f7bf08fa2985d9b01) --- src/server.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/src/server.c b/src/server.c index 5a986006e..a37fd137a 100644 --- a/src/server.c +++ b/src/server.c @@ -2543,9 +2543,6 @@ static void readOOMScoreAdj(void) { * depending on current role. */ int setOOMScoreAdj(int process_class) { - int fd; - int val; - char buf[64]; if (!server.oom_score_adj) return C_OK; if (process_class == -1) @@ -2554,6 +2551,10 @@ int setOOMScoreAdj(int process_class) { serverAssert(process_class >= 0 && process_class < CONFIG_OOM_COUNT); #ifdef HAVE_PROC_OOM_SCORE_ADJ + int fd; + int val; + char buf[64]; + val = server.oom_score_adj_base + server.oom_score_adj_values[process_class]; if (val > 1000) val = 1000; if (val < -1000) val = -1000; From 1b100a167a006db4a9baa81267cc5230322085d2 Mon Sep 17 00:00:00 2001 From: Wang Yuan Date: Tue, 25 Aug 2020 03:59:56 +0800 Subject: [PATCH 128/215] Fix wrong format specifiers of 'sdscatfmt' for the INFO command (#7706) unlike printf, sdscatfmt doesn't take %d (cherry picked from commit 43af28f5b487370bd3d65d00be93c4a23ee42fa7) --- src/server.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/server.c b/src/server.c index a37fd137a..f9154794b 100644 --- a/src/server.c +++ b/src/server.c @@ -4137,7 +4137,7 @@ sds genRedisInfoString(const char *section) { "lru_clock:%u\r\n" "executable:%s\r\n" "config_file:%s\r\n" - "io_threads_active:%d\r\n", + "io_threads_active:%i\r\n", REDIS_VERSION, redisGitSHA1(), strtol(redisGitDirty(),NULL,10) > 0, From f38e2802b65a2540c37440849b091e93ea8ddbba Mon Sep 17 00:00:00 2001 From: Yossi Gottlieb Date: Sun, 30 Aug 2020 12:23:47 +0300 Subject: [PATCH 129/215] Fix oom-score-adj on older distros. (#7724) Don't assume `ps` handles `-h` to display output without headers and manually trim headers line from output. 
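For illustration only, a rough shell equivalent of the new parsing (assuming a Linux procps `ps`; actual pids depend on the environment): without `-h`, ps prints a "PID" header line first, so the child pid has to be taken from the second line rather than from the whole output.

    # List children of the current shell, skip the header line, strip padding.
    ps --ppid "$$" -o pid | sed -n '2p' | tr -d ' '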
(cherry picked from commit b61b663895f16d9f559a14c408c225062254a57b) --- tests/unit/oom-score-adj.tcl | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/unit/oom-score-adj.tcl b/tests/unit/oom-score-adj.tcl index 894a70fb2..993004602 100644 --- a/tests/unit/oom-score-adj.tcl +++ b/tests/unit/oom-score-adj.tcl @@ -16,8 +16,8 @@ if {$system_name eq {linux}} { proc get_child_pid {} { set pid [srv 0 pid] - set fd [open "|ps --ppid $pid -o pid -h" "r"] - set child_pid [string trim [read $fd]] + set fd [open "|ps --ppid $pid -o pid" "r"] + set child_pid [string trim [lindex [split [read $fd] \n] 1]] close $fd return $child_pid From 00d0d870d289ef754a610c2b2cfc436ec6863447 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Leo=C5=A1=20Liter=C3=A1k?= Date: Mon, 31 Aug 2020 11:44:09 +0200 Subject: [PATCH 130/215] Update README.md with instructions how to build with systemd support (#7730) #7728 - update instructions for systemd support (cherry picked from commit 571571ca192ec0b7cc66ca61cd6794dcb6a9d8bc) --- README.md | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/README.md b/README.md index 55537e01f..ca205f2a0 100644 --- a/README.md +++ b/README.md @@ -40,6 +40,11 @@ libssl-dev on Debian/Ubuntu) and run: % make BUILD_TLS=yes +To build with systemd support, you'll need systemd development libraries (such +as libsystemd-dev on Debian/Ubuntu or systemd-devel on CentOS) and run: + + % make BUILD_WITH_SYSTEMD=yes USE_SYSTEMD=yes + You can run a 32 bit Redis binary using: % make 32bit From d2532d133518f2e07720f0947a6597447a3d45b4 Mon Sep 17 00:00:00 2001 From: Yossi Gottlieb Date: Mon, 31 Aug 2020 20:42:46 +0300 Subject: [PATCH 131/215] Backport Lua 5.2.2 stack overflow fix. (#7733) This fixes the issue described in CVE-2014-5461. At this time we cannot confirm that the original issue has a real impact on Redis, but it is included as an extra safety measure. (cherry picked from commit d75ad774a92bd7de0b9448be3d622d7a13b7af27) --- deps/lua/src/ldo.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/deps/lua/src/ldo.c b/deps/lua/src/ldo.c index 514f7a2a3..939940a4c 100644 --- a/deps/lua/src/ldo.c +++ b/deps/lua/src/ldo.c @@ -274,7 +274,7 @@ int luaD_precall (lua_State *L, StkId func, int nresults) { CallInfo *ci; StkId st, base; Proto *p = cl->p; - luaD_checkstack(L, p->maxstacksize); + luaD_checkstack(L, p->maxstacksize + p->numparams); func = restorestack(L, funcr); if (!p->is_vararg) { /* no varargs? */ base = func + 1; From 4bb40a9688d0b55934b98dbc4921f10b1d6adef4 Mon Sep 17 00:00:00 2001 From: Oran Agra Date: Sun, 9 Aug 2020 06:08:00 +0300 Subject: [PATCH 132/215] Reduce the probability of failure when start redis in runtest-cluster #7554 (#7635) When running runtest-cluster we first need to create a cluster using spawn_instance: a port that is not in use is chosen, but sometimes the server still fails to start on that port, possibly due to a race with another process taking it first (as in redis/redis/runs/896537490) or due to a problem on the machine. To reduce the probability of failure when starting Redis in runtest-cluster, we now attempt to use another port when we find that the server did not start up.
Co-authored-by: Oran Agra Co-authored-by: yanhui13 (cherry picked from commit e2d64485b8262971776fb1be803c7296c98d1572) --- tests/instances.tcl | 28 +++++++++++++++++++++++----- 1 file changed, 23 insertions(+), 5 deletions(-) diff --git a/tests/instances.tcl b/tests/instances.tcl index 691378b9b..a43a4cc87 100644 --- a/tests/instances.tcl +++ b/tests/instances.tcl @@ -59,8 +59,6 @@ proc exec_instance {type cfgfile} { proc spawn_instance {type base_port count {conf {}}} { for {set j 0} {$j < $count} {incr j} { set port [find_available_port $base_port $::redis_port_count] - incr base_port - puts "Starting $type #$j at port $port" # Create a directory for this instance. set dirname "${type}_${j}" @@ -93,10 +91,30 @@ proc spawn_instance {type base_port count {conf {}}} { close $cfg # Finally exec it and remember the pid for later cleanup. - set pid [exec_instance $type $cfgfile] - lappend ::pids $pid + set retry 100 + while {$retry} { + set pid [exec_instance $type $cfgfile] - # Check availability + # Check availability + if {[server_is_up 127.0.0.1 $port 100] == 0} { + puts "Starting $type #$j at port $port failed, try another" + incr retry -1 + set port [find_available_port $base_port $::redis_port_count] + set cfg [open $cfgfile a+] + if {$::tls} { + puts $cfg "tls-port $port" + } else { + puts $cfg "port $port" + } + close $cfg + } else { + puts "Starting $type #$j at port $port" + lappend ::pids $pid + break + } + } + + # Check availability finally if {[server_is_up 127.0.0.1 $port 100] == 0} { set logfile [file join $dirname log.txt] puts [exec tail $logfile] From dbea5f7a8dce8291690f308f4adeb5d9ec979c05 Mon Sep 17 00:00:00 2001 From: Oran Agra Date: Fri, 28 Aug 2020 11:29:53 +0300 Subject: [PATCH 133/215] Redis 6.0.7 --- 00-RELEASENOTES | 324 ++++++++++++++++++++++++++++++++++++++++++++++++ src/version.h | 2 +- 2 files changed, 325 insertions(+), 1 deletion(-) diff --git a/00-RELEASENOTES b/00-RELEASENOTES index 484aeb621..ce75e19c2 100644 --- a/00-RELEASENOTES +++ b/00-RELEASENOTES @@ -11,6 +11,330 @@ CRITICAL: There is a critical bug affecting MOST USERS. Upgrade ASAP. SECURITY: There are security fixes in the release. -------------------------------------------------------------------------------- +================================================================================ +Redis 6.0.7 Released Fri Aug 28 11:05:09 IDT 2020 +================================================================================ + +Upgrade urgency MODERATE: several bugs with moderate impact are fixed, +Specifically the first two listed below which cause protocol errors for clients. 
+ +Bug fixes: + +* CONFIG SET could hang the client when it arrives during RDB/AOF loading (when + processed after another command that was also rejected with a -LOADING error) +* LPOS command when RANK is greater than matches responded with a broken protocol + (negative multi-bulk count) +* UNLINK / Lazyfree for a stream type key would never have done async freeing +* PERSIST should invalidate WATCH (Like EXPIRE does) +* EXEC with only read commands could have been rejected when OOM +* TLS: relax verification on CONFIG SET (Don't error if some configs are set + and tls isn't enabled) +* TLS: support cluster/replication without tls-port +* Systemd startup after network is online +* Redis-benchmark improvements +* Various small bug fixes + +New features: + +* Add oom-score-adj configuration option to control Linux OOM killer +* Show IO threads statistics and status in INFO output +* Add optional tls verification mode (see tls-auth-clients) + +Module API: + +* Add RedisModule_HoldString +* Add loaded keyspace event +* Fix RedisModuleEvent_LoadingProgress +* Fix RedisModuleEvent_MasterLinkChange hook missing on successful psync +* Fix missing RM_CLIENTINFO_FLAG_SSL +* Refactor redismodule.h for use with -fno-common / extern + +Full list of commits: + +Oran Agra in commit c26394e4f: + Reduce the probability of failure when start redis in runtest-cluster #7554 (#7635) + 1 file changed, 23 insertions(+), 5 deletions(-) + +Leoš Literák in commit 745d5e802: + Update README.md with instructions how to build with systemd support (#7730) + 1 file changed, 5 insertions(+) + +Yossi Gottlieb in commit 03f1d208a: + Fix oom-score-adj on older distros. (#7724) + 1 file changed, 2 insertions(+), 2 deletions(-) + +Yossi Gottlieb in commit 941174d9c: + Backport Lua 5.2.2 stack overflow fix. (#7733) + 1 file changed, 1 insertion(+), 1 deletion(-) + +Wang Yuan in commit c897dba14: + Fix wrong format specifiers of 'sdscatfmt' for the INFO command (#7706) + 1 file changed, 1 insertion(+), 1 deletion(-) + +Wen Hui in commit 5e3fab5e7: + fix make warnings (#7692) + 1 file changed, 4 insertions(+), 3 deletions(-) + +Nathan Scott in commit a2b09c13f: + Annotate module API functions in redismodule.h for use with -fno-common (#6900) + 1 file changed, 265 insertions(+), 241 deletions(-) + +Yossi Gottlieb in commit bf244273f: + Add oom-score-adj configuration option to control Linux OOM killer. (#1690) + 8 files changed, 306 insertions(+), 1 deletion(-) + +Meir Shpilraien (Spielrein) in commit b5a6ab98f: + see #7544, added RedisModule_HoldString api. 
(#7577) + 4 files changed, 83 insertions(+), 8 deletions(-) + +ShooterIT in commit ff04cf62b: + [Redis-benchmark] Remove zrem test, add zpopmin test + 1 file changed, 5 insertions(+), 5 deletions(-) + +ShooterIT in commit 0f3260f31: + [Redis-benchmark] Support zset type + 1 file changed, 16 insertions(+) + +Arun Ranganathan in commit 45d0b94fc: + Show threading configuration in INFO output (#7446) + 3 files changed, 46 insertions(+), 14 deletions(-) + +Meir Shpilraien (Spielrein) in commit a22f61e12: + This PR introduces a new loaded keyspace event (#7536) + 8 files changed, 135 insertions(+), 4 deletions(-) + +Oran Agra in commit 1c9ca1030: + Fix rejectCommand trims newline in shared error objects, hung clients (#7714) + 4 files changed, 42 insertions(+), 23 deletions(-) + +valentinogeron in commit 217471795: + EXEC with only read commands should not be rejected when OOM (#7696) + 2 files changed, 51 insertions(+), 8 deletions(-) + +Itamar Haber in commit 6e6c47d16: + Expands lazyfree's effort estimate to include Streams (#5794) + 1 file changed, 24 insertions(+) + +Yossi Gottlieb in commit da6813623: + Add language servers stuff, test/tls to gitignore. (#7698) + 1 file changed, 4 insertions(+) + +Valentino Geron in commit de7fb126e: + Assert that setDeferredAggregateLen isn't called with negative value + 1 file changed, 1 insertion(+) + +Valentino Geron in commit 6cf27f25f: + Fix LPOS command when RANK is greater than matches + 2 files changed, 9 insertions(+), 2 deletions(-) + +Yossi Gottlieb in commit 9bba54ace: + Tests: fix redis-cli with remote hosts. (#7693) + 3 files changed, 5 insertions(+), 5 deletions(-) + +huangzhw in commit 0fec2cb81: + RedisModuleEvent_LoadingProgress always at 100% progress (#7685) + 1 file changed, 2 insertions(+), 2 deletions(-) + +guybe7 in commit 931e19aa6: + Modules: Invalidate saved_oparray after use (#7688) + 1 file changed, 2 insertions(+) + +杨博东 in commit 6f2065570: + Fix flock cluster config may cause failure to restart after kill -9 (#7674) + 4 files changed, 31 insertions(+), 7 deletions(-) + +Raghav Muddur in commit 200149a2a: + Update clusterMsgDataPublish to clusterMsgModule (#7682) + 1 file changed, 1 insertion(+), 1 deletion(-) + +Madelyn Olson in commit 72daa1b4e: + Fixed hset error since it's shared with hmset (#7678) + 1 file changed, 1 insertion(+), 1 deletion(-) + +guybe7 in commit 3bf9ac994: + PERSIST should signalModifiedKey (Like EXPIRE does) (#7671) + 1 file changed, 1 insertion(+) + +Oran Agra in commit b37501684: + OOM Crash log include size of allocation attempt. (#7670) + 1 file changed, 2 insertions(+), 1 deletion(-) + +Wen Hui in commit 2136cb68f: + [module] using predefined REDISMODULE_NO_EXPIRE in RM_GetExpire (#7669) + 1 file changed, 2 insertions(+), 1 deletion(-) + +Oran Agra in commit f56aee4bc: + Trim trailing spaces in error replies coming from rejectCommand (#7668) + 1 file changed, 5 insertions(+), 1 deletion(-) + +Yossi Gottlieb in commit 012d7506a: + Module API: fix missing RM_CLIENTINFO_FLAG_SSL. (#7666) + 6 files changed, 82 insertions(+), 1 deletion(-) + +Yossi Gottlieb in commit a0adbc857: + TLS: relax verification on CONFIG SET. 
(#7665) + 2 files changed, 24 insertions(+), 7 deletions(-) + +Madelyn Olson in commit 2ef29715b: + Fixed timer warning (#5953) + 1 file changed, 1 insertion(+), 1 deletion(-) + +Wagner Francisco Mezaroba in commit b76f171f5: + allow --pattern to be used along with --bigkeys (#3586) + 1 file changed, 9 insertions(+), 2 deletions(-) + +zhaozhao.zz in commit cc7b57765: + redis-benchmark: fix wrong random key for hset (#4895) + 1 file changed, 1 insertion(+), 1 deletion(-) + +zhaozhao.zz in commit 479c1ba77: + CLIENT_MASTER should ignore server.proto_max_bulk_len + 1 file changed, 2 insertions(+), 1 deletion(-) + +zhaozhao.zz in commit f61ce8a52: + config: proto-max-bulk-len must be 1mb or greater + 2 files changed, 2 insertions(+), 2 deletions(-) + +zhaozhao.zz in commit 0350f597a: + using proto-max-bulk-len in checkStringLength for SETRANGE and APPEND + 1 file changed, 2 insertions(+), 2 deletions(-) + +YoongHM in commit eea63548d: + Start redis after network is online (#7639) + 1 file changed, 2 insertions(+) + +Yossi Gottlieb in commit aef6d74fb: + Run daily workflow on main repo only (no forks). (#7646) + 1 file changed, 7 insertions(+) + +WuYunlong in commit 917b4d241: + see #7250, fix signature of RedisModule_DeauthenticateAndCloseClient (#7645) + 1 file changed, 1 insertion(+), 1 deletion(-) + +Wang Yuan in commit efab7fd54: + Print error info if failed opening config file (#6943) + 1 file changed, 2 insertions(+), 1 deletion(-) + +Wen Hui in commit 8c4468bcf: + fix memory leak in ACLLoadFromFile error handling (#7623) + 1 file changed, 1 insertion(+) + +Oran Agra in commit 89724e1d2: + redis-cli --cluster-yes - negate force flag for clarity + 1 file changed, 9 insertions(+), 9 deletions(-) + +Frank Meier in commit c813739af: + reintroduce REDISCLI_CLUSTER_YES env variable in redis-cli + 1 file changed, 6 insertions(+) + +Frank Meier in commit 7e3b86c18: + add force option to 'create-cluster create' script call (#7612) + 1 file changed, 6 insertions(+), 2 deletions(-) + +Oran Agra in commit 3f7fa4312: + fix new rdb test failing on timing issues (#7604) + 1 file changed, 2 insertions(+), 2 deletions(-) + +Yossi Gottlieb in commit 417976d7a: + Fix test-centos7-tls daily job. (#7598) + 1 file changed, 2 insertions(+), 2 deletions(-) + +Oran Agra in commit c41818c51: + module hook for master link up missing on successful psync (#7584) + 2 files changed, 22 insertions(+), 2 deletions(-) + +Yossi Gottlieb in commit 6ef3fc185: + CI: Add daily CentOS 7.x jobs. (#7582) + 1 file changed, 50 insertions(+), 4 deletions(-) + +WuYunlong in commit 002c37482: + Fix running single test 14-consistency-check.tcl (#7587) + 1 file changed, 1 insertion(+) + +Yossi Gottlieb in commit 66cbbb6ad: + Clarify RM_BlockClient() error condition. (#6093) + 1 file changed, 9 insertions(+) + +namtsui in commit 22aba2207: + Avoid an out-of-bounds read in the redis-sentinel (#7443) + 1 file changed, 2 insertions(+), 2 deletions(-) + +Wen Hui in commit af08887dc: + Add SignalModifiedKey hook in XGROUP CREATE with MKSTREAM option (#7562) + 1 file changed, 1 insertion(+) + +Wen Hui in commit a5e0a64b0: + fix leak in error handling of debug populate command (#7062) + 1 file changed, 3 insertions(+), 4 deletions(-) + +Yossi Gottlieb in commit cbfdfa231: + Fix TLS cluster tests. (#7578) + 1 file changed, 4 insertions(+), 1 deletion(-) + +Yossi Gottlieb in commit 6d5376d30: + TLS: Propagate and handle SSL_new() failures. 
(#7576) + 4 files changed, 48 insertions(+), 6 deletions(-) + +Oran Agra in commit a662cd577: + Fix failing tests due to issues with wait_for_log_message (#7572) + 3 files changed, 38 insertions(+), 34 deletions(-) + +Jiayuan Chen in commit 2786a4b5e: + Add optional tls verification (#7502) + 6 files changed, 40 insertions(+), 5 deletions(-) + +Oran Agra in commit 3ef3d3612: + Daily github action: run cluster and sentinel tests with tls (#7575) + 1 file changed, 2 insertions(+), 2 deletions(-) + +Yossi Gottlieb in commit f20f63322: + TLS: support cluster/replication without tls-port. + 2 files changed, 5 insertions(+), 4 deletions(-) + +grishaf in commit 3c9ae059d: + Fix prepareForShutdown function declaration (#7566) + 1 file changed, 1 insertion(+), 1 deletion(-) + +Oran Agra in commit 3f4803af9: + Stabilize bgsave test that sometimes fails with valgrind (#7559) + 1 file changed, 20 insertions(+), 2 deletions(-) + +Madelyn Olson in commit 1a3c51a1f: + Properly reset errno for rdbLoad (#7542) + 1 file changed, 1 insertion(+) + +Oran Agra in commit 92d80b13a: + testsuite may leave servers alive on error (#7549) + 1 file changed, 3 insertions(+) + +Yossi Gottlieb in commit 245582ba7: + Tests: drop TCL 8.6 dependency. (#7548) + 1 file changed, 27 insertions(+), 22 deletions(-) + +Oran Agra in commit f20e1ba2d: + Fixes to release scripts (#7547) + 2 files changed, 2 insertions(+), 2 deletions(-) + +Remi Collet in commit 60ff56993: + Fix deprecated tail syntax in tests (#7543) + 1 file changed, 1 insertion(+), 1 deletion(-) + +Wen Hui in commit 34e8541b9: + Add missing calls to raxStop (#7532) + 4 files changed, 63 insertions(+), 19 deletions(-) + +Wen Hui in commit 2f7bc5435: + add missing caching command in client help (#7399) + 1 file changed, 1 insertion(+) + +zhaozhao.zz in commit c15be9ffe: + replication: need handle -NOPERM error after send ping (#7538) + 1 file changed, 1 insertion(+) + +Scott Brenner in commit 1b29152c3: + GitHub Actions workflows - use latest version of actions/checkout (#7534) + 2 files changed, 10 insertions(+), 10 deletions(-) + ================================================================================ Redis 6.0.6 Released Mon Jul 20 09:31:30 IDT 2020 ================================================================================ diff --git a/src/version.h b/src/version.h index 9dfd8f274..71f998326 100644 --- a/src/version.h +++ b/src/version.h @@ -1 +1 @@ -#define REDIS_VERSION "6.0.6" +#define REDIS_VERSION "6.0.7" From da33a18588efedd7912a81a602d57b5e501c73d4 Mon Sep 17 00:00:00 2001 From: Yossi Gottlieb Date: Tue, 1 Sep 2020 10:02:14 +0300 Subject: [PATCH 134/215] Fix double-make issue with make && make install. (#7734) All user-supplied variables that affect the build should be explicitly persisted. 
Fixes #7254 (cherry picked from commit b35d6e5cffb895ab6cbbd9e5e56ef00299b1359e) --- src/Makefile | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/Makefile b/src/Makefile index 80c627c24..873797330 100644 --- a/src/Makefile +++ b/src/Makefile @@ -255,6 +255,8 @@ persist-settings: distclean echo WARN=$(WARN) >> .make-settings echo OPT=$(OPT) >> .make-settings echo MALLOC=$(MALLOC) >> .make-settings + echo BUILD_TLS=$(BUILD_TLS) >> .make-settings + echo USE_SYSTEMD=$(USE_SYSTEMD) >> .make-settings echo CFLAGS=$(CFLAGS) >> .make-settings echo LDFLAGS=$(LDFLAGS) >> .make-settings echo REDIS_CFLAGS=$(REDIS_CFLAGS) >> .make-settings From 9b718d41f00972a5a42af39f6aad3b59e26c5a0c Mon Sep 17 00:00:00 2001 From: Oran Agra Date: Tue, 1 Sep 2020 21:31:37 +0300 Subject: [PATCH 135/215] fix README about BUILD_WITH_SYSTEMD usage (#7739) BUILD_WITH_SYSTEMD is an internal variable. Users should use USE_SYSTEMD=yes. (cherry picked from commit 747b4004eaf49b40a25f9b4f78f57b6328f015c7) --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index ca205f2a0..a90b95cc1 100644 --- a/README.md +++ b/README.md @@ -43,7 +43,7 @@ libssl-dev on Debian/Ubuntu) and run: To build with systemd support, you'll need systemd development libraries (such as libsystemd-dev on Debian/Ubuntu or systemd-devel on CentOS) and run: - % make BUILD_WITH_SYSTEMD=yes USE_SYSTEMD=yes + % make USE_SYSTEMD=yes You can run a 32 bit Redis binary using: From b983015dd4e37b65121219eb0ffb2f79ac5e1398 Mon Sep 17 00:00:00 2001 From: Thandayuthapani Date: Wed, 2 Sep 2020 18:53:49 +0530 Subject: [PATCH 136/215] Add masters/replicas options to redis-cli --cluster call command (#6491) * Add master/slave option in --cluster call command * Update src/redis-cli.c * Update src/redis-cli.c Co-authored-by: Itamar Haber (cherry picked from commit f22f64f0db82cce666714808a9c994304f97904a) --- src/redis-cli.c | 15 +++++++++++++-- 1 file changed, 13 insertions(+), 2 deletions(-) diff --git a/src/redis-cli.c b/src/redis-cli.c index 0b44ec252..6c8ce068a 100644 --- a/src/redis-cli.c +++ b/src/redis-cli.c @@ -125,6 +125,8 @@ #define CLUSTER_MANAGER_CMD_FLAG_COLOR 1 << 8 #define CLUSTER_MANAGER_CMD_FLAG_CHECK_OWNERS 1 << 9 #define CLUSTER_MANAGER_CMD_FLAG_FIX_WITH_UNREACHABLE_MASTERS 1 << 10 +#define CLUSTER_MANAGER_CMD_FLAG_MASTERS_ONLY 1 << 11 +#define CLUSTER_MANAGER_CMD_FLAG_SLAVES_ONLY 1 << 12 #define CLUSTER_MANAGER_OPT_GETFRIENDS 1 << 0 #define CLUSTER_MANAGER_OPT_COLD 1 << 1 @@ -1543,6 +1545,12 @@ static int parseOptions(int argc, char **argv) { i = j; } else if (!strcmp(argv[i],"--cluster") && lastarg) { usage(); + } else if ((!strcmp(argv[i],"--cluster-only-masters"))) { + config.cluster_manager_command.flags |= + CLUSTER_MANAGER_CMD_FLAG_MASTERS_ONLY; + } else if ((!strcmp(argv[i],"--cluster-only-replicas"))) { + config.cluster_manager_command.flags |= + CLUSTER_MANAGER_CMD_FLAG_SLAVES_ONLY; } else if (!strcmp(argv[i],"--cluster-replicas") && !lastarg) { config.cluster_manager_command.replicas = atoi(argv[++i]); } else if (!strcmp(argv[i],"--cluster-master-id") && !lastarg) { @@ -2320,7 +2328,7 @@ clusterManagerCommandDef clusterManagerCommands[] = { "new_host:new_port existing_host:existing_port", "slave,master-id "}, {"del-node", clusterManagerCommandDeleteNode, 2, "host:port node_id",NULL}, {"call", clusterManagerCommandCall, -2, - "host:port command arg arg .. arg", NULL}, + "host:port command arg arg .. 
arg", "only-masters,only-replicas"}, {"set-timeout", clusterManagerCommandSetTimeout, 2, "host:port milliseconds", NULL}, {"import", clusterManagerCommandImport, 1, "host:port", @@ -6423,6 +6431,10 @@ static int clusterManagerCommandCall(int argc, char **argv) { listRewind(cluster_manager.nodes, &li); while ((ln = listNext(&li)) != NULL) { clusterManagerNode *n = ln->value; + if ((config.cluster_manager_command.flags & CLUSTER_MANAGER_CMD_FLAG_MASTERS_ONLY) + && (n->replicate != NULL)) continue; // continue if node is slave + if ((config.cluster_manager_command.flags & CLUSTER_MANAGER_CMD_FLAG_SLAVES_ONLY) + && (n->replicate == NULL)) continue; // continue if node is master if (!n->context && !clusterManagerNodeConnect(n)) continue; redisReply *reply = NULL; redisAppendCommandArgv(n->context, argc, (const char **) argv, argvlen); @@ -8196,4 +8208,3 @@ int main(int argc, char **argv) { return noninteractive(argc,convertToSds(argc,argv)); } } - From e4db67ea579696fba1383f1ba430d59d62d58de9 Mon Sep 17 00:00:00 2001 From: Oran Agra Date: Wed, 2 Sep 2020 17:18:09 +0300 Subject: [PATCH 137/215] Print server startup messages after daemonization (#7743) When redis isn't configured to have a log file, having these prints before damonization puts them in the calling process stdout rather than /dev/null (cherry picked from commit 8b0747d65734ba1128da7479f8b961e530b82916) --- src/server.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/server.c b/src/server.c index f9154794b..990b23b7d 100644 --- a/src/server.c +++ b/src/server.c @@ -5234,6 +5234,10 @@ int main(int argc, char **argv) { sdsfree(options); } + server.supervised = redisIsSupervised(server.supervised_mode); + int background = server.daemonize && !server.supervised; + if (background) daemonize(); + serverLog(LL_WARNING, "oO0OoO0OoO0Oo Redis is starting oO0OoO0OoO0Oo"); serverLog(LL_WARNING, "Redis version=%s, bits=%d, commit=%s, modified=%d, pid=%d, just started", @@ -5249,11 +5253,7 @@ int main(int argc, char **argv) { serverLog(LL_WARNING, "Configuration loaded"); } - server.supervised = redisIsSupervised(server.supervised_mode); - int background = server.daemonize && !server.supervised; - if (background) daemonize(); readOOMScoreAdj(); - initServer(); if (background || server.pidfile) createPidFile(); redisSetProcTitle(argv[0]); From 332aa2f919e8bdc3b78399d99b33530764d45165 Mon Sep 17 00:00:00 2001 From: Yossi Gottlieb Date: Thu, 3 Sep 2020 18:15:48 +0300 Subject: [PATCH 138/215] redis-cli: fix writeConn() buffer handling. (#7749) Fix issues with writeConn() which resulted with corruption of the stream by leaving an extra byte in the buffer. The trigger for this is partial writes or write errors which were not experienced on Linux but reported on macOS. (cherry picked from commit 58e5feb3f49c50b9c18f38fd8f6cad2317c02265) --- src/redis-cli.c | 43 +++++++++++++++++++++++++++++++++++++------ 1 file changed, 37 insertions(+), 6 deletions(-) diff --git a/src/redis-cli.c b/src/redis-cli.c index 6c8ce068a..ca949b8f0 100644 --- a/src/redis-cli.c +++ b/src/redis-cli.c @@ -6822,21 +6822,52 @@ static ssize_t writeConn(redisContext *c, const char *buf, size_t buf_len) { int done = 0; + /* Append data to buffer which is *usually* expected to be empty + * but we don't assume that, and write. 
+ */ c->obuf = sdscatlen(c->obuf, buf, buf_len); if (redisBufferWrite(c, &done) == REDIS_ERR) { - sdsrange(c->obuf, 0, -(buf_len+1)); if (!(c->flags & REDIS_BLOCK)) errno = EAGAIN; + + /* On error, we assume nothing was written and we roll back the + * buffer to its original state. + */ + if (sdslen(c->obuf) > buf_len) + sdsrange(c->obuf, 0, -(buf_len+1)); + else + sdsclear(c->obuf); + return -1; } - size_t left = sdslen(c->obuf); - sdsclear(c->obuf); - if (!done) { - return buf_len - left; + /* If we're done, free up everything. We may have written more than + * buf_len (if c->obuf was not initially empty) but we don't have to + * tell. + */ + if (done) { + sdsclear(c->obuf); + return buf_len; } - return buf_len; + /* Write was successful but we have some leftovers which we should + * remove from the buffer. + * + * Do we still have data that was there prior to our buf? If so, + * restore buffer to it's original state and report no new data was + * writen. + */ + if (sdslen(c->obuf) > buf_len) { + sdsrange(c->obuf, 0, -(buf_len+1)); + return 0; + } + + /* At this point we're sure no prior data is left. We flush the buffer + * and report how much we've written. + */ + size_t left = sdslen(c->obuf); + sdsclear(c->obuf); + return buf_len - left; } /* Read raw bytes through a redisContext. The read operation is not greedy From bce350c666c2b48b70d83995cb4dc90a8c397e33 Mon Sep 17 00:00:00 2001 From: Oran Agra Date: Mon, 31 Aug 2020 10:23:09 +0300 Subject: [PATCH 139/215] test infra - write test name to logfile (cherry picked from commit 9d527d076b17851b87bc95aa34cca8fa5a91d41b) --- tests/support/server.tcl | 14 ++++++++++++++ tests/support/test.tcl | 13 +++++++++++++ tests/support/util.tcl | 8 ++++++++ 3 files changed, 35 insertions(+) diff --git a/tests/support/server.tcl b/tests/support/server.tcl index 0afe89f7c..14e59e55c 100644 --- a/tests/support/server.tcl +++ b/tests/support/server.tcl @@ -301,6 +301,13 @@ proc start_server {options {code undefined}} { set stdout [format "%s/%s" [dict get $config "dir"] "stdout"] set stderr [format "%s/%s" [dict get $config "dir"] "stderr"] + # if we're inside a test, write the test name to the server log file + if {[info exists ::cur_test]} { + set fd [open $stdout "a+"] + puts $fd "### Starting server for test $::cur_test" + close $fd + } + # We need a loop here to retry with different ports. 
set server_started 0 while {$server_started == 0} { @@ -443,6 +450,13 @@ proc restart_server {level wait_ready} { set stderr [dict get $srv "stderr"] set config_file [dict get $srv "config_file"] + # if we're inside a test, write the test name to the server log file + if {[info exists ::cur_test]} { + set fd [open $stdout "a+"] + puts $fd "### Restarting server for test $::cur_test" + close $fd + } + set prev_ready_count [exec grep -i "Ready to accept" | wc -l < $stdout] set pid [spawn_server $config_file $stdout $stderr] diff --git a/tests/support/test.tcl b/tests/support/test.tcl index a5573f583..d266eba41 100644 --- a/tests/support/test.tcl +++ b/tests/support/test.tcl @@ -143,6 +143,18 @@ proc test {name code {okpattern undefined}} { set details {} lappend details "$name in $::curfile" + # set a cur_test global to be logged into new servers that are spown + # and log the test name in all existing servers + set ::cur_test "$name in $::curfile" + if {!$::external} { + foreach srv $::servers { + set stdout [dict get $srv stdout] + set fd [open $stdout "a+"] + puts $fd "### Starting test $::cur_test" + close $fd + } + } + send_data_packet $::test_server_fd testing $name if {[catch {set retval [uplevel 1 $code]} error]} { @@ -183,4 +195,5 @@ proc test {name code {okpattern undefined}} { send_data_packet $::test_server_fd err "Detected a memory leak in test '$name': $output" } } + unset ::cur_test } diff --git a/tests/support/util.tcl b/tests/support/util.tcl index 8340ad207..979eccdf9 100644 --- a/tests/support/util.tcl +++ b/tests/support/util.tcl @@ -138,6 +138,14 @@ proc wait_for_log_messages {srv_idx patterns from_line maxtries delay} { } } +# write line to server log file +proc write_log_line {srv_idx msg} { + set logfile [srv $srv_idx stdout] + set fd [open $logfile "a+"] + puts $fd "### $msg" + close $fd +} + # Random integer between 0 and max (excluded). 
proc randomInt {max} { expr {int(rand()*$max)} From 5b8de5b7f227975d2222bd142e39a70ca525e5d8 Mon Sep 17 00:00:00 2001 From: Oran Agra Date: Mon, 31 Aug 2020 10:44:43 +0300 Subject: [PATCH 140/215] test infra - reduce disk space usage this is important when running a test with --loop (cherry picked from commit e3e69c25fd05b608f5ea8d612bc0e377922a6115) --- tests/integration/rdb.tcl | 8 ++++---- tests/support/server.tcl | 30 ++++++++++++++++++++++++++---- tests/unit/moduleapi/testrdb.tcl | 6 +++--- 3 files changed, 33 insertions(+), 11 deletions(-) diff --git a/tests/integration/rdb.tcl b/tests/integration/rdb.tcl index 9cd970350..58dc6c968 100644 --- a/tests/integration/rdb.tcl +++ b/tests/integration/rdb.tcl @@ -25,7 +25,7 @@ start_server [list overrides [list "dir" $server_path "dbfilename" "encodings.rd set server_path [tmpdir "server.rdb-startup-test"] -start_server [list overrides [list "dir" $server_path]] { +start_server [list overrides [list "dir" $server_path] keep_persistence true] { test {Server started empty with non-existing RDB file} { r debug digest } {0000000000000000000000000000000000000000} @@ -33,13 +33,13 @@ start_server [list overrides [list "dir" $server_path]] { r save } -start_server [list overrides [list "dir" $server_path]] { +start_server [list overrides [list "dir" $server_path] keep_persistence true] { test {Server started empty with empty RDB file} { r debug digest } {0000000000000000000000000000000000000000} } -start_server [list overrides [list "dir" $server_path]] { +start_server [list overrides [list "dir" $server_path] keep_persistence true] { test {Test RDB stream encoding} { for {set j 0} {$j < 1000} {incr j} { if {rand() < 0.9} { @@ -64,7 +64,7 @@ set defaults {} proc start_server_and_kill_it {overrides code} { upvar defaults defaults srv srv server_path server_path set config [concat $defaults $overrides] - set srv [start_server [list overrides $config]] + set srv [start_server [list overrides $config keep_persistence true]] uplevel 1 $code kill_server $srv } diff --git a/tests/support/server.tcl b/tests/support/server.tcl index 14e59e55c..6775b125a 100644 --- a/tests/support/server.tcl +++ b/tests/support/server.tcl @@ -31,6 +31,16 @@ proc check_valgrind_errors stderr { } } +proc clean_persistence config { + # we may wanna keep the logs for later, but let's clean the persistence + # files right away, since they can accumulate and take up a lot of space + set config [dict get $config "config"] + set rdb [format "%s/%s" [dict get $config "dir"] "dump.rdb"] + set aof [format "%s/%s" [dict get $config "dir"] "appendonly.aof"] + catch {exec rm -rf $rdb} + catch {exec rm -rf $aof} +} + proc kill_server config { # nothing to kill when running against external server if {$::external} return @@ -238,19 +248,27 @@ proc start_server {options {code undefined}} { set baseconfig "default.conf" set overrides {} set tags {} + set keep_persistence false # parse options foreach {option value} $options { switch $option { "config" { - set baseconfig $value } + set baseconfig $value + } "overrides" { - set overrides $value } + set overrides $value + } "tags" { set tags $value - set ::tags [concat $::tags $value] } + set ::tags [concat $::tags $value] + } + "keep_persistence" { + set keep_persistence $value + } default { - error "Unknown option $option" } + error "Unknown option $option" + } } } @@ -436,6 +454,10 @@ proc start_server {options {code undefined}} { set ::tags [lrange $::tags 0 end-[llength $tags]] kill_server $srv + if {!$keep_persistence} { + 
clean_persistence $srv + } + set _ "" } else { set ::tags [lrange $::tags 0 end-[llength $tags]] set _ $srv diff --git a/tests/unit/moduleapi/testrdb.tcl b/tests/unit/moduleapi/testrdb.tcl index 02c82c7c3..2298a73c5 100644 --- a/tests/unit/moduleapi/testrdb.tcl +++ b/tests/unit/moduleapi/testrdb.tcl @@ -12,7 +12,7 @@ tags "modules" { test {modules global are lost without aux} { set server_path [tmpdir "server.module-testrdb"] - start_server [list overrides [list loadmodule "$testmodule" "dir" $server_path]] { + start_server [list overrides [list loadmodule "$testmodule" "dir" $server_path] keep_persistence true] { r testrdb.set.before global1 assert_equal "global1" [r testrdb.get.before] } @@ -23,7 +23,7 @@ tags "modules" { test {modules are able to persist globals before and after} { set server_path [tmpdir "server.module-testrdb"] - start_server [list overrides [list loadmodule "$testmodule 2" "dir" $server_path]] { + start_server [list overrides [list loadmodule "$testmodule 2" "dir" $server_path] keep_persistence true] { r testrdb.set.before global1 r testrdb.set.after global2 assert_equal "global1" [r testrdb.get.before] @@ -38,7 +38,7 @@ tags "modules" { test {modules are able to persist globals just after} { set server_path [tmpdir "server.module-testrdb"] - start_server [list overrides [list loadmodule "$testmodule 1" "dir" $server_path]] { + start_server [list overrides [list loadmodule "$testmodule 1" "dir" $server_path] keep_persistence true] { r testrdb.set.after global2 assert_equal "global2" [r testrdb.get.after] } From 28e074608c85f600452c30f29926603fa7f64c7f Mon Sep 17 00:00:00 2001 From: Oran Agra Date: Mon, 31 Aug 2020 11:05:30 +0300 Subject: [PATCH 141/215] test infra - improve test skipping ability - skip full units - skip a single test (not just a list of tests) - when skipping tag, skip spinning up servers, not just the tests - skip tags when running against an external server too - allow using multiple tags (split them) (cherry picked from commit 677d14c2137ab50fa25c8163d20b14bc563261c7) --- tests/support/server.tcl | 69 ++++++++++++++++++++++++++++------------ tests/support/test.tcl | 11 +------ tests/test_helper.tcl | 47 +++++++++++++++++++++++---- 3 files changed, 91 insertions(+), 36 deletions(-) diff --git a/tests/support/server.tcl b/tests/support/server.tcl index 6775b125a..8ab8cf66a 100644 --- a/tests/support/server.tcl +++ b/tests/support/server.tcl @@ -160,7 +160,19 @@ proc server_is_up {host port retrynum} { # doesn't really belong here, but highly coupled to code in start_server proc tags {tags code} { + # If we 'tags' contain multiple tags, quoted and seperated by spaces, + # we want to get rid of the quotes in order to have a proper list + set tags [string map { \" "" } $tags] set ::tags [concat $::tags $tags] + # We skip unwanted tags + foreach tag $::denytags { + if {[lsearch $::tags $tag] >= 0} { + incr ::num_aborted + send_data_packet $::test_server_fd ignore "Tag: $tag" + set ::tags [lrange $::tags 0 end-[llength $tags]] + return + } + } uplevel 1 $code set ::tags [lrange $::tags 0 end-[llength $tags]] } @@ -226,24 +238,6 @@ proc wait_server_started {config_file stdout pid} { } proc start_server {options {code undefined}} { - # If we are running against an external server, we just push the - # host/port pair in the stack the first time - if {$::external} { - if {[llength $::servers] == 0} { - set srv {} - dict set srv "host" $::host - dict set srv "port" $::port - set client [redis $::host $::port 0 $::tls] - dict set srv "client" $client - $client 
select 9 - - # append the server to the stack - lappend ::servers $srv - } - uplevel 1 $code - return - } - # setup defaults set baseconfig "default.conf" set overrides {} @@ -260,8 +254,10 @@ proc start_server {options {code undefined}} { set overrides $value } "tags" { - set tags $value - set ::tags [concat $::tags $value] + # If we 'tags' contain multiple tags, quoted and seperated by spaces, + # we want to get rid of the quotes in order to have a proper list + set tags [string map { \" "" } $value] + set ::tags [concat $::tags $tags] } "keep_persistence" { set keep_persistence $value @@ -272,6 +268,39 @@ proc start_server {options {code undefined}} { } } + # We skip unwanted tags + foreach tag $::denytags { + if {[lsearch $::tags $tag] >= 0} { + incr ::num_aborted + send_data_packet $::test_server_fd ignore "Tag: $tag" + set ::tags [lrange $::tags 0 end-[llength $tags]] + return + } + } + + # If we are running against an external server, we just push the + # host/port pair in the stack the first time + if {$::external} { + if {[llength $::servers] == 0} { + set srv {} + dict set srv "host" $::host + dict set srv "port" $::port + set client [redis $::host $::port 0 $::tls] + dict set srv "client" $client + $client select 9 + + set config {} + dict set config "port" $::port + dict set srv "config" $config + + # append the server to the stack + lappend ::servers $srv + } + uplevel 1 $code + set ::tags [lrange $::tags 0 end-[llength $tags]] + return + } + set data [split [exec cat "tests/assets/$baseconfig"] "\n"] set config {} if {$::tls} { diff --git a/tests/support/test.tcl b/tests/support/test.tcl index d266eba41..773461abb 100644 --- a/tests/support/test.tcl +++ b/tests/support/test.tcl @@ -99,16 +99,7 @@ proc wait_for_condition {maxtries delay e _else_ elsescript} { } } -proc test {name code {okpattern undefined}} { - # abort if tagged with a tag to deny - foreach tag $::denytags { - if {[lsearch $::tags $tag] >= 0} { - incr ::num_aborted - send_data_packet $::test_server_fd ignore $name - return - } - } - +proc test {name code {okpattern undefined} {options undefined}} { # abort if test name in skiptests if {[lsearch $::skiptests $name] >= 0} { incr ::num_skipped diff --git a/tests/test_helper.tcl b/tests/test_helper.tcl index d0f962762..4a470ec30 100644 --- a/tests/test_helper.tcl +++ b/tests/test_helper.tcl @@ -85,6 +85,7 @@ set ::verbose 0 set ::quiet 0 set ::denytags {} set ::skiptests {} +set ::skipunits {} set ::allowtags {} set ::only_tests {} set ::single_tests {} @@ -423,6 +424,12 @@ proc lpop {listVar {count 1}} { set ele } +proc lremove {listVar value} { + upvar 1 $listVar var + set idx [lsearch -exact $var $value] + set var [lreplace $var $idx $idx] +} + # A new client is idle. Remove it from the list of active clients and # if there are still test units to run, launch them. proc signal_idle_client fd { @@ -521,11 +528,13 @@ proc print_help_screen {} { "--list-tests List all the available test units." "--only Just execute the specified test by test name. this option can be repeated." "--skip-till Skip all units until (and including) the specified one." + "--skipunit Skip one unit." "--clients Number of test clients (default 16)." "--timeout Test timeout in seconds (default 10 min)." "--force-failure Force the execution of a test that always fails." "--config Extra config file argument." "--skipfile Name of a file containing test names that should be skipped (one per line)." + "--skiptest Name of a file containing test names that should be skipped (one per line)." 
"--dont-clean Don't delete redis log files after the run." "--stop Blocks once the first test fails." "--loop Execute the specified set of tests forever." @@ -563,6 +572,9 @@ for {set j 0} {$j < [llength $argv]} {incr j} { set file_data [read $fp] close $fp set ::skiptests [split $file_data "\n"] + } elseif {$opt eq {--skiptest}} { + lappend ::skiptests $arg + incr j } elseif {$opt eq {--valgrind}} { set ::valgrind 1 } elseif {$opt eq {--stack-logging}} { @@ -601,6 +613,9 @@ for {set j 0} {$j < [llength $argv]} {incr j} { } elseif {$opt eq {--only}} { lappend ::only_tests $arg incr j + } elseif {$opt eq {--skipunit}} { + lappend ::skipunits $arg + incr j } elseif {$opt eq {--skip-till}} { set ::skip_till $arg incr j @@ -638,13 +653,23 @@ for {set j 0} {$j < [llength $argv]} {incr j} { } } -# If --skil-till option was given, we populate the list of single tests +set filtered_tests {} + +# Set the filtered tests to be the short list (single_tests) if exists. +# Otherwise, we start filtering all_tests +if {[llength $::single_tests] > 0} { + set filtered_tests $::single_tests +} else { + set filtered_tests $::all_tests +} + +# If --skip-till option was given, we populate the list of single tests # to run with everything *after* the specified unit. if {$::skip_till != ""} { set skipping 1 foreach t $::all_tests { - if {$skipping == 0} { - lappend ::single_tests $t + if {$skipping == 1} { + lremove filtered_tests $t } if {$t == $::skip_till} { set skipping 0 @@ -656,10 +681,20 @@ if {$::skip_till != ""} { } } +# If --skipunits option was given, we populate the list of single tests +# to run with everything *not* in the skipunits list. +if {[llength $::skipunits] > 0} { + foreach t $::all_tests { + if {[lsearch $::skipunits $t] != -1} { + lremove filtered_tests $t + } + } +} + # Override the list of tests with the specific tests we want to run -# in case there was some filter, that is --single or --skip-till options. -if {[llength $::single_tests] > 0} { - set ::all_tests $::single_tests +# in case there was some filter, that is --single, -skipunit or --skip-till options. +if {[llength $filtered_tests] < [llength $::all_tests]} { + set ::all_tests $filtered_tests } proc attach_to_replication_stream {} { From 72d6f966ac66669eda3263afdf4b483c02697609 Mon Sep 17 00:00:00 2001 From: Oran Agra Date: Mon, 31 Aug 2020 11:16:06 +0300 Subject: [PATCH 142/215] test infra - flushall between tests in external mode (cherry picked from commit b65e5aca86b9c2d24b96abc8414a45f9907b6f7d) --- tests/support/server.tcl | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/support/server.tcl b/tests/support/server.tcl index 8ab8cf66a..9894e6f2a 100644 --- a/tests/support/server.tcl +++ b/tests/support/server.tcl @@ -296,6 +296,7 @@ proc start_server {options {code undefined}} { # append the server to the stack lappend ::servers $srv } + r flushall uplevel 1 $code set ::tags [lrange $::tags 0 end-[llength $tags]] return From db6c763d8bebe62ab0bee78bc274826ab8fef177 Mon Sep 17 00:00:00 2001 From: Oran Agra Date: Mon, 31 Aug 2020 11:20:02 +0300 Subject: [PATCH 143/215] test infra - wait_done_loading reduce code duplication in aof.tcl. 
move creation of clients into the test so that it can be skipped (cherry picked from commit 1b7ba44e7917082ac6d5523666d3b4ab210dfbad) --- tests/integration/aof.tcl | 44 +++++++-------------------------------- tests/support/util.tcl | 8 +++++++ 2 files changed, 16 insertions(+), 36 deletions(-) diff --git a/tests/integration/aof.tcl b/tests/integration/aof.tcl index b82c87d71..d81521374 100644 --- a/tests/integration/aof.tcl +++ b/tests/integration/aof.tcl @@ -52,15 +52,9 @@ tags {"aof"} { assert_equal 1 [is_alive $srv] } - set client [redis [dict get $srv host] [dict get $srv port] 0 $::tls] - - wait_for_condition 50 100 { - [catch {$client ping} e] == 0 - } else { - fail "Loading DB is taking too much time." - } - test "Truncated AOF loaded: we expect foo to be equal to 5" { + set client [redis [dict get $srv host] [dict get $srv port] 0 $::tls] + wait_done_loading $client assert {[$client get foo] eq "5"} } @@ -75,15 +69,9 @@ tags {"aof"} { assert_equal 1 [is_alive $srv] } - set client [redis [dict get $srv host] [dict get $srv port] 0 $::tls] - - wait_for_condition 50 100 { - [catch {$client ping} e] == 0 - } else { - fail "Loading DB is taking too much time." - } - test "Truncated AOF loaded: we expect foo to be equal to 6 now" { + set client [redis [dict get $srv host] [dict get $srv port] 0 $::tls] + wait_done_loading $client assert {[$client get foo] eq "6"} } } @@ -183,11 +171,7 @@ tags {"aof"} { test "Fixed AOF: Keyspace should contain values that were parseable" { set client [redis [dict get $srv host] [dict get $srv port] 0 $::tls] - wait_for_condition 50 100 { - [catch {$client ping} e] == 0 - } else { - fail "Loading DB is taking too much time." - } + wait_done_loading $client assert_equal "hello" [$client get foo] assert_equal "" [$client get bar] } @@ -207,11 +191,7 @@ tags {"aof"} { test "AOF+SPOP: Set should have 1 member" { set client [redis [dict get $srv host] [dict get $srv port] 0 $::tls] - wait_for_condition 50 100 { - [catch {$client ping} e] == 0 - } else { - fail "Loading DB is taking too much time." - } + wait_done_loading $client assert_equal 1 [$client scard set] } } @@ -231,11 +211,7 @@ tags {"aof"} { test "AOF+SPOP: Set should have 1 member" { set client [redis [dict get $srv host] [dict get $srv port] 0 $::tls] - wait_for_condition 50 100 { - [catch {$client ping} e] == 0 - } else { - fail "Loading DB is taking too much time." - } + wait_done_loading $client assert_equal 1 [$client scard set] } } @@ -254,11 +230,7 @@ tags {"aof"} { test "AOF+EXPIRE: List should be empty" { set client [redis [dict get $srv host] [dict get $srv port] 0 $::tls] - wait_for_condition 50 100 { - [catch {$client ping} e] == 0 - } else { - fail "Loading DB is taking too much time." - } + wait_done_loading $client assert_equal 0 [$client llen list] } } diff --git a/tests/support/util.tcl b/tests/support/util.tcl index 979eccdf9..c698c255f 100644 --- a/tests/support/util.tcl +++ b/tests/support/util.tcl @@ -99,6 +99,14 @@ proc wait_for_ofs_sync {r1 r2} { } } +proc wait_done_loading r { + wait_for_condition 50 100 { + [catch {$r ping} e] == 0 + } else { + fail "Loading DB is taking too much time." 
+ } +} + # count current log lines in server's stdout proc count_log_lines {srv_idx} { set _ [exec wc -l < [srv $srv_idx stdout]] From 41c7c7919cbbe7f733a37b00f684d15c046e50da Mon Sep 17 00:00:00 2001 From: Oran Agra Date: Mon, 31 Aug 2020 11:24:17 +0300 Subject: [PATCH 144/215] test infra - add durable mode to work around test suite crashing in some cases a command that returns an error possibly due to a timing issue causes the tcl code to crash and thus prevents the rest of the tests from running. this adds an option to make the test proceed despite the crash. maybe it should be the default mode some day. (cherry picked from commit fe5da2e60d8d6d907062f4789673fbe06fa8773e) --- tests/support/server.tcl | 28 ++++++++++++++++++++++++++-- tests/support/test.tcl | 6 +++++- tests/test_helper.tcl | 4 ++++ 3 files changed, 35 insertions(+), 3 deletions(-) diff --git a/tests/support/server.tcl b/tests/support/server.tcl index 9894e6f2a..f2f6ceece 100644 --- a/tests/support/server.tcl +++ b/tests/support/server.tcl @@ -297,7 +297,20 @@ proc start_server {options {code undefined}} { lappend ::servers $srv } r flushall - uplevel 1 $code + if {[catch {set retval [uplevel 1 $code]} error]} { + if {$::durable} { + set msg [string range $error 10 end] + lappend details $msg + lappend details $::errorInfo + lappend ::tests_failed $details + + incr ::num_failed + send_data_packet $::test_server_fd err [join $details "\n"] + } else { + # Re-raise, let handler up the stack take care of this. + error $error $::errorInfo + } + } set ::tags [lrange $::tags 0 end-[llength $tags]] return } @@ -468,7 +481,18 @@ proc start_server {options {code undefined}} { } puts "" - error $error $backtrace + if {$::durable} { + set msg [string range $error 10 end] + lappend details $msg + lappend details $backtrace + lappend ::tests_failed $details + + incr ::num_failed + send_data_packet $::test_server_fd err [join $details "\n"] + } else { + # Re-raise, let handler up the stack take care of this. + error $error $backtrace + } } # fetch srv back from the server list, in case it was restarted by restart_server (new PID) diff --git a/tests/support/test.tcl b/tests/support/test.tcl index 773461abb..f5b4c8bef 100644 --- a/tests/support/test.tcl +++ b/tests/support/test.tcl @@ -149,9 +149,13 @@ proc test {name code {okpattern undefined} {options undefined}} { send_data_packet $::test_server_fd testing $name if {[catch {set retval [uplevel 1 $code]} error]} { - if {[string match "assertion:*" $error]} { + set assertion [string match "assertion:*" $error] + if {$assertion || $::durable} { set msg [string range $error 10 end] lappend details $msg + if {!$assertion} { + lappend details $::errorInfo + } lappend ::tests_failed $details incr ::num_failed diff --git a/tests/test_helper.tcl b/tests/test_helper.tcl index 4a470ec30..fe2d484b8 100644 --- a/tests/test_helper.tcl +++ b/tests/test_helper.tcl @@ -79,6 +79,7 @@ set ::baseport 21111; # initial port for spawned redis servers set ::portcount 8000; # we don't wanna use more than 10000 to avoid collision with cluster bus ports set ::traceleaks 0 set ::valgrind 0 +set ::durable 0 set ::tls 0 set ::stack_logging 0 set ::verbose 0 @@ -521,6 +522,7 @@ proc send_data_packet {fd status data} { proc print_help_screen {} { puts [join { "--valgrind Run the test over valgrind." + "--durable suppress test crashes and keep running" "--stack-logging Enable OSX leaks/malloc stack logging." "--accurate Run slow randomized tests for more iterations." "--quiet Don't show individual tests." 
@@ -633,6 +635,8 @@ for {set j 0} {$j < [llength $argv]} {incr j} { } elseif {$opt eq {--clients}} { set ::numclients $arg incr j + } elseif {$opt eq {--durable}} { + set ::durable 1 } elseif {$opt eq {--dont-clean}} { set ::dont_clean 1 } elseif {$opt eq {--wait-server}} { From d410dc316245775de6b71c16d6c03e7cd848510e Mon Sep 17 00:00:00 2001 From: Oran Agra Date: Sun, 6 Sep 2020 11:11:49 +0300 Subject: [PATCH 145/215] Improve valgrind support for cluster tests (#7725) - redirect valgrind reports to a dedicated file rather than console - try to avoid killing instances with SIGKILL so that we get the memory leak report (killing with SIGTERM before resorting to SIGKILL) - search for valgrind reports when done, print them and fail the tests - add --dont-clean option to keep the logs on exit - fix exit error code when crash is found (would have exited with 0) changes that affect the normal redis test suite: - refactor check_valgrind_errors into two functions one to search and one to report - move the search half into util.tcl to serve the cluster tests too - ignore "address range perms" valgrind warnings which seem non relevant. (cherry picked from commit 2b998de46078c172c6b19ac3b779318e7992c60a) --- tests/instances.tcl | 65 +++++++++++++++++++++++++++++++++++----- tests/support/server.tcl | 18 ++--------- tests/support/util.tcl | 23 ++++++++++++++ 3 files changed, 83 insertions(+), 23 deletions(-) diff --git a/tests/instances.tcl b/tests/instances.tcl index a43a4cc87..2029bc5f5 100644 --- a/tests/instances.tcl +++ b/tests/instances.tcl @@ -19,6 +19,7 @@ set ::verbose 0 set ::valgrind 0 set ::tls 0 set ::pause_on_error 0 +set ::dont_clean 0 set ::simulate_error 0 set ::failed 0 set ::sentinel_instances {} @@ -38,7 +39,7 @@ if {[catch {cd tmp}]} { # Execute the specified instance of the server specified by 'type', using # the provided configuration file. Returns the PID of the process. -proc exec_instance {type cfgfile} { +proc exec_instance {type dirname cfgfile} { if {$type eq "redis"} { set prgname redis-server } elseif {$type eq "sentinel"} { @@ -47,8 +48,9 @@ proc exec_instance {type cfgfile} { error "Unknown instance type." } + set errfile [file join $dirname err.txt] if {$::valgrind} { - set pid [exec valgrind --track-origins=yes --suppressions=../../../src/valgrind.sup --show-reachable=no --show-possibly-lost=no --leak-check=full ../../../src/${prgname} $cfgfile &] + set pid [exec valgrind --track-origins=yes --suppressions=../../../src/valgrind.sup --show-reachable=no --show-possibly-lost=no --leak-check=full ../../../src/${prgname} $cfgfile 2>> $errfile &] } else { set pid [exec ../../../src/${prgname} $cfgfile &] } @@ -93,7 +95,7 @@ proc spawn_instance {type base_port count {conf {}}} { # Finally exec it and remember the pid for later cleanup. 
set retry 100 while {$retry} { - set pid [exec_instance $type $cfgfile] + set pid [exec_instance $type $dirname $cfgfile] # Check availability if {[server_is_up 127.0.0.1 $port 100] == 0} { @@ -144,16 +146,60 @@ proc log_crashes {} { puts "\n*** Crash report found in $log ***" set found 1 } - if {$found} {puts $line} + if {$found} { + puts $line + incr ::failed + } } } + + set logs [glob */err.txt] + foreach log $logs { + set res [find_valgrind_errors $log] + if {$res != ""} { + puts $res + incr ::failed + } + } +} + +proc is_alive pid { + if {[catch {exec ps -p $pid} err]} { + return 0 + } else { + return 1 + } +} + +proc stop_instance pid { + catch {exec kill $pid} + if {$::valgrind} { + set max_wait 60000 + } else { + set max_wait 10000 + } + while {[is_alive $pid]} { + incr wait 10 + + if {$wait >= $max_wait} { + puts "Forcing process $pid to exit..." + catch {exec kill -KILL $pid} + } elseif {$wait % 1000 == 0} { + puts "Waiting for process $pid to exit..." + } + after 10 + } } proc cleanup {} { puts "Cleaning up..." - log_crashes foreach pid $::pids { - catch {exec kill -9 $pid} + puts "killing stale instance $pid" + stop_instance $pid + } + log_crashes + if {$::dont_clean} { + return } foreach dir $::dirs { catch {exec rm -rf $dir} @@ -178,6 +224,8 @@ proc parse_options {} { set ::run_matching "*${val}*" } elseif {$opt eq "--pause-on-error"} { set ::pause_on_error 1 + } elseif {$opt eq {--dont-clean}} { + set ::dont_clean 1 } elseif {$opt eq "--fail"} { set ::simulate_error 1 } elseif {$opt eq {--valgrind}} { @@ -191,6 +239,7 @@ proc parse_options {} { set ::tls 1 } elseif {$opt eq "--help"} { puts "--single Only runs tests specified by pattern." + puts "--dont-clean Keep log files on exit." puts "--pause-on-error Pause for manual inspection on error." puts "--fail Simulate a test failure." puts "--valgrind Run with valgrind." @@ -486,7 +535,7 @@ proc kill_instance {type id} { error "You tried to kill $type $id twice." } - exec kill -9 $pid + stop_instance $pid set_instance_attrib $type $id pid -1 set_instance_attrib $type $id link you_tried_to_talk_with_killed_instance @@ -521,7 +570,7 @@ proc restart_instance {type id} { # Execute the instance with its old setup and append the new pid # file for cleanup. 
- set pid [exec_instance $type $cfgfile] + set pid [exec_instance $type $dirname $cfgfile] set_instance_attrib $type $id pid $pid lappend ::pids $pid diff --git a/tests/support/server.tcl b/tests/support/server.tcl index f2f6ceece..f74d839ad 100644 --- a/tests/support/server.tcl +++ b/tests/support/server.tcl @@ -13,21 +13,9 @@ proc start_server_error {config_file error} { } proc check_valgrind_errors stderr { - set fd [open $stderr] - set buf [read $fd] - close $fd - - # look for stack trace and other errors, or the absense of a leak free summary - if {[regexp -- { at 0x} $buf] || - [regexp -- {Warning} $buf] || - [regexp -- {Invalid} $buf] || - [regexp -- {Mismatched} $buf] || - [regexp -- {uninitialized} $buf] || - [regexp -- {has a fishy} $buf] || - [regexp -- {overlap} $buf] || - (![regexp -- {definitely lost: 0 bytes} $buf] && - ![regexp -- {no leaks are possible} $buf])} { - send_data_packet $::test_server_fd err "Valgrind error: $buf\n" + set res [find_valgrind_errors $stderr] + if {$res != ""} { + send_data_packet $::test_server_fd err "Valgrind error: $res\n" } } diff --git a/tests/support/util.tcl b/tests/support/util.tcl index c698c255f..ecf9f230f 100644 --- a/tests/support/util.tcl +++ b/tests/support/util.tcl @@ -432,6 +432,29 @@ proc colorstr {color str} { } } +proc find_valgrind_errors {stderr} { + set fd [open $stderr] + set buf [read $fd] + close $fd + + # Look for stack trace (" at 0x") and other errors (Invalid, Mismatched, etc). + # Look for "Warnings", but not the "set address range perms". These don't indicate any real concern. + # Look for the absense of a leak free summary (happens when redis isn't terminated properly). + if {[regexp -- { at 0x} $buf] || + [regexp -- {^(?=.*Warning)(?:(?!set address range perms).)*$} $buf] || + [regexp -- {Invalid} $buf] || + [regexp -- {Mismatched} $buf] || + [regexp -- {uninitialized} $buf] || + [regexp -- {has a fishy} $buf] || + [regexp -- {overlap} $buf] || + (![regexp -- {definitely lost: 0 bytes} $buf] && + ![regexp -- {no leaks are possible} $buf])} { + return $buf + } + + return "" +} + # Execute a background process writing random data for the specified number # of seconds to the specified Redis instance. proc start_write_load {host port seconds} { From a227bc805336b3bc21ba62c0d21b46288a5e44d7 Mon Sep 17 00:00:00 2001 From: Oran Agra Date: Mon, 7 Sep 2020 16:26:11 +0300 Subject: [PATCH 146/215] fix broken cluster/sentinel tests by recent commit (#7752) 2b998de46 added a file for stderr to keep valgrind log but i forgot to add a similar thing when valgrind isn't being used. the result is that `glob */err.txt` fails. (cherry picked from commit 42ba7a1b75aa100ba99d2024420bc658439452fb) --- tests/instances.tcl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/instances.tcl b/tests/instances.tcl index 2029bc5f5..82c35854b 100644 --- a/tests/instances.tcl +++ b/tests/instances.tcl @@ -52,7 +52,7 @@ proc exec_instance {type dirname cfgfile} { if {$::valgrind} { set pid [exec valgrind --track-origins=yes --suppressions=../../../src/valgrind.sup --show-reachable=no --show-possibly-lost=no --leak-check=full ../../../src/${prgname} $cfgfile 2>> $errfile &] } else { - set pid [exec ../../../src/${prgname} $cfgfile &] + set pid [exec ../../../src/${prgname} $cfgfile 2>> $errfile &] } return $pid } From e9fef49e1241fb9a672760007cfd177bb1583f5a Mon Sep 17 00:00:00 2001 From: Yossi Gottlieb Date: Mon, 7 Sep 2020 17:30:36 +0300 Subject: [PATCH 147/215] Tests: fix unmonitored servers. 
(#7756) There is an inherent race condition in port allocation for spawned servers. If a server fails to start because a port is taken, a new port is allocated. This fixes a problem where the logs are not truncated and as a result a large number of unmonitored servers are started. (cherry picked from commit 2df4cb93acabf10bb0ff39c12030791b0947e719) --- tests/support/server.tcl | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/tests/support/server.tcl b/tests/support/server.tcl index f74d839ad..30d0d4045 100644 --- a/tests/support/server.tcl +++ b/tests/support/server.tcl @@ -383,6 +383,11 @@ proc start_server {options {code undefined}} { dict set config port $port } create_server_config_file $config_file $config + + # Truncate log so wait_server_started will not be looking at + # output of the failed server. + close [open $stdout "w"] + continue; # Try again } From 57dbd6e90d0e5d8da42c9597510d8d9da76639a1 Mon Sep 17 00:00:00 2001 From: Oran Agra Date: Mon, 7 Sep 2020 18:06:25 +0300 Subject: [PATCH 148/215] Fix cluster consistency-check test (#7754) This test was failing from time to time see discussion at the bottom of #7635 This was probably due to timing, the DEBUG SLEEP executed by redis-cli didn't sleep for enough time. This commit changes: 1) use SET-ACTIVE-EXPIRE instead of DEBUG SLEEP 2) reduce many `after` sleeps with retry loops to speed up the test. 3) add many comment explaining the different steps of the test and it's purpose. 4) config appendonly before populating the volatile keys, so that they'll be part of the AOF command stream rather than the preamble RDB portion. other complications: recently kill_instance switched from SIGKILL to SIGTERM, and this would sometimes fail since there was an AOFRW running in the background. now we wait for it to end before attempting the kill. 
(cherry picked from commit b491d477c3062d1fb064a794d74643c82d1c4adc) --- tests/cluster/tests/14-consistency-check.tcl | 86 +++++++++++++------- 1 file changed, 56 insertions(+), 30 deletions(-) diff --git a/tests/cluster/tests/14-consistency-check.tcl b/tests/cluster/tests/14-consistency-check.tcl index 5a80dd0df..ddc0570e6 100644 --- a/tests/cluster/tests/14-consistency-check.tcl +++ b/tests/cluster/tests/14-consistency-check.tcl @@ -39,53 +39,79 @@ proc cluster_write_keys_with_expire {id ttl} { $cluster close } +# make sure that replica who restarts from persistence will load keys +# that have already expired, critical for correct execution of commands +# that arrive from the master proc test_slave_load_expired_keys {aof} { test "Slave expired keys is loaded when restarted: appendonly=$aof" { set master_id [find_non_empty_master] set replica_id [get_one_of_my_replica $master_id] - set master_dbsize [R $master_id dbsize] - set slave_dbsize [R $replica_id dbsize] - assert_equal $master_dbsize $slave_dbsize - - set data_ttl 5 - cluster_write_keys_with_expire $master_id $data_ttl - after 100 - set replica_dbsize_1 [R $replica_id dbsize] - assert {$replica_dbsize_1 > $slave_dbsize} + set master_dbsize_0 [R $master_id dbsize] + set replica_dbsize_0 [R $replica_id dbsize] + assert_equal $master_dbsize_0 $replica_dbsize_0 + # config the replica persistency and rewrite the config file to survive restart + # note that this needs to be done before populating the volatile keys since + # that triggers and AOFRW, and we rather the AOF file to have SETEX commands + # rather than an RDB with volatile keys R $replica_id config set appendonly $aof R $replica_id config rewrite - set start_time [clock seconds] - set end_time [expr $start_time+$data_ttl+2] - R $replica_id save - set replica_dbsize_2 [R $replica_id dbsize] - assert {$replica_dbsize_2 > $slave_dbsize} + # fill with 100 keys with 3 second TTL + set data_ttl 3 + cluster_write_keys_with_expire $master_id $data_ttl + + # wait for replica to be in sync with master + wait_for_condition 500 10 { + [R $replica_id dbsize] eq [R $master_id dbsize] + } else { + fail "replica didn't sync" + } + + set replica_dbsize_1 [R $replica_id dbsize] + assert {$replica_dbsize_1 > $replica_dbsize_0} + + # make replica create persistence file + if {$aof == "yes"} { + # we need to wait for the initial AOFRW to be done, otherwise + # kill_instance (which now uses SIGTERM will fail ("Writing initial AOF, can't exit") + wait_for_condition 100 10 { + [RI $replica_id aof_rewrite_in_progress] eq 0 + } else { + fail "keys didn't expire" + } + } else { + R $replica_id save + } + + # kill the replica (would stay down until re-started) kill_instance redis $replica_id - set master_port [get_instance_attrib redis $master_id port] - exec ../../../src/redis-cli \ - -h 127.0.0.1 -p $master_port \ - {*}[rediscli_tls_config "../../../tests"] \ - debug sleep [expr $data_ttl+3] > /dev/null & + # Make sure the master doesn't do active expire (sending DELs to the replica) + R $master_id DEBUG SET-ACTIVE-EXPIRE 0 - while {[clock seconds] <= $end_time} { - #wait for $data_ttl seconds - } + # wait for all the keys to get logically expired + after [expr $data_ttl*1000] + + # start the replica again (loading an RDB or AOF file) restart_instance redis $replica_id - wait_for_condition 200 50 { - [R $replica_id ping] eq {PONG} - } else { - fail "replica #$replica_id not started" - } - + # make sure the keys are still there set replica_dbsize_3 [R $replica_id dbsize] - assert {$replica_dbsize_3 > 
$slave_dbsize} + assert {$replica_dbsize_3 > $replica_dbsize_0} + + # restore settings + R $master_id DEBUG SET-ACTIVE-EXPIRE 1 + + # wait for the master to expire all keys and replica to get the DELs + wait_for_condition 500 10 { + [R $replica_id dbsize] eq $master_dbsize_0 + } else { + fail "keys didn't expire" + } } } test_slave_load_expired_keys no -after 5000 test_slave_load_expired_keys yes From 781e50d41f0a8fd5ff92ba0216535734db83aeeb Mon Sep 17 00:00:00 2001 From: "bodong.ybd" Date: Tue, 8 Sep 2020 10:45:03 +0800 Subject: [PATCH 149/215] Tests: Some fixes for macOS 1) cur_test: when restart_server, "no such variable" error occurs ./runtest --single integration/rdb test {client freed during loading} SET ::cur_test restart_server kill_server test "Check for memory leaks (pid $pid)" SET ::cur_test UNSET ::cur_test UNSET ::cur_test // This global variable has been unset. 2) `ps --ppid` not available on macOS platform, can be replaced with `pgrep -P pid`. (cherry picked from commit f22fa9594d536cb53f83ed8e508c03d4278778b0) --- tests/support/test.tcl | 1 - tests/support/util.tcl | 25 +++++++++++++++++++++++++ tests/unit/oom-score-adj.tcl | 11 +---------- 3 files changed, 26 insertions(+), 11 deletions(-) diff --git a/tests/support/test.tcl b/tests/support/test.tcl index f5b4c8bef..54d323fa2 100644 --- a/tests/support/test.tcl +++ b/tests/support/test.tcl @@ -190,5 +190,4 @@ proc test {name code {okpattern undefined} {options undefined}} { send_data_packet $::test_server_fd err "Detected a memory leak in test '$name': $output" } } - unset ::cur_test } diff --git a/tests/support/util.tcl b/tests/support/util.tcl index ecf9f230f..b9a65358f 100644 --- a/tests/support/util.tcl +++ b/tests/support/util.tcl @@ -494,3 +494,28 @@ proc start_bg_complex_data {host port db ops} { proc stop_bg_complex_data {handle} { catch {exec /bin/kill -9 $handle} } + +proc populate {num prefix size} { + set rd [redis_deferring_client] + for {set j 0} {$j < $num} {incr j} { + $rd set $prefix$j [string repeat A $size] + } + for {set j 0} {$j < $num} {incr j} { + $rd read + } + $rd close +} + +proc get_child_pid {idx} { + set pid [srv $idx pid] + if {[string match {*Darwin*} [exec uname -a]]} { + set fd [open "|pgrep -P $pid" "r"] + set child_pid [string trim [lindex [split [read $fd] \n] 0]] + } else { + set fd [open "|ps --ppid $pid -o pid" "r"] + set child_pid [string trim [lindex [split [read $fd] \n] 1]] + } + close $fd + + return $child_pid +} diff --git a/tests/unit/oom-score-adj.tcl b/tests/unit/oom-score-adj.tcl index 993004602..8eb09a993 100644 --- a/tests/unit/oom-score-adj.tcl +++ b/tests/unit/oom-score-adj.tcl @@ -14,15 +14,6 @@ if {$system_name eq {linux}} { return $val } - proc get_child_pid {} { - set pid [srv 0 pid] - set fd [open "|ps --ppid $pid -o pid" "r"] - set child_pid [string trim [lindex [split [read $fd] \n] 1]] - close $fd - - return $child_pid - } - test {CONFIG SET oom-score-adj works as expected} { set base [get_oom_score_adj] @@ -47,7 +38,7 @@ if {$system_name eq {linux}} { r config set rdb-key-save-delay 100000 r bgsave - set child_pid [get_child_pid] + set child_pid [get_child_pid 0] assert {[get_oom_score_adj $child_pid] == [expr $base + 30]} } From 874c3a997826b2a8f28d132bc197a090c7ca9530 Mon Sep 17 00:00:00 2001 From: Oran Agra Date: Tue, 8 Sep 2020 10:59:25 +0300 Subject: [PATCH 150/215] Add daily CI for MacOS (#7759) (cherry picked from commit 5496b4a7cd9f04fb28ab5c6d903f1b59a9c0842c) --- .github/workflows/daily.yml | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff 
--git a/.github/workflows/daily.yml b/.github/workflows/daily.yml index 5b395351b..07cd55c87 100644 --- a/.github/workflows/daily.yml +++ b/.github/workflows/daily.yml @@ -168,3 +168,21 @@ jobs: ./runtest-cluster --tls ./runtest-cluster + test-macos-latest: + runs-on: macos-latest + if: github.repository == 'redis/redis' + timeout-minutes: 14400 + steps: + - uses: actions/checkout@v2 + - name: make + run: make + - name: test + run: | + ./runtest --accurate --verbose + - name: module api test + run: ./runtest-moduleapi --verbose + - name: sentinel tests + run: ./runtest-sentinel + - name: cluster tests + run: ./runtest-cluster + From 643d3e0b7964c170af3762251dfbcf220530cc8f Mon Sep 17 00:00:00 2001 From: Oran Agra Date: Tue, 8 Sep 2020 14:12:03 +0300 Subject: [PATCH 151/215] handle cur_test for nested tests if there are nested tests and nested servers, we need to restore the previous value of cur_test when a test exist. example: ``` test{test 1} { start_server { test{test 1.1 - master only} { } start_server { test{test 1.2 - with replication} { } } } } ``` when `test 1.1 - master only exists`, we're still inside `test 1` (cherry picked from commit 0a1e7341935dbca4bae582de1a4a26d5ed4c652d) --- tests/support/test.tcl | 3 +++ 1 file changed, 3 insertions(+) diff --git a/tests/support/test.tcl b/tests/support/test.tcl index 54d323fa2..55937b8f4 100644 --- a/tests/support/test.tcl +++ b/tests/support/test.tcl @@ -4,6 +4,7 @@ set ::num_failed 0 set ::num_skipped 0 set ::num_aborted 0 set ::tests_failed {} +set ::cur_test "" proc fail {msg} { error "assertion:$msg" @@ -136,6 +137,7 @@ proc test {name code {okpattern undefined} {options undefined}} { # set a cur_test global to be logged into new servers that are spown # and log the test name in all existing servers + set prev_test $::cur_test set ::cur_test "$name in $::curfile" if {!$::external} { foreach srv $::servers { @@ -190,4 +192,5 @@ proc test {name code {okpattern undefined} {options undefined}} { send_data_packet $::test_server_fd err "Detected a memory leak in test '$name': $output" } } + set ::cur_test $prev_test } From cf3375bf7cac0fc84ed31d7f46ec95bb486b4501 Mon Sep 17 00:00:00 2001 From: Yossi Gottlieb Date: Tue, 8 Sep 2020 16:00:20 +0300 Subject: [PATCH 152/215] Fix CONFIG REWRITE of oom-score-adj-values. (#7761) (cherry picked from commit 750acf3a457053ad9f57a263baa84ec68e0ae145) --- src/config.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/config.c b/src/config.c index 52acb527b..de1269e48 100644 --- a/src/config.c +++ b/src/config.c @@ -1422,7 +1422,8 @@ void rewriteConfigOOMScoreAdjValuesOption(struct rewriteConfigState *state) { char *option = "oom-score-adj-values"; sds line; - line = sdsempty(); + line = sdsnew(option); + line = sdscatlen(line, " ", 1); for (j = 0; j < CONFIG_OOM_COUNT; j++) { if (server.oom_score_adj_values[j] != configOOMScoreAdjValuesDefaults[j]) force = 1; From 84ba2ec82b809ba73ba4c630ac15d0859670d3cc Mon Sep 17 00:00:00 2001 From: Eran Liberty Date: Wed, 9 Sep 2020 09:35:42 +0300 Subject: [PATCH 153/215] Allow exec with read commands on readonly replica in cluster (#7766) There was a bug. Although cluster replicas would allow read commands, they would not allow a MULTI-EXEC that's composed solely of read commands. Adds tests for coverage. 
Co-authored-by: Oran Agra Co-authored-by: Eran Liberty (cherry picked from commit b120366d48d9e488a406965773e64f29ba2946f7) --- src/cluster.c | 4 +- .../tests/16-transactions-on-replica.tcl | 48 +++++++++++++++++++ tests/instances.tcl | 10 +++- 3 files changed, 59 insertions(+), 3 deletions(-) create mode 100644 tests/cluster/tests/16-transactions-on-replica.tcl diff --git a/src/cluster.c b/src/cluster.c index 17d21df29..8d8b61ab4 100644 --- a/src/cluster.c +++ b/src/cluster.c @@ -5765,8 +5765,10 @@ clusterNode *getNodeByQuery(client *c, struct redisCommand *cmd, robj **argv, in /* Handle the read-only client case reading from a slave: if this * node is a slave and the request is about an hash slot our master * is serving, we can reply without redirection. */ + int is_readonly_command = (c->cmd->flags & CMD_READONLY) || + (c->cmd->proc == execCommand && !(c->mstate.cmd_inv_flags & CMD_READONLY)); if (c->flags & CLIENT_READONLY && - (cmd->flags & CMD_READONLY || cmd->proc == evalCommand || + (is_readonly_command || cmd->proc == evalCommand || cmd->proc == evalShaCommand) && nodeIsSlave(myself) && myself->slaveof == n) diff --git a/tests/cluster/tests/16-transactions-on-replica.tcl b/tests/cluster/tests/16-transactions-on-replica.tcl new file mode 100644 index 000000000..da9dff1ca --- /dev/null +++ b/tests/cluster/tests/16-transactions-on-replica.tcl @@ -0,0 +1,48 @@ +# Check basic transactions on a replica. + +source "../tests/includes/init-tests.tcl" + +test "Create a primary with a replica" { + create_cluster 1 1 +} + +test "Cluster should start ok" { + assert_cluster_state ok +} + +set primary [Rn 0] +set replica [Rn 1] + +test "Cant read from replica without READONLY" { + $primary SET a 1 + catch {$replica GET a} err + assert {[string range $err 0 4] eq {MOVED}} +} + +test "Can read from replica after READONLY" { + $replica READONLY + assert {[$replica GET a] eq {1}} +} + +test "Can preform HSET primary and HGET from replica" { + $primary HSET h a 1 + $primary HSET h b 2 + $primary HSET h c 3 + assert {[$replica HGET h a] eq {1}} + assert {[$replica HGET h b] eq {2}} + assert {[$replica HGET h c] eq {3}} +} + +test "Can MULTI-EXEC transaction of HGET operations from replica" { + $replica MULTI + assert {[$replica HGET h a] eq {QUEUED}} + assert {[$replica HGET h b] eq {QUEUED}} + assert {[$replica HGET h c] eq {QUEUED}} + assert {[$replica EXEC] eq {1 2 3}} +} + +test "MULTI-EXEC with write operations is MOVED" { + $replica MULTI + catch {$replica HSET h b 4} err + assert {[string range $err 0 4] eq {MOVED}} +} diff --git a/tests/instances.tcl b/tests/instances.tcl index 82c35854b..2199cfcd4 100644 --- a/tests/instances.tcl +++ b/tests/instances.tcl @@ -422,10 +422,16 @@ proc S {n args} { [dict get $s link] {*}$args } +# Returns a Redis instance by index. +# Example: +# [Rn 0] info +proc Rn {n} { + return [dict get [lindex $::redis_instances $n] link] +} + # Like R but to chat with Redis instances. proc R {n args} { - set r [lindex $::redis_instances $n] - [dict get $r link] {*}$args + [Rn $n] {*}$args } proc get_info_field {info field} { From 3c8b39451174d406303557af446c8d86acbb44d5 Mon Sep 17 00:00:00 2001 From: Yossi Gottlieb Date: Wed, 9 Sep 2020 12:30:43 +0300 Subject: [PATCH 154/215] Tests: clean up stale .cli files. 
(#7768) (cherry picked from commit 918abd7276afcb994f2d3f8a86a0708993420e37) --- tests/integration/redis-cli.tcl | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tests/integration/redis-cli.tcl b/tests/integration/redis-cli.tcl index 2d4145ff0..1e346a9a5 100644 --- a/tests/integration/redis-cli.tcl +++ b/tests/integration/redis-cli.tcl @@ -182,6 +182,7 @@ start_server {tags {"cli"}} { set tmpfile [write_tmpfile "from file"] assert_equal "OK" [run_cli_with_input_file $tmpfile set key] assert_equal "from file" [r get key] + file delete $tmpfile } test_nontty_cli "Status reply" { @@ -215,6 +216,7 @@ start_server {tags {"cli"}} { set tmpfile [write_tmpfile "from file"] assert_equal "OK" [run_cli_with_input_file $tmpfile set key] assert_equal "from file" [r get key] + file delete $tmpfile } proc test_redis_cli_rdb_dump {} { From 8b0d797ecdd1e416cb92bc92769c37bb9b5db0a6 Mon Sep 17 00:00:00 2001 From: Jan-Erik Rediger Date: Wed, 9 Sep 2020 14:06:04 +0200 Subject: [PATCH 155/215] Check that THP is not set to always (madvise is ok) (#4001) THP can also be set to madvise, in which case it shouldn't cause problems for Redis since redis (or the allocator) doesn't use madvise to activate it. (cherry picked from commit b2419c31c166bd2d73f7af3d089859795c0e3506) --- src/latency.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/latency.c b/src/latency.c index dfdc6668c..b5ccc7cc6 100644 --- a/src/latency.c +++ b/src/latency.c @@ -71,7 +71,7 @@ int THPIsEnabled(void) { return 0; } fclose(fp); - return (strstr(buf,"[never]") == NULL) ? 1 : 0; + return (strstr(buf,"[always]") != NULL) ? 1 : 0; } #endif From 3cd6c2605681e08ec0813c3469298325a8126ebc Mon Sep 17 00:00:00 2001 From: Itamar Haber Date: Wed, 9 Sep 2020 15:09:41 +0300 Subject: [PATCH 156/215] Documents RM_Call's fmt (#5448) Improve RM_Call inline documentation about the fmt argument so that we don't completely depend on the web docs. Co-authored-by: Oran Agra (cherry picked from commit ce15620dc17ebad94cd03cb4ee779fdd4e99b212) --- src/module.c | 25 +++++++++++++++++++++++++ 1 file changed, 25 insertions(+) diff --git a/src/module.c b/src/module.c index 9b44d5ecc..b7321f7d9 100644 --- a/src/module.c +++ b/src/module.c @@ -3303,6 +3303,23 @@ fmterr: } /* Exported API to call any Redis command from modules. + * + * * **cmdname**: The Redis command to call. + * * **fmt**: A format specifier string for the command's arguments. Each + * of the arguments should be specified by a valid type specification: + * b The argument is a buffer and is immediately followed by another + * argument that is the buffer's length. + * c The argument is a pointer to a plain C string (null-terminated). + * l The argument is long long integer. + * s The argument is a RedisModuleString. + * v The argument(s) is a vector of RedisModuleString. + * + * The format specifier can also include modifiers: + * ! Sends the Redis command and its arguments to replicas and AOF. + * A Suppress AOF propagation, send only to replicas (requires `!`). + * R Suppress replicas propagation, send only to AOF (requires `!`). + * * **...**: The actual arguments to the Redis command. + * * On success a RedisModuleCallReply object is returned, otherwise * NULL is returned and errno is set to the following values: * @@ -3314,6 +3331,14 @@ fmterr: * in a readonly state. * ENETDOWN: operation in Cluster instance when cluster is down. 
* + * Example code fragment: + * + * reply = RedisModule_Call(ctx,"INCRBY","sc",argv[1],"10"); + * if (RedisModule_CallReplyType(reply) == REDISMODULE_REPLY_INTEGER) { + * long long myval = RedisModule_CallReplyInteger(reply); + * // Do something with myval. + * } + * * This API is documented here: https://redis.io/topics/modules-intro */ RedisModuleCallReply *RM_Call(RedisModuleCtx *ctx, const char *cmdname, const char *fmt, ...) { From 47364bce690517d1e2abb4c5e6fc96af35e0fab0 Mon Sep 17 00:00:00 2001 From: Oran Agra Date: Wed, 9 Sep 2020 15:39:57 +0300 Subject: [PATCH 157/215] Change THP warning to use madvise rather than never (#7771) completes b2419c31c166bd2d73f7af3d089859795c0e3506 (cherry picked from commit 1461f02deb65585bb47c4d50d68ef733edfba6f9) --- src/server.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/server.c b/src/server.c index 990b23b7d..6447c69e4 100644 --- a/src/server.c +++ b/src/server.c @@ -4728,7 +4728,7 @@ void linuxMemoryWarnings(void) { serverLog(LL_WARNING,"WARNING overcommit_memory is set to 0! Background save may fail under low memory condition. To fix this issue add 'vm.overcommit_memory = 1' to /etc/sysctl.conf and then reboot or run the command 'sysctl vm.overcommit_memory=1' for this to take effect."); } if (THPIsEnabled()) { - serverLog(LL_WARNING,"WARNING you have Transparent Huge Pages (THP) support enabled in your kernel. This will create latency and memory usage issues with Redis. To fix this issue run the command 'echo never > /sys/kernel/mm/transparent_hugepage/enabled' as root, and add it to your /etc/rc.local in order to retain the setting after a reboot. Redis must be restarted after THP is disabled."); + serverLog(LL_WARNING,"WARNING you have Transparent Huge Pages (THP) support enabled in your kernel. This will create latency and memory usage issues with Redis. To fix this issue run the command 'echo madvise > /sys/kernel/mm/transparent_hugepage/enabled' as root, and add it to your /etc/rc.local in order to retain the setting after a reboot. Redis must be restarted after THP is disabled (set to 'madvise' or 'never')."); } } #endif /* __linux__ */ From 635704d4248dc401cf8de2ec57a5952b30ee00a3 Mon Sep 17 00:00:00 2001 From: Yossi Gottlieb Date: Wed, 9 Sep 2020 15:43:11 +0300 Subject: [PATCH 158/215] Tests: validate CONFIG REWRITE for all params. (#7764) This is a catch-all test to confirm that that rewrite produces a valid output for all parameters and that this process does not introduce undesired configuration changes. (cherry picked from commit a8b7268911803f4ac827ab22e8e5b10134664832) --- src/config.c | 12 +++++++++--- src/debug.c | 7 +++++++ src/sentinel.c | 2 +- src/server.c | 2 +- src/server.h | 2 +- tests/unit/introspection.tcl | 24 ++++++++++++++++++++++++ 6 files changed, 43 insertions(+), 6 deletions(-) diff --git a/src/config.c b/src/config.c index de1269e48..2c69540db 100644 --- a/src/config.c +++ b/src/config.c @@ -1055,6 +1055,8 @@ struct rewriteConfigState { sds *lines; /* Current lines as an array of sds strings */ int has_tail; /* True if we already added directives that were not present in the original config file. */ + int force_all; /* True if we want all keywords to be force + written. Currently only used for testing. */ }; /* Append the new line to the current configuration state. 
*/ @@ -1101,6 +1103,7 @@ struct rewriteConfigState *rewriteConfigReadOldFile(char *path) { state->numlines = 0; state->lines = NULL; state->has_tail = 0; + state->force_all = 0; if (fp == NULL) return state; /* Read the old file line by line, populate the state. */ @@ -1179,7 +1182,7 @@ void rewriteConfigRewriteLine(struct rewriteConfigState *state, const char *opti rewriteConfigMarkAsProcessed(state,option); - if (!l && !force) { + if (!l && !force && !state->force_all) { /* Option not used previously, and we are not forced to use it. */ sdsfree(line); sdsfree(o); @@ -1603,15 +1606,18 @@ cleanup: * * Configuration parameters that are at their default value, unless already * explicitly included in the old configuration file, are not rewritten. + * The force_all flag overrides this behavior and forces everything to be + * written. This is currently only used for testing purposes. * * On error -1 is returned and errno is set accordingly, otherwise 0. */ -int rewriteConfig(char *path) { +int rewriteConfig(char *path, int force_all) { struct rewriteConfigState *state; sds newcontent; int retval; /* Step 1: read the old config into our rewrite state. */ if ((state = rewriteConfigReadOldFile(path)) == NULL) return -1; + if (force_all) state->force_all = 1; /* Step 2: rewrite every single option, replacing or appending it inside * the rewrite state. */ @@ -2405,7 +2411,7 @@ NULL addReplyError(c,"The server is running without a config file"); return; } - if (rewriteConfig(server.configfile) == -1) { + if (rewriteConfig(server.configfile, 0) == -1) { serverLog(LL_WARNING,"CONFIG REWRITE failed: %s", strerror(errno)); addReplyErrorFormat(c,"Rewriting config file: %s", strerror(errno)); } else { diff --git a/src/debug.c b/src/debug.c index 0bea69876..4831c4d74 100644 --- a/src/debug.c +++ b/src/debug.c @@ -397,6 +397,7 @@ void debugCommand(client *c) { "STRUCTSIZE -- Return the size of different Redis core C structures.", "ZIPLIST -- Show low level info about the ziplist encoding.", "STRINGMATCH-TEST -- Run a fuzz tester against the stringmatchlen() function.", +"CONFIG-REWRITE-FORCE-ALL -- Like CONFIG REWRITE but writes all configuration options, including keywords not listed in original configuration file or default values.", #ifdef USE_JEMALLOC "MALLCTL [] -- Get or set a malloc tunning integer.", "MALLCTL-STR [] -- Get or set a malloc tunning string.", @@ -794,6 +795,12 @@ NULL { stringmatchlen_fuzz_test(); addReplyStatus(c,"Apparently Redis did not crash: test passed"); + } else if (!strcasecmp(c->argv[1]->ptr,"config-rewrite-force-all") && c->argc == 2) + { + if (rewriteConfig(server.configfile, 1) == -1) + addReplyError(c, "CONFIG-REWRITE-FORCE-ALL failed"); + else + addReply(c, shared.ok); #ifdef USE_JEMALLOC } else if(!strcasecmp(c->argv[1]->ptr,"mallctl") && c->argc >= 3) { mallctl_int(c, c->argv+2, c->argc-2); diff --git a/src/sentinel.c b/src/sentinel.c index 5bd594955..bdc339674 100644 --- a/src/sentinel.c +++ b/src/sentinel.c @@ -1954,7 +1954,7 @@ void sentinelFlushConfig(void) { int rewrite_status; server.hz = CONFIG_DEFAULT_HZ; - rewrite_status = rewriteConfig(server.configfile); + rewrite_status = rewriteConfig(server.configfile, 0); server.hz = saved_hz; if (rewrite_status == -1) goto werr; diff --git a/src/server.c b/src/server.c index 6447c69e4..a7a36df13 100644 --- a/src/server.c +++ b/src/server.c @@ -2488,7 +2488,7 @@ int restartServer(int flags, mstime_t delay) { /* Config rewriting. 
*/ if (flags & RESTART_SERVER_CONFIG_REWRITE && server.configfile && - rewriteConfig(server.configfile) == -1) + rewriteConfig(server.configfile, 0) == -1) { serverLog(LL_WARNING,"Can't restart: configuration rewrite process " "failed"); diff --git a/src/server.h b/src/server.h index c42955b94..d77df93b5 100644 --- a/src/server.h +++ b/src/server.h @@ -2089,7 +2089,7 @@ void appendServerSaveParams(time_t seconds, int changes); void resetServerSaveParams(void); struct rewriteConfigState; /* Forward declaration to export API. */ void rewriteConfigRewriteLine(struct rewriteConfigState *state, const char *option, sds line, int force); -int rewriteConfig(char *path); +int rewriteConfig(char *path, int force_all); void initConfigValues(); /* db.c -- Keyspace access API */ diff --git a/tests/unit/introspection.tcl b/tests/unit/introspection.tcl index d681e06d5..37470c068 100644 --- a/tests/unit/introspection.tcl +++ b/tests/unit/introspection.tcl @@ -134,4 +134,28 @@ start_server {tags {"introspection"}} { } } + + # Do a force-all config rewrite and make sure we're able to parse + # it. + test {CONFIG REWRITE sanity} { + # Capture state of config before + set configs {} + foreach {k v} [r config get *] { + dict set configs $k $v + } + + # Rewrite entire configuration, restart and confirm the + # server is able to parse it and start. + assert_equal [r debug config-rewrite-force-all] "OK" + restart_server 0 0 + assert_equal [r ping] "PONG" + + # Verify no changes were introduced + dict for {k v} $configs { + assert_equal $v [lindex [r config get $k] 1] + } + } + + # Config file at this point is at a wierd state, and includes all + # known keywords. Might be a good idea to avoid adding tests here. } From 25e211bcf00a5437c0f6e24f96590885ca9fe146 Mon Sep 17 00:00:00 2001 From: Roi Lipman Date: Wed, 9 Sep 2020 16:01:16 +0300 Subject: [PATCH 159/215] RM_ThreadSafeContextTryLock a non-blocking method for acquiring GIL (#7738) Co-authored-by: Yossi Gottlieb Co-authored-by: Oran Agra (cherry picked from commit 042189fd8707544139337b3ddcf38b5c5fea1bf0) --- runtest-moduleapi | 1 + src/module.c | 22 +++++++ src/redismodule.h | 2 + src/server.h | 1 + tests/modules/Makefile | 4 +- tests/modules/blockedclient.c | 82 ++++++++++++++++++++++++++ tests/unit/moduleapi/blockedclient.tcl | 11 ++++ 7 files changed, 122 insertions(+), 1 deletion(-) create mode 100644 tests/modules/blockedclient.c create mode 100644 tests/unit/moduleapi/blockedclient.tcl diff --git a/runtest-moduleapi b/runtest-moduleapi index 71db27e5e..f3abde740 100755 --- a/runtest-moduleapi +++ b/runtest-moduleapi @@ -26,4 +26,5 @@ $TCLSH tests/test_helper.tcl \ --single unit/moduleapi/datatype \ --single unit/moduleapi/auth \ --single unit/moduleapi/keyspace_events \ +--single unit/moduleapi/blockedclient \ "${@}" diff --git a/src/module.c b/src/module.c index b7321f7d9..f293d6a6c 100644 --- a/src/module.c +++ b/src/module.c @@ -4906,6 +4906,23 @@ void RM_ThreadSafeContextLock(RedisModuleCtx *ctx) { moduleAcquireGIL(); } +/* Similar to RM_ThreadSafeContextLock but this function + * would not block if the server lock is already acquired. + * + * If successful (lock acquired) REDISMODULE_OK is returned, + * otherwise REDISMODULE_ERR is returned and errno is set + * accordingly. */ +int RM_ThreadSafeContextTryLock(RedisModuleCtx *ctx) { + UNUSED(ctx); + + int res = moduleTryAcquireGIL(); + if(res != 0) { + errno = res; + return REDISMODULE_ERR; + } + return REDISMODULE_OK; +} + /* Release the server lock after a thread safe API call was executed. 
*/ void RM_ThreadSafeContextUnlock(RedisModuleCtx *ctx) { UNUSED(ctx); @@ -4916,6 +4933,10 @@ void moduleAcquireGIL(void) { pthread_mutex_lock(&moduleGIL); } +int moduleTryAcquireGIL(void) { + return pthread_mutex_trylock(&moduleGIL); +} + void moduleReleaseGIL(void) { pthread_mutex_unlock(&moduleGIL); } @@ -7929,6 +7950,7 @@ void moduleRegisterCoreAPI(void) { REGISTER_API(GetThreadSafeContext); REGISTER_API(FreeThreadSafeContext); REGISTER_API(ThreadSafeContextLock); + REGISTER_API(ThreadSafeContextTryLock); REGISTER_API(ThreadSafeContextUnlock); REGISTER_API(DigestAddStringBuffer); REGISTER_API(DigestAddLongLong); diff --git a/src/redismodule.h b/src/redismodule.h index 460fdd480..4bfc14cc7 100644 --- a/src/redismodule.h +++ b/src/redismodule.h @@ -666,6 +666,7 @@ REDISMODULE_API int (*RedisModule_AbortBlock)(RedisModuleBlockedClient *bc) REDI REDISMODULE_API RedisModuleCtx * (*RedisModule_GetThreadSafeContext)(RedisModuleBlockedClient *bc) REDISMODULE_ATTR; REDISMODULE_API void (*RedisModule_FreeThreadSafeContext)(RedisModuleCtx *ctx) REDISMODULE_ATTR; REDISMODULE_API void (*RedisModule_ThreadSafeContextLock)(RedisModuleCtx *ctx) REDISMODULE_ATTR; +REDISMODULE_API int (*RedisModule_ThreadSafeContextTryLock)(RedisModuleCtx *ctx) REDISMODULE_ATTR; REDISMODULE_API void (*RedisModule_ThreadSafeContextUnlock)(RedisModuleCtx *ctx) REDISMODULE_ATTR; REDISMODULE_API int (*RedisModule_SubscribeToKeyspaceEvents)(RedisModuleCtx *ctx, int types, RedisModuleNotificationFunc cb) REDISMODULE_ATTR; REDISMODULE_API int (*RedisModule_NotifyKeyspaceEvent)(RedisModuleCtx *ctx, int type, const char *event, RedisModuleString *key) REDISMODULE_ATTR; @@ -899,6 +900,7 @@ static int RedisModule_Init(RedisModuleCtx *ctx, const char *name, int ver, int REDISMODULE_GET_API(GetThreadSafeContext); REDISMODULE_GET_API(FreeThreadSafeContext); REDISMODULE_GET_API(ThreadSafeContextLock); + REDISMODULE_GET_API(ThreadSafeContextTryLock); REDISMODULE_GET_API(ThreadSafeContextUnlock); REDISMODULE_GET_API(BlockClient); REDISMODULE_GET_API(UnblockClient); diff --git a/src/server.h b/src/server.h index d77df93b5..980011938 100644 --- a/src/server.h +++ b/src/server.h @@ -1595,6 +1595,7 @@ void moduleBlockedClientTimedOut(client *c); void moduleBlockedClientPipeReadable(aeEventLoop *el, int fd, void *privdata, int mask); size_t moduleCount(void); void moduleAcquireGIL(void); +int moduleTryAcquireGIL(void); void moduleReleaseGIL(void); void moduleNotifyKeyspaceEvent(int type, const char *event, robj *key, int dbid); void moduleCallCommandFilters(client *c); diff --git a/tests/modules/Makefile b/tests/modules/Makefile index de7407a84..fad6e55d8 100644 --- a/tests/modules/Makefile +++ b/tests/modules/Makefile @@ -23,7 +23,9 @@ TEST_MODULES = \ scan.so \ datatype.so \ auth.so \ - keyspace_events.so + keyspace_events.so \ + blockedclient.so + .PHONY: all diff --git a/tests/modules/blockedclient.c b/tests/modules/blockedclient.c new file mode 100644 index 000000000..9d59114a1 --- /dev/null +++ b/tests/modules/blockedclient.c @@ -0,0 +1,82 @@ +#define REDISMODULE_EXPERIMENTAL_API +#include "redismodule.h" +#include +#include +#include + +#define UNUSED(V) ((void) V) + +void *sub_worker(void *arg) { + // Get Redis module context + RedisModuleCtx *ctx = (RedisModuleCtx *)arg; + + // Try acquiring GIL + int res = RedisModule_ThreadSafeContextTryLock(ctx); + + // GIL is already taken by the calling thread expecting to fail. 
+ assert(res != REDISMODULE_OK); + + return NULL; +} + +void *worker(void *arg) { + // Retrieve blocked client + RedisModuleBlockedClient *bc = (RedisModuleBlockedClient *)arg; + + // Get Redis module context + RedisModuleCtx *ctx = RedisModule_GetThreadSafeContext(bc); + + // Acquire GIL + RedisModule_ThreadSafeContextLock(ctx); + + // Create another thread which will try to acquire the GIL + pthread_t tid; + int res = pthread_create(&tid, NULL, sub_worker, ctx); + assert(res == 0); + + // Wait for thread + pthread_join(tid, NULL); + + // Release GIL + RedisModule_ThreadSafeContextUnlock(ctx); + + // Reply to client + RedisModule_ReplyWithSimpleString(ctx, "OK"); + + // Unblock client + RedisModule_UnblockClient(bc, NULL); + + return NULL; +} + +int acquire_gil(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) +{ + UNUSED(argv); + UNUSED(argc); + + /* This command handler tries to acquire the GIL twice + * once in the worker thread using "RedisModule_ThreadSafeContextLock" + * second in the sub-worker thread + * using "RedisModule_ThreadSafeContextTryLock" + * as the GIL is already locked. */ + RedisModuleBlockedClient *bc = RedisModule_BlockClient(ctx, NULL, NULL, NULL, 0); + + pthread_t tid; + int res = pthread_create(&tid, NULL, worker, bc); + assert(res == 0); + + return REDISMODULE_OK; +} + +int RedisModule_OnLoad(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) { + REDISMODULE_NOT_USED(argv); + REDISMODULE_NOT_USED(argc); + + if (RedisModule_Init(ctx, "blockedclient", 1, REDISMODULE_APIVER_1)== REDISMODULE_ERR) + return REDISMODULE_ERR; + + if (RedisModule_CreateCommand(ctx, "acquire_gil", acquire_gil, "", 0, 0, 0) == REDISMODULE_ERR) + return REDISMODULE_ERR; + + return REDISMODULE_OK; +} diff --git a/tests/unit/moduleapi/blockedclient.tcl b/tests/unit/moduleapi/blockedclient.tcl new file mode 100644 index 000000000..d093a0297 --- /dev/null +++ b/tests/unit/moduleapi/blockedclient.tcl @@ -0,0 +1,11 @@ +# source tests/support/util.tcl + +set testmodule [file normalize tests/modules/blockedclient.so] + +start_server {tags {"modules"}} { + r module load $testmodule + + test {Locked GIL acquisition} { + assert_match "OK" [r acquire_gil] + } +} From b55b0ea0f2ecce083a03b4a0b98dc029d81863a8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E6=9D=A8=E5=8D=9A=E4=B8=9C?= Date: Wed, 9 Sep 2020 22:13:35 +0800 Subject: [PATCH 160/215] Tests: Add aclfile load and save tests (#7765) improves test coverage (cherry picked from commit 0666267d2771b1a46cdf36eef27d8a7a393c0c7a) --- tests/assets/user.acl | 2 ++ tests/unit/acl.tcl | 39 +++++++++++++++++++++++++++++++++++++++ 2 files changed, 41 insertions(+) create mode 100644 tests/assets/user.acl diff --git a/tests/assets/user.acl b/tests/assets/user.acl new file mode 100644 index 000000000..2f065dab6 --- /dev/null +++ b/tests/assets/user.acl @@ -0,0 +1,2 @@ +user alice on allcommands allkeys >alice +user bob on -@all +@set +acl ~set* >bob \ No newline at end of file diff --git a/tests/unit/acl.tcl b/tests/unit/acl.tcl index e81280995..381f2f95f 100644 --- a/tests/unit/acl.tcl +++ b/tests/unit/acl.tcl @@ -261,3 +261,42 @@ start_server {tags {"acl"}} { assert_match "*Unknown subcommand or wrong number of arguments*" $e } } + +set server_path [tmpdir "server.acl"] +exec cp -f tests/assets/user.acl $server_path +start_server [list overrides [list "dir" $server_path "aclfile" "user.acl"]] { + # user alice on allcommands allkeys >alice + # user bob on -@all +@set +acl ~set* >bob + + test "Alice: can excute all command" { + r AUTH alice alice + 
assert_equal "alice" [r acl whoami] + r SET key value + } + + test "Bob: just excute @set and acl command" { + r AUTH bob bob + assert_equal "bob" [r acl whoami] + assert_equal "3" [r sadd set 1 2 3] + catch {r SET key value} e + set e + } {*NOPERM*} + + test "ACL load and save" { + r ACL setuser eve +get allkeys >eve on + r ACL save + + # ACL load will free user and kill clients + r ACL load + catch {r ACL LIST} e + assert_match {*I/O error*} $e + + reconnect + r AUTH alice alice + r SET key value + r AUTH eve eve + r GET key + catch {r SET key value} e + set e + } {*NOPERM*} +} From 0052d5196d6e8776e4258c45dc5bddf3b5e1b15a Mon Sep 17 00:00:00 2001 From: Yossi Gottlieb Date: Wed, 9 Sep 2020 18:58:06 +0300 Subject: [PATCH 161/215] Tests: fix oom-score-adj false positives. (#7772) The key save delay is too short and on certain systems the child process is gone before we have a chance to inspect it. (cherry picked from commit b2a73c404bf277bac287c72494a4c4cd2ba02f8c) --- tests/unit/oom-score-adj.tcl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/unit/oom-score-adj.tcl b/tests/unit/oom-score-adj.tcl index 8eb09a993..cf671fe6a 100644 --- a/tests/unit/oom-score-adj.tcl +++ b/tests/unit/oom-score-adj.tcl @@ -35,7 +35,7 @@ if {$system_name eq {linux}} { # Check child process r set key-a value-a - r config set rdb-key-save-delay 100000 + r config set rdb-key-save-delay 1000000 r bgsave set child_pid [get_child_pid 0] From 18b30467f72693a8d9c615112303c0b1a61d2a29 Mon Sep 17 00:00:00 2001 From: Oran Agra Date: Thu, 10 Sep 2020 08:18:20 +0300 Subject: [PATCH 162/215] Fix leak in new blockedclient module API test (cherry picked from commit 0e20ad14a6a857cb168b808f94721df19b23dc0c) --- tests/modules/blockedclient.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/tests/modules/blockedclient.c b/tests/modules/blockedclient.c index 9d59114a1..ca98281a4 100644 --- a/tests/modules/blockedclient.c +++ b/tests/modules/blockedclient.c @@ -46,6 +46,9 @@ void *worker(void *arg) { // Unblock client RedisModule_UnblockClient(bc, NULL); + // Free the Redis module context + RedisModule_FreeThreadSafeContext(ctx); + return NULL; } From 5380716a70a46f5decce34fb51e78aa31ff9ce0e Mon Sep 17 00:00:00 2001 From: Oran Agra Date: Thu, 10 Sep 2020 09:01:10 +0300 Subject: [PATCH 163/215] Fix RESP3 response for HKEYS/HVALS on non-existing key --- src/t_hash.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/t_hash.c b/src/t_hash.c index 4a03cfb25..240e11c91 100644 --- a/src/t_hash.c +++ b/src/t_hash.c @@ -772,7 +772,9 @@ void genericHgetallCommand(client *c, int flags) { hashTypeIterator *hi; int length, count = 0; - if ((o = lookupKeyReadOrReply(c,c->argv[1],shared.emptymap[c->resp])) + robj *emptyResp = (flags & OBJ_HASH_KEY && flags & OBJ_HASH_VALUE) ? 
+ shared.emptymap[c->resp] : shared.emptyarray; + if ((o = lookupKeyReadOrReply(c,c->argv[1],emptyResp)) == NULL || checkType(c,o,OBJ_HASH)) return; /* We return a map if the user requested keys and values, like in the From 03b59cd5f0788a52039287daa83229d459641ff5 Mon Sep 17 00:00:00 2001 From: Oran Agra Date: Thu, 10 Sep 2020 00:04:21 +0300 Subject: [PATCH 164/215] 6.0.8 --- 00-RELEASENOTES | 148 ++++++++++++++++++++++++++++++++++++++++++++++++ src/help.h | 2 +- src/version.h | 2 +- 3 files changed, 150 insertions(+), 2 deletions(-) diff --git a/00-RELEASENOTES b/00-RELEASENOTES index ce75e19c2..c9c09f76e 100644 --- a/00-RELEASENOTES +++ b/00-RELEASENOTES @@ -11,6 +11,154 @@ CRITICAL: There is a critical bug affecting MOST USERS. Upgrade ASAP. SECURITY: There are security fixes in the release. -------------------------------------------------------------------------------- +================================================================================ +Redis 6.0.8 Released Wed Sep 09 23:34:17 IDT 2020 +================================================================================ + +Upgrade urgency HIGH: Anyone who's using Redis 6.0.7 with Sentinel or +CONFIG REWRITE command is affected and should upgrade ASAP, see #7760. + +Bug fixes: + +* CONFIG REWRITE after setting oom-score-adj-values either via CONFIG SET or + loading it from a config file, will generate a corrupt config file that will + cause Redis to fail to start +* Fix issue with redis-cli --pipe on MacOS +* Fix RESP3 response for HKEYS/HVALS on non-existing key +* Various small bug fixes + +New features / Changes: + +* Remove THP warning when set to madvise +* Allow EXEC with read commands on readonly replica in cluster +* Add masters/replicas options to redis-cli --cluster call command + +Module API: + +* Add RedisModule_ThreadSafeContextTryLock + +Full list of commits: + +Oran Agra in commit cdabf696a: + Fix RESP3 response for HKEYS/HVALS on non-existing key + 1 file changed, 3 insertions(+), 1 deletion(-) + +Oran Agra in commit ec633c716: + Fix leak in new blockedclient module API test + 1 file changed, 3 insertions(+) + +Yossi Gottlieb in commit 6bac07c5c: + Tests: fix oom-score-adj false positives. (#7772) + 1 file changed, 1 insertion(+), 1 deletion(-) + +杨博东 in commit 6043dc614: + Tests: Add aclfile load and save tests (#7765) + 2 files changed, 41 insertions(+) + +Roi Lipman in commit c0b5f9bf0: + RM_ThreadSafeContextTryLock a non-blocking method for acquiring GIL (#7738) + 7 files changed, 122 insertions(+), 1 deletion(-) + +Yossi Gottlieb in commit 5780a1599: + Tests: validate CONFIG REWRITE for all params. (#7764) + 6 files changed, 43 insertions(+), 6 deletions(-) + +Oran Agra in commit e3c14b25d: + Change THP warning to use madvise rather than never (#7771) + 1 file changed, 1 insertion(+), 1 deletion(-) + +Itamar Haber in commit 28929917b: + Documents RM_Call's fmt (#5448) + 1 file changed, 25 insertions(+) + +Jan-Erik Rediger in commit 9146402c2: + Check that THP is not set to always (madvise is ok) (#4001) + 1 file changed, 1 insertion(+), 1 deletion(-) + +Yossi Gottlieb in commit d05089429: + Tests: clean up stale .cli files. (#7768) + 1 file changed, 2 insertions(+) + +Eran Liberty in commit 8861c1bae: + Allow exec with read commands on readonly replica in cluster (#7766) + 3 files changed, 59 insertions(+), 3 deletions(-) + +Yossi Gottlieb in commit 2cf2ff2f6: + Fix CONFIG REWRITE of oom-score-adj-values. 
(#7761) + 1 file changed, 2 insertions(+), 1 deletion(-) + +Oran Agra in commit 1386c80f7: + handle cur_test for nested tests + 1 file changed, 3 insertions(+) + +Oran Agra in commit c7d4945f0: + Add daily CI for MacOS (#7759) + 1 file changed, 18 insertions(+) + +bodong.ybd in commit 32548264c: + Tests: Some fixes for macOS + 3 files changed, 26 insertions(+), 11 deletions(-) + +Oran Agra in commit 1e17f9812: + Fix cluster consistency-check test (#7754) + 1 file changed, 55 insertions(+), 29 deletions(-) + +Yossi Gottlieb in commit f4ecdf86a: + Tests: fix unmonitored servers. (#7756) + 1 file changed, 5 insertions(+) + +Oran Agra in commit 9f020050d: + fix broken cluster/sentinel tests by recent commit (#7752) + 1 file changed, 1 insertion(+), 1 deletion(-) + +Oran Agra in commit fdbabb496: + Improve valgrind support for cluster tests (#7725) + 3 files changed, 83 insertions(+), 23 deletions(-) + +Oran Agra in commit 35a6a0bbc: + test infra - add durable mode to work around test suite crashing + 3 files changed, 35 insertions(+), 3 deletions(-) + +Oran Agra in commit e3136b13f: + test infra - wait_done_loading + 2 files changed, 16 insertions(+), 36 deletions(-) + +Oran Agra in commit 83c75dbd9: + test infra - flushall between tests in external mode + 1 file changed, 1 insertion(+) + +Oran Agra in commit 265f5d3cf: + test infra - improve test skipping ability + 3 files changed, 91 insertions(+), 36 deletions(-) + +Oran Agra in commit fcd3a9908: + test infra - reduce disk space usage + 3 files changed, 33 insertions(+), 11 deletions(-) + +Oran Agra in commit b6ea4699f: + test infra - write test name to logfile + 3 files changed, 35 insertions(+) + +Yossi Gottlieb in commit 4a4b07fc6: + redis-cli: fix writeConn() buffer handling. (#7749) + 1 file changed, 37 insertions(+), 6 deletions(-) + +Oran Agra in commit f2d08de2e: + Print server startup messages after daemonization (#7743) + 1 file changed, 4 insertions(+), 4 deletions(-) + +Thandayuthapani in commit 77541d555: + Add masters/replicas options to redis-cli --cluster call command (#6491) + 1 file changed, 13 insertions(+), 2 deletions(-) + +Oran Agra in commit 91d13a854: + fix README about BUILD_WITH_SYSTEMD usage (#7739) + 1 file changed, 1 insertion(+), 1 deletion(-) + +Yossi Gottlieb in commit 88d03d965: + Fix double-make issue with make && make install. 
(#7734) + 1 file changed, 2 insertions(+) + ================================================================================ Redis 6.0.7 Released Fri Aug 28 11:05:09 IDT 2020 ================================================================================ diff --git a/src/help.h b/src/help.h index 64344aa63..5a8af427e 100644 --- a/src/help.h +++ b/src/help.h @@ -974,7 +974,7 @@ struct commandHelp { 8, "1.0.0" }, { "SET", - "key value [EX seconds|PX milliseconds] [NX|XX] [KEEPTTL]", + "key value [EX seconds|PX milliseconds|KEEPTTL] [NX|XX]", "Set the string value of a key", 1, "1.0.0" }, diff --git a/src/version.h b/src/version.h index 71f998326..e09e5c4c4 100644 --- a/src/version.h +++ b/src/version.h @@ -1 +1 @@ -#define REDIS_VERSION "6.0.7" +#define REDIS_VERSION "6.0.8" From a4aa190f200c8cf487cd1e260706648a128c47b0 Mon Sep 17 00:00:00 2001 From: John Sully Date: Mon, 28 Sep 2020 19:17:36 +0000 Subject: [PATCH 165/215] Drop min-clients-per-thread to a more reasonable number Former-commit-id: a0abc1eddd071f984950ad8918fad0259c495184 --- src/config.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/config.cpp b/src/config.cpp index fe1d5cd41..ea8d7a507 100644 --- a/src/config.cpp +++ b/src/config.cpp @@ -2294,7 +2294,7 @@ standardConfig configs[] = { createIntConfig("hz", NULL, MODIFIABLE_CONFIG, 0, INT_MAX, g_pserver->config_hz, CONFIG_DEFAULT_HZ, INTEGER_CONFIG, NULL, updateHZ), createIntConfig("min-replicas-to-write", "min-slaves-to-write", MODIFIABLE_CONFIG, 0, INT_MAX, g_pserver->repl_min_slaves_to_write, 0, INTEGER_CONFIG, NULL, updateGoodSlaves), createIntConfig("min-replicas-max-lag", "min-slaves-max-lag", MODIFIABLE_CONFIG, 0, INT_MAX, g_pserver->repl_min_slaves_max_lag, 10, INTEGER_CONFIG, NULL, updateGoodSlaves), - createIntConfig("min-clients-per-thread", NULL, MODIFIABLE_CONFIG, 0, 400, cserver.thread_min_client_threshold, 50, INTEGER_CONFIG, NULL, NULL), + createIntConfig("min-clients-per-thread", NULL, MODIFIABLE_CONFIG, 0, 400, cserver.thread_min_client_threshold, 20, INTEGER_CONFIG, NULL, NULL), createIntConfig("replica-quorum", NULL, MODIFIABLE_CONFIG, -1, INT_MAX, g_pserver->repl_quorum, -1, INTEGER_CONFIG, NULL, NULL), /* Unsigned int configs */ createUIntConfig("maxclients", NULL, MODIFIABLE_CONFIG, 1, UINT_MAX, g_pserver->maxclients, 10000, INTEGER_CONFIG, NULL, updateMaxclients), From 3c0556093bb22b8f8b59410c9da4b760e5432068 Mon Sep 17 00:00:00 2001 From: John Sully Date: Tue, 29 Sep 2020 03:26:06 +0000 Subject: [PATCH 166/215] Test RDB merge on load with active replication Former-commit-id: 28183f4b66fc4c865048080b61e599eeb1d2293b --- tests/integration/replication-active.tcl | 30 +++++++++++++++++++++--- 1 file changed, 27 insertions(+), 3 deletions(-) diff --git a/tests/integration/replication-active.tcl b/tests/integration/replication-active.tcl index 6c3c6d674..d6501fbeb 100644 --- a/tests/integration/replication-active.tcl +++ b/tests/integration/replication-active.tcl @@ -215,14 +215,38 @@ start_server {tags {"active-repl"} overrides {active-replica yes}} { assert_equal {1} [$slave wait 1 500] { "value should propogate within 0.5 seconds" } exec kill -SIGSTOP $slave_pid - after 3000 + after 3000 # Ensure testkey1 is gone. Note, we can't do this directly as the normal commands lie to us # about what is actually in the dict. 
The only way to know is with a count from info - assert_equal {1} [expr [string first {keys=1} [$master info keyspace]] >= 0] {"slave expired"} + assert_equal {1} [expr [string first {keys=1} [$master info keyspace]] >= 0] {"slave expired"} } - + exec kill -SIGCONT $slave_pid + test {Active replica merge works when reconnecting} { + $slave flushall + $slave set testkey foo + wait_for_condition 50 1000 { + [string match *foo* [$master get testkey]] + } else { + fail "Replication failed to propogate" + } + $slave replicaof no one + $master replicaof no one + after 100 + $master set testkey baz + after 100 + $slave set testkey bar + after 100 + $slave replicaof $master_host $master_port + after 1000 + $master replicaof $slave_host $slave_port + after 1000 + + assert_equal {bar} [$slave get testkey] + assert_equal {bar} [$master get testkey] + } + test {Active replica different databases} { $master select 3 $master set testkey abcd From 1fd53af71b5a2f7a07b30b2e6565b24135ca0380 Mon Sep 17 00:00:00 2001 From: John Sully Date: Wed, 30 Sep 2020 20:12:54 +0000 Subject: [PATCH 167/215] Fix stream replication failure with active replication, issue #238 Former-commit-id: a41366cc3a6568c0249a5ee022e517add55e286d --- src/t_stream.cpp | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/src/t_stream.cpp b/src/t_stream.cpp index b54df9c71..fd7a1329b 100644 --- a/src/t_stream.cpp +++ b/src/t_stream.cpp @@ -43,6 +43,7 @@ void streamFreeCG(streamCG *cg); void streamFreeNACK(streamNACK *na); size_t streamReplyWithRangeFromConsumerPEL(client *c, stream *s, streamID *start, streamID *end, size_t count, streamConsumer *consumer); +bool FInReplicaReplay(); /* ----------------------------------------------------------------------- * Low level stream encoding: a radix tree of listpacks. @@ -838,6 +839,9 @@ void streamPropagateXCLAIM(client *c, robj *key, streamCG *group, robj *groupnam * * Note that JUSTID is useful in order to avoid that XCLAIM will do * useless work in the replica side, trying to fetch the stream item. 
*/ + if (FInReplicaReplay()) + return; + robj *argv[14]; argv[0] = createStringObject("XCLAIM",6); argv[1] = key; From a543a502b816d1fc644e4e67d371dded8b46605c Mon Sep 17 00:00:00 2001 From: John Sully Date: Wed, 30 Sep 2020 20:17:02 +0000 Subject: [PATCH 168/215] Mac build break fix Former-commit-id: 5f3543921b4123c2216d9294c5eb7bfed007cbf5 --- src/server.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/server.h b/src/server.h index 197b9bf88..3705b199a 100644 --- a/src/server.h +++ b/src/server.h @@ -819,7 +819,7 @@ struct redisObjectExtended { uint64_t mvcc_tstamp; }; -typedef class redisObject { +typedef struct redisObject { protected: redisObject() {} From f7b7c5a168b4c6e2325c82148be373ab4377bf33 Mon Sep 17 00:00:00 2001 From: John Sully Date: Wed, 30 Sep 2020 20:17:51 +0000 Subject: [PATCH 169/215] Keep redis-cli.c C89 compatible Former-commit-id: 9798f2d711939b03e972207638d18dcaa3b2e473 --- src/redis-cli.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/redis-cli.c b/src/redis-cli.c index 2b736b3e3..5dfcd6fad 100644 --- a/src/redis-cli.c +++ b/src/redis-cli.c @@ -1442,7 +1442,7 @@ static int parseOptions(int argc, char **argv) { sdsfree(version); exit(0); } else if (!strcmp(argv[i],"--no-motd")) { - config.disable_motd = true; + config.disable_motd = 1; } else if (!strcmp(argv[i],"-3")) { config.resp3 = 1; } else if (CLUSTER_MANAGER_MODE() && argv[i][0] != '-') { From 51ae66539daf1aa2ab7aa5c216635fe5220237b9 Mon Sep 17 00:00:00 2001 From: John Sully Date: Wed, 30 Sep 2020 20:21:40 +0000 Subject: [PATCH 170/215] Fix TLS test failure due to bad merge Former-commit-id: 41466a4147bf675f69670016135f88589cc02a5c --- src/config.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/config.cpp b/src/config.cpp index 6158e7a87..d58bbbade 100644 --- a/src/config.cpp +++ b/src/config.cpp @@ -2477,7 +2477,7 @@ standardConfig configs[] = { createIntConfig("tls-session-cache-timeout", NULL, MODIFIABLE_CONFIG, 0, INT_MAX, g_pserver->tls_ctx_config.session_cache_timeout, 300, INTEGER_CONFIG, NULL, updateTlsCfgInt), createBoolConfig("tls-cluster", NULL, MODIFIABLE_CONFIG, g_pserver->tls_cluster, 0, NULL, NULL), createBoolConfig("tls-replication", NULL, MODIFIABLE_CONFIG, g_pserver->tls_replication, 0, NULL, NULL), - createBoolConfig("tls-auth-clients", NULL, MODIFIABLE_CONFIG, g_pserver->tls_auth_clients, 1, NULL, NULL), + createEnumConfig("tls-auth-clients", NULL, MODIFIABLE_CONFIG, tls_auth_clients_enum, g_pserver->tls_auth_clients, TLS_CLIENT_AUTH_YES, NULL, NULL), createBoolConfig("tls-prefer-server-ciphers", NULL, MODIFIABLE_CONFIG, g_pserver->tls_ctx_config.prefer_server_ciphers, 0, NULL, updateTlsCfgBool), createBoolConfig("tls-session-caching", NULL, MODIFIABLE_CONFIG, g_pserver->tls_ctx_config.session_caching, 1, NULL, updateTlsCfgBool), createStringConfig("tls-cert-file", NULL, MODIFIABLE_CONFIG, EMPTY_STRING_IS_NULL, g_pserver->tls_ctx_config.cert_file, NULL, NULL, updateTlsCfg), From 2085e21dca77727be6f86f1caa08329b78888aba Mon Sep 17 00:00:00 2001 From: John Sully Date: Thu, 1 Oct 2020 00:08:54 +0000 Subject: [PATCH 171/215] Fix module test failures due to locking Former-commit-id: 420ccdfbaebc452b9b374b54c6ebeec4a3ffea36 --- src/module.cpp | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/src/module.cpp b/src/module.cpp index c37071f87..73e176e7a 100644 --- a/src/module.cpp +++ b/src/module.cpp @@ -329,6 +329,7 @@ static int s_cAcquisitionsServer = 0; static int s_cAcquisitionsModule = 0; static std::mutex s_mutex; 
static std::condition_variable s_cv; +static std::recursive_mutex s_mutexModule; typedef void (*RedisModuleForkDoneHandler) (int exitcode, int bysignal, void *user_data); @@ -5055,6 +5056,7 @@ void moduleAcquireGIL(int fServerThread) { } else { + s_mutexModule.lock(); ++s_cAcquisitionsModule; fModuleGILWlocked++; } @@ -5079,6 +5081,8 @@ int moduleTryAcquireGIL(bool fServerThread) { } else { + if (!s_mutexModule.try_lock()) + return 1; ++s_cAcquisitionsModule; fModuleGILWlocked++; } @@ -5098,6 +5102,7 @@ void moduleReleaseGIL(int fServerThread) { } else { + s_mutexModule.unlock(); --s_cAcquisitionsModule; fModuleGILWlocked--; } From 71da0d44a5e8fe308440c7c39b016e3d0878f277 Mon Sep 17 00:00:00 2001 From: John Sully Date: Thu, 1 Oct 2020 11:17:56 -0400 Subject: [PATCH 172/215] Delete CONTRIBUTING Former-commit-id: b042495f37f3d22f7759a4c59044af40273c3a1a --- CONTRIBUTING | 50 -------------------------------------------------- 1 file changed, 50 deletions(-) delete mode 100644 CONTRIBUTING diff --git a/CONTRIBUTING b/CONTRIBUTING deleted file mode 100644 index 000edbeaf..000000000 --- a/CONTRIBUTING +++ /dev/null @@ -1,50 +0,0 @@ -Note: by contributing code to the Redis project in any form, including sending -a pull request via Github, a code fragment or patch via private email or -public discussion groups, you agree to release your code under the terms -of the BSD license that you can find in the COPYING file included in the Redis -source distribution. You will include BSD license in the COPYING file within -each source file that you contribute. - -# IMPORTANT: HOW TO USE REDIS GITHUB ISSUES - -* Github issues SHOULD ONLY BE USED to report bugs, and for DETAILED feature - requests. Everything else belongs to the Redis Google Group: - - https://groups.google.com/forum/m/#!forum/Redis-db - - PLEASE DO NOT POST GENERAL QUESTIONS that are not about bugs or suspected - bugs in the Github issues system. We'll be very happy to help you and provide - all the support in the mailing list. - - There is also an active community of Redis users at Stack Overflow: - - http://stackoverflow.com/questions/tagged/redis - -# How to provide a patch for a new feature - -1. If it is a major feature or a semantical change, please don't start coding -straight away: if your feature is not a conceptual fit you'll lose a lot of -time writing the code without any reason. Start by posting in the mailing list -and creating an issue at Github with the description of, exactly, what you want -to accomplish and why. Use cases are important for features to be accepted. -Here you'll see if there is consensus about your idea. - -2. If in step 1 you get an acknowledgment from the project leaders, use the - following procedure to submit a patch: - - a. Fork Redis on github ( http://help.github.com/fork-a-repo/ ) - b. Create a topic branch (git checkout -b my_branch) - c. Push to your branch (git push origin my_branch) - d. Initiate a pull request on github ( https://help.github.com/articles/creating-a-pull-request/ ) - e. Done :) - -3. Keep in mind that we are very overloaded, so issues and PRs sometimes wait -for a *very* long time. However this is not lack of interest, as the project -gets more and more users, we find ourselves in a constant need to prioritize -certain issues/PRs over others. If you think your issue/PR is very important -try to popularize it, have other users commenting and sharing their point of -view and so forth. This helps. - -4. For minor fixes just open a pull request on Github. - -Thanks! 
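
For context on the module GIL changes in patch 171 above, which build on the
RedisModule_ThreadSafeContextTryLock API introduced in patch 159: the fragment
below is a minimal usage sketch only, assuming the usual `#include
"redismodule.h"`. The helper name and the fall-back-to-blocking-lock policy are
illustrative assumptions and are not code from this series; the three
ThreadSafeContext lock calls themselves are the ones added or documented in the
patches above.

    /* Hypothetical helper for a module worker thread: try to take the GIL
     * without blocking, and only fall back to the blocking lock if another
     * thread currently holds it. */
    static void do_locked_work(RedisModuleCtx *ctx) {
        if (RedisModule_ThreadSafeContextTryLock(ctx) != REDISMODULE_OK) {
            /* Try-lock failed and errno was set (e.g. the GIL is busy);
             * fall back to the blocking acquisition. */
            RedisModule_ThreadSafeContextLock(ctx);
        }
        /* The GIL is held here, so it is safe to touch the keyspace,
         * for example via RedisModule_Call(). */
        RedisModule_ThreadSafeContextUnlock(ctx);
    }

Either path leaves exactly one lock held before the work section, so the single
unlock at the end is balanced in both cases.
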
From bb67320752baa0c6570df18d2ec84f6b62d7b59b Mon Sep 17 00:00:00 2001 From: Hanif Bin Ariffin Date: Sat, 3 Oct 2020 23:11:03 +0800 Subject: [PATCH 173/215] Removed dead code from a macro in zmalloc.cpp I think the compiler would have removed this no-op anyways but it definitely wasted me some 30 minutes on this :( I was hoping I could remove the branch through some bit-hacking but apparently its dead code :). Oh well, its 30 minutes of refreshing bit hacking. Signed-off-by: Hanif Bin Ariffin Former-commit-id: 8171e6de13311e3ad2e87c32d63060dcf3bd6055 --- src/zmalloc.cpp | 13 ++----------- 1 file changed, 2 insertions(+), 11 deletions(-) diff --git a/src/zmalloc.cpp b/src/zmalloc.cpp index e3d364a97..f88fee638 100644 --- a/src/zmalloc.cpp +++ b/src/zmalloc.cpp @@ -83,17 +83,8 @@ static_assert((PREFIX_SIZE % 16) == 0, "Our prefix must be modulo 16-bytes or ou #define realloc(ptr,size,type) realloc(ptr,size) #endif -#define update_zmalloc_stat_alloc(__n) do { \ - size_t _n = (__n); \ - if (_n&(sizeof(long)-1)) _n += sizeof(long)-(_n&(sizeof(long)-1)); \ - atomicIncr(used_memory,__n); \ -} while(0) - -#define update_zmalloc_stat_free(__n) do { \ - size_t _n = (__n); \ - if (_n&(sizeof(long)-1)) _n += sizeof(long)-(_n&(sizeof(long)-1)); \ - atomicDecr(used_memory,__n); \ -} while(0) +#define update_zmalloc_stat_alloc(__n) atomicIncr(used_memory,(__n)) +#define update_zmalloc_stat_free(__n) atomicDecr(used_memory,(__n)) static size_t used_memory = 0; pthread_mutex_t used_memory_mutex = PTHREAD_MUTEX_INITIALIZER; From 5c957fe228af903bd59a8d89d929ee3374329a0a Mon Sep 17 00:00:00 2001 From: Hanif Bin Ariffin Date: Mon, 5 Oct 2020 21:57:42 +0800 Subject: [PATCH 174/215] Fixed non-empty check in src/Makefile Per [GNU Make Manual](https://www.gnu.org/software/make/manual/html_node/Conditional-Syntax.html). To properly check for non-empty variable, one must strip whitespaces. Signed-off-by: Hanif Bin Ariffin Former-commit-id: 05e0f323456b8e667d10dabfa804757a2fc81b04 --- src/Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Makefile b/src/Makefile index c0aa26737..9db3e6934 100644 --- a/src/Makefile +++ b/src/Makefile @@ -47,7 +47,7 @@ endif USEASM?=true -ifneq ($(SANITIZE),) +ifneq ($(strip $(SANITIZE)),) CFLAGS+= -fsanitize=$(SANITIZE) -DSANITIZE CXXFLAGS+= -fsanitize=$(SANITIZE) -DSANITIZE LDFLAGS+= -fsanitize=$(SANITIZE) From 2c45f36dd4b655ac6d01b0e38ef1a2c5d9bc0574 Mon Sep 17 00:00:00 2001 From: John Sully Date: Fri, 9 Oct 2020 21:02:09 +0000 Subject: [PATCH 175/215] Remove dead code Former-commit-id: c6f13892e04607700e27ec963fa1da695e784c11 --- src/server.cpp | 31 ------------------------------- 1 file changed, 31 deletions(-) diff --git a/src/server.cpp b/src/server.cpp index 02b68f3da..baf53f8fe 100644 --- a/src/server.cpp +++ b/src/server.cpp @@ -2302,37 +2302,6 @@ void beforeSleep(struct aeEventLoop *eventLoop) { if (moduleCount()) moduleReleaseGIL(TRUE /*fServerThread*/); } -void beforeSleepLite(struct aeEventLoop *eventLoop) -{ - int iel = ielFromEventLoop(eventLoop); - - /* Try to process pending commands for clients that were just unblocked. */ - aeAcquireLock(); - processClients(); - if (listLength(g_pserver->rgthreadvar[iel].unblocked_clients)) { - processUnblockedClients(iel); - } - - /* Check if there are clients unblocked by modules that implement - * blocking commands. */ - if (moduleCount()) moduleHandleBlockedClients(ielFromEventLoop(eventLoop)); - int aof_state = g_pserver->aof_state; - aeReleaseLock(); - - /* Handle writes with pending output buffers. 
*/ - handleClientsWithPendingWrites(iel, aof_state); - - aeAcquireLock(); - /* Close clients that need to be closed asynchronous */ - freeClientsInAsyncFreeQueue(iel); - aeReleaseLock(); - - /* Before we are going to sleep, let the threads access the dataset by - * releasing the GIL. Redis main thread will not touch anything at this - * time. */ - if (moduleCount()) moduleReleaseGIL(TRUE /*fServerThread*/); -} - /* This function is called immadiately after the event loop multiplexing * API returned, and the control is going to soon return to Redis by invoking * the different events callbacks. */ From 2983ffe13930a8ab2ae6f2182aa517b98d94956b Mon Sep 17 00:00:00 2001 From: John Sully Date: Mon, 12 Oct 2020 04:28:39 +0000 Subject: [PATCH 176/215] Perf: remove unnecessary vector operations Former-commit-id: 1b46d4f09ab73e08a1e77fd7f73d18e98dbdce7c --- src/networking.cpp | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/src/networking.cpp b/src/networking.cpp index 719cfcc1b..f117839b2 100644 --- a/src/networking.cpp +++ b/src/networking.cpp @@ -1920,8 +1920,7 @@ void ProcessPendingAsyncWrites() * get it called, and so forth. */ int handleClientsWithPendingWrites(int iel, int aof_state) { std::unique_lock lockf(g_pserver->rgthreadvar[iel].lockPendingWrite); - auto &vec = g_pserver->rgthreadvar[iel].clients_pending_write; - int processed = (int)vec.size(); + int processed = 0; serverAssert(iel == (serverTL - g_pserver->rgthreadvar)); int ae_flags = AE_WRITABLE|AE_WRITE_THREADSAFE; @@ -1936,16 +1935,17 @@ int handleClientsWithPendingWrites(int iel, int aof_state) { ae_flags |= AE_BARRIER; } - while(!vec.empty()) { - client *c = vec.back(); + auto vec = std::move(g_pserver->rgthreadvar[iel].clients_pending_write); + processed += (int)vec.size(); + + for (client *c : vec) { AssertCorrectThread(c); - c->flags &= ~CLIENT_PENDING_WRITE; - vec.pop_back(); + uint64_t flags = c->flags.fetch_and(~CLIENT_PENDING_WRITE, std::memory_order_relaxed); /* If a client is protected, don't do anything, - * that may trigger write error or recreate handler. */ - if (c->flags & CLIENT_PROTECTED) continue; + * that may trigger write error or recreate handler. */ + if (flags & CLIENT_PROTECTED) continue; std::unique_locklock)> lock(c->lock); @@ -1964,7 +1964,7 @@ int handleClientsWithPendingWrites(int iel, int aof_state) { } /* If after the synchronous writes above we still have data to - * output to the client, we need to install the writable handler. */ + * output to the client, we need to install the writable handler. */ if (clientHasPendingReplies(c)) { if (connSetWriteHandlerWithBarrier(c->conn, sendReplyToClient, ae_flags, true) == C_ERR) freeClientAsync(c); From 239d874f1f26776811442befadf3be62d2c58b45 Mon Sep 17 00:00:00 2001 From: John Sully Date: Mon, 12 Oct 2020 05:50:58 +0000 Subject: [PATCH 177/215] Avoid excess locking, seeing up to 8% performance improvements Former-commit-id: 69a74a567bd381a84e71f954d4cb35eb878f6d3c --- src/server.cpp | 31 +++++++++++++++++++++++-------- 1 file changed, 23 insertions(+), 8 deletions(-) diff --git a/src/server.cpp b/src/server.cpp index baf53f8fe..7ad913809 100644 --- a/src/server.cpp +++ b/src/server.cpp @@ -2223,15 +2223,19 @@ void processClients(); void beforeSleep(struct aeEventLoop *eventLoop) { UNUSED(eventLoop); int iel = ielFromEventLoop(eventLoop); + + aeAcquireLock(); processClients(); /* Handle precise timeouts of blocked clients. */ handleBlockedClientsTimeout(); /* Handle TLS pending data. 
(must be done before flushAppendOnlyFile) */ - aeReleaseLock(); - tlsProcessPendingData(); - aeAcquireLock(); + if (tlsHasPendingData()) { + aeReleaseLock(); + tlsProcessPendingData(); + aeAcquireLock(); + } /* If tls still has pending unread data don't sleep at all. */ aeSetDontWait(eventLoop, tlsHasPendingData()); @@ -2289,9 +2293,17 @@ void beforeSleep(struct aeEventLoop *eventLoop) { /* Handle writes with pending output buffers. */ int aof_state = g_pserver->aof_state; - aeReleaseLock(); - handleClientsWithPendingWrites(iel, aof_state); - aeAcquireLock(); + + /* We try to handle writes at the end so we don't have to reacquire the lock, + but if there is a pending async close we need to ensure the writes happen + first so perform it here */ + bool fSentReplies = false; + if (listLength(g_pserver->clients_to_close)) { + aeReleaseLock(); + handleClientsWithPendingWrites(iel, aof_state); + aeAcquireLock(); + fSentReplies = true; + } /* Close clients that need to be closed asynchronous */ freeClientsInAsyncFreeQueue(iel); @@ -2299,6 +2311,9 @@ void beforeSleep(struct aeEventLoop *eventLoop) { /* Before we are going to sleep, let the threads access the dataset by * releasing the GIL. Redis main thread will not touch anything at this * time. */ + aeReleaseLock(); + if (!fSentReplies) + handleClientsWithPendingWrites(iel, aof_state); if (moduleCount()) moduleReleaseGIL(TRUE /*fServerThread*/); } @@ -3018,7 +3033,7 @@ static void initServerThread(struct redisServerThreadVars *pvar, int fMain) pvar->tlsfd_count = 0; pvar->cclients = 0; pvar->el = aeCreateEventLoop(g_pserver->maxclients+CONFIG_FDSET_INCR); - aeSetBeforeSleepProc(pvar->el, beforeSleep, 0); + aeSetBeforeSleepProc(pvar->el, beforeSleep, AE_SLEEP_THREADSAFE); aeSetAfterSleepProc(pvar->el, afterSleep, AE_SLEEP_THREADSAFE); pvar->current_client = nullptr; pvar->clients_paused = 0; @@ -3519,7 +3534,7 @@ void call(client *c, int flags) { /* Send the command to clients in MONITOR mode if applicable. * Administrative commands are considered too dangerous to be shown. 
*/ if (listLength(g_pserver->monitors) && - !g_pserver->loading && + !g_pserver->loading.load(std::memory_order_relaxed) && !(c->cmd->flags & (CMD_SKIP_MONITOR|CMD_ADMIN))) { replicationFeedMonitors(c,g_pserver->monitors,c->db->id,c->argv,c->argc); From b2b72ff41208e3995bd53e2f1e89e697ac45e84b Mon Sep 17 00:00:00 2001 From: John Sully Date: Mon, 12 Oct 2020 15:27:03 +0000 Subject: [PATCH 178/215] Significantly improve perf in replication scenarios Former-commit-id: ae8a94d6158cada41b7497d55fe12f5d776f0c75 --- src/server.cpp | 2 -- 1 file changed, 2 deletions(-) diff --git a/src/server.cpp b/src/server.cpp index 7ad913809..32610a974 100644 --- a/src/server.cpp +++ b/src/server.cpp @@ -3669,8 +3669,6 @@ void call(client *c, int flags) { } redisOpArrayFree(&g_pserver->also_propagate); } - - ProcessPendingAsyncWrites(); g_pserver->also_propagate = prev_also_propagate; From bfe4b6d1aa85c8994070c0926747eaaf297408da Mon Sep 17 00:00:00 2001 From: John Sully Date: Tue, 13 Oct 2020 06:35:16 +0000 Subject: [PATCH 179/215] Relax memory order where possible Former-commit-id: 3e996035ea1d5a40d02f84e916837a1d350b844b --- src/networking.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/networking.cpp b/src/networking.cpp index f117839b2..14b6a4428 100644 --- a/src/networking.cpp +++ b/src/networking.cpp @@ -293,7 +293,7 @@ int prepareClientToWrite(client *c, bool fAsync) { * -------------------------------------------------------------------------- */ int _addReplyToBuffer(client *c, const char *s, size_t len, bool fAsync) { - if (c->flags & CLIENT_CLOSE_AFTER_REPLY) return C_OK; + if (c->flags.load(std::memory_order_relaxed) & CLIENT_CLOSE_AFTER_REPLY) return C_OK; fAsync = fAsync && !FCorrectThread(c); // Not async if we're on the right thread if (fAsync) @@ -327,7 +327,7 @@ int _addReplyToBuffer(client *c, const char *s, size_t len, bool fAsync) { } void _addReplyProtoToList(client *c, const char *s, size_t len) { - if (c->flags & CLIENT_CLOSE_AFTER_REPLY) return; + if (c->flags.load(std::memory_order_relaxed) & CLIENT_CLOSE_AFTER_REPLY) return; AssertCorrectThread(c); listNode *ln = listLast(c->reply); From 665a9ed7c607edbe920ec525eadbb92011617a94 Mon Sep 17 00:00:00 2001 From: John Sully Date: Thu, 15 Oct 2020 20:15:35 +0000 Subject: [PATCH 180/215] Fix branding Former-commit-id: fb32bb12b1b070d230f89845b0415b1e7d8bc669 --- src/config.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/config.cpp b/src/config.cpp index d58bbbade..839a8b91e 100644 --- a/src/config.cpp +++ b/src/config.cpp @@ -641,7 +641,7 @@ void loadServerConfigFromString(char *config) { return; loaderr: - fprintf(stderr, "\n*** FATAL CONFIG FILE ERROR (Redis %s) ***\n", + fprintf(stderr, "\n*** FATAL CONFIG FILE ERROR (KeyDB %s) ***\n", KEYDB_REAL_VERSION); fprintf(stderr, "Reading the configuration file, at line %d\n", linenum); fprintf(stderr, ">>> '%s'\n", lines[i]); From a9f4c37604afcc25445dc4048ed261a762ff92c8 Mon Sep 17 00:00:00 2001 From: John Sully Date: Thu, 15 Oct 2020 23:10:17 +0000 Subject: [PATCH 181/215] Reduce async write copies Former-commit-id: ed369d722335ed4105748ef2bed5d64f3f32c433 --- src/networking.cpp | 87 ++++++++++++++++++++++++++-------------------- src/server.h | 5 ++- 2 files changed, 51 insertions(+), 41 deletions(-) diff --git a/src/networking.cpp b/src/networking.cpp index 14b6a4428..94f1c21c9 100644 --- a/src/networking.cpp +++ b/src/networking.cpp @@ -172,9 +172,7 @@ client *createClient(connection *conn, int iel) { c->pubsub_patterns = listCreate(); 
c->peerid = NULL; c->client_list_node = NULL; - c->bufAsync = NULL; - c->buflenAsync = 0; - c->bufposAsync = 0; + c->replyAsync = NULL; c->client_tracking_redirection = 0; c->casyncOpsPending = 0; c->master_error = 0; @@ -299,15 +297,27 @@ int _addReplyToBuffer(client *c, const char *s, size_t len, bool fAsync) { if (fAsync) { serverAssert(GlobalLocksAcquired()); - if ((c->buflenAsync - c->bufposAsync) < (int)len) + if (c->replyAsync == nullptr || (c->replyAsync->size - c->replyAsync->used) < len) { - int minsize = len + c->bufposAsync; - c->buflenAsync = std::max(minsize, c->buflenAsync*2 - c->buflenAsync); - c->bufAsync = (char*)zrealloc(c->bufAsync, c->buflenAsync, MALLOC_LOCAL); - c->buflenAsync = zmalloc_usable(c->bufAsync); + if (c->replyAsync == nullptr) { + size_t newsize = std::max(len, (size_t)PROTO_ASYNC_REPLY_CHUNK_BYTES); + + clientReplyBlock *replyNew = (clientReplyBlock*)zmalloc(sizeof(clientReplyBlock) + newsize); + replyNew->size = zmalloc_usable(replyNew) - sizeof(clientReplyBlock); + replyNew->used = 0; + c->replyAsync = replyNew; + } else { + size_t newsize = std::max(c->replyAsync->used + len, c->replyAsync->size*2); + clientReplyBlock *replyNew = (clientReplyBlock*)zmalloc(sizeof(clientReplyBlock) + newsize); + replyNew->size = zmalloc_usable(replyNew) - sizeof(clientReplyBlock); + replyNew->used = c->replyAsync->used; + memcpy(replyNew->buf(), c->replyAsync->buf(), c->replyAsync->used); + zfree(c->replyAsync); + c->replyAsync = replyNew; + } } - memcpy(c->bufAsync+c->bufposAsync,s,len); - c->bufposAsync += len; + memcpy(c->replyAsync->buf() + c->replyAsync->used,s,len); + c->replyAsync->used += len; } else { @@ -633,7 +643,7 @@ void *addReplyDeferredLenAsync(client *c) { if (FCorrectThread(c)) return addReplyDeferredLen(c); - return (void*)((ssize_t)c->bufposAsync); + return (void*)((ssize_t)(c->replyAsync ? c->replyAsync->used : 0)); } /* Populate the length object and try gluing it to the next chunk. 
*/ @@ -689,17 +699,22 @@ void setDeferredAggregateLenAsync(client *c, void *node, long length, char prefi char lenstr[128]; int lenstr_len = sprintf(lenstr, "%c%ld\r\n", prefix, length); - ssize_t idxSplice = (ssize_t)node; - serverAssert(idxSplice <= c->bufposAsync); - if (c->buflenAsync < (c->bufposAsync + lenstr_len)) + size_t idxSplice = (size_t)node; + serverAssert(idxSplice <= c->replyAsync->used); + if (c->replyAsync->size < (c->replyAsync->used + lenstr_len)) { - c->buflenAsync = std::max((int)(c->bufposAsync+lenstr_len), c->buflenAsync*2 - c->buflenAsync); - c->bufAsync = (char*)zrealloc(c->bufAsync, c->buflenAsync, MALLOC_LOCAL); + int newsize = std::max(c->replyAsync->used + lenstr_len, c->replyAsync->size*2); + clientReplyBlock *replyNew = (clientReplyBlock*)zmalloc(sizeof(clientReplyBlock) + newsize); + replyNew->size = zmalloc_usable(replyNew) - sizeof(clientReplyBlock); + replyNew->used = c->replyAsync->used; + memcpy(replyNew->buf(), c->replyAsync->buf(), c->replyAsync->used); + zfree(c->replyAsync); + c->replyAsync = replyNew; } - memmove(c->bufAsync + idxSplice + lenstr_len, c->bufAsync + idxSplice, c->bufposAsync - idxSplice); - memcpy(c->bufAsync + idxSplice, lenstr, lenstr_len); - c->bufposAsync += lenstr_len; + memmove(c->replyAsync->buf() + idxSplice + lenstr_len, c->replyAsync->buf() + idxSplice, c->replyAsync->used - idxSplice); + memcpy(c->replyAsync->buf() + idxSplice, lenstr, lenstr_len); + c->replyAsync->used += lenstr_len; } void setDeferredArrayLen(client *c, void *node, long length) { @@ -1640,7 +1655,7 @@ bool freeClient(client *c) { /* Release other dynamically allocated client structure fields, * and finally release the client structure itself. */ - zfree(c->bufAsync); + zfree(c->replyAsync); if (c->name) decrRefCount(c->name); zfree(c->argv); freeClientMultiState(c); @@ -1846,29 +1861,25 @@ void ProcessPendingAsyncWrites() serverAssert(c->fPendingAsyncWrite); if (c->flags & (CLIENT_CLOSE_ASAP | CLIENT_CLOSE_AFTER_REPLY)) { - c->bufposAsync = 0; - c->buflenAsync = 0; - zfree(c->bufAsync); - c->bufAsync = nullptr; + zfree(c->replyAsync); + c->replyAsync = nullptr; c->fPendingAsyncWrite = FALSE; continue; } - // TODO: Append to end of reply block? + int size = c->replyAsync->used; - size_t size = c->bufposAsync; - clientReplyBlock *reply = (clientReplyBlock*)zmalloc(size + sizeof(clientReplyBlock), MALLOC_LOCAL); - /* take over the allocation's internal fragmentation */ - reply->size = zmalloc_usable(reply) - sizeof(clientReplyBlock); - reply->used = c->bufposAsync; - memcpy(reply->buf(), c->bufAsync, c->bufposAsync); - listAddNodeTail(c->reply, reply); - c->reply_bytes += reply->size; + if (listLength(c->reply) == 0 && size <= (PROTO_REPLY_CHUNK_BYTES - c->bufpos)) { + memcpy(c->buf + c->bufpos, c->replyAsync->buf(), size); + c->bufpos += size; + } else { + c->reply_bytes += c->replyAsync->size; + listAddNodeTail(c->reply, c->replyAsync); + c->replyAsync = nullptr; + } - c->bufposAsync = 0; - c->buflenAsync = 0; - zfree(c->bufAsync); - c->bufAsync = nullptr; + zfree(c->replyAsync); + c->replyAsync = nullptr; c->fPendingAsyncWrite = FALSE; // Now install the write event handler @@ -3241,7 +3252,7 @@ void rewriteClientCommandArgument(client *c, int i, robj *newval) { * enforcing the client output length limits. 
*/ unsigned long getClientOutputBufferMemoryUsage(client *c) { unsigned long list_item_size = sizeof(listNode) + sizeof(clientReplyBlock); - return c->reply_bytes + (list_item_size*listLength(c->reply)) + c->buflenAsync; + return c->reply_bytes + (list_item_size*listLength(c->reply)) + (c->replyAsync ? c->replyAsync->size : 0); } /* Get the class of a client, used in order to enforce limits to different diff --git a/src/server.h b/src/server.h index 3705b199a..3e0c48f77 100644 --- a/src/server.h +++ b/src/server.h @@ -327,6 +327,7 @@ inline bool operator!=(const void *p, const robj_sharedptr &rhs) #define PROTO_MAX_QUERYBUF_LEN (1024*1024*1024) /* 1GB max query buffer. */ #define PROTO_IOBUF_LEN (1024*16) /* Generic I/O buffer size */ #define PROTO_REPLY_CHUNK_BYTES (16*1024) /* 16k output buffer */ +#define PROTO_ASYNC_REPLY_CHUNK_BYTES (1024) #define PROTO_INLINE_MAX_SIZE (1024*64) /* Max size of inline reads */ #define PROTO_MBULK_BIG_ARG (1024*32) #define LONG_STR_SIZE 21 /* Bytes needed for long -> str + '\0' */ @@ -1145,9 +1146,7 @@ typedef struct client { char buf[PROTO_REPLY_CHUNK_BYTES]; /* Async Response Buffer - other threads write here */ - int bufposAsync; - int buflenAsync; - char *bufAsync; + clientReplyBlock *replyAsync; int iel; /* the event loop index we're registered with */ struct fastlock lock; From 2b1dee4191624405413684e1b25576b81c1ee7ba Mon Sep 17 00:00:00 2001 From: John Sully Date: Fri, 16 Oct 2020 06:19:52 +0000 Subject: [PATCH 182/215] Avoid locking if we won't run a time event Former-commit-id: 33b05c859afd6665feae43c47d19f7a0a764c36b --- src/ae.cpp | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/src/ae.cpp b/src/ae.cpp index 789a6888b..cb500d7b6 100644 --- a/src/ae.cpp +++ b/src/ae.cpp @@ -589,7 +589,7 @@ static aeTimeEvent *aeSearchNearestTimer(aeEventLoop *eventLoop) /* Process time events */ static int processTimeEvents(aeEventLoop *eventLoop) { - std::unique_lock ulock(g_lock); + std::unique_lock ulock(g_lock, std::defer_lock); int processed = 0; aeTimeEvent *te; long long maxId; @@ -634,8 +634,10 @@ static int processTimeEvents(aeEventLoop *eventLoop) { eventLoop->timeEventHead = te->next; if (te->next) te->next->prev = te->prev; - if (te->finalizerProc) + if (te->finalizerProc) { + if (!ulock.owns_lock()) ulock.lock(); te->finalizerProc(eventLoop, te->clientData); + } zfree(te); te = next; continue; @@ -654,6 +656,7 @@ static int processTimeEvents(aeEventLoop *eventLoop) { if (now_sec > te->when_sec || (now_sec == te->when_sec && now_ms >= te->when_ms)) { + if (!ulock.owns_lock()) ulock.lock(); int retval; id = te->id; From b5074e9b1eb4ca3fe5d46878aa42da61de113399 Mon Sep 17 00:00:00 2001 From: John Sully Date: Fri, 16 Oct 2020 06:47:40 +0000 Subject: [PATCH 183/215] Fix replica buffer overflows Former-commit-id: 738c782f02517744662991091beb3f724661317e --- src/networking.cpp | 9 +++++---- src/server.cpp | 4 ++-- src/server.h | 4 ++-- 3 files changed, 9 insertions(+), 8 deletions(-) diff --git a/src/networking.cpp b/src/networking.cpp index 94f1c21c9..f5698282b 100644 --- a/src/networking.cpp +++ b/src/networking.cpp @@ -1911,12 +1911,13 @@ void ProcessPendingAsyncWrites() } else { - if (!c->fPendingAsyncWriteHandler) { - c->fPendingAsyncWriteHandler = true; + bool expected = false; + if (c->fPendingAsyncWriteHandler.compare_exchange_strong(expected, true)) { bool fResult = c->postFunction([](client *c) { c->fPendingAsyncWriteHandler = false; - connSetWriteHandler(c->conn, sendReplyToClient, true); - }); + 
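
At the end of this ProcessPendingAsyncWrites hunk, the plain test-and-set of fPendingAsyncWriteHandler becomes an atomic compare-and-swap, so that when several threads flush pending async writes only one of them actually posts the handler function. The pattern in isolation, with an illustrative name (scheduleOnce) instead of the real field:

#include <atomic>

std::atomic<bool> handlerPending{false};

/* Exactly one caller wins until the flag is reset to false again. */
bool scheduleOnce() {
    bool expected = false;
    return handlerPending.compare_exchange_strong(expected, true);
}
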
clientInstallWriteHandler(c); + handleClientsWithPendingWrites(c->iel, g_pserver->aof_state); + }, false); if (!fResult) c->fPendingAsyncWriteHandler = false; // if we failed to set the handler then prevent this from never being reset diff --git a/src/server.cpp b/src/server.cpp index 32610a974..3cf0b0189 100644 --- a/src/server.cpp +++ b/src/server.cpp @@ -3992,13 +3992,13 @@ int processCommand(client *c, int callFlags) { return C_OK; } -bool client::postFunction(std::function fn) { +bool client::postFunction(std::function fn, bool fLock) { this->casyncOpsPending++; return aePostFunction(g_pserver->rgthreadvar[this->iel].el, [this, fn]{ std::lock_guardlock)> lock(this->lock); --casyncOpsPending; fn(this); - }) == AE_OK; + }, false, fLock) == AE_OK; } /*================================== Shutdown =============================== */ diff --git a/src/server.h b/src/server.h index 3e0c48f77..198633f90 100644 --- a/src/server.h +++ b/src/server.h @@ -1083,7 +1083,7 @@ typedef struct client { std::atomic flags; /* Client flags: CLIENT_* macros. */ int casyncOpsPending; int fPendingAsyncWrite; /* NOTE: Not a flag because it is written to outside of the client lock (locked by the global lock instead) */ - int fPendingAsyncWriteHandler; + std::atomic fPendingAsyncWriteHandler; int authenticated; /* Needed when the default user requires auth. */ int replstate; /* Replication state if this is a replica. */ int repl_put_online_on_ack; /* Install replica write handler on ACK. */ @@ -1153,7 +1153,7 @@ typedef struct client { int master_error; // post a function from a non-client thread to run on its client thread - bool postFunction(std::function fn); + bool postFunction(std::function fn, bool fLock = true); } client; struct saveparam { From b0d9d3bc09192805fa29337d8ac023649f4aef1b Mon Sep 17 00:00:00 2001 From: John Sully Date: Fri, 16 Oct 2020 07:00:03 +0000 Subject: [PATCH 184/215] run when necessary Former-commit-id: d724e7226c690fdce0e6ee6cbf8afdb9481f51c5 --- src/networking.cpp | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/src/networking.cpp b/src/networking.cpp index f5698282b..9c4266df4 100644 --- a/src/networking.cpp +++ b/src/networking.cpp @@ -1915,8 +1915,10 @@ void ProcessPendingAsyncWrites() if (c->fPendingAsyncWriteHandler.compare_exchange_strong(expected, true)) { bool fResult = c->postFunction([](client *c) { c->fPendingAsyncWriteHandler = false; - clientInstallWriteHandler(c); - handleClientsWithPendingWrites(c->iel, g_pserver->aof_state); + if (c->bufpos || listLength(c->reply) || (c->flags & CLIENT_PENDING_WRITE)) { + clientInstallWriteHandler(c); + handleClientsWithPendingWrites(c->iel, g_pserver->aof_state); + } }, false); if (!fResult) From dcd6d809aad8498cccd4e00963776f6bc5f8dd7e Mon Sep 17 00:00:00 2001 From: John Sully Date: Wed, 21 Oct 2020 21:16:08 +0000 Subject: [PATCH 185/215] Remove leak warning with ASAN in the CLI Former-commit-id: 05cd92f7661ba1a96cdcac278720b7da0b6ba60e --- src/redis-cli.c | 18 ++++++++++++------ 1 file changed, 12 insertions(+), 6 deletions(-) diff --git a/src/redis-cli.c b/src/redis-cli.c index 5dfcd6fad..412760278 100644 --- a/src/redis-cli.c +++ b/src/redis-cli.c @@ -1878,14 +1878,15 @@ static void repl(void) { exit(0); } -static int noninteractive(int argc, char **argv) { +static int noninteractive(int argc, char ***argv) { int retval = 0; if (config.stdinarg) { - argv = zrealloc(argv, (argc+1)*sizeof(char*), MALLOC_LOCAL); - argv[argc] = readArgFromStdin(); - retval = issueCommand(argc+1, argv); + *argv = zrealloc(*argv, 
(argc+1)*sizeof(char*), MALLOC_LOCAL); + (*argv)[argc] = readArgFromStdin(); + retval = issueCommand(argc+1, *argv); + sdsfree((*argv)[argc]); } else { - retval = issueCommand(argc, argv); + retval = issueCommand(argc, *argv); } return retval; } @@ -7134,6 +7135,11 @@ int main(int argc, char **argv) { if (config.eval) { return evalMode(argc,argv); } else { - return noninteractive(argc,convertToSds(argc,argv)); + sds *sdsArgs = convertToSds(argc,argv); + int rval = noninteractive(argc,&sdsArgs); + for (int i = 0; i < argc; ++i) + sdsfree(sdsArgs[i]); + zfree(sdsArgs); + return rval; } } From c88a19bc5eb563a2af8fcda4fcf236ef79124f6b Mon Sep 17 00:00:00 2001 From: John Sully Date: Fri, 23 Oct 2020 15:57:39 +0000 Subject: [PATCH 186/215] Fix multithread test failures Former-commit-id: 7c39a9b0e193f5c0b327ff21cd49210037142642 --- src/networking.cpp | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/src/networking.cpp b/src/networking.cpp index 9c4266df4..f5698282b 100644 --- a/src/networking.cpp +++ b/src/networking.cpp @@ -1915,10 +1915,8 @@ void ProcessPendingAsyncWrites() if (c->fPendingAsyncWriteHandler.compare_exchange_strong(expected, true)) { bool fResult = c->postFunction([](client *c) { c->fPendingAsyncWriteHandler = false; - if (c->bufpos || listLength(c->reply) || (c->flags & CLIENT_PENDING_WRITE)) { - clientInstallWriteHandler(c); - handleClientsWithPendingWrites(c->iel, g_pserver->aof_state); - } + clientInstallWriteHandler(c); + handleClientsWithPendingWrites(c->iel, g_pserver->aof_state); }, false); if (!fResult) From 95606ffb102f38e2b70e5d2ddad6155a66b7e0bb Mon Sep 17 00:00:00 2001 From: John Sully Date: Fri, 23 Oct 2020 20:21:49 +0000 Subject: [PATCH 187/215] Allow the locker to be used even without a client Former-commit-id: 041446005b03121adf7ac061bd0fd2ec70d9418e --- src/aelocker.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/aelocker.h b/src/aelocker.h index e854f907b..777be4832 100644 --- a/src/aelocker.h +++ b/src/aelocker.h @@ -9,7 +9,7 @@ public: { } - void arm(client *c, bool fIfNeeded = false) // if a client is passed, then the client is already locked + void arm(client *c = nullptr, bool fIfNeeded = false) // if a client is passed, then the client is already locked { if (m_fArmed) return; From b336d269ca120ac64b623b52af86a1e5c05860cc Mon Sep 17 00:00:00 2001 From: John Sully Date: Sat, 24 Oct 2020 02:18:03 +0000 Subject: [PATCH 188/215] Remove addReply*Async methods since we already know if its async or not. 
This is just a source of bugs Former-commit-id: df22cdf6e91a1b9c390b69c4209c719ecf1e44f1 --- src/blocked.cpp | 16 +- src/module.cpp | 36 ++--- src/networking.cpp | 348 ++++++++++++-------------------------------- src/object.cpp | 2 +- src/pubsub.cpp | 22 +-- src/replication.cpp | 30 ++-- src/server.h | 25 +--- src/t_list.cpp | 8 +- src/t_stream.cpp | 14 +- src/t_zset.cpp | 10 +- src/timeout.cpp | 2 +- src/tracking.cpp | 16 +- 12 files changed, 172 insertions(+), 357 deletions(-) diff --git a/src/blocked.cpp b/src/blocked.cpp index 7f96fcfec..63cce0996 100644 --- a/src/blocked.cpp +++ b/src/blocked.cpp @@ -188,9 +188,9 @@ void replyToBlockedClientTimedOut(client *c) { if (c->btype == BLOCKED_LIST || c->btype == BLOCKED_ZSET || c->btype == BLOCKED_STREAM) { - addReplyNullArrayAsync(c); + addReplyNullArray(c); } else if (c->btype == BLOCKED_WAIT) { - addReplyLongLongAsync(c,replicationCountAcksByOffset(c->bpop.reploffset)); + addReplyLongLong(c,replicationCountAcksByOffset(c->bpop.reploffset)); } else if (c->btype == BLOCKED_MODULE) { moduleBlockedClientTimedOut(c); } else { @@ -216,7 +216,7 @@ void disconnectAllBlockedClients(void) { fastlock_lock(&c->lock); if (c->flags & CLIENT_BLOCKED) { - addReplySdsAsync(c,sdsnew( + addReplySds(c,sdsnew( "-UNBLOCKED force unblock from blocking operation, " "instance state changed (master -> replica?)\r\n")); unblockClient(c); @@ -373,7 +373,7 @@ void serveClientsBlockedOnStreamKey(robj *o, readyList *rl) { /* If the group was not found, send an error * to the consumer. */ if (!group) { - addReplyErrorAsync(receiver, + addReplyError(receiver, "-NOGROUP the consumer group this client " "was blocked on no longer exists"); unblockClient(receiver); @@ -404,12 +404,12 @@ void serveClientsBlockedOnStreamKey(robj *o, readyList *rl) { * extracted from it. Wrapped in a single-item * array, since we have just one key. 
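
The serveClientsBlockedOnStreamKey hunk above keeps the protocol split it always had: a RESP2 client gets the ready stream wrapped as a one-element array of [key, entries] pairs, while a RESP3 client gets a one-entry map keyed by the stream name. Reduced to the raw header bytes the difference is only this (a sketch, not library code):

#include <string>

/* Header emitted before the stream key in each protocol version. */
std::string blockedStreamHeader(int resp) {
    return (resp == 2) ? "*1\r\n*2\r\n"   /* array of one [key, entries] pair */
                       : "%1\r\n";        /* map with a single key */
}
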
*/ if (receiver->resp == 2) { - addReplyArrayLenAsync(receiver,1); - addReplyArrayLenAsync(receiver,2); + addReplyArrayLen(receiver,1); + addReplyArrayLen(receiver,2); } else { - addReplyMapLenAsync(receiver,1); + addReplyMapLen(receiver,1); } - addReplyBulkAsync(receiver,rl->key); + addReplyBulk(receiver,rl->key); streamPropInfo pi = { rl->key, diff --git a/src/module.cpp b/src/module.cpp index 73e176e7a..028ffd4cc 100644 --- a/src/module.cpp +++ b/src/module.cpp @@ -1358,7 +1358,7 @@ int RM_ReplyWithLongLong(RedisModuleCtx *ctx, long long ll) { AeLocker locker; std::unique_lock lock(c->lock); locker.arm(c); - addReplyLongLongAsync(c,ll); + addReplyLongLong(c,ll); return REDISMODULE_OK; } @@ -1371,9 +1371,9 @@ int replyWithStatus(RedisModuleCtx *ctx, const char *msg, const char *prefix) { AeLocker locker; std::unique_lock lock(c->lock); locker.arm(c); - addReplyProtoAsync(c,prefix,strlen(prefix)); - addReplyProtoAsync(c,msg,strlen(msg)); - addReplyProtoAsync(c,"\r\n",2); + addReplyProto(c,prefix,strlen(prefix)); + addReplyProto(c,msg,strlen(msg)); + addReplyProto(c,"\r\n",2); return REDISMODULE_OK; } @@ -1426,10 +1426,10 @@ int RM_ReplyWithArray(RedisModuleCtx *ctx, long len) { ctx->postponed_arrays = (void**)zrealloc(ctx->postponed_arrays,sizeof(void*)* (ctx->postponed_arrays_count+1), MALLOC_LOCAL); ctx->postponed_arrays[ctx->postponed_arrays_count] = - addReplyDeferredLenAsync(c); + addReplyDeferredLen(c); ctx->postponed_arrays_count++; } else { - addReplyArrayLenAsync(c,len); + addReplyArrayLen(c,len); } return REDISMODULE_OK; } @@ -1444,7 +1444,7 @@ int RM_ReplyWithNullArray(RedisModuleCtx *ctx) { AeLocker locker; std::unique_lock lock(c->lock); locker.arm(c); - addReplyNullArrayAsync(c); + addReplyNullArray(c); return REDISMODULE_OK; } @@ -1457,7 +1457,7 @@ int RM_ReplyWithEmptyArray(RedisModuleCtx *ctx) { AeLocker locker; std::unique_lock lock(c->lock); locker.arm(c); - addReplyAsync(c,shared.emptyarray); + addReply(c,shared.emptyarray); return REDISMODULE_OK; } @@ -1502,7 +1502,7 @@ void RM_ReplySetArrayLength(RedisModuleCtx *ctx, long len) { return; } ctx->postponed_arrays_count--; - setDeferredArrayLenAsync(c, + setDeferredArrayLen(c, ctx->postponed_arrays[ctx->postponed_arrays_count], len); if (ctx->postponed_arrays_count == 0) { @@ -1520,7 +1520,7 @@ int RM_ReplyWithStringBuffer(RedisModuleCtx *ctx, const char *buf, size_t len) { AeLocker locker; std::unique_lock lock(c->lock); locker.arm(c); - addReplyBulkCBufferAsync(c,(char*)buf,len); + addReplyBulkCBuffer(c,(char*)buf,len); return REDISMODULE_OK; } @@ -1534,7 +1534,7 @@ int RM_ReplyWithCString(RedisModuleCtx *ctx, const char *buf) { AeLocker locker; std::unique_lock lock(c->lock); locker.arm(c); - addReplyBulkCStringAsync(c,(char*)buf); + addReplyBulkCString(c,(char*)buf); return REDISMODULE_OK; } @@ -1547,7 +1547,7 @@ int RM_ReplyWithString(RedisModuleCtx *ctx, RedisModuleString *str) { AeLocker locker; std::unique_lock lock(c->lock); locker.arm(c); - addReplyBulkAsync(c,str); + addReplyBulk(c,str); return REDISMODULE_OK; } @@ -1560,7 +1560,7 @@ int RM_ReplyWithEmptyString(RedisModuleCtx *ctx) { AeLocker locker; std::unique_lock lock(c->lock); locker.arm(c); - addReplyAsync(c,shared.emptybulk); + addReply(c,shared.emptybulk); return REDISMODULE_OK; } @@ -1574,7 +1574,7 @@ int RM_ReplyWithVerbatimString(RedisModuleCtx *ctx, const char *buf, size_t len) AeLocker locker; std::unique_lock lock(c->lock); locker.arm(c); - addReplyVerbatimAsync(c, buf, len, "txt"); + addReplyVerbatim(c, buf, len, "txt"); return REDISMODULE_OK; } 
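
Every RM_ReplyWith* path above now shares the same prologue: lock the client, then arm the AeLocker before calling the ordinary addReply* function. Holding two locks together is only safe when they are acquired in a deadlock-free way; KeyDB does this with its own fastlock/AeLocker machinery, but the generic C++ expression of the same idea is std::scoped_lock, sketched here with illustrative mutex names:

#include <mutex>

std::mutex eventLoopLock;   /* stand-in for the global (AeLocker) lock */
std::mutex clientLock;      /* stand-in for the per-client lock */

void replyUnderBothLocks() {
    /* Acquires both mutexes via std::lock's deadlock-avoidance algorithm
     * and releases them on every exit path. */
    std::scoped_lock guard(eventLoopLock, clientLock);
    /* ... append to the client's reply while both are held ... */
}
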
@@ -1587,7 +1587,7 @@ int RM_ReplyWithNull(RedisModuleCtx *ctx) { AeLocker locker; std::unique_lock lock(c->lock); locker.arm(c); - addReplyNullAsync(c); + addReplyNull(c); return REDISMODULE_OK; } @@ -1604,7 +1604,7 @@ int RM_ReplyWithCallReply(RedisModuleCtx *ctx, RedisModuleCallReply *reply) { std::unique_lock lock(c->lock); locker.arm(c); sds proto = sdsnewlen(reply->proto, reply->protolen); - addReplySdsAsync(c,proto); + addReplySds(c,proto); return REDISMODULE_OK; } @@ -1620,7 +1620,7 @@ int RM_ReplyWithDouble(RedisModuleCtx *ctx, double d) { AeLocker locker; std::unique_lock lock(c->lock); locker.arm(c); - addReplyDoubleAsync(c,d); + addReplyDouble(c,d); return REDISMODULE_OK; } @@ -1638,7 +1638,7 @@ int RM_ReplyWithLongDouble(RedisModuleCtx *ctx, long double ld) { AeLocker locker; std::unique_lock lock(c->lock); locker.arm(c); - addReplyHumanLongDoubleAsync(c, ld); + addReplyHumanLongDouble(c, ld); return REDISMODULE_OK; } diff --git a/src/networking.cpp b/src/networking.cpp index f5698282b..1c3e528c5 100644 --- a/src/networking.cpp +++ b/src/networking.cpp @@ -40,8 +40,6 @@ #include "aelocker.h" static void setProtocolError(const char *errstr, client *c); -void addReplyLongLongWithPrefixCore(client *c, long long ll, char prefix, bool fAsync); -void addReplyBulkCStringCore(client *c, const char *s, bool fAsync); /* Return the size consumed from the allocator, for the specified SDS string, * including internal fragmentation. This function is used in order to compute @@ -251,10 +249,10 @@ void clientInstallAsyncWriteHandler(client *c) { * Typically gets called every time a reply is built, before adding more * data to the clients output buffers. If the function returns C_ERR no * data should be appended to the output buffers. */ -int prepareClientToWrite(client *c, bool fAsync) { - fAsync = fAsync && !FCorrectThread(c); // Not async if we're on the right thread - serverAssert(FCorrectThread(c) || fAsync); - if (FCorrectThread(c)) { +int prepareClientToWrite(client *c) { + bool fAsync = !FCorrectThread(c); // Not async if we're on the right thread + + if (!fAsync) { serverAssert(c->conn == nullptr || c->lock.fOwnLock()); } else { serverAssert(GlobalLocksAcquired()); @@ -290,10 +288,10 @@ int prepareClientToWrite(client *c, bool fAsync) { * Low level functions to add more data to output buffers. * -------------------------------------------------------------------------- */ -int _addReplyToBuffer(client *c, const char *s, size_t len, bool fAsync) { +int _addReplyToBuffer(client *c, const char *s, size_t len) { if (c->flags.load(std::memory_order_relaxed) & CLIENT_CLOSE_AFTER_REPLY) return C_OK; - fAsync = fAsync && !FCorrectThread(c); // Not async if we're on the right thread + bool fAsync = !FCorrectThread(c); if (fAsync) { serverAssert(GlobalLocksAcquired()); @@ -377,11 +375,12 @@ void _addReplyProtoToList(client *c, const char *s, size_t len) { * Higher level functions to queue data on the client output buffer. * The following functions are the ones that commands implementations will call. * -------------------------------------------------------------------------- */ -void addReplyCore(client *c, robj_roptr obj, bool fAsync) { - if (prepareClientToWrite(c, fAsync) != C_OK) return; +/* Add the object 'obj' string representation to the client output buffer. 
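
The reworked prepareClientToWrite and _addReplyToBuffer in this networking.cpp hunk no longer take an fAsync flag from the caller: they derive it from FCorrectThread(c), that is, from whether the calling thread owns the client's event loop. The whole removal of the *Async twins boils down to that run-time dispatch, sketched below with stand-in names (Client, owner, and the printf placeholders are not the real code):

#include <cstdio>
#include <thread>

struct Client {
    std::thread::id owner;   /* thread that runs this client's event loop */
};

/* One reply entry point; the direct-vs-staged decision is made per call. */
void appendReply(Client *c, const char *s) {
    if (std::this_thread::get_id() == c->owner)
        std::printf("direct: %s\n", s);   /* would write c->buf / c->reply */
    else
        std::printf("staged: %s\n", s);   /* would write c->replyAsync */
}
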
*/ +void addReply(client *c, robj_roptr obj) { + if (prepareClientToWrite(c) != C_OK) return; if (sdsEncodedObject(obj)) { - if (_addReplyToBuffer(c,(const char*)ptrFromObj(obj),sdslen((sds)ptrFromObj(obj)),fAsync) != C_OK) + if (_addReplyToBuffer(c,(const char*)ptrFromObj(obj),sdslen((sds)ptrFromObj(obj))) != C_OK) _addReplyProtoToList(c,(const char*)ptrFromObj(obj),sdslen((sds)ptrFromObj(obj))); } else if (obj->encoding == OBJ_ENCODING_INT) { /* For integer encoded strings we just convert it into a string @@ -389,44 +388,26 @@ void addReplyCore(client *c, robj_roptr obj, bool fAsync) { * to the output buffer. */ char buf[32]; size_t len = ll2string(buf,sizeof(buf),(long)ptrFromObj(obj)); - if (_addReplyToBuffer(c,buf,len,fAsync) != C_OK) + if (_addReplyToBuffer(c,buf,len) != C_OK) _addReplyProtoToList(c,buf,len); } else { serverPanic("Wrong obj->encoding in addReply()"); } } -/* Add the object 'obj' string representation to the client output buffer. */ -void addReply(client *c, robj_roptr obj) -{ - addReplyCore(c, obj, false); -} -void addReplyAsync(client *c, robj_roptr obj) -{ - addReplyCore(c, obj, true); -} - /* Add the SDS 's' string to the client output buffer, as a side effect * the SDS string is freed. */ -void addReplySdsCore(client *c, sds s, bool fAsync) { - if (prepareClientToWrite(c, fAsync) != C_OK) { +void addReplySds(client *c, sds s) { + if (prepareClientToWrite(c) != C_OK) { /* The caller expects the sds to be free'd. */ sdsfree(s); return; } - if (_addReplyToBuffer(c,s,sdslen(s), fAsync) != C_OK) + if (_addReplyToBuffer(c,s,sdslen(s)) != C_OK) _addReplyProtoToList(c,s,sdslen(s)); sdsfree(s); } -void addReplySds(client *c, sds s) { - addReplySdsCore(c, s, false); -} - -void addReplySdsAsync(client *c, sds s) { - addReplySdsCore(c, s, true); -} - /* This low level function just adds whatever protocol you send it to the * client buffer, trying the static buffer initially, and using the string * of objects if not possible. @@ -435,18 +416,10 @@ void addReplySdsAsync(client *c, sds s) { * if not needed. The object will only be created by calling * _addReplyProtoToList() if we fail to extend the existing tail object * in the list of objects. */ -void addReplyProtoCore(client *c, const char *s, size_t len, bool fAsync) { - if (prepareClientToWrite(c, fAsync) != C_OK) return; - if (_addReplyToBuffer(c,s,len,fAsync) != C_OK) - _addReplyProtoToList(c,s,len); -} - void addReplyProto(client *c, const char *s, size_t len) { - addReplyProtoCore(c, s, len, false); -} - -void addReplyProtoAsync(client *c, const char *s, size_t len) { - addReplyProtoCore(c, s, len, true); + if (prepareClientToWrite(c) != C_OK) return; + if (_addReplyToBuffer(c,s,len) != C_OK) + _addReplyProtoToList(c,s,len); } std::string escapeString(sds str) @@ -486,12 +459,12 @@ std::string escapeString(sds str) * code provided is used, otherwise the string "-ERR " for the generic * error code is automatically added. * Note that 's' must NOT end with \r\n. */ -void addReplyErrorLengthCore(client *c, const char *s, size_t len, bool fAsync) { +void addReplyErrorLength(client *c, const char *s, size_t len) { /* If the string already starts with "-..." then the error code * is provided by the caller. Otherwise we use "-ERR". 
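
The error helpers collapsed just below keep their framing rule: a message that does not already carry its own "-CODE" prefix gets "-ERR " prepended, and every error line is terminated with CRLF. As a stand-alone sketch of that rule (respError is an illustrative name, not a KeyDB function):

#include <string>

std::string respError(const std::string &msg) {
    std::string out;
    if (msg.empty() || msg[0] != '-') out += "-ERR ";   /* no caller-supplied code */
    out += msg;
    out += "\r\n";                                      /* RESP lines end in CRLF */
    return out;
}
/* respError("unknown command")     -> "-ERR unknown command\r\n"
 * respError("-WRONGTYPE wrong...") -> "-WRONGTYPE wrong...\r\n" */
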
*/ - if (!len || s[0] != '-') addReplyProtoCore(c,"-ERR ",5,fAsync); - addReplyProtoCore(c,s,len,fAsync); - addReplyProtoCore(c,"\r\n",2,fAsync); + if (!len || s[0] != '-') addReplyProto(c,"-ERR ",5); + addReplyProto(c,s,len); + addReplyProto(c,"\r\n",2); } /* Do some actions after an error reply was sent (Log if needed, updates stats, etc.) */ @@ -535,11 +508,6 @@ void afterErrorReply(client *c, const char *s, size_t len) { } } -void addReplyErrorLength(client *c, const char *s, size_t len) -{ - addReplyErrorLengthCore(c, s, len, false); -} - /* The 'err' object is expected to start with -ERRORCODE and end with \r\n. * Unlike addReplyErrorSds and others alike which rely on addReplyErrorLength. */ void addReplyErrorObject(client *c, robj *err) { @@ -547,13 +515,8 @@ void addReplyErrorObject(client *c, robj *err) { afterErrorReply(c, szFromObj(err), sdslen(szFromObj(err))-2); /* Ignore trailing \r\n */ } -/* See addReplyErrorLength for expectations from the input string. */ void addReplyError(client *c, const char *err) { - addReplyErrorLengthCore(c,err,strlen(err), false); -} - -void addReplyErrorAsync(client *c, const char *err) { - addReplyErrorLengthCore(c, err, strlen(err), true); + addReplyErrorLength(c, err, strlen(err)); afterErrorReply(c,err,strlen(err)); } @@ -629,19 +592,19 @@ void trimReplyUnusedTailSpace(client *c) { /* Adds an empty object to the reply list that will contain the multi bulk * length, which is not known when this function is called. */ -void *addReplyDeferredLen(client *c) { +void *addReplyDeferredLenCore(client *c) { /* Note that we install the write event here even if the object is not * ready to be sent, since we are sure that before returning to the * event loop setDeferredAggregateLen() will be called. */ - if (prepareClientToWrite(c, false) != C_OK) return NULL; + if (prepareClientToWrite(c) != C_OK) return NULL; trimReplyUnusedTailSpace(c); listAddNodeTail(c->reply,NULL); /* NULL is our placeholder. */ return listLast(c->reply); } -void *addReplyDeferredLenAsync(client *c) { +void *addReplyDeferredLen(client *c) { if (FCorrectThread(c)) - return addReplyDeferredLen(c); + return addReplyDeferredLenCore(c); return (void*)((ssize_t)(c->replyAsync ? c->replyAsync->used : 0)); } @@ -718,11 +681,10 @@ void setDeferredAggregateLenAsync(client *c, void *node, long length, char prefi } void setDeferredArrayLen(client *c, void *node, long length) { - setDeferredAggregateLen(c,node,length,'*'); -} - -void setDeferredArrayLenAsync(client *c, void *node, long length) { - setDeferredAggregateLenAsync(c, node, length, '*'); + if (FCorrectThread(c)) + setDeferredAggregateLen(c,node,length,'*'); + else + setDeferredAggregateLenAsync(c, node, length, '*'); } void setDeferredMapLen(client *c, void *node, long length) { @@ -748,15 +710,15 @@ void setDeferredPushLen(client *c, void *node, long length) { } /* Add a double as a bulk reply */ -void addReplyDoubleCore(client *c, double d, bool fAsync) { +void addReplyDouble(client *c, double d) { if (std::isinf(d)) { /* Libc in odd systems (Hi Solaris!) will format infinite in a * different way, so better to handle it in an explicit way. */ if (c->resp == 2) { - addReplyBulkCStringCore(c, d > 0 ? "inf" : "-inf", fAsync); + addReplyBulkCString(c, d > 0 ? "inf" : "-inf"); } else { - addReplyProtoCore(c, d > 0 ? ",inf\r\n" : ",-inf\r\n", - d > 0 ? 6 : 7, fAsync); + addReplyProto(c, d > 0 ? ",inf\r\n" : ",-inf\r\n", + d > 0 ? 
6 : 7); } } else { char dbuf[MAX_LONG_DOUBLE_CHARS+3], @@ -765,52 +727,34 @@ void addReplyDoubleCore(client *c, double d, bool fAsync) { if (c->resp == 2) { dlen = snprintf(dbuf,sizeof(dbuf),"%.17g",d); slen = snprintf(sbuf,sizeof(sbuf),"$%d\r\n%s\r\n",dlen,dbuf); - addReplyProtoCore(c,sbuf,slen,fAsync); + addReplyProto(c,sbuf,slen); } else { dlen = snprintf(dbuf,sizeof(dbuf),",%.17g\r\n",d); - addReplyProtoCore(c,dbuf,dlen,fAsync); + addReplyProto(c,dbuf,dlen); } } } -void addReplyDouble(client *c, double d) { - addReplyDoubleCore(c, d, false); -} - -void addReplyDoubleAsync(client *c, double d) { - addReplyDoubleCore(c, d, true); -} - -void addReplyBulkCore(client *c, robj_roptr obj, bool fAsync); - /* Add a long double as a bulk reply, but uses a human readable formatting * of the double instead of exposing the crude behavior of doubles to the * dear user. */ -void addReplyHumanLongDoubleCore(client *c, long double d, bool fAsync) { +void addReplyHumanLongDouble(client *c, long double d) { if (c->resp == 2) { robj *o = createStringObjectFromLongDouble(d,1); - addReplyBulkCore(c,o,fAsync); + addReplyBulk(c,o); decrRefCount(o); } else { char buf[MAX_LONG_DOUBLE_CHARS]; int len = ld2string(buf,sizeof(buf),d,LD_STR_HUMAN); - addReplyProtoCore(c,",",1,fAsync); - addReplyProtoCore(c,buf,len,fAsync); - addReplyProtoCore(c,"\r\n",2,fAsync); + addReplyProto(c,",",1); + addReplyProto(c,buf,len); + addReplyProto(c,"\r\n",2); } } -void addReplyHumanLongDouble(client *c, long double d) { - addReplyHumanLongDoubleCore(c, d, false); -} - -void addReplyHumanLongDoubleAsync(client *c, long double d) { - addReplyHumanLongDoubleCore(c, d, true); -} - /* Add a long long as integer reply or bulk len / multi bulk count. * Basically this is used to output . */ -void addReplyLongLongWithPrefixCore(client *c, long long ll, char prefix, bool fAsync) { +void addReplyLongLongWithPrefix(client *c, long long ll, char prefix) { char buf[128]; int len; @@ -818,10 +762,10 @@ void addReplyLongLongWithPrefixCore(client *c, long long ll, char prefix, bool f * so we have a few shared objects to use if the integer is small * like it is most of the times. 
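
addReplyLongLongWithPrefix, shown in this hunk, is the single place where aggregate and bulk headers get produced: a one-character type prefix, the decimal value, then CRLF, with small array and bulk lengths short-circuiting to shared preformatted objects. The on-the-wire shape, as a small sketch (respHeader is illustrative):

#include <cstdio>
#include <string>

/* ':' integer, '*' array length, '$' bulk length, '%' map, '>' push. */
std::string respHeader(char prefix, long long v) {
    char buf[32];
    int n = std::snprintf(buf, sizeof(buf), "%c%lld\r\n", prefix, v);
    return std::string(buf, n);
}
/* respHeader('*', 3) == "*3\r\n", respHeader(':', 42) == ":42\r\n" */
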
*/ if (prefix == '*' && ll < OBJ_SHARED_BULKHDR_LEN && ll >= 0) { - addReplyCore(c,shared.mbulkhdr[ll], fAsync); + addReply(c,shared.mbulkhdr[ll]); return; } else if (prefix == '$' && ll < OBJ_SHARED_BULKHDR_LEN && ll >= 0) { - addReplyCore(c,shared.bulkhdr[ll], fAsync); + addReply(c,shared.bulkhdr[ll]); return; } @@ -829,65 +773,33 @@ void addReplyLongLongWithPrefixCore(client *c, long long ll, char prefix, bool f len = ll2string(buf+1,sizeof(buf)-1,ll); buf[len+1] = '\r'; buf[len+2] = '\n'; - addReplyProtoCore(c,buf,len+3, fAsync); -} - -void addReplyLongLongWithPrefix(client *c, long long ll, char prefix) { - addReplyLongLongWithPrefixCore(c, ll, prefix, false); -} - -void addReplyLongLongCore(client *c, long long ll, bool fAsync) { - if (ll == 0) - addReplyCore(c,shared.czero, fAsync); - else if (ll == 1) - addReplyCore(c,shared.cone, fAsync); - else - addReplyLongLongWithPrefixCore(c,ll,':', fAsync); + addReplyProto(c,buf,len+3); } void addReplyLongLong(client *c, long long ll) { - addReplyLongLongCore(c, ll, false); -} - -void addReplyLongLongAsync(client *c, long long ll) { - addReplyLongLongCore(c, ll, true); -} - -void addReplyAggregateLenCore(client *c, long length, int prefix, bool fAsync) { - if (prefix == '*' && length < OBJ_SHARED_BULKHDR_LEN) - addReplyCore(c,shared.mbulkhdr[length], fAsync); + if (ll == 0) + addReply(c,shared.czero); + else if (ll == 1) + addReply(c,shared.cone); else - addReplyLongLongWithPrefixCore(c,length,prefix, fAsync); + addReplyLongLongWithPrefix(c,ll,':'); } void addReplyAggregateLen(client *c, long length, int prefix) { - addReplyAggregateLenCore(c, length, prefix, false); -} - -void addReplyArrayLenCore(client *c, long length, bool fAsync) { - addReplyAggregateLenCore(c,length,'*', fAsync); + if (prefix == '*' && length < OBJ_SHARED_BULKHDR_LEN) + addReply(c,shared.mbulkhdr[length]); + else + addReplyLongLongWithPrefix(c,length,prefix); } void addReplyArrayLen(client *c, long length) { - addReplyArrayLenCore(c, length, false); -} - -void addReplyArrayLenAsync(client *c, long length) { - addReplyArrayLenCore(c, length, true); -} - -void addReplyMapLenCore(client *c, long length, bool fAsync) { - int prefix = c->resp == 2 ? '*' : '%'; - if (c->resp == 2) length *= 2; - addReplyAggregateLenCore(c,length,prefix,fAsync); + addReplyAggregateLen(c,length,'*'); } void addReplyMapLen(client *c, long length) { - addReplyMapLenCore(c, length, false); -} - -void addReplyMapLenAsync(client *c, long length) { - addReplyMapLenCore(c, length, true); + int prefix = c->resp == 2 ? '*' : '%'; + if (c->resp == 2) length *= 2; + addReplyAggregateLen(c,length,prefix); } void addReplySetLen(client *c, long length) { @@ -901,38 +813,19 @@ void addReplyAttributeLen(client *c, long length) { addReplyAggregateLen(c,length,prefix); } -void addReplyPushLenCore(client *c, long length, bool fAsync) { - int prefix = c->resp == 2 ? '*' : '>'; - addReplyAggregateLenCore(c,length,prefix, fAsync); -} - void addReplyPushLen(client *c, long length) { - addReplyPushLenCore(c, length, false); + int prefix = c->resp == 2 ? 
'*' : '>'; + addReplyAggregateLen(c,length,prefix); } -void addReplyPushLenAsync(client *c, long length) { - addReplyPushLenCore(c, length, true); -} - -void addReplyNullCore(client *c, bool fAsync) { +void addReplyNull(client *c) { if (c->resp == 2) { - addReplyProtoCore(c,"$-1\r\n",5,fAsync); + addReplyProto(c,"$-1\r\n",5); } else { - addReplyProtoCore(c,"_\r\n",3,fAsync); + addReplyProto(c,"_\r\n",3); } } -void addReplyNull(client *c, robj_roptr objOldProtocol) { - if (c->resp < 3 && objOldProtocol != nullptr) - addReply(c, objOldProtocol); - else - addReplyNullCore(c, false); -} - -void addReplyNullAsync(client *c) { - addReplyNullCore(c, true); -} - void addReplyBool(client *c, int b) { if (c->resp == 2) { addReply(c, b ? shared.cone : shared.czero); @@ -945,107 +838,58 @@ void addReplyBool(client *c, int b) { * RESP2 had it, so API-wise we have this call, that will emit the correct * RESP2 protocol, however for RESP3 the reply will always be just the * Null type "_\r\n". */ -void addReplyNullArrayCore(client *c, bool fAsync) +void addReplyNullArray(client *c) { if (c->resp == 2) { - addReplyProtoCore(c,"*-1\r\n",5,fAsync); + addReplyProto(c,"*-1\r\n",5); } else { - addReplyProtoCore(c,"_\r\n",3,fAsync); + addReplyProto(c,"_\r\n",3); } } -void addReplyNullArray(client *c) -{ - addReplyNullArrayCore(c, false); -} - -void addReplyNullArrayAsync(client *c) -{ - addReplyNullArrayCore(c, true); -} - /* Create the length prefix of a bulk reply, example: $2234 */ -void addReplyBulkLenCore(client *c, robj_roptr obj, bool fAsync) { +void addReplyBulkLen(client *c, robj_roptr obj) { size_t len = stringObjectLen(obj); if (len < OBJ_SHARED_BULKHDR_LEN) - addReplyCore(c,shared.bulkhdr[len], fAsync); + addReply(c,shared.bulkhdr[len]); else - addReplyLongLongWithPrefixCore(c,len,'$', fAsync); -} - -void addReplyBulkLen(client *c, robj *obj) -{ - addReplyBulkLenCore(c, obj, false); + addReplyLongLongWithPrefix(c,len,'$'); } /* Add a Redis Object as a bulk reply */ -void addReplyBulkCore(client *c, robj_roptr obj, bool fAsync) { - addReplyBulkLenCore(c,obj,fAsync); - addReplyCore(c,obj,fAsync); - addReplyCore(c,shared.crlf,fAsync); -} - -void addReplyBulk(client *c, robj_roptr obj) -{ - addReplyBulkCore(c, obj, false); -} - -void addReplyBulkAsync(client *c, robj_roptr obj) -{ - addReplyBulkCore(c, obj, true); +void addReplyBulk(client *c, robj_roptr obj) { + addReplyBulkLen(c,obj); + addReply(c,obj); + addReply(c,shared.crlf); } /* Add a C buffer as bulk reply */ -void addReplyBulkCBufferCore(client *c, const void *p, size_t len, bool fAsync) { - addReplyLongLongWithPrefixCore(c,len,'$',fAsync); - addReplyProtoCore(c,(const char*)p,len,fAsync); - addReplyCore(c,shared.crlf,fAsync); -} - void addReplyBulkCBuffer(client *c, const void *p, size_t len) { - addReplyBulkCBufferCore(c, p, len, false); -} - -void addReplyBulkCBufferAsync(client *c, const void *p, size_t len) { - addReplyBulkCBufferCore(c, p, len, true); + addReplyLongLongWithPrefix(c,len,'$'); + addReplyProto(c,(const char*)p,len); + addReply(c,shared.crlf); } /* Add sds to reply (takes ownership of sds and frees it) */ -void addReplyBulkSdsCore(client *c, sds s, bool fAsync) { - addReplyLongLongWithPrefixCore(c,sdslen(s),'$', fAsync); - addReplySdsCore(c,s,fAsync); - addReplyCore(c,shared.crlf,fAsync); -} - -void addReplyBulkSds(client *c, sds s) { - addReplyBulkSdsCore(c, s, false); -} - -void addReplyBulkSdsAsync(client *c, sds s) { - addReplyBulkSdsCore(c, s, true); +void addReplyBulkSds(client *c, sds s) { + 
addReplyLongLongWithPrefix(c,sdslen(s),'$'); + addReplySds(c,s); + addReply(c,shared.crlf); } /* Add a C null term string as bulk reply */ -void addReplyBulkCStringCore(client *c, const char *s, bool fAsync) { +void addReplyBulkCString(client *c, const char *s) { if (s == NULL) { if (c->resp < 3) - addReplyCore(c,shared.nullbulk, fAsync); + addReply(c,shared.nullbulk); else - addReplyNullCore(c,fAsync); + addReplyNull(c); } else { - addReplyBulkCBufferCore(c,s,strlen(s),fAsync); + addReplyBulkCBuffer(c,s,strlen(s)); } } -void addReplyBulkCString(client *c, const char *s) { - addReplyBulkCStringCore(c, s, false); -} - -void addReplyBulkCStringAsync(client *c, const char *s) { - addReplyBulkCStringCore(c, s, true); -} - /* Add a long long as a bulk reply */ void addReplyBulkLongLong(client *c, long long ll) { char buf[64]; @@ -1064,9 +908,9 @@ void addReplyBulkLongLong(client *c, long long ll) { * three first characters of the extension are used, and if the * provided one is shorter than that, the remaining is filled with * spaces. */ -void addReplyVerbatimCore(client *c, const char *s, size_t len, const char *ext, bool fAsync) { +void addReplyVerbatim(client *c, const char *s, size_t len, const char *ext) { if (c->resp == 2) { - addReplyBulkCBufferCore(c,s,len,fAsync); + addReplyBulkCBuffer(c,s,len); } else { char buf[32]; size_t preflen = snprintf(buf,sizeof(buf),"=%zu\r\nxxx:",len+4); @@ -1078,20 +922,12 @@ void addReplyVerbatimCore(client *c, const char *s, size_t len, const char *ext, p[i] = *ext++; } } - addReplyProtoCore(c,buf,preflen,fAsync); - addReplyProtoCore(c,s,len,fAsync); - addReplyProtoCore(c,"\r\n",2,fAsync); + addReplyProto(c,buf,preflen); + addReplyProto(c,s,len); + addReplyProto(c,"\r\n",2); } } -void addReplyVerbatim(client *c, const char *s, size_t len, const char *ext) { - addReplyVerbatimCore(c, s, len, ext, false); -} - -void addReplyVerbatimAsync(client *c, const char *s, size_t len, const char *ext) { - addReplyVerbatimCore(c, s, len, ext, true); -} - /* Add an array of C strings as status replies with a heading. * This function is typically invoked by from commands that support * subcommands in response to the 'help' subcommand. The help array @@ -1127,7 +963,7 @@ void addReplySubcommandSyntaxError(client *c) { /* Append 'src' client output buffers into 'dst' client output buffers. 
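
addReplyVerbatim above falls back to a plain bulk string on RESP2, while RESP3 gets the verbatim type: "=<len>\r\n" where <len> counts the payload plus a 4-byte "ext:" prefix, ext being a three-character format hint such as "txt" (space-padded when shorter). A sketch of just that framing, under those assumptions:

#include <string>

std::string respVerbatim(const std::string &payload, std::string ext) {
    ext = (ext + "   ").substr(0, 3);   /* pad or trim the hint to 3 chars */
    std::string out = "=" + std::to_string(payload.size() + 4) + "\r\n";
    out += ext + ":" + payload + "\r\n";
    return out;
}
/* respVerbatim("Some text", "txt") -> "=13\r\ntxt:Some text\r\n" */
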
* This function clears the output buffers of 'src' */ void AddReplyFromClient(client *dst, client *src) { - if (prepareClientToWrite(dst, false) != C_OK) + if (prepareClientToWrite(dst) != C_OK) return; addReplyProto(dst,src->buf, src->bufpos); if (listLength(src->reply)) @@ -1907,7 +1743,7 @@ void ProcessPendingAsyncWrites() if (FCorrectThread(c)) { - prepareClientToWrite(c, false); // queue an event + prepareClientToWrite(c); // queue an event } else { @@ -2898,7 +2734,7 @@ NULL if (target && target->flags & CLIENT_BLOCKED) { std::unique_lock ul(target->lock); if (unblock_error) - addReplyErrorAsync(target, + addReplyError(target, "-UNBLOCKED client unblocked via CLIENT UNBLOCK"); else replyToBlockedClientTimedOut(target); diff --git a/src/object.cpp b/src/object.cpp index 4b32c5a4d..4988fa0bf 100644 --- a/src/object.cpp +++ b/src/object.cpp @@ -433,7 +433,7 @@ robj *resetRefCount(robj *obj) { int checkType(client *c, robj_roptr o, int type) { if (o->type != type) { - addReplyAsync(c,shared.wrongtypeerr); + addReply(c,shared.wrongtypeerr); return 1; } return 0; diff --git a/src/pubsub.cpp b/src/pubsub.cpp index 3ccbb6a66..176a8271e 100644 --- a/src/pubsub.cpp +++ b/src/pubsub.cpp @@ -43,12 +43,12 @@ int clientSubscriptionsCount(client *c); * addReply*() API family. */ void addReplyPubsubMessage(client *c, robj *channel, robj *msg) { if (c->resp == 2) - addReplyAsync(c,shared.mbulkhdr[3]); + addReply(c,shared.mbulkhdr[3]); else - addReplyPushLenAsync(c,3); - addReplyAsync(c,shared.messagebulk); - addReplyBulkAsync(c,channel); - if (msg) addReplyBulkAsync(c,msg); + addReplyPushLen(c,3); + addReply(c,shared.messagebulk); + addReplyBulk(c,channel); + if (msg) addReplyBulk(c,msg); } /* Send a pubsub message of type "pmessage" to the client. The difference @@ -56,13 +56,13 @@ void addReplyPubsubMessage(client *c, robj *channel, robj *msg) { * this message format also includes the pattern that matched the message. */ void addReplyPubsubPatMessage(client *c, robj *pat, robj *channel, robj *msg) { if (c->resp == 2) - addReplyAsync(c,shared.mbulkhdr[4]); + addReply(c,shared.mbulkhdr[4]); else - addReplyPushLenAsync(c,4); - addReplyAsync(c,shared.pmessagebulk); - addReplyBulkAsync(c,pat); - addReplyBulkAsync(c,channel); - addReplyBulkAsync(c,msg); + addReplyPushLen(c,4); + addReply(c,shared.pmessagebulk); + addReplyBulk(c,pat); + addReplyBulk(c,channel); + addReplyBulk(c,msg); } /* Send the pubsub subscription notification to the client. */ diff --git a/src/replication.cpp b/src/replication.cpp index cf63c6e81..b9de0bce5 100644 --- a/src/replication.cpp +++ b/src/replication.cpp @@ -315,7 +315,7 @@ void replicationFeedSlave(client *replica, int dictid, robj **argv, int argc, bo if (g_pserver->repl_backlog && fSendRaw) feedReplicationBacklogWithObject(selectcmd); /* Send it to slaves */ - addReplyAsync(replica,selectcmd); + addReply(replica,selectcmd); if (dictid < 0 || dictid >= PROTO_SHARED_SELECT_CMDS) decrRefCount(selectcmd); @@ -329,18 +329,18 @@ void replicationFeedSlave(client *replica, int dictid, robj **argv, int argc, bo if (fSendRaw) { /* Add the multi bulk length. */ - addReplyArrayLenAsync(replica,argc); + addReplyArrayLen(replica,argc); /* Finally any additional argument that was not stored inside the * static buffer if any (from j to argc). 
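
replicationFeedSlave streams each propagated command to the replica in the same shape the client protocol uses: a multi-bulk header for argc followed by every argument as a bulk string (the loop right after this point). Serialized by hand that framing looks like the following sketch, which is illustrative rather than the replication code itself:

#include <string>
#include <vector>

std::string respCommand(const std::vector<std::string> &argv) {
    std::string out = "*" + std::to_string(argv.size()) + "\r\n";
    for (const auto &arg : argv)
        out += "$" + std::to_string(arg.size()) + "\r\n" + arg + "\r\n";
    return out;
}
/* respCommand({"SET", "foo", "bar"}) ->
 * "*3\r\n$3\r\nSET\r\n$3\r\nfoo\r\n$3\r\nbar\r\n" */
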
*/ for (int j = 0; j < argc; j++) - addReplyBulkAsync(replica,argv[j]); + addReplyBulk(replica,argv[j]); } else { struct redisCommand *cmd = lookupCommand(szFromObj(argv[0])); sds buf = catCommandForAofAndActiveReplication(sdsempty(), cmd, argv, argc); - addReplyProtoAsync(replica, buf, sdslen(buf)); + addReplyProto(replica, buf, sdslen(buf)); sdsfree(buf); } } @@ -516,21 +516,21 @@ void replicationFeedSlaves(list *slaves, int dictid, robj **argv, int argc) { * or are already in sync with the master. */ if (!fSendRaw) - addReplyProtoAsync(replica, proto, cchProto); + addReplyProto(replica, proto, cchProto); - addReplyProtoAsync(replica,fake->buf,fake->bufpos); + addReplyProto(replica,fake->buf,fake->bufpos); listRewind(fake->reply, &liReply); while ((lnReply = listNext(&liReply))) { clientReplyBlock* reply = (clientReplyBlock*)listNodeValue(lnReply); - addReplyProtoAsync(replica, reply->buf(), reply->used); + addReplyProto(replica, reply->buf(), reply->used); } if (!fSendRaw) { - addReplyAsync(replica,shared.crlf); - addReplyProtoAsync(replica, szDbNum, cchDbNum); - addReplyProtoAsync(replica, szMvcc, cchMvcc); + addReply(replica,shared.crlf); + addReplyProto(replica, szDbNum, cchDbNum); + addReplyProto(replica, szMvcc, cchMvcc); } } @@ -605,7 +605,7 @@ void replicationFeedSlavesFromMasterStream(list *slaves, char *buf, size_t bufle /* Don't feed slaves that are still waiting for BGSAVE to start */ if (replica->replstate == SLAVE_STATE_WAIT_BGSAVE_START) continue; - addReplyProtoAsync(replica,buf,buflen); + addReplyProto(replica,buf,buflen); } if (listLength(slaves)) @@ -651,7 +651,7 @@ void replicationFeedMonitors(client *c, list *monitors, int dictid, robj **argv, // When writing to clients on other threads the global lock is sufficient provided we only use AddReply*Async() if (FCorrectThread(c)) lock.lock(); - addReplyAsync(monitor,cmdobj); + addReply(monitor,cmdobj); } decrRefCount(cmdobj); } @@ -3267,7 +3267,7 @@ void replicaofCommand(client *c) { miNew->masterhost, miNew->masterport, client); sdsfree(client); } - addReplyAsync(c,shared.ok); + addReply(c,shared.ok); } /* ROLE command: provide information about the role of the instance @@ -3747,7 +3747,7 @@ void processClientsWaitingReplicas(void) { last_numreplicas > c->bpop.numreplicas) { unblockClient(c); - addReplyLongLongAsync(c,last_numreplicas); + addReplyLongLong(c,last_numreplicas); } else { int numreplicas = replicationCountAcksByOffset(c->bpop.reploffset); @@ -3755,7 +3755,7 @@ void processClientsWaitingReplicas(void) { last_offset = c->bpop.reploffset; last_numreplicas = numreplicas; unblockClient(c); - addReplyLongLongAsync(c,numreplicas); + addReplyLongLong(c,numreplicas); } } fastlock_unlock(&c->lock); diff --git a/src/server.h b/src/server.h index 198633f90..9f7d8f22c 100644 --- a/src/server.h +++ b/src/server.h @@ -2004,17 +2004,14 @@ void acceptTcpHandler(aeEventLoop *el, int fd, void *privdata, int mask); void acceptTLSHandler(aeEventLoop *el, int fd, void *privdata, int mask); void acceptUnixHandler(aeEventLoop *el, int fd, void *privdata, int mask); void readQueryFromClient(connection *conn); -void addReplyNull(client *c, robj_roptr objOldProtocol = nullptr); +void addReplyNull(client *c); void addReplyNullArray(client *c); -void addReplyNullArrayAsync(client *c); void addReplyBool(client *c, int b); void addReplyVerbatim(client *c, const char *s, size_t len, const char *ext); -void addReplyVerbatimAsync(client *c, const char *s, size_t len, const char *ext); void addReplyProto(client *c, const char *s, size_t 
len); void addReplyBulk(client *c, robj_roptr obj); void AddReplyFromClient(client *c, client *src); void addReplyBulkCString(client *c, const char *s); -void addReplyBulkCStringAsync(client *c, const char *s); void addReplyBulkCBuffer(client *c, const void *p, size_t len); void addReplyBulkLongLong(client *c, long long ll); void addReply(client *c, robj_roptr obj); @@ -2026,10 +2023,9 @@ void addReplyError(client *c, const char *err); void addReplyStatus(client *c, const char *status); void addReplyDouble(client *c, double d); void addReplyHumanLongDouble(client *c, long double d); -void addReplyHumanLongDoubleAsync(client *c, long double d); void addReplyLongLong(client *c, long long ll); #ifdef __cplusplus -void addReplyLongLongWithPrefixCore(client *c, long long ll, char prefix, bool fAsync); +void addReplyLongLongWithPrefixCore(client *c, long long ll, char prefix); #endif void addReplyArrayLen(client *c, long length); void addReplyMapLen(client *c, long length); @@ -2074,23 +2070,6 @@ void linkClient(client *c); void protectClient(client *c); void unprotectClient(client *c); -// Special Thread-safe addReply() commands for posting messages to clients from a different thread -void addReplyAsync(client *c, robj_roptr obj); -void addReplyArrayLenAsync(client *c, long length); -void addReplyProtoAsync(client *c, const char *s, size_t len); -void addReplyBulkAsync(client *c, robj_roptr obj); -void addReplyBulkCBufferAsync(client *c, const void *p, size_t len); -void addReplyErrorAsync(client *c, const char *err); -void addReplyMapLenAsync(client *c, long length); -void addReplyNullAsync(client *c); -void addReplyDoubleAsync(client *c, double d); -void *addReplyDeferredLenAsync(client *c); -void setDeferredArrayLenAsync(client *c, void *node, long length); -void addReplySdsAsync(client *c, sds s); -void addReplyBulkSdsAsync(client *c, sds s); -void addReplyPushLenAsync(client *c, long length); -void addReplyLongLongAsync(client *c, long long ll); - void ProcessPendingAsyncWrites(void); client *lookupClientByID(uint64_t id); diff --git a/src/t_list.cpp b/src/t_list.cpp index 74634f91d..e81d94f1a 100644 --- a/src/t_list.cpp +++ b/src/t_list.cpp @@ -677,7 +677,7 @@ static void rpoplpushHandlePush(client *c, robj *dstkey, robj *dstobj, robj *val listTypePush(dstobj,value,LIST_HEAD); notifyKeyspaceEvent(NOTIFY_LIST,"lpush",dstkey,c->db->id); /* Always send the pushed value to the client. */ - addReplyBulkAsync(c,value); + addReplyBulk(c,value); } void rpoplpushCommand(client *c) { @@ -758,9 +758,9 @@ int serveClientBlockedOnList(client *receiver, robj *key, robj *dstkey, redisDb db->id,argv,2,PROPAGATE_AOF|PROPAGATE_REPL); /* BRPOP/BLPOP */ - addReplyArrayLenAsync(receiver,2); - addReplyBulkAsync(receiver,key); - addReplyBulkAsync(receiver,value); + addReplyArrayLen(receiver,2); + addReplyBulk(receiver,key); + addReplyBulk(receiver,value); /* Notify event. */ const char *event = (where == LIST_HEAD) ? 
"lpop" : "rpop"; diff --git a/src/t_stream.cpp b/src/t_stream.cpp index fd7a1329b..66157dbb1 100644 --- a/src/t_stream.cpp +++ b/src/t_stream.cpp @@ -818,7 +818,7 @@ static void addReplyStreamID(client *c, streamID *id) { static void addReplyStreamIDAsync(client *c, streamID *id) { sds replyid = sdscatfmt(sdsempty(),"%U-%U",id->ms,id->seq); - addReplyBulkSdsAsync(c,replyid); + addReplyBulkSds(c,replyid); } /* Similar to the above function, but just creates an object, usually useful @@ -968,7 +968,7 @@ size_t streamReplyWithRange(client *c, stream *s, streamID *start, streamID *end } if (!(flags & STREAM_RWR_RAWENTRIES)) - arraylen_ptr = addReplyDeferredLenAsync(c); + arraylen_ptr = addReplyDeferredLen(c); streamIteratorStart(&si,s,start,end,rev); while(streamIteratorGetID(&si,&id,&numfields)) { /* Update the group last_id if needed. */ @@ -982,18 +982,18 @@ size_t streamReplyWithRange(client *c, stream *s, streamID *start, streamID *end /* Emit a two elements array for each item. The first is * the ID, the second is an array of field-value pairs. */ - addReplyArrayLenAsync(c,2); + addReplyArrayLen(c,2); addReplyStreamIDAsync(c,&id); - addReplyArrayLenAsync(c,numfields*2); + addReplyArrayLen(c,numfields*2); /* Emit the field-value pairs. */ while(numfields--) { unsigned char *key, *value; int64_t key_len, value_len; streamIteratorGetField(&si,&key,&value,&key_len,&value_len); - addReplyBulkCBufferAsync(c,key,key_len); - addReplyBulkCBufferAsync(c,value,value_len); + addReplyBulkCBuffer(c,key,key_len); + addReplyBulkCBuffer(c,value,value_len); } /* If a group is passed, we need to create an entry in the @@ -1052,7 +1052,7 @@ size_t streamReplyWithRange(client *c, stream *s, streamID *start, streamID *end streamPropagateGroupID(c,spi->keyname,group,spi->groupname); streamIteratorStop(&si); - if (arraylen_ptr) setDeferredArrayLenAsync(c,arraylen_ptr,arraylen); + if (arraylen_ptr) setDeferredArrayLen(c,arraylen_ptr,arraylen); return arraylen; } diff --git a/src/t_zset.cpp b/src/t_zset.cpp index a2118b348..973e22ce6 100644 --- a/src/t_zset.cpp +++ b/src/t_zset.cpp @@ -3165,11 +3165,11 @@ void genericZpopCommand(client *c, robj **keyv, int keyc, int where, int emitkey return; } - void *arraylen_ptr = addReplyDeferredLenAsync(c); + void *arraylen_ptr = addReplyDeferredLen(c); long arraylen = 0; /* We emit the key only for the blocking variant. */ - if (emitkey) addReplyBulkAsync(c,key); + if (emitkey) addReplyBulk(c,key); /* Remove the element. 
*/ do { @@ -3219,8 +3219,8 @@ void genericZpopCommand(client *c, robj **keyv, int keyc, int where, int emitkey signalModifiedKey(c,c->db,key); } - addReplyBulkCBufferAsync(c,ele,sdslen(ele)); - addReplyDoubleAsync(c,score); + addReplyBulkCBuffer(c,ele,sdslen(ele)); + addReplyDouble(c,score); sdsfree(ele); arraylen += 2; @@ -3232,7 +3232,7 @@ void genericZpopCommand(client *c, robj **keyv, int keyc, int where, int emitkey } } while(--count); - setDeferredArrayLenAsync(c,arraylen_ptr,arraylen + (emitkey != 0)); + setDeferredArrayLen(c,arraylen_ptr,arraylen + (emitkey != 0)); } /* ZPOPMIN key [] */ diff --git a/src/timeout.cpp b/src/timeout.cpp index d59bc44e6..18a553211 100644 --- a/src/timeout.cpp +++ b/src/timeout.cpp @@ -179,7 +179,7 @@ int getTimeoutFromObjectOrReply(client *c, robj *object, mstime_t *timeout, int } if (tval < 0) { - addReplyErrorAsync(c,"timeout is negative"); + addReplyError(c,"timeout is negative"); return C_ERR; } diff --git a/src/tracking.cpp b/src/tracking.cpp index ad10c3a57..58c675096 100644 --- a/src/tracking.cpp +++ b/src/tracking.cpp @@ -215,9 +215,9 @@ void sendTrackingMessage(client *c, char *keyname, size_t keylen, int proto) { * are unable to send invalidation messages to the redirected * connection, because the client no longer exist. */ if (c->resp > 2) { - addReplyPushLenAsync(c,3); - addReplyBulkCBufferAsync(c,"tracking-redir-broken",21); - addReplyLongLongAsync(c,c->client_tracking_redirection); + addReplyPushLen(c,3); + addReplyBulkCBuffer(c,"tracking-redir-broken",21); + addReplyLongLong(c,c->client_tracking_redirection); } return; } @@ -232,8 +232,8 @@ void sendTrackingMessage(client *c, char *keyname, size_t keylen, int proto) { * in Pub/Sub mode, we can support the feature with RESP 2 as well, * by sending Pub/Sub messages in the __redis__:invalidate channel. */ if (c->resp > 2) { - addReplyPushLenAsync(c,2); - addReplyBulkCBufferAsync(c,"invalidate",10); + addReplyPushLen(c,2); + addReplyBulkCBuffer(c,"invalidate",10); } else if (using_redirection && c->flags & CLIENT_PUBSUB) { /* We use a static object to speedup things, however we assume * that addReplyPubsubMessage() will not take a reference. */ @@ -248,10 +248,10 @@ void sendTrackingMessage(client *c, char *keyname, size_t keylen, int proto) { /* Send the "value" part, which is the array of keys. */ if (proto) { - addReplyProtoAsync(c,keyname,keylen); + addReplyProto(c,keyname,keylen); } else { - addReplyArrayLenAsync(c,1); - addReplyBulkCBufferAsync(c,keyname,keylen); + addReplyArrayLen(c,1); + addReplyBulkCBuffer(c,keyname,keylen); } } From c6ebf961f4ec07873339089263900b5b3dff5255 Mon Sep 17 00:00:00 2001 From: John Sully Date: Mon, 26 Oct 2020 02:28:38 +0000 Subject: [PATCH 189/215] Avoid unnecessary memory fences Former-commit-id: 88962af8b2fe835fb5d542013062cec8c771c6c6 --- src/networking.cpp | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/src/networking.cpp b/src/networking.cpp index 1c3e528c5..d843e8d99 100644 --- a/src/networking.cpp +++ b/src/networking.cpp @@ -258,20 +258,22 @@ int prepareClientToWrite(client *c) { serverAssert(GlobalLocksAcquired()); } - if (c->flags & CLIENT_FORCE_REPLY) return C_OK; // FORCE REPLY means we're doing something else with the buffer. + auto flags = c->flags.load(std::memory_order_relaxed); + + if (flags & CLIENT_FORCE_REPLY) return C_OK; // FORCE REPLY means we're doing something else with the buffer. 
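
This prepareClientToWrite hunk loads the atomic flag word once with memory_order_relaxed and then tests bits on the local copy, instead of performing a separate default (sequentially consistent) atomic load for every individual flag check. The idiom in isolation, with made-up flag values:

#include <atomic>
#include <cstdint>

std::atomic<uint64_t> clientFlags{0};

bool replySuppressed() {
    /* One relaxed load; plain bit tests on the snapshot afterwards. */
    uint64_t flags = clientFlags.load(std::memory_order_relaxed);
    const uint64_t REPLY_OFF = 1u << 0, REPLY_SKIP = 1u << 1;   /* illustrative bits */
    return (flags & (REPLY_OFF | REPLY_SKIP)) != 0;
}
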
// do not install a write handler /* If it's the Lua client we always return ok without installing any * handler since there is no socket at all. */ - if (c->flags & (CLIENT_LUA|CLIENT_MODULE)) return C_OK; + if (flags & (CLIENT_LUA|CLIENT_MODULE)) return C_OK; /* CLIENT REPLY OFF / SKIP handling: don't send replies. */ - if (c->flags & (CLIENT_REPLY_OFF|CLIENT_REPLY_SKIP)) return C_ERR; + if (flags & (CLIENT_REPLY_OFF|CLIENT_REPLY_SKIP)) return C_ERR; /* Masters don't receive replies, unless CLIENT_MASTER_FORCE_REPLY flag * is set. */ - if ((c->flags & CLIENT_MASTER) && - !(c->flags & CLIENT_MASTER_FORCE_REPLY)) return C_ERR; + if ((flags & CLIENT_MASTER) && + !(flags & CLIENT_MASTER_FORCE_REPLY)) return C_ERR; if (!c->conn) return C_ERR; /* Fake client for AOF loading. */ From 0e44e11e3ae522182245f08f651eda6cbfcb9c67 Mon Sep 17 00:00:00 2001 From: John Sully Date: Tue, 27 Oct 2020 01:54:13 +0000 Subject: [PATCH 190/215] Fix multithreaded test failure Former-commit-id: 1840601f8efb27174efa0a66f78de8c490b5bba4 --- src/server.cpp | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/src/server.cpp b/src/server.cpp index 3cf0b0189..13086037d 100644 --- a/src/server.cpp +++ b/src/server.cpp @@ -3540,6 +3540,12 @@ void call(client *c, int flags) { replicationFeedMonitors(c,g_pserver->monitors,c->db->id,c->argv,c->argc); } + /* We need to transfer async writes before a client's repl state gets changed. Otherwise + we won't be able to propogate them correctly. */ + if (c->cmd->flags & CMD_CATEGORY_REPLICATION) { + ProcessPendingAsyncWrites(); + } + /* Initialization: clear the flags that must be set by the command on * demand, and initialize the array for additional commands propagation. */ c->flags &= ~(CLIENT_FORCE_AOF|CLIENT_FORCE_REPL|CLIENT_PREVENT_PROP); From cba89cf9f9d553c2fa45952aa25c27b9b1e5c94b Mon Sep 17 00:00:00 2001 From: John Sully Date: Tue, 27 Oct 2020 04:52:35 +0000 Subject: [PATCH 191/215] Ensure MVCC timestamp is incremented after the real time is updated Former-commit-id: 9c5b59ac010fa1e948d5e51d1cd408c7e743d66d --- src/server.cpp | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/src/server.cpp b/src/server.cpp index 13086037d..e5ddd7a83 100644 --- a/src/server.cpp +++ b/src/server.cpp @@ -3555,6 +3555,7 @@ void call(client *c, int flags) { /* Call the command. */ dirty = g_pserver->dirty; updateCachedTime(0); + incrementMvccTstamp(); start = g_pserver->ustime; c->cmd->proc(c); serverTL->commandsExecuted++; @@ -3838,8 +3839,6 @@ int processCommand(client *c, int callFlags) { } } - incrementMvccTstamp(); - /* Handle the maxmemory directive. * * Note that we do not want to reclaim memory if we are here re-entering From b226b318ff038d8af6e36c18b256b588ed45e536 Mon Sep 17 00:00:00 2001 From: John Sully Date: Tue, 27 Oct 2020 04:53:19 +0000 Subject: [PATCH 192/215] Prevent crashes on shutdown due to lock being held Former-commit-id: 6a74f524e558100dfb3e54779020cd3407706e08 --- src/server.cpp | 13 +++++++------ tests/unit/maxmemory.tcl | 2 +- 2 files changed, 8 insertions(+), 7 deletions(-) diff --git a/src/server.cpp b/src/server.cpp index e5ddd7a83..492937e40 100644 --- a/src/server.cpp +++ b/src/server.cpp @@ -2221,10 +2221,11 @@ void processClients(); * The most important is freeClientsInAsyncFreeQueue but we also * call some other low-risk functions. 
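
Patch 191 above moves incrementMvccTstamp() so it runs immediately after updateCachedTime(0): a version stamp derived from the cached clock has to be generated after the clock is refreshed, or a command can be stamped with the previous tick. A generic sketch of such a clock-derived, strictly monotonic stamp follows; the bit layout and the 20-bit sequence field are assumptions for illustration, not KeyDB's actual encoding:

#include <atomic>
#include <chrono>
#include <cstdint>

std::atomic<uint64_t> lastStamp{0};

uint64_t nextMvccStamp() {
    using namespace std::chrono;
    /* High bits from the (freshly updated) wall clock, low bits from a
     * sequence so stamps taken within the same millisecond still increase. */
    uint64_t ms = duration_cast<milliseconds>(
        system_clock::now().time_since_epoch()).count();
    uint64_t candidate = ms << 20;
    uint64_t prev = lastStamp.load();
    for (;;) {
        uint64_t next = candidate > prev ? candidate : prev + 1;
        if (lastStamp.compare_exchange_weak(prev, next)) return next;
    }
}
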
*/ void beforeSleep(struct aeEventLoop *eventLoop) { + AeLocker locker; UNUSED(eventLoop); int iel = ielFromEventLoop(eventLoop); - aeAcquireLock(); + locker.arm(); processClients(); /* Handle precise timeouts of blocked clients. */ @@ -2232,9 +2233,9 @@ void beforeSleep(struct aeEventLoop *eventLoop) { /* Handle TLS pending data. (must be done before flushAppendOnlyFile) */ if (tlsHasPendingData()) { - aeReleaseLock(); + locker.release(); tlsProcessPendingData(); - aeAcquireLock(); + locker.arm(); } /* If tls still has pending unread data don't sleep at all. */ @@ -2299,9 +2300,9 @@ void beforeSleep(struct aeEventLoop *eventLoop) { first so perform it here */ bool fSentReplies = false; if (listLength(g_pserver->clients_to_close)) { - aeReleaseLock(); + locker.disarm(); handleClientsWithPendingWrites(iel, aof_state); - aeAcquireLock(); + locker.arm(); fSentReplies = true; } @@ -2311,7 +2312,7 @@ void beforeSleep(struct aeEventLoop *eventLoop) { /* Before we are going to sleep, let the threads access the dataset by * releasing the GIL. Redis main thread will not touch anything at this * time. */ - aeReleaseLock(); + locker.disarm(); if (!fSentReplies) handleClientsWithPendingWrites(iel, aof_state); if (moduleCount()) moduleReleaseGIL(TRUE /*fServerThread*/); diff --git a/tests/unit/maxmemory.tcl b/tests/unit/maxmemory.tcl index e12fedc91..b399e9b06 100644 --- a/tests/unit/maxmemory.tcl +++ b/tests/unit/maxmemory.tcl @@ -215,7 +215,7 @@ proc test_slave_buffers {test_name cmd_count payload_len limit_memory pipeline} set used_no_repl [expr {$new_used - $mem_not_counted_for_evict}] set delta [expr {($used_no_repl - $client_buf) - ($orig_used_no_repl - $orig_client_buf)}] - assert {[$master dbsize] == 100} + assert_equal [$master dbsize] 100 assert {$slave_buf > 2*1024*1024} ;# some of the data may have been pushed to the OS buffers set delta_max [expr {$cmd_count / 2}] ;# 1 byte unaccounted for, with 1M commands will consume some 1MB assert {$delta < $delta_max && $delta > -$delta_max} From 18c34bbfe27539748dc9c03af68342fb1ba726fb Mon Sep 17 00:00:00 2001 From: John Sully Date: Tue, 27 Oct 2020 06:23:14 +0000 Subject: [PATCH 193/215] Active replica test reliability enhancements Former-commit-id: 444555d3e4ec6e9469dae847dc631f2be263fb5e --- tests/integration/replication-active.tcl | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/tests/integration/replication-active.tcl b/tests/integration/replication-active.tcl index d6501fbeb..5515c3c0f 100644 --- a/tests/integration/replication-active.tcl +++ b/tests/integration/replication-active.tcl @@ -235,7 +235,7 @@ start_server {tags {"active-repl"} overrides {active-replica yes}} { $master replicaof no one after 100 $master set testkey baz - after 100 + after 200 $slave set testkey bar after 100 $slave replicaof $master_host $master_port @@ -243,8 +243,8 @@ start_server {tags {"active-repl"} overrides {active-replica yes}} { $master replicaof $slave_host $slave_port after 1000 - assert_equal {bar} [$slave get testkey] - assert_equal {bar} [$master get testkey] + assert_equal {bar} [$slave get testkey] {replica is correct} + assert_equal {bar} [$master get testkey] {master is correct} } test {Active replica different databases} { @@ -271,6 +271,11 @@ start_server {tags {"active-repl"} overrides {active-replica yes}} { test {Active Replica Merges Database On Sync} { $slave set testkeyA foo r replicaof $slave_host $slave_port + wait_for_condition 50 1000 { + [string match *active-replica* [r role]] + } else { + fail [$slave 
role] + } after 1000 assert_equal 2 [r dbsize] } From 588fa164c376965e2e398a49f7436133d5bb4540 Mon Sep 17 00:00:00 2001 From: John Sully Date: Tue, 27 Oct 2020 06:41:33 +0000 Subject: [PATCH 194/215] Disarm not release D'oh Former-commit-id: 3e33fbcd351a719126d30405c4dc9209ad381954 --- src/server.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/server.cpp b/src/server.cpp index 492937e40..a162a8c1c 100644 --- a/src/server.cpp +++ b/src/server.cpp @@ -2233,7 +2233,7 @@ void beforeSleep(struct aeEventLoop *eventLoop) { /* Handle TLS pending data. (must be done before flushAppendOnlyFile) */ if (tlsHasPendingData()) { - locker.release(); + locker.disarm(); tlsProcessPendingData(); locker.arm(); } From 7db922f44bc6a7e1cf3e2f29bd584a5e0fe0de14 Mon Sep 17 00:00:00 2001 From: John Sully Date: Tue, 27 Oct 2020 07:25:43 +0000 Subject: [PATCH 195/215] Additional test reliability fixes Former-commit-id: dad5a902d394719ba722e487879fc283ca148786 --- tests/integration/replication-active.tcl | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/tests/integration/replication-active.tcl b/tests/integration/replication-active.tcl index 5515c3c0f..233b513ac 100644 --- a/tests/integration/replication-active.tcl +++ b/tests/integration/replication-active.tcl @@ -78,9 +78,9 @@ start_server {tags {"active-repl"} overrides {active-replica yes}} { $master flushall } - test {Replication of EXPIREMEMBER (set) command (Active)} { + test {Replication of EXPIREMEMBER (set) command (Active)} { $master sadd testkey a b c d - wait_for_condition 50 100 { + wait_for_condition 50 200 { [$master debug digest] eq [$slave debug digest] } else { fail "Failed to replicate set" @@ -274,10 +274,13 @@ start_server {tags {"active-repl"} overrides {active-replica yes}} { wait_for_condition 50 1000 { [string match *active-replica* [r role]] } else { - fail [$slave role] + fail "Replica did not connect" + } + wait_for_condition 50 1000 { + [string match "2" [r dbsize]] + } else { + fail "key did not propogate" } - after 1000 - assert_equal 2 [r dbsize] } } } From e085772d0144da927f6d5c5d04b6e1133a00af74 Mon Sep 17 00:00:00 2001 From: John Sully Date: Fri, 6 Nov 2020 19:24:48 +0000 Subject: [PATCH 196/215] Second implementation of nested hashes Former-commit-id: ba950a3d1f5708ed986f9b348eafcace6a7c22b9 --- src/Makefile | 2 +- src/networking.cpp | 4 + src/object.cpp | 2 + src/sds.h | 10 ++ src/server.cpp | 21 ++- src/server.h | 19 +-- src/t_nhash.cpp | 353 ++++++++++++++++++++++++++++++++++++++++++ src/t_nhash.h | 6 + tests/test_helper.tcl | 1 + 9 files changed, 406 insertions(+), 12 deletions(-) create mode 100644 src/t_nhash.cpp create mode 100644 src/t_nhash.h diff --git a/src/Makefile b/src/Makefile index 9db3e6934..0822e82f2 100644 --- a/src/Makefile +++ b/src/Makefile @@ -276,7 +276,7 @@ endif REDIS_SERVER_NAME=keydb-server REDIS_SENTINEL_NAME=keydb-sentinel -REDIS_SERVER_OBJ=adlist.o quicklist.o ae.o anet.o dict.o server.o sds.o zmalloc.o lzf_c.o lzf_d.o pqsort.o zipmap.o sha1.o ziplist.o release.o networking.o util.o object.o db.o replication.o rdb.o t_string.o t_list.o t_set.o t_zset.o t_hash.o config.o aof.o pubsub.o multi.o debug.o sort.o intset.o syncio.o cluster.o crc16.o endianconv.o slowlog.o scripting.o bio.o rio.o rand.o memtest.o crcspeed.o crc64.o bitops.o sentinel.o notify.o setproctitle.o blocked.o hyperloglog.o latency.o sparkline.o redis-check-rdb.o redis-check-aof.o geo.o lazyfree.o module.o evict.o expire.o geohash.o geohash_helper.o childinfo.o defrag.o siphash.o 
rax.o t_stream.o listpack.o localtime.o acl.o storage.o rdb-s3.o fastlock.o new.o tracking.o cron.o connection.o tls.o sha256.o motd.o timeout.o setcpuaffinity.o $(ASM_OBJ) +REDIS_SERVER_OBJ=adlist.o quicklist.o ae.o anet.o dict.o server.o sds.o zmalloc.o lzf_c.o lzf_d.o pqsort.o zipmap.o sha1.o ziplist.o release.o networking.o util.o object.o db.o replication.o rdb.o t_string.o t_list.o t_set.o t_zset.o t_hash.o t_nhash.o config.o aof.o pubsub.o multi.o debug.o sort.o intset.o syncio.o cluster.o crc16.o endianconv.o slowlog.o scripting.o bio.o rio.o rand.o memtest.o crcspeed.o crc64.o bitops.o sentinel.o notify.o setproctitle.o blocked.o hyperloglog.o latency.o sparkline.o redis-check-rdb.o redis-check-aof.o geo.o lazyfree.o module.o evict.o expire.o geohash.o geohash_helper.o childinfo.o defrag.o siphash.o rax.o t_stream.o listpack.o localtime.o acl.o storage.o rdb-s3.o fastlock.o new.o tracking.o cron.o connection.o tls.o sha256.o motd.o timeout.o setcpuaffinity.o $(ASM_OBJ) REDIS_CLI_NAME=keydb-cli REDIS_CLI_OBJ=anet.o adlist.o dict.o redis-cli.o redis-cli-cpphelper.o zmalloc.o release.o anet.o ae.o crcspeed.o crc64.o siphash.o crc16.o storage-lite.o fastlock.o new.o motd.o $(ASM_OBJ) REDIS_BENCHMARK_NAME=keydb-benchmark diff --git a/src/networking.cpp b/src/networking.cpp index d843e8d99..465376eeb 100644 --- a/src/networking.cpp +++ b/src/networking.cpp @@ -424,6 +424,10 @@ void addReplyProto(client *c, const char *s, size_t len) { _addReplyProtoToList(c,s,len); } +void addReplyProtoCString(client *c, const char *s) { + addReplyProto(c, s, strlen(s)); +} + std::string escapeString(sds str) { std::string newstr; diff --git a/src/object.cpp b/src/object.cpp index 4988fa0bf..35b0ae398 100644 --- a/src/object.cpp +++ b/src/object.cpp @@ -30,6 +30,7 @@ #include "server.h" #include "cron.h" +#include "t_nhash.h" #include #include #include @@ -395,6 +396,7 @@ void decrRefCount(robj_roptr o) { case OBJ_MODULE: freeModuleObject(o); break; case OBJ_STREAM: freeStreamObject(o); break; case OBJ_CRON: freeCronObject(o); break; + case OBJ_NESTEDHASH: freeNestedHashObject(o); break; default: serverPanic("Unknown object type"); break; } if (g_pserver->fActiveReplica) { diff --git a/src/sds.h b/src/sds.h index 3315ed9b1..d316a9ee8 100644 --- a/src/sds.h +++ b/src/sds.h @@ -390,6 +390,10 @@ public: : sdsview(sdsdup(other.m_str)) {} + sdsstring(const char *rgch, size_t cch) + : sdsview(sdsnewlen(rgch, cch)) + {} + sdsstring(sdsstring &&other) : sdsview(other.m_str) { @@ -410,6 +414,12 @@ public: return *this; } + sds release() { + sds sdsT = m_str; + m_str = nullptr; + return sdsT; + } + ~sdsstring() { sdsfree(m_str); diff --git a/src/server.cpp b/src/server.cpp index a162a8c1c..9d7dc2ef3 100644 --- a/src/server.cpp +++ b/src/server.cpp @@ -63,6 +63,7 @@ #include #include "aelocker.h" #include "motd.h" +#include "t_nhash.h" #ifdef __linux__ #include #endif @@ -1060,7 +1061,15 @@ struct redisCommand redisCommandTable[] = { {"stralgo",stralgoCommand,-2, "read-only @string", - 0,lcsGetKeys,0,0,0,0,0,0} + 0,lcsGetKeys,0,0,0,0,0,0}, + + {"keydb.nhget",nhgetCommand,-2, + "read-only fast @hash", + 0,NULL,1,1,1,0,0,0}, + + {"keydb.nhset",nhsetCommand,-3, + "read-only fast @hash", + 0,NULL,1,1,1,0,0,0}, }; /*============================ Utility functions ============================ */ @@ -3558,7 +3567,15 @@ void call(client *c, int flags) { updateCachedTime(0); incrementMvccTstamp(); start = g_pserver->ustime; - c->cmd->proc(c); + try { + c->cmd->proc(c); + } catch (robj_roptr o) { + addReply(c, o); + } catch 
(robj *o) { + addReply(c, o); + } catch (const char *sz) { + addReplyError(c, sz); + } serverTL->commandsExecuted++; duration = ustime()-start; dirty = g_pserver->dirty-dirty; diff --git a/src/server.h b/src/server.h index 9f7d8f22c..d8285e5f4 100644 --- a/src/server.h +++ b/src/server.h @@ -654,11 +654,11 @@ extern int configOOMScoreAdjValuesDefaults[CONFIG_OOM_COUNT]; /* A redis object, that is a type able to hold a string / list / set */ /* The actual Redis Object */ -#define OBJ_STRING 0 /* String object. */ -#define OBJ_LIST 1 /* List object. */ -#define OBJ_SET 2 /* Set object. */ -#define OBJ_ZSET 3 /* Sorted set object. */ -#define OBJ_HASH 4 /* Hash object. */ +#define OBJ_STRING 0 /* String object. */ +#define OBJ_LIST 1 /* List object. */ +#define OBJ_SET 2 /* Set object. */ +#define OBJ_ZSET 3 /* Sorted set object. */ +#define OBJ_HASH 4 /* Hash object. */ /* The "module" object type is a special one that signals that the object * is one directly managed by a Redis module. In this case the value points @@ -671,10 +671,10 @@ extern int configOOMScoreAdjValuesDefaults[CONFIG_OOM_COUNT]; * by a 64 bit module type ID, which has a 54 bits module-specific signature * in order to dispatch the loading to the right module, plus a 10 bits * encoding version. */ -#define OBJ_MODULE 5 /* Module object. */ -#define OBJ_STREAM 6 /* Stream object. */ -#define OBJ_CRON 7 /* CRON job */ - +#define OBJ_MODULE 5 /* Module object. */ +#define OBJ_STREAM 6 /* Stream object. */ +#define OBJ_CRON 7 /* CRON job */ +#define OBJ_NESTEDHASH 8 /* Nested Hash Object */ /* Extract encver / signature from a module type ID. */ #define REDISMODULE_TYPE_ENCVER_BITS 10 @@ -2009,6 +2009,7 @@ void addReplyNullArray(client *c); void addReplyBool(client *c, int b); void addReplyVerbatim(client *c, const char *s, size_t len, const char *ext); void addReplyProto(client *c, const char *s, size_t len); +void addReplyProtoCString(client *c, const char *s); void addReplyBulk(client *c, robj_roptr obj); void AddReplyFromClient(client *c, client *src); void addReplyBulkCString(client *c, const char *s); diff --git a/src/t_nhash.cpp b/src/t_nhash.cpp new file mode 100644 index 000000000..e9e17b1f7 --- /dev/null +++ b/src/t_nhash.cpp @@ -0,0 +1,353 @@ +/* + * Copyright (c) 2020, EQ Alpha Technology Ltd. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Redis nor the names of its contributors may be used + * to endorse or promote products derived from this software without + * specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include "server.h" +#include + +void dictObjectDestructor(void *privdata, void *val); +dictType nestedHashDictType { + dictSdsHash, /* hash function */ + NULL, /* key dup */ + NULL, /* val dup */ + dictSdsKeyCompare, /* key compare */ + dictSdsDestructor, /* key destructor */ + dictObjectDestructor, /* val destructor */ +}; + +robj *createNestHashBucket() { + dict *d = dictCreate(&nestedHashDictType, nullptr); + return createObject(OBJ_NESTEDHASH, d); +} + +void freeNestedHashObject(robj_roptr o) { + dictRelease((dict*)ptrFromObj(o)); +} + +robj *fetchFromKey(redisDb *db, robj_roptr key) { + const char *pchCur = szFromObj(key); + const char *pchStart = pchCur; + const char *pchMax = pchCur + sdslen(pchCur); + robj *o = nullptr; + + while (pchCur <= pchMax) { + if (pchCur == pchMax || *pchCur == '.') { + // WARNING: Don't deref pchCur as it may be pchMax + + // New word + if ((pchCur - pchStart) < 1) { + throw shared.syntaxerr; // malformed + } + + dict *d = nullptr; + if (o == nullptr) + d = db->pdict; + else + d = (dict*)ptrFromObj(o); + + sdsstring str(pchStart, pchCur - pchStart); + dictEntry *de = dictFind(d, str.get()); + o = (de != nullptr) ? (robj*)dictGetVal(de) : nullptr; + + if (o == nullptr) throw shared.nokeyerr; // Not Found + serverAssert(o->type == OBJ_NESTEDHASH || o->type == OBJ_STRING || o->type == OBJ_LIST); + if (o->type == OBJ_STRING && pchCur != pchMax) + throw shared.nokeyerr; // Past the end + + pchStart = pchCur + 1; + } + ++pchCur; + } + + return o; +} + +// Returns one if we overwrote a value +bool setWithKey(redisDb *db, robj_roptr key, robj *val, bool fCreateBuckets) { + const char *pchCur = szFromObj(key); + const char *pchStart = pchCur; + const char *pchMax = pchCur + sdslen(pchCur); + robj *o = nullptr; + + while (pchCur <= pchMax) { + if (pchCur == pchMax || *pchCur == '.') { + // WARNING: Don't deref pchCur as it may be pchMax + + // New word + if ((pchCur - pchStart) < 1) { + throw shared.syntaxerr; // malformed + } + + dict *d = nullptr; + if (o == nullptr) + d = db->pdict; + else + d = (dict*)ptrFromObj(o); + + sdsstring str(pchStart, pchCur - pchStart); + dictEntry *de = dictFind(d, str.get()); + + if (pchCur == pchMax) { + val->addref(); + if (de != nullptr) { + decrRefCount((robj*)dictGetVal(de)); + dictSetVal(d, de, val); + return true; + } else { + dictAdd(d, str.release(), val); + return false; + } + } else { + o = (de != nullptr) ? 
(robj*)dictGetVal(de) : nullptr; + + if (o == nullptr) { + if (!fCreateBuckets) + throw shared.nokeyerr; // Not Found + o = createNestHashBucket(); + serverAssert(dictAdd(d, str.release(), o) == DICT_OK); + } else if (o->type != OBJ_NESTEDHASH) { + decrRefCount(o); + o = createNestHashBucket(); + de->v.val = o; + } + } + + pchStart = pchCur + 1; + } + ++pchCur; + } + throw "Internal Error"; +} + +void writeNestedHashToClient(client *c, robj_roptr o) { + if (o == nullptr) { + addReply(c, shared.null[c->resp]); + } else if (o->type == OBJ_STRING) { + addReplyBulk(c, o); + } else if (o->type == OBJ_LIST) { + unsigned char *zl = (unsigned char*)ptrFromObj(o); + addReplyArrayLen(c, ziplistLen(zl)); + unsigned char *p = ziplistIndex(zl, ZIPLIST_HEAD); + while (p != nullptr) { + unsigned char *str; + unsigned int len; + long long lval; + if (ziplistGet(p, &str, &len, &lval)) { + char rgT[128]; + if (str == nullptr) { + len = ll2string(rgT, 128, lval); + str = (unsigned char*)rgT; + } + addReplyBulkCBuffer(c, (const char*)str, len); + } + p = ziplistNext(zl, p); + } + } else { + serverAssert(o->type == OBJ_NESTEDHASH ); + dict *d = (dict*)ptrFromObj(o); + + if (dictSize(d) > 1) + addReplyArrayLen(c, dictSize(d)); + + dictIterator *di = dictGetIterator(d); + dictEntry *de; + while ((de = dictNext(di))) { + robj_roptr oT = (robj*)dictGetVal(de); + addReplyArrayLen(c, 2); + addReplyBulkCBuffer(c, (sds)dictGetKey(de), sdslen((sds)dictGetKey(de))); + if (oT->type == OBJ_STRING) { + addReplyBulk(c, oT); + } else { + writeNestedHashToClient(c, oT); + } + } + dictReleaseIterator(di); + } +} + +inline bool FSimpleJsonEscapeCh(char ch) { + return (ch == '"' || ch == '\\'); +} +inline bool FExtendedJsonEscapeCh(char ch) { + return ch <= 0x1F; +} + +sds writeJsonValue(sds output, const char *valIn, size_t cchIn) { + const char *val = valIn; + size_t cch = cchIn; + int cchEscapeExtra = 0; + + // First scan for escaped chars + for (size_t ich = 0; ich < cchIn; ++ich) { + if (FSimpleJsonEscapeCh(valIn[ich])) { + ++cchEscapeExtra; + } else if (FExtendedJsonEscapeCh(valIn[ich])) { + cchEscapeExtra += 5; + } + } + + if (cchEscapeExtra > 0) { + size_t ichDst = 0; + sds dst = sdsnewlen(SDS_NOINIT, cchIn+cchEscapeExtra); + for (size_t ich = 0; ich < cchIn; ++ich) { + switch (valIn[ich]) { + case '"': + dst[ichDst++] = '\\'; dst[ichDst++] = '"'; + break; + case '\\': + dst[ichDst++] = '\\'; dst[ichDst++] = '\\'; + break; + + default: + serverAssert(!FSimpleJsonEscapeCh(valIn[ich])); + if (FExtendedJsonEscapeCh(valIn[ich])) { + dst[ichDst++] = '\\'; dst[ichDst++] = 'u'; + sprintf(dst + ichDst, "%4x", valIn[ich]); + ichDst += 4; + } else { + dst[ichDst++] = valIn[ich]; + } + break; + } + } + val = (const char*)dst; + serverAssert(ichDst == (cchIn+cchEscapeExtra)); + cch = ichDst; + } + + output = sdscat(output, "\""); + output = sdscatlen(output, val, cch); + output = sdscat(output, "\""); + + if (val != valIn) + sdsfree(val); + + return output; +} +sds writeJsonValue(sds output, sds val) { + return writeJsonValue(output, (const char*)val, sdslen(val)); +} + +sds writeNestedHashAsJson(sds output, robj_roptr o) { + if (o->type == OBJ_STRING) { + output = writeJsonValue(output, (sds)szFromObj(o)); + } else if (o->type == OBJ_LIST) { + unsigned char *zl = (unsigned char*)ptrFromObj(o); + output = sdscat(output, "["); + unsigned char *p = ziplistIndex(zl, ZIPLIST_HEAD); + bool fFirst = true; + while (p != nullptr) { + unsigned char *str; + unsigned int len; + long long lval; + if (ziplistGet(p, &str, &len, &lval)) { + char 
rgT[128]; + if (str == nullptr) { + len = ll2string(rgT, 128, lval); + str = (unsigned char*)rgT; + } + if (!fFirst) + output = sdscat(output, ","); + fFirst = false; + output = writeJsonValue(output, (const char*)str, len); + } + p = ziplistNext(zl, p); + } + output = sdscat(output, "]"); + } else { + output = sdscat(output, "{"); + dictIterator *di = dictGetIterator((dict*)ptrFromObj(o)); + dictEntry *de; + bool fFirst = true; + while ((de = dictNext(di))) { + robj_roptr oT = (robj*)dictGetVal(de); + if (!fFirst) + output = sdscat(output, ","); + fFirst = false; + output = writeJsonValue(output, (sds)dictGetKey(de)); + output = sdscat(output, " : "); + output = writeNestedHashAsJson(output, oT); + } + dictReleaseIterator(di); + output = sdscat(output, "}"); + } + return output; +} + +void nhsetCommand(client *c) { + if (c->argc < 3) + throw shared.syntaxerr; + + robj *val = c->argv[2]; + if (c->argc > 3) { + // Its a list, we'll store as a ziplist + val = createZiplistObject(); + for (int iarg = 2; iarg < c->argc; ++iarg) { + sds arg = (sds)szFromObj(c->argv[iarg]); + val->m_ptr = ziplistPush((unsigned char*)ptrFromObj(val), (unsigned char*)arg, sdslen(arg), ZIPLIST_TAIL); + } + } + + try { + if (setWithKey(c->db, c->argv[1], val, true)) { + addReplyLongLong(c, 1); // we replaced a value + } else { + addReplyLongLong(c, 0); // we added a new value + } + } catch (...) { + if (val != c->argv[2]) + decrRefCount(val); + throw; + } + if (val != c->argv[2]) + decrRefCount(val); +} + +void nhgetCommand(client *c) { + if (c->argc != 2 && c->argc != 3) + throw shared.syntaxerr; + + bool fJson = false; + int argOffset = 0; + if (c->argc == 3) { + argOffset++; + if (strcasecmp(szFromObj(c->argv[1]), "json") == 0) { + fJson = true; + } else if (strcasecmp(szFromObj(c->argv[1]), "resp") != 0) { + throw shared.syntaxerr; + } + } + + robj *o = fetchFromKey(c->db, c->argv[argOffset + 1]); + if (fJson) { + sds val = writeNestedHashAsJson(sdsnew(nullptr), o); + addReplyBulkSds(c, val); + } else { + writeNestedHashToClient(c, o); + } +} \ No newline at end of file diff --git a/src/t_nhash.h b/src/t_nhash.h new file mode 100644 index 000000000..df71fb0ca --- /dev/null +++ b/src/t_nhash.h @@ -0,0 +1,6 @@ +#pragma once + +void freeNestedHashObject(robj_roptr o); + +void nhsetCommand(client *c); +void nhgetCommand(client *c); diff --git a/tests/test_helper.tcl b/tests/test_helper.tcl index 88ea79d61..b70d89968 100644 --- a/tests/test_helper.tcl +++ b/tests/test_helper.tcl @@ -28,6 +28,7 @@ set ::all_tests { unit/type/hash unit/type/stream unit/type/stream-cgroups + unit/type/nestedhash unit/sort unit/expire unit/other From 5de36840ad71582b31f302e72709175ecc55f0b2 Mon Sep 17 00:00:00 2001 From: John Sully Date: Wed, 11 Nov 2020 03:29:38 +0000 Subject: [PATCH 197/215] Add missing file Former-commit-id: 97fd73692363e05b80e0c0e84b7b2b8fe7f760b8 --- tests/unit/type/nestedhash.tcl | 51 ++++++++++++++++++++++++++++++++++ 1 file changed, 51 insertions(+) create mode 100644 tests/unit/type/nestedhash.tcl diff --git a/tests/unit/type/nestedhash.tcl b/tests/unit/type/nestedhash.tcl new file mode 100644 index 000000000..5b2d0be52 --- /dev/null +++ b/tests/unit/type/nestedhash.tcl @@ -0,0 +1,51 @@ +start_server {tags {"nested_hash"}} { + test {Simple set/get} { + assert_equal [r keydb.nhset foo bar] 0 + assert_equal bar [r keydb.nhget foo] + } + + test {overwrite} { + r flushall + assert_equal [r keydb.nhset foo bar] 0 + assert_equal [r keydb.nhset foo baz] 1 + assert_equal [r keydb.nhget foo] baz + } + + test {get simple 
string} { + r keydb.nhset foo bar + assert_equal [r keydb.nhget json foo] {"bar"} + assert_equal [r keydb.nhget foo] "bar" + } + + test {get array} { + r keydb.nhset foo a b c d + assert_equal [r keydb.nhget json foo] {["a","b","c","d"]} + assert_equal [r keydb.nhget foo] {a b c d} + } + + test {overwrite string with object} { + r keydb.nhset a.b.c val1 + r keydb.nhset a.b val2 + r keydb.nhset a.b.c.d val3 + assert_equal [r keydb.nhget json a] {{"b" : {"c" : {"d" : "val3"}}}} + } + + test {malformed nested key} { + assert_error *syntax* {r keydb.nhset a..b val1} + } + + test {missing final selector key} { + assert_error *syntax* {r keydb.nhset a.b. val1} + } + + test {expire nested hash} { + r keydb.nhset a.b.c val1 + assert_equal [r expire a 100] 1 + assert [expr [r ttl a] > 0] + } + + test {expire subhash} { + r keydb.nhset a.b.c val1 + assert_equal [r expire a.b 100] 0 + } +} From da2a3dbf99adeebb8526cc5c30b5bde759b3b4b9 Mon Sep 17 00:00:00 2001 From: John Sully Date: Fri, 13 Nov 2020 20:51:30 +0000 Subject: [PATCH 198/215] Add ability to compile KeyDB without MOTD and curl Former-commit-id: f3794d2c420423ca7dcc51688c478f8ada290786 --- src/Makefile | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/src/Makefile b/src/Makefile index 0822e82f2..dbb18af65 100644 --- a/src/Makefile +++ b/src/Makefile @@ -107,7 +107,7 @@ endif FINAL_CFLAGS=$(STD) $(WARN) $(OPT) $(DEBUG) $(CFLAGS) $(REDIS_CFLAGS) FINAL_CXXFLAGS=$(CXX_STD) $(WARN) $(OPT) $(DEBUG) $(CXXFLAGS) $(REDIS_CFLAGS) FINAL_LDFLAGS=$(LDFLAGS) $(REDIS_LDFLAGS) $(DEBUG) -FINAL_LIBS+=-lm -lcurl +FINAL_LIBS+=-lm DEBUG=-g -ggdb ifneq ($(uname_S),Darwin) @@ -171,8 +171,11 @@ else # All the other OSes (notably Linux) FINAL_LDFLAGS+= -rdynamic FINAL_LIBS+=-ldl -pthread -lrt -luuid +ifneq ($(NO_MOTD),yes) FINAL_CFLAGS += -DMOTD FINAL_CXXFLAGS += -DMOTD + FINAL_LIBS+=-lcurl +endif endif endif endif From e8753d1b4bbcdb471a6b3b374fc31b2e29ed5d31 Mon Sep 17 00:00:00 2001 From: John Sully Date: Thu, 19 Nov 2020 23:28:01 +0000 Subject: [PATCH 199/215] Blocking clients should not crash if an active replica loads a remote RDB with a key in the blocklist Former-commit-id: 1c525e20b10e0a47af687a0d46faf75229a1cbf5 --- src/blocked.cpp | 7 +++++++ src/replication.cpp | 3 ++- tests/integration/replication-active.tcl | 17 +++++++++++++++++ 3 files changed, 26 insertions(+), 1 deletion(-) diff --git a/src/blocked.cpp b/src/blocked.cpp index 63cce0996..34c1d60ef 100644 --- a/src/blocked.cpp +++ b/src/blocked.cpp @@ -672,6 +672,13 @@ void signalKeyAsReady(redisDb *db, robj *key) { /* Key was already signaled? No need to queue it again. */ if (dictFind(db->ready_keys,key) != NULL) return; + if (key->getrefcount() == OBJ_STATIC_REFCOUNT) { + // Sometimes a key may be stack allocated, we'll need to dupe it + robj *newKey = createStringObject(szFromObj(key), sdslen(szFromObj(key))); + newKey->setrefcount(0); // Start with 0 but don't free + key = newKey; + } + /* Ok, we need to queue this key into g_pserver->ready_keys. */ rl = (readyList*)zmalloc(sizeof(*rl), MALLOC_SHARED); rl->key = key; diff --git a/src/replication.cpp b/src/replication.cpp index b9de0bce5..f8db2f552 100644 --- a/src/replication.cpp +++ b/src/replication.cpp @@ -3049,7 +3049,8 @@ struct redisMaster *replicationAddMaster(char *ip, int port) { else freeClientAsync(mi->master); } - disconnectAllBlockedClients(); /* Clients blocked in master, now replica. */ + if (!g_pserver->fActiveReplica) + disconnectAllBlockedClients(); /* Clients blocked in master, now replica. 
*/ /* Update oom_score_adj */ setOOMScoreAdj(-1); diff --git a/tests/integration/replication-active.tcl b/tests/integration/replication-active.tcl index 233b513ac..cd7acd991 100644 --- a/tests/integration/replication-active.tcl +++ b/tests/integration/replication-active.tcl @@ -247,6 +247,23 @@ start_server {tags {"active-repl"} overrides {active-replica yes}} { assert_equal {bar} [$master get testkey] {master is correct} } + test {Active replica merge works with client blocked} { + $slave flushall + $slave replicaof no one + $master replicaof no one + after 100 + set rd [redis_deferring_client] + $rd blpop testlist 0 + $slave lpush testlist foo + + #OK Now reconnect + $slave replicaof $master_host $master_port + $master replicaof $slave_host $slave_port + after 1000 + + $rd read + } {testlist foo} + test {Active replica different databases} { $master select 3 $master set testkey abcd From 3ee8edc8b016d40977ee9182cef04a6776ef0c0e Mon Sep 17 00:00:00 2001 From: Kajaruban Surendran Date: Tue, 17 Nov 2020 23:01:57 +0000 Subject: [PATCH 200/215] fix for the server crash when the maxclients increased via config set Former-commit-id: 34a97c447f8cb5753e0747b435c7ce1a5d381c47 --- src/config.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/config.cpp b/src/config.cpp index 839a8b91e..bbc9cb68a 100644 --- a/src/config.cpp +++ b/src/config.cpp @@ -2273,7 +2273,7 @@ static int updateMaxclients(long long val, long long prev, const char **err) { } return 0; } - for (int iel = 0; iel < MAX_EVENT_LOOPS; ++iel) + for (int iel = 0; iel < cserver.cthreads; ++iel) { if ((unsigned int) aeGetSetSize(g_pserver->rgthreadvar[iel].el) < g_pserver->maxclients + CONFIG_FDSET_INCR) From c179c98870af4ae63528a93cec8ed0b4ef4a41a5 Mon Sep 17 00:00:00 2001 From: John Sully Date: Mon, 23 Nov 2020 02:01:40 +0000 Subject: [PATCH 201/215] Fix issue where active replication doesn't replicate RDB data Former-commit-id: 527b7eb0742567302e0343e3acbed9814c0cbb95 --- src/aof.cpp | 2 +- src/cluster.cpp | 42 ++++++ src/db.cpp | 14 +- src/rdb.cpp | 6 +- src/replication.cpp | 36 +++++ src/server.cpp | 6 + src/server.h | 6 +- .../replication-multimaster-connect.tcl | 124 ++++++++++++++++++ tests/test_helper.tcl | 1 + 9 files changed, 228 insertions(+), 9 deletions(-) create mode 100644 tests/integration/replication-multimaster-connect.tcl diff --git a/src/aof.cpp b/src/aof.cpp index 476789a65..34feea237 100644 --- a/src/aof.cpp +++ b/src/aof.cpp @@ -642,7 +642,7 @@ sds catAppendOnlyExpireMemberAtCommand(sds buf, struct redisCommand *cmd, robj * when += mstime(); robj *argvNew[4]; - argvNew[0] = createStringObject("PEXPIREMEMBERAT",15); + argvNew[0] = shared.pexpirememberat; argvNew[1] = argv[1]; argvNew[2] = argv[2]; argvNew[3] = createStringObjectFromLongLong(when); diff --git a/src/cluster.cpp b/src/cluster.cpp index a3074b564..b146b1011 100644 --- a/src/cluster.cpp +++ b/src/cluster.cpp @@ -5019,6 +5019,48 @@ void dumpCommand(client *c) { return; } +/* KEYDB.MVCCRESTORE key mvcc expire serialized-value */ +void mvccrestoreCommand(client *c) { + long long mvcc, expire; + robj *key = c->argv[1], *obj = nullptr; + int type; + + if (getLongLongFromObjectOrReply(c, c->argv[2], &mvcc, "Invalid MVCC Tstamp") != C_OK) + return; + + if (getLongLongFromObjectOrReply(c, c->argv[3], &expire, "Invalid expire") != C_OK) + return; + + /* Verify RDB version and data checksum unles the client is already a replica or master */ + if (!(c->flags & (CLIENT_SLAVE | CLIENT_MASTER))) { + if (verifyDumpPayload((unsigned 
char*)ptrFromObj(c->argv[4]),sdslen(szFromObj(c->argv[4]))) == C_ERR) + { + addReplyError(c,"DUMP payload version or checksum are wrong"); + return; + } + } + + rio payload; + rioInitWithBuffer(&payload,szFromObj(c->argv[4])); + if (((type = rdbLoadObjectType(&payload)) == -1) || + ((obj = rdbLoadObject(type,&payload,szFromObj(key), OBJ_MVCC_INVALID)) == NULL)) + { + addReplyError(c,"Bad data format"); + return; + } + setMvccTstamp(obj, mvcc); + + /* Create the key and set the TTL if any */ + dbMerge(c->db,key,obj,true); + if (expire >= 0) { + setExpire(c,c->db,key,nullptr,expire); + } + signalModifiedKey(c,c->db,key); + notifyKeyspaceEvent(NOTIFY_GENERIC,"restore",key,c->db->id); + addReply(c,shared.ok); + g_pserver->dirty++; +} + /* RESTORE key ttl serialized-value [REPLACE] */ void restoreCommand(client *c) { long long ttl, lfu_freq = -1, lru_idle = -1, lru_clock = -1; diff --git a/src/db.cpp b/src/db.cpp index 2edad7341..97bcc86df 100644 --- a/src/db.cpp +++ b/src/db.cpp @@ -202,13 +202,15 @@ robj *lookupKeyWriteOrReply(client *c, robj *key, robj *reply) { return o; } -int dbAddCore(redisDb *db, robj *key, robj *val) { +int dbAddCore(redisDb *db, robj *key, robj *val, bool fUpdateMvcc) { serverAssert(!val->FExpires()); sds copy = sdsdup(szFromObj(key)); int retval = dictAdd(db->pdict, copy, val); uint64_t mvcc = getMvccTstamp(); - setMvccTstamp(key, mvcc); - setMvccTstamp(val, mvcc); + if (fUpdateMvcc) { + setMvccTstamp(key, mvcc); + setMvccTstamp(val, mvcc); + } if (retval == DICT_OK) { @@ -232,7 +234,7 @@ int dbAddCore(redisDb *db, robj *key, robj *val) { * The program is aborted if the key already exists. */ void dbAdd(redisDb *db, robj *key, robj *val) { - int retval = dbAddCore(db, key, val); + int retval = dbAddCore(db, key, val, true /* fUpdateMvcc */); serverAssertWithInfo(NULL,key,retval == DICT_OK); } @@ -290,7 +292,7 @@ int dbMerge(redisDb *db, robj *key, robj *val, int fReplace) { dictEntry *de = dictFind(db->pdict, ptrFromObj(key)); if (de == nullptr) - return (dbAddCore(db, key, val) == DICT_OK); + return (dbAddCore(db, key, val, false /* fUpdateMvcc */) == DICT_OK); robj *old = (robj*)dictGetVal(de); if (mvccFromObj(old) <= mvccFromObj(val)) @@ -303,7 +305,7 @@ int dbMerge(redisDb *db, robj *key, robj *val, int fReplace) } else { - return (dbAddCore(db, key, val) == DICT_OK); + return (dbAddCore(db, key, val, true /* fUpdateMvcc */) == DICT_OK); } } diff --git a/src/rdb.cpp b/src/rdb.cpp index 1cfd6a423..367d8ad13 100644 --- a/src/rdb.cpp +++ b/src/rdb.cpp @@ -2353,7 +2353,9 @@ int rdbLoadRio(rio *rdb, int rdbflags, rdbSaveInfo *rsi) { else { redisObjectStack keyobj; initStaticStringObject(keyobj,key); - setExpire(NULL, db, &keyobj, subexpireKey, strtoll(szFromObj(auxval), nullptr, 10)); + long long expireT = strtoll(szFromObj(auxval), nullptr, 10); + setExpire(NULL, db, &keyobj, subexpireKey, expireT); + replicateSubkeyExpire(db, &keyobj, subexpireKey, expireT); decrRefCount(subexpireKey); subexpireKey = nullptr; } @@ -2475,6 +2477,8 @@ int rdbLoadRio(rio *rdb, int rdbflags, rdbSaveInfo *rsi) { /* call key space notification on key loaded for modules only */ moduleNotifyKeyspaceEvent(NOTIFY_LOADED, "loaded", &keyobj, db->id); + + replicationNotifyLoadedKey(db, &keyobj, val, expiretime); } else { diff --git a/src/replication.cpp b/src/replication.cpp index f8db2f552..46ade807d 100644 --- a/src/replication.cpp +++ b/src/replication.cpp @@ -4367,3 +4367,39 @@ static void propagateMasterStaleKeys() decrRefCount(rgobj[0]); } + +void replicationNotifyLoadedKey(redisDb *db, 
robj_roptr key, robj_roptr val, long long expire) { + if (!g_pserver->fActiveReplica || listLength(g_pserver->slaves) == 0) + return; + + // Send a digest over to the replicas + rio r; + + createDumpPayload(&r, val, key.unsafe_robjcast()); + + redisObjectStack objPayload; + initStaticStringObject(objPayload, r.io.buffer.ptr); + redisObjectStack objTtl; + initStaticStringObject(objTtl, sdscatprintf(sdsempty(), "%lld", expire)); + redisObjectStack objMvcc; + initStaticStringObject(objMvcc, sdscatprintf(sdsempty(), "%lu", mvccFromObj(val))); + redisObject *argv[5] = {shared.mvccrestore, key.unsafe_robjcast(), &objMvcc, &objTtl, &objPayload}; + + replicationFeedSlaves(g_pserver->slaves, db - g_pserver->db, argv, 5); + + sdsfree(szFromObj(&objTtl)); + sdsfree(szFromObj(&objMvcc)); + sdsfree(r.io.buffer.ptr); +} + +void replicateSubkeyExpire(redisDb *db, robj_roptr key, robj_roptr subkey, long long expire) { + if (!g_pserver->fActiveReplica || listLength(g_pserver->slaves) == 0) + return; + + redisObjectStack objTtl; + initStaticStringObject(objTtl, sdscatprintf(sdsempty(), "%lld", expire)); + redisObject *argv[4] = {shared.pexpirememberat, key.unsafe_robjcast(), subkey.unsafe_robjcast(), &objTtl}; + replicationFeedSlaves(g_pserver->slaves, db - g_pserver->db, argv, 4); + + sdsfree(szFromObj(&objTtl)); +} \ No newline at end of file diff --git a/src/server.cpp b/src/server.cpp index 9d7dc2ef3..277c9e39a 100644 --- a/src/server.cpp +++ b/src/server.cpp @@ -1070,6 +1070,10 @@ struct redisCommand redisCommandTable[] = { {"keydb.nhset",nhsetCommand,-3, "read-only fast @hash", 0,NULL,1,1,1,0,0,0}, + + {"KEYDB.MVCCRESTORE",mvccrestoreCommand, 5, + "write use-memory @keyspace @dangerous", + 0,NULL,1,1,1,0,0,0}, }; /*============================ Utility functions ============================ */ @@ -2438,6 +2442,8 @@ void createSharedObjects(void) { shared.hdel = makeObjectShared(createStringObject("HDEL", 4)); shared.zrem = makeObjectShared(createStringObject("ZREM", 4)); shared.srem = makeObjectShared(createStringObject("SREM", 4)); + shared.mvccrestore = makeObjectShared(createStringObject("KEYDB.MVCCRESTORE", 17)); + shared.pexpirememberat = makeObjectShared(createStringObject("PEXPIREMEMBERAT",15)); for (j = 0; j < OBJ_SHARED_INTEGERS; j++) { shared.integers[j] = makeObjectShared(createObject(OBJ_STRING,(void*)(long)j)); diff --git a/src/server.h b/src/server.h index d8285e5f4..0fc874b6a 100644 --- a/src/server.h +++ b/src/server.h @@ -1176,7 +1176,7 @@ struct sharedObjectsStruct { *busykeyerr, *oomerr, *plus, *messagebulk, *pmessagebulk, *subscribebulk, *unsubscribebulk, *psubscribebulk, *punsubscribebulk, *del, *unlink, *rpop, *lpop, *lpush, *rpoplpush, *zpopmin, *zpopmax, *emptyscan, - *multi, *exec, *srem, *hdel, *zrem, + *multi, *exec, *srem, *hdel, *zrem, *mvccrestore, *pexpirememberat, *select[PROTO_SHARED_SELECT_CMDS], *integers[OBJ_SHARED_INTEGERS], *mbulkhdr[OBJ_SHARED_BULKHDR_LEN], /* "*\r\n" */ @@ -2217,6 +2217,8 @@ void updateMasterAuth(); void showLatestBacklog(); void rdbPipeReadHandler(struct aeEventLoop *eventLoop, int fd, void *clientData, int mask); void rdbPipeWriteHandlerConnRemoved(struct connection *conn); +void replicationNotifyLoadedKey(redisDb *db, robj_roptr key, robj_roptr val, long long expire); +void replicateSubkeyExpire(redisDb *db, robj_roptr key, robj_roptr subkey, long long expire); /* Generic persistence functions */ void startLoadingFile(FILE* fp, const char * filename, int rdbflags); @@ -2547,6 +2549,7 @@ void clusterPropagatePublish(robj *channel, robj 
*message); void migrateCloseTimedoutSockets(void); void clusterBeforeSleep(void); int clusterSendModuleMessageToTarget(const char *target, uint64_t module_id, uint8_t type, unsigned char *payload, uint32_t len); +void createDumpPayload(rio *payload, robj_roptr o, robj *key); /* Sentinel */ void initSentinelConfig(void); @@ -2764,6 +2767,7 @@ void watchCommand(client *c); void unwatchCommand(client *c); void clusterCommand(client *c); void restoreCommand(client *c); +void mvccrestoreCommand(client *c); void migrateCommand(client *c); void askingCommand(client *c); void readonlyCommand(client *c); diff --git a/tests/integration/replication-multimaster-connect.tcl b/tests/integration/replication-multimaster-connect.tcl new file mode 100644 index 000000000..229d9fe96 --- /dev/null +++ b/tests/integration/replication-multimaster-connect.tcl @@ -0,0 +1,124 @@ +start_server {tags {"multi-master"} overrides {active-replica yes multi-master yes}} { +start_server {overrides {active-replica yes multi-master yes}} { +start_server {overrides {active-replica yes multi-master yes}} { +start_server {overrides {active-replica yes multi-master yes}} { + for {set j 0} {$j < 4} {incr j} { + set R($j) [srv [expr 0-$j] client] + set R_host($j) [srv [expr 0-$j] host] + set R_port($j) [srv [expr 0-$j] port] + } + + set keysPerServer 100 + + # Initialize Dataset + for {set j 1} {$j < 4} {incr j} { + for {set key 0} { $key < $keysPerServer} { incr key } { + $R($j) set "key_$j\_$key" asdjaoijioasdjiod ex 100000 + } + set hash($j) [$R($j) debug digest] + } + + $R(1) replicaof $R_host(0) $R_port(0) + $R(2) replicaof $R_host(0) $R_port(0) + $R(3) replicaof $R_host(0) $R_port(0) + + test "all nodes up" { + for {set j 1} {$j < 4} {incr j} { + wait_for_condition 50 100 { + [string match {*master_global_link_status:up*} [$R($j) info replication]] + } else { + fail "Multimaster group didn't connect up in a reasonable period of time" + } + } + } + + test "nodes retain their data" { + for {set j 1} { $j < 4 } { incr j } { + assert_equal [$R($j) debug digest] $hash($j) $j + } + } + + # Set all servers with an overlapping key - the last one should win + $R(0) set isvalid no + $R(1) set isvalid no + $R(2) set isvalid no + # Note: Sleep is due to mvcc slip + after 2 + $R(3) set isvalid yes + + for {set n 1} {$n < 4} {incr n} { + test "Node $n reciprocal rep works" { + $R(0) replicaof $R_host($n) $R_port($n) + after 2000 + for {set key 0} { $key < $keysPerServer } { incr key } { + assert_equal [$R(0) get "key_$n\_$key"] asdjaoijioasdjiod $key + } + } + } + + test "All data transferred between nodes" { + for {set server 0} {$server < 4} {incr server} { + set hash($j) [$R($server) debug digest] + for {set n 1} {$n < 4} {incr n} { + for {set key 0} {$key < $keysPerServer} {incr key} { + assert_equal [$R($server) get "key_$n\_$key"] asdjaoijioasdjiod "server: $n key: $key" + } + } + } + } + + test "MVCC Updates Correctly" { + assert_equal [$R(0) get isvalid] yes + assert_equal [$R(1) get isvalid] yes + assert_equal [$R(2) get isvalid] yes + assert_equal [$R(3) get isvalid] yes + } + + unset hash + test "All servers same debug digest" { + set hash [$R(0) debug digest] + for {set j 1} {$j < 4} {incr j} { + assert_equal $hash [$R($j) debug digest] $j + } + } +}}}} + +# The tests below validate features replicated via RDB +start_server {tags {"multi-master"} overrides {active-replica yes multi-master yes}} { +start_server {overrides {active-replica yes multi-master yes}} { +start_server {overrides {active-replica yes multi-master yes}} { 
+ for {set j 0} {$j < 3} {incr j} { + set R($j) [srv [expr 0-$j] client] + set R_host($j) [srv [expr 0-$j] host] + set R_port($j) [srv [expr 0-$j] port] + } + + # Set replicated features here + $R(0) sadd testhash subkey + $R(0) expiremember testhash subkey 10000 + + + test "node 2 up" { + $R(2) replicaof $R_host(1) $R_port(1) + wait_for_condition 50 100 { + [string match {*master_global_link_status:up*} [$R(2) info replication]] + } else { + fail "didn't connect up in a reasonable period of time" + } + } + + # While node 1 loads from 0, it will relay to 2 + test "node 1 up" { + $R(1) replicaof $R_host(0) $R_port(0) + wait_for_condition 50 100 { + [string match {*master_global_link_status:up*} [$R(1) info replication]] + } else { + fail "didn't connect up in a reasonable period of time" + } + } + + #Tests that validate replication made it to node 2 + test "subkey expire replicates via RDB" { + assert [expr [$R(2) ttl testhash subkey] > 0] + } +}}} diff --git a/tests/test_helper.tcl b/tests/test_helper.tcl index b70d89968..6ee5572e1 100644 --- a/tests/test_helper.tcl +++ b/tests/test_helper.tcl @@ -48,6 +48,7 @@ set ::all_tests { integration/replication-psync integration/replication-active integration/replication-multimaster + integration/replication-multimaster-connect integration/aof integration/rdb integration/convert-zipmap-hash-on-load From d4a2ccd50bfdd41222441da584bd7bcdde1d4f05 Mon Sep 17 00:00:00 2001 From: VivekSainiEQ Date: Tue, 24 Nov 2020 21:04:32 +0000 Subject: [PATCH 202/215] Added NULL check to function objFromAllocPtr, now NULL input will map to NULL output even if active replica is enabled Former-commit-id: b67db7c2d5396892cc0c91c3c822a5b9a547badf --- src/object.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/object.cpp b/src/object.cpp index 35b0ae398..733062ee8 100644 --- a/src/object.cpp +++ b/src/object.cpp @@ -1528,7 +1528,7 @@ void *allocPtrFromObj(robj_roptr o) { } robj *objFromAllocPtr(void *pv) { - if (g_pserver->fActiveReplica) { + if (pv && g_pserver->fActiveReplica) { return reinterpret_cast(reinterpret_cast(pv)+1); } return reinterpret_cast(pv); From 47a18f51316ba5d5c27d79e939cca1dd4b2d4c66 Mon Sep 17 00:00:00 2001 From: VivekSainiEQ Date: Tue, 24 Nov 2020 21:24:47 +0000 Subject: [PATCH 203/215] Made NULL pointer check explicit, issue #257 Former-commit-id: 1d4d2925c2a11bb7fdffe92c17a315c2df011a54 --- src/object.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/object.cpp b/src/object.cpp index 733062ee8..ef68cef6e 100644 --- a/src/object.cpp +++ b/src/object.cpp @@ -1528,7 +1528,7 @@ void *allocPtrFromObj(robj_roptr o) { } robj *objFromAllocPtr(void *pv) { - if (pv && g_pserver->fActiveReplica) { + if (pv != nullptr && g_pserver->fActiveReplica) { return reinterpret_cast(reinterpret_cast(pv)+1); } return reinterpret_cast(pv); From 86ca4300c8c292147fa7b8201f11ebd3501722ae Mon Sep 17 00:00:00 2001 From: VivekSainiEQ Date: Wed, 25 Nov 2020 17:12:30 +0000 Subject: [PATCH 204/215] Added test for issue #257 Former-commit-id: 779c775df9e8be36064689404e3ff51bead8a364 --- tests/unit/memefficiency.tcl | 89 +++++++++++++++++++++++++++++++++++- 1 file changed, 88 insertions(+), 1 deletion(-) diff --git a/tests/unit/memefficiency.tcl b/tests/unit/memefficiency.tcl index 0654898ee..2032a0841 100644 --- a/tests/unit/memefficiency.tcl +++ b/tests/unit/memefficiency.tcl @@ -450,4 +450,91 @@ start_server {tags {"defrag"} overrides {server-threads 1} } { } } } -} ;# run_solo +start_server {tags {"defrag"} overrides 
{server-threads 1 active-replica yes} } { ;#test defrag with active-replica enabled + if {[string match {*jemalloc*} [s mem_allocator]]} { + + test "Active defrag with active replica" { + r config set save "" ;# prevent bgsave from interfereing with save below + r config set hz 100 + r config set activedefrag no + r config set active-defrag-threshold-lower 5 + r config set active-defrag-cycle-min 65 + r config set active-defrag-cycle-max 75 + r config set active-defrag-ignore-bytes 2mb + r config set maxmemory 100mb + r config set maxmemory-policy allkeys-lru + r debug populate 700000 asdf1 150 + r debug populate 170000 asdf2 300 + r ping ;# trigger eviction following the previous population + after 120 ;# serverCron only updates the info once in 100ms + set frag [s allocator_frag_ratio] + if {$::verbose} { + puts "frag $frag" + } + assert {$frag >= 1.4} + + r config set latency-monitor-threshold 5 + r latency reset + r config set maxmemory 110mb ;# prevent further eviction (not to fail the digest test) + set digest [r debug digest] + catch {r config set activedefrag yes} e + if {![string match {DISABLED*} $e]} { + # Wait for the active defrag to start working (decision once a + # second). + wait_for_condition 50 100 { + [s active_defrag_running] ne 0 + } else { + fail "defrag not started." + } + + # Wait for the active defrag to stop working. + catch { + wait_for_condition 150 100 { + [s active_defrag_running] eq 0 + } else { + after 120 ;# serverCron only updates the info once in 100ms + puts [r info memory] + puts [r memory malloc-stats] + fail "defrag didn't stop." + } + } e + if {[string match *error* $e]} { + after 120 + fail "defrag accessed invalid address." + } + + # Test the the fragmentation is lower. + after 120 ;# serverCron only updates the info once in 100ms + set frag [s allocator_frag_ratio] + set max_latency 0 + foreach event [r latency latest] { + lassign $event eventname time latency max + if {$eventname == "active-defrag-cycle"} { + set max_latency $max + } + } + if {$::verbose} { + puts "frag $frag" + set misses [s active_defrag_misses] + set hits [s active_defrag_hits] + puts "hits: $hits" + puts "misses: $misses" + puts "max latency $max_latency" + puts [r latency latest] + puts [r latency history active-defrag-cycle] + } + assert {$frag < 1.1} + # due to high fragmentation, 100hz, and active-defrag-cycle-max set to 75, + # we expect max latency to be not much higher than 7.5ms but due to rare slowness threshold is set higher + assert {$max_latency <= 30} + } else { + set _ "" + } + # verify the data isn't corrupted or changed + set newdigest [r debug digest] + assert {$digest eq $newdigest} + r save ;# saving an rdb iterates over all the data / pointers + } {OK} + } +} +} ;# run solo \ No newline at end of file From 2ab3385733b91c69f7ea9d7647d13f28d0dde936 Mon Sep 17 00:00:00 2001 From: VivekSainiEQ Date: Wed, 25 Nov 2020 21:34:40 +0000 Subject: [PATCH 205/215] Updated 'make lcov' command to exclude external dependancies and generate final code coverage percentage Former-commit-id: eebd70aaa6bec0362ea90148174263fa6ffcc169 --- src/Makefile | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/src/Makefile b/src/Makefile index dbb18af65..8b3cfdb6b 100644 --- a/src/Makefile +++ b/src/Makefile @@ -401,8 +401,9 @@ check: test lcov: $(MAKE) gcov @(set -e; cd ..; ./runtest --clients 1) - @geninfo -o redis.info . - @genhtml --legend -o lcov-html redis.info + @geninfo -o KeyDB.info --no-external . 
+ @genhtml --legend -o lcov-html KeyDB.info + @genhtml --legend -o lcov-html KeyDB.info | grep lines | awk '{print $2;}' | sed 's/%//g' test-sds: sds.c sds.h $(REDIS_CC) sds.c zmalloc.cpp -DSDS_TEST_MAIN $(FINAL_LIBS) -o /tmp/sds_test From 7ad88d0bac7964338a95f00fcd9a2278b99eaefc Mon Sep 17 00:00:00 2001 From: VivekSainiEQ Date: Wed, 25 Nov 2020 22:29:09 +0000 Subject: [PATCH 206/215] Properly escaped $ in lcov command Former-commit-id: 06a17c8511cfa8e07962401cc01045878525dc27 --- src/Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Makefile b/src/Makefile index 8b3cfdb6b..a3795762e 100644 --- a/src/Makefile +++ b/src/Makefile @@ -403,7 +403,7 @@ lcov: @(set -e; cd ..; ./runtest --clients 1) @geninfo -o KeyDB.info --no-external . @genhtml --legend -o lcov-html KeyDB.info - @genhtml --legend -o lcov-html KeyDB.info | grep lines | awk '{print $2;}' | sed 's/%//g' + @genhtml --legend -o lcov-html KeyDB.info | grep lines | awk '{print $$2;}' | sed 's/%//g' test-sds: sds.c sds.h $(REDIS_CC) sds.c zmalloc.cpp -DSDS_TEST_MAIN $(FINAL_LIBS) -o /tmp/sds_test From f99824faf4fe49e40727a453274c9d84e2edc528 Mon Sep 17 00:00:00 2001 From: VivekSainiEQ Date: Wed, 25 Nov 2020 23:47:13 +0000 Subject: [PATCH 207/215] Added references to KeyDB.info instead of redis.info Former-commit-id: b94f91b9b25b5476d62bdbe2f054cc61691de9f5 --- src/Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Makefile b/src/Makefile index a3795762e..8cccfd711 100644 --- a/src/Makefile +++ b/src/Makefile @@ -378,7 +378,7 @@ DEP = $(REDIS_SERVER_OBJ:%.o=%.d) $(REDIS_CLI_OBJ:%.o=%.d) $(REDIS_BENCHMARK_OBJ $(KEYDB_AS) $< -o $@ clean: - rm -rf $(REDIS_SERVER_NAME) $(REDIS_SENTINEL_NAME) $(REDIS_CLI_NAME) $(REDIS_BENCHMARK_NAME) $(REDIS_CHECK_RDB_NAME) $(REDIS_CHECK_AOF_NAME) *.o *.gcda *.gcno *.gcov redis.info lcov-html Makefile.dep dict-benchmark + rm -rf $(REDIS_SERVER_NAME) $(REDIS_SENTINEL_NAME) $(REDIS_CLI_NAME) $(REDIS_BENCHMARK_NAME) $(REDIS_CHECK_RDB_NAME) $(REDIS_CHECK_AOF_NAME) *.o *.gcda *.gcno *.gcov KeyDB.info lcov-html Makefile.dep dict-benchmark rm -f $(DEP) .PHONY: clean From fd03008bbf34363101af8d3db1c4a7633c903da7 Mon Sep 17 00:00:00 2001 From: Kajaruban Surendran Date: Fri, 27 Nov 2020 21:42:08 +0000 Subject: [PATCH 208/215] Configurable option for MOTD Former-commit-id: 49a89d636ba698dbd0858d5059d3d6387c8c1fc7 --- keydb.conf | 3 +++ src/config.cpp | 1 + src/motd.cpp | 10 +++++++--- src/motd.h | 5 ++--- src/redis-cli.c | 3 ++- src/server.cpp | 2 +- src/server.h | 1 + tests/unit/introspection.tcl | 1 + 8 files changed, 18 insertions(+), 8 deletions(-) diff --git a/keydb.conf b/keydb.conf index 9015dd84a..349135f93 100644 --- a/keydb.conf +++ b/keydb.conf @@ -282,6 +282,9 @@ databases 16 # ASCII art logo in startup logs by setting the following option to yes. always-show-logo yes +# Retrieving "message of today" using CURL requests. 
+#enable-motd yes + ################################ SNAPSHOTTING ################################ # # Save the DB on disk: diff --git a/src/config.cpp b/src/config.cpp index bbc9cb68a..9aa1e0990 100644 --- a/src/config.cpp +++ b/src/config.cpp @@ -2344,6 +2344,7 @@ standardConfig configs[] = { createBoolConfig("daemonize", NULL, IMMUTABLE_CONFIG, cserver.daemonize, 0, NULL, NULL), createBoolConfig("lua-replicate-commands", NULL, MODIFIABLE_CONFIG, g_pserver->lua_always_replicate_commands, 1, NULL, NULL), createBoolConfig("always-show-logo", NULL, IMMUTABLE_CONFIG, g_pserver->always_show_logo, 0, NULL, NULL), + createBoolConfig("enable-motd", NULL, IMMUTABLE_CONFIG, cserver.enable_motd, 1, NULL, NULL), createBoolConfig("protected-mode", NULL, MODIFIABLE_CONFIG, g_pserver->protected_mode, 1, NULL, NULL), createBoolConfig("rdbcompression", NULL, MODIFIABLE_CONFIG, g_pserver->rdb_compression, 1, NULL, NULL), createBoolConfig("rdb-del-sync-files", NULL, MODIFIABLE_CONFIG, g_pserver->rdb_del_sync_files, 0, NULL, NULL), diff --git a/src/motd.cpp b/src/motd.cpp index 2fbef9526..e2f55673d 100644 --- a/src/motd.cpp +++ b/src/motd.cpp @@ -71,12 +71,16 @@ static void setMOTDCache(const char *sz) fclose(pf); } -extern "C" char *fetchMOTD(int cache) +extern "C" char *fetchMOTD(int cache, int enable_motd) { sds str; CURL *curl; CURLcode res; + /* Do not try the CURL if the motd is disabled*/ + if (!enable_motd) { + return NULL; + } /* First try and get the string from the cache */ if (cache) { str = fetchMOTDFromCache(); @@ -124,9 +128,9 @@ extern "C" char *fetchMOTD(int cache) #else -extern "C" char *fetchMOTD(int /* cache */) +extern "C" char *fetchMOTD(int /* cache */, int /* enable_motd */) { return NULL; } -#endif \ No newline at end of file +#endif diff --git a/src/motd.h b/src/motd.h index c57e63726..b6a91477b 100644 --- a/src/motd.h +++ b/src/motd.h @@ -6,9 +6,8 @@ extern const char *motd_cache_file; #ifdef __cplusplus extern "C" { #endif - -char *fetchMOTD(int fCache); +char *fetchMOTD(int fCache, int enable_motd); #ifdef __cplusplus } -#endif \ No newline at end of file +#endif diff --git a/src/redis-cli.c b/src/redis-cli.c index 412760278..41242b313 100644 --- a/src/redis-cli.c +++ b/src/redis-cli.c @@ -7113,7 +7113,8 @@ int main(int argc, char **argv) { if (argc == 0 && !config.eval) { /* Show the message of the day if we are interactive */ if (config.output == OUTPUT_STANDARD && !config.disable_motd) { - char *szMotd = fetchMOTD(1 /* cache */); + /*enable_motd=1 will retrieve the message of today using CURL*/ + char *szMotd = fetchMOTD(1 /* cache */, 1 /* enable_motd */); if (szMotd != NULL) { printf("Message of the day:\n %s\n", szMotd); sdsfree(szMotd); diff --git a/src/server.cpp b/src/server.cpp index 277c9e39a..92241f02b 100644 --- a/src/server.cpp +++ b/src/server.cpp @@ -5136,7 +5136,7 @@ void redisAsciiArt(void) { mode, g_pserver->port ? 
g_pserver->port : g_pserver->tls_port ); } else { - sds motd = fetchMOTD(true); + sds motd = fetchMOTD(true, cserver.enable_motd); snprintf(buf,1024*16,ascii_logo, KEYDB_REAL_VERSION, redisGitSHA1(), diff --git a/src/server.h b/src/server.h index 0fc874b6a..32581fb00 100644 --- a/src/server.h +++ b/src/server.h @@ -1456,6 +1456,7 @@ struct redisServerConst { bool fUsePro = false; int thread_min_client_threshold = 50; int multimaster_no_forward; + int enable_motd; /* Flag to retrieve the Message of today using CURL request*/ }; struct redisServer { diff --git a/tests/unit/introspection.tcl b/tests/unit/introspection.tcl index ffad7405f..55f224e32 100644 --- a/tests/unit/introspection.tcl +++ b/tests/unit/introspection.tcl @@ -67,6 +67,7 @@ start_server {tags {"introspection"}} { io-threads-do-reads tcp-backlog always-show-logo + enable-motd syslog-enabled cluster-enabled aclfile From 6632bafced5d19b107279983e2b2bc045e001a75 Mon Sep 17 00:00:00 2001 From: VivekSainiEQ Date: Thu, 26 Nov 2020 17:02:45 +0000 Subject: [PATCH 209/215] Added KeyDB.info to .gitignore file Former-commit-id: e10dc0dace6b152418aad2bd3ee212a471bca9e8 --- src/.gitignore | 1 + 1 file changed, 1 insertion(+) diff --git a/src/.gitignore b/src/.gitignore index aee7aacf0..a9c50bee3 100644 --- a/src/.gitignore +++ b/src/.gitignore @@ -2,4 +2,5 @@ *.gcno *.gcov redis.info +KeyDB.info lcov-html From 6c16fe69e7f6a2e5f6567a77ac9e404f372d4244 Mon Sep 17 00:00:00 2001 From: VivekSainiEQ Date: Fri, 27 Nov 2020 22:57:18 +0000 Subject: [PATCH 210/215] Added remainder of runtest scripts to make lcov command. Code coverage goes from 57.9% to 72.1% Former-commit-id: f99cba0b92d921b1a2d350f2c7545138485c2a74 --- src/Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Makefile b/src/Makefile index 8cccfd711..09385da30 100644 --- a/src/Makefile +++ b/src/Makefile @@ -400,7 +400,7 @@ check: test lcov: $(MAKE) gcov - @(set -e; cd ..; ./runtest --clients 1) + @(set -e; cd ..; ./runtest; ./runtest-sentinel; ./runtest-cluster; ./runtest-moduleapi) @geninfo -o KeyDB.info --no-external . 
@genhtml --legend -o lcov-html KeyDB.info @genhtml --legend -o lcov-html KeyDB.info | grep lines | awk '{print $$2;}' | sed 's/%//g' From 54b36868bac0b2be5fff9ec054e2474297d31e9c Mon Sep 17 00:00:00 2001 From: VivekSainiEQ Date: Mon, 30 Nov 2020 20:58:00 +0000 Subject: [PATCH 211/215] Added tests for saving various data types to disk and loading them back, and for loading data types from redis to maintain compatibility Former-commit-id: dcb44d3a09a4021f05079bedbac690e33ec7f39e --- tests/assets/redis-save.rdb | Bin 0 -> 268 bytes tests/test_helper.tcl | 1 + tests/unit/loadsave.tcl | 50 ++++++++++++++++++++++++++++++++++++ 3 files changed, 51 insertions(+) create mode 100644 tests/assets/redis-save.rdb create mode 100644 tests/unit/loadsave.tcl diff --git a/tests/assets/redis-save.rdb b/tests/assets/redis-save.rdb new file mode 100644 index 0000000000000000000000000000000000000000..efcbb6de43df678e4a39291feffede5a4e1a6c81 GIT binary patch literal 268 zcmWG?b@2=~Ffg$E#aWb^l3A=fx&=k4iMdHRsRtPTG5ls{U|=gQDay=CXLEFNHZ(Hk zVW}!kEs>ODU|i%S?qMS(hmfS8qm;TzLerY}sNnLhnzV9v}dIdq9>nkO$yMq+V>vNBMG48&{} mpxK6OEI^}yB#>b2`mCu^1s!lB+pl9wgdog3RD#U literal 0 HcmV?d00001 diff --git a/tests/test_helper.tcl b/tests/test_helper.tcl index 6ee5572e1..a148d7bc1 100644 --- a/tests/test_helper.tcl +++ b/tests/test_helper.tcl @@ -76,6 +76,7 @@ set ::all_tests { unit/tls unit/tracking unit/oom-score-adj + unit/loadsave } # Index to the next test to run in the ::all_tests list. set ::next_test 0 diff --git a/tests/unit/loadsave.tcl b/tests/unit/loadsave.tcl new file mode 100644 index 000000000..8ef4fd1e8 --- /dev/null +++ b/tests/unit/loadsave.tcl @@ -0,0 +1,50 @@ +set server_path [tmpdir "server.rdb-encoding-test"] +set testmodule [file normalize tests/modules/datatype.so] + +# Store a bunch of datatypes to the database, +# compute the hash of the database, +# and save the data to a file +start_server [list tags [list "loadsave"] overrides [list "dir" $server_path "loadmodule" $testmodule] keep_persistence true] { + + test "Save various data types to .rdb file" { + r set "int" [expr {int(rand()*10000)}] + r set "string" [string repeat A [expr {int(rand()*10000)}]] + r hset "hash" [string repeat A [expr {int(rand()*1000)}]] 0[string repeat A [expr {int(rand()*1000)}]] + r sadd "set" [string repeat A [expr {int(rand()*1000)}]] + r zadd "zset" [expr {rand()}] [string repeat A [expr {int(rand()*1000)}]] + r lpush "list" [string repeat A [expr {int(rand()*1000)}]] + r datatype.set dtkey 100 stringval + r keydb.cron "cron" single [expr {10000 + int(rand()*1000)}] "return 0" 0;# set delay long enough so it doesn't contend with saving + set saved_digest [r debug digest];# debug digest computes the hash + r save + } {OK}; +} + +# Load that data back from the file, +# and compare its hash to the previously computed hash +start_server [list tags [list "loadsave"] overrides [list "dir" $server_path "loadmodule" $testmodule] keep_persistence true] { + test "Load various data types from .rdb file" { + set loaded_digest [r debug digest] + if {![string match $saved_digest $loaded_digest]} { + fail "Loaded data did not match saved data" + } + } +} + +# Load in data from a redis instance +# The hash should match what we get in redis +set saved_digest 26ce4a819a86355af7ec75c7a3410f5b9fad02f3 +exec cp -f tests/assets/redis-save.rdb $server_path/dump.rdb + +start_server [list tags [list "loadsave"] overrides [list "dir" $server_path "loadmodule" $testmodule] keep_persistence true] { + test "Load various data types from Redis generated .rdb 
file" { + set loaded_digest [r debug digest] + if {![string match $saved_digest $loaded_digest]} { + fail "Loaded data did not match saved data" + } + } +} + +puts $server_path + + From 3d15a7b65a5c9d599bf75dbf9e21ffdd07f76041 Mon Sep 17 00:00:00 2001 From: VivekSainiEQ Date: Mon, 30 Nov 2020 21:38:04 +0000 Subject: [PATCH 212/215] Removed use of module datatypes, now should work if tests/modules is not built Former-commit-id: 089f28e8ba91ed2b875048efa0b28ceb32010541 --- tests/assets/redis-save.rdb | Bin 268 -> 247 bytes tests/unit/loadsave.tcl | 10 ++++------ 2 files changed, 4 insertions(+), 6 deletions(-) diff --git a/tests/assets/redis-save.rdb b/tests/assets/redis-save.rdb index efcbb6de43df678e4a39291feffede5a4e1a6c81..41478af228ae64225d8d89ac70940b44e652ba81 100644 GIT binary patch delta 61 zcmV-D0K)%_0`~!sHYZ|b#b5deb#rB8Ep26O!uUiD0QwAJZ)PoUa%Ev{Vr*r=0R8~` T29aGbk)mP<%2037ABPkc3?CTY delta 83 zcmV-Z0IdJ_0gM8WHYe>{#b5deb#rB8Ep26O!eB)W0QwAJZ)PoUa%Ev{Vr*r=0R8~` p2a#Pc6$b=lVRT`Ewd*ba|J}3!0s;lV3ILIfVhDvibZ$)&n;49gAgTZW diff --git a/tests/unit/loadsave.tcl b/tests/unit/loadsave.tcl index 8ef4fd1e8..28052fc0d 100644 --- a/tests/unit/loadsave.tcl +++ b/tests/unit/loadsave.tcl @@ -1,10 +1,9 @@ set server_path [tmpdir "server.rdb-encoding-test"] -set testmodule [file normalize tests/modules/datatype.so] # Store a bunch of datatypes to the database, # compute the hash of the database, # and save the data to a file -start_server [list tags [list "loadsave"] overrides [list "dir" $server_path "loadmodule" $testmodule] keep_persistence true] { +start_server [list tags [list "loadsave"] overrides [list "dir" $server_path] keep_persistence true] { test "Save various data types to .rdb file" { r set "int" [expr {int(rand()*10000)}] @@ -13,7 +12,6 @@ start_server [list tags [list "loadsave"] overrides [list "dir" $server_path "l r sadd "set" [string repeat A [expr {int(rand()*1000)}]] r zadd "zset" [expr {rand()}] [string repeat A [expr {int(rand()*1000)}]] r lpush "list" [string repeat A [expr {int(rand()*1000)}]] - r datatype.set dtkey 100 stringval r keydb.cron "cron" single [expr {10000 + int(rand()*1000)}] "return 0" 0;# set delay long enough so it doesn't contend with saving set saved_digest [r debug digest];# debug digest computes the hash r save @@ -22,7 +20,7 @@ start_server [list tags [list "loadsave"] overrides [list "dir" $server_path "l # Load that data back from the file, # and compare its hash to the previously computed hash -start_server [list tags [list "loadsave"] overrides [list "dir" $server_path "loadmodule" $testmodule] keep_persistence true] { +start_server [list tags [list "loadsave"] overrides [list "dir" $server_path] keep_persistence true] { test "Load various data types from .rdb file" { set loaded_digest [r debug digest] if {![string match $saved_digest $loaded_digest]} { @@ -33,10 +31,10 @@ start_server [list tags [list "loadsave"] overrides [list "dir" $server_path "lo # Load in data from a redis instance # The hash should match what we get in redis -set saved_digest 26ce4a819a86355af7ec75c7a3410f5b9fad02f3 +set saved_digest 0cff3e9c86eb26ef3b5c0e6bac8315829ad6adf4 exec cp -f tests/assets/redis-save.rdb $server_path/dump.rdb -start_server [list tags [list "loadsave"] overrides [list "dir" $server_path "loadmodule" $testmodule] keep_persistence true] { +start_server [list tags [list "loadsave"] overrides [list "dir" $server_path] keep_persistence true] { test "Load various data types from Redis generated .rdb file" { set loaded_digest [r debug digest] if {![string match 
$saved_digest $loaded_digest]} { From 52561f17293ed58bc91d742175a7232dd8d34465 Mon Sep 17 00:00:00 2001 From: VivekSainiEQ Date: Mon, 30 Nov 2020 22:18:32 +0000 Subject: [PATCH 213/215] Added module data type load/save tests Former-commit-id: cae9924fd9eefcd88cef1c964f0bc8bce7dd4242 --- runtest-moduleapi | 1 + tests/assets/redis-module-save.rdb | Bin 0 -> 214 bytes tests/unit/loadsave.tcl | 4 +-- tests/unit/moduleapi/moduleloadsave.tcl | 43 ++++++++++++++++++++++++ 4 files changed, 45 insertions(+), 3 deletions(-) create mode 100644 tests/assets/redis-module-save.rdb create mode 100644 tests/unit/moduleapi/moduleloadsave.tcl diff --git a/runtest-moduleapi b/runtest-moduleapi index f3abde740..8b2a12806 100755 --- a/runtest-moduleapi +++ b/runtest-moduleapi @@ -27,4 +27,5 @@ $TCLSH tests/test_helper.tcl \ --single unit/moduleapi/auth \ --single unit/moduleapi/keyspace_events \ --single unit/moduleapi/blockedclient \ +--single unit/moduleapi/moduleloadsave \ "${@}" diff --git a/tests/assets/redis-module-save.rdb b/tests/assets/redis-module-save.rdb new file mode 100644 index 0000000000000000000000000000000000000000..b741b1b44d7abea34e638143f748c6a7f7d84782 GIT binary patch literal 214 zcmWG?b@2=~Ffg$E#aWb^l3A=RR+=95J6*PL6*eglr)IA5kg!JVT2(xNCEu1U_ E0IQr+djJ3c literal 0 HcmV?d00001 diff --git a/tests/unit/loadsave.tcl b/tests/unit/loadsave.tcl index 28052fc0d..4216d3960 100644 --- a/tests/unit/loadsave.tcl +++ b/tests/unit/loadsave.tcl @@ -37,12 +37,10 @@ exec cp -f tests/assets/redis-save.rdb $server_path/dump.rdb start_server [list tags [list "loadsave"] overrides [list "dir" $server_path] keep_persistence true] { test "Load various data types from Redis generated .rdb file" { set loaded_digest [r debug digest] + puts loaded_digest if {![string match $saved_digest $loaded_digest]} { fail "Loaded data did not match saved data" } } } -puts $server_path - - diff --git a/tests/unit/moduleapi/moduleloadsave.tcl b/tests/unit/moduleapi/moduleloadsave.tcl new file mode 100644 index 000000000..9a5796384 --- /dev/null +++ b/tests/unit/moduleapi/moduleloadsave.tcl @@ -0,0 +1,43 @@ +set server_path [tmpdir "server.rdb-encoding-test"] +set testmodule [file normalize tests/modules/datatype.so] + +# Store module data typed data to the database, +# compute the hash of the database, +# and save the data to a file +start_server [list tags [list "loadsave"] overrides [list "dir" $server_path "loadmodule" $testmodule] keep_persistence true] { + + test "Save module data type to .rdb file" { + r datatype.set key1 100 stringval + r datatype.set key2 200 stringval + r datatype.set key3 300 stringval + r datatype.set key4 400 stringval + r datatype.set key5 500 stringval + set saved_digest [r debug digest]; + r save + } {OK}; +} + +# Load that data back from the file, +# and compare its hash to the previously computed hash +start_server [list tags [list "loadsave"] overrides [list "dir" $server_path "loadmodule" $testmodule] keep_persistence true] { + test "Load module data type from .rdb file" { + set loaded_digest [r debug digest] + if {![string match $saved_digest $loaded_digest]} { + fail "Loaded data did not match saved data" + } + } +} + +# Load in data from a redis instance +# The hash should match what we get in redis +set saved_digest acffad6b89e21339dc5c80f53f6c6fa15977a848 +exec cp -f tests/assets/redis-module-save.rdb $server_path/dump.rdb + +start_server [list tags [list "loadsave"] overrides [list "dir" $server_path "loadmodule" $testmodule] keep_persistence true] { + test "Load module data type from 
Redis generated .rdb file" { + set loaded_digest [r debug digest] + if {![string match $saved_digest $loaded_digest]} { + fail "Loaded data did not match saved data" + } + } +} From ecad9741bc53b9bca314c407ddbeb74f28537b9c Mon Sep 17 00:00:00 2001 From: John Sully Date: Mon, 30 Nov 2020 22:59:00 +0000 Subject: [PATCH 214/215] code coverage reports should test multithreaded Former-commit-id: 5d016fdb6b11404d0eddbab651ed17180526a991 --- src/Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Makefile b/src/Makefile index 09385da30..27dc5e4c5 100644 --- a/src/Makefile +++ b/src/Makefile @@ -400,7 +400,7 @@ check: test lcov: $(MAKE) gcov - @(set -e; cd ..; ./runtest; ./runtest-sentinel; ./runtest-cluster; ./runtest-moduleapi) + @(set -e; cd ..; ./runtest --config server-threads 3; ./runtest-sentinel; ./runtest-cluster; ./runtest-moduleapi) @geninfo -o KeyDB.info --no-external . @genhtml --legend -o lcov-html KeyDB.info @genhtml --legend -o lcov-html KeyDB.info | grep lines | awk '{print $$2;}' | sed 's/%//g' From 300eca8ff6f3160d43e01aa8697c1cf1ee984bc2 Mon Sep 17 00:00:00 2001 From: John Sully Date: Mon, 30 Nov 2020 23:01:20 +0000 Subject: [PATCH 215/215] Issue: #260 Don't use C++ static_assert in C files Former-commit-id: f19b9c155ef5b957f7cd9a3017c54aab742ffb2f --- src/fastlock.h | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/fastlock.h b/src/fastlock.h index e4ab1874f..0809c8bcd 100644 --- a/src/fastlock.h +++ b/src/fastlock.h @@ -85,4 +85,6 @@ struct fastlock #endif }; +#ifdef __cplusplus static_assert(offsetof(struct fastlock, m_ticket) == 64, "ensure padding is correct"); +#endif \ No newline at end of file
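
The final patch above compiles the layout assertion only for C++ translation units, since static_assert is a C++11 keyword that plain C compilers reject. A minimal sketch of how the same compile-time check could also be kept active in C builds, assuming a C11 toolchain and using a hypothetical stand-in struct rather than the real struct fastlock:

    /* Sketch only: "fastlock_sketch" is a hypothetical stand-in that mimics
     * 64 bytes of padding in front of m_ticket; it is not the KeyDB definition. */
    #include <stddef.h>

    struct fastlock_sketch {
        char pad[64];
        unsigned m_ticket;
    };

    #if defined(__cplusplus)
    /* C++11 and later: static_assert is a keyword. */
    static_assert(offsetof(struct fastlock_sketch, m_ticket) == 64,
                  "ensure padding is correct");
    #elif defined(__STDC_VERSION__) && __STDC_VERSION__ >= 201112L
    /* C11 and later: the equivalent spelling is _Static_assert. */
    _Static_assert(offsetof(struct fastlock_sketch, m_ticket) == 64,
                   "ensure padding is correct");
    #endif

The #ifdef __cplusplus guard applied in the patch is the smaller change; the C11 form would additionally keep the padding verified when fastlock.h is included from C sources, at the cost of assuming _Static_assert support in the C compiler.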