From 67ed3183b561752561e85f50970cfd477ddb3456 Mon Sep 17 00:00:00 2001 From: "Chayim I. Kirshen" Date: Thu, 13 Jul 2023 09:23:10 +0300 Subject: [PATCH 01/20] Squashed 'deps/hiredis/' changes from b6a052fe0..60e5075d4 60e5075d4 Version 1.2.0 (#1202) adef139a7 Remove support in deprecated TLS versions 1.0 and 1.1 (#1205) d543baba6 Install major version symlink of shared objects. 052f99ab2 Ensure functionality without _MSC_VER definition ded32c7d1 Add a test for the TCP_USER_TIMEOUT option. (#1192) 5cbd1f296 Add -Werror as a default. (#1193) af1445638 CI: Update homebrew Redis version. (#1191) 6de326e87 Fix a typo in b6a052f. git-subtree-dir: deps/hiredis git-subtree-split: 60e5075d4ac77424809f855ba3e398df7aacefe8 --- .github/workflows/build.yml | 4 +-- .github/workflows/test.yml | 2 +- CHANGELOG.md | 57 +++++++++++++++++++++++++++++++++++-- Makefile | 6 ++-- hiredis.c | 4 +-- hiredis.h | 6 ++-- net.c | 3 +- sds.c | 12 ++++---- ssl.c | 20 +++++++++++-- test.c | 16 +++++++++-- 10 files changed, 104 insertions(+), 26 deletions(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 1a1ef5153..581800b4f 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -133,8 +133,8 @@ jobs: - name: Install dependencies run: | - brew install openssl redis@6.2 - brew link redis@6.2 --force + brew install openssl redis@7.0 + brew link redis@7.0 --force - name: Build hiredis run: USE_SSL=1 make diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 7812af6f7..1a2c60b79 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -60,7 +60,7 @@ jobs: steps: - name: Install qemu if: matrix.emulator - run: sudo apt-get install -y qemu-user + run: sudo apt-get update && sudo apt-get install -y qemu-user - name: Install platform toolset if: matrix.toolset run: sudo apt-get install -y gcc-${{matrix.toolset}} diff --git a/CHANGELOG.md b/CHANGELOG.md index a2e065b2c..801c40729 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,12 +1,63 @@ +## [1.2.0](https://github.com/redis/hiredis/tree/v1.2.0) - (2023-06-04) + +Announcing Hiredis v1.2.0 with with new adapters, and a great many bug fixes. + +## 🚀 New Features + +- Add sdevent adapter @Oipo (#1144) +- Allow specifying the keepalive interval @michael-grunder (#1168) +- Add RedisModule adapter @tezc (#1182) +- Helper for setting TCP_USER_TIMEOUT socket option @zuiderkwast (#1188) + +## 🐛 Bug Fixes + +- Fix a typo in b6a052f. @yossigo (#1190) +- Fix wincrypt symbols conflict @hudayou (#1151) +- Don't attempt to set a timeout if we are in an error state. @michael-grunder (#1180) +- Accept -nan per the RESP3 spec recommendation. @michael-grunder (#1178) +- Fix colliding option values @zuiderkwast (#1172) +- Ensure functionality without `_MSC_VER` definition @windyakin (#1194) + +## 🧰 Maintenance + +- Add a test for the TCP_USER_TIMEOUT option. @michael-grunder (#1192) +- Add -Werror as a default. @yossigo (#1193) +- CI: Update homebrew Redis version. @yossigo (#1191) +- Fix typo in makefile. @michael-grunder (#1179) +- Write a version file for the CMake package @Neverlord (#1165) +- CMakeLists.txt: respect BUILD_SHARED_LIBS @ffontaine (#1147) +- Cmake static or shared @autoantwort (#1160) +- fix typo @tillkruss (#1153) +- Add a test ensuring we don't clobber connection error. @michael-grunder (#1181) +- Search for openssl on macOS @michael-grunder (#1169) + + +## Contributors +We'd like to thank all the contributors who worked on this release! 
+ + + + + + + + + + + + + + + ## [1.1.0](https://github.com/redis/hiredis/tree/v1.1.0) - (2022-11-15) Announcing Hiredis v1.1.0 GA with better SSL convenience, new async adapters and a great many bug fixes. -**NOTE**: Hiredis can now return `nan` in addition to `-inf` and `inf` when returning a `REDIS_REPLY_DOUBLE`. +**NOTE**: Hiredis can now return `nan` in addition to `-inf` and `inf` when returning a `REDIS_REPLY_DOUBLE`. ## 🐛 Bug Fixes -- Add support for nan in RESP3 double [@filipecosta90](https://github.com/filipecosta90) +- Add support for nan in RESP3 double [@filipecosta90](https://github.com/filipecosta90) ([\#1133](https://github.com/redis/hiredis/pull/1133)) ## 🧰 Maintenance @@ -14,7 +65,7 @@ Announcing Hiredis v1.1.0 GA with better SSL convenience, new async adapters and - Add an example that calls redisCommandArgv [@michael-grunder](https://github.com/michael-grunder) ([\#1140](https://github.com/redis/hiredis/pull/1140)) - fix flag reference [@pata00](https://github.com/pata00) ([\#1136](https://github.com/redis/hiredis/pull/1136)) -- Make freeing a NULL redisAsyncContext a no op. [@michael-grunder](https://github.com/michael-grunder) +- Make freeing a NULL redisAsyncContext a no op. [@michael-grunder](https://github.com/michael-grunder) ([\#1135](https://github.com/redis/hiredis/pull/1135)) - CI updates ([@bjosv](https://github.com/redis/bjosv) ([\#1139](https://github.com/redis/hiredis/pull/1139)) diff --git a/Makefile b/Makefile index f31293e90..bd2106b1d 100644 --- a/Makefile +++ b/Makefile @@ -39,7 +39,7 @@ export REDIS_TEST_CONFIG CC:=$(shell sh -c 'type $${CC%% *} >/dev/null 2>/dev/null && echo $(CC) || echo gcc') CXX:=$(shell sh -c 'type $${CXX%% *} >/dev/null 2>/dev/null && echo $(CXX) || echo g++') OPTIMIZATION?=-O3 -WARNINGS=-Wall -W -Wstrict-prototypes -Wwrite-strings -Wno-missing-field-initializers +WARNINGS=-Wall -Wextra -Werror -Wstrict-prototypes -Wwrite-strings -Wno-missing-field-initializers DEBUG_FLAGS?= -g -ggdb REAL_CFLAGS=$(OPTIMIZATION) -fPIC $(CPPFLAGS) $(CFLAGS) $(WARNINGS) $(DEBUG_FLAGS) $(PLATFORM_FLAGS) REAL_LDFLAGS=$(LDFLAGS) @@ -311,7 +311,7 @@ install: $(DYLIBNAME) $(STLIBNAME) $(PKGCONFNAME) $(SSL_INSTALL) $(INSTALL) hiredis.h async.h read.h sds.h alloc.h sockcompat.h $(INSTALL_INCLUDE_PATH) $(INSTALL) adapters/*.h $(INSTALL_INCLUDE_PATH)/adapters $(INSTALL) $(DYLIBNAME) $(INSTALL_LIBRARY_PATH)/$(DYLIB_MINOR_NAME) - cd $(INSTALL_LIBRARY_PATH) && ln -sf $(DYLIB_MINOR_NAME) $(DYLIBNAME) + cd $(INSTALL_LIBRARY_PATH) && ln -sf $(DYLIB_MINOR_NAME) $(DYLIBNAME) && ln -sf $(DYLIB_MINOR_NAME) $(DYLIB_MAJOR_NAME) $(INSTALL) $(STLIBNAME) $(INSTALL_LIBRARY_PATH) mkdir -p $(INSTALL_PKGCONF_PATH) $(INSTALL) $(PKGCONFNAME) $(INSTALL_PKGCONF_PATH) @@ -320,7 +320,7 @@ install-ssl: $(SSL_DYLIBNAME) $(SSL_STLIBNAME) $(SSL_PKGCONFNAME) mkdir -p $(INSTALL_INCLUDE_PATH) $(INSTALL_LIBRARY_PATH) $(INSTALL) hiredis_ssl.h $(INSTALL_INCLUDE_PATH) $(INSTALL) $(SSL_DYLIBNAME) $(INSTALL_LIBRARY_PATH)/$(SSL_DYLIB_MINOR_NAME) - cd $(INSTALL_LIBRARY_PATH) && ln -sf $(SSL_DYLIB_MINOR_NAME) $(SSL_DYLIBNAME) + cd $(INSTALL_LIBRARY_PATH) && ln -sf $(SSL_DYLIB_MINOR_NAME) $(SSL_DYLIBNAME) && ln -sf $(SSL_DYLIB_MINOR_NAME) $(SSL_DYLIB_MAJOR_NAME) $(INSTALL) $(SSL_STLIBNAME) $(INSTALL_LIBRARY_PATH) mkdir -p $(INSTALL_PKGCONF_PATH) $(INSTALL) $(SSL_PKGCONFNAME) $(INSTALL_PKGCONF_PATH) diff --git a/hiredis.c b/hiredis.c index 9d8a500c7..446ceb1e6 100644 --- a/hiredis.c +++ b/hiredis.c @@ -392,12 +392,12 @@ int redisvFormatCommand(char **target, const char *format, va_list ap) { while 
(*_p != '\0' && strchr(flags,*_p) != NULL) _p++; /* Field width */ - while (*_p != '\0' && isdigit(*_p)) _p++; + while (*_p != '\0' && isdigit((int) *_p)) _p++; /* Precision */ if (*_p == '.') { _p++; - while (*_p != '\0' && isdigit(*_p)) _p++; + while (*_p != '\0' && isdigit((int) *_p)) _p++; } /* Copy va_list before consuming with va_arg */ diff --git a/hiredis.h b/hiredis.h index 2291d3eba..14af8dace 100644 --- a/hiredis.h +++ b/hiredis.h @@ -46,9 +46,9 @@ typedef long long ssize_t; #include "alloc.h" /* for allocation wrappers */ #define HIREDIS_MAJOR 1 -#define HIREDIS_MINOR 1 -#define HIREDIS_PATCH 1 -#define HIREDIS_SONAME 1.1.1-dev +#define HIREDIS_MINOR 2 +#define HIREDIS_PATCH 0 +#define HIREDIS_SONAME 1.1.0 /* Connection type can be blocking or non-blocking and is set in the * least significant bit of the flags field in redisContext. */ diff --git a/net.c b/net.c index ccd7f166a..1e016384f 100644 --- a/net.c +++ b/net.c @@ -234,9 +234,10 @@ int redisContextSetTcpUserTimeout(redisContext *c, unsigned int timeout) { res = setsockopt(c->fd, IPPROTO_TCP, TCP_USER_TIMEOUT, &timeout, sizeof(timeout)); #else res = -1; + errno = ENOTSUP; (void)timeout; #endif - if (res == -1); { + if (res == -1) { __redisSetErrorFromErrno(c,REDIS_ERR_IO,"setsockopt(TCP_USER_TIMEOUT)"); redisNetClose(c); return REDIS_ERR; diff --git a/sds.c b/sds.c index a20ba1912..21ecec04e 100644 --- a/sds.c +++ b/sds.c @@ -886,7 +886,7 @@ sds sdscatrepr(sds s, const char *p, size_t len) { case '\a': s = sdscatlen(s,"\\a",2); break; case '\b': s = sdscatlen(s,"\\b",2); break; default: - if (isprint(*p)) + if (isprint((int) *p)) s = sdscatprintf(s,"%c",*p); else s = sdscatprintf(s,"\\x%02x",(unsigned char)*p); @@ -948,7 +948,7 @@ sds *sdssplitargs(const char *line, int *argc) { *argc = 0; while(1) { /* skip blanks */ - while(*p && isspace(*p)) p++; + while(*p && isspace((int) *p)) p++; if (*p) { /* get a token */ int inq=0; /* set to 1 if we are in "quotes" */ @@ -959,8 +959,8 @@ sds *sdssplitargs(const char *line, int *argc) { while(!done) { if (inq) { if (*p == '\\' && *(p+1) == 'x' && - isxdigit(*(p+2)) && - isxdigit(*(p+3))) + isxdigit((int) *(p+2)) && + isxdigit((int) *(p+3))) { unsigned char byte; @@ -984,7 +984,7 @@ sds *sdssplitargs(const char *line, int *argc) { } else if (*p == '"') { /* closing quote must be followed by a space or * nothing at all. */ - if (*(p+1) && !isspace(*(p+1))) goto err; + if (*(p+1) && !isspace((int) *(p+1))) goto err; done=1; } else if (!*p) { /* unterminated quotes */ @@ -999,7 +999,7 @@ sds *sdssplitargs(const char *line, int *argc) { } else if (*p == '\'') { /* closing quote must be followed by a space or * nothing at all. */ - if (*(p+1) && !isspace(*(p+1))) goto err; + if (*(p+1) && !isspace((int) *(p+1))) goto err; done=1; } else if (!*p) { /* unterminated quotes */ diff --git a/ssl.c b/ssl.c index 88bd9f324..21ff35932 100644 --- a/ssl.c +++ b/ssl.c @@ -59,6 +59,8 @@ #include "async_private.h" #include "hiredis_ssl.h" +#define OPENSSL_1_1_0 0x10100000L + void __redisSetError(redisContext *c, int type, const char *str); struct redisSSLContext { @@ -100,7 +102,7 @@ redisContextFuncs redisContextSSLFuncs; * Note that this is only required for OpenSSL < 1.1.0. 
*/ -#if OPENSSL_VERSION_NUMBER < 0x10100000L +#if OPENSSL_VERSION_NUMBER < OPENSSL_1_1_0 #define HIREDIS_USE_CRYPTO_LOCKS #endif @@ -256,13 +258,25 @@ redisSSLContext *redisCreateSSLContextWithOptions(redisSSLOptions *options, redi if (ctx == NULL) goto error; - ctx->ssl_ctx = SSL_CTX_new(SSLv23_client_method()); + const SSL_METHOD *ssl_method; +#if OPENSSL_VERSION_NUMBER >= OPENSSL_1_1_0 + ssl_method = TLS_client_method(); +#else + ssl_method = SSLv23_client_method(); +#endif + + ctx->ssl_ctx = SSL_CTX_new(ssl_method); if (!ctx->ssl_ctx) { if (error) *error = REDIS_SSL_CTX_CREATE_FAILED; goto error; } - SSL_CTX_set_options(ctx->ssl_ctx, SSL_OP_NO_SSLv2 | SSL_OP_NO_SSLv3); +#if OPENSSL_VERSION_NUMBER >= OPENSSL_1_1_0 + SSL_CTX_set_min_proto_version(ctx->ssl_ctx, TLS1_2_VERSION); +#else + SSL_CTX_set_options(ctx->ssl_ctx, SSL_OP_NO_SSLv2 | SSL_OP_NO_SSLv3 | SSL_OP_NO_TLSv1 | SSL_OP_NO_TLSv1_1); +#endif + SSL_CTX_set_verify(ctx->ssl_ctx, options->verify_mode, NULL); if ((cert_filename != NULL && private_key_filename == NULL) || diff --git a/test.c b/test.c index f3cb73434..dc7a78936 100644 --- a/test.c +++ b/test.c @@ -78,7 +78,7 @@ static int tests = 0, fails = 0, skips = 0; static void millisleep(int ms) { -#if _MSC_VER +#ifdef _MSC_VER Sleep(ms); #else usleep(ms*1000); @@ -409,10 +409,19 @@ static void test_tcp_options(struct config cfg) { redisContext *c; c = do_connect(cfg); + test("We can enable TCP_KEEPALIVE: "); test_cond(redisEnableKeepAlive(c) == REDIS_OK); - disconnect(c, 0); +#ifdef TCP_USER_TIMEOUT + test("We can set TCP_USER_TIMEOUT: "); + test_cond(redisSetTcpUserTimeout(c, 100) == REDIS_OK); +#else + test("Setting TCP_USER_TIMEOUT errors when unsupported: "); + test_cond(redisSetTcpUserTimeout(c, 100) == REDIS_ERR && c->err == REDIS_ERR_IO); +#endif + + redisFree(c); } static void test_reply_reader(void) { @@ -1567,6 +1576,9 @@ static void test_throughput(struct config config) { // } #ifdef HIREDIS_TEST_ASYNC + +#pragma GCC diagnostic ignored "-Woverlength-strings" /* required on gcc 4.8.x due to assert statements */ + struct event_base *base; typedef struct TestState { From 0175fe8219a2a6843d1701539ca167dfd45d2b7f Mon Sep 17 00:00:00 2001 From: "Chayim I. Kirshen" Date: Thu, 13 Jul 2023 09:26:12 +0300 Subject: [PATCH 02/20] Explaining hiredis upgrade --- deps/README.md | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/deps/README.md b/deps/README.md index 6d434c21f..8da051baa 100644 --- a/deps/README.md +++ b/deps/README.md @@ -63,6 +63,10 @@ Hiredis Hiredis is used by Sentinel, `redis-cli` and `redis-benchmark`. Like Redis, uses the SDS string library, but not necessarily the same version. In order to avoid conflicts, this version has all SDS identifiers prefixed by `hi`. +1. `git subtree pull --prefix deps/hiredis https://github.com/redis/hiredis.git --squash`
+This should hopefully merge the local changes into the new version. +2. Conflicts will arise (due to our changes) you'll need to resolve them and commit. + Linenoise --- From 91011100ba57b138b729a819787eec75df27b844 Mon Sep 17 00:00:00 2001 From: Chen Tianjie Date: Sun, 16 Jul 2023 11:31:42 +0800 Subject: [PATCH 03/20] Hide the comma after cport when there is no hostname. (#12411) According to the format shown in https://redis.io/commands/cluster-nodes/ ``` ``` when there is no hostname, and the auxiliary fields are hidden, the cluster topology should be ``` ``` However in the code we always print the hostname even when it is an empty string, leaving an unnecessary comma tailing after cport, which is weird and conflicts with the doc. ``` 94ca2f6cf85228a49fde7b738ee1209de7bee325 127.0.0.1:6379@16379, myself,master - 0 0 0 connected 0-16383 ``` --- src/cluster.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/cluster.c b/src/cluster.c index 9e4cfd273..b017b9385 100644 --- a/src/cluster.c +++ b/src/cluster.c @@ -5230,12 +5230,12 @@ sds clusterGenNodeDescription(client *c, clusterNode *node, int tls_primary) { if (sdslen(node->hostname) != 0) { ci = sdscatfmt(ci,",%s", node->hostname); } - if (sdslen(node->hostname) == 0) { - ci = sdscatfmt(ci,",", 1); - } /* Don't expose aux fields to any clients yet but do allow them * to be persisted to nodes.conf */ if (c == NULL) { + if (sdslen(node->hostname) == 0) { + ci = sdscatfmt(ci,",", 1); + } for (int i = af_count-1; i >=0; i--) { if ((tls_primary && i == af_tls_port) || (!tls_primary && i == af_tcp_port)) { continue; From 9a3b99cbd1f6690b96729fdcff85e0af6fbd981e Mon Sep 17 00:00:00 2001 From: George Guimares Date: Sun, 16 Jul 2023 17:04:15 -0500 Subject: [PATCH 04/20] Replaced comment with excessive warning. The data structures in the comment are not in sync and don't need to be. Referring to function that handles conversion. --- src/commands.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/commands.h b/src/commands.h index 52acacfe0..1eefab481 100644 --- a/src/commands.h +++ b/src/commands.h @@ -19,7 +19,7 @@ typedef enum { #define CMD_ARG_MULTIPLE (1<<1) #define CMD_ARG_MULTIPLE_TOKEN (1<<2) -/* WARNING! This struct must match RedisModuleCommandArg */ +/* Must be compatible with RedisModuleCommandArg. See moduleCopyCommandArgs. */ typedef struct redisCommandArg { const char *name; redisCommandArgType type; From 2495b90a647f9f9987202efd29647f81f217b8ad Mon Sep 17 00:00:00 2001 From: Makdon Date: Fri, 21 Jul 2023 06:31:06 +0800 Subject: [PATCH 05/20] redis-cli: use previous hostip when not provided by redis cluster server (#12273) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit When the redis server cluster running on cluster-preferred-endpoint-type unknown-endpoint mode, and receive a request that should be redirected to another redis server node, it does not reply the hostip, but a empty host like MOVED 3999 :6381. The redis-cli would try to connect to an address without a host, which cause the issue: ``` 127.0.0.1:7002> set bar bar -> Redirected to slot [5061] located at :7000 Could not connect to Redis at :7000: No address associated with hostname Could not connect to Redis at :7000: No address associated with hostname not connected> exit ``` In this case, the redis-cli should use the previous hostip when there's no host provided by the server. 
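
To make that fallback concrete, here is a minimal, self-contained C sketch of the behaviour described above. This is an editorial illustration, not code from this patch: the helper name `parse_moved_target` is hypothetical, and the real logic lives in `cliReadReply()` in `src/redis-cli.c`.

```c
/* Standalone sketch (hypothetical helper, not the actual redis-cli code):
 * parse "MOVED <slot> <host>:<port>" and, when <host> is empty
 * (e.g. "MOVED 3999 :6381"), keep the previously known host. */
#include <stdio.h>
#include <string.h>
#include <stdlib.h>

static void parse_moved_target(const char *err, char *host, size_t hostlen, int *port) {
    const char *space = strrchr(err, ' ');   /* start of "<host>:<port>" */
    const char *colon = strrchr(err, ':');   /* separator before the port */
    if (!space || !colon || colon < space) return;
    size_t len = (size_t)(colon - (space + 1));
    if (len > 0 && len < hostlen) {          /* non-empty host: take it */
        memcpy(host, space + 1, len);
        host[len] = '\0';
    }                                        /* empty host: keep previous value */
    *port = atoi(colon + 1);
}

int main(void) {
    char host[64] = "127.0.0.1";             /* host used for the previous connection */
    int port = 0;
    parse_moved_target("MOVED 3999 :6381", host, sizeof(host), &port);
    printf("redirect to %s:%d\n", host, port); /* prints 127.0.0.1:6381 */
    return 0;
}
```

With a non-empty host (e.g. `MOVED 3999 10.0.0.5:7000`) the same routine overwrites the cached host, which mirrors the guarded `sdsnew(p+1)` assignment in the diff below.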
--------- Co-authored-by: Viktor Söderqvist Co-authored-by: Madelyn Olson --- src/redis-cli.c | 8 ++++++-- tests/unit/cluster/cli.tcl | 17 +++++++++++++++++ 2 files changed, 23 insertions(+), 2 deletions(-) diff --git a/src/redis-cli.c b/src/redis-cli.c index de34965b4..b80e45ab8 100644 --- a/src/redis-cli.c +++ b/src/redis-cli.c @@ -2291,8 +2291,12 @@ static int cliReadReply(int output_raw_strings) { slot = atoi(s+1); s = strrchr(p+1,':'); /* MOVED 3999[P]127.0.0.1[S]6381 */ *s = '\0'; - sdsfree(config.conn_info.hostip); - config.conn_info.hostip = sdsnew(p+1); + if (p+1 != s) { + /* Host might be empty, like 'MOVED 3999 :6381', if endpoint type is unknown. Only update the + * host if it's non-empty. */ + sdsfree(config.conn_info.hostip); + config.conn_info.hostip = sdsnew(p+1); + } config.conn_info.hostport = atoi(s+1); if (config.interactive) printf("-> Redirected to slot [%d] located at %s:%d\n", diff --git a/tests/unit/cluster/cli.tcl b/tests/unit/cluster/cli.tcl index 5b7f24927..76e97210f 100644 --- a/tests/unit/cluster/cli.tcl +++ b/tests/unit/cluster/cli.tcl @@ -81,6 +81,23 @@ start_multiple_servers 3 [list overrides $base_conf] { set node1_rd [redis_deferring_client 0] + test "use previous hostip in \"cluster-preferred-endpoint-type unknown-endpoint\" mode" { + + # backup and set cluster-preferred-endpoint-type unknown-endpoint + set endpoint_type_before_set [lindex [split [$node1 CONFIG GET cluster-preferred-endpoint-type] " "] 1] + $node1 CONFIG SET cluster-preferred-endpoint-type unknown-endpoint + + # when redis-cli not in cluster mode, return MOVE with empty host + set slot_for_foo [$node1 CLUSTER KEYSLOT foo] + assert_error "*MOVED $slot_for_foo :*" {$node1 set foo bar} + + # when in cluster mode, redirect using previous hostip + assert_equal "[exec src/redis-cli -h 127.0.0.1 -p [srv 0 port] -c set foo bar]" {OK} + assert_match "[exec src/redis-cli -h 127.0.0.1 -p [srv 0 port] -c get foo]" {bar} + + assert_equal [$node1 CONFIG SET cluster-preferred-endpoint-type "$endpoint_type_before_set"] {OK} + } + test "Sanity test push cmd after resharding" { assert_error {*MOVED*} {$node3 lpush key9184688 v1} From 34b95f752c62e2235f5b98a217664221ae281753 Mon Sep 17 00:00:00 2001 From: Harkrishn Patro Date: Mon, 24 Jul 2023 18:25:50 -0700 Subject: [PATCH 06/20] Add test case for APPEND command usage on integer value (#12429) Add test coverage to validate object encoding update on APPEND command usage on a integer value --- tests/unit/type/string.tcl | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/tests/unit/type/string.tcl b/tests/unit/type/string.tcl index 5a8af7b53..94702ec3d 100644 --- a/tests/unit/type/string.tcl +++ b/tests/unit/type/string.tcl @@ -656,4 +656,19 @@ if {[string match {*jemalloc*} [s mem_allocator]]} { } } } + + test {APPEND modifies the encoding from int to raw} { + r del foo + r set foo 1 + assert_encoding "int" foo + r append foo 2 + + set res {} + lappend res [r get foo] + assert_encoding "raw" foo + + r set bar 12 + assert_encoding "int" bar + lappend res [r get bar] + } {12 12} } From 9f512017aae1a2ca2f7c214585757c7e670878b2 Mon Sep 17 00:00:00 2001 From: nihohit Date: Tue, 25 Jul 2023 10:21:23 +0300 Subject: [PATCH 07/20] Update request/response policies. (#12417) changing the response and request policy of a few commands, see https://redis.io/docs/reference/command-tips 1. RANDOMKEY used to have no response policy, which means that when sent to multiple shards, the responses should be aggregated. 
this normally applies to commands that return arrays, but since RANDOMKEY replies with a simple string, it actually requires a SPECIAL response policy (for the client to select just one) 2. SCAN used to have no response policy, but although the key names part of the response can be aggregated, the cursor part certainly can't. 3. MSETNX had a request policy of MULTI_SHARD and response policy of AGG_MIN, but in fact the contract with MSETNX is that when one key exists, it returns 0 and doesn't set any key, routing it to multiple shards would mean that if one failed and another succeeded, it's atomicity is broken and it's impossible to return a valid response to the caller. Co-authored-by: Shachar Langbeheim Co-authored-by: Oran Agra --- src/commands.def | 13 ++++++------- src/commands/msetnx.json | 4 ---- src/commands/randomkey.json | 1 + src/commands/scan.json | 3 ++- 4 files changed, 9 insertions(+), 12 deletions(-) diff --git a/src/commands.def b/src/commands.def index 7e575648c..d51a6057a 100644 --- a/src/commands.def +++ b/src/commands.def @@ -2328,6 +2328,7 @@ struct COMMAND_ARG PTTL_Args[] = { /* RANDOMKEY tips */ const char *RANDOMKEY_Tips[] = { "request_policy:all_shards", +"response_policy:special", "nondeterministic_output", }; #endif @@ -2437,6 +2438,7 @@ commandHistory SCAN_History[] = { const char *SCAN_Tips[] = { "nondeterministic_output", "request_policy:special", +"response_policy:special", }; #endif @@ -10263,10 +10265,7 @@ struct COMMAND_ARG MSET_Args[] = { #ifndef SKIP_CMD_TIPS_TABLE /* MSETNX tips */ -const char *MSETNX_Tips[] = { -"request_policy:multi_shard", -"response_policy:agg_min", -}; +#define MSETNX_Tips NULL #endif #ifndef SKIP_CMD_KEY_SPECS_TABLE @@ -10621,11 +10620,11 @@ struct COMMAND_STRUCT redisCommandTable[] = { {MAKE_CMD("pexpireat","Sets the expiration time of a key to a Unix milliseconds timestamp.","O(1)","2.6.0",CMD_DOC_NONE,NULL,NULL,"generic",COMMAND_GROUP_GENERIC,PEXPIREAT_History,1,PEXPIREAT_Tips,0,pexpireatCommand,-3,CMD_WRITE|CMD_FAST,ACL_CATEGORY_KEYSPACE,PEXPIREAT_Keyspecs,1,NULL,3),.args=PEXPIREAT_Args}, {MAKE_CMD("pexpiretime","Returns the expiration time of a key as a Unix milliseconds timestamp.","O(1)","7.0.0",CMD_DOC_NONE,NULL,NULL,"generic",COMMAND_GROUP_GENERIC,PEXPIRETIME_History,0,PEXPIRETIME_Tips,0,pexpiretimeCommand,2,CMD_READONLY|CMD_FAST,ACL_CATEGORY_KEYSPACE,PEXPIRETIME_Keyspecs,1,NULL,1),.args=PEXPIRETIME_Args}, {MAKE_CMD("pttl","Returns the expiration time in milliseconds of a key.","O(1)","2.6.0",CMD_DOC_NONE,NULL,NULL,"generic",COMMAND_GROUP_GENERIC,PTTL_History,1,PTTL_Tips,1,pttlCommand,2,CMD_READONLY|CMD_FAST,ACL_CATEGORY_KEYSPACE,PTTL_Keyspecs,1,NULL,1),.args=PTTL_Args}, -{MAKE_CMD("randomkey","Returns a random key name from the database.","O(1)","1.0.0",CMD_DOC_NONE,NULL,NULL,"generic",COMMAND_GROUP_GENERIC,RANDOMKEY_History,0,RANDOMKEY_Tips,2,randomkeyCommand,1,CMD_READONLY|CMD_TOUCHES_ARBITRARY_KEYS,ACL_CATEGORY_KEYSPACE,RANDOMKEY_Keyspecs,0,NULL,0)}, +{MAKE_CMD("randomkey","Returns a random key name from the database.","O(1)","1.0.0",CMD_DOC_NONE,NULL,NULL,"generic",COMMAND_GROUP_GENERIC,RANDOMKEY_History,0,RANDOMKEY_Tips,3,randomkeyCommand,1,CMD_READONLY|CMD_TOUCHES_ARBITRARY_KEYS,ACL_CATEGORY_KEYSPACE,RANDOMKEY_Keyspecs,0,NULL,0)}, {MAKE_CMD("rename","Renames a key and overwrites the destination.","O(1)","1.0.0",CMD_DOC_NONE,NULL,NULL,"generic",COMMAND_GROUP_GENERIC,RENAME_History,0,RENAME_Tips,0,renameCommand,3,CMD_WRITE,ACL_CATEGORY_KEYSPACE,RENAME_Keyspecs,2,NULL,2),.args=RENAME_Args}, 
{MAKE_CMD("renamenx","Renames a key only when the target key name doesn't exist.","O(1)","1.0.0",CMD_DOC_NONE,NULL,NULL,"generic",COMMAND_GROUP_GENERIC,RENAMENX_History,1,RENAMENX_Tips,0,renamenxCommand,3,CMD_WRITE|CMD_FAST,ACL_CATEGORY_KEYSPACE,RENAMENX_Keyspecs,2,NULL,2),.args=RENAMENX_Args}, {MAKE_CMD("restore","Creates a key from the serialized representation of a value.","O(1) to create the new key and additional O(N*M) to reconstruct the serialized value, where N is the number of Redis objects composing the value and M their average size. For small string values the time complexity is thus O(1)+O(1*M) where M is small, so simply O(1). However for sorted set values the complexity is O(N*M*log(N)) because inserting values into sorted sets is O(log(N)).","2.6.0",CMD_DOC_NONE,NULL,NULL,"generic",COMMAND_GROUP_GENERIC,RESTORE_History,3,RESTORE_Tips,0,restoreCommand,-4,CMD_WRITE|CMD_DENYOOM,ACL_CATEGORY_KEYSPACE|ACL_CATEGORY_DANGEROUS,RESTORE_Keyspecs,1,NULL,7),.args=RESTORE_Args}, -{MAKE_CMD("scan","Iterates over the key names in the database.","O(1) for every call. O(N) for a complete iteration, including enough command calls for the cursor to return back to 0. N is the number of elements inside the collection.","2.8.0",CMD_DOC_NONE,NULL,NULL,"generic",COMMAND_GROUP_GENERIC,SCAN_History,1,SCAN_Tips,2,scanCommand,-2,CMD_READONLY|CMD_TOUCHES_ARBITRARY_KEYS,ACL_CATEGORY_KEYSPACE,SCAN_Keyspecs,0,NULL,4),.args=SCAN_Args}, +{MAKE_CMD("scan","Iterates over the key names in the database.","O(1) for every call. O(N) for a complete iteration, including enough command calls for the cursor to return back to 0. N is the number of elements inside the collection.","2.8.0",CMD_DOC_NONE,NULL,NULL,"generic",COMMAND_GROUP_GENERIC,SCAN_History,1,SCAN_Tips,3,scanCommand,-2,CMD_READONLY|CMD_TOUCHES_ARBITRARY_KEYS,ACL_CATEGORY_KEYSPACE,SCAN_Keyspecs,0,NULL,4),.args=SCAN_Args}, {MAKE_CMD("sort","Sorts the elements in a list, a set, or a sorted set, optionally storing the result.","O(N+M*log(M)) where N is the number of elements in the list or set to sort, and M the number of returned elements. When the elements are not sorted, complexity is O(N).","1.0.0",CMD_DOC_NONE,NULL,NULL,"generic",COMMAND_GROUP_GENERIC,SORT_History,0,SORT_Tips,0,sortCommand,-2,CMD_WRITE|CMD_DENYOOM,ACL_CATEGORY_SET|ACL_CATEGORY_SORTEDSET|ACL_CATEGORY_LIST|ACL_CATEGORY_DANGEROUS,SORT_Keyspecs,3,sortGetKeys,7),.args=SORT_Args}, {MAKE_CMD("sort_ro","Returns the sorted elements of a list, a set, or a sorted set.","O(N+M*log(M)) where N is the number of elements in the list or set to sort, and M the number of returned elements. 
When the elements are not sorted, complexity is O(N).","7.0.0",CMD_DOC_NONE,NULL,NULL,"generic",COMMAND_GROUP_GENERIC,SORT_RO_History,0,SORT_RO_Tips,0,sortroCommand,-2,CMD_READONLY,ACL_CATEGORY_SET|ACL_CATEGORY_SORTEDSET|ACL_CATEGORY_LIST|ACL_CATEGORY_DANGEROUS,SORT_RO_Keyspecs,2,sortROGetKeys,6),.args=SORT_RO_Args}, {MAKE_CMD("touch","Returns the number of existing keys out of those specified after updating the time they were last accessed.","O(N) where N is the number of keys that will be touched.","3.2.1",CMD_DOC_NONE,NULL,NULL,"generic",COMMAND_GROUP_GENERIC,TOUCH_History,0,TOUCH_Tips,2,touchCommand,-2,CMD_READONLY|CMD_FAST,ACL_CATEGORY_KEYSPACE,TOUCH_Keyspecs,1,NULL,1),.args=TOUCH_Args}, @@ -10827,7 +10826,7 @@ struct COMMAND_STRUCT redisCommandTable[] = { {MAKE_CMD("lcs","Finds the longest common substring.","O(N*M) where N and M are the lengths of s1 and s2, respectively","7.0.0",CMD_DOC_NONE,NULL,NULL,"string",COMMAND_GROUP_STRING,LCS_History,0,LCS_Tips,0,lcsCommand,-3,CMD_READONLY,ACL_CATEGORY_STRING,LCS_Keyspecs,1,NULL,6),.args=LCS_Args}, {MAKE_CMD("mget","Atomically returns the string values of one or more keys.","O(N) where N is the number of keys to retrieve.","1.0.0",CMD_DOC_NONE,NULL,NULL,"string",COMMAND_GROUP_STRING,MGET_History,0,MGET_Tips,1,mgetCommand,-2,CMD_READONLY|CMD_FAST,ACL_CATEGORY_STRING,MGET_Keyspecs,1,NULL,1),.args=MGET_Args}, {MAKE_CMD("mset","Atomically creates or modifies the string values of one or more keys.","O(N) where N is the number of keys to set.","1.0.1",CMD_DOC_NONE,NULL,NULL,"string",COMMAND_GROUP_STRING,MSET_History,0,MSET_Tips,2,msetCommand,-3,CMD_WRITE|CMD_DENYOOM,ACL_CATEGORY_STRING,MSET_Keyspecs,1,NULL,1),.args=MSET_Args}, -{MAKE_CMD("msetnx","Atomically modifies the string values of one or more keys only when all keys don't exist.","O(N) where N is the number of keys to set.","1.0.1",CMD_DOC_NONE,NULL,NULL,"string",COMMAND_GROUP_STRING,MSETNX_History,0,MSETNX_Tips,2,msetnxCommand,-3,CMD_WRITE|CMD_DENYOOM,ACL_CATEGORY_STRING,MSETNX_Keyspecs,1,NULL,1),.args=MSETNX_Args}, +{MAKE_CMD("msetnx","Atomically modifies the string values of one or more keys only when all keys don't exist.","O(N) where N is the number of keys to set.","1.0.1",CMD_DOC_NONE,NULL,NULL,"string",COMMAND_GROUP_STRING,MSETNX_History,0,MSETNX_Tips,0,msetnxCommand,-3,CMD_WRITE|CMD_DENYOOM,ACL_CATEGORY_STRING,MSETNX_Keyspecs,1,NULL,1),.args=MSETNX_Args}, {MAKE_CMD("psetex","Sets both string value and expiration time in milliseconds of a key. The key is created if it doesn't exist.","O(1)","2.6.0",CMD_DOC_DEPRECATED,"`SET` with the `PX` argument","2.6.12","string",COMMAND_GROUP_STRING,PSETEX_History,0,PSETEX_Tips,0,psetexCommand,4,CMD_WRITE|CMD_DENYOOM,ACL_CATEGORY_STRING,PSETEX_Keyspecs,1,NULL,3),.args=PSETEX_Args}, {MAKE_CMD("set","Sets the string value of a key, ignoring its type. The key is created if it doesn't exist.","O(1)","1.0.0",CMD_DOC_NONE,NULL,NULL,"string",COMMAND_GROUP_STRING,SET_History,4,SET_Tips,0,setCommand,-3,CMD_WRITE|CMD_DENYOOM,ACL_CATEGORY_STRING,SET_Keyspecs,1,setGetKeys,5),.args=SET_Args}, {MAKE_CMD("setex","Sets the string value and expiration time of a key. 
Creates the key if it doesn't exist.","O(1)","2.0.0",CMD_DOC_DEPRECATED,"`SET` with the `EX` argument","2.6.12","string",COMMAND_GROUP_STRING,SETEX_History,0,SETEX_Tips,0,setexCommand,4,CMD_WRITE|CMD_DENYOOM,ACL_CATEGORY_STRING,SETEX_Keyspecs,1,NULL,3),.args=SETEX_Args}, diff --git a/src/commands/msetnx.json b/src/commands/msetnx.json index fa71d2b45..27592d304 100644 --- a/src/commands/msetnx.json +++ b/src/commands/msetnx.json @@ -13,10 +13,6 @@ "acl_categories": [ "STRING" ], - "command_tips": [ - "REQUEST_POLICY:MULTI_SHARD", - "RESPONSE_POLICY:AGG_MIN" - ], "key_specs": [ { "flags": [ diff --git a/src/commands/randomkey.json b/src/commands/randomkey.json index e8773ee6b..eeef61aef 100644 --- a/src/commands/randomkey.json +++ b/src/commands/randomkey.json @@ -15,6 +15,7 @@ ], "command_tips": [ "REQUEST_POLICY:ALL_SHARDS", + "RESPONSE_POLICY:SPECIAL", "NONDETERMINISTIC_OUTPUT" ], "reply_schema": { diff --git a/src/commands/scan.json b/src/commands/scan.json index ca9adf5b4..a7df78a21 100644 --- a/src/commands/scan.json +++ b/src/commands/scan.json @@ -21,7 +21,8 @@ ], "command_tips": [ "NONDETERMINISTIC_OUTPUT", - "REQUEST_POLICY:SPECIAL" + "REQUEST_POLICY:SPECIAL", + "RESPONSE_POLICY:SPECIAL" ], "arguments": [ { From 01eb939a0661c94d22bfddb4841b4975a12dbf7e Mon Sep 17 00:00:00 2001 From: "zhaozhao.zz" Date: Tue, 25 Jul 2023 21:10:38 +0800 Subject: [PATCH 08/20] update monitor client's memory and evict correctly (#12420) A bug introduced in #11657 (7.2 RC1), causes client-eviction (#8687) and INFO to have inaccurate memory usage metrics of MONITOR clients. Because the type in `c->type` and the type in `getClientType()` are confusing (in the later, `CLIENT_TYPE_NORMAL` not `CLIENT_TYPE_SLAVE`), the comment we wrote in `updateClientMemUsageAndBucket` was wrong, and in fact that function didn't skip monitor clients. And since it doesn't skip monitor clients, it was wrong to delete the call for it from `replicationFeedMonitors` (it wasn't a NOP). That deletion could mean that the monitor client memory usage is not always up to date (updated less frequently, but still a candidate for client eviction). --- src/networking.c | 2 +- src/replication.c | 1 + src/server.c | 6 ++---- 3 files changed, 4 insertions(+), 5 deletions(-) diff --git a/src/networking.c b/src/networking.c index 56273fc7e..7696e8c28 100644 --- a/src/networking.c +++ b/src/networking.c @@ -3808,7 +3808,7 @@ size_t getClientMemoryUsage(client *c, size_t *output_buffer_mem_usage) { * classes of clients. * * The function will return one of the following: - * CLIENT_TYPE_NORMAL -> Normal client + * CLIENT_TYPE_NORMAL -> Normal client, including MONITOR * CLIENT_TYPE_SLAVE -> Slave * CLIENT_TYPE_PUBSUB -> Client subscribed to Pub/Sub channels * CLIENT_TYPE_MASTER -> The client representing our replication master. diff --git a/src/replication.c b/src/replication.c index 97e01b64d..885f2b17c 100644 --- a/src/replication.c +++ b/src/replication.c @@ -610,6 +610,7 @@ void replicationFeedMonitors(client *c, list *monitors, int dictid, robj **argv, while((ln = listNext(&li))) { client *monitor = ln->value; addReply(monitor,cmdobj); + updateClientMemUsageAndBucket(monitor); } decrRefCount(cmdobj); } diff --git a/src/server.c b/src/server.c index 6815aac3b..838695b88 100644 --- a/src/server.c +++ b/src/server.c @@ -917,11 +917,9 @@ void removeClientFromMemUsageBucket(client *c, int allow_eviction) { * together clients consuming about the same amount of memory and can quickly * free them in case we reach maxmemory-clients (client eviction). 
* - * Note: This function filters clients of type monitor, master or replica regardless + * Note: This function filters clients of type no-evict, master or replica regardless * of whether the eviction is enabled or not, so the memory usage we get from these - * types of clients via the INFO command may be out of date. If someday we wanna - * improve that to make monitors' memory usage more accurate, we need to re-add this - * function call to `replicationFeedMonitors()`. + * types of clients via the INFO command may be out of date. * * returns 1 if client eviction for this client is allowed, 0 otherwise. */ From 42985b00eaa3c74c55aa1e1aee2a9c077c6ac58a Mon Sep 17 00:00:00 2001 From: Harkrishn Patro Date: Tue, 25 Jul 2023 16:43:31 -0700 Subject: [PATCH 09/20] Test coverage for incr/decr operation on robj encoding type optimization (#12435) Additional test coverage for incr/decr operation. integer number could be present in raw encoding format due to operation like append. A incr/decr operation following it optimize the string to int encoding format. --- tests/unit/type/incr.tcl | 29 +++++++++++++++++++++++++++++ 1 file changed, 29 insertions(+) diff --git a/tests/unit/type/incr.tcl b/tests/unit/type/incr.tcl index c09f2e8b2..2319b2ce3 100644 --- a/tests/unit/type/incr.tcl +++ b/tests/unit/type/incr.tcl @@ -182,4 +182,33 @@ start_server {tags {"incr"}} { r incrbyfloat foo [expr double(-1)/41] r get foo } {0} + + foreach cmd {"incr" "decr" "incrby" "decrby"} { + test "$cmd operation should update encoding from raw to int" { + set res {} + set expected {1 12} + if {[string match {*incr*} $cmd]} { + lappend expected 13 + } else { + lappend expected 11 + } + + r set foo 1 + assert_encoding "int" foo + lappend res [r get foo] + + r append foo 2 + assert_encoding "raw" foo + lappend res [r get foo] + + if {[string match {*by*} $cmd]} { + r $cmd foo 1 + } else { + r $cmd foo + } + assert_encoding "int" foo + lappend res [r get foo] + assert_equal $res $expected + } + } } From 6abb3c40384bd507fe410680e0e0fefc4141f8a3 Mon Sep 17 00:00:00 2001 From: DarrenJiang13 Date: Sun, 30 Jul 2023 13:48:29 +0800 Subject: [PATCH 10/20] change log match to line match in tcl sanitizer_errors_from_file. (#12446) In the tcl foreach loop, the function should compare line rather than the whole file. --- tests/support/util.tcl | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/support/util.tcl b/tests/support/util.tcl index 37d3c89a9..8941d1ae8 100644 --- a/tests/support/util.tcl +++ b/tests/support/util.tcl @@ -62,8 +62,8 @@ proc sanitizer_errors_from_file {filename} { } # GCC UBSAN output does not contain 'Sanitizer' but 'runtime error'. - if {[string match {*runtime error*} $log] || - [string match {*Sanitizer*} $log]} { + if {[string match {*runtime error*} $line] || + [string match {*Sanitizer*} $line]} { return $log } } From b653c759cd9f0629ea19ef7c8a11400c065c7efd Mon Sep 17 00:00:00 2001 From: Diego Lopez Recas Date: Tue, 1 Aug 2023 17:03:33 +0200 Subject: [PATCH 11/20] Fix race condition in tests/unit/auth.tcl (#12444) Changing the masterauth while turning into a replica is racy. Turn into replica after changing the masterauth instead. 
--- tests/unit/auth.tcl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/unit/auth.tcl b/tests/unit/auth.tcl index 26d125579..9532e0bd2 100644 --- a/tests/unit/auth.tcl +++ b/tests/unit/auth.tcl @@ -70,8 +70,8 @@ start_server {tags {"auth_binary_password external:skip"}} { # Configure the replica with masterauth set loglines [count_log_lines 0] - $slave slaveof $master_host $master_port $slave config set masterauth "abc" + $slave slaveof $master_host $master_port # Verify replica is not able to sync with master wait_for_log_messages 0 {"*Unable to AUTH to MASTER*"} $loglines 1000 10 From 90ab91f00b64843f6201bf352bb71aef743b6ca3 Mon Sep 17 00:00:00 2001 From: "zhaozhao.zz" Date: Wed, 2 Aug 2023 15:46:06 +0800 Subject: [PATCH 12/20] fix false success and a memory leak for ACL selector with bad parenthesis combination (#12452) When doing merge selector, we should check whether the merge has started (i.e., whether open_bracket_start is -1) every time. Otherwise, encountering an illegal selector pattern could succeed and also cause memory leaks, for example: ``` acl setuser test1 (+PING (+SELECT (+DEL ) ``` The above would leak memory and succeed with only DEL being applied, and would now error after the fix. Co-authored-by: Oran Agra --- src/acl.c | 3 ++- tests/unit/acl-v2.tcl | 9 +++++++++ 2 files changed, 11 insertions(+), 1 deletion(-) diff --git a/src/acl.c b/src/acl.c index aa42c58dc..07840406a 100644 --- a/src/acl.c +++ b/src/acl.c @@ -1990,7 +1990,8 @@ sds *ACLMergeSelectorArguments(sds *argv, int argc, int *merged_argc, int *inval for (int j = 0; j < argc; j++) { char *op = argv[j]; - if (op[0] == '(' && op[sdslen(op) - 1] != ')') { + if (open_bracket_start == -1 && + (op[0] == '(' && op[sdslen(op) - 1] != ')')) { selector = sdsdup(argv[j]); open_bracket_start = j; continue; diff --git a/tests/unit/acl-v2.tcl b/tests/unit/acl-v2.tcl index af23d745e..b259c2716 100644 --- a/tests/unit/acl-v2.tcl +++ b/tests/unit/acl-v2.tcl @@ -47,6 +47,15 @@ start_server {tags {"acl external:skip"}} { catch {r ACL SETUSER selector-syntax on (&* &fail)} e assert_match "*ERR Error in ACL SETUSER modifier '(*)*Adding a pattern after the*" $e + catch {r ACL SETUSER selector-syntax on (+PING (+SELECT (+DEL} e + assert_match "*ERR Unmatched parenthesis in acl selector*" $e + + catch {r ACL SETUSER selector-syntax on (+PING (+SELECT (+DEL ) ) ) } e + assert_match "*ERR Error in ACL SETUSER modifier*" $e + + catch {r ACL SETUSER selector-syntax on (+PING (+SELECT (+DEL ) } e + assert_match "*ERR Error in ACL SETUSER modifier*" $e + assert_equal "" [r ACL GETUSER selector-syntax] } From 2ee1bbb53baea26b442d602b0b3335176170736d Mon Sep 17 00:00:00 2001 From: "Meir Shpilraien (Spielrein)" Date: Wed, 2 Aug 2023 11:43:31 +0300 Subject: [PATCH 13/20] Ensure that the function load timeout is disabled during loading from RDB/AOF and on replicas. (#12451) When loading a function from either RDB/AOF or a replica, it is essential not to fail on timeout errors. The loading time may vary due to various factors, such as hardware specifications or the system's workload during the loading process. Once a function has been successfully loaded, it should be allowed to load from persistence or on replicas without encountering a timeout failure. To maintain a clear separation between the engine and Redis internals, the implementation refrains from directly checking the state of Redis within the engine itself. 
Instead, the engine receives the desired timeout as part of the library creation and duly respects this timeout value. If Redis wishes to disable any timeout, it can simply send a value of 0. --- src/function_lua.c | 7 ++++--- src/functions.c | 12 +++++++++--- src/functions.h | 11 ++++++++--- src/rdb.c | 2 +- 4 files changed, 22 insertions(+), 10 deletions(-) diff --git a/src/function_lua.c b/src/function_lua.c index 91bb5cd67..be79dc163 100644 --- a/src/function_lua.c +++ b/src/function_lua.c @@ -51,7 +51,6 @@ #define REGISTRY_LOAD_CTX_NAME "__LIBRARY_CTX__" #define LIBRARY_API_NAME "__LIBRARY_API__" #define GLOBALS_API_NAME "__GLOBALS_API__" -#define LOAD_TIMEOUT_MS 500 /* Lua engine ctx */ typedef struct luaEngineCtx { @@ -67,6 +66,7 @@ typedef struct luaFunctionCtx { typedef struct loadCtx { functionLibInfo *li; monotime start_time; + size_t timeout; } loadCtx; typedef struct registerFunctionArgs { @@ -85,7 +85,7 @@ static void luaEngineLoadHook(lua_State *lua, lua_Debug *ar) { loadCtx *load_ctx = luaGetFromRegistry(lua, REGISTRY_LOAD_CTX_NAME); serverAssert(load_ctx); /* Only supported inside script invocation */ uint64_t duration = elapsedMs(load_ctx->start_time); - if (duration > LOAD_TIMEOUT_MS) { + if (load_ctx->timeout > 0 && duration > load_ctx->timeout) { lua_sethook(lua, luaEngineLoadHook, LUA_MASKLINE, 0); luaPushError(lua,"FUNCTION LOAD timeout"); @@ -100,7 +100,7 @@ static void luaEngineLoadHook(lua_State *lua, lua_Debug *ar) { * * Return NULL on compilation error and set the error to the err variable */ -static int luaEngineCreate(void *engine_ctx, functionLibInfo *li, sds blob, sds *err) { +static int luaEngineCreate(void *engine_ctx, functionLibInfo *li, sds blob, size_t timeout, sds *err) { int ret = C_ERR; luaEngineCtx *lua_engine_ctx = engine_ctx; lua_State *lua = lua_engine_ctx->lua; @@ -124,6 +124,7 @@ static int luaEngineCreate(void *engine_ctx, functionLibInfo *li, sds blob, sds loadCtx load_ctx = { .li = li, .start_time = getMonotonicUs(), + .timeout = timeout, }; luaSaveOnRegistry(lua, REGISTRY_LOAD_CTX_NAME, &load_ctx); diff --git a/src/functions.c b/src/functions.c index f5738ba79..c858db975 100644 --- a/src/functions.c +++ b/src/functions.c @@ -33,6 +33,8 @@ #include "adlist.h" #include "atomicvar.h" +#define LOAD_TIMEOUT_MS 500 + typedef enum { restorePolicy_Flush, restorePolicy_Append, restorePolicy_Replace } restorePolicy; @@ -961,7 +963,7 @@ void functionFreeLibMetaData(functionsLibMataData *md) { /* Compile and save the given library, return the loaded library name on success * and NULL on failure. 
In case on failure the err out param is set with relevant error message */ -sds functionsCreateWithLibraryCtx(sds code, int replace, sds* err, functionsLibCtx *lib_ctx) { +sds functionsCreateWithLibraryCtx(sds code, int replace, sds* err, functionsLibCtx *lib_ctx, size_t timeout) { dictIterator *iter = NULL; dictEntry *entry = NULL; functionLibInfo *new_li = NULL; @@ -995,7 +997,7 @@ sds functionsCreateWithLibraryCtx(sds code, int replace, sds* err, functionsLibC } new_li = engineLibraryCreate(md.name, ei, code); - if (engine->create(engine->engine_ctx, new_li, md.code, err) != C_OK) { + if (engine->create(engine->engine_ctx, new_li, md.code, timeout, err) != C_OK) { goto error; } @@ -1063,7 +1065,11 @@ void functionLoadCommand(client *c) { robj *code = c->argv[argc_pos]; sds err = NULL; sds library_name = NULL; - if (!(library_name = functionsCreateWithLibraryCtx(code->ptr, replace, &err, curr_functions_lib_ctx))) + size_t timeout = LOAD_TIMEOUT_MS; + if (mustObeyClient(c)) { + timeout = 0; + } + if (!(library_name = functionsCreateWithLibraryCtx(code->ptr, replace, &err, curr_functions_lib_ctx, timeout))) { addReplyErrorSds(c, err); return; diff --git a/src/functions.h b/src/functions.h index 26e45babc..22af139eb 100644 --- a/src/functions.h +++ b/src/functions.h @@ -53,9 +53,14 @@ typedef struct engine { /* engine specific context */ void *engine_ctx; - /* Create function callback, get the engine_ctx, and function code. + /* Create function callback, get the engine_ctx, and function code + * engine_ctx - opaque struct that was created on engine initialization + * li - library information that need to be provided and when add functions + * code - the library code + * timeout - timeout for the library creation (0 for no timeout) + * err - description of error (if occurred) * returns NULL on error and set sds to be the error message */ - int (*create)(void *engine_ctx, functionLibInfo *li, sds code, sds *err); + int (*create)(void *engine_ctx, functionLibInfo *li, sds code, size_t timeout, sds *err); /* Invoking a function, r_ctx is an opaque object (from engine POV). * The r_ctx should be used by the engine to interaction with Redis, @@ -109,7 +114,7 @@ struct functionLibInfo { }; int functionsRegisterEngine(const char *engine_name, engine *engine_ctx); -sds functionsCreateWithLibraryCtx(sds code, int replace, sds* err, functionsLibCtx *lib_ctx); +sds functionsCreateWithLibraryCtx(sds code, int replace, sds* err, functionsLibCtx *lib_ctx, size_t timeout); unsigned long functionsMemory(void); unsigned long functionsMemoryOverhead(void); unsigned long functionsNum(void); diff --git a/src/rdb.c b/src/rdb.c index cfc92e815..ed30b6523 100644 --- a/src/rdb.c +++ b/src/rdb.c @@ -2981,7 +2981,7 @@ int rdbFunctionLoad(rio *rdb, int ver, functionsLibCtx* lib_ctx, int rdbflags, s if (lib_ctx) { sds library_name = NULL; - if (!(library_name = functionsCreateWithLibraryCtx(final_payload, rdbflags & RDBFLAGS_ALLOW_DUP, &error, lib_ctx))) { + if (!(library_name = functionsCreateWithLibraryCtx(final_payload, rdbflags & RDBFLAGS_ALLOW_DUP, &error, lib_ctx, 0))) { if (!error) { error = sdsnew("Failed creating the library"); } From 7af9f4b36e8178842ba836a40bc518595f49567a Mon Sep 17 00:00:00 2001 From: Binbin Date: Sat, 5 Aug 2023 12:29:24 +0800 Subject: [PATCH 14/20] Fix GEOHASH / GEODIST / GEOPOS time complexity, should be O(1) (#12445) GEOHASH / GEODIST / GEOPOS use zsetScore to get the score, in skiplist encoding, we use dictFind to get the score, which is O(1), same as ZSCORE command. 
It is not clear why these commands had O(Log(N)), and O(N) until now. --- src/commands.def | 6 +++--- src/commands/geodist.json | 2 +- src/commands/geohash.json | 2 +- src/commands/geopos.json | 2 +- 4 files changed, 6 insertions(+), 6 deletions(-) diff --git a/src/commands.def b/src/commands.def index d51a6057a..d1e6f3edd 100644 --- a/src/commands.def +++ b/src/commands.def @@ -10635,9 +10635,9 @@ struct COMMAND_STRUCT redisCommandTable[] = { {MAKE_CMD("waitaof","Blocks until all of the preceding write commands sent by the connection are written to the append-only file of the master and/or replicas.","O(1)","7.2.0",CMD_DOC_NONE,NULL,NULL,"generic",COMMAND_GROUP_GENERIC,WAITAOF_History,0,WAITAOF_Tips,2,waitaofCommand,4,CMD_NOSCRIPT,ACL_CATEGORY_CONNECTION,WAITAOF_Keyspecs,0,NULL,3),.args=WAITAOF_Args}, /* geo */ {MAKE_CMD("geoadd","Adds one or more members to a geospatial index. The key is created if it doesn't exist.","O(log(N)) for each item added, where N is the number of elements in the sorted set.","3.2.0",CMD_DOC_NONE,NULL,NULL,"geo",COMMAND_GROUP_GEO,GEOADD_History,1,GEOADD_Tips,0,geoaddCommand,-5,CMD_WRITE|CMD_DENYOOM,ACL_CATEGORY_GEO,GEOADD_Keyspecs,1,NULL,4),.args=GEOADD_Args}, -{MAKE_CMD("geodist","Returns the distance between two members of a geospatial index.","O(log(N))","3.2.0",CMD_DOC_NONE,NULL,NULL,"geo",COMMAND_GROUP_GEO,GEODIST_History,0,GEODIST_Tips,0,geodistCommand,-4,CMD_READONLY,ACL_CATEGORY_GEO,GEODIST_Keyspecs,1,NULL,4),.args=GEODIST_Args}, -{MAKE_CMD("geohash","Returns members from a geospatial index as geohash strings.","O(log(N)) for each member requested, where N is the number of elements in the sorted set.","3.2.0",CMD_DOC_NONE,NULL,NULL,"geo",COMMAND_GROUP_GEO,GEOHASH_History,0,GEOHASH_Tips,0,geohashCommand,-2,CMD_READONLY,ACL_CATEGORY_GEO,GEOHASH_Keyspecs,1,NULL,2),.args=GEOHASH_Args}, -{MAKE_CMD("geopos","Returns the longitude and latitude of members from a geospatial index.","O(N) where N is the number of members requested.","3.2.0",CMD_DOC_NONE,NULL,NULL,"geo",COMMAND_GROUP_GEO,GEOPOS_History,0,GEOPOS_Tips,0,geoposCommand,-2,CMD_READONLY,ACL_CATEGORY_GEO,GEOPOS_Keyspecs,1,NULL,2),.args=GEOPOS_Args}, +{MAKE_CMD("geodist","Returns the distance between two members of a geospatial index.","O(1)","3.2.0",CMD_DOC_NONE,NULL,NULL,"geo",COMMAND_GROUP_GEO,GEODIST_History,0,GEODIST_Tips,0,geodistCommand,-4,CMD_READONLY,ACL_CATEGORY_GEO,GEODIST_Keyspecs,1,NULL,4),.args=GEODIST_Args}, +{MAKE_CMD("geohash","Returns members from a geospatial index as geohash strings.","O(1) for each member requested.","3.2.0",CMD_DOC_NONE,NULL,NULL,"geo",COMMAND_GROUP_GEO,GEOHASH_History,0,GEOHASH_Tips,0,geohashCommand,-2,CMD_READONLY,ACL_CATEGORY_GEO,GEOHASH_Keyspecs,1,NULL,2),.args=GEOHASH_Args}, +{MAKE_CMD("geopos","Returns the longitude and latitude of members from a geospatial index.","O(1) for each member requested.","3.2.0",CMD_DOC_NONE,NULL,NULL,"geo",COMMAND_GROUP_GEO,GEOPOS_History,0,GEOPOS_Tips,0,geoposCommand,-2,CMD_READONLY,ACL_CATEGORY_GEO,GEOPOS_Keyspecs,1,NULL,2),.args=GEOPOS_Args}, {MAKE_CMD("georadius","Queries a geospatial index for members within a distance from a coordinate, optionally stores the result.","O(N+log(M)) where N is the number of elements inside the bounding box of the circular area delimited by center and radius and M is the number of items inside the index.","3.2.0",CMD_DOC_DEPRECATED,"`GEOSEARCH` and `GEOSEARCHSTORE` with the `BYRADIUS` 
argument","6.2.0","geo",COMMAND_GROUP_GEO,GEORADIUS_History,2,GEORADIUS_Tips,0,georadiusCommand,-6,CMD_WRITE|CMD_DENYOOM,ACL_CATEGORY_GEO,GEORADIUS_Keyspecs,3,georadiusGetKeys,11),.args=GEORADIUS_Args}, {MAKE_CMD("georadiusbymember","Queries a geospatial index for members within a distance from a member, optionally stores the result.","O(N+log(M)) where N is the number of elements inside the bounding box of the circular area delimited by center and radius and M is the number of items inside the index.","3.2.0",CMD_DOC_DEPRECATED,"`GEOSEARCH` and `GEOSEARCHSTORE` with the `BYRADIUS` and `FROMMEMBER` arguments","6.2.0","geo",COMMAND_GROUP_GEO,GEORADIUSBYMEMBER_History,1,GEORADIUSBYMEMBER_Tips,0,georadiusbymemberCommand,-5,CMD_WRITE|CMD_DENYOOM,ACL_CATEGORY_GEO,GEORADIUSBYMEMBER_Keyspecs,3,georadiusGetKeys,10),.args=GEORADIUSBYMEMBER_Args}, {MAKE_CMD("georadiusbymember_ro","Returns members from a geospatial index that are within a distance from a member.","O(N+log(M)) where N is the number of elements inside the bounding box of the circular area delimited by center and radius and M is the number of items inside the index.","3.2.10",CMD_DOC_DEPRECATED,"`GEOSEARCH` with the `BYRADIUS` and `FROMMEMBER` arguments","6.2.0","geo",COMMAND_GROUP_GEO,GEORADIUSBYMEMBER_RO_History,0,GEORADIUSBYMEMBER_RO_Tips,0,georadiusbymemberroCommand,-5,CMD_READONLY,ACL_CATEGORY_GEO,GEORADIUSBYMEMBER_RO_Keyspecs,1,NULL,9),.args=GEORADIUSBYMEMBER_RO_Args}, diff --git a/src/commands/geodist.json b/src/commands/geodist.json index 97969d332..145ca718a 100644 --- a/src/commands/geodist.json +++ b/src/commands/geodist.json @@ -1,7 +1,7 @@ { "GEODIST": { "summary": "Returns the distance between two members of a geospatial index.", - "complexity": "O(log(N))", + "complexity": "O(1)", "group": "geo", "since": "3.2.0", "arity": -4, diff --git a/src/commands/geohash.json b/src/commands/geohash.json index 8f4d55a62..01402c465 100644 --- a/src/commands/geohash.json +++ b/src/commands/geohash.json @@ -1,7 +1,7 @@ { "GEOHASH": { "summary": "Returns members from a geospatial index as geohash strings.", - "complexity": "O(log(N)) for each member requested, where N is the number of elements in the sorted set.", + "complexity": "O(1) for each member requested.", "group": "geo", "since": "3.2.0", "arity": -2, diff --git a/src/commands/geopos.json b/src/commands/geopos.json index 5473c1b76..408b6e6a3 100644 --- a/src/commands/geopos.json +++ b/src/commands/geopos.json @@ -1,7 +1,7 @@ { "GEOPOS": { "summary": "Returns the longitude and latitude of members from a geospatial index.", - "complexity": "O(N) where N is the number of members requested.", + "complexity": "O(1) for each member requested.", "group": "geo", "since": "3.2.0", "arity": -2, From da9c2804a5ce774099bc3aabc894a8fd8c7e1440 Mon Sep 17 00:00:00 2001 From: sundb Date: Sat, 5 Aug 2023 12:57:06 +0800 Subject: [PATCH 15/20] Avoid mostly harmless integer overflow in cjson (#12456) This PR mainly fixes a possible integer overflow in `json_append_string()`. When we use `cjson.encoding()` to encode a string larger than 2GB, at specific compilation flags, an integer overflow may occur leading to truncation, resulting in the part of the string larger than 2GB not being encoded. On the other hand, this overflow doesn't cause any read or write out-of-range or segment fault. 1) using -O0 for lua_cjson (`make LUA_DEBUG=yes`) In this case, `i` will overflow and leads to truncation. 
When `i` reaches `INT_MAX+1` and overflows to INT_MIN, when compared to len, `i` (1000000..00) is expanded to 64 bits signed integer (1111111.....000000) . At this point i will be greater than len and jump out of the loop, so `for (i = 0; i < len; i++)` will loop up to 2^31 times, and the part of larger than 2GB will be truncated. ```asm `i` => -0x24(%rbp) <+253>: addl $0x1,-0x24(%rbp) ; overflow if i large than 2^31 <+257>: mov -0x24(%rbp),%eax <+260>: movslq %eax,%rdx ; move a 32-bit value with sign extension into a 64-bit signed <+263>: mov -0x20(%rbp),%rax <+267>: cmp %rax,%rdx ; check `i < len` <+270>: jb 0x212600 ``` 2) using -O2/-O3 for lua_cjson (`make LUA_DEBUG=no`, **the default**) In this case, because singed integer overflow is an undefined behavior, `i` will not overflow. `i` will be optimized by the compiler and use 64-bit registers for all subsequent instructions. ```asm <+180>: add $0x1,%rbx ; Using 64-bit register `rbx` for i++ <+184>: lea 0x1(%rdx),%rsi <+188>: mov %rsi,0x10(%rbp) <+192>: mov %al,(%rcx,%rdx,1) <+195>: cmp %rbx,(%rsp) ; check `i < len` <+199>: ja 0x20b63a ``` 3) using 32bit Because `strbuf_ensure_empty_length()` preallocates memory of length (len * 6 + 2), in 32-bit `cjson.encode()` can only handle strings smaller than ((2 ^ 32) - 3 ) / 6. So 32bit is not affected. Also change `i` in `strbuf_append_string()` to `size_t`. Since its second argument `str` is taken from the `char2escape` string array which is never larger than 6, so `strbuf_append_string()` is not at risk of overflow (the bug was unreachable). --- deps/lua/src/lua_cjson.c | 3 +-- deps/lua/src/strbuf.c | 3 +-- tests/unit/scripting.tcl | 7 +++++++ 3 files changed, 9 insertions(+), 4 deletions(-) diff --git a/deps/lua/src/lua_cjson.c b/deps/lua/src/lua_cjson.c index 991f5d31d..b86d73e97 100644 --- a/deps/lua/src/lua_cjson.c +++ b/deps/lua/src/lua_cjson.c @@ -464,9 +464,8 @@ static void json_encode_exception(lua_State *l, json_config_t *cfg, strbuf_t *js static void json_append_string(lua_State *l, strbuf_t *json, int lindex) { const char *escstr; - int i; const char *str; - size_t len; + size_t i, len; str = lua_tolstring(l, lindex, &len); diff --git a/deps/lua/src/strbuf.c b/deps/lua/src/strbuf.c index 775e8baf1..97ee940c9 100644 --- a/deps/lua/src/strbuf.c +++ b/deps/lua/src/strbuf.c @@ -176,8 +176,7 @@ void strbuf_resize(strbuf_t *s, size_t len) void strbuf_append_string(strbuf_t *s, const char *str) { - int i; - size_t space; + size_t i, space; space = strbuf_empty_length(s); diff --git a/tests/unit/scripting.tcl b/tests/unit/scripting.tcl index a8195fb91..cedf2b4f7 100644 --- a/tests/unit/scripting.tcl +++ b/tests/unit/scripting.tcl @@ -307,6 +307,13 @@ start_server {tags {"scripting"}} { set e } {*against a key*} + test {EVAL - JSON string encoding a string larger than 2GB} { + run_script { + local s = string.rep("a", 1024 * 1024 * 1024) + return #cjson.encode(s..s..s) + } 0 + } {3221225474} {large-memory} ;# length includes two double quotes at both ends + test {EVAL - JSON numeric decoding} { # We must return the table as a string because otherwise # Redis converts floats to ints and we get 0 and 1023 instead From 8226f39fb200a6a2d6f57d027ffc55319fb85d92 Mon Sep 17 00:00:00 2001 From: "zhaozhao.zz" Date: Sat, 5 Aug 2023 14:52:03 +0800 Subject: [PATCH 16/20] do not call handleClientsBlockedOnKeys inside yielding command (#12459) Fix the assertion when a busy script (timeout) signal ready keys (like LPUSH), and then an arbitrary client's `allow-busy` command steps into 
`handleClientsBlockedOnKeys` try wake up clients blocked on keys (like BLPOP). Reproduction process: 1. start a redis with aof `./redis-server --appendonly yes` 2. exec blpop `127.0.0.1:6379> blpop a 0` 3. use another client call a busy script and this script push the blocked key `127.0.0.1:6379> eval "redis.call('lpush','a','b') while(1) do end" 0` 4. user a new client call an allow-busy command like auth `127.0.0.1:6379> auth a` BTW, this issue also break the atomicity of script. This bug has been around for many years, the old versions only have the atomic problem, only 7.0/7.2 has the assertion problem. Co-authored-by: Oran Agra --- src/server.c | 2 +- tests/unit/scripting.tcl | 47 ++++++++++++++++++++++++++++++++++++++++ 2 files changed, 48 insertions(+), 1 deletion(-) diff --git a/src/server.c b/src/server.c index 838695b88..438325f28 100644 --- a/src/server.c +++ b/src/server.c @@ -4158,7 +4158,7 @@ int processCommand(client *c) { int flags = CMD_CALL_FULL; if (client_reprocessing_command) flags |= CMD_CALL_REPROCESSING; call(c,flags); - if (listLength(server.ready_keys)) + if (listLength(server.ready_keys) && !isInsideYieldingLongCommand()) handleClientsBlockedOnKeys(); } diff --git a/tests/unit/scripting.tcl b/tests/unit/scripting.tcl index cedf2b4f7..c2f79a742 100644 --- a/tests/unit/scripting.tcl +++ b/tests/unit/scripting.tcl @@ -1119,6 +1119,53 @@ start_server {tags {"scripting"}} { r ping } {PONG} + test {Timedout scripts and unblocked command} { + # make sure a command that's allowed during BUSY doesn't trigger an unblocked command + + # enable AOF to also expose an assertion if the bug would happen + r flushall + r config set appendonly yes + + # create clients, and set one to block waiting for key 'x' + set rd [redis_deferring_client] + set rd2 [redis_deferring_client] + set r3 [redis_client] + $rd2 blpop x 0 + wait_for_blocked_clients_count 1 + + # hack: allow the script to use client list command so that we can control when it aborts + r DEBUG set-disable-deny-scripts 1 + r config set lua-time-limit 10 + run_script_on_connection $rd { + local clients + redis.call('lpush',KEYS[1],'y'); + while true do + clients = redis.call('client','list') + if string.find(clients, 'abortscript') ~= nil then break end + end + redis.call('lpush',KEYS[1],'z'); + return clients + } 1 x + + # wait for the script to be busy + after 200 + catch {r ping} e + assert_match {BUSY*} $e + + # run cause the script to abort, and run a command that could have processed + # unblocked clients (due to a bug) + $r3 hello 2 setname abortscript + + # make sure the script completed before the pop was processed + assert_equal [$rd2 read] {x z} + assert_match {*abortscript*} [$rd read] + + $rd close + $rd2 close + $r3 close + r DEBUG set-disable-deny-scripts 0 + } {OK} {external:skip needs:debug} + test {Timedout scripts that modified data can't be killed by SCRIPT KILL} { set rd [redis_deferring_client] r config set lua-time-limit 10 From 1b6bdff48dcc333affab02b945821eb865529de2 Mon Sep 17 00:00:00 2001 From: "zhaozhao.zz" Date: Sat, 5 Aug 2023 15:00:54 +0800 Subject: [PATCH 17/20] optimize the check of kill pubsub clients after modifying ACL rules (#12457) if there are no subscribers, we can ignore the operation --- src/acl.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/src/acl.c b/src/acl.c index 07840406a..0bffbe970 100644 --- a/src/acl.c +++ b/src/acl.c @@ -1859,6 +1859,12 @@ int ACLCheckAllPerm(client *c, int *idxptr) { /* Check if the user's existing pub/sub clients violate the ACL pub/sub * 
Co-authored-by: Oran Agra --- src/server.c | 2 +- tests/unit/scripting.tcl | 47 ++++++++++++++++++++++++++++++++++++++++ 2 files changed, 48 insertions(+), 1 deletion(-) diff --git a/src/server.c b/src/server.c index 838695b88..438325f28 100644 --- a/src/server.c +++ b/src/server.c @@ -4158,7 +4158,7 @@ int processCommand(client *c) { int flags = CMD_CALL_FULL; if (client_reprocessing_command) flags |= CMD_CALL_REPROCESSING; call(c,flags); - if (listLength(server.ready_keys)) + if (listLength(server.ready_keys) && !isInsideYieldingLongCommand()) handleClientsBlockedOnKeys(); } diff --git a/tests/unit/scripting.tcl b/tests/unit/scripting.tcl index cedf2b4f7..c2f79a742 100644 --- a/tests/unit/scripting.tcl +++ b/tests/unit/scripting.tcl @@ -1119,6 +1119,53 @@ start_server {tags {"scripting"}} { r ping } {PONG} + test {Timedout scripts and unblocked command} { + # make sure a command that's allowed during BUSY doesn't trigger an unblocked command + + # enable AOF to also expose an assertion if the bug would happen + r flushall + r config set appendonly yes + + # create clients, and set one to block waiting for key 'x' + set rd [redis_deferring_client] + set rd2 [redis_deferring_client] + set r3 [redis_client] + $rd2 blpop x 0 + wait_for_blocked_clients_count 1 + + # hack: allow the script to use client list command so that we can control when it aborts + r DEBUG set-disable-deny-scripts 1 + r config set lua-time-limit 10 + run_script_on_connection $rd { + local clients + redis.call('lpush',KEYS[1],'y'); + while true do + clients = redis.call('client','list') + if string.find(clients, 'abortscript') ~= nil then break end + end + redis.call('lpush',KEYS[1],'z'); + return clients + } 1 x + + # wait for the script to be busy + after 200 + catch {r ping} e + assert_match {BUSY*} $e + + # run cause the script to abort, and run a command that could have processed + # unblocked clients (due to a bug) + $r3 hello 2 setname abortscript + + # make sure the script completed before the pop was processed + assert_equal [$rd2 read] {x z} + assert_match {*abortscript*} [$rd read] + + $rd close + $rd2 close + $r3 close + r DEBUG set-disable-deny-scripts 0 + } {OK} {external:skip needs:debug} + test {Timedout scripts that modified data can't be killed by SCRIPT KILL} { set rd [redis_deferring_client] r config set lua-time-limit 10 From 1b6bdff48dcc333affab02b945821eb865529de2 Mon Sep 17 00:00:00 2001 From: "zhaozhao.zz" Date: Sat, 5 Aug 2023 15:00:54 +0800 Subject: [PATCH 17/20] optimize the check of kill pubsub clients after modifying ACL rules (#12457) if there are no subscribers, we can ignore the operation --- src/acl.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/src/acl.c b/src/acl.c index 07840406a..0bffbe970 100644 --- a/src/acl.c +++ b/src/acl.c @@ -1859,6 +1859,12 @@ int ACLCheckAllPerm(client *c, int *idxptr) { /* Check if the user's existing pub/sub clients violate the ACL pub/sub * permissions specified via the upcoming argument, and kill them if so. */ void ACLKillPubsubClientsIfNeeded(user *new, user *original) { + /* Do nothing if there are no subscribers. */ + if (!dictSize(server.pubsub_patterns) && + !dictSize(server.pubsub_channels) && + !dictSize(server.pubsubshard_channels)) + return; + listIter li, lpi; listNode *ln, *lpn; robj *o; From 6abfda54c380c07ea0d460706833929654fac25a Mon Sep 17 00:00:00 2001 From: Binbin Date: Thu, 10 Aug 2023 13:58:52 +0800 Subject: [PATCH 18/20] Fix flaky SENTINEL RESET test (#12437) After SENTINEL RESET, sometimes the sentinel can sense the master again, causing the test to fail. Here we give it a few more chances. --- tests/sentinel/tests/00-base.tcl | 16 ++++++++++++---- 1 file changed, 12 insertions(+), 4 deletions(-) diff --git a/tests/sentinel/tests/00-base.tcl b/tests/sentinel/tests/00-base.tcl index b4d65751b..7b6439508 100644 --- a/tests/sentinel/tests/00-base.tcl +++ b/tests/sentinel/tests/00-base.tcl @@ -195,8 +195,16 @@ test "New master [join $addr {:}] role matches" { } test "SENTINEL RESET can resets the master" { - assert_equal 1 [S 0 SENTINEL RESET mymaster] - assert_equal 0 [llength [S 0 SENTINEL SENTINELS mymaster]] - assert_equal 0 [llength [S 0 SENTINEL SLAVES mymaster]] - assert_equal 0 [llength [S 0 SENTINEL REPLICAS mymaster]] + # After SENTINEL RESET, sometimes the sentinel can sense the master again, + # causing the test to fail. Here we give it a few more chances. + for {set j 0} {$j < 10} {incr j} { + assert_equal 1 [S 0 SENTINEL RESET mymaster] + set res1 [llength [S 0 SENTINEL SENTINELS mymaster]] + set res2 [llength [S 0 SENTINEL SLAVES mymaster]] + set res3 [llength [S 0 SENTINEL REPLICAS mymaster]] + if {$res1 eq 0 && $res2 eq 0 && $res3 eq 0} break + } + assert_equal 0 $res1 + assert_equal 0 $res2 + assert_equal 0 $res3 } From 7c179f9bf4390512196b3a2b2ad6d0f4cb625c8a Mon Sep 17 00:00:00 2001 From: Madelyn Olson <34459052+madolson@users.noreply.github.com> Date: Wed, 9 Aug 2023 23:58:53 -0700 Subject: [PATCH 19/20] Fixed a bug where sequential matching ACL rules weren't compressed (#12472) When a new ACL rule was added, an attempt was made to remove any "overlapping" rules. However, when a match was found, the search did not resume at the right location; it instead continued after the original position of the original command. For example, if the current rules were `-config +config|get` and a rule `+config` was added, it would identify that `-config` was matched but skip over `+config|get`, leaving the compacted rule `-config +config`. This would be evaluated safely, but it looks weird. This bug can only be triggered with subcommands, since that is the only way to have sequential matching rules. Resolves #12470. This is also only present in 7.2. I think there was also a minor risk of removing another valid rule, since it would start the search of the next command at an arbitrary point. I couldn't find a valid offset that would have caused a match using any of the existing commands that have subcommands with another command. --- src/acl.c | 4 +++- tests/unit/acl.tcl | 4 ++++ 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/src/acl.c b/src/acl.c index 0bffbe970..5fd956d23 100644 --- a/src/acl.c +++ b/src/acl.c @@ -563,7 +563,7 @@ void ACLSelectorRemoveCommandRule(aclSelector *selector, sds new_rule) { * as well if the command is removed. */ char *rule_end = strchr(existing_rule, ' '); if (!rule_end) { - /* This is the last rule, so it it to the end of the string. 
*/ + /* This is the last rule, so move it to the end of the string. */ rule_end = existing_rule + strlen(existing_rule); /* This approach can leave a trailing space if the last rule is removed, @@ -580,6 +580,8 @@ void ACLSelectorRemoveCommandRule(aclSelector *selector, sds new_rule) { /* Copy the remaining rules starting at the next rule to replace the rule to be * deleted, including the terminating NULL character. */ memmove(copy_position, copy_end, strlen(copy_end) + 1); + existing_rule = copy_position; + continue; } } existing_rule = copy_end; diff --git a/tests/unit/acl.tcl b/tests/unit/acl.tcl index 6dcee8b94..36ef06370 100644 --- a/tests/unit/acl.tcl +++ b/tests/unit/acl.tcl @@ -615,6 +615,10 @@ start_server {tags {"acl external:skip"}} { # Unnecessary categories are retained for potentional future compatibility r ACL SETUSER adv-test -@all -@dangerous assert_equal "-@all -@dangerous" [dict get [r ACL getuser adv-test] commands] + + # Duplicate categories are compressed, regression test for #12470 + r ACL SETUSER adv-test -@all +config +config|get -config|set +config + assert_equal "-@all +config" [dict get [r ACL getuser adv-test] commands] } test "ACL CAT with illegal arguments" { From f4549d1cf4830c0584f8e41369a8628842a67aec Mon Sep 17 00:00:00 2001 From: Binbin Date: Tue, 15 Aug 2023 11:57:55 +0800 Subject: [PATCH 20/20] Fix CLUSTER REPLICAS time complexity, should be O(N) (#12477) We iterate over all replicas to get the result, the time complexity should be O(N), like CLUSTER NODES complexity is O(N). --- src/cluster.c | 1 + src/commands.def | 4 ++-- src/commands/cluster-replicas.json | 2 +- src/commands/cluster-slaves.json | 2 +- 4 files changed, 5 insertions(+), 4 deletions(-) diff --git a/src/cluster.c b/src/cluster.c index b017b9385..69eff1020 100644 --- a/src/cluster.c +++ b/src/cluster.c @@ -6294,6 +6294,7 @@ NULL } else if ((!strcasecmp(c->argv[1]->ptr,"slaves") || !strcasecmp(c->argv[1]->ptr,"replicas")) && c->argc == 3) { /* CLUSTER SLAVES */ + /* CLUSTER REPLICAS */ clusterNode *n = clusterLookupNode(c->argv[2]->ptr, sdslen(c->argv[2]->ptr)); int j; diff --git a/src/commands.def b/src/commands.def index d1e6f3edd..97f5fb316 100644 --- a/src/commands.def +++ b/src/commands.def @@ -964,14 +964,14 @@ struct COMMAND_STRUCT CLUSTER_Subcommands[] = { {MAKE_CMD("myid","Returns the ID of a node.","O(1)","3.0.0",CMD_DOC_NONE,NULL,NULL,"cluster",COMMAND_GROUP_CLUSTER,CLUSTER_MYID_History,0,CLUSTER_MYID_Tips,0,clusterCommand,2,CMD_STALE,0,CLUSTER_MYID_Keyspecs,0,NULL,0)}, {MAKE_CMD("myshardid","Returns the shard ID of a node.","O(1)","7.2.0",CMD_DOC_NONE,NULL,NULL,"cluster",COMMAND_GROUP_CLUSTER,CLUSTER_MYSHARDID_History,0,CLUSTER_MYSHARDID_Tips,1,clusterCommand,2,CMD_STALE,0,CLUSTER_MYSHARDID_Keyspecs,0,NULL,0)}, {MAKE_CMD("nodes","Returns the cluster configuration for a node.","O(N) where N is the total number of Cluster nodes","3.0.0",CMD_DOC_NONE,NULL,NULL,"cluster",COMMAND_GROUP_CLUSTER,CLUSTER_NODES_History,0,CLUSTER_NODES_Tips,1,clusterCommand,2,CMD_STALE,0,CLUSTER_NODES_Keyspecs,0,NULL,0)}, -{MAKE_CMD("replicas","Lists the replica nodes of a master node.","O(1)","5.0.0",CMD_DOC_NONE,NULL,NULL,"cluster",COMMAND_GROUP_CLUSTER,CLUSTER_REPLICAS_History,0,CLUSTER_REPLICAS_Tips,1,clusterCommand,3,CMD_ADMIN|CMD_STALE,0,CLUSTER_REPLICAS_Keyspecs,0,NULL,1),.args=CLUSTER_REPLICAS_Args}, +{MAKE_CMD("replicas","Lists the replica nodes of a master node.","O(N) where N is the number of 
replicas.","5.0.0",CMD_DOC_NONE,NULL,NULL,"cluster",COMMAND_GROUP_CLUSTER,CLUSTER_REPLICAS_History,0,CLUSTER_REPLICAS_Tips,1,clusterCommand,3,CMD_ADMIN|CMD_STALE,0,CLUSTER_REPLICAS_Keyspecs,0,NULL,1),.args=CLUSTER_REPLICAS_Args}, {MAKE_CMD("replicate","Configure a node as replica of a master node.","O(1)","3.0.0",CMD_DOC_NONE,NULL,NULL,"cluster",COMMAND_GROUP_CLUSTER,CLUSTER_REPLICATE_History,0,CLUSTER_REPLICATE_Tips,0,clusterCommand,3,CMD_NO_ASYNC_LOADING|CMD_ADMIN|CMD_STALE,0,CLUSTER_REPLICATE_Keyspecs,0,NULL,1),.args=CLUSTER_REPLICATE_Args}, {MAKE_CMD("reset","Resets a node.","O(N) where N is the number of known nodes. The command may execute a FLUSHALL as a side effect.","3.0.0",CMD_DOC_NONE,NULL,NULL,"cluster",COMMAND_GROUP_CLUSTER,CLUSTER_RESET_History,0,CLUSTER_RESET_Tips,0,clusterCommand,-2,CMD_ADMIN|CMD_STALE|CMD_NOSCRIPT,0,CLUSTER_RESET_Keyspecs,0,NULL,1),.args=CLUSTER_RESET_Args}, {MAKE_CMD("saveconfig","Forces a node to save the cluster configuration to disk.","O(1)","3.0.0",CMD_DOC_NONE,NULL,NULL,"cluster",COMMAND_GROUP_CLUSTER,CLUSTER_SAVECONFIG_History,0,CLUSTER_SAVECONFIG_Tips,0,clusterCommand,2,CMD_NO_ASYNC_LOADING|CMD_ADMIN|CMD_STALE,0,CLUSTER_SAVECONFIG_Keyspecs,0,NULL,0)}, {MAKE_CMD("set-config-epoch","Sets the configuration epoch for a new node.","O(1)","3.0.0",CMD_DOC_NONE,NULL,NULL,"cluster",COMMAND_GROUP_CLUSTER,CLUSTER_SET_CONFIG_EPOCH_History,0,CLUSTER_SET_CONFIG_EPOCH_Tips,0,clusterCommand,3,CMD_NO_ASYNC_LOADING|CMD_ADMIN|CMD_STALE,0,CLUSTER_SET_CONFIG_EPOCH_Keyspecs,0,NULL,1),.args=CLUSTER_SET_CONFIG_EPOCH_Args}, {MAKE_CMD("setslot","Binds a hash slot to a node.","O(1)","3.0.0",CMD_DOC_NONE,NULL,NULL,"cluster",COMMAND_GROUP_CLUSTER,CLUSTER_SETSLOT_History,0,CLUSTER_SETSLOT_Tips,0,clusterCommand,-4,CMD_NO_ASYNC_LOADING|CMD_ADMIN|CMD_STALE,0,CLUSTER_SETSLOT_Keyspecs,0,NULL,2),.args=CLUSTER_SETSLOT_Args}, {MAKE_CMD("shards","Returns the mapping of cluster slots to shards.","O(N) where N is the total number of cluster nodes","7.0.0",CMD_DOC_NONE,NULL,NULL,"cluster",COMMAND_GROUP_CLUSTER,CLUSTER_SHARDS_History,0,CLUSTER_SHARDS_Tips,1,clusterCommand,2,CMD_LOADING|CMD_STALE,0,CLUSTER_SHARDS_Keyspecs,0,NULL,0)}, -{MAKE_CMD("slaves","Lists the replica nodes of a master node.","O(1)","3.0.0",CMD_DOC_DEPRECATED,"`CLUSTER REPLICAS`","5.0.0","cluster",COMMAND_GROUP_CLUSTER,CLUSTER_SLAVES_History,0,CLUSTER_SLAVES_Tips,1,clusterCommand,3,CMD_ADMIN|CMD_STALE,0,CLUSTER_SLAVES_Keyspecs,0,NULL,1),.args=CLUSTER_SLAVES_Args}, +{MAKE_CMD("slaves","Lists the replica nodes of a master node.","O(N) where N is the number of replicas.","3.0.0",CMD_DOC_DEPRECATED,"`CLUSTER REPLICAS`","5.0.0","cluster",COMMAND_GROUP_CLUSTER,CLUSTER_SLAVES_History,0,CLUSTER_SLAVES_Tips,1,clusterCommand,3,CMD_ADMIN|CMD_STALE,0,CLUSTER_SLAVES_Keyspecs,0,NULL,1),.args=CLUSTER_SLAVES_Args}, {MAKE_CMD("slots","Returns the mapping of cluster slots to nodes.","O(N) where N is the total number of Cluster nodes","3.0.0",CMD_DOC_DEPRECATED,"`CLUSTER SHARDS`","7.0.0","cluster",COMMAND_GROUP_CLUSTER,CLUSTER_SLOTS_History,2,CLUSTER_SLOTS_Tips,1,clusterCommand,2,CMD_LOADING|CMD_STALE,0,CLUSTER_SLOTS_Keyspecs,0,NULL,0)}, {0} }; diff --git a/src/commands/cluster-replicas.json b/src/commands/cluster-replicas.json index 49a922770..e01617fee 100644 --- a/src/commands/cluster-replicas.json +++ b/src/commands/cluster-replicas.json @@ -1,7 +1,7 @@ { "REPLICAS": { "summary": "Lists the replica nodes of a master node.", - "complexity": "O(1)", + "complexity": "O(N) where N is the number of replicas.", "group": "cluster", "since": 
"5.0.0", "arity": 3, diff --git a/src/commands/cluster-slaves.json b/src/commands/cluster-slaves.json index a2e6755a0..a736088e4 100644 --- a/src/commands/cluster-slaves.json +++ b/src/commands/cluster-slaves.json @@ -1,7 +1,7 @@ { "SLAVES": { "summary": "Lists the replica nodes of a master node.", - "complexity": "O(1)", + "complexity": "O(N) where N is the number of replicas.", "group": "cluster", "since": "3.0.0", "arity": 3,