diff --git a/00-RELEASENOTES b/00-RELEASENOTES index 4f6cb9978..62d1def15 100644 --- a/00-RELEASENOTES +++ b/00-RELEASENOTES @@ -11,6 +11,132 @@ CRITICAL: There is a critical bug affecting MOST USERS. Upgrade ASAP. SECURITY: There are security fixes in the release. -------------------------------------------------------------------------------- +================================================================================ +Redis 6.2.6 Released Mon Oct 4 12:00:00 IDT 2021 +================================================================================ + +Upgrade urgency: SECURITY, contains fixes to security issues. + +Security Fixes: +* (CVE-2021-41099) Integer to heap buffer overflow handling certain string + commands and network payloads, when proto-max-bulk-len is manually configured + to a non-default, very large value [reported by yiyuaner]. +* (CVE-2021-32762) Integer to heap buffer overflow issue in redis-cli and + redis-sentinel parsing large multi-bulk replies on some older and less common + platforms [reported by Microsoft Vulnerability Research]. +* (CVE-2021-32687) Integer to heap buffer overflow with intsets, when + set-max-intset-entries is manually configured to a non-default, very large + value [reported by Pawel Wieczorkiewicz, AWS]. +* (CVE-2021-32675) Denial Of Service when processing RESP request payloads with + a large number of elements on many connections. +* (CVE-2021-32672) Random heap reading issue with Lua Debugger [reported by + Meir Shpilraien]. +* (CVE-2021-32628) Integer to heap buffer overflow handling ziplist-encoded + data types, when configuring a large, non-default value for + hash-max-ziplist-entries, hash-max-ziplist-value, zset-max-ziplist-entries + or zset-max-ziplist-value [reported by sundb]. +* (CVE-2021-32627) Integer to heap buffer overflow issue with streams, when + configuring a non-default, large value for proto-max-bulk-len and + client-query-buffer-limit [reported by sundb]. 
+* (CVE-2021-32626) Specially crafted Lua scripts may result with Heap buffer + overflow [reported by Meir Shpilraien]. + +Bug fixes that involve behavior changes: +* GEO* STORE with empty source key deletes the destination key and return 0 (#9271) + Previously it would have returned an empty array like the non-STORE variant. +* PUBSUB NUMPAT replies with number of patterns rather than number of subscriptions (#9209) + This actually changed in 6.2.0 but was overlooked and omitted from the release notes. + +Bug fixes that are only applicable to previous releases of Redis 6.2: +* Fix CLIENT PAUSE, used an old timeout from previous PAUSE (#9477) +* Fix CLIENT PAUSE in a replica would mess the replication offset (#9448) +* Add some missing error statistics in INFO errorstats (#9328) + +Other bug fixes: +* Fix incorrect reply of COMMAND command key positions for MIGRATE command (#9455) +* Fix appendfsync to always guarantee fsync before reply, on MacOS and FreeBSD (kqueue) (#9416) +* Fix the wrong mis-detection of sync_file_range system call, affecting performance (#9371) + +CLI tools: +* When redis-cli received ASK response, it didn't handle it (#8930) + +Improvements: +* Add latency monitor sample when key is deleted via lazy expire (#9317) +* Sanitize corrupt payload improvements (#9321, #9399) +* Delete empty keys when loading RDB file or handling a RESTORE command (#9297, #9349) + +================================================================================ +Redis 6.2.5 Released Wed Jul 21 16:32:19 IDT 2021 +================================================================================ + +Upgrade urgency: SECURITY, contains fixes to security issues that affect +authenticated client connections on 32-bit versions. MODERATE otherwise. + +Fix integer overflow in BITFIELD on 32-bit versions (CVE-2021-32761). 
+An integer overflow bug in Redis version 2.2 or newer can be exploited using the +BITFIELD command to corrupt the heap and potentially result with remote code +execution. + +Bug fixes that involve behavior changes: +* Change reply type for ZPOPMAX/MIN with count in RESP3 to nested array (#8981). + Was using a flat array like in RESP2 instead of a nested array like ZRANGE does. +* Fix reply type for HRANDFIELD and ZRANDMEMBER when key is missing (#9178). + Was using a null array instead of an empty array. +* Fix reply type for ZRANGESTORE when source key is missing (#9089). + Was using an empty array like ZRANGE instead of 0 (used in the STORE variant). + +Bug fixes that are only applicable to previous releases of Redis 6.2: +* ZRANDMEMBER WITHSCORES with negative COUNT may return bad score (#9162) +* Fix crash after CLIENT UNPAUSE when threaded I/O config is enabled (#9041) +* Fix XTRIM or XADD with LIMIT may delete more entries than the limit (#9048) +* Fix build issue with OpenSSL 1.1.0 (#9233) + +Other bug fixes: +* Fail EXEC command in case a watched key is expired (#9194) +* Fix SMOVE not to invalidate dest key (WATCH and tracking) when member already exists (#9244) +* Fix SINTERSTORE not to delete dest key when getting a wrong type error (#9032) +* Fix overflows on 32-bit versions in GETBIT, SETBIT, BITCOUNT, BITPOS, and BITFIELD (#9191) +* Improve MEMORY USAGE on stream keys (#9164) +* Set TCP keepalive on inbound cluster bus connections (#9230) +* Fix diskless replica loading to recover from RDB short read on module AUX data (#9199) +* Fix race in client side tracking (#9116) +* Fix ziplist length updates on big-endian platforms (#2080) + +CLI tools: +* redis-cli cluster import command may issue wrong MIGRATE command, sending COPY instead of REPLACE (#8945) +* redis-cli --rdb fixes when using "-" to write to stdout (#9136, #9135) +* redis-cli support for RESP3 set type in CSV and RAW output (#7338) + +Modules: +* Module API for getting current command name 
(#8792) +* Fix RM_StringTruncate when newlen is 0 (#3718) +* Fix CLIENT UNBLOCK crashing modules without timeout callback (#9167) + +================================================================================ +Redis 6.2.4 Released Tue June 1 12:00:00 IST 2021 +================================================================================ + +Upgrade urgency: SECURITY, Contains fixes to security issues that affect +authenticated client connections. MODERATE otherwise. + +Fix integer overflow in STRALGO LCS (CVE-2021-32625) +An integer overflow bug in Redis version 6.0 or newer can be exploited using the +STRALGO LCS command to corrupt the heap and potentially result with remote code +execution. This is a result of an incomplete fix by CVE-2021-29477. + +Bug fixes that are only applicable to previous releases of Redis 6.2: +* Fix crash after a diskless replication fork child is terminated (#8991) +* Fix redis-benchmark crash on unsupported configs (#8916) + +Other bug fixes: +* Fix crash in UNLINK on a stream key with deleted consumer groups (#8932) +* SINTERSTORE: Add missing keyspace del event when none of the sources exist (#8949) +* Sentinel: Fix CONFIG SET of empty string sentinel-user/sentinel-pass configs (#8958) +* Enforce client output buffer soft limit when no traffic (#8833) + +Improvements: +* Hide AUTH passwords in MIGRATE command from slowlog (#8859) + ================================================================================ Redis 6.2.3 Released Mon May 3 19:00:00 IST 2021 ================================================================================ diff --git a/deps/hiredis/Makefile b/deps/hiredis/Makefile index a9e87dd82..3893a1adf 100644 --- a/deps/hiredis/Makefile +++ b/deps/hiredis/Makefile @@ -77,7 +77,12 @@ ifeq ($(USE_SSL),1) endif ifeq ($(uname_S),Linux) - SSL_LDFLAGS=-lssl -lcrypto + ifdef OPENSSL_PREFIX + CFLAGS+=-I$(OPENSSL_PREFIX)/include + SSL_LDFLAGS+=-L$(OPENSSL_PREFIX)/lib -lssl -lcrypto + else + SSL_LDFLAGS=-lssl 
-lcrypto + endif else OPENSSL_PREFIX?=/usr/local/opt/openssl CFLAGS+=-I$(OPENSSL_PREFIX)/include diff --git a/deps/hiredis/hiredis.c b/deps/hiredis/hiredis.c index 51f22a665..990f61960 100644 --- a/deps/hiredis/hiredis.c +++ b/deps/hiredis/hiredis.c @@ -174,6 +174,7 @@ static void *createArrayObject(const redisReadTask *task, size_t elements) { return NULL; if (elements > 0) { + if (SIZE_MAX / sizeof(redisReply*) < elements) return NULL; /* Don't overflow */ r->element = hi_calloc(elements,sizeof(redisReply*)); if (r->element == NULL) { freeReplyObject(r); diff --git a/deps/hiredis/test.c b/deps/hiredis/test.c index 829536739..bdff74e88 100644 --- a/deps/hiredis/test.c +++ b/deps/hiredis/test.c @@ -498,6 +498,20 @@ static void test_reply_reader(void) { freeReplyObject(reply); redisReaderFree(reader); + test("Multi-bulk never overflows regardless of maxelements: "); + size_t bad_mbulk_len = (SIZE_MAX / sizeof(void *)) + 3; + char bad_mbulk_reply[100]; + snprintf(bad_mbulk_reply, sizeof(bad_mbulk_reply), "*%llu\r\n+asdf\r\n", + (unsigned long long) bad_mbulk_len); + + reader = redisReaderCreate(); + reader->maxelements = 0; /* Don't rely on default limit */ + redisReaderFeed(reader, bad_mbulk_reply, strlen(bad_mbulk_reply)); + ret = redisReaderGetReply(reader,&reply); + test_cond(ret == REDIS_ERR && strcasecmp(reader->errstr, "Out of memory") == 0); + freeReplyObject(reply); + redisReaderFree(reader); + #if LLONG_MAX > SIZE_MAX test("Set error when array > SIZE_MAX: "); reader = redisReaderCreate(); diff --git a/runtest-moduleapi b/runtest-moduleapi index 154818ed8..8adf2171d 100755 --- a/runtest-moduleapi +++ b/runtest-moduleapi @@ -16,6 +16,7 @@ fi $MAKE -C tests/modules && \ $TCLSH tests/test_helper.tcl \ --single unit/moduleapi/commandfilter \ +--single unit/moduleapi/basics \ --single unit/moduleapi/fork \ --single unit/moduleapi/testrdb \ --single unit/moduleapi/infotest \ diff --git a/src/Makefile b/src/Makefile index 1a2e13b97..2c6bd0e07 100644 --- 
a/src/Makefile +++ b/src/Makefile @@ -156,12 +156,10 @@ ifeq ($(uname_S),Darwin) # must be referenced explicitly during build. ifeq ($(uname_M),arm64) # Homebrew arm64 uses /opt/homebrew as HOMEBREW_PREFIX - OPENSSL_CFLAGS=-I/opt/homebrew/opt/openssl/include - OPENSSL_LDFLAGS=-L/opt/homebrew/opt/openssl/lib + OPENSSL_PREFIX?=/opt/homebrew/opt/openssl else # Homebrew x86/ppc uses /usr/local as HOMEBREW_PREFIX - OPENSSL_CFLAGS=-I/usr/local/opt/openssl/include - OPENSSL_LDFLAGS=-L/usr/local/opt/openssl/lib + OPENSSL_PREFIX?=/usr/local/opt/openssl endif else ifeq ($(uname_S),AIX) @@ -229,6 +227,14 @@ endif endif endif endif + +ifdef OPENSSL_PREFIX + OPENSSL_CFLAGS=-I$(OPENSSL_PREFIX)/include + OPENSSL_LDFLAGS=-L$(OPENSSL_PREFIX)/lib + # Also export OPENSSL_PREFIX so it ends up in deps sub-Makefiles + export OPENSSL_PREFIX +endif + # Include paths to dependencies FINAL_CFLAGS+= -I../deps/hiredis -I../deps/linenoise -I../deps/lua/src -I../deps/hdr_histogram FINAL_CXXFLAGS+= -I../deps/hiredis -I../deps/linenoise -I../deps/lua/src -I../deps/hdr_histogram @@ -456,6 +462,10 @@ clean: distclean: clean -(cd ../deps && $(MAKE) distclean) -(cd modules && $(MAKE) clean) +<<<<<<< HEAD +======= + -(cd ../tests/modules && $(MAKE) clean) +>>>>>>> 6.2.6 -(rm -f .make-*) .PHONY: distclean @@ -463,6 +473,9 @@ distclean: clean test: $(REDIS_SERVER_NAME) $(REDIS_CHECK_AOF_NAME) $(REDIS_CLI_NAME) $(REDIS_BENCHMARK_NAME) @(cd ..; ./runtest) +test-modules: $(REDIS_SERVER_NAME) + @(cd ..; ./runtest-moduleapi) + test-sentinel: $(REDIS_SENTINEL_NAME) $(REDIS_CLI_NAME) @(cd ..; ./runtest-sentinel) diff --git a/src/acl.cpp b/src/acl.cpp index 654a9d6a2..cd1e149bb 100644 --- a/src/acl.cpp +++ b/src/acl.cpp @@ -1895,11 +1895,15 @@ void addACLLogEntry(client *c, int reason, int argpos, sds username) { void aclCommand(client *c) { char *sub = szFromObj(c->argv[1]); if (!strcasecmp(sub,"setuser") && c->argc >= 3) { +<<<<<<< HEAD:src/acl.cpp /* Consider information about passwords or permissions * to 
be sensitive, which will be the arguments for this * subcommand. */ preventCommandLogging(c); sds username = szFromObj(c->argv[2]); +======= + sds username = c->argv[2]->ptr; +>>>>>>> 6.2.6:src/acl.c /* Check username validity. */ if (ACLStringHasSpaces(username,sdslen(username))) { addReplyErrorFormat(c, @@ -1915,6 +1919,12 @@ void aclCommand(client *c) { user *u = ACLGetUserByName(username,sdslen(username)); if (u) ACLCopyUser(tempu, u); + /* Initially redact all of the arguments to not leak any information + * about the user. */ + for (int j = 2; j < c->argc; j++) { + redactClientCommandArgument(c, j); + } + for (int j = 3; j < c->argc; j++) { if (ACLSetUser(tempu,szFromObj(c->argv[j]),sdslen(szFromObj(c->argv[j]))) != C_OK) { const char *errmsg = ACLSetUserStringError(); @@ -2248,6 +2258,8 @@ void authCommand(client *c) { addReplyErrorObject(c,shared.syntaxerr); return; } + /* Always redact the second argument */ + redactClientCommandArgument(c, 1); /* Handle the two different forms here. The form with two arguments * will just use "default" as username. */ @@ -2267,6 +2279,7 @@ void authCommand(client *c) { } else { username = c->argv[1]; password = c->argv[2]; + redactClientCommandArgument(c, 2); } if (ACLAuthenticateUser(c,username,password) == C_OK) { diff --git a/src/ae_kqueue.c b/src/ae_kqueue.c index c6e9cf3fa..f481fd22f 100644 --- a/src/ae_kqueue.c +++ b/src/ae_kqueue.c @@ -36,8 +36,29 @@ typedef struct aeApiState { int kqfd; struct kevent *events; + + /* Events mask for merge read and write event. + * To reduce memory consumption, we use 2 bits to store the mask + * of an event, so that 1 byte will store the mask of 4 events. 
*/ + char *eventsMask; } aeApiState; +#define EVENT_MASK_MALLOC_SIZE(sz) (((sz) + 3) / 4) +#define EVENT_MASK_OFFSET(fd) ((fd) % 4 * 2) +#define EVENT_MASK_ENCODE(fd, mask) (((mask) & 0x3) << EVENT_MASK_OFFSET(fd)) + +static inline int getEventMask(const char *eventsMask, int fd) { + return (eventsMask[fd/4] >> EVENT_MASK_OFFSET(fd)) & 0x3; +} + +static inline void addEventMask(char *eventsMask, int fd, int mask) { + eventsMask[fd/4] |= EVENT_MASK_ENCODE(fd, mask); +} + +static inline void resetEventMask(char *eventsMask, int fd) { + eventsMask[fd/4] &= ~EVENT_MASK_ENCODE(fd, 0x3); +} + static int aeApiCreate(aeEventLoop *eventLoop) { aeApiState *state = (aeApiState*)zmalloc(sizeof(aeApiState), MALLOC_LOCAL); @@ -54,6 +75,8 @@ static int aeApiCreate(aeEventLoop *eventLoop) { return -1; } anetCloexec(state->kqfd); + state->eventsMask = zmalloc(EVENT_MASK_MALLOC_SIZE(eventLoop->setsize)); + memset(state->eventsMask, 0, EVENT_MASK_MALLOC_SIZE(eventLoop->setsize)); eventLoop->apidata = state; return 0; } @@ -61,7 +84,13 @@ static int aeApiCreate(aeEventLoop *eventLoop) { static int aeApiResize(aeEventLoop *eventLoop, int setsize) { aeApiState *state = (aeApiState*)eventLoop->apidata; +<<<<<<< HEAD state->events = (struct kevent*)zrealloc(state->events, sizeof(struct kevent)*setsize, MALLOC_LOCAL); +======= + state->events = zrealloc(state->events, sizeof(struct kevent)*setsize); + state->eventsMask = zrealloc(state->eventsMask, EVENT_MASK_MALLOC_SIZE(setsize)); + memset(state->eventsMask, 0, EVENT_MASK_MALLOC_SIZE(setsize)); +>>>>>>> 6.2.6 return 0; } @@ -70,6 +99,7 @@ static void aeApiFree(aeEventLoop *eventLoop) { close(state->kqfd); zfree(state->events); + zfree(state->eventsMask); zfree(state); } @@ -120,15 +150,37 @@ static int aeApiPoll(aeEventLoop *eventLoop, struct timeval *tvp) { if (retval > 0) { int j; - numevents = retval; - for(j = 0; j < numevents; j++) { - int mask = 0; + /* Normally we execute the read event first and then the write event. 
+ * When the barrier is set, we will do it reverse. + * + * However, under kqueue, read and write events would be separate + * events, which would make it impossible to control the order of + * reads and writes. So we store the event's mask we've got and merge + * the same fd events later. */ + for (j = 0; j < retval; j++) { struct kevent *e = state->events+j; + int fd = e->ident; + int mask = 0; - if (e->filter == EVFILT_READ) mask |= AE_READABLE; - if (e->filter == EVFILT_WRITE) mask |= AE_WRITABLE; - eventLoop->fired[j].fd = e->ident; - eventLoop->fired[j].mask = mask; + if (e->filter == EVFILT_READ) mask = AE_READABLE; + else if (e->filter == EVFILT_WRITE) mask = AE_WRITABLE; + addEventMask(state->eventsMask, fd, mask); + } + + /* Re-traversal to merge read and write events, and set the fd's mask to + * 0 so that events are not added again when the fd is encountered again. */ + numevents = 0; + for (j = 0; j < retval; j++) { + struct kevent *e = state->events+j; + int fd = e->ident; + int mask = getEventMask(state->eventsMask, fd); + + if (mask) { + eventLoop->fired[numevents].fd = fd; + eventLoop->fired[numevents].mask = mask; + resetEventMask(state->eventsMask, fd); + numevents++; + } } } return numevents; diff --git a/src/aof.cpp b/src/aof.cpp index 9ddc52d0a..795f41a31 100644 --- a/src/aof.cpp +++ b/src/aof.cpp @@ -180,7 +180,16 @@ void aofRewriteBufferAppend(unsigned char *s, unsigned long len) { /* Install a file event to send data to the rewrite child if there is * not one already. 
*/ +<<<<<<< HEAD:src/aof.cpp installAofRewriteEvent(); +======= + if (!server.aof_stop_sending_diff && + aeGetFileEvents(server.el,server.aof_pipe_write_data_to_child) == 0) + { + aeCreateFileEvent(server.el, server.aof_pipe_write_data_to_child, + AE_WRITABLE, aofChildWriteDiffData, NULL); + } +>>>>>>> 6.2.6:src/aof.c } /* Write the buffer (possibly composed of multiple blocks) into the specified diff --git a/src/bitops.cpp b/src/bitops.cpp index c03f068fd..4bc15905c 100644 --- a/src/bitops.cpp +++ b/src/bitops.cpp @@ -37,9 +37,15 @@ /* Count number of bits set in the binary array pointed by 's' and long * 'count' bytes. The implementation of this function is required to * work with an input string length up to 512 MB or more (server.proto_max_bulk_len) */ +<<<<<<< HEAD:src/bitops.cpp size_t redisPopcount(const void *s, long count) { size_t bits = 0; unsigned char *p = (unsigned char*)s; +======= +long long redisPopcount(void *s, long count) { + long long bits = 0; + unsigned char *p = s; +>>>>>>> 6.2.6:src/bitops.c uint32_t *p4; static const unsigned char bitsinbyte[256] = {0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,1,2,2,3,2,3,3,4,2,3,3,4,3,4,4,5,1,2,2,3,2,3,3,4,2,3,3,4,3,4,4,5,2,3,3,4,3,4,4,5,3,4,4,5,4,5,5,6,1,2,2,3,2,3,3,4,2,3,3,4,3,4,4,5,2,3,3,4,3,4,4,5,3,4,4,5,4,5,5,6,2,3,3,4,3,4,4,5,3,4,4,5,4,5,5,6,3,4,4,5,4,5,5,6,4,5,5,6,5,6,6,7,1,2,2,3,2,3,3,4,2,3,3,4,3,4,4,5,2,3,3,4,3,4,4,5,3,4,4,5,4,5,5,6,2,3,3,4,3,4,4,5,3,4,4,5,4,5,5,6,3,4,4,5,4,5,5,6,4,5,5,6,5,6,6,7,2,3,3,4,3,4,4,5,3,4,4,5,4,5,5,6,3,4,4,5,4,5,5,6,4,5,5,6,5,6,6,7,3,4,4,5,4,5,5,6,4,5,5,6,5,6,6,7,4,5,5,6,5,6,6,7,5,6,6,7,6,7,7,8}; @@ -98,11 +104,15 @@ size_t redisPopcount(const void *s, long count) { * no zero bit is found, it returns count*8 assuming the string is zero * padded on the right. However if 'bit' is 1 it is possible that there is * not a single set bit in the bitmap. In this special case -1 is returned. 
*/ +<<<<<<< HEAD:src/bitops.cpp long redisBitpos(const void *s, unsigned long count, int bit) { +======= +long long redisBitpos(void *s, unsigned long count, int bit) { +>>>>>>> 6.2.6:src/bitops.c unsigned long *l; unsigned char *c; unsigned long skipval, word = 0, one; - long pos = 0; /* Position of bit, to return to the caller. */ + long long pos = 0; /* Position of bit, to return to the caller. */ unsigned long j; int found; @@ -412,7 +422,7 @@ void printBits(unsigned char *p, unsigned long count) { * If the 'hash' argument is true, and 'bits is positive, then the command * will also parse bit offsets prefixed by "#". In such a case the offset * is multiplied by 'bits'. This is useful for the BITFIELD command. */ -int getBitOffsetFromArgument(client *c, robj *o, size_t *offset, int hash, int bits) { +int getBitOffsetFromArgument(client *c, robj *o, uint64_t *offset, int hash, int bits) { long long loffset; const char *err = "bit offset is not an integer or out of range"; char *p = szFromObj(o); @@ -437,7 +447,7 @@ int getBitOffsetFromArgument(client *c, robj *o, size_t *offset, int hash, int b return C_ERR; } - *offset = (size_t)loffset; + *offset = loffset; return C_OK; } @@ -479,7 +489,7 @@ int getBitfieldTypeFromArgument(client *c, robj *o, int *sign, int *bits) { * so that the 'maxbit' bit can be addressed. The object is finally * returned. Otherwise if the key holds a wrong type NULL is returned and * an error is sent to the client. 
*/ -robj *lookupStringForBitCommand(client *c, size_t maxbit) { +robj *lookupStringForBitCommand(client *c, uint64_t maxbit) { size_t byte = maxbit >> 3; robj *o = lookupKeyWrite(c->db,c->argv[1]); if (checkType(c,o,OBJ_STRING)) return NULL; @@ -528,8 +538,13 @@ const unsigned char *getObjectReadOnlyString(robj_roptr o, long *len, char *llbu /* SETBIT key offset bitvalue */ void setbitCommand(client *c) { robj *o; +<<<<<<< HEAD:src/bitops.cpp const char *err = "bit is not an integer or out of range"; size_t bitoffset; +======= + char *err = "bit is not an integer or out of range"; + uint64_t bitoffset; +>>>>>>> 6.2.6:src/bitops.c ssize_t byte, bit; int byteval, bitval; long on; @@ -568,7 +583,7 @@ void setbitCommand(client *c) { void getbitCommand(client *c) { robj_roptr o; char llbuf[32]; - size_t bitoffset; + uint64_t bitoffset; size_t byte, bit; size_t bitval = 0; @@ -961,7 +976,7 @@ void bitposCommand(client *c) { addReplyLongLong(c, -1); } else { long bytes = end-start+1; - long pos = redisBitpos(p+start,bytes,bit); + long long pos = redisBitpos(p+start,bytes,bit); /* If we are looking for clear bits, and the user specified an exact * range with start-end, we can't consider the right of the range as @@ -970,11 +985,11 @@ void bitposCommand(client *c) { * So if redisBitpos() returns the first bit outside the range, * we return -1 to the caller, to mean, in the specified range there * is not a single "0" bit. */ - if (end_given && bit == 0 && pos == bytes*8) { + if (end_given && bit == 0 && pos == (long long)bytes<<3) { addReplyLongLong(c,-1); return; } - if (pos != -1) pos += start*8; /* Adjust for the bytes we skipped. */ + if (pos != -1) pos += (long long)start<<3; /* Adjust for the bytes we skipped. */ addReplyLongLong(c,pos); } } @@ -1005,13 +1020,18 @@ struct bitfieldOp { * when flags is set to BITFIELD_FLAG_READONLY: in this case only the * GET subcommand is allowed, other subcommands will return an error. 
*/ void bitfieldGeneric(client *c, int flags) { +<<<<<<< HEAD:src/bitops.cpp robj_roptr o; size_t bitoffset; +======= + robj *o; + uint64_t bitoffset; +>>>>>>> 6.2.6:src/bitops.c int j, numops = 0, changes = 0; struct bitfieldOp *ops = NULL; /* Array of ops to execute at end. */ int owtype = BFOVERFLOW_WRAP; /* Overflow type. */ int readonly = 1; - size_t highest_write_offset = 0; + uint64_t highest_write_offset = 0; for (j = 2; j < c->argc; j++) { int remargs = c->argc-j-1; /* Remaining args other than current. */ @@ -1201,9 +1221,9 @@ void bitfieldGeneric(client *c, int flags) { * object boundaries. */ memset(buf,0,9); int i; - size_t byte = thisop->offset >> 3; + uint64_t byte = thisop->offset >> 3; for (i = 0; i < 9; i++) { - if (src == NULL || i+byte >= (size_t)strlen) break; + if (src == NULL || i+byte >= (uint64_t)strlen) break; buf[i] = src[i+byte]; } diff --git a/src/blocked.cpp b/src/blocked.cpp index f3d4ec9e5..b376a0838 100644 --- a/src/blocked.cpp +++ b/src/blocked.cpp @@ -88,7 +88,15 @@ typedef struct bkinfo { * flag is set client query buffer is not longer processed, but accumulated, * and will be processed when the client is unblocked. */ void blockClient(client *c, int btype) { +<<<<<<< HEAD:src/blocked.cpp serverAssert(GlobalLocksAcquired()); +======= + /* Master client should never be blocked unless pause or module */ + serverAssert(!(c->flags & CLIENT_MASTER && + btype != BLOCKED_MODULE && + btype != BLOCKED_PAUSE)); + +>>>>>>> 6.2.6:src/blocked.c c->flags |= CLIENT_BLOCKED; c->btype = btype; g_pserver->blocked_clients++; diff --git a/src/childinfo.cpp b/src/childinfo.cpp index 1d763729e..d9a482687 100644 --- a/src/childinfo.cpp +++ b/src/childinfo.cpp @@ -93,7 +93,7 @@ void sendChildInfoGeneric(childInfoType info_type, size_t keys, double progress, if (cow) { serverLog((info_type == CHILD_INFO_TYPE_CURRENT_INFO) ? 
LL_VERBOSE : LL_NOTICE, "%s: %zu MB of memory used by copy-on-write", - pname, data.cow / (1024 * 1024)); + pname, cow / (1024 * 1024)); } } diff --git a/src/cluster.cpp b/src/cluster.cpp index 256b5cebf..8145347e3 100644 --- a/src/cluster.cpp +++ b/src/cluster.cpp @@ -746,6 +746,7 @@ void clusterAcceptHandler(aeEventLoop *el, int fd, void *privdata, int mask) { } connNonBlock(conn); connEnableTcpNoDelay(conn); + connKeepAlive(conn,server.cluster_node_timeout * 2); /* Use non-blocking I/O for cluster messages. */ serverLog(LL_VERBOSE,"Accepting cluster node connection from %s:%d", cip, cport); @@ -5276,7 +5277,11 @@ void restoreCommand(client *c) { rioInitWithBuffer(&payload,szFromObj(c->argv[3])); if (((type = rdbLoadObjectType(&payload)) == -1) || +<<<<<<< HEAD:src/cluster.cpp ((obj = rdbLoadObject(type,&payload,szFromObj(key), OBJ_MVCC_INVALID)) == NULL)) +======= + ((obj = rdbLoadObject(type,&payload,key->ptr,NULL)) == NULL)) +>>>>>>> 6.2.6:src/cluster.c { addReplyError(c,"Bad data format"); return; @@ -5459,16 +5464,31 @@ void migrateCommand(client *c) { return; } j++; +<<<<<<< HEAD:src/cluster.cpp password = szFromObj(c->argv[j]); } else if (!strcasecmp(szFromObj(c->argv[j]),"auth2")) { +======= + password = c->argv[j]->ptr; + redactClientCommandArgument(c,j); + } else if (!strcasecmp(c->argv[j]->ptr,"auth2")) { +>>>>>>> 6.2.6:src/cluster.c if (moreargs < 2) { addReply(c,shared.syntaxerr); return; } +<<<<<<< HEAD:src/cluster.cpp username = szFromObj(c->argv[++j]); password = szFromObj(c->argv[++j]); } else if (!strcasecmp(szFromObj(c->argv[j]),"keys")) { if (sdslen(szFromObj(c->argv[3])) != 0) { +======= + username = c->argv[++j]->ptr; + redactClientCommandArgument(c,j); + password = c->argv[++j]->ptr; + redactClientCommandArgument(c,j); + } else if (!strcasecmp(c->argv[j]->ptr,"keys")) { + if (sdslen(c->argv[3]->ptr) != 0) { +>>>>>>> 6.2.6:src/cluster.c addReplyError(c, "When using MIGRATE KEYS option, the key argument" " must be set to the empty string"); 
diff --git a/src/config.cpp b/src/config.cpp index ad980e67c..aa3b32ebb 100644 --- a/src/config.cpp +++ b/src/config.cpp @@ -800,7 +800,7 @@ void configSetCommand(client *c) { (config->alias && !strcasecmp(szFromObj(c->argv[2]),config->alias)))) { if (config->flags & SENSITIVE_CONFIG) { - preventCommandLogging(c); + redactClientCommandArgument(c,3); } if (!config->interface.set(config->data,szFromObj(o),1,&errstr)) { goto badfmt; diff --git a/src/config.h b/src/config.h index 0475ee7d5..feb9ee310 100644 --- a/src/config.h +++ b/src/config.h @@ -36,6 +36,7 @@ #ifdef __linux__ #include +#include #endif #define CONFIG_DEFAULT_RDB_FILENAME "dump.rdb" diff --git a/src/db.cpp b/src/db.cpp index 2e959cf9c..b2d67fa5f 100644 --- a/src/db.cpp +++ b/src/db.cpp @@ -30,7 +30,11 @@ #include "server.h" #include "cluster.h" #include "atomicvar.h" +<<<<<<< HEAD:src/db.cpp #include "aelocker.h" +======= +#include "latency.h" +>>>>>>> 6.2.6:src/db.c #include #include @@ -194,7 +198,7 @@ robj *lookupKeyWriteWithFlags(redisDb *db, robj *key, int flags) { robj *lookupKeyWrite(redisDb *db, robj *key) { return lookupKeyWriteWithFlags(db, key, LOOKUP_NONE); } -static void SentReplyOnKeyMiss(client *c, robj *reply){ +void SentReplyOnKeyMiss(client *c, robj *reply){ serverAssert(sdsEncodedObject(reply)); sds rep = szFromObj(reply); if (sdslen(rep) > 1 && rep[0] == '-'){ @@ -1670,6 +1674,22 @@ expireEntry *getExpire(redisDb *db, robj_roptr key) { return itr.operator->(); } +/* Delete the specified expired key and propagate expire. 
*/ +void deleteExpiredKeyAndPropagate(redisDb *db, robj *keyobj) { + mstime_t expire_latency; + latencyStartMonitor(expire_latency); + if (server.lazyfree_lazy_expire) + dbAsyncDelete(db,keyobj); + else + dbSyncDelete(db,keyobj); + latencyEndMonitor(expire_latency); + latencyAddSampleIfNeeded("expire-del",expire_latency); + notifyKeyspaceEvent(NOTIFY_EXPIRED,"expired",keyobj,db->id); + signalModifiedKey(NULL, db, keyobj); + propagateExpire(db,keyobj,server.lazyfree_lazy_expire); + server.stat_expiredkeys++; +} + /* Propagate expires into slaves and the AOF file. * When a key expires in the master, a DEL operation for this key is sent * to all the slaves and the AOF file if enabled. @@ -1823,6 +1843,7 @@ int expireIfNeeded(redisDb *db, robj *key) { if (checkClientPauseTimeoutAndReturnIfPaused()) return 1; /* Delete the key */ +<<<<<<< HEAD:src/db.cpp if (g_pserver->lazyfree_lazy_expire) { dbAsyncDelete(db,key); } else { @@ -1833,6 +1854,9 @@ int expireIfNeeded(redisDb *db, robj *key) { notifyKeyspaceEvent(NOTIFY_EXPIRED, "expired",key,db->id); signalModifiedKey(NULL,db,key); +======= + deleteExpiredKeyAndPropagate(db,key); +>>>>>>> 6.2.6:src/db.c return 1; } @@ -1897,7 +1921,6 @@ int getKeysUsingCommandTable(struct redisCommand *cmd,robj **argv, int argc, get * return no keys and expect the command implementation to report * an arity or syntax error. 
*/ if (cmd->flags & CMD_MODULE || cmd->arity < 0) { - getKeysFreeResult(result); result->numkeys = 0; return 0; } else { diff --git a/src/debug.cpp b/src/debug.cpp index 24dad0cb1..8dc71bd78 100644 --- a/src/debug.cpp +++ b/src/debug.cpp @@ -741,7 +741,7 @@ NULL } else if (!strcasecmp(name,"double")) { addReplyDouble(c,3.14159265359); } else if (!strcasecmp(name,"bignum")) { - addReplyProto(c,"(1234567999999999999999999999999999999\r\n",40); + addReplyBigNum(c,"1234567999999999999999999999999999999",37); } else if (!strcasecmp(name,"null")) { addReplyNull(c); } else if (!strcasecmp(name,"array")) { @@ -757,11 +757,13 @@ NULL addReplyBool(c, j == 1); } } else if (!strcasecmp(name,"attrib")) { - addReplyAttributeLen(c,1); - addReplyBulkCString(c,"key-popularity"); - addReplyArrayLen(c,2); - addReplyBulkCString(c,"key:123"); - addReplyLongLong(c,90); + if (c->resp >= 3) { + addReplyAttributeLen(c,1); + addReplyBulkCString(c,"key-popularity"); + addReplyArrayLen(c,2); + addReplyBulkCString(c,"key:123"); + addReplyLongLong(c,90); + } /* Attributes are not real replies, so a well formed reply should * also have a normal reply type after the attribute. */ addReplyBulkCString(c,"Some real reply following the attribute"); diff --git a/src/dict.cpp b/src/dict.cpp index fdda6baf6..13f9a28ed 100644 --- a/src/dict.cpp +++ b/src/dict.cpp @@ -154,6 +154,10 @@ int _dictExpand(dict *d, unsigned long size, int* malloc_failed) dictht n; /* the new hash table */ unsigned long realsize = _dictNextPower(size); + /* Detect overflows */ + if (realsize < size || realsize * sizeof(dictEntry*) < realsize) + return DICT_ERR; + /* Rehashing to the same table size is not useful. 
*/ if (realsize == d->ht[0].size) return DICT_ERR; diff --git a/src/expire.cpp b/src/expire.cpp index d7735fbc6..4d017c05a 100644 --- a/src/expire.cpp +++ b/src/expire.cpp @@ -44,6 +44,7 @@ * * The parameter 'now' is the current time in milliseconds as is passed * to the function to avoid too many gettimeofday() syscalls. */ +<<<<<<< HEAD:src/expire.cpp void activeExpireCycleExpireFullKey(redisDb *db, const char *key) { robj *keyobj = createStringObject(key,sdslen(key)); mstime_t expire_latency; @@ -77,6 +78,15 @@ int activeExpireCycleExpire(redisDb *db, expireEntry &e, long long now, size_t & { activeExpireCycleExpireFullKey(db, e.key()); ++tried; +======= +int activeExpireCycleTryExpire(redisDb *db, dictEntry *de, long long now) { + long long t = dictGetSignedIntegerVal(de); + if (now > t) { + sds key = dictGetKey(de); + robj *keyobj = createStringObject(key,sdslen(key)); + deleteExpiredKeyAndPropagate(db,keyobj); + decrRefCount(keyobj); +>>>>>>> 6.2.6:src/expire.c return 1; } diff --git a/src/geo.cpp b/src/geo.cpp index 10b6c15ed..e9da53ccc 100644 --- a/src/geo.cpp +++ b/src/geo.cpp @@ -509,7 +509,7 @@ void geoaddCommand(client *c) { * [COUNT count [ANY]] [STORE key] [STOREDIST key] * GEORADIUSBYMEMBER key member radius unit ... options ... * GEOSEARCH key [FROMMEMBER member] [FROMLONLAT long lat] [BYRADIUS radius unit] - * [BYBOX width height unit] [WITHCORD] [WITHDIST] [WITHASH] [COUNT count [ANY]] [ASC|DESC] + * [BYBOX width height unit] [WITHCOORD] [WITHDIST] [WITHASH] [COUNT count [ANY]] [ASC|DESC] * GEOSEARCHSTORE dest_key src_key [FROMMEMBER member] [FROMLONLAT long lat] [BYRADIUS radius unit] * [BYBOX width height unit] [WITHCORD] [WITHDIST] [WITHASH] [COUNT count [ANY]] [ASC|DESC] [STOREDIST] * */ @@ -518,21 +518,32 @@ void georadiusGeneric(client *c, int srcKeyIndex, int flags) { int storedist = 0; /* 0 for STORE, 1 for STOREDIST. 
*/ /* Look up the requested zset */ +<<<<<<< HEAD:src/geo.cpp robj_roptr zobj = nullptr; if ((zobj = lookupKeyReadOrReply(c, c->argv[srcKeyIndex], shared.emptyarray)) == nullptr || checkType(c, zobj, OBJ_ZSET)) { return; } +======= + robj *zobj = lookupKeyRead(c->db, c->argv[srcKeyIndex]); + if (checkType(c, zobj, OBJ_ZSET)) return; +>>>>>>> 6.2.6:src/geo.c /* Find long/lat to use for radius or box search based on inquiry type */ int base_args; GeoShape shape = {0}; if (flags & RADIUS_COORDS) { + /* GEORADIUS or GEORADIUS_RO */ base_args = 6; shape.type = CIRCULAR_TYPE; if (extractLongLatOrReply(c, c->argv + 2, shape.xy) == C_ERR) return; if (extractDistanceOrReply(c, c->argv+base_args-2, &shape.conversion, &shape.t.radius) != C_OK) return; + } else if ((flags & RADIUS_MEMBER) && !zobj) { + /* We don't have a source key, but we need to proceed with argument + * parsing, so we know which reply to use depending on the STORE flag. */ + base_args = 5; } else if (flags & RADIUS_MEMBER) { + /* GEORADIUSBYMEMBER or GEORADIUSBYMEMBER_RO */ base_args = 5; shape.type = CIRCULAR_TYPE; robj *member = c->argv[2]; @@ -542,6 +553,7 @@ void georadiusGeneric(client *c, int srcKeyIndex, int flags) { } if (extractDistanceOrReply(c, c->argv+base_args-2, &shape.conversion, &shape.t.radius) != C_OK) return; } else if (flags & GEOSEARCH) { + /* GEOSEARCH or GEOSEARCHSTORE */ base_args = 2; if (flags & GEOSEARCHSTORE) { base_args = 3; @@ -608,6 +620,13 @@ void georadiusGeneric(client *c, int srcKeyIndex, int flags) { flags & GEOSEARCH && !fromloc) { + /* No source key, proceed with argument parsing and return an error when done. */ + if (zobj == NULL) { + frommember = 1; + i++; + continue; + } + if (longLatFromMember(zobj, c->argv[base_args+i+1], shape.xy) == C_ERR) { addReplyError(c, "could not decode requested zset member"); return; @@ -676,6 +695,23 @@ void georadiusGeneric(client *c, int srcKeyIndex, int flags) { return; } + /* Return ASAP when src key does not exist. 
*/ + if (zobj == NULL) { + if (storekey) { + /* store key is not NULL, try to delete it and return 0. */ + if (dbDelete(c->db, storekey)) { + signalModifiedKey(c, c->db, storekey); + notifyKeyspaceEvent(NOTIFY_GENERIC, "del", storekey, c->db->id); + server.dirty++; + } + addReply(c, shared.czero); + } else { + /* Otherwise we return an empty array. */ + addReply(c, shared.emptyarray); + } + return; + } + /* COUNT without ordering does not make much sense (we need to * sort in order to return the closest N entries), * force ASC ordering if COUNT was specified but no sorting was @@ -770,7 +806,7 @@ void georadiusGeneric(client *c, int srcKeyIndex, int flags) { robj *zobj; zset *zs; int i; - size_t maxelelen = 0; + size_t maxelelen = 0, totelelen = 0; if (returned_items) { zobj = createZsetObject(); @@ -785,13 +821,14 @@ void georadiusGeneric(client *c, int srcKeyIndex, int flags) { size_t elelen = sdslen(gp->member); if (maxelelen < elelen) maxelelen = elelen; + totelelen += elelen; znode = zslInsert(zs->zsl,score,gp->member); serverAssert(dictAdd(zs->dict,gp->member,&znode->score) == DICT_OK); gp->member = NULL; } if (returned_items) { - zsetConvertToZiplistIfNeeded(zobj,maxelelen); + zsetConvertToZiplistIfNeeded(zobj,maxelelen,totelelen); setKey(c,c->db,storekey,zobj); decrRefCount(zobj); notifyKeyspaceEvent(NOTIFY_ZSET,flags & GEOSEARCH ? 
"geosearchstore" : "georadiusstore",storekey, diff --git a/src/intset.c b/src/intset.c index fd634ed9d..b8fffd3f8 100644 --- a/src/intset.c +++ b/src/intset.c @@ -104,8 +104,14 @@ intset *intsetNew(void) { /* Resize the intset */ static intset *intsetResize(intset *is, uint32_t len) { +<<<<<<< HEAD uint32_t size = len*intrev32ifbe(is->encoding); is = zrealloc(is,sizeof(intset)+size, MALLOC_SHARED); +======= + uint64_t size = (uint64_t)len*intrev32ifbe(is->encoding); + assert(size <= SIZE_MAX - sizeof(intset)); + is = zrealloc(is,sizeof(intset)+size); +>>>>>>> 6.2.6 return is; } diff --git a/src/lazyfree.cpp b/src/lazyfree.cpp index c3bc9ddb0..b57817a7c 100644 --- a/src/lazyfree.cpp +++ b/src/lazyfree.cpp @@ -109,7 +109,7 @@ size_t lazyfreeGetFreeEffort(robj *key, robj *obj) { /* Every consumer group is an allocation and so are the entries in its * PEL. We use size of the first group's PEL as an estimate for all * others. */ - if (s->cgroups) { + if (s->cgroups && raxSize(s->cgroups)) { raxIterator ri; streamCG *cg; raxStart(&ri,s->cgroups); diff --git a/src/listpack.c b/src/listpack.c index ee256bad3..8424da87f 100644 --- a/src/listpack.c +++ b/src/listpack.c @@ -131,6 +131,8 @@ assert((p) >= (lp)+LP_HDR_SIZE && (p)+(len) < (lp)+lpGetTotalBytes((lp))); \ } while (0) +static inline void lpAssertValidEntry(unsigned char* lp, size_t lpbytes, unsigned char *p); + /* Convert a string into a signed 64 bit integer. * The function returns 1 if the string could be parsed into a (non-overflowing) * signed 64 bit int, 0 otherwise. 
The 'value' will be set to the parsed value @@ -313,7 +315,7 @@ int lpEncodeGetType(unsigned char *ele, uint32_t size, unsigned char *intenc, ui } else { if (size < 64) *enclen = 1+size; else if (size < 4096) *enclen = 2+size; - else *enclen = 5+size; + else *enclen = 5+(uint64_t)size; return LP_ENCODING_STRING; } } @@ -453,8 +455,8 @@ unsigned char *lpSkip(unsigned char *p) { unsigned char *lpNext(unsigned char *lp, unsigned char *p) { assert(p); p = lpSkip(p); - ASSERT_INTEGRITY(lp, p); if (p[0] == LP_EOF) return NULL; + lpAssertValidEntry(lp, lpBytes(lp), p); return p; } @@ -468,16 +470,17 @@ unsigned char *lpPrev(unsigned char *lp, unsigned char *p) { uint64_t prevlen = lpDecodeBacklen(p); prevlen += lpEncodeBacklen(NULL,prevlen); p -= prevlen-1; /* Seek the first byte of the previous entry. */ - ASSERT_INTEGRITY(lp, p); + lpAssertValidEntry(lp, lpBytes(lp), p); return p; } /* Return a pointer to the first element of the listpack, or NULL if the * listpack has no elements. */ unsigned char *lpFirst(unsigned char *lp) { - lp += LP_HDR_SIZE; /* Skip the header. */ - if (lp[0] == LP_EOF) return NULL; - return lp; + unsigned char *p = lp + LP_HDR_SIZE; /* Skip the header. */ + if (p[0] == LP_EOF) return NULL; + lpAssertValidEntry(lp, lpBytes(lp), p); + return p; } /* Return a pointer to the last element of the listpack, or NULL if the @@ -861,6 +864,13 @@ unsigned char *lpSeek(unsigned char *lp, long index) { } } +/* Same as lpFirst but without validation assert, to be used right before lpValidateNext. */ +unsigned char *lpValidateFirst(unsigned char *lp) { + unsigned char *p = lp + LP_HDR_SIZE; /* Skip the header. */ + if (p[0] == LP_EOF) return NULL; + return p; +} + /* Validate the integrity of a single listpack entry and move to the next one. * The input argument 'pp' is a reference to the current record and is advanced on exit. * Returns 1 if valid, 0 if invalid. 
*/ @@ -872,6 +882,10 @@ int lpValidateNext(unsigned char *lp, unsigned char **pp, size_t lpbytes) { if (!p) return 0; + /* Before accessing p, make sure it's valid. */ + if (OUT_OF_RANGE(p)) + return 0; + if (*p == LP_EOF) { *pp = NULL; return 1; @@ -908,6 +922,11 @@ int lpValidateNext(unsigned char *lp, unsigned char **pp, size_t lpbytes) { #undef OUT_OF_RANGE } +/* Validate that the entry doesn't reach outside the listpack allocation. */ +static inline void lpAssertValidEntry(unsigned char* lp, size_t lpbytes, unsigned char *p) { + assert(lpValidateNext(lp, &p, lpbytes)); +} + /* Validate the integrity of the data structure. * when `deep` is 0, only the integrity of the header is validated. * when `deep` is 1, we scan all the entries one by one. */ @@ -930,8 +949,8 @@ int lpValidateIntegrity(unsigned char *lp, size_t size, int deep){ /* Validate the invividual entries. */ uint32_t count = 0; - unsigned char *p = lpFirst(lp); - while(p) { + unsigned char *p = lp + LP_HDR_SIZE; + while(p && p[0] != LP_EOF) { if (!lpValidateNext(lp, &p, bytes)) return 0; count++; diff --git a/src/listpack.h b/src/listpack.h index 7760d9bc5..7f3197324 100644 --- a/src/listpack.h +++ b/src/listpack.h @@ -64,6 +64,7 @@ unsigned char *lpPrev(unsigned char *lp, unsigned char *p); uint32_t lpBytes(unsigned char *lp); unsigned char *lpSeek(unsigned char *lp, long index); int lpValidateIntegrity(unsigned char *lp, size_t size, int deep); +unsigned char *lpValidateFirst(unsigned char *lp); int lpValidateNext(unsigned char *lp, unsigned char **pp, size_t lpbytes); #ifdef __cplusplus diff --git a/src/module.cpp b/src/module.cpp index b4b99fac6..de5b0db50 100644 --- a/src/module.cpp +++ b/src/module.cpp @@ -2632,7 +2632,11 @@ int RM_StringTruncate(RedisModuleKey *key, size_t newlen) { if (newlen > curlen) { key->value->m_ptr = sdsgrowzero(szFromObj(key->value),newlen); } else if (newlen < curlen) { +<<<<<<< HEAD:src/module.cpp sdsrange(szFromObj(key->value),0,newlen-1); +======= + 
sdssubstr(key->value->ptr,0,newlen); +>>>>>>> 6.2.6:src/module.c /* If the string is too wasteful, reallocate it. */ if (sdslen(szFromObj(key->value)) < sdsavail(szFromObj(key->value))) key->value->m_ptr = sdsRemoveFreeSpace(szFromObj(key->value)); @@ -3409,6 +3413,7 @@ int RM_HashGet(RedisModuleKey *key, int flags, ...) { * - EDOM if the given ID was 0-0 or not greater than all other IDs in the * stream (only if the AUTOID flag is unset) * - EFBIG if the stream has reached the last possible ID + * - ERANGE if the elements are too large to be stored. */ int RM_StreamAdd(RedisModuleKey *key, int flags, RedisModuleStreamID *id, RedisModuleString **argv, long numfields) { /* Validate args */ @@ -3452,8 +3457,9 @@ int RM_StreamAdd(RedisModuleKey *key, int flags, RedisModuleStreamID *id, RedisM use_id_ptr = &use_id; } if (streamAppendItem(s, argv, numfields, &added_id, use_id_ptr) == C_ERR) { - /* ID not greater than all existing IDs in the stream */ - errno = EDOM; + /* Either the ID not greater than all existing IDs in the stream, or + * the elements are too large to be stored. either way, errno is already + * set by streamAppendItem. */ return REDISMODULE_ERR; } /* Postponed signalKeyAsReady(). Done implicitly by moduleCreateEmptyKey() @@ -5467,8 +5473,8 @@ int moduleTryServeClientBlockedOnKey(client *c, robj *key) { * reply_callback: called after a successful RedisModule_UnblockClient() * call in order to reply to the client and unblock it. * - * timeout_callback: called when the timeout is reached in order to send an - * error to the client. + * timeout_callback: called when the timeout is reached or if `CLIENT UNBLOCK` + * is invoked, in order to send an error to the client. * * free_privdata: called in order to free the private data that is passed * by RedisModule_UnblockClient() call. 
@@ -5485,6 +5491,12 @@ int moduleTryServeClientBlockedOnKey(client *c, robj *key) { * In these cases, a call to RedisModule_BlockClient() will **not** block the * client, but instead produce a specific error reply. * + * A module that registers a timeout_callback function can also be unblocked + * using the `CLIENT UNBLOCK` command, which will trigger the timeout callback. + * If a callback function is not registered, then the blocked client will be + * treated as if it is not in a blocked state and `CLIENT UNBLOCK` will return + * a zero value. + * * Measuring background time: By default the time spent in the blocked command * is not account for the total command duration. To include such time you should * use RM_BlockedClientMeasureTimeStart() and RM_BlockedClientMeasureTimeEnd() one, @@ -5771,6 +5783,17 @@ void moduleHandleBlockedClients(int iel) { pthread_mutex_unlock(&moduleUnblockedClientsMutex); } +/* Check if the specified client can be safely timed out using + * moduleBlockedClientTimedOut(). + */ +int moduleBlockedClientMayTimeout(client *c) { + if (c->btype != BLOCKED_MODULE) + return 1; + + RedisModuleBlockedClient *bc = c->bpop.module_blocked_handle; + return (bc && bc->timeout_callback != NULL); +} + /* Called when our client timed out. After this function unblockClient() * is called, and it will invalidate the blocked client. So this function * does not need to do any cleanup. 
Eventually the module will call the @@ -8908,8 +8931,9 @@ sds genModulesInfoStringRenderModulesList(list *l) { while((ln = listNext(&li))) { RedisModule *module = (RedisModule*)ln->value; output = sdscat(output,module->name); + if (ln != listLast(l)) + output = sdscat(output,"|"); } - output = sdstrim(output,"|"); output = sdscat(output,"]"); return output; } @@ -9237,6 +9261,14 @@ int *RM_GetCommandKeys(RedisModuleCtx *ctx, RedisModuleString **argv, int argc, return res; } +/* Return the name of the command currently running */ +const char *RM_GetCurrentCommandName(RedisModuleCtx *ctx) { + if (!ctx || !ctx->client || !ctx->client->cmd) + return NULL; + + return (const char*)ctx->client->cmd->name; +} + /* -------------------------------------------------------------------------- * ## Defrag API * -------------------------------------------------------------------------- */ @@ -9698,6 +9730,7 @@ void moduleRegisterCoreAPI(void) { REGISTER_API(GetServerVersion); REGISTER_API(GetClientCertificate); REGISTER_API(GetCommandKeys); + REGISTER_API(GetCurrentCommandName); REGISTER_API(GetTypeMethodVersion); REGISTER_API(RegisterDefragFunc); REGISTER_API(DefragAlloc); diff --git a/src/modules/Makefile b/src/modules/Makefile index 5e012d6f1..3db19e79a 100644 --- a/src/modules/Makefile +++ b/src/modules/Makefile @@ -13,7 +13,7 @@ endif .SUFFIXES: .c .so .xo .o -all: helloworld.so hellotype.so helloblock.so testmodule.so hellocluster.so hellotimer.so hellodict.so hellohook.so helloacl.so +all: helloworld.so hellotype.so helloblock.so hellocluster.so hellotimer.so hellodict.so hellohook.so helloacl.so .c.xo: $(CC) -I. 
$(CFLAGS) $(SHOBJ_CFLAGS) -fPIC -c $< -o $@ @@ -58,10 +58,5 @@ helloacl.xo: ../redismodule.h helloacl.so: helloacl.xo $(LD) -o $@ $< $(SHOBJ_LDFLAGS) $(LIBS) -lc -testmodule.xo: ../redismodule.h - -testmodule.so: testmodule.xo - $(LD) -o $@ $< $(SHOBJ_LDFLAGS) $(LIBS) -lc - clean: rm -rf *.xo *.so diff --git a/src/multi.cpp b/src/multi.cpp index 8ccec0a67..3e74be7ef 100644 --- a/src/multi.cpp +++ b/src/multi.cpp @@ -156,8 +156,12 @@ void execCommandAbort(client *c, sds error) { /* Send EXEC to clients waiting data from MONITOR. We did send a MULTI * already, and didn't send any of the queued commands, now we'll just send * EXEC so it is clear that the transaction is over. */ +<<<<<<< HEAD:src/multi.cpp if (listLength(g_pserver->monitors) && !g_pserver->loading) replicationFeedMonitors(c,g_pserver->monitors,c->db->id,c->argv,c->argc); +======= + replicationFeedMonitors(c,server.monitors,c->db->id,c->argv,c->argc); +>>>>>>> 6.2.6:src/multi.c } void execCommand(client *c) { @@ -172,17 +176,26 @@ void execCommand(client *c) { return; } + /* EXEC with expired watched key is disallowed*/ + if (isWatchedKeyExpired(c)) { + c->flags |= (CLIENT_DIRTY_CAS); + } + /* Check if we need to abort the EXEC because: * 1) Some WATCHed key was touched. * 2) There was a previous error while queueing commands. * A failed EXEC in the first case returns a multi bulk nil object * (technically it is not an error but a special behavior), while * in the second an EXECABORT error is returned. */ - if (c->flags & (CLIENT_DIRTY_CAS|CLIENT_DIRTY_EXEC)) { - addReply(c, c->flags & CLIENT_DIRTY_EXEC ? 
shared.execaborterr : - shared.nullarray[c->resp]); + if (c->flags & (CLIENT_DIRTY_CAS | CLIENT_DIRTY_EXEC)) { + if (c->flags & CLIENT_DIRTY_EXEC) { + addReplyErrorObject(c, shared.execaborterr); + } else { + addReply(c, shared.nullarray[c->resp]); + } + discardTransaction(c); - goto handle_monitor; + return; } { // GOTO Protectect Variable Scope @@ -272,6 +285,7 @@ void execCommand(client *c) { afterPropagateExec(); } +<<<<<<< HEAD:src/multi.cpp serverTL->in_exec = 0; } // END Goto Variable Protection Scope @@ -283,6 +297,9 @@ handle_monitor: * table, and we do it here with correct ordering. */ if (listLength(g_pserver->monitors) && !g_pserver->loading) replicationFeedMonitors(c,g_pserver->monitors,c->db->id,c->argv,c->argc); +======= + server.in_exec = 0; +>>>>>>> 6.2.6:src/multi.c } /* ===================== WATCH (CAS alike for MULTI/EXEC) =================== @@ -360,6 +377,22 @@ void unwatchAllKeys(client *c) { } } +/* iterates over the watched_keys list and + * look for an expired key . */ +int isWatchedKeyExpired(client *c) { + listIter li; + listNode *ln; + watchedKey *wk; + if (listLength(c->watched_keys) == 0) return 0; + listRewind(c->watched_keys,&li); + while ((ln = listNext(&li))) { + wk = listNodeValue(ln); + if (keyIsExpired(wk->db, wk->key)) return 1; + } + + return 0; +} + /* "Touch" a key, so that if this key is being WATCHed by some client the * next EXEC will fail. 
*/ void touchWatchedKey(redisDb *db, robj *key) { diff --git a/src/networking.cpp b/src/networking.cpp index 202525e6d..918c1291f 100644 --- a/src/networking.cpp +++ b/src/networking.cpp @@ -112,9 +112,23 @@ static void clientSetDefaultAuth(client *c) { !(c->user->flags & USER_FLAG_DISABLED); } +<<<<<<< HEAD:src/networking.cpp client *createClient(connection *conn, int iel) { client *c = (client*)zmalloc(sizeof(client), MALLOC_LOCAL); serverAssert(conn == nullptr || (iel == (serverTL - g_pserver->rgthreadvar))); +======= +int authRequired(client *c) { + /* Check if the user is authenticated. This check is skipped in case + * the default user is flagged as "nopass" and is active. */ + int auth_required = (!(DefaultUser->flags & USER_FLAG_NOPASS) || + (DefaultUser->flags & USER_FLAG_DISABLED)) && + !c->authenticated; + return auth_required; +} + +client *createClient(connection *conn) { + client *c = zmalloc(sizeof(client)); +>>>>>>> 6.2.6:src/networking.c c->iel = iel; /* passing NULL as conn it is possible to create a non connected client. @@ -403,7 +417,7 @@ void _addReplyProtoToList(client *c, const char *s, size_t len) { listAddNodeTail(c->reply, tail); c->reply_bytes += tail->size; - asyncCloseClientOnOutputBufferLimitReached(c); + closeClientOnOutputBufferLimitReached(c, 1); } } @@ -735,7 +749,7 @@ void setDeferredReply(client *c, void *node, const char *s, size_t length) { listNodeValue(ln) = buf; c->reply_bytes += buf->size; - asyncCloseClientOnOutputBufferLimitReached(c); + closeClientOnOutputBufferLimitReached(c, 1); } } @@ -789,14 +803,13 @@ void setDeferredSetLen(client *c, void *node, long length) { } void setDeferredAttributeLen(client *c, void *node, long length) { - int prefix = c->resp == 2 ? 
'*' : '|'; - if (c->resp == 2) length *= 2; - setDeferredAggregateLen(c,node,length,prefix); + serverAssert(c->resp >= 3); + setDeferredAggregateLen(c,node,length,'|'); } void setDeferredPushLen(client *c, void *node, long length) { - int prefix = c->resp == 2 ? '*' : '>'; - setDeferredAggregateLen(c,node,length,prefix); + serverAssert(c->resp >= 3); + setDeferredAggregateLen(c,node,length,'>'); } /* Add a double as a bulk reply */ @@ -825,6 +838,16 @@ void addReplyDouble(client *c, double d) { } } +void addReplyBigNum(client *c, const char* num, size_t len) { + if (c->resp == 2) { + addReplyBulkCBuffer(c, num, len); + } else { + addReplyProto(c,"(",1); + addReplyProto(c,num,len); + addReply(c,shared.crlf); + } +} + /* Add a long double as a bulk reply, but uses a human readable formatting * of the double instead of exposing the crude behavior of doubles to the * dear user. */ @@ -896,14 +919,13 @@ void addReplySetLen(client *c, long length) { } void addReplyAttributeLen(client *c, long length) { - int prefix = c->resp == 2 ? '*' : '|'; - if (c->resp == 2) length *= 2; - addReplyAggregateLen(c,length,prefix); + serverAssert(c->resp >= 3); + addReplyAggregateLen(c,length,'|'); } void addReplyPushLen(client *c, long length) { - int prefix = c->resp == 2 ? '*' : '>'; - addReplyAggregateLen(c,length,prefix); + serverAssert(c->resp >= 3); + addReplyAggregateLen(c,length,'>'); } void addReplyNull(client *c) { @@ -1093,7 +1115,7 @@ void AddReplyFromClient(client *dst, client *src) { src->bufpos = 0; /* Check output buffer limits */ - asyncCloseClientOnOutputBufferLimitReached(dst); + closeClientOnOutputBufferLimitReached(dst, 1); } /* Copy 'src' client output buffers into 'dst' client output buffers. @@ -2035,9 +2057,6 @@ void resetClient(client *c) { c->flags |= CLIENT_REPLY_SKIP; c->flags &= ~CLIENT_REPLY_SKIP_NEXT; } - - /* Always clear the prevent logging field. 
*/ - c->flags &= ~CLIENT_PREVENT_LOGGING; } /* This function is used when we want to re-enter the event loop but there @@ -2229,6 +2248,10 @@ int processMultibulkBuffer(client *c) { addReplyError(c,"Protocol error: invalid multibulk length"); setProtocolError("invalid mbulk count",c); return C_ERR; + } else if (ll > 10 && authRequired(c)) { + addReplyError(c, "Protocol error: unauthenticated multibulk length"); + setProtocolError("unauth mbulk count", c); + return C_ERR; } c->qb_pos = (newline-c->querybuf)+2; @@ -2276,6 +2299,10 @@ int processMultibulkBuffer(client *c) { addReplyError(c,"Protocol error: invalid bulk length"); setProtocolError("invalid bulk length",c); return C_ERR; + } else if (ll > 16384 && authRequired(c)) { + addReplyError(c, "Protocol error: unauthenticated bulk length"); + setProtocolError("unauth bulk length", c); + return C_ERR; } c->qb_pos = newline-c->querybuf+2; @@ -2342,20 +2369,28 @@ int processMultibulkBuffer(client *c) { * 1. The client is reset unless there are reasons to avoid doing it. * 2. In the case of master clients, the replication offset is updated. * 3. Propagate commands we got from our master to replicas down the line. */ +<<<<<<< HEAD:src/networking.cpp void commandProcessed(client *c, int flags) { +======= +void commandProcessed(client *c) { + /* If client is blocked(including paused), just return avoid reset and replicate. + * + * 1. Don't reset the client structure for blocked clients, so that the reply + * callback will still be able to access the client argv and argc fields. + * The client will be reset in unblockClient(). + * 2. Don't update replication offset or propagate commands to replicas, + * since we have not applied the command. */ + if (c->flags & CLIENT_BLOCKED) return; + + resetClient(c); + +>>>>>>> 6.2.6:src/networking.c long long prev_offset = c->reploff; if (c->flags & CLIENT_MASTER && !(c->flags & CLIENT_MULTI)) { /* Update the applied replication offset of our master. 
*/ c->reploff = c->read_reploff - sdslen(c->querybuf) + c->qb_pos; } - /* Don't reset the client structure for blocked clients, so that the reply - * callback will still be able to access the client argv and argc fields. - * The client will be reset in unblockClient(). */ - if (!(c->flags & CLIENT_BLOCKED)) { - resetClient(c); - } - /* If the client is a master we need to compute the difference * between the applied offset before and after processing the buffer, * to understand how much of the replication stream was actually @@ -3087,8 +3122,12 @@ NULL if (getLongLongFromObjectOrReply(c,c->argv[2],&id,NULL) != C_OK) return; struct client *target = lookupClientByID(id); +<<<<<<< HEAD:src/networking.cpp if (target && target->flags & CLIENT_BLOCKED) { std::unique_lock ul(target->lock); +======= + if (target && target->flags & CLIENT_BLOCKED && moduleBlockedClientMayTimeout(target)) { +>>>>>>> 6.2.6:src/networking.c if (unblock_error) addReplyError(target, "-UNBLOCKED client unblocked via CLIENT UNBLOCK"); @@ -3413,7 +3452,8 @@ void helloCommand(client *c) { int moreargs = (c->argc-1) - j; const char *opt = (const char*)ptrFromObj(c->argv[j]); if (!strcasecmp(opt,"AUTH") && moreargs >= 2) { - preventCommandLogging(c); + redactClientCommandArgument(c, j+1); + redactClientCommandArgument(c, j+2); if (ACLAuthenticateUser(c, c->argv[j+1], c->argv[j+2]) == C_ERR) { addReplyError(c,"-WRONGPASS invalid username-password pair or user is disabled."); return; @@ -3502,6 +3542,15 @@ static void retainOriginalCommandVector(client *c) { } } +/* Redact a given argument to prevent it from being shown + * in the slowlog. This information is stored in the + * original_argv array. */ +void redactClientCommandArgument(client *c, int argc) { + retainOriginalCommandVector(c); + decrRefCount(c->argv[argc]); + c->original_argv[argc] = shared.redacted; +} + /* Rewrite the command vector of the client. All the new objects ref count * is incremented. 
The old command vector is freed, and the old objects * ref count is decremented. */ @@ -3671,18 +3720,33 @@ int checkClientOutputBufferLimits(client *c) { * * Note: we need to close the client asynchronously because this function is * called from contexts where the client can't be freed safely, i.e. from the - * lower level functions pushing data inside the client output buffers. */ -void asyncCloseClientOnOutputBufferLimitReached(client *c) { - if (!c->conn) return; /* It is unsafe to free fake clients. */ + * lower level functions pushing data inside the client output buffers. + * When `async` is set to 0, we close the client immediately, this is + * useful when called from cron. + * + * Returns 1 if client was (flagged) closed. */ +int closeClientOnOutputBufferLimitReached(client *c, int async) { + if (!c->conn) return 0; /* It is unsafe to free fake clients. */ serverAssert(c->reply_bytes < SIZE_MAX-(1024*64)); - if (c->reply_bytes == 0 || c->flags & CLIENT_CLOSE_ASAP) return; + if (c->reply_bytes == 0 || c->flags & CLIENT_CLOSE_ASAP) return 0; if (checkClientOutputBufferLimits(c)) { sds client = catClientInfoString(sdsempty(),c); - freeClientAsync(c); - serverLog(LL_WARNING,"Client %s scheduled to be closed ASAP for overcoming of output buffer limits.", client); + if (async) { + freeClientAsync(c); + serverLog(LL_WARNING, + "Client %s scheduled to be closed ASAP for overcoming of output buffer limits.", + client); + } else { + freeClient(c); + serverLog(LL_WARNING, + "Client %s closed for overcoming of output buffer limits.", + client); + } sdsfree(client); + return 1; } + return 0; } /* Helper function used by performEvictions() in order to flush slaves @@ -3768,7 +3832,12 @@ void unpauseClients(void) { listIter li; client *c; +<<<<<<< HEAD:src/networking.cpp g_pserver->client_pause_type = CLIENT_PAUSE_OFF; +======= + server.client_pause_type = CLIENT_PAUSE_OFF; + server.client_pause_end_time = 0; +>>>>>>> 6.2.6:src/networking.c /* Unblock all of the clients 
so they are reprocessed. */ listRewind(g_pserver->paused_clients,&li); @@ -3832,6 +3901,7 @@ void processEventsWhileBlocked(int iel) { vecclients.push_back(c); } } +<<<<<<< HEAD:src/networking.cpp /* Since we're about to release our lock we need to flush the repl backlog queue */ bool fReplBacklog = g_pserver->repl_batch_offStart >= 0; @@ -3845,6 +3915,25 @@ void processEventsWhileBlocked(int iel) { aeReleaseLock(); serverAssert(!GlobalLocksAcquired()); try +======= + listEmpty(server.clients_pending_write); + + /* Update processed count on server */ + server.stat_io_writes_processed += processed; + + return processed; +} + +/* Return 1 if we want to handle the client read later using threaded I/O. + * This is called by the readable handler of the event loop. + * As a side effect of calling this function the client is put in the + * pending read clients and flagged as such. */ +int postponeClientRead(client *c) { + if (server.io_threads_active && + server.io_threads_do_reads && + !ProcessingEventsWhileBlocked && + !(c->flags & (CLIENT_MASTER|CLIENT_SLAVE|CLIENT_PENDING_READ|CLIENT_BLOCKED))) +>>>>>>> 6.2.6:src/networking.c { ProcessingEventsWhileBlocked = 1; while (iterations--) { @@ -3876,7 +3965,40 @@ void processEventsWhileBlocked(int iel) { locker.arm(nullptr); locker.release(); +<<<<<<< HEAD:src/networking.cpp g_pserver->events_processed_while_blocked += eventsCount; +======= + /* Also use the main thread to process a slice of clients. */ + listRewind(io_threads_list[0],&li); + while((ln = listNext(&li))) { + client *c = listNodeValue(ln); + readQueryFromClient(c->conn); + } + listEmpty(io_threads_list[0]); + + /* Wait for all the other threads to end their work. */ + while(1) { + unsigned long pending = 0; + for (int j = 1; j < server.io_threads_num; j++) + pending += getIOPendingCount(j); + if (pending == 0) break; + } + + /* Run the list of clients again to process the new buffers. 
*/ + while(listLength(server.clients_pending_read)) { + ln = listFirst(server.clients_pending_read); + client *c = listNodeValue(ln); + c->flags &= ~CLIENT_PENDING_READ; + listDelNode(server.clients_pending_read,ln); + + serverAssert(!(c->flags & CLIENT_BLOCKED)); + if (processPendingCommandsAndResetClient(c) == C_ERR) { + /* If the client is no longer valid, we avoid + * processing the client later. So we just go + * to the next. */ + continue; + } +>>>>>>> 6.2.6:src/networking.c whileBlockedCron(); diff --git a/src/object.cpp b/src/object.cpp index c8502462c..2cbf5e746 100644 --- a/src/object.cpp +++ b/src/object.cpp @@ -146,6 +146,21 @@ robj *createStringObject(const char *ptr, size_t len) { return createRawStringObject(ptr,len); } +/* Same as CreateRawStringObject, can return NULL if allocation fails */ +robj *tryCreateRawStringObject(const char *ptr, size_t len) { + sds str = sdstrynewlen(ptr,len); + if (!str) return NULL; + return createObject(OBJ_STRING, str); +} + +/* Same as createStringObject, can return NULL if allocation fails */ +robj *tryCreateStringObject(const char *ptr, size_t len) { + if (len <= OBJ_ENCODING_EMBSTR_SIZE_LIMIT) + return createEmbeddedStringObject(ptr,len); + else + return tryCreateRawStringObject(ptr,len); +} + /* Create a string object from a long long value. When possible returns a * shared integer object, or at least an integer encoded one. * @@ -960,8 +975,13 @@ size_t objectComputeSize(robj *o, size_t sample_size) { serverPanic("Unknown hash encoding"); } } else if (o->type == OBJ_STREAM) { +<<<<<<< HEAD:src/object.cpp stream *s = (stream*)ptrFromObj(o); asize = sizeof(*o); +======= + stream *s = o->ptr; + asize = sizeof(*o)+sizeof(*s); +>>>>>>> 6.2.6:src/object.c asize += streamRadixTreeMemoryUsage(s->rax); /* Now we have to add the listpacks. 
The last listpack is often non @@ -1310,10 +1330,16 @@ robj_roptr objectCommandLookup(client *c, robj *key) { return lookupKeyReadWithFlags(c->db,key,LOOKUP_NOTOUCH|LOOKUP_NONOTIFY); } +<<<<<<< HEAD:src/object.cpp robj_roptr objectCommandLookupOrReply(client *c, robj *key, robj *reply) { robj_roptr o = objectCommandLookup(c,key); if (!o) addReply(c, reply); +======= +robj *objectCommandLookupOrReply(client *c, robj *key, robj *reply) { + robj *o = objectCommandLookup(c,key); + if (!o) SentReplyOnKeyMiss(c, reply); +>>>>>>> 6.2.6:src/object.c return o; } diff --git a/src/quicklist.c b/src/quicklist.c index d3890f893..95e468fdb 100644 --- a/src/quicklist.c +++ b/src/quicklist.c @@ -45,11 +45,16 @@ #define REDIS_STATIC static #endif -/* Optimization levels for size-based filling */ +/* Optimization levels for size-based filling. + * Note that the largest possible limit is 16k, so even if each record takes + * just one byte, it still won't overflow the 16 bit count field. */ static const size_t optimization_level[] = {4096, 8192, 16384, 32768, 65536}; /* Maximum size in bytes of any multi-element ziplist. - * Larger values will live in their own isolated ziplists. */ + * Larger values will live in their own isolated ziplists. + * This is used only if we're limited by record count. when we're limited by + * size, the maximum limit is bigger, but still safe. + * 8k is a recommended / default size limit */ #define SIZE_SAFETY_LIMIT 8192 /* Minimum ziplist size in bytes for attempting compression. 
*/ @@ -444,6 +449,8 @@ REDIS_STATIC int _quicklistNodeAllowInsert(const quicklistNode *node, unsigned int new_sz = node->sz + sz + ziplist_overhead; if (likely(_quicklistNodeSizeMeetsOptimizationRequirement(new_sz, fill))) return 1; + /* when we return 1 above we know that the limit is a size limit (which is + * safe, see comments next to optimization_level and SIZE_SAFETY_LIMIT) */ else if (!sizeMeetsSafetyLimit(new_sz)) return 0; else if ((int)node->count < fill) @@ -463,6 +470,8 @@ REDIS_STATIC int _quicklistNodeAllowMerge(const quicklistNode *a, unsigned int merge_sz = a->sz + b->sz - 11; if (likely(_quicklistNodeSizeMeetsOptimizationRequirement(merge_sz, fill))) return 1; + /* when we return 1 above we know that the limit is a size limit (which is + * safe, see comments next to optimization_level and SIZE_SAFETY_LIMIT) */ else if (!sizeMeetsSafetyLimit(merge_sz)) return 0; else if ((int)(a->count + b->count) <= fill) @@ -482,6 +491,7 @@ REDIS_STATIC int _quicklistNodeAllowMerge(const quicklistNode *a, * Returns 1 if new head created. */ int quicklistPushHead(quicklist *quicklist, void *value, size_t sz) { quicklistNode *orig_head = quicklist->head; + assert(sz < UINT32_MAX); /* TODO: add support for quicklist nodes that are sds encoded (not zipped) */ if (likely( _quicklistNodeAllowInsert(quicklist->head, quicklist->fill, sz))) { quicklist->head->zl = @@ -505,6 +515,7 @@ int quicklistPushHead(quicklist *quicklist, void *value, size_t sz) { * Returns 1 if new tail created. 
*/ int quicklistPushTail(quicklist *quicklist, void *value, size_t sz) { quicklistNode *orig_tail = quicklist->tail; + assert(sz < UINT32_MAX); /* TODO: add support for quicklist nodes that are sds encoded (not zipped) */ if (likely( _quicklistNodeAllowInsert(quicklist->tail, quicklist->fill, sz))) { quicklist->tail->zl = @@ -847,6 +858,7 @@ REDIS_STATIC void _quicklistInsert(quicklist *quicklist, quicklistEntry *entry, int fill = quicklist->fill; quicklistNode *node = entry->node; quicklistNode *new_node = NULL; + assert(sz < UINT32_MAX); /* TODO: add support for quicklist nodes that are sds encoded (not zipped) */ if (!node) { /* we have no reference node, so let's create only node in the list */ diff --git a/src/rdb.cpp b/src/rdb.cpp index 577917831..0167ca06f 100644 --- a/src/rdb.cpp +++ b/src/rdb.cpp @@ -547,9 +547,19 @@ void *rdbGenericLoadStringObject(rio *rdb, int flags, size_t *lenptr) { } return buf; } else { +<<<<<<< HEAD:src/rdb.cpp robj *o = encode ? createStringObject(SDS_NOINIT,len) : createRawStringObject(SDS_NOINIT,len); if (len && rioRead(rdb,ptrFromObj(o),len) == 0) { +======= + robj *o = encode ? tryCreateStringObject(SDS_NOINIT,len) : + tryCreateRawStringObject(SDS_NOINIT,len); + if (!o) { + serverLog(server.loading? LL_WARNING: LL_VERBOSE, "rdbGenericLoadStringObject failed allocating %llu bytes", len); + return NULL; + } + if (len && rioRead(rdb,o->ptr,len) == 0) { +>>>>>>> 6.2.6:src/rdb.c decrRefCount(o); return NULL; } @@ -1601,14 +1611,29 @@ robj *rdbLoadCheckModuleValue(rio *rdb, char *modulename) { } /* Load a Redis object of the specified type from the specified file. +<<<<<<< HEAD:src/rdb.cpp * On success a newly allocated object is returned, otherwise NULL. */ robj *rdbLoadObject(int rdbtype, rio *rdb, sds key, uint64_t mvcc_tstamp) { +======= + * On success a newly allocated object is returned, otherwise NULL. 
+ * When the function returns NULL and if 'error' is not NULL, the + * integer pointed by 'error' is set to the type of error that occurred */ +robj *rdbLoadObject(int rdbtype, rio *rdb, sds key, int *error) { +>>>>>>> 6.2.6:src/rdb.c robj *o = NULL, *ele, *dec; uint64_t len; unsigned int i; +<<<<<<< HEAD:src/rdb.cpp int deep_integrity_validation = cserver.sanitize_dump_payload == SANITIZE_DUMP_YES; if (cserver.sanitize_dump_payload == SANITIZE_DUMP_CLIENTS) { +======= + /* Set default error of load object, it will be set to 0 on success. */ + if (error) *error = RDB_LOAD_ERR_OTHER; + + int deep_integrity_validation = server.sanitize_dump_payload == SANITIZE_DUMP_YES; + if (server.sanitize_dump_payload == SANITIZE_DUMP_CLIENTS) { +>>>>>>> 6.2.6:src/rdb.c /* Skip sanitization when loading (an RDB), or getting a RESTORE command * from either the master or a client using an ACL user with the skip-sanitize-payload flag. */ int skip = g_pserver->loading || @@ -1625,6 +1650,7 @@ robj *rdbLoadObject(int rdbtype, rio *rdb, sds key, uint64_t mvcc_tstamp) { } else if (rdbtype == RDB_TYPE_LIST) { /* Read list value */ if ((len = rdbLoadLen(rdb,NULL)) == RDB_LENERR) return NULL; + if (len == 0) goto emptykey; o = createQuicklistObject(); quicklistSetOptions((quicklist*)ptrFromObj(o), g_pserver->list_max_ziplist_size, @@ -1645,9 +1671,16 @@ robj *rdbLoadObject(int rdbtype, rio *rdb, sds key, uint64_t mvcc_tstamp) { } else if (rdbtype == RDB_TYPE_SET) { /* Read Set value */ if ((len = rdbLoadLen(rdb,NULL)) == RDB_LENERR) return NULL; + if (len == 0) goto emptykey; /* Use a regular set when there are too many entries. 
*/ +<<<<<<< HEAD:src/rdb.cpp if (len > g_pserver->set_max_intset_entries) { +======= + size_t max_entries = server.set_max_intset_entries; + if (max_entries >= 1<<30) max_entries = 1<<30; + if (len > max_entries) { +>>>>>>> 6.2.6:src/rdb.c o = createSetObject(); /* It's faster to expand the dict to the right size asap in order * to avoid rehashing */ @@ -1708,10 +1741,12 @@ robj *rdbLoadObject(int rdbtype, rio *rdb, sds key, uint64_t mvcc_tstamp) { } else if (rdbtype == RDB_TYPE_ZSET_2 || rdbtype == RDB_TYPE_ZSET) { /* Read list/set value. */ uint64_t zsetlen; - size_t maxelelen = 0; + size_t maxelelen = 0, totelelen = 0; zset *zs; if ((zsetlen = rdbLoadLen(rdb,NULL)) == RDB_LENERR) return NULL; + if (zsetlen == 0) goto emptykey; + o = createZsetObject(); zs = (zset*)ptrFromObj(o); @@ -1748,6 +1783,7 @@ robj *rdbLoadObject(int rdbtype, rio *rdb, sds key, uint64_t mvcc_tstamp) { /* Don't care about integer-encoded strings. */ if (sdslen(sdsele) > maxelelen) maxelelen = sdslen(sdsele); + totelelen += sdslen(sdsele); znode = zslInsert(zs->zsl,score,sdsele); if (dictAdd(zs->dict,sdsele,&znode->score) != DICT_OK) { @@ -1759,9 +1795,18 @@ robj *rdbLoadObject(int rdbtype, rio *rdb, sds key, uint64_t mvcc_tstamp) { } /* Convert *after* loading, since sorted sets are not stored ordered. 
*/ +<<<<<<< HEAD:src/rdb.cpp if (zsetLength(o) <= g_pserver->zset_max_ziplist_entries && maxelelen <= g_pserver->zset_max_ziplist_value) zsetConvert(o,OBJ_ENCODING_ZIPLIST); +======= + if (zsetLength(o) <= server.zset_max_ziplist_entries && + maxelelen <= server.zset_max_ziplist_value && + ziplistSafeToAdd(NULL, totelelen)) + { + zsetConvert(o,OBJ_ENCODING_ZIPLIST); + } +>>>>>>> 6.2.6:src/rdb.c } else if (rdbtype == RDB_TYPE_HASH) { uint64_t len; int ret; @@ -1770,6 +1815,7 @@ robj *rdbLoadObject(int rdbtype, rio *rdb, sds key, uint64_t mvcc_tstamp) { len = rdbLoadLen(rdb, NULL); if (len == RDB_LENERR) return NULL; + if (len == 0) goto emptykey; o = createHashObject(); @@ -1814,6 +1860,7 @@ robj *rdbLoadObject(int rdbtype, rio *rdb, sds key, uint64_t mvcc_tstamp) { } } +<<<<<<< HEAD:src/rdb.cpp /* Add pair to ziplist */ o->m_ptr = ziplistPush((unsigned char*)ptrFromObj(o), (unsigned char*)field, sdslen(field), ZIPLIST_TAIL); @@ -1823,12 +1870,32 @@ robj *rdbLoadObject(int rdbtype, rio *rdb, sds key, uint64_t mvcc_tstamp) { /* Convert to hash table if size threshold is exceeded */ if (sdslen(field) > g_pserver->hash_max_ziplist_value || sdslen(value) > g_pserver->hash_max_ziplist_value) +======= + /* Convert to hash table if size threshold is exceeded */ + if (sdslen(field) > server.hash_max_ziplist_value || + sdslen(value) > server.hash_max_ziplist_value || + !ziplistSafeToAdd(o->ptr, sdslen(field)+sdslen(value))) +>>>>>>> 6.2.6:src/rdb.c { - sdsfree(field); - sdsfree(value); hashTypeConvert(o, OBJ_ENCODING_HT); + ret = dictAdd((dict*)o->ptr, field, value); + if (ret == DICT_ERR) { + rdbReportCorruptRDB("Duplicate hash fields detected"); + if (dupSearchDict) dictRelease(dupSearchDict); + sdsfree(value); + sdsfree(field); + decrRefCount(o); + return NULL; + } break; } + + /* Add pair to ziplist */ + o->ptr = ziplistPush(o->ptr, (unsigned char*)field, + sdslen(field), ZIPLIST_TAIL); + o->ptr = ziplistPush(o->ptr, (unsigned char*)value, + sdslen(value), ZIPLIST_TAIL); 
+ sdsfree(field); sdsfree(value); } @@ -1877,6 +1944,8 @@ robj *rdbLoadObject(int rdbtype, rio *rdb, sds key, uint64_t mvcc_tstamp) { serverAssert(len == 0); } else if (rdbtype == RDB_TYPE_LIST_QUICKLIST) { if ((len = rdbLoadLen(rdb,NULL)) == RDB_LENERR) return NULL; + if (len == 0) goto emptykey; + o = createQuicklistObject(); quicklistSetOptions((quicklist*)ptrFromObj(o), g_pserver->list_max_ziplist_size, g_pserver->list_compress_depth); @@ -1896,7 +1965,23 @@ robj *rdbLoadObject(int rdbtype, rio *rdb, sds key, uint64_t mvcc_tstamp) { zfree(zl); return NULL; } +<<<<<<< HEAD:src/rdb.cpp quicklistAppendZiplist((quicklist*)ptrFromObj(o), zl); +======= + + /* Silently skip empty ziplists, if we'll end up with empty quicklist we'll fail later. */ + if (ziplistLen(zl) == 0) { + zfree(zl); + continue; + } else { + quicklistAppendZiplist(o->ptr, zl); + } + } + + if (quicklistCount(o->ptr) == 0) { + decrRefCount(o); + goto emptykey; +>>>>>>> 6.2.6:src/rdb.c } } else if (rdbtype == RDB_TYPE_HASH_ZIPMAP || rdbtype == RDB_TYPE_LIST_ZIPLIST || @@ -1941,12 +2026,11 @@ robj *rdbLoadObject(int rdbtype, rio *rdb, sds key, uint64_t mvcc_tstamp) { while ((zi = zipmapNext(zi, &fstr, &flen, &vstr, &vlen)) != NULL) { if (flen > maxlen) maxlen = flen; if (vlen > maxlen) maxlen = vlen; - zl = ziplistPush(zl, fstr, flen, ZIPLIST_TAIL); - zl = ziplistPush(zl, vstr, vlen, ZIPLIST_TAIL); /* search for duplicate records */ sds field = sdstrynewlen(fstr, flen); - if (!field || dictAdd(dupSearchDict, field, NULL) != DICT_OK) { + if (!field || dictAdd(dupSearchDict, field, NULL) != DICT_OK || + !ziplistSafeToAdd(zl, (size_t)flen + vlen)) { rdbReportCorruptRDB("Hash zipmap with dup elements, or big length (%u)", flen); dictRelease(dupSearchDict); sdsfree(field); @@ -1955,6 +2039,9 @@ robj *rdbLoadObject(int rdbtype, rio *rdb, sds key, uint64_t mvcc_tstamp) { decrRefCount(o); return NULL; } + + zl = ziplistPush(zl, fstr, flen, ZIPLIST_TAIL); + zl = ziplistPush(zl, vstr, vlen, ZIPLIST_TAIL); } 
dictRelease(dupSearchDict); @@ -1979,6 +2066,14 @@ robj *rdbLoadObject(int rdbtype, rio *rdb, sds key, uint64_t mvcc_tstamp) { decrRefCount(o); return NULL; } + + if (ziplistLen(encoded) == 0) { + zfree(encoded); + o->ptr = NULL; + decrRefCount(o); + goto emptykey; + } + o->type = OBJ_LIST; o->encoding = OBJ_ENCODING_ZIPLIST; listTypeConvert(o,OBJ_ENCODING_QUICKLIST); @@ -2008,7 +2103,18 @@ robj *rdbLoadObject(int rdbtype, rio *rdb, sds key, uint64_t mvcc_tstamp) { } o->type = OBJ_ZSET; o->encoding = OBJ_ENCODING_ZIPLIST; +<<<<<<< HEAD:src/rdb.cpp if (zsetLength(o) > g_pserver->zset_max_ziplist_entries) +======= + if (zsetLength(o) == 0) { + zfree(encoded); + o->ptr = NULL; + decrRefCount(o); + goto emptykey; + } + + if (zsetLength(o) > server.zset_max_ziplist_entries) +>>>>>>> 6.2.6:src/rdb.c zsetConvert(o,OBJ_ENCODING_SKIPLIST); break; case RDB_TYPE_HASH_ZIPLIST: @@ -2022,7 +2128,18 @@ robj *rdbLoadObject(int rdbtype, rio *rdb, sds key, uint64_t mvcc_tstamp) { } o->type = OBJ_HASH; o->encoding = OBJ_ENCODING_ZIPLIST; +<<<<<<< HEAD:src/rdb.cpp if (hashTypeLength(o) > g_pserver->hash_max_ziplist_entries) +======= + if (hashTypeLength(o) == 0) { + zfree(encoded); + o->ptr = NULL; + decrRefCount(o); + goto emptykey; + } + + if (hashTypeLength(o) > server.hash_max_ziplist_entries) +>>>>>>> 6.2.6:src/rdb.c hashTypeConvert(o, OBJ_ENCODING_HT); break; default: @@ -2090,7 +2207,7 @@ robj *rdbLoadObject(int rdbtype, rio *rdb, sds key, uint64_t mvcc_tstamp) { } /* Insert the key in the radix tree. 
*/ - int retval = raxInsert(s->rax, + int retval = raxTryInsert(s->rax, (unsigned char*)nodekey,sizeof(streamID),lp,NULL); sdsfree(nodekey); if (!retval) { @@ -2179,7 +2296,7 @@ robj *rdbLoadObject(int rdbtype, rio *rdb, sds key, uint64_t mvcc_tstamp) { streamFreeNACK(nack); return NULL; } - if (!raxInsert(cgroup->pel,rawid,sizeof(rawid),nack,NULL)) { + if (!raxTryInsert(cgroup->pel,rawid,sizeof(rawid),nack,NULL)) { rdbReportCorruptRDB("Duplicated global PEL entry " "loading stream consumer group"); decrRefCount(o); @@ -2243,15 +2360,33 @@ robj *rdbLoadObject(int rdbtype, rio *rdb, sds key, uint64_t mvcc_tstamp) { * loading the global PEL. Then set the same shared * NACK structure also in the consumer-specific PEL. */ nack->consumer = consumer; - if (!raxInsert(consumer->pel,rawid,sizeof(rawid),nack,NULL)) { + if (!raxTryInsert(consumer->pel,rawid,sizeof(rawid),nack,NULL)) { rdbReportCorruptRDB("Duplicated consumer PEL entry " " loading a stream consumer " "group"); decrRefCount(o); + streamFreeNACK(nack); return NULL; } } } + + /* Verify that each PEL eventually got a consumer assigned to it. 
*/ + if (deep_integrity_validation) { + raxIterator ri_cg_pel; + raxStart(&ri_cg_pel,cgroup->pel); + raxSeek(&ri_cg_pel,"^",NULL,0); + while(raxNext(&ri_cg_pel)) { + streamNACK *nack = ri_cg_pel.data; + if (!nack->consumer) { + raxStop(&ri_cg_pel); + rdbReportCorruptRDB("Stream CG PEL entry without consumer"); + decrRefCount(o); + return NULL; + } + } + raxStop(&ri_cg_pel); + } } } else if (rdbtype == RDB_TYPE_MODULE || rdbtype == RDB_TYPE_MODULE_2) { uint64_t moduleid = rdbLoadLen(rdb,NULL); @@ -2330,10 +2465,18 @@ robj *rdbLoadObject(int rdbtype, rio *rdb, sds key, uint64_t mvcc_tstamp) { rdbReportReadError("Unknown RDB encoding type %d",rdbtype); return NULL; } +<<<<<<< HEAD:src/rdb.cpp setMvccTstamp(o, mvcc_tstamp); serverAssert(!o->FExpires()); +======= + if (error) *error = 0; +>>>>>>> 6.2.6:src/rdb.c return o; + +emptykey: + if (error) *error = RDB_LOAD_ERR_EMPTY_KEY; + return NULL; } /* Mark that we are loading in the global state and setup the fields @@ -2452,6 +2595,7 @@ int rdbLoadRio(rio *rdb, int rdbflags, rdbSaveInfo *rsi) { int type, rdbver; redisDb *db = g_pserver->db+0; char buf[1024]; +<<<<<<< HEAD:src/rdb.cpp /* Key-specific attributes, set by opcodes before the key type. 
*/ long long lru_idle = -1, lfu_freq = -1, expiretime = -1, now; long long lru_clock = 0; @@ -2459,6 +2603,10 @@ int rdbLoadRio(rio *rdb, int rdbflags, rdbSaveInfo *rsi) { robj *subexpireKey = nullptr; sds key = nullptr; bool fLastKeyExpired = false; +======= + int error; + long long empty_keys_skipped = 0, expired_keys_skipped = 0, keys_loaded = 0; +>>>>>>> 6.2.6:src/rdb.c rdb->update_cksum = rdbLoadProgressCallback; rdb->max_processing_chunk = g_pserver->loading_process_events_interval_bytes; @@ -2636,8 +2784,10 @@ int rdbLoadRio(rio *rdb, int rdbflags, rdbSaveInfo *rsi) { int when_opcode = rdbLoadLen(rdb,NULL); int when = rdbLoadLen(rdb,NULL); if (rioGetReadError(rdb)) goto eoferr; - if (when_opcode != RDB_MODULE_OPCODE_UINT) + if (when_opcode != RDB_MODULE_OPCODE_UINT) { rdbReportReadError("bad when_opcode"); + goto eoferr; + } moduleType *mt = moduleTypeLookupModuleByID(moduleid); char name[10]; moduleTypeNameByID(name,moduleid); @@ -2661,7 +2811,7 @@ int rdbLoadRio(rio *rdb, int rdbflags, rdbSaveInfo *rsi) { if (mt->aux_load(&io,moduleid&1023, when) != REDISMODULE_OK || io.error) { moduleTypeNameByID(name,moduleid); serverLog(LL_WARNING,"The RDB file contains module AUX data for the module type '%s', that the responsible module is not able to load. 
Check for modules log above for additional clues.", name); - exit(1); + goto eoferr; } if (io.ctx) { moduleFreeContext(io.ctx); @@ -2670,7 +2820,7 @@ int rdbLoadRio(rio *rdb, int rdbflags, rdbSaveInfo *rsi) { uint64_t eof = rdbLoadLen(rdb,NULL); if (eof != RDB_MODULE_OPCODE_EOF) { serverLog(LL_WARNING,"The RDB file contains module AUX data for the module '%s' that is not terminated by the proper module value EOF marker", name); - exit(1); + goto eoferr; } continue; } else { @@ -2691,12 +2841,16 @@ int rdbLoadRio(rio *rdb, int rdbflags, rdbSaveInfo *rsi) { if ((key = (sds)rdbGenericLoadStringObject(rdb,RDB_LOAD_SDS,NULL)) == NULL) goto eoferr; /* Read value */ +<<<<<<< HEAD:src/rdb.cpp if ((val = rdbLoadObject(type,rdb,key,mvcc_tstamp)) == NULL) { sdsfree(key); key = nullptr; goto eoferr; } bool fStaleMvccKey = (rsi) ? mvccFromObj(val) < rsi->mvccMinThreshold : false; +======= + val = rdbLoadObject(type,rdb,key,&error); +>>>>>>> 6.2.6:src/rdb.c /* Check if the key already expired. This function is used when loading * an RDB file from disk, either at startup, or when an RDB was @@ -2706,6 +2860,7 @@ int rdbLoadRio(rio *rdb, int rdbflags, rdbSaveInfo *rsi) { * Similarly if the RDB is the preamble of an AOF file, we want to * load all the keys as they are, since the log of operations later * assume to work in an exact keyspace state. */ +<<<<<<< HEAD:src/rdb.cpp redisObjectStack keyobj; initStaticStringObject(keyobj,key); bool fExpiredKey = iAmMaster() && !(rdbflags&RDBFLAGS_AOF_PREAMBLE) && expiretime != -1 && expiretime < now; @@ -2717,15 +2872,39 @@ int rdbLoadRio(rio *rdb, int rdbflags, rdbSaveInfo *rsi) { rsi->mi->staleKeyMap->operator[](db - g_pserver->db).push_back(objKeyDup); } fLastKeyExpired = true; +======= + if (val == NULL) { + /* Since we used to have bug that could lead to empty keys + * (See #8453), we rather not fail when empty key is encountered + * in an RDB file, instead we will silently discard it and + * continue loading. 
*/ + if (error == RDB_LOAD_ERR_EMPTY_KEY) { + if(empty_keys_skipped++ < 10) + serverLog(LL_WARNING, "rdbLoadObject skipping empty key: %s", key); + sdsfree(key); + } else { + sdsfree(key); + goto eoferr; + } + } else if (iAmMaster() && + !(rdbflags&RDBFLAGS_AOF_PREAMBLE) && + expiretime != -1 && expiretime < now) + { +>>>>>>> 6.2.6:src/rdb.c sdsfree(key); key = nullptr; decrRefCount(val); +<<<<<<< HEAD:src/rdb.cpp val = nullptr; +======= + expired_keys_skipped++; +>>>>>>> 6.2.6:src/rdb.c } else { redisObjectStack keyobj; initStaticStringObject(keyobj,key); /* Add the new object in the hash table */ +<<<<<<< HEAD:src/rdb.cpp int fInserted = dbMerge(db, key, val, (rsi && rsi->fForceSetKey) || (rdbflags & RDBFLAGS_ALLOW_DUP)); // Note: dbMerge will incrRef fLastKeyExpired = false; @@ -2735,6 +2914,21 @@ int rdbLoadRio(rio *rdb, int rdbflags, rdbSaveInfo *rsi) { if (expiretime != -1) { setExpire(NULL,db,&keyobj,nullptr,expiretime); +======= + int added = dbAddRDBLoad(db,key,val); + keys_loaded++; + if (!added) { + if (rdbflags & RDBFLAGS_ALLOW_DUP) { + /* This flag is useful for DEBUG RELOAD special modes. + * When it's set we allow new keys to replace the current + * keys with the same name. */ + dbSyncDelete(db,&keyobj); + dbAddRDBLoad(db,key,val); + } else { + serverLog(LL_WARNING, + "RDB has duplicated key '%s' in DB %d",key,db->id); + serverPanic("Duplicated key found in RDB file"); +>>>>>>> 6.2.6:src/rdb.c } /* Set usage information (for eviction). 
*/ @@ -2795,6 +2989,16 @@ int rdbLoadRio(rio *rdb, int rdbflags, rdbSaveInfo *rsi) { } } } + + if (empty_keys_skipped) { + serverLog(LL_WARNING, + "Done loading RDB, keys loaded: %lld, keys expired: %lld, empty keys skipped: %lld.", + keys_loaded, expired_keys_skipped, empty_keys_skipped); + } else { + serverLog(LL_WARNING, + "Done loading RDB, keys loaded: %lld, keys expired: %lld.", + keys_loaded, expired_keys_skipped); + } return C_OK; /* Unexpected end of file is handled here calling rdbReportReadError(): @@ -2895,6 +3099,7 @@ static void backgroundSaveDoneHandlerSocket(int exitcode, int bysignal) { serverLog(LL_WARNING, "Background transfer terminated by signal %d", bysignal); } +<<<<<<< HEAD:src/rdb.cpp if (g_pserver->rdb_child_exit_pipe!=-1) close(g_pserver->rdb_child_exit_pipe); auto pipeT = g_pserver->rdb_pipe_read; @@ -2911,6 +3116,21 @@ static void backgroundSaveDoneHandlerSocket(int exitcode, int bysignal) { zfree(g_pserver->rdb_pipe_buff); g_pserver->rdb_pipe_buff = NULL; g_pserver->rdb_pipe_bufflen = 0; +======= + if (server.rdb_child_exit_pipe!=-1) + close(server.rdb_child_exit_pipe); + aeDeleteFileEvent(server.el, server.rdb_pipe_read, AE_READABLE); + close(server.rdb_pipe_read); + server.rdb_child_exit_pipe = -1; + server.rdb_pipe_read = -1; + zfree(server.rdb_pipe_conns); + server.rdb_pipe_conns = NULL; + server.rdb_pipe_numconns = 0; + server.rdb_pipe_numconns_writing = 0; + zfree(server.rdb_pipe_buff); + server.rdb_pipe_buff = NULL; + server.rdb_pipe_bufflen = 0; +>>>>>>> 6.2.6:src/rdb.c } /* When a background RDB saving/transfer terminates, call the right handler. */ diff --git a/src/rdb.h b/src/rdb.h index d7d7a1a3f..e64fcbf13 100644 --- a/src/rdb.h +++ b/src/rdb.h @@ -130,6 +130,11 @@ #define RDBFLAGS_REPLICATION (1<<1) /* Load/save for SYNC. 
*/ #define RDBFLAGS_ALLOW_DUP (1<<2) /* Allow duplicated keys when loading.*/ +/* When rdbLoadObject() returns NULL, the err flag is + * set to hold the type of error that occurred */ +#define RDB_LOAD_ERR_EMPTY_KEY 1 /* Error of empty key */ +#define RDB_LOAD_ERR_OTHER 2 /* Any other errors */ + int rdbSaveType(rio *rdb, unsigned char type); int rdbLoadType(rio *rdb); int rdbSaveTime(rio *rdb, time_t t); @@ -153,7 +158,11 @@ int rdbSaveS3(char *path, rdbSaveInfo *rsi); int rdbLoadS3(char *path, rdbSaveInfo *rsi, int rdbflags); ssize_t rdbSaveObject(rio *rdb, robj_roptr o, robj *key); size_t rdbSavedObjectLen(robj *o, robj *key); +<<<<<<< HEAD robj *rdbLoadObject(int type, rio *rdb, sds key, uint64_t mvcc_tstamp); +======= +robj *rdbLoadObject(int type, rio *rdb, sds key, int *error); +>>>>>>> 6.2.6 void backgroundSaveDoneHandler(int exitcode, int bysignal); int rdbSaveKeyValuePair(rio *rdb, robj *key, robj *val, long long expiretime); ssize_t rdbSaveSingleModuleAux(rio *rdb, int when, moduleType *mt); diff --git a/src/redis-benchmark.cpp b/src/redis-benchmark.cpp index 727280584..a33ef2dab 100644 --- a/src/redis-benchmark.cpp +++ b/src/redis-benchmark.cpp @@ -372,9 +372,10 @@ fail: if (hostsocket == NULL) fprintf(stderr, "%s:%d\n", ip, port); else fprintf(stderr, "%s\n", hostsocket); int abort_test = 0; - if (!strncmp(reply->str,"NOAUTH",5) || - !strncmp(reply->str,"WRONGPASS",9) || - !strncmp(reply->str,"NOPERM",5)) + if (reply && reply->type == REDIS_REPLY_ERROR && + (!strncmp(reply->str,"NOAUTH",5) || + !strncmp(reply->str,"WRONGPASS",9) || + !strncmp(reply->str,"NOPERM",5))) abort_test = 1; freeReplyObject(reply); redisFree(c); diff --git a/src/redis-check-rdb.cpp b/src/redis-check-rdb.cpp index 954bb34c8..4e2033400 100644 --- a/src/redis-check-rdb.cpp +++ b/src/redis-check-rdb.cpp @@ -308,7 +308,11 @@ int redis_check_rdb(const char *rdbfilename, FILE *fp) { rdbstate.keys++; /* Read value */ rdbstate.doing = RDB_CHECK_DOING_READ_OBJECT_VALUE; +<<<<<<< 
HEAD:src/redis-check-rdb.cpp if ((val = rdbLoadObject(type,&rdb,szFromObj(key),OBJ_MVCC_INVALID)) == NULL) goto eoferr; +======= + if ((val = rdbLoadObject(type,&rdb,key->ptr,NULL)) == NULL) goto eoferr; +>>>>>>> 6.2.6:src/redis-check-rdb.c /* Check if the key already expired. */ if (expiretime != -1 && expiretime < now) rdbstate.already_expired++; diff --git a/src/redis-cli.c b/src/redis-cli.c index f1767ee4d..d20916769 100644 --- a/src/redis-cli.c +++ b/src/redis-cli.c @@ -72,7 +72,102 @@ redisContext *context; struct config config; +<<<<<<< HEAD int g_fTestMode = 0; +======= +static uint64_t dictSdsHash(const void *key); +static int dictSdsKeyCompare(void *privdata, const void *key1, + const void *key2); +static void dictSdsDestructor(void *privdata, void *val); +static void dictListDestructor(void *privdata, void *val); + +/* Cluster Manager Command Info */ +typedef struct clusterManagerCommand { + char *name; + int argc; + char **argv; + int flags; + int replicas; + char *from; + char *to; + char **weight; + int weight_argc; + char *master_id; + int slots; + int timeout; + int pipeline; + float threshold; + char *backup_dir; + char *from_user; + char *from_pass; + int from_askpass; +} clusterManagerCommand; + +static void createClusterManagerCommand(char *cmdname, int argc, char **argv); + + +static redisContext *context; +static struct config { + char *hostip; + int hostport; + char *hostsocket; + int tls; + cliSSLconfig sslconfig; + long repeat; + long interval; + int dbnum; /* db num currently selected */ + int input_dbnum; /* db num user input */ + int interactive; + int shutdown; + int monitor_mode; + int pubsub_mode; + int latency_mode; + int latency_dist_mode; + int latency_history; + int lru_test_mode; + long long lru_test_sample_size; + int cluster_mode; + int cluster_reissue_command; + int cluster_send_asking; + int slave_mode; + int pipe_mode; + int pipe_timeout; + int getrdb_mode; + int stat_mode; + int scan_mode; + int intrinsic_latency_mode; + 
int intrinsic_latency_duration; + sds pattern; + char *rdb_filename; + int bigkeys; + int memkeys; + unsigned memkeys_samples; + int hotkeys; + int stdinarg; /* get last arg from stdin. (-x option) */ + char *auth; + int askpass; + char *user; + int quoted_input; /* Force input args to be treated as quoted strings */ + int output; /* output mode, see OUTPUT_* defines */ + int push_output; /* Should we display spontaneous PUSH replies */ + sds mb_delim; + sds cmd_delim; + char prompt[128]; + char *eval; + int eval_ldb; + int eval_ldb_sync; /* Ask for synchronous mode of the Lua debugger. */ + int eval_ldb_end; /* Lua debugging session ended. */ + int enable_ldb_on_eval; /* Handle manual SCRIPT DEBUG + EVAL commands. */ + int last_cmd_type; + int verbose; + int set_errcode; + clusterManagerCommand cluster_manager_command; + int no_auth_warning; + int resp3; + int in_multi; + int pre_multi_dbnum; +} config; +>>>>>>> 6.2.6 /* User preferences. */ static struct pref { @@ -277,7 +372,7 @@ static void parseRedisUri(const char *uri) { if (curr == end) return; /* Extract database number. 
*/ - config.dbnum = atoi(curr); + config.input_dbnum = atoi(curr); } /* _serverAssert is needed by dict */ @@ -596,28 +691,42 @@ static int cliAuth(redisContext *ctx, char *user, char *auth) { reply = redisCommand(ctx,"AUTH %s",auth); else reply = redisCommand(ctx,"AUTH %s %s",user,auth); - if (reply != NULL) { - if (reply->type == REDIS_REPLY_ERROR) - fprintf(stderr,"Warning: AUTH failed\n"); - freeReplyObject(reply); - return REDIS_OK; + + if (reply == NULL) { + fprintf(stderr, "\nI/O error\n"); + return REDIS_ERR; } - return REDIS_ERR; + + int result = REDIS_OK; + if (reply->type == REDIS_REPLY_ERROR) { + result = REDIS_ERR; + fprintf(stderr, "AUTH failed: %s\n", reply->str); + } + freeReplyObject(reply); + return result; } -/* Send SELECT dbnum to the server */ +/* Send SELECT input_dbnum to the server */ static int cliSelect(void) { redisReply *reply; - if (config.dbnum == 0) return REDIS_OK; + if (config.input_dbnum == config.dbnum) return REDIS_OK; - reply = redisCommand(context,"SELECT %d",config.dbnum); - if (reply != NULL) { - int result = REDIS_OK; - if (reply->type == REDIS_REPLY_ERROR) result = REDIS_ERR; - freeReplyObject(reply); - return result; + reply = redisCommand(context,"SELECT %d",config.input_dbnum); + if (reply == NULL) { + fprintf(stderr, "\nI/O error\n"); + return REDIS_ERR; } - return REDIS_ERR; + + int result = REDIS_OK; + if (reply->type == REDIS_REPLY_ERROR) { + result = REDIS_ERR; + fprintf(stderr,"SELECT %d failed: %s\n",config.input_dbnum,reply->str); + } else { + config.dbnum = config.input_dbnum; + cliRefreshPrompt(); + } + freeReplyObject(reply); + return result; } /* Select RESP3 mode if redis-cli was started with the -3 option. 
*/ @@ -626,13 +735,18 @@ static int cliSwitchProto(void) { if (config.resp3 == 0) return REDIS_OK; reply = redisCommand(context,"HELLO 3"); - if (reply != NULL) { - int result = REDIS_OK; - if (reply->type == REDIS_REPLY_ERROR) result = REDIS_ERR; - freeReplyObject(reply); - return result; + if (reply == NULL) { + fprintf(stderr, "\nI/O error\n"); + return REDIS_ERR; } - return REDIS_ERR; + + int result = REDIS_OK; + if (reply->type == REDIS_REPLY_ERROR) { + result = REDIS_ERR; + fprintf(stderr,"HELLO 3 failed: %s\n",reply->str); + } + freeReplyObject(reply); + return result; } /* Connect to the server. It is possible to pass certain flags to the function: @@ -669,12 +783,15 @@ static int cliConnect(int flags) { if (context->err) { if (!(flags & CC_QUIET)) { fprintf(stderr,"Could not connect to Redis at "); - if (config.hostsocket == NULL) - fprintf(stderr,"%s:%d: %s\n", + if (config.hostsocket == NULL || + (config.cluster_mode && config.cluster_reissue_command)) + { + fprintf(stderr, "%s:%d: %s\n", config.hostip,config.hostport,context->errstr); - else + } else { fprintf(stderr,"%s: %s\n", config.hostsocket,context->errstr); + } } redisFree(context); context = NULL; @@ -705,6 +822,29 @@ static int cliConnect(int flags) { return REDIS_OK; } +/* In cluster, if server replies ASK, we will redirect to a different node. + * Before sending the real command, we need to send ASKING command first. 
*/ +static int cliSendAsking() { + redisReply *reply; + + config.cluster_send_asking = 0; + if (context == NULL) { + return REDIS_ERR; + } + reply = redisCommand(context,"ASKING"); + if (reply == NULL) { + fprintf(stderr, "\nI/O error\n"); + return REDIS_ERR; + } + int result = REDIS_OK; + if (reply->type == REDIS_REPLY_ERROR) { + result = REDIS_ERR; + fprintf(stderr,"ASKING failed: %s\n",reply->str); + } + freeReplyObject(reply); + return result; +} + static void cliPrintContextError(void) { if (context == NULL) return; fprintf(stderr,"Error: %s\n",context->errstr); @@ -933,6 +1073,7 @@ static sds cliFormatReplyRaw(redisReply *r) { case REDIS_REPLY_DOUBLE: out = sdscatprintf(out,"%s",r->str); break; + case REDIS_REPLY_SET: case REDIS_REPLY_ARRAY: case REDIS_REPLY_PUSH: for (i = 0; i < r->elements; i++) { @@ -991,6 +1132,7 @@ static sds cliFormatReplyCSV(redisReply *r) { out = sdscat(out,r->integer ? "true" : "false"); break; case REDIS_REPLY_ARRAY: + case REDIS_REPLY_SET: case REDIS_REPLY_PUSH: case REDIS_REPLY_MAP: /* CSV has no map type, just output flat list. */ for (i = 0; i < r->elements; i++) { @@ -1078,7 +1220,7 @@ static int cliReadReply(int output_raw_strings) { /* Check if we need to connect to a different node and reissue the * request. 
*/ if (config.cluster_mode && reply->type == REDIS_REPLY_ERROR && - (!strncmp(reply->str,"MOVED",5) || !strcmp(reply->str,"ASK"))) + (!strncmp(reply->str,"MOVED ",6) || !strncmp(reply->str,"ASK ",4))) { char *p = reply->str, *s; int slot; @@ -1102,6 +1244,9 @@ static int cliReadReply(int output_raw_strings) { printf("-> Redirected to slot [%d] located at %s:%d\n", slot, config.hostip, config.hostport); config.cluster_reissue_command = 1; + if (!strncmp(reply->str,"ASK ",4)) { + config.cluster_send_asking = 1; + } cliRefreshPrompt(); } else if (!config.interactive && config.set_errcode && reply->type == REDIS_REPLY_ERROR) @@ -1114,6 +1259,7 @@ static int cliReadReply(int output_raw_strings) { if (output) { out = cliFormatReply(reply, config.output, output_raw_strings); fwrite(out,sdslen(out),1,stdout); + fflush(stdout); sdsfree(out); } freeReplyObject(reply); @@ -1235,7 +1381,7 @@ static int cliSendCommand(int argc, char **argv, long repeat) { if (!strcasecmp(command,"select") && argc == 2 && config.last_cmd_type != REDIS_REPLY_ERROR) { - config.dbnum = atoi(argv[1]); + config.input_dbnum = config.dbnum = atoi(argv[1]); cliRefreshPrompt(); } else if (!strcasecmp(command,"auth") && (argc == 2 || argc == 3)) { cliSelect(); @@ -1247,21 +1393,23 @@ static int cliSendCommand(int argc, char **argv, long repeat) { cliRefreshPrompt(); } else if (!strcasecmp(command,"exec") && argc == 1 && config.in_multi) { config.in_multi = 0; - if (config.last_cmd_type == REDIS_REPLY_ERROR) { - config.dbnum = config.pre_multi_dbnum; + if (config.last_cmd_type == REDIS_REPLY_ERROR || + config.last_cmd_type == REDIS_REPLY_NIL) + { + config.input_dbnum = config.dbnum = config.pre_multi_dbnum; } cliRefreshPrompt(); } else if (!strcasecmp(command,"discard") && argc == 1 && config.last_cmd_type != REDIS_REPLY_ERROR) { config.in_multi = 0; - config.dbnum = config.pre_multi_dbnum; + config.input_dbnum = config.dbnum = config.pre_multi_dbnum; cliRefreshPrompt(); } } if 
(config.cluster_reissue_command){ /* If we need to reissue the command, break to prevent a - further 'repeat' number of dud interations */ + further 'repeat' number of dud interactions */ break; } if (config.interval) usleep(config.interval); @@ -1342,7 +1490,7 @@ static int parseOptions(int argc, char **argv) { double seconds = atof(argv[++i]); config.interval = seconds*1000000; } else if (!strcmp(argv[i],"-n") && !lastarg) { - config.dbnum = atoi(argv[++i]); + config.input_dbnum = atoi(argv[++i]); } else if (!strcmp(argv[i], "--no-auth-warning")) { config.no_auth_warning = 1; } else if (!strcmp(argv[i], "--askpass")) { @@ -1582,6 +1730,11 @@ static int parseOptions(int argc, char **argv) { } } + if (config.hostsocket && config.cluster_mode) { + fprintf(stderr,"Options -c and -s are mutually exclusive.\n"); + exit(1); + } + /* --ldb requires --eval. */ if (config.eval_ldb && config.eval == NULL) { fprintf(stderr,"Options --ldb and --ldb-sync-mode require --eval.\n"); @@ -1700,7 +1853,12 @@ static void usage(void) { " --lru-test Simulate a cache workload with an 80-20 distribution.\n" " --replica Simulate a replica showing commands received from the master.\n" " --rdb Transfer an RDB dump from remote server to local file.\n" +<<<<<<< HEAD " --pipe Transfer raw KeyDB protocol from stdin to server.\n" +======= +" Use filename of \"-\" to write to stdout.\n" +" --pipe Transfer raw Redis protocol from stdin to server.\n" +>>>>>>> 6.2.6 " --pipe-timeout In --pipe mode, abort with error if after sending all data.\n" " no reply is received within seconds.\n" " Default timeout: %d. 
Use 0 to wait forever.\n", @@ -1798,23 +1956,32 @@ static sds *getSdsArrayFromArgv(int argc, char **argv, int quoted) { static int issueCommandRepeat(int argc, char **argv, long repeat) { while (1) { + if (config.cluster_reissue_command || context == NULL || + context->err == REDIS_ERR_IO || context->err == REDIS_ERR_EOF) + { + if (cliConnect(CC_FORCE) != REDIS_OK) { + cliPrintContextError(); + config.cluster_reissue_command = 0; + return REDIS_ERR; + } + } config.cluster_reissue_command = 0; - if (cliSendCommand(argc,argv,repeat) != REDIS_OK) { - cliConnect(CC_FORCE); - - /* If we still cannot send the command print error. - * We'll try to reconnect the next time. */ - if (cliSendCommand(argc,argv,repeat) != REDIS_OK) { + if (config.cluster_send_asking) { + if (cliSendAsking() != REDIS_OK) { cliPrintContextError(); return REDIS_ERR; } } + if (cliSendCommand(argc,argv,repeat) != REDIS_OK) { + cliPrintContextError(); + return REDIS_ERR; + } + /* Issue the command again if we got redirected in cluster mode */ if (config.cluster_mode && config.cluster_reissue_command) { - cliConnect(CC_FORCE); - } else { - break; + continue; } + break; } return REDIS_OK; } @@ -5609,9 +5776,9 @@ static int clusterManagerCommandImport(int argc, char **argv) { } if (config.cluster_manager_command.flags & CLUSTER_MANAGER_CMD_FLAG_COPY) - strcat(cmdfmt, " %s"); + cmdfmt = sdscat(cmdfmt," COPY"); if (config.cluster_manager_command.flags & CLUSTER_MANAGER_CMD_FLAG_REPLACE) - strcat(cmdfmt, " %s"); + cmdfmt = sdscat(cmdfmt," REPLACE"); /* Use SCAN to iterate over the keys, migrating to the * right node as needed. 
*/ @@ -5643,8 +5810,7 @@ static int clusterManagerCommandImport(int argc, char **argv) { printf("Migrating %s to %s:%d: ", key, target->ip, target->port); redisReply *r = reconnectingRedisCommand(src_ctx, cmdfmt, target->ip, target->port, - key, 0, timeout, - "COPY", "REPLACE"); + key, 0, timeout); if (!r || r->type == REDIS_REPLY_ERROR) { if (r && r->str) { clusterManagerLogErr("Source %s:%d replied with " @@ -6060,7 +6226,7 @@ static void latencyDistMode(void) { #define RDB_EOF_MARK_SIZE 40 void sendReplconf(const char* arg1, const char* arg2) { - printf("sending REPLCONF %s %s\n", arg1, arg2); + fprintf(stderr, "sending REPLCONF %s %s\n", arg1, arg2); redisReply *reply = redisCommand(context, "REPLCONF %s %s", arg1, arg2); /* Handle any error conditions */ @@ -6120,7 +6286,7 @@ unsigned long long sendSync(redisContext *c, char *out_eof) { } *p = '\0'; if (buf[0] == '-') { - printf("SYNC with master failed: %s\n", buf); + fprintf(stderr, "SYNC with master failed: %s\n", buf); exit(1); } if (strncmp(buf+1,"EOF:",4) == 0 && strlen(buf+5) >= RDB_EOF_MARK_SIZE) { @@ -6225,8 +6391,9 @@ static void getRDB(clusterManagerNode *node) { payload, filename); } + int write_to_stdout = !strcmp(filename,"-"); /* Write to file. */ - if (!strcmp(filename,"-")) { + if (write_to_stdout) { fd = STDOUT_FILENO; } else { fd = open(filename, O_CREAT|O_WRONLY, 0644); @@ -6268,7 +6435,7 @@ static void getRDB(clusterManagerNode *node) { } if (usemark) { payload = ULLONG_MAX - payload - RDB_EOF_MARK_SIZE; - if (ftruncate(fd, payload) == -1) + if (!write_to_stdout && ftruncate(fd, payload) == -1) fprintf(stderr,"ftruncate failed: %s.\n", strerror(errno)); fprintf(stderr,"Transfer finished with success after %llu bytes\n", payload); } else { @@ -6277,7 +6444,7 @@ static void getRDB(clusterManagerNode *node) { redisFree(s); /* Close the connection ASAP as fsync() may take time. 
*/ if (node) node->context = NULL; - if (fsync(fd) == -1) { + if (!write_to_stdout && fsync(fd) == -1) { fprintf(stderr,"Fail to fsync '%s': %s\n", filename, strerror(errno)); exit(1); } @@ -7075,6 +7242,7 @@ int main(int argc, char **argv) { config.repeat = 1; config.interval = 0; config.dbnum = 0; + config.input_dbnum = 0; config.interactive = 0; config.shutdown = 0; config.monitor_mode = 0; @@ -7085,6 +7253,7 @@ int main(int argc, char **argv) { config.lru_test_mode = 0; config.lru_test_sample_size = 0; config.cluster_mode = 0; + config.cluster_send_asking = 0; config.slave_mode = 0; config.getrdb_mode = 0; config.stat_mode = 0; diff --git a/src/redismodule.h b/src/redismodule.h index fd29d1d03..4313aee01 100644 --- a/src/redismodule.h +++ b/src/redismodule.h @@ -840,6 +840,7 @@ REDISMODULE_API int (*RedisModule_AuthenticateClientWithUser)(RedisModuleCtx *ct REDISMODULE_API int (*RedisModule_DeauthenticateAndCloseClient)(RedisModuleCtx *ctx, uint64_t client_id) REDISMODULE_ATTR; REDISMODULE_API RedisModuleString * (*RedisModule_GetClientCertificate)(RedisModuleCtx *ctx, uint64_t id) REDISMODULE_ATTR; REDISMODULE_API int *(*RedisModule_GetCommandKeys)(RedisModuleCtx *ctx, RedisModuleString **argv, int argc, int *num_keys) REDISMODULE_ATTR; +REDISMODULE_API const char *(*RedisModule_GetCurrentCommandName)(RedisModuleCtx *ctx) REDISMODULE_ATTR; REDISMODULE_API int (*RedisModule_RegisterDefragFunc)(RedisModuleCtx *ctx, RedisModuleDefragFunc func) REDISMODULE_ATTR; REDISMODULE_API void *(*RedisModule_DefragAlloc)(RedisModuleDefragCtx *ctx, void *ptr) REDISMODULE_ATTR; REDISMODULE_API RedisModuleString *(*RedisModule_DefragRedisModuleString)(RedisModuleDefragCtx *ctx, RedisModuleString *str) REDISMODULE_ATTR; @@ -1112,6 +1113,7 @@ static int RedisModule_Init(RedisModuleCtx *ctx, const char *name, int ver, int REDISMODULE_GET_API(AuthenticateClientWithUser); REDISMODULE_GET_API(GetClientCertificate); REDISMODULE_GET_API(GetCommandKeys); + 
REDISMODULE_GET_API(GetCurrentCommandName); REDISMODULE_GET_API(RegisterDefragFunc); REDISMODULE_GET_API(DefragAlloc); REDISMODULE_GET_API(DefragRedisModuleString); diff --git a/src/replication.cpp b/src/replication.cpp index dd82aa637..1593a869d 100644 --- a/src/replication.cpp +++ b/src/replication.cpp @@ -609,6 +609,7 @@ void replicationFeedSlavesFromMasterStream(char *buf, size_t buflen) { } void replicationFeedMonitors(client *c, list *monitors, int dictid, robj **argv, int argc) { + if (!(listLength(server.monitors) && !server.loading)) return; listNode *ln; listIter li; int j; diff --git a/src/scripting.cpp b/src/scripting.cpp index 0b4ef9e1d..3e032e5d2 100644 --- a/src/scripting.cpp +++ b/src/scripting.cpp @@ -132,6 +132,16 @@ void sha1hex(char *digest, char *script, size_t len) { */ char *redisProtocolToLuaType(lua_State *lua, char* reply) { + + if (!lua_checkstack(lua, 5)) { + /* + * Increase the Lua stack if needed, to make sure there is enough room + * to push 5 elements to the stack. On failure, exit with panic. +         * Notice that we need, in the worst case, 5 elements because redisProtocolToLuaType_Aggregate +         * might push 5 elements to the Lua stack.*/ + serverPanic("lua stack limit reach when parsing redis.call reply"); + } + char *p = reply; switch(*p) { @@ -224,6 +234,11 @@ char *redisProtocolToLuaType_Aggregate(lua_State *lua, char *reply, int atype) { if (atype == '%') { p = redisProtocolToLuaType(lua,p); } else { + if (!lua_checkstack(lua, 1)) { + /* Notice that here we need to check the stack again because the recursive + * call to redisProtocolToLuaType might have use the room allocated in the stack */ + serverPanic("lua stack limit reach when parsing redis.call reply"); + } lua_pushboolean(lua,1); } lua_settable(lua,-3); @@ -343,6 +358,17 @@ void luaSortArray(lua_State *lua) { /* Reply to client 'c' converting the top element in the Lua stack to a * Redis reply. As a side effect the element is consumed from the stack. 
*/ void luaReplyToRedisReply(client *c, lua_State *lua) { + + if (!lua_checkstack(lua, 4)) { + /* Increase the Lua stack if needed to make sure there is enough room + * to push 4 elements to the stack. On failure, return error. +         * Notice that we need, in the worst case, 4 elements because returning a map might + * require push 4 elements to the Lua stack.*/ + addReplyErrorFormat(c, "reached lua stack limit"); + lua_pop(lua,1); /* pop the element from the stack */ + return; + } + int t = lua_type(lua,-1); switch(t) { @@ -366,6 +392,7 @@ void luaReplyToRedisReply(client *c, lua_State *lua) { * field. */ /* Handle error reply. */ + /* we took care of the stack size on function start */ lua_pushstring(lua,"err"); lua_gettable(lua,-2); t = lua_type(lua,-1); @@ -408,6 +435,7 @@ void luaReplyToRedisReply(client *c, lua_State *lua) { if (t == LUA_TTABLE) { int maplen = 0; void *replylen = addReplyDeferredLen(c); + /* we took care of the stack size on function start */ lua_pushnil(lua); /* Use nil to start iteration. */ while (lua_next(lua,-2)) { /* Stack now: table, key, value */ @@ -430,6 +458,7 @@ void luaReplyToRedisReply(client *c, lua_State *lua) { if (t == LUA_TTABLE) { int setlen = 0; void *replylen = addReplyDeferredLen(c); + /* we took care of the stack size on function start */ lua_pushnil(lua); /* Use nil to start iteration. */ while (lua_next(lua,-2)) { /* Stack now: table, key, true */ @@ -450,6 +479,7 @@ void luaReplyToRedisReply(client *c, lua_State *lua) { void *replylen = addReplyDeferredLen(c); int j = 1, mbulklen = 0; while(1) { + /* we took care of the stack size on function start */ lua_pushnumber(lua,j++); lua_gettable(lua,-2); t = lua_type(lua,-1); @@ -1707,6 +1737,9 @@ void evalGenericCommand(client *c, int evalsha) { } void evalCommand(client *c) { + /* Explicitly feed monitor here so that lua commands appear after their + * script command. 
*/ + replicationFeedMonitors(c,server.monitors,c->db->id,c->argv,c->argc); if (!(c->flags & CLIENT_LUA_DEBUG)) evalGenericCommand(c,0); else @@ -1714,7 +1747,14 @@ void evalCommand(client *c) { } void evalShaCommand(client *c) { +<<<<<<< HEAD:src/scripting.cpp if (sdslen((sds)ptrFromObj(c->argv[1])) != 40) { +======= + /* Explicitly feed monitor here so that lua commands appear after their + * script command. */ + replicationFeedMonitors(c,server.monitors,c->db->id,c->argv,c->argc); + if (sdslen(c->argv[1]->ptr) != 40) { +>>>>>>> 6.2.6:src/scripting.c /* We know that a match is not possible if the provided SHA is * not the right length. So we return an error ASAP, this way * evalGenericCommand() can be implemented without string length @@ -2092,7 +2132,8 @@ int ldbDelBreakpoint(int line) { /* Expect a valid multi-bulk command in the debugging client query buffer. * On success the command is parsed and returned as an array of SDS strings, * otherwise NULL is returned and there is to read more buffer. */ -sds *ldbReplParseCommand(int *argcp) { +sds *ldbReplParseCommand(int *argcp, char** err) { + static char* protocol_error = "protocol error"; sds *argv = NULL; int argc = 0; char *plen = NULL; @@ -2109,8 +2150,13 @@ sds *ldbReplParseCommand(int *argcp) { /* Seek and parse *\r\n. */ p = strchr(p,'*'); if (!p) goto protoerr; +<<<<<<< HEAD:src/scripting.cpp plen = p+1; /* Multi bulk len pointer. */ p = strstr(p,"\r\n"); if (!p) goto protoerr; +======= + char *plen = p+1; /* Multi bulk len pointer. 
*/ + p = strstr(p,"\r\n"); if (!p) goto keep_reading; +>>>>>>> 6.2.6:src/scripting.c *p = '\0'; p += 2; *argcp = atoi(plen); if (*argcp <= 0 || *argcp > 1024) goto protoerr; @@ -2119,12 +2165,16 @@ sds *ldbReplParseCommand(int *argcp) { argv = (sds*)zmalloc(sizeof(sds)*(*argcp), MALLOC_LOCAL); argc = 0; while(argc < *argcp) { + /* reached the end but there should be more data to read */ + if (*p == '\0') goto keep_reading; + if (*p != '$') goto protoerr; plen = p+1; /* Bulk string len pointer. */ - p = strstr(p,"\r\n"); if (!p) goto protoerr; + p = strstr(p,"\r\n"); if (!p) goto keep_reading; *p = '\0'; p += 2; int slen = atoi(plen); /* Length of this arg. */ if (slen <= 0 || slen > 1024) goto protoerr; + if ((size_t)(p + slen + 2 - copy) > sdslen(copy) ) goto keep_reading; argv[argc++] = sdsnewlen(p,slen); p += slen; /* Skip the already parsed argument. */ if (p[0] != '\r' || p[1] != '\n') goto protoerr; @@ -2134,6 +2184,8 @@ sds *ldbReplParseCommand(int *argcp) { return argv; protoerr: + *err = protocol_error; +keep_reading: sdsfreesplitres(argv,argc); sdsfree(copy); return NULL; @@ -2566,6 +2618,17 @@ void ldbEval(lua_State *lua, sds *argv, int argc) { void ldbRedis(lua_State *lua, sds *argv, int argc) { int j, saved_rc = g_pserver->lua_replicate_commands; + if (!lua_checkstack(lua, argc + 1)) { + /* Increase the Lua stack if needed to make sure there is enough room + * to push 'argc + 1' elements to the stack. On failure, return error. 
+         * Notice that we need, in worst case, 'argc + 1' elements because we push all the arguments +         * given by the user (without the first argument) and we also push the 'redis' global table and +         * 'redis.call' function so: +         * (1 (redis table)) + (1 (redis.call function)) + (argc - 1 (all arguments without the first)) = argc + 1*/ + ldbLogRedisReply("max lua stack reached"); + return; + } + lua_getglobal(lua,"redis"); lua_pushstring(lua,"call"); lua_gettable(lua,-2); /* Stack: redis, redis.call */ @@ -2622,12 +2685,17 @@ void ldbMaxlen(sds *argv, int argc) { int ldbRepl(lua_State *lua) { sds *argv; int argc; + char* err = NULL; /* We continue processing commands until a command that should return * to the Lua interpreter is found. */ while(1) { - while((argv = ldbReplParseCommand(&argc)) == NULL) { + while((argv = ldbReplParseCommand(&argc, &err)) == NULL) { char buf[1024]; + if (err) { + lua_pushstring(lua, err); + lua_error(lua); + } int nread = connRead(ldb.conn,buf,sizeof(buf)); if (nread <= 0) { /* Make sure the script runs without user input since the @@ -2637,6 +2705,15 @@ int ldbRepl(lua_State *lua) { return C_ERR; } ldb.cbuf = sdscatlen(ldb.cbuf,buf,nread); + /* after 1M we will exit with an error + * so that the client will not blow the memory + */ + if (sdslen(ldb.cbuf) > 1<<20) { + sdsfree(ldb.cbuf); + ldb.cbuf = sdsempty(); + lua_pushstring(lua, "max client buffer reached"); + lua_error(lua); + } } /* Flush the old buffer. 
*/ diff --git a/src/sds.c b/src/sds.c index 873eef7c0..8ed9a98c4 100644 --- a/src/sds.c +++ b/src/sds.c @@ -265,7 +265,7 @@ void sdsclear(sds s) { sds sdsMakeRoomFor(sds s, size_t addlen) { void *sh, *newsh; size_t avail = sdsavail(s); - size_t len, newlen; + size_t len, newlen, reqlen; char type, oldtype = s[-1] & SDS_TYPE_MASK; int hdrlen; size_t usable; @@ -275,7 +275,7 @@ sds sdsMakeRoomFor(sds s, size_t addlen) { len = sdslen(s); sh = (char*)s-sdsHdrSize(oldtype); - newlen = (len+addlen); + reqlen = newlen = (len+addlen); assert(newlen > len); /* Catch size_t overflow */ if (newlen < SDS_MAX_PREALLOC) newlen *= 2; @@ -290,10 +290,15 @@ sds sdsMakeRoomFor(sds s, size_t addlen) { if (type == SDS_TYPE_5) type = SDS_TYPE_8; hdrlen = sdsHdrSize(type); +<<<<<<< HEAD assert(hdrlen + newlen + 1 > len); /* Catch size_t overflow */ if (oldtype==type && (len+1024) >= avail) { // note: if we have a lot of free space don't use this as we don't want s_realloc copying // uninitialized data +======= + assert(hdrlen + newlen + 1 > reqlen); /* Catch size_t overflow */ + if (oldtype==type) { +>>>>>>> 6.2.6 newsh = s_realloc_usable(sh, hdrlen+newlen+1, &usable); if (newsh == NULL) return NULL; s = (char*)newsh+hdrlen; @@ -798,6 +803,21 @@ sds sdstrim(sds s, const char *cset) { return s; } +/* Changes the input string to be a subset of the original. + * It does not release the free space in the string, so a call to + * sdsRemoveFreeSpace may be wise after. */ +void sdssubstr(sds s, size_t start, size_t len) { + /* Clamp out of range input */ + size_t oldlen = sdslen(s); + if (start >= oldlen) start = len = 0; + if (len > oldlen-start) len = oldlen-start; + + /* Move the data */ + if (len) memmove(s, s+start, len); + s[len] = 0; + sdssetlen(s,len); +} + /* Turn the string into a smaller (or equal) string containing only the * substring specified by the 'start' and 'end' indexes. * @@ -809,6 +829,11 @@ sds sdstrim(sds s, const char *cset) { * * The string is modified in-place. 
* + * NOTE: this function can be misleading and can have unexpected behaviour, + * specifically when you want the length of the new string to be 0. + * Having start==end will result in a string with one character. + * please consider using sdssubstr instead. + * * Example: * * s = sdsnew("Hello World"); @@ -816,28 +841,13 @@ sds sdstrim(sds s, const char *cset) { */ void sdsrange(sds s, ssize_t start, ssize_t end) { size_t newlen, len = sdslen(s); - if (len == 0) return; - if (start < 0) { - start = len+start; - if (start < 0) start = 0; - } - if (end < 0) { - end = len+end; - if (end < 0) end = 0; - } + if (start < 0) + start = len + start; + if (end < 0) + end = len + end; newlen = (start > end) ? 0 : (end-start)+1; - if (newlen != 0) { - if (start >= (ssize_t)len) { - newlen = 0; - } else if (end >= (ssize_t)len) { - end = len-1; - newlen = (start > end) ? 0 : (end-start)+1; - } - } - if (start && newlen) memmove(s, s+start, newlen); - s[newlen] = 0; - sdssetlen(s,newlen); + sdssubstr(s, start, newlen); } /* Apply tolower() to every character of the sds string 's'. 
*/ @@ -1408,6 +1418,18 @@ int sdsTest(int argc, char **argv, int accurate) { test_cond("sdsrange(...,100,100)", sdslen(y) == 0 && memcmp(y,"\0",1) == 0); + sdsfree(y); + y = sdsdup(x); + sdsrange(y,4,6); + test_cond("sdsrange(...,4,6)", + sdslen(y) == 0 && memcmp(y,"\0",1) == 0); + + sdsfree(y); + y = sdsdup(x); + sdsrange(y,3,6); + test_cond("sdsrange(...,3,6)", + sdslen(y) == 1 && memcmp(y,"o\0",2) == 0); + sdsfree(y); sdsfree(x); x = sdsnew("foo"); diff --git a/src/sds.h b/src/sds.h index d16906f30..572c0e7ce 100644 --- a/src/sds.h +++ b/src/sds.h @@ -297,6 +297,7 @@ sds sdscatprintf(sds s, const char *fmt, ...); sds sdscatfmt(sds s, char const *fmt, ...); sds sdstrim(sds s, const char *cset); +void sdssubstr(sds s, size_t start, size_t len); void sdsrange(sds s, ssize_t start, ssize_t end); void sdsupdatelen(sds s); void sdsclear(sds s); diff --git a/src/sentinel.cpp b/src/sentinel.cpp index ffa0ff60c..2004b8b9c 100644 --- a/src/sentinel.cpp +++ b/src/sentinel.cpp @@ -3187,11 +3187,21 @@ void sentinelConfigSetCommand(client *c) { sentinel.announce_port = numval; } else if (!strcasecmp(szFromObj(o), "sentinel-user")) { sdsfree(sentinel.sentinel_auth_user); +<<<<<<< HEAD:src/sentinel.cpp sentinel.sentinel_auth_user = sdsnew(szFromObj(val)); +======= + sentinel.sentinel_auth_user = sdslen(val->ptr) == 0 ? + NULL : sdsdup(val->ptr); +>>>>>>> 6.2.6:src/sentinel.c drop_conns = 1; } else if (!strcasecmp(szFromObj(o), "sentinel-pass")) { sdsfree(sentinel.sentinel_auth_pass); +<<<<<<< HEAD:src/sentinel.cpp sentinel.sentinel_auth_pass = sdsnew(szFromObj(val)); +======= + sentinel.sentinel_auth_pass = sdslen(val->ptr) == 0 ? 
+ NULL : sdsdup(val->ptr); +>>>>>>> 6.2.6:src/sentinel.c drop_conns = 1; } else { addReplyErrorFormat(c, "Invalid argument '%s' to SENTINEL CONFIG SET", diff --git a/src/server.cpp b/src/server.cpp index e507a8bfd..d83a37048 100644 --- a/src/server.cpp +++ b/src/server.cpp @@ -743,7 +743,7 @@ struct redisCommand redisCommandTable[] = { 0,NULL,0,0,0,0,0,0}, {"auth",authCommand,-2, - "no-auth no-script ok-loading ok-stale fast no-monitor no-slowlog @connection", + "no-auth no-script ok-loading ok-stale fast @connection", 0,NULL,0,0,0,0,0,0}, /* We don't allow PING during loading since in Redis PING is used as @@ -790,7 +790,7 @@ struct redisCommand redisCommandTable[] = { 0,NULL,0,0,0,0,0,0}, {"exec",execCommand,1, - "no-script no-monitor no-slowlog ok-loading ok-stale @transaction", + "no-script no-slowlog ok-loading ok-stale @transaction", 0,NULL,0,0,0,0,0,0}, {"discard",discardCommand,1, @@ -911,7 +911,7 @@ struct redisCommand redisCommandTable[] = { {"migrate",migrateCommand,-6, "write random @keyspace @dangerous", - 0,migrateGetKeys,0,0,0,0,0,0}, + 0,migrateGetKeys,3,3,1,0,0,0}, {"asking",askingCommand,1, "fast @keyspace", @@ -942,17 +942,21 @@ struct redisCommand redisCommandTable[] = { 0,NULL,0,0,0,0,0,0}, {"hello",helloCommand,-1, - "no-auth no-script fast no-monitor ok-loading ok-stale @connection", + "no-auth no-script fast ok-loading ok-stale @connection", 0,NULL,0,0,0,0,0,0}, /* EVAL can modify the dataset, however it is not flagged as a write - * command since we do the check while running commands from Lua. */ + * command since we do the check while running commands from Lua. + * + * EVAL and EVALSHA also feed monitors before the commands are executed, + * as opposed to after. 
+ */ {"eval",evalCommand,-3, - "no-script may-replicate @scripting", + "no-script no-monitor may-replicate @scripting", 0,evalGetKeys,0,0,0,0,0,0}, {"evalsha",evalShaCommand,-3, - "no-script may-replicate @scripting", + "no-script no-monitor may-replicate @scripting", 0,evalGetKeys,0,0,0,0,0,0}, {"slowlog",slowlogCommand,-2, @@ -1904,6 +1908,7 @@ void clientsCron(int iel) { /* Rotate the list, take the current head, process. * This way if the client must be removed from the list it's the * first element and we don't incur into O(N) computation. */ +<<<<<<< HEAD:src/server.cpp listRotateTailToHead(g_pserver->clients); head = (listNode*)listFirst(g_pserver->clients); c = (client*)listNodeValue(head); @@ -1920,6 +1925,19 @@ void clientsCron(int iel) { LContinue: fastlock_unlock(&c->lock); } +======= + listRotateTailToHead(server.clients); + head = listFirst(server.clients); + c = listNodeValue(head); + /* The following functions do different service checks on the client. + * The protocol is that they return non-zero if the client was + * terminated. 
*/ + if (clientsCronHandleTimeout(c,now)) continue; + if (clientsCronResizeQueryBuffer(c)) continue; + if (clientsCronTrackExpansiveClients(c, curr_peak_mem_usage_slot)) continue; + if (clientsCronTrackClientsMemUsage(c)) continue; + if (closeClientOnOutputBufferLimitReached(c, 0)) continue; +>>>>>>> 6.2.6:src/server.c } /* Free any pending clients */ @@ -2761,6 +2779,7 @@ void createSharedObjects(void) { shared.eval = makeObjectShared("EVAL",4); /* Shared command argument */ +<<<<<<< HEAD:src/server.cpp shared.left = makeObjectShared("left",4); shared.right = makeObjectShared("right",5); shared.pxat = makeObjectShared("PXAT", 4); @@ -2786,6 +2805,27 @@ void createSharedObjects(void) { shared.zrem = makeObjectShared(createStringObject("ZREM", 4)); shared.mvccrestore = makeObjectShared(createStringObject("KEYDB.MVCCRESTORE", 17)); shared.pexpirememberat = makeObjectShared(createStringObject("PEXPIREMEMBERAT",15)); +======= + shared.left = createStringObject("left",4); + shared.right = createStringObject("right",5); + shared.pxat = createStringObject("PXAT", 4); + shared.px = createStringObject("PX",2); + shared.time = createStringObject("TIME",4); + shared.retrycount = createStringObject("RETRYCOUNT",10); + shared.force = createStringObject("FORCE",5); + shared.justid = createStringObject("JUSTID",6); + shared.lastid = createStringObject("LASTID",6); + shared.default_username = createStringObject("default",7); + shared.ping = createStringObject("ping",4); + shared.setid = createStringObject("SETID",5); + shared.keepttl = createStringObject("KEEPTTL",7); + shared.load = createStringObject("LOAD",4); + shared.createconsumer = createStringObject("CREATECONSUMER",14); + shared.getack = createStringObject("GETACK",6); + shared.special_asterick = createStringObject("*",1); + shared.special_equals = createStringObject("=",1); + shared.redacted = makeObjectShared(createStringObject("(redacted)",10)); +>>>>>>> 6.2.6:src/server.c for (j = 0; j < OBJ_SHARED_INTEGERS; j++) { 
shared.integers[j] = @@ -2962,6 +3002,7 @@ void initServerConfig(void) { /* By default we want scripts to be always replicated by effects * (single commands executed by the script), and not by sending the +<<<<<<< HEAD:src/server.cpp * script to the replica / AOF. This is the new way starting from * Redis 5. However it is possible to revert it via keydb.conf. */ g_pserver->lua_always_replicate_commands = 1; @@ -2970,6 +3011,16 @@ void initServerConfig(void) { cserver.cthreads = CONFIG_DEFAULT_THREADS; cserver.fThreadAffinity = CONFIG_DEFAULT_THREAD_AFFINITY; cserver.threadAffinityOffset = 0; +======= + * script to the slave / AOF. This is the new way starting from + * Redis 5. However it is possible to revert it via redis.conf. */ + server.lua_always_replicate_commands = 1; + + /* Client Pause related */ + server.client_pause_type = CLIENT_PAUSE_OFF; + server.client_pause_end_time = 0; + +>>>>>>> 6.2.6:src/server.c initConfigValues(); } @@ -3910,12 +3961,6 @@ void preventCommandPropagation(client *c) { c->flags |= CLIENT_PREVENT_PROP; } -/* Avoid logging any information about this client's arguments - * since they contain sensitive information. */ -void preventCommandLogging(client *c) { - c->flags |= CLIENT_PREVENT_LOGGING; -} - /* AOF specific version of preventCommandPropagation(). */ void preventCommandAOF(client *c) { c->flags |= CLIENT_PREVENT_AOF_PROP; @@ -3929,7 +3974,7 @@ void preventCommandReplication(client *c) { /* Log the last command a client executed into the slowlog. */ void slowlogPushCurrentCommand(client *c, struct redisCommand *cmd, ustime_t duration) { /* Some commands may contain sensitive data that should not be available in the slowlog. 
*/ - if ((c->flags & CLIENT_PREVENT_LOGGING) || (cmd->flags & CMD_SKIP_SLOWLOG)) + if (cmd->flags & CMD_SKIP_SLOWLOG) return; /* If command argument vector was rewritten, use the original @@ -3984,6 +4029,7 @@ void call(client *c, int flags) { serverAssert(GlobalLocksAcquired()); static long long prev_err_count; +<<<<<<< HEAD:src/server.cpp serverTL->fixed_time_expire++; /* Send the command to clients in MONITOR mode if applicable. @@ -4002,6 +4048,8 @@ void call(client *c, int flags) { ProcessPendingAsyncWrites(); } +======= +>>>>>>> 6.2.6:src/server.c /* Initialization: clear the flags that must be set by the command on * demand, and initialize the array for additional commands propagation. */ c->flags &= ~(CLIENT_FORCE_AOF|CLIENT_FORCE_REPL|CLIENT_PREVENT_PROP); @@ -4009,10 +4057,22 @@ void call(client *c, int flags) { redisOpArrayInit(&g_pserver->also_propagate); /* Call the command. */ +<<<<<<< HEAD:src/server.cpp dirty = g_pserver->dirty; prev_err_count = g_pserver->stat_total_error_replies; updateCachedTime(0); incrementMvccTstamp(); +======= + dirty = server.dirty; + prev_err_count = server.stat_total_error_replies; + + /* Update cache time, in case we have nested calls we want to + * update only on the first call*/ + if (server.fixed_time_expire++ == 0) { + updateCachedTime(0); + } + +>>>>>>> 6.2.6:src/server.c elapsedStart(&call_timer); try { c->cmd->proc(c); @@ -4077,6 +4137,14 @@ void call(client *c, int flags) { if ((flags & CMD_CALL_SLOWLOG) && !(c->flags & CLIENT_BLOCKED)) slowlogPushCurrentCommand(c, real_cmd, duration); + /* Send the command to clients in MONITOR mode if applicable. + * Administrative commands are considered too dangerous to be shown. */ + if (!(c->cmd->flags & (CMD_SKIP_MONITOR|CMD_ADMIN))) { + robj **argv = c->original_argv ? c->original_argv : c->argv; + int argc = c->original_argv ? c->original_argc : c->argc; + replicationFeedMonitors(c,server.monitors,c->db->id,argv,argc); + } + /* Clear the original argv. 
* If the client is blocked we will handle slowlog when it is unblocked. */ if (!(c->flags & CLIENT_BLOCKED)) @@ -4312,13 +4380,8 @@ int processCommand(client *c, int callFlags) { int is_may_replicate_command = (c->cmd->flags & (CMD_WRITE | CMD_MAY_REPLICATE)) || (c->cmd->proc == execCommand && (c->mstate.cmd_flags & (CMD_WRITE | CMD_MAY_REPLICATE))); - /* Check if the user is authenticated. This check is skipped in case - * the default user is flagged as "nopass" and is active. */ - int auth_required = (!(DefaultUser->flags & USER_FLAG_NOPASS) || - (DefaultUser->flags & USER_FLAG_DISABLED)) && - !c->authenticated; - if (auth_required) { - /* AUTH and HELLO and no auth modules are valid even in + if (authRequired(c)) { + /* AUTH and HELLO and no auth commands are valid even in * non-authenticated state. */ if (!(c->cmd->flags & CMD_NO_AUTH)) { rejectCommand(c,shared.noautherr); @@ -5437,6 +5500,7 @@ sds genRedisInfoString(const char *section) { info = sdscatprintf(info, "# Replication\r\n" "role:%s\r\n", +<<<<<<< HEAD:src/server.cpp listLength(g_pserver->masters) == 0 ? "master" : g_pserver->fActiveReplica ? "active-replica" : "slave"); if (listLength(g_pserver->masters)) { @@ -5456,6 +5520,39 @@ sds genRedisInfoString(const char *section) { slave_repl_offset = mi->master->reploff; else if (mi->cached_master) slave_repl_offset = mi->cached_master->reploff; +======= + server.masterhost == NULL ? 
"master" : "slave"); + if (server.masterhost) { + long long slave_repl_offset = 1; + long long slave_read_repl_offset = 1; + + if (server.master) { + slave_repl_offset = server.master->reploff; + slave_read_repl_offset = server.master->read_reploff; + } else if (server.cached_master) { + slave_repl_offset = server.cached_master->reploff; + slave_read_repl_offset = server.cached_master->read_reploff; + } + + info = sdscatprintf(info, + "master_host:%s\r\n" + "master_port:%d\r\n" + "master_link_status:%s\r\n" + "master_last_io_seconds_ago:%d\r\n" + "master_sync_in_progress:%d\r\n" + "slave_read_repl_offset:%lld\r\n" + "slave_repl_offset:%lld\r\n" + ,server.masterhost, + server.masterport, + (server.repl_state == REPL_STATE_CONNECTED) ? + "up" : "down", + server.master ? + ((int)(server.unixtime-server.master->lastinteraction)) : -1, + server.repl_state == REPL_STATE_TRANSFER, + slave_read_repl_offset, + slave_repl_offset + ); +>>>>>>> 6.2.6:src/server.c char master_prefix[128] = ""; if (cmasters != 0) { diff --git a/src/server.h b/src/server.h index 399bd4fb7..e5b287f4d 100644 --- a/src/server.h +++ b/src/server.h @@ -471,8 +471,11 @@ extern int configOOMScoreAdjValuesDefaults[CONFIG_OOM_COUNT]; and AOF client */ #define CLIENT_REPL_RDBONLY (1ULL<<42) /* This client is a replica that only wants RDB without replication buffer. */ +<<<<<<< HEAD #define CLIENT_PREVENT_LOGGING (1ULL<<43) /* Prevent logging of command to slowlog */ #define CLIENT_FORCE_REPLY (1ULL<<44) /* Should addReply be forced to write the text? */ +======= +>>>>>>> 6.2.6 /* Client block type (btype field in client structure) * if CLIENT_BLOCKED flag is set. 
*/ @@ -1285,9 +1288,14 @@ struct sharedObjectsStruct { *emptyscan, *multi, *exec, *left, *right, *hset, *srem, *xgroup, *xclaim, *script, *replconf, *eval, *persist, *set, *pexpireat, *pexpire, *time, *pxat, *px, *retrycount, *force, *justid, +<<<<<<< HEAD *lastid, *ping, *replping, *setid, *keepttl, *load, *createconsumer, *getack, *special_asterick, *special_equals, *default_username, *hdel, *zrem, *mvccrestore, *pexpirememberat, +======= + *lastid, *ping, *setid, *keepttl, *load, *createconsumer, + *getack, *special_asterick, *special_equals, *default_username, *redacted, +>>>>>>> 6.2.6 *select[PROTO_SHARED_SELECT_CMDS], *integers[OBJ_SHARED_INTEGERS], *mbulkhdr[OBJ_SHARED_BULKHDR_LEN], /* "*\r\n" */ @@ -2165,6 +2173,7 @@ void moduleFireServerEvent(uint64_t eid, int subid, void *data); void processModuleLoadingProgressEvent(int is_aof); int moduleTryServeClientBlockedOnKey(client *c, robj *key); void moduleUnblockClient(client *c); +int moduleBlockedClientMayTimeout(client *c); int moduleClientIsBlockedOnKeys(client *c); void moduleNotifyUserChanged(client *c); void moduleNotifyKeyUnlink(robj *key, robj *val); @@ -2180,9 +2189,15 @@ extern "C" void getRandomHexChars(char *p, size_t len); extern "C" void getRandomBytes(unsigned char *p, size_t len); uint64_t crc64(uint64_t crc, const unsigned char *s, uint64_t l); void exitFromChild(int retcode); +<<<<<<< HEAD size_t redisPopcount(const void *s, long count); int redisSetProcTitle(const char *title); int validateProcTitleTemplate(const char *_template); +======= +long long redisPopcount(void *s, long count); +int redisSetProcTitle(char *title); +int validateProcTitleTemplate(const char *template); +>>>>>>> 6.2.6 int redisCommunicateSystemd(const char *sd_notify_msg); void redisSetCpuAffinity(const char *cpulist); @@ -2226,6 +2241,7 @@ void addReplyErrorSds(client *c, sds err); void addReplyError(client *c, const char *err); void addReplyStatus(client *c, const char *status); void addReplyDouble(client *c, double 
d); +void addReplyBigNum(client *c, const char* num, size_t len); void addReplyHumanLongDouble(client *c, long double d); void addReplyLongLong(client *c, long long ll); #ifdef __cplusplus @@ -2253,9 +2269,15 @@ sds getAllClientsInfoString(int type); void rewriteClientCommandVector(client *c, int argc, ...); void rewriteClientCommandArgument(client *c, int i, robj *newval); void replaceClientCommandVector(client *c, int argc, robj **argv); +void redactClientCommandArgument(client *c, int argc); unsigned long getClientOutputBufferMemoryUsage(client *c); +<<<<<<< HEAD int freeClientsInAsyncFreeQueue(int iel); void asyncCloseClientOnOutputBufferLimitReached(client *c); +======= +int freeClientsInAsyncFreeQueue(void); +int closeClientOnOutputBufferLimitReached(client *c, int async); +>>>>>>> 6.2.6 int getClientType(client *c); int getClientTypeByName(const char *name); const char *getClientTypeName(int cclass); @@ -2282,6 +2304,7 @@ void unprotectClient(client *c); void ProcessPendingAsyncWrites(void); client *lookupClientByID(uint64_t id); +int authRequired(client *c); #ifdef __GNUC__ void addReplyErrorFormat(client *c, const char *fmt, ...) 
@@ -2332,6 +2355,7 @@ void initClientMultiState(client *c); void freeClientMultiState(client *c); void queueMultiCommand(client *c); void touchWatchedKey(redisDb *db, robj *key); +int isWatchedKeyExpired(client *c); void touchAllWatchedKeysInDb(redisDb *emptied, redisDb *replaced_with); void discardTransaction(client *c); void flagTransaction(client *c); @@ -2357,6 +2381,8 @@ robj *createObject(int type, void *ptr); robj *createStringObject(const char *ptr, size_t len); robj *createRawStringObject(const char *ptr, size_t len); robj *createEmbeddedStringObject(const char *ptr, size_t len); +robj *tryCreateRawStringObject(const char *ptr, size_t len); +robj *tryCreateStringObject(const char *ptr, size_t len); robj *dupStringObject(const robj *o); int isSdsRepresentableAsLongLong(const char *s, long long *llval); int isObjectRepresentableAsLongLong(robj *o, long long *llongval); @@ -2566,8 +2592,13 @@ unsigned char *zzlFirstInRange(unsigned char *zl, zrangespec *range); unsigned char *zzlLastInRange(unsigned char *zl, zrangespec *range); unsigned long zsetLength(robj_roptr zobj); void zsetConvert(robj *zobj, int encoding); +<<<<<<< HEAD void zsetConvertToZiplistIfNeeded(robj *zobj, size_t maxelelen); int zsetScore(robj_roptr zobj, sds member, double *score); +======= +void zsetConvertToZiplistIfNeeded(robj *zobj, size_t maxelelen, size_t totelelen); +int zsetScore(robj *zobj, sds member, double *score); +>>>>>>> 6.2.6 unsigned long zslGetRank(zskiplist *zsl, double score, sds o); int zsetAdd(robj *zobj, double score, sds ele, int in_flags, int *out_flags, double *newscore); long zsetRank(robj_roptr zobj, sds ele, int reverse); @@ -2610,7 +2641,6 @@ void redisOpArrayInit(redisOpArray *oa); void redisOpArrayFree(redisOpArray *oa); void forceCommandPropagation(client *c, int flags); void preventCommandPropagation(client *c); -void preventCommandLogging(client *c); void preventCommandAOF(client *c); void preventCommandReplication(client *c); void 
slowlogPushCurrentCommand(client *c, struct redisCommand *cmd, ustime_t duration); @@ -2715,10 +2745,16 @@ void initConfigValues(); /* db.c -- Keyspace access API */ int removeExpire(redisDb *db, robj *key); +<<<<<<< HEAD int removeExpireCore(redisDb *db, robj *key, dictEntry *de); int removeSubkeyExpire(redisDb *db, robj *key, robj *subkey); void propagateExpire(redisDb *db, robj *key, int lazy); void propagateSubkeyExpire(redisDb *db, int type, robj *key, robj *subkey); +======= +void deleteExpiredKeyAndPropagate(redisDb *db, robj *keyobj); +void propagateExpire(redisDb *db, robj *key, int lazy); +int keyIsExpired(redisDb *db, robj *key); +>>>>>>> 6.2.6 int expireIfNeeded(redisDb *db, robj *key); expireEntry *getExpire(redisDb *db, robj_roptr key); void setExpire(client *c, redisDb *db, robj *key, robj *subkey, long long when); @@ -2730,8 +2766,14 @@ robj_roptr lookupKeyReadOrReply(client *c, robj *key, robj *reply); robj *lookupKeyWriteOrReply(client *c, robj *key, robj *reply); robj_roptr lookupKeyReadWithFlags(redisDb *db, robj *key, int flags); robj *lookupKeyWriteWithFlags(redisDb *db, robj *key, int flags); +<<<<<<< HEAD robj_roptr objectCommandLookup(client *c, robj *key); robj_roptr objectCommandLookupOrReply(client *c, robj *key, robj *reply); +======= +robj *objectCommandLookup(client *c, robj *key); +robj *objectCommandLookupOrReply(client *c, robj *key, robj *reply); +void SentReplyOnKeyMiss(client *c, robj *reply); +>>>>>>> 6.2.6 int objectSetLRUOrLFU(robj *val, long long lfu_freq, long long lru_idle, long long lru_clock, int lru_multiplier); #define LOOKUP_NONE 0 diff --git a/src/t_hash.cpp b/src/t_hash.cpp index 3f2cc5751..9a94c14fa 100644 --- a/src/t_hash.cpp +++ b/src/t_hash.cpp @@ -39,17 +39,28 @@ * as their string length can be queried in constant time. 
*/ void hashTypeTryConversion(robj *o, robj **argv, int start, int end) { int i; + size_t sum = 0; if (o->encoding != OBJ_ENCODING_ZIPLIST) return; for (i = start; i <= end; i++) { +<<<<<<< HEAD:src/t_hash.cpp if (sdsEncodedObject(argv[i]) && sdslen(szFromObj(argv[i])) > g_pserver->hash_max_ziplist_value) { +======= + if (!sdsEncodedObject(argv[i])) + continue; + size_t len = sdslen(argv[i]->ptr); + if (len > server.hash_max_ziplist_value) { +>>>>>>> 6.2.6:src/t_hash.c hashTypeConvert(o, OBJ_ENCODING_HT); - break; + return; } + sum += len; } + if (!ziplistSafeToAdd(o->ptr, sum)) + hashTypeConvert(o, OBJ_ENCODING_HT); } /* Get the value from a ziplist encoded hash, identified by field. @@ -1015,8 +1026,13 @@ void hrandfieldWithCountCommand(client *c, long l, int withvalues) { int uniq = 1; robj_roptr hash; +<<<<<<< HEAD:src/t_hash.cpp if ((hash = lookupKeyReadOrReply(c,c->argv[1],shared.null[c->resp])) == nullptr || checkType(c,hash,OBJ_HASH)) return; +======= + if ((hash = lookupKeyReadOrReply(c,c->argv[1],shared.emptyarray)) + == NULL || checkType(c,hash,OBJ_HASH)) return; +>>>>>>> 6.2.6:src/t_hash.c size = hashTypeLength(hash); if(l >= 0) { @@ -1204,7 +1220,7 @@ void hrandfieldWithCountCommand(client *c, long l, int withvalues) { } } -/* HRANDFIELD [ WITHVALUES] */ +/* HRANDFIELD key [ [WITHVALUES]] */ void hrandfieldCommand(client *c) { long l; int withvalues = 0; diff --git a/src/t_list.cpp b/src/t_list.cpp index 2649583d4..f57c72b3e 100644 --- a/src/t_list.cpp +++ b/src/t_list.cpp @@ -29,6 +29,8 @@ #include "server.h" +#define LIST_MAX_ITEM_SIZE ((1ull<<32)-1024) + /*----------------------------------------------------------------------------- * List API *----------------------------------------------------------------------------*/ @@ -224,6 +226,13 @@ robj *listTypeDup(robj *o) { void pushGenericCommand(client *c, int where, int xx) { int j; + for (j = 2; j < c->argc; j++) { + if (sdslen(c->argv[j]->ptr) > LIST_MAX_ITEM_SIZE) { + addReplyError(c, "Element 
too large"); + return; + } + } + robj *lobj = lookupKeyWrite(c->db, c->argv[1]); if (checkType(c,lobj,OBJ_LIST)) return; if (!lobj) { @@ -287,6 +296,11 @@ void linsertCommand(client *c) { return; } + if (sdslen(c->argv[4]->ptr) > LIST_MAX_ITEM_SIZE) { + addReplyError(c, "Element too large"); + return; + } + if ((subject = lookupKeyWriteOrReply(c,c->argv[1],shared.czero)) == NULL || checkType(c,subject,OBJ_LIST)) return; @@ -354,6 +368,11 @@ void lsetCommand(client *c) { long index; robj *value = c->argv[3]; + if (sdslen(value->ptr) > LIST_MAX_ITEM_SIZE) { + addReplyError(c, "Element too large"); + return; + } + if ((getLongFromObjectOrReply(c, c->argv[2], &index, NULL) != C_OK)) return; @@ -576,6 +595,11 @@ void lposCommand(client *c) { int direction = LIST_TAIL; long rank = 1, count = -1, maxlen = 0; /* Count -1: option not given. */ + if (sdslen(ele->ptr) > LIST_MAX_ITEM_SIZE) { + addReplyError(c, "Element too large"); + return; + } + /* Parse the optional arguments. */ for (int j = 3; j < c->argc; j++) { char *opt = szFromObj(c->argv[j]); @@ -671,6 +695,11 @@ void lremCommand(client *c) { long toremove; long removed = 0; + if (sdslen(obj->ptr) > LIST_MAX_ITEM_SIZE) { + addReplyError(c, "Element too large"); + return; + } + if ((getLongFromObjectOrReply(c, c->argv[2], &toremove, NULL) != C_OK)) return; diff --git a/src/t_set.cpp b/src/t_set.cpp index 4d1f4420e..a81aab1f1 100644 --- a/src/t_set.cpp +++ b/src/t_set.cpp @@ -66,7 +66,14 @@ int setTypeAdd(robj *subject, const char *value) { if (success) { /* Convert to regular set when the intset contains * too many entries. */ +<<<<<<< HEAD:src/t_set.cpp if (intsetLen((intset*)subject->m_ptr) > g_pserver->set_max_intset_entries) +======= + size_t max_entries = server.set_max_intset_entries; + /* limit to 1G entries due to intset internals. 
*/ + if (max_entries >= 1<<30) max_entries = 1<<30; + if (intsetLen(subject->ptr) > max_entries) +>>>>>>> 6.2.6:src/t_set.c setTypeConvert(subject,OBJ_ENCODING_HT); return 1; } @@ -397,12 +404,21 @@ void smoveCommand(client *c) { } signalModifiedKey(c,c->db,c->argv[1]); +<<<<<<< HEAD:src/t_set.cpp signalModifiedKey(c,c->db,c->argv[2]); g_pserver->dirty++; /* An extra key has changed when ele was successfully added to dstset */ if (setTypeAdd(dstset,szFromObj(ele))) { g_pserver->dirty++; +======= + server.dirty++; + + /* An extra key has changed when ele was successfully added to dstset */ + if (setTypeAdd(dstset,ele->ptr)) { + server.dirty++; + signalModifiedKey(c,c->db,c->argv[2]); +>>>>>>> 6.2.6:src/t_set.c notifyKeyspaceEvent(NOTIFY_SET,"sadd",c->argv[2],c->db->id); } addReply(c,shared.cone); @@ -863,13 +879,14 @@ void sinterGenericCommand(client *c, robj **setkeys, int64_t intobj; void *replylen = NULL; unsigned long j, cardinality = 0; - int encoding; + int encoding, empty = 0; for (j = 0; j < setnum; j++) { robj *setobj = dstkey ? lookupKeyWrite(c->db,setkeys[j]) : lookupKeyRead(c->db,setkeys[j]).unsafe_robjcast(); if (!setobj) { +<<<<<<< HEAD:src/t_set.cpp zfree(sets); if (dstkey) { if (dbDelete(c->db,dstkey)) { @@ -881,6 +898,12 @@ void sinterGenericCommand(client *c, robj **setkeys, addReply(c,shared.emptyset[c->resp]); } return; +======= + /* A NULL is considered an empty set */ + empty += 1; + sets[j] = NULL; + continue; +>>>>>>> 6.2.6:src/t_set.c } if (checkType(c,setobj,OBJ_SET)) { zfree(sets); @@ -888,6 +911,24 @@ void sinterGenericCommand(client *c, robj **setkeys, } sets[j] = setobj; } + + /* Set intersection with an empty set always results in an empty set. + * Return ASAP if there is an empty set. 
*/ + if (empty > 0) { + zfree(sets); + if (dstkey) { + if (dbDelete(c->db,dstkey)) { + signalModifiedKey(c,c->db,dstkey); + notifyKeyspaceEvent(NOTIFY_GENERIC,"del",dstkey,c->db->id); + server.dirty++; + } + addReply(c,shared.czero); + } else { + addReply(c,shared.emptyset[c->resp]); + } + return; + } + /* Sort sets from the smallest to largest, this will improve our * algorithm's performance */ qsort(sets,setnum,sizeof(robj*),qsortCompareSetsByCardinality); @@ -981,10 +1022,12 @@ void sinterGenericCommand(client *c, robj **setkeys, zfree(sets); } +/* SINTER key [key ...] */ void sinterCommand(client *c) { sinterGenericCommand(c,c->argv+1,c->argc-1,NULL); } +/* SINTERSTORE destination key [key ...] */ void sinterstoreCommand(client *c) { sinterGenericCommand(c,c->argv+2,c->argc-2,c->argv[1]); } @@ -1154,18 +1197,22 @@ void sunionDiffGenericCommand(client *c, robj **setkeys, int setnum, zfree(sets); } +/* SUNION key [key ...] */ void sunionCommand(client *c) { sunionDiffGenericCommand(c,c->argv+1,c->argc-1,NULL,SET_OP_UNION); } +/* SUNIONSTORE destination key [key ...] */ void sunionstoreCommand(client *c) { sunionDiffGenericCommand(c,c->argv+2,c->argc-2,c->argv[1],SET_OP_UNION); } +/* SDIFF key [key ...] */ void sdiffCommand(client *c) { sunionDiffGenericCommand(c,c->argv+1,c->argc-1,NULL,SET_OP_DIFF); } +/* SDIFFSTORE destination key [key ...] */ void sdiffstoreCommand(client *c) { sunionDiffGenericCommand(c,c->argv+2,c->argc-2,c->argv[1],SET_OP_DIFF); } diff --git a/src/t_stream.cpp b/src/t_stream.cpp index d82044deb..5c46aa025 100644 --- a/src/t_stream.cpp +++ b/src/t_stream.cpp @@ -47,6 +47,12 @@ * setting stream_node_max_bytes to a huge number. */ #define STREAM_LISTPACK_MAX_PRE_ALLOCATE 4096 +/* Don't let listpacks grow too big, even if the user config allows it. + * doing so can lead to an overflow (trying to store more than 32bit length + * into the listpack header), or actually an assertion since lpInsert + * will return NULL. 
*/ +#define STREAM_LISTPACK_MAX_SIZE (1<<30) + void streamFreeCG(streamCG *cg); void streamFreeNACK(streamNACK *na); size_t streamReplyWithRangeFromConsumerPEL(client *c, stream *s, streamID *start, streamID *end, size_t count, streamConsumer *consumer); @@ -434,8 +440,11 @@ void streamGetEdgeID(stream *s, int first, streamID *edge_id) * * The function returns C_OK if the item was added, this is always true * if the ID was generated by the function. However the function may return - * C_ERR if an ID was given via 'use_id', but adding it failed since the - * current top ID is greater or equal. */ + * C_ERR in several cases: + * 1. If an ID was given via 'use_id', but adding it failed since the + * current top ID is greater or equal. errno will be set to EDOM. + * 2. If a size of a single element or the sum of the elements is too big to + * be stored into the stream. errno will be set to ERANGE. */ int streamAppendItem(stream *s, robj **argv, int64_t numfields, streamID *added_id, streamID *use_id) { /* Generate the new entry ID. */ @@ -449,7 +458,23 @@ int streamAppendItem(stream *s, robj **argv, int64_t numfields, streamID *added_ * or return an error. Automatically generated IDs might * overflow (and wrap-around) when incrementing the sequence part. */ - if (streamCompareID(&id,&s->last_id) <= 0) return C_ERR; + if (streamCompareID(&id,&s->last_id) <= 0) { + errno = EDOM; + return C_ERR; + } + + /* Avoid overflow when trying to add an element to the stream (listpack + * can only host up to 32bit length strings, and also a total listpack size + * can't be bigger than 32bit length. */ + size_t totelelen = 0; + for (int64_t i = 0; i < numfields*2; i++) { + sds ele = argv[i]->ptr; + totelelen += sdslen(ele); + } + if (totelelen > STREAM_LISTPACK_MAX_SIZE) { + errno = ERANGE; + return C_ERR; + } /* Add the new entry. 
*/ raxIterator ri; @@ -508,9 +533,16 @@ int streamAppendItem(stream *s, robj **argv, int64_t numfields, streamID *added_ * if we need to switch to the next one. 'lp' will be set to NULL if * the current node is full. */ if (lp != NULL) { +<<<<<<< HEAD:src/t_stream.cpp if (g_pserver->stream_node_max_bytes && lp_bytes >= g_pserver->stream_node_max_bytes) { +======= + size_t node_max_bytes = server.stream_node_max_bytes; + if (node_max_bytes == 0 || node_max_bytes > STREAM_LISTPACK_MAX_SIZE) + node_max_bytes = STREAM_LISTPACK_MAX_SIZE; + if (lp_bytes + totelelen >= node_max_bytes) { +>>>>>>> 6.2.6:src/t_stream.c lp = NULL; } else if (g_pserver->stream_node_max_entries) { unsigned char *lp_ele = lpFirst(lp); @@ -703,16 +735,16 @@ int64_t streamTrim(stream *s, streamAddTrimArgs *args) { int64_t deleted = 0; while (raxNext(&ri)) { - /* Check if we exceeded the amount of work we could do */ - if (limit && deleted >= limit) - break; - if (trim_strategy == TRIM_STRATEGY_MAXLEN && s->length <= maxlen) break; unsigned char *lp = (unsigned char*)ri.data, *p = lpFirst(lp); int64_t entries = lpGetInteger(p); + /* Check if we exceeded the amount of work we could do */ + if (limit && (deleted + entries) > limit) + break; + /* Check if we can remove the whole node. */ int remove_node; streamID master_id = {0}; /* For MINID */ @@ -1799,11 +1831,13 @@ void xaddCommand(client *c) { /* Append using the low level function and return the ID. */ streamID id; if (streamAppendItem(s,c->argv+field_pos,(c->argc-field_pos)/2, - &id, parsed_args.id_given ? &parsed_args.id : NULL) - == C_ERR) + &id, parsed_args.id_given ? 
&parsed_args.id : NULL) == C_ERR) { - addReplyError(c,"The ID specified in XADD is equal or smaller than the " - "target stream top item"); + if (errno == EDOM) + addReplyError(c,"The ID specified in XADD is equal or smaller than " + "the target stream top item"); + else + addReplyError(c,"Elements are too large to be stored"); return; } addReplyStreamID(c,&id); @@ -3241,7 +3275,7 @@ void xtrimCommand(client *c) { /* Argument parsing. */ streamAddTrimArgs parsed_args; - if (streamParseAddOrTrimArgsOrReply(c, &parsed_args, 1) < 0) + if (streamParseAddOrTrimArgsOrReply(c, &parsed_args, 0) < 0) return; /* streamParseAddOrTrimArgsOrReply already replied. */ /* If the key does not exist, we are ok returning zero, that is, the @@ -3570,7 +3604,7 @@ int streamValidateListpackIntegrity(unsigned char *lp, size_t size, int deep) { /* In non-deep mode we just validated the listpack header (encoded size) */ if (!deep) return 1; - next = p = lpFirst(lp); + next = p = lpValidateFirst(lp); if (!lpValidateNext(lp, &next, size)) return 0; if (!p) return 0; @@ -3608,7 +3642,11 @@ int streamValidateListpackIntegrity(unsigned char *lp, size_t size, int deep) { p = next; if (!lpValidateNext(lp, &next, size)) return 0; /* entry id */ + lpGetIntegerIfValid(p, &valid_record); + if (!valid_record) return 0; p = next; if (!lpValidateNext(lp, &next, size)) return 0; + lpGetIntegerIfValid(p, &valid_record); + if (!valid_record) return 0; p = next; if (!lpValidateNext(lp, &next, size)) return 0; if (!(flags & STREAM_ITEM_FLAG_SAMEFIELDS)) { diff --git a/src/t_string.cpp b/src/t_string.cpp index c1f2e134a..57469a1ab 100644 --- a/src/t_string.cpp +++ b/src/t_string.cpp @@ -800,8 +800,17 @@ void stralgoLCS(client *c) { goto cleanup; } +<<<<<<< HEAD:src/t_string.cpp { // Scope variables below for the goto +======= + /* Detect string truncation or later overflows. 
*/ + if (sdslen(a) >= UINT32_MAX-1 || sdslen(b) >= UINT32_MAX-1) { + addReplyError(c, "String too long for LCS"); + goto cleanup; + } + +>>>>>>> 6.2.6:src/t_string.c /* Compute the LCS using the vanilla dynamic programming technique of * building a table of LCS(x,y) substrings. */ uint32_t alen = sdslen(a); @@ -810,9 +819,23 @@ void stralgoLCS(client *c) { /* Setup an uint32_t array to store at LCS[i,j] the length of the * LCS A0..i-1, B0..j-1. Note that we have a linear array here, so * we index it as LCS[j+(blen+1)*j] */ +<<<<<<< HEAD:src/t_string.cpp uint32_t *lcs = (uint32_t*)zmalloc((size_t)(alen+1)*(blen+1)*sizeof(uint32_t)); +======= +>>>>>>> 6.2.6:src/t_string.c #define LCS(A,B) lcs[(B)+((A)*(blen+1))] + /* Try to allocate the LCS table, and abort on overflow or insufficient memory. */ + unsigned long long lcssize = (unsigned long long)(alen+1)*(blen+1); /* Can't overflow due to the size limits above. */ + unsigned long long lcsalloc = lcssize * sizeof(uint32_t); + uint32_t *lcs = NULL; + if (lcsalloc < SIZE_MAX && lcsalloc / lcssize == sizeof(uint32_t)) + lcs = ztrymalloc(lcsalloc); + if (!lcs) { + addReplyError(c, "Insufficient memory"); + goto cleanup; + } + /* Start building the LCS table. */ for (uint32_t i = 0; i <= alen; i++) { for (uint32_t j = 0; j <= blen; j++) { diff --git a/src/t_zset.cpp b/src/t_zset.cpp index ba87f2f1a..0a3d8de4d 100644 --- a/src/t_zset.cpp +++ b/src/t_zset.cpp @@ -1242,15 +1242,24 @@ void zsetConvert(robj *zobj, int encoding) { } /* Convert the sorted set object into a ziplist if it is not already a ziplist - * and if the number of elements and the maximum element size is within the - * expected ranges. */ -void zsetConvertToZiplistIfNeeded(robj *zobj, size_t maxelelen) { + * and if the number of elements and the maximum element size and total elements size + * are within the expected ranges. 
*/ +void zsetConvertToZiplistIfNeeded(robj *zobj, size_t maxelelen, size_t totelelen) { if (zobj->encoding == OBJ_ENCODING_ZIPLIST) return; zset *set = (zset*)zobj->m_ptr; +<<<<<<< HEAD:src/t_zset.cpp if (set->zsl->length <= g_pserver->zset_max_ziplist_entries && maxelelen <= g_pserver->zset_max_ziplist_value) zsetConvert(zobj,OBJ_ENCODING_ZIPLIST); +======= + if (zset->zsl->length <= server.zset_max_ziplist_entries && + maxelelen <= server.zset_max_ziplist_value && + ziplistSafeToAdd(NULL, totelelen)) + { + zsetConvert(zobj,OBJ_ENCODING_ZIPLIST); + } +>>>>>>> 6.2.6:src/t_zset.c } /* Return (by reference) the score of the specified member of the sorted set @@ -1370,21 +1379,40 @@ int zsetAdd(robj *zobj, double score, sds ele, int in_flags, int *out_flags, dou } return 1; } else if (!xx) { - /* Optimize: check if the element is too large or the list + /* check if the element is too large or the list * becomes too long *before* executing zzlInsert. */ +<<<<<<< HEAD:src/t_zset.cpp zobj->m_ptr = zzlInsert((unsigned char*)zobj->m_ptr,ele,score); if (zzlLength((unsigned char*)zobj->m_ptr) > g_pserver->zset_max_ziplist_entries || sdslen(ele) > g_pserver->zset_max_ziplist_value) +======= + if (zzlLength(zobj->ptr)+1 > server.zset_max_ziplist_entries || + sdslen(ele) > server.zset_max_ziplist_value || + !ziplistSafeToAdd(zobj->ptr, sdslen(ele))) + { +>>>>>>> 6.2.6:src/t_zset.c zsetConvert(zobj,OBJ_ENCODING_SKIPLIST); - if (newscore) *newscore = score; - *out_flags |= ZADD_OUT_ADDED; - return 1; + } else { + zobj->ptr = zzlInsert(zobj->ptr,ele,score); + if (newscore) *newscore = score; + *out_flags |= ZADD_OUT_ADDED; + return 1; + } } else { *out_flags |= ZADD_OUT_NOP; return 1; } +<<<<<<< HEAD:src/t_zset.cpp } else if (zobj->encoding == OBJ_ENCODING_SKIPLIST) { zset *zs = (zset*)zobj->m_ptr; +======= + } + + /* Note that the above block handling ziplist would have either returned or + * converted the key to skiplist. 
*/ + if (zobj->encoding == OBJ_ENCODING_SKIPLIST) { + zset *zs = zobj->ptr; +>>>>>>> 6.2.6:src/t_zset.c zskiplistNode *znode; dictEntry *de; @@ -2360,7 +2388,7 @@ inline static void zunionInterAggregate(double *target, double val, int aggregat } } -static int zsetDictGetMaxElementLength(dict *d) { +static size_t zsetDictGetMaxElementLength(dict *d, size_t *totallen) { dictIterator *di; dictEntry *de; size_t maxelelen = 0; @@ -2370,6 +2398,8 @@ static int zsetDictGetMaxElementLength(dict *d) { while((de = dictNext(di)) != NULL) { sds ele = (sds)dictGetKey(de); if (sdslen(ele) > maxelelen) maxelelen = sdslen(ele); + if (totallen) + (*totallen) += sdslen(ele); } dictReleaseIterator(di); @@ -2377,7 +2407,7 @@ static int zsetDictGetMaxElementLength(dict *d) { return maxelelen; } -static void zdiffAlgorithm1(zsetopsrc *src, long setnum, zset *dstzset, size_t *maxelelen) { +static void zdiffAlgorithm1(zsetopsrc *src, long setnum, zset *dstzset, size_t *maxelelen, size_t *totelelen) { /* DIFF Algorithm 1: * * We perform the diff by iterating all the elements of the first set, @@ -2425,13 +2455,14 @@ static void zdiffAlgorithm1(zsetopsrc *src, long setnum, zset *dstzset, size_t * znode = zslInsert(dstzset->zsl,zval.score,tmp); dictAdd(dstzset->dict,tmp,&znode->score); if (sdslen(tmp) > *maxelelen) *maxelelen = sdslen(tmp); + (*totelelen) += sdslen(tmp); } } zuiClearIterator(&src[0]); } -static void zdiffAlgorithm2(zsetopsrc *src, long setnum, zset *dstzset, size_t *maxelelen) { +static void zdiffAlgorithm2(zsetopsrc *src, long setnum, zset *dstzset, size_t *maxelelen, size_t *totelelen) { /* DIFF Algorithm 2: * * Add all the elements of the first set to the auxiliary set. @@ -2485,7 +2516,7 @@ static void zdiffAlgorithm2(zsetopsrc *src, long setnum, zset *dstzset, size_t * /* Using this algorithm, we can't calculate the max element as we go, * we have to iterate through all elements to find the max one after. 
*/ - *maxelelen = zsetDictGetMaxElementLength(dstzset->dict); + *maxelelen = zsetDictGetMaxElementLength(dstzset->dict, totelelen); } static int zsetChooseDiffAlgorithm(zsetopsrc *src, long setnum) { @@ -2522,14 +2553,14 @@ static int zsetChooseDiffAlgorithm(zsetopsrc *src, long setnum) { return (algo_one_work <= algo_two_work) ? 1 : 2; } -static void zdiff(zsetopsrc *src, long setnum, zset *dstzset, size_t *maxelelen) { +static void zdiff(zsetopsrc *src, long setnum, zset *dstzset, size_t *maxelelen, size_t *totelelen) { /* Skip everything if the smallest input is empty. */ if (zuiLength(&src[0]) > 0) { int diff_algo = zsetChooseDiffAlgorithm(src, setnum); if (diff_algo == 1) { - zdiffAlgorithm1(src, setnum, dstzset, maxelelen); + zdiffAlgorithm1(src, setnum, dstzset, maxelelen, totelelen); } else if (diff_algo == 2) { - zdiffAlgorithm2(src, setnum, dstzset, maxelelen); + zdiffAlgorithm2(src, setnum, dstzset, maxelelen, totelelen); } else if (diff_algo != 0) { serverPanic("Unknown algorithm"); } @@ -2564,7 +2595,7 @@ void zunionInterDiffGenericCommand(client *c, robj *dstkey, int numkeysIndex, in zsetopsrc *src; zsetopval zval; sds tmp; - size_t maxelelen = 0; + size_t maxelelen = 0, totelelen = 0; robj *dstobj; zset *dstzset; zskiplistNode *znode; @@ -2700,6 +2731,7 @@ void zunionInterDiffGenericCommand(client *c, robj *dstkey, int numkeysIndex, in tmp = zuiNewSdsFromValue(&zval); znode = zslInsert(dstzset->zsl,score,tmp); dictAdd(dstzset->dict,tmp,&znode->score); + totelelen += sdslen(tmp); if (sdslen(tmp) > maxelelen) maxelelen = sdslen(tmp); } } @@ -2736,6 +2768,7 @@ void zunionInterDiffGenericCommand(client *c, robj *dstkey, int numkeysIndex, in /* Remember the longest single element encountered, * to understand if it's possible to convert to ziplist * at the end. */ + totelelen += sdslen(tmp); if (sdslen(tmp) > maxelelen) maxelelen = sdslen(tmp); /* Update the element with its initial score. 
*/ dictSetKey(accumulator, de, tmp); @@ -2770,14 +2803,14 @@ void zunionInterDiffGenericCommand(client *c, robj *dstkey, int numkeysIndex, in dictReleaseIterator(di); dictRelease(accumulator); } else if (op == SET_OP_DIFF) { - zdiff(src, setnum, dstzset, &maxelelen); + zdiff(src, setnum, dstzset, &maxelelen, &totelelen); } else { serverPanic("Unknown operator"); } if (dstkey) { if (dstzset->zsl->length) { - zsetConvertToZiplistIfNeeded(dstobj, maxelelen); + zsetConvertToZiplistIfNeeded(dstobj, maxelelen, totelelen); setKey(c, c->db, dstkey, dstobj); addReplyLongLong(c, zsetLength(dstobj)); notifyKeyspaceEvent(NOTIFY_ZSET, @@ -3661,8 +3694,18 @@ void zrangeGenericCommand(zrange_result_handler *handler, int argc_start, int st zobj = handler->dstkey ? lookupKeyWrite(c->db,key) : lookupKeyRead(c->db,key); +<<<<<<< HEAD:src/t_zset.cpp if (zobj == nullptr) { addReply(c,shared.emptyarray); +======= + if (zobj == NULL) { + if (store) { + handler->beginResultEmission(handler); + handler->finalizeResultEmission(handler, 0); + } else { + addReply(c, shared.emptyarray); + } +>>>>>>> 6.2.6:src/t_zset.c goto cleanup; } @@ -3818,11 +3861,16 @@ void genericZpopCommand(client *c, robj **keyv, int keyc, int where, int emitkey } void *arraylen_ptr = addReplyDeferredLen(c); - long arraylen = 0; + long result_count = 0; /* We emit the key only for the blocking variant. */ if (emitkey) addReplyBulk(c,key); + /* Respond with a single (flat) array in RESP2 or if countarg is not + * provided (returning a single element). In RESP3, when countarg is + * provided, use nested array. */ + int use_nested_array = c->resp > 2 && countarg != NULL; + /* Remove the element. */ do { if (zobj->encoding == OBJ_ENCODING_ZIPLIST) { @@ -3865,16 +3913,24 @@ void genericZpopCommand(client *c, robj **keyv, int keyc, int where, int emitkey serverAssertWithInfo(c,zobj,zsetDel(zobj,ele)); g_pserver->dirty++; +<<<<<<< HEAD:src/t_zset.cpp if (arraylen == 0) { /* Do this only for the first iteration. 
*/ const char *events[2] = {"zpopmin","zpopmax"}; +======= + if (result_count == 0) { /* Do this only for the first iteration. */ + char *events[2] = {"zpopmin","zpopmax"}; +>>>>>>> 6.2.6:src/t_zset.c notifyKeyspaceEvent(NOTIFY_ZSET,events[where],key,c->db->id); signalModifiedKey(c,c->db,key); } + if (use_nested_array) { + addReplyArrayLen(c,2); + } addReplyBulkCBuffer(c,ele,sdslen(ele)); addReplyDouble(c,score); sdsfree(ele); - arraylen += 2; + ++result_count; /* Remove the key, if indeed needed. */ if (zsetLength(zobj) == 0) { @@ -3884,7 +3940,10 @@ void genericZpopCommand(client *c, robj **keyv, int keyc, int where, int emitkey } } while(--count); - setDeferredArrayLen(c,arraylen_ptr,arraylen + (emitkey != 0)); + if (!use_nested_array) { + result_count *= 2; + } + setDeferredArrayLen(c,arraylen_ptr,result_count + (emitkey != 0)); } /* ZPOPMIN key [] */ @@ -3985,8 +4044,13 @@ void zrandmemberWithCountCommand(client *c, long l, int withscores) { int uniq = 1; robj_roptr zsetobj; +<<<<<<< HEAD:src/t_zset.cpp if ((zsetobj = lookupKeyReadOrReply(c, c->argv[1], shared.null[c->resp])) == nullptr || checkType(c, zsetobj, OBJ_ZSET)) return; +======= + if ((zsetobj = lookupKeyReadOrReply(c, c->argv[1], shared.emptyarray)) + == NULL || checkType(c, zsetobj, OBJ_ZSET)) return; +>>>>>>> 6.2.6:src/t_zset.c size = zsetLength(zsetobj); if(l >= 0) { @@ -4021,7 +4085,7 @@ void zrandmemberWithCountCommand(client *c, long l, int withscores) { addReplyArrayLen(c,2); addReplyBulkCBuffer(c, key, sdslen(key)); if (withscores) - addReplyDouble(c, dictGetDoubleVal(de)); + addReplyDouble(c, *(double*)dictGetVal(de)); } } else if (zsetobj->encoding == OBJ_ENCODING_ZIPLIST) { ziplistEntry *keys, *vals = NULL; @@ -4170,7 +4234,7 @@ void zrandmemberWithCountCommand(client *c, long l, int withscores) { } } -/* ZRANDMEMBER [ WITHSCORES] */ +/* ZRANDMEMBER key [ [WITHSCORES]] */ void zrandmemberCommand(client *c) { long l; int withscores = 0; diff --git a/src/tls.cpp b/src/tls.cpp index 
0315962e5..61d01fff1 100644 --- a/src/tls.cpp +++ b/src/tls.cpp @@ -185,7 +185,8 @@ void tlsCleanup(void) { redis_tls_client_ctx = NULL; } - #if OPENSSL_VERSION_NUMBER >= 0x10100000L + #if OPENSSL_VERSION_NUMBER >= 0x10100000L && !defined(LIBRESSL_VERSION_NUMBER) + // unavailable on LibreSSL OPENSSL_cleanup(); #endif } diff --git a/src/tracking.cpp b/src/tracking.cpp index bdcf1c6cf..952f2a395 100644 --- a/src/tracking.cpp +++ b/src/tracking.cpp @@ -330,7 +330,7 @@ void trackingRememberKeyToBroadcast(client *c, char *keyname, size_t keylen) { * tree. This way we know who was the client that did the last * change to the key, and can avoid sending the notification in the * case the client is in NOLOOP mode. */ - raxTryInsert(bs->keys,(unsigned char*)keyname,keylen,c,NULL); + raxInsert(bs->keys,(unsigned char*)keyname,keylen,c,NULL); } raxStop(&ri); } diff --git a/src/version.h b/src/version.h index e59683759..7fbac0e1b 100644 --- a/src/version.h +++ b/src/version.h @@ -1,4 +1,9 @@ +<<<<<<< HEAD #define KEYDB_REAL_VERSION "255.255.255" #define KEYDB_VERSION_NUM 0x00ffffff extern const char *KEYDB_SET_VERSION; // Unlike real version, this can be overriden by the config +======= +#define REDIS_VERSION "6.2.6" +#define REDIS_VERSION_NUM 0x00060206 +>>>>>>> 6.2.6 diff --git a/src/ziplist.c b/src/ziplist.c index 726973f0d..b56886660 100644 --- a/src/ziplist.c +++ b/src/ziplist.c @@ -263,10 +263,21 @@ * to stay there to signal that a full scan is needed to get the number of * items inside the ziplist. */ #define ZIPLIST_INCR_LENGTH(zl,incr) { \ - if (ZIPLIST_LENGTH(zl) < UINT16_MAX) \ + if (intrev16ifbe(ZIPLIST_LENGTH(zl)) < UINT16_MAX) \ ZIPLIST_LENGTH(zl) = intrev16ifbe(intrev16ifbe(ZIPLIST_LENGTH(zl))+incr); \ } +/* Don't let ziplists grow over 1GB in any case, don't wanna risk overflow in + * zlbytes*/ +#define ZIPLIST_MAX_SAFETY_SIZE (1<<30) +int ziplistSafeToAdd(unsigned char* zl, size_t add) { + size_t len = zl? 
ziplistBlobLen(zl): 0; + if (len + add > ZIPLIST_MAX_SAFETY_SIZE) + return 0; + return 1; +} + + /* We use this function to receive information about a ziplist entry. * Note that this is not how the data is actually encoded, is just what we * get filled by a function in order to operate more easily. */ @@ -709,8 +720,14 @@ unsigned char *ziplistNew(void) { } /* Resize the ziplist. */ +<<<<<<< HEAD unsigned char *ziplistResize(unsigned char *zl, unsigned int len) { zl = zrealloc(zl,len, MALLOC_SHARED); +======= +unsigned char *ziplistResize(unsigned char *zl, size_t len) { + assert(len < UINT32_MAX); + zl = zrealloc(zl,len); +>>>>>>> 6.2.6 ZIPLIST_BYTES(zl) = intrev32ifbe(len); zl[len-1] = ZIP_END; return zl; @@ -1070,6 +1087,9 @@ unsigned char *ziplistMerge(unsigned char **first, unsigned char **second) { /* Combined zl length should be limited within UINT16_MAX */ zllength = zllength < UINT16_MAX ? zllength : UINT16_MAX; + /* larger values can't be stored into ZIPLIST_BYTES */ + assert(zlbytes < UINT32_MAX); + /* Save offset positions before we start ripping memory apart. */ size_t first_offset = intrev32ifbe(ZIPLIST_TAIL_OFFSET(*first)); size_t second_offset = intrev32ifbe(ZIPLIST_TAIL_OFFSET(*second)); @@ -1522,8 +1542,12 @@ int ziplistValidateIntegrity(unsigned char *zl, size_t size, int deep, count++; } + /* Make sure 'p' really does point to the end of the ziplist. */ + if (p != zl + bytes - ZIPLIST_END_SIZE) + return 0; + /* Make sure the entry really do point to the start of the last entry. 
*/ - if (prev != ZIPLIST_ENTRY_TAIL(zl)) + if (prev != NULL && prev != ZIPLIST_ENTRY_TAIL(zl)) return 0; /* Check that the count in the header is correct */ diff --git a/src/ziplist.h b/src/ziplist.h index 1e4a4a69b..08a27178e 100644 --- a/src/ziplist.h +++ b/src/ziplist.h @@ -69,6 +69,7 @@ int ziplistValidateIntegrity(unsigned char *zl, size_t size, int deep, void ziplistRandomPair(unsigned char *zl, unsigned long total_count, ziplistEntry *key, ziplistEntry *val); void ziplistRandomPairs(unsigned char *zl, unsigned int count, ziplistEntry *keys, ziplistEntry *vals); unsigned int ziplistRandomPairsUnique(unsigned char *zl, unsigned int count, ziplistEntry *keys, ziplistEntry *vals); +int ziplistSafeToAdd(unsigned char* zl, size_t add); #ifdef REDIS_TEST int ziplistTest(int argc, char *argv[], int accurate); diff --git a/src/zipmap.c b/src/zipmap.c index 8d79e8c71..d0b41baa2 100644 --- a/src/zipmap.c +++ b/src/zipmap.c @@ -430,6 +430,9 @@ int zipmapValidateIntegrity(unsigned char *zm, size_t size, int deep) { return 0; } + /* check that the zipmap is not empty. */ + if (count == 0) return 0; + /* check that the count in the header is correct */ if (zm[0] != ZIPMAP_BIGLEN && zm[0] != count) return 0; diff --git a/tests/assets/corrupt_empty_keys.rdb b/tests/assets/corrupt_empty_keys.rdb new file mode 100644 index 000000000..8f260d493 Binary files /dev/null and b/tests/assets/corrupt_empty_keys.rdb differ diff --git a/tests/helpers/bg_block_op.tcl b/tests/helpers/bg_block_op.tcl index f76c22381..e3eb94dd8 100644 --- a/tests/helpers/bg_block_op.tcl +++ b/tests/helpers/bg_block_op.tcl @@ -12,6 +12,7 @@ set ::tlsdir "tests/tls" # blocking. 
proc bg_block_op {host port db ops tls} { set r [redis $host $port 0 $tls] + $r client setname LOAD_HANDLER $r select $db for {set j 0} {$j < $ops} {incr j} { diff --git a/tests/helpers/bg_complex_data.tcl b/tests/helpers/bg_complex_data.tcl index 606ed8f8b..ba1a4e832 100644 --- a/tests/helpers/bg_complex_data.tcl +++ b/tests/helpers/bg_complex_data.tcl @@ -5,6 +5,7 @@ set ::tlsdir "tests/tls" proc bg_complex_data {host port db ops tls} { set r [redis $host $port 0 $tls] + $r client setname LOAD_HANDLER $r select $db createComplexDataset $r $ops } diff --git a/tests/helpers/fake_redis_node.tcl b/tests/helpers/fake_redis_node.tcl new file mode 100644 index 000000000..a12d87fed --- /dev/null +++ b/tests/helpers/fake_redis_node.tcl @@ -0,0 +1,58 @@ +# A fake Redis node for replaying predefined/expected traffic with a client. +# +# Usage: tclsh fake_redis_node.tcl PORT COMMAND REPLY [ COMMAND REPLY [ ... ] ] +# +# Commands are given as space-separated strings, e.g. "GET foo", and replies as +# RESP-encoded replies minus the trailing \r\n, e.g. "+OK". + +set port [lindex $argv 0]; +set expected_traffic [lrange $argv 1 end]; + +# Reads and parses a command from a socket and returns it as a space-separated +# string, e.g. "set foo bar". 
+proc read_command {sock} { + set char [read $sock 1] + switch $char { + * { + set numargs [gets $sock] + set result {} + for {set i 0} {$i<$numargs} {incr i} { + read $sock 1; # dollar sign + set len [gets $sock] + set str [read $sock $len] + gets $sock; # trailing \r\n + lappend result $str + } + return $result + } + {} { + # EOF + return {} + } + default { + # Non-RESP command + set rest [gets $sock] + return "$char$rest" + } + } +} + +proc accept {sock host port} { + global expected_traffic + foreach {expect_cmd reply} $expected_traffic { + if {[eof $sock]} {break} + set cmd [read_command $sock] + if {[string equal -nocase $cmd $expect_cmd]} { + puts $sock $reply + flush $sock + } else { + puts $sock "-ERR unexpected command $cmd" + break + } + } + close $sock +} + +socket -server accept $port +after 5000 set done timeout +vwait done diff --git a/tests/helpers/gen_write_load.tcl b/tests/helpers/gen_write_load.tcl index 200026af6..a3509849f 100644 --- a/tests/helpers/gen_write_load.tcl +++ b/tests/helpers/gen_write_load.tcl @@ -5,6 +5,7 @@ set ::tlsdir "tests/tls" proc gen_write_load {host port seconds tls} { set start_time [clock seconds] set r [redis $host $port 1 $tls] + $r client setname LOAD_HANDLER $r select 9 while 1 { $r set [expr rand()] [expr rand()] diff --git a/tests/integration/block-repl.tcl b/tests/integration/block-repl.tcl index 07eceb228..7c2ba840d 100644 --- a/tests/integration/block-repl.tcl +++ b/tests/integration/block-repl.tcl @@ -33,14 +33,9 @@ start_server {tags {"repl"}} { stop_bg_block_op $load_handle0 stop_bg_block_op $load_handle1 stop_bg_block_op $load_handle2 - set retry 10 - while {$retry && ([$master debug digest] ne [$slave debug digest])}\ - { - after 1000 - incr retry -1 - } - - if {[$master debug digest] ne [$slave debug digest]} { + wait_for_condition 100 100 { + [$master debug digest] == [$slave debug digest] + } else { set csv1 [csvdump r] set csv2 [csvdump {r -1}] set fd [open /tmp/repldump1.txt w] @@ -49,10 +44,8 @@ 
start_server {tags {"repl"}} { set fd [open /tmp/repldump2.txt w] puts -nonewline $fd $csv2 close $fd - puts "Master - Replica inconsistency" - puts "Run diff -u against /tmp/repldump*.txt for more info" + fail "Master - Replica inconsistency, Run diff -u against /tmp/repldump*.txt for more info" } - assert_equal [r debug digest] [r -1 debug digest] } } } diff --git a/tests/integration/corrupt-dump-fuzzer.tcl b/tests/integration/corrupt-dump-fuzzer.tcl index 4fb503b8e..86cd8121b 100644 --- a/tests/integration/corrupt-dump-fuzzer.tcl +++ b/tests/integration/corrupt-dump-fuzzer.tcl @@ -90,6 +90,7 @@ foreach sanitize_dump {no yes} { r debug set-skip-checksum-validation 1 set start_time [clock seconds] generate_types + set dbsize [r dbsize] r save set cycle 0 set stat_terminated_in_restore 0 @@ -117,7 +118,7 @@ foreach sanitize_dump {no yes} { set report_and_restart true incr stat_terminated_in_restore write_log_line 0 "corrupt payload: $printable_dump" - if {$sanitize_dump == 1} { + if {$sanitize_dump == yes} { puts "Server crashed in RESTORE with payload: $printable_dump" } } @@ -133,6 +134,12 @@ foreach sanitize_dump {no yes} { set sent [generate_fuzzy_traffic_on_key "_$k" 1] ;# traffic for 1 second incr stat_traffic_commands_sent [llength $sent] r del "_$k" ;# in case the server terminated, here's where we'll detect it. 
+ if {$dbsize != [r dbsize]} { + puts "unexpected keys" + puts "keys: [r keys *]" + puts $sent + exit 1 + } } err ] } { # if the server terminated update stats and restart it set report_and_restart true @@ -140,7 +147,7 @@ foreach sanitize_dump {no yes} { set by_signal [count_log_message 0 "crashed by signal"] incr stat_terminated_by_signal $by_signal - if {$by_signal != 0 || $sanitize_dump == 1 } { + if {$by_signal != 0 || $sanitize_dump == yes} { puts "Server crashed (by signal: $by_signal), with payload: $printable_dump" set print_commands true } @@ -186,7 +193,7 @@ foreach sanitize_dump {no yes} { } } # if we run sanitization we never expect the server to crash at runtime - if { $sanitize_dump == 1} { + if {$sanitize_dump == yes} { assert_equal $stat_terminated_in_restore 0 assert_equal $stat_terminated_in_traffic 0 } diff --git a/tests/integration/corrupt-dump.tcl b/tests/integration/corrupt-dump.tcl index fe2537b03..fbdd3c373 100644 --- a/tests/integration/corrupt-dump.tcl +++ b/tests/integration/corrupt-dump.tcl @@ -118,6 +118,16 @@ test {corrupt payload: #3080 - quicklist} { } } +test {corrupt payload: quicklist with empty ziplist} { + start_server [list overrides [list loglevel verbose use-exit-on-panic yes crash-memcheck-enabled no] ] { + r config set sanitize-dump-payload no + r debug set-skip-checksum-validation 1 + catch {r restore key 0 "\x0E\x01\x0B\x0B\x00\x00\x00\x0A\x00\x00\x00\x00\x00\xFF\x09\x00\xC2\x69\x37\x83\x3C\x7F\xFE\x6F" replace} err + assert_match "*Bad data format*" $err + r ping + } +} + test {corrupt payload: #3080 - ziplist} { start_server [list overrides [list loglevel verbose use-exit-on-panic yes crash-memcheck-enabled no] ] { # shallow sanitization is enough for restore to safely reject the payload with wrong size @@ -148,6 +158,27 @@ test {corrupt payload: load corrupted rdb with no CRC - #3505} { kill_server $srv ;# let valgrind look for issues } +foreach sanitize_dump {no yes} { + test {corrupt payload: load corrupted rdb with 
empty keys} { + set server_path [tmpdir "server.rdb-corruption-empty-keys-test"] + exec cp tests/assets/corrupt_empty_keys.rdb $server_path + start_server [list overrides [list "dir" $server_path "dbfilename" "corrupt_empty_keys.rdb" "sanitize-dump-payload" $sanitize_dump]] { + r select 0 + assert_equal [r dbsize] 0 + + verify_log_message 0 "*skipping empty key: set*" 0 + verify_log_message 0 "*skipping empty key: list_quicklist*" 0 + verify_log_message 0 "*skipping empty key: list_quicklist_empty_ziplist*" 0 + verify_log_message 0 "*skipping empty key: list_ziplist*" 0 + verify_log_message 0 "*skipping empty key: hash*" 0 + verify_log_message 0 "*skipping empty key: hash_ziplist*" 0 + verify_log_message 0 "*skipping empty key: zset*" 0 + verify_log_message 0 "*skipping empty key: zset_ziplist*" 0 + verify_log_message 0 "*empty keys skipped: 8*" 0 + } + } +} + test {corrupt payload: listpack invalid size header} { start_server [list overrides [list loglevel verbose use-exit-on-panic yes crash-memcheck-enabled no] ] { r config set sanitize-dump-payload no @@ -224,6 +255,16 @@ test {corrupt payload: hash dupliacte records} { } } +test {corrupt payload: hash empty zipmap} { + start_server [list overrides [list loglevel verbose use-exit-on-panic yes crash-memcheck-enabled no] ] { + r config set sanitize-dump-payload no + r debug set-skip-checksum-validation 1 + catch { r RESTORE _hash 0 "\x09\x02\x00\xFF\x09\x00\xC0\xF1\xB8\x67\x4C\x16\xAC\xE3" } err + assert_match "*Bad data format*" $err + verify_log_message 0 "*Zipmap integrity check failed*" 0 + } +} + test {corrupt payload: fuzzer findings - NPD in streamIteratorGetID} { start_server [list overrides [list loglevel verbose use-exit-on-panic yes crash-memcheck-enabled no] ] { r config set sanitize-dump-payload no @@ -328,12 +369,13 @@ test {corrupt payload: fuzzer findings - leak in rdbloading due to dup entry in } } -test {corrupt payload: fuzzer findings - empty intset div by zero} { +test {corrupt payload: fuzzer 
findings - empty intset} { start_server [list overrides [list loglevel verbose use-exit-on-panic yes crash-memcheck-enabled no] ] { r config set sanitize-dump-payload no r debug set-skip-checksum-validation 1 - r RESTORE _setbig 0 "\x02\xC0\xC0\x06\x02\x5F\x39\xC0\x02\x02\x5F\x33\xC0\x00\x02\x5F\x31\xC0\x04\xC0\x08\x02\x5F\x37\x02\x5F\x35\x09\x00\xC5\xD4\x6D\xBA\xAD\x14\xB7\xE7" - catch {r SRANDMEMBER _setbig } + catch {r RESTORE _setbig 0 "\x02\xC0\xC0\x06\x02\x5F\x39\xC0\x02\x02\x5F\x33\xC0\x00\x02\x5F\x31\xC0\x04\xC0\x08\x02\x5F\x37\x02\x5F\x35\x09\x00\xC5\xD4\x6D\xBA\xAD\x14\xB7\xE7"} err + assert_match "*Bad data format*" $err + r ping } } @@ -507,14 +549,13 @@ test {corrupt payload: fuzzer findings - valgrind invalid read} { } } -test {corrupt payload: fuzzer findings - HRANDFIELD on bad ziplist} { +test {corrupt payload: fuzzer findings - empty hash ziplist} { start_server [list overrides [list loglevel verbose use-exit-on-panic yes crash-memcheck-enabled no] ] { r config set sanitize-dump-payload yes r debug set-skip-checksum-validation 1 - r RESTORE _int 0 "\x04\xC0\x01\x09\x00\xF6\x8A\xB6\x7A\x85\x87\x72\x4D" - catch {r HRANDFIELD _int} - assert_equal [count_log_message 0 "crashed by signal"] 0 - assert_equal [count_log_message 0 "ASSERTION FAILED"] 1 + catch {r RESTORE _int 0 "\x04\xC0\x01\x09\x00\xF6\x8A\xB6\x7A\x85\x87\x72\x4D"} err + assert_match "*Bad data format*" $err + r ping } } @@ -529,5 +570,133 @@ test {corrupt payload: fuzzer findings - stream with no records} { } } +test {corrupt payload: fuzzer findings - quicklist ziplist tail followed by extra data which start with 0xff} { + start_server [list overrides [list loglevel verbose use-exit-on-panic yes crash-memcheck-enabled no] ] { + r config set sanitize-dump-payload yes + r debug set-skip-checksum-validation 1 + catch { + r restore key 0 "\x0E\x01\x11\x11\x00\x00\x00\x0A\x00\x00\x00\x01\x00\x00\xF6\xFF\xB0\x6C\x9C\xFF\x09\x00\x9C\x37\x47\x49\x4D\xDE\x94\xF5" replace + } err + assert_match 
"*Bad data format*" $err + verify_log_message 0 "*integrity check failed*" 0 + } +} + +test {corrupt payload: fuzzer findings - dict init to huge size} { + start_server [list overrides [list loglevel verbose use-exit-on-panic yes crash-memcheck-enabled no] ] { + r config set sanitize-dump-payload no + r debug set-skip-checksum-validation 1 + catch {r restore key 0 "\x02\x81\xC0\x00\x02\x5F\x31\xC0\x02\x09\x00\xB2\x1B\xE5\x17\x2E\x15\xF4\x6C" replace} err + assert_match "*Bad data format*" $err + r ping + } +} + +test {corrupt payload: fuzzer findings - huge string} { + start_server [list overrides [list loglevel verbose use-exit-on-panic yes crash-memcheck-enabled no] ] { + r config set sanitize-dump-payload yes + r debug set-skip-checksum-validation 1 + catch {r restore key 0 "\x00\x81\x01\x09\x00\xF6\x2B\xB6\x7A\x85\x87\x72\x4D"} err + assert_match "*Bad data format*" $err + r ping + } +} + +test {corrupt payload: fuzzer findings - stream PEL without consumer} { + start_server [list overrides [list loglevel verbose use-exit-on-panic yes crash-memcheck-enabled no] ] { + r config set sanitize-dump-payload yes + r debug set-skip-checksum-validation 1 + catch {r restore _stream 0 "\x0F\x01\x10\x00\x00\x01\x7B\x08\xF0\xB2\x34\x00\x00\x00\x00\x00\x00\x00\x00\xC3\x3B\x40\x42\x19\x42\x00\x00\x00\x18\x00\x02\x01\x01\x01\x02\x01\x84\x69\x74\x65\x6D\x05\x85\x76\x61\x6C\x75\x65\x06\x00\x20\x10\x00\x00\x20\x01\x00\x01\x20\x03\x02\x05\x01\x03\x20\x05\x40\x00\x04\x82\x5F\x31\x03\x05\x60\x19\x80\x32\x02\x05\x01\xFF\x02\x81\x00\x00\x01\x7B\x08\xF0\xB2\x34\x02\x01\x07\x6D\x79\x67\x72\x6F\x75\x70\x81\x00\x00\x01\x7B\x08\xF0\xB2\x34\x01\x01\x00\x00\x01\x7B\x08\xF0\xB2\x34\x00\x00\x00\x00\x00\x00\x00\x01\x35\xB2\xF0\x08\x7B\x01\x00\x00\x01\x01\x13\x41\x6C\x69\x63\x65\x35\xB2\xF0\x08\x7B\x01\x00\x00\x01\x00\x00\x01\x7B\x08\xF0\xB2\x34\x00\x00\x00\x00\x00\x00\x00\x01\x09\x00\x28\x2F\xE0\xC5\x04\xBB\xA7\x31"} err + assert_match "*Bad data format*" $err + #catch {r XINFO STREAM _stream 
FULL } + r ping + } +} + +test {corrupt payload: fuzzer findings - stream listpack valgrind issue} { + start_server [list overrides [list loglevel verbose use-exit-on-panic yes crash-memcheck-enabled no] ] { + r config set sanitize-dump-payload no + r debug set-skip-checksum-validation 1 + r restore _stream 0 "\x0F\x01\x10\x00\x00\x01\x7B\x09\x5E\x94\xFF\x00\x00\x00\x00\x00\x00\x00\x00\x40\x42\x42\x00\x00\x00\x18\x00\x02\x01\x01\x01\x02\x01\x84\x69\x74\x65\x6D\x05\x85\x76\x61\x6C\x75\x65\x06\x00\x01\x02\x01\x00\x01\x00\x01\x01\x01\x00\x01\x05\x01\x03\x01\x25\x01\x00\x01\x01\x01\x82\x5F\x31\x03\x05\x01\x02\x01\x32\x01\x00\x01\x01\x01\x02\x01\xF0\x01\xFF\x02\x81\x00\x00\x01\x7B\x09\x5E\x95\x31\x00\x01\x07\x6D\x79\x67\x72\x6F\x75\x70\x81\x00\x00\x01\x7B\x09\x5E\x95\x24\x00\x01\x00\x00\x01\x7B\x09\x5E\x95\x24\x00\x00\x00\x00\x00\x00\x00\x00\x5C\x95\x5E\x09\x7B\x01\x00\x00\x01\x01\x05\x41\x6C\x69\x63\x65\x4B\x95\x5E\x09\x7B\x01\x00\x00\x01\x00\x00\x01\x7B\x09\x5E\x95\x24\x00\x00\x00\x00\x00\x00\x00\x00\x09\x00\x19\x29\x94\xDF\x76\xF8\x1A\xC6" + catch {r XINFO STREAM _stream FULL } + assert_equal [count_log_message 0 "crashed by signal"] 0 + assert_equal [count_log_message 0 "ASSERTION FAILED"] 1 + } +} + +test {corrupt payload: fuzzer findings - stream with bad lpFirst} { + start_server [list overrides [list loglevel verbose use-exit-on-panic yes crash-memcheck-enabled no] ] { + r config set sanitize-dump-payload yes + r debug set-skip-checksum-validation 1 + catch {r restore _stream 0 
"\x0F\x01\x10\x00\x00\x01\x7B\x0E\x52\xD2\xEC\x00\x00\x00\x00\x00\x00\x00\x00\x40\x42\x42\x00\x00\x00\x18\x00\x02\xF7\x01\x01\x02\x01\x84\x69\x74\x65\x6D\x05\x85\x76\x61\x6C\x75\x65\x06\x00\x01\x02\x01\x00\x01\x00\x01\x01\x01\x00\x01\x05\x01\x03\x01\x01\x01\x00\x01\x01\x01\x82\x5F\x31\x03\x05\x01\x02\x01\x01\x01\x01\x01\x01\x01\x02\x01\x05\x01\xFF\x02\x81\x00\x00\x01\x7B\x0E\x52\xD2\xED\x01\x01\x07\x6D\x79\x67\x72\x6F\x75\x70\x81\x00\x00\x01\x7B\x0E\x52\xD2\xED\x00\x01\x00\x00\x01\x7B\x0E\x52\xD2\xED\x00\x00\x00\x00\x00\x00\x00\x00\xED\xD2\x52\x0E\x7B\x01\x00\x00\x01\x01\x05\x41\x6C\x69\x63\x65\xED\xD2\x52\x0E\x7B\x01\x00\x00\x01\x00\x00\x01\x7B\x0E\x52\xD2\xED\x00\x00\x00\x00\x00\x00\x00\x00\x09\x00\xAC\x05\xC9\x97\x5D\x45\x80\xB3"} err + assert_match "*Bad data format*" $err + r ping + } +} + +test {corrupt payload: fuzzer findings - stream listpack lpPrev valgrind issue} { + start_server [list overrides [list loglevel verbose use-exit-on-panic yes crash-memcheck-enabled no] ] { + r config set sanitize-dump-payload no + r debug set-skip-checksum-validation 1 + r restore _stream 0 "\x0F\x01\x10\x00\x00\x01\x7B\x0E\xAE\x66\x36\x00\x00\x00\x00\x00\x00\x00\x00\x40\x42\x42\x00\x00\x00\x18\x00\x02\x01\x01\x01\x02\x01\x84\x69\x74\x65\x6D\x05\x85\x76\x61\x6C\x75\x65\x06\x00\x01\x02\x01\x00\x01\x00\x01\x01\x01\x00\x01\x1D\x01\x03\x01\x24\x01\x00\x01\x01\x69\x82\x5F\x31\x03\x05\x01\x02\x01\x33\x01\x00\x01\x01\x01\x02\x01\x05\x01\xFF\x02\x81\x00\x00\x01\x7B\x0E\xAE\x66\x69\x00\x01\x07\x6D\x79\x67\x72\x6F\x75\x70\x81\x00\x00\x01\x7B\x0E\xAE\x66\x5A\x00\x01\x00\x00\x01\x7B\x0E\xAE\x66\x5A\x00\x00\x00\x00\x00\x00\x00\x00\x94\x66\xAE\x0E\x7B\x01\x00\x00\x01\x01\x05\x41\x6C\x69\x63\x65\x83\x66\xAE\x0E\x7B\x01\x00\x00\x01\x00\x00\x01\x7B\x0E\xAE\x66\x5A\x00\x00\x00\x00\x00\x00\x00\x00\x09\x00\xD5\xD7\xA5\x5C\x63\x1C\x09\x40" + catch {r XREVRANGE _stream 1618622681 606195012389} + assert_equal [count_log_message 0 "crashed by signal"] 0 + assert_equal [count_log_message 0 
"ASSERTION FAILED"] 1 + } +} + +test {corrupt payload: fuzzer findings - stream with non-integer entry id} { + start_server [list overrides [list loglevel verbose use-exit-on-panic yes crash-memcheck-enabled no] ] { + r config set sanitize-dump-payload yes + r debug set-skip-checksum-validation 1 + catch {r restore _streambig 0 "\x0F\x03\x10\x00\x00\x01\x7B\x13\x34\xC3\xB2\x00\x00\x00\x00\x00\x00\x00\x00\xC3\x40\x4F\x40\x5C\x18\x5C\x00\x00\x00\x24\x00\x05\x01\x00\x01\x02\x01\x84\x69\x74\x65\x6D\x05\x85\x76\x61\x6C\x75\x65\x06\x40\x10\x00\x80\x20\x01\x00\x01\x20\x03\x00\x05\x20\x1C\x40\x09\x05\x01\x01\x82\x5F\x31\x03\x80\x0D\x00\x02\x20\x0D\x00\x02\xA0\x19\x00\x03\x20\x0B\x02\x82\x5F\x33\xA0\x19\x00\x04\x20\x0D\x00\x04\x20\x19\x00\xFF\x10\x00\x00\x01\x7B\x13\x34\xC3\xB2\x00\x00\x00\x00\x00\x00\x00\x05\xC3\x40\x56\x40\x61\x18\x61\x00\x00\x00\x24\x00\x05\x01\x00\x01\x02\x01\x84\x69\x74\x65\x6D\x05\x85\x76\x61\x6C\x75\x65\x06\x40\x10\x00\x00\x20\x01\x06\x01\x01\x82\x5F\x35\x03\x05\x20\x1E\x40\x0B\x03\x01\x01\x06\x01\x40\x0B\x03\x01\x01\xDF\xFB\x20\x05\x02\x82\x5F\x37\x60\x1A\x20\x0E\x00\xFC\x20\x05\x00\x08\xC0\x1B\x00\xFD\x20\x0C\x02\x82\x5F\x39\x20\x1B\x00\xFF\x10\x00\x00\x01\x7B\x13\x34\xC3\xB3\x00\x00\x00\x00\x00\x00\x00\x03\xC3\x3D\x40\x4A\x18\x4A\x00\x00\x00\x15\x00\x02\x01\x00\x01\x02\x01\x84\x69\x74\x65\x6D\x05\x85\x76\x61\x6C\x75\x65\x06\x40\x10\x00\x00\x20\x01\x40\x00\x00\x05\x60\x07\x02\xDF\xFD\x02\xC0\x23\x09\x01\x01\x86\x75\x6E\x69\x71\x75\x65\x07\xA0\x2D\x02\x08\x01\xFF\x0C\x81\x00\x00\x01\x7B\x13\x34\xC3\xB4\x00\x00\x09\x00\x9D\xBD\xD5\xB9\x33\xC4\xC5\xFF"} err + #catch {r XINFO STREAM _streambig FULL } + assert_match "*Bad data format*" $err + r ping + } +} + +test {corrupt payload: fuzzer findings - empty quicklist} { + start_server [list overrides [list loglevel verbose use-exit-on-panic yes crash-memcheck-enabled no] ] { + r config set sanitize-dump-payload yes + r debug set-skip-checksum-validation 1 + catch { + r restore key 0 
"\x0E\xC0\x2B\x15\x00\x00\x00\x0A\x00\x00\x00\x01\x00\x00\xE0\x62\x58\xEA\xDF\x22\x00\x00\x00\xFF\x09\x00\xDF\x35\xD2\x67\xDC\x0E\x89\xAB" replace + } err + assert_match "*Bad data format*" $err + r ping + } +} + +test {corrupt payload: fuzzer findings - empty zset} { + start_server [list overrides [list loglevel verbose use-exit-on-panic yes crash-memcheck-enabled no] ] { + r config set sanitize-dump-payload yes + r debug set-skip-checksum-validation 1 + catch {r restore key 0 "\x05\xC0\x01\x09\x00\xF6\x8A\xB6\x7A\x85\x87\x72\x4D"} err + assert_match "*Bad data format*" $err + r ping + } +} + +test {corrupt payload: fuzzer findings - hash with len of 0} { + start_server [list overrides [list loglevel verbose use-exit-on-panic yes crash-memcheck-enabled no] ] { + r config set sanitize-dump-payload yes + r debug set-skip-checksum-validation 1 + catch {r restore key 0 "\x04\xC0\x21\x09\x00\xF6\x8A\xB6\x7A\x85\x87\x72\x4D"} err + assert_match "*Bad data format*" $err + r ping + } +} + +test {corrupt payload: fuzzer findings - stream double free listpack when insert dup node to rax returns 0} { + start_server [list overrides [list loglevel verbose use-exit-on-panic yes crash-memcheck-enabled no] ] { + r debug set-skip-checksum-validation 1 + r config set sanitize-dump-payload yes + catch { r restore _stream 0 
"\x0F\x03\x10\x00\x00\x01\x7B\x60\x5A\x23\x79\x00\x00\x00\x00\x00\x00\x00\x00\xC3\x40\x4F\x40\x5C\x18\x5C\x00\x00\x00\x24\x00\x05\x01\x00\x01\x02\x01\x84\x69\x74\x65\x6D\x05\x85\x76\x61\x6C\x75\x65\x06\x40\x10\x00\x00\x20\x01\x00\x01\x20\x03\x00\x05\x20\x1C\x40\x09\x05\x01\x01\x82\x5F\x31\x03\x80\x0D\x00\x02\x20\x0D\x00\x02\xA0\x19\x00\x03\x20\x0B\x02\x82\x5F\x33\xA0\x19\x00\x04\x20\x0D\x00\x04\x20\x19\x00\xFF\x10\x00\x00\x01\x7B\x60\x5A\x23\x79\x00\x00\x00\x00\x00\x00\x00\x05\xC3\x40\x51\x40\x5E\x18\x5E\x00\x00\x00\x24\x00\x05\x01\x00\x01\x02\x01\x84\x69\x74\x65\x6D\x05\x85\x76\x61\x6C\x75\x65\x06\x40\x10\x00\x00\x20\x01\x06\x01\x01\x82\x5F\x35\x03\x05\x20\x1E\x40\x0B\x03\x01\x01\x06\x01\x80\x0B\x00\x02\x20\x0B\x02\x82\x5F\x37\xA0\x19\x00\x03\x20\x0D\x00\x08\xA0\x19\x00\x04\x20\x0B\x02\x82\x5F\x39\x20\x19\x00\xFF\x10\x00\x00\x01\x7B\x60\x5A\x23\x79\x00\x00\x00\x00\x00\x00\x00\x00\xC3\x3B\x40\x49\x18\x49\x00\x00\x00\x15\x00\x02\x01\x00\x01\x02\x01\x84\x69\x74\x65\x6D\x05\x85\x76\x61\x6C\x75\x65\x06\x40\x10\x00\x00\x20\x01\x40\x00\x00\x05\x20\x07\x40\x09\xC0\x22\x09\x01\x01\x86\x75\x6E\x69\x71\x75\x65\x07\xA0\x2C\x02\x08\x01\xFF\x0C\x81\x00\x00\x01\x7B\x60\x5A\x23\x7A\x01\x00\x09\x00\x9C\x8F\x1E\xBF\x2E\x05\x59\x09" replace } err + assert_match "*Bad data format*" $err + r ping + } +} + } ;# tags diff --git a/tests/integration/failover.tcl b/tests/integration/failover.tcl index 5d8aaf706..3c8e43c15 100644 --- a/tests/integration/failover.tcl +++ b/tests/integration/failover.tcl @@ -83,7 +83,11 @@ start_server {} { } else { fail "Failover from node 0 to node 1 did not finish" } + + # stop the write load and make sure no more commands processed stop_write_load $load_handler + wait_load_handlers_disconnected + $node_2 replicaof $node_1_host $node_1_port wait_for_sync $node_0 wait_for_sync $node_2 diff --git a/tests/integration/keydb-cli.tcl b/tests/integration/keydb-cli.tcl index 7e8b41fca..3117cb6a5 100644 --- a/tests/integration/keydb-cli.tcl +++ 
b/tests/integration/keydb-cli.tcl @@ -64,8 +64,8 @@ start_server {tags {"cli"}} { set _ $tmp } - proc _run_cli {opts args} { - set cmd [rediscli [srv host] [srv port] [list -n 9 {*}$args]] + proc _run_cli {host port db opts args} { + set cmd [rediscli $host $port [list -n $db {*}$args]] foreach {key value} $opts { if {$key eq "pipe"} { set cmd "sh -c \"$value | $cmd\"" @@ -84,15 +84,19 @@ start_server {tags {"cli"}} { } proc run_cli {args} { - _run_cli {} {*}$args + _run_cli [srv host] [srv port] 9 {} {*}$args } proc run_cli_with_input_pipe {cmd args} { - _run_cli [list pipe $cmd] -x {*}$args + _run_cli [srv host] [srv port] 9 [list pipe $cmd] -x {*}$args } proc run_cli_with_input_file {path args} { - _run_cli [list path $path] -x {*}$args + _run_cli [srv host] [srv port] 9 [list path $path] -x {*}$args + } + + proc run_cli_host_port_db {host port db args} { + _run_cli $host $port $db {} {*}$args } proc test_nontty_cli {name code} { @@ -207,6 +211,30 @@ start_server {tags {"cli"}} { assert_equal "foo\nbar" [run_cli lrange list 0 -1] } +if {!$::tls} { ;# fake_redis_node doesn't support TLS + test_nontty_cli "ASK redirect test" { + # Set up two fake Redis nodes. 
+ set tclsh [info nameofexecutable] + set script "tests/helpers/fake_redis_node.tcl" + set port1 [find_available_port $::baseport $::portcount] + set port2 [find_available_port $::baseport $::portcount] + set p1 [exec $tclsh $script $port1 \ + "SET foo bar" "-ASK 12182 127.0.0.1:$port2" &] + set p2 [exec $tclsh $script $port2 \ + "ASKING" "+OK" \ + "SET foo bar" "+OK" &] + # Make sure both fake nodes have started listening + wait_for_condition 50 50 { + [catch {close [socket "127.0.0.1" $port1]}] == 0 && \ + [catch {close [socket "127.0.0.1" $port2]}] == 0 + } else { + fail "Failed to start fake Redis nodes" + } + # Run the cli + assert_equal "OK" [run_cli_host_port_db "127.0.0.1" $port1 0 -c SET foo bar] + } +} + test_nontty_cli "Quoted input arguments" { r set "\x00\x00" "value" assert_equal "value" [run_cli --quoted-input get {"\x00\x00"}] @@ -215,7 +243,6 @@ start_server {tags {"cli"}} { test_nontty_cli "No accidental unquoting of input arguments" { run_cli --quoted-input set {"\x41\x41"} quoted-val run_cli set {"\x41\x41"} unquoted-val - assert_equal "quoted-val" [r get AA] assert_equal "unquoted-val" [r get {"\x41\x41"}] } @@ -283,9 +310,9 @@ start_server {tags {"cli"}} { assert_equal {key:2} [run_cli --scan --quoted-pattern {"*:\x32"}] } - test "Connecting as a replica" { + proc test_redis_cli_repl {} { set fd [open_cli "--replica"] - wait_for_condition 500 500 { + wait_for_condition 500 100 { [string match {*slave0:*state=online*} [r info]] } else { fail "redis-cli --replica did not connect" @@ -294,14 +321,30 @@ start_server {tags {"cli"}} { for {set i 0} {$i < 100} {incr i} { r set test-key test-value-$i } - r client kill type slave - catch { - assert_match {*SET*key-a*} [read_cli $fd] + + wait_for_condition 500 100 { + [string match {*test-value-99*} [read_cli $fd]] + } else { + fail "redis-cli --replica didn't read commands" } - close_cli $fd + fconfigure $fd -blocking true + r client kill type slave + catch { close_cli $fd } err + assert_match {*Server 
closed the connection*} $err } + test "Connecting as a replica" { + # Disk-based master + assert_match "OK" [r config set repl-diskless-sync no] + test_redis_cli_repl + + # Disk-less master + assert_match "OK" [r config set repl-diskless-sync yes] + assert_match "OK" [r config set repl-diskless-sync-delay 0] + test_redis_cli_repl + } {} + test "Piping raw protocol" { set cmds [tmpfile "cli_cmds"] set cmds_fd [open $cmds "w"] diff --git a/tests/integration/rdb.tcl b/tests/integration/rdb.tcl index 9e1c2651a..e652e5573 100644 --- a/tests/integration/rdb.tcl +++ b/tests/integration/rdb.tcl @@ -45,7 +45,7 @@ start_server [list overrides [list "dir" $server_path] keep_persistence true] { test {Test RDB stream encoding} { for {set j 0} {$j < 1000} {incr j} { if {rand() < 0.9} { - r xadd stream * foo $j + r xadd stream * foo abc } else { r xadd stream * bar $j } diff --git a/tests/integration/replication-4.tcl b/tests/integration/replication-4.tcl index 6905ec4a3..d567df924 100644 --- a/tests/integration/replication-4.tcl +++ b/tests/integration/replication-4.tcl @@ -21,15 +21,9 @@ start_server {tags {"repl network"}} { stop_bg_complex_data $load_handle0 stop_bg_complex_data $load_handle1 stop_bg_complex_data $load_handle2 - set retry 10 - while {$retry && ([$master debug digest] ne [$slave debug digest])}\ - { - after 1000 - incr retry -1 - } - assert {[$master dbsize] > 0} - - if {[$master debug digest] ne [$slave debug digest]} { + wait_for_condition 100 100 { + [$master debug digest] == [$slave debug digest] + } else { set csv1 [csvdump r] set csv2 [csvdump {r -1}] set fd [open /tmp/repldump1.txt w] @@ -38,10 +32,9 @@ start_server {tags {"repl network"}} { set fd [open /tmp/repldump2.txt w] puts -nonewline $fd $csv2 close $fd - puts "Master - Replica inconsistency" - puts "Run diff -u against /tmp/repldump*.txt for more info" + fail "Master - Replica inconsistency, Run diff -u against /tmp/repldump*.txt for more info" } - assert_equal [r debug digest] [r -1 debug 
digest] + assert {[$master dbsize] > 0} } } } diff --git a/tests/integration/replication-psync.tcl b/tests/integration/replication-psync.tcl index 3c98723af..08e21d310 100644 --- a/tests/integration/replication-psync.tcl +++ b/tests/integration/replication-psync.tcl @@ -97,15 +97,9 @@ proc test_psync {descr duration backlog_size backlog_ttl delay cond mdl sdl reco fail "Slave still not connected after some time" } - set retry 10 - while {$retry && ([$master debug digest] ne [$slave debug digest])}\ - { - after 1000 - incr retry -1 - } - assert {[$master dbsize] > 0} - - if {[$master debug digest] ne [$slave debug digest]} { + wait_for_condition 100 100 { + [$master debug digest] == [$slave debug digest] + } else { set csv1 [csvdump r] set csv2 [csvdump {r -1}] set fd [open /tmp/repldump1.txt w] @@ -114,10 +108,9 @@ proc test_psync {descr duration backlog_size backlog_ttl delay cond mdl sdl reco set fd [open /tmp/repldump2.txt w] puts -nonewline $fd $csv2 close $fd - puts "Master - Replica inconsistency" - puts "Run diff -u against /tmp/repldump*.txt for more info" + fail "Master - Replica inconsistency, Run diff -u against /tmp/repldump*.txt for more info" } - assert_equal [r debug digest] [r -1 debug digest] + assert {[$master dbsize] > 0} eval $cond } } diff --git a/tests/integration/replication.tcl b/tests/integration/replication.tcl index 4e4b39a6a..a7a46db6c 100644 --- a/tests/integration/replication.tcl +++ b/tests/integration/replication.tcl @@ -316,15 +316,12 @@ foreach mdl {no yes} { stop_write_load $load_handle3 stop_write_load $load_handle4 - # Make sure that slaves and master have same - # number of keys - wait_for_condition 500 100 { - [$master dbsize] == [[lindex $slaves 0] dbsize] && - [$master dbsize] == [[lindex $slaves 1] dbsize] && - [$master dbsize] == [[lindex $slaves 2] dbsize] - } else { - fail "Different number of keys between master and replica after too long time." 
- } + # Make sure no more commands processed + wait_load_handlers_disconnected + + wait_for_ofs_sync $master [lindex $slaves 0] + wait_for_ofs_sync $master [lindex $slaves 1] + wait_for_ofs_sync $master [lindex $slaves 2] # Check digests set digest [$master debug digest] @@ -773,6 +770,45 @@ test "diskless replication child being killed is collected" { } } +test "diskless replication read pipe cleanup" { + # In diskless replication, we create a read pipe for the RDB, between the child and the parent. + # When we close this pipe (fd), the read handler also needs to be removed from the event loop (if it still registered). + # Otherwise, next time we will use the same fd, the registration will be fail (panic), because + # we will use EPOLL_CTL_MOD (the fd still register in the event loop), on fd that already removed from epoll_ctl + start_server {tags {"repl"}} { + set master [srv 0 client] + set master_host [srv 0 host] + set master_port [srv 0 port] + set master_pid [srv 0 pid] + $master config set repl-diskless-sync yes + $master config set repl-diskless-sync-delay 0 + + # put enough data in the db, and slowdown the save, to keep the parent busy at the read process + $master config set rdb-key-save-delay 100000 + $master debug populate 20000 test 10000 + $master config set rdbcompression no + start_server {} { + set replica [srv 0 client] + set loglines [count_log_lines 0] + $replica config set repl-diskless-load swapdb + $replica replicaof $master_host $master_port + + # wait for the replicas to start reading the rdb + wait_for_log_messages 0 {"*Loading DB in memory*"} $loglines 800 10 + + set loglines [count_log_lines 0] + # send FLUSHALL so the RDB child will be killed + $master flushall + + # wait for another RDB child process to be started + wait_for_log_messages -1 {"*Background RDB transfer started by pid*"} $loglines 800 10 + + # make sure master is alive + $master ping + } + } +} + test {replicaof right after disconnection} { # this is a rare race 
condition that was reproduced sporadically by the psync2 unit. # see details in #7205 @@ -863,7 +899,7 @@ test {Kill rdb child process if its dumping RDB is not useful} { # Slave2 disconnect with master $slave2 slaveof no one # Should kill child - wait_for_condition 20 10 { + wait_for_condition 100 10 { [s 0 rdb_bgsave_in_progress] eq 0 } else { fail "can't kill rdb child" diff --git a/tests/modules/Makefile b/tests/modules/Makefile index f56313964..ae611de86 100644 --- a/tests/modules/Makefile +++ b/tests/modules/Makefile @@ -18,6 +18,7 @@ endif TEST_MODULES = \ commandfilter.so \ + basics.so \ testrdb.so \ fork.so \ infotest.so \ diff --git a/src/modules/testmodule.c b/tests/modules/basics.c similarity index 81% rename from src/modules/testmodule.c rename to tests/modules/basics.c index 078c02c5c..59ceb2d1d 100644 --- a/src/modules/testmodule.c +++ b/tests/modules/basics.c @@ -31,7 +31,7 @@ */ #define REDISMODULE_EXPERIMENTAL_API -#include "../redismodule.h" +#include "redismodule.h" #include /* --------------------------------- Helpers -------------------------------- */ @@ -152,11 +152,64 @@ int TestUnlink(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) { return failTest(ctx, "Could not verify key to be unlinked"); } return RedisModule_ReplyWithSimpleString(ctx, "OK"); +} +/* TEST.STRING.TRUNCATE -- Test truncating an existing string object. 
*/ +int TestStringTruncate(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) { + RedisModule_AutoMemory(ctx); + REDISMODULE_NOT_USED(argv); + REDISMODULE_NOT_USED(argc); + + RedisModule_Call(ctx, "SET", "cc", "foo", "abcde"); + RedisModuleKey *k = RedisModule_OpenKey(ctx, RedisModule_CreateStringPrintf(ctx, "foo"), REDISMODULE_READ | REDISMODULE_WRITE); + if (!k) return failTest(ctx, "Could not create key"); + + size_t len = 0; + char* s; + + /* expand from 5 to 8 and check null pad */ + if (REDISMODULE_ERR == RedisModule_StringTruncate(k, 8)) { + return failTest(ctx, "Could not truncate string value (8)"); + } + s = RedisModule_StringDMA(k, &len, REDISMODULE_READ); + if (!s) { + return failTest(ctx, "Failed to read truncated string (8)"); + } else if (len != 8) { + return failTest(ctx, "Failed to expand string value (8)"); + } else if (0 != strncmp(s, "abcde\0\0\0", 8)) { + return failTest(ctx, "Failed to null pad string value (8)"); + } + + /* shrink from 8 to 4 */ + if (REDISMODULE_ERR == RedisModule_StringTruncate(k, 4)) { + return failTest(ctx, "Could not truncate string value (4)"); + } + s = RedisModule_StringDMA(k, &len, REDISMODULE_READ); + if (!s) { + return failTest(ctx, "Failed to read truncated string (4)"); + } else if (len != 4) { + return failTest(ctx, "Failed to shrink string value (4)"); + } else if (0 != strncmp(s, "abcd", 4)) { + return failTest(ctx, "Failed to truncate string value (4)"); + } + + /* shrink to 0 */ + if (REDISMODULE_ERR == RedisModule_StringTruncate(k, 0)) { + return failTest(ctx, "Could not truncate string value (0)"); + } + s = RedisModule_StringDMA(k, &len, REDISMODULE_READ); + if (!s) { + return failTest(ctx, "Failed to read truncated string (0)"); + } else if (len != 0) { + return failTest(ctx, "Failed to shrink string value to (0)"); + } + + return RedisModule_ReplyWithSimpleString(ctx, "OK"); } int NotifyCallback(RedisModuleCtx *ctx, int type, const char *event, RedisModuleString *key) { + 
RedisModule_AutoMemory(ctx); /* Increment a counter on the notifications: for each key notified we * increment a counter */ RedisModule_Log(ctx, "notice", "Got event type %d, event %s, key %s", type, @@ -168,6 +221,7 @@ int NotifyCallback(RedisModuleCtx *ctx, int type, const char *event, /* TEST.NOTIFICATIONS -- Test Keyspace Notifications. */ int TestNotifications(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) { + RedisModule_AutoMemory(ctx); REDISMODULE_NOT_USED(argv); REDISMODULE_NOT_USED(argc); @@ -279,6 +333,9 @@ int TestCtxFlags(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) { flags = RedisModule_GetContextFlags(ctx); if (!(flags & REDISMODULE_CTX_FLAGS_AOF)) FAIL("AOF Flag not set after config set"); + /* Disable RDB saving and test the flag. */ + RedisModule_Call(ctx, "config", "ccc", "set", "save", ""); + flags = RedisModule_GetContextFlags(ctx); if (flags & REDISMODULE_CTX_FLAGS_RDB) FAIL("RDB Flag was set"); /* Enable RDB to test RDB flags */ RedisModule_Call(ctx, "config", "ccc", "set", "save", "900 1"); @@ -290,8 +347,12 @@ int TestCtxFlags(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) { if (flags & REDISMODULE_CTX_FLAGS_READONLY) FAIL("Read-only flag was set"); if (flags & REDISMODULE_CTX_FLAGS_CLUSTER) FAIL("Cluster flag was set"); + /* Disable maxmemory and test the flag. (it is implicitly set in 32bit builds. */ + RedisModule_Call(ctx, "config", "ccc", "set", "maxmemory", "0"); + flags = RedisModule_GetContextFlags(ctx); if (flags & REDISMODULE_CTX_FLAGS_MAXMEMORY) FAIL("Maxmemory flag was set"); + /* Enable maxmemory and test the flag. 
*/ RedisModule_Call(ctx, "config", "ccc", "set", "maxmemory", "100000000"); flags = RedisModule_GetContextFlags(ctx); if (!(flags & REDISMODULE_CTX_FLAGS_MAXMEMORY)) @@ -324,7 +385,11 @@ end: int TestAssertStringReply(RedisModuleCtx *ctx, RedisModuleCallReply *reply, char *str, size_t len) { RedisModuleString *mystr, *expected; - if (RedisModule_CallReplyType(reply) != REDISMODULE_REPLY_STRING) { + if (RedisModule_CallReplyType(reply) == REDISMODULE_REPLY_ERROR) { + RedisModule_Log(ctx,"warning","Test error reply: %s", + RedisModule_CallReplyStringPtr(reply, NULL)); + return 0; + } else if (RedisModule_CallReplyType(reply) != REDISMODULE_REPLY_STRING) { RedisModule_Log(ctx,"warning","Unexpected reply type %d", RedisModule_CallReplyType(reply)); return 0; @@ -345,7 +410,11 @@ int TestAssertStringReply(RedisModuleCtx *ctx, RedisModuleCallReply *reply, char /* Return 1 if the reply matches the specified integer, otherwise log errors * in the server log and return 0. */ int TestAssertIntegerReply(RedisModuleCtx *ctx, RedisModuleCallReply *reply, long long expected) { - if (RedisModule_CallReplyType(reply) != REDISMODULE_REPLY_INTEGER) { + if (RedisModule_CallReplyType(reply) == REDISMODULE_REPLY_ERROR) { + RedisModule_Log(ctx,"warning","Test error reply: %s", + RedisModule_CallReplyStringPtr(reply, NULL)); + return 0; + } else if (RedisModule_CallReplyType(reply) != REDISMODULE_REPLY_INTEGER) { RedisModule_Log(ctx,"warning","Unexpected reply type %d", RedisModule_CallReplyType(reply)); return 0; @@ -366,8 +435,11 @@ int TestAssertIntegerReply(RedisModuleCtx *ctx, RedisModuleCallReply *reply, lon reply = RedisModule_Call(ctx,name,__VA_ARGS__); \ } while (0) -/* TEST.IT -- Run all the tests. */ -int TestIt(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) { +/* TEST.BASICS -- Run all the tests. 
+ * Note: it is useful to run these tests from the module rather than TCL + * since it's easier to check the reply types like that (make a distinction + * between 0 and "0", etc. */ +int TestBasics(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) { REDISMODULE_NOT_USED(argv); REDISMODULE_NOT_USED(argc); @@ -390,6 +462,9 @@ int TestIt(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) { T("test.string.append",""); if (!TestAssertStringReply(ctx,reply,"foobar",6)) goto fail; + T("test.string.truncate",""); + if (!TestAssertStringReply(ctx,reply,"OK",2)) goto fail; + T("test.unlink",""); if (!TestAssertStringReply(ctx,reply,"OK",2)) goto fail; @@ -407,7 +482,7 @@ int TestIt(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) { fail: RedisModule_ReplyWithSimpleString(ctx, - "SOME TEST NOT PASSED! Check server logs"); + "SOME TEST DID NOT PASS! Check server logs"); return REDISMODULE_OK; } @@ -430,6 +505,10 @@ int RedisModule_OnLoad(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) TestStringAppendAM,"write deny-oom",1,1,1) == REDISMODULE_ERR) return REDISMODULE_ERR; + if (RedisModule_CreateCommand(ctx,"test.string.truncate", + TestStringTruncate,"write deny-oom",1,1,1) == REDISMODULE_ERR) + return REDISMODULE_ERR; + if (RedisModule_CreateCommand(ctx,"test.string.printf", TestStringPrintf,"write deny-oom",1,1,1) == REDISMODULE_ERR) return REDISMODULE_ERR; @@ -442,8 +521,8 @@ int RedisModule_OnLoad(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) TestUnlink,"write deny-oom",1,1,1) == REDISMODULE_ERR) return REDISMODULE_ERR; - if (RedisModule_CreateCommand(ctx,"test.it", - TestIt,"readonly",1,1,1) == REDISMODULE_ERR) + if (RedisModule_CreateCommand(ctx,"test.basics", + TestBasics,"readonly",1,1,1) == REDISMODULE_ERR) return REDISMODULE_ERR; RedisModule_SubscribeToKeyspaceEvents(ctx, diff --git a/tests/modules/blockonbackground.c b/tests/modules/blockonbackground.c index 855fef9dc..688756309 100644 --- a/tests/modules/blockonbackground.c 
+++ b/tests/modules/blockonbackground.c @@ -195,6 +195,66 @@ int HelloDoubleBlock_RedisCommand(RedisModuleCtx *ctx, RedisModuleString **argv, return REDISMODULE_OK; } +RedisModuleBlockedClient *blocked_client = NULL; + +/* BLOCK.BLOCK [TIMEOUT] -- Blocks the current client until released + * or TIMEOUT seconds. If TIMEOUT is zero, no timeout function is + * registered. + */ +int Block_RedisCommand(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) { + if (RedisModule_IsBlockedReplyRequest(ctx)) { + RedisModuleString *r = RedisModule_GetBlockedClientPrivateData(ctx); + return RedisModule_ReplyWithString(ctx, r); + } else if (RedisModule_IsBlockedTimeoutRequest(ctx)) { + RedisModule_UnblockClient(blocked_client, NULL); /* Must be called to avoid leaks. */ + blocked_client = NULL; + return RedisModule_ReplyWithSimpleString(ctx, "Timed out"); + } + + if (argc != 2) return RedisModule_WrongArity(ctx); + long long timeout; + + if (RedisModule_StringToLongLong(argv[1], &timeout) != REDISMODULE_OK) { + return RedisModule_ReplyWithError(ctx, "ERR invalid timeout"); + } + if (blocked_client) { + return RedisModule_ReplyWithError(ctx, "ERR another client already blocked"); + } + + /* Block client. We use this function as both a reply and optional timeout + * callback and differentiate the different code flows above. + */ + blocked_client = RedisModule_BlockClient(ctx, Block_RedisCommand, + timeout > 0 ? Block_RedisCommand : NULL, NULL, timeout); + return REDISMODULE_OK; +} + +/* BLOCK.IS_BLOCKED -- Returns 1 if we have a blocked client, or 0 otherwise. + */ +int IsBlocked_RedisCommand(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) { + UNUSED(argv); + UNUSED(argc); + RedisModule_ReplyWithLongLong(ctx, blocked_client ? 1 : 0); + return REDISMODULE_OK; +} + +/* BLOCK.RELEASE [reply] -- Releases the blocked client and produce the specified reply. 
+ */ +int Release_RedisCommand(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) { + if (argc != 2) return RedisModule_WrongArity(ctx); + if (!blocked_client) { + return RedisModule_ReplyWithError(ctx, "ERR No blocked client"); + } + + RedisModuleString *replystr = argv[1]; + RedisModule_RetainString(ctx, replystr); + int err = RedisModule_UnblockClient(blocked_client, replystr); + blocked_client = NULL; + + RedisModule_ReplyWithSimpleString(ctx, "OK"); + + return REDISMODULE_OK; +} int RedisModule_OnLoad(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) { UNUSED(argv); @@ -215,5 +275,17 @@ int RedisModule_OnLoad(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) HelloBlockNoTracking_RedisCommand,"",0,0,0) == REDISMODULE_ERR) return REDISMODULE_ERR; + if (RedisModule_CreateCommand(ctx, "block.block", + Block_RedisCommand, "", 0, 0, 0) == REDISMODULE_ERR) + return REDISMODULE_ERR; + + if (RedisModule_CreateCommand(ctx,"block.is_blocked", + IsBlocked_RedisCommand,"",0,0,0) == REDISMODULE_ERR) + return REDISMODULE_ERR; + + if (RedisModule_CreateCommand(ctx,"block.release", + Release_RedisCommand,"",0,0,0) == REDISMODULE_ERR) + return REDISMODULE_ERR; + return REDISMODULE_OK; } diff --git a/tests/support/keydb.tcl b/tests/support/keydb.tcl index 4d321c975..978163e98 100644 --- a/tests/support/keydb.tcl +++ b/tests/support/keydb.tcl @@ -34,13 +34,14 @@ array set ::redis::fd {} array set ::redis::addr {} array set ::redis::blocking {} array set ::redis::deferred {} +array set ::redis::readraw {} array set ::redis::reconnect {} array set ::redis::tls {} array set ::redis::callback {} array set ::redis::state {} ;# State in non-blocking reply reading array set ::redis::statestack {} ;# Stack of states, for nested mbulks -proc redis {{server 127.0.0.1} {port 6379} {defer 0} {tls 0} {tlsoptions {}}} { +proc redis {{server 127.0.0.1} {port 6379} {defer 0} {tls 0} {tlsoptions {}} {readraw 0}} { if {$tls} { package require tls ::tls::init \ @@ -58,6 +59,7 @@ 
proc redis {{server 127.0.0.1} {port 6379} {defer 0} {tls 0} {tlsoptions {}}} { set ::redis::addr($id) [list $server $port] set ::redis::blocking($id) 1 set ::redis::deferred($id) $defer + set ::redis::readraw($id) $readraw set ::redis::reconnect($id) 0 set ::redis::tls($id) $tls ::redis::redis_reset_state $id @@ -158,6 +160,7 @@ proc ::redis::__method__close {id fd} { catch {unset ::redis::addr($id)} catch {unset ::redis::blocking($id)} catch {unset ::redis::deferred($id)} + catch {unset ::redis::readraw($id)} catch {unset ::redis::reconnect($id)} catch {unset ::redis::tls($id)} catch {unset ::redis::state($id)} @@ -174,6 +177,10 @@ proc ::redis::__method__deferred {id fd val} { set ::redis::deferred($id) $val } +proc ::redis::__method__readraw {id fd val} { + set ::redis::readraw($id) $val +} + proc ::redis::redis_write {fd buf} { puts -nonewline $fd $buf } @@ -240,26 +247,46 @@ proc ::redis::redis_read_null fd { return {} } +proc ::redis::redis_read_bool fd { + set v [redis_read_line $fd] + if {$v == "t"} {return 1} + if {$v == "f"} {return 0} + return -code error "Bad protocol, '$v' as bool type" +} + proc ::redis::redis_read_reply {id fd} { - set type [read $fd 1] - switch -exact -- $type { - _ {redis_read_null $fd} - : - - + {redis_read_line $fd} - , {expr {double([redis_read_line $fd])}} - - {return -code error [redis_read_line $fd]} - $ {redis_bulk_read $fd} - > - - ~ - - * {redis_multi_bulk_read $id $fd} - % {redis_read_map $id $fd} - default { - if {$type eq {}} { - catch {close $fd} - set ::redis::fd($id) {} - return -code error "I/O error reading reply" + if {$::redis::readraw($id)} { + return [redis_read_line $fd] + } + + while {1} { + set type [read $fd 1] + switch -exact -- $type { + _ {return [redis_read_null $fd]} + : - + ( - + + {return [redis_read_line $fd]} + , {return [expr {double([redis_read_line $fd])}]} + # {return [redis_read_bool $fd]} + - {return -code error [redis_read_line $fd]} + $ {return [redis_bulk_read $fd]} + > - + ~ - + * 
{return [redis_multi_bulk_read $id $fd]} + % {return [redis_read_map $id $fd]} + | { + # ignore attributes for now (nowhere to store them) + redis_read_map $id $fd + continue + } + default { + if {$type eq {}} { + catch {close $fd} + set ::redis::fd($id) {} + return -code error "I/O error reading reply" + } + return -code error "Bad protocol, '$type' as reply type byte" } - return -code error "Bad protocol, '$type' as reply type byte" } } } diff --git a/tests/support/util.tcl b/tests/support/util.tcl index ef7e07b86..dd843110d 100644 --- a/tests/support/util.tcl +++ b/tests/support/util.tcl @@ -511,6 +511,14 @@ proc stop_write_load {handle} { catch {exec /bin/kill -9 $handle} } +proc wait_load_handlers_disconnected {{level 0}} { + wait_for_condition 50 100 { + ![string match {*name=LOAD_HANDLER*} [r $level client list]] + } else { + fail "load_handler(s) still connected after too long time." + } +} + proc K { x y } { set x } # Shuffle a list with Fisher-Yates algorithm. @@ -607,6 +615,7 @@ proc generate_fuzzy_traffic_on_key {key duration} { set arity [lindex $cmd_info 1] set arity [expr $arity < 0 ? - $arity: $arity] set firstkey [lindex $cmd_info 3] + set lastkey [lindex $cmd_info 4] set i 1 if {$cmd == "XINFO"} { lappend cmd "STREAM" @@ -636,7 +645,7 @@ proc generate_fuzzy_traffic_on_key {key duration} { incr i 4 } for {} {$i < $arity} {incr i} { - if {$i == $firstkey} { + if {$i == $firstkey || $i == $lastkey} { lappend cmd $key } else { lappend cmd [randomValue] diff --git a/tests/unit/aofrw.tcl b/tests/unit/aofrw.tcl index 1a686a2fa..5bdf87256 100644 --- a/tests/unit/aofrw.tcl +++ b/tests/unit/aofrw.tcl @@ -41,15 +41,8 @@ start_server {tags {"aofrw"}} { stop_write_load $load_handle3 stop_write_load $load_handle4 - # Make sure that we remain the only connected client. - # This step is needed to make sure there are no pending writes - # that will be processed between the two "debug digest" calls. 
- wait_for_condition 50 100 { - [llength [split [string trim [r client list]] "\n"]] == 1 - } else { - puts [r client list] - fail "Clients generating loads are not disconnecting" - } + # Make sure no more commands processed, before taking debug digest + wait_load_handlers_disconnected # Get the data set digest set d1 [r debug digest] diff --git a/tests/unit/auth.tcl b/tests/unit/auth.tcl index b63cf0126..5997707c6 100644 --- a/tests/unit/auth.tcl +++ b/tests/unit/auth.tcl @@ -24,6 +24,22 @@ start_server {tags {"auth"} overrides {requirepass foobar}} { r set foo 100 r incr foo } {101} + + test {For unauthenticated clients multibulk and bulk length are limited} { + set rr [redis [srv "host"] [srv "port"] 0 $::tls] + $rr write "*100\r\n" + $rr flush + catch {[$rr read]} e + assert_match {*unauthenticated multibulk length*} $e + $rr close + + set rr [redis [srv "host"] [srv "port"] 0 $::tls] + $rr write "*1\r\n\$100000000\r\n" + $rr flush + catch {[$rr read]} e + assert_match {*unauthenticated bulk length*} $e + $rr close + } } start_server {tags {"auth_binary_password"}} { diff --git a/tests/unit/bitops.tcl b/tests/unit/bitops.tcl index a280721f0..bab7458f1 100644 --- a/tests/unit/bitops.tcl +++ b/tests/unit/bitops.tcl @@ -407,3 +407,31 @@ start_server {tags {"bitops"}} { } } } + +start_server {tags {"bitops large-memory"}} { + test "BIT pos larger than UINT_MAX" { + set bytes [expr (1 << 29) + 1] + set bitpos [expr (1 << 32)] + set oldval [lindex [r config get proto-max-bulk-len] 1] + r config set proto-max-bulk-len $bytes + r setbit mykey $bitpos 1 + assert_equal $bytes [r strlen mykey] + assert_equal 1 [r getbit mykey $bitpos] + assert_equal [list 128 128 -1] [r bitfield mykey get u8 $bitpos set u8 $bitpos 255 get i8 $bitpos] + assert_equal $bitpos [r bitpos mykey 1] + assert_equal $bitpos [r bitpos mykey 1 [expr $bytes - 1]] + if {$::accurate} { + # set all bits to 1 + set mega [expr (1 << 23)] + set part [string repeat "\xFF" $mega] + for {set i 0} {$i < 64} 
{incr i} { + r setrange mykey [expr $i * $mega] $part + } + r setrange mykey [expr $bytes - 1] "\xFF" + assert_equal [expr $bitpos + 8] [r bitcount mykey] + assert_equal -1 [r bitpos mykey 0 0 [expr $bytes - 1]] + } + r config set proto-max-bulk-len $oldval + r del mykey + } {1} +} diff --git a/tests/unit/geo.tcl b/tests/unit/geo.tcl index a51d1dc5c..3aa8f4d56 100644 --- a/tests/unit/geo.tcl +++ b/tests/unit/geo.tcl @@ -71,6 +71,34 @@ proc pointInRectangle {width_km height_km lon lat search_lon search_lat error} { return true } +proc verify_geo_edge_response_bylonlat {expected_response expected_store_response} { + catch {r georadius src{t} 1 1 1 km} response + assert_match $expected_response $response + + catch {r georadius src{t} 1 1 1 km store dest{t}} response + assert_match $expected_store_response $response + + catch {r geosearch src{t} fromlonlat 0 0 byradius 1 km} response + assert_match $expected_response $response + + catch {r geosearchstore dest{t} src{t} fromlonlat 0 0 byradius 1 km} response + assert_match $expected_store_response $response +} + +proc verify_geo_edge_response_bymember {expected_response expected_store_response} { + catch {r georadiusbymember src{t} member 1 km} response + assert_match $expected_response $response + + catch {r georadiusbymember src{t} member 1 km store dest{t}} response + assert_match $expected_store_response $response + + catch {r geosearch src{t} frommember member bybox 1 1 km} response + assert_match $expected_response $response + + catch {r geosearchstore dest{t} src{t} frommember member bybox 1 1 m} response + assert_match $expected_store_response $response +} + # The following list represents sets of random seed, search position # and radius that caused bugs in the past. It is used by the randomized # test later as a starting point. 
When the regression vectors are scanned @@ -95,6 +123,34 @@ set regression_vectors { set rv_idx 0 start_server {tags {"geo"}} { + test {GEO with wrong type src key} { + r set src{t} wrong_type + + verify_geo_edge_response_bylonlat "WRONGTYPE*" "WRONGTYPE*" + verify_geo_edge_response_bymember "WRONGTYPE*" "WRONGTYPE*" + } + + test {GEO with non existing src key} { + r del src{t} + + verify_geo_edge_response_bylonlat {} 0 + verify_geo_edge_response_bymember {} 0 + } + + test {GEO BYLONLAT with empty search} { + r del src{t} + r geoadd src{t} 13.361389 38.115556 "Palermo" 15.087269 37.502669 "Catania" + + verify_geo_edge_response_bylonlat {} 0 + } + + test {GEO BYMEMBER with non existing member} { + r del src{t} + r geoadd src{t} 13.361389 38.115556 "Palermo" 15.087269 37.502669 "Catania" + + verify_geo_edge_response_bymember "ERR*" "ERR*" + } + test {GEOADD create} { r geoadd nyc -73.9454966 40.747533 "lic market" } {1} @@ -357,6 +413,21 @@ start_server {tags {"geo"}} { assert_equal [r zrange points 0 -1] [r zrange points2 0 -1] } + test {GEORADIUSBYMEMBER STORE/STOREDIST option: plain usage} { + r del points{t} + r geoadd points{t} 13.361389 38.115556 "Palermo" 15.087269 37.502669 "Catania" + + r georadiusbymember points{t} Palermo 500 km store points2{t} + assert_equal {Palermo Catania} [r zrange points2{t} 0 -1] + + r georadiusbymember points{t} Catania 500 km storedist points2{t} + assert_equal {Catania Palermo} [r zrange points2{t} 0 -1] + + set res [r zrange points2{t} 0 -1 withscores] + assert {[lindex $res 1] < 1} + assert {[lindex $res 3] > 166} + } + test {GEOSEARCHSTORE STORE option: plain usage} { r geosearchstore points2 points fromlonlat 13.361389 38.115556 byradius 500 km assert_equal [r zrange points 0 -1] [r zrange points2 0 -1] diff --git a/tests/unit/info.tcl b/tests/unit/info.tcl index 0602e7147..61be4a0d1 100644 --- a/tests/unit/info.tcl +++ b/tests/unit/info.tcl @@ -110,11 +110,12 @@ start_server {tags {"info"}} { catch {r exec} e assert_match 
{EXECABORT*} $e assert_match {*count=1*} [errorstat ERR] - assert_equal [s total_error_replies] 1 + assert_match {*count=1*} [errorstat EXECABORT] + assert_equal [s total_error_replies] 2 assert_match {*calls=0,*,rejected_calls=1,failed_calls=0} [cmdstat set] assert_match {*calls=1,*,rejected_calls=0,failed_calls=0} [cmdstat multi] - assert_match {*calls=1,*,rejected_calls=0,failed_calls=0} [cmdstat exec] - assert_equal [s total_error_replies] 1 + assert_match {*calls=1,*,rejected_calls=0,failed_calls=1} [cmdstat exec] + assert_equal [s total_error_replies] 2 r config resetstat assert_match {} [errorstat ERR] } diff --git a/tests/unit/introspection.tcl b/tests/unit/introspection.tcl index 24297a841..104c299be 100644 --- a/tests/unit/introspection.tcl +++ b/tests/unit/introspection.tcl @@ -31,6 +31,52 @@ start_server {tags {"introspection"}} { assert_match {*lua*"set"*"foo"*"bar"*} [$rd read] } + test {MONITOR supports redacting command arguments} { + set rd [redis_deferring_client] + $rd monitor + $rd read ; # Discard the OK + + r migrate [srv 0 host] [srv 0 port] key 9 5000 + r migrate [srv 0 host] [srv 0 port] key 9 5000 AUTH user + r migrate [srv 0 host] [srv 0 port] key 9 5000 AUTH2 user password + catch {r auth not-real} _ + catch {r auth not-real not-a-password} _ + catch {r hello 2 AUTH not-real not-a-password} _ + + assert_match {*"key"*"9"*"5000"*} [$rd read] + assert_match {*"key"*"9"*"5000"*"(redacted)"*} [$rd read] + assert_match {*"key"*"9"*"5000"*"(redacted)"*"(redacted)"*} [$rd read] + assert_match {*"auth"*"(redacted)"*} [$rd read] + assert_match {*"auth"*"(redacted)"*"(redacted)"*} [$rd read] + assert_match {*"hello"*"2"*"AUTH"*"(redacted)"*"(redacted)"*} [$rd read] + $rd close + } + + test {MONITOR correctly handles multi-exec cases} { + set rd [redis_deferring_client] + $rd monitor + $rd read ; # Discard the OK + + # Make sure multi-exec statements are ordered + # correctly + r multi + r set foo bar + r exec + assert_match {*"multi"*} [$rd read] 
+ assert_match {*"set"*"foo"*"bar"*} [$rd read] + assert_match {*"exec"*} [$rd read] + + # Make sure we close multi statements on errors + r multi + catch {r syntax error} _ + catch {r exec} _ + + assert_match {*"multi"*} [$rd read] + assert_match {*"exec"*} [$rd read] + + $rd close + } + test {CLIENT GETNAME should return NIL if name is not assigned} { r client getname } {} diff --git a/tests/unit/moduleapi/basics.tcl b/tests/unit/moduleapi/basics.tcl new file mode 100644 index 000000000..96683f4cc --- /dev/null +++ b/tests/unit/moduleapi/basics.tcl @@ -0,0 +1,12 @@ +set testmodule [file normalize tests/modules/basics.so] + + +start_server {tags {"modules"}} { + r module load $testmodule + + test {test module api basics} { + r test.basics + } {ALL TESTS PASSED} + + r module unload test +} diff --git a/tests/unit/moduleapi/blockonbackground.tcl b/tests/unit/moduleapi/blockonbackground.tcl index 66a232fab..79ca52143 100644 --- a/tests/unit/moduleapi/blockonbackground.tcl +++ b/tests/unit/moduleapi/blockonbackground.tcl @@ -85,4 +85,33 @@ start_server {tags {"modules"}} { assert_equal [r slowlog len] 0 } } + + test "client unblock works only for modules with timeout support" { + set rd [redis_deferring_client] + $rd client id + set id [$rd read] + + # Block with a timeout function - may unblock + $rd block.block 20000 + wait_for_condition 50 100 { + [r block.is_blocked] == 1 + } else { + fail "Module did not block" + } + + assert_equal 1 [r client unblock $id] + assert_match {*Timed out*} [$rd read] + + # Block without a timeout function - cannot unblock + $rd block.block 0 + wait_for_condition 50 100 { + [r block.is_blocked] == 1 + } else { + fail "Module did not block" + } + + assert_equal 0 [r client unblock $id] + assert_equal "OK" [r block.release foobar] + assert_equal "foobar" [$rd read] + } } diff --git a/tests/unit/multi.tcl b/tests/unit/multi.tcl index e22b6d43d..d33f94515 100644 --- a/tests/unit/multi.tcl +++ b/tests/unit/multi.tcl @@ -121,6 +121,22 @@ 
start_server {tags {"multi"}} { r exec } {} + test {EXEC fail on lazy expired WATCHed key} { + r flushall + r debug set-active-expire 0 + + r del key + r set key 1 px 2 + r watch key + + after 100 + + r multi + r incr key + assert_equal [r exec] {} + r debug set-active-expire 1 + } {OK} {needs:debug} + test {After successful EXEC key is no longer watched} { r set x 30 r watch x diff --git a/tests/unit/obuf-limits.tcl b/tests/unit/obuf-limits.tcl index ad5c13452..bbb9fcbf6 100644 --- a/tests/unit/obuf-limits.tcl +++ b/tests/unit/obuf-limits.tcl @@ -18,65 +18,72 @@ start_server {tags {"obuf-limits"} overrides { server-threads 1 }} { assert {$omem >= 70000 && $omem < 200000} $rd1 close } - - test {Client output buffer soft limit is not enforced if time is not overreached} { - r config set client-output-buffer-limit {pubsub 0 100000 10} - set rd1 [redis_deferring_client] - - $rd1 subscribe foo - set reply [$rd1 read] - assert {$reply eq "subscribe foo 1"} - - set omem 0 - set start_time 0 - set time_elapsed 0 - while 1 { - if {$start_time != 0} { - # Slow down loop when omen has reached the limit. 
- after 10 - } - r publish foo [string repeat "x" 1000] - set clients [split [r client list] "\r\n"] - set c [split [lindex $clients 1] " "] - if {![regexp {omem=([0-9]+)} $c - omem]} break - if {$omem > 100000} { - if {$start_time == 0} {set start_time [clock seconds]} - set time_elapsed [expr {[clock seconds]-$start_time}] - if {$time_elapsed >= 5} break - } + + foreach {soft_limit_time wait_for_timeout} {3 yes + 4 no } { + if $wait_for_timeout { + set test_name "Client output buffer soft limit is enforced if time is overreached" + } else { + set test_name "Client output buffer soft limit is not enforced too early and is enforced when no traffic" } - assert {$omem >= 100000 && $time_elapsed >= 5 && $time_elapsed <= 10} - $rd1 close - } - test {Client output buffer soft limit is enforced if time is overreached} { - r config set client-output-buffer-limit {pubsub 0 100000 3} - set rd1 [redis_deferring_client] + test $test_name { + r config set client-output-buffer-limit "pubsub 0 100000 $soft_limit_time" + set soft_limit_time [expr $soft_limit_time*1000] + set rd1 [redis_deferring_client] - $rd1 subscribe foo - set reply [$rd1 read] - assert {$reply eq "subscribe foo 1"} + $rd1 client setname test_client + set reply [$rd1 read] + assert {$reply eq "OK"} - set omem 0 - set start_time 0 - set time_elapsed 0 - while 1 { - if {$start_time != 0} { - # Slow down loop when omen has reached the limit. 
- after 10 + $rd1 subscribe foo + set reply [$rd1 read] + assert {$reply eq "subscribe foo 1"} + + set omem 0 + set start_time 0 + set time_elapsed 0 + set last_under_limit_time [clock milliseconds] + while 1 { + r publish foo [string repeat "x" 1000] + set clients [split [r client list] "\r\n"] + set c [lsearch -inline $clients *name=test_client*] + if {$start_time != 0} { + set time_elapsed [expr {[clock milliseconds]-$start_time}] + # Make sure test isn't taking too long + assert {$time_elapsed <= [expr $soft_limit_time+3000]} + } + if {$wait_for_timeout && $c == ""} { + # Make sure we're disconnected when we reach the soft limit + assert {$omem >= 100000 && $time_elapsed >= $soft_limit_time} + break + } else { + assert {[regexp {omem=([0-9]+)} $c - omem]} + } + if {$omem > 100000} { + if {$start_time == 0} {set start_time $last_under_limit_time} + if {!$wait_for_timeout && $time_elapsed >= [expr $soft_limit_time-1000]} break + # Slow down loop when omem has reached the limit. + after 10 + } else { + # if the OS socket buffers swallowed what we previously filled, reset the start timer. 
+ set start_time 0 + set last_under_limit_time [clock milliseconds] + } } - r publish foo [string repeat "x" 1000] - set clients [split [r client list] "\r\n"] - set c [split [lindex $clients 1] " "] - if {![regexp {omem=([0-9]+)} $c - omem]} break - if {$omem > 100000} { - if {$start_time == 0} {set start_time [clock seconds]} - set time_elapsed [expr {[clock seconds]-$start_time}] - if {$time_elapsed >= 10} break + + if {!$wait_for_timeout} { + # After we completely stopped the traffic, wait for soft limit to time out + set timeout [expr {$soft_limit_time+1500 - ([clock milliseconds]-$start_time)}] + wait_for_condition [expr $timeout/10] 10 { + [lsearch [split [r client list] "\r\n"] *name=test_client*] == -1 + } else { + fail "Soft limit timed out but client still connected" + } } + + $rd1 close } - assert {$omem >= 100000 && $time_elapsed < 6} - $rd1 close } test {No response for single command if client output buffer hard limit is enforced} { diff --git a/tests/unit/other.tcl b/tests/unit/other.tcl index 9c2a70f05..8481a0a51 100644 --- a/tests/unit/other.tcl +++ b/tests/unit/other.tcl @@ -271,9 +271,6 @@ start_server {overrides {save ""} tags {"other"}} { assert_equal [$rd read] "OK" $rd reset - - # skip reset ouptut - $rd read assert_equal [$rd read] "RESET" assert_no_match {*flags=O*} [r client list] diff --git a/tests/unit/pause.tcl b/tests/unit/pause.tcl index 67b684d36..21b9680df 100644 --- a/tests/unit/pause.tcl +++ b/tests/unit/pause.tcl @@ -195,6 +195,57 @@ start_server {tags {"pause network"}} { $rd close } + start_server {tags {needs:repl external:skip}} { + set master [srv -1 client] + set master_host [srv -1 host] + set master_port [srv -1 port] + + # Avoid PINGs + $master config set repl-ping-replica-period 3600 + r replicaof $master_host $master_port + + wait_for_condition 50 100 { + [s master_link_status] eq {up} + } else { + fail "Replication not started." 
+ } + + test "Test when replica paused, offset would not grow" { + $master set foo bar + set old_master_offset [status $master master_repl_offset] + + wait_for_condition 50 100 { + [s slave_repl_offset] == [status $master master_repl_offset] + } else { + fail "Replication offset not matched." + } + + r client pause 100000 write + $master set foo2 bar2 + + # Make sure replica received data from master + wait_for_condition 50 100 { + [s slave_read_repl_offset] == [status $master master_repl_offset] + } else { + fail "Replication not work." + } + + # Replica would not apply the write command + assert {[s slave_repl_offset] == $old_master_offset} + r get foo2 + } {} + + test "Test replica offset would grow after unpause" { + r client unpause + wait_for_condition 50 100 { + [s slave_repl_offset] == [status $master master_repl_offset] + } else { + fail "Replication not continue." + } + r get foo2 + } {bar2} + } + # Make sure we unpause at the end r client unpause } diff --git a/tests/unit/protocol.tcl b/tests/unit/protocol.tcl index 442c23de6..a3d8f7e89 100644 --- a/tests/unit/protocol.tcl +++ b/tests/unit/protocol.tcl @@ -102,6 +102,94 @@ start_server {tags {"protocol network"}} { } {*Protocol error*} } unset c + + # recover the broken connection + reconnect + r ping + + # raw RESP response tests + r readraw 1 + + test "raw protocol response" { + r srandmember nonexisting_key + } {*-1} + + r deferred 1 + + test "raw protocol response - deferred" { + r srandmember nonexisting_key + r read + } {*-1} + + test "raw protocol response - multiline" { + r sadd ss a + assert_equal [r read] {:1} + r srandmember ss 100 + assert_equal [r read] {*1} + assert_equal [r read] {$1} + assert_equal [r read] {a} + } + + # restore connection settings + r readraw 0 + r deferred 0 + + # check the connection still works + assert_equal [r ping] {PONG} + + test {RESP3 attributes} { + r hello 3 + set res [r debug protocol attrib] + # currently the parser in redis.tcl ignores the attributes + + # 
restore state + r hello 2 + set _ $res + } {Some real reply following the attribute} + + test {RESP3 attributes readraw} { + r hello 3 + r readraw 1 + r deferred 1 + + r debug protocol attrib + assert_equal [r read] {|1} + assert_equal [r read] {$14} + assert_equal [r read] {key-popularity} + assert_equal [r read] {*2} + assert_equal [r read] {$7} + assert_equal [r read] {key:123} + assert_equal [r read] {:90} + assert_equal [r read] {$39} + assert_equal [r read] {Some real reply following the attribute} + + # restore state + r readraw 0 + r deferred 0 + r hello 2 + set _ {} + } {} + + test {RESP3 attributes on RESP2} { + r hello 2 + set res [r debug protocol attrib] + set _ $res + } {Some real reply following the attribute} + + test "test big number parsing" { + r hello 3 + r debug protocol bignum + } {1234567999999999999999999999999999999} + + test "test bool parsing" { + r hello 3 + assert_equal [r debug protocol true] 1 + assert_equal [r debug protocol false] 0 + r hello 2 + assert_equal [r debug protocol true] 1 + assert_equal [r debug protocol false] 0 + set _ {} + } {} } start_server {tags {"regression"}} { diff --git a/tests/unit/pubsub.tcl b/tests/unit/pubsub.tcl index 1906805a7..03c5dfccb 100644 --- a/tests/unit/pubsub.tcl +++ b/tests/unit/pubsub.tcl @@ -152,6 +152,24 @@ start_server {tags {"pubsub network"}} { r pubsub numsub abc def } {abc 0 def 0} + test "NUMPATs returns the number of unique patterns" { + set rd1 [redis_deferring_client] + set rd2 [redis_deferring_client] + + # Three unique patterns and one that overlaps + psubscribe $rd1 "foo*" + psubscribe $rd2 "foo*" + psubscribe $rd1 "bar*" + psubscribe $rd2 "baz*" + + set patterns [r pubsub numpat] + + # clean up clients + punsubscribe $rd1 + punsubscribe $rd2 + assert_equal 3 $patterns + } + test "Mix SUBSCRIBE and PSUBSCRIBE" { set rd1 [redis_deferring_client] assert_equal {1} [subscribe $rd1 {foo.bar}] diff --git a/tests/unit/scripting.tcl b/tests/unit/scripting.tcl index 22440c502..bd4c86588 
100644 --- a/tests/unit/scripting.tcl +++ b/tests/unit/scripting.tcl @@ -940,3 +940,18 @@ start_server {tags {"scripting"}} { r eval {return 'hello'} 0 r eval {return 'hello'} 0 } + +start_server {tags {"scripting needs:debug external:skip"}} { + test {Test scripting debug protocol parsing} { + r script debug sync + r eval {return 'hello'} 0 + catch {r 'hello\0world'} e + assert_match {*Unknown Redis Lua debugger command*} $e + catch {r 'hello\0'} e + assert_match {*Unknown Redis Lua debugger command*} $e + catch {r '\0hello'} e + assert_match {*Unknown Redis Lua debugger command*} $e + catch {r '\0hello\0'} e + assert_match {*Unknown Redis Lua debugger command*} $e + } +} diff --git a/tests/unit/slowlog.tcl b/tests/unit/slowlog.tcl index eb9dfc65d..9f6e248e9 100644 --- a/tests/unit/slowlog.tcl +++ b/tests/unit/slowlog.tcl @@ -45,18 +45,35 @@ start_server {tags {"slowlog"} overrides {slowlog-log-slower-than 1000000}} { r config set slowlog-log-slower-than 0 r slowlog reset r config set masterauth "" - r acl setuser slowlog-test-user + r acl setuser slowlog-test-user +get +set r config set slowlog-log-slower-than 0 r config set slowlog-log-slower-than 10000 set slowlog_resp [r slowlog get] # Make sure normal configs work, but the two sensitive - # commands are omitted - assert_equal 2 [llength $slowlog_resp] - assert_equal {slowlog reset} [lindex [lindex [r slowlog get] 1] 3] + # commands are omitted or redacted + assert_equal 4 [llength $slowlog_resp] + assert_equal {slowlog reset} [lindex [lindex [r slowlog get] 3] 3] + assert_equal {config set masterauth (redacted)} [lindex [lindex [r slowlog get] 2] 3] + assert_equal {acl setuser (redacted) (redacted) (redacted)} [lindex [lindex [r slowlog get] 1] 3] assert_equal {config set slowlog-log-slower-than 0} [lindex [lindex [r slowlog get] 0] 3] } + test {SLOWLOG - Some commands can redact sensitive fields} { + r config set slowlog-log-slower-than 0 + r slowlog reset + r migrate [srv 0 host] [srv 0 port] key 9 5000 + r 
migrate [srv 0 host] [srv 0 port] key 9 5000 AUTH user + r migrate [srv 0 host] [srv 0 port] key 9 5000 AUTH2 user password + + r config set slowlog-log-slower-than 10000 + # Make sure all 3 commands were logged, but the sensitive fields are omitted + assert_equal 4 [llength [r slowlog get]] + assert_match {* key 9 5000} [lindex [lindex [r slowlog get] 2] 3] + assert_match {* key 9 5000 AUTH (redacted)} [lindex [lindex [r slowlog get] 1] 3] + assert_match {* key 9 5000 AUTH2 (redacted) (redacted)} [lindex [lindex [r slowlog get] 0] 3] + } + test {SLOWLOG - Rewritten commands are logged as their original command} { r config set slowlog-log-slower-than 0 diff --git a/tests/unit/tracking.tcl b/tests/unit/tracking.tcl index 4c75b6f48..217a057dd 100644 --- a/tests/unit/tracking.tcl +++ b/tests/unit/tracking.tcl @@ -132,6 +132,22 @@ start_server {tags {"tracking network"}} { assert {$keys eq {mykey}} } + test {Tracking gets notification of lazy expired keys} { + r CLIENT TRACKING off + r CLIENT TRACKING on BCAST REDIRECT $redir_id NOLOOP + # Use multi-exec to expose a race where the key gets two invalidations + # in the same event loop, once by the client so filtered by NOLOOP, and + # the second one by the lazy expire + r MULTI + r SET mykey{t} myval px 1 + r SET mykeyotherkey{t} myval ; # We should not get it + r DEBUG SLEEP 0.1 + r GET mykey{t} + r EXEC + set keys [lsort [lindex [$rd_redirection read] 2]] + assert {$keys eq {mykey{t}}} + } {} {needs:debug} + test {HELLO 3 reply is correct} { set reply [r HELLO 3] assert_equal [dict get $reply proto] 3 diff --git a/tests/unit/type/hash.tcl b/tests/unit/type/hash.tcl index 2210fba2b..e95fd9fce 100644 --- a/tests/unit/type/hash.tcl +++ b/tests/unit/type/hash.tcl @@ -72,6 +72,19 @@ start_server {tags {"hash"}} { r hrandfield nonexisting_key 100 } {} + # Make sure we can distinguish between an empty array and a null response + r readraw 1 + + test "HRANDFIELD count of 0 is handled correctly - emptyarray" { + r 
hrandfield myhash 0 + } {*0} + + test "HRANDFIELD with against non existing key - emptyarray" { + r hrandfield nonexisting_key 100 + } {*0} + + r readraw 0 + foreach {type contents} " hashtable {{a 1} {b 2} {c 3} {d 4} {e 5} {6 f} {7 g} {8 h} {9 i} {[randstring 70 90 alpha] 10}} ziplist {{a 1} {b 2} {c 3} {d 4} {e 5} {6 f} {7 g} {8 h} {9 i} {10 j}} " { diff --git a/tests/unit/type/set.tcl b/tests/unit/type/set.tcl index 5548ca3a2..ee7b936b5 100644 --- a/tests/unit/type/set.tcl +++ b/tests/unit/type/set.tcl @@ -276,14 +276,86 @@ start_server { } } - test "SINTER against non-set should throw error" { - r set key1 x - assert_error "WRONGTYPE*" {r sinter key1 noset} + test "SDIFF against non-set should throw error" { + # with an empty set + r set key1{t} x + assert_error "WRONGTYPE*" {r sdiff key1{t} noset{t}} + # different order + assert_error "WRONGTYPE*" {r sdiff noset{t} key1{t}} + + # with a legal set + r del set1{t} + r sadd set1{t} a b c + assert_error "WRONGTYPE*" {r sdiff key1{t} set1{t}} + # different order + assert_error "WRONGTYPE*" {r sdiff set1{t} key1{t}} } - test "SUNION against non-set should throw error" { - r set key1 x - assert_error "WRONGTYPE*" {r sunion key1 noset} + test "SDIFF should handle non existing key as empty" { + r del set1{t} set2{t} set3{t} + + r sadd set1{t} a b c + r sadd set2{t} b c d + assert_equal {a} [lsort [r sdiff set1{t} set2{t} set3{t}]] + assert_equal {} [lsort [r sdiff set3{t} set2{t} set1{t}]] + } + + test "SDIFFSTORE against non-set should throw error" { + r del set1{t} set2{t} set3{t} key1{t} + r set key1{t} x + + # with an empty dstkey + assert_error "WRONGTYPE*" {r SDIFFSTORE set3{t} key1{t} noset{t}} + assert_equal 0 [r exists set3{t}] + assert_error "WRONGTYPE*" {r SDIFFSTORE set3{t} noset{t} key1{t}} + assert_equal 0 [r exists set3{t}] + + # with a legal dstkey + r sadd set1{t} a b c + r sadd set2{t} b c d + r sadd set3{t} e + assert_error "WRONGTYPE*" {r SDIFFSTORE set3{t} key1{t} set1{t} noset{t}} + assert_equal 
1 [r exists set3{t}] + assert_equal {e} [lsort [r smembers set3{t}]] + + assert_error "WRONGTYPE*" {r SDIFFSTORE set3{t} set1{t} key1{t} set2{t}} + assert_equal 1 [r exists set3{t}] + assert_equal {e} [lsort [r smembers set3{t}]] + } + + test "SDIFFSTORE should handle non existing key as empty" { + r del set1{t} set2{t} set3{t} + + r set setres{t} xxx + assert_equal 0 [r sdiffstore setres{t} foo111{t} bar222{t}] + assert_equal 0 [r exists setres{t}] + + # with a legal dstkey, should delete dstkey + r sadd set3{t} a b c + assert_equal 0 [r sdiffstore set3{t} set1{t} set2{t}] + assert_equal 0 [r exists set3{t}] + + r sadd set1{t} a b c + assert_equal 3 [r sdiffstore set3{t} set1{t} set2{t}] + assert_equal 1 [r exists set3{t}] + assert_equal {a b c} [lsort [r smembers set3{t}]] + + # with a legal dstkey and empty set2, should delete the dstkey + r sadd set3{t} a b c + assert_equal 0 [r sdiffstore set3{t} set2{t} set1{t}] + assert_equal 0 [r exists set3{t}] + } + + test "SINTER against non-set should throw error" { + r set key1{t} x + assert_error "WRONGTYPE*" {r sinter key1{t} noset{t}} + # different order + assert_error "WRONGTYPE*" {r sinter noset{t} key1{t}} + + r sadd set1{t} a b c + assert_error "WRONGTYPE*" {r sinter key1{t} set1{t}} + # different order + assert_error "WRONGTYPE*" {r sinter set1{t} key1{t}} } test "SINTER should handle non existing key as empty" { @@ -303,10 +375,115 @@ start_server { lsort [r sinter set1 set2] } {1 2 3} + test "SINTERSTORE against non-set should throw error" { + r del set1{t} set2{t} set3{t} key1{t} + r set key1{t} x + + # with an empty dstkey + assert_error "WRONGTYPE*" {r sinterstore set3{t} key1{t} noset{t}} + assert_equal 0 [r exists set3{t}] + assert_error "WRONGTYPE*" {r sinterstore set3{t} noset{t} key1{t}} + assert_equal 0 [r exists set3{t}] + + # with a legal dstkey + r sadd set1{t} a b c + r sadd set2{t} b c d + r sadd set3{t} e + assert_error "WRONGTYPE*" {r sinterstore set3{t} key1{t} set2{t} noset{t}} + 
assert_equal 1 [r exists set3{t}] + assert_equal {e} [lsort [r smembers set3{t}]] + + assert_error "WRONGTYPE*" {r sinterstore set3{t} noset{t} key1{t} set2{t}} + assert_equal 1 [r exists set3{t}] + assert_equal {e} [lsort [r smembers set3{t}]] + } + test "SINTERSTORE against non existing keys should delete dstkey" { - r set setres xxx - assert_equal 0 [r sinterstore setres foo111 bar222] - assert_equal 0 [r exists setres] + r del set1{t} set2{t} set3{t} + + r set setres{t} xxx + assert_equal 0 [r sinterstore setres{t} foo111{t} bar222{t}] + assert_equal 0 [r exists setres{t}] + + # with a legal dstkey + r sadd set3{t} a b c + assert_equal 0 [r sinterstore set3{t} set1{t} set2{t}] + assert_equal 0 [r exists set3{t}] + + r sadd set1{t} a b c + assert_equal 0 [r sinterstore set3{t} set1{t} set2{t}] + assert_equal 0 [r exists set3{t}] + + assert_equal 0 [r sinterstore set3{t} set2{t} set1{t}] + assert_equal 0 [r exists set3{t}] + } + + test "SUNION against non-set should throw error" { + r set key1{t} x + assert_error "WRONGTYPE*" {r sunion key1{t} noset{t}} + # different order + assert_error "WRONGTYPE*" {r sunion noset{t} key1{t}} + + r del set1{t} + r sadd set1{t} a b c + assert_error "WRONGTYPE*" {r sunion key1{t} set1{t}} + # different order + assert_error "WRONGTYPE*" {r sunion set1{t} key1{t}} + } + + test "SUNION should handle non existing key as empty" { + r del set1{t} set2{t} set3{t} + + r sadd set1{t} a b c + r sadd set2{t} b c d + assert_equal {a b c d} [lsort [r sunion set1{t} set2{t} set3{t}]] + } + + test "SUNIONSTORE against non-set should throw error" { + r del set1{t} set2{t} set3{t} key1{t} + r set key1{t} x + + # with an empty dstkey + assert_error "WRONGTYPE*" {r sunionstore set3{t} key1{t} noset{t}} + assert_equal 0 [r exists set3{t}] + assert_error "WRONGTYPE*" {r sunionstore set3{t} noset{t} key1{t}} + assert_equal 0 [r exists set3{t}] + + # with a legal dstkey + r sadd set1{t} a b c + r sadd set2{t} b c d + r sadd set3{t} e + assert_error 
"WRONGTYPE*" {r sunionstore set3{t} key1{t} key2{t} noset{t}} + assert_equal 1 [r exists set3{t}] + assert_equal {e} [lsort [r smembers set3{t}]] + + assert_error "WRONGTYPE*" {r sunionstore set3{t} noset{t} key1{t} key2{t}} + assert_equal 1 [r exists set3{t}] + assert_equal {e} [lsort [r smembers set3{t}]] + } + + test "SUNIONSTORE should handle non existing key as empty" { + r del set1{t} set2{t} set3{t} + + r set setres{t} xxx + assert_equal 0 [r sunionstore setres{t} foo111{t} bar222{t}] + assert_equal 0 [r exists setres{t}] + + # set1 set2 both empty, should delete the dstkey + r sadd set3{t} a b c + assert_equal 0 [r sunionstore set3{t} set1{t} set2{t}] + assert_equal 0 [r exists set3{t}] + + r sadd set1{t} a b c + r sadd set3{t} e f + assert_equal 3 [r sunionstore set3{t} set1{t} set2{t}] + assert_equal 1 [r exists set3{t}] + assert_equal {a b c} [lsort [r smembers set3{t}]] + + r sadd set3{t} d + assert_equal 3 [r sunionstore set3{t} set2{t} set1{t}] + assert_equal 1 [r exists set3{t}] + assert_equal {a b c} [lsort [r smembers set3{t}]] } test "SUNIONSTORE against non existing keys should delete dstkey" { @@ -403,10 +580,27 @@ start_server { assert {[lsort $union] eq [lsort $content]} } + test "SRANDMEMBER count of 0 is handled correctly" { + r srandmember myset 0 + } {} + test "SRANDMEMBER with against non existing key" { r srandmember nonexisting_key 100 } {} + # Make sure we can distinguish between an empty array and a null response + r readraw 1 + + test "SRANDMEMBER count of 0 is handled correctly - emptyarray" { + r srandmember myset 0 + } {*0} + + test "SRANDMEMBER with against non existing key - emptyarray" { + r srandmember nonexisting_key 100 + } {*0} + + r readraw 0 + foreach {type contents} { hashtable { 1 5 10 50 125 50000 33959417 4775547 65434162 @@ -632,6 +826,28 @@ start_server { lsort [r smembers set] } {a b c} + test "SMOVE only notify dstset when the addition is successful" { + r del srcset{t} + r del dstset{t} + + r sadd srcset{t} a b + 
r sadd dstset{t} a + + r watch dstset{t} + + r multi + r sadd dstset{t} c + + set r2 [redis_client] + $r2 smove srcset{t} dstset{t} a + + # The dstset is actually unchanged, multi should succeed + r exec + set res [r scard dstset{t}] + assert_equal $res 2 + $r2 close + } + tags {slow} { test {intsets implementation stress testing} { for {set j 0} {$j < 20} {incr j} { diff --git a/tests/unit/type/stream.tcl b/tests/unit/type/stream.tcl index f257880ee..c299a1e97 100644 --- a/tests/unit/type/stream.tcl +++ b/tests/unit/type/stream.tcl @@ -199,6 +199,15 @@ start_server { assert {[r EXISTS otherstream] == 0} } + test {XADD with LIMIT delete entries no more than limit} { + r del yourstream + for {set j 0} {$j < 3} {incr j} { + r XADD yourstream * xitem v + } + r XADD yourstream MAXLEN ~ 0 limit 1 * xitem v + assert {[r XLEN yourstream] == 4} + } + test {XRANGE COUNT works as expected} { assert {[llength [r xrange mystream - + COUNT 10]] == 10} } @@ -532,6 +541,16 @@ start_server { } assert_error ERR* {r XTRIM mystream MAXLEN 1 LIMIT 30} } + + test {XTRIM with LIMIT delete entries no more than limit} { + r del mystream + r config set stream-node-max-entries 2 + for {set j 0} {$j < 3} {incr j} { + r XADD mystream * xitem v + } + assert {[r XTRIM mystream MAXLEN ~ 0 LIMIT 1] == 0} + assert {[r XTRIM mystream MAXLEN ~ 0 LIMIT 2] == 2} + } } start_server {tags {"stream"} overrides {appendonly yes}} { diff --git a/tests/unit/type/zset.tcl b/tests/unit/type/zset.tcl index 96647f778..94b2ab480 100644 --- a/tests/unit/type/zset.tcl +++ b/tests/unit/type/zset.tcl @@ -960,6 +960,39 @@ start_server {tags {"zset"}} { assert_equal 1 [r zcard z2] } + test "Basic ZPOP - $encoding RESP3" { + r hello 3 + r del z1 + create_zset z1 {0 a 1 b 2 c 3 d} + assert_equal {a 0.0} [r zpopmin z1] + assert_equal {d 3.0} [r zpopmax z1] + r hello 2 + } + + test "ZPOP with count - $encoding RESP3" { + r hello 3 + r del z1 + create_zset z1 {0 a 1 b 2 c 3 d} + assert_equal {{a 0.0} {b 1.0}} [r zpopmin z1 
2] + assert_equal {{d 3.0} {c 2.0}} [r zpopmax z1 2] + r hello 2 + } + + test "BZPOP - $encoding RESP3" { + r hello 3 + set rd [redis_deferring_client] + create_zset zset {0 a 1 b 2 c} + + $rd bzpopmin zset 5 + assert_equal {zset a 0} [$rd read] + $rd bzpopmin zset 5 + assert_equal {zset b 1} [$rd read] + $rd bzpopmax zset 5 + assert_equal {zset c 2} [$rd read] + assert_equal 0 [r exists zset] + r hello 2 + } + r config set zset-max-ziplist-entries $original_max_entries r config set zset-max-ziplist-value $original_max_value } @@ -1568,6 +1601,19 @@ start_server {tags {"zset"}} { r zrange z1 5 0 BYSCORE REV LIMIT 0 2 WITHSCORES } {d 4 c 3} + test {ZRANGESTORE - src key missing} { + set res [r zrangestore z2{t} missing{t} 0 -1] + assert_equal $res 0 + r exists z2{t} + } {0} + + test {ZRANGESTORE - src key wrong type} { + r zadd z2{t} 1 a + r set foo{t} bar + assert_error "*WRONGTYPE*" {r zrangestore z2{t} foo{t} 0 -1} + r zrange z2{t} 0 -1 + } {a} + test {ZRANGESTORE - empty range} { set res [r zrangestore z2 z1 5 6] assert_equal $res 0 @@ -1616,6 +1662,20 @@ start_server {tags {"zset"}} { return $res } + # Check whether the zset members belong to the zset + proc check_member {mydict res} { + foreach ele $res { + assert {[dict exists $mydict $ele]} + } + } + + # Check whether the zset members and score belong to the zset + proc check_member_and_score {mydict res} { + foreach {key val} $res { + assert_equal $val [dict get $mydict $key] + } + } + foreach {type contents} "ziplist {1 a 2 b 3 c} skiplist {1 a 2 b 3 [randstring 70 90 alpha]}" { set original_max_value [lindex [r config get zset-max-ziplist-value] 1] r config set zset-max-ziplist-value 10 @@ -1654,6 +1714,19 @@ start_server {tags {"zset"}} { r zrandmember nonexisting_key 100 } {} + # Make sure we can distinguish between an empty array and a null response + r readraw 1 + + test "ZRANDMEMBER count of 0 is handled correctly - emptyarray" { + r zrandmember myzset 0 + } {*0} + + test "ZRANDMEMBER with against 
non existing key - emptyarray" { + r zrandmember nonexisting_key 100 + } {*0} + + r readraw 0 + foreach {type contents} " skiplist {1 a 2 b 3 c 4 d 5 e 6 f 7 g 7 h 9 i 10 [randstring 70 90 alpha]} ziplist {1 a 2 b 3 c 4 d 5 e 6 f 7 g 7 h 9 i 10 j} " { @@ -1676,25 +1749,29 @@ start_server {tags {"zset"}} { # PATH 1: Use negative count. # 1) Check that it returns repeated elements with and without values. + # 2) Check that all the elements actually belong to the original zset. set res [r zrandmember myzset -20] assert_equal [llength $res] 20 + check_member $mydict $res + set res [r zrandmember myzset -1001] assert_equal [llength $res] 1001 + check_member $mydict $res + # again with WITHSCORES set res [r zrandmember myzset -20 withscores] assert_equal [llength $res] 40 + check_member_and_score $mydict $res + set res [r zrandmember myzset -1001 withscores] assert_equal [llength $res] 2002 + check_member_and_score $mydict $res # Test random uniform distribution # df = 9, 40 means 0.00001 probability set res [r zrandmember myzset -1000] assert_lessthan [chi_square_value $res] 40 - - # 2) Check that all the elements actually belong to the original zset. - foreach {key val} $res { - assert {[dict exists $mydict $key]} - } + check_member $mydict $res # 3) Check that eventually all the elements are returned. 
# Use both WITHSCORES and without @@ -1710,7 +1787,7 @@ start_server {tags {"zset"}} { } else { set res [r zrandmember myzset -3] foreach key $res { - dict append auxset $key $val + dict append auxset $key } } if {[lsort [dict keys $mydict]] eq @@ -1726,11 +1803,13 @@ start_server {tags {"zset"}} { set res [r zrandmember myzset $size] assert_equal [llength $res] 10 assert_equal [lsort $res] [lsort [dict keys $mydict]] + check_member $mydict $res # again with WITHSCORES set res [r zrandmember myzset $size withscores] assert_equal [llength $res] 20 assert_equal [lsort $res] [lsort $mydict] + check_member_and_score $mydict $res } # PATH 3: Ask almost as elements as there are in the set. @@ -1742,18 +1821,17 @@ start_server {tags {"zset"}} { # # We can test both the code paths just changing the size but # using the same code. - foreach size {8 2} { + foreach size {1 2 8} { + # 1) Check that all the elements actually belong to the + # original set. set res [r zrandmember myzset $size] assert_equal [llength $res] $size + check_member $mydict $res + # again with WITHSCORES set res [r zrandmember myzset $size withscores] assert_equal [llength $res] [expr {$size * 2}] - - # 1) Check that all the elements actually belong to the - # original set. - foreach ele [dict keys $res] { - assert {[dict exists $mydict $ele]} - } + check_member_and_score $mydict $res # 2) Check that eventually all the elements are returned. 
# Use both WITHSCORES and without diff --git a/tests/unit/violations.tcl b/tests/unit/violations.tcl new file mode 100644 index 000000000..1d3140c52 --- /dev/null +++ b/tests/unit/violations.tcl @@ -0,0 +1,156 @@ +# These tests consume massive amounts of memory, and are not +# suitable to be executed as part of the normal test suite +set ::str500 [string repeat x 500000000] ;# 500mb + +# Utility function to write big argument into redis client connection +proc write_big_bulk {size} { + r write "\$$size\r\n" + while {$size >= 500000000} { + r write $::str500 + incr size -500000000 + } + if {$size > 0} { + r write [string repeat x $size] + } + r write "\r\n" +} + +# One XADD with one huge 5GB field +# Expected to fail resulting in an empty stream +start_server [list overrides [list save ""] ] { + test {XADD one huge field} { + r config set proto-max-bulk-len 10000000000 ;#10gb + r config set client-query-buffer-limit 10000000000 ;#10gb + r write "*5\r\n\$4\r\nXADD\r\n\$2\r\nS1\r\n\$1\r\n*\r\n" + r write "\$1\r\nA\r\n" + write_big_bulk 5000000000 ;#5gb + r flush + catch {r read} err + assert_match {*too large*} $err + r xlen S1 + } {0} +} + +# One XADD with one huge (exactly nearly) 4GB field +# This uncovers the overflow in lpEncodeGetType +# Expected to fail resulting in an empty stream +start_server [list overrides [list save ""] ] { + test {XADD one huge field - 1} { + r config set proto-max-bulk-len 10000000000 ;#10gb + r config set client-query-buffer-limit 10000000000 ;#10gb + r write "*5\r\n\$4\r\nXADD\r\n\$2\r\nS1\r\n\$1\r\n*\r\n" + r write "\$1\r\nA\r\n" + write_big_bulk 4294967295 ;#4gb-1 + r flush + catch {r read} err + assert_match {*too large*} $err + r xlen S1 + } {0} +} + +# Gradually add big stream fields using repeated XADD calls +start_server [list overrides [list save ""] ] { + test {several XADD big fields} { + r config set stream-node-max-bytes 0 + for {set j 0} {$j<10} {incr j} { + r xadd stream * 1 $::str500 2 $::str500 + } + r ping + r xlen 
stream + } {10} +} + +# Add over 4GB to a single stream listpack (one XADD command) +# Expected to fail resulting in an empty stream +start_server [list overrides [list save ""] ] { + test {single XADD big fields} { + r write "*23\r\n\$4\r\nXADD\r\n\$1\r\nS\r\n\$1\r\n*\r\n" + for {set j 0} {$j<10} {incr j} { + r write "\$1\r\n$j\r\n" + write_big_bulk 500000000 ;#500mb + } + r flush + catch {r read} err + assert_match {*too large*} $err + r xlen S + } {0} +} + +# Gradually add big hash fields using repeated HSET calls +# This reproduces the overflow in the call to ziplistResize +# Object will be converted to hashtable encoding +start_server [list overrides [list save ""] ] { + r config set hash-max-ziplist-value 1000000000 ;#1gb + test {hash with many big fields} { + for {set j 0} {$j<10} {incr j} { + r hset h $j $::str500 + } + r object encoding h + } {hashtable} +} + +# Add over 4GB to a single hash field (one HSET command) +# Object will be converted to hashtable encoding +start_server [list overrides [list save ""] ] { + test {hash with one huge field} { + catch {r config set hash-max-ziplist-value 10000000000} ;#10gb + r config set proto-max-bulk-len 10000000000 ;#10gb + r config set client-query-buffer-limit 10000000000 ;#10gb + r write "*4\r\n\$4\r\nHSET\r\n\$2\r\nH1\r\n" + r write "\$1\r\nA\r\n" + write_big_bulk 5000000000 ;#5gb + r flush + r read + r object encoding H1 + } {hashtable} +} + +# Add over 4GB to a single list member (one LPUSH command) +# Currently unsupported, and expected to fail rather than being truncated +# Expected to fail resulting in a non-existing list +start_server [list overrides [list save ""] ] { + test {list with one huge field} { + r config set proto-max-bulk-len 10000000000 ;#10gb + r config set client-query-buffer-limit 10000000000 ;#10gb + r write "*3\r\n\$5\r\nLPUSH\r\n\$2\r\nL1\r\n" + write_big_bulk 5000000000 ;#5gb + r flush + catch {r read} err + assert_match {*too large*} $err + r exists L1 + } {0} +} + +# SORT which 
attempts to store an element larger than 4GB into a list. +# Currently unsupported and results in an assertion instead of truncation +start_server [list overrides [list save ""] ] { + test {SORT adds huge field to list} { + r config set proto-max-bulk-len 10000000000 ;#10gb + r config set client-query-buffer-limit 10000000000 ;#10gb + r write "*3\r\n\$3\r\nSET\r\n\$2\r\nS1\r\n" + write_big_bulk 5000000000 ;#5gb + r flush + r read + assert_equal [r strlen S1] 5000000000 + r set S2 asdf + r sadd myset 1 2 + r mset D1 1 D2 2 + catch {r sort myset by D* get S* store mylist} + assert_equal [count_log_message 0 "crashed by signal"] 0 + assert_equal [count_log_message 0 "ASSERTION FAILED"] 1 + } +} + +# SORT which stores an integer encoded element into a list. +# Just for coverage, no news here. +start_server [list overrides [list save ""] ] { + test {SORT adds integer field to list} { + r set S1 asdf + r set S2 123 ;# integer encoded + assert_encoding "int" S2 + r sadd myset 1 2 + r mset D1 1 D2 2 + r sort myset by D* get S* store mylist + r llen mylist + } {2} +}