From 45fa113d004b98197883ba26a11ccf562a315d62 Mon Sep 17 00:00:00 2001 From: Adam Baldwin Date: Thu, 25 Oct 2012 20:27:10 -0700 Subject: [PATCH 0001/1928] Removed dofile() from Lua --- src/scripting.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/scripting.c b/src/scripting.c index 6f9ec2e89..5f7280448 100644 --- a/src/scripting.c +++ b/src/scripting.c @@ -451,6 +451,8 @@ void luaLoadLibraries(lua_State *lua) { void luaRemoveUnsupportedFunctions(lua_State *lua) { lua_pushnil(lua); lua_setglobal(lua,"loadfile"); + lua_pushnil(lua); + lua_setglobal(lua,"dofile"); } /* This function installs metamethods in the global table _G that prevent From 367035a9838695c1f17a78af0325de5ae6ec08fc Mon Sep 17 00:00:00 2001 From: Jan-Erik Rediger Date: Wed, 13 Aug 2014 19:31:57 +0200 Subject: [PATCH 0002/1928] Don't use extended Regexp Syntax It's not POSIX (BSD systems have -E instead) and we don't actually need it. Closes #1922 --- utils/install_server.sh | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/utils/install_server.sh b/utils/install_server.sh index 15b60a08e..3a5fc3147 100755 --- a/utils/install_server.sh +++ b/utils/install_server.sh @@ -135,13 +135,13 @@ fi echo "## Generated by install_server.sh ##" > $TMP_FILE read -r SED_EXPR <<-EOF -s#^port [0-9]{4}\$#port ${REDIS_PORT}#; \ -s#^logfile .+\$#logfile ${REDIS_LOG_FILE}#; \ -s#^dir .+\$#dir ${REDIS_DATA_DIR}#; \ -s#^pidfile .+\$#pidfile ${PIDFILE}#; \ -s#^daemonize no\$#daemonize yes#; +s#^port .\+#port ${REDIS_PORT}#; \ +s#^logfile .\+#logfile ${REDIS_LOG_FILE}#; \ +s#^dir .\+#dir ${REDIS_DATA_DIR}#; \ +s#^pidfile .\+#pidfile ${PIDFILE}#; \ +s#^daemonize no#daemonize yes#; EOF -sed -r "$SED_EXPR" $DEFAULT_CONFIG >> $TMP_FILE +sed "$SED_EXPR" $DEFAULT_CONFIG >> $TMP_FILE #cat $TPL_FILE | while read line; do eval "echo \"$line\"" >> $TMP_FILE; done cp $TMP_FILE $REDIS_CONFIG_FILE || die "Could not write redis config file $REDIS_CONFIG_FILE" From 087b19237cbdda4a1c082a9cf1aaf6cd0a2b9066 Mon Sep 17 00:00:00 2001 From: Byron Grobe Date: Thu, 11 Sep 2014 10:21:05 -0400 Subject: [PATCH 0003/1928] Fixed issue #1996 (Missing '-' in help message for redis-benchmark) --- src/redis-benchmark.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/redis-benchmark.c b/src/redis-benchmark.c index 2e67f1021..6192d3401 100644 --- a/src/redis-benchmark.c +++ b/src/redis-benchmark.c @@ -557,7 +557,7 @@ usage: " -c Number of parallel connections (default 50)\n" " -n Total number of requests (default 10000)\n" " -d Data size of SET/GET value in bytes (default 2)\n" -" -dbnum SELECT the specified db number (default 0)\n" +" --dbnum SELECT the specified db number (default 0)\n" " -k 1=keep alive 0=reconnect (default 1)\n" " -r Use random keys for SET/GET/INCR, random values for SADD\n" " Using this option the benchmark will expand the string __rand_int__\n" From 6739ef4447adb17ce2d644b530aea429b8d52445 Mon Sep 17 00:00:00 2001 From: Matt Stancliff Date: Fri, 10 Oct 2014 10:35:34 -0400 Subject: [PATCH 0004/1928] Sentinel: Add initial quorum bounds check Fixes #2054 --- src/sentinel.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/src/sentinel.c b/src/sentinel.c index 8e78a2263..6d7096b6c 100644 --- a/src/sentinel.c +++ b/src/sentinel.c @@ -2736,6 +2736,12 @@ void sentinelCommand(redisClient *c) { != REDIS_OK) return; if (getLongFromObjectOrReply(c,c->argv[4],&port,"Invalid port") != REDIS_OK) return; + + if (quorum <= 0) { + addReplyError(c, "Quorum must be 1 or greater."); + return; + } + /* Make sure 
the IP field is actually a valid IP before passing it * to createSentinelRedisInstance(), otherwise we may trigger a * DNS lookup at runtime. */ From e9b014cfacb443a0e828002d900a5a94a704d965 Mon Sep 17 00:00:00 2001 From: Mihir Joshi Date: Fri, 21 Nov 2014 22:35:42 -0500 Subject: [PATCH 0005/1928] stricter options for SET command Issue: #2157 As the SET command is parsed, it remembers which options are already set and if a duplicate option is found, raises an error because it is essentially an invalid syntax. It still allows mutually exclusive options like EX and PX because taking an option over another (precedence) is not essentially a syntactic error. --- src/t_string.c | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/src/t_string.c b/src/t_string.c index 74c05793c..0bbefcaad 100644 --- a/src/t_string.c +++ b/src/t_string.c @@ -61,6 +61,8 @@ static int checkStringLength(redisClient *c, long long size) { #define REDIS_SET_NO_FLAGS 0 #define REDIS_SET_NX (1<<0) /* Set if key not exists. */ #define REDIS_SET_XX (1<<1) /* Set if key exists. */ +#define REDIS_SET_EX (1<<2) /* Set if time in seconds is given */ +#define REDIS_SET_PX (1<<3) /* Set if time in ms in given */ void setGenericCommand(redisClient *c, int flags, robj *key, robj *val, robj *expire, int unit, robj *ok_reply, robj *abort_reply) { long long milliseconds = 0; /* initialized to avoid any harmness warning */ @@ -101,19 +103,21 @@ void setCommand(redisClient *c) { char *a = c->argv[j]->ptr; robj *next = (j == c->argc-1) ? NULL : c->argv[j+1]; - if ((a[0] == 'n' || a[0] == 'N') && + if (!(flags & REDIS_SET_NX) && (a[0] == 'n' || a[0] == 'N') && (a[1] == 'x' || a[1] == 'X') && a[2] == '\0') { flags |= REDIS_SET_NX; - } else if ((a[0] == 'x' || a[0] == 'X') && + } else if (!(flags & REDIS_SET_XX) && (a[0] == 'x' || a[0] == 'X') && (a[1] == 'x' || a[1] == 'X') && a[2] == '\0') { flags |= REDIS_SET_XX; - } else if ((a[0] == 'e' || a[0] == 'E') && + } else if (!(flags & REDIS_SET_EX) && (a[0] == 'e' || a[0] == 'E') && (a[1] == 'x' || a[1] == 'X') && a[2] == '\0' && next) { + flags |= REDIS_SET_EX; unit = UNIT_SECONDS; expire = next; j++; - } else if ((a[0] == 'p' || a[0] == 'P') && + } else if (!(flags & REDIS_SET_PX) && (a[0] == 'p' || a[0] == 'P') && (a[1] == 'x' || a[1] == 'X') && a[2] == '\0' && next) { + flags |= REDIS_SET_PX; unit = UNIT_MILLISECONDS; expire = next; j++; From 391fc9b6335329e513664c69bdc18865ab944beb Mon Sep 17 00:00:00 2001 From: Matt Stancliff Date: Mon, 25 Aug 2014 15:53:11 -0500 Subject: [PATCH 0006/1928] Sentinel: Improve INFO command behavior Improvements: - Return empty string if asking for non-existing section (INFO foo) - Fix potential memory leak (caused by sdsempty() then returned if >2 args) - Clean up argument parsing - Allow "all" as valid section (same as "default" or zero args currently) - Move strcasecmp to end of evaluation chain in conditionals Also, since we're C99, I moved some variable declarations to be closer to where they are actually used (saves us from needing to free an empty info if detect argument errors up front). Closes #1915 Closes #1966 --- src/sentinel.c | 20 +++++++++++++------- 1 file changed, 13 insertions(+), 7 deletions(-) diff --git a/src/sentinel.c b/src/sentinel.c index 12f15ff3e..bd2d42ac2 100644 --- a/src/sentinel.c +++ b/src/sentinel.c @@ -2853,24 +2853,30 @@ numargserr: /* SENTINEL INFO [section] */ void sentinelInfoCommand(redisClient *c) { - char *section = c->argc == 2 ? 
c->argv[1]->ptr : "default"; - sds info = sdsempty(); - int defsections = !strcasecmp(section,"default"); - int sections = 0; - if (c->argc > 2) { addReply(c,shared.syntaxerr); return; } - if (!strcasecmp(section,"server") || defsections) { + int defsections = 0, allsections = 0; + char *section = c->argc == 2 ? c->argv[1]->ptr : NULL; + if (section) { + allsections = !strcasecmp(section,"all"); + defsections = !strcasecmp(section,"default"); + } else { + defsections = 1; + } + + int sections = 0; + sds info = sdsempty(); + if (defsections || allsections || !strcasecmp(section,"server")) { if (sections++) info = sdscat(info,"\r\n"); sds serversection = genRedisInfoString("server"); info = sdscatlen(info,serversection,sdslen(serversection)); sdsfree(serversection); } - if (!strcasecmp(section,"sentinel") || defsections) { + if (defsections || allsections || !strcasecmp(section,"sentinel")) { dictIterator *di; dictEntry *de; int master_id = 0; From badf0f008bede268d3235412243fc62d618e323c Mon Sep 17 00:00:00 2001 From: Matt Stancliff Date: Thu, 7 Aug 2014 09:43:16 -0400 Subject: [PATCH 0007/1928] Bitops: Stop overallocating storage space on set Previously the string was created empty then re-sized to fit the offset, but sds resize causes the sds to over-allocate by at least 1 MB (which is a lot when you are operating at bit-level access). This also improves the speed of initial sets by 2% to 6% based on quick testing. Patch logic provided by @oranagra Fixes #1918 --- src/bitops.c | 8 +++----- src/t_string.c | 2 +- 2 files changed, 4 insertions(+), 6 deletions(-) diff --git a/src/bitops.c b/src/bitops.c index 4c8662244..ec912bc24 100644 --- a/src/bitops.c +++ b/src/bitops.c @@ -229,19 +229,17 @@ void setbitCommand(redisClient *c) { return; } + byte = bitoffset >> 3; o = lookupKeyWrite(c->db,c->argv[1]); if (o == NULL) { - o = createObject(REDIS_STRING,sdsempty()); + o = createObject(REDIS_STRING,sdsnewlen(NULL, byte+1)); dbAdd(c->db,c->argv[1],o); } else { if (checkType(c,o,REDIS_STRING)) return; o = dbUnshareStringValue(c->db,c->argv[1],o); + o->ptr = sdsgrowzero(o->ptr,byte+1); } - /* Grow sds value to the right length if necessary */ - byte = bitoffset >> 3; - o->ptr = sdsgrowzero(o->ptr,byte+1); - /* Get current values */ byteval = ((uint8_t*)o->ptr)[byte]; bit = 7 - (bitoffset & 0x7); diff --git a/src/t_string.c b/src/t_string.c index e3c1e5f4a..b61589961 100644 --- a/src/t_string.c +++ b/src/t_string.c @@ -194,7 +194,7 @@ void setrangeCommand(redisClient *c) { if (checkStringLength(c,offset+sdslen(value)) != REDIS_OK) return; - o = createObject(REDIS_STRING,sdsempty()); + o = createObject(REDIS_STRING,sdsnewlen(NULL, offset+sdslen(value))); dbAdd(c->db,c->argv[1],o); } else { size_t olen; From 352172a7ef5015c0c487ba6258cdf3b4b31a551c Mon Sep 17 00:00:00 2001 From: Mihir Joshi Date: Sun, 14 Dec 2014 10:12:58 -0500 Subject: [PATCH 0008/1928] Stricter options for SET command - As per Antirez's suggestion, this commit raises an error when mutually exclusive options are provided. Duplicate options are allowed. --- src/t_string.c | 20 ++++++++++++-------- 1 file changed, 12 insertions(+), 8 deletions(-) diff --git a/src/t_string.c b/src/t_string.c index 0bbefcaad..067aa10e3 100644 --- a/src/t_string.c +++ b/src/t_string.c @@ -103,20 +103,24 @@ void setCommand(redisClient *c) { char *a = c->argv[j]->ptr; robj *next = (j == c->argc-1) ? 
NULL : c->argv[j+1]; - if (!(flags & REDIS_SET_NX) && (a[0] == 'n' || a[0] == 'N') && - (a[1] == 'x' || a[1] == 'X') && a[2] == '\0') { + if ((a[0] == 'n' || a[0] == 'N') && + (a[1] == 'x' || a[1] == 'X') && a[2] == '\0' && + !(flags & REDIS_SET_XX)) { flags |= REDIS_SET_NX; - } else if (!(flags & REDIS_SET_XX) && (a[0] == 'x' || a[0] == 'X') && - (a[1] == 'x' || a[1] == 'X') && a[2] == '\0') { + } else if ((a[0] == 'x' || a[0] == 'X') && + (a[1] == 'x' || a[1] == 'X') && a[2] == '\0' && + !(flags & REDIS_SET_NX)) { flags |= REDIS_SET_XX; - } else if (!(flags & REDIS_SET_EX) && (a[0] == 'e' || a[0] == 'E') && - (a[1] == 'x' || a[1] == 'X') && a[2] == '\0' && next) { + } else if ((a[0] == 'e' || a[0] == 'E') && + (a[1] == 'x' || a[1] == 'X') && a[2] == '\0' && + !(flags & REDIS_SET_PX) && next) { flags |= REDIS_SET_EX; unit = UNIT_SECONDS; expire = next; j++; - } else if (!(flags & REDIS_SET_PX) && (a[0] == 'p' || a[0] == 'P') && - (a[1] == 'x' || a[1] == 'X') && a[2] == '\0' && next) { + } else if ((a[0] == 'p' || a[0] == 'P') && + (a[1] == 'x' || a[1] == 'X') && a[2] == '\0' && + !(flags & REDIS_SET_EX) && next) { flags |= REDIS_SET_PX; unit = UNIT_MILLISECONDS; expire = next; From 04607b53500f71498a502ffcfc41a82792013e2b Mon Sep 17 00:00:00 2001 From: Jan-Erik Rediger Date: Wed, 7 Jan 2015 22:31:45 +0100 Subject: [PATCH 0009/1928] Check that the whole first argument is a number Fixes #2258 --- src/redis-cli.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/src/redis-cli.c b/src/redis-cli.c index 2a703ad7f..e18cc7c9f 100644 --- a/src/redis-cli.c +++ b/src/redis-cli.c @@ -977,9 +977,10 @@ static void repl(void) { } else { long long start_time = mstime(), elapsed; int repeat, skipargs = 0; + char *endptr; - repeat = atoi(argv[0]); - if (argc > 1 && repeat) { + repeat = strtol(argv[0], &endptr, 10); + if (argc > 1 && *endptr == '\0' && repeat) { skipargs = 1; } else { repeat = 1; From 6741bb981c53eb6b168e6ff96d915a0d51e0c7cd Mon Sep 17 00:00:00 2001 From: Matt Stancliff Date: Fri, 9 Jan 2015 17:00:43 -0500 Subject: [PATCH 0010/1928] Improve consistency of INFO MEMORY fields Adds used_memory_rss_human and used_memory_lua_human to match all the other fields reporting human-readable memory too. 
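For illustration, the kind of conversion these *_human fields rely on is a small bytes-to-string helper in the spirit of the bytesToHuman() call used below. This is a generic sketch only; the name bytes_to_human and its exact formatting are illustrative, not the actual Redis implementation:

    #include <stdio.h>

    /* Render a byte count as a short human-readable string, e.g. 1536 -> "1.50K". */
    static void bytes_to_human(char *buf, size_t bufsize, unsigned long long n) {
        if (n < 1024ULL)
            snprintf(buf, bufsize, "%lluB", n);
        else if (n < 1024ULL * 1024)
            snprintf(buf, bufsize, "%.2fK", n / 1024.0);
        else if (n < 1024ULL * 1024 * 1024)
            snprintf(buf, bufsize, "%.2fM", n / (1024.0 * 1024));
        else
            snprintf(buf, bufsize, "%.2fG", n / (1024.0 * 1024 * 1024));
    }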
--- src/redis.c | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/src/redis.c b/src/redis.c index a4d9e562c..0e02f85d7 100644 --- a/src/redis.c +++ b/src/redis.c @@ -2700,9 +2700,12 @@ sds genRedisInfoString(char *section) { char hmem[64]; char peak_hmem[64]; char total_system_hmem[64]; + char used_memory_lua_hmem[64]; + char used_memory_rss_hmem[64]; size_t zmalloc_used = zmalloc_used_memory(); size_t total_system_mem = server.system_memory_size; char *evict_policy = maxmemoryToString(); + long long memory_lua = (long long)lua_gc(server.lua,LUA_GCCOUNT,0)*1024; /* Peak memory is updated from time to time by serverCron() so it * may happen that the instantaneous value is slightly bigger than @@ -2714,6 +2717,8 @@ sds genRedisInfoString(char *section) { bytesToHuman(hmem,zmalloc_used); bytesToHuman(peak_hmem,server.stat_peak_memory); bytesToHuman(total_system_hmem,total_system_mem); + bytesToHuman(used_memory_lua_hmem,memory_lua); + bytesToHuman(used_memory_rss_hmem,server.resident_set_size); if (sections++) info = sdscat(info,"\r\n"); info = sdscatprintf(info, @@ -2721,22 +2726,26 @@ sds genRedisInfoString(char *section) { "used_memory:%zu\r\n" "used_memory_human:%s\r\n" "used_memory_rss:%zu\r\n" + "used_memory_rss_human:%s\r\n" "used_memory_peak:%zu\r\n" "used_memory_peak_human:%s\r\n" "total_system_memory:%lu\r\n" "total_system_memory_human:%s\r\n" "used_memory_lua:%lld\r\n" + "used_memory_lua_human:%s\r\n" "mem_fragmentation_ratio:%.2f\r\n" "mem_allocator:%s\r\n" "maxmemory_policy:%s\r\n", zmalloc_used, hmem, server.resident_set_size, + used_memory_rss_hmem, server.stat_peak_memory, peak_hmem, (unsigned long)total_system_mem, total_system_hmem, - ((long long)lua_gc(server.lua,LUA_GCCOUNT,0))*1024LL, + memory_lua, + used_memory_lua_hmem, zmalloc_get_fragmentation_ratio(server.resident_set_size), ZMALLOC_LIB, evict_policy From 5a685f35a9e09cba3b08640912aed93acbc63351 Mon Sep 17 00:00:00 2001 From: Matt Stancliff Date: Fri, 9 Jan 2015 17:01:05 -0500 Subject: [PATCH 0011/1928] Add maxmemory limit to INFO MEMORY Since we have the eviction policy, we should have the memory limit too. 
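For reference, after this change the tail of an INFO MEMORY reply groups the limit next to the eviction policy, roughly as follows (field names are those added/reordered in the diff below; the values are made up for illustration):

    maxmemory:104857600
    maxmemory_human:100.00M
    maxmemory_policy:noeviction
    mem_fragmentation_ratio:1.03
    mem_allocator:jemalloc-3.6.0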
--- src/redis.c | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/src/redis.c b/src/redis.c index 0e02f85d7..56eb10aac 100644 --- a/src/redis.c +++ b/src/redis.c @@ -2702,6 +2702,7 @@ sds genRedisInfoString(char *section) { char total_system_hmem[64]; char used_memory_lua_hmem[64]; char used_memory_rss_hmem[64]; + char maxmemory_hmem[64]; size_t zmalloc_used = zmalloc_used_memory(); size_t total_system_mem = server.system_memory_size; char *evict_policy = maxmemoryToString(); @@ -2719,6 +2720,7 @@ sds genRedisInfoString(char *section) { bytesToHuman(total_system_hmem,total_system_mem); bytesToHuman(used_memory_lua_hmem,memory_lua); bytesToHuman(used_memory_rss_hmem,server.resident_set_size); + bytesToHuman(maxmemory_hmem,server.maxmemory); if (sections++) info = sdscat(info,"\r\n"); info = sdscatprintf(info, @@ -2733,9 +2735,11 @@ sds genRedisInfoString(char *section) { "total_system_memory_human:%s\r\n" "used_memory_lua:%lld\r\n" "used_memory_lua_human:%s\r\n" + "maxmemory:%lld\r\n" + "maxmemory_human:%s\r\n" + "maxmemory_policy:%s\r\n" "mem_fragmentation_ratio:%.2f\r\n" - "mem_allocator:%s\r\n" - "maxmemory_policy:%s\r\n", + "mem_allocator:%s\r\n", zmalloc_used, hmem, server.resident_set_size, @@ -2746,9 +2750,11 @@ sds genRedisInfoString(char *section) { total_system_hmem, memory_lua, used_memory_lua_hmem, + server.maxmemory, + maxmemory_hmem, + evict_policy, zmalloc_get_fragmentation_ratio(server.resident_set_size), - ZMALLOC_LIB, - evict_policy + ZMALLOC_LIB ); } From f704360462640a88975eeb68fd80617921d7c97d Mon Sep 17 00:00:00 2001 From: Matt Stancliff Date: Sun, 18 Jan 2015 15:54:30 -0500 Subject: [PATCH 0012/1928] Improve RDB type correctness It's possible large objects could be larger than 'int', so let's upgrade all size counters to ssize_t. This also fixes rdbSaveObject serialized bytes calculation. Since entire serializations of data structures can be large, so we don't want to limit their calculated size to a 32 bit signed max. This commit increases object size calculation and cascades the change back up to serializedlength printing. Before: 127.0.0.1:6379> debug object hihihi ... encoding:quicklist serializedlength:-2147483559 ... After: 127.0.0.1:6379> debug object hihihi ... encoding:quicklist serializedlength:2147483737 ... --- src/debug.c | 4 ++-- src/rdb.c | 26 +++++++++++++------------- src/rdb.h | 5 ++--- 3 files changed, 17 insertions(+), 18 deletions(-) diff --git a/src/debug.c b/src/debug.c index 7783196a0..e6e7e1e2d 100644 --- a/src/debug.c +++ b/src/debug.c @@ -336,10 +336,10 @@ void debugCommand(redisClient *c) { addReplyStatusFormat(c, "Value at:%p refcount:%d " - "encoding:%s serializedlength:%lld " + "encoding:%s serializedlength:%zu " "lru:%d lru_seconds_idle:%llu%s", (void*)val, val->refcount, - strenc, (long long) rdbSavedObjectLen(val), + strenc, rdbSavedObjectLen(val), val->lru, estimateObjectIdleTime(val)/1000, extra); } else if (!strcasecmp(c->argv[1]->ptr,"sdslen") && c->argc == 3) { dictEntry *de; diff --git a/src/rdb.c b/src/rdb.c index 36ba151c7..8165ef265 100644 --- a/src/rdb.c +++ b/src/rdb.c @@ -222,10 +222,10 @@ int rdbTryIntegerEncoding(char *s, size_t len, unsigned char *enc) { return rdbEncodeInteger(value,enc); } -int rdbSaveLzfBlob(rio *rdb, void *data, size_t compress_len, - size_t original_len) { +ssize_t rdbSaveLzfBlob(rio *rdb, void *data, size_t compress_len, + size_t original_len) { unsigned char byte; - int n, nwritten = 0; + ssize_t n, nwritten = 0; /* Data compressed! 
Let's save it on disk */ byte = (REDIS_RDB_ENCVAL<<6)|REDIS_RDB_ENC_LZF; @@ -247,7 +247,7 @@ writeerr: return -1; } -int rdbSaveLzfStringObject(rio *rdb, unsigned char *s, size_t len) { +ssize_t rdbSaveLzfStringObject(rio *rdb, unsigned char *s, size_t len) { size_t comprlen, outlen; void *out; @@ -260,7 +260,7 @@ int rdbSaveLzfStringObject(rio *rdb, unsigned char *s, size_t len) { zfree(out); return 0; } - size_t nwritten = rdbSaveLzfBlob(rdb, out, comprlen, len); + ssize_t nwritten = rdbSaveLzfBlob(rdb, out, comprlen, len); zfree(out); return nwritten; } @@ -305,9 +305,9 @@ err: /* Save a string object as [len][data] on disk. If the object is a string * representation of an integer value we try to save it in a special form */ -int rdbSaveRawString(rio *rdb, unsigned char *s, size_t len) { +ssize_t rdbSaveRawString(rio *rdb, unsigned char *s, size_t len) { int enclen; - int n, nwritten = 0; + ssize_t n, nwritten = 0; /* Try integer encoding */ if (len <= 11) { @@ -338,9 +338,9 @@ int rdbSaveRawString(rio *rdb, unsigned char *s, size_t len) { } /* Save a long long value as either an encoded string or a string. */ -int rdbSaveLongLongAsStringObject(rio *rdb, long long value) { +ssize_t rdbSaveLongLongAsStringObject(rio *rdb, long long value) { unsigned char buf[32]; - int n, nwritten = 0; + ssize_t n, nwritten = 0; int enclen = rdbEncodeInteger(value,buf); if (enclen > 0) { return rdbWriteRaw(rdb,buf,enclen); @@ -532,8 +532,8 @@ int rdbLoadObjectType(rio *rdb) { } /* Save a Redis object. Returns -1 on error, number of bytes written on success. */ -int rdbSaveObject(rio *rdb, robj *o) { - int n = 0, nwritten = 0; +ssize_t rdbSaveObject(rio *rdb, robj *o) { + ssize_t n = 0, nwritten = 0; if (o->type == REDIS_STRING) { /* Save a string value */ @@ -654,8 +654,8 @@ int rdbSaveObject(rio *rdb, robj *o) { * the rdbSaveObject() function. Currently we use a trick to get * this length with very little changes to the code. In the future * we could switch to a faster solution. */ -off_t rdbSavedObjectLen(robj *o) { - int len = rdbSaveObject(NULL,o); +size_t rdbSavedObjectLen(robj *o) { + ssize_t len = rdbSaveObject(NULL,o); redisAssertWithInfo(NULL,o,len != -1); return len; } diff --git a/src/rdb.h b/src/rdb.h index 6319f5d02..a72607b71 100644 --- a/src/rdb.h +++ b/src/rdb.h @@ -109,9 +109,8 @@ int rdbSaveBackground(char *filename); int rdbSaveToSlavesSockets(void); void rdbRemoveTempFile(pid_t childpid); int rdbSave(char *filename); -int rdbSaveObject(rio *rdb, robj *o); -off_t rdbSavedObjectLen(robj *o); -off_t rdbSavedObjectPages(robj *o); +ssize_t rdbSaveObject(rio *rdb, robj *o); +size_t rdbSavedObjectLen(robj *o); robj *rdbLoadObject(int type, rio *rdb); void backgroundSaveDoneHandler(int exitcode, int bysignal); int rdbSaveKeyValuePair(rio *rdb, robj *key, robj *val, long long expiretime, long long now); From 53c082ec39fb4daafba09e416279265f20d46006 Mon Sep 17 00:00:00 2001 From: Matt Stancliff Date: Sun, 18 Jan 2015 16:46:25 -0500 Subject: [PATCH 0013/1928] Improve networking type correctness read() and write() return ssize_t (signed long), not int. For other offsets, we can use the unsigned size_t type instead of a signed offset (since our replication offsets and buffer positions are never negative). 
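For context, a minimal standalone sketch of the idiom this enforces: keep the per-call result of write(2) in a signed ssize_t so -1 can be detected, while the running offset into the buffer stays unsigned. The helper name write_all is hypothetical and this is an illustration of the pattern, not code from the patch:

    #include <unistd.h>
    #include <errno.h>
    #include <stddef.h>

    /* Write 'len' bytes, retrying on short writes. Returns 0 on success, -1 on error. */
    static int write_all(int fd, const char *buf, size_t len) {
        size_t written = 0;                 /* offset: never negative, so size_t */
        while (written < len) {
            ssize_t n = write(fd, buf + written, len - written);  /* may be -1 */
            if (n == -1) {
                if (errno == EINTR) continue;
                return -1;
            }
            written += (size_t)n;
        }
        return 0;
    }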
--- src/anet.c | 4 ++-- src/cluster.c | 2 +- src/networking.c | 5 +++-- src/redis-benchmark.c | 4 ++-- src/redis.h | 10 +++++----- 5 files changed, 13 insertions(+), 12 deletions(-) diff --git a/src/anet.c b/src/anet.c index 76e9b67ae..0ec5c55a2 100644 --- a/src/anet.c +++ b/src/anet.c @@ -391,7 +391,7 @@ int anetUnixNonBlockConnect(char *err, char *path) * (unless error or EOF condition is encountered) */ int anetRead(int fd, char *buf, int count) { - int nread, totlen = 0; + ssize_t nread, totlen = 0; while(totlen != count) { nread = read(fd,buf,count-totlen); if (nread == 0) return totlen; @@ -406,7 +406,7 @@ int anetRead(int fd, char *buf, int count) * (unless error is encountered) */ int anetWrite(int fd, char *buf, int count) { - int nwritten, totlen = 0; + ssize_t nwritten, totlen = 0; while(totlen != count) { nwritten = write(fd,buf,count-totlen); if (nwritten == 0) return totlen; diff --git a/src/cluster.c b/src/cluster.c index ec6901e8f..826c7f41d 100644 --- a/src/cluster.c +++ b/src/cluster.c @@ -4462,7 +4462,7 @@ try_again: { sds buf = cmd.io.buffer.ptr; size_t pos = 0, towrite; - int nwritten = 0; + ssize_t nwritten = 0; while ((towrite = sdslen(buf)-pos) > 0) { towrite = (towrite > (64*1024) ? (64*1024) : towrite); diff --git a/src/networking.c b/src/networking.c index 607d225fd..0b69f5408 100644 --- a/src/networking.c +++ b/src/networking.c @@ -797,7 +797,8 @@ void freeClientsInAsyncFreeQueue(void) { void sendReplyToClient(aeEventLoop *el, int fd, void *privdata, int mask) { redisClient *c = privdata; - int nwritten = 0, totwritten = 0, objlen; + ssize_t nwritten = 0, totwritten = 0; + size_t objlen; size_t objmem; robj *o; REDIS_NOTUSED(el); @@ -1621,7 +1622,7 @@ int checkClientOutputBufferLimits(redisClient *c) { * called from contexts where the client can't be freed safely, i.e. from the * lower level functions pushing data inside the client output buffers. 
*/ void asyncCloseClientOnOutputBufferLimitReached(redisClient *c) { - redisAssert(c->reply_bytes < ULONG_MAX-(1024*64)); + redisAssert(c->reply_bytes < SIZE_MAX-(1024*64)); if (c->reply_bytes == 0 || c->flags & REDIS_CLOSE_ASAP) return; if (checkClientOutputBufferLimits(c)) { sds client = catClientInfoString(sdsempty(),c); diff --git a/src/redis-benchmark.c b/src/redis-benchmark.c index 7567e0181..f735aeb63 100644 --- a/src/redis-benchmark.c +++ b/src/redis-benchmark.c @@ -86,7 +86,7 @@ typedef struct _client { char **randptr; /* Pointers to :rand: strings inside the command buf */ size_t randlen; /* Number of pointers in client->randptr */ size_t randfree; /* Number of unused pointers in client->randptr */ - unsigned int written; /* Bytes of 'obuf' already written */ + size_t written; /* Bytes of 'obuf' already written */ long long start; /* Start time of a request */ long long latency; /* Request latency */ int pending; /* Number of pending requests (replies to consume) */ @@ -266,7 +266,7 @@ static void writeHandler(aeEventLoop *el, int fd, void *privdata, int mask) { if (sdslen(c->obuf) > c->written) { void *ptr = c->obuf+c->written; - int nwritten = write(c->context->fd,ptr,sdslen(c->obuf)-c->written); + ssize_t nwritten = write(c->context->fd,ptr,sdslen(c->obuf)-c->written); if (nwritten == -1) { if (errno != EPIPE) fprintf(stderr, "Writing to socket: %s\n", strerror(errno)); diff --git a/src/redis.h b/src/redis.h index 0c191d06f..6a8308f78 100644 --- a/src/redis.h +++ b/src/redis.h @@ -542,8 +542,8 @@ typedef struct redisClient { int multibulklen; /* number of multi bulk arguments left to read */ long bulklen; /* length of bulk argument in multi bulk request */ list *reply; - unsigned long reply_bytes; /* Tot bytes of objects in reply list */ - int sentlen; /* Amount of bytes already sent in the current + size_t reply_bytes; /* Tot bytes of objects in reply list */ + size_t sentlen; /* Amount of bytes already sent in the current buffer or object being sent. */ time_t ctime; /* Client creation time */ time_t lastinteraction; /* time of the last interaction, used for timeout */ @@ -553,8 +553,8 @@ typedef struct redisClient { int replstate; /* replication state if this is a slave */ int repl_put_online_on_ack; /* Install slave write handler on ACK. */ int repldbfd; /* replication DB file descriptor */ - off_t repldboff; /* replication DB file offset */ - off_t repldbsize; /* replication DB file size */ + size_t repldboff; /* replication DB file offset */ + size_t repldbsize; /* replication DB file size */ sds replpreamble; /* replication DB preamble. */ long long reploff; /* replication offset if this is our master */ long long repl_ack_off; /* replication ack offset, if this is a slave */ @@ -571,7 +571,7 @@ typedef struct redisClient { sds peerid; /* Cached peer ID. */ /* Response buffer */ - int bufpos; + size_t bufpos; char buf[REDIS_REPLY_CHUNK_BYTES]; } redisClient; From 839767ad0b304c5e3c18727b146a17356d1a5e7c Mon Sep 17 00:00:00 2001 From: antirez Date: Tue, 20 Jan 2015 18:01:28 +0100 Subject: [PATCH 0014/1928] Panic on recursive calls to luaRedisGenericCommand(). Related to issue #2302. 
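The guard added below boils down to a static re-entrancy flag around the command body. A minimal standalone illustration of the same pattern (guarded_command is a hypothetical function, not the scripting.c code; every early return path must decrement the flag, as the patch does):

    #include <stdio.h>
    #include <stdlib.h>

    /* Hypothetical handler protected against re-entrant calls. */
    static int guarded_command(void) {
        static int inuse = 0;        /* re-entrancy detector */
        if (inuse) {
            fprintf(stderr, "recursive call detected\n");
            abort();                 /* or report an error to the caller instead */
        }
        inuse++;
        /* ... real work; each early return must be preceded by inuse-- ... */
        inuse--;
        return 0;
    }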
--- src/scripting.c | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/src/scripting.c b/src/scripting.c index b6a333a43..4b6f47fd6 100644 --- a/src/scripting.c +++ b/src/scripting.c @@ -214,11 +214,22 @@ int luaRedisGenericCommand(lua_State *lua, int raise_error) { static int argv_size = 0; static robj *cached_objects[LUA_CMD_OBJCACHE_SIZE]; static size_t cached_objects_len[LUA_CMD_OBJCACHE_SIZE]; + static int inuse = 0; /* Recursive calls detection. */ + + /* By using Lua debug hooks it is possible to trigger a recursive call + * to luaRedisGenericCommand(), which normally should never happen. + * To make this function reentrant is futile and makes it slower, but + * we should at least detect such a misuse, and abort. */ + if (inuse) { + redisPanic("luaRedisGenericCommand() recursive call detected. Are you doing funny stuff with Lua debug hooks?"); + } + inuse++; /* Require at least one argument */ if (argc == 0) { luaPushError(lua, "Please specify at least one argument for redis.call()"); + inuse--; return 1; } @@ -273,6 +284,7 @@ int luaRedisGenericCommand(lua_State *lua, int raise_error) { } luaPushError(lua, "Lua redis() command arguments must be strings or integers"); + inuse--; return 1; } @@ -426,8 +438,10 @@ cleanup: * return the plain error. */ lua_pushstring(lua,"err"); lua_gettable(lua,-2); + inuse--; return lua_error(lua); } + inuse--; return 1; } From bc8675612f50399568c1686ae0ee0d8d0b925254 Mon Sep 17 00:00:00 2001 From: antirez Date: Tue, 20 Jan 2015 23:13:47 +0100 Subject: [PATCH 0015/1928] luaRedisGenericCommand() recursion: just return an error. Instead of calling redisPanic() to abort the server. Related to issue #2302. --- src/scripting.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/scripting.c b/src/scripting.c index 4b6f47fd6..313bc2187 100644 --- a/src/scripting.c +++ b/src/scripting.c @@ -221,7 +221,9 @@ int luaRedisGenericCommand(lua_State *lua, int raise_error) { * To make this function reentrant is futile and makes it slower, but * we should at least detect such a misuse, and abort. */ if (inuse) { - redisPanic("luaRedisGenericCommand() recursive call detected. Are you doing funny stuff with Lua debug hooks?"); + luaPushError(lua, + "luaRedisGenericCommand() recursive call detected. Are you doing funny stuff with Lua debug hooks?"); + return 1; } inuse++; From e467cf5db31322868b8a374e57a74e9ed7150c78 Mon Sep 17 00:00:00 2001 From: antirez Date: Tue, 20 Jan 2015 23:20:12 +0100 Subject: [PATCH 0016/1928] luaRedisGenericCommand(): log error at WARNING level when re-entered. Rationale is that when re-entering, it is likely due to Lua debugging hooks. Returning an error will be ignored in most cases, going totally unnoticed. With the log at least we leave a trace. Related to issue #2302. --- src/scripting.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/src/scripting.c b/src/scripting.c index 313bc2187..c5dd4e718 100644 --- a/src/scripting.c +++ b/src/scripting.c @@ -221,8 +221,11 @@ int luaRedisGenericCommand(lua_State *lua, int raise_error) { * To make this function reentrant is futile and makes it slower, but * we should at least detect such a misuse, and abort. */ if (inuse) { - luaPushError(lua, - "luaRedisGenericCommand() recursive call detected. Are you doing funny stuff with Lua debug hooks?"); + char *recursion_warning = + "luaRedisGenericCommand() recursive call detected. 
" + "Are you doing funny stuff with Lua debug hooks?"; + redisLog(REDIS_WARNING,"%s",recursion_warning); + luaPushError(lua,recursion_warning); return 1; } inuse++; From 92cfab44b240750d957ae226efac7235c7ef655b Mon Sep 17 00:00:00 2001 From: antirez Date: Wed, 21 Jan 2015 14:51:42 +0100 Subject: [PATCH 0017/1928] Fix gcc warning for lack of casting to char pointer. --- src/rdb.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/src/rdb.c b/src/rdb.c index 36ba151c7..d5e3a7f47 100644 --- a/src/rdb.c +++ b/src/rdb.c @@ -1316,12 +1316,14 @@ int rdbLoad(char *filename) { /* All the fields with a name staring with '%' are considered * information fields and are logged at startup with a log * level of NOTICE. */ - redisLog(REDIS_NOTICE,"RDB '%s': %s", auxkey->ptr, auxval->ptr); + redisLog(REDIS_NOTICE,"RDB '%s': %s", + (char*)auxkey->ptr, + (char*)auxval->ptr); } else { /* We ignore fields we don't understand, as by AUX field * contract. */ redisLog(REDIS_DEBUG,"Unrecognized RDB AUX field: '%s'", - auxkey->ptr); + (char*)auxkey->ptr); } decrRefCount(auxkey); From b0146aafebd76e89638fe6b13947d0a53f5c7b27 Mon Sep 17 00:00:00 2001 From: Matt Stancliff Date: Fri, 9 Jan 2015 17:43:48 -0500 Subject: [PATCH 0018/1928] Tell sentinel/cluster tests to allow valgrind --- tests/instances.tcl | 19 +++++++++++++++++-- 1 file changed, 17 insertions(+), 2 deletions(-) diff --git a/tests/instances.tcl b/tests/instances.tcl index 426508f33..b9eb42258 100644 --- a/tests/instances.tcl +++ b/tests/instances.tcl @@ -16,6 +16,7 @@ source ../support/server.tcl source ../support/test.tcl set ::verbose 0 +set ::valgrind 0 set ::pause_on_error 0 set ::simulate_error 0 set ::sentinel_instances {} @@ -65,7 +66,13 @@ proc spawn_instance {type base_port count {conf {}}} { } else { error "Unknown instance type." } - set pid [exec ../../../src/${prgname} $cfgfile &] + + if {$::valgrind} { + set pid [exec valgrind --suppressions=../../../src/valgrind.sup --show-reachable=no --show-possibly-lost=no --leak-check=full ../../../src/${prgname} $cfgfile &] + } else { + set pid [exec ../../../src/${prgname} $cfgfile &] + } + lappend ::pids $pid # Check availability @@ -113,6 +120,8 @@ proc parse_options {} { set ::pause_on_error 1 } elseif {$opt eq "--fail"} { set ::simulate_error 1 + } elseif {$opt eq {--valgrind}} { + set ::valgrind 1 } elseif {$opt eq "--help"} { puts "Hello, I'm sentinel.tcl and I run Sentinel unit tests." 
puts "\nOptions:" @@ -390,7 +399,13 @@ proc restart_instance {type id} { } else { set prgname redis-sentinel } - set pid [exec ../../../src/${prgname} $cfgfile &] + + if {$::valgrind} { + set pid [exec valgrind --suppressions=../../../src/valgrind.sup --show-reachable=no --show-possibly-lost=no --leak-check=full ../../../src/${prgname} $cfgfile &] + } else { + set pid [exec ../../../src/${prgname} $cfgfile &] + } + set_instance_attrib $type $id pid $pid lappend ::pids $pid From 491d57abaae7ddb9f32951acc3c38e27f3638b90 Mon Sep 17 00:00:00 2001 From: Matt Stancliff Date: Tue, 13 Jan 2015 11:15:30 -0500 Subject: [PATCH 0019/1928] Add --track-origins=yes to valgrind --- tests/instances.tcl | 4 ++-- tests/support/server.tcl | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/tests/instances.tcl b/tests/instances.tcl index b9eb42258..4e2f33dfc 100644 --- a/tests/instances.tcl +++ b/tests/instances.tcl @@ -68,7 +68,7 @@ proc spawn_instance {type base_port count {conf {}}} { } if {$::valgrind} { - set pid [exec valgrind --suppressions=../../../src/valgrind.sup --show-reachable=no --show-possibly-lost=no --leak-check=full ../../../src/${prgname} $cfgfile &] + set pid [exec valgrind --track-origins=yes --suppressions=../../../src/valgrind.sup --show-reachable=no --show-possibly-lost=no --leak-check=full ../../../src/${prgname} $cfgfile &] } else { set pid [exec ../../../src/${prgname} $cfgfile &] } @@ -401,7 +401,7 @@ proc restart_instance {type id} { } if {$::valgrind} { - set pid [exec valgrind --suppressions=../../../src/valgrind.sup --show-reachable=no --show-possibly-lost=no --leak-check=full ../../../src/${prgname} $cfgfile &] + set pid [exec valgrind --track-origins=yes --suppressions=../../../src/valgrind.sup --show-reachable=no --show-possibly-lost=no --leak-check=full ../../../src/${prgname} $cfgfile &] } else { set pid [exec ../../../src/${prgname} $cfgfile &] } diff --git a/tests/support/server.tcl b/tests/support/server.tcl index 67ee24528..317b40a84 100644 --- a/tests/support/server.tcl +++ b/tests/support/server.tcl @@ -207,7 +207,7 @@ proc start_server {options {code undefined}} { set stderr [format "%s/%s" [dict get $config "dir"] "stderr"] if {$::valgrind} { - set pid [exec valgrind --suppressions=src/valgrind.sup --show-reachable=no --show-possibly-lost=no --leak-check=full src/redis-server $config_file > $stdout 2> $stderr &] + set pid [exec valgrind --track-origins=yes --suppressions=src/valgrind.sup --show-reachable=no --show-possibly-lost=no --leak-check=full src/redis-server $config_file > $stdout 2> $stderr &] } else { set pid [exec src/redis-server $config_file > $stdout 2> $stderr &] } From 59ad6ac5feac4f6760144861b723a51383a0f19b Mon Sep 17 00:00:00 2001 From: antirez Date: Wed, 21 Jan 2015 15:55:53 +0100 Subject: [PATCH 0020/1928] Cluster: set the slaves->slaveof filed to NULL when master is freed. Related to issue #2289. --- src/cluster.c | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/src/cluster.c b/src/cluster.c index ec6901e8f..5135cdaa5 100644 --- a/src/cluster.c +++ b/src/cluster.c @@ -821,6 +821,14 @@ int clusterCountNonFailingSlaves(clusterNode *n) { void freeClusterNode(clusterNode *n) { sds nodename; + int j; + + /* If the node is a master with associated slaves, we have to set + * all the slaves->slaveof fields to NULL (unknown). 
*/ + if (nodeIsMaster(n)) { + for (j = 0; j < n->numslaves; j++) + n->slaves[j]->slaveof = NULL; + } nodename = sdsnewlen(n->name, REDIS_CLUSTER_NAMELEN); redisAssert(dictDelete(server.cluster->nodes,nodename) == DICT_OK); From 2601e3e4614800552a3ff5c91b4ac475a664df5a Mon Sep 17 00:00:00 2001 From: antirez Date: Wed, 21 Jan 2015 16:03:43 +0100 Subject: [PATCH 0021/1928] Cluster: node deletion cleanup / centralization. --- src/cluster.c | 33 ++++++++++++++++++++------------- 1 file changed, 20 insertions(+), 13 deletions(-) diff --git a/src/cluster.c b/src/cluster.c index 5135cdaa5..01d6a32e8 100644 --- a/src/cluster.c +++ b/src/cluster.c @@ -819,6 +819,7 @@ int clusterCountNonFailingSlaves(clusterNode *n) { return okslaves; } +/* Low level cleanup of the node structure. Only called by clusterDelNode(). */ void freeClusterNode(clusterNode *n) { sds nodename; int j; @@ -830,10 +831,15 @@ void freeClusterNode(clusterNode *n) { n->slaves[j]->slaveof = NULL; } + /* Remove this node from the list of slaves of its master. */ + if (nodeIsSlave(n) && n->slaveof) clusterNodeRemoveSlave(n->slaveof,n); + + /* Unlink from the set of nodes. */ nodename = sdsnewlen(n->name, REDIS_CLUSTER_NAMELEN); redisAssert(dictDelete(server.cluster->nodes,nodename) == DICT_OK); sdsfree(nodename); - if (n->slaveof) clusterNodeRemoveSlave(n->slaveof, n); + + /* Release link and associated data structures. */ if (n->link) freeClusterLink(n->link); listRelease(n->fail_reports); zfree(n); @@ -848,11 +854,16 @@ int clusterAddNode(clusterNode *node) { return (retval == DICT_OK) ? REDIS_OK : REDIS_ERR; } -/* Remove a node from the cluster: - * 1) Mark all the nodes handled by it as unassigned. - * 2) Remove all the failure reports sent by this node. - * 3) Free the node, that will in turn remove it from the hash table - * and from the list of slaves of its master, if it is a slave node. +/* Remove a node from the cluster. The functio performs the high level + * cleanup, calling freeClusterNode() for the low level cleanup. + * Here we do the following: + * + * 1) Mark all the slots handled by it as unassigned. + * 2) Remove all the failure reports sent by this node and referenced by + * other nodes. + * 3) Free the node with freeClusterNode() that will in turn remove it + * from the hash table and from the list of slaves of its master, if + * it is a slave node. */ void clusterDelNode(clusterNode *delnode) { int j; @@ -879,11 +890,7 @@ void clusterDelNode(clusterNode *delnode) { } dictReleaseIterator(di); - /* 3) Remove this node from its master's slaves if needed. */ - if (nodeIsSlave(delnode) && delnode->slaveof) - clusterNodeRemoveSlave(delnode->slaveof,delnode); - - /* 4) Free the node, unlinking it from the cluster. */ + /* 3) Free the node, unlinking it from the cluster. */ freeClusterNode(delnode); } @@ -1619,7 +1626,7 @@ int clusterProcessPacket(clusterLink *link) { } /* Free this node as we already have it. This will * cause the link to be freed as well. */ - freeClusterNode(link->node); + clusterDelNode(link->node); return 0; } @@ -2913,7 +2920,7 @@ void clusterCron(void) { /* A Node in HANDSHAKE state has a limited lifespan equal to the * configured node timeout. */ if (nodeInHandshake(node) && now - node->ctime > handshake_timeout) { - freeClusterNode(node); + clusterDelNode(node); continue; } From a5bb0a0774fbe89d72de13624b23079031674932 Mon Sep 17 00:00:00 2001 From: antirez Date: Wed, 21 Jan 2015 16:13:30 +0100 Subject: [PATCH 0022/1928] Cluster/Sentinel test: pause on exceptions as well. 
--- tests/cluster/run.tcl | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/cluster/run.tcl b/tests/cluster/run.tcl index 69a160c4f..f764cea0a 100644 --- a/tests/cluster/run.tcl +++ b/tests/cluster/run.tcl @@ -21,6 +21,7 @@ proc main {} { if {[catch main e]} { puts $::errorInfo + if {$::pause_on_error} pause_on_error cleanup exit 1 } From b3bf7584b0aa5c2dbc1acf4d7f6b2c3d420e8e42 Mon Sep 17 00:00:00 2001 From: antirez Date: Wed, 21 Jan 2015 16:18:34 +0100 Subject: [PATCH 0023/1928] Cluster/Sentinel test: also pause on abort_sentinel_test call. --- tests/instances.tcl | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/instances.tcl b/tests/instances.tcl index 4e2f33dfc..a68b79d11 100644 --- a/tests/instances.tcl +++ b/tests/instances.tcl @@ -105,6 +105,7 @@ proc cleanup {} { proc abort_sentinel_test msg { puts "WARNING: Aborting the test." puts ">>>>>>>> $msg" + if {$::pause_on_error} pause_on_error cleanup exit 1 } From 4433f5a7f24350cb398ae448fca691a53a51a155 Mon Sep 17 00:00:00 2001 From: antirez Date: Wed, 21 Jan 2015 16:39:38 +0100 Subject: [PATCH 0024/1928] AOF rewrite: set iterator var to NULL when freed. The cleanup code expects that if 'di' is not NULL, it is a valid iterator that should be freed. The result of this bug was a crash of the AOF rewriting process if an error occurred after the DBs data are written and the iterator is no longer valid. --- src/aof.c | 1 + 1 file changed, 1 insertion(+) diff --git a/src/aof.c b/src/aof.c index f5a90a12c..dc7d11873 100644 --- a/src/aof.c +++ b/src/aof.c @@ -1082,6 +1082,7 @@ int rewriteAppendOnlyFile(char *filename) { } } dictReleaseIterator(di); + di = NULL; } /* Do an initial slow fsync here while the parent is still sending From acb1d8debf23f3dbd9199d1276a86ada71750196 Mon Sep 17 00:00:00 2001 From: antirez Date: Wed, 21 Jan 2015 16:46:51 +0100 Subject: [PATCH 0025/1928] Cluster test: wait for port to unbound in kill_instance. Otherwise kill_instance + restart_instance in short succession will still find the port busy and will fail. --- tests/instances.tcl | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/tests/instances.tcl b/tests/instances.tcl index a68b79d11..7d87cdf59 100644 --- a/tests/instances.tcl +++ b/tests/instances.tcl @@ -370,15 +370,31 @@ proc get_instance_id_by_port {type port} { # The instance can be restarted with restart-instance. proc kill_instance {type id} { set pid [get_instance_attrib $type $id pid] + set port [get_instance_attrib $type $id port] + if {$pid == -1} { error "You tried to kill $type $id twice." } + exec kill -9 $pid set_instance_attrib $type $id pid -1 set_instance_attrib $type $id link you_tried_to_talk_with_killed_instance # Remove the PID from the list of pids to kill at exit. set ::pids [lsearch -all -inline -not -exact $::pids $pid] + + # Wait for the port it was using to be available again, so that's not + # an issue to start a new server ASAP with the same port. + set retry 10 + while {[incr retry -1]} { + set port_is_free [catch {set s [socket 127.0.01 $port]}] + if {$port_is_free} break + catch {close $s} + after 1000 + } + if {$retry == 0} { + error "Port $port does not return available after killing instance." + } } # Return true of the instance of the specified type/id is killed. From 72b8574cca7480f8d4a318727c6dacad891733d6 Mon Sep 17 00:00:00 2001 From: Matt Stancliff Date: Wed, 14 Jan 2015 11:21:50 -0500 Subject: [PATCH 0026/1928] Fix sending uninitialized bytes Fixes valgrind error: Syscall param write(buf) points to uninitialised byte(s) at 0x514C35D: ??? 
(syscall-template.S:81) by 0x456B81: clusterWriteHandler (cluster.c:1907) by 0x41D596: aeProcessEvents (ae.c:416) by 0x41D8EA: aeMain (ae.c:455) by 0x41A84B: main (redis.c:3832) Address 0x5f268e2 is 2,274 bytes inside a block of size 8,192 alloc'd at 0x4932D1: je_realloc (jemalloc.c:1297) by 0x428185: zrealloc (zmalloc.c:162) by 0x4269E0: sdsMakeRoomFor.part.0 (sds.c:142) by 0x426CD7: sdscatlen (sds.c:251) by 0x4579E7: clusterSendMessage (cluster.c:1995) by 0x45805A: clusterSendPing (cluster.c:2140) by 0x45BB03: clusterCron (cluster.c:2944) by 0x423344: serverCron (redis.c:1239) by 0x41D6CD: aeProcessEvents (ae.c:311) by 0x41D8EA: aeMain (ae.c:455) by 0x41A84B: main (redis.c:3832) Uninitialised value was created by a stack allocation at 0x457810: nodeUpdateAddressIfNeeded (cluster.c:1236) --- src/cluster.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/cluster.c b/src/cluster.c index 01d6a32e8..328dc2c85 100644 --- a/src/cluster.c +++ b/src/cluster.c @@ -1249,7 +1249,7 @@ void nodeIp2String(char *buf, clusterLink *link) { * The function returns 0 if the node address is still the same, * otherwise 1 is returned. */ int nodeUpdateAddressIfNeeded(clusterNode *node, clusterLink *link, int port) { - char ip[REDIS_IP_STR_LEN]; + char ip[REDIS_IP_STR_LEN] = {0}; /* We don't proceed if the link is the same as the sender link, as this * function is designed to see if the node link is consistent with the From 30152554eab1d5fa3850ad6ad372aeb3dc1ebacf Mon Sep 17 00:00:00 2001 From: Matt Stancliff Date: Wed, 14 Jan 2015 11:31:17 -0500 Subject: [PATCH 0027/1928] Fix cluster reset memory leak [maybe] Fixes valgrind errors: 32 bytes in 4 blocks are definitely lost in loss record 107 of 228 at 0x80EA447: je_malloc (jemalloc.c:944) by 0x806E59C: zrealloc (zmalloc.c:125) by 0x80A9AFC: clusterSetMaster (cluster.c:801) by 0x80AEDC9: clusterCommand (cluster.c:3994) by 0x80682A5: call (redis.c:2049) by 0x8068A20: processCommand (redis.c:2309) by 0x8076497: processInputBuffer (networking.c:1143) by 0x8073BAF: readQueryFromClient (networking.c:1208) by 0x8060E98: aeProcessEvents (ae.c:412) by 0x806123B: aeMain (ae.c:455) by 0x806C3DB: main (redis.c:3832) 64 bytes in 8 blocks are definitely lost in loss record 143 of 228 at 0x80EA447: je_malloc (jemalloc.c:944) by 0x806E59C: zrealloc (zmalloc.c:125) by 0x80AAB40: clusterProcessPacket (cluster.c:801) by 0x80A847F: clusterReadHandler (cluster.c:1975) by 0x30000FF: ??? 80 bytes in 10 blocks are definitely lost in loss record 148 of 228 at 0x80EA447: je_malloc (jemalloc.c:944) by 0x806E59C: zrealloc (zmalloc.c:125) by 0x80AAB40: clusterProcessPacket (cluster.c:801) by 0x80A847F: clusterReadHandler (cluster.c:1975) by 0x2FFFFFF: ??? --- src/cluster.c | 1 + 1 file changed, 1 insertion(+) diff --git a/src/cluster.c b/src/cluster.c index 328dc2c85..71b17c977 100644 --- a/src/cluster.c +++ b/src/cluster.c @@ -842,6 +842,7 @@ void freeClusterNode(clusterNode *n) { /* Release link and associated data structures. */ if (n->link) freeClusterLink(n->link); listRelease(n->fail_reports); + zfree(n->slaves); zfree(n); } From 29049507ec34efd59ce6de7cff524fb44b47f934 Mon Sep 17 00:00:00 2001 From: Matt Stancliff Date: Wed, 14 Jan 2015 11:10:25 -0500 Subject: [PATCH 0028/1928] Fix potential invalid read past end of array If array has N elements, we can't read +1 if we are already at N. Also, we need to move elements by their storage size in the array, not just by individual bytes. 
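For illustration, the general idiom the fix restores when removing element j from an array of pointers: the memmove length must be scaled by the element size, and the move must be skipped entirely when j is already the last element. A standalone sketch with a hypothetical helper name, not the cluster.c code:

    #include <string.h>

    /* Remove element j from an array of 'count' pointers, shifting the tail left.
     * Returns the new element count, or -1 if j is out of range. */
    static int array_remove(void **arr, int count, int j) {
        if (j < 0 || j >= count) return -1;
        if (j + 1 < count) {
            int remaining = count - j - 1;
            memmove(arr + j, arr + j + 1, sizeof(*arr) * remaining);
        }
        return count - 1;
    }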
--- src/cluster.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/src/cluster.c b/src/cluster.c index 71b17c977..ba84b3a91 100644 --- a/src/cluster.c +++ b/src/cluster.c @@ -783,8 +783,11 @@ int clusterNodeRemoveSlave(clusterNode *master, clusterNode *slave) { for (j = 0; j < master->numslaves; j++) { if (master->slaves[j] == slave) { - memmove(master->slaves+j,master->slaves+(j+1), - (master->numslaves-1)-j); + if ((j+1) < master->numslaves) { + int remaining_slaves = (master->numslaves - j) - 1; + memmove(master->slaves+j,master->slaves+(j+1), + (sizeof(*master->slaves) * remaining_slaves)); + } master->numslaves--; return REDIS_OK; } From 051a43e03a4db665b3bf6e8b45790298c86a96af Mon Sep 17 00:00:00 2001 From: Matt Stancliff Date: Thu, 15 Jan 2015 14:20:59 -0500 Subject: [PATCH 0029/1928] Fix cluster migrate memory leak Fixes valgrind error: 48 bytes in 1 blocks are definitely lost in loss record 196 of 373 at 0x4910D3: je_malloc (jemalloc.c:944) by 0x42807D: zmalloc (zmalloc.c:125) by 0x41FA0D: dictGetIterator (dict.c:543) by 0x41FA48: dictGetSafeIterator (dict.c:555) by 0x459B73: clusterHandleSlaveMigration (cluster.c:2776) by 0x45BF27: clusterCron (cluster.c:3123) by 0x423344: serverCron (redis.c:1239) by 0x41D6CD: aeProcessEvents (ae.c:311) by 0x41D8EA: aeMain (ae.c:455) by 0x41A84B: main (redis.c:3832) --- src/cluster.c | 1 + 1 file changed, 1 insertion(+) diff --git a/src/cluster.c b/src/cluster.c index ba84b3a91..3381d98c8 100644 --- a/src/cluster.c +++ b/src/cluster.c @@ -2803,6 +2803,7 @@ void clusterHandleSlaveMigration(int max_slaves) { } } } + dictReleaseIterator(di); /* Step 4: perform the migration if there is a target, and if I'm the * candidate. */ From 7e79b3f51a06ced3c13a9b8fe756e322705d5ca9 Mon Sep 17 00:00:00 2001 From: antirez Date: Wed, 21 Jan 2015 18:48:08 +0100 Subject: [PATCH 0030/1928] Cluster test initialization: use transaction for reset + set-config-epoch. Otherwise between the two commands other nodes may contact us making the next SET-CONFIG-EPOCH call impossible. --- tests/cluster/tests/includes/init-tests.tcl | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tests/cluster/tests/includes/init-tests.tcl b/tests/cluster/tests/includes/init-tests.tcl index 65fc806e1..117f79208 100644 --- a/tests/cluster/tests/includes/init-tests.tcl +++ b/tests/cluster/tests/includes/init-tests.tcl @@ -28,8 +28,10 @@ test "Cluster nodes are reachable" { test "Cluster nodes hard reset" { foreach_redis_id id { catch {R $id flushall} ; # May fail for readonly slaves. + R $id MULTI R $id cluster reset hard R $id cluster set-config-epoch [expr {$id+1}] + R $id EXEC R $id config set cluster-node-timeout 3000 R $id config set cluster-slave-validity-factor 10 R $id config rewrite From 87301be15152bd3627c9e87533c2c96fa9d4fd67 Mon Sep 17 00:00:00 2001 From: antirez Date: Wed, 21 Jan 2015 23:19:37 +0100 Subject: [PATCH 0031/1928] getRandomHexChars(): use /dev/urandom just to seed. On Darwin /dev/urandom depletes terribly fast. This is not an issue normally, but with Redis Cluster we generate a lot of unique IDs, for example during nodes handshakes. Our IDs need just to be unique without other strong crypto requirements, so this commit turns the function into something that gets a 20 bytes seed from /dev/urandom, and produces the rest of the output just using SHA1 in counter mode. 
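Conceptually the construction is block_i = SHA1(seed || i), concatenating successive blocks until enough output bytes are produced. A compact sketch of that counter-mode idea, assuming the bundled sha1.h API the patch itself uses (the function name prng_counter_mode is illustrative, not the actual getRandomHexChars() code):

    #include <string.h>
    #include <stdint.h>
    #include "sha1.h"   /* bundled SHA1: SHA1_CTX, SHA1Init/SHA1Update/SHA1Final */

    /* Fill 'out' with 'len' pseudo-random bytes derived from a 20-byte seed. */
    static void prng_counter_mode(unsigned char seed[20], unsigned char *out, size_t len) {
        uint64_t counter = 0;
        while (len) {
            unsigned char block[20];
            SHA1_CTX ctx;
            size_t take = len > sizeof(block) ? sizeof(block) : len;

            SHA1Init(&ctx);
            SHA1Update(&ctx, seed, 20);
            SHA1Update(&ctx, (unsigned char*)&counter, sizeof(counter));
            SHA1Final(block, &ctx);
            counter++;

            memcpy(out, block, take);
            out += take;
            len -= take;
        }
    }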
--- src/util.c | 46 ++++++++++++++++++++++++++++++++++++++-------- 1 file changed, 38 insertions(+), 8 deletions(-) diff --git a/src/util.c b/src/util.c index d69721bf4..0a2307c9b 100644 --- a/src/util.c +++ b/src/util.c @@ -40,6 +40,7 @@ #include #include "util.h" +#include "sha1.h" /* Glob-style pattern matching. */ int stringmatchlen(const char *pattern, int patternLen, @@ -428,11 +429,42 @@ int d2string(char *buf, size_t len, double value) { * having run_id == A, and you reconnect and it has run_id == B, you can be * sure that it is either a different instance or it was restarted. */ void getRandomHexChars(char *p, unsigned int len) { - FILE *fp = fopen("/dev/urandom","r"); char *charset = "0123456789abcdef"; unsigned int j; + static int seed_initialized = 0; + unsigned char seed[20]; /* A seed to have a different sequence each run. */ + uint64_t counter = 0; /* The counter we hash together with the seed. */ - if (fp == NULL || fread(p,len,1,fp) == 0) { + if (!seed_initialized) { + /* Initialize a seed and use SHA1 in counter mode, where we hash + * the same seed with a progressive counter. For the goals of this + * function we just need non-colliding strings, there are no + * cryptographic security needs. */ + FILE *fp = fopen("/dev/urandom","r"); + if (fp && fread(seed,sizeof(seed),1,fp) == 1) + seed_initialized = 1; + if (fp) fclose(fp); + } + + if (seed_initialized) { + while(len) { + unsigned char digest[20]; + SHA1_CTX ctx; + unsigned int copylen = len > 20 ? 20 : len; + + SHA1Init(&ctx); + SHA1Update(&ctx, seed, sizeof(seed)); + SHA1Update(&ctx, (unsigned char*)&counter,sizeof(counter)); + SHA1Final(digest, &ctx); + counter++; + + memcpy(p,digest,copylen); + /* Convert to hex digits. */ + for (j = 0; j < copylen; j++) p[j] = charset[p[j] & 0x0F]; + len -= copylen; + p += copylen; + } + } else { /* If we can't read from /dev/urandom, do some reasonable effort * in order to create some entropy, since this function is used to * generate run_id and cluster instance IDs */ @@ -459,14 +491,12 @@ void getRandomHexChars(char *p, unsigned int len) { x += sizeof(pid); } /* Finally xor it with rand() output, that was already seeded with - * time() at startup. */ - for (j = 0; j < len; j++) + * time() at startup, and convert to hex digits. */ + for (j = 0; j < len; j++) { p[j] ^= rand(); + p[j] = charset[p[j] & 0x0F]; + } } - /* Turn it into hex digits taking just 4 bits out of 8 for every byte. */ - for (j = 0; j < len; j++) - p[j] = charset[p[j] & 0x0F]; - if (fp) fclose(fp); } /* Given the filename, return the absolute path as an SDS string, or NULL From 9826038f0bb41a79b2ab06fd7e5f1ffa745b7156 Mon Sep 17 00:00:00 2001 From: antirez Date: Thu, 22 Jan 2015 11:00:26 +0100 Subject: [PATCH 0032/1928] counter must be static in getRandomHexChars(). --- src/util.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/src/util.c b/src/util.c index 0a2307c9b..f995bf1ea 100644 --- a/src/util.c +++ b/src/util.c @@ -431,9 +431,11 @@ int d2string(char *buf, size_t len, double value) { void getRandomHexChars(char *p, unsigned int len) { char *charset = "0123456789abcdef"; unsigned int j; - static int seed_initialized = 0; unsigned char seed[20]; /* A seed to have a different sequence each run. */ - uint64_t counter = 0; /* The counter we hash together with the seed. */ + + /* Global state. */ + static int seed_initialized = 0; + static uint64_t counter = 0; /* The counter we hash with the seed. 
*/ if (!seed_initialized) { /* Initialize a seed and use SHA1 in counter mode, where we hash From a330b6ca97dc92222e0405e6cbae552f5aa324eb Mon Sep 17 00:00:00 2001 From: antirez Date: Thu, 22 Jan 2015 11:10:43 +0100 Subject: [PATCH 0033/1928] The seed must be static in getRandomHexChars(). --- src/util.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/util.c b/src/util.c index f995bf1ea..543de112b 100644 --- a/src/util.c +++ b/src/util.c @@ -431,10 +431,10 @@ int d2string(char *buf, size_t len, double value) { void getRandomHexChars(char *p, unsigned int len) { char *charset = "0123456789abcdef"; unsigned int j; - unsigned char seed[20]; /* A seed to have a different sequence each run. */ /* Global state. */ static int seed_initialized = 0; + static unsigned char seed[20]; /* The SHA1 seed, from /dev/urandom. */ static uint64_t counter = 0; /* The counter we hash with the seed. */ if (!seed_initialized) { From e4d65e35e6a26086ec955470baff159f5947f4c3 Mon Sep 17 00:00:00 2001 From: antirez Date: Thu, 22 Jan 2015 11:10:43 +0100 Subject: [PATCH 0034/1928] The seed must be static in getRandomHexChars(). --- src/util.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/util.c b/src/util.c index f995bf1ea..543de112b 100644 --- a/src/util.c +++ b/src/util.c @@ -431,10 +431,10 @@ int d2string(char *buf, size_t len, double value) { void getRandomHexChars(char *p, unsigned int len) { char *charset = "0123456789abcdef"; unsigned int j; - unsigned char seed[20]; /* A seed to have a different sequence each run. */ /* Global state. */ static int seed_initialized = 0; + static unsigned char seed[20]; /* The SHA1 seed, from /dev/urandom. */ static uint64_t counter = 0; /* The counter we hash with the seed. */ if (!seed_initialized) { From d5ba544e674cf7606f1d7fedf5802cd9bc4037f4 Mon Sep 17 00:00:00 2001 From: antirez Date: Thu, 22 Jan 2015 16:08:21 +0100 Subject: [PATCH 0035/1928] Cluster test: when valgrind is enabled, use a larger node-timeout. Removes some percentage of timing related failures. --- tests/cluster/tests/includes/init-tests.tcl | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/tests/cluster/tests/includes/init-tests.tcl b/tests/cluster/tests/includes/init-tests.tcl index 117f79208..466ab8f25 100644 --- a/tests/cluster/tests/includes/init-tests.tcl +++ b/tests/cluster/tests/includes/init-tests.tcl @@ -27,12 +27,17 @@ test "Cluster nodes are reachable" { test "Cluster nodes hard reset" { foreach_redis_id id { + if {$::valgrind} { + set node_timeout 10000 + } else { + set node_timeout 3000 + } catch {R $id flushall} ; # May fail for readonly slaves. R $id MULTI R $id cluster reset hard R $id cluster set-config-epoch [expr {$id+1}] R $id EXEC - R $id config set cluster-node-timeout 3000 + R $id config set cluster-node-timeout $node_timeout R $id config set cluster-slave-validity-factor 10 R $id config rewrite } From e22d75734e7bb2293cf8fb38c285cdc56378c60f Mon Sep 17 00:00:00 2001 From: antirez Date: Thu, 22 Jan 2015 18:57:45 +0100 Subject: [PATCH 0036/1928] Avoid duplicated instance execution code in Cluster test. 
--- tests/instances.tcl | 47 ++++++++++++++++++++------------------------- 1 file changed, 21 insertions(+), 26 deletions(-) diff --git a/tests/instances.tcl b/tests/instances.tcl index 7d87cdf59..353d9b2d2 100644 --- a/tests/instances.tcl +++ b/tests/instances.tcl @@ -33,6 +33,25 @@ if {[catch {cd tmp}]} { exit 1 } +# Execute the specified instance of the server specified by 'type', using +# the provided configuration file. Returns the PID of the process. +proc exec_instance {type cfgfile} { + if {$type eq "redis"} { + set prgname redis-server + } elseif {$type eq "sentinel"} { + set prgname redis-sentinel + } else { + error "Unknown instance type." + } + + if {$::valgrind} { + set pid [exec valgrind --track-origins=yes --suppressions=../../../src/valgrind.sup --show-reachable=no --show-possibly-lost=no --leak-check=full ../../../src/${prgname} $cfgfile &] + } else { + set pid [exec ../../../src/${prgname} $cfgfile &] + } + return $pid +} + # Spawn a redis or sentinel instance, depending on 'type'. proc spawn_instance {type base_port count {conf {}}} { for {set j 0} {$j < $count} {incr j} { @@ -59,20 +78,7 @@ proc spawn_instance {type base_port count {conf {}}} { close $cfg # Finally exec it and remember the pid for later cleanup. - if {$type eq "redis"} { - set prgname redis-server - } elseif {$type eq "sentinel"} { - set prgname redis-sentinel - } else { - error "Unknown instance type." - } - - if {$::valgrind} { - set pid [exec valgrind --track-origins=yes --suppressions=../../../src/valgrind.sup --show-reachable=no --show-possibly-lost=no --leak-check=full ../../../src/${prgname} $cfgfile &] - } else { - set pid [exec ../../../src/${prgname} $cfgfile &] - } - + set pid [exec_instance $type $cfgfile] lappend ::pids $pid # Check availability @@ -411,18 +417,7 @@ proc restart_instance {type id} { # Execute the instance with its old setup and append the new pid # file for cleanup. - if {$type eq "redis"} { - set prgname redis-server - } else { - set prgname redis-sentinel - } - - if {$::valgrind} { - set pid [exec valgrind --track-origins=yes --suppressions=../../../src/valgrind.sup --show-reachable=no --show-possibly-lost=no --leak-check=full ../../../src/${prgname} $cfgfile &] - } else { - set pid [exec ../../../src/${prgname} $cfgfile &] - } - + set pid [exec_instance $type $cfgfile] set_instance_attrib $type $id pid $pid lappend ::pids $pid From 7885e1264e561afcc0182dd4763903def5138aa8 Mon Sep 17 00:00:00 2001 From: antirez Date: Fri, 23 Jan 2015 18:10:14 +0100 Subject: [PATCH 0037/1928] DEBUG structsize Show sizes of a few important data structures in Redis. More missing. --- src/debug.c | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/src/debug.c b/src/debug.c index 7783196a0..162274cce 100644 --- a/src/debug.c +++ b/src/debug.c @@ -418,6 +418,13 @@ void debugCommand(redisClient *c) { errstr = sdsmapchars(errstr,"\n\r"," ",2); /* no newlines in errors. 
*/ errstr = sdscatlen(errstr,"\r\n",2); addReplySds(c,errstr); + } else if (!strcasecmp(c->argv[1]->ptr,"structsize") && c->argc == 2) { + sds sizes = sdsempty(); + sizes = sdscatprintf(sizes,"bits:%d ", (sizeof(void*) == 8)?64:32); + sizes = sdscatprintf(sizes,"robj:%d ", (int)sizeof(robj)); + sizes = sdscatprintf(sizes,"dictentry:%d ", (int)sizeof(dictEntry)); + sizes = sdscatprintf(sizes,"sdshdr:%d", (int)sizeof(struct sdshdr)); + addReplyBulkSds(c,sizes); } else if (!strcasecmp(c->argv[1]->ptr,"jemalloc") && c->argc == 3) { #if defined(USE_JEMALLOC) if (!strcasecmp(c->argv[2]->ptr, "info")) { From 8aaf5075c5bb76492e56188090f33266d5a7ad46 Mon Sep 17 00:00:00 2001 From: antirez Date: Fri, 23 Jan 2015 18:11:05 +0100 Subject: [PATCH 0038/1928] dict.c: make chaining strategy more clear in dictAddRaw(). --- src/dict.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/src/dict.c b/src/dict.c index 29d400099..7d8db3631 100644 --- a/src/dict.c +++ b/src/dict.c @@ -342,7 +342,10 @@ dictEntry *dictAddRaw(dict *d, void *key) if ((index = _dictKeyIndex(d, key)) == -1) return NULL; - /* Allocate the memory and store the new entry */ + /* Allocate the memory and store the new entry. + * Insert the element in top, with the assumption that in a database + * system it is more likely that recently added entries are accessed + * more frequently. */ ht = dictIsRehashing(d) ? &d->ht[1] : &d->ht[0]; entry = zmalloc(sizeof(*entry)); entry->next = ht->table[index]; From 9802ec3c83cf6b61edad50bc41ddb3f6fdb13c6f Mon Sep 17 00:00:00 2001 From: antirez Date: Sat, 24 Jan 2015 07:52:24 +0100 Subject: [PATCH 0039/1928] Cluster: initialized not used fields in gossip section. Otherwise we risk sending uninitialized data to other nodes, which may contain anything. This was actually not possible only because the initialization of the buffer where the cluster packets header is created was larger than the 3 gossip sections we use, so the memory was already all filled with zeroes by the memset(). --- src/cluster.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/cluster.c b/src/cluster.c index 3381d98c8..2cbb21901 100644 --- a/src/cluster.c +++ b/src/cluster.c @@ -2150,6 +2150,8 @@ void clusterSendPing(clusterLink *link, int type) { memcpy(gossip->ip,this->ip,sizeof(this->ip)); gossip->port = htons(this->port); gossip->flags = htons(this->flags); + gossip->notused1 = 0; + gossip->notused2 = 0; gossipcount++; } totlen = sizeof(clusterMsg)-sizeof(union clusterMsgData); From 6ec5f1f78064264b9b33349858f8aa9157d4efe0 Mon Sep 17 00:00:00 2001 From: mattcollier Date: Sun, 25 Jan 2015 14:01:39 -0500 Subject: [PATCH 0040/1928] Update redis-cli.c Code was adding '\n' (line 521) to the end of NIL values exclusively, making csv output inconsistent.
Removed '\n' --- src/redis-cli.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/redis-cli.c b/src/redis-cli.c index 3c1458742..e243db451 100644 --- a/src/redis-cli.c +++ b/src/redis-cli.c @@ -518,7 +518,7 @@ static sds cliFormatReplyCSV(redisReply *r) { out = sdscatrepr(out,r->str,r->len); break; case REDIS_REPLY_NIL: - out = sdscat(out,"NIL\n"); + out = sdscat(out,"NIL"); break; case REDIS_REPLY_ARRAY: for (i = 0; i < r->elements; i++) { From 145473acc5798a499a0f37e42df48a014a3955c1 Mon Sep 17 00:00:00 2001 From: Matt Stancliff Date: Fri, 9 May 2014 12:06:06 -0400 Subject: [PATCH 0041/1928] Convert check-dump to Redis check-rdb mode redis-check-dump is now named redis-check-rdb and it runs as a mode of redis-server instead of an independent binary. You can now use 'redis-server redis.conf --check-rdb' to check the RDB defined in redis.conf. Using argument --check-rdb checks the RDB and exits. We could potentially also allow the server to continue starting if the RDB check succeeds. This change also enables us to use RDB checking programatically from inside Redis for certain failure conditions. --- src/Makefile | 19 +++--- src/{redis-check-dump.c => redis-check-rdb.c} | 59 ++++++++----------- src/redis.c | 24 ++++++++ src/redis.h | 3 + 4 files changed, 60 insertions(+), 45 deletions(-) rename src/{redis-check-dump.c => redis-check-rdb.c} (94%) diff --git a/src/Makefile b/src/Makefile index 295600c4e..271ab34d8 100644 --- a/src/Makefile +++ b/src/Makefile @@ -117,17 +117,16 @@ endif REDIS_SERVER_NAME=redis-server REDIS_SENTINEL_NAME=redis-sentinel -REDIS_SERVER_OBJ=adlist.o quicklist.o ae.o anet.o dict.o redis.o sds.o zmalloc.o lzf_c.o lzf_d.o pqsort.o zipmap.o sha1.o ziplist.o release.o networking.o util.o object.o db.o replication.o rdb.o t_string.o t_list.o t_set.o t_zset.o t_hash.o config.o aof.o pubsub.o multi.o debug.o sort.o intset.o syncio.o cluster.o crc16.o endianconv.o slowlog.o scripting.o bio.o rio.o rand.o memtest.o crc64.o bitops.o sentinel.o notify.o setproctitle.o blocked.o hyperloglog.o latency.o sparkline.o +REDIS_SERVER_OBJ=adlist.o quicklist.o ae.o anet.o dict.o redis.o sds.o zmalloc.o lzf_c.o lzf_d.o pqsort.o zipmap.o sha1.o ziplist.o release.o networking.o util.o object.o db.o replication.o rdb.o t_string.o t_list.o t_set.o t_zset.o t_hash.o config.o aof.o pubsub.o multi.o debug.o sort.o intset.o syncio.o cluster.o crc16.o endianconv.o slowlog.o scripting.o bio.o rio.o rand.o memtest.o crc64.o bitops.o sentinel.o notify.o setproctitle.o blocked.o hyperloglog.o latency.o sparkline.o redis-check-rdb.o REDIS_CLI_NAME=redis-cli REDIS_CLI_OBJ=anet.o sds.o adlist.o redis-cli.o zmalloc.o release.o anet.o ae.o crc64.o REDIS_BENCHMARK_NAME=redis-benchmark REDIS_BENCHMARK_OBJ=ae.o anet.o redis-benchmark.o sds.o adlist.o zmalloc.o redis-benchmark.o -REDIS_CHECK_DUMP_NAME=redis-check-dump -REDIS_CHECK_DUMP_OBJ=redis-check-dump.o lzf_c.o lzf_d.o crc64.o +REDIS_CHECK_RDB_NAME=redis-check-rdb REDIS_CHECK_AOF_NAME=redis-check-aof REDIS_CHECK_AOF_OBJ=redis-check-aof.o -all: $(REDIS_SERVER_NAME) $(REDIS_SENTINEL_NAME) $(REDIS_CLI_NAME) $(REDIS_BENCHMARK_NAME) $(REDIS_CHECK_DUMP_NAME) $(REDIS_CHECK_AOF_NAME) +all: $(REDIS_SERVER_NAME) $(REDIS_SENTINEL_NAME) $(REDIS_CLI_NAME) $(REDIS_BENCHMARK_NAME) $(REDIS_CHECK_RDB_NAME) $(REDIS_CHECK_AOF_NAME) @echo "" @echo "Hint: It's a good idea to run 'make test' ;)" @echo "" @@ -178,6 +177,10 @@ $(REDIS_SERVER_NAME): $(REDIS_SERVER_OBJ) $(REDIS_SENTINEL_NAME): $(REDIS_SERVER_NAME) $(REDIS_INSTALL) $(REDIS_SERVER_NAME) 
$(REDIS_SENTINEL_NAME) +# redis-check-rdb +$(REDIS_CHECK_RDB_NAME): $(REDIS_SERVER_NAME) + $(REDIS_INSTALL) $(REDIS_SERVER_NAME) $(REDIS_CHECK_RDB_NAME) + # redis-cli $(REDIS_CLI_NAME): $(REDIS_CLI_OBJ) $(REDIS_LD) -o $@ $^ ../deps/hiredis/libhiredis.a ../deps/linenoise/linenoise.o $(FINAL_LIBS) @@ -186,10 +189,6 @@ $(REDIS_CLI_NAME): $(REDIS_CLI_OBJ) $(REDIS_BENCHMARK_NAME): $(REDIS_BENCHMARK_OBJ) $(REDIS_LD) -o $@ $^ ../deps/hiredis/libhiredis.a $(FINAL_LIBS) -# redis-check-dump -$(REDIS_CHECK_DUMP_NAME): $(REDIS_CHECK_DUMP_OBJ) - $(REDIS_LD) -o $@ $^ $(FINAL_LIBS) - # redis-check-aof $(REDIS_CHECK_AOF_NAME): $(REDIS_CHECK_AOF_OBJ) $(REDIS_LD) -o $@ $^ $(FINAL_LIBS) @@ -201,7 +200,7 @@ $(REDIS_CHECK_AOF_NAME): $(REDIS_CHECK_AOF_OBJ) $(REDIS_CC) -c $< clean: - rm -rf $(REDIS_SERVER_NAME) $(REDIS_SENTINEL_NAME) $(REDIS_CLI_NAME) $(REDIS_BENCHMARK_NAME) $(REDIS_CHECK_DUMP_NAME) $(REDIS_CHECK_AOF_NAME) *.o *.gcda *.gcno *.gcov redis.info lcov-html + rm -rf $(REDIS_SERVER_NAME) $(REDIS_SENTINEL_NAME) $(REDIS_CLI_NAME) $(REDIS_BENCHMARK_NAME) $(REDIS_CHECK_RDB_NAME) $(REDIS_CHECK_AOF_NAME) *.o *.gcda *.gcno *.gcov redis.info lcov-html .PHONY: clean @@ -257,6 +256,6 @@ install: all $(REDIS_INSTALL) $(REDIS_SERVER_NAME) $(INSTALL_BIN) $(REDIS_INSTALL) $(REDIS_BENCHMARK_NAME) $(INSTALL_BIN) $(REDIS_INSTALL) $(REDIS_CLI_NAME) $(INSTALL_BIN) - $(REDIS_INSTALL) $(REDIS_CHECK_DUMP_NAME) $(INSTALL_BIN) + $(REDIS_INSTALL) $(REDIS_CHECK_RDB_NAME) $(INSTALL_BIN) $(REDIS_INSTALL) $(REDIS_CHECK_AOF_NAME) $(INSTALL_BIN) @ln -sf $(REDIS_SERVER_NAME) $(INSTALL_BIN)/$(REDIS_SENTINEL_NAME) diff --git a/src/redis-check-dump.c b/src/redis-check-rdb.c similarity index 94% rename from src/redis-check-dump.c rename to src/redis-check-rdb.c index 546462001..893a81855 100644 --- a/src/redis-check-dump.c +++ b/src/redis-check-rdb.c @@ -133,18 +133,13 @@ typedef struct { char success; } entry; -/* Global vars that are actually used as constants. The following double - * values are used for double on-disk serialization, and are initialized - * at runtime to avoid strange compiler optimizations. */ -static double R_Zero, R_PosInf, R_NegInf, R_Nan; - #define MAX_TYPES_NUM 256 #define MAX_TYPE_NAME_LEN 16 /* store string types for output */ static char types[MAX_TYPES_NUM][MAX_TYPE_NAME_LEN]; /* Return true if 't' is a valid object type. */ -int checkType(unsigned char t) { +static int checkType(unsigned char t) { /* In case a new object type is added, update the following * condition as necessary. */ return @@ -154,7 +149,7 @@ int checkType(unsigned char t) { } /* when number of bytes to read is negative, do a peek */ -int readBytes(void *target, long num) { +static int readBytes(void *target, long num) { char peek = (num < 0) ? 1 : 0; num = (num < 0) ? -num : num; @@ -188,7 +183,7 @@ int processHeader(void) { return dump_version; } -int loadType(entry *e) { +static int loadType(entry *e) { uint32_t offset = CURR_OFFSET; /* this byte needs to qualify as type */ @@ -208,7 +203,7 @@ int loadType(entry *e) { return 0; } -int peekType() { +static int peekType() { unsigned char t; if (readBytes(&t, -1) && (checkType(t))) return t; @@ -216,7 +211,7 @@ int peekType() { } /* discard time, just consume the bytes */ -int processTime(int type) { +static int processTime(int type) { uint32_t offset = CURR_OFFSET; unsigned char t[8]; int timelen = (type == REDIS_EXPIRETIME_MS) ? 
8 : 4; @@ -231,7 +226,7 @@ int processTime(int type) { return 0; } -uint32_t loadLength(int *isencoded) { +static uint32_t loadLength(int *isencoded) { unsigned char buf[2]; uint32_t len; int type; @@ -257,7 +252,7 @@ uint32_t loadLength(int *isencoded) { } } -char *loadIntegerObject(int enctype) { +static char *loadIntegerObject(int enctype) { uint32_t offset = CURR_OFFSET; unsigned char enc[4]; long long val; @@ -289,7 +284,7 @@ char *loadIntegerObject(int enctype) { return buf; } -char* loadLzfStringObject() { +static char* loadLzfStringObject() { unsigned int slen, clen; char *c, *s; @@ -313,7 +308,7 @@ char* loadLzfStringObject() { } /* returns NULL when not processable, char* when valid */ -char* loadStringObject() { +static char* loadStringObject() { uint32_t offset = CURR_OFFSET; int isencoded; uint32_t len; @@ -336,7 +331,7 @@ char* loadStringObject() { if (len == REDIS_RDB_LENERR) return NULL; - char *buf = malloc(sizeof(char) * (len+1)); + char *buf = zmalloc(sizeof(char) * (len+1)); if (buf == NULL) return NULL; buf[len] = '\0'; if (!readBytes(buf, len)) { @@ -346,7 +341,7 @@ char* loadStringObject() { return buf; } -int processStringObject(char** store) { +static int processStringObject(char** store) { unsigned long offset = CURR_OFFSET; char *key = loadStringObject(); if (key == NULL) { @@ -363,7 +358,7 @@ int processStringObject(char** store) { return 1; } -double* loadDoubleValue() { +static double* loadDoubleValue() { char buf[256]; unsigned char len; double* val; @@ -386,7 +381,7 @@ double* loadDoubleValue() { } } -int processDoubleValue(double** store) { +static int processDoubleValue(double** store) { unsigned long offset = CURR_OFFSET; double *val = loadDoubleValue(); if (val == NULL) { @@ -403,7 +398,7 @@ int processDoubleValue(double** store) { return 1; } -int loadPair(entry *e) { +static int loadPair(entry *e) { uint32_t offset = CURR_OFFSET; uint32_t i; @@ -486,7 +481,7 @@ int loadPair(entry *e) { return 1; } -entry loadEntry() { +static entry loadEntry() { entry e = { NULL, -1, 0 }; uint32_t length, offset[4]; @@ -544,7 +539,7 @@ entry loadEntry() { return e; } -void printCentered(int indent, int width, char* body) { +static void printCentered(int indent, int width, char* body) { char head[256], tail[256]; memset(head, '\0', 256); memset(tail, '\0', 256); @@ -554,21 +549,21 @@ void printCentered(int indent, int width, char* body) { printf("%s %s %s\n", head, body, tail); } -void printValid(uint64_t ops, uint64_t bytes) { +static void printValid(uint64_t ops, uint64_t bytes) { char body[80]; sprintf(body, "Processed %llu valid opcodes (in %llu bytes)", (unsigned long long) ops, (unsigned long long) bytes); printCentered(4, 80, body); } -void printSkipped(uint64_t bytes, uint64_t offset) { +static void printSkipped(uint64_t bytes, uint64_t offset) { char body[80]; sprintf(body, "Skipped %llu bytes (resuming at 0x%08llx)", (unsigned long long) bytes, (unsigned long long) offset); printCentered(4, 80, body); } -void printErrorStack(entry *e) { +static void printErrorStack(entry *e) { unsigned int i; char body[64]; @@ -708,24 +703,18 @@ void process(void) { } } -int main(int argc, char **argv) { - /* expect the first argument to be the dump file */ - if (argc <= 1) { - printf("Usage: %s \n", argv[0]); - exit(0); - } - +int redis_check_rdb(char *rdbfilename) { int fd; off_t size; struct stat stat; void *data; - fd = open(argv[1], O_RDONLY); + fd = open(rdbfilename, O_RDONLY); if (fd < 1) { - ERROR("Cannot open file: %s\n", argv[1]); + ERROR("Cannot open file: %s\n", 
rdbfilename); } if (fstat(fd, &stat) == -1) { - ERROR("Cannot stat: %s\n", argv[1]); + ERROR("Cannot stat: %s\n", rdbfilename); } else { size = stat.st_size; } @@ -736,7 +725,7 @@ int main(int argc, char **argv) { data = mmap(NULL, size, PROT_READ, MAP_SHARED, fd, 0); if (data == MAP_FAILED) { - ERROR("Cannot mmap: %s\n", argv[1]); + ERROR("Cannot mmap: %s\n", rdbfilename); } /* Initialize static vars */ diff --git a/src/redis.c b/src/redis.c index 13df8d28e..24739cc3c 100644 --- a/src/redis.c +++ b/src/redis.c @@ -3550,6 +3550,17 @@ int checkForSentinelMode(int argc, char **argv) { return 0; } +/* Returns 1 if there is --check-rdb among the arguments or if + * argv[0] is exactly "redis-check-rdb". */ +int checkForCheckRDBMode(int argc, char **argv) { + int j; + + if (strstr(argv[0],"redis-check-rdb") != NULL) return 1; + for (j = 1; j < argc; j++) + if (!strcmp(argv[j],"--check-rdb")) return 1; + return 0; +} + /* Function called at startup to load RDB or AOF file in memory. */ void loadDataFromDisk(void) { long long start = ustime(); @@ -3766,6 +3777,11 @@ int main(int argc, char **argv) { while(j != argc) { if (argv[j][0] == '-' && argv[j][1] == '-') { /* Option name */ + if (!strcmp(argv[j], "--check-rdb")) { + /* Argument has no options, need to skip for parsing. */ + j++; + continue; + } if (sdslen(options)) options = sdscat(options,"\n"); options = sdscat(options,argv[j]+2); options = sdscat(options," "); @@ -3791,9 +3807,17 @@ int main(int argc, char **argv) { redisLog(REDIS_WARNING, "Warning: no config file specified, using the default config. In order to specify a config file use %s /path/to/%s.conf", argv[0], server.sentinel_mode ? "sentinel" : "redis"); } + if (checkForCheckRDBMode(argc, argv)) { + redisLog(REDIS_WARNING, "Checking RDB file %s", server.rdb_filename); + redisLog(REDIS_WARNING, "To check different RDB file: " + "redis-check-rdb --dbfilename "); + exit(redis_check_rdb(server.rdb_filename)); + } + server.supervised = redisIsSupervised(server.supervised_mode); int background = server.daemonize && !server.supervised; if (background) daemonize(); + initServer(); if (background || server.pidfile) createPidFile(); redisSetProcTitle(argv[0]); diff --git a/src/redis.h b/src/redis.h index 0c191d06f..87bb811b2 100644 --- a/src/redis.h +++ b/src/redis.h @@ -1380,6 +1380,9 @@ void sentinelTimer(void); char *sentinelHandleConfiguration(char **argv, int argc); void sentinelIsRunning(void); +/* redis-check-rdb */ +int redis_check_rdb(char *rdbfilename); + /* Scripting */ void scriptingInit(void); From 764b000c3e7fcb88bdef51b4d3339fa9f620ee07 Mon Sep 17 00:00:00 2001 From: Matt Stancliff Date: Mon, 12 May 2014 12:12:48 -0400 Subject: [PATCH 0042/1928] Remove code duplication from check-rdb redis-check-rdb (previously redis-check-dump) had every RDB define copy/pasted from rdb.h and some defines copied from redis.h. Since the initial copy, some constants had changed in Redis headers and check-dump was using incorrect values. 
Since check-rdb is now a mode of Redis, the old check-dump code is cleaned up to: - replace all printf with redisLog (and remove \n from all strings) - remove all copy/pasted defines to use defines from rdb.h and redis.h - replace all malloc/free with zmalloc/zfree - remove unnecessary include headers --- src/redis-check-rdb.c | 188 ++++++++++++++---------------------------- 1 file changed, 63 insertions(+), 125 deletions(-) diff --git a/src/redis-check-rdb.c b/src/redis-check-rdb.c index 893a81855..c34204204 100644 --- a/src/redis-check-rdb.c +++ b/src/redis-check-rdb.c @@ -29,74 +29,19 @@ */ +#include "redis.h" +#include "rdb.h" #include #include #include #include #include #include -#include -#include -#include -#include #include "lzf.h" #include "crc64.h" -/* Object types */ -#define REDIS_STRING 0 -#define REDIS_LIST 1 -#define REDIS_SET 2 -#define REDIS_ZSET 3 -#define REDIS_HASH 4 -#define REDIS_HASH_ZIPMAP 9 -#define REDIS_LIST_ZIPLIST 10 -#define REDIS_SET_INTSET 11 -#define REDIS_ZSET_ZIPLIST 12 -#define REDIS_HASH_ZIPLIST 13 - -/* Objects encoding. Some kind of objects like Strings and Hashes can be - * internally represented in multiple ways. The 'encoding' field of the object - * is set to one of this fields for this object. */ -#define REDIS_ENCODING_RAW 0 /* Raw representation */ -#define REDIS_ENCODING_INT 1 /* Encoded as integer */ -#define REDIS_ENCODING_ZIPMAP 2 /* Encoded as zipmap */ -#define REDIS_ENCODING_HT 3 /* Encoded as a hash table */ - -/* Object types only used for dumping to disk */ -#define REDIS_EXPIRETIME_MS 252 -#define REDIS_EXPIRETIME 253 -#define REDIS_SELECTDB 254 -#define REDIS_EOF 255 - -/* Defines related to the dump file format. To store 32 bits lengths for short - * keys requires a lot of space, so we check the most significant 2 bits of - * the first byte to interpreter the length: - * - * 00|000000 => if the two MSB are 00 the len is the 6 bits of this byte - * 01|000000 00000000 => 01, the len is 14 byes, 6 bits + 8 bits of next byte - * 10|000000 [32 bit integer] => if it's 01, a full 32 bit len will follow - * 11|000000 this means: specially encoded object will follow. The six bits - * number specify the kind of object that follows. - * See the REDIS_RDB_ENC_* defines. - * - * Lengths up to 63 are stored using a single byte, most DB keys, and may - * values, will fit inside. */ -#define REDIS_RDB_6BITLEN 0 -#define REDIS_RDB_14BITLEN 1 -#define REDIS_RDB_32BITLEN 2 -#define REDIS_RDB_ENCVAL 3 -#define REDIS_RDB_LENERR UINT_MAX - -/* When a length of a string object stored on disk has the first two bits - * set, the remaining two bits specify a special encoding for the object - * accordingly to the following defines: */ -#define REDIS_RDB_ENC_INT8 0 /* 8 bit signed integer */ -#define REDIS_RDB_ENC_INT16 1 /* 16 bit signed integer */ -#define REDIS_RDB_ENC_INT32 2 /* 32 bit signed integer */ -#define REDIS_RDB_ENC_LZF 3 /* string compressed with FASTLZ */ - #define ERROR(...) { \ - printf(__VA_ARGS__); \ + redisLog(REDIS_WARNING, __VA_ARGS__); \ exit(1); \ } @@ -139,13 +84,13 @@ typedef struct { static char types[MAX_TYPES_NUM][MAX_TYPE_NAME_LEN]; /* Return true if 't' is a valid object type. */ -static int checkType(unsigned char t) { +static int rdbCheckType(unsigned char t) { /* In case a new object type is added, update the following * condition as necessary. 
*/ return - (t >= REDIS_HASH_ZIPMAP && t <= REDIS_HASH_ZIPLIST) || - t <= REDIS_HASH || - t >= REDIS_EXPIRETIME_MS; + (t >= REDIS_RDB_TYPE_HASH_ZIPMAP && t <= REDIS_RDB_TYPE_HASH_ZIPLIST) || + t <= REDIS_RDB_TYPE_HASH || + t >= REDIS_RDB_OPCODE_EXPIRETIME_MS; } /* when number of bytes to read is negative, do a peek */ @@ -168,17 +113,17 @@ int processHeader(void) { int dump_version; if (!readBytes(buf, 9)) { - ERROR("Cannot read header\n"); + ERROR("Cannot read header"); } /* expect the first 5 bytes to equal REDIS */ if (memcmp(buf,"REDIS",5) != 0) { - ERROR("Wrong signature in header\n"); + ERROR("Wrong signature in header"); } dump_version = (int)strtol(buf + 5, NULL, 10); if (dump_version < 1 || dump_version > 6) { - ERROR("Unknown RDB format version: %d\n", dump_version); + ERROR("Unknown RDB format version: %d", dump_version); } return dump_version; } @@ -189,7 +134,7 @@ static int loadType(entry *e) { /* this byte needs to qualify as type */ unsigned char t; if (readBytes(&t, 1)) { - if (checkType(t)) { + if (rdbCheckType(t)) { e->type = t; return 1; } else { @@ -205,7 +150,7 @@ static int loadType(entry *e) { static int peekType() { unsigned char t; - if (readBytes(&t, -1) && (checkType(t))) + if (readBytes(&t, -1) && (rdbCheckType(t))) return t; return -1; } @@ -214,7 +159,7 @@ static int peekType() { static int processTime(int type) { uint32_t offset = CURR_OFFSET; unsigned char t[8]; - int timelen = (type == REDIS_EXPIRETIME_MS) ? 8 : 4; + int timelen = (type == REDIS_RDB_OPCODE_EXPIRETIME_MS) ? 8 : 4; if (readBytes(t,timelen)) { return 1; @@ -279,7 +224,7 @@ static char *loadIntegerObject(int enctype) { /* convert val into string */ char *buf; - buf = malloc(sizeof(char) * 128); + buf = zmalloc(sizeof(char) * 128); sprintf(buf, "%lld", val); return buf; } @@ -291,19 +236,19 @@ static char* loadLzfStringObject() { if ((clen = loadLength(NULL)) == REDIS_RDB_LENERR) return NULL; if ((slen = loadLength(NULL)) == REDIS_RDB_LENERR) return NULL; - c = malloc(clen); + c = zmalloc(clen); if (!readBytes(c, clen)) { - free(c); + zfree(c); return NULL; } - s = malloc(slen+1); + s = zmalloc(slen+1); if (lzf_decompress(c,clen,s,slen) == 0) { - free(c); free(s); + zfree(c); zfree(s); return NULL; } - free(c); + zfree(c); return s; } @@ -335,7 +280,7 @@ static char* loadStringObject() { if (buf == NULL) return NULL; buf[len] = '\0'; if (!readBytes(buf, len)) { - free(buf); + zfree(buf); return NULL; } return buf; @@ -346,14 +291,14 @@ static int processStringObject(char** store) { char *key = loadStringObject(); if (key == NULL) { SHIFT_ERROR(offset, "Error reading string object"); - free(key); + zfree(key); return 0; } if (store != NULL) { *store = key; } else { - free(key); + zfree(key); } return 1; } @@ -365,14 +310,14 @@ static double* loadDoubleValue() { if (!readBytes(&len,1)) return NULL; - val = malloc(sizeof(double)); + val = zmalloc(sizeof(double)); switch(len) { case 255: *val = R_NegInf; return val; case 254: *val = R_PosInf; return val; case 253: *val = R_Nan; return val; default: if (!readBytes(buf, len)) { - free(val); + zfree(val); return NULL; } buf[len] = '\0'; @@ -386,14 +331,14 @@ static int processDoubleValue(double** store) { double *val = loadDoubleValue(); if (val == NULL) { SHIFT_ERROR(offset, "Error reading double value"); - free(val); + zfree(val); return 0; } if (store != NULL) { *store = val; } else { - free(val); + zfree(val); } return 1; } @@ -412,10 +357,10 @@ static int loadPair(entry *e) { } uint32_t length = 0; - if (e->type == REDIS_LIST || - e->type == 
REDIS_SET || - e->type == REDIS_ZSET || - e->type == REDIS_HASH) { + if (e->type == REDIS_RDB_TYPE_LIST || + e->type == REDIS_RDB_TYPE_SET || + e->type == REDIS_RDB_TYPE_ZSET || + e->type == REDIS_RDB_TYPE_HASH) { if ((length = loadLength(NULL)) == REDIS_RDB_LENERR) { SHIFT_ERROR(offset, "Error reading %s length", types[e->type]); return 0; @@ -423,19 +368,19 @@ static int loadPair(entry *e) { } switch(e->type) { - case REDIS_STRING: - case REDIS_HASH_ZIPMAP: - case REDIS_LIST_ZIPLIST: - case REDIS_SET_INTSET: - case REDIS_ZSET_ZIPLIST: - case REDIS_HASH_ZIPLIST: + case REDIS_RDB_TYPE_STRING: + case REDIS_RDB_TYPE_HASH_ZIPMAP: + case REDIS_RDB_TYPE_LIST_ZIPLIST: + case REDIS_RDB_TYPE_SET_INTSET: + case REDIS_RDB_TYPE_ZSET_ZIPLIST: + case REDIS_RDB_TYPE_HASH_ZIPLIST: if (!processStringObject(NULL)) { SHIFT_ERROR(offset, "Error reading entry value"); return 0; } break; - case REDIS_LIST: - case REDIS_SET: + case REDIS_RDB_TYPE_LIST: + case REDIS_RDB_TYPE_SET: for (i = 0; i < length; i++) { offset = CURR_OFFSET; if (!processStringObject(NULL)) { @@ -444,7 +389,7 @@ static int loadPair(entry *e) { } } break; - case REDIS_ZSET: + case REDIS_RDB_TYPE_ZSET: for (i = 0; i < length; i++) { offset = CURR_OFFSET; if (!processStringObject(NULL)) { @@ -458,7 +403,7 @@ static int loadPair(entry *e) { } } break; - case REDIS_HASH: + case REDIS_RDB_TYPE_HASH: for (i = 0; i < length; i++) { offset = CURR_OFFSET; if (!processStringObject(NULL)) { @@ -494,7 +439,7 @@ static entry loadEntry() { } offset[1] = CURR_OFFSET; - if (e.type == REDIS_SELECTDB) { + if (e.type == REDIS_RDB_OPCODE_SELECTDB) { if ((length = loadLength(NULL)) == REDIS_RDB_LENERR) { SHIFT_ERROR(offset[1], "Error reading database number"); return e; @@ -503,7 +448,7 @@ static entry loadEntry() { SHIFT_ERROR(offset[1], "Database number out of range (%d)", length); return e; } - } else if (e.type == REDIS_EOF) { + } else if (e.type == REDIS_RDB_OPCODE_EOF) { if (positions[level].offset < positions[level].size) { SHIFT_ERROR(offset[0], "Unexpected EOF"); } else { @@ -512,8 +457,8 @@ static entry loadEntry() { return e; } else { /* optionally consume expire */ - if (e.type == REDIS_EXPIRETIME || - e.type == REDIS_EXPIRETIME_MS) { + if (e.type == REDIS_RDB_OPCODE_EXPIRETIME || + e.type == REDIS_RDB_OPCODE_EXPIRETIME_MS) { if (!processTime(e.type)) return e; if (!loadType(&e)) return e; } @@ -546,7 +491,7 @@ static void printCentered(int indent, int width, char* body) { memset(head, '=', indent); memset(tail, '=', width - 2 - indent - strlen(body)); - printf("%s %s %s\n", head, body, tail); + redisLog(REDIS_WARNING, "%s %s %s", head, body, tail); } static void printValid(uint64_t ops, uint64_t bytes) { @@ -593,7 +538,7 @@ static void printErrorStack(entry *e) { /* display error stack */ for (i = 0; i < errors.level; i++) { - printf("0x%08lx - %s\n", + redisLog(REDIS_WARNING, "0x%08lx - %s", (unsigned long) errors.offset[i], errors.error[i]); } } @@ -606,7 +551,7 @@ void process(void) { /* Exclude the final checksum for RDB >= 5. Will be checked at the end. 
*/ if (dump_version >= 5) { if (positions[0].size < 8) { - printf("RDB version >= 5 but no room for checksum.\n"); + redisLog(REDIS_WARNING, "RDB version >= 5 but no room for checksum."); exit(1); } positions[0].size -= 8; @@ -655,7 +600,7 @@ void process(void) { /* advance position */ positions[0] = positions[1]; } - free(entry.key); + zfree(entry.key); } /* because there is another potential error, @@ -663,7 +608,7 @@ void process(void) { printValid(num_valid_ops, num_valid_bytes); /* expect an eof */ - if (entry.type != REDIS_EOF) { + if (entry.type != REDIS_RDB_OPCODE_EOF) { /* last byte should be EOF, add error */ errors.level = 0; SHIFT_ERROR(positions[0].offset, "Expected EOF, got %s", types[entry.type]); @@ -691,14 +636,13 @@ void process(void) { if (crc != crc2) { SHIFT_ERROR(positions[0].offset, "RDB CRC64 does not match."); } else { - printf("CRC64 checksum is OK\n"); + redisLog(REDIS_WARNING, "CRC64 checksum is OK"); } } /* print summary on errors */ if (num_errors) { - printf("\n"); - printf("Total unprocessable opcodes: %llu\n", + redisLog(REDIS_WARNING, "Total unprocessable opcodes: %llu", (unsigned long long) num_errors); } } @@ -711,21 +655,21 @@ int redis_check_rdb(char *rdbfilename) { fd = open(rdbfilename, O_RDONLY); if (fd < 1) { - ERROR("Cannot open file: %s\n", rdbfilename); + ERROR("Cannot open file: %s", rdbfilename); } if (fstat(fd, &stat) == -1) { - ERROR("Cannot stat: %s\n", rdbfilename); + ERROR("Cannot stat: %s", rdbfilename); } else { size = stat.st_size; } if (sizeof(size_t) == sizeof(int32_t) && size >= INT_MAX) { - ERROR("Cannot check dump files >2GB on a 32-bit platform\n"); + ERROR("Cannot check dump files >2GB on a 32-bit platform"); } data = mmap(NULL, size, PROT_READ, MAP_SHARED, fd, 0); if (data == MAP_FAILED) { - ERROR("Cannot mmap: %s\n", rdbfilename); + ERROR("Cannot mmap: %s", rdbfilename); } /* Initialize static vars */ @@ -735,22 +679,16 @@ int redis_check_rdb(char *rdbfilename) { errors.level = 0; /* Object types */ - sprintf(types[REDIS_STRING], "STRING"); - sprintf(types[REDIS_LIST], "LIST"); - sprintf(types[REDIS_SET], "SET"); - sprintf(types[REDIS_ZSET], "ZSET"); - sprintf(types[REDIS_HASH], "HASH"); + sprintf(types[REDIS_RDB_TYPE_STRING], "STRING"); + sprintf(types[REDIS_RDB_TYPE_LIST], "LIST"); + sprintf(types[REDIS_RDB_TYPE_SET], "SET"); + sprintf(types[REDIS_RDB_TYPE_ZSET], "ZSET"); + sprintf(types[REDIS_RDB_TYPE_HASH], "HASH"); /* Object types only used for dumping to disk */ - sprintf(types[REDIS_EXPIRETIME], "EXPIRETIME"); - sprintf(types[REDIS_SELECTDB], "SELECTDB"); - sprintf(types[REDIS_EOF], "EOF"); - - /* Double constants initialization */ - R_Zero = 0.0; - R_PosInf = 1.0/R_Zero; - R_NegInf = -1.0/R_Zero; - R_Nan = R_Zero/R_Zero; + sprintf(types[REDIS_RDB_OPCODE_EXPIRETIME], "EXPIRETIME"); + sprintf(types[REDIS_RDB_OPCODE_SELECTDB], "SELECTDB"); + sprintf(types[REDIS_RDB_OPCODE_EOF], "EOF"); process(); From d8c7db1bdba3938f31856a067b2966285acbf97f Mon Sep 17 00:00:00 2001 From: Matt Stancliff Date: Mon, 12 May 2014 11:44:37 -0400 Subject: [PATCH 0043/1928] Improve RDB error-on-load handling Previouly if we loaded a corrupt RDB, Redis printed an error report with a big "REPORT ON GITHUB" message at the bottom. But, we know RDB load failures are corrupt data, not corrupt code. Now when RDB failure is detected (duplicate keys or unknown data types in the file), we run check-rdb against the RDB then exit. 
The automatic check-rdb hopefully gives the user instant feedback about what is wrong instead of providing a mysterious stack trace. --- src/rdb.c | 24 ++++++++++++++++++------ 1 file changed, 18 insertions(+), 6 deletions(-) diff --git a/src/rdb.c b/src/rdb.c index d5e3a7f47..3cf8344aa 100644 --- a/src/rdb.c +++ b/src/rdb.c @@ -44,6 +44,16 @@ #define RDB_LOAD_ENC (1<<0) #define RDB_LOAD_PLAIN (1<<1) +#define rdbExitReportCorruptRDB(reason) rdbCheckThenExit(reason, __LINE__); + +void rdbCheckThenExit(char *reason, int where) { + redisLog(REDIS_WARNING, "Corrupt RDB detected at rdb.c:%d (%s). " + "Running 'redis-check-rdb --dbfilename %s'", + where, reason, server.rdb_filename); + redis_check_rdb(server.rdb_filename); + exit(1); +} + static int rdbWriteRaw(rio *rdb, void *p, size_t len) { if (rdb && rioWrite(rdb,p,len) == 0) return -1; @@ -188,7 +198,7 @@ void *rdbLoadIntegerObject(rio *rdb, int enctype, int flags) { val = (int32_t)v; } else { val = 0; /* anti-warning */ - redisPanic("Unknown RDB integer encoding type"); + rdbExitReportCorruptRDB("Unknown RDB integer encoding type"); } if (plain) { char buf[REDIS_LONGSTR_SIZE], *p; @@ -394,7 +404,7 @@ void *rdbGenericLoadStringObject(rio *rdb, int flags) { case REDIS_RDB_ENC_LZF: return rdbLoadLzfStringObject(rdb,flags); default: - redisPanic("Unknown RDB encoding type"); + rdbExitReportCorruptRDB("Unknown RDB encoding type"); } } @@ -923,7 +933,7 @@ void rdbRemoveTempFile(pid_t childpid) { /* Load a Redis object of the specified type from the specified file. * On success a newly allocated object is returned, otherwise NULL. */ robj *rdbLoadObject(int rdbtype, rio *rdb) { - robj *o, *ele, *dec; + robj *o = NULL, *ele, *dec; size_t len; unsigned int i; @@ -1078,7 +1088,9 @@ robj *rdbLoadObject(int rdbtype, rio *rdb) { /* Add pair to hash table */ ret = dictAdd((dict*)o->ptr, field, value); - redisAssert(ret == DICT_OK); + if (ret == DICT_ERR) { + rdbExitReportCorruptRDB("Duplicate keys detected"); + } } /* All pairs should be read by now */ @@ -1164,11 +1176,11 @@ robj *rdbLoadObject(int rdbtype, rio *rdb) { hashTypeConvert(o, REDIS_ENCODING_HT); break; default: - redisPanic("Unknown encoding"); + rdbExitReportCorruptRDB("Unknown encoding"); break; } } else { - redisPanic("Unknown object type"); + rdbExitReportCorruptRDB("Unknown object type"); } return o; } From 8899f91a7f29500e0005fa8a19211503267d884f Mon Sep 17 00:00:00 2001 From: antirez Date: Wed, 28 Jan 2015 23:26:42 +0100 Subject: [PATCH 0044/1928] create-cluster script added. Simple shell script to create / destroy Redis clusters for manual testing. --- utils/create-cluster/README | 27 ++++++++++++ utils/create-cluster/create-cluster | 66 +++++++++++++++++++++++++++++ 2 files changed, 93 insertions(+) create mode 100644 utils/create-cluster/README create mode 100755 utils/create-cluster/create-cluster diff --git a/utils/create-cluster/README b/utils/create-cluster/README new file mode 100644 index 000000000..f3a3f0883 --- /dev/null +++ b/utils/create-cluster/README @@ -0,0 +1,27 @@ +Create-custer is a small script used to easily start a big number of Redis +instances configured to run in cluster mode. Its main goal is to allow manual +testing in a condition which is not easy to replicate with the Redis cluster +unit tests, for example when a lot of instances are needed in order to trigger +a give bug. + +The tool can also be used just to easily create a number of instances in a +Redis Cluster in order to experiment a bit with the system. 
+ +USAGE +--- + +To create a cluster, follow this steps: + +1. Edit create-cluster and change the start / end port, depending on the +number of instances you want to create. +2. Use "./create-cluster start" in order to run the instances. +3. Use "./create-cluster create" in order to execute redis-trib create, so that +an actual Redis cluster will be created. +4. Now you are ready to play with the cluster. AOF files and logs for each instances are created in the current directory. + +In order to stop a cluster: + +1. Use "./craete-cluster stop" to stop all the instances. After you stopped the instances you can use "./create-cluster start" to restart them if you change ideas. +2. Use "./create-cluster clean" to remove all the AOF / log files to restat with a clean environment. + +It is currently hardcoded that you start a cluster where each master has one slave, since the script is pretty basic. diff --git a/utils/create-cluster/create-cluster b/utils/create-cluster/create-cluster new file mode 100755 index 000000000..80161587e --- /dev/null +++ b/utils/create-cluster/create-cluster @@ -0,0 +1,66 @@ +#!/bin/bash + +PORT=30000 +ENDPORT=30006 + +if [ "$1" == "start" ] +then + while [ $((PORT < ENDPORT)) != "0" ]; do + PORT=$((PORT+1)) + echo "Starting $PORT" + ../../src/redis-server --port $PORT --cluster-enabled yes --cluster-config-file nodes-${PORT}.conf --cluster-node-timeout 5 --appendonly yes --appendfilename appendonly-${PORT}.aof --dbfilename dump-${PORT}.rdb --logfile ${PORT}.log --daemonize yes + done + exit 0 +fi + +if [ "$1" == "create" ] +then + HOSTS="" + while [ $((PORT < ENDPORT)) != "0" ]; do + PORT=$((PORT+1)) + HOSTS="$HOSTS 127.0.0.1:$PORT" + done + ../../src/redis-trib.rb create --replicas 1 $HOSTS + exit 0 +fi + +if [ "$1" == "stop" ] +then + while [ $((PORT < ENDPORT)) != "0" ]; do + PORT=$((PORT+1)) + echo "Stopping $PORT" + redis-cli -p $PORT shutdown nosave + done + exit 0 +fi + +if [ "$1" == "join" ] +then + while [ $((PORT < ENDPORT)) != "0" ]; do + PORT=$((PORT+1)) + echo "Joining $PORT" + redis-cli -p $PORT CLUSTER MEET 127.0.0.1 10002 + done + + echo "Waiting 5 seconds" + sleep 5 + + PORT=30000 + while [ $((PORT < ENDPORT)) != "0" ]; do + PORT=$((PORT+1)) + echo "Replicate $PORT" + redis-cli -p $PORT CLUSTER REPLICATE $2 + done + exit 0 +fi + +if [ "$1" == "clean" ] +then + rm -rf *.log + rm -rf appendonly*.aof + rm -rf dump*.rdb + rm -rf nodes*.conf + exit 0 +fi + +echo "Usage: $0 [start|create|stop|join|clean]" From 69583be1812027a1877d14639bdaf76675549989 Mon Sep 17 00:00:00 2001 From: antirez Date: Wed, 28 Jan 2015 23:28:27 +0100 Subject: [PATCH 0045/1928] Ignore redis-check-rdb after the name switch. --- .gitignore | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.gitignore b/.gitignore index d3b1c2f24..cdab2150e 100644 --- a/.gitignore +++ b/.gitignore @@ -4,7 +4,7 @@ dump.rdb redis-benchmark redis-check-aof -redis-check-dump +redis-check-rdb redis-cli redis-sentinel redis-server From d59ad97d76eeb7cf39809e598bb3a34c57c81978 Mon Sep 17 00:00:00 2001 From: antirez Date: Thu, 29 Jan 2015 13:21:42 +0100 Subject: [PATCH 0046/1928] create-cluster script: sane default timeout. 
--- utils/create-cluster/create-cluster | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/utils/create-cluster/create-cluster b/utils/create-cluster/create-cluster index 80161587e..76f61091d 100755 --- a/utils/create-cluster/create-cluster +++ b/utils/create-cluster/create-cluster @@ -2,13 +2,14 @@ PORT=30000 ENDPORT=30006 +TIMEOUT=15000 if [ "$1" == "start" ] then while [ $((PORT < ENDPORT)) != "0" ]; do PORT=$((PORT+1)) echo "Starting $PORT" - ../../src/redis-server --port $PORT --cluster-enabled yes --cluster-config-file nodes-${PORT}.conf --cluster-node-timeout 5 --appendonly yes --appendfilename appendonly-${PORT}.aof --dbfilename dump-${PORT}.rdb --logfile ${PORT}.log --daemonize yes + ../../src/redis-server --port $PORT --cluster-enabled yes --cluster-config-file nodes-${PORT}.conf --cluster-node-timeout $TIMEOUT --appendonly yes --appendfilename appendonly-${PORT}.aof --dbfilename dump-${PORT}.rdb --logfile ${PORT}.log --daemonize yes done exit 0 fi From 3fd43062c8127857f98c09a06bf70710b2dc2f68 Mon Sep 17 00:00:00 2001 From: antirez Date: Thu, 29 Jan 2015 14:17:45 +0100 Subject: [PATCH 0047/1928] Cluster: use a number of gossip sections proportional to cluster size. Otherwise it is impossible to receive the majority of failure reports in the node_timeout*2 window in larger clusters. Still with a 200 nodes cluster, 20 gossip sections are a very reasonable amount of bytes to send. A side effect of this change is also fater cluster nodes joins for large clusters, because the cluster layout makes less time to propagate. --- src/cluster.c | 59 ++++++++++++++++++++++++++++++++++++--------------- 1 file changed, 42 insertions(+), 17 deletions(-) diff --git a/src/cluster.c b/src/cluster.c index 2cbb21901..aaba7aa7b 100644 --- a/src/cluster.c +++ b/src/cluster.c @@ -2037,7 +2037,8 @@ void clusterBroadcastMessage(void *buf, size_t len) { dictReleaseIterator(di); } -/* Build the message header */ +/* Build the message header. hdr must point to a buffer at least + * sizeof(clusterMsg) in bytes. */ void clusterBuildMessageHdr(clusterMsg *hdr, int type) { int totlen = 0; uint64_t offset; @@ -2098,40 +2099,60 @@ void clusterBuildMessageHdr(clusterMsg *hdr, int type) { /* Send a PING or PONG packet to the specified node, making sure to add enough * gossip informations. */ void clusterSendPing(clusterLink *link, int type) { - unsigned char buf[sizeof(clusterMsg)+sizeof(clusterMsgDataGossip)*3]; - clusterMsg *hdr = (clusterMsg*) buf; - int gossipcount = 0, totlen; - /* freshnodes is the number of nodes we can still use to populate the - * gossip section of the ping packet. Basically we start with the nodes - * we have in memory minus two (ourself and the node we are sending the - * message to). Every time we add a node we decrement the counter, so when - * it will drop to <= zero we know there is no more gossip info we can - * send. */ + unsigned char *buf; + clusterMsg *hdr; + int gossipcount = 0; /* Number of gossip sections added so far. */ + int wanted; /* Number of gossip sections we want to append if possible. */ + int totlen; /* Total packet length. */ + /* freshnodes is the max number of nodes we can hope to append at all: + * nodes available minus two (ourself and the node we are sending the + * message to). However practically there may be less valid nodes since + * nodes in handshake state, disconnected, are not considered. */ int freshnodes = dictSize(server.cluster->nodes)-2; + /* How many gossip sections we want to add? 
1/10 of the available nodes + * and anyway at least 3. */ + wanted = freshnodes/10; + if (wanted < 3) wanted = 3; + + /* Compute the maxium totlen to allocate our buffer. We'll fix the totlen + * later according to the number of gossip sections we really were able + * to put inside the packet. */ + totlen = sizeof(clusterMsg)-sizeof(union clusterMsgData); + totlen += (sizeof(clusterMsgDataGossip)*wanted); + /* Note: clusterBuildMessageHdr() expects the buffer to be always at least + * sizeof(clusterMsg) or more. */ + if (totlen < (int)sizeof(clusterMsg)) totlen = sizeof(clusterMsg); + buf = zcalloc(totlen); + hdr = (clusterMsg*) buf; + + /* Populate the header. */ if (link->node && type == CLUSTERMSG_TYPE_PING) link->node->ping_sent = mstime(); clusterBuildMessageHdr(hdr,type); /* Populate the gossip fields */ - while(freshnodes > 0 && gossipcount < 3) { + int maxiterations = wanted+10; + while(freshnodes > 0 && gossipcount < wanted && maxiterations--) { dictEntry *de = dictGetRandomKey(server.cluster->nodes); clusterNode *this = dictGetVal(de); clusterMsgDataGossip *gossip; int j; + /* Don't include this node: the whole packet header is about us + * already, so we just gossip about other nodes. */ + if (this == myself) continue; + /* In the gossip section don't include: - * 1) Myself. - * 2) Nodes in HANDSHAKE state. + * 1) Nodes in HANDSHAKE state. * 3) Nodes with the NOADDR flag set. * 4) Disconnected nodes if they don't have configured slots. */ - if (this == myself || - this->flags & (REDIS_NODE_HANDSHAKE|REDIS_NODE_NOADDR) || + if (this->flags & (REDIS_NODE_HANDSHAKE|REDIS_NODE_NOADDR) || (this->link == NULL && this->numslots == 0)) { - freshnodes--; /* otherwise we may loop forever. */ - continue; + freshnodes--; /* Tecnically not correct, but saves CPU. */ + continue; } /* Check if we already added this node */ @@ -2154,11 +2175,15 @@ void clusterSendPing(clusterLink *link, int type) { gossip->notused2 = 0; gossipcount++; } + + /* Ready to send... fix the totlen fiend and queue the message in the + * output buffer. */ totlen = sizeof(clusterMsg)-sizeof(union clusterMsgData); totlen += (sizeof(clusterMsgDataGossip)*gossipcount); hdr->count = htons(gossipcount); hdr->totlen = htonl(totlen); clusterSendMessage(link,buf,totlen); + zfree(buf); } /* Send a PONG packet to every connected node that's not in handshake state From 1efacfe53d27913f640c886633340c37f76eafda Mon Sep 17 00:00:00 2001 From: antirez Date: Thu, 29 Jan 2015 15:01:26 +0100 Subject: [PATCH 0048/1928] CLUSTER count-failure-reports command added. --- src/cluster.c | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/src/cluster.c b/src/cluster.c index aaba7aa7b..b4a291e6f 100644 --- a/src/cluster.c +++ b/src/cluster.c @@ -4063,6 +4063,18 @@ void clusterCommand(redisClient *c) { addReplyBulkCString(c,ni); sdsfree(ni); } + } else if (!strcasecmp(c->argv[1]->ptr,"count-failure-reports") && + c->argc == 3) + { + /* CLUSTER COUNT-FAILURE-REPORTS */ + clusterNode *n = clusterLookupNode(c->argv[2]->ptr); + + if (!n) { + addReplyErrorFormat(c,"Unknown node %s", (char*)c->argv[2]->ptr); + return; + } else { + addReplyLongLong(c,clusterNodeFailureReportsCount(n)); + } } else if (!strcasecmp(c->argv[1]->ptr,"failover") && (c->argc == 2 || c->argc == 3)) { From e5a22064cce137cb4897779050966a719dd4bdce Mon Sep 17 00:00:00 2001 From: antirez Date: Thu, 29 Jan 2015 15:40:08 +0100 Subject: [PATCH 0049/1928] Cluster: magical 10% of nodes explained in comments. 
--- src/cluster.c | 28 ++++++++++++++++++++++++++-- 1 file changed, 26 insertions(+), 2 deletions(-) diff --git a/src/cluster.c b/src/cluster.c index b4a291e6f..69684fc9f 100644 --- a/src/cluster.c +++ b/src/cluster.c @@ -2110,8 +2110,32 @@ void clusterSendPing(clusterLink *link, int type) { * nodes in handshake state, disconnected, are not considered. */ int freshnodes = dictSize(server.cluster->nodes)-2; - /* How many gossip sections we want to add? 1/10 of the available nodes - * and anyway at least 3. */ + /* How many gossip sections we want to add? 1/10 of the number of nodes + * and anyway at least 3. Why 1/10? + * + * If we have N masters, with N/10 entries, and we consider that in + * node_timeout we exchange with each other node at least 4 packets + * (we ping in the worst case in node_timeout/2 time, and we also + * receive two pings from the host), we have a total of 8 packets + * in the node_timeout*2 falure reports validity time. So we have + * that, for a single PFAIL node, we can expect to receive the following + * number of failure reports (in the specified window of time): + * + * PROB * GOSSIP_ENTRIES_PER_PACKET * TOTAL_PACKETS: + * + * PROB = probability of being featured in a single gossip entry, + * which is 1 / NUM_OF_NODES. + * ENTRIES = 10. + * TOTAL_PACKETS = 2 * 4 * NUM_OF_MASTERS. + * + * If we assume we have just masters (so num of nodes and num of masters + * is the same), with 1/10 we always get over the majority, and specifically + * 80% of the number of nodes, to account for many masters failing at the + * same time. + * + * Since we have non-voting slaves that lower the probability of an entry + * to feature our node, we set the number of entires per packet as + * 10% of the total nodes we have. */ wanted = freshnodes/10; if (wanted < 3) wanted = 3; From 6b1c6334bec4277b2c5cb8f04975deb22e8e3a58 Mon Sep 17 00:00:00 2001 From: antirez Date: Fri, 30 Jan 2015 10:41:45 +0100 Subject: [PATCH 0050/1928] Cluster: create-cluster script improved. --- utils/create-cluster/README | 2 +- utils/create-cluster/create-cluster | 62 +++++++++++++++++++++-------- 2 files changed, 46 insertions(+), 18 deletions(-) diff --git a/utils/create-cluster/README b/utils/create-cluster/README index f3a3f0883..1f43748ee 100644 --- a/utils/create-cluster/README +++ b/utils/create-cluster/README @@ -24,4 +24,4 @@ In order to stop a cluster: 1. Use "./craete-cluster stop" to stop all the instances. After you stopped the instances you can use "./create-cluster start" to restart them if you change ideas. 2. Use "./create-cluster clean" to remove all the AOF / log files to restat with a clean environment. -It is currently hardcoded that you start a cluster where each master has one slave, since the script is pretty basic. +Use the command "./create-cluster help" to get the full list of features. diff --git a/utils/create-cluster/create-cluster b/utils/create-cluster/create-cluster index 76f61091d..efb3135d4 100755 --- a/utils/create-cluster/create-cluster +++ b/utils/create-cluster/create-cluster @@ -1,8 +1,21 @@ #!/bin/bash +# Settings PORT=30000 -ENDPORT=30006 -TIMEOUT=15000 +TIMEOUT=2000 +NODES=6 +REPLICAS=1 + +# You may want to put the above config parameters into config.sh in order to +# override the defaults without modifying this script. 
+ +if [ -a config.sh ] +then + source "config.sh" +fi + +# Computed vars +ENDPORT=$((PORT+NODES)) if [ "$1" == "start" ] then @@ -21,7 +34,7 @@ then PORT=$((PORT+1)) HOSTS="$HOSTS 127.0.0.1:$PORT" done - ../../src/redis-trib.rb create --replicas 1 $HOSTS + ../../src/redis-trib.rb create --replicas $REPLICAS $HOSTS exit 0 fi @@ -35,22 +48,31 @@ then exit 0 fi -if [ "$1" == "join" ] +if [ "$1" == "watch" ] +then + PORT=$((PORT+1)) + while [ 1 ]; do + clear + date + redis-cli -p $PORT cluster nodes | head -30 + sleep 1 + done + exit 0 +fi + +if [ "$1" == "tail" ] +then + INSTANCE=$2 + PORT=$((PORT+INSTANCE)) + tail -f ${PORT}.log + exit 0 +fi + +if [ "$1" == "call" ] then while [ $((PORT < ENDPORT)) != "0" ]; do PORT=$((PORT+1)) - echo "Joining $PORT" - redis-cli -p $PORT CLUSTER MEET 127.0.0.1 10002 - done - - echo "Waiting 5 seconds" - sleep 5 - - PORT=30000 - while [ $((PORT < ENDPORT)) != "0" ]; do - PORT=$((PORT+1)) - echo "Replicate $PORT" - redis-cli -p $PORT CLUSTER REPLICATE $2 + ../../src/redis-cli -p $PORT $2 $3 $4 $5 $6 $7 $8 $9 done exit 0 fi @@ -64,4 +86,10 @@ then exit 0 fi -echo "Usage: $0 [start|create|stop|join|clean]" +echo "Usage: $0 [start|create|stop|watch|tail|clean]" +echo "start -- Launch Redis Cluster instances." +echo "create -- Create a cluster using redis-trib create." +echo "stop -- Stop Redis Cluster instances." +echo "watch -- Show CLUSTER NODES output (first 30 lines) of first node." +echo "tail -- Run tail -f of instance at base port + ID." +echo "clean -- Remove all instances data, logs, configs." From 69b4f00d28f471f66ddf83155bc41ad608918f44 Mon Sep 17 00:00:00 2001 From: antirez Date: Fri, 30 Jan 2015 11:23:27 +0100 Subject: [PATCH 0051/1928] More correct wanted / maxiterations values in clusterSendPing(). --- src/cluster.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/src/cluster.c b/src/cluster.c index 69684fc9f..a296bdd8b 100644 --- a/src/cluster.c +++ b/src/cluster.c @@ -40,6 +40,7 @@ #include #include #include +#include /* A global reference to myself is handy to make code more clear. * Myself always points to server.cluster->myself, that is, the clusterNode @@ -2136,8 +2137,9 @@ void clusterSendPing(clusterLink *link, int type) { * Since we have non-voting slaves that lower the probability of an entry * to feature our node, we set the number of entires per packet as * 10% of the total nodes we have. */ - wanted = freshnodes/10; + wanted = floor(dictSize(server.cluster->nodes)/10); if (wanted < 3) wanted = 3; + if (wanted > freshnodes) wanted = freshnodes; /* Compute the maxium totlen to allocate our buffer. We'll fix the totlen * later according to the number of gossip sections we really were able @@ -2156,7 +2158,7 @@ void clusterSendPing(clusterLink *link, int type) { clusterBuildMessageHdr(hdr,type); /* Populate the gossip fields */ - int maxiterations = wanted+10; + int maxiterations = wanted*2; while(freshnodes > 0 && gossipcount < wanted && maxiterations--) { dictEntry *de = dictGetRandomKey(server.cluster->nodes); clusterNode *this = dictGetVal(de); @@ -2199,6 +2201,8 @@ void clusterSendPing(clusterLink *link, int type) { gossip->notused2 = 0; gossipcount++; } + redisLog(REDIS_VERBOSE,"WANTED: %d, USED_ITER: %d, GOSSIPCOUNT: %d", + wanted, wanted*2-maxiterations, gossipcount); /* Ready to send... fix the totlen fiend and queue the message in the * output buffer. 
*/ From 233729fe7f6a26bfff62eba78716b711dc1719ce Mon Sep 17 00:00:00 2001 From: antirez Date: Fri, 30 Jan 2015 11:54:18 +0100 Subject: [PATCH 0052/1928] Cluster: some bias towwards FAIL/PFAIL nodes in gossip sections. This improves PFAIL -> FAIL switch. Too late at this point in the RC releases to add proper PFAIL/FAIL separate dictionary to do this in a less randomized way. Tested in practice with experiments that this helps. PFAIL -> FAIL average with 20 nodes and node-timeout set to 5 seconds takes 2.5 seconds without this commit, 1 second with this commit. --- src/cluster.c | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/src/cluster.c b/src/cluster.c index a296bdd8b..2da0ed5f6 100644 --- a/src/cluster.c +++ b/src/cluster.c @@ -2158,7 +2158,7 @@ void clusterSendPing(clusterLink *link, int type) { clusterBuildMessageHdr(hdr,type); /* Populate the gossip fields */ - int maxiterations = wanted*2; + int maxiterations = wanted*3; while(freshnodes > 0 && gossipcount < wanted && maxiterations--) { dictEntry *de = dictGetRandomKey(server.cluster->nodes); clusterNode *this = dictGetVal(de); @@ -2169,6 +2169,11 @@ void clusterSendPing(clusterLink *link, int type) { * already, so we just gossip about other nodes. */ if (this == myself) continue; + /* Give a bias to FAIL/PFAIL nodes. */ + if (maxiterations > wanted*2 && + !(this->flags & (REDIS_NODE_PFAIL|REDIS_NODE_FAIL))) + continue; + /* In the gossip section don't include: * 1) Nodes in HANDSHAKE state. * 3) Nodes with the NOADDR flag set. @@ -2201,8 +2206,6 @@ void clusterSendPing(clusterLink *link, int type) { gossip->notused2 = 0; gossipcount++; } - redisLog(REDIS_VERBOSE,"WANTED: %d, USED_ITER: %d, GOSSIPCOUNT: %d", - wanted, wanted*2-maxiterations, gossipcount); /* Ready to send... fix the totlen fiend and queue the message in the * output buffer. */ From 79fa67cdb8a92ed98a499dbb31483740dac0686c Mon Sep 17 00:00:00 2001 From: antirez Date: Fri, 30 Jan 2015 12:03:17 +0100 Subject: [PATCH 0053/1928] Cluster: Tcl script to check avg pfail->fail time. --- utils/cluster_fail_time.tcl | 50 +++++++++++++++++++++++++++++++++++++ 1 file changed, 50 insertions(+) create mode 100644 utils/cluster_fail_time.tcl diff --git a/utils/cluster_fail_time.tcl b/utils/cluster_fail_time.tcl new file mode 100644 index 000000000..87399495f --- /dev/null +++ b/utils/cluster_fail_time.tcl @@ -0,0 +1,50 @@ +# This simple script is used in order to estimate the average PFAIL->FAIL +# state switch after a failure. + +set ::sleep_time 10 ; # How much to sleep to trigger PFAIL. +set ::fail_port 30016 ; # Node to put in sleep. +set ::other_port 30001 ; # Node to use to monitor the flag switch. + +proc avg vector { + set sum 0.0 + foreach x $vector { + set sum [expr {$sum+$x}] + } + expr {$sum/[llength $vector]} +} + +set samples {} +while 1 { + exec redis-cli -p $::fail_port debug sleep $::sleep_time > /dev/null & + + # Wait for fail? to appear. + while 1 { + set output [exec redis-cli -p $::other_port cluster nodes] + if {[string match {*fail\?*} $output]} break + after 100 + } + + puts "FAIL?" + set start [clock milliseconds] + + # Wait for fail? to disappear. + while 1 { + set output [exec redis-cli -p $::other_port cluster nodes] + if {![string match {*fail\?*} $output]} break + after 100 + } + + puts "FAIL" + set now [clock milliseconds] + set elapsed [expr {$now-$start}] + puts $elapsed + lappend samples $elapsed + + puts "AVG([llength $samples]): [avg $samples]" + + # Wait for the instance to be available again. 
+ exec redis-cli -p $::fail_port ping + + # Wait for the fail flag to be cleared. + after 2000 +} From 6502947a8519e9109c6fa1575460290d92b48f38 Mon Sep 17 00:00:00 2001 From: antirez Date: Fri, 30 Jan 2015 15:19:39 +0100 Subject: [PATCH 0054/1928] redis-check-rdb: initialize entry in case while is never entered. --- src/redis-check-rdb.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/redis-check-rdb.c b/src/redis-check-rdb.c index c34204204..ff87f142f 100644 --- a/src/redis-check-rdb.c +++ b/src/redis-check-rdb.c @@ -545,7 +545,7 @@ static void printErrorStack(entry *e) { void process(void) { uint64_t num_errors = 0, num_valid_ops = 0, num_valid_bytes = 0; - entry entry; + entry entry = { NULL, -1, 0 }; int dump_version = processHeader(); /* Exclude the final checksum for RDB >= 5. Will be checked at the end. */ From c908774b9e071cc1e4fddd8f430e13aec31f9b67 Mon Sep 17 00:00:00 2001 From: Sun He Date: Mon, 2 Feb 2015 11:29:20 +0800 Subject: [PATCH 0055/1928] sort.c: REDIS_LIST's dontsort optimization also fix the situation "dontsort DESC" of a list --- src/sort.c | 33 ++++++++++++++++++++++++++++++--- 1 file changed, 30 insertions(+), 3 deletions(-) diff --git a/src/sort.c b/src/sort.c index 74b27cb67..e9a191eb2 100644 --- a/src/sort.c +++ b/src/sort.c @@ -332,7 +332,7 @@ void sortCommand(redisClient *c) { * In this case to load all the objects in the vector is a huge waste of * resources. We just allocate a vector that is big enough for the selected * range length, and make sure to load just this part in the vector. */ - if (sortval->type == REDIS_ZSET && + if ((sortval->type == REDIS_ZSET || sortval->type == REDIS_LIST) && dontsort && (start != 0 || end != vectorlen-1)) { @@ -343,7 +343,35 @@ void sortCommand(redisClient *c) { vector = zmalloc(sizeof(redisSortObject)*vectorlen); j = 0; - if (sortval->type == REDIS_LIST) { + if (sortval->type == REDIS_LIST && dontsort) { + /* Special handling for a list, if 'dontsort' is true. + * This makes sure we return elements in the list original + * ordering, accordingly to DESC / ASC options. + * + * Note that in this case we also handle LIMIT here in a direct + * way, just getting the required range, as an optimization. */ + if (end >= start) { + listTypeIterator *li; + listTypeEntry entry; + li = listTypeInitIterator(sortval, + desc ? listTypeLength(sortval) - start - 1 : start, + desc ? REDIS_HEAD : REDIS_TAIL); + + while(j < vectorlen && listTypeNext(li,&entry)) { + vector[j].obj = listTypeGet(&entry); + vector[j].u.score = 0; + vector[j].u.cmpobj = NULL; + j++; + } + listTypeReleaseIterator(li); + /* The code producing the output does not know that in the case of + * sorted set, 'dontsort', and LIMIT, we are able to get just the + * range, already sorted, so we need to adjust "start" and "end" + * to make sure start is set to 0. 
*/ + end -= start; + start = 0; + } + } else if (sortval->type == REDIS_LIST) { listTypeIterator *li = listTypeInitIterator(sortval,0,REDIS_TAIL); listTypeEntry entry; while(listTypeNext(li,&entry)) { @@ -420,7 +448,6 @@ void sortCommand(redisClient *c) { } else { redisPanic("Unknown type"); } - printf("j: %d; vectorlen: %d\n", j, vectorlen); redisAssertWithInfo(c,sortval,j == vectorlen); /* Now it's time to load the right scores in the sorting vector */ From d5e01519e5348ddb9166473f465dfc2dfab303ac Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Leandro=20L=C3=B3pez=20=28inkel=29?= Date: Mon, 2 Feb 2015 18:16:18 -0300 Subject: [PATCH 0056/1928] Support CLIENT commands in Redis Sentinel When trying to debug sentinel connections or max connections errors it would be very useful to have the ability to see the list of connected clients to a running sentinel. At the same time it would be very helpful to be able to name each sentinel connection or kill offending clients. This commits adds the already defined CLIENT commands back to Redis Sentinel. --- src/sentinel.c | 1 + 1 file changed, 1 insertion(+) diff --git a/src/sentinel.c b/src/sentinel.c index c693a5862..4030fca13 100644 --- a/src/sentinel.c +++ b/src/sentinel.c @@ -398,6 +398,7 @@ struct redisCommand sentinelcmds[] = { {"publish",sentinelPublishCommand,3,"",0,NULL,0,0,0,0,0}, {"info",sentinelInfoCommand,-1,"",0,NULL,0,0,0,0,0}, {"role",sentinelRoleCommand,1,"l",0,NULL,0,0,0,0,0}, + {"client",clientCommand,-2,"rs",0,NULL,0,0,0,0,0}, {"shutdown",shutdownCommand,-1,"",0,NULL,0,0,0,0,0} }; From 128c642d055c4502db8d096c3d63b971d4708c92 Mon Sep 17 00:00:00 2001 From: antirez Date: Tue, 3 Feb 2015 09:34:20 +0100 Subject: [PATCH 0057/1928] Ignore config.sh inside create-cluster script dir. --- utils/create-cluster/.gitignore | 1 + 1 file changed, 1 insertion(+) create mode 100644 utils/create-cluster/.gitignore diff --git a/utils/create-cluster/.gitignore b/utils/create-cluster/.gitignore new file mode 100644 index 000000000..cdd7c19c8 --- /dev/null +++ b/utils/create-cluster/.gitignore @@ -0,0 +1 @@ +config.sh From 45102a6f639712cd6025112bac6a988dc6e6b897 Mon Sep 17 00:00:00 2001 From: antirez Date: Tue, 3 Feb 2015 10:09:21 +0100 Subject: [PATCH 0058/1928] Norrow backtrace and setproctitle() to Linux+glibc. Backtrace is a glibc extension, while setproctitle() implementation depends on the memory layout and is partially libc dependent. --- src/config.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/config.h b/src/config.h index 2b5004baa..1ed8ef301 100644 --- a/src/config.h +++ b/src/config.h @@ -57,7 +57,7 @@ #endif /* Test for backtrace() */ -#if defined(__APPLE__) || defined(__linux__) +#if defined(__APPLE__) || (defined(__linux__) && defined(__GLIBC__)) #define HAVE_BACKTRACE 1 #endif @@ -118,7 +118,7 @@ #define USE_SETPROCTITLE #endif -#if (defined __linux || defined __APPLE__) +#if ((defined __linux && defined(__GLIBC__)) || defined __APPLE__) #define USE_SETPROCTITLE #define INIT_SETPROCTITLE_REPLACEMENT void spt_init(int argc, char *argv[]); From 7d1e15808422a1233280440566a5c684de7aa92f Mon Sep 17 00:00:00 2001 From: antirez Date: Tue, 3 Feb 2015 10:25:01 +0100 Subject: [PATCH 0059/1928] Handle redis-check-rdb as a standalone program. This also makes it backward compatible in the usage, but for the command name. However the old command name was less obvious so it is worth to break it probably. 
With the new setup the program main can perform argument parsing and everything else useful for an RDB check regardless of the Redis server itself. --- .gitignore | 1 + src/redis-check-rdb.c | 12 ++++++++++++ src/redis.c | 24 ++++++------------------ src/redis.h | 1 + 4 files changed, 20 insertions(+), 18 deletions(-) diff --git a/.gitignore b/.gitignore index cdab2150e..3d346fbcf 100644 --- a/.gitignore +++ b/.gitignore @@ -5,6 +5,7 @@ dump.rdb redis-benchmark redis-check-aof redis-check-rdb +redis-check-dump redis-cli redis-sentinel redis-server diff --git a/src/redis-check-rdb.c b/src/redis-check-rdb.c index ff87f142f..21f72c222 100644 --- a/src/redis-check-rdb.c +++ b/src/redis-check-rdb.c @@ -696,3 +696,15 @@ int redis_check_rdb(char *rdbfilename) { close(fd); return 0; } + +/* RDB check main: called form redis.c when Redis is executed with the + * redis-check-rdb alias. */ +int redis_check_rdb_main(char **argv, int argc) { + if (argc != 2) { + fprintf(stderr, "Usage: %s \n", argv[0]); + exit(1); + } + redisLog(REDIS_WARNING, "Checking RDB file %s", argv[1]); + exit(redis_check_rdb(argv[1])); + return 0; +} diff --git a/src/redis.c b/src/redis.c index 24739cc3c..b2f9ffc68 100644 --- a/src/redis.c +++ b/src/redis.c @@ -3550,17 +3550,6 @@ int checkForSentinelMode(int argc, char **argv) { return 0; } -/* Returns 1 if there is --check-rdb among the arguments or if - * argv[0] is exactly "redis-check-rdb". */ -int checkForCheckRDBMode(int argc, char **argv) { - int j; - - if (strstr(argv[0],"redis-check-rdb") != NULL) return 1; - for (j = 1; j < argc; j++) - if (!strcmp(argv[j],"--check-rdb")) return 1; - return 0; -} - /* Function called at startup to load RDB or AOF file in memory. */ void loadDataFromDisk(void) { long long start = ustime(); @@ -3746,6 +3735,12 @@ int main(int argc, char **argv) { initSentinel(); } + /* Check if we need to start in redis-check-rdb mode. We just execute + * the program main. However the program is part of the Redis executable + * so that we can easily execute an RDB check on loading errors. */ + if (strstr(argv[0],"redis-check-rdb") != NULL) + exit(redis_check_rdb_main(argv,argc)); + if (argc >= 2) { int j = 1; /* First option to parse in argv[] */ sds options = sdsempty(); @@ -3807,13 +3802,6 @@ int main(int argc, char **argv) { redisLog(REDIS_WARNING, "Warning: no config file specified, using the default config. In order to specify a config file use %s /path/to/%s.conf", argv[0], server.sentinel_mode ? "sentinel" : "redis"); } - if (checkForCheckRDBMode(argc, argv)) { - redisLog(REDIS_WARNING, "Checking RDB file %s", server.rdb_filename); - redisLog(REDIS_WARNING, "To check different RDB file: " - "redis-check-rdb --dbfilename "); - exit(redis_check_rdb(server.rdb_filename)); - } - server.supervised = redisIsSupervised(server.supervised_mode); int background = server.daemonize && !server.supervised; if (background) daemonize(); diff --git a/src/redis.h b/src/redis.h index 87bb811b2..2170c5d29 100644 --- a/src/redis.h +++ b/src/redis.h @@ -1382,6 +1382,7 @@ void sentinelIsRunning(void); /* redis-check-rdb */ int redis_check_rdb(char *rdbfilename); +int redis_check_rdb_main(char **argv, int argc); /* Scripting */ void scriptingInit(void); From fad758b32292b9467a65f9f0f32ec8d4d0c84d4e Mon Sep 17 00:00:00 2001 From: antirez Date: Tue, 3 Feb 2015 10:33:05 +0100 Subject: [PATCH 0060/1928] Check RDB automatically in a few more cases. 
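As an aside on the redis-check-rdb patch above: the mechanism is the classic multi-call binary pattern, where a single executable changes personality based on the name it was invoked with (typically via a symlink or copy named redis-check-rdb). A minimal standalone sketch of that idea in plain C, with purely illustrative names (check_rdb_main stands in for the real redis_check_rdb_main):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* Hypothetical stand-in for the RDB checking entry point. */
    static int check_rdb_main(int argc, char **argv) {
        if (argc != 2) {
            fprintf(stderr, "Usage: %s <rdb-file>\n", argv[0]);
            return 1;
        }
        printf("Checking RDB file %s\n", argv[1]);
        return 0;
    }

    int main(int argc, char **argv) {
        /* Dispatch on the program name before any normal option parsing. */
        if (strstr(argv[0], "check-rdb") != NULL)
            exit(check_rdb_main(argc, argv));
        printf("Running with the normal server personality\n");
        return 0;
    }

Building this once and invoking it through a symlink named, say, mytool-check-rdb exercises the alternate code path without shipping a second executable.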
--- src/rdb.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/rdb.c b/src/rdb.c index 3cf8344aa..caa06db87 100644 --- a/src/rdb.c +++ b/src/rdb.c @@ -48,7 +48,7 @@ void rdbCheckThenExit(char *reason, int where) { redisLog(REDIS_WARNING, "Corrupt RDB detected at rdb.c:%d (%s). " - "Running 'redis-check-rdb --dbfilename %s'", + "Running 'redis-check-rdb %s'", where, reason, server.rdb_filename); redis_check_rdb(server.rdb_filename); exit(1); @@ -1375,7 +1375,7 @@ int rdbLoad(char *filename) { redisLog(REDIS_WARNING,"RDB file was saved with checksum disabled: no check performed."); } else if (cksum != expected) { redisLog(REDIS_WARNING,"Wrong RDB checksum. Aborting now."); - exit(1); + rdbExitReportCorruptRDB("RDB CRC error"); } } @@ -1385,7 +1385,7 @@ int rdbLoad(char *filename) { eoferr: /* unexpected end of file is handled here with a fatal exit */ redisLog(REDIS_WARNING,"Short read or OOM loading DB. Unrecoverable error, aborting now."); - exit(1); + rdbExitReportCorruptRDB("Unexpected EOF reading RDB file"); return REDIS_ERR; /* Just to avoid warning */ } From 1dbd8e94a7a0bed51c9c35f6c34486dad876ebc5 Mon Sep 17 00:00:00 2001 From: antirez Date: Tue, 3 Feb 2015 14:17:06 +0100 Subject: [PATCH 0061/1928] More obvious indentation in setCommand(). --- src/t_string.c | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/src/t_string.c b/src/t_string.c index 34ab11b51..53da875cb 100644 --- a/src/t_string.c +++ b/src/t_string.c @@ -105,22 +105,26 @@ void setCommand(redisClient *c) { if ((a[0] == 'n' || a[0] == 'N') && (a[1] == 'x' || a[1] == 'X') && a[2] == '\0' && - !(flags & REDIS_SET_XX)) { + !(flags & REDIS_SET_XX)) + { flags |= REDIS_SET_NX; } else if ((a[0] == 'x' || a[0] == 'X') && (a[1] == 'x' || a[1] == 'X') && a[2] == '\0' && - !(flags & REDIS_SET_NX)) { + !(flags & REDIS_SET_NX)) + { flags |= REDIS_SET_XX; } else if ((a[0] == 'e' || a[0] == 'E') && (a[1] == 'x' || a[1] == 'X') && a[2] == '\0' && - !(flags & REDIS_SET_PX) && next) { + !(flags & REDIS_SET_PX) && next) + { flags |= REDIS_SET_EX; unit = UNIT_SECONDS; expire = next; j++; } else if ((a[0] == 'p' || a[0] == 'P') && (a[1] == 'x' || a[1] == 'X') && a[2] == '\0' && - !(flags & REDIS_SET_EX) && next) { + !(flags & REDIS_SET_EX) && next) + { flags |= REDIS_SET_PX; unit = UNIT_MILLISECONDS; expire = next; From 96abf659008e7e8e544e446bbfac922c059a5650 Mon Sep 17 00:00:00 2001 From: antirez Date: Tue, 3 Feb 2015 15:38:21 +0100 Subject: [PATCH 0062/1928] Hopefully better sort.c optimization comments. Related to #2346. --- src/sort.c | 24 +++++++++--------------- 1 file changed, 9 insertions(+), 15 deletions(-) diff --git a/src/sort.c b/src/sort.c index e9a191eb2..97500df45 100644 --- a/src/sort.c +++ b/src/sort.c @@ -322,16 +322,16 @@ void sortCommand(redisClient *c) { } if (end >= vectorlen) end = vectorlen-1; - /* Optimization: + /* Whenever possible, we load elements into the output array in a more + * direct way. This is possible if: * - * 1) if the object to sort is a sorted set. + * 1) The object to sort is a sorted set or a list (internally sorted). * 2) There is nothing to sort as dontsort is true (BY ). - * 3) We have a LIMIT option that actually reduces the number of elements - * to fetch. * - * In this case to load all the objects in the vector is a huge waste of - * resources. We just allocate a vector that is big enough for the selected - * range length, and make sure to load just this part in the vector. 
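The diff below only rewords comments, so it may help to see the optimization they describe in isolation: when the input is already in the desired order and there is nothing to sort, a LIMIT start count only needs that window loaded, after which start/end are rebased so the output code can stay oblivious. A standalone sketch under those assumptions (plain C arrays, not Redis data types):

    #include <stdio.h>

    int main(void) {
        int list[] = {10, 20, 30, 40, 50, 60, 70, 80};
        int vectorlen = sizeof(list)/sizeof(list[0]);
        int start = 2, end = 5;          /* LIMIT 2 4 -> 4 elements */
        int desc = 0;                    /* Set to 1 for DESC order. */
        int vector[8];
        int j = 0;

        /* Load only the requested range, honoring ASC/DESC. */
        for (int i = 0; i < end - start + 1; i++) {
            int idx = desc ? vectorlen - start - 1 - i : start + i;
            vector[j++] = list[idx];
        }

        /* The output code is unaware of the optimization: rebase the window. */
        end -= start;
        start = 0;
        for (int i = start; i <= end; i++) printf("%d\n", vector[i]);
        return 0;
    }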
*/ + * In this special case, if we have a LIMIT option that actually reduces + * the number of elements to fetch, we also optimize to just load the + * range we are interested in and allocating a vector that is big enough + * for the selected range length. */ if ((sortval->type == REDIS_ZSET || sortval->type == REDIS_LIST) && dontsort && (start != 0 || end != vectorlen-1)) @@ -364,10 +364,7 @@ void sortCommand(redisClient *c) { j++; } listTypeReleaseIterator(li); - /* The code producing the output does not know that in the case of - * sorted set, 'dontsort', and LIMIT, we are able to get just the - * range, already sorted, so we need to adjust "start" and "end" - * to make sure start is set to 0. */ + /* Fix start/end: output code is not aware of this optimization. */ end -= start; start = 0; } @@ -427,10 +424,7 @@ void sortCommand(redisClient *c) { j++; ln = desc ? ln->backward : ln->level[0].forward; } - /* The code producing the output does not know that in the case of - * sorted set, 'dontsort', and LIMIT, we are able to get just the - * range, already sorted, so we need to adjust "start" and "end" - * to make sure start is set to 0. */ + /* Fix start/end: output code is not aware of this optimization. */ end -= start; start = 0; } else if (sortval->type == REDIS_ZSET) { From 719c6c2b96bdb9638dc3eae66f3568608181add6 Mon Sep 17 00:00:00 2001 From: antirez Date: Tue, 3 Feb 2015 15:42:56 +0100 Subject: [PATCH 0063/1928] Suppress sign warning in sort.c. Related to #2346. --- src/sort.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/sort.c b/src/sort.c index 97500df45..c1b571313 100644 --- a/src/sort.c +++ b/src/sort.c @@ -354,7 +354,7 @@ void sortCommand(redisClient *c) { listTypeIterator *li; listTypeEntry entry; li = listTypeInitIterator(sortval, - desc ? listTypeLength(sortval) - start - 1 : start, + desc ? (long)(listTypeLength(sortval) - start - 1) : start, desc ? REDIS_HEAD : REDIS_TAIL); while(j < vectorlen && listTypeNext(li,&entry)) { From c5dd686ecb90d8d969f521d02caade4a3fa6d56e Mon Sep 17 00:00:00 2001 From: antirez Date: Wed, 4 Feb 2015 11:24:46 +0100 Subject: [PATCH 0064/1928] Replication: put server.master client creation into separated function. --- src/replication.c | 29 ++++++++++++++++++----------- 1 file changed, 18 insertions(+), 11 deletions(-) diff --git a/src/replication.c b/src/replication.c index 7e36c3e99..697acbef5 100644 --- a/src/replication.c +++ b/src/replication.c @@ -854,6 +854,23 @@ void replicationEmptyDbCallback(void *privdata) { replicationSendNewlineToMaster(); } +/* Once we have a link with the master and the synchroniziation was + * performed, this function materializes the master client we store + * at server.master, starting from the specified file descriptor. */ +void replicationCreateMasterClient(int fd) { + server.master = createClient(fd); + server.master->flags |= REDIS_MASTER; + server.master->authenticated = 1; + server.repl_state = REDIS_REPL_CONNECTED; + server.master->reploff = server.repl_master_initial_offset; + memcpy(server.master->replrunid, server.repl_master_runid, + sizeof(server.repl_master_runid)); + /* If master offset is set to -1, this master is old and is not + * PSYNC capable, so we flag it accordingly. 
*/ + if (server.master->reploff == -1) + server.master->flags |= REDIS_PRE_PSYNC; +} + /* Asynchronously read the SYNC payload we receive from a master */ #define REPL_MAX_WRITTEN_BEFORE_FSYNC (1024*1024*8) /* 8 MB */ void readSyncBulkPayload(aeEventLoop *el, int fd, void *privdata, int mask) { @@ -1017,17 +1034,7 @@ void readSyncBulkPayload(aeEventLoop *el, int fd, void *privdata, int mask) { /* Final setup of the connected slave <- master link */ zfree(server.repl_transfer_tmpfile); close(server.repl_transfer_fd); - server.master = createClient(server.repl_transfer_s); - server.master->flags |= REDIS_MASTER; - server.master->authenticated = 1; - server.repl_state = REDIS_REPL_CONNECTED; - server.master->reploff = server.repl_master_initial_offset; - memcpy(server.master->replrunid, server.repl_master_runid, - sizeof(server.repl_master_runid)); - /* If master offset is set to -1, this master is old and is not - * PSYNC capable, so we flag it accordingly. */ - if (server.master->reploff == -1) - server.master->flags |= REDIS_PRE_PSYNC; + replicationCreateMasterClient(server.repl_transfer_s); redisLog(REDIS_NOTICE, "MASTER <-> SLAVE sync: Finished with success"); /* Restart the AOF subsystem now that we finished the sync. This * will trigger an AOF rewrite, and when done will start appending From 2ac7b5a8b41bb71fce52c86f9ba420769663609e Mon Sep 17 00:00:00 2001 From: antirez Date: Wed, 4 Feb 2015 11:53:19 +0100 Subject: [PATCH 0065/1928] Fix RDB corruption test after server behavior change. --- tests/integration/rdb.tcl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/integration/rdb.tcl b/tests/integration/rdb.tcl index 71876a6ed..a2f187ad4 100644 --- a/tests/integration/rdb.tcl +++ b/tests/integration/rdb.tcl @@ -90,7 +90,7 @@ start_server_and_kill_it [list "dir" $server_path] { test {Server should not start if RDB is corrupted} { wait_for_condition 50 100 { [string match {*RDB checksum*} \ - [exec tail -n1 < [dict get $srv stdout]]] + [exec tail -n10 < [dict get $srv stdout]]] } else { fail "Server started even if RDB was corrupted!" } From ba74711e0609b92721fc2d5a13c90aa88f623254 Mon Sep 17 00:00:00 2001 From: Chris Lamb Date: Wed, 4 Feb 2015 18:36:38 +0000 Subject: [PATCH 0066/1928] Support "1G" etc. units in CONFIG SET maxmemory Signed-off-by: Chris Lamb --- src/config.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/src/config.c b/src/config.c index 8255a56b7..927d7e245 100644 --- a/src/config.c +++ b/src/config.c @@ -643,8 +643,9 @@ void configSetCommand(redisClient *c) { zfree(server.masterauth); server.masterauth = ((char*)o->ptr)[0] ? zstrdup(o->ptr) : NULL; } else if (!strcasecmp(c->argv[2]->ptr,"maxmemory")) { - if (getLongLongFromObject(o,&ll) == REDIS_ERR || - ll < 0) goto badfmt; + int err; + ll = memtoll(o->ptr,&err); + if (err || ll < 0) goto badfmt; server.maxmemory = ll; if (server.maxmemory) { if (server.maxmemory < zmalloc_used_memory()) { From 2860cf4139827c2791c5f79324555d7acf150243 Mon Sep 17 00:00:00 2001 From: antirez Date: Sat, 7 Feb 2015 14:50:12 +0100 Subject: [PATCH 0067/1928] Initial implementation of redis-cli --latency-dist. 
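Related to the maxmemory change above: memtoll() accepts human readable sizes such as "1G" or "100mb". A simplified stand-in for that kind of parser (not the actual util.c implementation, and less strict about malformed input) could look like this:

    #include <stdio.h>
    #include <stdlib.h>
    #include <strings.h>

    static long long mem_to_bytes(const char *s, int *err) {
        char *suffix;
        double val = strtod(s, &suffix);
        long long mul;

        *err = 0;
        if (suffix[0] == '\0' || !strcasecmp(suffix, "b")) mul = 1;
        else if (!strcasecmp(suffix, "k"))  mul = 1000;
        else if (!strcasecmp(suffix, "kb")) mul = 1024;
        else if (!strcasecmp(suffix, "m"))  mul = 1000LL*1000;
        else if (!strcasecmp(suffix, "mb")) mul = 1024LL*1024;
        else if (!strcasecmp(suffix, "g"))  mul = 1000LL*1000*1000;
        else if (!strcasecmp(suffix, "gb")) mul = 1024LL*1024*1024;
        else { *err = 1; return 0; }
        return (long long)(val * mul);
    }

    int main(void) {
        int err;
        printf("%lld\n", mem_to_bytes("1G", &err));    /* 1000000000 */
        printf("%lld\n", mem_to_bytes("100mb", &err)); /* 104857600 */
        return 0;
    }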
--- src/redis-cli.c | 153 ++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 153 insertions(+) diff --git a/src/redis-cli.c b/src/redis-cli.c index e243db451..a0321e738 100644 --- a/src/redis-cli.c +++ b/src/redis-cli.c @@ -44,6 +44,7 @@ #include #include #include +#include #include "hiredis.h" #include "sds.h" @@ -76,6 +77,7 @@ static struct config { int monitor_mode; int pubsub_mode; int latency_mode; + int latency_dist_mode; int latency_history; int cluster_mode; int cluster_reissue_command; @@ -770,6 +772,8 @@ static int parseOptions(int argc, char **argv) { config.output = OUTPUT_CSV; } else if (!strcmp(argv[i],"--latency")) { config.latency_mode = 1; + } else if (!strcmp(argv[i],"--latency-dist")) { + config.latency_dist_mode = 1; } else if (!strcmp(argv[i],"--latency-history")) { config.latency_mode = 1; config.latency_history = 1; @@ -862,6 +866,8 @@ static void usage(void) { " --latency Enter a special mode continuously sampling latency.\n" " --latency-history Like --latency but tracking latency changes over time.\n" " Default time interval is 15 sec. Change it using -i.\n" +" --latency-dist Shows latency as a spectrum, requires xterm 256 colors.\n" +" Default time interval is 1 sec. Change it using -i.\n" " --slave Simulate a slave showing commands received from the master.\n" " --rdb Transfer an RDB dump from remote server to local file.\n" " --pipe Transfer raw Redis protocol from stdin to server.\n" @@ -1107,6 +1113,146 @@ static void latencyMode(void) { } } +/*------------------------------------------------------------------------------ + * Latency distribution mode -- requires 256 colors xterm + *--------------------------------------------------------------------------- */ + +#define LATENCY_DIST_DEFAULT_INTERVAL 1000 /* milliseconds. */ +#define LATENCY_DIST_MIN_GRAY 233 /* Less than that is too hard to see gray. */ +#define LATENCY_DIST_MAX_GRAY 255 +#define LATENCY_DIST_GRAYS (LATENCY_DIST_MAX_GRAY-LATENCY_DIST_MIN_GRAY+1) + +/* Structure to store samples distribution. */ +struct distsamples { + long long max; /* Max latency to fit into this interval (usec). */ + long long count; /* Number of samples in this interval. */ + int character; /* Associated character in visualization. */ +}; + +/* Helper function for latencyDistMode(). Performs the spectrum visualization + * of the collected samples targeting an xterm 256 terminal. + * + * Takes an array of distsamples structures, ordered from smaller to bigger + * 'max' value. Last sample max must be 0, to mean that it olds all the + * samples greater than the previous one, and is also the stop sentinel. + * + * "tot' is the total number of samples in the different buckets, so it + * is the SUM(samples[i].conut) for i to 0 up to the max sample. + * + * As a side effect the function sets all the buckets count to 0. */ +void showLatencyDistSamples(struct distsamples *samples, long long tot) { + int j; + + /* We convert samples into a number between 0 and DIST_GRAYS, + * proportional to the percentage a given bucket represents. + * This way intensity of the different parts of the spectrum + * don't change relative to the number of requests, which avoids to + * pollute the visualization with non-latency related info. */ + printf("\033[38;5;0m"); /* Set foreground color to black. 
*/ + for (j = 0; ; j++) { + float color = (float) samples[j].count / tot * LATENCY_DIST_GRAYS; + color = ceil(color) + (LATENCY_DIST_MIN_GRAY-1); + if (color == LATENCY_DIST_MIN_GRAY-1) { + printf("\033[48;5;0m "); + } else { + printf("\033[48;5;%dm%c", (int)color, samples[j].character); + } + samples[j].count = 0; + if (samples[j].max == 0) break; /* Last sample. */ + } + printf("\033[0m\n"); + fflush(stdout); +} + +/* Show the legend: different buckets values and colors meaning, so + * that the spectrum is more easily readable. */ +void showLatencyDistLegend(void) { + printf(". - * 0.01 0.125 0.5 milliseconds\n"); + printf("1,2,3,...,9 from 1 to 9 milliseconds\n"); + printf("A,B,C,D,E 10,20,30,40,50 milliseconds\n"); + printf("F,G,H,I,J .1,.2,.3,.4,.5 seconds\n"); + printf("K,L,M,N,O,P,Q,? 1,2,4,8,16,30,60,>60 seconds\n"); + printf("---------------------------------------------\n"); +} + +static void latencyDistMode(void) { + redisReply *reply; + long long start, latency, count = 0; + long long history_interval = + config.interval ? config.interval/1000 : + LATENCY_DIST_DEFAULT_INTERVAL; + long long history_start = ustime(); + int j, outputs = 0; + + struct distsamples samples[] = { + /* We use a mostly logarithmic scale, with certain linear intervals + * which are more interesting than others, like 1-10 milliseconds + * range. */ + {10,0,'.'}, /* 0.01 ms */ + {125,0,'-'}, /* 0.125 ms */ + {250,0,'*'}, /* 0.25 ms */ + {500,0,'#'}, /* 0.5 ms */ + {1000,0,'1'}, /* 1 ms */ + {2000,0,'2'}, /* 2 ms */ + {3000,0,'3'}, /* 3 ms */ + {4000,0,'4'}, /* 4 ms */ + {5000,0,'5'}, /* 5 ms */ + {6000,0,'6'}, /* 6 ms */ + {7000,0,'7'}, /* 7 ms */ + {8000,0,'8'}, /* 8 ms */ + {9000,0,'9'}, /* 9 ms */ + {10000,0,'A'}, /* 10 ms */ + {20000,0,'B'}, /* 20 ms */ + {30000,0,'C'}, /* 30 ms */ + {40000,0,'D'}, /* 40 ms */ + {50000,0,'E'}, /* 50 ms */ + {100000,0,'F'}, /* 0.1 s */ + {200000,0,'G'}, /* 0.2 s */ + {300000,0,'H'}, /* 0.3 s */ + {400000,0,'I'}, /* 0.4 s */ + {500000,0,'J'}, /* 0.5 s */ + {1000000,0,'K'}, /* 1 s */ + {2000000,0,'L'}, /* 2 s */ + {4000000,0,'M'}, /* 4 s */ + {8000000,0,'N'}, /* 8 s */ + {16000000,0,'O'}, /* 16 s */ + {30000000,0,'P'}, /* 30 s */ + {60000000,0,'Q'}, /* 1 minute */ + {0,0,'?'}, /* > 1 minute */ + }; + + if (!context) exit(1); + while(1) { + start = ustime(); + reply = redisCommand(context,"PING"); + if (reply == NULL) { + fprintf(stderr,"\nI/O error\n"); + exit(1); + } + latency = ustime()-start; + freeReplyObject(reply); + count++; + + /* Populate the relevant bucket. */ + for (j = 0; ; j++) { + if (samples[j].max == 0 || latency <= samples[j].max) { + samples[j].count++; + break; + } + } + + /* From time to time show the spectrum. 
*/ + if (count && (ustime()-history_start)/1000 > history_interval) { + if ((outputs++ % 20) == 0) + showLatencyDistLegend(); + showLatencyDistSamples(samples,count); + history_start = ustime(); + count = 0; + } + usleep(LATENCY_SAMPLE_RATE * 1000); + } +} + /*------------------------------------------------------------------------------ * Slave mode *--------------------------------------------------------------------------- */ @@ -1923,6 +2069,7 @@ int main(int argc, char **argv) { config.monitor_mode = 0; config.pubsub_mode = 0; config.latency_mode = 0; + config.latency_dist_mode = 0; config.latency_history = 0; config.cluster_mode = 0; config.slave_mode = 0; @@ -1957,6 +2104,12 @@ int main(int argc, char **argv) { latencyMode(); } + /* Latency distribution mode */ + if (config.latency_dist_mode) { + if (cliConnect(0) == REDIS_ERR) exit(1); + latencyDistMode(); + } + /* Slave mode */ if (config.slave_mode) { if (cliConnect(0) == REDIS_ERR) exit(1); From 414df143f51bab2f984c0fb67fccc637733bc570 Mon Sep 17 00:00:00 2001 From: antirez Date: Sat, 7 Feb 2015 15:05:40 +0100 Subject: [PATCH 0068/1928] Add missing latency-dest legend symbol. --- src/redis-cli.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/redis-cli.c b/src/redis-cli.c index a0321e738..4790b086b 100644 --- a/src/redis-cli.c +++ b/src/redis-cli.c @@ -1167,7 +1167,7 @@ void showLatencyDistSamples(struct distsamples *samples, long long tot) { /* Show the legend: different buckets values and colors meaning, so * that the spectrum is more easily readable. */ void showLatencyDistLegend(void) { - printf(". - * 0.01 0.125 0.5 milliseconds\n"); + printf(". - * # .01 .125 .25 .5 milliseconds\n"); printf("1,2,3,...,9 from 1 to 9 milliseconds\n"); printf("A,B,C,D,E 10,20,30,40,50 milliseconds\n"); printf("F,G,H,I,J .1,.2,.3,.4,.5 seconds\n"); From c83a739bd8c9e6451414bcacade98733aeaa9834 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Mariano=20P=C3=A9rez=20Rodr=C3=ADguez?= Date: Sat, 7 Feb 2015 14:02:33 -0300 Subject: [PATCH 0069/1928] Fix for #2371 Fixing #2371 as per @mattsta's suggestion --- src/config.h | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/src/config.h b/src/config.h index 1ed8ef301..c455ad9f0 100644 --- a/src/config.h +++ b/src/config.h @@ -34,6 +34,11 @@ #include #endif +#ifdef __linux__ +#include +#include +#endif + /* Define redis_fstat to fstat or fstat64() */ #if defined(__APPLE__) && !defined(MAC_OS_X_VERSION_10_6) #define redis_fstat fstat64 @@ -91,9 +96,6 @@ /* Define rdb_fsync_range to sync_file_range() on Linux, otherwise we use * the plain fsync() call. */ -#ifdef __linux__ -#include -#include #if defined(__GLIBC__) && defined(__GLIBC_PREREQ) #if (LINUX_VERSION_CODE >= 0x020611 && __GLIBC_PREREQ(2, 6)) #define HAVE_SYNC_FILE_RANGE 1 @@ -103,7 +105,6 @@ #define HAVE_SYNC_FILE_RANGE 1 #endif #endif -#endif #ifdef HAVE_SYNC_FILE_RANGE #define rdb_fsync_range(fd,off,size) sync_file_range(fd,off,size,SYNC_FILE_RANGE_WAIT_BEFORE|SYNC_FILE_RANGE_WRITE) From ace1acc532516bd5bd4af7faa9f173e233fa0549 Mon Sep 17 00:00:00 2001 From: antirez Date: Sat, 7 Feb 2015 18:06:10 +0100 Subject: [PATCH 0070/1928] redis-cli latency dist: add new top HL. 
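The distsamples array introduced by the --latency-dist patch above is essentially a logarithmic histogram: a sample falls into the first bucket whose max (in microseconds) is not smaller than the measured latency, with a final max == 0 bucket acting as the catch-all sentinel. A reduced, self-contained version of that bucketing:

    #include <stdio.h>

    struct bucket { long long max; long long count; char character; };

    static void add_sample(struct bucket *b, long long latency_us) {
        for (int j = 0; ; j++) {
            if (b[j].max == 0 || latency_us <= b[j].max) {
                b[j].count++;
                break;
            }
        }
    }

    int main(void) {
        struct bucket b[] = {
            {1000,   0, '1'},   /* <= 1 ms       */
            {10000,  0, 'A'},   /* <= 10 ms      */
            {100000, 0, 'F'},   /* <= 0.1 s      */
            {0,      0, '?'},   /* anything else */
        };
        long long samples[] = {120, 900, 4200, 95000, 2500000};

        for (int i = 0; i < 5; i++) add_sample(b, samples[i]);
        for (int j = 0; j < 4; j++)
            printf("%c: %lld\n", b[j].character, b[j].count);
        return 0;
    }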
--- src/redis-cli.c | 1 + 1 file changed, 1 insertion(+) diff --git a/src/redis-cli.c b/src/redis-cli.c index 4790b086b..90456fd50 100644 --- a/src/redis-cli.c +++ b/src/redis-cli.c @@ -1167,6 +1167,7 @@ void showLatencyDistSamples(struct distsamples *samples, long long tot) { /* Show the legend: different buckets values and colors meaning, so * that the spectrum is more easily readable. */ void showLatencyDistLegend(void) { + printf("---------------------------------------------\n"); printf(". - * # .01 .125 .25 .5 milliseconds\n"); printf("1,2,3,...,9 from 1 to 9 milliseconds\n"); printf("A,B,C,D,E 10,20,30,40,50 milliseconds\n"); From 2723412b7bd15f37b8e714dc43a91732b7b606a3 Mon Sep 17 00:00:00 2001 From: antirez Date: Sat, 7 Feb 2015 20:15:40 +0100 Subject: [PATCH 0071/1928] redis-cli --latency-dist now uses a color palette. Still not happy with the result but low grays are hard to see in certain monitors with a non perfect gamma. --- src/redis-cli.c | 22 ++++++++++++++-------- 1 file changed, 14 insertions(+), 8 deletions(-) diff --git a/src/redis-cli.c b/src/redis-cli.c index 90456fd50..512f59fe2 100644 --- a/src/redis-cli.c +++ b/src/redis-cli.c @@ -1122,6 +1122,15 @@ static void latencyMode(void) { #define LATENCY_DIST_MAX_GRAY 255 #define LATENCY_DIST_GRAYS (LATENCY_DIST_MAX_GRAY-LATENCY_DIST_MIN_GRAY+1) +/* Gray palette. Currently not used. + * int spectrum_palette_size = 24; +* int spectrum_palette[] = {0, 233,234,235,236,237,238,239,240,241,242,243,244,245,246,247,248,249,250,251,252,253,254,255}; +*/ + +/* Color palette from https://github.com/draios/sysdig */ +int spectrum_palette[] = {0, 22, 28, 64, 34, 2, 76, 46, 118, 154, 191, 227, 226, 11, 220, 209, 208, 202, 197, 9, 1}; +int spectrum_palette_size = 21; + /* Structure to store samples distribution. */ struct distsamples { long long max; /* Max latency to fit into this interval (usec). */ @@ -1143,20 +1152,17 @@ struct distsamples { void showLatencyDistSamples(struct distsamples *samples, long long tot) { int j; - /* We convert samples into a number between 0 and DIST_GRAYS, + /* We convert samples into a index inside the palette * proportional to the percentage a given bucket represents. * This way intensity of the different parts of the spectrum * don't change relative to the number of requests, which avoids to * pollute the visualization with non-latency related info. */ printf("\033[38;5;0m"); /* Set foreground color to black. */ for (j = 0; ; j++) { - float color = (float) samples[j].count / tot * LATENCY_DIST_GRAYS; - color = ceil(color) + (LATENCY_DIST_MIN_GRAY-1); - if (color == LATENCY_DIST_MIN_GRAY-1) { - printf("\033[48;5;0m "); - } else { - printf("\033[48;5;%dm%c", (int)color, samples[j].character); - } + int coloridx = + ceil((float) samples[j].count / tot * (spectrum_palette_size-1)); + int color = spectrum_palette[coloridx]; + printf("\033[48;5;%dm%c", (int)color, samples[j].character); samples[j].count = 0; if (samples[j].max == 0) break; /* Last sample. */ } From cfe21852e79792b08afd2fe0872440edcddf577e Mon Sep 17 00:00:00 2001 From: antirez Date: Sun, 8 Feb 2015 17:39:42 +0100 Subject: [PATCH 0072/1928] redis-cli --latecy-dist reverted to gray scale. So far not able to find a color palette within the 256 colors which is not confusing. However I believe it is a possible task, so will try better later. 
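For readers unfamiliar with the escape sequences involved: the spectrum output relies on standard xterm 256-color codes, where \033[48;5;Nm selects background color N and palette entries 232-255 form the grayscale ramp used here. A tiny standalone demo, assuming any xterm-compatible terminal:

    #include <stdio.h>

    int main(void) {
        /* Print the grayscale ramp, darkest to lightest. */
        for (int color = 232; color <= 255; color++)
            printf("\033[48;5;%dm ", color);
        printf("\033[0m\n");   /* Reset attributes. */
        return 0;
    }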
--- src/redis-cli.c | 18 ++++++++++-------- 1 file changed, 10 insertions(+), 8 deletions(-) diff --git a/src/redis-cli.c b/src/redis-cli.c index 512f59fe2..e95c5b413 100644 --- a/src/redis-cli.c +++ b/src/redis-cli.c @@ -1122,14 +1122,9 @@ static void latencyMode(void) { #define LATENCY_DIST_MAX_GRAY 255 #define LATENCY_DIST_GRAYS (LATENCY_DIST_MAX_GRAY-LATENCY_DIST_MIN_GRAY+1) -/* Gray palette. Currently not used. - * int spectrum_palette_size = 24; -* int spectrum_palette[] = {0, 233,234,235,236,237,238,239,240,241,242,243,244,245,246,247,248,249,250,251,252,253,254,255}; -*/ - -/* Color palette from https://github.com/draios/sysdig */ -int spectrum_palette[] = {0, 22, 28, 64, 34, 2, 76, 46, 118, 154, 191, 227, 226, 11, 220, 209, 208, 202, 197, 9, 1}; -int spectrum_palette_size = 21; +/* Gray palette. */ +int spectrum_palette_size = 24; +int spectrum_palette[] = {0, 233,234,235,236,237,238,239,240,241,242,243,244,245,246,247,248,249,250,251,252,253,254,255}; /* Structure to store samples distribution. */ struct distsamples { @@ -1173,12 +1168,19 @@ void showLatencyDistSamples(struct distsamples *samples, long long tot) { /* Show the legend: different buckets values and colors meaning, so * that the spectrum is more easily readable. */ void showLatencyDistLegend(void) { + int j; + printf("---------------------------------------------\n"); printf(". - * # .01 .125 .25 .5 milliseconds\n"); printf("1,2,3,...,9 from 1 to 9 milliseconds\n"); printf("A,B,C,D,E 10,20,30,40,50 milliseconds\n"); printf("F,G,H,I,J .1,.2,.3,.4,.5 seconds\n"); printf("K,L,M,N,O,P,Q,? 1,2,4,8,16,30,60,>60 seconds\n"); + printf("From 0 to 100%%: "); + for (j = 0; j < spectrum_palette_size; j++) { + printf("\033[48;5;%dm ", spectrum_palette[j]); + } + printf("\033[0m\n"); printf("---------------------------------------------\n"); } From ca23b2a6b739f179ed0dab3a3639153d8b18fd59 Mon Sep 17 00:00:00 2001 From: antirez Date: Sun, 8 Feb 2015 23:28:27 +0100 Subject: [PATCH 0073/1928] redis-cli: interactive reconnection for latency modes. --stat mode already used to reconnect automatically if the server is no longer available. This is useful since this is an interactive mode used for debugging, however the same applies to --latency and --latency-dist modes, so now both use the reconnecting command execution as well. The reconnection code was modified to use basic VT100 escape sequences in order to play better with different kinds of output on the screen when the reconnection happens, and to hide the reconnection attempt output when finally the reconnection happens. --- src/redis-cli.c | 22 +++++++++++++--------- 1 file changed, 13 insertions(+), 9 deletions(-) diff --git a/src/redis-cli.c b/src/redis-cli.c index e95c5b413..4171e69c8 100644 --- a/src/redis-cli.c +++ b/src/redis-cli.c @@ -702,16 +702,17 @@ static int cliSendCommand(int argc, char **argv, int repeat) { return REDIS_OK; } -/* Send the INFO command, reconnecting the link if needed. */ -static redisReply *reconnectingInfo(void) { - redisContext *c = context; +/* Send a command reconnecting the link if needed. */ +static redisReply *reconnectingRedisCommand(redisContext *c, const char *fmt, ...) { redisReply *reply = NULL; int tries = 0; + va_list ap; assert(!c->err); while(reply == NULL) { while (c->err & (REDIS_ERR_IO | REDIS_ERR_EOF)) { - printf("Reconnecting (%d)...\r", ++tries); + printf("\r\x1b[0K"); /* Cursor to left edge + clear line. */ + printf("Reconnecting... 
%d\r", ++tries); fflush(stdout); redisFree(c); @@ -719,12 +720,15 @@ static redisReply *reconnectingInfo(void) { usleep(1000000); } - reply = redisCommand(c,"INFO"); + va_start(ap,fmt); + reply = redisvCommand(c,fmt,ap); + va_end(ap); + if (c->err && !(c->err & (REDIS_ERR_IO | REDIS_ERR_EOF))) { fprintf(stderr, "Error: %s\n", c->errstr); exit(1); } else if (tries > 0) { - printf("\n"); + printf("\r\x1b[0K"); /* Cursor to left edge + clear line. */ } } @@ -1083,7 +1087,7 @@ static void latencyMode(void) { if (!context) exit(1); while(1) { start = mstime(); - reply = redisCommand(context,"PING"); + reply = reconnectingRedisCommand(context,"PING"); if (reply == NULL) { fprintf(stderr,"\nI/O error\n"); exit(1); @@ -1233,7 +1237,7 @@ static void latencyDistMode(void) { if (!context) exit(1); while(1) { start = ustime(); - reply = redisCommand(context,"PING"); + reply = reconnectingRedisCommand(context,"PING"); if (reply == NULL) { fprintf(stderr,"\nI/O error\n"); exit(1); @@ -1881,7 +1885,7 @@ static void statMode(void) { char buf[64]; int j; - reply = reconnectingInfo(); + reply = reconnectingRedisCommand(context,"INFO"); if (reply->type == REDIS_REPLY_ERROR) { printf("ERROR: %s\n", reply->str); exit(1); From bd128f7969d0b28dbefe56bfdfa0b5b54102dc97 Mon Sep 17 00:00:00 2001 From: antirez Date: Mon, 9 Feb 2015 11:06:44 +0100 Subject: [PATCH 0074/1928] redis-cli --lru-test implemented (cache workload simulator). --- src/redis-cli.c | 102 ++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 102 insertions(+) diff --git a/src/redis-cli.c b/src/redis-cli.c index 4171e69c8..d2460a10b 100644 --- a/src/redis-cli.c +++ b/src/redis-cli.c @@ -79,6 +79,8 @@ static struct config { int latency_mode; int latency_dist_mode; int latency_history; + int lru_test_mode; + long long lru_test_sample_size; int cluster_mode; int cluster_reissue_command; int slave_mode; @@ -781,6 +783,9 @@ static int parseOptions(int argc, char **argv) { } else if (!strcmp(argv[i],"--latency-history")) { config.latency_mode = 1; config.latency_history = 1; + } else if (!strcmp(argv[i],"--lru-test") && !lastarg) { + config.lru_test_mode = 1; + config.lru_test_sample_size = strtoll(argv[++i],NULL,10); } else if (!strcmp(argv[i],"--slave")) { config.slave_mode = 1; } else if (!strcmp(argv[i],"--stat")) { @@ -872,6 +877,7 @@ static void usage(void) { " Default time interval is 15 sec. Change it using -i.\n" " --latency-dist Shows latency as a spectrum, requires xterm 256 colors.\n" " Default time interval is 1 sec. Change it using -i.\n" +" --lru-test Simulate a cache workload with an 80-20 distribution.\n" " --slave Simulate a slave showing commands received from the master.\n" " --rdb Transfer an RDB dump from remote server to local file.\n" " --pipe Transfer raw Redis protocol from stdin to server.\n" @@ -1991,6 +1997,94 @@ static void scanMode(void) { exit(0); } +/*------------------------------------------------------------------------------ + * LRU test mode + *--------------------------------------------------------------------------- */ + +/* Return an integer from min to max (both inclusive) using a power-law + * distribution, depending on the value of alpha: the greater the alpha + * the more bias towards lower values. + * + * With alpha = 6.2 the output follows the 80-20 rule where 20% of + * the returned numbers will account for 80% of the frequency. 
*/ +long long powerLawRand(long long min, long long max, double alpha) { + double pl, r; + + max += 1; + r = ((double)rand()) / RAND_MAX; + pl = pow( + ((pow(max,alpha+1) - pow(min,alpha+1))*r + pow(min,alpha+1)), + (1.0/(alpha+1))); + return (max-1-(long long)pl)+min; +} + +/* Generates a key name among a set of lru_test_sample_size keys, using + * an 80-20 distribution. */ +void LRUTestGenKey(char *buf, size_t buflen) { + snprintf(buf, buflen, "lru:%lld\n", + powerLawRand(1, config.lru_test_sample_size, 6.2)); +} + +#define LRU_CYCLE_PERIOD 1000 /* 1000 milliseconds. */ +#define LRU_CYCLE_PIPELINE_SIZE 250 +static void LRUTestMode(void) { + redisReply *reply; + char key[128]; + long long start_cycle; + int j; + + srand(time(NULL)^getpid()); + while(1) { + /* Perform cycles of 1 second with 50% writes and 50% reads. + * We use pipelining batching writes / reads N times per cycle in order + * to fill the target instance easily. */ + start_cycle = mstime(); + long long hits = 0, misses = 0; + while(mstime() - start_cycle < 1000) { + /* Write cycle. */ + for (j = 0; j < LRU_CYCLE_PIPELINE_SIZE; j++) { + LRUTestGenKey(key,sizeof(key)); + redisAppendCommand(context, "SET %s val",key); + } + for (j = 0; j < LRU_CYCLE_PIPELINE_SIZE; j++) + redisGetReply(context, (void**)&reply); + + /* Read cycle. */ + for (j = 0; j < LRU_CYCLE_PIPELINE_SIZE; j++) { + LRUTestGenKey(key,sizeof(key)); + redisAppendCommand(context, "GET %s",key); + } + for (j = 0; j < LRU_CYCLE_PIPELINE_SIZE; j++) { + if (redisGetReply(context, (void**)&reply) == REDIS_OK) { + switch(reply->type) { + case REDIS_REPLY_ERROR: + printf("%s\n", reply->str); + break; + case REDIS_REPLY_NIL: + misses++; + break; + default: + hits++; + break; + } + } + } + + if (context->err) { + fprintf(stderr,"I/O error during LRU test\n"); + exit(1); + } + } + /* Print stats. */ + printf( + "%lld Gets/sec | Hits: %lld (%.2f%%) | Misses: %lld (%.2f%%)\n", + hits+misses, + hits, (double)hits/(hits+misses)*100, + misses, (double)misses/(hits+misses)*100); + } + exit(0); +} + /*------------------------------------------------------------------------------ * Intrisic latency mode. * @@ -2084,6 +2178,8 @@ int main(int argc, char **argv) { config.latency_mode = 0; config.latency_dist_mode = 0; config.latency_history = 0; + config.lru_test_mode = 0; + config.lru_test_sample_size = 0; config.cluster_mode = 0; config.slave_mode = 0; config.getrdb_mode = 0; @@ -2160,6 +2256,12 @@ int main(int argc, char **argv) { scanMode(); } + /* LRU test mode */ + if (config.lru_test_mode) { + if (cliConnect(0) == REDIS_ERR) exit(1); + LRUTestMode(); + } + /* Intrinsic latency mode */ if (config.intrinsic_latency_mode) intrinsicLatencyMode(); From 9e9abe29fee509095dcd8af3648019bbe980279b Mon Sep 17 00:00:00 2001 From: antirez Date: Tue, 10 Feb 2015 09:29:14 +0100 Subject: [PATCH 0075/1928] HAVE_SYNC_FILE_RANGE should be protected by ifdef __linux__. Related to issue #2372. --- src/config.h | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/config.h b/src/config.h index c455ad9f0..9fd53626e 100644 --- a/src/config.h +++ b/src/config.h @@ -96,6 +96,7 @@ /* Define rdb_fsync_range to sync_file_range() on Linux, otherwise we use * the plain fsync() call. 
*/ +#ifdef __linux__ #if defined(__GLIBC__) && defined(__GLIBC_PREREQ) #if (LINUX_VERSION_CODE >= 0x020611 && __GLIBC_PREREQ(2, 6)) #define HAVE_SYNC_FILE_RANGE 1 @@ -105,6 +106,7 @@ #define HAVE_SYNC_FILE_RANGE 1 #endif #endif +#endif #ifdef HAVE_SYNC_FILE_RANGE #define rdb_fsync_range(fd,off,size) sync_file_range(fd,off,size,SYNC_FILE_RANGE_WAIT_BEFORE|SYNC_FILE_RANGE_WRITE) From e1fce55237ee1092384017a157882d548c6a517f Mon Sep 17 00:00:00 2001 From: antirez Date: Tue, 10 Feb 2015 14:40:27 +0100 Subject: [PATCH 0076/1928] Added regression test for issue #2371. --- tests/integration/logging.tcl | 24 ++++++++++++++++++++++++ tests/test_helper.tcl | 1 + 2 files changed, 25 insertions(+) create mode 100644 tests/integration/logging.tcl diff --git a/tests/integration/logging.tcl b/tests/integration/logging.tcl new file mode 100644 index 000000000..c1f4854d4 --- /dev/null +++ b/tests/integration/logging.tcl @@ -0,0 +1,24 @@ +set server_path [tmpdir server.log] +set system_name [string tolower [exec uname -s]] + +if {$system_name eq {linux} || $system_name eq {darwin}} { + start_server [list overrides [list dir $server_path]] { + test "Server is able to generate a stack trace on selected systems" { + r config set watchdog-period 200 + r debug sleep 1 + set pattern "*debugCommand*" + set retry 10 + while {$retry} { + set result [exec tail -100 < [srv 0 stdout]] + if {[string match $pattern $result]} { + break + } + incr retry -1 + after 1000 + } + if {$retry == 0} { + error "assertion:expected stack trace not found into log file" + } + } + } +} diff --git a/tests/test_helper.tcl b/tests/test_helper.tcl index 212c95b4f..7e9e2cfaa 100644 --- a/tests/test_helper.tcl +++ b/tests/test_helper.tcl @@ -38,6 +38,7 @@ set ::all_tests { integration/aof integration/rdb integration/convert-zipmap-hash-on-load + integration/logging unit/pubsub unit/slowlog unit/scripting From 5e3dcc522b13d5441d6cdf4ee6ff48bd25df13cb Mon Sep 17 00:00:00 2001 From: antirez Date: Tue, 10 Feb 2015 14:47:45 +0100 Subject: [PATCH 0077/1928] Faster memory efficiency test. This test on Linux was extremely slow, since in Tcl we can't enable easily tcp-nodelay, so the busy loop used to take *a lot* with bigger writes. Fixed using pipelining. --- tests/unit/memefficiency.tcl | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/tests/unit/memefficiency.tcl b/tests/unit/memefficiency.tcl index 14e135ced..7ca9a705b 100644 --- a/tests/unit/memefficiency.tcl +++ b/tests/unit/memefficiency.tcl @@ -1,15 +1,20 @@ proc test_memory_efficiency {range} { r flushall + set rd [redis_deferring_client] set base_mem [s used_memory] set written 0 for {set j 0} {$j < 10000} {incr j} { set key key:$j set val [string repeat A [expr {int(rand()*$range)}]] - r set $key $val + $rd set $key $val incr written [string length $key] incr written [string length $val] incr written 2 ;# A separator is the minimum to store key-value data. } + for {set j 0} {$j < 10000} {incr j} { + $rd read ; # Discard replies + } + set current_mem [s used_memory] set used [expr {$current_mem-$base_mem}] set efficiency [expr {double($written)/$used}] From 170e41464dfff851f939fd73c1c4ec8a02507859 Mon Sep 17 00:00:00 2001 From: antirez Date: Wed, 4 Feb 2015 22:12:46 +0100 Subject: [PATCH 0078/1928] Less blocking dictGetRandomKeys(). Related to issue #2306. 
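The memory efficiency test above gets its speedup purely from pipelining: queue many commands, then read all the replies. The same idea expressed in C with hiredis (the client library redis-cli itself links against) would look roughly like this, assuming a Redis instance listening on 127.0.0.1:6379:

    #include <stdio.h>
    #include "hiredis.h"

    int main(void) {
        redisContext *c = redisConnect("127.0.0.1", 6379);
        if (c == NULL || c->err) {
            fprintf(stderr, "Connection error\n");
            return 1;
        }

        const int n = 10000;
        /* Queue all the writes without waiting for replies. */
        for (int j = 0; j < n; j++)
            redisAppendCommand(c, "SET key:%d value:%d", j, j);

        /* Now drain the replies in a single pass. */
        redisReply *reply;
        for (int j = 0; j < n; j++) {
            if (redisGetReply(c, (void**)&reply) != REDIS_OK) {
                fprintf(stderr, "I/O error\n");
                return 1;
            }
            freeReplyObject(reply);
        }
        printf("Pipelined %d SETs\n", n);
        redisFree(c);
        return 0;
    }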
--- src/dict.c | 54 +++++++++++++++++++++++++++++++----------------------- 1 file changed, 31 insertions(+), 23 deletions(-) diff --git a/src/dict.c b/src/dict.c index 7d8db3631..a1475772c 100644 --- a/src/dict.c +++ b/src/dict.c @@ -666,34 +666,42 @@ dictEntry *dictGetRandomKey(dict *d) * at producing N elements, and the elements are guaranteed to be non * repeating. */ unsigned int dictGetRandomKeys(dict *d, dictEntry **des, unsigned int count) { - int j; /* internal hash table id, 0 or 1. */ - unsigned int stored = 0; + unsigned int j; /* internal hash table id, 0 or 1. */ + unsigned int tables; /* 1 or 2 tables? */ + unsigned int stored = 0, maxsizemask; if (dictSize(d) < count) count = dictSize(d); - while(stored < count) { - for (j = 0; j < 2; j++) { - /* Pick a random point inside the hash table 0 or 1. */ - unsigned int i = random() & d->ht[j].sizemask; - int size = d->ht[j].size; - /* Make sure to visit every bucket by iterating 'size' times. */ - while(size--) { - dictEntry *he = d->ht[j].table[i]; - while (he) { - /* Collect all the elements of the buckets found non - * empty while iterating. */ - *des = he; - des++; - he = he->next; - stored++; - if (stored == count) return stored; - } - i = (i+1) & d->ht[j].sizemask; + /* Try to do a rehashing work proportional to 'count'. */ + for (j = 0; j < count; j++) { + if (dictIsRehashing(d)) + _dictRehashStep(d); + else + break; + } + + tables = dictIsRehashing(d) ? 2 : 1; + maxsizemask = d->ht[0].sizemask; + if (tables > 1 && maxsizemask < d->ht[1].sizemask) + maxsizemask = d->ht[1].sizemask; + + /* Pick a random point inside the larger table. */ + unsigned int i = random() & maxsizemask; + while(stored < count) { + for (j = 0; j < tables; j++) { + if (i >= d->ht[j].size) continue; /* Out of range for this table. */ + dictEntry *he = d->ht[j].table[i]; + while (he) { + /* Collect all the elements of the buckets found non + * empty while iterating. */ + *des = he; + des++; + he = he->next; + stored++; + if (stored == count) return stored; } - /* If there is only one table and we iterated it all, we should - * already have 'count' elements. Assert this condition. */ - assert(dictIsRehashing(d) != 0); } + i = (i+1) & maxsizemask; } return stored; /* Never reached. */ } From 777020839a1ba68ea5bd77a5b17648f4b831caf7 Mon Sep 17 00:00:00 2001 From: antirez Date: Thu, 5 Feb 2015 10:42:09 +0100 Subject: [PATCH 0079/1928] dict.c: prevent useless resize to same size. Related to issue #2306. --- src/dict.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/src/dict.c b/src/dict.c index a1475772c..26707dd58 100644 --- a/src/dict.c +++ b/src/dict.c @@ -211,6 +211,9 @@ int dictExpand(dict *d, unsigned long size) if (dictIsRehashing(d) || d->ht[0].used > size) return DICT_ERR; + /* Rehashing to the same table size is not useful. */ + if (realsize == d->ht[0].size) return DICT_ERR; + /* Allocate the new hash table and initialize all pointers to NULL */ n.size = realsize; n.sizemask = realsize-1; From cd0fcf11e7704c72b68a19f92e093bc5976fe7fc Mon Sep 17 00:00:00 2001 From: antirez Date: Thu, 5 Feb 2015 10:51:05 +0100 Subject: [PATCH 0080/1928] dict.c: put a bound to max work dictRehash() call can do. Related to issue #2306. --- src/dict.c | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/src/dict.c b/src/dict.c index 26707dd58..ad74d222a 100644 --- a/src/dict.c +++ b/src/dict.c @@ -235,9 +235,15 @@ int dictExpand(dict *d, unsigned long size) /* Performs N steps of incremental rehashing. 
Returns 1 if there are still * keys to move from the old to the new hash table, otherwise 0 is returned. + * * Note that a rehashing step consists in moving a bucket (that may have more - * than one key as we use chaining) from the old to the new hash table. */ + * than one key as we use chaining) from the old to the new hash table, however + * since part of the hash table may be composed of empty spaces, it is not + * guaranteed that this function will rehash even a single bucket, since it + * will visit at max N*10 empty buckets in total, otherwise the amount of + * work it does would be unbound and the function may block for a long time. */ int dictRehash(dict *d, int n) { + int empty_visits = n*10; /* Max number of empty buckets to visit. */ if (!dictIsRehashing(d)) return 0; while(n--) { @@ -255,7 +261,10 @@ int dictRehash(dict *d, int n) { /* Note that rehashidx can't overflow as we are sure there are more * elements because ht[0].used != 0 */ assert(d->ht[0].size > (unsigned long)d->rehashidx); - while(d->ht[0].table[d->rehashidx] == NULL) d->rehashidx++; + while(d->ht[0].table[d->rehashidx] == NULL) { + d->rehashidx++; + if (--empty_visits == 0) return 1; + } de = d->ht[0].table[d->rehashidx]; /* Move all the keys in this bucket from the old to the new hash HT */ while(de) { From 88cd9ebc0964c9daf32d97631b5eeba5fd0e8b09 Mon Sep 17 00:00:00 2001 From: antirez Date: Thu, 5 Feb 2015 10:58:28 +0100 Subject: [PATCH 0081/1928] dict.c: dictGetRandomKeys() visit pattern optimization. We use the invariant that the original table ht[0] is never populated up to the index before the current rehashing index. Related to issue #2306. --- src/dict.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/src/dict.c b/src/dict.c index ad74d222a..fbcdd35f8 100644 --- a/src/dict.c +++ b/src/dict.c @@ -701,6 +701,10 @@ unsigned int dictGetRandomKeys(dict *d, dictEntry **des, unsigned int count) { unsigned int i = random() & maxsizemask; while(stored < count) { for (j = 0; j < tables; j++) { + /* Invariant of the dict.c rehashing: up to the indexes already + * visited in ht[0] during the rehashing, there are no populated + * buckets, so we can skip ht[0] for indexes between 0 and idx-1. */ + if (j == 0 && i < d->rehashidx) continue; if (i >= d->ht[j].size) continue; /* Out of range for this table. */ dictEntry *he = d->ht[j].table[i]; while (he) { From 1bcf67a75f775b342653505465b5e8ee03974809 Mon Sep 17 00:00:00 2001 From: antirez Date: Thu, 5 Feb 2015 11:28:45 +0100 Subject: [PATCH 0082/1928] dict.c: dictGetRandomKeys() optimization for big->small table case. Related to issue #2306. --- src/dict.c | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/src/dict.c b/src/dict.c index fbcdd35f8..e197266e8 100644 --- a/src/dict.c +++ b/src/dict.c @@ -704,7 +704,14 @@ unsigned int dictGetRandomKeys(dict *d, dictEntry **des, unsigned int count) { /* Invariant of the dict.c rehashing: up to the indexes already * visited in ht[0] during the rehashing, there are no populated * buckets, so we can skip ht[0] for indexes between 0 and idx-1. */ - if (j == 0 && i < d->rehashidx) continue; + if (tables == 2 && j == 0 && i < d->rehashidx) { + /* Moreover, if we are currently out of range in the second + * table, there will be no elements in both tables up to + * the current rehashing index, so we jump if possible. + * (this happens when going from big to small table). */ + if (i >= d->ht[1].size) i = d->rehashidx; + continue; + } if (i >= d->ht[j].size) continue; /* Out of range for this table. 
*/ dictEntry *he = d->ht[j].table[i]; while (he) { From 4f427bc298b0cfc45bd25381cd04df86868c0843 Mon Sep 17 00:00:00 2001 From: antirez Date: Thu, 5 Feb 2015 12:15:58 +0100 Subject: [PATCH 0083/1928] dict.c: don't try buckets that are empty for sure in dictGetRandomKey(). This is very similar to the optimization applied to dictGetRandomKeys, but applied to the single key variant. Related to issue #2306. --- src/dict.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/src/dict.c b/src/dict.c index e197266e8..9ba0d7d7e 100644 --- a/src/dict.c +++ b/src/dict.c @@ -631,7 +631,11 @@ dictEntry *dictGetRandomKey(dict *d) if (dictIsRehashing(d)) _dictRehashStep(d); if (dictIsRehashing(d)) { do { - h = random() % (d->ht[0].size+d->ht[1].size); + /* We are sure there are no elements in indexes from 0 + * to rehashidx-1 */ + h = d->rehashidx + (random() % (d->ht[0].size + + d->ht[1].size - + d->rehashidx)); he = (h >= d->ht[0].size) ? d->ht[1].table[h - d->ht[0].size] : d->ht[0].table[h]; } while(he == NULL); From 2385630d0d245482027e9e36c76d6f8675dd2f6b Mon Sep 17 00:00:00 2001 From: Sun He Date: Fri, 6 Feb 2015 11:18:58 +0800 Subject: [PATCH 0084/1928] dict.c/dictRehash: check again to update --- src/dict.c | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/src/dict.c b/src/dict.c index 9ba0d7d7e..3b23fbcd0 100644 --- a/src/dict.c +++ b/src/dict.c @@ -282,6 +282,13 @@ int dictRehash(dict *d, int n) { d->ht[0].table[d->rehashidx] = NULL; d->rehashidx++; } + /* Check again if we already rehashed the whole table... */ + if (d->ht[0].used == 0) { + zfree(d->ht[0].table); + d->ht[0] = d->ht[1]; + _dictReset(&d->ht[1]); + d->rehashidx = -1; + } return 1; } From f25fdd6246f01b6ee3c0ce557e2911bc8c068518 Mon Sep 17 00:00:00 2001 From: antirez Date: Fri, 6 Feb 2015 10:48:13 +0100 Subject: [PATCH 0085/1928] dict.c: avoid code repetition in dictRehash(). Avoid code repetition introduced with PR #2367, also fixes the return value to always return 0 if there is nothing more to rehash. --- src/dict.c | 17 ++++++----------- 1 file changed, 6 insertions(+), 11 deletions(-) diff --git a/src/dict.c b/src/dict.c index 3b23fbcd0..c8aaf1529 100644 --- a/src/dict.c +++ b/src/dict.c @@ -246,18 +246,9 @@ int dictRehash(dict *d, int n) { int empty_visits = n*10; /* Max number of empty buckets to visit. */ if (!dictIsRehashing(d)) return 0; - while(n--) { + while(n-- && d->ht[0].used != 0) { dictEntry *de, *nextde; - /* Check if we already rehashed the whole table... */ - if (d->ht[0].used == 0) { - zfree(d->ht[0].table); - d->ht[0] = d->ht[1]; - _dictReset(&d->ht[1]); - d->rehashidx = -1; - return 0; - } - /* Note that rehashidx can't overflow as we are sure there are more * elements because ht[0].used != 0 */ assert(d->ht[0].size > (unsigned long)d->rehashidx); @@ -282,13 +273,17 @@ int dictRehash(dict *d, int n) { d->ht[0].table[d->rehashidx] = NULL; d->rehashidx++; } - /* Check again if we already rehashed the whole table... */ + + /* Check if we already rehashed the whole table... */ if (d->ht[0].used == 0) { zfree(d->ht[0].table); d->ht[0] = d->ht[1]; _dictReset(&d->ht[1]); d->rehashidx = -1; + return 0; } + + /* More to rehash... */ return 1; } From 5792a217f85633225209557d952ad2fa6a3fa0c0 Mon Sep 17 00:00:00 2001 From: antirez Date: Fri, 6 Feb 2015 15:48:42 +0100 Subject: [PATCH 0086/1928] dict.c: add dictGetSomeKeys(), specialized for eviction. 
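A recurring trick in the dict.c patches above is the rehashing invariant: while incremental rehashing is in progress, every bucket of ht[0] below rehashidx has already been emptied, so random sampling can restrict itself to the range [rehashidx, size0+size1). A toy illustration of that index mapping, with plain numbers instead of an actual hash table:

    #include <stdio.h>
    #include <stdlib.h>
    #include <time.h>

    int main(void) {
        long size0 = 16, size1 = 32;   /* Toy table sizes. */
        long rehashidx = 10;           /* Buckets 0..9 of ht[0] are empty. */

        srand(time(NULL));
        for (int i = 0; i < 5; i++) {
            long h = rehashidx + (rand() % (size0 + size1 - rehashidx));
            if (h >= size0)
                printf("sample bucket %ld of ht[1]\n", h - size0);
            else
                printf("sample bucket %ld of ht[0]\n", h);
        }
        return 0;
    }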
--- src/dict.c | 94 ++++++++++++++++++++++++++++++++++++++++++++++++++++- src/dict.h | 1 + src/redis.c | 2 +- 3 files changed, 95 insertions(+), 2 deletions(-) diff --git a/src/dict.c b/src/dict.c index c8aaf1529..8f25c14bc 100644 --- a/src/dict.c +++ b/src/dict.c @@ -664,7 +664,10 @@ dictEntry *dictGetRandomKey(dict *d) return he; } -/* This is a version of dictGetRandomKey() that is modified in order to +/* XXX: This is going to be removed soon and SPOP internals + * reimplemented. + * + * This is a version of dictGetRandomKey() that is modified in order to * return multiple entries by jumping at a random place of the hash table * and scanning linearly for entries. * @@ -735,6 +738,95 @@ unsigned int dictGetRandomKeys(dict *d, dictEntry **des, unsigned int count) { return stored; /* Never reached. */ } + +/* This function samples the dictionary to return a few keys from random + * locations. + * + * It does not guarantee to return all the keys specified in 'count', nor + * it does guarantee to return non-duplicated elements, however it will make + * some effort to do both things. + * + * Returned pointers to hash table entries are stored into 'des' that + * points to an array of dictEntry pointers. The array must have room for + * at least 'count' elements, that is the argument we pass to the function + * to tell how many random elements we need. + * + * The function returns the number of items stored into 'des', that may + * be less than 'count' if the hash table has less than 'count' elements + * inside, or if not enough elements were found in a reasonable amount of + * steps. + * + * Note that this function is not suitable when you need a good distribution + * of the returned items, but only when you need to "sample" a given number + * of continuous elements to run some kind of algorithm or to produce + * statistics. However the function is much faster than dictGetRandomKey() + * at producing N elements. */ +unsigned int dictGetSomeKeys(dict *d, dictEntry **des, unsigned int count) { + unsigned int j; /* internal hash table id, 0 or 1. */ + unsigned int tables; /* 1 or 2 tables? */ + unsigned int stored = 0, maxsizemask; + unsigned int maxsteps; + + if (dictSize(d) < count) count = dictSize(d); + maxsteps = count*10; + + /* Try to do a rehashing work proportional to 'count'. */ + for (j = 0; j < count; j++) { + if (dictIsRehashing(d)) + _dictRehashStep(d); + else + break; + } + + tables = dictIsRehashing(d) ? 2 : 1; + maxsizemask = d->ht[0].sizemask; + if (tables > 1 && maxsizemask < d->ht[1].sizemask) + maxsizemask = d->ht[1].sizemask; + + /* Pick a random point inside the larger table. */ + unsigned int i = random() & maxsizemask; + unsigned int emptylen = 0; /* Continuous empty entries so far. */ + while(stored < count && maxsteps--) { + for (j = 0; j < tables; j++) { + /* Invariant of the dict.c rehashing: up to the indexes already + * visited in ht[0] during the rehashing, there are no populated + * buckets, so we can skip ht[0] for indexes between 0 and idx-1. */ + if (tables == 2 && j == 0 && i < d->rehashidx) { + /* Moreover, if we are currently out of range in the second + * table, there will be no elements in both tables up to + * the current rehashing index, so we jump if possible. + * (this happens when going from big to small table). */ + if (i >= d->ht[1].size) i = d->rehashidx; + continue; + } + if (i >= d->ht[j].size) continue; /* Out of range for this table. 
*/ + dictEntry *he = d->ht[j].table[i]; + + /* Count contiguous empty buckets, and jump to other + * locations if they reach 'count' (with a minimum of 5). */ + if (he == NULL) { + emptylen++; + if (emptylen >= 5 && emptylen > count) { + i = random() & maxsizemask; + emptylen = 0; + } + } else { + while (he) { + /* Collect all the elements of the buckets found non + * empty while iterating. */ + *des = he; + des++; + he = he->next; + stored++; + if (stored == count) return stored; + } + } + } + i = (i+1) & maxsizemask; + } + return stored; +} + /* Function to reverse bits. Algorithm from: * http://graphics.stanford.edu/~seander/bithacks.html#ReverseParallel */ static unsigned long rev(unsigned long v) { diff --git a/src/dict.h b/src/dict.h index 7421078f8..5959a57b9 100644 --- a/src/dict.h +++ b/src/dict.h @@ -164,6 +164,7 @@ dictIterator *dictGetSafeIterator(dict *d); dictEntry *dictNext(dictIterator *iter); void dictReleaseIterator(dictIterator *iter); dictEntry *dictGetRandomKey(dict *d); +unsigned int dictGetSomeKeys(dict *d, dictEntry **des, unsigned int count); unsigned int dictGetRandomKeys(dict *d, dictEntry **des, unsigned int count); void dictPrintStats(dict *d); unsigned int dictGenHashFunction(const void *key, int len); diff --git a/src/redis.c b/src/redis.c index b2f9ffc68..a1d8375e6 100644 --- a/src/redis.c +++ b/src/redis.c @@ -3148,7 +3148,7 @@ void evictionPoolPopulate(dict *sampledict, dict *keydict, struct evictionPoolEn } #if 1 /* Use bulk get by default. */ - count = dictGetRandomKeys(sampledict,samples,server.maxmemory_samples); + count = dictGetSomeKeys(sampledict,samples,server.maxmemory_samples); #else count = server.maxmemory_samples; for (j = 0; j < count; j++) samples[j] = dictGetRandomKey(sampledict); From 8696874d75977e2916c427db4002d053fbb3ff1f Mon Sep 17 00:00:00 2001 From: antirez Date: Fri, 6 Feb 2015 16:17:11 +0100 Subject: [PATCH 0087/1928] Remove optional single-key path from evictionPoolPopulate(). --- src/redis.c | 6 ------ 1 file changed, 6 deletions(-) diff --git a/src/redis.c b/src/redis.c index a1d8375e6..a296871ac 100644 --- a/src/redis.c +++ b/src/redis.c @@ -3147,13 +3147,7 @@ void evictionPoolPopulate(dict *sampledict, dict *keydict, struct evictionPoolEn samples = zmalloc(sizeof(samples[0])*server.maxmemory_samples); } -#if 1 /* Use bulk get by default. */ count = dictGetSomeKeys(sampledict,samples,server.maxmemory_samples); -#else - count = server.maxmemory_samples; - for (j = 0; j < count; j++) samples[j] = dictGetRandomKey(sampledict); -#endif - for (j = 0; j < count; j++) { unsigned long long idle; sds key; From 05841a638697c0bbbc90bf2dc2da90659b71c26a Mon Sep 17 00:00:00 2001 From: antirez Date: Sat, 7 Feb 2015 09:54:07 +0100 Subject: [PATCH 0088/1928] redis-cli --stat: show LOAD when loading. 
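dictGetSomeKeys() exists to feed the eviction pool cheaply: rather than ranking every key, Redis samples a handful and evicts the best candidate among them (approximated LRU). Stripped of all hash table details, the idea is roughly the following sketch, with plain arrays and made-up idle times standing in for the real structures:

    #include <stdio.h>
    #include <stdlib.h>
    #include <time.h>

    struct key { const char *name; long long idle; };

    int main(void) {
        struct key keys[] = {
            {"a", 10}, {"b", 500}, {"c", 42}, {"d", 7},
            {"e", 900}, {"f", 120}, {"g", 3}, {"h", 260},
        };
        int nkeys = sizeof(keys)/sizeof(keys[0]);
        int samples = 3;               /* maxmemory-samples analogue. */
        struct key *best = NULL;

        srand(time(NULL));
        for (int i = 0; i < samples; i++) {
            struct key *k = &keys[rand() % nkeys];
            if (best == NULL || k->idle > best->idle) best = k;
        }
        printf("evict candidate: %s (idle %lld)\n", best->name, best->idle);
        return 0;
    }

More samples give a closer approximation of true LRU at the cost of more work per eviction, which is the trade-off the maxmemory-samples setting exposes.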
--- src/redis-cli.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/src/redis-cli.c b/src/redis-cli.c index d2460a10b..8db34bcaf 100644 --- a/src/redis-cli.c +++ b/src/redis-cli.c @@ -1945,6 +1945,7 @@ static void statMode(void) { /* Children */ aux = getLongInfoField(reply->str,"bgsave_in_progress"); aux |= getLongInfoField(reply->str,"aof_rewrite_in_progress") << 1; + aux |= getLongInfoField(reply->str,"loading") << 2; switch(aux) { case 0: break; case 1: @@ -1956,6 +1957,9 @@ static void statMode(void) { case 3: printf("SAVE+AOF"); break; + case 4: + printf("LOAD"); + break; } printf("\n"); From edda00b902910c73b5d2dcb56e43a66b6d541193 Mon Sep 17 00:00:00 2001 From: antirez Date: Sat, 7 Feb 2015 10:11:43 +0100 Subject: [PATCH 0089/1928] dict.c Rehashing visualization code snippet added to utils. --- utils/hashtable/README | 13 ++++ utils/hashtable/rehashing.c | 132 ++++++++++++++++++++++++++++++++++++ 2 files changed, 145 insertions(+) create mode 100644 utils/hashtable/README create mode 100644 utils/hashtable/rehashing.c diff --git a/utils/hashtable/README b/utils/hashtable/README new file mode 100644 index 000000000..e2862f012 --- /dev/null +++ b/utils/hashtable/README @@ -0,0 +1,13 @@ +Hash table implementation related utilities. + +rehashing.c +--- + +Visually show buckets in the two hash tables between rehashings. Also stress +test getRandomKeys() implementation, that may actually disappear from +Redis soon, however visualizaiton some code is reusable in new bugs +investigation. + +Compile with: + + cc -I ../../src/ rehashing.c ../../src/zmalloc.c ../../src/dict.c -o rehashing_test diff --git a/utils/hashtable/rehashing.c b/utils/hashtable/rehashing.c new file mode 100644 index 000000000..df1f52bb1 --- /dev/null +++ b/utils/hashtable/rehashing.c @@ -0,0 +1,132 @@ +#include "redis.h" +#include "dict.h" + +void _redisAssert(char *x, char *y, int l) { + printf("ASSERT: %s %s %d\n",x,y,l); + exit(1); +} + +unsigned int dictKeyHash(const void *keyp) { + unsigned long key = (unsigned long)keyp; + key = dictGenHashFunction(&key,sizeof(key)); + key += ~(key << 15); + key ^= (key >> 10); + key += (key << 3); + key ^= (key >> 6); + key += ~(key << 11); + key ^= (key >> 16); + return key; +} + +int dictKeyCompare(void *privdata, const void *key1, const void *key2) { + unsigned long k1 = (unsigned long)key1; + unsigned long k2 = (unsigned long)key2; + return k1 == k2; +} + +dictType dictTypeTest = { + dictKeyHash, /* hash function */ + NULL, /* key dup */ + NULL, /* val dup */ + dictKeyCompare, /* key compare */ + NULL, /* key destructor */ + NULL /* val destructor */ +}; + +void showBuckets(dictht ht) { + if (ht.table == NULL) { + printf("NULL\n"); + } else { + int j; + for (j = 0; j < ht.size; j++) { + printf("%c", ht.table[j] ? '1' : '0'); + } + printf("\n"); + } +} + +void show(dict *d) { + int j; + if (d->rehashidx != -1) { + printf("rhidx: "); + for (j = 0; j < d->rehashidx; j++) + printf("."); + printf("|\n"); + } + printf("ht[0]: "); + showBuckets(d->ht[0]); + printf("ht[1]: "); + showBuckets(d->ht[1]); + printf("\n"); +} + +int sortPointers(const void *a, const void *b) { + unsigned long la, lb; + + la = (long) (*((dictEntry**)a)); + lb = (long) (*((dictEntry**)b)); + return la-lb; +} + +void stressGetKeys(dict *d, int times) { + int j; + dictEntry **des = zmalloc(sizeof(dictEntry*)*dictSize(d)); + for (j = 0; j < times; j++) { + int requested = rand() % (dictSize(d)+1); + int returned = dictGetRandomKeys(d, des, requested); + if (requested != returned) { + printf("*** ERROR! 
Req: %d, Ret: %d\n", requested, returned); + exit(1); + } + qsort(des,returned,sizeof(dictEntry*),sortPointers); + if (returned > 1) { + int i; + for (i = 0; i < returned-1; i++) { + if (des[i] == des[i+1]) { + printf("*** ERROR! Duplicated element detected\n"); + exit(1); + } + } + } + } + zfree(des); +} + +#define MAX1 120 +#define MAX2 1000 +int main(void) { + dict *d = dictCreate(&dictTypeTest,NULL); + unsigned long i; + srand(time(NULL)); + + for (i = 0; i < MAX1; i++) { + dictAdd(d,(void*)i,NULL); + show(d); + } + printf("Size: %d\n", (int)dictSize(d)); + + for (i = 0; i < MAX1; i++) { + dictDelete(d,(void*)i); + dictResize(d); + show(d); + } + dictRelease(d); + + d = dictCreate(&dictTypeTest,NULL); + printf("Getkeys stress test\n"); + + for (i = 0; i < MAX2; i++) { + dictAdd(d,(void*)i,NULL); + stressGetKeys(d,100); + } + + for (i = 0; i < MAX2; i++) { + dictDelete(d,(void*)i); + dictResize(d); + stressGetKeys(d,100); + } + dictRelease(d); + + printf("TEST PASSED!\n"); + return 0; +} From 8ddc14523fe5773d99a2dffba748d349f4419994 Mon Sep 17 00:00:00 2001 From: antirez Date: Sun, 8 Feb 2015 11:19:47 +0100 Subject: [PATCH 0090/1928] dict.c: reset emptylen when bucket is not empty. Fixed by @oranagra, thank you. --- src/dict.c | 1 + 1 file changed, 1 insertion(+) diff --git a/src/dict.c b/src/dict.c index 8f25c14bc..716d03fb0 100644 --- a/src/dict.c +++ b/src/dict.c @@ -811,6 +811,7 @@ unsigned int dictGetSomeKeys(dict *d, dictEntry **des, unsigned int count) { emptylen = 0; } } else { + emptylen = 0; while (he) { /* Collect all the elements of the buckets found non * empty while iterating. */ From 585d1a60bf5b451b83e066a454d87e02ead08666 Mon Sep 17 00:00:00 2001 From: antirez Date: Mon, 9 Feb 2015 15:17:20 +0100 Subject: [PATCH 0091/1928] Separate latency monitoring of eviction loop and eviction DELs. --- src/latency.c | 12 +++++++++++- src/latency.h | 4 ++++ src/redis.c | 6 +++++- 3 files changed, 20 insertions(+), 2 deletions(-) diff --git a/src/latency.c b/src/latency.c index cb116fb90..fd76b3215 100644 --- a/src/latency.c +++ b/src/latency.c @@ -228,6 +228,7 @@ sds createLatencyReport(void) { int advise_write_load_info = 0; /* Print info about AOF and write load. */ int advise_hz = 0; /* Use higher HZ. */ int advise_large_objects = 0; /* Deletion of large objects. */ + int advise_mass_eviction = 0; /* Avoid mass eviction of keys. */ int advise_relax_fsync_policy = 0; /* appendfsync always is slow. */ int advise_disable_thp = 0; /* AnonHugePages detected. */ int advices = 0; @@ -364,11 +365,16 @@ sds createLatencyReport(void) { } /* Eviction cycle. */ - if (!strcasecmp(event,"eviction-cycle")) { + if (!strcasecmp(event,"eviction-del")) { advise_large_objects = 1; advices++; } + if (!strcasecmp(event,"eviction-cycle")) { + advise_mass_eviction = 1; + advices++; + } + report = sdscatlen(report,"\n",1); } dictReleaseIterator(di); @@ -452,6 +458,10 @@ sds createLatencyReport(void) { report = sdscat(report,"- Deleting, expiring or evicting (because of maxmemory policy) large objects is a blocking operation. 
If you have very large objects that are often deleted, expired, or evicted, try to fragment those objects into multiple smaller objects.\n"); } + if (advise_mass_eviction) { + report = sdscat(report,"- Sudden changes to the 'maxmemory' setting via 'CONFIG SET', or allocation of large objects via sets or sorted sets intersections, STORE option of SORT, Redis Cluster large keys migrations (RESTORE command), may create sudden memory pressure forcing the server to block trying to evict keys. \n"); + } + if (advise_disable_thp) { report = sdscat(report,"- I detected a non zero amount of anonymous huge pages used by your process. This creates very serious latency events in different conditions, especially when Redis is persisting on disk. To disable THP support use the command 'echo never > /sys/kernel/mm/transparent_hugepage/enabled', make sure to also add it into /etc/rc.local so that the command will be executed again after a reboot. Note that even if you have already disabled THP, you still need to restart the Redis process to get rid of the huge pages already created.\n"); } diff --git a/src/latency.h b/src/latency.h index 240f54b45..0fe26e0e4 100644 --- a/src/latency.h +++ b/src/latency.h @@ -86,4 +86,8 @@ int THPIsEnabled(void); (var) >= server.latency_monitor_threshold) \ latencyAddSample((event),(var)); +/* Remove time from a nested event. */ +#define latencyRemoveNestedEvent(event_var,nested_var) \ + event_var += nested_var; + #endif /* __LATENCY_H */ diff --git a/src/redis.c b/src/redis.c index a296871ac..6f5c40209 100644 --- a/src/redis.c +++ b/src/redis.c @@ -3202,7 +3202,7 @@ void evictionPoolPopulate(dict *sampledict, dict *keydict, struct evictionPoolEn int freeMemoryIfNeeded(void) { size_t mem_used, mem_tofree, mem_freed; int slaves = listLength(server.slaves); - mstime_t latency; + mstime_t latency, eviction_latency; /* Remove the size of slaves output buffers and AOF buffer from the * count of used memory. */ @@ -3333,7 +3333,11 @@ int freeMemoryIfNeeded(void) { * AOF and Output buffer memory will be freed eventually so * we only care about memory used by the key space. */ delta = (long long) zmalloc_used_memory(); + latencyStartMonitor(eviction_latency); dbDelete(db,keyobj); + latencyEndMonitor(eviction_latency); + latencyAddSampleIfNeeded("eviction-del",eviction_latency); + latencyRemoveNestedEvent(latency,eviction_latency); delta -= (long long) zmalloc_used_memory(); mem_freed += delta; server.stat_evictedkeys++; From 6b5922dcbbfe5428accc093892ff330211acf5d7 Mon Sep 17 00:00:00 2001 From: antirez Date: Mon, 9 Feb 2015 22:49:27 +0100 Subject: [PATCH 0092/1928] SPOP with count: initial fixes to the implementation. Severan problems are addressed but still a few missing. Since replication of this command was more complex than others since it needs to replicate multiple SREM commands, an old API able to do this was reused (it was taken inside the implementation since it was pretty obvious soon or later that would be useful). The API was improved a bit so that now a command may opt-out for the standard command replication when the server.dirty counter is incremented, in order to "manually" replicate what it wants. 
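To make the new flow concrete, the pattern a command implementation follows with these two APIs looks roughly like the fragment below. This is a schematic rather than the literal SPOP code: it assumes a client 'c' and a placeholder 'removed_element' object produced by the command's own logic, and it leaves out reference counting and error handling.

    /* Replicate one SREM per removed member instead of the command
     * that was actually received from the client. */
    robj *propargv[3];
    propargv[0] = createStringObject("SREM",4);
    propargv[1] = c->argv[1];        /* the key */
    propargv[2] = removed_element;   /* placeholder: one popped member */
    alsoPropagate(server.sremCommand, c->db->id, propargv, 3,
                  REDIS_PROPAGATE_AOF|REDIS_PROPAGATE_REPL);
    server.dirty++;                  /* the dataset did change... */

    /* ...but opt out of the default propagation of the original command. */
    preventCommandPropagation(c);
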
--- src/redis.c | 23 ++++++++++++++---- src/redis.h | 4 +++- src/t_set.c | 69 +++++++++++++++++++++++++---------------------------- 3 files changed, 54 insertions(+), 42 deletions(-) diff --git a/src/redis.c b/src/redis.c index 6f5c40209..277d2e4b0 100644 --- a/src/redis.c +++ b/src/redis.c @@ -1529,6 +1529,7 @@ void initServerConfig(void) { server.lpushCommand = lookupCommandByCString("lpush"); server.lpopCommand = lookupCommandByCString("lpop"); server.rpopCommand = lookupCommandByCString("rpop"); + server.sremCommand = lookupCommandByCString("srem"); /* Slow log */ server.slowlog_log_slower_than = REDIS_SLOWLOG_LOG_SLOWER_THAN; @@ -2001,6 +2002,9 @@ struct redisCommand *lookupCommandOrOriginal(sds name) { * + REDIS_PROPAGATE_NONE (no propagation of command at all) * + REDIS_PROPAGATE_AOF (propagate into the AOF file if is enabled) * + REDIS_PROPAGATE_REPL (propagate into the replication link) + * + * This should not be used inside commands implementation. Use instead + * alsoPropagate(), preventCommandPropagation(), forceCommandPropagation(). */ void propagate(struct redisCommand *cmd, int dbid, robj **argv, int argc, int flags) @@ -2027,6 +2031,13 @@ void forceCommandPropagation(redisClient *c, int flags) { if (flags & REDIS_PROPAGATE_AOF) c->flags |= REDIS_FORCE_AOF; } +/* Avoid that the executed command is propagated at all. This way we + * are free to just propagate what we want using the alsoPropagate() + * API. */ +void preventCommandPropagation(redisClient *c) { + c->flags |= REDIS_PREVENT_PROP; +} + /* Call() is the core of Redis execution of a command */ void call(redisClient *c, int flags) { long long dirty, start, duration; @@ -2080,7 +2091,7 @@ void call(redisClient *c, int flags) { } /* Propagate the command into the AOF and replication link */ - if (flags & REDIS_CALL_PROPAGATE) { + if (flags & REDIS_CALL_PROPAGATE && (c->flags & REDIS_PREVENT_PROP) == 0) { int flags = REDIS_PROPAGATE_NONE; if (c->flags & REDIS_FORCE_REPL) flags |= REDIS_PROPAGATE_REPL; @@ -2091,13 +2102,15 @@ void call(redisClient *c, int flags) { propagate(c->cmd,c->db->id,c->argv,c->argc,flags); } - /* Restore the old FORCE_AOF/REPL flags, since call can be executed + /* Restore the old replication flags, since call can be executed * recursively. */ - c->flags &= ~(REDIS_FORCE_AOF|REDIS_FORCE_REPL); - c->flags |= client_old_flags & (REDIS_FORCE_AOF|REDIS_FORCE_REPL); + c->flags &= ~(REDIS_FORCE_AOF|REDIS_FORCE_REPL|REDIS_PREVENT_PROP); + c->flags |= client_old_flags & + (REDIS_FORCE_AOF|REDIS_FORCE_REPL|REDIS_PREVENT_PROP); /* Handle the alsoPropagate() API to handle commands that want to propagate - * multiple separated commands. */ + * multiple separated commands. Note that alsoPropagate() is not affected + * by REDIS_PREVENT_PROP flag. */ if (server.also_propagate.numops) { int j; redisOp *rop; diff --git a/src/redis.h b/src/redis.h index 2170c5d29..a675d4f12 100644 --- a/src/redis.h +++ b/src/redis.h @@ -257,6 +257,7 @@ typedef long long mstime_t; /* millisecond time type. */ #define REDIS_PRE_PSYNC (1<<16) /* Instance don't understand PSYNC. */ #define REDIS_READONLY (1<<17) /* Cluster client is in read-only state. */ #define REDIS_PUBSUB (1<<18) /* Client is in Pub/Sub mode. */ +#define REDIS_PREVENT_PROP (1<<19) /* Don't propagate to AOF / Slaves. */ /* Client block type (btype field in client structure) * if REDIS_BLOCKED flag is set. 
*/ @@ -708,7 +709,7 @@ struct redisServer { off_t loading_process_events_interval_bytes; /* Fast pointers to often looked up command */ struct redisCommand *delCommand, *multiCommand, *lpushCommand, *lpopCommand, - *rpopCommand; + *rpopCommand, *sremCommand; /* Fields used only for stats */ time_t stat_starttime; /* Server start time */ long long stat_numcommands; /* Number of processed commands */ @@ -1252,6 +1253,7 @@ void call(redisClient *c, int flags); void propagate(struct redisCommand *cmd, int dbid, robj **argv, int argc, int flags); void alsoPropagate(struct redisCommand *cmd, int dbid, robj **argv, int argc, int target); void forceCommandPropagation(redisClient *c, int flags); +void preventCommandPropagation(redisClient *c); int prepareForShutdown(); #ifdef __GNUC__ void redisLog(int level, const char *fmt, ...) diff --git a/src/t_set.c b/src/t_set.c index f3f8bbaca..619b0f8a6 100644 --- a/src/t_set.c +++ b/src/t_set.c @@ -45,6 +45,11 @@ robj *setTypeCreate(robj *value) { return createSetObject(); } +/* Add the specified value into a set. The function takes care of incrementing + * the reference count of the object if needed in order to retain a copy. + * + * If the value was already member of the set, nothing is done and 0 is + * returned, otherwise the new element is added and 1 is returned. */ int setTypeAdd(robj *subject, robj *value) { long long llval; if (subject->encoding == REDIS_ENCODING_HT) { @@ -487,7 +492,7 @@ void spopWithCountCommand(redisClient *c) { long l; unsigned long count, size; unsigned long elements_returned; - robj *set, *aux, *aux_set; + robj *set, *aux_set; int64_t llele; /* Get the count argument */ @@ -522,7 +527,6 @@ void spopWithCountCommand(redisClient *c) { * The number of requested elements is greater than or equal to * the number of elements inside the set: simply return the whole set. */ if (count >= size) { - /* We just return the entire set */ sunionDiffGenericCommand(c,c->argv+1,1,NULL,REDIS_OP_UNION); @@ -531,10 +535,9 @@ void spopWithCountCommand(redisClient *c) { notifyKeyspaceEvent(REDIS_NOTIFY_GENERIC,"del",c->argv[1],c->db->id); /* Replicate/AOF this command as an SREM operation */ - aux = createStringObject("DEL",3); - rewriteClientCommandVector(c,2,aux,c->argv[1]); - decrRefCount(aux); - + rewriteClientCommandVector(c,2,shared.del,c->argv[1]); + signalModifiedKey(c->db,c->argv[1]); + server.dirty++; return; } @@ -544,9 +547,7 @@ void spopWithCountCommand(redisClient *c) { /* We need an auxiliary set. Optimistically, we create a set using an * Intset internally. */ - aux = createStringObjectFromLongLong(0); - aux_set = setTypeCreate(aux); - decrRefCount(aux); + aux_set = createIntsetObject(); /* Get the count requested of random elements from the set into our * auxiliary set. */ @@ -555,47 +556,43 @@ void spopWithCountCommand(redisClient *c) { { setTypeIterator *si; - robj *objele; + robj *objele, **propargv; int element_encoding; addReplyMultiBulkLen(c, elements_returned); - /* Replicate/AOF this command as an SREM operation */ - aux = createStringObject("SREM",4); - si = setTypeInitIterator(aux_set); while ((element_encoding = setTypeNext(si, &objele, &llele)) != -1) { if (element_encoding == REDIS_ENCODING_HT) { - - addReplyBulk(c, objele); - - /* Replicate/AOF this command as an SREM commands */ - rewriteClientCommandVector(c, 3, aux, c->argv[1], objele); - setTypeRemove(set, objele); - } - else if (element_encoding == REDIS_ENCODING_INTSET) { - /* TODO: setTypeRemove() forces us to convert all of the ints - * to string... 
isn't there a nicer way to do this? */ + incrRefCount(objele); + } else if (element_encoding == REDIS_ENCODING_INTSET) { objele = createStringObjectFromLongLong(llele); - addReplyBulk(c, objele); - - /* Replicate/AOF this command as an SREM commands */ - rewriteClientCommandVector(c, 3, aux, c->argv[1], objele); - setTypeRemove(set, objele); - - /* We created it, we kill it. */ - decrRefCount(objele); - } - else { + } else { redisPanic("Unknown set encoding"); } + setTypeRemove(set, objele); + addReplyBulk(c, objele); + + /* Replicate/AOF this command as an SREM operation */ + propargv = zmalloc(sizeof(robj*)*3); + propargv[0] = createStringObject("SREM",4); + propargv[1] = c->argv[1]; + incrRefCount(c->argv[1]); + propargv[2] = objele; + incrRefCount(objele); + + alsoPropagate(server.sremCommand,c->db->id,propargv,3,REDIS_PROPAGATE_AOF|REDIS_PROPAGATE_REPL); + decrRefCount(objele); + server.dirty++; } setTypeReleaseIterator(si); - - decrRefCount(aux); } - /* Free the auxiliary set - we need it no more. */ + /* Don't propagate the command itself even if we incremented the + * dirty counter. We don't want to propagate an SPOP command since + * we propagated the command as a set of SREMs operations using + * the alsoPropagate() API. */ + preventCommandPropagation(c); decrRefCount(aux_set); } From cc7f0434b5c590e5a61c3cefa6ed9fb193d021dd Mon Sep 17 00:00:00 2001 From: antirez Date: Mon, 9 Feb 2015 23:44:42 +0100 Subject: [PATCH 0093/1928] Change alsoPropagate() behavior to make it more usable. Now the API automatically creates its argv copy and increment ref count of passed objects. --- src/redis.c | 21 +++++++++++++++++++-- src/t_set.c | 12 +++++------- 2 files changed, 24 insertions(+), 9 deletions(-) diff --git a/src/redis.c b/src/redis.c index 277d2e4b0..8bfcfdaad 100644 --- a/src/redis.c +++ b/src/redis.c @@ -2016,11 +2016,28 @@ void propagate(struct redisCommand *cmd, int dbid, robj **argv, int argc, } /* Used inside commands to schedule the propagation of additional commands - * after the current command is propagated to AOF / Replication. */ + * after the current command is propagated to AOF / Replication. + * + * 'cmd' must be a pointer to the Redis command to replicate, dbid is the + * database ID the command should be propagated into. + * Arguments of the command to propagte are passed as an array of redis + * objects pointers of len 'argc', using the 'argv' vector. + * + * The function does not take a reference to the passed 'argv' vector, + * so it is up to the caller to release the passed argv (but it is usually + * stack allocated). The function autoamtically increments ref count of + * passed objects, so the caller does not need to. 
*/ void alsoPropagate(struct redisCommand *cmd, int dbid, robj **argv, int argc, int target) { - redisOpArrayAppend(&server.also_propagate,cmd,dbid,argv,argc,target); + robj **argvcopy = zmalloc(sizeof(robj*)*argc); + int j; + + for (j = 0; j < argc; j++) { + argvcopy[j] = argv[j]; + incrRefCount(argv[j]); + } + redisOpArrayAppend(&server.also_propagate,cmd,dbid,argvcopy,argc,target); } /* It is possible to call the function forceCommandPropagation() inside a diff --git a/src/t_set.c b/src/t_set.c index 619b0f8a6..47aa173a1 100644 --- a/src/t_set.c +++ b/src/t_set.c @@ -556,10 +556,12 @@ void spopWithCountCommand(redisClient *c) { { setTypeIterator *si; - robj *objele, **propargv; + robj *objele, *propargv[3]; int element_encoding; addReplyMultiBulkLen(c, elements_returned); + propargv[0] = createStringObject("SREM",4); + propargv[1] = c->argv[1]; si = setTypeInitIterator(aux_set); while ((element_encoding = setTypeNext(si, &objele, &llele)) != -1) { @@ -574,17 +576,13 @@ void spopWithCountCommand(redisClient *c) { addReplyBulk(c, objele); /* Replicate/AOF this command as an SREM operation */ - propargv = zmalloc(sizeof(robj*)*3); - propargv[0] = createStringObject("SREM",4); - propargv[1] = c->argv[1]; - incrRefCount(c->argv[1]); propargv[2] = objele; - incrRefCount(objele); - alsoPropagate(server.sremCommand,c->db->id,propargv,3,REDIS_PROPAGATE_AOF|REDIS_PROPAGATE_REPL); + decrRefCount(objele); server.dirty++; } + decrRefCount(propargv[0]); setTypeReleaseIterator(si); } From 55003f7a118beb20ed1bc65926e29e207c3b721a Mon Sep 17 00:00:00 2001 From: antirez Date: Mon, 9 Feb 2015 23:57:52 +0100 Subject: [PATCH 0094/1928] alsoPropagate: handle REDIS_CALL_PROPAGATE and AOF loading. --- src/redis.c | 13 +++++++++---- tests/integration/aof.tcl | 2 +- 2 files changed, 10 insertions(+), 5 deletions(-) diff --git a/src/redis.c b/src/redis.c index 8bfcfdaad..db3af61b0 100644 --- a/src/redis.c +++ b/src/redis.c @@ -2030,9 +2030,12 @@ void propagate(struct redisCommand *cmd, int dbid, robj **argv, int argc, void alsoPropagate(struct redisCommand *cmd, int dbid, robj **argv, int argc, int target) { - robj **argvcopy = zmalloc(sizeof(robj*)*argc); + robj **argvcopy; int j; + if (server.loading) return; /* No propagation during loading. */ + + argvcopy = zmalloc(sizeof(robj*)*argc); for (j = 0; j < argc; j++) { argvcopy[j] = argv[j]; incrRefCount(argv[j]); @@ -2132,9 +2135,11 @@ void call(redisClient *c, int flags) { int j; redisOp *rop; - for (j = 0; j < server.also_propagate.numops; j++) { - rop = &server.also_propagate.ops[j]; - propagate(rop->cmd, rop->dbid, rop->argv, rop->argc, rop->target); + if (flags & REDIS_CALL_PROPAGATE) { + for (j = 0; j < server.also_propagate.numops; j++) { + rop = &server.also_propagate.ops[j]; + propagate(rop->cmd,rop->dbid,rop->argv,rop->argc,rop->target); + } } redisOpArrayFree(&server.also_propagate); } diff --git a/tests/integration/aof.tcl b/tests/integration/aof.tcl index 01b928bb5..832f996e1 100644 --- a/tests/integration/aof.tcl +++ b/tests/integration/aof.tcl @@ -204,7 +204,7 @@ tags {"aof"} { } } - ## Test that SPOP with (that modifies the client's argc/argv) is correctly free'd + ## Uses the alsoPropagate() API. create_aof { append_to_aof [formatCommand sadd set foo] append_to_aof [formatCommand sadd set bar] From 9feee428f25a5681a06cd13ed1c4cc644759e719 Mon Sep 17 00:00:00 2001 From: antirez Date: Tue, 10 Feb 2015 22:59:12 +0100 Subject: [PATCH 0095/1928] SPOP: reimplemented for speed and better distribution. 
The old version of SPOP with "count" argument used an API call of dict.c which was actually designed for a different goal, and was not capable of good distribution. We follow a different three-cases approach optimized for different ratiion between sets and requested number of elements. The implementation is simpler and allowed the removal of a large amount of code. --- src/dict.c | 75 ------------------ src/dict.h | 1 - src/intset.c | 84 -------------------- src/intset.h | 1 - src/t_set.c | 220 +++++++++++++++++++-------------------------------- 5 files changed, 80 insertions(+), 301 deletions(-) diff --git a/src/dict.c b/src/dict.c index 716d03fb0..dbcfeb492 100644 --- a/src/dict.c +++ b/src/dict.c @@ -664,81 +664,6 @@ dictEntry *dictGetRandomKey(dict *d) return he; } -/* XXX: This is going to be removed soon and SPOP internals - * reimplemented. - * - * This is a version of dictGetRandomKey() that is modified in order to - * return multiple entries by jumping at a random place of the hash table - * and scanning linearly for entries. - * - * Returned pointers to hash table entries are stored into 'des' that - * points to an array of dictEntry pointers. The array must have room for - * at least 'count' elements, that is the argument we pass to the function - * to tell how many random elements we need. - * - * The function returns the number of items stored into 'des', that may - * be less than 'count' if the hash table has less than 'count' elements - * inside. - * - * Note that this function is not suitable when you need a good distribution - * of the returned items, but only when you need to "sample" a given number - * of continuous elements to run some kind of algorithm or to produce - * statistics. However the function is much faster than dictGetRandomKey() - * at producing N elements, and the elements are guaranteed to be non - * repeating. */ -unsigned int dictGetRandomKeys(dict *d, dictEntry **des, unsigned int count) { - unsigned int j; /* internal hash table id, 0 or 1. */ - unsigned int tables; /* 1 or 2 tables? */ - unsigned int stored = 0, maxsizemask; - - if (dictSize(d) < count) count = dictSize(d); - - /* Try to do a rehashing work proportional to 'count'. */ - for (j = 0; j < count; j++) { - if (dictIsRehashing(d)) - _dictRehashStep(d); - else - break; - } - - tables = dictIsRehashing(d) ? 2 : 1; - maxsizemask = d->ht[0].sizemask; - if (tables > 1 && maxsizemask < d->ht[1].sizemask) - maxsizemask = d->ht[1].sizemask; - - /* Pick a random point inside the larger table. */ - unsigned int i = random() & maxsizemask; - while(stored < count) { - for (j = 0; j < tables; j++) { - /* Invariant of the dict.c rehashing: up to the indexes already - * visited in ht[0] during the rehashing, there are no populated - * buckets, so we can skip ht[0] for indexes between 0 and idx-1. */ - if (tables == 2 && j == 0 && i < d->rehashidx) { - /* Moreover, if we are currently out of range in the second - * table, there will be no elements in both tables up to - * the current rehashing index, so we jump if possible. - * (this happens when going from big to small table). */ - if (i >= d->ht[1].size) i = d->rehashidx; - continue; - } - if (i >= d->ht[j].size) continue; /* Out of range for this table. */ - dictEntry *he = d->ht[j].table[i]; - while (he) { - /* Collect all the elements of the buckets found non - * empty while iterating. */ - *des = he; - des++; - he = he->next; - stored++; - if (stored == count) return stored; - } - } - i = (i+1) & maxsizemask; - } - return stored; /* Never reached. 
*/ -} - - /* This function samples the dictionary to return a few keys from random * locations. * diff --git a/src/dict.h b/src/dict.h index 5959a57b9..014d18212 100644 --- a/src/dict.h +++ b/src/dict.h @@ -165,7 +165,6 @@ dictEntry *dictNext(dictIterator *iter); void dictReleaseIterator(dictIterator *iter); dictEntry *dictGetRandomKey(dict *d); unsigned int dictGetSomeKeys(dict *d, dictEntry **des, unsigned int count); -unsigned int dictGetRandomKeys(dict *d, dictEntry **des, unsigned int count); void dictPrintStats(dict *d); unsigned int dictGenHashFunction(const void *key, int len); unsigned int dictGenCaseHashFunction(const unsigned char *buf, int len); diff --git a/src/intset.c b/src/intset.c index 762bd48c8..b0a597fc7 100644 --- a/src/intset.c +++ b/src/intset.c @@ -261,90 +261,6 @@ int64_t intsetRandom(intset *is) { return _intsetGet(is,rand()%intrev32ifbe(is->length)); } -/* How many times bigger should the set length be compared to the requested - * count of members for us to use the Floyd algorithm instead of - * the Knuth algorithm */ -#define RANDOMMEMBERS_ALGORITHM_SELECTION_RATIO (2) - -/* Copies 'count' random members from the set into the 'values' array. - * 'values' must be an array of int64_t values, of length 'count'. - * Returns the amount of items returned. If this amount is less than 'count', - * then the remaining 'values' are left uninitialized. */ -int intsetRandomMembers(intset *is, int64_t* values, int count) { - - /* We don't check that is and values are non-NULL - the caller must - * play nice. */ - - int length = intsetLen(is); - - if (count > length) { - /* Return everything in the set */ - count = length; - } - - /* Choose between the Knuth shuffle algorithm, O(1) space, O(length) time, - * and the Floyd algorithm, O(length) space, O(count) time. */ - if ((RANDOMMEMBERS_ALGORITHM_SELECTION_RATIO * count) > length) { - - /* If the count of members requested is almost the length of the set, - * use the Knuth shuffle algorithm, O(1) space, O(length) time. */ - - /* First, fill the values array with unique random indexes inside - * the set. */ - int in, im, rn, rm; - im = 0; - for (in = 0; in < length && im < count; in++) { - - rn = length - in; - rm = count - im; - if (rand() % rn < rm) { - values[im++] = in; - } - } - - } else { - - /* If the length is considerably more than the count of members - * requested, use Robert Floyd's algorithm, O(length) space, - * O(count) time. - * Based on Jon Bentley's Programming Pearls */ - - int64_t *is_used = zcalloc(sizeof(int64_t) * length); - int in, im, r; - - r = 0; - im = 0; - - for (in = length - count; in < length && im < count; in++) { - - /* Generate a random number r */ - r = rand() % (in + 1); - - /* Do we already have the value in r? */ - if (is_used[r]) { - /* Use in instead of the generated number */ - r = in; - } - - values[im++] = r ; - - /* Mark it as used */ - is_used[r] = 1; - } - - zfree(is_used); - } - - /* Replace each random index with the value stored there in the intset */ - uint8_t encoding = intrev32ifbe(is->encoding); - for (int currentValue = 0; currentValue < count; currentValue++) { - values[currentValue] = - _intsetGetEncoded(is, values[currentValue], encoding); - } - - return count; -} - /* Sets the value to the value at the given position. When this position is * out of range the function returns 0, when in range it returns 1. 
*/ uint8_t intsetGet(intset *is, uint32_t pos, int64_t *value) { diff --git a/src/intset.h b/src/intset.h index 7550df303..30a854f89 100644 --- a/src/intset.h +++ b/src/intset.h @@ -43,7 +43,6 @@ intset *intsetAdd(intset *is, int64_t value, uint8_t *success); intset *intsetRemove(intset *is, int64_t value, int *success); uint8_t intsetFind(intset *is, int64_t value); int64_t intsetRandom(intset *is); -int intsetRandomMembers(intset *is, int64_t* value, int count); uint8_t intsetGet(intset *is, uint32_t pos, int64_t *value); uint32_t intsetLen(intset *is); size_t intsetBlobLen(intset *is); diff --git a/src/t_set.c b/src/t_set.c index 47aa173a1..13b642dd4 100644 --- a/src/t_set.c +++ b/src/t_set.c @@ -212,106 +212,6 @@ int setTypeRandomElement(robj *setobj, robj **objele, int64_t *llele) { return setobj->encoding; } -/* Return a number of random elements from a non empty set. - * - * This is a version of setTypeRandomElement() that is modified in order to - * return multiple entries, using dictGetRandomKeys() and intsetRandomMembers(). - * - * The elements are stored into 'aux_set' which should be of a set type. - * - * The function returns the number of items stored into 'aux_set', that may - * be less than 'count' if the hash table has less than 'count' elements - * inside. - * - * Note that this function is not suitable when you need a good distribution - * of the returned items, but only when you need to "sample" a given number - * of continuous elements to run some kind of algorithm or to produce - * statistics. However the function is much faster than setTypeRandomElement() - * at producing N elements, and the elements are guaranteed to be non - * repeating. - */ -unsigned long setTypeRandomElements(robj *set, unsigned long count, - robj *aux_set) { - unsigned long set_size; - unsigned long elements_to_return = count; - unsigned long elements_copied = 0; - unsigned long current_element = 0; - - /* Like all setType* functions, we assume good behavior on part of the - * caller, so no extra parameter checks are made. */ - - /* If the number of elements in the the set is less than the count - * requested, just return all of them. */ - set_size = setTypeSize(set); - if (set_size < count) { - elements_to_return = set_size; - } - - /* TODO: It is definitely faster adding items to the set by directly - * handling the Dict or intset inside it, avoiding the constant encoding - * checks inside setTypeAdd(). However, We don't want to touch the set - * internals in non setType* functions. So, we just call setTypeAdd() - * multiple times, but this isn't an optimal solution. - * Another option would be to create a bulk-add function: - * setTypeAddBulk(). 
*/ - if (set->encoding == REDIS_ENCODING_HT) { - /* Allocate result array */ - dictEntry **random_elements = - zmalloc(sizeof(dictEntry*) * elements_to_return); - - /* Get the random elements */ - elements_copied = - dictGetRandomKeys(set->ptr, random_elements, elements_to_return); - redisAssert(elements_copied == elements_to_return); - - /* Put them into the set */ - for (current_element = 0; current_element < elements_copied; - current_element++) { - - /* We get the key and duplicate it, as we know it is a string */ - setTypeAdd(aux_set, - dictGetKey(random_elements[current_element])); - } - - zfree(random_elements); - - } else if (set->encoding == REDIS_ENCODING_INTSET) { - /* Allocate result array */ - int64_t *random_elements = - zmalloc(sizeof(int64_t) * elements_to_return); - robj* element_as_str = NULL; - - elements_copied = - intsetRandomMembers((intset*) set->ptr, - random_elements, - elements_to_return); - - redisAssert(elements_copied == elements_to_return); - - /* Put them into the set */ - for (current_element = 0; current_element < elements_copied; - current_element++) { - - element_as_str = createStringObjectFromLongLong( - random_elements[current_element]); - - /* Put the values in the set */ - setTypeAdd(aux_set, - element_as_str); - - decrRefCount(element_as_str); - } - - zfree(random_elements); - } else { - redisPanic("Unknown set encoding"); - } - - /* We have a set with random elements. Return the actual elements in - the aux_set. */ - return elements_copied; -} - unsigned long setTypeSize(robj *subject) { if (subject->encoding == REDIS_ENCODING_HT) { return dictSize((dict*)subject->ptr); @@ -485,15 +385,18 @@ void scardCommand(redisClient *c) { addReplyLongLong(c,setTypeSize(o)); } -/* handle the "SPOP key " variant. The normal version of the +/* Handle the "SPOP key " variant. The normal version of the * command is handled by the spopCommand() function itself. */ +/* How many times bigger should be the set compared to the remaining size + * for us to use the "create new set" strategy? Read later in the + * implementation for more info. */ +#define SPOP_MOVE_STRATEGY_MUL 5 + void spopWithCountCommand(redisClient *c) { long l; unsigned long count, size; - unsigned long elements_returned; - robj *set, *aux_set; - int64_t llele; + robj *set; /* Get the count argument */ if (getLongFromObjectOrReply(c,c->argv[2],&l,NULL) != REDIS_OK) return; @@ -516,12 +419,11 @@ void spopWithCountCommand(redisClient *c) { return; } - /* Get the size of the set. It is always > 0, as empty sets get - * deleted. */ size = setTypeSize(set); /* Generate an SPOP keyspace notification */ notifyKeyspaceEvent(REDIS_NOTIFY_SET,"spop",c->argv[1],c->db->id); + server.dirty += count; /* CASE 1: * The number of requested elements is greater than or equal to @@ -534,64 +436,102 @@ void spopWithCountCommand(redisClient *c) { dbDelete(c->db,c->argv[1]); notifyKeyspaceEvent(REDIS_NOTIFY_GENERIC,"del",c->argv[1],c->db->id); - /* Replicate/AOF this command as an SREM operation */ + /* Propagate this command as an DEL operation */ rewriteClientCommandVector(c,2,shared.del,c->argv[1]); signalModifiedKey(c->db,c->argv[1]); server.dirty++; return; } - /* CASE 2: - * The number of requested elements is less than the number - * of elements inside the set. */ + /* Case 2 and 3 require to replicate SPOP as a set of SERM commands. + * Prepare our replication argument vector. Also send the array length + * which is common to both the code paths. 
*/ + robj *propargv[3]; + propargv[0] = createStringObject("SREM",4); + propargv[1] = c->argv[1]; + addReplyMultiBulkLen(c,count); - /* We need an auxiliary set. Optimistically, we create a set using an - * Intset internally. */ - aux_set = createIntsetObject(); + /* Common iteration vars. */ + robj *objele; + int encoding; + int64_t llele; + unsigned long remaining = size-count; /* Elements left after SPOP. */ - /* Get the count requested of random elements from the set into our - * auxiliary set. */ - elements_returned = setTypeRandomElements(set, count, aux_set); - redisAssert(elements_returned == count); - - { - setTypeIterator *si; - robj *objele, *propargv[3]; - int element_encoding; - - addReplyMultiBulkLen(c, elements_returned); - propargv[0] = createStringObject("SREM",4); - propargv[1] = c->argv[1]; - - si = setTypeInitIterator(aux_set); - while ((element_encoding = setTypeNext(si, &objele, &llele)) != -1) { - if (element_encoding == REDIS_ENCODING_HT) { - incrRefCount(objele); - } else if (element_encoding == REDIS_ENCODING_INTSET) { + /* If we are here, the number of requested elements is less than the + * number of elements inside the set. Also we are sure that count < size. + * Use two different strategies. + * + * CASE 2: The number of elements to return is small compared to the + * set size. We can just extract random elements and return them to + * the set. */ + if (remaining*SPOP_MOVE_STRATEGY_MUL > count) { + while(count--) { + encoding = setTypeRandomElement(set,&objele,&llele); + if (encoding == REDIS_ENCODING_INTSET) { objele = createStringObjectFromLongLong(llele); } else { - redisPanic("Unknown set encoding"); + incrRefCount(objele); } - setTypeRemove(set, objele); - addReplyBulk(c, objele); + + /* Return the element to the client and remove from the set. */ + addReplyBulk(c,objele); + setTypeRemove(set,objele); /* Replicate/AOF this command as an SREM operation */ propargv[2] = objele; - alsoPropagate(server.sremCommand,c->db->id,propargv,3,REDIS_PROPAGATE_AOF|REDIS_PROPAGATE_REPL); - + alsoPropagate(server.sremCommand,c->db->id,propargv,3, + REDIS_PROPAGATE_AOF|REDIS_PROPAGATE_REPL); decrRefCount(objele); - server.dirty++; } - decrRefCount(propargv[0]); + } else { + /* CASE 3: The number of elements to return is very big, approaching + * the size of the set itself. After some time extracting random elements + * from such a set becomes computationally expensive, so we use + * a different strategy, we extract random elements that we don't + * want to return (the elements that will remain part of the set), + * creating a new set as we do this (that will be stored as the original + * set). Then we return the elements left in the original set and + * release it. */ + robj *newset = NULL; + + /* Create a new set with just the remaining elements. */ + while(remaining--) { + encoding = setTypeRandomElement(set,&objele,&llele); + if (encoding == REDIS_ENCODING_INTSET) { + objele = createStringObjectFromLongLong(llele); + } else { + incrRefCount(objele); + } + if (!newset) newset = setTypeCreate(objele); + setTypeAdd(newset,objele); + setTypeRemove(set,objele); + decrRefCount(objele); + } + + /* Assign the new set as the key value. */ + incrRefCount(set); /* Protect the old set value. */ + dbOverwrite(c->db,c->argv[1],newset); + + /* Tranfer the old set to the client and release it. 
*/ + setTypeIterator *si; + si = setTypeInitIterator(set); + while((encoding = setTypeNext(si,&objele,&llele)) != -1) { + if (encoding == REDIS_ENCODING_INTSET) { + addReplyBulkLongLong(c,llele); + } else { + addReplyBulk(c,objele); + } + } setTypeReleaseIterator(si); + decrRefCount(set); } /* Don't propagate the command itself even if we incremented the * dirty counter. We don't want to propagate an SPOP command since * we propagated the command as a set of SREMs operations using * the alsoPropagate() API. */ + decrRefCount(propargv[0]); preventCommandPropagation(c); - decrRefCount(aux_set); } void spopCommand(redisClient *c) { From a37d0f8b48e91d1ec50ac97ef7fa832794cf6edd Mon Sep 17 00:00:00 2001 From: antirez Date: Wed, 11 Feb 2015 10:11:28 +0100 Subject: [PATCH 0096/1928] SPOP with count: fix replication for code path #3. --- src/t_set.c | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/src/t_set.c b/src/t_set.c index 13b642dd4..c8141c3f6 100644 --- a/src/t_set.c +++ b/src/t_set.c @@ -517,10 +517,18 @@ void spopWithCountCommand(redisClient *c) { si = setTypeInitIterator(set); while((encoding = setTypeNext(si,&objele,&llele)) != -1) { if (encoding == REDIS_ENCODING_INTSET) { - addReplyBulkLongLong(c,llele); + objele = createStringObjectFromLongLong(llele); } else { - addReplyBulk(c,objele); + incrRefCount(objele); } + addReplyBulk(c,objele); + + /* Replicate/AOF this command as an SREM operation */ + propargv[2] = objele; + alsoPropagate(server.sremCommand,c->db->id,propargv,3, + REDIS_PROPAGATE_AOF|REDIS_PROPAGATE_REPL); + + decrRefCount(objele); } setTypeReleaseIterator(si); decrRefCount(set); From dffbbb5a78154d42395284e9b8bf33b236a45599 Mon Sep 17 00:00:00 2001 From: antirez Date: Wed, 11 Feb 2015 10:12:00 +0100 Subject: [PATCH 0097/1928] SPOP explicit tests for the three different code paths. --- tests/unit/type/set.tcl | 27 +++++++++++++++++++++++++++ 1 file changed, 27 insertions(+) diff --git a/tests/unit/type/set.tcl b/tests/unit/type/set.tcl index 74a8fb318..a9a3d0835 100644 --- a/tests/unit/type/set.tcl +++ b/tests/unit/type/set.tcl @@ -346,6 +346,33 @@ start_server { r spop nonexisting_key 100 } {} + test "SPOP new implementation: code path #1" { + set content {1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20} + create_set myset $content + set res [r spop myset 30] + assert {[lsort $content] eq [lsort $res]} + } + + test "SPOP new implementation: code path #2" { + set content {1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20} + create_set myset $content + set res [r spop myset 2] + assert {[llength $res] == 2} + assert {[r scard myset] == 18} + set union [concat [r smembers myset] $res] + assert {[lsort $union] eq [lsort $content]} + } + + test "SPOP new implementation: code path #3" { + set content {1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20} + create_set myset $content + set res [r spop myset 18] + assert {[llength $res] == 18} + assert {[r scard myset] == 2} + set union [concat [r smembers myset] $res] + assert {[lsort $union] eq [lsort $content]} + } + test "SRANDMEMBER with against non existing key" { r srandmember nonexisting_key 100 } {} From a1d9ec0d448b3f0d41eff9c8ba48af3c47976ef2 Mon Sep 17 00:00:00 2001 From: antirez Date: Wed, 11 Feb 2015 10:45:01 +0100 Subject: [PATCH 0098/1928] SPOP replication tests. 
--- tests/integration/replication-4.tcl | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/tests/integration/replication-4.tcl b/tests/integration/replication-4.tcl index 6db9ffe2b..3af2e921d 100644 --- a/tests/integration/replication-4.tcl +++ b/tests/integration/replication-4.tcl @@ -132,5 +132,24 @@ start_server {tags {"repl"}} { } assert {[$master dbsize] > 0} } + + test {Replication of SPOP command -- alsoPropagate() API} { + $master del myset + set size [randomInt 100] + set content {} + for {set j 0} {$j < $size} {incr j} { + lappend content [randomValue] + } + $master sadd myset {*}$content + + set count [randomInt 100] + set result [$master spop myset $count] + + wait_for_condition 50 100 { + [$master debug digest] eq [$slave debug digest] + } else { + fail "SPOP replication inconsistency" + } + } } } From 2b3eba056cd1c36171436dad74bfeeedcdbed425 Mon Sep 17 00:00:00 2001 From: Masahiko Sawada Date: Thu, 12 Feb 2015 21:20:22 +0900 Subject: [PATCH 0099/1928] Unify to uppercase the headline --- redis.conf | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/redis.conf b/redis.conf index 38e258698..9e15769a9 100644 --- a/redis.conf +++ b/redis.conf @@ -781,7 +781,7 @@ slowlog-max-len 128 # "CONFIG SET latency-monitor-threshold " if needed. latency-monitor-threshold 0 -############################# Event notification ############################## +############################# EVENT NOTIFICATION ############################## # Redis can notify Pub/Sub clients about events happening in the key space. # This feature is documented at http://redis.io/topics/notifications From 29b54db32091fb04a75a9dc196adc492e4933493 Mon Sep 17 00:00:00 2001 From: antirez Date: Thu, 12 Feb 2015 16:40:41 +0100 Subject: [PATCH 0100/1928] Better memtoll() error checking. Related to PR #2357. --- src/util.c | 23 ++++++++++++++++++----- 1 file changed, 18 insertions(+), 5 deletions(-) diff --git a/src/util.c b/src/util.c index 543de112b..4190775b1 100644 --- a/src/util.c +++ b/src/util.c @@ -38,6 +38,7 @@ #include #include #include +#include #include "util.h" #include "sha1.h" @@ -170,11 +171,12 @@ int stringmatch(const char *pattern, const char *string, int nocase) { } /* Convert a string representing an amount of memory into the number of - * bytes, so for instance memtoll("1Gi") will return 1073741824 that is + * bytes, so for instance memtoll("1Gb") will return 1073741824 that is * (1024*1024*1024). * * On parsing error, if *err is not NULL, it's set to 1, otherwise it's - * set to 0 */ + * set to 0. On error the function return value is 0, regardless of the + * fact 'err' is NULL or not. */ long long memtoll(const char *p, int *err) { const char *u; char buf[128]; @@ -183,6 +185,7 @@ long long memtoll(const char *p, int *err) { unsigned int digits; if (err) *err = 0; + /* Search the first non digit character. */ u = p; if (*u == '-') u++; @@ -203,16 +206,26 @@ long long memtoll(const char *p, int *err) { mul = 1024L*1024*1024; } else { if (err) *err = 1; - mul = 1; + return 0; } + + /* Copy the digits into a buffer, we'll use strtoll() to convert + * the digit (without the unit) into a number. 
*/ digits = u-p; if (digits >= sizeof(buf)) { if (err) *err = 1; - return LLONG_MAX; + return 0; } memcpy(buf,p,digits); buf[digits] = '\0'; - val = strtoll(buf,NULL,10); + + char *endptr; + errno = 0; + val = strtoll(buf,&endptr,10); + if ((val == 0 && errno == EINVAL) || *endptr != '\0') { + if (err) *err = 1; + return 0; + } return val*mul; } From cd4e0f43dfa02f718ee2e836ea96bd262c4cf28e Mon Sep 17 00:00:00 2001 From: antirez Date: Thu, 12 Feb 2015 16:51:20 +0100 Subject: [PATCH 0101/1928] Extend memory unit support in CONFIG SET. Related to PR #2357. --- src/config.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/src/config.c b/src/config.c index 927d7e245..3963a1218 100644 --- a/src/config.c +++ b/src/config.c @@ -624,6 +624,7 @@ void loadServerConfig(char *filename, char *options) { void configSetCommand(redisClient *c) { robj *o; long long ll; + int err; redisAssertWithInfo(c,c->argv[2],sdsEncodedObject(c->argv[2])); redisAssertWithInfo(c,c->argv[3],sdsEncodedObject(c->argv[3])); o = c->argv[3]; @@ -643,7 +644,6 @@ void configSetCommand(redisClient *c) { zfree(server.masterauth); server.masterauth = ((char*)o->ptr)[0] ? zstrdup(o->ptr) : NULL; } else if (!strcasecmp(c->argv[2]->ptr,"maxmemory")) { - int err; ll = memtoll(o->ptr,&err); if (err || ll < 0) goto badfmt; server.maxmemory = ll; @@ -867,7 +867,6 @@ void configSetCommand(redisClient *c) { * whole configuration string or accept it all, even if a single * error in a single client class is present. */ for (j = 0; j < vlen; j++) { - char *eptr; long val; if ((j % 4) == 0) { @@ -876,8 +875,8 @@ void configSetCommand(redisClient *c) { goto badfmt; } } else { - val = strtoll(v[j], &eptr, 10); - if (eptr[0] != '\0' || val < 0) { + val = memtoll(v[j], &err); + if (err || val < 0) { sdsfreesplitres(v,vlen); goto badfmt; } @@ -911,7 +910,8 @@ void configSetCommand(redisClient *c) { if (getLongLongFromObject(o,&ll) == REDIS_ERR || ll <= 0) goto badfmt; server.repl_timeout = ll; } else if (!strcasecmp(c->argv[2]->ptr,"repl-backlog-size")) { - if (getLongLongFromObject(o,&ll) == REDIS_ERR || ll <= 0) goto badfmt; + ll = memtoll(o->ptr,&err); + if (err || ll < 0) goto badfmt; resizeReplicationBacklog(ll); } else if (!strcasecmp(c->argv[2]->ptr,"repl-backlog-ttl")) { if (getLongLongFromObject(o,&ll) == REDIS_ERR || ll < 0) goto badfmt; From 3101d2bffb9437a3a82920ef48c51955783a7070 Mon Sep 17 00:00:00 2001 From: antirez Date: Fri, 13 Feb 2015 12:44:40 +0100 Subject: [PATCH 0102/1928] redis-cli --latency-dist, hopefully better palette. Less grays: more readable palette since usually we have a non linear distribution of percentages and very near gray tones are hard to take apart. Final part of the palette is gradient from yellow to red. The red part is hardly reached because of usual distribution of latencies, but shows up mainly when latencies are very high because of the logarithmic scale, this is coherent to what people expect: red = bad. --- src/redis-cli.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/redis-cli.c b/src/redis-cli.c index 8db34bcaf..e8d4bce55 100644 --- a/src/redis-cli.c +++ b/src/redis-cli.c @@ -1133,8 +1133,8 @@ static void latencyMode(void) { #define LATENCY_DIST_GRAYS (LATENCY_DIST_MAX_GRAY-LATENCY_DIST_MIN_GRAY+1) /* Gray palette. 
*/ -int spectrum_palette_size = 24; -int spectrum_palette[] = {0, 233,234,235,236,237,238,239,240,241,242,243,244,245,246,247,248,249,250,251,252,253,254,255}; +int spectrum_palette_size = 18; +int spectrum_palette[] = {0, 233,235,237,239,241,243,245,247,144,143,142,184,226,214,208,202,196}; /* Structure to store samples distribution. */ struct distsamples { From f638f045ce4d54b04d2e089bb3a57b6d4740c3f5 Mon Sep 17 00:00:00 2001 From: antirez Date: Fri, 13 Feb 2015 14:38:21 +0100 Subject: [PATCH 0103/1928] redis-cli --latency-dist: one gray more, and --mono support. --- src/redis-cli.c | 24 +++++++++++++++++------- 1 file changed, 17 insertions(+), 7 deletions(-) diff --git a/src/redis-cli.c b/src/redis-cli.c index e8d4bce55..251e42fad 100644 --- a/src/redis-cli.c +++ b/src/redis-cli.c @@ -64,6 +64,17 @@ #define REDIS_CLI_HISTFILE_ENV "REDISCLI_HISTFILE" #define REDIS_CLI_HISTFILE_DEFAULT ".rediscli_history" +/* --latency-dist palettes. */ +int spectrum_palette_color_size = 19; +int spectrum_palette_color[] = {0,233,234,235,237,239,241,243,245,247,144,143,142,184,226,214,208,202,196}; + +int spectrum_palette_mono_size = 13; +int spectrum_palette_mono[] = {0,233,234,235,237,239,241,243,245,247,249,251,253}; + +/* The actual palette in use. */ +int *spectrum_palette; +int spectrum_palette_size; + static redisContext *context; static struct config { char *hostip; @@ -780,6 +791,9 @@ static int parseOptions(int argc, char **argv) { config.latency_mode = 1; } else if (!strcmp(argv[i],"--latency-dist")) { config.latency_dist_mode = 1; + } else if (!strcmp(argv[i],"--mono")) { + spectrum_palette = spectrum_palette_mono; + spectrum_palette_size = spectrum_palette_mono_size; } else if (!strcmp(argv[i],"--latency-history")) { config.latency_mode = 1; config.latency_history = 1; @@ -1128,13 +1142,6 @@ static void latencyMode(void) { *--------------------------------------------------------------------------- */ #define LATENCY_DIST_DEFAULT_INTERVAL 1000 /* milliseconds. */ -#define LATENCY_DIST_MIN_GRAY 233 /* Less than that is too hard to see gray. */ -#define LATENCY_DIST_MAX_GRAY 255 -#define LATENCY_DIST_GRAYS (LATENCY_DIST_MAX_GRAY-LATENCY_DIST_MIN_GRAY+1) - -/* Gray palette. */ -int spectrum_palette_size = 18; -int spectrum_palette[] = {0, 233,235,237,239,241,243,245,247,144,143,142,184,226,214,208,202,196}; /* Structure to store samples distribution. */ struct distsamples { @@ -2200,6 +2207,9 @@ int main(int argc, char **argv) { config.eval = NULL; config.last_cmd_type = -1; + spectrum_palette = spectrum_palette_color; + spectrum_palette_size = spectrum_palette_color_size; + if (!isatty(fileno(stdout)) && (getenv("FAKETTY") == NULL)) config.output = OUTPUT_RAW; else From cac9a900cb7e44b73d5d212309472b26c2a23765 Mon Sep 17 00:00:00 2001 From: antirez Date: Fri, 13 Feb 2015 14:55:09 +0100 Subject: [PATCH 0104/1928] README -> README.md. --- README => README.md | 21 ++++++++------------- 1 file changed, 8 insertions(+), 13 deletions(-) rename README => README.md (88%) diff --git a/README b/README.md similarity index 88% rename from README rename to README.md index 369118631..5fa34d2e5 100644 --- a/README +++ b/README.md @@ -1,14 +1,11 @@ -Where to find complete Redis documentation? -------------------------------------------- - -This README is just a fast "quick start" document. You can find more detailed -documentation at http://redis.io +NOte: This README is just a fast "quick start" document. 
You can find more detailed documentation at http://redis.io Building Redis -------------- Redis can be compiled and used on Linux, OSX, OpenBSD, NetBSD, FreeBSD. -We support big endian and little endian architectures. +We support big endian and little endian architectures, and both 32 bit +and 64 bit systems. It may compile on Solaris derived systems (for instance SmartOS) but our support for this platform is "best effort" and Redis is not guaranteed to @@ -28,8 +25,8 @@ After building Redis is a good idea to test it, using: Fixing build problems with dependencies or cached build options —-------- -Redis has some dependencies which are included into the "deps" directory. -"make" does not rebuild dependencies automatically, even if something in the +Redis has some dependencies which are included into the `deps` directory. +`make` does not rebuild dependencies automatically, even if something in the source code of dependencies is changes. When you update the source code with `git pull` or when code inside the @@ -128,9 +125,7 @@ then in another terminal try the following: (integer) 2 redis> -You can find the list of all the available commands here: - - http://redis.io/commands +You can find the list of all the available commands at http://redis.io/commands. Installing Redis ----------------- @@ -139,7 +134,7 @@ In order to install Redis binaries into /usr/local/bin just use: % make install -You can use "make PREFIX=/some/other/directory install" if you wish to use a +You can use `make PREFIX=/some/other/directory install` if you wish to use a different destination. Make install will just install binaries in your system, but will not configure @@ -156,7 +151,7 @@ to run Redis properly as a background daemon that will start again on system reboots. You'll be able to stop and start Redis using the script named -/etc/init.d/redis_, for instance /etc/init.d/redis_6379. +`/etc/init.d/redis_`, for instance `/etc/init.d/redis_6379`. Code contributions --- From 097b3079b4be704879505a51189a7e7f7028457d Mon Sep 17 00:00:00 2001 From: antirez Date: Fri, 13 Feb 2015 14:55:53 +0100 Subject: [PATCH 0105/1928] Fix README minor grammar issues. --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 5fa34d2e5..1aef03559 100644 --- a/README.md +++ b/README.md @@ -1,4 +1,4 @@ -NOte: This README is just a fast "quick start" document. You can find more detailed documentation at http://redis.io +This README is just a fast "quick start" document. You can find more detailed documentation at http://redis.io. Building Redis -------------- From c414db2f98b4d78f7b0dc8205f870b4b77346c99 Mon Sep 17 00:00:00 2001 From: antirez Date: Fri, 13 Feb 2015 14:57:05 +0100 Subject: [PATCH 0106/1928] A few more README markdown conversions. --- README.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/README.md b/README.md index 1aef03559..e4386baa0 100644 --- a/README.md +++ b/README.md @@ -1,4 +1,4 @@ -This README is just a fast "quick start" document. You can find more detailed documentation at http://redis.io. +This README is just a fast *quick start* document. You can find more detailed documentation at http://redis.io. Building Redis -------------- @@ -8,8 +8,8 @@ We support big endian and little endian architectures, and both 32 bit and 64 bit systems. It may compile on Solaris derived systems (for instance SmartOS) but our -support for this platform is "best effort" and Redis is not guaranteed to -work as well as in Linux, OSX, and *BSD there. 
+support for this platform is *best effort* and Redis is not guaranteed to +work as well as in Linux, OSX, and `*`BSD there. It is as simple as: From 2576864ba7d26ee5ce2de883b06a41131c4babd9 Mon Sep 17 00:00:00 2001 From: Jan-Erik Rediger Date: Fri, 13 Feb 2015 15:10:20 +0100 Subject: [PATCH 0107/1928] Use code tags where necessary, escape special markers --- README.md | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/README.md b/README.md index e4386baa0..e5ffe75ae 100644 --- a/README.md +++ b/README.md @@ -9,7 +9,7 @@ and 64 bit systems. It may compile on Solaris derived systems (for instance SmartOS) but our support for this platform is *best effort* and Redis is not guaranteed to -work as well as in Linux, OSX, and `*`BSD there. +work as well as in Linux, OSX, and \*BSD there. It is as simple as: @@ -24,7 +24,8 @@ After building Redis is a good idea to test it, using: % make test Fixing build problems with dependencies or cached build options -—-------- +--------- + Redis has some dependencies which are included into the `deps` directory. `make` does not rebuild dependencies automatically, even if something in the source code of dependencies is changes. @@ -39,7 +40,7 @@ This will clean: jemalloc, lua, hiredis, linenoise. Also if you force certain build options like 32bit target, no C compiler optimizations (for debugging purposes), and other similar build time options, -those options are cached indefinitely until you issue a "make distclean" +those options are cached indefinitely until you issue a `make distclean` command. Fixing problems building 32 bit binaries @@ -47,15 +48,14 @@ Fixing problems building 32 bit binaries If after building Redis with a 32 bit target you need to rebuild it with a 64 bit target, or the other way around, you need to perform a -"make distclean" in the root directory of the Redis distribution. +`make distclean` in the root directory of the Redis distribution. In case of build errors when trying to build a 32 bit binary of Redis, try the following steps: * Install the packages libc6-dev-i386 (also try g++-multilib). -* Try using the following command line instead of "make 32bit": - - make CFLAGS="-m32 -march=native" LDFLAGS="-m32" +* Try using the following command line instead of `make 32bit`: + `make CFLAGS="-m32 -march=native" LDFLAGS="-m32"` Allocator --------- @@ -123,7 +123,7 @@ then in another terminal try the following: (integer) 1 redis> incr mycounter (integer) 2 - redis> + redis> You can find the list of all the available commands at http://redis.io/commands. From f916a589b5e423a63269a3237da5cc10040ce298 Mon Sep 17 00:00:00 2001 From: antirez Date: Fri, 13 Feb 2015 15:34:22 +0100 Subject: [PATCH 0108/1928] README: give idea about what Redis is. --- README.md | 25 +++++++++++++++++++++++++ 1 file changed, 25 insertions(+) diff --git a/README.md b/README.md index e4386baa0..55c4b35e7 100644 --- a/README.md +++ b/README.md @@ -1,5 +1,30 @@ This README is just a fast *quick start* document. You can find more detailed documentation at http://redis.io. +What is Redis? +-------------- + +Redis is often referred as a *data structures* server. What this means is that Redis provides access to mutable data structures via a set of commands, which are send using a *server-client* model with TCP sockets and a simple protocol. So different processes can query and modify the same data structures in a shared way. 
+ +Data structures implemented into Redis have a few special properties: + +* Redis cares to store them on disk, even if they are always served and modified into the server memory. This means that Redis is fast, but that is also non-volatile. +* Implementation of data structures stress on memory efficiency, so data structures inside Redis will likely use less memory compared to the same data structure modeled using an high level programming language. +* Redis offers a number of features that are natural to find into a database, like replication, tunable levels of durability, cluster, high availability. + +Another good example is to think at Redis as a more complex version of memcached, where the opeations are not just SETs and GETs, but operations to work with complex data types like Lists, Sets, ordered data structures, and so forth. + +If you want to know more, this is a list of selected starting points: + +* [Introduction to Redis data types][1] +* [Try Redis directly inside your browser][2] +* [The full list of Redis commands][3] +* [There is much more inside the Redis official documentation][4] + +[1] http://redis.io/topics/data-types-intro +[2] http://try.redis.io +[3] http://redis.io/commands +[4] http://redis.io/documentation + Building Redis -------------- From 78c44ca6464de71e31021b6dc8355429cd703718 Mon Sep 17 00:00:00 2001 From: Salvatore Sanfilippo Date: Fri, 13 Feb 2015 15:37:19 +0100 Subject: [PATCH 0109/1928] Fix links into README. --- README.md | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/README.md b/README.md index 55c4b35e7..34a41d103 100644 --- a/README.md +++ b/README.md @@ -15,10 +15,10 @@ Another good example is to think at Redis as a more complex version of memcached If you want to know more, this is a list of selected starting points: -* [Introduction to Redis data types][1] -* [Try Redis directly inside your browser][2] -* [The full list of Redis commands][3] -* [There is much more inside the Redis official documentation][4] +* [Introduction to Redis data types](1) +* [Try Redis directly inside your browser](2) +* [The full list of Redis commands](3) +* [There is much more inside the Redis official documentation](4) [1] http://redis.io/topics/data-types-intro [2] http://try.redis.io From 9795ad1bc25d30fc12f7c90824937a13eb9dcf5f Mon Sep 17 00:00:00 2001 From: antirez Date: Fri, 13 Feb 2015 15:39:44 +0100 Subject: [PATCH 0110/1928] Fix README links again. Apparently no refernece-style links supported in Github markdown. --- README.md | 13 ++++--------- 1 file changed, 4 insertions(+), 9 deletions(-) diff --git a/README.md b/README.md index 34a41d103..d40253322 100644 --- a/README.md +++ b/README.md @@ -15,15 +15,10 @@ Another good example is to think at Redis as a more complex version of memcached If you want to know more, this is a list of selected starting points: -* [Introduction to Redis data types](1) -* [Try Redis directly inside your browser](2) -* [The full list of Redis commands](3) -* [There is much more inside the Redis official documentation](4) - -[1] http://redis.io/topics/data-types-intro -[2] http://try.redis.io -[3] http://redis.io/commands -[4] http://redis.io/documentation +* Introduction to Redis data types. http://redis.io/topics/data-types-intro +* Try Redis directly inside your browser. http://try.redis.io +* The full list of Redis commands. http://redis.io/commands +* There is much more inside the Redis official documentation. 
http://redis.io/documentation Building Redis -------------- From 07a89654dae420fc060743c101837e8383c69537 Mon Sep 17 00:00:00 2001 From: Jan-Erik Rediger Date: Fri, 13 Feb 2015 15:10:46 +0100 Subject: [PATCH 0111/1928] Link files in README --- README.md | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index e5ffe75ae..a0c242349 100644 --- a/README.md +++ b/README.md @@ -159,10 +159,13 @@ Code contributions Note: by contributing code to the Redis project in any form, including sending a pull request via Github, a code fragment or patch via private email or public discussion groups, you agree to release your code under the terms -of the BSD license that you can find in the COPYING file included in the Redis +of the BSD license that you can find in the [COPYING][1] file included in the Redis source distribution. -Please see the CONTRIBUTING file in this source distribution for more +Please see the [CONTRIBUTING][2] file in this source distribution for more information. Enjoy! + +[1]: https://github.com/antirez/redis/blob/unstable/COPYING +[2]: https://github.com/antirez/redis/blob/unstable/CONTRIBUTING From 41a1fb8186f2687fb738fe5cbab74e02595474e4 Mon Sep 17 00:00:00 2001 From: antirez Date: Sat, 14 Feb 2015 17:19:17 +0100 Subject: [PATCH 0112/1928] Version bumped to 3.1.999 (3.2 alpha versions). --- src/version.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/version.h b/src/version.h index 00cbae681..dbb6197c8 100644 --- a/src/version.h +++ b/src/version.h @@ -1 +1 @@ -#define REDIS_VERSION "2.9.999" +#define REDIS_VERSION "3.1.999" From 395e11255cc924c2ee4cc4ddcbf30d269b193316 Mon Sep 17 00:00:00 2001 From: John Doe Date: Tue, 17 Feb 2015 00:03:21 +0800 Subject: [PATCH 0113/1928] Fix quicklist Pop() result Closes #2398 --- src/quicklist.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/quicklist.c b/src/quicklist.c index 6682b2087..d2371a9b5 100644 --- a/src/quicklist.c +++ b/src/quicklist.c @@ -1372,7 +1372,7 @@ REDIS_STATIC void *_quicklistSaver(unsigned char *data, unsigned int sz) { unsigned char *vstr; if (data) { vstr = zmalloc(sz); - memcpy(data, vstr, sz); + memcpy(vstr, data, sz); return vstr; } return NULL; From 552e5908bb0dc7e9c7db20db20315e961612db33 Mon Sep 17 00:00:00 2001 From: Matt Stancliff Date: Tue, 17 Feb 2015 11:28:45 -0500 Subject: [PATCH 0114/1928] Fix quicklist tests for Pop() Now the tests actually compare return values instead of just verifying _something_ got returned. 
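The Pop() fix in patch 0113 above comes down to memcpy() argument order: the destination buffer comes first. A minimal standalone sketch of the corrected copy direction (not Redis source; plain malloc() stands in for zmalloc()):

    #include <stdlib.h>
    #include <string.h>

    /* Allocate a private copy of the popped value and copy INTO it. The
     * pre-fix code had the memcpy() arguments reversed, so it copied the
     * uninitialized buffer over the caller's data instead. */
    static void *saver_sketch(const unsigned char *data, unsigned int sz) {
        unsigned char *copy;
        if (data == NULL) return NULL;
        copy = malloc(sz);          /* stand-in for zmalloc() */
        if (copy == NULL) return NULL;
        memcpy(copy, data, sz);     /* destination first, source second */
        return copy;
    }

    int main(void) {
        unsigned char src[] = "hello";
        void *dup = saver_sketch(src, sizeof(src));
        free(dup);
        return 0;
    }

The tests added below catch exactly this class of regression by comparing the popped bytes against the original genstr() values instead of only checking that something non-NULL came back.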
--- src/quicklist.c | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/src/quicklist.c b/src/quicklist.c index d2371a9b5..be02e3276 100644 --- a/src/quicklist.c +++ b/src/quicklist.c @@ -1757,7 +1757,8 @@ int quicklistTest(int argc, char *argv[]) { TEST("pop 1 string from 1") { quicklist *ql = quicklistNew(-2, options[_i]); - quicklistPushHead(ql, genstr("hello", 331), 32); + char *populate = genstr("hello", 331); + quicklistPushHead(ql, populate, 32); unsigned char *data; unsigned int sz; long long lv; @@ -1765,6 +1766,9 @@ int quicklistTest(int argc, char *argv[]) { quicklistPop(ql, QUICKLIST_HEAD, &data, &sz, &lv); assert(data != NULL); assert(sz == 32); + if (strcmp(populate, (char *)data)) + ERR("Pop'd value (%.*s) didn't equal original value (%s)", sz, + data, populate); zfree(data); ql_verify(ql, 0, 0, 0, 0); quicklistRelease(ql); @@ -1797,6 +1801,9 @@ int quicklistTest(int argc, char *argv[]) { assert(ret == 1); assert(data != NULL); assert(sz == 32); + if (strcmp(genstr("hello", 499 - i), (char *)data)) + ERR("Pop'd value (%.*s) didn't equal original value (%s)", + sz, data, genstr("hello", 499 - i)); zfree(data); } ql_verify(ql, 0, 0, 0, 0); @@ -1816,6 +1823,10 @@ int quicklistTest(int argc, char *argv[]) { assert(ret == 1); assert(data != NULL); assert(sz == 32); + if (strcmp(genstr("hello", 499 - i), (char *)data)) + ERR("Pop'd value (%.*s) didn't equal original value " + "(%s)", + sz, data, genstr("hello", 499 - i)); zfree(data); } else { assert(ret == 0); From acb933a747ea5ecc85d984867e8627a813ffe2c1 Mon Sep 17 00:00:00 2001 From: Sisir Koppaka Date: Wed, 18 Feb 2015 08:16:41 -0500 Subject: [PATCH 0115/1928] rehashing.c: Fix compile error originating from SPOP rewrite --- utils/hashtable/rehashing.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utils/hashtable/rehashing.c b/utils/hashtable/rehashing.c index df1f52bb1..c900a8d2f 100644 --- a/utils/hashtable/rehashing.c +++ b/utils/hashtable/rehashing.c @@ -73,7 +73,7 @@ void stressGetKeys(dict *d, int times) { dictEntry **des = zmalloc(sizeof(dictEntry*)*dictSize(d)); for (j = 0; j < times; j++) { int requested = rand() % (dictSize(d)+1); - int returned = dictGetRandomKeys(d, des, requested); + int returned = dictGetSomeKeys(d, des, requested); if (requested != returned) { printf("*** ERROR! Req: %d, Ret: %d\n", requested, returned); exit(1); From 1024664247a0853569bcd8abe1127cd107a20fc8 Mon Sep 17 00:00:00 2001 From: antirez Date: Thu, 19 Feb 2015 22:42:31 +0100 Subject: [PATCH 0116/1928] Example redis.conf binds just to 127.0.0.1. It's hard to pick a good approach here. A few arguments: 1) There are many exposed instances on the internet. 2) Changing the default when "bind" is not given is very dangerous, after an upgrade the server changes a fundamental behavior. 3) Usually Redis, when used in a proper way, will be protected *and* accessed often from other computers, so this new default is likely not what most people want. 4) However if users end with this default, they are using the example redis.conf: likely they are reading what is inside, and they'll see the warning. 
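The practical effect is easiest to read off the shipped redis.conf; a minimal sketch of the new default and of how an operator who really wants a remotely reachable instance would widen it (the addresses are the illustrative ones from the config comments, not a recommendation):

    # New default: listen on the IPv4 loopback interface only.
    bind 127.0.0.1

    # To accept connections from other hosts, list the interfaces explicitly
    # (example addresses):
    # bind 192.168.1.100 10.0.0.1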
--- redis.conf | 70 ++++++++++++++++++++++++++++++++---------------------- 1 file changed, 42 insertions(+), 28 deletions(-) diff --git a/redis.conf b/redis.conf index 9e15769a9..eb67583c4 100644 --- a/redis.conf +++ b/redis.conf @@ -30,26 +30,27 @@ # include /path/to/local.conf # include /path/to/other.conf -################################ GENERAL ##################################### +################################## NETWORK ##################################### -# By default Redis does not run as a daemon. Use 'yes' if you need it. -# Note that Redis will write a pid file in /var/run/redis.pid when daemonized. -daemonize no - -# If you run Redis from upstart or systemd, Redis can interact with your -# supervision tree. Options: -# supervised no - no supervision interaction -# supervised upstart - signal upstart by putting Redis into SIGSTOP mode -# supervised systemd - signal systemd by writing READY=1 to $NOTIFY_SOCKET -# supervised auto - detect upstart or systemd method based on -# UPSTART_JOB or NOTIFY_SOCKET environment variables -# Note: these supervision methods only signal "process is ready." -# They do not enable continuous liveness pings back to your supervisor. -supervised no - -# When running daemonized, Redis writes a pid file in /var/run/redis.pid by -# default. You can specify a custom pid file location here. -pidfile /var/run/redis.pid +# By default, if no "bind" configuration directive is specified, Redis listens +# for connections from all the network interfaces available on the server. +# It is possible to listen to just one or multiple selected interfaces using +# the "bind" configuration directive, followed by one or more IP addresses. +# +# Examples: +# +# bind 192.168.1.100 10.0.0.1 +# bind 127.0.0.1 ::1 +# +# ~~~ WARNING ~~~ If the computer running Redis is directly exposed to the +# internet, binding to all the interfaces is dangerous and will expose the +# instance to everybody on the internet. So by default we uncomment the +# following bind directive, that will force Redis to listen only into +# the IPv4 lookback interface address (this means Redis will be able to +# accept connections only from clients running into the same computer it +# is running). +# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +bind 127.0.0.1 # Accept connections on the specified port, default is 6379. # If port 0 is specified Redis will not listen on a TCP socket. @@ -64,16 +65,8 @@ port 6379 # in order to get the desired effect. tcp-backlog 511 -# By default Redis listens for connections from all the network interfaces -# available on the server. It is possible to listen to just one or multiple -# interfaces using the "bind" configuration directive, followed by one or -# more IP addresses. +# Unix socket. # -# Examples: -# -# bind 192.168.1.100 10.0.0.1 -# bind 127.0.0.1 - # Specify the path for the Unix socket that will be used to listen for # incoming connections. There is no default, so Redis will not listen # on a unix socket when not specified. @@ -100,6 +93,27 @@ timeout 0 # A reasonable value for this option is 60 seconds. tcp-keepalive 0 +################################# GENERAL ##################################### + +# By default Redis does not run as a daemon. Use 'yes' if you need it. +# Note that Redis will write a pid file in /var/run/redis.pid when daemonized. +daemonize no + +# If you run Redis from upstart or systemd, Redis can interact with your +# supervision tree. 
Options: +# supervised no - no supervision interaction +# supervised upstart - signal upstart by putting Redis into SIGSTOP mode +# supervised systemd - signal systemd by writing READY=1 to $NOTIFY_SOCKET +# supervised auto - detect upstart or systemd method based on +# UPSTART_JOB or NOTIFY_SOCKET environment variables +# Note: these supervision methods only signal "process is ready." +# They do not enable continuous liveness pings back to your supervisor. +supervised no + +# When running daemonized, Redis writes a pid file in /var/run/redis.pid by +# default. You can specify a custom pid file location here. +pidfile /var/run/redis.pid + # Specify the server verbosity level. # This can be one of: # debug (a lot of information, useful for development/testing) From 0aa5acc8f31a45ba4ee625227bae80e125fd8bdb Mon Sep 17 00:00:00 2001 From: antirez Date: Thu, 19 Feb 2015 22:47:32 +0100 Subject: [PATCH 0117/1928] Give obvious instructions on how to bind all interfaces. --- redis.conf | 3 +++ 1 file changed, 3 insertions(+) diff --git a/redis.conf b/redis.conf index eb67583c4..d0684e86a 100644 --- a/redis.conf +++ b/redis.conf @@ -49,6 +49,9 @@ # the IPv4 lookback interface address (this means Redis will be able to # accept connections only from clients running into the same computer it # is running). +# +# IF YOU ARE SURE YOU WANT YOUR INSTANCE TO LISTEN TO ALL THE INTERFACES +# JUST UNCOMMENT THE FOLLOWING LINE. # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ bind 127.0.0.1 From db9461e466afeeb9b50a80b78934144922482f5b Mon Sep 17 00:00:00 2001 From: Jason Roth Date: Sat, 21 Feb 2015 15:01:18 +0000 Subject: [PATCH 0118/1928] added a new hvstrlen command the hvstrlen command returns the length of a hash field value --- src/redis.c | 1 + src/redis.h | 1 + src/t_hash.c | 14 ++++++++++++++ tests/unit/type/hash.tcl | 31 +++++++++++++++++++++++++++++++ 4 files changed, 47 insertions(+) diff --git a/src/redis.c b/src/redis.c index db3af61b0..c7206cdfc 100644 --- a/src/redis.c +++ b/src/redis.c @@ -202,6 +202,7 @@ struct redisCommand redisCommandTable[] = { {"hincrbyfloat",hincrbyfloatCommand,4,"wmF",0,NULL,1,1,1,0,0}, {"hdel",hdelCommand,-3,"wF",0,NULL,1,1,1,0,0}, {"hlen",hlenCommand,2,"rF",0,NULL,1,1,1,0,0}, + {"hvstrlen",hvstrlenCommand,3,"rF",0,NULL,1,1,1,0,0}, {"hkeys",hkeysCommand,2,"rS",0,NULL,1,1,1,0,0}, {"hvals",hvalsCommand,2,"rS",0,NULL,1,1,1,0,0}, {"hgetall",hgetallCommand,2,"r",0,NULL,1,1,1,0,0}, diff --git a/src/redis.h b/src/redis.h index a675d4f12..71b98a6e5 100644 --- a/src/redis.h +++ b/src/redis.h @@ -1516,6 +1516,7 @@ void hmsetCommand(redisClient *c); void hmgetCommand(redisClient *c); void hdelCommand(redisClient *c); void hlenCommand(redisClient *c); +void hvstrlenCommand(redisClient *c); void zremrangebyrankCommand(redisClient *c); void zunionstoreCommand(redisClient *c); void zinterstoreCommand(redisClient *c); diff --git a/src/t_hash.c b/src/t_hash.c index 7f33bba0c..ea4f3a704 100644 --- a/src/t_hash.c +++ b/src/t_hash.c @@ -685,6 +685,20 @@ void hlenCommand(redisClient *c) { addReplyLongLong(c,hashTypeLength(o)); } +void hvstrlenCommand(redisClient *c) { + robj *o; + robj *value; + if ((o = lookupKeyReadOrReply(c,c->argv[1],shared.nullbulk)) == NULL || + checkType(c,o,REDIS_HASH)) return; + + if ((value = hashTypeGetObject(o,c->argv[2])) == NULL) { + addReply(c, shared.nullbulk); + } else { + addReplyLongLong(c,stringObjectLen(value)); + decrRefCount(value); + } +} + static void addHashIteratorCursorToReply(redisClient *c, hashTypeIterator *hi, int 
what) { if (hi->encoding == REDIS_ENCODING_ZIPLIST) { unsigned char *vstr = NULL; diff --git a/tests/unit/type/hash.tcl b/tests/unit/type/hash.tcl index fa52afd16..3d9be1fcc 100644 --- a/tests/unit/type/hash.tcl +++ b/tests/unit/type/hash.tcl @@ -390,6 +390,37 @@ start_server {tags {"hash"}} { lappend rv [string match "ERR*not*float*" $bigerr] } {1 1} + test {HVSTRLEN against the small hash} { + set err {} + foreach k [array names smallhash *] { + if {[string length $smallhash($k)] ne [r hvstrlen smallhash $k]} { + set err "[string length $smallhash($k)] != [r hvstrlen smallhash $k]" + break + } + } + set _ $err + } {} + + test {HVSTRLEN against the big hash} { + set err {} + foreach k [array names bighash *] { + if {[string length $bighash($k)] ne [r hvstrlen bighash $k]} { + set err "[string length $bighash($k)] != [r hvstrlen bighash $k]" + break + } + } + set _ $err + } {} + + test {HVSTRLEN against non existing key} { + set rv {} + lappend rv [r hvstrlen smallhash __123123123__] + lappend rv [r hvstrlen bighash __123123123__] + set _ $rv + + } {{} {}} + + test {Hash ziplist regression test for large keys} { r hset hash kkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkk a r hset hash kkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkk b From fc365a3a001d0ec55e6bde914708911aa1b524c4 Mon Sep 17 00:00:00 2001 From: antirez Date: Mon, 23 Feb 2015 11:24:24 +0100 Subject: [PATCH 0119/1928] Change RENAME behavior when src and dst keys are the same. Fixes issue #2392. --- src/db.c | 14 +++++++++----- tests/unit/basic.tcl | 13 ++++++++++++- 2 files changed, 21 insertions(+), 6 deletions(-) diff --git a/src/db.c b/src/db.c index 69d1a7768..36650318a 100644 --- a/src/db.c +++ b/src/db.c @@ -688,16 +688,20 @@ void shutdownCommand(redisClient *c) { void renameGenericCommand(redisClient *c, int nx) { robj *o; long long expire; + int samekey = 0; - /* To use the same key as src and dst is probably an error */ - if (sdscmp(c->argv[1]->ptr,c->argv[2]->ptr) == 0) { - addReply(c,shared.sameobjecterr); - return; - } + /* When source and dest key is the same, no operation is performed, + * if the key exists, however we still return an error on unexisting key. */ + if (sdscmp(c->argv[1]->ptr,c->argv[2]->ptr) == 0) samekey = 1; if ((o = lookupKeyWriteOrReply(c,c->argv[1],shared.nokeyerr)) == NULL) return; + if (samekey) { + addReply(c,nx ? 
shared.czero : shared.ok); + return; + } + incrRefCount(o); expire = getExpire(c->db,c->argv[1]); if (lookupKeyWrite(c->db,c->argv[2]) != NULL) { diff --git a/tests/unit/basic.tcl b/tests/unit/basic.tcl index b0b3b9bac..fec0df5ec 100644 --- a/tests/unit/basic.tcl +++ b/tests/unit/basic.tcl @@ -368,7 +368,18 @@ start_server {tags {"basic"}} { format $err } {ERR*} - test {RENAME where source and dest key is the same} { + test {RENAME where source and dest key are the same (existing)} { + r set mykey foo + r rename mykey mykey + } {OK} + + test {RENAMENX where source and dest key are the same (existing)} { + r set mykey foo + r renamenx mykey mykey + } {0} + + test {RENAME where source and dest key are the same (non existing)} { + r del mykey catch {r rename mykey mykey} err format $err } {ERR*} From 47ab570441f8012fb9aa6a0b38606bc9462e2bd3 Mon Sep 17 00:00:00 2001 From: Matt Stancliff Date: Tue, 24 Feb 2015 11:31:04 -0500 Subject: [PATCH 0120/1928] Fix types broken by previous type cleanup Revert some size_t back to off_t Set reply_bytes needs to 64 bits everywhere Revert bufpos to int since it's a max of 16k into buf[] --- src/redis.h | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/redis.h b/src/redis.h index ed4c3a813..e248d2901 100644 --- a/src/redis.h +++ b/src/redis.h @@ -543,7 +543,7 @@ typedef struct redisClient { int multibulklen; /* number of multi bulk arguments left to read */ long bulklen; /* length of bulk argument in multi bulk request */ list *reply; - size_t reply_bytes; /* Tot bytes of objects in reply list */ + unsigned long long reply_bytes; /* Tot bytes of objects in reply list */ size_t sentlen; /* Amount of bytes already sent in the current buffer or object being sent. */ time_t ctime; /* Client creation time */ @@ -554,8 +554,8 @@ typedef struct redisClient { int replstate; /* replication state if this is a slave */ int repl_put_online_on_ack; /* Install slave write handler on ACK. */ int repldbfd; /* replication DB file descriptor */ - size_t repldboff; /* replication DB file offset */ - size_t repldbsize; /* replication DB file size */ + off_t repldboff; /* replication DB file offset */ + off_t repldbsize; /* replication DB file size */ sds replpreamble; /* replication DB preamble. */ long long reploff; /* replication offset if this is our master */ long long repl_ack_off; /* replication ack offset, if this is a slave */ @@ -572,7 +572,7 @@ typedef struct redisClient { sds peerid; /* Cached peer ID. */ /* Response buffer */ - size_t bufpos; + int bufpos; char buf[REDIS_REPLY_CHUNK_BYTES]; } redisClient; From 954151f62b79a86b90f8b840fe7c1d1df78c71c3 Mon Sep 17 00:00:00 2001 From: antirez Date: Wed, 25 Feb 2015 10:31:29 +0100 Subject: [PATCH 0121/1928] Test: split basic unit into string, incr, keyspace units. 
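Before the test reshuffle below, the RENAME semantics introduced in patch 0119 above are worth spelling out as an illustrative redis-cli session; the replies follow from the shared.ok, shared.czero and shared.nokeyerr replies used in that patch:

    redis> SET mykey foo
    OK
    redis> RENAME mykey mykey
    OK
    redis> RENAMENX mykey mykey
    (integer) 0
    redis> DEL mykey
    (integer) 1
    redis> RENAME mykey mykey
    (error) ERR no such key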
--- tests/test_helper.tcl | 4 +- tests/unit/keyspace.tcl | 249 ++++++++++++++ tests/unit/type/incr.tcl | 147 ++++++++ tests/unit/{basic.tcl => type/string.tcl} | 399 +--------------------- 4 files changed, 400 insertions(+), 399 deletions(-) create mode 100644 tests/unit/keyspace.tcl create mode 100644 tests/unit/type/incr.tcl rename tests/unit/{basic.tcl => type/string.tcl} (55%) diff --git a/tests/test_helper.tcl b/tests/test_helper.tcl index 7e9e2cfaa..914f6ba1e 100644 --- a/tests/test_helper.tcl +++ b/tests/test_helper.tcl @@ -16,8 +16,10 @@ set ::all_tests { unit/dump unit/auth unit/protocol - unit/basic + unit/keyspace unit/scan + unit/type/string + unit/type/incr unit/type/list unit/type/list-2 unit/type/list-3 diff --git a/tests/unit/keyspace.tcl b/tests/unit/keyspace.tcl new file mode 100644 index 000000000..e808aaf98 --- /dev/null +++ b/tests/unit/keyspace.tcl @@ -0,0 +1,249 @@ +start_server {tags {"keyspace"}} { + test {DEL against a single item} { + r set x foo + assert {[r get x] eq "foo"} + r del x + r get x + } {} + + test {Vararg DEL} { + r set foo1 a + r set foo2 b + r set foo3 c + list [r del foo1 foo2 foo3 foo4] [r mget foo1 foo2 foo3] + } {3 {{} {} {}}} + + test {KEYS with pattern} { + foreach key {key_x key_y key_z foo_a foo_b foo_c} { + r set $key hello + } + lsort [r keys foo*] + } {foo_a foo_b foo_c} + + test {KEYS to get all keys} { + lsort [r keys *] + } {foo_a foo_b foo_c key_x key_y key_z} + + test {DBSIZE} { + r dbsize + } {6} + + test {DEL all keys} { + foreach key [r keys *] {r del $key} + r dbsize + } {0} + + test "DEL against expired key" { + r debug set-active-expire 0 + r setex keyExpire 1 valExpire + after 1100 + assert_equal 0 [r del keyExpire] + r debug set-active-expire 1 + } + + test {EXISTS} { + set res {} + r set newkey test + append res [r exists newkey] + r del newkey + append res [r exists newkey] + } {10} + + test {Zero length value in key. 
SET/GET/EXISTS} { + r set emptykey {} + set res [r get emptykey] + append res [r exists emptykey] + r del emptykey + append res [r exists emptykey] + } {10} + + test {Commands pipelining} { + set fd [r channel] + puts -nonewline $fd "SET k1 xyzk\r\nGET k1\r\nPING\r\n" + flush $fd + set res {} + append res [string match OK* [r read]] + append res [r read] + append res [string match PONG* [r read]] + format $res + } {1xyzk1} + + test {Non existing command} { + catch {r foobaredcommand} err + string match ERR* $err + } {1} + + test {RENAME basic usage} { + r set mykey hello + r rename mykey mykey1 + r rename mykey1 mykey2 + r get mykey2 + } {hello} + + test {RENAME source key should no longer exist} { + r exists mykey + } {0} + + test {RENAME against already existing key} { + r set mykey a + r set mykey2 b + r rename mykey2 mykey + set res [r get mykey] + append res [r exists mykey2] + } {b0} + + test {RENAMENX basic usage} { + r del mykey + r del mykey2 + r set mykey foobar + r renamenx mykey mykey2 + set res [r get mykey2] + append res [r exists mykey] + } {foobar0} + + test {RENAMENX against already existing key} { + r set mykey foo + r set mykey2 bar + r renamenx mykey mykey2 + } {0} + + test {RENAMENX against already existing key (2)} { + set res [r get mykey] + append res [r get mykey2] + } {foobar} + + test {RENAME against non existing source key} { + catch {r rename nokey foobar} err + format $err + } {ERR*} + + test {RENAME where source and dest key are the same (existing)} { + r set mykey foo + r rename mykey mykey + } {OK} + + test {RENAMENX where source and dest key are the same (existing)} { + r set mykey foo + r renamenx mykey mykey + } {0} + + test {RENAME where source and dest key are the same (non existing)} { + r del mykey + catch {r rename mykey mykey} err + format $err + } {ERR*} + + test {RENAME with volatile key, should move the TTL as well} { + r del mykey mykey2 + r set mykey foo + r expire mykey 100 + assert {[r ttl mykey] > 95 && [r ttl mykey] <= 100} + r rename mykey mykey2 + assert {[r ttl mykey2] > 95 && [r ttl mykey2] <= 100} + } + + test {RENAME with volatile key, should not inherit TTL of target key} { + r del mykey mykey2 + r set mykey foo + r set mykey2 bar + r expire mykey2 100 + assert {[r ttl mykey] == -1 && [r ttl mykey2] > 0} + r rename mykey mykey2 + r ttl mykey2 + } {-1} + + test {DEL all keys again (DB 0)} { + foreach key [r keys *] { + r del $key + } + r dbsize + } {0} + + test {DEL all keys again (DB 1)} { + r select 10 + foreach key [r keys *] { + r del $key + } + set res [r dbsize] + r select 9 + format $res + } {0} + + test {MOVE basic usage} { + r set mykey foobar + r move mykey 10 + set res {} + lappend res [r exists mykey] + lappend res [r dbsize] + r select 10 + lappend res [r get mykey] + lappend res [r dbsize] + r select 9 + format $res + } [list 0 0 foobar 1] + + test {MOVE against key existing in the target DB} { + r set mykey hello + r move mykey 10 + } {0} + + test {MOVE against non-integer DB (#1428)} { + r set mykey hello + catch {r move mykey notanumber} e + set e + } {*ERR*index out of range} + + test {SET/GET keys in different DBs} { + r set a hello + r set b world + r select 10 + r set a foo + r set b bared + r select 9 + set res {} + lappend res [r get a] + lappend res [r get b] + r select 10 + lappend res [r get a] + lappend res [r get b] + r select 9 + format $res + } {hello world foo bared} + + test {RANDOMKEY} { + r flushdb + r set foo x + r set bar y + set foo_seen 0 + set bar_seen 0 + for {set i 0} {$i < 100} {incr i} { + 
set rkey [r randomkey] + if {$rkey eq {foo}} { + set foo_seen 1 + } + if {$rkey eq {bar}} { + set bar_seen 1 + } + } + list $foo_seen $bar_seen + } {1 1} + + test {RANDOMKEY against empty DB} { + r flushdb + r randomkey + } {} + + test {RANDOMKEY regression 1} { + r flushdb + r set x 10 + r del x + r randomkey + } {} + + test {KEYS * two times with long key, Github issue #1208} { + r flushdb + r set dlskeriewrioeuwqoirueioqwrueoqwrueqw test + r keys * + r keys * + } {dlskeriewrioeuwqoirueioqwrueoqwrueqw} +} diff --git a/tests/unit/type/incr.tcl b/tests/unit/type/incr.tcl new file mode 100644 index 000000000..2287aaae2 --- /dev/null +++ b/tests/unit/type/incr.tcl @@ -0,0 +1,147 @@ +start_server {tags {"incr"}} { + test {INCR against non existing key} { + set res {} + append res [r incr novar] + append res [r get novar] + } {11} + + test {INCR against key created by incr itself} { + r incr novar + } {2} + + test {INCR against key originally set with SET} { + r set novar 100 + r incr novar + } {101} + + test {INCR over 32bit value} { + r set novar 17179869184 + r incr novar + } {17179869185} + + test {INCRBY over 32bit value with over 32bit increment} { + r set novar 17179869184 + r incrby novar 17179869184 + } {34359738368} + + test {INCR fails against key with spaces (left)} { + r set novar " 11" + catch {r incr novar} err + format $err + } {ERR*} + + test {INCR fails against key with spaces (right)} { + r set novar "11 " + catch {r incr novar} err + format $err + } {ERR*} + + test {INCR fails against key with spaces (both)} { + r set novar " 11 " + catch {r incr novar} err + format $err + } {ERR*} + + test {INCR fails against a key holding a list} { + r rpush mylist 1 + catch {r incr mylist} err + r rpop mylist + format $err + } {WRONGTYPE*} + + test {DECRBY over 32bit value with over 32bit increment, negative res} { + r set novar 17179869184 + r decrby novar 17179869185 + } {-1} + + test {INCR uses shared objects in the 0-9999 range} { + r set foo -1 + r incr foo + assert {[r object refcount foo] > 1} + r set foo 9998 + r incr foo + assert {[r object refcount foo] > 1} + r incr foo + assert {[r object refcount foo] == 1} + } + + test {INCR can modify objects in-place} { + r set foo 20000 + r incr foo + assert {[r object refcount foo] == 1} + set old [lindex [split [r debug object foo]] 1] + r incr foo + set new [lindex [split [r debug object foo]] 1] + assert {[string range $old 0 2] eq "at:"} + assert {[string range $new 0 2] eq "at:"} + assert {$old eq $new} + } + + test {INCRBYFLOAT against non existing key} { + r del novar + list [roundFloat [r incrbyfloat novar 1]] \ + [roundFloat [r get novar]] \ + [roundFloat [r incrbyfloat novar 0.25]] \ + [roundFloat [r get novar]] + } {1 1 1.25 1.25} + + test {INCRBYFLOAT against key originally set with SET} { + r set novar 1.5 + roundFloat [r incrbyfloat novar 1.5] + } {3} + + test {INCRBYFLOAT over 32bit value} { + r set novar 17179869184 + r incrbyfloat novar 1.5 + } {17179869185.5} + + test {INCRBYFLOAT over 32bit value with over 32bit increment} { + r set novar 17179869184 + r incrbyfloat novar 17179869184 + } {34359738368} + + test {INCRBYFLOAT fails against key with spaces (left)} { + set err {} + r set novar " 11" + catch {r incrbyfloat novar 1.0} err + format $err + } {ERR*valid*} + + test {INCRBYFLOAT fails against key with spaces (right)} { + set err {} + r set novar "11 " + catch {r incrbyfloat novar 1.0} err + format $err + } {ERR*valid*} + + test {INCRBYFLOAT fails against key with spaces (both)} { + set err {} + r set novar " 11 " 
+ catch {r incrbyfloat novar 1.0} err + format $err + } {ERR*valid*} + + test {INCRBYFLOAT fails against a key holding a list} { + r del mylist + set err {} + r rpush mylist 1 + catch {r incrbyfloat mylist 1.0} err + r del mylist + format $err + } {WRONGTYPE*} + + test {INCRBYFLOAT does not allow NaN or Infinity} { + r set foo 0 + set err {} + catch {r incrbyfloat foo +inf} err + set err + # p.s. no way I can force NaN to test it from the API because + # there is no way to increment / decrement by infinity nor to + # perform divisions. + } {ERR*would produce*} + + test {INCRBYFLOAT decrement} { + r set foo 1 + roundFloat [r incrbyfloat foo -1.1] + } {-0.1} +} diff --git a/tests/unit/basic.tcl b/tests/unit/type/string.tcl similarity index 55% rename from tests/unit/basic.tcl rename to tests/unit/type/string.tcl index fec0df5ec..c98d56815 100644 --- a/tests/unit/basic.tcl +++ b/tests/unit/type/string.tcl @@ -1,9 +1,4 @@ -start_server {tags {"basic"}} { - test {DEL all keys to start with a clean DB} { - foreach key [r keys *] {r del $key} - r dbsize - } {0} - +start_server {tags {"string"}} { test {SET and GET an item} { r set x foobar r get x @@ -14,38 +9,6 @@ start_server {tags {"basic"}} { r get x } {} - test {DEL against a single item} { - r del x - r get x - } {} - - test {Vararg DEL} { - r set foo1 a - r set foo2 b - r set foo3 c - list [r del foo1 foo2 foo3 foo4] [r mget foo1 foo2 foo3] - } {3 {{} {} {}}} - - test {KEYS with pattern} { - foreach key {key_x key_y key_z foo_a foo_b foo_c} { - r set $key hello - } - lsort [r keys foo*] - } {foo_a foo_b foo_c} - - test {KEYS to get all keys} { - lsort [r keys *] - } {foo_a foo_b foo_c key_x key_y key_z} - - test {DBSIZE} { - r dbsize - } {6} - - test {DEL all keys} { - foreach key [r keys *] {r del $key} - r dbsize - } {0} - test {Very big payload in GET/SET} { set buf [string repeat "abcd" 1000000] r set foo $buf @@ -95,152 +58,6 @@ start_server {tags {"basic"}} { } {10101} } - test {INCR against non existing key} { - set res {} - append res [r incr novar] - append res [r get novar] - } {11} - - test {INCR against key created by incr itself} { - r incr novar - } {2} - - test {INCR against key originally set with SET} { - r set novar 100 - r incr novar - } {101} - - test {INCR over 32bit value} { - r set novar 17179869184 - r incr novar - } {17179869185} - - test {INCRBY over 32bit value with over 32bit increment} { - r set novar 17179869184 - r incrby novar 17179869184 - } {34359738368} - - test {INCR fails against key with spaces (left)} { - r set novar " 11" - catch {r incr novar} err - format $err - } {ERR*} - - test {INCR fails against key with spaces (right)} { - r set novar "11 " - catch {r incr novar} err - format $err - } {ERR*} - - test {INCR fails against key with spaces (both)} { - r set novar " 11 " - catch {r incr novar} err - format $err - } {ERR*} - - test {INCR fails against a key holding a list} { - r rpush mylist 1 - catch {r incr mylist} err - r rpop mylist - format $err - } {WRONGTYPE*} - - test {DECRBY over 32bit value with over 32bit increment, negative res} { - r set novar 17179869184 - r decrby novar 17179869185 - } {-1} - - test {INCR uses shared objects in the 0-9999 range} { - r set foo -1 - r incr foo - assert {[r object refcount foo] > 1} - r set foo 9998 - r incr foo - assert {[r object refcount foo] > 1} - r incr foo - assert {[r object refcount foo] == 1} - } - - test {INCR can modify objects in-place} { - r set foo 20000 - r incr foo - assert {[r object refcount foo] == 1} - set old [lindex [split [r debug 
object foo]] 1] - r incr foo - set new [lindex [split [r debug object foo]] 1] - assert {[string range $old 0 2] eq "at:"} - assert {[string range $new 0 2] eq "at:"} - assert {$old eq $new} - } - - test {INCRBYFLOAT against non existing key} { - r del novar - list [roundFloat [r incrbyfloat novar 1]] \ - [roundFloat [r get novar]] \ - [roundFloat [r incrbyfloat novar 0.25]] \ - [roundFloat [r get novar]] - } {1 1 1.25 1.25} - - test {INCRBYFLOAT against key originally set with SET} { - r set novar 1.5 - roundFloat [r incrbyfloat novar 1.5] - } {3} - - test {INCRBYFLOAT over 32bit value} { - r set novar 17179869184 - r incrbyfloat novar 1.5 - } {17179869185.5} - - test {INCRBYFLOAT over 32bit value with over 32bit increment} { - r set novar 17179869184 - r incrbyfloat novar 17179869184 - } {34359738368} - - test {INCRBYFLOAT fails against key with spaces (left)} { - set err {} - r set novar " 11" - catch {r incrbyfloat novar 1.0} err - format $err - } {ERR*valid*} - - test {INCRBYFLOAT fails against key with spaces (right)} { - set err {} - r set novar "11 " - catch {r incrbyfloat novar 1.0} err - format $err - } {ERR*valid*} - - test {INCRBYFLOAT fails against key with spaces (both)} { - set err {} - r set novar " 11 " - catch {r incrbyfloat novar 1.0} err - format $err - } {ERR*valid*} - - test {INCRBYFLOAT fails against a key holding a list} { - r del mylist - set err {} - r rpush mylist 1 - catch {r incrbyfloat mylist 1.0} err - r del mylist - format $err - } {WRONGTYPE*} - - test {INCRBYFLOAT does not allow NaN or Infinity} { - r set foo 0 - set err {} - catch {r incrbyfloat foo +inf} err - set err - # p.s. no way I can force NaN to test it from the API because - # there is no way to increment / decrement by infinity nor to - # perform divisions. - } {ERR*would produce*} - - test {INCRBYFLOAT decrement} { - r set foo 1 - roundFloat [r incrbyfloat foo -1.1] - } {-0.1} - test "SETNX target key missing" { r del novar assert_equal 1 [r setnx novar foobared] @@ -284,183 +101,6 @@ start_server {tags {"basic"}} { assert_equal 20 [r get x] } - test "DEL against expired key" { - r debug set-active-expire 0 - r setex keyExpire 1 valExpire - after 1100 - assert_equal 0 [r del keyExpire] - r debug set-active-expire 1 - } - - test {EXISTS} { - set res {} - r set newkey test - append res [r exists newkey] - r del newkey - append res [r exists newkey] - } {10} - - test {Zero length value in key. 
SET/GET/EXISTS} { - r set emptykey {} - set res [r get emptykey] - append res [r exists emptykey] - r del emptykey - append res [r exists emptykey] - } {10} - - test {Commands pipelining} { - set fd [r channel] - puts -nonewline $fd "SET k1 xyzk\r\nGET k1\r\nPING\r\n" - flush $fd - set res {} - append res [string match OK* [r read]] - append res [r read] - append res [string match PONG* [r read]] - format $res - } {1xyzk1} - - test {Non existing command} { - catch {r foobaredcommand} err - string match ERR* $err - } {1} - - test {RENAME basic usage} { - r set mykey hello - r rename mykey mykey1 - r rename mykey1 mykey2 - r get mykey2 - } {hello} - - test {RENAME source key should no longer exist} { - r exists mykey - } {0} - - test {RENAME against already existing key} { - r set mykey a - r set mykey2 b - r rename mykey2 mykey - set res [r get mykey] - append res [r exists mykey2] - } {b0} - - test {RENAMENX basic usage} { - r del mykey - r del mykey2 - r set mykey foobar - r renamenx mykey mykey2 - set res [r get mykey2] - append res [r exists mykey] - } {foobar0} - - test {RENAMENX against already existing key} { - r set mykey foo - r set mykey2 bar - r renamenx mykey mykey2 - } {0} - - test {RENAMENX against already existing key (2)} { - set res [r get mykey] - append res [r get mykey2] - } {foobar} - - test {RENAME against non existing source key} { - catch {r rename nokey foobar} err - format $err - } {ERR*} - - test {RENAME where source and dest key are the same (existing)} { - r set mykey foo - r rename mykey mykey - } {OK} - - test {RENAMENX where source and dest key are the same (existing)} { - r set mykey foo - r renamenx mykey mykey - } {0} - - test {RENAME where source and dest key are the same (non existing)} { - r del mykey - catch {r rename mykey mykey} err - format $err - } {ERR*} - - test {RENAME with volatile key, should move the TTL as well} { - r del mykey mykey2 - r set mykey foo - r expire mykey 100 - assert {[r ttl mykey] > 95 && [r ttl mykey] <= 100} - r rename mykey mykey2 - assert {[r ttl mykey2] > 95 && [r ttl mykey2] <= 100} - } - - test {RENAME with volatile key, should not inherit TTL of target key} { - r del mykey mykey2 - r set mykey foo - r set mykey2 bar - r expire mykey2 100 - assert {[r ttl mykey] == -1 && [r ttl mykey2] > 0} - r rename mykey mykey2 - r ttl mykey2 - } {-1} - - test {DEL all keys again (DB 0)} { - foreach key [r keys *] { - r del $key - } - r dbsize - } {0} - - test {DEL all keys again (DB 1)} { - r select 10 - foreach key [r keys *] { - r del $key - } - set res [r dbsize] - r select 9 - format $res - } {0} - - test {MOVE basic usage} { - r set mykey foobar - r move mykey 10 - set res {} - lappend res [r exists mykey] - lappend res [r dbsize] - r select 10 - lappend res [r get mykey] - lappend res [r dbsize] - r select 9 - format $res - } [list 0 0 foobar 1] - - test {MOVE against key existing in the target DB} { - r set mykey hello - r move mykey 10 - } {0} - - test {MOVE against non-integer DB (#1428)} { - r set mykey hello - catch {r move mykey notanumber} e - set e - } {*ERR*index out of range} - - test {SET/GET keys in different DBs} { - r set a hello - r set b world - r select 10 - r set a foo - r set b bared - r select 9 - set res {} - lappend res [r get a] - lappend res [r get b] - r select 10 - lappend res [r get a] - lappend res [r get b] - r select 9 - format $res - } {hello world foo bared} - test {MGET} { r flushdb r set foo BAR @@ -478,36 +118,6 @@ start_server {tags {"basic"}} { r mget foo baazz bar myset } {BAR {} FOO {}} 
- test {RANDOMKEY} { - r flushdb - r set foo x - r set bar y - set foo_seen 0 - set bar_seen 0 - for {set i 0} {$i < 100} {incr i} { - set rkey [r randomkey] - if {$rkey eq {foo}} { - set foo_seen 1 - } - if {$rkey eq {bar}} { - set bar_seen 1 - } - } - list $foo_seen $bar_seen - } {1 1} - - test {RANDOMKEY against empty DB} { - r flushdb - r randomkey - } {} - - test {RANDOMKEY regression 1} { - r flushdb - r set x 10 - r del x - r randomkey - } {} - test {GETSET (set new value)} { list [r getset foo xyz] [r get foo] } {{} xyz} @@ -803,13 +413,6 @@ start_server {tags {"basic"}} { assert {$ttl <= 10 && $ttl > 5} } - test {KEYS * two times with long key, Github issue #1208} { - r flushdb - r set dlskeriewrioeuwqoirueioqwrueoqwrueqw test - r keys * - r keys * - } {dlskeriewrioeuwqoirueioqwrueoqwrueqw} - test {GETRANGE with huge ranges, Github issue #1844} { r set foo bar r getrange foo 0 4294967297 From 27c30b0e84224cfc5f4189c26e16800f3e0dcd27 Mon Sep 17 00:00:00 2001 From: antirez Date: Wed, 25 Feb 2015 10:33:37 +0100 Subject: [PATCH 0122/1928] Cast sentlen to int before comparison wit bufpos. This is safe since bufpos is small, inside the range of the local client buffer. --- src/networking.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/networking.c b/src/networking.c index 0b69f5408..c844a9995 100644 --- a/src/networking.c +++ b/src/networking.c @@ -813,7 +813,7 @@ void sendReplyToClient(aeEventLoop *el, int fd, void *privdata, int mask) { /* If the buffer was sent, set bufpos to zero to continue with * the remainder of the reply. */ - if (c->sentlen == c->bufpos) { + if ((int)c->sentlen == c->bufpos) { c->bufpos = 0; c->sentlen = 0; } From 74354ceef53651aa30486a1b7181438d71cfbd1b Mon Sep 17 00:00:00 2001 From: antirez Date: Wed, 25 Feb 2015 10:37:52 +0100 Subject: [PATCH 0123/1928] Test: fixes a few tests after basic unit refactoring. --- tests/unit/type/string.tcl | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/tests/unit/type/string.tcl b/tests/unit/type/string.tcl index c98d56815..7122fd987 100644 --- a/tests/unit/type/string.tcl +++ b/tests/unit/type/string.tcl @@ -38,6 +38,7 @@ start_server {tags {"string"}} { } {} test {SET 10000 numeric keys and access all them in reverse order} { + r flushdb set err {} for {set x 0} {$x < 10000} {incr x} { r set $x $x @@ -53,9 +54,9 @@ start_server {tags {"string"}} { set _ $err } {} - test {DBSIZE should be 10101 now} { + test {DBSIZE should be 10000 now} { r dbsize - } {10101} + } {10000} } test "SETNX target key missing" { @@ -119,6 +120,7 @@ start_server {tags {"string"}} { } {BAR {} FOO {}} test {GETSET (set new value)} { + r del foo list [r getset foo xyz] [r get foo] } {{} xyz} From cc0d339bd19d37367806cc95111f1539aa268004 Mon Sep 17 00:00:00 2001 From: antirez Date: Wed, 25 Feb 2015 13:02:04 +0100 Subject: [PATCH 0124/1928] utils/hashtable/rehashing.c test updated to use new API. 
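The cast added in patch 0122 above deserves a standalone note: on typical platforms, comparing a size_t against an int converts the int operand to the unsigned type, so the explicit (int) cast keeps the comparison in signed arithmetic, and it is only safe because bufpos is bounded by the small static reply buffer. A minimal sketch (not Redis source):

    #include <stddef.h>
    #include <stdio.h>

    /* Returns 1 when the whole static reply buffer has been flushed. The
     * cast mirrors patch 0122: sentlen is a size_t, bufpos an int bounded
     * by the 16k client buffer, so narrowing sentlen cannot overflow here. */
    static int buffer_fully_sent(size_t sentlen, int bufpos) {
        return (int)sentlen == bufpos;
    }

    int main(void) {
        printf("%d\n", buffer_fully_sent(16384, 16384));   /* 1 */
        printf("%d\n", buffer_fully_sent(100, 16384));     /* 0 */
        return 0;
    }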
--- utils/hashtable/rehashing.c | 34 ++++++++++++++++++++++------------ 1 file changed, 22 insertions(+), 12 deletions(-) diff --git a/utils/hashtable/rehashing.c b/utils/hashtable/rehashing.c index c900a8d2f..b57a9043a 100644 --- a/utils/hashtable/rehashing.c +++ b/utils/hashtable/rehashing.c @@ -68,26 +68,30 @@ int sortPointers(const void *a, const void *b) { return la-lb; } -void stressGetKeys(dict *d, int times) { +void stressGetKeys(dict *d, int times, int *perfect_run, int *approx_run) { int j; + dictEntry **des = zmalloc(sizeof(dictEntry*)*dictSize(d)); for (j = 0; j < times; j++) { int requested = rand() % (dictSize(d)+1); int returned = dictGetSomeKeys(d, des, requested); - if (requested != returned) { - printf("*** ERROR! Req: %d, Ret: %d\n", requested, returned); - exit(1); - } + int dup = 0; + qsort(des,returned,sizeof(dictEntry*),sortPointers); if (returned > 1) { int i; for (i = 0; i < returned-1; i++) { - if (des[i] == des[i+1]) { - printf("*** ERROR! Duplicated element detected\n"); - exit(1); - } + if (des[i] == des[i+1]) dup++; } } + + if (requested == returned && dup == 0) { + (*perfect_run)++; + } else { + (*approx_run)++; + printf("Requested, returned, duplicated: %d %d %d\n", + requested, returned, dup); + } } zfree(des); } @@ -113,18 +117,24 @@ int main(void) { dictRelease(d); d = dictCreate(&dictTypeTest,NULL); - printf("Getkeys stress test\n"); + + printf("Stress testing dictGetSomeKeys\n"); + int perfect_run = 0, approx_run = 0; for (i = 0; i < MAX2; i++) { dictAdd(d,(void*)i,NULL); - stressGetKeys(d,100); + stressGetKeys(d,100,&perfect_run,&approx_run); } for (i = 0; i < MAX2; i++) { dictDelete(d,(void*)i); dictResize(d); - stressGetKeys(d,100); + stressGetKeys(d,100,&perfect_run,&approx_run); } + + printf("dictGetSomeKey, %d perfect runs, %d approximated runs\n", + perfect_run, approx_run); + dictRelease(d); printf("TEST PASSED!\n"); From 7fda935ad3b00dee511283f9bed92cd43cabcbf5 Mon Sep 17 00:00:00 2001 From: Tommy Wang Date: Wed, 25 Feb 2015 12:29:06 -0600 Subject: [PATCH 0125/1928] Add last_dbid to migrateCachedSocket to avoid redundant SELECT Avoid redundant SELECT calls when continuously migrating keys to the same dbid within a target Redis instance. --- src/cluster.c | 59 +++++++++++++++++++++++++++++++-------------------- 1 file changed, 36 insertions(+), 23 deletions(-) diff --git a/src/cluster.c b/src/cluster.c index 9a0a228dc..a5f15b920 100644 --- a/src/cluster.c +++ b/src/cluster.c @@ -4362,11 +4362,12 @@ void restoreCommand(redisClient *c) { typedef struct migrateCachedSocket { int fd; + long last_dbid; time_t last_use_time; } migrateCachedSocket; -/* Return a TCP socket connected with the target instance, possibly returning - * a cached one. +/* Return a migrateCachedSocket containing a TCP socket connected with the + * target instance, possibly returning a cached one. * * This function is responsible of sending errors to the client if a * connection can't be established. In this case -1 is returned. @@ -4376,7 +4377,7 @@ typedef struct migrateCachedSocket { * If the caller detects an error while using the socket, migrateCloseSocket() * should be called so that the connection will be created from scratch * the next time. 
*/ -int migrateGetSocket(redisClient *c, robj *host, robj *port, long timeout) { +migrateCachedSocket* migrateGetSocket(redisClient *c, robj *host, robj *port, long timeout) { int fd; sds name = sdsempty(); migrateCachedSocket *cs; @@ -4389,7 +4390,7 @@ int migrateGetSocket(redisClient *c, robj *host, robj *port, long timeout) { if (cs) { sdsfree(name); cs->last_use_time = server.unixtime; - return cs->fd; + return cs; } /* No cached socket, create one. */ @@ -4409,7 +4410,7 @@ int migrateGetSocket(redisClient *c, robj *host, robj *port, long timeout) { sdsfree(name); addReplyErrorFormat(c,"Can't connect to target node: %s", server.neterr); - return -1; + return NULL; } anetEnableTcpNoDelay(server.neterr,fd); @@ -4419,15 +4420,16 @@ int migrateGetSocket(redisClient *c, robj *host, robj *port, long timeout) { addReplySds(c, sdsnew("-IOERR error or timeout connecting to the client\r\n")); close(fd); - return -1; + return NULL; } /* Add to the cache and return it to the caller. */ cs = zmalloc(sizeof(*cs)); cs->fd = fd; + cs->last_dbid = -1; cs->last_use_time = server.unixtime; dictAdd(server.migrate_cached_sockets,name,cs); - return fd; + return cs; } /* Free a migrate cached connection. */ @@ -4468,7 +4470,8 @@ void migrateCloseTimedoutSockets(void) { /* MIGRATE host port key dbid timeout [COPY | REPLACE] */ void migrateCommand(redisClient *c) { - int fd, copy, replace, j; + migrateCachedSocket *cs; + int copy, replace, j; long timeout; long dbid; long long ttl, expireat; @@ -4478,6 +4481,7 @@ void migrateCommand(redisClient *c) { try_again: /* Initialization */ + cs = NULL; copy = 0; replace = 0; ttl = 0; @@ -4510,14 +4514,17 @@ try_again: } /* Connect */ - fd = migrateGetSocket(c,c->argv[1],c->argv[2],timeout); - if (fd == -1) return; /* error sent to the client by migrateGetSocket() */ + cs = migrateGetSocket(c,c->argv[1],c->argv[2],timeout); + if (cs == NULL) return; /* error sent to the client by migrateGetSocket() */ + + rioInitWithBuffer(&cmd,sdsempty()); /* Create RESTORE payload and generate the protocol to call the command. */ - rioInitWithBuffer(&cmd,sdsempty()); - redisAssertWithInfo(c,NULL,rioWriteBulkCount(&cmd,'*',2)); - redisAssertWithInfo(c,NULL,rioWriteBulkString(&cmd,"SELECT",6)); - redisAssertWithInfo(c,NULL,rioWriteBulkLongLong(&cmd,dbid)); + if (cs->last_dbid != dbid) { + redisAssertWithInfo(c,NULL,rioWriteBulkCount(&cmd,'*',2)); + redisAssertWithInfo(c,NULL,rioWriteBulkString(&cmd,"SELECT",6)); + redisAssertWithInfo(c,NULL,rioWriteBulkLongLong(&cmd,dbid)); + } expireat = getExpire(c->db,c->argv[3]); if (expireat != -1) { @@ -4556,7 +4563,7 @@ try_again: while ((towrite = sdslen(buf)-pos) > 0) { towrite = (towrite > (64*1024) ? 
(64*1024) : towrite); - nwritten = syncWrite(fd,buf+pos,towrite,timeout); + nwritten = syncWrite(cs->fd,buf+pos,towrite,timeout); if (nwritten != (signed)towrite) goto socket_wr_err; pos += nwritten; } @@ -4568,14 +4575,18 @@ try_again: char buf2[1024]; /* Read the two replies */ - if (syncReadLine(fd, buf1, sizeof(buf1), timeout) <= 0) + if (cs->last_dbid != dbid && syncReadLine(cs->fd, buf1, sizeof(buf1), timeout) <= 0) goto socket_rd_err; - if (syncReadLine(fd, buf2, sizeof(buf2), timeout) <= 0) + if (syncReadLine(cs->fd, buf2, sizeof(buf2), timeout) <= 0) goto socket_rd_err; - if (buf1[0] == '-' || buf2[0] == '-') { + if ((cs->last_dbid != dbid && buf1[0] == '-') || buf2[0] == '-') { + /* If we got an error at all, assume that the last_dbid is no longer valid */ + cs->last_dbid = -1; addReplyErrorFormat(c,"Target instance replied with error: %s", - (buf1[0] == '-') ? buf1+1 : buf2+1); + (cs->last_dbid != dbid && buf1[0] == '-') ? buf1+1 : buf2+1); } else { + /* Update the last_dbid in migrateCachedSocket */ + cs->last_dbid = dbid; robj *aux; if (!copy) { @@ -4586,10 +4597,12 @@ try_again: addReply(c,shared.ok); server.dirty++; - /* Translate MIGRATE as DEL for replication/AOF. */ - aux = createStringObject("DEL",3); - rewriteClientCommandVector(c,2,aux,c->argv[3]); - decrRefCount(aux); + if (!copy) { + /* Translate MIGRATE as DEL for replication/AOF. */ + aux = createStringObject("DEL",3); + rewriteClientCommandVector(c,2,aux,c->argv[3]); + decrRefCount(aux); + } } } From 832b0c7ccef0e7e11e0663530367de1d3fb9a959 Mon Sep 17 00:00:00 2001 From: antirez Date: Thu, 26 Feb 2015 10:25:15 +0100 Subject: [PATCH 0126/1928] Improvements to PR #2425 1. Remove useless "cs" initialization. 2. Add a "select" var to capture a condition checked multiple times. 3. Avoid duplication of the same if (!copy) conditional. 4. Don't increment dirty if copy is given (no deletion is performed), otherwise we propagate MIGRATE when not needed. --- src/cluster.c | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/src/cluster.c b/src/cluster.c index a5f15b920..c606448f9 100644 --- a/src/cluster.c +++ b/src/cluster.c @@ -4481,7 +4481,6 @@ void migrateCommand(redisClient *c) { try_again: /* Initialization */ - cs = NULL; copy = 0; replace = 0; ttl = 0; @@ -4519,13 +4518,15 @@ try_again: rioInitWithBuffer(&cmd,sdsempty()); - /* Create RESTORE payload and generate the protocol to call the command. */ - if (cs->last_dbid != dbid) { + /* Send the SELECT command if the current DB is not already selected. */ + int select = cs->last_dbid != dbid; /* Should we emit SELECT? */ + if (select) { redisAssertWithInfo(c,NULL,rioWriteBulkCount(&cmd,'*',2)); redisAssertWithInfo(c,NULL,rioWriteBulkString(&cmd,"SELECT",6)); redisAssertWithInfo(c,NULL,rioWriteBulkLongLong(&cmd,dbid)); } + /* Create RESTORE payload and generate the protocol to call the command. 
*/ expireat = getExpire(c->db,c->argv[3]); if (expireat != -1) { ttl = expireat-mstime(); @@ -4575,12 +4576,12 @@ try_again: char buf2[1024]; /* Read the two replies */ - if (cs->last_dbid != dbid && syncReadLine(cs->fd, buf1, sizeof(buf1), timeout) <= 0) + if (select && syncReadLine(cs->fd, buf1, sizeof(buf1), timeout) <= 0) goto socket_rd_err; if (syncReadLine(cs->fd, buf2, sizeof(buf2), timeout) <= 0) goto socket_rd_err; - if ((cs->last_dbid != dbid && buf1[0] == '-') || buf2[0] == '-') { - /* If we got an error at all, assume that the last_dbid is no longer valid */ + if ((select && buf1[0] == '-') || buf2[0] == '-') { + /* On error assume that last_dbid is no longer valid. */ cs->last_dbid = -1; addReplyErrorFormat(c,"Target instance replied with error: %s", (cs->last_dbid != dbid && buf1[0] == '-') ? buf1+1 : buf2+1); @@ -4589,15 +4590,14 @@ try_again: cs->last_dbid = dbid; robj *aux; + addReply(c,shared.ok); + if (!copy) { /* No COPY option: remove the local key, signal the change. */ dbDelete(c->db,c->argv[3]); signalModifiedKey(c->db,c->argv[3]); - } - addReply(c,shared.ok); - server.dirty++; + server.dirty++; - if (!copy) { /* Translate MIGRATE as DEL for replication/AOF. */ aux = createStringObject("DEL",3); rewriteClientCommandVector(c,2,aux,c->argv[3]); From 7e6b4ea67bf38595ae25bdd6aa253702a1fd5509 Mon Sep 17 00:00:00 2001 From: antirez Date: Thu, 26 Feb 2015 18:31:06 +0100 Subject: [PATCH 0127/1928] server.current_client fix and minor refactoring. Thanks to @codeslinger (Toby DiPasquale) for identifying the issue. Related to issue #2409. --- src/blocked.c | 2 -- src/networking.c | 26 +++++++++++--------------- 2 files changed, 11 insertions(+), 17 deletions(-) diff --git a/src/blocked.c b/src/blocked.c index 4cd632bd3..ef0d5246d 100644 --- a/src/blocked.c +++ b/src/blocked.c @@ -118,9 +118,7 @@ void processUnblockedClients(void) { /* Process remaining data in the input buffer. */ if (c->querybuf && sdslen(c->querybuf) > 0) { - server.current_client = c; processInputBuffer(c); - server.current_client = NULL; } } } diff --git a/src/networking.c b/src/networking.c index c844a9995..58275a219 100644 --- a/src/networking.c +++ b/src/networking.c @@ -1106,18 +1106,19 @@ int processMultibulkBuffer(redisClient *c) { } void processInputBuffer(redisClient *c) { + server.current_client = c; /* Keep processing while there is something in the input buffer */ while(sdslen(c->querybuf)) { /* Return if clients are paused. */ - if (!(c->flags & REDIS_SLAVE) && clientsArePaused()) return; + if (!(c->flags & REDIS_SLAVE) && clientsArePaused()) break; /* Immediately abort if the client is in the middle of something. */ - if (c->flags & REDIS_BLOCKED) return; + if (c->flags & REDIS_BLOCKED) break; /* REDIS_CLOSE_AFTER_REPLY closes the connection once the reply is * written to the client. Make sure to not let the reply grow after * this flag has been set (i.e. don't process more commands). */ - if (c->flags & REDIS_CLOSE_AFTER_REPLY) return; + if (c->flags & REDIS_CLOSE_AFTER_REPLY) break; /* Determine request type when unknown. 
*/ if (!c->reqtype) { @@ -1145,6 +1146,7 @@ void processInputBuffer(redisClient *c) { resetClient(c); } } + server.current_client = NULL; } void readQueryFromClient(aeEventLoop *el, int fd, void *privdata, int mask) { @@ -1154,7 +1156,6 @@ void readQueryFromClient(aeEventLoop *el, int fd, void *privdata, int mask) { REDIS_NOTUSED(el); REDIS_NOTUSED(mask); - server.current_client = c; readlen = REDIS_IOBUF_LEN; /* If this is a multi bulk request, and we are processing a bulk reply * that is large enough, try to maximize the probability that the query @@ -1176,7 +1177,7 @@ void readQueryFromClient(aeEventLoop *el, int fd, void *privdata, int mask) { nread = read(fd, c->querybuf+qblen, readlen); if (nread == -1) { if (errno == EAGAIN) { - nread = 0; + return; } else { redisLog(REDIS_VERBOSE, "Reading from client: %s",strerror(errno)); freeClient(c); @@ -1187,15 +1188,11 @@ void readQueryFromClient(aeEventLoop *el, int fd, void *privdata, int mask) { freeClient(c); return; } - if (nread) { - sdsIncrLen(c->querybuf,nread); - c->lastinteraction = server.unixtime; - if (c->flags & REDIS_MASTER) c->reploff += nread; - server.stat_net_input_bytes += nread; - } else { - server.current_client = NULL; - return; - } + + sdsIncrLen(c->querybuf,nread); + c->lastinteraction = server.unixtime; + if (c->flags & REDIS_MASTER) c->reploff += nread; + server.stat_net_input_bytes += nread; if (sdslen(c->querybuf) > server.client_max_querybuf_len) { sds ci = catClientInfoString(sdsempty(),c), bytes = sdsempty(); @@ -1207,7 +1204,6 @@ void readQueryFromClient(aeEventLoop *el, int fd, void *privdata, int mask) { return; } processInputBuffer(c); - server.current_client = NULL; } void getClientsMaxBuffers(unsigned long *longest_output_list, From c95507881acb0d8cdaf7e0a29f445ee2fdaa2c80 Mon Sep 17 00:00:00 2001 From: antirez Date: Fri, 27 Feb 2015 15:20:58 +0100 Subject: [PATCH 0128/1928] Utils: added function to get radix 10 string length of signed integer. --- src/util.c | 12 ++++++++++++ src/util.h | 2 ++ 2 files changed, 14 insertions(+) diff --git a/src/util.c b/src/util.c index 4190775b1..4b1aaaba7 100644 --- a/src/util.c +++ b/src/util.c @@ -251,6 +251,18 @@ uint32_t digits10(uint64_t v) { return 12 + digits10(v / 1000000000000UL); } +/* Like digits10() but for signed values. */ +uint32_t sdigits10(int64_t v) { + if (v < 0) { + /* Abs value of LLONG_MIN requires special handling. */ + uint64_t uv = (v != LLONG_MIN) ? + -v : ((uint64_t) LLONG_MAX)+1; + return digits10(uv)+1; /* +1 for the minus. */ + } else { + return digits10(v); + } +} + /* Convert a long long into a string. Returns the number of * characters needed to represent the number. * If the buffer is not big enough to store the string, 0 is returned. diff --git a/src/util.h b/src/util.h index 666042c9b..544b9b8bd 100644 --- a/src/util.h +++ b/src/util.h @@ -35,6 +35,8 @@ int stringmatchlen(const char *p, int plen, const char *s, int slen, int nocase); int stringmatch(const char *p, const char *s, int nocase); long long memtoll(const char *p, int *err); +uint32_t digits10(uint64_t v); +uint32_t sdigits10(int64_t v); int ll2string(char *s, size_t len, long long value); int string2ll(const char *s, size_t slen, long long *value); int string2l(const char *s, size_t slen, long *value); From d8f8b0575f489bba28cd2b03380bdbbbc48b6f66 Mon Sep 17 00:00:00 2001 From: antirez Date: Fri, 27 Feb 2015 15:22:49 +0100 Subject: [PATCH 0129/1928] Hash: API to get value string len by field name. 
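A quick way to sanity-check the sdigits10() helper added in patch 0128 above is against the string length that snprintf() produces; a hypothetical harness (expected_sdigits10 is an illustrative name, not part of the patch):

    #include <limits.h>
    #include <stdio.h>

    /* sdigits10(v) is meant to return the length of the decimal rendering
     * of v, minus sign included; snprintf() reports that length directly. */
    static unsigned expected_sdigits10(long long v) {
        char buf[32];
        return (unsigned)snprintf(buf, sizeof(buf), "%lld", v);
    }

    int main(void) {
        printf("%u\n", expected_sdigits10(0));         /* 1  */
        printf("%u\n", expected_sdigits10(-1));        /* 2  */
        printf("%u\n", expected_sdigits10(LLONG_MAX)); /* 19 */
        printf("%u\n", expected_sdigits10(LLONG_MIN)); /* 20: "-9223372036854775808" */
        return 0;
    }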
--- src/t_hash.c | 24 +++++++++++++++++++++++- 1 file changed, 23 insertions(+), 1 deletion(-) diff --git a/src/t_hash.c b/src/t_hash.c index 7f33bba0c..c228765db 100644 --- a/src/t_hash.c +++ b/src/t_hash.c @@ -130,7 +130,6 @@ robj *hashTypeGetObject(robj *o, robj *field) { value = createStringObjectFromLongLong(vll); } } - } else if (o->encoding == REDIS_ENCODING_HT) { robj *aux; @@ -144,6 +143,29 @@ robj *hashTypeGetObject(robj *o, robj *field) { return value; } +/* Higher level function using hashTypeGet*() to return the length of the + * object associated with the requested field, or 0 if the field does not + * exist. */ +size_t hashTypeGetValueLength(robj *o, robj *field) { + size_t len = 0; + if (o->encoding == REDIS_ENCODING_ZIPLIST) { + unsigned char *vstr = NULL; + unsigned int vlen = UINT_MAX; + long long vll = LLONG_MAX; + + if (hashTypeGetFromZiplist(o, field, &vstr, &vlen, &vll) == 0) + len = vstr ? vlen : sdigits10(vll); + } else if (o->encoding == REDIS_ENCODING_HT) { + robj *aux; + + if (hashTypeGetFromHashTable(o, field, &aux) == 0) + len = sdslen(aux->ptr); + } else { + redisPanic("Unknown hash encoding"); + } + return len; +} + /* Test if the specified field exists in the given hash. Returns 1 if the field * exists, and 0 when it doesn't. */ int hashTypeExists(robj *o, robj *field) { From 4e54b85a19027855f05b4c825ad4ac0c71fd9fea Mon Sep 17 00:00:00 2001 From: antirez Date: Fri, 27 Feb 2015 15:30:15 +0100 Subject: [PATCH 0130/1928] Hash: HSTRLEN (was HVSTRLEN) improved. 1. HVSTRLEN -> HSTRLEN. It's unlikely one needs the length of the key, not clear how the API would work (by value does not make sense) and there will be better names anyway. 2. Default is to return 0 when field is missing. 3. Default is to return 0 when key is missing. 4. The implementation was slower than needed, and produced unnecessary COW. Related issue #2415. 
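An illustrative redis-cli session for the renamed command and its new defaults (the zero replies for the missing field and the missing key follow from the shared.czero defaults described above):

    redis> HSET myhash field foobar
    (integer) 1
    redis> HSTRLEN myhash field
    (integer) 6
    redis> HSTRLEN myhash nosuchfield
    (integer) 0
    redis> HSTRLEN nosuchkey field
    (integer) 0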
--- src/redis.c | 2 +- src/redis.h | 2 +- src/t_hash.c | 15 +++++---------- tests/unit/type/hash.tcl | 22 ++++++++++------------ 4 files changed, 17 insertions(+), 24 deletions(-) diff --git a/src/redis.c b/src/redis.c index 41d74f4e3..86b5a9ebf 100644 --- a/src/redis.c +++ b/src/redis.c @@ -202,7 +202,7 @@ struct redisCommand redisCommandTable[] = { {"hincrbyfloat",hincrbyfloatCommand,4,"wmF",0,NULL,1,1,1,0,0}, {"hdel",hdelCommand,-3,"wF",0,NULL,1,1,1,0,0}, {"hlen",hlenCommand,2,"rF",0,NULL,1,1,1,0,0}, - {"hvstrlen",hvstrlenCommand,3,"rF",0,NULL,1,1,1,0,0}, + {"hstrlen",hstrlenCommand,3,"rF",0,NULL,1,1,1,0,0}, {"hkeys",hkeysCommand,2,"rS",0,NULL,1,1,1,0,0}, {"hvals",hvalsCommand,2,"rS",0,NULL,1,1,1,0,0}, {"hgetall",hgetallCommand,2,"r",0,NULL,1,1,1,0,0}, diff --git a/src/redis.h b/src/redis.h index 142e830e4..232ada5e5 100644 --- a/src/redis.h +++ b/src/redis.h @@ -1516,7 +1516,7 @@ void hmsetCommand(redisClient *c); void hmgetCommand(redisClient *c); void hdelCommand(redisClient *c); void hlenCommand(redisClient *c); -void hvstrlenCommand(redisClient *c); +void hstrlenCommand(redisClient *c); void zremrangebyrankCommand(redisClient *c); void zunionstoreCommand(redisClient *c); void zinterstoreCommand(redisClient *c); diff --git a/src/t_hash.c b/src/t_hash.c index c05bcdffd..2058dfd8f 100644 --- a/src/t_hash.c +++ b/src/t_hash.c @@ -701,24 +701,19 @@ void hdelCommand(redisClient *c) { void hlenCommand(redisClient *c) { robj *o; + if ((o = lookupKeyReadOrReply(c,c->argv[1],shared.czero)) == NULL || checkType(c,o,REDIS_HASH)) return; addReplyLongLong(c,hashTypeLength(o)); } -void hvstrlenCommand(redisClient *c) { +void hstrlenCommand(redisClient *c) { robj *o; - robj *value; - if ((o = lookupKeyReadOrReply(c,c->argv[1],shared.nullbulk)) == NULL || - checkType(c,o,REDIS_HASH)) return; - if ((value = hashTypeGetObject(o,c->argv[2])) == NULL) { - addReply(c, shared.nullbulk); - } else { - addReplyLongLong(c,stringObjectLen(value)); - decrRefCount(value); - } + if ((o = lookupKeyReadOrReply(c,c->argv[1],shared.czero)) == NULL || + checkType(c,o,REDIS_HASH)) return; + addReplyLongLong(c,hashTypeGetValueLength(o,c->argv[2])); } static void addHashIteratorCursorToReply(redisClient *c, hashTypeIterator *hi, int what) { diff --git a/tests/unit/type/hash.tcl b/tests/unit/type/hash.tcl index 3d9be1fcc..bd446427b 100644 --- a/tests/unit/type/hash.tcl +++ b/tests/unit/type/hash.tcl @@ -390,36 +390,34 @@ start_server {tags {"hash"}} { lappend rv [string match "ERR*not*float*" $bigerr] } {1 1} - test {HVSTRLEN against the small hash} { + test {HSTRLEN against the small hash} { set err {} foreach k [array names smallhash *] { - if {[string length $smallhash($k)] ne [r hvstrlen smallhash $k]} { - set err "[string length $smallhash($k)] != [r hvstrlen smallhash $k]" + if {[string length $smallhash($k)] ne [r hstrlen smallhash $k]} { + set err "[string length $smallhash($k)] != [r hstrlen smallhash $k]" break } } set _ $err } {} - test {HVSTRLEN against the big hash} { + test {HSTRLEN against the big hash} { set err {} foreach k [array names bighash *] { - if {[string length $bighash($k)] ne [r hvstrlen bighash $k]} { - set err "[string length $bighash($k)] != [r hvstrlen bighash $k]" + if {[string length $bighash($k)] ne [r hstrlen bighash $k]} { + set err "[string length $bighash($k)] != [r hstrlen bighash $k]" break } } set _ $err } {} - test {HVSTRLEN against non existing key} { + test {HSTRLEN against non existing field} { set rv {} - lappend rv [r hvstrlen smallhash __123123123__] - lappend rv [r 
hvstrlen bighash __123123123__] + lappend rv [r hstrlen smallhash __123123123__] + lappend rv [r hstrlen bighash __123123123__] set _ $rv - - } {{} {}} - + } {0 0} test {Hash ziplist regression test for large keys} { r hset hash kkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkk a From 0ace1e6d04c6c7cb38a5e74c4bc5e53aac891905 Mon Sep 17 00:00:00 2001 From: antirez Date: Fri, 27 Feb 2015 15:37:04 +0100 Subject: [PATCH 0131/1928] Hash: HSTRLEN crash fixed when getting len of int-encoded value --- src/t_hash.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/t_hash.c b/src/t_hash.c index 2058dfd8f..2f3487f6a 100644 --- a/src/t_hash.c +++ b/src/t_hash.c @@ -159,7 +159,7 @@ size_t hashTypeGetValueLength(robj *o, robj *field) { robj *aux; if (hashTypeGetFromHashTable(o, field, &aux) == 0) - len = sdslen(aux->ptr); + len = stringObjectLen(aux); } else { redisPanic("Unknown hash encoding"); } From 084a59c324d4213772e863d949c01a7d1b621b90 Mon Sep 17 00:00:00 2001 From: antirez Date: Fri, 27 Feb 2015 15:44:44 +0100 Subject: [PATCH 0132/1928] Test: HSTRLEN stress test of corner cases. Main point here is to correctly report LLONG_MIN length, since to take the absolute value we need care in sdigits10(). --- tests/unit/type/hash.tcl | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/tests/unit/type/hash.tcl b/tests/unit/type/hash.tcl index bd446427b..5ceef150c 100644 --- a/tests/unit/type/hash.tcl +++ b/tests/unit/type/hash.tcl @@ -419,6 +419,22 @@ start_server {tags {"hash"}} { set _ $rv } {0 0} + test {HSTRLEN corner cases} { + set vals { + -9223372036854775808 9223372036854775807 9223372036854775808 + {} 0 -1 x + } + foreach v $vals { + r hmset smallhash field $v + r hmset bighash field $v + set len1 [string length $v] + set len2 [r hstrlen smallhash field] + set len3 [r hstrlen bighash field] + assert {$len1 == $len2} + assert {$len2 == $len3} + } + } + test {Hash ziplist regression test for large keys} { r hset hash kkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkk a r hset hash kkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkk b From 0e5e8ca9e67dbd9e8cd943db343564d26ca7e398 Mon Sep 17 00:00:00 2001 From: antirez Date: Fri, 27 Feb 2015 16:01:45 +0100 Subject: [PATCH 0133/1928] Utils: Include stdint.h and fix signess in sdigits10(). --- src/util.c | 2 +- src/util.h | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/src/util.c b/src/util.c index 4b1aaaba7..295f32642 100644 --- a/src/util.c +++ b/src/util.c @@ -256,7 +256,7 @@ uint32_t sdigits10(int64_t v) { if (v < 0) { /* Abs value of LLONG_MIN requires special handling. */ uint64_t uv = (v != LLONG_MIN) ? 
- -v : ((uint64_t) LLONG_MAX)+1; + (uint64_t)-v : ((uint64_t) LLONG_MAX)+1; return digits10(uv)+1; /* +1 for the minus. */ } else { return digits10(v); diff --git a/src/util.h b/src/util.h index 544b9b8bd..c27f7017b 100644 --- a/src/util.h +++ b/src/util.h @@ -30,6 +30,7 @@ #ifndef __REDIS_UTIL_H #define __REDIS_UTIL_H +#include #include "sds.h" int stringmatchlen(const char *p, int plen, const char *s, int slen, int nocase); From 4f56f035a70293be6fbc20a0545c1be53a131e7c Mon Sep 17 00:00:00 2001 From: antirez Date: Fri, 27 Feb 2015 16:08:50 +0100 Subject: [PATCH 0134/1928] String: use new sdigits10() API in stringObjectLen(). Should be much faster, and regardless, the code is more obvious now compared to generating a string just to get the return value of the ll2stirng() function. --- src/object.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/src/object.c b/src/object.c index f75421ee8..8905db18d 100644 --- a/src/object.c +++ b/src/object.c @@ -529,9 +529,7 @@ size_t stringObjectLen(robj *o) { if (sdsEncodedObject(o)) { return sdslen(o->ptr); } else { - char buf[32]; - - return ll2string(buf,32,(long)o->ptr); + return sdigits10((long)o->ptr); } } From c77081a45a7c350428785b6e7fafeb2b57ea4330 Mon Sep 17 00:00:00 2001 From: antirez Date: Fri, 27 Feb 2015 22:33:54 +0100 Subject: [PATCH 0135/1928] Migrate: replace conditional with pre-computed value. --- src/cluster.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/cluster.c b/src/cluster.c index c606448f9..df1498bdd 100644 --- a/src/cluster.c +++ b/src/cluster.c @@ -4584,7 +4584,7 @@ try_again: /* On error assume that last_dbid is no longer valid. */ cs->last_dbid = -1; addReplyErrorFormat(c,"Target instance replied with error: %s", - (cs->last_dbid != dbid && buf1[0] == '-') ? buf1+1 : buf2+1); + (select && buf1[0] == '-') ? buf1+1 : buf2+1); } else { /* Update the last_dbid in migrateCachedSocket */ cs->last_dbid = dbid; From 509a6cc1e89e3d988511d8595dbc47a4e04b999c Mon Sep 17 00:00:00 2001 From: antirez Date: Wed, 4 Mar 2015 11:48:16 -0800 Subject: [PATCH 0136/1928] Fix iterator for issue #2438. Itereator misuse due to analyzeLatencyForEvent() accessing the dictionary during the iteration, without the iterator being reclared as safe. --- src/latency.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/latency.c b/src/latency.c index fd76b3215..54ed03778 100644 --- a/src/latency.c +++ b/src/latency.c @@ -248,7 +248,7 @@ sds createLatencyReport(void) { dictEntry *de; int eventnum = 0; - di = dictGetIterator(server.latency_events); + di = dictGetSafeIterator(server.latency_events); while((de = dictNext(di)) != NULL) { char *event = dictGetKey(de); struct latencyTimeSeries *ts = dictGetVal(de); From a664040eb773922bd3190bd13a272bfeb70e092e Mon Sep 17 00:00:00 2001 From: antirez Date: Sun, 8 Mar 2015 15:33:42 +0100 Subject: [PATCH 0137/1928] Config: activerehashing option support in CONFIG SET. 
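A note on the iterator fix above (issue #2438): the dictionary may only be looked up or modified from inside an iteration loop when the iterator was obtained as a safe one, since a plain iterator does not stop incremental rehashing. A short sketch of the pattern, assuming Redis's dict.h API (the function name is illustrative and this only compiles inside the Redis tree):

#include "dict.h"   /* Redis's hash table, not a standalone library header */

/* Loop bodies that may access the dict being iterated (as
 * createLatencyReport() does via analyzeLatencyForEvent()) must use a
 * safe iterator; a plain dictGetIterator() assumes the table is left
 * untouched and may misbehave across a rehash step. */
void scanLatencyEvents(dict *events) {
    dictIterator *di = dictGetSafeIterator(events); /* not dictGetIterator() */
    dictEntry *de;

    while ((de = dictNext(di)) != NULL) {
        char *event = dictGetKey(de);
        /* ...per-event analysis that may look up the same dict... */
        (void)event;
    }
    dictReleaseIterator(di);
}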
--- src/config.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/src/config.c b/src/config.c index 3963a1218..05c27235e 100644 --- a/src/config.c +++ b/src/config.c @@ -800,6 +800,11 @@ void configSetCommand(redisClient *c) { if (yn == -1) goto badfmt; server.repl_slave_ro = yn; + } else if (!strcasecmp(c->argv[2]->ptr,"activerehashing")) { + int yn = yesnotoi(o->ptr); + + if (yn == -1) goto badfmt; + server.activerehashing = yn; } else if (!strcasecmp(c->argv[2]->ptr,"dir")) { if (chdir((char*)o->ptr) == -1) { addReplyErrorFormat(c,"Changing directory: %s", strerror(errno)); From d68f28a367389ebf28cc31775e678d3e655aa660 Mon Sep 17 00:00:00 2001 From: antirez Date: Tue, 10 Mar 2015 12:37:39 +0100 Subject: [PATCH 0138/1928] CONFIG SET refactoring of bool and value fields. Not perfect since The Solution IMHO is to have a DSL with a table of configuration functions with type, limits, and aux functions to handle the odd ones. However this hacky macro solution is already better and forces to put limits in the range of numerical fields. More field types to be refactored in the next commits hopefully. --- src/config.c | 244 +++++++++++++++++++++------------------------------ 1 file changed, 98 insertions(+), 146 deletions(-) diff --git a/src/config.c b/src/config.c index 05c27235e..e1695d5a0 100644 --- a/src/config.c +++ b/src/config.c @@ -621,6 +621,19 @@ void loadServerConfig(char *filename, char *options) { * CONFIG SET implementation *----------------------------------------------------------------------------*/ +#define config_set_bool_field(_name,_var) \ + } else if (!strcasecmp(c->argv[2]->ptr,_name)) { \ + int yn = yesnotoi(o->ptr); \ + if (yn == -1) goto badfmt; \ + _var = yn; \ + +#define config_set_numerical_field(_name,_var,min,max) \ + } else if (!strcasecmp(c->argv[2]->ptr,_name)) { \ + if (getLongLongFromObject(o,&ll) == REDIS_ERR || ll < 0) goto badfmt; \ + if (min != LLONG_MIN && ll < min) goto badfmt; \ + if (max != LLONG_MAX && ll > max) goto badfmt; \ + _var = ll; \ + void configSetCommand(redisClient *c) { robj *o; long long ll; @@ -679,11 +692,6 @@ void configSetCommand(redisClient *c) { } } } - } else if (!strcasecmp(c->argv[2]->ptr,"hz")) { - if (getLongLongFromObject(o,&ll) == REDIS_ERR || ll < 0) goto badfmt; - server.hz = ll; - if (server.hz < REDIS_MIN_HZ) server.hz = REDIS_MIN_HZ; - if (server.hz > REDIS_MAX_HZ) server.hz = REDIS_MAX_HZ; } else if (!strcasecmp(c->argv[2]->ptr,"maxmemory-policy")) { if (!strcasecmp(o->ptr,"volatile-lru")) { server.maxmemory_policy = REDIS_MAXMEMORY_VOLATILE_LRU; @@ -700,18 +708,6 @@ void configSetCommand(redisClient *c) { } else { goto badfmt; } - } else if (!strcasecmp(c->argv[2]->ptr,"maxmemory-samples")) { - if (getLongLongFromObject(o,&ll) == REDIS_ERR || - ll <= 0) goto badfmt; - server.maxmemory_samples = ll; - } else if (!strcasecmp(c->argv[2]->ptr,"timeout")) { - if (getLongLongFromObject(o,&ll) == REDIS_ERR || - ll < 0 || ll > LONG_MAX) goto badfmt; - server.maxidletime = ll; - } else if (!strcasecmp(c->argv[2]->ptr,"tcp-keepalive")) { - if (getLongLongFromObject(o,&ll) == REDIS_ERR || - ll < 0 || ll > INT_MAX) goto badfmt; - server.tcpkeepalive = ll; } else if (!strcasecmp(c->argv[2]->ptr,"appendfsync")) { if (!strcasecmp(o->ptr,"no")) { server.aof_fsync = AOF_FSYNC_NO; @@ -722,11 +718,6 @@ void configSetCommand(redisClient *c) { } else { goto badfmt; } - } else if (!strcasecmp(c->argv[2]->ptr,"no-appendfsync-on-rewrite")) { - int yn = yesnotoi(o->ptr); - - if (yn == -1) goto badfmt; - server.aof_no_fsync_on_rewrite = 
yn; } else if (!strcasecmp(c->argv[2]->ptr,"appendonly")) { int enable = yesnotoi(o->ptr); @@ -740,22 +731,6 @@ void configSetCommand(redisClient *c) { return; } } - } else if (!strcasecmp(c->argv[2]->ptr,"auto-aof-rewrite-percentage")) { - if (getLongLongFromObject(o,&ll) == REDIS_ERR || ll < 0) goto badfmt; - server.aof_rewrite_perc = ll; - } else if (!strcasecmp(c->argv[2]->ptr,"auto-aof-rewrite-min-size")) { - if (getLongLongFromObject(o,&ll) == REDIS_ERR || ll < 0) goto badfmt; - server.aof_rewrite_min_size = ll; - } else if (!strcasecmp(c->argv[2]->ptr,"aof-rewrite-incremental-fsync")) { - int yn = yesnotoi(o->ptr); - - if (yn == -1) goto badfmt; - server.aof_rewrite_incremental_fsync = yn; - } else if (!strcasecmp(c->argv[2]->ptr,"aof-load-truncated")) { - int yn = yesnotoi(o->ptr); - - if (yn == -1) goto badfmt; - server.aof_load_truncated = yn; } else if (!strcasecmp(c->argv[2]->ptr,"save")) { int vlen, j; sds *v = sdssplitlen(o->ptr,sdslen(o->ptr)," ",1,&vlen); @@ -790,62 +765,14 @@ void configSetCommand(redisClient *c) { appendServerSaveParams(seconds, changes); } sdsfreesplitres(v,vlen); - } else if (!strcasecmp(c->argv[2]->ptr,"slave-serve-stale-data")) { - int yn = yesnotoi(o->ptr); - - if (yn == -1) goto badfmt; - server.repl_serve_stale_data = yn; - } else if (!strcasecmp(c->argv[2]->ptr,"slave-read-only")) { - int yn = yesnotoi(o->ptr); - - if (yn == -1) goto badfmt; - server.repl_slave_ro = yn; - } else if (!strcasecmp(c->argv[2]->ptr,"activerehashing")) { - int yn = yesnotoi(o->ptr); - - if (yn == -1) goto badfmt; - server.activerehashing = yn; } else if (!strcasecmp(c->argv[2]->ptr,"dir")) { if (chdir((char*)o->ptr) == -1) { addReplyErrorFormat(c,"Changing directory: %s", strerror(errno)); return; } - } else if (!strcasecmp(c->argv[2]->ptr,"hash-max-ziplist-entries")) { - if (getLongLongFromObject(o,&ll) == REDIS_ERR || ll < 0) goto badfmt; - server.hash_max_ziplist_entries = ll; - } else if (!strcasecmp(c->argv[2]->ptr,"hash-max-ziplist-value")) { - if (getLongLongFromObject(o,&ll) == REDIS_ERR || ll < 0) goto badfmt; - server.hash_max_ziplist_value = ll; - } else if (!strcasecmp(c->argv[2]->ptr,"list-max-ziplist-size")) { - if (getLongLongFromObject(o,&ll) == REDIS_ERR || ll < 0) goto badfmt; - server.list_max_ziplist_size = ll; - } else if (!strcasecmp(c->argv[2]->ptr,"list-compress-depth")) { - if (getLongLongFromObject(o,&ll) == REDIS_ERR || ll < 0) goto badfmt; - server.list_compress_depth = ll; - } else if (!strcasecmp(c->argv[2]->ptr,"set-max-intset-entries")) { - if (getLongLongFromObject(o,&ll) == REDIS_ERR || ll < 0) goto badfmt; - server.set_max_intset_entries = ll; - } else if (!strcasecmp(c->argv[2]->ptr,"zset-max-ziplist-entries")) { - if (getLongLongFromObject(o,&ll) == REDIS_ERR || ll < 0) goto badfmt; - server.zset_max_ziplist_entries = ll; - } else if (!strcasecmp(c->argv[2]->ptr,"zset-max-ziplist-value")) { - if (getLongLongFromObject(o,&ll) == REDIS_ERR || ll < 0) goto badfmt; - server.zset_max_ziplist_value = ll; - } else if (!strcasecmp(c->argv[2]->ptr,"hll-sparse-max-bytes")) { - if (getLongLongFromObject(o,&ll) == REDIS_ERR || ll < 0) goto badfmt; - server.hll_sparse_max_bytes = ll; - } else if (!strcasecmp(c->argv[2]->ptr,"lua-time-limit")) { - if (getLongLongFromObject(o,&ll) == REDIS_ERR || ll < 0) goto badfmt; - server.lua_time_limit = ll; - } else if (!strcasecmp(c->argv[2]->ptr,"slowlog-log-slower-than")) { - if (getLongLongFromObject(o,&ll) == REDIS_ERR) goto badfmt; - server.slowlog_log_slower_than = ll; } else if 
(!strcasecmp(c->argv[2]->ptr,"slowlog-max-len")) { if (getLongLongFromObject(o,&ll) == REDIS_ERR || ll < 0) goto badfmt; server.slowlog_max_len = (unsigned)ll; - } else if (!strcasecmp(c->argv[2]->ptr,"latency-monitor-threshold")) { - if (getLongLongFromObject(o,&ll) == REDIS_ERR || ll < 0) goto badfmt; - server.latency_monitor_threshold = ll; } else if (!strcasecmp(c->argv[2]->ptr,"loglevel")) { if (!strcasecmp(o->ptr,"warning")) { server.verbosity = REDIS_WARNING; @@ -903,85 +830,110 @@ void configSetCommand(redisClient *c) { server.client_obuf_limits[class].soft_limit_seconds = soft_seconds; } sdsfreesplitres(v,vlen); - } else if (!strcasecmp(c->argv[2]->ptr,"stop-writes-on-bgsave-error")) { - int yn = yesnotoi(o->ptr); - - if (yn == -1) goto badfmt; - server.stop_writes_on_bgsave_err = yn; - } else if (!strcasecmp(c->argv[2]->ptr,"repl-ping-slave-period")) { - if (getLongLongFromObject(o,&ll) == REDIS_ERR || ll <= 0) goto badfmt; - server.repl_ping_slave_period = ll; - } else if (!strcasecmp(c->argv[2]->ptr,"repl-timeout")) { - if (getLongLongFromObject(o,&ll) == REDIS_ERR || ll <= 0) goto badfmt; - server.repl_timeout = ll; } else if (!strcasecmp(c->argv[2]->ptr,"repl-backlog-size")) { ll = memtoll(o->ptr,&err); if (err || ll < 0) goto badfmt; resizeReplicationBacklog(ll); - } else if (!strcasecmp(c->argv[2]->ptr,"repl-backlog-ttl")) { - if (getLongLongFromObject(o,&ll) == REDIS_ERR || ll < 0) goto badfmt; - server.repl_backlog_time_limit = ll; } else if (!strcasecmp(c->argv[2]->ptr,"watchdog-period")) { if (getLongLongFromObject(o,&ll) == REDIS_ERR || ll < 0) goto badfmt; if (ll) enableWatchdog(ll); else disableWatchdog(); - } else if (!strcasecmp(c->argv[2]->ptr,"rdbcompression")) { - int yn = yesnotoi(o->ptr); - - if (yn == -1) goto badfmt; - server.rdb_compression = yn; } else if (!strcasecmp(c->argv[2]->ptr,"notify-keyspace-events")) { int flags = keyspaceEventsStringToFlags(o->ptr); if (flags == -1) goto badfmt; server.notify_keyspace_events = flags; - } else if (!strcasecmp(c->argv[2]->ptr,"repl-disable-tcp-nodelay")) { - int yn = yesnotoi(o->ptr); - if (yn == -1) goto badfmt; - server.repl_disable_tcp_nodelay = yn; - } else if (!strcasecmp(c->argv[2]->ptr,"repl-diskless-sync")) { - int yn = yesnotoi(o->ptr); + /* Boolean fields. + * config_set_bool_field(name,var). 
*/ + config_set_bool_field( + "rdbcompression", server.rdb_compression) { + } config_set_bool_field( + "repl-disable-tcp-nodelay",server.repl_disable_tcp_nodelay) { + } config_set_bool_field( + "repl-diskless-sync",server.repl_diskless_sync) { + } config_set_bool_field( + "cluster-require-full-coverage",server.cluster_require_full_coverage) { + } config_set_bool_field( + "aof-rewrite-incremental-fsync",server.aof_rewrite_incremental_fsync) { + } config_set_bool_field( + "aof-load-truncated",server.aof_load_truncated) { + } config_set_bool_field( + "slave-serve-stale-data",server.repl_serve_stale_data) { + } config_set_bool_field( + "slave-read-only",server.repl_slave_ro) { + } config_set_bool_field( + "activerehashing",server.activerehashing) { + } config_set_bool_field( + "stop-writes-on-bgsave-error",server.stop_writes_on_bgsave_err) { + } config_set_bool_field( + "tcp-keepalive",server.tcpkeepalive) { - if (yn == -1) goto badfmt; - server.repl_diskless_sync = yn; - } else if (!strcasecmp(c->argv[2]->ptr,"repl-diskless-sync-delay")) { - if (getLongLongFromObject(o,&ll) == REDIS_ERR || - ll < 0) goto badfmt; - server.repl_diskless_sync_delay = ll; - } else if (!strcasecmp(c->argv[2]->ptr,"slave-priority")) { - if (getLongLongFromObject(o,&ll) == REDIS_ERR || - ll < 0) goto badfmt; - server.slave_priority = ll; - } else if (!strcasecmp(c->argv[2]->ptr,"min-slaves-to-write")) { - if (getLongLongFromObject(o,&ll) == REDIS_ERR || - ll < 0) goto badfmt; - server.repl_min_slaves_to_write = ll; + /* Numerical fields. + * config_set_numerical_field(name,var,min,max) */ + } config_set_numerical_field( + "maxmemory-samples",server.maxmemory_samples,1,LLONG_MAX) { + } config_set_numerical_field( + "timeout",server.maxidletime,0,LONG_MAX) { + } config_set_numerical_field( + "auto-aof-rewrite-percentage",server.aof_rewrite_perc,0,LLONG_MAX){ + } config_set_numerical_field( + "auto-aof-rewrite-min-size",server.aof_rewrite_min_size,0,LLONG_MAX) { + } config_set_numerical_field( + "hash-max-ziplist-entries",server.hash_max_ziplist_entries,0,LLONG_MAX) { + } config_set_numerical_field( + "hash-max-ziplist-value",server.hash_max_ziplist_value,0,LLONG_MAX) { + } config_set_numerical_field( + "list-max-ziplist-size",server.list_max_ziplist_size,0,LLONG_MAX) { + } config_set_numerical_field( + "list-compress-depth",server.list_compress_depth,0,LLONG_MAX) { + } config_set_numerical_field( + "set-max-intset-entries",server.set_max_intset_entries,0,LLONG_MAX) { + } config_set_numerical_field( + "zset-max-ziplist-entries",server.zset_max_ziplist_entries,0,LLONG_MAX) { + } config_set_numerical_field( + "zset-max-ziplist-value",server.zset_max_ziplist_value,0,LLONG_MAX) { + } config_set_numerical_field( + "hll-sparse-max-bytes",server.hll_sparse_max_bytes,0,LLONG_MAX) { + } config_set_numerical_field( + "lua-time-limit",server.lua_time_limit,0,LLONG_MAX) { + } config_set_numerical_field( + "slowlog-log-slower-than",server.slowlog_log_slower_than,0,LLONG_MAX) { + } config_set_numerical_field( + "latency-monitor-threshold",server.latency_monitor_threshold,0,LLONG_MAX){ + } config_set_numerical_field( + "repl-ping-slave-period",server.repl_ping_slave_period,1,LLONG_MAX) { + } config_set_numerical_field( + "repl-timeout",server.repl_timeout,1,LLONG_MAX) { + } config_set_numerical_field( + "repl-backlog-ttl",server.repl_backlog_time_limit,0,LLONG_MAX) { + } config_set_numerical_field( + "repl-diskless-sync-delay",server.repl_diskless_sync_delay,0,LLONG_MAX) { + } config_set_numerical_field( + 
"slave-priority",server.slave_priority,0,LLONG_MAX) { + } config_set_numerical_field( + "min-slaves-to-write",server.repl_min_slaves_to_write,0,LLONG_MAX) { refreshGoodSlavesCount(); - } else if (!strcasecmp(c->argv[2]->ptr,"min-slaves-max-lag")) { - if (getLongLongFromObject(o,&ll) == REDIS_ERR || - ll < 0) goto badfmt; - server.repl_min_slaves_max_lag = ll; + } config_set_numerical_field( + "min-slaves-max-lag",server.repl_min_slaves_max_lag,0,LLONG_MAX) { refreshGoodSlavesCount(); - } else if (!strcasecmp(c->argv[2]->ptr,"cluster-require-full-coverage")) { - int yn = yesnotoi(o->ptr); + } config_set_numerical_field( + "cluster-node-timeout",server.cluster_node_timeout,0,LLONG_MAX) { + } config_set_numerical_field( + "cluster-migration-barrier",server.cluster_migration_barrier,0,LLONG_MAX){ + } config_set_numerical_field( + "cluster-slave-validity-factor",server.cluster_slave_validity_factor,0,LLONG_MAX) { + } config_set_numerical_field( + "hz",server.hz,0,LLONG_MAX) { + /* Hz is more an hint from the user, so we accept values out of range + * but cap them to reasonable values. */ + if (server.hz < REDIS_MIN_HZ) server.hz = REDIS_MIN_HZ; + if (server.hz > REDIS_MAX_HZ) server.hz = REDIS_MAX_HZ; + } - if (yn == -1) goto badfmt; - server.cluster_require_full_coverage = yn; - } else if (!strcasecmp(c->argv[2]->ptr,"cluster-node-timeout")) { - if (getLongLongFromObject(o,&ll) == REDIS_ERR || - ll <= 0) goto badfmt; - server.cluster_node_timeout = ll; - } else if (!strcasecmp(c->argv[2]->ptr,"cluster-migration-barrier")) { - if (getLongLongFromObject(o,&ll) == REDIS_ERR || - ll < 0) goto badfmt; - server.cluster_migration_barrier = ll; - } else if (!strcasecmp(c->argv[2]->ptr,"cluster-slave-validity-factor")) { - if (getLongLongFromObject(o,&ll) == REDIS_ERR || - ll < 0) goto badfmt; - server.cluster_slave_validity_factor = ll; + /* Everyhing else is an error... */ } else { addReplyErrorFormat(c,"Unsupported CONFIG parameter: %s", (char*)c->argv[2]->ptr); @@ -1086,7 +1038,6 @@ void configGetCommand(redisClient *c) { config_get_numerical_field("maxmemory",server.maxmemory); config_get_numerical_field("maxmemory-samples",server.maxmemory_samples); config_get_numerical_field("timeout",server.maxidletime); - config_get_numerical_field("tcp-keepalive",server.tcpkeepalive); config_get_numerical_field("auto-aof-rewrite-percentage", server.aof_rewrite_perc); config_get_numerical_field("auto-aof-rewrite-min-size", @@ -1133,6 +1084,7 @@ void configGetCommand(redisClient *c) { config_get_numerical_field("repl-diskless-sync-delay",server.repl_diskless_sync_delay); /* Bool (yes/no) values */ + config_get_bool_field("tcp-keepalive",server.tcpkeepalive); config_get_bool_field("cluster-require-full-coverage", server.cluster_require_full_coverage); config_get_bool_field("no-appendfsync-on-rewrite", From 3da7408359190fe3e5f7dadce1681609439b546e Mon Sep 17 00:00:00 2001 From: antirez Date: Tue, 10 Mar 2015 13:00:33 +0100 Subject: [PATCH 0139/1928] CONFIG SET: additional 2 numerical fields refactored. 
--- src/config.c | 19 ++++++++++--------- 1 file changed, 10 insertions(+), 9 deletions(-) diff --git a/src/config.c b/src/config.c index e1695d5a0..0b78f2ad2 100644 --- a/src/config.c +++ b/src/config.c @@ -770,9 +770,6 @@ void configSetCommand(redisClient *c) { addReplyErrorFormat(c,"Changing directory: %s", strerror(errno)); return; } - } else if (!strcasecmp(c->argv[2]->ptr,"slowlog-max-len")) { - if (getLongLongFromObject(o,&ll) == REDIS_ERR || ll < 0) goto badfmt; - server.slowlog_max_len = (unsigned)ll; } else if (!strcasecmp(c->argv[2]->ptr,"loglevel")) { if (!strcasecmp(o->ptr,"warning")) { server.verbosity = REDIS_WARNING; @@ -834,12 +831,6 @@ void configSetCommand(redisClient *c) { ll = memtoll(o->ptr,&err); if (err || ll < 0) goto badfmt; resizeReplicationBacklog(ll); - } else if (!strcasecmp(c->argv[2]->ptr,"watchdog-period")) { - if (getLongLongFromObject(o,&ll) == REDIS_ERR || ll < 0) goto badfmt; - if (ll) - enableWatchdog(ll); - else - disableWatchdog(); } else if (!strcasecmp(c->argv[2]->ptr,"notify-keyspace-events")) { int flags = keyspaceEventsStringToFlags(o->ptr); @@ -901,6 +892,10 @@ void configSetCommand(redisClient *c) { "lua-time-limit",server.lua_time_limit,0,LLONG_MAX) { } config_set_numerical_field( "slowlog-log-slower-than",server.slowlog_log_slower_than,0,LLONG_MAX) { + } config_set_numerical_field( + "slowlog-max-len",ll,0,LLONG_MAX) { + /* Cast to unsigned. */ + server.slowlog_max_len = (unsigned)ll; } config_set_numerical_field( "latency-monitor-threshold",server.latency_monitor_threshold,0,LLONG_MAX){ } config_set_numerical_field( @@ -931,6 +926,12 @@ void configSetCommand(redisClient *c) { * but cap them to reasonable values. */ if (server.hz < REDIS_MIN_HZ) server.hz = REDIS_MIN_HZ; if (server.hz > REDIS_MAX_HZ) server.hz = REDIS_MAX_HZ; + } config_set_numerical_field( + "watchdog-period",ll,0,LLONG_MAX) { + if (ll) + enableWatchdog(ll); + else + disableWatchdog(); } /* Everyhing else is an error... */ From 6201eb0c55503310a41a0f3448d1581069624303 Mon Sep 17 00:00:00 2001 From: Michel Martens Date: Tue, 10 Mar 2015 16:43:19 +0000 Subject: [PATCH 0140/1928] Add command CLUSTER MYID --- src/cluster.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/src/cluster.c b/src/cluster.c index df1498bdd..f1e767a6d 100644 --- a/src/cluster.c +++ b/src/cluster.c @@ -3760,6 +3760,9 @@ void clusterCommand(redisClient *c) { o = createObject(REDIS_STRING,ci); addReplyBulk(c,o); decrRefCount(o); + } else if (!strcasecmp(c->argv[1]->ptr,"myid") && c->argc == 2) { + /* CLUSTER MYID */ + addReplyBulkCBuffer(c,myself->name, REDIS_CLUSTER_NAMELEN); } else if (!strcasecmp(c->argv[1]->ptr,"slots") && c->argc == 2) { /* CLUSTER SLOTS */ clusterReplyMultiBulkSlots(c); From 4a2a0d9e9d0112b1c870914aaeccf58ebc6c6ef2 Mon Sep 17 00:00:00 2001 From: antirez Date: Wed, 11 Mar 2015 09:02:04 +0100 Subject: [PATCH 0141/1928] CONFIG SET: memory and special field macros. 
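The memory and special field macros added here extend the same else-if chain introduced by the earlier CONFIG SET refactoring. To see the shape of the expanded code in isolation, here is a self-contained toy version of the pattern (macro names, option names, and the memory parser are illustrative and deliberately simplified compared to the Redis macros and memtoll()):

#include <stdio.h>
#include <stdlib.h>
#include <strings.h>   /* strcasecmp(), POSIX */

static long long cfg_hz = 10, cfg_timeout = 0, cfg_maxmemory = 0;

/* Each macro closes the previous branch ('}') and opens a new
 * 'else if' on the option name; the '{ ... }' written at the call
 * site becomes a nested block for per-option follow-up code. */
#define set_numerical_field(_name,_var,_min,_max)         \
    } else if (!strcasecmp(name,_name)) {                 \
        long long ll = strtoll(value,NULL,10);            \
        if (ll < (_min) || ll > (_max)) return -1;        \
        _var = ll;

#define set_memory_field(_name,_var)                      \
    } else if (!strcasecmp(name,_name)) {                 \
        char *end; long long ll = strtoll(value,&end,10); \
        if (!strcasecmp(end,"kb")) ll *= 1024;            \
        else if (!strcasecmp(end,"mb")) ll *= 1024*1024;  \
        else if (*end != '\0') return -1;                 \
        _var = ll;

static int config_set(const char *name, const char *value) {
    if (0) { /* starts the macro-generated else-if chain */

    set_numerical_field("hz",cfg_hz,1,500) {
        /* per-option follow-up code can go here */
    } set_numerical_field("timeout",cfg_timeout,0,86400) {
    } set_memory_field("maxmemory",cfg_maxmemory) {
    }

    } else {
        return -1; /* unknown parameter */
    }
    return 0;
}

int main(void) {
    config_set("hz","100");
    config_set("maxmemory","64mb");
    printf("hz=%lld maxmemory=%lld timeout=%lld\n",
           cfg_hz, cfg_maxmemory, cfg_timeout);
    return 0;
}

The trailing '{' written after each macro invocation opens a nested block for any per-option follow-up code (for example refreshGoodSlavesCount() or the hz clamping in the real code), and the next macro's leading '}' closes the previous 'else if' branch.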
--- src/config.c | 68 ++++++++++++++++++++++++++++++---------------------- 1 file changed, 39 insertions(+), 29 deletions(-) diff --git a/src/config.c b/src/config.c index 0b78f2ad2..7240d4a0b 100644 --- a/src/config.c +++ b/src/config.c @@ -625,14 +625,23 @@ void loadServerConfig(char *filename, char *options) { } else if (!strcasecmp(c->argv[2]->ptr,_name)) { \ int yn = yesnotoi(o->ptr); \ if (yn == -1) goto badfmt; \ - _var = yn; \ + _var = yn; #define config_set_numerical_field(_name,_var,min,max) \ } else if (!strcasecmp(c->argv[2]->ptr,_name)) { \ if (getLongLongFromObject(o,&ll) == REDIS_ERR || ll < 0) goto badfmt; \ if (min != LLONG_MIN && ll < min) goto badfmt; \ if (max != LLONG_MAX && ll > max) goto badfmt; \ - _var = ll; \ + _var = ll; + +#define config_set_memory_field(_name,_var) \ + } else if (!strcasecmp(c->argv[2]->ptr,_name)) { \ + ll = memtoll(o->ptr,&err); \ + if (err || ll < 0) goto badfmt; \ + _var = ll; + +#define config_set_special_field(_name) \ + } else if (!strcasecmp(c->argv[2]->ptr,_name)) { void configSetCommand(redisClient *c) { robj *o; @@ -642,31 +651,24 @@ void configSetCommand(redisClient *c) { redisAssertWithInfo(c,c->argv[3],sdsEncodedObject(c->argv[3])); o = c->argv[3]; - if (!strcasecmp(c->argv[2]->ptr,"dbfilename")) { + if (0) { /* this starts the config_set macros else-if chain. */ + + /* Special fields that can't be handled with general macros. */ + config_set_special_field("dbfilename") { if (!pathIsBaseName(o->ptr)) { addReplyError(c, "dbfilename can't be a path, just a filename"); return; } zfree(server.rdb_filename); server.rdb_filename = zstrdup(o->ptr); - } else if (!strcasecmp(c->argv[2]->ptr,"requirepass")) { + } config_set_special_field("requirepass") { if (sdslen(o->ptr) > REDIS_AUTHPASS_MAX_LEN) goto badfmt; zfree(server.requirepass); server.requirepass = ((char*)o->ptr)[0] ? zstrdup(o->ptr) : NULL; - } else if (!strcasecmp(c->argv[2]->ptr,"masterauth")) { + } config_set_special_field("masterauth") { zfree(server.masterauth); server.masterauth = ((char*)o->ptr)[0] ? zstrdup(o->ptr) : NULL; - } else if (!strcasecmp(c->argv[2]->ptr,"maxmemory")) { - ll = memtoll(o->ptr,&err); - if (err || ll < 0) goto badfmt; - server.maxmemory = ll; - if (server.maxmemory) { - if (server.maxmemory < zmalloc_used_memory()) { - redisLog(REDIS_WARNING,"WARNING: the new maxmemory value set via CONFIG SET is smaller than the current memory usage. 
This will result in keys eviction and/or inability to accept new write commands depending on the maxmemory-policy."); - } - freeMemoryIfNeeded(); - } - } else if (!strcasecmp(c->argv[2]->ptr,"maxclients")) { + } config_set_special_field("maxclients") { int orig_value = server.maxclients; if (getLongLongFromObject(o,&ll) == REDIS_ERR || ll < 1) goto badfmt; @@ -692,7 +694,7 @@ void configSetCommand(redisClient *c) { } } } - } else if (!strcasecmp(c->argv[2]->ptr,"maxmemory-policy")) { + } config_set_special_field("maxmemory-policy") { if (!strcasecmp(o->ptr,"volatile-lru")) { server.maxmemory_policy = REDIS_MAXMEMORY_VOLATILE_LRU; } else if (!strcasecmp(o->ptr,"volatile-random")) { @@ -708,7 +710,7 @@ void configSetCommand(redisClient *c) { } else { goto badfmt; } - } else if (!strcasecmp(c->argv[2]->ptr,"appendfsync")) { + } config_set_special_field("appendfsync") { if (!strcasecmp(o->ptr,"no")) { server.aof_fsync = AOF_FSYNC_NO; } else if (!strcasecmp(o->ptr,"everysec")) { @@ -718,7 +720,7 @@ void configSetCommand(redisClient *c) { } else { goto badfmt; } - } else if (!strcasecmp(c->argv[2]->ptr,"appendonly")) { + } config_set_special_field("appendonly") { int enable = yesnotoi(o->ptr); if (enable == -1) goto badfmt; @@ -731,7 +733,7 @@ void configSetCommand(redisClient *c) { return; } } - } else if (!strcasecmp(c->argv[2]->ptr,"save")) { + } config_set_special_field("save") { int vlen, j; sds *v = sdssplitlen(o->ptr,sdslen(o->ptr)," ",1,&vlen); @@ -765,12 +767,12 @@ void configSetCommand(redisClient *c) { appendServerSaveParams(seconds, changes); } sdsfreesplitres(v,vlen); - } else if (!strcasecmp(c->argv[2]->ptr,"dir")) { + } config_set_special_field("dir") { if (chdir((char*)o->ptr) == -1) { addReplyErrorFormat(c,"Changing directory: %s", strerror(errno)); return; } - } else if (!strcasecmp(c->argv[2]->ptr,"loglevel")) { + } config_set_special_field("loglevel") { if (!strcasecmp(o->ptr,"warning")) { server.verbosity = REDIS_WARNING; } else if (!strcasecmp(o->ptr,"notice")) { @@ -782,7 +784,7 @@ void configSetCommand(redisClient *c) { } else { goto badfmt; } - } else if (!strcasecmp(c->argv[2]->ptr,"client-output-buffer-limit")) { + } config_set_special_field("client-output-buffer-limit") { int vlen, j; sds *v = sdssplitlen(o->ptr,sdslen(o->ptr)," ",1,&vlen); @@ -827,11 +829,7 @@ void configSetCommand(redisClient *c) { server.client_obuf_limits[class].soft_limit_seconds = soft_seconds; } sdsfreesplitres(v,vlen); - } else if (!strcasecmp(c->argv[2]->ptr,"repl-backlog-size")) { - ll = memtoll(o->ptr,&err); - if (err || ll < 0) goto badfmt; - resizeReplicationBacklog(ll); - } else if (!strcasecmp(c->argv[2]->ptr,"notify-keyspace-events")) { + } config_set_special_field("notify-keyspace-events") { int flags = keyspaceEventsStringToFlags(o->ptr); if (flags == -1) goto badfmt; @@ -839,7 +837,7 @@ void configSetCommand(redisClient *c) { /* Boolean fields. * config_set_bool_field(name,var). */ - config_set_bool_field( + } config_set_bool_field( "rdbcompression", server.rdb_compression) { } config_set_bool_field( "repl-disable-tcp-nodelay",server.repl_disable_tcp_nodelay) { @@ -932,6 +930,18 @@ void configSetCommand(redisClient *c) { enableWatchdog(ll); else disableWatchdog(); + + /* Memory fields. + * config_set_memory_field(name,var) */ + } config_set_memory_field("maxmemory",server.maxmemory) { + if (server.maxmemory) { + if (server.maxmemory < zmalloc_used_memory()) { + redisLog(REDIS_WARNING,"WARNING: the new maxmemory value set via CONFIG SET is smaller than the current memory usage. 
This will result in keys eviction and/or inability to accept new write commands depending on the maxmemory-policy."); + } + freeMemoryIfNeeded(); + } + } config_set_memory_field("repl-backlog-size",ll) { + resizeReplicationBacklog(ll); } /* Everyhing else is an error... */ From 8e219224b9c9112806ecc868156fdc0b5e0b6d80 Mon Sep 17 00:00:00 2001 From: antirez Date: Wed, 11 Mar 2015 16:59:56 +0100 Subject: [PATCH 0142/1928] CONFIG refactoring: configEnum abstraction. Still many things to convert inside config.c in the next commits. Some const safety in String objects creation and addReply() family functions. --- src/config.c | 157 ++++++++++++++++++++++++++--------------------- src/networking.c | 18 +++--- src/object.c | 6 +- src/redis.c | 2 +- src/redis.h | 18 +++--- 5 files changed, 108 insertions(+), 93 deletions(-) diff --git a/src/config.c b/src/config.c index 7240d4a0b..bfdc63a8e 100644 --- a/src/config.c +++ b/src/config.c @@ -34,10 +34,26 @@ #include #include -static struct { - const char *name; - const int value; -} validSyslogFacilities[] = { +/*----------------------------------------------------------------------------- + * Config file name-value maps. + *----------------------------------------------------------------------------*/ + +typedef struct configEnum { + const char *name; + const int val; +} configEnum; + +configEnum maxmemory_policy_enum[] = { + {"volatile-lru", REDIS_MAXMEMORY_VOLATILE_LRU}, + {"volatile-random",REDIS_MAXMEMORY_VOLATILE_RANDOM}, + {"volatile-ttl",REDIS_MAXMEMORY_VOLATILE_TTL}, + {"allkeys-lru",REDIS_MAXMEMORY_ALLKEYS_LRU}, + {"allkeys-random",REDIS_MAXMEMORY_ALLKEYS_RANDOM}, + {"noeviction",REDIS_MAXMEMORY_NO_EVICTION}, + {NULL, 0} +}; + +configEnum syslog_facility_enum[] = { {"user", LOG_USER}, {"local0", LOG_LOCAL0}, {"local1", LOG_LOCAL1}, @@ -50,12 +66,47 @@ static struct { {NULL, 0} }; +/* Output buffer limits presets. */ clientBufferLimitsConfig clientBufferLimitsDefaults[REDIS_CLIENT_TYPE_COUNT] = { {0, 0, 0}, /* normal */ {1024*1024*256, 1024*1024*64, 60}, /* slave */ {1024*1024*32, 1024*1024*8, 60} /* pubsub */ }; +/*----------------------------------------------------------------------------- + * Enum access functions + *----------------------------------------------------------------------------*/ + +/* Get enum value from name. If there is no match INT_MIN is returned. */ +int configEnumGetValue(configEnum *ce, char *name) { + while(ce->name != NULL) { + if (!strcasecmp(ce->name,name)) return ce->val; + ce++; + } + return INT_MIN; +} + +/* Get enum name from value. If no match is found NULL is returned. */ +const char *configEnumGetName(configEnum *ce, int val) { + while(ce->name != NULL) { + if (ce->val == val) return ce->name; + ce++; + } + return NULL; +} + +/* Wrapper for configEnumGetName() returning "unknown" insetad of NULL if + * there is no match. */ +const char *configEnumGetNameOrUnknown(configEnum *ce, int val) { + const char *name = configEnumGetName(ce,val); + return name ? name : "unknown"; +} + +/* Used for INFO generation. 
*/ +const char *maxmemoryToString(void) { + return configEnumGetNameOrUnknown(maxmemory_policy_enum,server.maxmemory); +} + /*----------------------------------------------------------------------------- * Config file parsing *----------------------------------------------------------------------------*/ @@ -201,16 +252,9 @@ void loadServerConfigFromString(char *config) { if (server.syslog_ident) zfree(server.syslog_ident); server.syslog_ident = zstrdup(argv[1]); } else if (!strcasecmp(argv[0],"syslog-facility") && argc == 2) { - int i; - - for (i = 0; validSyslogFacilities[i].name; i++) { - if (!strcasecmp(validSyslogFacilities[i].name, argv[1])) { - server.syslog_facility = validSyslogFacilities[i].value; - break; - } - } - - if (!validSyslogFacilities[i].name) { + server.syslog_facility = + configEnumGetValue(syslog_facility_enum,argv[1]); + if (server.syslog_facility == INT_MIN) { err = "Invalid log facility. Must be one of USER or between LOCAL0-LOCAL7"; goto loaderr; } @@ -229,19 +273,9 @@ void loadServerConfigFromString(char *config) { } else if (!strcasecmp(argv[0],"maxmemory") && argc == 2) { server.maxmemory = memtoll(argv[1],NULL); } else if (!strcasecmp(argv[0],"maxmemory-policy") && argc == 2) { - if (!strcasecmp(argv[1],"volatile-lru")) { - server.maxmemory_policy = REDIS_MAXMEMORY_VOLATILE_LRU; - } else if (!strcasecmp(argv[1],"volatile-random")) { - server.maxmemory_policy = REDIS_MAXMEMORY_VOLATILE_RANDOM; - } else if (!strcasecmp(argv[1],"volatile-ttl")) { - server.maxmemory_policy = REDIS_MAXMEMORY_VOLATILE_TTL; - } else if (!strcasecmp(argv[1],"allkeys-lru")) { - server.maxmemory_policy = REDIS_MAXMEMORY_ALLKEYS_LRU; - } else if (!strcasecmp(argv[1],"allkeys-random")) { - server.maxmemory_policy = REDIS_MAXMEMORY_ALLKEYS_RANDOM; - } else if (!strcasecmp(argv[1],"noeviction")) { - server.maxmemory_policy = REDIS_MAXMEMORY_NO_EVICTION; - } else { + server.maxmemory_policy = + configEnumGetValue(maxmemory_policy_enum,argv[1]); + if (server.maxmemory_policy == INT_MIN) { err = "Invalid maxmemory policy"; goto loaderr; } @@ -640,6 +674,12 @@ void loadServerConfig(char *filename, char *options) { if (err || ll < 0) goto badfmt; \ _var = ll; +#define config_set_enum_field(_name,_var,_enumvar) \ + } else if (!strcasecmp(c->argv[2]->ptr,_name)) { \ + int enumval = configEnumGetValue(_enumvar,o->ptr); \ + if (enumval == INT_MIN) goto badfmt; \ + _var = enumval; + #define config_set_special_field(_name) \ } else if (!strcasecmp(c->argv[2]->ptr,_name)) { @@ -694,22 +734,8 @@ void configSetCommand(redisClient *c) { } } } - } config_set_special_field("maxmemory-policy") { - if (!strcasecmp(o->ptr,"volatile-lru")) { - server.maxmemory_policy = REDIS_MAXMEMORY_VOLATILE_LRU; - } else if (!strcasecmp(o->ptr,"volatile-random")) { - server.maxmemory_policy = REDIS_MAXMEMORY_VOLATILE_RANDOM; - } else if (!strcasecmp(o->ptr,"volatile-ttl")) { - server.maxmemory_policy = REDIS_MAXMEMORY_VOLATILE_TTL; - } else if (!strcasecmp(o->ptr,"allkeys-lru")) { - server.maxmemory_policy = REDIS_MAXMEMORY_ALLKEYS_LRU; - } else if (!strcasecmp(o->ptr,"allkeys-random")) { - server.maxmemory_policy = REDIS_MAXMEMORY_ALLKEYS_RANDOM; - } else if (!strcasecmp(o->ptr,"noeviction")) { - server.maxmemory_policy = REDIS_MAXMEMORY_NO_EVICTION; - } else { - goto badfmt; - } + } config_set_enum_field( + "maxmemory-policy",server.maxmemory_policy,maxmemory_policy_enum) { } config_set_special_field("appendfsync") { if (!strcasecmp(o->ptr,"no")) { server.aof_fsync = AOF_FSYNC_NO; @@ -988,19 +1014,13 @@ badfmt: /* Bad 
format errors */ } \ } while(0); -char *maxmemoryToString() { - char *s; - switch(server.maxmemory_policy) { - case REDIS_MAXMEMORY_VOLATILE_LRU: s = "volatile-lru"; break; - case REDIS_MAXMEMORY_VOLATILE_TTL: s = "volatile-ttl"; break; - case REDIS_MAXMEMORY_VOLATILE_RANDOM: s = "volatile-random"; break; - case REDIS_MAXMEMORY_ALLKEYS_LRU: s = "allkeys-lru"; break; - case REDIS_MAXMEMORY_ALLKEYS_RANDOM: s = "allkeys-random"; break; - case REDIS_MAXMEMORY_NO_EVICTION: s = "noeviction"; break; - default: s = "unknown"; break; - } - return s; -} +#define config_get_enum_field(_name,_var,_enumvar) do { \ + if (stringmatch(pattern,_name,0)) { \ + addReplyBulkCString(c,_name); \ + addReplyBulkCString(c,configEnumGetNameOrUnknown(_enumvar,_var)); \ + matches++; \ + } \ +} while(0); int supervisedToMode(const char *str) { int mode; @@ -1029,6 +1049,7 @@ char *supervisedToString(void) { } return s; } + void configGetCommand(redisClient *c) { robj *o = c->argv[2]; void *replylen = addDeferredMultiBulkLength(c); @@ -1119,6 +1140,10 @@ void configGetCommand(redisClient *c) { config_get_bool_field("aof-load-truncated", server.aof_load_truncated); + /* Enum values */ + config_get_enum_field("maxmemory-policy", + server.maxmemory_policy,maxmemory_policy_enum); + /* Everything we can't handle with macros follows. */ if (stringmatch(pattern,"appendonly",0)) { @@ -1136,11 +1161,6 @@ void configGetCommand(redisClient *c) { addReplyBulkCString(c,buf); matches++; } - if (stringmatch(pattern,"maxmemory-policy",0)) { - addReplyBulkCString(c,"maxmemory-policy"); - addReplyBulkCString(c,maxmemoryToString()); - matches++; - } if (stringmatch(pattern,"appendfsync",0)) { char *policy; @@ -1313,7 +1333,7 @@ void rewriteConfigAddLineNumberToOption(struct rewriteConfigState *state, sds op * This is useful as only unused lines of processed options will be blanked * in the config file, while options the rewrite process does not understand * remain untouched. */ -void rewriteConfigMarkAsProcessed(struct rewriteConfigState *state, char *option) { +void rewriteConfigMarkAsProcessed(struct rewriteConfigState *state, const char *option) { sds opt = sdsnew(option); if (dictAdd(state->rewritten,opt,NULL) != DICT_OK) sdsfree(opt); @@ -1397,7 +1417,7 @@ struct rewriteConfigState *rewriteConfigReadOldFile(char *path) { * * "line" is either used, or freed, so the caller does not need to free it * in any way. */ -void rewriteConfigRewriteLine(struct rewriteConfigState *state, char *option, sds line, int force) { +void rewriteConfigRewriteLine(struct rewriteConfigState *state, const char *option, sds line, int force) { sds o = sdsnew(option); list *l = dictFetchValue(state->option_to_line,o); @@ -1536,17 +1556,12 @@ void rewriteConfigEnumOption(struct rewriteConfigState *state, char *option, int /* Rewrite the syslog-facility option. 
*/ void rewriteConfigSyslogfacilityOption(struct rewriteConfigState *state) { - int value = server.syslog_facility, j; + int value = server.syslog_facility; int force = value != LOG_LOCAL0; - char *name = NULL, *option = "syslog-facility"; + const char *name = NULL, *option = "syslog-facility"; sds line; - for (j = 0; validSyslogFacilities[j].name; j++) { - if (validSyslogFacilities[j].value == value) { - name = (char*) validSyslogFacilities[j].name; - break; - } - } + name = configEnumGetNameOrUnknown(syslog_facility_enum,value); line = sdscatprintf(sdsempty(),"%s %s",option,name); rewriteConfigRewriteLine(state,option,line,force); } diff --git a/src/networking.c b/src/networking.c index 58275a219..1125b86fe 100644 --- a/src/networking.c +++ b/src/networking.c @@ -175,7 +175,7 @@ robj *dupLastObjectIfNeeded(list *reply) { * Low level functions to add more data to output buffers. * -------------------------------------------------------------------------- */ -int _addReplyToBuffer(redisClient *c, char *s, size_t len) { +int _addReplyToBuffer(redisClient *c, const char *s, size_t len) { size_t available = sizeof(c->buf)-c->bufpos; if (c->flags & REDIS_CLOSE_AFTER_REPLY) return REDIS_OK; @@ -255,7 +255,7 @@ void _addReplySdsToList(redisClient *c, sds s) { asyncCloseClientOnOutputBufferLimitReached(c); } -void _addReplyStringToList(redisClient *c, char *s, size_t len) { +void _addReplyStringToList(redisClient *c, const char *s, size_t len) { robj *tail; if (c->flags & REDIS_CLOSE_AFTER_REPLY) return; @@ -341,19 +341,19 @@ void addReplySds(redisClient *c, sds s) { } } -void addReplyString(redisClient *c, char *s, size_t len) { +void addReplyString(redisClient *c, const char *s, size_t len) { if (prepareClientToWrite(c) != REDIS_OK) return; if (_addReplyToBuffer(c,s,len) != REDIS_OK) _addReplyStringToList(c,s,len); } -void addReplyErrorLength(redisClient *c, char *s, size_t len) { +void addReplyErrorLength(redisClient *c, const char *s, size_t len) { addReplyString(c,"-ERR ",5); addReplyString(c,s,len); addReplyString(c,"\r\n",2); } -void addReplyError(redisClient *c, char *err) { +void addReplyError(redisClient *c, const char *err) { addReplyErrorLength(c,err,strlen(err)); } @@ -373,13 +373,13 @@ void addReplyErrorFormat(redisClient *c, const char *fmt, ...) 
{ sdsfree(s); } -void addReplyStatusLength(redisClient *c, char *s, size_t len) { +void addReplyStatusLength(redisClient *c, const char *s, size_t len) { addReplyString(c,"+",1); addReplyString(c,s,len); addReplyString(c,"\r\n",2); } -void addReplyStatus(redisClient *c, char *status) { +void addReplyStatus(redisClient *c, const char *status) { addReplyStatusLength(c,status,strlen(status)); } @@ -519,7 +519,7 @@ void addReplyBulk(redisClient *c, robj *obj) { } /* Add a C buffer as bulk reply */ -void addReplyBulkCBuffer(redisClient *c, void *p, size_t len) { +void addReplyBulkCBuffer(redisClient *c, const void *p, size_t len) { addReplyLongLongWithPrefix(c,len,'$'); addReplyString(c,p,len); addReply(c,shared.crlf); @@ -534,7 +534,7 @@ void addReplyBulkSds(redisClient *c, sds s) { } /* Add a C nul term string as bulk reply */ -void addReplyBulkCString(redisClient *c, char *s) { +void addReplyBulkCString(redisClient *c, const char *s) { if (s == NULL) { addReply(c,shared.nullbulk); } else { diff --git a/src/object.c b/src/object.c index 8905db18d..dcd896917 100644 --- a/src/object.c +++ b/src/object.c @@ -50,14 +50,14 @@ robj *createObject(int type, void *ptr) { /* Create a string object with encoding REDIS_ENCODING_RAW, that is a plain * string object where o->ptr points to a proper sds string. */ -robj *createRawStringObject(char *ptr, size_t len) { +robj *createRawStringObject(const char *ptr, size_t len) { return createObject(REDIS_STRING,sdsnewlen(ptr,len)); } /* Create a string object with encoding REDIS_ENCODING_EMBSTR, that is * an object where the sds string is actually an unmodifiable string * allocated in the same chunk as the object itself. */ -robj *createEmbeddedStringObject(char *ptr, size_t len) { +robj *createEmbeddedStringObject(const char *ptr, size_t len) { robj *o = zmalloc(sizeof(robj)+sizeof(struct sdshdr)+len+1); struct sdshdr *sh = (void*)(o+1); @@ -85,7 +85,7 @@ robj *createEmbeddedStringObject(char *ptr, size_t len) { * The current limit of 39 is chosen so that the biggest string object * we allocate as EMBSTR will still fit into the 64 byte arena of jemalloc. 
*/ #define REDIS_ENCODING_EMBSTR_SIZE_LIMIT 39 -robj *createStringObject(char *ptr, size_t len) { +robj *createStringObject(const char *ptr, size_t len) { if (len <= REDIS_ENCODING_EMBSTR_SIZE_LIMIT) return createEmbeddedStringObject(ptr,len); else diff --git a/src/redis.c b/src/redis.c index 86b5a9ebf..7c9ec9028 100644 --- a/src/redis.c +++ b/src/redis.c @@ -2742,7 +2742,7 @@ sds genRedisInfoString(char *section) { char maxmemory_hmem[64]; size_t zmalloc_used = zmalloc_used_memory(); size_t total_system_mem = server.system_memory_size; - char *evict_policy = maxmemoryToString(); + const char *evict_policy = maxmemoryToString(); long long memory_lua = (long long)lua_gc(server.lua,LUA_GCCOUNT,0)*1024; /* Peak memory is updated from time to time by serverCron() so it diff --git a/src/redis.h b/src/redis.h index 232ada5e5..34d8b0a4c 100644 --- a/src/redis.h +++ b/src/redis.h @@ -1053,14 +1053,14 @@ void acceptTcpHandler(aeEventLoop *el, int fd, void *privdata, int mask); void acceptUnixHandler(aeEventLoop *el, int fd, void *privdata, int mask); void readQueryFromClient(aeEventLoop *el, int fd, void *privdata, int mask); void addReplyBulk(redisClient *c, robj *obj); -void addReplyBulkCString(redisClient *c, char *s); -void addReplyBulkCBuffer(redisClient *c, void *p, size_t len); +void addReplyBulkCString(redisClient *c, const char *s); +void addReplyBulkCBuffer(redisClient *c, const void *p, size_t len); void addReplyBulkLongLong(redisClient *c, long long ll); void addReply(redisClient *c, robj *obj); void addReplySds(redisClient *c, sds s); void addReplyBulkSds(redisClient *c, sds s); -void addReplyError(redisClient *c, char *err); -void addReplyStatus(redisClient *c, char *status); +void addReplyError(redisClient *c, const char *err); +void addReplyStatus(redisClient *c, const char *status); void addReplyDouble(redisClient *c, double d); void addReplyLongLong(redisClient *c, long long ll); void addReplyMultiBulkLen(redisClient *c, long length); @@ -1136,9 +1136,9 @@ void freeSetObject(robj *o); void freeZsetObject(robj *o); void freeHashObject(robj *o); robj *createObject(int type, void *ptr); -robj *createStringObject(char *ptr, size_t len); -robj *createRawStringObject(char *ptr, size_t len); -robj *createEmbeddedStringObject(char *ptr, size_t len); +robj *createStringObject(const char *ptr, size_t len); +robj *createRawStringObject(const char *ptr, size_t len); +robj *createEmbeddedStringObject(const char *ptr, size_t len); robj *dupStringObject(robj *o); int isObjectRepresentableAsLongLong(robj *o, long long *llongval); robj *tryObjectEncoding(robj *o); @@ -1274,7 +1274,7 @@ void closeListeningSockets(int unlink_unix_socket); void updateCachedTime(void); void resetServerStats(void); unsigned int getLRUClock(void); -char *maxmemoryToString(void); +const char *maxmemoryToString(void); /* Set data type */ robj *setTypeCreate(robj *value); @@ -1327,7 +1327,7 @@ void loadServerConfig(char *filename, char *options); void appendServerSaveParams(time_t seconds, int changes); void resetServerSaveParams(void); struct rewriteConfigState; /* Forward declaration to export API. 
*/ -void rewriteConfigRewriteLine(struct rewriteConfigState *state, char *option, sds line, int force); +void rewriteConfigRewriteLine(struct rewriteConfigState *state, const char *option, sds line, int force); int rewriteConfig(char *path); /* db.c -- Keyspace access API */ From 535b295f96e9644bff01650c6696ea67cc6f2470 Mon Sep 17 00:00:00 2001 From: antirez Date: Wed, 11 Mar 2015 17:24:55 +0100 Subject: [PATCH 0143/1928] Net: better Unix socket error. Issue #2449. --- src/redis.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/redis.c b/src/redis.c index 7c9ec9028..75975a498 100644 --- a/src/redis.c +++ b/src/redis.c @@ -1781,7 +1781,7 @@ void initServer(void) { server.sofd = anetUnixServer(server.neterr,server.unixsocket, server.unixsocketperm, server.tcp_backlog); if (server.sofd == ANET_ERR) { - redisLog(REDIS_WARNING, "Opening socket: %s", server.neterr); + redisLog(REDIS_WARNING, "Opening Unix socket: %s", server.neterr); exit(1); } anetNonBlock(NULL,server.sofd); From 50b41b6ad3beee554e49fadc667ff0e6b00dfafa Mon Sep 17 00:00:00 2001 From: antirez Date: Wed, 11 Mar 2015 23:20:57 +0100 Subject: [PATCH 0144/1928] CONFIG SET refactoring: use enums in more places. --- src/config.c | 215 ++++++++++++++++----------------------------------- 1 file changed, 65 insertions(+), 150 deletions(-) diff --git a/src/config.c b/src/config.c index bfdc63a8e..64478304e 100644 --- a/src/config.c +++ b/src/config.c @@ -66,6 +66,29 @@ configEnum syslog_facility_enum[] = { {NULL, 0} }; +configEnum loglevel_enum[] = { + {"debug", REDIS_DEBUG}, + {"verbose", REDIS_VERBOSE}, + {"notice", REDIS_NOTICE}, + {"warning", REDIS_WARNING}, + {NULL,0} +}; + +configEnum supervised_mode_enum[] = { + {"upstart", REDIS_SUPERVISED_UPSTART}, + {"systemd", REDIS_SUPERVISED_SYSTEMD}, + {"auto", REDIS_SUPERVISED_AUTODETECT}, + {"no", REDIS_SUPERVISED_NONE}, + {NULL, 0} +}; + +configEnum aof_fsync_enum[] = { + {"everysec", AOF_FSYNC_EVERYSEC}, + {"always", AOF_FSYNC_ALWAYS}, + {"no", AOF_FSYNC_NO}, + {NULL, 0} +}; + /* Output buffer limits presets. */ clientBufferLimitsConfig clientBufferLimitsDefaults[REDIS_CLIENT_TYPE_COUNT] = { {0, 0, 0}, /* normal */ @@ -111,8 +134,6 @@ const char *maxmemoryToString(void) { * Config file parsing *----------------------------------------------------------------------------*/ -int supervisedToMode(const char *str); - int yesnotoi(char *s) { if (!strcasecmp(s,"yes")) return 1; else if (!strcasecmp(s,"no")) return 0; @@ -220,12 +241,10 @@ void loadServerConfigFromString(char *config) { exit(1); } } else if (!strcasecmp(argv[0],"loglevel") && argc == 2) { - if (!strcasecmp(argv[1],"debug")) server.verbosity = REDIS_DEBUG; - else if (!strcasecmp(argv[1],"verbose")) server.verbosity = REDIS_VERBOSE; - else if (!strcasecmp(argv[1],"notice")) server.verbosity = REDIS_NOTICE; - else if (!strcasecmp(argv[1],"warning")) server.verbosity = REDIS_WARNING; - else { - err = "Invalid log level. Must be one of debug, notice, warning"; + server.verbosity = configEnumGetValue(loglevel_enum,argv[1]); + if (server.verbosity == INT_MIN) { + err = "Invalid log level. 
" + "Must be one of debug, notice, warning"; goto loaderr; } } else if (!strcasecmp(argv[0],"logfile") && argc == 2) { @@ -379,13 +398,8 @@ void loadServerConfigFromString(char *config) { err = "argument must be 'yes' or 'no'"; goto loaderr; } } else if (!strcasecmp(argv[0],"appendfsync") && argc == 2) { - if (!strcasecmp(argv[1],"no")) { - server.aof_fsync = AOF_FSYNC_NO; - } else if (!strcasecmp(argv[1],"always")) { - server.aof_fsync = AOF_FSYNC_ALWAYS; - } else if (!strcasecmp(argv[1],"everysec")) { - server.aof_fsync = AOF_FSYNC_EVERYSEC; - } else { + server.aof_fsync = configEnumGetValue(aof_fsync_enum,argv[1]); + if (server.aof_fsync == INT_MIN) { err = "argument must be 'no', 'always' or 'everysec'"; goto loaderr; } @@ -570,14 +584,14 @@ void loadServerConfigFromString(char *config) { } server.notify_keyspace_events = flags; } else if (!strcasecmp(argv[0],"supervised") && argc == 2) { - int mode = supervisedToMode(argv[1]); + server.supervised_mode = + configEnumGetValue(supervised_mode_enum,argv[1]); - if (mode == -1) { + if (server.supervised_mode == INT_MIN) { err = "Invalid option for 'supervised'. " "Allowed values: 'upstart', 'systemd', 'auto', or 'no'"; goto loaderr; } - server.supervised_mode = mode; } else if (!strcasecmp(argv[0],"sentinel")) { /* argc == 1 is handled by main() as we need to enter the sentinel * mode ASAP. */ @@ -683,6 +697,8 @@ void loadServerConfig(char *filename, char *options) { #define config_set_special_field(_name) \ } else if (!strcasecmp(c->argv[2]->ptr,_name)) { +#define config_set_else } else + void configSetCommand(redisClient *c) { robj *o; long long ll; @@ -734,18 +750,6 @@ void configSetCommand(redisClient *c) { } } } - } config_set_enum_field( - "maxmemory-policy",server.maxmemory_policy,maxmemory_policy_enum) { - } config_set_special_field("appendfsync") { - if (!strcasecmp(o->ptr,"no")) { - server.aof_fsync = AOF_FSYNC_NO; - } else if (!strcasecmp(o->ptr,"everysec")) { - server.aof_fsync = AOF_FSYNC_EVERYSEC; - } else if (!strcasecmp(o->ptr,"always")) { - server.aof_fsync = AOF_FSYNC_ALWAYS; - } else { - goto badfmt; - } } config_set_special_field("appendonly") { int enable = yesnotoi(o->ptr); @@ -798,18 +802,6 @@ void configSetCommand(redisClient *c) { addReplyErrorFormat(c,"Changing directory: %s", strerror(errno)); return; } - } config_set_special_field("loglevel") { - if (!strcasecmp(o->ptr,"warning")) { - server.verbosity = REDIS_WARNING; - } else if (!strcasecmp(o->ptr,"notice")) { - server.verbosity = REDIS_NOTICE; - } else if (!strcasecmp(o->ptr,"verbose")) { - server.verbosity = REDIS_VERBOSE; - } else if (!strcasecmp(o->ptr,"debug")) { - server.verbosity = REDIS_DEBUG; - } else { - goto badfmt; - } } config_set_special_field("client-output-buffer-limit") { int vlen, j; sds *v = sdssplitlen(o->ptr,sdslen(o->ptr)," ",1,&vlen); @@ -968,14 +960,24 @@ void configSetCommand(redisClient *c) { } } config_set_memory_field("repl-backlog-size",ll) { resizeReplicationBacklog(ll); - } + + /* Enumeration fields. + * config_set_enum_field(name,var,enum_var) */ + } config_set_enum_field( + "loglevel",server.verbosity,loglevel_enum) { + } config_set_enum_field( + "maxmemory-policy",server.maxmemory_policy,maxmemory_policy_enum) { + } config_set_enum_field( + "appendfsync",server.aof_fsync,aof_fsync_enum) { /* Everyhing else is an error... */ - } else { + } config_set_else { addReplyErrorFormat(c,"Unsupported CONFIG parameter: %s", (char*)c->argv[2]->ptr); return; } + + /* On success we just return a generic OK for all the options. 
*/ addReply(c,shared.ok); return; @@ -1022,34 +1024,6 @@ badfmt: /* Bad format errors */ } \ } while(0); -int supervisedToMode(const char *str) { - int mode; - if (!strcasecmp(str,"upstart")) { - mode = REDIS_SUPERVISED_UPSTART; - } else if (!strcasecmp(str,"systemd")) { - mode = REDIS_SUPERVISED_SYSTEMD; - } else if (!strcasecmp(str,"auto")) { - mode = REDIS_SUPERVISED_AUTODETECT; - } else if (!strcasecmp(str,"no")) { - mode = REDIS_SUPERVISED_NONE; - } else { - mode = -1; - } - return mode; -} - -char *supervisedToString(void) { - char *s; - switch(server.supervised_mode) { - case REDIS_SUPERVISED_UPSTART: s = "upstart"; break; - case REDIS_SUPERVISED_SYSTEMD: s = "systemd"; break; - case REDIS_SUPERVISED_AUTODETECT: s = "auto"; break; - case REDIS_SUPERVISED_NONE: s = "no"; break; - default: s = "no"; break; - } - return s; -} - void configGetCommand(redisClient *c) { robj *o = c->argv[2]; void *replylen = addDeferredMultiBulkLength(c); @@ -1143,6 +1117,12 @@ void configGetCommand(redisClient *c) { /* Enum values */ config_get_enum_field("maxmemory-policy", server.maxmemory_policy,maxmemory_policy_enum); + config_get_enum_field("loglevel", + server.verbosity,loglevel_enum); + config_get_enum_field("supervised", + server.supervised_mode,supervised_mode_enum); + config_get_enum_field("appendfsync", + server.aof_fsync,aof_fsync_enum); /* Everything we can't handle with macros follows. */ @@ -1161,19 +1141,6 @@ void configGetCommand(redisClient *c) { addReplyBulkCString(c,buf); matches++; } - if (stringmatch(pattern,"appendfsync",0)) { - char *policy; - - switch(server.aof_fsync) { - case AOF_FSYNC_NO: policy = "no"; break; - case AOF_FSYNC_EVERYSEC: policy = "everysec"; break; - case AOF_FSYNC_ALWAYS: policy = "always"; break; - default: policy = "unknown"; break; /* too harmless to panic */ - } - addReplyBulkCString(c,"appendfsync"); - addReplyBulkCString(c,policy); - matches++; - } if (stringmatch(pattern,"save",0)) { sds buf = sdsempty(); int j; @@ -1190,25 +1157,6 @@ void configGetCommand(redisClient *c) { sdsfree(buf); matches++; } - if (stringmatch(pattern,"loglevel",0)) { - char *s; - - switch(server.verbosity) { - case REDIS_WARNING: s = "warning"; break; - case REDIS_VERBOSE: s = "verbose"; break; - case REDIS_NOTICE: s = "notice"; break; - case REDIS_DEBUG: s = "debug"; break; - default: s = "unknown"; break; /* too harmless to panic */ - } - addReplyBulkCString(c,"loglevel"); - addReplyBulkCString(c,s); - matches++; - } - if (stringmatch(pattern,"supervised",0)) { - addReplyBulkCString(c,"supervised"); - addReplyBulkCString(c,supervisedToString()); - matches++; - } if (stringmatch(pattern,"client-output-buffer-limit",0)) { sds buf = sdsempty(); int j; @@ -1528,29 +1476,15 @@ void rewriteConfigOctalOption(struct rewriteConfigState *state, char *option, in rewriteConfigRewriteLine(state,option,line,force); } -/* Rewrite an enumeration option, after the "value" every enum/value pair - * is specified, terminated by NULL. After NULL the default value is - * specified. See how the function is used for more information. */ -void rewriteConfigEnumOption(struct rewriteConfigState *state, char *option, int value, ...) { - va_list ap; - char *enum_name, *matching_name = NULL; - int enum_val, def_val, force; +/* Rewrite an enumeration option. It takes as usually state and option name, + * and in addition the enumeration array and the default value for the + * option. 
*/ +void rewriteConfigEnumOption(struct rewriteConfigState *state, char *option, int value, configEnum *ce, int defval) { sds line; + const char *name = configEnumGetNameOrUnknown(ce,value); + int force = value != defval; - va_start(ap, value); - while(1) { - enum_name = va_arg(ap,char*); - enum_val = va_arg(ap,int); - if (enum_name == NULL) { - def_val = enum_val; - break; - } - if (value == enum_val) matching_name = enum_name; - } - va_end(ap); - - force = value != def_val; - line = sdscatprintf(sdsempty(),"%s %s",option,matching_name); + line = sdscatprintf(sdsempty(),"%s %s",option,name); rewriteConfigRewriteLine(state,option,line,force); } @@ -1822,12 +1756,7 @@ int rewriteConfig(char *path) { rewriteConfigOctalOption(state,"unixsocketperm",server.unixsocketperm,REDIS_DEFAULT_UNIX_SOCKET_PERM); rewriteConfigNumericalOption(state,"timeout",server.maxidletime,REDIS_MAXIDLETIME); rewriteConfigNumericalOption(state,"tcp-keepalive",server.tcpkeepalive,REDIS_DEFAULT_TCP_KEEPALIVE); - rewriteConfigEnumOption(state,"loglevel",server.verbosity, - "debug", REDIS_DEBUG, - "verbose", REDIS_VERBOSE, - "notice", REDIS_NOTICE, - "warning", REDIS_WARNING, - NULL, REDIS_DEFAULT_VERBOSITY); + rewriteConfigEnumOption(state,"loglevel",server.verbosity,loglevel_enum,REDIS_DEFAULT_VERBOSITY); rewriteConfigStringOption(state,"logfile",server.logfile,REDIS_DEFAULT_LOGFILE); rewriteConfigYesNoOption(state,"syslog-enabled",server.syslog_enabled,REDIS_DEFAULT_SYSLOG_ENABLED); rewriteConfigStringOption(state,"syslog-ident",server.syslog_ident,REDIS_DEFAULT_SYSLOG_IDENT); @@ -1856,22 +1785,11 @@ int rewriteConfig(char *path) { rewriteConfigStringOption(state,"requirepass",server.requirepass,NULL); rewriteConfigNumericalOption(state,"maxclients",server.maxclients,REDIS_MAX_CLIENTS); rewriteConfigBytesOption(state,"maxmemory",server.maxmemory,REDIS_DEFAULT_MAXMEMORY); - rewriteConfigEnumOption(state,"maxmemory-policy",server.maxmemory_policy, - "volatile-lru", REDIS_MAXMEMORY_VOLATILE_LRU, - "allkeys-lru", REDIS_MAXMEMORY_ALLKEYS_LRU, - "volatile-random", REDIS_MAXMEMORY_VOLATILE_RANDOM, - "allkeys-random", REDIS_MAXMEMORY_ALLKEYS_RANDOM, - "volatile-ttl", REDIS_MAXMEMORY_VOLATILE_TTL, - "noeviction", REDIS_MAXMEMORY_NO_EVICTION, - NULL, REDIS_DEFAULT_MAXMEMORY_POLICY); + rewriteConfigEnumOption(state,"maxmemory-policy",server.maxmemory_policy,maxmemory_policy_enum,REDIS_DEFAULT_MAXMEMORY_POLICY); rewriteConfigNumericalOption(state,"maxmemory-samples",server.maxmemory_samples,REDIS_DEFAULT_MAXMEMORY_SAMPLES); rewriteConfigYesNoOption(state,"appendonly",server.aof_state != REDIS_AOF_OFF,0); rewriteConfigStringOption(state,"appendfilename",server.aof_filename,REDIS_DEFAULT_AOF_FILENAME); - rewriteConfigEnumOption(state,"appendfsync",server.aof_fsync, - "everysec", AOF_FSYNC_EVERYSEC, - "always", AOF_FSYNC_ALWAYS, - "no", AOF_FSYNC_NO, - NULL, REDIS_DEFAULT_AOF_FSYNC); + rewriteConfigEnumOption(state,"appendfsync",server.aof_fsync,aof_fsync_enum,REDIS_DEFAULT_AOF_FSYNC); rewriteConfigYesNoOption(state,"no-appendfsync-on-rewrite",server.aof_no_fsync_on_rewrite,REDIS_DEFAULT_AOF_NO_FSYNC_ON_REWRITE); rewriteConfigNumericalOption(state,"auto-aof-rewrite-percentage",server.aof_rewrite_perc,REDIS_AOF_REWRITE_PERC); rewriteConfigBytesOption(state,"auto-aof-rewrite-min-size",server.aof_rewrite_min_size,REDIS_AOF_REWRITE_MIN_SIZE); @@ -1899,12 +1817,9 @@ int rewriteConfig(char *path) { rewriteConfigNumericalOption(state,"hz",server.hz,REDIS_DEFAULT_HZ); 
rewriteConfigYesNoOption(state,"aof-rewrite-incremental-fsync",server.aof_rewrite_incremental_fsync,REDIS_DEFAULT_AOF_REWRITE_INCREMENTAL_FSYNC); rewriteConfigYesNoOption(state,"aof-load-truncated",server.aof_load_truncated,REDIS_DEFAULT_AOF_LOAD_TRUNCATED); - rewriteConfigEnumOption(state,"supervised",server.supervised_mode, - "upstart", REDIS_SUPERVISED_UPSTART, - "systemd", REDIS_SUPERVISED_SYSTEMD, - "auto", REDIS_SUPERVISED_AUTODETECT, - "no", REDIS_SUPERVISED_NONE, - NULL, REDIS_SUPERVISED_NONE); + rewriteConfigEnumOption(state,"supervised",server.supervised_mode,supervised_mode_enum,REDIS_SUPERVISED_NONE); + + /* Rewrite Sentinel config if in Sentinel mode. */ if (server.sentinel_mode) rewriteConfigSentinelOption(state); /* Step 3: remove all the orphaned lines in the old file, that is, lines From 792c531688d81bbb1a6f4ca1b575f7137c24c8b9 Mon Sep 17 00:00:00 2001 From: antirez Date: Thu, 12 Mar 2015 09:59:10 +0100 Subject: [PATCH 0145/1928] CONFIG GET syslog-facility added. Was missing for some reason. Trivial to add after config.c refactoring. --- src/config.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/config.c b/src/config.c index 64478304e..df829c21b 100644 --- a/src/config.c +++ b/src/config.c @@ -1123,6 +1123,8 @@ void configGetCommand(redisClient *c) { server.supervised_mode,supervised_mode_enum); config_get_enum_field("appendfsync", server.aof_fsync,aof_fsync_enum); + config_get_enum_field("syslog-facility", + server.syslog_facility,syslog_facility_enum); /* Everything we can't handle with macros follows. */ From b2e8eca70d3cf0c52c251f5a0a050d13846336a1 Mon Sep 17 00:00:00 2001 From: antirez Date: Thu, 12 Mar 2015 14:43:07 +0100 Subject: [PATCH 0146/1928] Config: improve loglevel message error. --- src/config.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/config.c b/src/config.c index df829c21b..6d51b1b08 100644 --- a/src/config.c +++ b/src/config.c @@ -244,7 +244,7 @@ void loadServerConfigFromString(char *config) { server.verbosity = configEnumGetValue(loglevel_enum,argv[1]); if (server.verbosity == INT_MIN) { err = "Invalid log level. " - "Must be one of debug, notice, warning"; + "Must be one of debug, verbose, notice, warning"; goto loaderr; } } else if (!strcasecmp(argv[0],"logfile") && argc == 2) { From e1b6c9dd1839f5ce46ce06e87f6b4095e3f92a4f Mon Sep 17 00:00:00 2001 From: antirez Date: Fri, 13 Mar 2015 11:26:04 +0100 Subject: [PATCH 0147/1928] Cluster: CLUSTER NODES speedup. --- src/cluster.c | 48 ++++++++++++++++++++++++++++++++---------------- 1 file changed, 32 insertions(+), 16 deletions(-) diff --git a/src/cluster.c b/src/cluster.c index f1e767a6d..1e9d8867e 100644 --- a/src/cluster.c +++ b/src/cluster.c @@ -3547,28 +3547,30 @@ sds representRedisNodeFlags(sds ci, uint16_t flags) { /* Generate a csv-alike representation of the specified cluster node. * See clusterGenNodesDescription() top comment for more information. * - * The function returns the string representation as an SDS string. */ -sds clusterGenNodeDescription(clusterNode *node) { + * The function appends the node representation to the SDS string 'ci' and + * returns it (that may point to a different string as usually with the + * SDS-style API). 
*/ +sds clusterGenNodeDescription(sds ci, clusterNode *node) { int j, start; - sds ci; /* Node coordinates */ - ci = sdscatprintf(sdsempty(),"%.40s %s:%d ", - node->name, - node->ip, - node->port); + ci = sdscatlen(ci,node->name,40); + ci = sdscatfmt(ci," %s:%i ",node->ip,node->port); /* Flags */ ci = representRedisNodeFlags(ci, node->flags); /* Slave of... or just "-" */ - if (node->slaveof) - ci = sdscatprintf(ci," %.40s ",node->slaveof->name); - else + if (node->slaveof) { + ci = sdscatlen(ci," ",1); + ci = sdscatlen(ci,node->slaveof->name,40); + ci = sdscatlen(ci," ",1); + } else { ci = sdscatlen(ci," - ",3); + } /* Latency from the POV of this node, link status */ - ci = sdscatprintf(ci,"%lld %lld %llu %s", + ci = sdscatfmt(ci,"%I %I %U %s", (long long) node->ping_sent, (long long) node->pong_received, (unsigned long long) node->configEpoch, @@ -3580,6 +3582,19 @@ sds clusterGenNodeDescription(clusterNode *node) { for (j = 0; j < REDIS_CLUSTER_SLOTS; j++) { int bit; + /* It is common for a node to have pretty contiguous slots, so + * optimize this loop by skipping whole 32bit words if they have + * no set bits. We stop to the penultimate word because last word + * has special handling when start != -1 (later in the loop). */ + if ((j&31)==0 && j < REDIS_CLUSTER_SLOTS-32) { + uint32_t *slotword = ((uint32_t*)node->slots)+(j/32); + if ((start == -1 && *slotword == 0) || + (start != -1 && *slotword == UINT32_MAX)) { + j += 32; + continue; + } + } + if ((bit = clusterNodeGetSlotBit(node,j)) != 0) { if (start == -1) start = j; } @@ -3625,18 +3640,19 @@ sds clusterGenNodeDescription(clusterNode *node) { * of the CLUSTER NODES function, and as format for the cluster * configuration file (nodes.conf) for a given node. */ sds clusterGenNodesDescription(int filter) { - sds ci = sdsempty(), ni; + sds ci = sdsempty(); dictIterator *di; dictEntry *de; + /* Make room to avoid multiple resizes of the buffer. */ + ci = sdsMakeRoomFor(ci,256*dictSize(server.cluster->nodes)); + di = dictGetSafeIterator(server.cluster->nodes); while((de = dictNext(di)) != NULL) { clusterNode *node = dictGetVal(de); if (node->flags & filter) continue; - ni = clusterGenNodeDescription(node); - ci = sdscatsds(ci,ni); - sdsfree(ni); + ci = clusterGenNodeDescription(ci,node); ci = sdscatlen(ci,"\n",1); } dictReleaseIterator(di); @@ -4093,7 +4109,7 @@ void clusterCommand(redisClient *c) { addReplyMultiBulkLen(c,n->numslaves); for (j = 0; j < n->numslaves; j++) { - sds ni = clusterGenNodeDescription(n->slaves[j]); + sds ni = clusterGenNodeDescription(sdsempty(),n->slaves[j]); addReplyBulkCString(c,ni); sdsfree(ni); } From 4ed7582c7ba37dae64491e52f8b0d310e95298b8 Mon Sep 17 00:00:00 2001 From: antirez Date: Fri, 13 Mar 2015 11:32:51 +0100 Subject: [PATCH 0148/1928] Cluster: ignore various node files in create-cluster dir. --- utils/create-cluster/.gitignore | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/utils/create-cluster/.gitignore b/utils/create-cluster/.gitignore index cdd7c19c8..2988ee919 100644 --- a/utils/create-cluster/.gitignore +++ b/utils/create-cluster/.gitignore @@ -1 +1,5 @@ config.sh +*.rdb +*.aof +*.conf +*.log From 93b1320fac8eae53fd9fca570a1784cbd2a89d27 Mon Sep 17 00:00:00 2001 From: antirez Date: Fri, 13 Mar 2015 13:16:30 +0100 Subject: [PATCH 0149/1928] Cluster: fix CLUSTER NODES optimization error in 'j' increment. 
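The word-skipping optimization added in the CLUSTER NODES speedup commit runs inside a "for (j = 0; ...; j++)" loop, so the loop header adds one more increment after the "continue". Advancing j by 32 therefore makes the next examined index j+33: the first slot of the following word is never tested and the scan falls out of alignment with the (j&31)==0 boundary check. Advancing by 31 keeps the scan aligned. A minimal stand-alone sketch of the index arithmetic only (illustration, it does not model the bitmap test itself):

    #include <stdio.h>

    /* Print which word boundaries the scan actually visits. */
    static void scan(int skip) {
        int j;
        printf("skip=%d:", skip);
        for (j = 0; j < 96; j++) {
            if ((j & 31) == 0) {      /* start of a 32-slot word */
                printf(" word@%d", j);
                j += skip;            /* the for loop still adds 1 */
                continue;
            }
        }
        printf("\n");
    }

    int main(void) {
        scan(32);   /* visits word@0 and word@64 only */
        scan(31);   /* visits word@0, word@32 and word@64, as intended */
        return 0;
    }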
--- src/cluster.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/cluster.c b/src/cluster.c index 1e9d8867e..d4c6300d3 100644 --- a/src/cluster.c +++ b/src/cluster.c @@ -3590,7 +3590,7 @@ sds clusterGenNodeDescription(sds ci, clusterNode *node) { uint32_t *slotword = ((uint32_t*)node->slots)+(j/32); if ((start == -1 && *slotword == 0) || (start != -1 && *slotword == UINT32_MAX)) { - j += 32; + j += 31; /* The for loop will increment j one more time. */ continue; } } From e791e2dda19b83307790527ecd5f0086322a1478 Mon Sep 17 00:00:00 2001 From: antirez Date: Fri, 13 Mar 2015 17:30:13 +0100 Subject: [PATCH 0150/1928] Test: fix SPOP replication test count. If count is 0 SADD is called without element arguments, which is currently invalid. --- tests/integration/replication-4.tcl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/integration/replication-4.tcl b/tests/integration/replication-4.tcl index 3af2e921d..1c559b706 100644 --- a/tests/integration/replication-4.tcl +++ b/tests/integration/replication-4.tcl @@ -135,7 +135,7 @@ start_server {tags {"repl"}} { test {Replication of SPOP command -- alsoPropagate() API} { $master del myset - set size [randomInt 100] + set size [expr 1+[randomInt 100]] set content {} for {set j 0} {$j < $size} {incr j} { lappend content [randomValue] From 438a1a84e8aa37a19a744c38488b261dd62c145c Mon Sep 17 00:00:00 2001 From: antirez Date: Wed, 18 Mar 2015 12:09:31 +0100 Subject: [PATCH 0151/1928] Cluster: more robust slave check in CLUSTER REPLICATE. There are rare conditions where node->slaveof may be NULL even if the node is a slave. To check by flag is much more robust. --- src/cluster.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/cluster.c b/src/cluster.c index d4c6300d3..e24b15d2b 100644 --- a/src/cluster.c +++ b/src/cluster.c @@ -4071,7 +4071,7 @@ void clusterCommand(redisClient *c) { } /* Can't replicate a slave. */ - if (n->slaveof != NULL) { + if (nodeIsSlave(n)) { addReplyError(c,"I can only replicate a master, not a slave."); return; } From 2ecb5edf3454e3752a428e50b6ee068e357c7fae Mon Sep 17 00:00:00 2001 From: antirez Date: Fri, 20 Mar 2015 09:46:10 +0100 Subject: [PATCH 0152/1928] Cluster: move clusterBeforeSleep() call before unblocked clients processing. Related to issue #2288. --- src/redis.c | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/src/redis.c b/src/redis.c index 75975a498..48e7f24ea 100644 --- a/src/redis.c +++ b/src/redis.c @@ -1260,6 +1260,12 @@ int serverCron(struct aeEventLoop *eventLoop, long long id, void *clientData) { void beforeSleep(struct aeEventLoop *eventLoop) { REDIS_NOTUSED(eventLoop); + /* Call the Redis Cluster before sleep function. Note that this function + * may change the state of Redis Cluster (frok ok to fail or vice versa), + * so it's a good idea to call it before serving the unblocked clients + * later in this function. */ + if (server.cluster_enabled) clusterBeforeSleep(); + /* Run a fast expire cycle (the called function will return * ASAP if a fast cycle is not needed). */ if (server.active_expire_enabled && server.masterhost == NULL) @@ -1291,9 +1297,6 @@ void beforeSleep(struct aeEventLoop *eventLoop) { /* Write the AOF buffer on disk */ flushAppendOnlyFile(0); - - /* Call the Redis Cluster before sleep function. 
*/ - if (server.cluster_enabled) clusterBeforeSleep(); } /* =========================== Server initialization ======================== */ From 25c0f5ac63320e85f81750bd0f999fc6f5eba5c1 Mon Sep 17 00:00:00 2001 From: antirez Date: Fri, 20 Mar 2015 09:57:10 +0100 Subject: [PATCH 0153/1928] Cluster: better cluster state transiction handling. Before we relied on the global cluster state to make sure all the hash slots are linked to some node, when getNodeByQuery() is called. So finding the hash slot unbound was checked with an assertion. However this is fragile. The cluster state is often updated in the clusterBeforeSleep() function, and not ASAP on state change, so it may happen to process clients with a cluster state that is 'ok' but yet certain hash slots set to NULL. With this commit the condition is also checked in getNodeByQuery() and reported with a identical error code of -CLUSTERDOWN but slightly different error message so that we have more debugging clue in the future. Root cause of issue #2288. --- src/cluster.c | 20 ++++++++++++++++++-- src/cluster.h | 1 + src/redis.c | 2 ++ 3 files changed, 21 insertions(+), 2 deletions(-) diff --git a/src/cluster.c b/src/cluster.c index e24b15d2b..a5f682ba8 100644 --- a/src/cluster.c +++ b/src/cluster.c @@ -4703,7 +4703,12 @@ void readwriteCommand(redisClient *c) { * * REDIS_CLUSTER_REDIR_UNSTABLE if the request contains mutliple keys * belonging to the same slot, but the slot is not stable (in migration or - * importing state, likely because a resharding is in progress). */ + * importing state, likely because a resharding is in progress). + * + * REDIS_CLUSTER_REDIR_DOWN if the request addresses a slot which is not + * bound to any node. In this case the cluster global state should be already + * "down" but it is fragile to rely on the update of the global state, so + * we also handle it here. */ clusterNode *getNodeByQuery(redisClient *c, struct redisCommand *cmd, robj **argv, int argc, int *hashslot, int *error_code) { clusterNode *n = NULL; robj *firstkey = NULL; @@ -4757,7 +4762,18 @@ clusterNode *getNodeByQuery(redisClient *c, struct redisCommand *cmd, robj **arg firstkey = thiskey; slot = thisslot; n = server.cluster->slots[slot]; - redisAssertWithInfo(c,firstkey,n != NULL); + + /* Error: If a slot is not served, we are in "cluster down" + * state. However the state is yet to be updated, so this was + * not trapped earlier in processCommand(). Report the same + * error to the client. */ + if (n == NULL) { + getKeysFreeResult(keyindex); + if (error_code) + *error_code = REDIS_CLUSTER_REDIR_DOWN; + return NULL; + } + /* If we are migrating or importing this slot, we need to check * if we have all the keys in the request (the only way we * can safely serve the request, otherwise we return a TRYAGAIN diff --git a/src/cluster.h b/src/cluster.h index ef5caf0d6..8eaa0ab98 100644 --- a/src/cluster.h +++ b/src/cluster.h @@ -30,6 +30,7 @@ #define REDIS_CLUSTER_REDIR_UNSTABLE 2 /* Keys in slot resharding. */ #define REDIS_CLUSTER_REDIR_ASK 3 /* -ASK redirection required. */ #define REDIS_CLUSTER_REDIR_MOVED 4 /* -MOVED redirection required. */ +#define REDIS_CLUSTER_REDIR_DOWN 5 /* -CLUSTERDOWN error. */ struct clusterNode; diff --git a/src/redis.c b/src/redis.c index 48e7f24ea..e216109c1 100644 --- a/src/redis.c +++ b/src/redis.c @@ -2219,6 +2219,8 @@ int processCommand(redisClient *c) { * but the slot is not "stable" currently as there is * a migration or import in progress. 
*/ addReplySds(c,sdsnew("-TRYAGAIN Multiple keys request during rehashing of slot\r\n")); + } else if (error_code == REDIS_CLUSTER_REDIR_DOWN) { + addReplySds(c,sdsnew("-CLUSTERDOWN The cluster is down. Hash slot is unbound\r\n")); } else { redisPanic("getNodeByQuery() unknown error."); } From 4f2555aa17504b1f99a5f35a69302cc425b30d74 Mon Sep 17 00:00:00 2001 From: antirez Date: Fri, 20 Mar 2015 16:42:49 +0100 Subject: [PATCH 0154/1928] Cluster: refactoring around configEpoch handling. This commit moves the process of generating a new config epoch without consensus out of the clusterCommand() implementation, in order to make it reusable for other reasons (current target is to have a CLUSTER FAILOVER option forcing the failover when no master majority is reachable). Moreover the commit moves other functions which are similarly related to config epochs in a new logical section of the cluster.c file, just for clarity. --- src/cluster.c | 281 ++++++++++++++++++++++++++++---------------------- 1 file changed, 157 insertions(+), 124 deletions(-) diff --git a/src/cluster.c b/src/cluster.c index a5f682ba8..4ff8fb4d6 100644 --- a/src/cluster.c +++ b/src/cluster.c @@ -74,27 +74,13 @@ void clusterCloseAllSlots(void); void clusterSetNodeAsMaster(clusterNode *n); void clusterDelNode(clusterNode *delnode); sds representRedisNodeFlags(sds ci, uint16_t flags); +uint64_t clusterGetMaxEpoch(void); +int clusterBumpConfigEpochWithoutConsensus(void); /* ----------------------------------------------------------------------------- * Initialization * -------------------------------------------------------------------------- */ -/* Return the greatest configEpoch found in the cluster. */ -uint64_t clusterGetMaxEpoch(void) { - uint64_t max = 0; - dictIterator *di; - dictEntry *de; - - di = dictGetSafeIterator(server.cluster->nodes); - while((de = dictNext(di)) != NULL) { - clusterNode *node = dictGetVal(de); - if (node->configEpoch > max) max = node->configEpoch; - } - dictReleaseIterator(di); - if (max < server.cluster->currentEpoch) max = server.cluster->currentEpoch; - return max; -} - /* Load the cluster config from 'filename'. * * If the file does not exist or is zero-length (this may happen because @@ -927,6 +913,137 @@ void clusterRenameNode(clusterNode *node, char *newname) { clusterAddNode(node); } +/* ----------------------------------------------------------------------------- + * CLUSTER config epoch handling + * -------------------------------------------------------------------------- */ + +/* Return the greatest configEpoch found in the cluster. */ +uint64_t clusterGetMaxEpoch(void) { + uint64_t max = 0; + dictIterator *di; + dictEntry *de; + + di = dictGetSafeIterator(server.cluster->nodes); + while((de = dictNext(di)) != NULL) { + clusterNode *node = dictGetVal(de); + if (node->configEpoch > max) max = node->configEpoch; + } + dictReleaseIterator(di); + if (max < server.cluster->currentEpoch) max = server.cluster->currentEpoch; + return max; +} + +/* If this node epoch is zero or is not already the greatest across the + * cluster (from the POV of the local configuration), this function will: + * + * 1) Generate a new config epoch increment the current epoch. + * 2) Assign the new epoch to this node, WITHOUT any consensus. + * 3) Persist the configuration on disk before sending packets with the + * new configuration. 
+ * + * If the new config epoch is generated and assigend, REDIS_OK is returned, + * otherwise REDIS_ERR is returned (since the node has already the greatest + * configuration around) and no operation is performed. + * + * Important note: this function violates the principle that config epochs + * should be generated with consensus and should be unique across the cluster. + * However Redis Cluster uses this auto-generated new config epochs in two + * cases: + * + * 1) When slots are closed after importing. Otherwise resharding would be + * too exansive. + * 2) When CLUSTER FAILOVER is called with options that force a slave to + * failover its master even if there is not master majority able to + * create a new configuration epoch. + * + * Redis Cluster does not explode using this function, even in the case of + * a collision between this node and another node, generating the same + * configuration epoch unilaterally, because the config epoch conflict + * resolution algorithm will eventually move colliding nodes to different + * config epochs. However usign this function may violate the "last failover + * wins" rule, so should only be used with care. */ +int clusterBumpConfigEpochWithoutConsensus(void) { + uint64_t maxEpoch = clusterGetMaxEpoch(); + + if (myself->configEpoch == 0 || + myself->configEpoch != maxEpoch) + { + server.cluster->currentEpoch++; + myself->configEpoch = server.cluster->currentEpoch; + clusterDoBeforeSleep(CLUSTER_TODO_SAVE_CONFIG| + CLUSTER_TODO_FSYNC_CONFIG); + redisLog(REDIS_WARNING, + "New configEpoch set to %llu", + (unsigned long long) myself->configEpoch); + return REDIS_OK; + } else { + return REDIS_ERR; + } +} + +/* This function is called when this node is a master, and we receive from + * another master a configuration epoch that is equal to our configuration + * epoch. + * + * BACKGROUND + * + * It is not possible that different slaves get the same config + * epoch during a failover election, because the slaves need to get voted + * by a majority. However when we perform a manual resharding of the cluster + * the node will assign a configuration epoch to itself without to ask + * for agreement. Usually resharding happens when the cluster is working well + * and is supervised by the sysadmin, however it is possible for a failover + * to happen exactly while the node we are resharding a slot to assigns itself + * a new configuration epoch, but before it is able to propagate it. + * + * So technically it is possible in this condition that two nodes end with + * the same configuration epoch. + * + * Another possibility is that there are bugs in the implementation causing + * this to happen. + * + * Moreover when a new cluster is created, all the nodes start with the same + * configEpoch. This collision resolution code allows nodes to automatically + * end with a different configEpoch at startup automatically. + * + * In all the cases, we want a mechanism that resolves this issue automatically + * as a safeguard. The same configuration epoch for masters serving different + * set of slots is not harmful, but it is if the nodes end serving the same + * slots for some reason (manual errors or software bugs) without a proper + * failover procedure. + * + * In general we want a system that eventually always ends with different + * masters having different configuration epochs whatever happened, since + * nothign is worse than a split-brain condition in a distributed system. 
+ * + * BEHAVIOR + * + * When this function gets called, what happens is that if this node + * has the lexicographically smaller Node ID compared to the other node + * with the conflicting epoch (the 'sender' node), it will assign itself + * the greatest configuration epoch currently detected among nodes plus 1. + * + * This means that even if there are multiple nodes colliding, the node + * with the greatest Node ID never moves forward, so eventually all the nodes + * end with a different configuration epoch. + */ +void clusterHandleConfigEpochCollision(clusterNode *sender) { + /* Prerequisites: nodes have the same configEpoch and are both masters. */ + if (sender->configEpoch != myself->configEpoch || + !nodeIsMaster(sender) || !nodeIsMaster(myself)) return; + /* Don't act if the colliding node has a smaller Node ID. */ + if (memcmp(sender->name,myself->name,REDIS_CLUSTER_NAMELEN) <= 0) return; + /* Get the next ID available at the best of this node knowledge. */ + server.cluster->currentEpoch++; + myself->configEpoch = server.cluster->currentEpoch; + clusterSaveConfigOrDie(1); + redisLog(REDIS_VERBOSE, + "WARNING: configEpoch collision with node %.40s." + " configEpoch set to %llu", + sender->name, + (unsigned long long) myself->configEpoch); +} + /* ----------------------------------------------------------------------------- * CLUSTER nodes blacklist * @@ -1399,69 +1516,6 @@ void clusterUpdateSlotsConfigWith(clusterNode *sender, uint64_t senderConfigEpoc } } -/* This function is called when this node is a master, and we receive from - * another master a configuration epoch that is equal to our configuration - * epoch. - * - * BACKGROUND - * - * It is not possible that different slaves get the same config - * epoch during a failover election, because the slaves need to get voted - * by a majority. However when we perform a manual resharding of the cluster - * the node will assign a configuration epoch to itself without to ask - * for agreement. Usually resharding happens when the cluster is working well - * and is supervised by the sysadmin, however it is possible for a failover - * to happen exactly while the node we are resharding a slot to assigns itself - * a new configuration epoch, but before it is able to propagate it. - * - * So technically it is possible in this condition that two nodes end with - * the same configuration epoch. - * - * Another possibility is that there are bugs in the implementation causing - * this to happen. - * - * Moreover when a new cluster is created, all the nodes start with the same - * configEpoch. This collision resolution code allows nodes to automatically - * end with a different configEpoch at startup automatically. - * - * In all the cases, we want a mechanism that resolves this issue automatically - * as a safeguard. The same configuration epoch for masters serving different - * set of slots is not harmful, but it is if the nodes end serving the same - * slots for some reason (manual errors or software bugs) without a proper - * failover procedure. - * - * In general we want a system that eventually always ends with different - * masters having different configuration epochs whatever happened, since - * nothign is worse than a split-brain condition in a distributed system. 
- * - * BEHAVIOR - * - * When this function gets called, what happens is that if this node - * has the lexicographically smaller Node ID compared to the other node - * with the conflicting epoch (the 'sender' node), it will assign itself - * the greatest configuration epoch currently detected among nodes plus 1. - * - * This means that even if there are multiple nodes colliding, the node - * with the greatest Node ID never moves forward, so eventually all the nodes - * end with a different configuration epoch. - */ -void clusterHandleConfigEpochCollision(clusterNode *sender) { - /* Prerequisites: nodes have the same configEpoch and are both masters. */ - if (sender->configEpoch != myself->configEpoch || - !nodeIsMaster(sender) || !nodeIsMaster(myself)) return; - /* Don't act if the colliding node has a smaller Node ID. */ - if (memcmp(sender->name,myself->name,REDIS_CLUSTER_NAMELEN) <= 0) return; - /* Get the next ID available at the best of this node knowledge. */ - server.cluster->currentEpoch++; - myself->configEpoch = server.cluster->currentEpoch; - clusterSaveConfigOrDie(1); - redisLog(REDIS_VERBOSE, - "WARNING: configEpoch collision with node %.40s." - " configEpoch set to %llu", - sender->name, - (unsigned long long) myself->configEpoch); -} - /* When this function is called, there is a packet to process starting * at node->rcvbuf. Releasing the buffer is up to the caller, so this * function should just handle the higher level stuff of processing the @@ -3547,30 +3601,28 @@ sds representRedisNodeFlags(sds ci, uint16_t flags) { /* Generate a csv-alike representation of the specified cluster node. * See clusterGenNodesDescription() top comment for more information. * - * The function appends the node representation to the SDS string 'ci' and - * returns it (that may point to a different string as usually with the - * SDS-style API). */ -sds clusterGenNodeDescription(sds ci, clusterNode *node) { + * The function returns the string representation as an SDS string. */ +sds clusterGenNodeDescription(clusterNode *node) { int j, start; + sds ci; /* Node coordinates */ - ci = sdscatlen(ci,node->name,40); - ci = sdscatfmt(ci," %s:%i ",node->ip,node->port); + ci = sdscatprintf(sdsempty(),"%.40s %s:%d ", + node->name, + node->ip, + node->port); /* Flags */ ci = representRedisNodeFlags(ci, node->flags); /* Slave of... or just "-" */ - if (node->slaveof) { - ci = sdscatlen(ci," ",1); - ci = sdscatlen(ci,node->slaveof->name,40); - ci = sdscatlen(ci," ",1); - } else { + if (node->slaveof) + ci = sdscatprintf(ci," %.40s ",node->slaveof->name); + else ci = sdscatlen(ci," - ",3); - } /* Latency from the POV of this node, link status */ - ci = sdscatfmt(ci,"%I %I %U %s", + ci = sdscatprintf(ci,"%lld %lld %llu %s", (long long) node->ping_sent, (long long) node->pong_received, (unsigned long long) node->configEpoch, @@ -3582,19 +3634,6 @@ sds clusterGenNodeDescription(sds ci, clusterNode *node) { for (j = 0; j < REDIS_CLUSTER_SLOTS; j++) { int bit; - /* It is common for a node to have pretty contiguous slots, so - * optimize this loop by skipping whole 32bit words if they have - * no set bits. We stop to the penultimate word because last word - * has special handling when start != -1 (later in the loop). */ - if ((j&31)==0 && j < REDIS_CLUSTER_SLOTS-32) { - uint32_t *slotword = ((uint32_t*)node->slots)+(j/32); - if ((start == -1 && *slotword == 0) || - (start != -1 && *slotword == UINT32_MAX)) { - j += 31; /* The for loop will increment j one more time. 
*/ - continue; - } - } - if ((bit = clusterNodeGetSlotBit(node,j)) != 0) { if (start == -1) start = j; } @@ -3640,19 +3679,18 @@ sds clusterGenNodeDescription(sds ci, clusterNode *node) { * of the CLUSTER NODES function, and as format for the cluster * configuration file (nodes.conf) for a given node. */ sds clusterGenNodesDescription(int filter) { - sds ci = sdsempty(); + sds ci = sdsempty(), ni; dictIterator *di; dictEntry *de; - /* Make room to avoid multiple resizes of the buffer. */ - ci = sdsMakeRoomFor(ci,256*dictSize(server.cluster->nodes)); - di = dictGetSafeIterator(server.cluster->nodes); while((de = dictNext(di)) != NULL) { clusterNode *node = dictGetVal(de); if (node->flags & filter) continue; - ci = clusterGenNodeDescription(ci,node); + ni = clusterGenNodeDescription(node); + ci = sdscatsds(ci,ni); + sdsfree(ni); ci = sdscatlen(ci,"\n",1); } dictReleaseIterator(di); @@ -3918,17 +3956,9 @@ void clusterCommand(redisClient *c) { * failover happens at the same time we close the slot, the * configEpoch collision resolution will fix it assigning * a different epoch to each node. */ - uint64_t maxEpoch = clusterGetMaxEpoch(); - - if (myself->configEpoch == 0 || - myself->configEpoch != maxEpoch) - { - server.cluster->currentEpoch++; - myself->configEpoch = server.cluster->currentEpoch; - clusterDoBeforeSleep(CLUSTER_TODO_FSYNC_CONFIG); + if (clusterBumpConfigEpochWithoutConsensus() == REDIS_OK) { redisLog(REDIS_WARNING, - "configEpoch set to %llu after importing slot %d", - (unsigned long long) myself->configEpoch, slot); + "configEpoch updated after importing slot %d", slot); } server.cluster->importing_slots_from[slot] = NULL; } @@ -3989,7 +4019,10 @@ void clusterCommand(redisClient *c) { server.cluster->stats_bus_messages_sent, server.cluster->stats_bus_messages_received ); - addReplyBulkSds(c, info); + addReplySds(c,sdscatprintf(sdsempty(),"$%lu\r\n", + (unsigned long)sdslen(info))); + addReplySds(c,info); + addReply(c,shared.crlf); } else if (!strcasecmp(c->argv[1]->ptr,"saveconfig") && c->argc == 2) { int retval = clusterSaveConfig(1); @@ -4109,7 +4142,7 @@ void clusterCommand(redisClient *c) { addReplyMultiBulkLen(c,n->numslaves); for (j = 0; j < n->numslaves; j++) { - sds ni = clusterGenNodeDescription(sdsempty(),n->slaves[j]); + sds ni = clusterGenNodeDescription(n->slaves[j]); addReplyBulkCString(c,ni); sdsfree(ni); } @@ -4526,7 +4559,7 @@ try_again: /* Check if the key is here. If not we reply with success as there is * nothing to migrate (for instance the key expired in the meantime), but * we include such information in the reply string. */ - if ((o = lookupKeyWrite(c->db,c->argv[3])) == NULL) { + if ((o = lookupKeyRead(c->db,c->argv[3])) == NULL) { addReplySds(c,sdsnew("+NOKEY\r\n")); return; } @@ -4579,7 +4612,7 @@ try_again: { sds buf = cmd.io.buffer.ptr; size_t pos = 0, towrite; - ssize_t nwritten = 0; + int nwritten = 0; while ((towrite = sdslen(buf)-pos) > 0) { towrite = (towrite > (64*1024) ? (64*1024) : towrite); From 230d14142032bd054efceab21801068b5aa8a330 Mon Sep 17 00:00:00 2001 From: antirez Date: Fri, 20 Mar 2015 16:56:44 +0100 Subject: [PATCH 0155/1928] Cluster: separate unknown master check from the rest. In no case we should try to attempt to failover if myself->slaveof is NULL. 
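After the change the precondition chain rejects an unknown master unconditionally, while a failed or disconnected master can still be overridden with FORCE. A hypothetical, self-contained condensation of that ordering (made-up names; the real checks live in clusterCommand() and are shown in the diff below):

    #include <stdio.h>

    static const char *failover_precheck(int i_am_master, int master_known,
                                         int master_healthy, int force) {
        if (i_am_master)   return "ERR only a slave can failover";
        if (!master_known) return "ERR master unknown";   /* never overridable */
        if (!master_healthy && !force)
            return "ERR master is down, use CLUSTER FAILOVER FORCE";
        return "OK";
    }

    int main(void) {
        /* Unknown master: refused even with FORCE. */
        printf("%s\n", failover_precheck(0, 0, 0, 1));
        /* Failed master: FORCE is accepted. */
        printf("%s\n", failover_precheck(0, 1, 0, 1));
        return 0;
    }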
--- src/cluster.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/src/cluster.c b/src/cluster.c index 4ff8fb4d6..74468cfae 100644 --- a/src/cluster.c +++ b/src/cluster.c @@ -4176,9 +4176,12 @@ void clusterCommand(redisClient *c) { if (nodeIsMaster(myself)) { addReplyError(c,"You should send CLUSTER FAILOVER to a slave"); return; + } else if (myself->slaveof == NULL) { + addReplyError(c,"I'm a slave but my master is unknown to me"); + return; } else if (!force && - (myself->slaveof == NULL || nodeFailed(myself->slaveof) || - myself->slaveof->link == NULL)) + (nodeFailed(myself->slaveof) || + myself->slaveof->link == NULL)) { addReplyError(c,"Master is down or failed, " "please use CLUSTER FAILOVER FORCE"); From a7010ae20813658c5cd3b6f1a6651ec5cb0909e2 Mon Sep 17 00:00:00 2001 From: antirez Date: Fri, 20 Mar 2015 17:55:22 +0100 Subject: [PATCH 0156/1928] Cluster: non-conditional steps of slave failover refactored into a function. --- src/cluster.c | 67 ++++++++++++++++++++++++++++++--------------------- 1 file changed, 40 insertions(+), 27 deletions(-) diff --git a/src/cluster.c b/src/cluster.c index 74468cfae..4f445f5d7 100644 --- a/src/cluster.c +++ b/src/cluster.c @@ -2636,6 +2636,42 @@ void clusterLogCantFailover(int reason) { redisLog(REDIS_WARNING,"Currently unable to failover: %s", msg); } +/* This function implements the final part of automatic and manual failovers, + * where the slave grabs its master's hash slots, and propagates the new + * configuration. + * + * Note that it's up to the caller to be sure that the node got a new + * configuration epoch already. */ +void clusterFailoverReplaceYourMaster(void) { + int j; + clusterNode *oldmaster = myself->slaveof; + + if (nodeIsMaster(myself) || oldmaster == NULL) return; + + /* 1) Turn this node into a master. */ + clusterSetNodeAsMaster(myself); + replicationUnsetMaster(); + + /* 2) Claim all the slots assigned to our master. */ + for (j = 0; j < REDIS_CLUSTER_SLOTS; j++) { + if (clusterNodeGetSlotBit(oldmaster,j)) { + clusterDelSlot(j); + clusterAddSlot(myself,j); + } + } + + /* 3) Update state and save config. */ + clusterUpdateState(); + clusterSaveConfigOrDie(1); + + /* 4) Pong all the other nodes so that they can update the state + * accordingly and detect that we switched to master role. */ + clusterBroadcastPong(CLUSTER_BROADCAST_ALL); + + /* 5) If there was a manual failover in progress, clear the state. */ + resetManualFailover(); +} + /* This function is called if we are a slave node and our master serving * a non-zero amount of hash slots is in FAIL state. * @@ -2650,7 +2686,6 @@ void clusterHandleSlaveFailover(void) { int needed_quorum = (server.cluster->size / 2) + 1; int manual_failover = server.cluster->mf_end != 0 && server.cluster->mf_can_start; - int j; mstime_t auth_timeout, auth_retry_time; server.cluster->todo_before_sleep &= ~CLUSTER_TODO_HANDLE_FAILOVER; @@ -2792,26 +2827,12 @@ void clusterHandleSlaveFailover(void) { /* Check if we reached the quorum. */ if (server.cluster->failover_auth_count >= needed_quorum) { - clusterNode *oldmaster = myself->slaveof; + /* We have the quorum, we can finally failover the master. */ redisLog(REDIS_WARNING, "Failover election won: I'm the new master."); - /* We have the quorum, perform all the steps to correctly promote - * this slave to a master. - * - * 1) Turn this node into a master. */ - clusterSetNodeAsMaster(myself); - replicationUnsetMaster(); - /* 2) Claim all the slots assigned to our master. 
*/ - for (j = 0; j < REDIS_CLUSTER_SLOTS; j++) { - if (clusterNodeGetSlotBit(oldmaster,j)) { - clusterDelSlot(j); - clusterAddSlot(myself,j); - } - } - - /* 3) Update my configEpoch to the epoch of the election. */ + /* Update my configEpoch to the epoch of the election. */ if (myself->configEpoch < server.cluster->failover_auth_epoch) { myself->configEpoch = server.cluster->failover_auth_epoch; redisLog(REDIS_WARNING, @@ -2819,16 +2840,8 @@ void clusterHandleSlaveFailover(void) { (unsigned long long) myself->configEpoch); } - /* 4) Update state and save config. */ - clusterUpdateState(); - clusterSaveConfigOrDie(1); - - /* 5) Pong all the other nodes so that they can update the state - * accordingly and detect that we switched to master role. */ - clusterBroadcastPong(CLUSTER_BROADCAST_ALL); - - /* 6) If there was a manual failover in progress, clear the state. */ - resetManualFailover(); + /* Take responsability for the cluster slots. */ + clusterFailoverReplaceYourMaster(); } else { clusterLogCantFailover(REDIS_CLUSTER_CANT_FAILOVER_WAITING_VOTES); } From 5fe4a2313177a8c9bcd231dacda38317da17a3cf Mon Sep 17 00:00:00 2001 From: antirez Date: Sat, 21 Mar 2015 09:03:46 +0100 Subject: [PATCH 0157/1928] Net: clientsArePaused() should not touch blocked clients. When the list of unblocked clients were processed, btype was set to blocking type none, but the client remained flagged with REDIS_BLOCKED. When timeout is reached (or when the client disconnects), unblocking it will trigger an assertion. There is no need to process pending requests from blocked clients, so now clientsArePaused() just avoid touching blocked clients. Close #2467. --- src/networking.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/networking.c b/src/networking.c index 1125b86fe..5c8f56dbf 100644 --- a/src/networking.c +++ b/src/networking.c @@ -1689,7 +1689,7 @@ int clientsArePaused(void) { while ((ln = listNext(&li)) != NULL) { c = listNodeValue(ln); - if (c->flags & REDIS_SLAVE) continue; + if (c->flags & (REDIS_SLAVE|REDIS_BLOCKED)) continue; listAddNodeTail(server.unblocked_clients,c); } } From 2b278a3394ab6b32b4bdcb7db56323649622f784 Mon Sep 17 00:00:00 2001 From: antirez Date: Sat, 21 Mar 2015 09:13:29 +0100 Subject: [PATCH 0158/1928] Net: processUnblockedClients() and clientsArePaused() minor changes. 1. No need to set btype in processUnblockedClients(), since clients flagged REDIS_UNBLOCKED should have it already cleared. 2. When putting clients in the unblocked clients list, clientsArePaused() should flag them with REDIS_UNBLOCKED. Not strictly needed with the current code but is more coherent. --- src/blocked.c | 1 - src/networking.c | 7 ++++++- 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/src/blocked.c b/src/blocked.c index ef0d5246d..3509dd134 100644 --- a/src/blocked.c +++ b/src/blocked.c @@ -114,7 +114,6 @@ void processUnblockedClients(void) { c = ln->value; listDelNode(server.unblocked_clients,ln); c->flags &= ~REDIS_UNBLOCKED; - c->btype = REDIS_BLOCKED_NONE; /* Process remaining data in the input buffer. */ if (c->querybuf && sdslen(c->querybuf) > 0) { diff --git a/src/networking.c b/src/networking.c index 5c8f56dbf..ba35e487c 100644 --- a/src/networking.c +++ b/src/networking.c @@ -1676,7 +1676,9 @@ void pauseClients(mstime_t end) { /* Return non-zero if clients are currently paused. As a side effect the * function checks if the pause time was reached and clear it. 
*/ int clientsArePaused(void) { - if (server.clients_paused && server.clients_pause_end_time < server.mstime) { + if (server.clients_paused && + server.clients_pause_end_time < server.mstime) + { listNode *ln; listIter li; redisClient *c; @@ -1689,7 +1691,10 @@ int clientsArePaused(void) { while ((ln = listNext(&li)) != NULL) { c = listNodeValue(ln); + /* Don't touch slaves and blocked clients. The latter pending + * requests be processed when unblocked. */ if (c->flags & (REDIS_SLAVE|REDIS_BLOCKED)) continue; + c->flags |= REDIS_UNBLOCKED; listAddNodeTail(server.unblocked_clients,c); } } From d544600aa524e008338bf690ced7377876d5a657 Mon Sep 17 00:00:00 2001 From: antirez Date: Sat, 21 Mar 2015 09:19:02 +0100 Subject: [PATCH 0159/1928] Fix typo in beforeSleep() comment. --- src/redis.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/redis.c b/src/redis.c index e216109c1..3f887f3e7 100644 --- a/src/redis.c +++ b/src/redis.c @@ -1261,7 +1261,7 @@ void beforeSleep(struct aeEventLoop *eventLoop) { REDIS_NOTUSED(eventLoop); /* Call the Redis Cluster before sleep function. Note that this function - * may change the state of Redis Cluster (frok ok to fail or vice versa), + * may change the state of Redis Cluster (from ok to fail or vice versa), * so it's a good idea to call it before serving the unblocked clients * later in this function. */ if (server.cluster_enabled) clusterBeforeSleep(); From 2950824ab67e5fef59aae87bf4c0ada8ab39d570 Mon Sep 17 00:00:00 2001 From: antirez Date: Sat, 21 Mar 2015 11:54:32 +0100 Subject: [PATCH 0160/1928] Cluster: TAKEOVER option for manual failover. --- src/cluster.c | 27 ++++++++++++++++++++------- 1 file changed, 20 insertions(+), 7 deletions(-) diff --git a/src/cluster.c b/src/cluster.c index 4f445f5d7..17270b0f4 100644 --- a/src/cluster.c +++ b/src/cluster.c @@ -4174,18 +4174,22 @@ void clusterCommand(redisClient *c) { } else if (!strcasecmp(c->argv[1]->ptr,"failover") && (c->argc == 2 || c->argc == 3)) { - /* CLUSTER FAILOVER [FORCE] */ - int force = 0; + /* CLUSTER FAILOVER [FORCE|TAKEOVER] */ + int force = 0, takeover = 0; if (c->argc == 3) { if (!strcasecmp(c->argv[2]->ptr,"force")) { force = 1; + } else if (!strcasecmp(c->argv[2]->ptr,"takeover")) { + takeover = 1; + force = 1; /* Takeover also implies force. */ } else { addReply(c,shared.syntaxerr); return; } } + /* Check preconditions. */ if (nodeIsMaster(myself)) { addReplyError(c,"You should send CLUSTER FAILOVER to a slave"); return; @@ -4203,15 +4207,24 @@ void clusterCommand(redisClient *c) { resetManualFailover(); server.cluster->mf_end = mstime() + REDIS_CLUSTER_MF_TIMEOUT; - /* If this is a forced failover, we don't need to talk with our master - * to agree about the offset. We just failover taking over it without - * coordination. */ - if (force) { + if (takeover) { + /* A takeover does not perform any initial check. It just + * generates a new configuration epoch for this node without + * consensus, claims the master's slots, and broadcast the new + * configuration. */ + redisLog(REDIS_WARNING,"Taking over the master (user request)."); + clusterBumpConfigEpochWithoutConsensus(); + clusterFailoverReplaceYourMaster(); + } else if (force) { + /* If this is a forced failover, we don't need to talk with our + * master to agree about the offset. We just failover taking over + * it without coordination. 
*/ + redisLog(REDIS_WARNING,"Forced failover user request accepted."); server.cluster->mf_can_start = 1; } else { + redisLog(REDIS_WARNING,"Manual failover user request accepted."); clusterSendMFStart(myself->slaveof); } - redisLog(REDIS_WARNING,"Manual failover user request accepted."); addReply(c,shared.ok); } else if (!strcasecmp(c->argv[1]->ptr,"set-config-epoch") && c->argc == 3) { From 94030fa4d7962a8e241ad27cadbc71a0f1b61d1b Mon Sep 17 00:00:00 2001 From: antirez Date: Sat, 21 Mar 2015 12:12:23 +0100 Subject: [PATCH 0161/1928] Two cluster.c comments improved. --- src/cluster.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/src/cluster.c b/src/cluster.c index 17270b0f4..e1ae92f02 100644 --- a/src/cluster.c +++ b/src/cluster.c @@ -917,7 +917,8 @@ void clusterRenameNode(clusterNode *node, char *newname) { * CLUSTER config epoch handling * -------------------------------------------------------------------------- */ -/* Return the greatest configEpoch found in the cluster. */ +/* Return the greatest configEpoch found in the cluster, or the current + * epoch if greater than any node configEpoch. */ uint64_t clusterGetMaxEpoch(void) { uint64_t max = 0; dictIterator *di; @@ -3634,7 +3635,7 @@ sds clusterGenNodeDescription(clusterNode *node) { else ci = sdscatlen(ci," - ",3); - /* Latency from the POV of this node, link status */ + /* Latency from the POV of this node, config epoch, link status */ ci = sdscatprintf(ci,"%lld %lld %llu %s", (long long) node->ping_sent, (long long) node->pong_received, From 2f4240b9d9e36b83fcd6bf2525484effabe69298 Mon Sep 17 00:00:00 2001 From: antirez Date: Sun, 22 Mar 2015 22:23:41 +0100 Subject: [PATCH 0162/1928] Cluster: fix Lua scripts replication to slave nodes. --- src/redis.c | 2 ++ src/scripting.c | 5 +++-- 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/src/redis.c b/src/redis.c index 3f887f3e7..81fa7be2f 100644 --- a/src/redis.c +++ b/src/redis.c @@ -2199,6 +2199,8 @@ int processCommand(redisClient *c) { * 2) The command has no key arguments. */ if (server.cluster_enabled && !(c->flags & REDIS_MASTER) && + !(c->flags & REDIS_LUA_CLIENT && + server.lua_caller->flags & REDIS_MASTER) && !(c->cmd->getkeys_proc == NULL && c->cmd->firstkey == 0)) { int hashslot; diff --git a/src/scripting.c b/src/scripting.c index c5dd4e718..4f807f4e2 100644 --- a/src/scripting.c +++ b/src/scripting.c @@ -357,8 +357,9 @@ int luaRedisGenericCommand(lua_State *lua, int raise_error) { if (cmd->flags & REDIS_CMD_WRITE) server.lua_write_dirty = 1; /* If this is a Redis Cluster node, we need to make sure Lua is not - * trying to access non-local keys. */ - if (server.cluster_enabled) { + * trying to access non-local keys, with the exception of commands + * received from our master. */ + if (server.cluster_enabled && !(server.lua_caller->flags & REDIS_MASTER)) { /* Duplicate relevant flags in the lua client. */ c->flags &= ~(REDIS_READONLY|REDIS_ASKING); c->flags |= server.lua_caller->flags & (REDIS_READONLY|REDIS_ASKING); From 3b4de6aa18b9937632c1e18042a11f5c8ce5b4b6 Mon Sep 17 00:00:00 2001 From: antirez Date: Sun, 22 Mar 2015 22:24:49 +0100 Subject: [PATCH 0163/1928] Cluster: new tests1 for manual failover and scripts replication. 
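The new unit keeps writing during a manual failover and then verifies the cluster content against a local copy. Writes alternate between plain RPUSH and EVAL of a small script, so the Lua -> Redis command path is stressed as well, including the non-local-key protection in scripting.c that the previous commit relaxed for scripts arriving over the master link. A hypothetical, self-contained condensation of that guard (the flag value is made up; the real code tests server.cluster_enabled and server.lua_caller->flags):

    #include <stdio.h>

    #define FLAG_MASTER (1<<1)   /* made-up stand-in for REDIS_MASTER */

    /* Return non-zero if the script's keys must be checked for locality. */
    static int lua_cluster_checks_enabled(int cluster_enabled, int caller_flags) {
        return cluster_enabled && !(caller_flags & FLAG_MASTER);
    }

    int main(void) {
        printf("%d\n", lua_cluster_checks_enabled(1, 0));           /* 1: normal client */
        printf("%d\n", lua_cluster_checks_enabled(1, FLAG_MASTER)); /* 0: master link */
        return 0;
    }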
--- tests/cluster/tests/10-manual-failover.tcl | 95 ++++++++++++++++++++++ 1 file changed, 95 insertions(+) create mode 100644 tests/cluster/tests/10-manual-failover.tcl diff --git a/tests/cluster/tests/10-manual-failover.tcl b/tests/cluster/tests/10-manual-failover.tcl new file mode 100644 index 000000000..43fcecd8e --- /dev/null +++ b/tests/cluster/tests/10-manual-failover.tcl @@ -0,0 +1,95 @@ +# Check the basic monitoring and failover capabilities. + +source "../tests/includes/init-tests.tcl" + +test "Create a 5 nodes cluster" { + create_cluster 5 5 +} + +test "Cluster is up" { + assert_cluster_state ok +} + +test "Cluster is writable" { + cluster_write_test 0 +} + +test "Instance #5 is a slave" { + assert {[RI 5 role] eq {slave}} +} + +test "Instance #5 synced with the master" { + wait_for_condition 1000 50 { + [RI 5 master_link_status] eq {up} + } else { + fail "Instance #5 master link status is not up" + } +} + +set current_epoch [CI 1 cluster_current_epoch] + +set numkeys 50000 +set numops 10000 +set cluster [redis_cluster 127.0.0.1:[get_instance_attrib redis 0 port]] +catch {unset content} +array set content {} + +test "Send CLUSTER FAILOVER to #5, during load" { + for {set j 0} {$j < $numops} {incr j} { + # Write random data to random list. + set listid [randomInt $numkeys] + set key "key:$listid" + set ele [randomValue] + # We write both with Lua scripts and with plain commands. + # This way we are able to stress Lua -> Redis command invocation + # as well, that has tests to prevent Lua to write into wrong + # hash slots. + if {$listid % 2} { + $cluster rpush $key $ele + } else { + $cluster eval {redis.call("rpush",KEYS[1],ARGV[1])} 1 $key $ele + } + lappend content($key) $ele + + if {($j % 1000) == 0} { + puts -nonewline W; flush stdout + } + + if {$j == $numops/2} {R 5 cluster failover} + } +} + +test "Wait for failover" { + wait_for_condition 1000 50 { + [CI 1 cluster_current_epoch] > $current_epoch + } else { + fail "No failover detected" + } +} + +test "Cluster should eventually be up again" { + assert_cluster_state ok +} + +test "Cluster is writable" { + cluster_write_test 1 +} + +test "Instance #5 is now a master" { + assert {[RI 5 role] eq {master}} +} + +test "Verify $numkeys keys for consistency with logical content" { + # Check that the Redis Cluster content matches our logical content. + foreach {key value} [array get content] { + assert {[$cluster lrange $key 0 -1] eq $value} + } +} + +test "Instance #0 gets converted into a slave" { + wait_for_condition 1000 50 { + [RI 0 role] eq {slave} + } else { + fail "Old master was not converted into slave" + } +} From 631538cfe06c16c3e12fae63d1d81a26ce73070e Mon Sep 17 00:00:00 2001 From: antirez Date: Sun, 22 Mar 2015 22:44:02 +0100 Subject: [PATCH 0164/1928] Cluster: more tests for manual failover + FORCE. --- tests/cluster/tests/10-manual-failover.tcl | 94 +++++++++++++++++++++- 1 file changed, 93 insertions(+), 1 deletion(-) diff --git a/tests/cluster/tests/10-manual-failover.tcl b/tests/cluster/tests/10-manual-failover.tcl index 43fcecd8e..5fa3d32bf 100644 --- a/tests/cluster/tests/10-manual-failover.tcl +++ b/tests/cluster/tests/10-manual-failover.tcl @@ -1,4 +1,4 @@ -# Check the basic monitoring and failover capabilities. +# Check the manual failover source "../tests/includes/init-tests.tcl" @@ -93,3 +93,95 @@ test "Instance #0 gets converted into a slave" { fail "Old master was not converted into slave" } } + +## Check that manual failover does not happen if we can't talk with the master. 
+ +source "../tests/includes/init-tests.tcl" + +test "Create a 5 nodes cluster" { + create_cluster 5 5 +} + +test "Cluster is up" { + assert_cluster_state ok +} + +test "Cluster is writable" { + cluster_write_test 0 +} + +test "Instance #5 is a slave" { + assert {[RI 5 role] eq {slave}} +} + +test "Instance #5 synced with the master" { + wait_for_condition 1000 50 { + [RI 5 master_link_status] eq {up} + } else { + fail "Instance #5 master link status is not up" + } +} + +test "Make instance #0 unreachable without killing it" { + R 0 deferred 1 + R 0 DEBUG SLEEP 10 +} + +test "Send CLUSTER FAILOVER to instance #5" { + R 5 cluster failover +} + +test "Instance #5 is still a slave after some time (no failover)" { + after 5000 + assert {[RI 5 role] eq {master}} +} + +test "Wait for instance #0 to return back alive" { + R 0 deferred 0 + assert {[R 0 read] eq {OK}} +} + +## Check with "force" failover happens anyway. + +source "../tests/includes/init-tests.tcl" + +test "Create a 5 nodes cluster" { + create_cluster 5 5 +} + +test "Cluster is up" { + assert_cluster_state ok +} + +test "Cluster is writable" { + cluster_write_test 0 +} + +test "Instance #5 is a slave" { + assert {[RI 5 role] eq {slave}} +} + +test "Instance #5 synced with the master" { + wait_for_condition 1000 50 { + [RI 5 master_link_status] eq {up} + } else { + fail "Instance #5 master link status is not up" + } +} + +test "Make instance #0 unreachable without killing it" { + R 0 deferred 1 + R 0 DEBUG SLEEP 10 +} + +test "Send CLUSTER FAILOVER to instance #5" { + R 5 cluster failover force +} + +test "Instance #5 is a master after some time" { + wait_for_condition 1000 50 { + [RI 5 role] eq {master} + } else { + fail "Instance #5 is not a master after some time regardless of FORCE" + } +} From f300680408c3c444f773dd0de27f8641d4cb7184 Mon Sep 17 00:00:00 2001 From: antirez Date: Sun, 22 Mar 2015 22:44:23 +0100 Subject: [PATCH 0165/1928] Cluster: CLUSTER FAILOVER TAKEOVER tests. 
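This unit kills a majority of the masters, so a vote-based failover cannot win an election, and then promotes the orphaned slaves with CLUSTER FAILOVER TAKEOVER, added a few commits earlier. A rough sketch of how the three modes differ (hypothetical names, not the real implementation; the actual logic is in clusterCommand()):

    #include <stdio.h>

    typedef enum { MODE_DEFAULT, MODE_FORCE, MODE_TAKEOVER } failover_mode;

    static const char *failover_plan(failover_mode m) {
        switch (m) {
        case MODE_TAKEOVER:
            /* No handshake with the master and no election: bump the config
             * epoch without consensus and claim the master's slots. */
            return "bump epoch locally, take over slots";
        case MODE_FORCE:
            /* Skip the offset handshake, but still win a normal election
             * among the reachable masters. */
            return "start election without master handshake";
        default:
            /* Ask the master to pause clients and hand over the offset. */
            return "send MFSTART to the master, then elect";
        }
    }

    int main(void) {
        printf("default:  %s\n", failover_plan(MODE_DEFAULT));
        printf("force:    %s\n", failover_plan(MODE_FORCE));
        printf("takeover: %s\n", failover_plan(MODE_TAKEOVER));
        return 0;
    }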
--- tests/cluster/tests/11-manual-takeover.tcl | 59 ++++++++++++++++++++++ 1 file changed, 59 insertions(+) create mode 100644 tests/cluster/tests/11-manual-takeover.tcl diff --git a/tests/cluster/tests/11-manual-takeover.tcl b/tests/cluster/tests/11-manual-takeover.tcl new file mode 100644 index 000000000..f567c6962 --- /dev/null +++ b/tests/cluster/tests/11-manual-takeover.tcl @@ -0,0 +1,59 @@ +# Manual takeover test + +source "../tests/includes/init-tests.tcl" + +test "Create a 5 nodes cluster" { + create_cluster 5 5 +} + +test "Cluster is up" { + assert_cluster_state ok +} + +test "Cluster is writable" { + cluster_write_test 0 +} + +test "Killing majority of master nodes" { + kill_instance redis 0 + kill_instance redis 1 + kill_instance redis 2 +} + +test "Cluster should eventually be down" { + assert_cluster_state fail +} + +test "Use takeover to bring slaves back" { + R 5 cluster failover takeover + R 6 cluster failover takeover + R 7 cluster failover takeover +} + +test "Cluster should eventually be up again" { + assert_cluster_state ok +} + +test "Cluster is writable" { + cluster_write_test 4 +} + +test "Instance #5, #6, #7 are now masters" { + assert {[RI 5 role] eq {master}} + assert {[RI 6 role] eq {master}} + assert {[RI 7 role] eq {master}} +} + +test "Restarting the previously killed master nodes" { + restart_instance redis 0 + restart_instance redis 1 + restart_instance redis 2 +} + +test "Instance #0, #1, #2 gets converted into a slaves" { + wait_for_condition 1000 50 { + [RI 0 role] eq {slave} && [RI 1 role] eq {slave} && [RI 2 role] eq {slave} + } else { + fail "Old masters not converted into slaves" + } +} From 9b0bcf25e1407a610f71704db17394e6d6be6622 Mon Sep 17 00:00:00 2001 From: antirez Date: Sun, 22 Mar 2015 22:58:53 +0100 Subject: [PATCH 0166/1928] Cluster: unit 10 modified to leave cluster in proper state. --- tests/cluster/tests/10-manual-failover.tcl | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/tests/cluster/tests/10-manual-failover.tcl b/tests/cluster/tests/10-manual-failover.tcl index 5fa3d32bf..5441b79f3 100644 --- a/tests/cluster/tests/10-manual-failover.tcl +++ b/tests/cluster/tests/10-manual-failover.tcl @@ -185,3 +185,8 @@ test "Instance #5 is a master after some time" { fail "Instance #5 is not a master after some time regardless of FORCE" } } + +test "Wait for instance #0 to return back alive" { + R 0 deferred 0 + assert {[R 0 read] eq {OK}} +} From 761fc16b4a98f3be56d1bd5079a4880bd44d37f8 Mon Sep 17 00:00:00 2001 From: superlogical Date: Tue, 24 Mar 2015 09:44:52 +1300 Subject: [PATCH 0167/1928] create-cluster fix for stop and watch commands --- utils/create-cluster/create-cluster | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/utils/create-cluster/create-cluster b/utils/create-cluster/create-cluster index efb3135d4..98941496f 100755 --- a/utils/create-cluster/create-cluster +++ b/utils/create-cluster/create-cluster @@ -43,7 +43,7 @@ then while [ $((PORT < ENDPORT)) != "0" ]; do PORT=$((PORT+1)) echo "Stopping $PORT" - redis-cli -p $PORT shutdown nosave + ../../src/redis-cli -p $PORT shutdown nosave done exit 0 fi @@ -54,7 +54,7 @@ then while [ 1 ]; do clear date - redis-cli -p $PORT cluster nodes | head -30 + ../../src/redis-cli -p $PORT cluster nodes | head -30 sleep 1 done exit 0 From 9b7f8b1c9b379ab842d40df4636dfbbeb6376fcb Mon Sep 17 00:00:00 2001 From: antirez Date: Tue, 24 Mar 2015 11:07:10 +0100 Subject: [PATCH 0168/1928] Cluster: redirection refactoring + handling of blocked clients. 
There was a bug in Redis Cluster caused by clients blocked in a blocking list pop operation, for keys no longer handled by the instance, or in a condition where the cluster became down after the client blocked. A typical situation is: 1) BLPOP 0 2) hash slot is resharded to another master. The client will block forever int this case. A symmentrical non-cluster-specific bug happens when an instance is turned from master to slave. In that case it is more serious since this will desynchronize data between slaves and masters. This other bug was discovered as a side effect of thinking about the bug explained and fixed in this commit, but will be fixed in a separated commit. --- src/blocked.c | 2 ++ src/cluster.c | 90 ++++++++++++++++++++++++++++++++++++++++++++++++--- src/cluster.h | 9 ++++-- src/redis.c | 30 ++++++----------- 4 files changed, 102 insertions(+), 29 deletions(-) diff --git a/src/blocked.c b/src/blocked.c index 3509dd134..ae2500aac 100644 --- a/src/blocked.c +++ b/src/blocked.c @@ -59,6 +59,8 @@ * When implementing a new type of blocking opeation, the implementation * should modify unblockClient() and replyToBlockedClientTimedOut() in order * to handle the btype-specific behavior of this two functions. + * If the blocking operation waits for certain keys to change state, the + * clusterRedirectBlockedClientIfNeeded() function should also be updated. */ #include "redis.h" diff --git a/src/cluster.c b/src/cluster.c index e1ae92f02..916a4be6a 100644 --- a/src/cluster.c +++ b/src/cluster.c @@ -4768,10 +4768,10 @@ void readwriteCommand(redisClient *c) { * belonging to the same slot, but the slot is not stable (in migration or * importing state, likely because a resharding is in progress). * - * REDIS_CLUSTER_REDIR_DOWN if the request addresses a slot which is not - * bound to any node. In this case the cluster global state should be already - * "down" but it is fragile to rely on the update of the global state, so - * we also handle it here. */ + * REDIS_CLUSTER_REDIR_DOWN_UNBOUND if the request addresses a slot which is + * not bound to any node. In this case the cluster global state should be + * already "down" but it is fragile to rely on the update of the global state, + * so we also handle it here. */ clusterNode *getNodeByQuery(redisClient *c, struct redisCommand *cmd, robj **argv, int argc, int *hashslot, int *error_code) { clusterNode *n = NULL; robj *firstkey = NULL; @@ -4833,7 +4833,7 @@ clusterNode *getNodeByQuery(redisClient *c, struct redisCommand *cmd, robj **arg if (n == NULL) { getKeysFreeResult(keyindex); if (error_code) - *error_code = REDIS_CLUSTER_REDIR_DOWN; + *error_code = REDIS_CLUSTER_REDIR_DOWN_UNBOUND; return NULL; } @@ -4925,3 +4925,83 @@ clusterNode *getNodeByQuery(redisClient *c, struct redisCommand *cmd, robj **arg if (n != myself && error_code) *error_code = REDIS_CLUSTER_REDIR_MOVED; return n; } + +/* Send the client the right redirection code, according to error_code + * that should be set to one of REDIS_CLUSTER_REDIR_* macros. + * + * If REDIS_CLUSTER_REDIR_ASK or REDIS_CLUSTER_REDIR_MOVED error codes + * are used, then the node 'n' should not be NULL, but should be the + * node we want to mention in the redirection. Moreover hashslot should + * be set to the hash slot that caused the redirection. 
*/ +void clusterRedirectClient(redisClient *c, clusterNode *n, int hashslot, int error_code) { + if (error_code == REDIS_CLUSTER_REDIR_CROSS_SLOT) { + addReplySds(c,sdsnew("-CROSSSLOT Keys in request don't hash to the same slot\r\n")); + } else if (error_code == REDIS_CLUSTER_REDIR_UNSTABLE) { + /* The request spawns mutliple keys in the same slot, + * but the slot is not "stable" currently as there is + * a migration or import in progress. */ + addReplySds(c,sdsnew("-TRYAGAIN Multiple keys request during rehashing of slot\r\n")); + } else if (error_code == REDIS_CLUSTER_REDIR_DOWN_STATE) { + addReplySds(c,sdsnew("-CLUSTERDOWN The cluster is down\r\n")); + } else if (error_code == REDIS_CLUSTER_REDIR_DOWN_UNBOUND) { + addReplySds(c,sdsnew("-CLUSTERDOWN Hash slot not served\r\n")); + } else if (error_code == REDIS_CLUSTER_REDIR_MOVED || + error_code == REDIS_CLUSTER_REDIR_ASK) + { + addReplySds(c,sdscatprintf(sdsempty(), + "-%s %d %s:%d\r\n", + (error_code == REDIS_CLUSTER_REDIR_ASK) ? "ASK" : "MOVED", + hashslot,n->ip,n->port)); + } else { + redisPanic("getNodeByQuery() unknown error."); + } +} + +/* This function is called by the function processing clients incrementally + * to detect timeouts, in order to handle the following case: + * + * 1) A client blocks with BLPOP or similar blocking operation. + * 2) The master migrates the hash slot elsewhere or turns into a slave. + * 3) The client may remain blocked forever (or up to the max timeout time) + * waiting for a key change that will never happen. + * + * If the client is found to be blocked into an hash slot this node no + * longer handles, the client is sent a redirection error, and the function + * returns 1. Otherwise 0 is returned and no operation is performed. */ +int clusterRedirectBlockedClientIfNeeded(redisClient *c) { + if (c->flags & REDIS_BLOCKED && c->btype == REDIS_BLOCKED_LIST) { + dictEntry *de; + dictIterator *di; + + /* If the cluster is down, unblock the client with the right error. */ + if (server.cluster->state == REDIS_CLUSTER_FAIL) { + clusterRedirectClient(c,NULL,0,REDIS_CLUSTER_REDIR_DOWN_STATE); + return 1; + } + + di = dictGetIterator(c->bpop.keys); + while((de = dictNext(di)) != NULL) { + robj *key = dictGetKey(de); + int slot = keyHashSlot((char*)key->ptr, sdslen(key->ptr)); + clusterNode *node = server.cluster->slots[slot]; + + /* We send an error and unblock the client if: + * 1) The slot is unassigned, emitting a cluster down error. + * 2) The slot is not handled by this node, nor being imported. */ + if (node != myself && + server.cluster->importing_slots_from[slot] == NULL) + { + if (node == NULL) { + clusterRedirectClient(c,NULL,0, + REDIS_CLUSTER_REDIR_DOWN_UNBOUND); + } else { + clusterRedirectClient(c,node,slot, + REDIS_CLUSTER_REDIR_MOVED); + } + return 1; + } + } + dictReleaseIterator(di); + } + return 0; +} diff --git a/src/cluster.h b/src/cluster.h index 8eaa0ab98..bf442a222 100644 --- a/src/cluster.h +++ b/src/cluster.h @@ -26,11 +26,12 @@ /* Redirection errors returned by getNodeByQuery(). */ #define REDIS_CLUSTER_REDIR_NONE 0 /* Node can serve the request. */ -#define REDIS_CLUSTER_REDIR_CROSS_SLOT 1 /* Keys in different slots. */ -#define REDIS_CLUSTER_REDIR_UNSTABLE 2 /* Keys in slot resharding. */ +#define REDIS_CLUSTER_REDIR_CROSS_SLOT 1 /* -CROSSSLOT request. */ +#define REDIS_CLUSTER_REDIR_UNSTABLE 2 /* -TRYAGAIN redirection required */ #define REDIS_CLUSTER_REDIR_ASK 3 /* -ASK redirection required. */ #define REDIS_CLUSTER_REDIR_MOVED 4 /* -MOVED redirection required. 
*/ -#define REDIS_CLUSTER_REDIR_DOWN 5 /* -CLUSTERDOWN error. */ +#define REDIS_CLUSTER_REDIR_DOWN_STATE 5 /* -CLUSTERDOWN, global state. */ +#define REDIS_CLUSTER_REDIR_DOWN_UNBOUND 6 /* -CLUSTERDOWN, unbound slot. */ struct clusterNode; @@ -249,5 +250,7 @@ typedef struct { /* ---------------------- API exported outside cluster.c -------------------- */ clusterNode *getNodeByQuery(redisClient *c, struct redisCommand *cmd, robj **argv, int argc, int *hashslot, int *ask); +int clusterRedirectBlockedClientIfNeeded(redisClient *c); +void clusterRedirectClient(redisClient *c, clusterNode *n, int hashslot, int error_code); #endif /* __REDIS_CLUSTER_H */ diff --git a/src/redis.c b/src/redis.c index 81fa7be2f..787663e4a 100644 --- a/src/redis.c +++ b/src/redis.c @@ -926,8 +926,14 @@ int clientsCronHandleTimeout(redisClient *c) { mstime_t now_ms = mstime(); if (c->bpop.timeout != 0 && c->bpop.timeout < now_ms) { + /* Handle blocking operation specific timeout. */ replyToBlockedClientTimedOut(c); unblockClient(c); + } else if (server.cluster_enabled) { + /* Cluster: handle unblock & redirect of clients blocked + * into keys no longer served by this server. */ + if (clusterRedirectBlockedClientIfNeeded(c)) + unblockClient(c); } } return 0; @@ -2207,32 +2213,14 @@ int processCommand(redisClient *c) { if (server.cluster->state != REDIS_CLUSTER_OK) { flagTransaction(c); - addReplySds(c,sdsnew("-CLUSTERDOWN The cluster is down. Use CLUSTER INFO for more information\r\n")); + clusterRedirectClient(c,NULL,0,REDIS_CLUSTER_REDIR_DOWN_STATE); return REDIS_OK; } else { int error_code; clusterNode *n = getNodeByQuery(c,c->cmd,c->argv,c->argc,&hashslot,&error_code); - if (n == NULL) { + if (n == NULL || n != server.cluster->myself) { flagTransaction(c); - if (error_code == REDIS_CLUSTER_REDIR_CROSS_SLOT) { - addReplySds(c,sdsnew("-CROSSSLOT Keys in request don't hash to the same slot\r\n")); - } else if (error_code == REDIS_CLUSTER_REDIR_UNSTABLE) { - /* The request spawns mutliple keys in the same slot, - * but the slot is not "stable" currently as there is - * a migration or import in progress. */ - addReplySds(c,sdsnew("-TRYAGAIN Multiple keys request during rehashing of slot\r\n")); - } else if (error_code == REDIS_CLUSTER_REDIR_DOWN) { - addReplySds(c,sdsnew("-CLUSTERDOWN The cluster is down. Hash slot is unbound\r\n")); - } else { - redisPanic("getNodeByQuery() unknown error."); - } - return REDIS_OK; - } else if (n != server.cluster->myself) { - flagTransaction(c); - addReplySds(c,sdscatprintf(sdsempty(), - "-%s %d %s:%d\r\n", - (error_code == REDIS_CLUSTER_REDIR_ASK) ? "ASK" : "MOVED", - hashslot,n->ip,n->port)); + clusterRedirectClient(c,n,hashslot,error_code); return REDIS_OK; } } From c3ad70901f962808a1c0c474951406af81d26a3f Mon Sep 17 00:00:00 2001 From: antirez Date: Tue, 24 Mar 2015 16:00:09 +0100 Subject: [PATCH 0169/1928] Replication: disconnect blocked clients when switching to slave role. Bug as old as Redis and blocking operations. It's hard to trigger since only happens on instance role switch, but the results are quite bad since an inconsistency between master and slave is created. How to trigger the bug is a good description of the bug itself. 1. Client does "BLPOP mylist 0" in master. 2. Master is turned into slave, that replicates from New-Master. 3. Client does "LPUSH mylist foo" in New-Master. 4. New-Master propagates write to slave. 5. Slave receives the LPUSH, the blocked client get served. Now Master "mylist" key has "foo", Slave "mylist" key is empty. 
Highlights: * At step "2" above, the client remains attached, basically escaping any check performed during command dispatch: read only slave, in that case. * At step "5" the slave (that was the master), serves the blocked client consuming a list element, which is not consumed on the master side. This scenario is technically likely to happen during failovers, however since Redis Sentinel already disconnects clients using the CLIENT command when changing the role of the instance, the bug is avoided in Sentinel deployments. Closes #2473. --- src/blocked.c | 24 ++++++++++++++++++++++++ src/redis.h | 1 + src/replication.c | 1 + 3 files changed, 26 insertions(+) diff --git a/src/blocked.c b/src/blocked.c index ae2500aac..8acfb8184 100644 --- a/src/blocked.c +++ b/src/blocked.c @@ -155,3 +155,27 @@ void replyToBlockedClientTimedOut(redisClient *c) { } } +/* Mass-unblock clients because something changed in the instance that makes + * blocking no longer safe. For example clients blocked in list operations + * in an instance which turns from master to slave is unsafe, so this function + * is called when a master turns into a slave. + * + * The semantics is to send an -UNBLOCKED error to the client, disconnecting + * it at the same time. */ +void disconnectAllBlockedClients(void) { + listNode *ln; + listIter li; + + listRewind(server.clients,&li); + while((ln = listNext(&li))) { + redisClient *c = listNodeValue(ln); + + if (c->flags & REDIS_BLOCKED) { + addReplySds(c,sdsnew( + "-UNBLOCKED force unblock from blocking operation, " + "instance state changed (master -> slave?)\r\n")); + unblockClient(c); + c->flags |= REDIS_CLOSE_AFTER_REPLY; + } + } +} diff --git a/src/redis.h b/src/redis.h index 34d8b0a4c..53f3967d7 100644 --- a/src/redis.h +++ b/src/redis.h @@ -1395,6 +1395,7 @@ void blockClient(redisClient *c, int btype); void unblockClient(redisClient *c); void replyToBlockedClientTimedOut(redisClient *c); int getTimeoutFromObjectOrReply(redisClient *c, robj *object, mstime_t *timeout, int unit); +void disconnectAllBlockedClients(void); /* Git SHA1 */ char *redisGitSHA1(void); diff --git a/src/replication.c b/src/replication.c index 697acbef5..afea75b6a 100644 --- a/src/replication.c +++ b/src/replication.c @@ -1444,6 +1444,7 @@ void replicationSetMaster(char *ip, int port) { server.masterhost = sdsnew(ip); server.masterport = port; if (server.master) freeClient(server.master); + disconnectAllBlockedClients(); /* Clients blocked in master, now slave. */ disconnectSlaves(); /* Force our slaves to resync with us as well. */ replicationDiscardCachedMaster(); /* Don't try a PSYNC. */ freeReplicationBacklog(); /* Don't allow our chained slaves to PSYNC. */ From 9cd8333ed283689b028a062fc43820fcf15fa81c Mon Sep 17 00:00:00 2001 From: antirez Date: Fri, 27 Mar 2015 10:10:22 +0100 Subject: [PATCH 0170/1928] dict.c: add casting to avoid compilation warning. rehashidx is always positive in the two code paths, since the only negative value it could have is -1 when there is no rehashing in progress, and the condition is explicitly checked. --- src/dict.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/dict.c b/src/dict.c index dbcfeb492..c6dbceaf2 100644 --- a/src/dict.c +++ b/src/dict.c @@ -716,7 +716,7 @@ unsigned int dictGetSomeKeys(dict *d, dictEntry **des, unsigned int count) { /* Invariant of the dict.c rehashing: up to the indexes already * visited in ht[0] during the rehashing, there are no populated * buckets, so we can skip ht[0] for indexes between 0 and idx-1. 
*/ - if (tables == 2 && j == 0 && i < d->rehashidx) { + if (tables == 2 && j == 0 && i < (unsigned int) d->rehashidx) { /* Moreover, if we are currently out of range in the second * table, there will be no elements in both tables up to * the current rehashing index, so we jump if possible. From 068d3c9737b368f921808e753f6f000a12ca5ae8 Mon Sep 17 00:00:00 2001 From: antirez Date: Fri, 27 Mar 2015 10:14:52 +0100 Subject: [PATCH 0171/1928] dict.c: convert types to unsigned long where appropriate. No semantical changes since to make dict.c truly able to scale over the 32 bit table size limit, the hash function shoulds and other internals related to hash function output should be 64 bit ready. --- src/dict.c | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/src/dict.c b/src/dict.c index c6dbceaf2..f728d381e 100644 --- a/src/dict.c +++ b/src/dict.c @@ -687,10 +687,10 @@ dictEntry *dictGetRandomKey(dict *d) * statistics. However the function is much faster than dictGetRandomKey() * at producing N elements. */ unsigned int dictGetSomeKeys(dict *d, dictEntry **des, unsigned int count) { - unsigned int j; /* internal hash table id, 0 or 1. */ - unsigned int tables; /* 1 or 2 tables? */ - unsigned int stored = 0, maxsizemask; - unsigned int maxsteps; + unsigned long j; /* internal hash table id, 0 or 1. */ + unsigned long tables; /* 1 or 2 tables? */ + unsigned long stored = 0, maxsizemask; + unsigned long maxsteps; if (dictSize(d) < count) count = dictSize(d); maxsteps = count*10; @@ -709,14 +709,14 @@ unsigned int dictGetSomeKeys(dict *d, dictEntry **des, unsigned int count) { maxsizemask = d->ht[1].sizemask; /* Pick a random point inside the larger table. */ - unsigned int i = random() & maxsizemask; - unsigned int emptylen = 0; /* Continuous empty entries so far. */ + unsigned long i = random() & maxsizemask; + unsigned long emptylen = 0; /* Continuous empty entries so far. */ while(stored < count && maxsteps--) { for (j = 0; j < tables; j++) { /* Invariant of the dict.c rehashing: up to the indexes already * visited in ht[0] during the rehashing, there are no populated * buckets, so we can skip ht[0] for indexes between 0 and idx-1. */ - if (tables == 2 && j == 0 && i < (unsigned int) d->rehashidx) { + if (tables == 2 && j == 0 && i < (unsigned long) d->rehashidx) { /* Moreover, if we are currently out of range in the second * table, there will be no elements in both tables up to * the current rehashing index, so we jump if possible. From 37260bc3bed5fb46649262789c65089ab7de0dc6 Mon Sep 17 00:00:00 2001 From: antirez Date: Fri, 27 Mar 2015 12:10:46 +0100 Subject: [PATCH 0172/1928] Test: regression for issue #2473. 
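The cast in the dict.c hunks above does not change behaviour: rehashidx is a signed field whose only negative value is -1 (no rehash in progress), and that case is already excluded by the surrounding condition, so comparing it against an unsigned index is safe once the conversion is made explicit. A standalone sketch of the pattern follows, with an invented structure rather than the real dict layout.

    #include <stdio.h>

    /* Illustrative only: a signed progress index that is -1 when idle. */
    struct scan_state {
        long rehashidx;   /* -1 = not rehashing, >= 0 = next bucket to move */
    };

    /* Returns 1 if bucket i of the old table can be skipped because it has
     * already been migrated. The explicit cast is safe only because the
     * rehashidx >= 0 case has been established first; without the cast,
     * comparing unsigned i against signed rehashidx draws -Wsign-compare. */
    static int bucket_already_moved(const struct scan_state *st, unsigned long i) {
        if (st->rehashidx == -1) return 0;               /* not rehashing */
        return i < (unsigned long) st->rehashidx;
    }

    int main(void) {
        struct scan_state st = { .rehashidx = 8 };
        printf("%d %d\n", bucket_already_moved(&st, 3), bucket_already_moved(&st, 9));
        return 0;
    }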
--- tests/integration/replication.tcl | 52 ++++++++++++++++++++++++++----- 1 file changed, 44 insertions(+), 8 deletions(-) diff --git a/tests/integration/replication.tcl b/tests/integration/replication.tcl index 71a7ec60a..bb907eba8 100644 --- a/tests/integration/replication.tcl +++ b/tests/integration/replication.tcl @@ -1,10 +1,17 @@ start_server {tags {"repl"}} { + set A [srv 0 client] + set A_host [srv 0 host] + set A_port [srv 0 port] start_server {} { - test {First server should have role slave after SLAVEOF} { - r -1 slaveof [srv 0 host] [srv 0 port] + set B [srv 0 client] + set B_host [srv 0 host] + set B_port [srv 0 port] + + test {Set instance A as slave of B} { + $A slaveof $B_host $B_port wait_for_condition 50 100 { - [s -1 role] eq {slave} && - [string match {*master_link_status:up*} [r -1 info replication]] + [lindex [$A role] 0] eq {slave} && + [string match {*master_link_status:up*} [$A info replication]] } else { fail "Can't turn the instance into a slave" } @@ -15,9 +22,9 @@ start_server {tags {"repl"}} { $rd brpoplpush a b 5 r lpush a foo wait_for_condition 50 100 { - [r debug digest] eq [r -1 debug digest] + [$A debug digest] eq [$B debug digest] } else { - fail "Master and slave have different digest: [r debug digest] VS [r -1 debug digest]" + fail "Master and slave have different digest: [$A debug digest] VS [$B debug digest]" } } @@ -28,7 +35,36 @@ start_server {tags {"repl"}} { r lpush c 3 $rd brpoplpush c d 5 after 1000 - assert_equal [r debug digest] [r -1 debug digest] + assert_equal [$A debug digest] [$B debug digest] + } + + test {BLPOP followed by role change, issue #2473} { + set rd [redis_deferring_client] + $rd blpop foo 0 ; # Block while B is a master + + # Turn B into master of A + $A slaveof no one + $B slaveof $A_host $A_port + wait_for_condition 50 100 { + [lindex [$B role] 0] eq {slave} && + [string match {*master_link_status:up*} [$B info replication]] + } else { + fail "Can't turn the instance into a slave" + } + + # Push elements into the "foo" list of the new slave. + # If the client is still attached to the instance, we'll get + # a desync between the two instances. + $A rpush foo a b c + after 100 + + wait_for_condition 50 100 { + [$A debug digest] eq [$B debug digest] && + [$A lrange foo 0 -1] eq {a b c} && + [$B lrange foo 0 -1] eq {a b c} + } else { + fail "Master and slave have different digest: [$A debug digest] VS [$B debug digest]" + } } } } @@ -113,7 +149,7 @@ foreach dl {no yes} { start_server {} { lappend slaves [srv 0 client] test "Connect multiple slaves at the same time (issue #141), diskless=$dl" { - # Send SALVEOF commands to slaves + # Send SLAVEOF commands to slaves [lindex $slaves 0] slaveof $master_host $master_port [lindex $slaves 1] slaveof $master_host $master_port [lindex $slaves 2] slaveof $master_host $master_port From 221d2932b51dc605130130369301c92f34336987 Mon Sep 17 00:00:00 2001 From: antirez Date: Mon, 30 Mar 2015 11:54:49 +0200 Subject: [PATCH 0173/1928] Ensure array index is in range in addReplyLongLongWithPrefix(). Change done in order to remove a warning and improve code robustness. No actual bug here. 
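The check added in the next hunk guards a common Redis pattern: a small table of precomputed reply headers indexed directly by the value, which is only valid for a narrow non-negative range. A reduced sketch of the idea, with made-up table size and contents:

    #include <stdio.h>

    #define SHARED_HDR_LEN 32

    /* Precomputed "*<n>\r\n" headers for small n, built once at startup. */
    static char shared_mbulkhdr[SHARED_HDR_LEN][16];

    static void init_shared(void) {
        for (int i = 0; i < SHARED_HDR_LEN; i++)
            snprintf(shared_mbulkhdr[i], sizeof(shared_mbulkhdr[i]), "*%d\r\n", i);
    }

    /* Emit a multi-bulk header; fall back to formatting on the fly when the
     * value is outside the precomputed range. The ll >= 0 test mirrors the
     * extra check in the patch: in this sketch a negative ll would otherwise
     * index before the start of the table. */
    static void emit_mbulk_header(long long ll, char *dst, size_t dstlen) {
        if (ll >= 0 && ll < SHARED_HDR_LEN)
            snprintf(dst, dstlen, "%s", shared_mbulkhdr[ll]);
        else
            snprintf(dst, dstlen, "*%lld\r\n", ll);
    }

    int main(void) {
        char buf[32];
        init_shared();
        emit_mbulk_header(3, buf, sizeof(buf));
        printf("%s", buf);
        emit_mbulk_header(-1, buf, sizeof(buf));   /* safe: takes the fallback path */
        printf("%s", buf);
        return 0;
    }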
--- src/networking.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/networking.c b/src/networking.c index ba35e487c..a2d80adf3 100644 --- a/src/networking.c +++ b/src/networking.c @@ -454,10 +454,10 @@ void addReplyLongLongWithPrefix(redisClient *c, long long ll, char prefix) { /* Things like $3\r\n or *2\r\n are emitted very often by the protocol * so we have a few shared objects to use if the integer is small * like it is most of the times. */ - if (prefix == '*' && ll < REDIS_SHARED_BULKHDR_LEN) { + if (prefix == '*' && ll < REDIS_SHARED_BULKHDR_LEN && ll >= 0) { addReply(c,shared.mbulkhdr[ll]); return; - } else if (prefix == '$' && ll < REDIS_SHARED_BULKHDR_LEN) { + } else if (prefix == '$' && ll < REDIS_SHARED_BULKHDR_LEN && ll >= 0) { addReply(c,shared.bulkhdr[ll]); return; } From 34460dd6ee0c9ae6561de54f1005f493bfcc543c Mon Sep 17 00:00:00 2001 From: antirez Date: Mon, 30 Mar 2015 12:17:46 +0200 Subject: [PATCH 0174/1928] Check bio.c job type at thread startup. Another one just to avoid a warning. Slightly more defensive code anyway. --- src/bio.c | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/src/bio.c b/src/bio.c index 4bd5a17c6..27bc9abfc 100644 --- a/src/bio.c +++ b/src/bio.c @@ -142,6 +142,13 @@ void *bioProcessBackgroundJobs(void *arg) { unsigned long type = (unsigned long) arg; sigset_t sigset; + /* Check that the type is within the right interval. */ + if (type >= REDIS_BIO_NUM_OPS) { + redisLog(REDIS_WARNING, + "Warning: bio thread started with wrong type %lu",type); + return NULL; + } + /* Make the thread killable at any time, so that bioKillThreads() * can work reliably. */ pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, NULL); From 7f330b16f93d9feed0113e928a1b96f182b73e45 Mon Sep 17 00:00:00 2001 From: antirez Date: Mon, 30 Mar 2015 12:24:57 +0200 Subject: [PATCH 0175/1928] Set: setType*() API more defensive initializing both values. This change fixes several warnings compiling at -O3 level with GCC 4.8.2, and at the same time, in case of misuse of the API, we have the pointer initialize to NULL or the integer initialized to the value -123456789 which is easy to spot by naked eye. --- src/t_set.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/src/t_set.c b/src/t_set.c index c8141c3f6..44580098c 100644 --- a/src/t_set.c +++ b/src/t_set.c @@ -154,9 +154,13 @@ int setTypeNext(setTypeIterator *si, robj **objele, int64_t *llele) { dictEntry *de = dictNext(si->di); if (de == NULL) return -1; *objele = dictGetKey(de); + *llele = -123456789; /* Not needed. Defensive. */ } else if (si->encoding == REDIS_ENCODING_INTSET) { if (!intsetGet(si->subject->ptr,si->ii++,llele)) return -1; + *objele = NULL; /* Not needed. Defensive. */ + } else { + redisPanic("Wrong set encoding in setTypeNext"); } return si->encoding; } @@ -204,8 +208,10 @@ int setTypeRandomElement(robj *setobj, robj **objele, int64_t *llele) { if (setobj->encoding == REDIS_ENCODING_HT) { dictEntry *de = dictGetRandomKey(setobj->ptr); *objele = dictGetKey(de); + *llele = -123456789; /* Not needed. Defensive. */ } else if (setobj->encoding == REDIS_ENCODING_INTSET) { *llele = intsetRandom(setobj->ptr); + *objele = NULL; /* Not needed. Defensive. */ } else { redisPanic("Unknown set encoding"); } From 65090401b713a074b5261342e3079754377b6bbf Mon Sep 17 00:00:00 2001 From: antirez Date: Mon, 30 Mar 2015 14:29:01 +0200 Subject: [PATCH 0176/1928] Sentinel / Cluster test: exit with non-zero error code on failures. 
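The defensive initialisation added to t_set.c above addresses -O3 warnings where the compiler cannot prove which of two out-parameters a callee filled in; always writing both, with an obviously bogus value in the unused one, silences the warning and makes API misuse easy to spot. A toy version of the idiom (names, types and values are illustrative, not the real set API):

    #include <stdio.h>
    #include <stdint.h>

    #define ENC_PTR 0
    #define ENC_INT 1

    /* Toy element iterator with two out-parameters, only one of which is
     * meaningful for a given encoding. Both are always written, so a caller
     * that reads the wrong one gets NULL / an obviously wrong number instead
     * of stack garbage. */
    static int next_element(int encoding, const char **strele, int64_t *intele) {
        if (encoding == ENC_PTR) {
            *strele = "hello";
            *intele = -123456789;          /* not needed, defensive */
        } else {
            *intele = 42;
            *strele = NULL;                /* not needed, defensive */
        }
        return encoding;
    }

    int main(void) {
        const char *s; int64_t v;
        if (next_element(ENC_INT, &s, &v) == ENC_INT)
            printf("int element: %lld\n", (long long) v);
        return 0;
    }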
--- tests/cluster/run.tcl | 1 + tests/instances.tcl | 14 ++++++++++++++ tests/sentinel/run.tcl | 1 + 3 files changed, 16 insertions(+) diff --git a/tests/cluster/run.tcl b/tests/cluster/run.tcl index f764cea0a..93603ddc9 100644 --- a/tests/cluster/run.tcl +++ b/tests/cluster/run.tcl @@ -17,6 +17,7 @@ proc main {} { } run_tests cleanup + end_tests } if {[catch main e]} { diff --git a/tests/instances.tcl b/tests/instances.tcl index 353d9b2d2..370d5e3aa 100644 --- a/tests/instances.tcl +++ b/tests/instances.tcl @@ -19,6 +19,7 @@ set ::verbose 0 set ::valgrind 0 set ::pause_on_error 0 set ::simulate_error 0 +set ::failed 0 set ::sentinel_instances {} set ::redis_instances {} set ::sentinel_base_port 20000 @@ -231,6 +232,7 @@ proc test {descr code} { flush stdout if {[catch {set retval [uplevel 1 $code]} error]} { + incr ::failed if {[string match "assertion:*" $error]} { set msg [string range $error 10 end] puts [colorstr red $msg] @@ -246,6 +248,7 @@ proc test {descr code} { } } +# Execute all the units inside the 'tests' directory. proc run_tests {} { set tests [lsort [glob ../tests/*]] foreach test $tests { @@ -258,6 +261,17 @@ proc run_tests {} { } } +# Print a message and exists with 0 / 1 according to zero or more failures. +proc end_tests {} { + if {$::failed == 0} { + puts "GOOD! No errors." + exit 0 + } else { + puts "WARNING $::failed tests faield." + exit 1 + } +} + # The "S" command is used to interact with the N-th Sentinel. # The general form is: # diff --git a/tests/sentinel/run.tcl b/tests/sentinel/run.tcl index f33029959..9a2fcfb49 100644 --- a/tests/sentinel/run.tcl +++ b/tests/sentinel/run.tcl @@ -13,6 +13,7 @@ proc main {} { spawn_instance redis $::redis_base_port $::instances_count run_tests cleanup + end_tests } if {[catch main e]} { From 66f9393ee4d526e27df38e7b610daef30a4c89bd Mon Sep 17 00:00:00 2001 From: antirez Date: Tue, 31 Mar 2015 15:22:56 +0200 Subject: [PATCH 0177/1928] Fix setTypeNext call assuming NULL can be passed. Segfault introduced during a refactoring / warning suppression a few commits away. This particular call assumed that it is safe to pass NULL to the object pointer argument when we are sure the set has a given encoding. This can't be assumed and is now guaranteed to segfault because of the new API of setTypeNext(). --- src/t_set.c | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/src/t_set.c b/src/t_set.c index 44580098c..c974c1855 100644 --- a/src/t_set.c +++ b/src/t_set.c @@ -144,7 +144,11 @@ void setTypeReleaseIterator(setTypeIterator *si) { * Since set elements can be internally be stored as redis objects or * simple arrays of integers, setTypeNext returns the encoding of the * set object you are iterating, and will populate the appropriate pointer - * (eobj) or (llobj) accordingly. + * (objele) or (llele) accordingly. + * + * Note that both the objele and llele pointers should be passed and cannot + * be NULL since the function will try to defensively populate the non + * used field with values which are easy to trap if misused. * * When there are no longer elements -1 is returned. * Returned objects ref count is not incremented, so this function is @@ -201,6 +205,10 @@ robj *setTypeNextObject(setTypeIterator *si) { * field of the object and is used by the caller to check if the * int64_t pointer or the redis object pointer was populated. 
* + * Note that both the objele and llele pointers should be passed and cannot + * be NULL since the function will try to defensively populate the non + * used field with values which are easy to trap if misused. + * * When an object is returned (the set was a real set) the ref count * of the object is not incremented so this function can be considered * copy on write friendly. */ @@ -246,7 +254,7 @@ void setTypeConvert(robj *setobj, int enc) { /* To add the elements we extract integers and create redis objects */ si = setTypeInitIterator(setobj); - while (setTypeNext(si,NULL,&intele) != -1) { + while (setTypeNext(si,&element,&intele) != -1) { element = createStringObjectFromLongLong(intele); redisAssertWithInfo(NULL,element, dictAdd(d,element,NULL) == DICT_OK); From 159875b5a3a8dd7780aadbf1f7674061fc760920 Mon Sep 17 00:00:00 2001 From: Oran Agra Date: Tue, 31 Mar 2015 23:42:08 +0300 Subject: [PATCH 0178/1928] fixes to diskless replication. master was closing the connection if the RDB transfer took long time. and also sent PINGs to the slave before it got the initial ACK, in which case the slave wouldn't be able to find the EOF marker. --- src/networking.c | 2 +- src/replication.c | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/src/networking.c b/src/networking.c index a2d80adf3..3b4f3b0fe 100644 --- a/src/networking.c +++ b/src/networking.c @@ -149,7 +149,7 @@ int prepareClientToWrite(redisClient *c) { if (c->fd <= 0) return REDIS_ERR; /* Fake client */ if (c->bufpos == 0 && listLength(c->reply) == 0 && (c->replstate == REDIS_REPL_NONE || - c->replstate == REDIS_REPL_ONLINE) && + c->replstate == REDIS_REPL_ONLINE) && !c->repl_put_online_on_ack && aeCreateFileEvent(server.el, c->fd, AE_WRITABLE, sendReplyToClient, c) == AE_ERR) return REDIS_ERR; return REDIS_OK; diff --git a/src/replication.c b/src/replication.c index afea75b6a..c01cd52e6 100644 --- a/src/replication.c +++ b/src/replication.c @@ -773,6 +773,7 @@ void updateSlavesWaitingBgsave(int bgsaveerr, int type) { * is technically online now. */ slave->replstate = REDIS_REPL_ONLINE; slave->repl_put_online_on_ack = 1; + slave->repl_ack_time = server.unixtime; } else { if (bgsaveerr != REDIS_OK) { freeClient(slave); From 386804246f35e27a8db0d31fd0badd85cc6ecc04 Mon Sep 17 00:00:00 2001 From: antirez Date: Tue, 31 Mar 2015 23:43:38 +0200 Subject: [PATCH 0179/1928] Test: be more patient waiting for servers to exit. This should likely fix a false positive when running with the --valgrind option. --- tests/support/server.tcl | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/tests/support/server.tcl b/tests/support/server.tcl index 317b40a84..e5c31af28 100644 --- a/tests/support/server.tcl +++ b/tests/support/server.tcl @@ -54,10 +54,15 @@ proc kill_server config { # kill server and wait for the process to be totally exited catch {exec kill $pid} + if {$::valgrind} { + set max_wait 60000 + } else { + set max_wait 10000 + } while {[is_alive $config]} { incr wait 10 - if {$wait >= 5000} { + if {$wait >= $max_wait} { puts "Forcing process $pid to exit..." catch {exec kill -KILL $pid} } elseif {$wait % 1000 == 0} { From 6c60526db91e23fb2d666fc52facc9a11780a2a3 Mon Sep 17 00:00:00 2001 From: antirez Date: Wed, 1 Apr 2015 10:07:08 +0200 Subject: [PATCH 0180/1928] Net: improve prepareClientToWrite() error handling and comments. 
When we fail to setup the write handler it does not make sense to take the client around, it is missing writes: whatever is a client or a slave anyway the connection should terminated ASAP. Moreover what the function does exactly with its return value, and in which case the write handler is installed on the socket, was not clear, so the functions comment are improved to make the goals of the function more obvious. Also related to #2485. --- src/networking.c | 40 +++++++++++++++++++++++++++++++++------- src/replication.c | 7 ++++--- 2 files changed, 37 insertions(+), 10 deletions(-) diff --git a/src/networking.c b/src/networking.c index 3b4f3b0fe..761645cb2 100644 --- a/src/networking.c +++ b/src/networking.c @@ -135,23 +135,49 @@ redisClient *createClient(int fd) { * returns REDIS_OK, and make sure to install the write handler in our event * loop so that when the socket is writable new data gets written. * - * If the client should not receive new data, because it is a fake client, - * a master, a slave not yet online, or because the setup of the write handler - * failed, the function returns REDIS_ERR. + * If the client should not receive new data, because it is a fake client + * (used to load AOF in memory), a master or because the setup of the write + * handler failed, the function returns REDIS_ERR. + * + * The function may return REDIS_OK without actually installing the write + * event handler in the following cases: + * + * 1) The event handler should already be installed since the output buffer + * already contained something. + * 2) The client is a slave but not yet online, so we want to just accumulate + * writes in the buffer but not actually sending them yet. * * Typically gets called every time a reply is built, before adding more * data to the clients output buffers. If the function returns REDIS_ERR no * data should be appended to the output buffers. */ int prepareClientToWrite(redisClient *c) { + /* If it's the Lua client we always return ok without installing any + * handler since there is no socket at all. */ if (c->flags & REDIS_LUA_CLIENT) return REDIS_OK; + + /* Masters don't receive replies, unless REDIS_MASTER_FORCE_REPLY flag + * is set. */ if ((c->flags & REDIS_MASTER) && !(c->flags & REDIS_MASTER_FORCE_REPLY)) return REDIS_ERR; - if (c->fd <= 0) return REDIS_ERR; /* Fake client */ + + if (c->fd <= 0) return REDIS_ERR; /* Fake client for AOF loading. */ + + /* Only install the handler if not already installed and, in case of + * slaves, if the client can actually receive writes. */ if (c->bufpos == 0 && listLength(c->reply) == 0 && (c->replstate == REDIS_REPL_NONE || - c->replstate == REDIS_REPL_ONLINE) && !c->repl_put_online_on_ack && - aeCreateFileEvent(server.el, c->fd, AE_WRITABLE, - sendReplyToClient, c) == AE_ERR) return REDIS_ERR; + (c->replstate == REDIS_REPL_ONLINE && !c->repl_put_online_on_ack))) + { + /* Try to install the write handler. */ + if (aeCreateFileEvent(server.el, c->fd, AE_WRITABLE, + sendReplyToClient, c) == AE_ERR) + { + freeClientAsync(c); + return REDIS_ERR; + } + } + + /* Authorize the caller to queue in the output buffer of this client. */ return REDIS_OK; } diff --git a/src/replication.c b/src/replication.c index c01cd52e6..ca263527a 100644 --- a/src/replication.c +++ b/src/replication.c @@ -652,7 +652,8 @@ void replconfCommand(redisClient *c) { * * It does a few things: * - * 1) Put the slave in ONLINE state. 
+ * 1) Put the slave in ONLINE state (useless when the function is called + * because state is already ONLINE but repl_put_online_on_ack is true). * 2) Make sure the writable event is re-installed, since calling the SYNC * command disables it, so that we can accumulate output buffer without * sending it to the slave. @@ -660,7 +661,7 @@ void replconfCommand(redisClient *c) { void putSlaveOnline(redisClient *slave) { slave->replstate = REDIS_REPL_ONLINE; slave->repl_put_online_on_ack = 0; - slave->repl_ack_time = server.unixtime; + slave->repl_ack_time = server.unixtime; /* Prevent false timeout. */ if (aeCreateFileEvent(server.el, slave->fd, AE_WRITABLE, sendReplyToClient, slave) == AE_ERR) { redisLog(REDIS_WARNING,"Unable to register writable event for slave bulk transfer: %s", strerror(errno)); @@ -773,7 +774,7 @@ void updateSlavesWaitingBgsave(int bgsaveerr, int type) { * is technically online now. */ slave->replstate = REDIS_REPL_ONLINE; slave->repl_put_online_on_ack = 1; - slave->repl_ack_time = server.unixtime; + slave->repl_ack_time = server.unixtime; /* Timeout otherwise. */ } else { if (bgsaveerr != REDIS_OK) { freeClient(slave); From 626b4f69078276e35ed947e0d4ebd704e7f09992 Mon Sep 17 00:00:00 2001 From: Glenn Nethercutt Date: Fri, 17 Apr 2015 09:27:54 -0400 Subject: [PATCH 0181/1928] uphold the smove contract to return 0 when the element is not a member of the source set, even if source=dest --- src/t_set.c | 5 ++++- tests/unit/type/set.tcl | 1 + 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/src/t_set.c b/src/t_set.c index c974c1855..f50c0aa34 100644 --- a/src/t_set.c +++ b/src/t_set.c @@ -343,7 +343,10 @@ void smoveCommand(redisClient *c) { /* If srcset and dstset are equal, SMOVE is a no-op */ if (srcset == dstset) { - addReply(c,shared.cone); + if (setTypeIsMember(srcset,ele)) + addReply(c,shared.cone); + else + addReply(c,shared.czero); return; } diff --git a/tests/unit/type/set.tcl b/tests/unit/type/set.tcl index a9a3d0835..7b467f1c4 100644 --- a/tests/unit/type/set.tcl +++ b/tests/unit/type/set.tcl @@ -519,6 +519,7 @@ start_server { test "SMOVE non existing key" { setup_move assert_equal 0 [r smove myset1 myset2 foo] + assert_equal 0 [r smove myset1 myset1 foo] assert_equal {1 a b} [lsort [r smembers myset1]] assert_equal {2 3 4} [lsort [r smembers myset2]] } From 42b36c5ce9071ebdfd5580fa0499a7bf354f1841 Mon Sep 17 00:00:00 2001 From: FuGangqiang Date: Sun, 19 Apr 2015 23:42:27 +0800 Subject: [PATCH 0182/1928] fix typo --- src/sds.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/sds.c b/src/sds.c index 05ee0ad56..7c2d00ed9 100644 --- a/src/sds.c +++ b/src/sds.c @@ -71,7 +71,7 @@ sds sdsempty(void) { return sdsnewlen("",0); } -/* Create a new sds string starting from a null termined C string. */ +/* Create a new sds string starting from a null terminated C string. */ sds sdsnew(const char *init) { size_t initlen = (init == NULL) ? 0 : strlen(init); return sdsnewlen(init, initlen); From 239494db645c6ed87e605cc4b0a10db78b50e5cd Mon Sep 17 00:00:00 2001 From: FuGangqiang Date: Mon, 20 Apr 2015 21:46:48 +0800 Subject: [PATCH 0183/1928] fix doc example --- src/sds.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/sds.c b/src/sds.c index 7c2d00ed9..5c7cdcb12 100644 --- a/src/sds.c +++ b/src/sds.c @@ -557,7 +557,7 @@ sds sdscatfmt(sds s, char const *fmt, ...) { * Example: * * s = sdsnew("AA...AA.a.aa.aHelloWorld :::"); - * s = sdstrim(s,"A. :"); + * s = sdstrim(s,"Aa. 
:"); * printf("%s\n", s); * * Output will be just "Hello World". From 26a1a08fc798fb36c8086138b5f78119aa8c0e21 Mon Sep 17 00:00:00 2001 From: FuGangqiang Date: Mon, 20 Apr 2015 23:03:34 +0800 Subject: [PATCH 0184/1928] sdsfree x and y --- src/sds.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/src/sds.c b/src/sds.c index 5c7cdcb12..2ebe286d1 100644 --- a/src/sds.c +++ b/src/sds.c @@ -1098,6 +1098,7 @@ int sdsTest(int argc, char *argv[]) { unsigned int oldfree; sdsfree(x); + sdsfree(y); x = sdsnew("0"); sh = (void*) (x-(sizeof(struct sdshdr))); test_cond("sdsnew() free/len buffers", sh->len == 1 && sh->free == 0); @@ -1110,6 +1111,8 @@ int sdsTest(int argc, char *argv[]) { test_cond("sdsIncrLen() -- content", x[0] == '0' && x[1] == '1'); test_cond("sdsIncrLen() -- len", sh->len == 2); test_cond("sdsIncrLen() -- free", sh->free == oldfree-1); + + sdsfree(x); } } test_report() From 921ca063f70b6e725ca6159a00aa02d8af1e4181 Mon Sep 17 00:00:00 2001 From: Itamar Haber Date: Tue, 21 Apr 2015 18:54:49 +0300 Subject: [PATCH 0185/1928] update copyright year --- COPYING | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/COPYING b/COPYING index a58de44dd..ac68e012b 100644 --- a/COPYING +++ b/COPYING @@ -1,4 +1,4 @@ -Copyright (c) 2006-2014, Salvatore Sanfilippo +Copyright (c) 2006-2015, Salvatore Sanfilippo All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: From 066d7a29ebe5a5a2a567f15e65a6689ff71e8248 Mon Sep 17 00:00:00 2001 From: Itamar Haber Date: Fri, 24 Apr 2015 01:33:41 +0300 Subject: [PATCH 0186/1928] Added reference to IANA ticket for port 6379 Just so it's extra official :smile: --- redis.conf | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/redis.conf b/redis.conf index d0684e86a..36b4f840a 100644 --- a/redis.conf +++ b/redis.conf @@ -55,7 +55,7 @@ # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ bind 127.0.0.1 -# Accept connections on the specified port, default is 6379. +# Accept connections on the specified port, default is 6379 (IANA #815344). # If port 0 is specified Redis will not listen on a TCP socket. port 6379 From 49c1b60bd8cbca6bbec7a171645dfeb67c1a7ddf Mon Sep 17 00:00:00 2001 From: Yossi Gottlieb Date: Sun, 26 Apr 2015 12:04:16 +0300 Subject: [PATCH 0187/1928] Fix Redis server crash when Lua command exceeds client output buffer limit. --- src/networking.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/networking.c b/src/networking.c index 761645cb2..19032ae01 100644 --- a/src/networking.c +++ b/src/networking.c @@ -805,7 +805,7 @@ void freeClient(redisClient *c) { * a context where calling freeClient() is not possible, because the client * should be valid for the continuation of the flow of the program. */ void freeClientAsync(redisClient *c) { - if (c->flags & REDIS_CLOSE_ASAP) return; + if (c->flags & REDIS_CLOSE_ASAP || c->flags & REDIS_LUA_CLIENT) return; c->flags |= REDIS_CLOSE_ASAP; listAddNodeTail(server.clients_to_close,c); } From 1a93501f8baebfb44bc2548b2026d47c3cc91d1f Mon Sep 17 00:00:00 2001 From: antirez Date: Sun, 26 Apr 2015 19:23:24 +0200 Subject: [PATCH 0188/1928] Example redis.conf doc about pidfile fixed. An user changed the behavior via a PR without upgrading the doc. 
--- redis.conf | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/redis.conf b/redis.conf index d0684e86a..127912c31 100644 --- a/redis.conf +++ b/redis.conf @@ -113,8 +113,15 @@ daemonize no # They do not enable continuous liveness pings back to your supervisor. supervised no -# When running daemonized, Redis writes a pid file in /var/run/redis.pid by -# default. You can specify a custom pid file location here. +# If a pid file is specified, Redis writes it where specified at startup +# and removes it at exit. +# +# When the server runs non daemonized, no pid file is created if none is +# specified in the configuration. When the server is daemonized, the pid file +# is used even if not specified, defaulting to "/var/run/redis.pid". +# +# Creating a pid file is best effort: if Redis is not able to create it +# nothing bad happens, the server will start and run normally. pidfile /var/run/redis.pid # Specify the server verbosity level. From 1b25757f415d6e6da0cdf1769f94f8e318e5be25 Mon Sep 17 00:00:00 2001 From: antirez Date: Mon, 27 Apr 2015 12:07:49 +0200 Subject: [PATCH 0189/1928] sha1.c: use standard uint32_t. --- src/sha1.c | 14 +++++++------- src/sha1.h | 8 ++++---- 2 files changed, 11 insertions(+), 11 deletions(-) diff --git a/src/sha1.c b/src/sha1.c index 7f73b40d3..ce487e367 100644 --- a/src/sha1.c +++ b/src/sha1.c @@ -23,7 +23,7 @@ A million repetitions of "a" #include #include -#include /* for u_int*_t */ +#include #include "solarisfixes.h" #include "sha1.h" #include "config.h" @@ -53,12 +53,12 @@ A million repetitions of "a" /* Hash a single 512-bit block. This is the core of the algorithm. */ -void SHA1Transform(u_int32_t state[5], const unsigned char buffer[64]) +void SHA1Transform(uint32_t state[5], const unsigned char buffer[64]) { - u_int32_t a, b, c, d, e; + uint32_t a, b, c, d, e; typedef union { unsigned char c[64]; - u_int32_t l[16]; + uint32_t l[16]; } CHAR64LONG16; #ifdef SHA1HANDSOFF CHAR64LONG16 block[1]; /* use array to appear as a pointer */ @@ -128,9 +128,9 @@ void SHA1Init(SHA1_CTX* context) /* Run your data through this. 
*/ -void SHA1Update(SHA1_CTX* context, const unsigned char* data, u_int32_t len) +void SHA1Update(SHA1_CTX* context, const unsigned char* data, uint32_t len) { - u_int32_t i, j; + uint32_t i, j; j = context->count[0]; if ((context->count[0] += len << 3) < j) @@ -168,7 +168,7 @@ void SHA1Final(unsigned char digest[20], SHA1_CTX* context) for (i = 0; i < 2; i++) { - u_int32_t t = context->count[i]; + uint32_t t = context->count[i]; int j; for (j = 0; j < 4; t >>= 8, j++) diff --git a/src/sha1.h b/src/sha1.h index 4c76d19da..f41691258 100644 --- a/src/sha1.h +++ b/src/sha1.h @@ -8,14 +8,14 @@ By Steve Reid */ typedef struct { - u_int32_t state[5]; - u_int32_t count[2]; + uint32_t state[5]; + uint32_t count[2]; unsigned char buffer[64]; } SHA1_CTX; -void SHA1Transform(u_int32_t state[5], const unsigned char buffer[64]); +void SHA1Transform(uint32_t state[5], const unsigned char buffer[64]); void SHA1Init(SHA1_CTX* context); -void SHA1Update(SHA1_CTX* context, const unsigned char* data, u_int32_t len); +void SHA1Update(SHA1_CTX* context, const unsigned char* data, uint32_t len); void SHA1Final(unsigned char digest[20], SHA1_CTX* context); #ifdef REDIS_TEST From 3ff49afff1bf9b0d84d8ad53fb2d6b9b5d4b039c Mon Sep 17 00:00:00 2001 From: Jan-Erik Rediger Date: Tue, 28 Apr 2015 11:07:21 +0200 Subject: [PATCH 0190/1928] Fix spelling and grammatical errors in readme Closes #2549 --- README.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/README.md b/README.md index 0565418f6..8efc7faca 100644 --- a/README.md +++ b/README.md @@ -3,15 +3,15 @@ This README is just a fast *quick start* document. You can find more detailed do What is Redis? -------------- -Redis is often referred as a *data structures* server. What this means is that Redis provides access to mutable data structures via a set of commands, which are send using a *server-client* model with TCP sockets and a simple protocol. So different processes can query and modify the same data structures in a shared way. +Redis is often referred as a *data structures* server. What this means is that Redis provides access to mutable data structures via a set of commands, which are sent using a *server-client* model with TCP sockets and a simple protocol. So different processes can query and modify the same data structures in a shared way. Data structures implemented into Redis have a few special properties: * Redis cares to store them on disk, even if they are always served and modified into the server memory. This means that Redis is fast, but that is also non-volatile. * Implementation of data structures stress on memory efficiency, so data structures inside Redis will likely use less memory compared to the same data structure modeled using an high level programming language. -* Redis offers a number of features that are natural to find into a database, like replication, tunable levels of durability, cluster, high availability. +* Redis offers a number of features that are natural to find in a database, like replication, tunable levels of durability, cluster, high availability. -Another good example is to think at Redis as a more complex version of memcached, where the opeations are not just SETs and GETs, but operations to work with complex data types like Lists, Sets, ordered data structures, and so forth. 
+Another good example is to think at Redis as a more complex version of memcached, where the operations are not just SETs and GETs, but operations to work with complex data types like Lists, Sets, ordered data structures, and so forth. If you want to know more, this is a list of selected starting points: From fb53288110b6a55802999a1e9f036abefc02e01d Mon Sep 17 00:00:00 2001 From: Jan-Erik Rediger Date: Tue, 28 Apr 2015 15:21:11 +0200 Subject: [PATCH 0191/1928] One more small fix --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 8efc7faca..223ef13c6 100644 --- a/README.md +++ b/README.md @@ -11,7 +11,7 @@ Data structures implemented into Redis have a few special properties: * Implementation of data structures stress on memory efficiency, so data structures inside Redis will likely use less memory compared to the same data structure modeled using an high level programming language. * Redis offers a number of features that are natural to find in a database, like replication, tunable levels of durability, cluster, high availability. -Another good example is to think at Redis as a more complex version of memcached, where the operations are not just SETs and GETs, but operations to work with complex data types like Lists, Sets, ordered data structures, and so forth. +Another good example is to think of Redis as a more complex version of memcached, where the operations are not just SETs and GETs, but operations to work with complex data types like Lists, Sets, ordered data structures, and so forth. If you want to know more, this is a list of selected starting points: From eff212ea959e27058df2d459f8acbe690376b888 Mon Sep 17 00:00:00 2001 From: "clark.kang" Date: Wed, 29 Apr 2015 00:05:26 +0900 Subject: [PATCH 0192/1928] fix sentinel memory leak --- src/sentinel.c | 1 + 1 file changed, 1 insertion(+) diff --git a/src/sentinel.c b/src/sentinel.c index a099156e9..ae83996b6 100644 --- a/src/sentinel.c +++ b/src/sentinel.c @@ -922,6 +922,7 @@ sentinelRedisInstance *createSentinelRedisInstance(char *name, int flags, char * else if (flags & SRI_SENTINEL) table = master->sentinels; sdsname = sdsnew(name); if (dictFind(table,sdsname)) { + releaseSentinelAddr(addr); sdsfree(sdsname); errno = EBUSY; return NULL; From c806dd799bc8f3c578581194d499b50acec44b7d Mon Sep 17 00:00:00 2001 From: antirez Date: Tue, 28 Apr 2015 22:10:23 +0200 Subject: [PATCH 0193/1928] Fix Sentinel memory leak (hiredis bug) This fixes issue #2535, that was actually an hiredis library bug (I submitted an issue and fix to the redis/hiredis repo as well). When an asynchronous hiredis connection subscribes to a Pub/Sub channel and gets an error, and in other related conditions, the function redisProcessCallbacks() enters a code path where the link is disconnected, however the function returns before freeing the allocated reply object. This causes a memory leak. The memory leak was trivial to trigger in Redis Sentinel, which uses hiredis, every time we tried to subscribe to an instance that required a password, in case the Sentinel was configured either with the wrong password or without password at all. In this case, the -AUTH error caused the leaking code path to be executed. It was verified with Valgrind that after this change the leak no longer happens in Sentinel with a misconfigured authentication password. 
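Both leak fixes in this stretch have the same shape: an early-return error path that skips releasing something allocated earlier in the function. A common C defence is to funnel every exit through a single cleanup label so temporary allocations cannot escape; a small illustrative sketch, unrelated to the actual Sentinel or hiredis code:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* Parse "host:port"; returns 0 on success. Every failure exits through
     * the cleanup label so the temporary copy is always freed. */
    static int parse_addr(const char *spec, char **host, int *port) {
        int ret = -1;
        char *copy = strdup(spec);
        if (copy == NULL) return -1;

        char *colon = strchr(copy, ':');
        if (colon == NULL) goto cleanup;            /* malformed: still frees copy */
        *colon = '\0';

        *port = atoi(colon + 1);
        if (*port <= 0 || *port > 65535) goto cleanup;

        *host = strdup(copy);
        if (*host == NULL) goto cleanup;
        ret = 0;

    cleanup:
        free(copy);
        return ret;
    }

    int main(void) {
        char *host; int port;
        if (parse_addr("127.0.0.1:26379", &host, &port) == 0) {
            printf("%s %d\n", host, port);
            free(host);
        }
        return 0;
    }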
--- deps/hiredis/async.c | 1 + 1 file changed, 1 insertion(+) diff --git a/deps/hiredis/async.c b/deps/hiredis/async.c index f7f343bef..9cc35638f 100644 --- a/deps/hiredis/async.c +++ b/deps/hiredis/async.c @@ -443,6 +443,7 @@ void redisProcessCallbacks(redisAsyncContext *ac) { if (((redisReply*)reply)->type == REDIS_REPLY_ERROR) { c->err = REDIS_ERR_OTHER; snprintf(c->errstr,sizeof(c->errstr),"%s",((redisReply*)reply)->str); + c->reader->fn->freeObject(reply); __redisAsyncDisconnect(ac); return; } From 9e7f39d29dbd382212478f1c425ca4dcba89228b Mon Sep 17 00:00:00 2001 From: antirez Date: Wed, 29 Apr 2015 10:33:21 +0200 Subject: [PATCH 0194/1928] Add header guard for ziplist.h As suggested in #2543. --- src/ziplist.h | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/src/ziplist.h b/src/ziplist.h index e92b5e783..ae96823f9 100644 --- a/src/ziplist.h +++ b/src/ziplist.h @@ -28,6 +28,9 @@ * POSSIBILITY OF SUCH DAMAGE. */ +#ifndef _ZIPLIST_H +#define _ZIPLIST_H + #define ZIPLIST_HEAD 0 #define ZIPLIST_TAIL 1 @@ -49,3 +52,5 @@ size_t ziplistBlobLen(unsigned char *zl); #ifdef REDIS_TEST int ziplistTest(int argc, char *argv[]); #endif + +#endif /* _ZIPLIST_H */ From 99c93f34a76fd7b64847bdbb5b8828f3bb5ea09e Mon Sep 17 00:00:00 2001 From: antirez Date: Mon, 4 May 2015 12:50:44 +0200 Subject: [PATCH 0195/1928] Sentinel: remove useless sentinelFlushConfig() call To rewrite the config in the loop that adds slaves back after a master reset, in order to handle switching to another master, is useless: it just adds latency since there is an fsync call in the inner loop, without providing any additional guarantee, but the contrary, since if after the first loop iteration the server crashes we end with just a single slave entry losing all the other informations. It is wiser to rewrite the config at the end when the full new state is configured. --- src/sentinel.c | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/src/sentinel.c b/src/sentinel.c index ae83996b6..8d1311f4b 100644 --- a/src/sentinel.c +++ b/src/sentinel.c @@ -1271,10 +1271,7 @@ int sentinelResetMasterAndChangeAddress(sentinelRedisInstance *master, char *ip, slave = createSentinelRedisInstance(NULL,SRI_SLAVE,slaves[j]->ip, slaves[j]->port, master->quorum, master); releaseSentinelAddr(slaves[j]); - if (slave) { - sentinelEvent(REDIS_NOTICE,"+slave",slave,"%@"); - sentinelFlushConfig(); - } + if (slave) sentinelEvent(REDIS_NOTICE,"+slave",slave,"%@"); } zfree(slaves); From cc799d253fc16f9f9dbc5f32269da0e90d78409f Mon Sep 17 00:00:00 2001 From: therealbill Date: Thu, 23 Apr 2015 11:56:15 -0500 Subject: [PATCH 0196/1928] Making sentinel flush config on +slave Originally, only the +slave event which occurs when a slave is reconfigured during sentinelResetMasterAndChangeAddress triggers a flush of the config to disk. However, newly discovered slaves don't apparently trigger this flush but do trigger the +slave event issuance. So if you start up a sentinel, add a master, then add a slave to the master (as a way to reproduce it) you'll see the +slave event issued, but the sentinel config won't be updated with the known-slave entry. This change makes sentinel do the flush of the config if a new slave is deteted in sentinelRefreshInstanceInfo. 
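The two sentinelFlushConfig() changes above pull in opposite directions for a reason: flushing once per iteration inside a reset loop adds an fsync per slave without making the on-disk state any more complete, while flushing once for a single newly discovered slave is exactly one durable write for one durable fact. A schematic sketch of the loop case, with placeholder helpers rather than Sentinel APIs:

    #include <stdio.h>

    #define NUM_SLAVES 3

    /* Placeholders standing in for "mutate in-memory state" and
     * "rewrite + fsync the config file". */
    static void add_slave(int j)   { printf("add slave #%d\n", j); }
    static void flush_config(void) { printf("rewrite + fsync config\n"); }

    int main(void) {
        /* Costly and no safer: one fsync per slave, and a crash mid-loop
         * still leaves a partial state on disk.
         *
         * for (int j = 0; j < NUM_SLAVES; j++) { add_slave(j); flush_config(); }
         */

        /* Preferred: rebuild the whole state, then persist it once. */
        for (int j = 0; j < NUM_SLAVES; j++) add_slave(j);
        flush_config();
        return 0;
    }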
--- src/sentinel.c | 1 + 1 file changed, 1 insertion(+) diff --git a/src/sentinel.c b/src/sentinel.c index 8d1311f4b..0df7989a3 100644 --- a/src/sentinel.c +++ b/src/sentinel.c @@ -1847,6 +1847,7 @@ void sentinelRefreshInstanceInfo(sentinelRedisInstance *ri, const char *info) { atoi(port), ri->quorum, ri)) != NULL) { sentinelEvent(REDIS_NOTICE,"+slave",slave,"%@"); + sentinelFlushConfig(); } } } From 9f9c44feef1b68bb443270118570c030c04b0085 Mon Sep 17 00:00:00 2001 From: antirez Date: Tue, 5 May 2015 11:17:40 +0200 Subject: [PATCH 0197/1928] Fix order of release scripts. --- utils/releasetools/{00_test_release.sh => 03_test_release.sh} | 0 utils/releasetools/{03_release_hash.sh => 04_release_hash.sh} | 0 2 files changed, 0 insertions(+), 0 deletions(-) rename utils/releasetools/{00_test_release.sh => 03_test_release.sh} (100%) rename utils/releasetools/{03_release_hash.sh => 04_release_hash.sh} (100%) diff --git a/utils/releasetools/00_test_release.sh b/utils/releasetools/03_test_release.sh similarity index 100% rename from utils/releasetools/00_test_release.sh rename to utils/releasetools/03_test_release.sh diff --git a/utils/releasetools/03_release_hash.sh b/utils/releasetools/04_release_hash.sh similarity index 100% rename from utils/releasetools/03_release_hash.sh rename to utils/releasetools/04_release_hash.sh From 8d18692018fb8125df354898e26fda85ca30a47f Mon Sep 17 00:00:00 2001 From: "clark.kang" Date: Tue, 5 May 2015 22:51:27 +0900 Subject: [PATCH 0198/1928] fix compile error for struct msghdr --- src/redis.c | 1 + 1 file changed, 1 insertion(+) diff --git a/src/redis.c b/src/redis.c index 787663e4a..09653119a 100644 --- a/src/redis.c +++ b/src/redis.c @@ -54,6 +54,7 @@ #include #include #include +#include /* Our shared "common" objects */ From f7bd816bbbcb5bb944750c10d798b6b9e63e1d3b Mon Sep 17 00:00:00 2001 From: antirez Date: Tue, 5 May 2015 16:32:53 +0200 Subject: [PATCH 0199/1928] Don't put clients into unblocked list multiple times --- src/blocked.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/src/blocked.c b/src/blocked.c index 8acfb8184..3442b9dd4 100644 --- a/src/blocked.c +++ b/src/blocked.c @@ -137,10 +137,14 @@ void unblockClient(redisClient *c) { /* Clear the flags, and put the client in the unblocked list so that * we'll process new commands in its query buffer ASAP. */ c->flags &= ~REDIS_BLOCKED; - c->flags |= REDIS_UNBLOCKED; c->btype = REDIS_BLOCKED_NONE; server.bpop_blocked_clients--; - listAddNodeTail(server.unblocked_clients,c); + /* The client may already be into the unblocked list because of a previous + * blocking operation, don't add back it into the list multiple times. */ + if (!(c->flags & REDIS_UNBLOCKED)) { + c->flags |= REDIS_UNBLOCKED; + listAddNodeTail(server.unblocked_clients,c); + } } /* This function gets called when a blocked client timed out in order to From 2bc1527a9564cf9d7776fc817b8dc13c3e53c1b0 Mon Sep 17 00:00:00 2001 From: antirez Date: Tue, 5 May 2015 16:35:44 +0200 Subject: [PATCH 0200/1928] processUnblockedClients: don't process clients that blocekd again --- src/blocked.c | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/src/blocked.c b/src/blocked.c index 3442b9dd4..c4618a5cc 100644 --- a/src/blocked.c +++ b/src/blocked.c @@ -117,9 +117,14 @@ void processUnblockedClients(void) { listDelNode(server.unblocked_clients,ln); c->flags &= ~REDIS_UNBLOCKED; - /* Process remaining data in the input buffer. 
*/ - if (c->querybuf && sdslen(c->querybuf) > 0) { - processInputBuffer(c); + /* Process remaining data in the input buffer, unless the client + * is blocked again. Actually processInputBuffer() checks that the + * client is not blocked before to proceed, but things may change and + * the code is conceptually more correct this way. */ + if (!(c->flags & DISQUE_BLOCKED)) { + if (c->querybuf && sdslen(c->querybuf) > 0) { + processInputBuffer(c); + } } } } From 23e304e3132b575d49c3b4c409fec0e40aa602c9 Mon Sep 17 00:00:00 2001 From: antirez Date: Tue, 5 May 2015 16:36:35 +0200 Subject: [PATCH 0201/1928] Substitute DISQUE to REDIS after merge from Disque Probably this stuff should be called CLIENT_* in order to cross merge more easily. --- src/blocked.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/blocked.c b/src/blocked.c index c4618a5cc..2ec2cf626 100644 --- a/src/blocked.c +++ b/src/blocked.c @@ -121,7 +121,7 @@ void processUnblockedClients(void) { * is blocked again. Actually processInputBuffer() checks that the * client is not blocked before to proceed, but things may change and * the code is conceptually more correct this way. */ - if (!(c->flags & DISQUE_BLOCKED)) { + if (!(c->flags & REDIS_BLOCKED)) { if (c->querybuf && sdslen(c->querybuf) > 0) { processInputBuffer(c); } From 794fc4c9a8b2e4721196df341b84cb0569ab0efa Mon Sep 17 00:00:00 2001 From: antirez Date: Wed, 6 May 2015 16:19:14 +0200 Subject: [PATCH 0202/1928] Sentinel: persist its unique ID across restarts. Previously Sentinels always changed unique ID across restarts, relying on the server.runid field. This is not a good idea, and forced Sentinel to rely on detection of duplicated Sentinels and a potentially dangerous clean-up and re-add operation of the Sentinel instance that was rebooted. Now the ID is generated at the first start and persisted in the configuration file, so that a given Sentinel will have its unique ID forever (unless the configuration is manually deleted or there is a filesystem corruption). --- src/sentinel.c | 59 +++++++++++++++++++++++++++++++++++--------------- 1 file changed, 42 insertions(+), 17 deletions(-) diff --git a/src/sentinel.c b/src/sentinel.c index 0df7989a3..0a3dfc772 100644 --- a/src/sentinel.c +++ b/src/sentinel.c @@ -118,7 +118,7 @@ typedef struct sentinelAddr { typedef struct sentinelRedisInstance { int flags; /* See SRI_... defines */ char *name; /* Master name from the point of view of this sentinel. */ - char *runid; /* run ID of this instance. */ + char *runid; /* Run ID of this instance, or unique ID if is a Sentinel.*/ uint64_t config_epoch; /* Configuration epoch. */ sentinelAddr *addr; /* Master host. */ redisAsyncContext *cc; /* Hiredis context for commands. */ @@ -195,19 +195,20 @@ typedef struct sentinelRedisInstance { /* Main state. */ struct sentinelState { - uint64_t current_epoch; /* Current epoch. */ + char myid[REDIS_RUN_ID_SIZE+1]; /* This sentinel ID. */ + uint64_t current_epoch; /* Current epoch. */ dict *masters; /* Dictionary of master sentinelRedisInstances. Key is the instance name, value is the sentinelRedisInstance structure pointer. */ int tilt; /* Are we in TILT mode? */ int running_scripts; /* Number of scripts in execution right now. */ - mstime_t tilt_start_time; /* When TITL started. */ - mstime_t previous_time; /* Last time we ran the time handler. */ - list *scripts_queue; /* Queue of user scripts to execute. */ - char *announce_ip; /* IP addr that is gossiped to other sentinels if - not NULL. 
*/ - int announce_port; /* Port that is gossiped to other sentinels if - non zero. */ + mstime_t tilt_start_time; /* When TITL started. */ + mstime_t previous_time; /* Last time we ran the time handler. */ + list *scripts_queue; /* Queue of user scripts to execute. */ + char *announce_ip; /* IP addr that is gossiped to other sentinels if + not NULL. */ + int announce_port; /* Port that is gossiped to other sentinels if + non zero. */ } sentinel; /* A script execution job. */ @@ -433,12 +434,13 @@ void initSentinel(void) { sentinel.scripts_queue = listCreate(); sentinel.announce_ip = NULL; sentinel.announce_port = 0; + memset(sentinel.myid,0,sizeof(sentinel.myid)); } /* This function gets called when the server is in Sentinel mode, started, * loaded the configuration, and is ready for normal operations. */ void sentinelIsRunning(void) { - redisLog(REDIS_WARNING,"Sentinel runid is %s", server.runid); + int j; if (server.configfile == NULL) { redisLog(REDIS_WARNING, @@ -451,6 +453,21 @@ void sentinelIsRunning(void) { exit(1); } + /* If this Sentinel has yet no ID set in the configuration file, we + * pick a random one and persist the config on disk. From now on this + * will be this Sentinel ID across restarts. */ + for (j = 0; j < REDIS_RUN_ID_SIZE; j++) + if (sentinel.myid[j] != 0) break; + + if (j == REDIS_RUN_ID_SIZE) { + /* Pick ID and presist the config. */ + getRandomHexChars(sentinel.myid,REDIS_RUN_ID_SIZE); + sentinelFlushConfig(); + } + + /* Log its ID to make debugging of issues simpler. */ + redisLog(REDIS_WARNING,"Sentinel ID is %s", sentinel.myid); + /* We want to generate a +monitor event for every configured master * at startup. */ sentinelGenerateInitialMonitorEvents(); @@ -1392,6 +1409,10 @@ char *sentinelHandleConfiguration(char **argv, int argc) { unsigned long long current_epoch = strtoull(argv[1],NULL,10); if (current_epoch > sentinel.current_epoch) sentinel.current_epoch = current_epoch; + } else if (!strcasecmp(argv[0],"myid") && argc == 2) { + if (strlen(argv[1]) != REDIS_RUN_ID_SIZE) + return "Malformed Sentinel id in myid option."; + memcpy(sentinel.myid,argv[1],REDIS_RUN_ID_SIZE); } else if (!strcasecmp(argv[0],"config-epoch") && argc == 3) { /* config-epoch */ ri = sentinelGetMasterByName(argv[1]); @@ -1460,6 +1481,10 @@ void rewriteConfigSentinelOption(struct rewriteConfigState *state) { sentinelRedisInstance *master, *ri; sentinelAddr *master_addr; + /* sentinel unique ID. 
*/ + line = sdscatprintf(sdsempty(), "sentinel myid %s", sentinel.myid); + rewriteConfigRewriteLine(state,"sentinel",line,1); + /* sentinel monitor */ master = dictGetVal(de); master_addr = sentinelGetCurrentMasterAddress(master); @@ -1691,7 +1716,7 @@ void sentinelSendAuthIfNeeded(sentinelRedisInstance *ri, redisAsyncContext *c) { void sentinelSetClientName(sentinelRedisInstance *ri, redisAsyncContext *c, char *type) { char name[64]; - snprintf(name,sizeof(name),"sentinel-%.8s-%s",server.runid,type); + snprintf(name,sizeof(name),"sentinel-%.8s-%s",sentinel.myid,type); if (redisAsyncCommand(c, sentinelDiscardReplyCallback, NULL, "CLIENT SETNAME %s", name) == REDIS_OK) { @@ -2225,7 +2250,7 @@ void sentinelReceiveHelloMessages(redisAsyncContext *c, void *reply, void *privd strcmp(r->element[0]->str,"message") != 0) return; /* We are not interested in meeting ourselves */ - if (strstr(r->element[2]->str,server.runid) != NULL) return; + if (strstr(r->element[2]->str,sentinel.myid) != NULL) return; sentinelProcessHelloMessage(r->element[2]->str, r->element[2]->len); } @@ -2268,7 +2293,7 @@ int sentinelSendHello(sentinelRedisInstance *ri) { snprintf(payload,sizeof(payload), "%s,%d,%s,%llu," /* Info about this sentinel. */ "%s,%s,%d,%llu", /* Info about current master. */ - announce_ip, announce_port, server.runid, + announce_ip, announce_port, sentinel.myid, (unsigned long long) sentinel.current_epoch, /* --- */ master->name,master_addr->ip,master_addr->port, @@ -3233,7 +3258,7 @@ void sentinelAskMasterStateToOtherSentinels(sentinelRedisInstance *master, int f master->addr->ip, port, sentinel.current_epoch, (master->failover_state > SENTINEL_FAILOVER_STATE_NONE) ? - server.runid : "*"); + sentinel.myid : "*"); if (retval == REDIS_OK) ri->pending_commands++; } dictReleaseIterator(di); @@ -3265,7 +3290,7 @@ char *sentinelVoteLeader(sentinelRedisInstance *master, uint64_t req_epoch, char /* If we did not voted for ourselves, set the master failover start * time to now, in order to force a delay before we can start a * failover for the same master. */ - if (strcasecmp(master->leader,server.runid)) + if (strcasecmp(master->leader,sentinel.myid)) master->failover_start_time = mstime()+rand()%SENTINEL_MAX_DESYNC; } @@ -3346,7 +3371,7 @@ char *sentinelGetLeader(sentinelRedisInstance *master, uint64_t epoch) { if (winner) myvote = sentinelVoteLeader(master,epoch,winner,&leader_epoch); else - myvote = sentinelVoteLeader(master,epoch,server.runid,&leader_epoch); + myvote = sentinelVoteLeader(master,epoch,sentinel.myid,&leader_epoch); if (myvote && leader_epoch == epoch) { uint64_t votes = sentinelLeaderIncr(counters,myvote); @@ -3598,7 +3623,7 @@ void sentinelFailoverWaitStart(sentinelRedisInstance *ri) { /* Check if we are the leader for the failover epoch. */ leader = sentinelGetLeader(ri, ri->failover_epoch); - isleader = leader && strcasecmp(leader,server.runid) == 0; + isleader = leader && strcasecmp(leader,sentinel.myid) == 0; sdsfree(leader); /* If I'm not the leader, and it is not a forced failover via From a0cd75cd1b441915655eac070bd7f93cfa0f7990 Mon Sep 17 00:00:00 2001 From: antirez Date: Thu, 7 May 2015 10:03:40 +0200 Subject: [PATCH 0203/1928] Sentinel: don't detect duplicated Sentinels, just address switch Since with a previous commit Sentinels now persist their unique ID, we no longer need to detect duplicated Sentinels and re-add them. We remove and re-add back using different events only in the case of address switch of the same Sentinel, without generating a new +sentinel event. 
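A minimal, self-contained sketch of the idea described above, assuming hypothetical names (peer, helloFromPeer, removePeerById); the real code operates on the sentinelRedisInstance dictionaries shown in the diff below. Identity is keyed on the persistent ID, so an address change simply drops the stale entry and re-adds the same ID at its new address:

    /* Sketch only: peers are identified by a persistent ID, so a hello
     * arriving from a known ID at a new address is handled by removing
     * the old entry and re-adding it with the new ip/port. */
    #include <stdio.h>
    #include <string.h>

    #define MAX_PEERS 16
    #define ID_LEN 40

    typedef struct {
        char id[ID_LEN+1];  /* Persistent unique ID, never changes. */
        char ip[46];
        int port;
        int used;
    } peer;

    static peer peers[MAX_PEERS];

    /* Drop the peer with this ID, if present. Returns 1 if one was removed. */
    static int removePeerById(const char *id) {
        for (int j = 0; j < MAX_PEERS; j++) {
            if (peers[j].used && strcmp(peers[j].id, id) == 0) {
                peers[j].used = 0;
                return 1;
            }
        }
        return 0;
    }

    /* Called when a hello arrives from 'id' announcing ip:port. */
    static void helloFromPeer(const char *id, const char *ip, int port) {
        int removed = removePeerById(id);  /* Address switch: drop old entry. */
        for (int j = 0; j < MAX_PEERS; j++) {
            if (!peers[j].used) {
                snprintf(peers[j].id, sizeof(peers[j].id), "%s", id);
                snprintf(peers[j].ip, sizeof(peers[j].ip), "%s", ip);
                peers[j].port = port;
                peers[j].used = 1;
                printf("%s %s at %s:%d\n",
                       removed ? "+address-switch" : "+new-peer", id, ip, port);
                return;
            }
        }
    }

    int main(void) {
        helloFromPeer("a1b2c3", "10.0.0.1", 26379); /* First sighting. */
        helloFromPeer("a1b2c3", "10.0.0.9", 26379); /* Same ID, new IP. */
        return 0;
    }

Running it prints a "+new-peer" line for the first hello and an "+address-switch" line for the second, loosely mirroring the +sentinel-address-switch event this commit introduces.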
--- src/sentinel.c | 43 +++++++++++++++++-------------------------- 1 file changed, 17 insertions(+), 26 deletions(-) diff --git a/src/sentinel.c b/src/sentinel.c index 0a3dfc772..8d4105889 100644 --- a/src/sentinel.c +++ b/src/sentinel.c @@ -1068,35 +1068,29 @@ const char *sentinelRedisInstanceTypeStr(sentinelRedisInstance *ri) { else return "unknown"; } -/* This function removes all the instances found in the dictionary of - * sentinels in the specified 'master', having either: +/* This function remove the Sentinel with the specified ID from the + * specified master. * - * 1) The same ip/port as specified. - * 2) The same runid. + * If "runid" is NULL the function returns ASAP. * - * "1" and "2" don't need to verify at the same time, just one is enough. - * If "runid" is NULL it is not checked. - * Similarly if "ip" is NULL it is not checked. + * This function is useful because on Sentinels address switch, we want to + * remove our old entry and add a new one for the same ID but with the new + * address. * - * This function is useful because every time we add a new Sentinel into - * a master's Sentinels dictionary, we want to be very sure about not - * having duplicated instances for any reason. This is important because - * other sentinels are needed to reach ODOWN quorum, and later to get - * voted for a given configuration epoch in order to perform the failover. - * - * The function returns the number of Sentinels removed. */ -int removeMatchingSentinelsFromMaster(sentinelRedisInstance *master, char *ip, int port, char *runid) { + * The function returns 1 if the matching Sentinel was removed, otherwise + * 0 if there was no Sentinel with this ID. */ +int removeMatchingSentinelFromMaster(sentinelRedisInstance *master, char *runid) { dictIterator *di; dictEntry *de; int removed = 0; + if (runid == NULL) return 0; + di = dictGetSafeIterator(master->sentinels); while((de = dictNext(di)) != NULL) { sentinelRedisInstance *ri = dictGetVal(de); - if ((ri->runid && runid && strcmp(ri->runid,runid) == 0) || - (ip && strcmp(ri->addr->ip,ip) == 0 && port == ri->addr->port)) - { + if (ri->runid && strcmp(ri->runid,runid) == 0) { dictDelete(master->sentinels,ri->name); removed++; } @@ -2161,21 +2155,18 @@ void sentinelProcessHelloMessage(char *hello, int hello_len) { if (!si) { /* If not, remove all the sentinels that have the same runid - * OR the same ip/port, because it's either a restart or a - * network topology change. */ - removed = removeMatchingSentinelsFromMaster(master,token[0],port, - token[2]); + * because there was an address change, and add the same Sentinel + * with the new address back. */ + removed = removeMatchingSentinelFromMaster(master,token[2]); if (removed) { - sentinelEvent(REDIS_NOTICE,"-dup-sentinel",master, - "%@ #duplicate of %s:%d or %s", - token[0],port,token[2]); + sentinelEvent(REDIS_NOTICE,"+sentinel-address-switch",master, + "%@ ip %s port %d for %s", token[0],port,token[2]); } /* Add the new sentinel. */ si = createSentinelRedisInstance(NULL,SRI_SENTINEL, token[0],port,master->quorum,master); if (si) { - sentinelEvent(REDIS_NOTICE,"+sentinel",si,"%@"); /* The runid is NULL after a new instance creation and * for Sentinels we don't have a later chance to fill it, * so do it now. 
*/ From b849886a0df86f17d8c2f4be35f503c58dd5d178 Mon Sep 17 00:00:00 2001 From: antirez Date: Fri, 8 May 2015 17:04:01 +0200 Subject: [PATCH 0204/1928] Sentinel: clarify arguments of SENTINEL IS-MASTER-DOWN-BY-ADDR --- src/sentinel.c | 18 +++++++++++++++++- 1 file changed, 17 insertions(+), 1 deletion(-) diff --git a/src/sentinel.c b/src/sentinel.c index 8d4105889..ef04eb6c9 100644 --- a/src/sentinel.c +++ b/src/sentinel.c @@ -2665,7 +2665,23 @@ void sentinelCommand(redisClient *c) { return; addReplyDictOfRedisInstances(c,ri->sentinels); } else if (!strcasecmp(c->argv[1]->ptr,"is-master-down-by-addr")) { - /* SENTINEL IS-MASTER-DOWN-BY-ADDR */ + /* SENTINEL IS-MASTER-DOWN-BY-ADDR + * + * Arguments: + * + * ip and port are the ip and port of the master we want to be + * checked by Sentinel. Note that the command will not check by + * name but just by master, in theory different Sentinels may monitor + * differnet masters with the same name. + * + * current-epoch is needed in order to understand if we are allowed + * to vote for a failover leader or not. Each Senitnel can vote just + * one time per epoch. + * + * runid is "*" if we are not seeking for a vote from the Sentinel + * in order to elect the failover leader. Otherwise it is set to the + * runid we want the Sentinel to vote if it did not already voted. + */ sentinelRedisInstance *ri; long long req_epoch; uint64_t leader_epoch = 0; From b91434cab122f7760b7ae4b5c514eda17e644ac8 Mon Sep 17 00:00:00 2001 From: antirez Date: Fri, 8 May 2015 17:12:13 +0200 Subject: [PATCH 0205/1928] Sentinel: Use privdata instead of c->data in sentinelReceiveHelloMessages() This way we may later share the hiredis link "c" among the same Sentinel instance referenced multiple times for multiple masters. --- src/sentinel.c | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/src/sentinel.c b/src/sentinel.c index ef04eb6c9..9524f42bc 100644 --- a/src/sentinel.c +++ b/src/sentinel.c @@ -1767,7 +1767,7 @@ void sentinelReconnectInstance(sentinelRedisInstance *ri) { sentinelSetClientName(ri,ri->pc,"pubsub"); /* Now we subscribe to the Sentinels "Hello" channel. */ retval = redisAsyncCommand(ri->pc, - sentinelReceiveHelloMessages, NULL, "SUBSCRIBE %s", + sentinelReceiveHelloMessages, ri, "SUBSCRIBE %s", SENTINEL_HELLO_CHANNEL); if (retval != REDIS_OK) { /* If we can't subscribe, the Pub/Sub connection is useless @@ -2219,9 +2219,8 @@ cleanup: /* This is our Pub/Sub callback for the Hello channel. It's useful in order * to discover other sentinels attached at the same master. */ void sentinelReceiveHelloMessages(redisAsyncContext *c, void *reply, void *privdata) { - sentinelRedisInstance *ri = c->data; + sentinelRedisInstance *ri = privdata; redisReply *r; - REDIS_NOTUSED(privdata); if (!reply || !ri) return; r = reply; @@ -3186,9 +3185,8 @@ void sentinelCheckObjectivelyDown(sentinelRedisInstance *master) { /* Receive the SENTINEL is-master-down-by-addr reply, see the * sentinelAskMasterStateToOtherSentinels() function for more information. 
*/ void sentinelReceiveIsMasterDownReply(redisAsyncContext *c, void *reply, void *privdata) { - sentinelRedisInstance *ri = c->data; + sentinelRedisInstance *ri = privdata; redisReply *r; - REDIS_NOTUSED(privdata); if (ri) ri->pending_commands--; if (!reply || !ri) return; @@ -3260,7 +3258,7 @@ void sentinelAskMasterStateToOtherSentinels(sentinelRedisInstance *master, int f /* Ask */ ll2string(port,sizeof(port),master->addr->port); retval = redisAsyncCommand(ri->cc, - sentinelReceiveIsMasterDownReply, NULL, + sentinelReceiveIsMasterDownReply, ri, "SENTINEL is-master-down-by-addr %s %s %llu %s", master->addr->ip, port, sentinel.current_epoch, From 3eca0752a68e6b2185c35ed95b053f7f3562b618 Mon Sep 17 00:00:00 2001 From: antirez Date: Fri, 8 May 2015 17:15:26 +0200 Subject: [PATCH 0206/1928] Sentinel: generate +sentinel again, removed in prev commit. --- src/sentinel.c | 1 + 1 file changed, 1 insertion(+) diff --git a/src/sentinel.c b/src/sentinel.c index 9524f42bc..8e0dd2005 100644 --- a/src/sentinel.c +++ b/src/sentinel.c @@ -2167,6 +2167,7 @@ void sentinelProcessHelloMessage(char *hello, int hello_len) { si = createSentinelRedisInstance(NULL,SRI_SENTINEL, token[0],port,master->quorum,master); if (si) { + if (!removed) sentinelEvent(REDIS_NOTICE,"+sentinel",si,"%@"); /* The runid is NULL after a new instance creation and * for Sentinels we don't have a later chance to fill it, * so do it now. */ From 611283f7438009fe690023673c1fac5d9393034d Mon Sep 17 00:00:00 2001 From: antirez Date: Fri, 8 May 2015 17:17:59 +0200 Subject: [PATCH 0207/1928] Sentinel: suppress warnings for not used args. --- src/sentinel.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/sentinel.c b/src/sentinel.c index 8e0dd2005..dfb096d34 100644 --- a/src/sentinel.c +++ b/src/sentinel.c @@ -2222,6 +2222,7 @@ cleanup: void sentinelReceiveHelloMessages(redisAsyncContext *c, void *reply, void *privdata) { sentinelRedisInstance *ri = privdata; redisReply *r; + REDIS_NOTUSED(c); if (!reply || !ri) return; r = reply; @@ -3188,6 +3189,7 @@ void sentinelCheckObjectivelyDown(sentinelRedisInstance *master) { void sentinelReceiveIsMasterDownReply(redisAsyncContext *c, void *reply, void *privdata) { sentinelRedisInstance *ri = privdata; redisReply *r; + REDIS_NOTUSED(c); if (ri) ri->pending_commands--; if (!reply || !ri) return; From 1029276c0d46e643a5740120d44a9cce8ba652b9 Mon Sep 17 00:00:00 2001 From: antirez Date: Mon, 11 May 2015 13:15:26 +0200 Subject: [PATCH 0208/1928] Sentinel: connection sharing WIP #1 --- src/sentinel.c | 515 ++++++++++++++++++++++++++++--------------------- 1 file changed, 295 insertions(+), 220 deletions(-) diff --git a/src/sentinel.c b/src/sentinel.c index dfb096d34..c28bf6dd3 100644 --- a/src/sentinel.c +++ b/src/sentinel.c @@ -54,19 +54,18 @@ typedef struct sentinelAddr { #define SRI_MASTER (1<<0) #define SRI_SLAVE (1<<1) #define SRI_SENTINEL (1<<2) -#define SRI_DISCONNECTED (1<<3) -#define SRI_S_DOWN (1<<4) /* Subjectively down (no quorum). */ -#define SRI_O_DOWN (1<<5) /* Objectively down (confirmed by others). */ -#define SRI_MASTER_DOWN (1<<6) /* A Sentinel with this flag set thinks that +#define SRI_S_DOWN (1<<3) /* Subjectively down (no quorum). */ +#define SRI_O_DOWN (1<<4) /* Objectively down (confirmed by others). */ +#define SRI_MASTER_DOWN (1<<5) /* A Sentinel with this flag set thinks that its master is down. */ -#define SRI_FAILOVER_IN_PROGRESS (1<<7) /* Failover is in progress for +#define SRI_FAILOVER_IN_PROGRESS (1<<6) /* Failover is in progress for this master. 
*/ -#define SRI_PROMOTED (1<<8) /* Slave selected for promotion. */ -#define SRI_RECONF_SENT (1<<9) /* SLAVEOF sent. */ -#define SRI_RECONF_INPROG (1<<10) /* Slave synchronization in progress. */ -#define SRI_RECONF_DONE (1<<11) /* Slave synchronized with new master. */ -#define SRI_FORCE_FAILOVER (1<<12) /* Force failover with master up. */ -#define SRI_SCRIPT_KILL_SENT (1<<13) /* SCRIPT KILL already sent on -BUSY */ +#define SRI_PROMOTED (1<<7) /* Slave selected for promotion. */ +#define SRI_RECONF_SENT (1<<8) /* SLAVEOF sent. */ +#define SRI_RECONF_INPROG (1<<9) /* Slave synchronization in progress. */ +#define SRI_RECONF_DONE (1<<10) /* Slave synchronized with new master. */ +#define SRI_FORCE_FAILOVER (1<<11) /* Force failover with master up. */ +#define SRI_SCRIPT_KILL_SENT (1<<12) /* SCRIPT KILL already sent on -BUSY */ /* Note: times are in milliseconds. */ #define SENTINEL_INFO_PERIOD 10000 @@ -115,15 +114,26 @@ typedef struct sentinelAddr { #define SENTINEL_SCRIPT_MAX_RETRY 10 #define SENTINEL_SCRIPT_RETRY_DELAY 30000 /* 30 seconds between retries. */ -typedef struct sentinelRedisInstance { - int flags; /* See SRI_... defines */ - char *name; /* Master name from the point of view of this sentinel. */ - char *runid; /* Run ID of this instance, or unique ID if is a Sentinel.*/ - uint64_t config_epoch; /* Configuration epoch. */ - sentinelAddr *addr; /* Master host. */ +/* The link to a sentinelRedisInstance. When we have the same set of Sentinels + * monitoring many masters, we have different instances representing the + * same Sentinels, one per master, and we need to share the hiredis connections + * among them. Oherwise if 5 Senintels are monitoring 100 masters we create + * 500 outgoing connections instead of 5. + * + * So this structure represents a reference counted link in terms of the two + * hiredis connections for commands and Pub/Sub, and the fields needed for + * failure detection, since the ping/pong time are now local to the link: if + * the link is available, the instance is avaialbe. This way we don't just + * have 5 connections instead of 500, we also send 5 pings instead of 500. + * + * Links are shared only for Sentinels: master and slave instances have + * a link with refcount = 1, always. */ +typedef struct instanceLink { + int refcount; /* Number of sentinelRedisInstance owners. */ + int disconnected; /* Non-zero if we need to reconnect cc or pc. */ + int pending_commands; /* Number of commands sent waiting for a reply. */ redisAsyncContext *cc; /* Hiredis context for commands. */ redisAsyncContext *pc; /* Hiredis context for Pub / Sub. */ - int pending_commands; /* Number of commands sent waiting for a reply. */ mstime_t cc_conn_time; /* cc connection time. */ mstime_t pc_conn_time; /* pc connection time. */ mstime_t pc_last_activity; /* Last time we received any message. */ @@ -136,6 +146,15 @@ typedef struct sentinelRedisInstance { mstime_t last_pong_time; /* Last time the instance replied to ping, whatever the reply was. That's used to check if the link is idle and must be reconnected. */ +} instanceLink; + +typedef struct sentinelRedisInstance { + int flags; /* See SRI_... defines */ + char *name; /* Master name from the point of view of this sentinel. */ + char *runid; /* Run ID of this instance, or unique ID if is a Sentinel.*/ + uint64_t config_epoch; /* Configuration epoch. */ + sentinelAddr *addr; /* Master host. */ + instanceLink *link; /* Link to the instance, may be shared for Sentinels. 
*/ mstime_t last_pub_time; /* Last time we sent hello via Pub/Sub. */ mstime_t last_hello_time; /* Only used if SRI_SENTINEL is set. Last time we received a hello from this Sentinel @@ -328,8 +347,7 @@ sentinelRedisInstance *sentinelGetMasterByName(char *name); char *sentinelGetSubjectiveLeader(sentinelRedisInstance *master); char *sentinelGetObjectiveLeader(sentinelRedisInstance *master); int yesnotoi(char *s); -void sentinelDisconnectInstanceFromContext(const redisAsyncContext *c); -void sentinelKillLink(sentinelRedisInstance *ri, redisAsyncContext *c); +void instanceLinkConnectionError(const redisAsyncContext *c); const char *sentinelRedisInstanceTypeStr(sentinelRedisInstance *ri); void sentinelAbortFailover(sentinelRedisInstance *ri); void sentinelEvent(int level, char *type, sentinelRedisInstance *ri, const char *fmt, ...); @@ -889,6 +907,118 @@ void sentinelCallClientReconfScript(sentinelRedisInstance *master, int role, cha state, from->ip, fromport, to->ip, toport, NULL); } +/* =============================== instanceLink ============================= */ + +/* Create a not yet connected link object. */ +instanceLink *createInstanceLink(void) { + instanceLink *link = zmalloc(sizeof(*link)); + + link->refcount = 1; + link->disconnected = 1; + link->pending_commands = 0; + link->cc = NULL; + link->pc = NULL; + link->cc_conn_time = 0; + link->pc_conn_time = 0; + link->pc_last_activity = 0; + /* We set the last_ping_time to "now" even if we actually don't have yet + * a connection with the node, nor we sent a ping. + * This is useful to detect a timeout in case we'll not be able to connect + * with the node at all. */ + link->last_ping_time = mstime(); + link->last_avail_time = mstime(); + link->last_pong_time = mstime(); + return link; +} + +/* Disconnect an hiredis connection in the context of an instance link. */ +void instanceLinkCloseConnection(instanceLink *link, redisAsyncContext *c) { + if (c == NULL) return; + + if (link->cc == c) { + link->cc = NULL; + link->pending_commands = 0; + } + if (link->pc == c) link->pc = NULL; + c->data = NULL; + link->disconnected = 1; + redisAsyncFree(c); +} + +/* Decrement the refcount of a link object, if it drops to zero, actually + * free it and return NULL. Otherwise don't do anything and return the pointer + * to the object. + * + * If we are not going to free the link and ri is not NULL, we rebind all the + * pending requests in link->cc (hiredis connection for commands) to a + * callback that will just ignore them. This is useful to avoid processing + * replies for an instance that no longer exists. */ +instanceLink *releaseInstanceLink(instanceLink *link, sentinelRedisInstance *ri) +{ + redisAssert(link->refcount > 0); + link->refcount--; + if (link->refcount != 0) { + if (ri) { + /* TODO: run the callbacks list and rebind. */ + } + return link; /* Other active users. */ + } + + instanceLinkCloseConnection(link,link->cc); + instanceLinkCloseConnection(link,link->pc); + zfree(link); + return NULL; +} + +/* This function will attempt to share the instance link we already have + * for the same Sentinel in the context of a different master, with the + * instance we are passing as argument. + * + * This way multiple Sentinel objects that refer all to the same physical + * Sentinel instance but in the context of different masters will use + * a single connection, will send a single PING per second for failure + * detection and so forth. 
*/ +void tryConnectionSharing(sentinelRedisInstance *ri) { + redisAssert(ri->flags & SRI_SENTINEL); + + /* TODO: + * 1) Check if there is a match. + * 2) Free our current link. + * 3) Reference the other link and increment its reference count. */ + REDIS_NOTUSED(ri); +} + +/* This function is called when an hiredis connection reported an error. + * We set it to NULL and mark the link as disconnected so that it will be + * reconnected again. + * + * Note: we don't free the hiredis context as hiredis will do it for us + * for async connections. */ +void instanceLinkConnectionError(const redisAsyncContext *c) { + instanceLink *link = c->data; + int pubsub; + + if (!link) return; + + pubsub = (link->pc == c); + if (pubsub) + link->pc = NULL; + else + link->cc = NULL; + link->disconnected = 1; +} + +/* Hiredis connection established / disconnected callbacks. We need them + * just to cleanup our link state. */ +void sentinelLinkEstablishedCallback(const redisAsyncContext *c, int status) { + if (status != REDIS_OK) instanceLinkConnectionError(c); +} + +void sentinelDisconnectCallback(const redisAsyncContext *c, int status) { + REDIS_NOTUSED(status); + instanceLinkConnectionError(c); +} + /* ========================== sentinelRedisInstance ========================= */ /* Create a redis instance, the following fields must be populated by the @@ -911,6 +1041,7 @@ void sentinelCallClientReconfScript(sentinelRedisInstance *master, int role, cha * * The function may also fail and return NULL with errno set to EBUSY if * a master or slave with the same name already exists. */ + sentinelRedisInstance *createSentinelRedisInstance(char *name, int flags, char *hostname, int port, int quorum, sentinelRedisInstance *master) { sentinelRedisInstance *ri; sentinelAddr *addr; @@ -949,24 +1080,12 @@ sentinelRedisInstance *createSentinelRedisInstance(char *name, int flags, char * ri = zmalloc(sizeof(*ri)); /* Note that all the instances are started in the disconnected state, * the event loop will take care of connecting them. */ - ri->flags = flags | SRI_DISCONNECTED; + ri->flags = flags; ri->name = sdsname; ri->runid = NULL; ri->config_epoch = 0; ri->addr = addr; - ri->cc = NULL; - ri->pc = NULL; - ri->pending_commands = 0; - ri->cc_conn_time = 0; - ri->pc_conn_time = 0; - ri->pc_last_activity = 0; - /* We set the last_ping_time to "now" even if we actually don't have yet - * a connection with the node, nor we sent a ping. - * This is useful to detect a timeout in case we'll not be able to connect - * with the node at all. */ - ri->last_ping_time = mstime(); - ri->last_avail_time = mstime(); - ri->last_pong_time = mstime(); + ri->link = createInstanceLink(); ri->last_pub_time = mstime(); ri->last_hello_time = mstime(); ri->last_master_down_reply_time = mstime(); @@ -1022,9 +1141,8 @@ void releaseSentinelRedisInstance(sentinelRedisInstance *ri) { dictRelease(ri->sentinels); dictRelease(ri->slaves); - /* Release hiredis connections. */ - if (ri->cc) sentinelKillLink(ri,ri->cc); - if (ri->pc) sentinelKillLink(ri,ri->pc); + /* Disconnect the instance. */ + releaseInstanceLink(ri->link,ri); /* Free other resources. 
*/ sdsfree(ri->name); @@ -1183,9 +1301,9 @@ void sentinelResetMaster(sentinelRedisInstance *ri, int flags) { dictRelease(ri->sentinels); ri->sentinels = dictCreate(&instancesDictType,NULL); } - if (ri->cc) sentinelKillLink(ri,ri->cc); - if (ri->pc) sentinelKillLink(ri,ri->pc); - ri->flags &= SRI_MASTER|SRI_DISCONNECTED; + instanceLinkCloseConnection(ri->link,ri->link->cc); + instanceLinkCloseConnection(ri->link,ri->link->pc); + ri->flags &= SRI_MASTER; if (ri->leader) { sdsfree(ri->leader); ri->leader = NULL; @@ -1198,9 +1316,9 @@ void sentinelResetMaster(sentinelRedisInstance *ri, int flags) { sdsfree(ri->slave_master_host); ri->runid = NULL; ri->slave_master_host = NULL; - ri->last_ping_time = mstime(); - ri->last_avail_time = mstime(); - ri->last_pong_time = mstime(); + ri->link->last_ping_time = mstime(); + ri->link->last_avail_time = mstime(); + ri->link->last_pong_time = mstime(); ri->role_reported_time = mstime(); ri->role_reported = SRI_MASTER; if (flags & SENTINEL_GENERATE_EVENT) @@ -1445,7 +1563,10 @@ char *sentinelHandleConfiguration(char **argv, int argc) { { return "Wrong hostname or port for sentinel."; } - if (argc == 5) si->runid = sdsnew(argv[4]); + if (argc == 5) { + si->runid = sdsnew(argv[4]); + tryConnectionSharing(si); + } } else if (!strcasecmp(argv[0],"announce-ip") && argc == 2) { /* announce-ip */ if (strlen(argv[1])) @@ -1634,57 +1755,6 @@ werr: /* ====================== hiredis connection handling ======================= */ -/* Completely disconnect a hiredis link from an instance. */ -void sentinelKillLink(sentinelRedisInstance *ri, redisAsyncContext *c) { - if (ri->cc == c) { - ri->cc = NULL; - ri->pending_commands = 0; - } - if (ri->pc == c) ri->pc = NULL; - c->data = NULL; - ri->flags |= SRI_DISCONNECTED; - redisAsyncFree(c); -} - -/* This function takes a hiredis context that is in an error condition - * and make sure to mark the instance as disconnected performing the - * cleanup needed. - * - * Note: we don't free the hiredis context as hiredis will do it for us - * for async connections. */ -void sentinelDisconnectInstanceFromContext(const redisAsyncContext *c) { - sentinelRedisInstance *ri = c->data; - int pubsub; - - if (ri == NULL) return; /* The instance no longer exists. */ - - pubsub = (ri->pc == c); - sentinelEvent(REDIS_DEBUG, pubsub ? "-pubsub-link" : "-cmd-link", ri, - "%@ #%s", c->errstr); - if (pubsub) - ri->pc = NULL; - else - ri->cc = NULL; - ri->flags |= SRI_DISCONNECTED; -} - -void sentinelLinkEstablishedCallback(const redisAsyncContext *c, int status) { - if (status != REDIS_OK) { - sentinelDisconnectInstanceFromContext(c); - } else { - sentinelRedisInstance *ri = c->data; - int pubsub = (ri->pc == c); - - sentinelEvent(REDIS_DEBUG, pubsub ? "+pubsub-link" : "+cmd-link", ri, - "%@"); - } -} - -void sentinelDisconnectCallback(const redisAsyncContext *c, int status) { - REDIS_NOTUSED(status); - sentinelDisconnectInstanceFromContext(c); -} - /* Send the AUTH command with the specified master password if needed. * Note that for slaves the password set for the master is used. 
* @@ -1696,8 +1766,8 @@ void sentinelSendAuthIfNeeded(sentinelRedisInstance *ri, redisAsyncContext *c) { ri->master->auth_pass; if (auth_pass) { - if (redisAsyncCommand(c, sentinelDiscardReplyCallback, NULL, "AUTH %s", - auth_pass) == REDIS_OK) ri->pending_commands++; + if (redisAsyncCommand(c, sentinelDiscardReplyCallback, ri, "AUTH %s", + auth_pass) == REDIS_OK) ri->link->pending_commands++; } } @@ -1711,76 +1781,77 @@ void sentinelSetClientName(sentinelRedisInstance *ri, redisAsyncContext *c, char char name[64]; snprintf(name,sizeof(name),"sentinel-%.8s-%s",sentinel.myid,type); - if (redisAsyncCommand(c, sentinelDiscardReplyCallback, NULL, + if (redisAsyncCommand(c, sentinelDiscardReplyCallback, ri, "CLIENT SETNAME %s", name) == REDIS_OK) { - ri->pending_commands++; + ri->link->pending_commands++; } } -/* Create the async connections for the specified instance if the instance - * is disconnected. Note that the SRI_DISCONNECTED flag is set even if just +/* Create the async connections for the instance link if the link + * is disconnected. Note that link->disconnected is true even if just * one of the two links (commands and pub/sub) is missing. */ void sentinelReconnectInstance(sentinelRedisInstance *ri) { - if (!(ri->flags & SRI_DISCONNECTED)) return; + if (ri->link->disconnected == 0) return; + instanceLink *link = ri->link; /* Commands connection. */ - if (ri->cc == NULL) { - ri->cc = redisAsyncConnectBind(ri->addr->ip,ri->addr->port,REDIS_BIND_ADDR); - if (ri->cc->err) { + if (link->cc == NULL) { + link->cc = redisAsyncConnectBind(ri->addr->ip,ri->addr->port,REDIS_BIND_ADDR); + if (link->cc->err) { sentinelEvent(REDIS_DEBUG,"-cmd-link-reconnection",ri,"%@ #%s", - ri->cc->errstr); - sentinelKillLink(ri,ri->cc); + link->cc->errstr); + instanceLinkCloseConnection(link,link->cc); } else { - ri->cc_conn_time = mstime(); - ri->cc->data = ri; - redisAeAttach(server.el,ri->cc); - redisAsyncSetConnectCallback(ri->cc, - sentinelLinkEstablishedCallback); - redisAsyncSetDisconnectCallback(ri->cc, - sentinelDisconnectCallback); - sentinelSendAuthIfNeeded(ri,ri->cc); - sentinelSetClientName(ri,ri->cc,"cmd"); + link->cc_conn_time = mstime(); + link->cc->data = link; + redisAeAttach(server.el,link->cc); + redisAsyncSetConnectCallback(link->cc, + sentinelLinkEstablishedCallback); + redisAsyncSetDisconnectCallback(link->cc, + sentinelDisconnectCallback); + sentinelSendAuthIfNeeded(ri,link->cc); + sentinelSetClientName(ri,link->cc,"cmd"); /* Send a PING ASAP when reconnecting. 
*/ sentinelSendPing(ri); } } /* Pub / Sub */ - if ((ri->flags & (SRI_MASTER|SRI_SLAVE)) && ri->pc == NULL) { - ri->pc = redisAsyncConnectBind(ri->addr->ip,ri->addr->port,REDIS_BIND_ADDR); - if (ri->pc->err) { + if ((ri->flags & (SRI_MASTER|SRI_SLAVE)) && link->pc == NULL) { + link->pc = redisAsyncConnectBind(ri->addr->ip,ri->addr->port,REDIS_BIND_ADDR); + if (link->pc->err) { sentinelEvent(REDIS_DEBUG,"-pubsub-link-reconnection",ri,"%@ #%s", - ri->pc->errstr); - sentinelKillLink(ri,ri->pc); + link->pc->errstr); + instanceLinkCloseConnection(link,link->pc); } else { int retval; - ri->pc_conn_time = mstime(); - ri->pc->data = ri; - redisAeAttach(server.el,ri->pc); - redisAsyncSetConnectCallback(ri->pc, - sentinelLinkEstablishedCallback); - redisAsyncSetDisconnectCallback(ri->pc, - sentinelDisconnectCallback); - sentinelSendAuthIfNeeded(ri,ri->pc); - sentinelSetClientName(ri,ri->pc,"pubsub"); + link->pc_conn_time = mstime(); + link->pc->data = link; + redisAeAttach(server.el,link->pc); + redisAsyncSetConnectCallback(link->pc, + sentinelLinkEstablishedCallback); + redisAsyncSetDisconnectCallback(link->pc, + sentinelDisconnectCallback); + sentinelSendAuthIfNeeded(ri,link->pc); + sentinelSetClientName(ri,link->pc,"pubsub"); /* Now we subscribe to the Sentinels "Hello" channel. */ - retval = redisAsyncCommand(ri->pc, + retval = redisAsyncCommand(link->pc, sentinelReceiveHelloMessages, ri, "SUBSCRIBE %s", SENTINEL_HELLO_CHANNEL); if (retval != REDIS_OK) { /* If we can't subscribe, the Pub/Sub connection is useless * and we can simply disconnect it and try again. */ - sentinelKillLink(ri,ri->pc); + instanceLinkCloseConnection(link,link->pc); return; } } } - /* Clear the DISCONNECTED flags only if we have both the connections + /* Clear the disconnected status only if we have both the connections * (or just the commands connection if this is a sentinel instance). */ - if (ri->cc && (ri->flags & SRI_SENTINEL || ri->pc)) - ri->flags &= ~SRI_DISCONNECTED; + if (link->cc && (ri->flags & SRI_SENTINEL || link->pc)) + link->disconnected = 0; } /* ======================== Redis instances pinging ======================== */ @@ -2049,36 +2120,35 @@ void sentinelRefreshInstanceInfo(sentinelRedisInstance *ri, const char *info) { } void sentinelInfoReplyCallback(redisAsyncContext *c, void *reply, void *privdata) { - sentinelRedisInstance *ri = c->data; + sentinelRedisInstance *ri = privdata; + instanceLink *link = c->data; redisReply *r; - REDIS_NOTUSED(privdata); - if (ri) ri->pending_commands--; - if (!reply || !ri) return; + if (!reply || !link) return; + link->pending_commands--; r = reply; - if (r->type == REDIS_REPLY_STRING) { + if (r->type == REDIS_REPLY_STRING) sentinelRefreshInstanceInfo(ri,r->str); - } } /* Just discard the reply. We use this when we are not monitoring the return * value of the command but its effects directly. 
*/ void sentinelDiscardReplyCallback(redisAsyncContext *c, void *reply, void *privdata) { - sentinelRedisInstance *ri = c->data; + instanceLink *link = c->data; REDIS_NOTUSED(reply); REDIS_NOTUSED(privdata); - if (ri) ri->pending_commands--; + if (link) link->pending_commands--; } void sentinelPingReplyCallback(redisAsyncContext *c, void *reply, void *privdata) { - sentinelRedisInstance *ri = c->data; + sentinelRedisInstance *ri = privdata; + instanceLink *link = c->data; redisReply *r; - REDIS_NOTUSED(privdata); - if (ri) ri->pending_commands--; - if (!reply || !ri) return; + if (!reply || !link) return; + link->pending_commands--; r = reply; if (r->type == REDIS_REPLY_STATUS || @@ -2089,8 +2159,8 @@ void sentinelPingReplyCallback(redisAsyncContext *c, void *reply, void *privdata strncmp(r->str,"LOADING",7) == 0 || strncmp(r->str,"MASTERDOWN",10) == 0) { - ri->last_avail_time = mstime(); - ri->last_ping_time = 0; /* Flag the pong as received. */ + link->last_avail_time = mstime(); + link->last_ping_time = 0; /* Flag the pong as received. */ } else { /* Send a SCRIPT KILL command if the instance appears to be * down because of a busy script. */ @@ -2098,26 +2168,26 @@ void sentinelPingReplyCallback(redisAsyncContext *c, void *reply, void *privdata (ri->flags & SRI_S_DOWN) && !(ri->flags & SRI_SCRIPT_KILL_SENT)) { - if (redisAsyncCommand(ri->cc, - sentinelDiscardReplyCallback, NULL, + if (redisAsyncCommand(ri->link->cc, + sentinelDiscardReplyCallback, ri, "SCRIPT KILL") == REDIS_OK) - ri->pending_commands++; + ri->link->pending_commands++; ri->flags |= SRI_SCRIPT_KILL_SENT; } } } - ri->last_pong_time = mstime(); + link->last_pong_time = mstime(); } /* This is called when we get the reply about the PUBLISH command we send * to the master to advertise this sentinel. */ void sentinelPublishReplyCallback(redisAsyncContext *c, void *reply, void *privdata) { - sentinelRedisInstance *ri = c->data; + sentinelRedisInstance *ri = privdata; + instanceLink *link = c->data; redisReply *r; - REDIS_NOTUSED(privdata); - if (ri) ri->pending_commands--; - if (!reply || !ri) return; + if (!reply || !link) return; + link->pending_commands--; r = reply; /* Only update pub_time if we actually published our message. Otherwise @@ -2172,6 +2242,7 @@ void sentinelProcessHelloMessage(char *hello, int hello_len) { * for Sentinels we don't have a later chance to fill it, * so do it now. */ si->runid = sdsnew(token[2]); + tryConnectionSharing(si); sentinelFlushConfig(); } } @@ -2230,7 +2301,7 @@ void sentinelReceiveHelloMessages(redisAsyncContext *c, void *reply, void *privd /* Update the last activity in the pubsub channel. Note that since we * receive our messages as well this timestamp can be used to detect * if the link is probably disconnected even if it seems otherwise. */ - ri->pc_last_activity = mstime(); + ri->link->pc_last_activity = mstime(); /* Sanity check in the reply we expect, so that the code that follows * can avoid to check for details. */ @@ -2267,14 +2338,14 @@ int sentinelSendHello(sentinelRedisInstance *ri) { sentinelRedisInstance *master = (ri->flags & SRI_MASTER) ? ri : ri->master; sentinelAddr *master_addr = sentinelGetCurrentMasterAddress(master); - if (ri->flags & SRI_DISCONNECTED) return REDIS_ERR; + if (ri->link->disconnected) return REDIS_ERR; /* Use the specified announce address if specified, otherwise try to * obtain our own IP address. 
*/ if (sentinel.announce_ip) { announce_ip = sentinel.announce_ip; } else { - if (anetSockName(ri->cc->c.fd,ip,sizeof(ip),NULL) == -1) + if (anetSockName(ri->link->cc->c.fd,ip,sizeof(ip),NULL) == -1) return REDIS_ERR; announce_ip = ip; } @@ -2290,11 +2361,11 @@ int sentinelSendHello(sentinelRedisInstance *ri) { /* --- */ master->name,master_addr->ip,master_addr->port, (unsigned long long) master->config_epoch); - retval = redisAsyncCommand(ri->cc, - sentinelPublishReplyCallback, NULL, "PUBLISH %s %s", + retval = redisAsyncCommand(ri->link->cc, + sentinelPublishReplyCallback, ri, "PUBLISH %s %s", SENTINEL_HELLO_CHANNEL,payload); if (retval != REDIS_OK) return REDIS_ERR; - ri->pending_commands++; + ri->link->pending_commands++; return REDIS_OK; } @@ -2336,14 +2407,14 @@ int sentinelForceHelloUpdateForMaster(sentinelRedisInstance *master) { * On error zero is returned, and we can't consider the PING command * queued in the connection. */ int sentinelSendPing(sentinelRedisInstance *ri) { - int retval = redisAsyncCommand(ri->cc, - sentinelPingReplyCallback, NULL, "PING"); + int retval = redisAsyncCommand(ri->link->cc, + sentinelPingReplyCallback, ri, "PING"); if (retval == REDIS_OK) { - ri->pending_commands++; + ri->link->pending_commands++; /* We update the ping time only if we received the pong for * the previous ping, otherwise we are technically waiting * since the first ping that did not received a reply. */ - if (ri->last_ping_time == 0) ri->last_ping_time = mstime(); + if (ri->link->last_ping_time == 0) ri->link->last_ping_time = mstime(); return 1; } else { return 0; @@ -2359,7 +2430,7 @@ void sentinelSendPeriodicCommands(sentinelRedisInstance *ri) { /* Return ASAP if we have already a PING or INFO already pending, or * in the case the instance is not properly connected. */ - if (ri->flags & SRI_DISCONNECTED) return; + if (ri->link->disconnected) return; /* For INFO, PING, PUBLISH that are not critical commands to send we * also have a limit of SENTINEL_MAX_PENDING_COMMANDS. We don't @@ -2367,7 +2438,8 @@ void sentinelSendPeriodicCommands(sentinelRedisInstance *ri) { * properly (note that anyway there is a redundant protection about this, * that is, the link will be disconnected and reconnected if a long * timeout condition is detected. */ - if (ri->pending_commands >= SENTINEL_MAX_PENDING_COMMANDS) return; + if (ri->link->pending_commands >= + SENTINEL_MAX_PENDING_COMMANDS * ri->link->refcount) return; /* If this is a slave of a master in O_DOWN condition we start sending * it INFO every second, instead of the usual SENTINEL_INFO_PERIOD @@ -2391,10 +2463,10 @@ void sentinelSendPeriodicCommands(sentinelRedisInstance *ri) { (now - ri->info_refresh) > info_period)) { /* Send INFO to masters and slaves, not sentinels. */ - retval = redisAsyncCommand(ri->cc, - sentinelInfoReplyCallback, NULL, "INFO"); - if (retval == REDIS_OK) ri->pending_commands++; - } else if ((now - ri->last_pong_time) > ping_period) { + retval = redisAsyncCommand(ri->link->cc, + sentinelInfoReplyCallback, ri, "INFO"); + if (retval == REDIS_OK) ri->link->pending_commands++; + } else if ((now - ri->link->last_pong_time) > ping_period) { /* Send PING to all the three kinds of instances. 
*/ sentinelSendPing(ri); } else if ((now - ri->last_pub_time) > SENTINEL_PUBLISH_PERIOD) { @@ -2448,7 +2520,7 @@ void addReplySentinelRedisInstance(redisClient *c, sentinelRedisInstance *ri) { if (ri->flags & SRI_MASTER) flags = sdscat(flags,"master,"); if (ri->flags & SRI_SLAVE) flags = sdscat(flags,"slave,"); if (ri->flags & SRI_SENTINEL) flags = sdscat(flags,"sentinel,"); - if (ri->flags & SRI_DISCONNECTED) flags = sdscat(flags,"disconnected,"); + if (ri->link->disconnected) flags = sdscat(flags,"disconnected,"); if (ri->flags & SRI_MASTER_DOWN) flags = sdscat(flags,"master_down,"); if (ri->flags & SRI_FAILOVER_IN_PROGRESS) flags = sdscat(flags,"failover_in_progress,"); @@ -2462,8 +2534,8 @@ void addReplySentinelRedisInstance(redisClient *c, sentinelRedisInstance *ri) { sdsfree(flags); fields++; - addReplyBulkCString(c,"pending-commands"); - addReplyBulkLongLong(c,ri->pending_commands); + addReplyBulkCString(c,"link-pending-commands"); + addReplyBulkLongLong(c,ri->link->pending_commands); fields++; if (ri->flags & SRI_FAILOVER_IN_PROGRESS) { @@ -2474,15 +2546,15 @@ void addReplySentinelRedisInstance(redisClient *c, sentinelRedisInstance *ri) { addReplyBulkCString(c,"last-ping-sent"); addReplyBulkLongLong(c, - ri->last_ping_time ? (mstime() - ri->last_ping_time) : 0); + ri->link->last_ping_time ? (mstime() - ri->link->last_ping_time) : 0); fields++; addReplyBulkCString(c,"last-ok-ping-reply"); - addReplyBulkLongLong(c,mstime() - ri->last_avail_time); + addReplyBulkLongLong(c,mstime() - ri->link->last_avail_time); fields++; addReplyBulkCString(c,"last-ping-reply"); - addReplyBulkLongLong(c,mstime() - ri->last_pong_time); + addReplyBulkLongLong(c,mstime() - ri->link->last_pong_time); fields++; if (ri->flags & SRI_S_DOWN) { @@ -2676,7 +2748,7 @@ void sentinelCommand(redisClient *c) { * differnet masters with the same name. * * current-epoch is needed in order to understand if we are allowed - * to vote for a failover leader or not. Each Senitnel can vote just + * to vote for a failover leader or not. Each Sentinel can vote just * one time per epoch. * * runid is "*" if we are not seeking for a vote from the Sentinel @@ -3084,8 +3156,8 @@ void sentinelPublishCommand(redisClient *c) { void sentinelCheckSubjectivelyDown(sentinelRedisInstance *ri) { mstime_t elapsed = 0; - if (ri->last_ping_time) - elapsed = mstime() - ri->last_ping_time; + if (ri->link->last_ping_time) + elapsed = mstime() - ri->link->last_ping_time; /* Check if we are in need for a reconnection of one of the * links, because we are detecting low activity. @@ -3093,15 +3165,16 @@ void sentinelCheckSubjectivelyDown(sentinelRedisInstance *ri) { * 1) Check if the command link seems connected, was connected not less * than SENTINEL_MIN_LINK_RECONNECT_PERIOD, but still we have a * pending ping for more than half the timeout. */ - if (ri->cc && - (mstime() - ri->cc_conn_time) > SENTINEL_MIN_LINK_RECONNECT_PERIOD && - ri->last_ping_time != 0 && /* Ther is a pending ping... */ + if (ri->link->cc && + (mstime() - ri->link->cc_conn_time) > + SENTINEL_MIN_LINK_RECONNECT_PERIOD && + ri->link->last_ping_time != 0 && /* Ther is a pending ping... */ /* The pending ping is delayed, and we did not received * error replies as well. 
*/ - (mstime() - ri->last_ping_time) > (ri->down_after_period/2) && - (mstime() - ri->last_pong_time) > (ri->down_after_period/2)) + (mstime() - ri->link->last_ping_time) > (ri->down_after_period/2) && + (mstime() - ri->link->last_pong_time) > (ri->down_after_period/2)) { - sentinelKillLink(ri,ri->cc); + instanceLinkCloseConnection(ri->link,ri->link->cc); } /* 2) Check if the pubsub link seems connected, was connected not less @@ -3109,11 +3182,12 @@ void sentinelCheckSubjectivelyDown(sentinelRedisInstance *ri) { * activity in the Pub/Sub channel for more than * SENTINEL_PUBLISH_PERIOD * 3. */ - if (ri->pc && - (mstime() - ri->pc_conn_time) > SENTINEL_MIN_LINK_RECONNECT_PERIOD && - (mstime() - ri->pc_last_activity) > (SENTINEL_PUBLISH_PERIOD*3)) + if (ri->link->pc && + (mstime() - ri->link->pc_conn_time) > + SENTINEL_MIN_LINK_RECONNECT_PERIOD && + (mstime() - ri->link->pc_last_activity) > (SENTINEL_PUBLISH_PERIOD*3)) { - sentinelKillLink(ri,ri->pc); + instanceLinkCloseConnection(ri->link,ri->link->pc); } /* Update the SDOWN flag. We believe the instance is SDOWN if: @@ -3188,11 +3262,11 @@ void sentinelCheckObjectivelyDown(sentinelRedisInstance *master) { * sentinelAskMasterStateToOtherSentinels() function for more information. */ void sentinelReceiveIsMasterDownReply(redisAsyncContext *c, void *reply, void *privdata) { sentinelRedisInstance *ri = privdata; + instanceLink *link = c->data; redisReply *r; - REDIS_NOTUSED(c); - if (ri) ri->pending_commands--; - if (!reply || !ri) return; + if (!reply || !link) return; + link->pending_commands--; r = reply; /* Ignore every error or unexpected reply. @@ -3253,21 +3327,21 @@ void sentinelAskMasterStateToOtherSentinels(sentinelRedisInstance *master, int f * 2) Sentinel is connected. * 3) We did not received the info within SENTINEL_ASK_PERIOD ms. */ if ((master->flags & SRI_S_DOWN) == 0) continue; - if (ri->flags & SRI_DISCONNECTED) continue; + if (ri->link->disconnected) continue; if (!(flags & SENTINEL_ASK_FORCED) && mstime() - ri->last_master_down_reply_time < SENTINEL_ASK_PERIOD) continue; /* Ask */ ll2string(port,sizeof(port),master->addr->port); - retval = redisAsyncCommand(ri->cc, + retval = redisAsyncCommand(ri->link->cc, sentinelReceiveIsMasterDownReply, ri, "SENTINEL is-master-down-by-addr %s %s %llu %s", master->addr->ip, port, sentinel.current_epoch, (master->failover_state > SENTINEL_FAILOVER_STATE_NONE) ? sentinel.myid : "*"); - if (retval == REDIS_OK) ri->pending_commands++; + if (retval == REDIS_OK) ri->link->pending_commands++; } dictReleaseIterator(di); } @@ -3433,35 +3507,35 @@ int sentinelSendSlaveOf(sentinelRedisInstance *ri, char *host, int port) { * * Note that we don't check the replies returned by commands, since we * will observe instead the effects in the next INFO output. 
*/ - retval = redisAsyncCommand(ri->cc, - sentinelDiscardReplyCallback, NULL, "MULTI"); + retval = redisAsyncCommand(ri->link->cc, + sentinelDiscardReplyCallback, ri, "MULTI"); if (retval == REDIS_ERR) return retval; - ri->pending_commands++; + ri->link->pending_commands++; - retval = redisAsyncCommand(ri->cc, - sentinelDiscardReplyCallback, NULL, "SLAVEOF %s %s", host, portstr); + retval = redisAsyncCommand(ri->link->cc, + sentinelDiscardReplyCallback, ri, "SLAVEOF %s %s", host, portstr); if (retval == REDIS_ERR) return retval; - ri->pending_commands++; + ri->link->pending_commands++; - retval = redisAsyncCommand(ri->cc, - sentinelDiscardReplyCallback, NULL, "CONFIG REWRITE"); + retval = redisAsyncCommand(ri->link->cc, + sentinelDiscardReplyCallback, ri, "CONFIG REWRITE"); if (retval == REDIS_ERR) return retval; - ri->pending_commands++; + ri->link->pending_commands++; /* CLIENT KILL TYPE is only supported starting from Redis 2.8.12, * however sending it to an instance not understanding this command is not * an issue because CLIENT is variadic command, so Redis will not * recognized as a syntax error, and the transaction will not fail (but * only the unsupported command will fail). */ - retval = redisAsyncCommand(ri->cc, - sentinelDiscardReplyCallback, NULL, "CLIENT KILL TYPE normal"); + retval = redisAsyncCommand(ri->link->cc, + sentinelDiscardReplyCallback, ri, "CLIENT KILL TYPE normal"); if (retval == REDIS_ERR) return retval; - ri->pending_commands++; + ri->link->pending_commands++; - retval = redisAsyncCommand(ri->cc, - sentinelDiscardReplyCallback, NULL, "EXEC"); + retval = redisAsyncCommand(ri->link->cc, + sentinelDiscardReplyCallback, ri, "EXEC"); if (retval == REDIS_ERR) return retval; - ri->pending_commands++; + ri->link->pending_commands++; return REDIS_OK; } @@ -3599,8 +3673,9 @@ sentinelRedisInstance *sentinelSelectSlave(sentinelRedisInstance *master) { sentinelRedisInstance *slave = dictGetVal(de); mstime_t info_validity_time; - if (slave->flags & (SRI_S_DOWN|SRI_O_DOWN|SRI_DISCONNECTED)) continue; - if (mstime() - slave->last_avail_time > SENTINEL_PING_PERIOD*5) continue; + if (slave->flags & (SRI_S_DOWN|SRI_O_DOWN)) continue; + if (slave->link->disconnected) continue; + if (mstime() - slave->link->last_avail_time > SENTINEL_PING_PERIOD*5) continue; if (slave->slave_priority == 0) continue; /* If the master is in SDOWN state we get INFO for slaves every second. @@ -3681,7 +3756,7 @@ void sentinelFailoverSendSlaveOfNoOne(sentinelRedisInstance *ri) { /* We can't send the command to the promoted slave if it is now * disconnected. Retry again and again with this state until the timeout * is reached, then abort the failover. */ - if (ri->promoted_slave->flags & SRI_DISCONNECTED) { + if (ri->link->disconnected) { if (mstime() - ri->failover_state_change_time > ri->failover_timeout) { sentinelEvent(REDIS_WARNING,"-failover-abort-slave-timeout",ri,"%@"); sentinelAbortFailover(ri); @@ -3760,8 +3835,8 @@ void sentinelFailoverDetectEnd(sentinelRedisInstance *master) { sentinelRedisInstance *slave = dictGetVal(de); int retval; - if (slave->flags & - (SRI_RECONF_DONE|SRI_RECONF_SENT|SRI_DISCONNECTED)) continue; + if (slave->flags & (SRI_RECONF_DONE|SRI_RECONF_SENT)) continue; + if (slave->link->disconnected) continue; retval = sentinelSendSlaveOf(slave, master->promoted_slave->addr->ip, @@ -3816,8 +3891,8 @@ void sentinelFailoverReconfNextSlave(sentinelRedisInstance *master) { /* Nothing to do for instances that are disconnected or already * in RECONF_SENT state. 
*/ - if (slave->flags & (SRI_DISCONNECTED|SRI_RECONF_SENT|SRI_RECONF_INPROG)) - continue; + if (slave->flags & (SRI_RECONF_SENT|SRI_RECONF_INPROG)) continue; + if (slave->link->disconnected) continue; /* Send SLAVEOF . */ retval = sentinelSendSlaveOf(slave, From 4e8ccbe7eac053011a281760e570ba29c4dfb905 Mon Sep 17 00:00:00 2001 From: therealbill Date: Mon, 11 May 2015 14:08:57 -0500 Subject: [PATCH 0209/1928] adding a sentinel command: "flushconfig" This new command triggers a config flush to save the in-memory config to disk. This is useful for cases of a configuration management system or a package manager wiping out your sentinel config while the process is still running - and has not yet been restarted. It can also be useful for scripting a backup and migrate or clone of a running sentinel. --- src/sentinel.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/src/sentinel.c b/src/sentinel.c index 0df7989a3..1abbd3ae5 100644 --- a/src/sentinel.c +++ b/src/sentinel.c @@ -2775,6 +2775,10 @@ void sentinelCommand(redisClient *c) { sentinelEvent(REDIS_WARNING,"+monitor",ri,"%@ quorum %d",ri->quorum); addReply(c,shared.ok); } + } else if (!strcasecmp(c->argv[1]->ptr,"flushconfig")) { + sentinelFlushConfig(); + addReply(c,shared.ok); + return; } else if (!strcasecmp(c->argv[1]->ptr,"remove")) { /* SENTINEL REMOVE */ sentinelRedisInstance *ri; From d6e1347869d22f19746b004551d43059b375868e Mon Sep 17 00:00:00 2001 From: antirez Date: Mon, 11 May 2015 23:49:19 +0200 Subject: [PATCH 0210/1928] Sentinel: add link refcount to instance description --- src/sentinel.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/src/sentinel.c b/src/sentinel.c index c28bf6dd3..fc1bdf82a 100644 --- a/src/sentinel.c +++ b/src/sentinel.c @@ -2538,6 +2538,10 @@ void addReplySentinelRedisInstance(redisClient *c, sentinelRedisInstance *ri) { addReplyBulkLongLong(c,ri->link->pending_commands); fields++; + addReplyBulkCString(c,"link-refcount"); + addReplyBulkLongLong(c,ri->link->refcount); + fields++; + if (ri->flags & SRI_FAILOVER_IN_PROGRESS) { addReplyBulkCString(c,"failover-state"); addReplyBulkCString(c,(char*)sentinelFailoverStateStr(ri->failover_state)); From 6b953a268186a37084d07046267dcbf263d61372 Mon Sep 17 00:00:00 2001 From: Jungtaek Lim Date: Tue, 12 May 2015 10:04:52 +0900 Subject: [PATCH 0211/1928] protocol error log should be seen debug/verbose level --- src/networking.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/networking.c b/src/networking.c index 19032ae01..260dcda5e 100644 --- a/src/networking.c +++ b/src/networking.c @@ -983,7 +983,7 @@ int processInlineBuffer(redisClient *c) { /* Helper function. Trims query buffer to make the function that processes * multi bulk requests idempotent. */ static void setProtocolError(redisClient *c, int pos) { - if (server.verbosity >= REDIS_VERBOSE) { + if (server.verbosity <= REDIS_VERBOSE) { sds client = catClientInfoString(sdsempty(),c); redisLog(REDIS_VERBOSE, "Protocol error from client: %s", client); From e0a5246f06951f76bc6bbfaa9900b8103f093dea Mon Sep 17 00:00:00 2001 From: antirez Date: Tue, 12 May 2015 12:12:25 +0200 Subject: [PATCH 0212/1928] Sentinel: add sentinelGetInstanceTypeString() fuction This is useful for debugging and logging activities: given a sentinelRedisInstance object returns a C string representing the instance type: master, slave, sentinel. 
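A rough standalone illustration of the helper described above; the SRI_* values mirror the defines in sentinel.c, but the main() and the sample log line are invented for the example, and the real function takes a sentinelRedisInstance pointer (see the diff below):

    #include <stdio.h>

    #define SRI_MASTER   (1<<0)
    #define SRI_SLAVE    (1<<1)
    #define SRI_SENTINEL (1<<2)

    /* Map instance flags to a human-readable type name. */
    static const char *instanceTypeString(int flags) {
        if (flags & SRI_MASTER) return "master";
        else if (flags & SRI_SLAVE) return "slave";
        else if (flags & SRI_SENTINEL) return "sentinel";
        else return "unknown";
    }

    int main(void) {
        int flags = SRI_SLAVE;
        /* Typical use: make a log or event line self-describing. */
        printf("+reboot %s 10.0.0.5:6379\n", instanceTypeString(flags));
        return 0;
    }

Centralizing the mapping in one helper keeps every log call site using the same vocabulary instead of re-deriving it from the flags inline.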
--- src/sentinel.c | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/src/sentinel.c b/src/sentinel.c index fc1bdf82a..11eb73d08 100644 --- a/src/sentinel.c +++ b/src/sentinel.c @@ -1458,6 +1458,13 @@ void sentinelPropagateDownAfterPeriod(sentinelRedisInstance *master) { } } +char *sentinelGetInstanceTypeString(sentinelRedisInstance *ri) { + if (ri->flags & SRI_MASTER) return "master"; + else if (ri->flags & SRI_SLAVE) return "slave"; + else if (ri->flags & SRI_SENTINEL) return "sentinel"; + else return "unknown"; +} + /* ============================ Config handling ============================= */ char *sentinelHandleConfiguration(char **argv, int argc) { sentinelRedisInstance *ri; From 9d5e2ed3922dd6f424a17c1d6712e672d4ee5fa0 Mon Sep 17 00:00:00 2001 From: antirez Date: Tue, 12 May 2015 17:03:00 +0200 Subject: [PATCH 0213/1928] Sentinel: same-Sentinel link sharing across masters --- src/sentinel.c | 43 ++++++++++++++++++++++++++++++++++--------- 1 file changed, 34 insertions(+), 9 deletions(-) diff --git a/src/sentinel.c b/src/sentinel.c index 11eb73d08..916ab994c 100644 --- a/src/sentinel.c +++ b/src/sentinel.c @@ -361,6 +361,7 @@ void sentinelFlushConfig(void); void sentinelGenerateInitialMonitorEvents(void); int sentinelSendPing(sentinelRedisInstance *ri); int sentinelForceHelloUpdateForMaster(sentinelRedisInstance *master); +sentinelRedisInstance *getSentinelRedisInstanceByAddrAndRunID(dict *instances, char *ip, int port, char *runid); /* ========================= Dictionary types =============================== */ @@ -977,15 +978,39 @@ instanceLink *releaseInstanceLink(instanceLink *link, sentinelRedisInstance *ri) * This way multiple Sentinel objects that refer all to the same physical * Sentinel instance but in the context of different masters will use * a single connection, will send a single PING per second for failure - * detection and so forth. */ -void tryConnectionSharing(sentinelRedisInstance *ri) { + * detection and so forth. + * + * Return REDIS_OK if a matching Sentinel was found in the context of a + * different master and sharing was performed. Otherwise REDIS_ERR + * is returned. */ +int sentinelTryConnectionSharing(sentinelRedisInstance *ri) { redisAssert(ri->flags & SRI_SENTINEL); + dictIterator *di; + dictEntry *de; - /* TODO: - * 1) Check if there is a match. - * 2) Free our current link. - * 3) Reference the other link and increment its reference count. */ - REDIS_NOTUSED(ri); + if (ri->runid == NULL) return REDIS_ERR; /* No way to identify it. */ + if (ri->link->refcount > 1) return REDIS_ERR; /* Already shared. */ + + di = dictGetIterator(sentinel.masters); + while((de = dictNext(di)) != NULL) { + sentinelRedisInstance *master = dictGetVal(de), *match; + /* We want to share with the same physical Senitnel referenced + * in other masters, so skip our master. */ + if (master == ri->master) continue; + match = getSentinelRedisInstanceByAddrAndRunID(master->sentinels, + NULL,0,ri->runid); + if (match == ri) continue; /* Should never happen but... safer. */ + + /* We identified a matching Sentinel, great! Let's free our link + * and use the one of the matching Sentinel. */ + releaseInstanceLink(ri->link,NULL); + ri->link = match->link; + match->link->refcount++; + printf("SHARED!\n"); + return REDIS_OK; + } + dictReleaseIterator(di); + return REDIS_ERR; } /* This function is called when an hiredis connection reported an error. 
@@ -1572,7 +1597,7 @@ char *sentinelHandleConfiguration(char **argv, int argc) { } if (argc == 5) { si->runid = sdsnew(argv[4]); - tryConnectionSharing(si); + sentinelTryConnectionSharing(si); } } else if (!strcasecmp(argv[0],"announce-ip") && argc == 2) { /* announce-ip */ @@ -2249,7 +2274,7 @@ void sentinelProcessHelloMessage(char *hello, int hello_len) { * for Sentinels we don't have a later chance to fill it, * so do it now. */ si->runid = sdsnew(token[2]); - tryConnectionSharing(si); + sentinelTryConnectionSharing(si); sentinelFlushConfig(); } } From 0eb0b55ff0840c52527ff65f0fbcac84a6e0e231 Mon Sep 17 00:00:00 2001 From: antirez Date: Tue, 12 May 2015 17:03:53 +0200 Subject: [PATCH 0214/1928] Sentinel: PING trigger improved It's ok to ping as soon as the ping period has elapsed since we received the last PONG, but it's not good that we ping again if there is a pending ping... With this change we'll send a new ping if there is one pending only if two times the ping period elapsed since the ping which is still pending was sent. --- src/sentinel.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/src/sentinel.c b/src/sentinel.c index 916ab994c..40ffcce83 100644 --- a/src/sentinel.c +++ b/src/sentinel.c @@ -2498,7 +2498,10 @@ void sentinelSendPeriodicCommands(sentinelRedisInstance *ri) { retval = redisAsyncCommand(ri->link->cc, sentinelInfoReplyCallback, ri, "INFO"); if (retval == REDIS_OK) ri->link->pending_commands++; - } else if ((now - ri->link->last_pong_time) > ping_period) { + } else if ((now - ri->link->last_pong_time) > ping_period && + (ri->link->last_ping_time == 0 || + now - ri->link->last_ping_time > ping_period*2)) + { /* Send PING to all the three kinds of instances. */ sentinelSendPing(ri); } else if ((now - ri->last_pub_time) > SENTINEL_PUBLISH_PERIOD) { From 3ab49895b4533fac367dec7dc6be48036067f31a Mon Sep 17 00:00:00 2001 From: antirez Date: Wed, 13 May 2015 14:23:57 +0200 Subject: [PATCH 0215/1928] Sentinel: limit reconnection frequency to the ping period --- src/sentinel.c | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/src/sentinel.c b/src/sentinel.c index 40ffcce83..973f35556 100644 --- a/src/sentinel.c +++ b/src/sentinel.c @@ -146,6 +146,8 @@ typedef struct instanceLink { mstime_t last_pong_time; /* Last time the instance replied to ping, whatever the reply was. That's used to check if the link is idle and must be reconnected. */ + mstime_t last_reconn_time; /* Last reconnection attempt performed when + the link was down. */ } instanceLink; typedef struct sentinelRedisInstance { @@ -921,6 +923,7 @@ instanceLink *createInstanceLink(void) { link->pc = NULL; link->cc_conn_time = 0; link->pc_conn_time = 0; + link->last_reconn_time = 0; link->pc_last_activity = 0; /* We set the last_ping_time to "now" even if we actually don't have yet * a connection with the node, nor we sent a ping. @@ -1826,6 +1829,10 @@ void sentinelSetClientName(sentinelRedisInstance *ri, redisAsyncContext *c, char void sentinelReconnectInstance(sentinelRedisInstance *ri) { if (ri->link->disconnected == 0) return; instanceLink *link = ri->link; + mstime_t now = mstime(); + + if (now - ri->link->last_reconn_time < SENTINEL_PING_PERIOD) return; + ri->link->last_reconn_time = now; /* Commands connection. 
*/ if (link->cc == NULL) { From 58d2bb951a3ad85b312e9e009a66eb1397e38780 Mon Sep 17 00:00:00 2001 From: antirez Date: Thu, 14 May 2015 09:56:23 +0200 Subject: [PATCH 0216/1928] Sentinel: use active/last time for ping logic The PING trigger was improved again by using two fields instead of a single one to remember when the last ping was sent: 1. The "active" ping is the time at which we sent the last ping that still received no reply. However we continue to ping non replying instances even if they have an old active ping: the link may be disconnected and reconencted in the meantime so the older pings may get lost even if it's a TCP socket. 2. The "last" ping is the time at which we really sent the last ping on the wire, and this is used in order to throttle the amount of pings we send during failures (when no pong is received). All in all the failure detector effectiveness should be identical but we avoid to flood instances with pings during failures or when they are slow. --- src/sentinel.c | 58 ++++++++++++++++++++++++++++++++------------------ 1 file changed, 37 insertions(+), 21 deletions(-) diff --git a/src/sentinel.c b/src/sentinel.c index 973f35556..bc5935b02 100644 --- a/src/sentinel.c +++ b/src/sentinel.c @@ -139,10 +139,15 @@ typedef struct instanceLink { mstime_t pc_last_activity; /* Last time we received any message. */ mstime_t last_avail_time; /* Last time the instance replied to ping with a reply we consider valid. */ - mstime_t last_ping_time; /* Last time a pending ping was sent in the - context of the current command connection - with the instance. 0 if still not sent or - if pong already received. */ + mstime_t act_ping_time; /* Time at which the last pending ping (no pong + received after it) was sent. This field is + set to 0 when a pong is received, and set again + to the current time if the value is 0 and a new + ping is sent. */ + mstime_t last_ping_time; /* Time at which we sent the last ping. This is + only used to avoid sending too many pings + during failure. Idle time is computed using + the act_ping_time field. */ mstime_t last_pong_time; /* Last time the instance replied to ping, whatever the reply was. That's used to check if the link is idle and must be reconnected. */ @@ -925,11 +930,12 @@ instanceLink *createInstanceLink(void) { link->pc_conn_time = 0; link->last_reconn_time = 0; link->pc_last_activity = 0; - /* We set the last_ping_time to "now" even if we actually don't have yet + /* We set the act_ping_time to "now" even if we actually don't have yet * a connection with the node, nor we sent a ping. * This is useful to detect a timeout in case we'll not be able to connect * with the node at all. */ - link->last_ping_time = mstime(); + link->act_ping_time = mstime(); + link->last_ping_time = 0; link->last_avail_time = mstime(); link->last_pong_time = mstime(); return link; @@ -1344,7 +1350,8 @@ void sentinelResetMaster(sentinelRedisInstance *ri, int flags) { sdsfree(ri->slave_master_host); ri->runid = NULL; ri->slave_master_host = NULL; - ri->link->last_ping_time = mstime(); + ri->link->act_ping_time = mstime(); + ri->link->last_ping_time = 0; ri->link->last_avail_time = mstime(); ri->link->last_pong_time = mstime(); ri->role_reported_time = mstime(); @@ -2199,7 +2206,7 @@ void sentinelPingReplyCallback(redisAsyncContext *c, void *reply, void *privdata strncmp(r->str,"MASTERDOWN",10) == 0) { link->last_avail_time = mstime(); - link->last_ping_time = 0; /* Flag the pong as received. */ + link->act_ping_time = 0; /* Flag the pong as received. 
*/ } else { /* Send a SCRIPT KILL command if the instance appears to be * down because of a busy script. */ @@ -2440,20 +2447,31 @@ int sentinelForceHelloUpdateForMaster(sentinelRedisInstance *master) { return REDIS_OK; } -/* Send a PING to the specified instance and refresh the last_ping_time +/* Send a PING to the specified instance and refresh the act_ping_time * if it is zero (that is, if we received a pong for the previous ping). * * On error zero is returned, and we can't consider the PING command * queued in the connection. */ int sentinelSendPing(sentinelRedisInstance *ri) { + static unsigned long long counters[256]; + static time_t last; + // printf("(%lld) PING %s\n", mstime(), sentinelGetInstanceTypeString(ri)); + counters[ri->flags & (SRI_SLAVE|SRI_MASTER|SRI_SENTINEL)]++; + if (time(NULL)-last >= 5) { + printf("slave: %llu master: %llu sentinel: %llu\n", + counters[SRI_SLAVE], counters[SRI_MASTER], counters[SRI_SENTINEL]); + last = time(NULL); + } int retval = redisAsyncCommand(ri->link->cc, sentinelPingReplyCallback, ri, "PING"); if (retval == REDIS_OK) { ri->link->pending_commands++; - /* We update the ping time only if we received the pong for - * the previous ping, otherwise we are technically waiting - * since the first ping that did not received a reply. */ - if (ri->link->last_ping_time == 0) ri->link->last_ping_time = mstime(); + ri->link->last_ping_time = mstime(); + /* We update the active ping time only if we received the pong for + * the previous ping, otherwise we are technically waiting since the + * first ping that did not received a reply. */ + if (ri->link->act_ping_time == 0) + ri->link->act_ping_time = ri->link->last_ping_time; return 1; } else { return 0; @@ -2506,9 +2524,7 @@ void sentinelSendPeriodicCommands(sentinelRedisInstance *ri) { sentinelInfoReplyCallback, ri, "INFO"); if (retval == REDIS_OK) ri->link->pending_commands++; } else if ((now - ri->link->last_pong_time) > ping_period && - (ri->link->last_ping_time == 0 || - now - ri->link->last_ping_time > ping_period*2)) - { + (now - ri->link->last_ping_time) > ping_period/2) { /* Send PING to all the three kinds of instances. */ sentinelSendPing(ri); } else if ((now - ri->last_pub_time) > SENTINEL_PUBLISH_PERIOD) { @@ -2592,7 +2608,7 @@ void addReplySentinelRedisInstance(redisClient *c, sentinelRedisInstance *ri) { addReplyBulkCString(c,"last-ping-sent"); addReplyBulkLongLong(c, - ri->link->last_ping_time ? (mstime() - ri->link->last_ping_time) : 0); + ri->link->act_ping_time ? (mstime() - ri->link->act_ping_time) : 0); fields++; addReplyBulkCString(c,"last-ok-ping-reply"); @@ -3202,8 +3218,8 @@ void sentinelPublishCommand(redisClient *c) { void sentinelCheckSubjectivelyDown(sentinelRedisInstance *ri) { mstime_t elapsed = 0; - if (ri->link->last_ping_time) - elapsed = mstime() - ri->link->last_ping_time; + if (ri->link->act_ping_time) + elapsed = mstime() - ri->link->act_ping_time; /* Check if we are in need for a reconnection of one of the * links, because we are detecting low activity. @@ -3214,10 +3230,10 @@ void sentinelCheckSubjectivelyDown(sentinelRedisInstance *ri) { if (ri->link->cc && (mstime() - ri->link->cc_conn_time) > SENTINEL_MIN_LINK_RECONNECT_PERIOD && - ri->link->last_ping_time != 0 && /* Ther is a pending ping... */ + ri->link->act_ping_time != 0 && /* Ther is a pending ping... */ /* The pending ping is delayed, and we did not received * error replies as well. 
*/ - (mstime() - ri->link->last_ping_time) > (ri->down_after_period/2) && + (mstime() - ri->link->act_ping_time) > (ri->down_after_period/2) && (mstime() - ri->link->last_pong_time) > (ri->down_after_period/2)) { instanceLinkCloseConnection(ri->link,ri->link->cc); From 05dbc820051daab748761ec67c9c1bba37f2717e Mon Sep 17 00:00:00 2001 From: antirez Date: Thu, 14 May 2015 10:52:32 +0200 Subject: [PATCH 0217/1928] Sentinel: debugging code removed from sentinelSendPing() --- src/sentinel.c | 9 --------- 1 file changed, 9 deletions(-) diff --git a/src/sentinel.c b/src/sentinel.c index bc5935b02..57d5fd27d 100644 --- a/src/sentinel.c +++ b/src/sentinel.c @@ -2453,15 +2453,6 @@ int sentinelForceHelloUpdateForMaster(sentinelRedisInstance *master) { * On error zero is returned, and we can't consider the PING command * queued in the connection. */ int sentinelSendPing(sentinelRedisInstance *ri) { - static unsigned long long counters[256]; - static time_t last; - // printf("(%lld) PING %s\n", mstime(), sentinelGetInstanceTypeString(ri)); - counters[ri->flags & (SRI_SLAVE|SRI_MASTER|SRI_SENTINEL)]++; - if (time(NULL)-last >= 5) { - printf("slave: %llu master: %llu sentinel: %llu\n", - counters[SRI_SLAVE], counters[SRI_MASTER], counters[SRI_SENTINEL]); - last = time(NULL); - } int retval = redisAsyncCommand(ri->link->cc, sentinelPingReplyCallback, ri, "PING"); if (retval == REDIS_OK) { From 5a0516b5b96b7bbd16c1942b281c798f3db03630 Mon Sep 17 00:00:00 2001 From: antirez Date: Thu, 14 May 2015 13:39:26 +0200 Subject: [PATCH 0218/1928] Sentinel: rewrite callback chain removing instances with shared links Otherwise pending commands callbacks will fire with a reference that no longer exists. --- src/sentinel.c | 18 +++++++++++++++++- 1 file changed, 17 insertions(+), 1 deletion(-) diff --git a/src/sentinel.c b/src/sentinel.c index 57d5fd27d..0d33d8219 100644 --- a/src/sentinel.c +++ b/src/sentinel.c @@ -969,7 +969,23 @@ instanceLink *releaseInstanceLink(instanceLink *link, sentinelRedisInstance *ri) link->refcount--; if (link->refcount != 0) { if (ri) { - /* TODO: run the callbacks list and rebind. */ + /* This instance may have pending callbacks in the hiredis async + * context, having as 'privdata' the instance that we are going to + * free. Let's rewrite the callback list, directly exploiting + * hiredis internal data structures, in order to bind them with + * a callback that will ignore the reply at all. */ + redisCallback *cb; + redisCallbackList *callbacks = &link->cc->replies; + + cb = callbacks->head; + while(cb) { + if (cb->privdata == ri) { + printf("HERE\n"); + cb->fn = sentinelDiscardReplyCallback; + cb->privdata = NULL; /* Not strictly needed. */ + } + cb = cb->next; + } } return link; /* Other active users. */ } From 87b6013adb750b1f5ba161202876a84ffac45d3a Mon Sep 17 00:00:00 2001 From: antirez Date: Thu, 14 May 2015 13:40:23 +0200 Subject: [PATCH 0219/1928] Sentinel: remove SHARED! 
debugging printf --- src/sentinel.c | 1 - 1 file changed, 1 deletion(-) diff --git a/src/sentinel.c b/src/sentinel.c index 0d33d8219..b63781d2d 100644 --- a/src/sentinel.c +++ b/src/sentinel.c @@ -1031,7 +1031,6 @@ int sentinelTryConnectionSharing(sentinelRedisInstance *ri) { releaseInstanceLink(ri->link,NULL); ri->link = match->link; match->link->refcount++; - printf("SHARED!\n"); return REDIS_OK; } dictReleaseIterator(di); From b44c37482c6d944e54765318e38f63d92f36c09b Mon Sep 17 00:00:00 2001 From: antirez Date: Thu, 14 May 2015 14:08:19 +0200 Subject: [PATCH 0220/1928] Sentinel: fix access to NULL link->cc in releaseInstanceLink() --- src/sentinel.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/sentinel.c b/src/sentinel.c index b63781d2d..cd7f23229 100644 --- a/src/sentinel.c +++ b/src/sentinel.c @@ -968,7 +968,7 @@ instanceLink *releaseInstanceLink(instanceLink *link, sentinelRedisInstance *ri) redisAssert(link->refcount > 0); link->refcount--; if (link->refcount != 0) { - if (ri) { + if (ri && ri->link->cc) { /* This instance may have pending callbacks in the hiredis async * context, having as 'privdata' the instance that we are going to * free. Let's rewrite the callback list, directly exploiting From f9e942d4ae310a9416d0f91fa0e32c799009b8bc Mon Sep 17 00:00:00 2001 From: antirez Date: Thu, 14 May 2015 14:12:45 +0200 Subject: [PATCH 0221/1928] Sentinel: remove debugging message from releaseInstanceLink() --- src/sentinel.c | 1 - 1 file changed, 1 deletion(-) diff --git a/src/sentinel.c b/src/sentinel.c index cd7f23229..12b82baa9 100644 --- a/src/sentinel.c +++ b/src/sentinel.c @@ -980,7 +980,6 @@ instanceLink *releaseInstanceLink(instanceLink *link, sentinelRedisInstance *ri) cb = callbacks->head; while(cb) { if (cb->privdata == ri) { - printf("HERE\n"); cb->fn = sentinelDiscardReplyCallback; cb->privdata = NULL; /* Not strictly needed. */ } From 4dee18cb66fe37190c619232d61f4bea43d6dfb0 Mon Sep 17 00:00:00 2001 From: antirez Date: Thu, 14 May 2015 17:44:52 +0200 Subject: [PATCH 0222/1928] Sentinel: config-rewrite unique ID just one time --- src/sentinel.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/sentinel.c b/src/sentinel.c index 12b82baa9..4050b4609 100644 --- a/src/sentinel.c +++ b/src/sentinel.c @@ -1646,16 +1646,16 @@ void rewriteConfigSentinelOption(struct rewriteConfigState *state) { dictEntry *de; sds line; + /* sentinel unique ID. */ + line = sdscatprintf(sdsempty(), "sentinel myid %s", sentinel.myid); + rewriteConfigRewriteLine(state,"sentinel",line,1); + /* For every master emit a "sentinel monitor" config entry. */ di = dictGetIterator(sentinel.masters); while((de = dictNext(di)) != NULL) { sentinelRedisInstance *master, *ri; sentinelAddr *master_addr; - /* sentinel unique ID. 
*/ - line = sdscatprintf(sdsempty(), "sentinel myid %s", sentinel.myid); - rewriteConfigRewriteLine(state,"sentinel",line,1); - /* sentinel monitor */ master = dictGetVal(de); master_addr = sentinelGetCurrentMasterAddress(master); From b43431ac256c8eeb80b144b7492b04b6d692e061 Mon Sep 17 00:00:00 2001 From: antirez Date: Fri, 15 May 2015 09:47:05 +0200 Subject: [PATCH 0223/1928] Sentinel: port address update code to shared links logic --- src/sentinel.c | 40 ++++++++++++++++++++++++++++++++++++++-- 1 file changed, 38 insertions(+), 2 deletions(-) diff --git a/src/sentinel.c b/src/sentinel.c index 4050b4609..15a9beddb 100644 --- a/src/sentinel.c +++ b/src/sentinel.c @@ -117,7 +117,7 @@ typedef struct sentinelAddr { /* The link to a sentinelRedisInstance. When we have the same set of Sentinels * monitoring many masters, we have different instances representing the * same Sentinels, one per master, and we need to share the hiredis connections - * among them. Oherwise if 5 Senintels are monitoring 100 masters we create + * among them. Oherwise if 5 Sentinels are monitoring 100 masters we create * 500 outgoing connections instead of 5. * * So this structure represents a reference counted link in terms of the two @@ -1018,7 +1018,7 @@ int sentinelTryConnectionSharing(sentinelRedisInstance *ri) { di = dictGetIterator(sentinel.masters); while((de = dictNext(di)) != NULL) { sentinelRedisInstance *master = dictGetVal(de), *match; - /* We want to share with the same physical Senitnel referenced + /* We want to share with the same physical Sentinel referenced * in other masters, so skip our master. */ if (master == ri->master) continue; match = getSentinelRedisInstanceByAddrAndRunID(master->sentinels, @@ -1036,6 +1036,41 @@ int sentinelTryConnectionSharing(sentinelRedisInstance *ri) { return REDIS_ERR; } +/* When we detect a Sentinel to switch address (reporting a different IP/port + * pair in Hello messages), let's update all the matching Sentinels in the + * context of other masters as well and disconnect the links, so that everybody + * will be updated. + * + * Return the number of updated Sentinel addresses. */ +int sentinelUpdateSentinelAddressInAllMasters(sentinelRedisInstance *ri) { + redisAssert(ri->flags & SRI_SENTINEL); + dictIterator *di; + dictEntry *de; + int reconfigured = 0; + + di = dictGetIterator(sentinel.masters); + while((de = dictNext(di)) != NULL) { + sentinelRedisInstance *master = dictGetVal(de), *match; + match = getSentinelRedisInstanceByAddrAndRunID(master->sentinels, + NULL,0,ri->runid); + if (match->link->disconnected == 0) { + instanceLinkCloseConnection(match->link,match->link->cc); + instanceLinkCloseConnection(match->link,match->link->pc); + } + if (match == ri) continue; /* Address already updated for it. */ + /* Update the address of the matching Sentinel by copying the address + * of the Sentinel object that received the address update. */ + releaseSentinelAddr(match->addr); + match->addr = dupSentinelAddr(ri->addr); + reconfigured++; + } + dictReleaseIterator(di); + if (reconfigured) + sentinelEvent(REDIS_NOTICE,"+sentinel-address-update", ri, + "%@ %d additional matching instances", reconfigured); + return reconfigured; +} + /* This function is called when an hiredis connection reported an error. * We set it to NULL and mark the link as disconnected so that it will be * reconnected again. @@ -2303,6 +2338,7 @@ void sentinelProcessHelloMessage(char *hello, int hello_len) { * so do it now. 
*/ si->runid = sdsnew(token[2]); sentinelTryConnectionSharing(si); + if (removed) sentinelUpdateSentinelAddressInAllMasters(si); sentinelFlushConfig(); } } From eb138f1511127c3aff428729feed99741b4f0124 Mon Sep 17 00:00:00 2001 From: antirez Date: Fri, 15 May 2015 17:38:48 +0200 Subject: [PATCH 0224/1928] Rewrite smoveCommand test with ternary operator --- src/t_set.c | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/src/t_set.c b/src/t_set.c index f50c0aa34..e8ce783c0 100644 --- a/src/t_set.c +++ b/src/t_set.c @@ -343,10 +343,7 @@ void smoveCommand(redisClient *c) { /* If srcset and dstset are equal, SMOVE is a no-op */ if (srcset == dstset) { - if (setTypeIsMember(srcset,ele)) - addReply(c,shared.cone); - else - addReply(c,shared.czero); + addReply(c,setTypeIsMember(srcset,ele) ? shared.cone : shared.czero); return; } From abc65e8987df5a6c94132d7ff1da3cdfbe4a989e Mon Sep 17 00:00:00 2001 From: antirez Date: Mon, 18 May 2015 12:52:03 +0200 Subject: [PATCH 0225/1928] Sentinel: SENTINEL CKQUORUM command A way for monitoring systems to check that Sentinel is technically able to reach the quorum and failover, using the currently visible Sentinels. --- src/sentinel.c | 51 ++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 51 insertions(+) diff --git a/src/sentinel.c b/src/sentinel.c index 15a9beddb..de2e6f3fd 100644 --- a/src/sentinel.c +++ b/src/sentinel.c @@ -2811,6 +2811,31 @@ sentinelRedisInstance *sentinelGetMasterByNameOrReplyError(redisClient *c, return ri; } +#define SENTINEL_ISQR_OK 0 +#define SENTINEL_ISQR_NOQUORUM (1<<0) +#define SENTINEL_ISQR_NOAUTH (1<<1) +int sentinelIsQuorumReachable(sentinelRedisInstance *master, int *usableptr) { + dictIterator *di; + dictEntry *de; + int usable = 1; /* Number of usable Sentinels. Init to 1 to count myself. */ + int result = SENTINEL_ISQR_OK; + int voters = dictSize(master->sentinels)+1; /* Known Sentinels + myself. */ + + di = dictGetIterator(master->sentinels); + while((de = dictNext(di)) != NULL) { + sentinelRedisInstance *ri = dictGetVal(de); + + if (ri->flags & (SRI_S_DOWN|SRI_O_DOWN)) continue; + usable++; + } + dictReleaseIterator(di); + + if (usable < (int)master->quorum) result |= SENTINEL_ISQR_NOQUORUM; + if (usable < voters/2+1) result |= SENTINEL_ISQR_NOAUTH; + if (usableptr) *usableptr = usable; + return result; +} + void sentinelCommand(redisClient *c) { if (!strcasecmp(c->argv[1]->ptr,"masters")) { /* SENTINEL MASTERS */ @@ -2993,6 +3018,32 @@ void sentinelCommand(redisClient *c) { dictDelete(sentinel.masters,c->argv[2]->ptr); sentinelFlushConfig(); addReply(c,shared.ok); + } else if (!strcasecmp(c->argv[1]->ptr,"ckquorum")) { + /* SENTINEL CKQUORUM */ + sentinelRedisInstance *ri; + int usable; + + if ((ri = sentinelGetMasterByNameOrReplyError(c,c->argv[2])) + == NULL) return; + int result = sentinelIsQuorumReachable(ri,&usable); + if (result == SENTINEL_ISQR_OK) { + addReplySds(c, sdscatfmt(sdsempty(), + "+OK %i usable Sentinels. Quorum and failover authorization " + "can be reached\r\n",usable)); + } else { + sds e = sdscatfmt(sdsempty(), + "-NOQUORUM %i usable Sentinels. ",usable); + if (result & SENTINEL_ISQR_NOQUORUM) + e = sdscat(e,"Not enough available Sentinels to reach the" + " specified quorum for this master"); + if (result & SENTINEL_ISQR_NOAUTH) { + if (result & SENTINEL_ISQR_NOQUORUM) e = sdscat(e,". 
"); + e = sdscat(e, "Not enough available Sentinels to reach the" + " majority and authorize a failover"); + } + e = sdscat(e,"\r\n"); + addReplySds(c,e); + } } else if (!strcasecmp(c->argv[1]->ptr,"set")) { if (c->argc < 3 || c->argc % 2 == 0) goto numargserr; sentinelSetCommand(c); From d614f1c37e7cef759f655d72eadd9f9e421b2196 Mon Sep 17 00:00:00 2001 From: antirez Date: Tue, 19 May 2015 12:26:09 +0200 Subject: [PATCH 0226/1928] Sentinel: CKQUORUM tests --- tests/sentinel/tests/06-ckquorum.tcl | 34 ++++++++++++++++++++++++++++ 1 file changed, 34 insertions(+) create mode 100644 tests/sentinel/tests/06-ckquorum.tcl diff --git a/tests/sentinel/tests/06-ckquorum.tcl b/tests/sentinel/tests/06-ckquorum.tcl new file mode 100644 index 000000000..31e5fa2f8 --- /dev/null +++ b/tests/sentinel/tests/06-ckquorum.tcl @@ -0,0 +1,34 @@ +# Test for the SENTINEL CKQUORUM command + +source "../tests/includes/init-tests.tcl" +set num_sentinels [llength $::sentinel_instances] + +test "CKQUORUM reports OK and the right amount of Sentinels" { + foreach_sentinel_id id { + assert_match "*OK $num_sentinels usable*" [S $id SENTINEL CKQUORUM mymaster] + } +} + +test "CKQUORUM detects quorum cannot be reached" { + set orig_quorum [expr {$num_sentinels/2+1}] + S 0 SENTINEL SET mymaster quorum [expr {$num_sentinels+1}] + catch {[S 0 SENTINEL CKQUORUM mymaster]} err + assert_match "*NOQUORUM*" $err + S 0 SENTINEL SET mymaster quorum $orig_quorum +} + +test "CKQUORUM detects failover authorization cannot be reached" { + set orig_quorum [expr {$num_sentinels/2+1}] + S 0 SENTINEL SET mymaster quorum 1 + kill_instance sentinel 1 + kill_instance sentinel 2 + kill_instance sentinel 3 + after 5000 + catch {[S 0 SENTINEL CKQUORUM mymaster]} err + assert_match "*NOQUORUM*" $err + S 0 SENTINEL SET mymaster quorum $orig_quorum + restart_instance sentinel 1 + restart_instance sentinel 2 + restart_instance sentinel 3 +} + From c54de703f20d17d9484a5becabe211a4681cd746 Mon Sep 17 00:00:00 2001 From: antirez Date: Wed, 20 May 2015 09:59:55 +0200 Subject: [PATCH 0227/1928] Sentinel: fix sentinelTryConnectionSharing() by checking for no match Trivial omission of the obvious no-match case. --- src/sentinel.c | 1 + 1 file changed, 1 insertion(+) diff --git a/src/sentinel.c b/src/sentinel.c index de2e6f3fd..27240cb7a 100644 --- a/src/sentinel.c +++ b/src/sentinel.c @@ -1023,6 +1023,7 @@ int sentinelTryConnectionSharing(sentinelRedisInstance *ri) { if (master == ri->master) continue; match = getSentinelRedisInstanceByAddrAndRunID(master->sentinels, NULL,0,ri->runid); + if (match == NULL) continue; /* No match. */ if (match == ri) continue; /* Should never happen but... safer. */ /* We identified a matching Sentinel, great! 
Let's free our link From 8c423c0bd6e2e54cc398c15f15164995b502f121 Mon Sep 17 00:00:00 2001 From: Huachao Huang Date: Thu, 21 May 2015 17:40:17 +0800 Subject: [PATCH 0228/1928] Update anet.c --- src/anet.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/anet.c b/src/anet.c index 0ec5c55a2..3ef517cec 100644 --- a/src/anet.c +++ b/src/anet.c @@ -402,7 +402,7 @@ int anetRead(int fd, char *buf, int count) return totlen; } -/* Like write(2) but make sure 'count' is read before to return +/* Like write(2) but make sure 'count' is written before to return * (unless error is encountered) */ int anetWrite(int fd, char *buf, int count) { From 575eeb1a1c473ee3425175ac44bbc37fdea55e74 Mon Sep 17 00:00:00 2001 From: Itamar Haber Date: Thu, 21 May 2015 13:24:51 +0300 Subject: [PATCH 0229/1928] Removed incorrect suggestion DEL/INCR/DECR and others could be NTH but apparently never made it to the implementation of SORT --- src/sort.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/sort.c b/src/sort.c index c1b571313..7da4de152 100644 --- a/src/sort.c +++ b/src/sort.c @@ -209,7 +209,7 @@ void sortCommand(redisClient *c) { } /* Create a list of operations to perform for every sorted element. - * Operations can be GET/DEL/INCR/DECR */ + * Operations can be GET */ operations = listCreate(); listSetFreeMethod(operations,zfree); j = 2; /* options start at argv[2] */ From fb3af75f7478c6d0717d076289976f16b2389e28 Mon Sep 17 00:00:00 2001 From: antirez Date: Fri, 22 May 2015 11:49:11 +0200 Subject: [PATCH 0230/1928] Sentinel: initial failure simulator implemented This commit adds the SENTINEL simulate-failure, that sets specific hooks inside the state machine that will crash Sentinel, for testing purposes. --- src/sentinel.c | 50 ++++++++++++++++++++++++++++++++++++++++++++++++-- 1 file changed, 48 insertions(+), 2 deletions(-) diff --git a/src/sentinel.c b/src/sentinel.c index 27240cb7a..b4ba4e824 100644 --- a/src/sentinel.c +++ b/src/sentinel.c @@ -114,6 +114,11 @@ typedef struct sentinelAddr { #define SENTINEL_SCRIPT_MAX_RETRY 10 #define SENTINEL_SCRIPT_RETRY_DELAY 30000 /* 30 seconds between retries. */ +/* SENTINEL SIMULATE-FAILURE command flags. */ +#define SENTINEL_SIMFAILURE_NONE 0 +#define SENTINEL_SIMFAILURE_CRASH_AFTER_ELECTION (1<<0) +#define SENTINEL_SIMFAILURE_CRASH_AFTER_PROMOTION (1<<1) + /* The link to a sentinelRedisInstance. When we have the same set of Sentinels * monitoring many masters, we have different instances representing the * same Sentinels, one per master, and we need to share the hiredis connections @@ -235,6 +240,7 @@ struct sentinelState { not NULL. */ int announce_port; /* Port that is gossiped to other sentinels if non zero. */ + unsigned long simfailure_flags; /* Failures simulation. */ } sentinel; /* A script execution job. 
*/ @@ -369,6 +375,7 @@ void sentinelGenerateInitialMonitorEvents(void); int sentinelSendPing(sentinelRedisInstance *ri); int sentinelForceHelloUpdateForMaster(sentinelRedisInstance *master); sentinelRedisInstance *getSentinelRedisInstanceByAddrAndRunID(dict *instances, char *ip, int port, char *runid); +void sentinelSimFailureCrash(void); /* ========================= Dictionary types =============================== */ @@ -460,6 +467,7 @@ void initSentinel(void) { sentinel.scripts_queue = listCreate(); sentinel.announce_ip = NULL; sentinel.announce_port = 0; + sentinel.simfailure_flags = SENTINEL_SIMFAILURE_NONE; memset(sentinel.myid,0,sizeof(sentinel.myid)); } @@ -2140,6 +2148,9 @@ void sentinelRefreshInstanceInfo(sentinelRedisInstance *ri, const char *info) { ri->master->failover_state_change_time = mstime(); sentinelFlushConfig(); sentinelEvent(REDIS_WARNING,"+promoted-slave",ri,"%@"); + if (sentinel.simfailure_flags & + SENTINEL_SIMFAILURE_CRASH_AFTER_PROMOTION) + sentinelSimFailureCrash(); sentinelEvent(REDIS_WARNING,"+failover-state-reconf-slaves", ri->master,"%@"); sentinelCallClientReconfScript(ri->master,SENTINEL_LEADER, @@ -3049,6 +3060,7 @@ void sentinelCommand(redisClient *c) { if (c->argc < 3 || c->argc % 2 == 0) goto numargserr; sentinelSetCommand(c); } else if (!strcasecmp(c->argv[1]->ptr,"info-cache")) { + /* SENTINEL INFO-CACHE */ if (c->argc < 2) goto numargserr; mstime_t now = mstime(); @@ -3109,6 +3121,29 @@ void sentinelCommand(redisClient *c) { } dictReleaseIterator(di); if (masters_local != sentinel.masters) dictRelease(masters_local); + } else if (!strcasecmp(c->argv[1]->ptr,"simulate-failure")) { + /* SENTINEL SIMULATE-FAILURE ... */ + int j; + + sentinel.simfailure_flags = SENTINEL_SIMFAILURE_NONE; + for (j = 2; j < c->argc; j++) { + if (!strcasecmp(c->argv[j]->ptr,"crash-after-election")) { + sentinel.simfailure_flags |= + SENTINEL_SIMFAILURE_CRASH_AFTER_ELECTION; + redisLog(REDIS_WARNING,"Failure simulation: this Sentinel " + "will crash after being successfully elected as failover " + "leader"); + } else if (!strcasecmp(c->argv[j]->ptr,"crash-after-promotion")) { + sentinel.simfailure_flags |= + SENTINEL_SIMFAILURE_CRASH_AFTER_PROMOTION; + redisLog(REDIS_WARNING,"Failure simulation: this Sentinel " + "will crash after promoting the selected slave to master"); + } else { + addReplyError(c,"Unknown failure simulation specified"); + return; + } + } + addReply(c,shared.ok); } else { addReplyErrorFormat(c,"Unknown sentinel subcommand '%s'", (char*)c->argv[1]->ptr); @@ -3156,11 +3191,13 @@ void sentinelInfoCommand(redisClient *c) { "sentinel_masters:%lu\r\n" "sentinel_tilt:%d\r\n" "sentinel_running_scripts:%d\r\n" - "sentinel_scripts_queue_length:%ld\r\n", + "sentinel_scripts_queue_length:%ld\r\n" + "sentinel_simulate_failure_flags:%lu\r\n", dictSize(sentinel.masters), sentinel.tilt, sentinel.running_scripts, - listLength(sentinel.scripts_queue)); + listLength(sentinel.scripts_queue), + sentinel.simfailure_flags); di = dictGetIterator(sentinel.masters); while((de = dictNext(di)) != NULL) { @@ -3503,6 +3540,13 @@ void sentinelAskMasterStateToOtherSentinels(sentinelRedisInstance *master, int f /* =============================== FAILOVER ================================= */ +/* Crash because of user request via SENTINEL simulate-failure command. 
*/ +void sentinelSimFailureCrash(void) { + redisLog(REDIS_WARNING, + "Sentinel CRASH because of SENTINEL simulate-failure"); + exit(99); +} + /* Vote for the sentinel with 'req_runid' or return the old vote if already * voted for the specifed 'req_epoch' or one greater. * @@ -3881,6 +3925,8 @@ void sentinelFailoverWaitStart(sentinelRedisInstance *ri) { return; } sentinelEvent(REDIS_WARNING,"+elected-leader",ri,"%@"); + if (sentinel.simfailure_flags & SENTINEL_SIMFAILURE_CRASH_AFTER_ELECTION) + sentinelSimFailureCrash(); ri->failover_state = SENTINEL_FAILOVER_STATE_SELECT_SLAVE; ri->failover_state_change_time = mstime(); sentinelEvent(REDIS_WARNING,"+failover-state-select-slave",ri,"%@"); From 5080f2d69908e2324311f03224a5dc7c98322569 Mon Sep 17 00:00:00 2001 From: antirez Date: Mon, 25 May 2015 10:24:27 +0200 Subject: [PATCH 0231/1928] Sentinel: help subcommand in simulate-failure command --- src/sentinel.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/src/sentinel.c b/src/sentinel.c index b4ba4e824..6f10c27f2 100644 --- a/src/sentinel.c +++ b/src/sentinel.c @@ -3138,6 +3138,10 @@ void sentinelCommand(redisClient *c) { SENTINEL_SIMFAILURE_CRASH_AFTER_PROMOTION; redisLog(REDIS_WARNING,"Failure simulation: this Sentinel " "will crash after promoting the selected slave to master"); + } else if (!strcasecmp(c->argv[j]->ptr,"help")) { + addReplyMultiBulkLen(c,2); + addReplyBulkCString(c,"crash-after-election"); + addReplyBulkCString(c,"crash-after-promotion"); } else { addReplyError(c,"Unknown failure simulation specified"); return; From 20700fe566938d5bea945f739dbb5ab73131febb Mon Sep 17 00:00:00 2001 From: antirez Date: Mon, 25 May 2015 10:32:28 +0200 Subject: [PATCH 0232/1928] Sentinel: clarify effect of resetting failover_start_time. --- src/sentinel.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/src/sentinel.c b/src/sentinel.c index 6f10c27f2..9c9e5fe4d 100644 --- a/src/sentinel.c +++ b/src/sentinel.c @@ -1379,7 +1379,9 @@ void sentinelDelFlagsToDictOfRedisInstances(dict *instances, int flags) { * 1) Remove all slaves. * 2) Remove all sentinels. * 3) Remove most of the flags resulting from runtime operations. - * 4) Reset timers to their default value. + * 4) Reset timers to their default value. For example after a reset it will be + * possible to failover again the same master ASAP, without waiting the + * failover timeout delay. * 5) In the process of doing this undo the failover if in progress. * 6) Disconnect the connections with the master (will reconnect automatically). */ @@ -1402,7 +1404,7 @@ void sentinelResetMaster(sentinelRedisInstance *ri, int flags) { } ri->failover_state = SENTINEL_FAILOVER_STATE_NONE; ri->failover_state_change_time = 0; - ri->failover_start_time = 0; + ri->failover_start_time = 0; /* We can failover again ASAP. */ ri->promoted_slave = NULL; sdsfree(ri->runid); sdsfree(ri->slave_master_host); From 382a943414d17ace950901aad07dbde577f2142a Mon Sep 17 00:00:00 2001 From: antirez Date: Thu, 28 May 2015 18:06:16 +0200 Subject: [PATCH 0233/1928] ZADD implemenation able to take options. --- src/t_zset.c | 57 +++++++++++++++++++++++++++++++++++++++++++--------- 1 file changed, 48 insertions(+), 9 deletions(-) diff --git a/src/t_zset.c b/src/t_zset.c index 64418c9b4..8496bc296 100644 --- a/src/t_zset.c +++ b/src/t_zset.c @@ -1171,27 +1171,65 @@ void zsetConvert(robj *zobj, int encoding) { *----------------------------------------------------------------------------*/ /* This generic command implements both ZADD and ZINCRBY. 
*/ -void zaddGenericCommand(redisClient *c, int incr) { +#define ZADD_NONE 0 +#define ZADD_INCR (1<<0) /* Increment the score instead of setting it. */ +#define ZADD_NX (1<<1) /* Don't touch elements not already existing. */ +#define ZADD_XX (1<<2) /* Only touch elements already exisitng. */ +void zaddGenericCommand(redisClient *c, int flags) { static char *nanerr = "resulting score is not a number (NaN)"; robj *key = c->argv[1]; robj *ele; robj *zobj; robj *curobj; double score = 0, *scores = NULL, curscore = 0.0; - int j, elements = (c->argc-2)/2; - int added = 0, updated = 0; + int j, elements; + int added = 0, updated = 0, scoreidx = 0; - if (c->argc % 2) { + /* Parse options. At the end 'scoreidx' is set to the argument position + * of the score of the first score-element pair. */ + scoreidx = 2; + while(scoreidx < c->argc) { + char *opt = c->argv[scoreidx]->ptr; + if (!strcasecmp(opt,"nx")) flags |= ZADD_NX; + else if (!strcasecmp(opt,"xx")) flags |= ZADD_XX; + else if (!strcasecmp(opt,"incr")) flags |= ZADD_INCR; + else break; + scoreidx++; + } + + /* Turn options into simple to check vars. */ + int incr = (flags & ZADD_INCR) != 0; + int nx = (flags & ZADD_NX) != 0; + int xx = (flags & ZADD_XX) != 0; + + /* After the options, we expect to have an even number of args, since + * we expect any number of score-element pairs. */ + elements = c->argc-scoreidx; + if (elements % 2) { addReply(c,shared.syntaxerr); return; } + elements /= 2; /* Now this holds the number of score-element pairs. */ + + /* Check for incompatible options. */ + if (nx && xx) { + addReplyError(c, + "XX and NX options at the same time are not compatible"); + return; + } + + if (incr && elements > 1) { + addReplyError(c, + "INCR option supports a single increment-element pair"); + return; + } /* Start parsing all the scores, we need to emit any syntax error * before executing additions to the sorted set, as the command should * either execute fully or nothing at all. */ scores = zmalloc(sizeof(double)*elements); for (j = 0; j < elements; j++) { - if (getDoubleFromObjectOrReply(c,c->argv[2+j*2],&scores[j],NULL) + if (getDoubleFromObjectOrReply(c,c->argv[scoreidx+j*2],&scores[j],NULL) != REDIS_OK) goto cleanup; } @@ -1220,7 +1258,7 @@ void zaddGenericCommand(redisClient *c, int incr) { unsigned char *eptr; /* Prefer non-encoded element when dealing with ziplists. 
*/ - ele = c->argv[3+j*2]; + ele = c->argv[scoreidx+1+j*2]; if ((eptr = zzlFind(zobj->ptr,ele,&curscore)) != NULL) { if (incr) { score += curscore; @@ -1253,7 +1291,8 @@ void zaddGenericCommand(redisClient *c, int incr) { zskiplistNode *znode; dictEntry *de; - ele = c->argv[3+j*2] = tryObjectEncoding(c->argv[3+j*2]); + ele = c->argv[scoreidx+1+j*2] = + tryObjectEncoding(c->argv[scoreidx+1+j*2]); de = dictFind(zs->dict,ele); if (de != NULL) { curobj = dictGetKey(de); @@ -1307,11 +1346,11 @@ cleanup: } void zaddCommand(redisClient *c) { - zaddGenericCommand(c,0); + zaddGenericCommand(c,ZADD_NONE); } void zincrbyCommand(redisClient *c) { - zaddGenericCommand(c,1); + zaddGenericCommand(c,ZADD_INCR); } void zremCommand(redisClient *c) { From 5d32abbb9e1b74ee08f03cae4305126063744a7a Mon Sep 17 00:00:00 2001 From: antirez Date: Fri, 29 May 2015 09:59:42 +0200 Subject: [PATCH 0234/1928] ZADD NX and XX options --- src/t_zset.c | 32 ++++++++++++++++++++++++++------ 1 file changed, 26 insertions(+), 6 deletions(-) diff --git a/src/t_zset.c b/src/t_zset.c index 8496bc296..081a01099 100644 --- a/src/t_zset.c +++ b/src/t_zset.c @@ -1183,7 +1183,14 @@ void zaddGenericCommand(redisClient *c, int flags) { robj *curobj; double score = 0, *scores = NULL, curscore = 0.0; int j, elements; - int added = 0, updated = 0, scoreidx = 0; + int scoreidx = 0; + /* The following vars are used in order to track what the command actually + * did during the execution, to reply to the client and to trigger the + * notification of keyspace change. */ + int added = 0; /* Number of new elements added. */ + int updated = 0; /* Number of elements with updated score. */ + int processed = 0; /* Number of elements processed, may remain zero with + options like XX. */ /* Parse options. At the end 'scoreidx' is set to the argument position * of the score of the first score-element pair. */ @@ -1236,6 +1243,7 @@ void zaddGenericCommand(redisClient *c, int flags) { /* Lookup the key and create the sorted set if does not exist. */ zobj = lookupKeyWrite(c->db,key); if (zobj == NULL) { + if (xx) goto reply_to_client; /* No key + XX option: nothing to do. */ if (server.zset_max_ziplist_entries == 0 || server.zset_max_ziplist_value < sdslen(c->argv[3]->ptr)) { @@ -1260,6 +1268,7 @@ void zaddGenericCommand(redisClient *c, int flags) { /* Prefer non-encoded element when dealing with ziplists. */ ele = c->argv[scoreidx+1+j*2]; if ((eptr = zzlFind(zobj->ptr,ele,&curscore)) != NULL) { + if (nx) continue; if (incr) { score += curscore; if (isnan(score)) { @@ -1275,7 +1284,8 @@ void zaddGenericCommand(redisClient *c, int flags) { server.dirty++; updated++; } - } else { + processed++; + } else if (!xx) { /* Optimize: check if the element is too large or the list * becomes too long *before* executing zzlInsert. 
*/ zobj->ptr = zzlInsert(zobj->ptr,ele,score); @@ -1285,6 +1295,7 @@ void zaddGenericCommand(redisClient *c, int flags) { zsetConvert(zobj,REDIS_ENCODING_SKIPLIST); server.dirty++; added++; + processed++; } } else if (zobj->encoding == REDIS_ENCODING_SKIPLIST) { zset *zs = zobj->ptr; @@ -1295,6 +1306,7 @@ void zaddGenericCommand(redisClient *c, int flags) { tryObjectEncoding(c->argv[scoreidx+1+j*2]); de = dictFind(zs->dict,ele); if (de != NULL) { + if (nx) continue; curobj = dictGetKey(de); curscore = *(double*)dictGetVal(de); @@ -1319,22 +1331,30 @@ void zaddGenericCommand(redisClient *c, int flags) { server.dirty++; updated++; } - } else { + processed++; + } else if (!xx) { znode = zslInsert(zs->zsl,score,ele); incrRefCount(ele); /* Inserted in skiplist. */ redisAssertWithInfo(c,NULL,dictAdd(zs->dict,ele,&znode->score) == DICT_OK); incrRefCount(ele); /* Added to dictionary. */ server.dirty++; added++; + processed++; } } else { redisPanic("Unknown sorted set encoding"); } } - if (incr) /* ZINCRBY */ - addReplyDouble(c,score); - else /* ZADD */ + +reply_to_client: + if (incr) { /* ZINCRBY or INCR option. */ + if (processed) + addReplyDouble(c,score); + else + addReply(c,shared.nullbulk); + } else { /* ZADD. */ addReplyLongLong(c,added); + } cleanup: zfree(scores); From c043a4e6f407d03e794a07d3b449ebe9cd74c507 Mon Sep 17 00:00:00 2001 From: antirez Date: Fri, 29 May 2015 11:22:03 +0200 Subject: [PATCH 0235/1928] ZADD RETCH option: Return number of elements added or updated Normally ZADD only returns the number of elements added to a sorted set, using the RETCH option it returns the sum of elements added or for which the score was updated. --- src/t_zset.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/src/t_zset.c b/src/t_zset.c index 081a01099..63436f25d 100644 --- a/src/t_zset.c +++ b/src/t_zset.c @@ -1175,6 +1175,7 @@ void zsetConvert(robj *zobj, int encoding) { #define ZADD_INCR (1<<0) /* Increment the score instead of setting it. */ #define ZADD_NX (1<<1) /* Don't touch elements not already existing. */ #define ZADD_XX (1<<2) /* Only touch elements already exisitng. */ +#define ZADD_RETCH (1<<3) /* Return the number of elements added or updated.*/ void zaddGenericCommand(redisClient *c, int flags) { static char *nanerr = "resulting score is not a number (NaN)"; robj *key = c->argv[1]; @@ -1199,6 +1200,7 @@ void zaddGenericCommand(redisClient *c, int flags) { char *opt = c->argv[scoreidx]->ptr; if (!strcasecmp(opt,"nx")) flags |= ZADD_NX; else if (!strcasecmp(opt,"xx")) flags |= ZADD_XX; + else if (!strcasecmp(opt,"retch")) flags |= ZADD_RETCH; else if (!strcasecmp(opt,"incr")) flags |= ZADD_INCR; else break; scoreidx++; @@ -1208,6 +1210,7 @@ void zaddGenericCommand(redisClient *c, int flags) { int incr = (flags & ZADD_INCR) != 0; int nx = (flags & ZADD_NX) != 0; int xx = (flags & ZADD_XX) != 0; + int retch = (flags & ZADD_RETCH) != 0; /* After the options, we expect to have an even number of args, since * we expect any number of score-element pairs. */ @@ -1353,7 +1356,7 @@ reply_to_client: else addReply(c,shared.nullbulk); } else { /* ZADD. */ - addReplyLongLong(c,added); + addReplyLongLong(c,retch ? 
added+updated : added); } cleanup: From 910e72d1c10ac8bcb7d869714dcad43dc318c8c5 Mon Sep 17 00:00:00 2001 From: antirez Date: Fri, 29 May 2015 11:23:49 +0200 Subject: [PATCH 0236/1928] Test: ZADD NX and XX options tests --- tests/unit/type/zset.tcl | 59 ++++++++++++++++++++++++++++++++++++++++ 1 file changed, 59 insertions(+) diff --git a/tests/unit/type/zset.tcl b/tests/unit/type/zset.tcl index 238eebb9d..d33b4bb35 100644 --- a/tests/unit/type/zset.tcl +++ b/tests/unit/type/zset.tcl @@ -43,6 +43,63 @@ start_server {tags {"zset"}} { assert_error "*not*float*" {r zadd myzset nan abc} } + test "ZADD with options syntax error with incomplete pair" { + r del ztmp + catch {r zadd ztmp xx 10 x 20} err + set err + } {ERR*} + + test "ZADD XX option without key - $encoding" { + r del ztmp + assert {[r zadd ztmp xx 10 x] == 0} + assert {[r type ztmp] eq {none}} + } + + test "ZADD XX existing key - $encoding" { + r del ztmp + r zadd ztmp 10 x + assert {[r zadd ztmp xx 20 y] == 0} + assert {[r zcard ztmp] == 1} + } + + test "ZADD XX returns the number of elements actually added" { + r del ztmp + r zadd ztmp 10 x + set retval [r zadd ztmp 10 x 20 y 30 z] + assert {$retval == 2} + } + + test "ZADD XX updates existing elements score" { + r del ztmp + r zadd ztmp 10 x 20 y 30 z + r zadd ztmp xx 5 foo 11 x 21 y 40 zap + assert {[r zcard ztmp] == 3} + assert {[r zscore ztmp x] == 11} + assert {[r zscore ztmp y] == 21} + } + + test "ZADD XX and NX are not compatible" { + r del ztmp + catch {r zadd ztmp xx nx 10 x} err + set err + } {ERR*} + + test "ZADD NX with non exisitng key" { + r del ztmp + r zadd ztmp nx 10 x 20 y 30 z + assert {[r zcard ztmp] == 3} + } + + test "ZADD NX only add new elements without updating old ones" { + r del ztmp + r zadd ztmp 10 x 20 y 30 z + assert {[r zadd ztmp nx 11 x 21 y 100 a 200 b] == 2} + assert {[r zscore ztmp x] == 10} + assert {[r zscore ztmp y] == 20} + assert {[r zscore ztmp a] == 100} + assert {[r zscore ztmp b] == 200} + } + test "ZINCRBY calls leading to NaN result in error" { r zincrby myzset +inf abc assert_error "*NaN*" {r zincrby myzset -inf abc} @@ -77,6 +134,8 @@ start_server {tags {"zset"}} { } test "ZCARD basics - $encoding" { + r del ztmp + r zadd ztmp 10 a 20 b 30 c assert_equal 3 [r zcard ztmp] assert_equal 0 [r zcard zdoesntexist] } From 23ba5c10920b9893eb1ed82503157850131819af Mon Sep 17 00:00:00 2001 From: antirez Date: Fri, 29 May 2015 11:28:49 +0200 Subject: [PATCH 0237/1928] Test: ZADD INCR test --- tests/unit/type/zset.tcl | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/tests/unit/type/zset.tcl b/tests/unit/type/zset.tcl index d33b4bb35..ea2d4e374 100644 --- a/tests/unit/type/zset.tcl +++ b/tests/unit/type/zset.tcl @@ -100,6 +100,20 @@ start_server {tags {"zset"}} { assert {[r zscore ztmp b] == 200} } + test "ZADD INCR works like ZINCRBY" { + r del ztmp + r zadd ztmp 10 x 20 y 30 z + r zadd ztmp INCR 15 x + assert {[r zscore ztmp x] == 25} + } + + test "ZADD INCR works with a single score-elemenet pair" { + r del ztmp + r zadd ztmp 10 x 20 y 30 z + catch {r zadd ztmp INCR 15 x 10 y} err + set err + } {ERR*} + test "ZINCRBY calls leading to NaN result in error" { r zincrby myzset +inf abc assert_error "*NaN*" {r zincrby myzset -inf abc} From d8a8dca7fd52d6be8383b2a0d2b7e403f3c1d71d Mon Sep 17 00:00:00 2001 From: antirez Date: Fri, 29 May 2015 11:32:22 +0200 Subject: [PATCH 0238/1928] ZADD RETCH option renamed CH MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit From Twitter: "@antirez that’s 
an awfully-named command :( http://en.wikipedia.org/wiki/Retching" --- src/t_zset.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/t_zset.c b/src/t_zset.c index 63436f25d..3a30b84d9 100644 --- a/src/t_zset.c +++ b/src/t_zset.c @@ -1175,7 +1175,7 @@ void zsetConvert(robj *zobj, int encoding) { #define ZADD_INCR (1<<0) /* Increment the score instead of setting it. */ #define ZADD_NX (1<<1) /* Don't touch elements not already existing. */ #define ZADD_XX (1<<2) /* Only touch elements already exisitng. */ -#define ZADD_RETCH (1<<3) /* Return the number of elements added or updated.*/ +#define ZADD_CH (1<<3) /* Return num of elements added or updated. */ void zaddGenericCommand(redisClient *c, int flags) { static char *nanerr = "resulting score is not a number (NaN)"; robj *key = c->argv[1]; @@ -1200,7 +1200,7 @@ void zaddGenericCommand(redisClient *c, int flags) { char *opt = c->argv[scoreidx]->ptr; if (!strcasecmp(opt,"nx")) flags |= ZADD_NX; else if (!strcasecmp(opt,"xx")) flags |= ZADD_XX; - else if (!strcasecmp(opt,"retch")) flags |= ZADD_RETCH; + else if (!strcasecmp(opt,"ch")) flags |= ZADD_CH; else if (!strcasecmp(opt,"incr")) flags |= ZADD_INCR; else break; scoreidx++; @@ -1210,7 +1210,7 @@ void zaddGenericCommand(redisClient *c, int flags) { int incr = (flags & ZADD_INCR) != 0; int nx = (flags & ZADD_NX) != 0; int xx = (flags & ZADD_XX) != 0; - int retch = (flags & ZADD_RETCH) != 0; + int ch = (flags & ZADD_CH) != 0; /* After the options, we expect to have an even number of args, since * we expect any number of score-element pairs. */ @@ -1356,7 +1356,7 @@ reply_to_client: else addReply(c,shared.nullbulk); } else { /* ZADD. */ - addReplyLongLong(c,retch ? added+updated : added); + addReplyLongLong(c,ch ? added+updated : added); } cleanup: From 39b49bcaaf221d9d8bd6ca62fe1ea0befe4089fd Mon Sep 17 00:00:00 2001 From: antirez Date: Fri, 29 May 2015 11:34:43 +0200 Subject: [PATCH 0239/1928] Test: ZADD CH tests --- tests/unit/type/zset.tcl | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/tests/unit/type/zset.tcl b/tests/unit/type/zset.tcl index ea2d4e374..cc5601439 100644 --- a/tests/unit/type/zset.tcl +++ b/tests/unit/type/zset.tcl @@ -114,6 +114,13 @@ start_server {tags {"zset"}} { set err } {ERR*} + test "ZADD CH option changes return value to all changed elements" { + r del ztmp + r zadd ztmp 10 x 20 y 30 z + assert {[r zadd ztmp 11 x 21 y 30 z] == 0} + assert {[r zadd ztmp ch 12 x 22 y 30 z] == 2} + } + test "ZINCRBY calls leading to NaN result in error" { r zincrby myzset +inf abc assert_error "*NaN*" {r zincrby myzset -inf abc} From 0dc6a5d497904776b3a6296b08207b917ef59bab Mon Sep 17 00:00:00 2001 From: linfangrong Date: Tue, 2 Jun 2015 18:12:57 +0800 Subject: [PATCH 0240/1928] Update t_zset.c --- src/t_zset.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/t_zset.c b/src/t_zset.c index 3a30b84d9..dbc561a93 100644 --- a/src/t_zset.c +++ b/src/t_zset.c @@ -1248,7 +1248,7 @@ void zaddGenericCommand(redisClient *c, int flags) { if (zobj == NULL) { if (xx) goto reply_to_client; /* No key + XX option: nothing to do. 
*/ if (server.zset_max_ziplist_entries == 0 || - server.zset_max_ziplist_value < sdslen(c->argv[3]->ptr)) + server.zset_max_ziplist_value < sdslen(c->argv[scoreidx+1]->ptr)) { zobj = createZsetObject(); } else { From 357a40c4fc1f5ad3c143e6afbb0ada8d68432221 Mon Sep 17 00:00:00 2001 From: antirez Date: Wed, 3 Jun 2015 08:44:43 +0200 Subject: [PATCH 0241/1928] Scripting: Lua cmsgpack lib updated to include str8 support --- deps/lua/src/lua_cmsgpack.c | 71 ++++++++++++++++++++++--------------- 1 file changed, 42 insertions(+), 29 deletions(-) diff --git a/deps/lua/src/lua_cmsgpack.c b/deps/lua/src/lua_cmsgpack.c index e13f053d2..0b82d008d 100644 --- a/deps/lua/src/lua_cmsgpack.c +++ b/deps/lua/src/lua_cmsgpack.c @@ -66,7 +66,7 @@ /* Reverse memory bytes if arch is little endian. Given the conceptual * simplicity of the Lua build system we prefer check for endianess at runtime. * The performance difference should be acceptable. */ -static void memrevifle(void *ptr, size_t len) { +void memrevifle(void *ptr, size_t len) { unsigned char *p = (unsigned char *)ptr, *e = (unsigned char *)p+len-1, aux; @@ -96,7 +96,7 @@ typedef struct mp_buf { size_t len, free; } mp_buf; -static void *mp_realloc(lua_State *L, void *target, size_t osize,size_t nsize) { +void *mp_realloc(lua_State *L, void *target, size_t osize,size_t nsize) { void *(*local_realloc) (void *, void *, size_t osize, size_t nsize) = NULL; void *ud; @@ -105,7 +105,7 @@ static void *mp_realloc(lua_State *L, void *target, size_t osize,size_t nsize) { return local_realloc(ud, target, osize, nsize); } -static mp_buf *mp_buf_new(lua_State *L) { +mp_buf *mp_buf_new(lua_State *L) { mp_buf *buf = NULL; /* Old size = 0; new size = sizeof(*buf) */ @@ -117,7 +117,7 @@ static mp_buf *mp_buf_new(lua_State *L) { return buf; } -static void mp_buf_append(mp_buf *buf, const unsigned char *s, size_t len) { +void mp_buf_append(mp_buf *buf, const unsigned char *s, size_t len) { if (buf->free < len) { size_t newlen = buf->len+len; @@ -153,7 +153,7 @@ typedef struct mp_cur { int err; } mp_cur; -static void mp_cur_init(mp_cur *cursor, const unsigned char *s, size_t len) { +void mp_cur_init(mp_cur *cursor, const unsigned char *s, size_t len) { cursor->p = s; cursor->left = len; cursor->err = MP_CUR_ERROR_NONE; @@ -173,13 +173,17 @@ static void mp_cur_init(mp_cur *cursor, const unsigned char *s, size_t len) { /* ------------------------- Low level MP encoding -------------------------- */ -static void mp_encode_bytes(mp_buf *buf, const unsigned char *s, size_t len) { +void mp_encode_bytes(mp_buf *buf, const unsigned char *s, size_t len) { unsigned char hdr[5]; int hdrlen; if (len < 32) { hdr[0] = 0xa0 | (len&0xff); /* fix raw */ hdrlen = 1; + } else if (len <= 0xff) { + hdr[0] = 0xd9; + hdr[1] = len; + hdrlen = 2; } else if (len <= 0xffff) { hdr[0] = 0xda; hdr[1] = (len&0xff00)>>8; @@ -198,7 +202,7 @@ static void mp_encode_bytes(mp_buf *buf, const unsigned char *s, size_t len) { } /* we assume IEEE 754 internal format for single and double precision floats. 
*/ -static void mp_encode_double(mp_buf *buf, double d) { +void mp_encode_double(mp_buf *buf, double d) { unsigned char b[9]; float f = d; @@ -216,7 +220,7 @@ static void mp_encode_double(mp_buf *buf, double d) { } } -static void mp_encode_int(mp_buf *buf, int64_t n) { +void mp_encode_int(mp_buf *buf, int64_t n) { unsigned char b[9]; int enclen; @@ -288,7 +292,7 @@ static void mp_encode_int(mp_buf *buf, int64_t n) { mp_buf_append(buf,b,enclen); } -static void mp_encode_array(mp_buf *buf, int64_t n) { +void mp_encode_array(mp_buf *buf, int64_t n) { unsigned char b[5]; int enclen; @@ -311,7 +315,7 @@ static void mp_encode_array(mp_buf *buf, int64_t n) { mp_buf_append(buf,b,enclen); } -static void mp_encode_map(mp_buf *buf, int64_t n) { +void mp_encode_map(mp_buf *buf, int64_t n) { unsigned char b[5]; int enclen; @@ -336,7 +340,7 @@ static void mp_encode_map(mp_buf *buf, int64_t n) { /* --------------------------- Lua types encoding --------------------------- */ -static void mp_encode_lua_string(lua_State *L, mp_buf *buf) { +void mp_encode_lua_string(lua_State *L, mp_buf *buf) { size_t len; const char *s; @@ -344,13 +348,13 @@ static void mp_encode_lua_string(lua_State *L, mp_buf *buf) { mp_encode_bytes(buf,(const unsigned char*)s,len); } -static void mp_encode_lua_bool(lua_State *L, mp_buf *buf) { +void mp_encode_lua_bool(lua_State *L, mp_buf *buf) { unsigned char b = lua_toboolean(L,-1) ? 0xc3 : 0xc2; mp_buf_append(buf,&b,1); } /* Lua 5.3 has a built in 64-bit integer type */ -static void mp_encode_lua_integer(lua_State *L, mp_buf *buf) { +void mp_encode_lua_integer(lua_State *L, mp_buf *buf) { #if (LUA_VERSION_NUM < 503) && BITS_32 lua_Number i = lua_tonumber(L,-1); #else @@ -362,7 +366,7 @@ static void mp_encode_lua_integer(lua_State *L, mp_buf *buf) { /* Lua 5.2 and lower only has 64-bit doubles, so we need to * detect if the double may be representable as an int * for Lua < 5.3 */ -static void mp_encode_lua_number(lua_State *L, mp_buf *buf) { +void mp_encode_lua_number(lua_State *L, mp_buf *buf) { lua_Number n = lua_tonumber(L,-1); if (IS_INT64_EQUIVALENT(n)) { @@ -372,10 +376,10 @@ static void mp_encode_lua_number(lua_State *L, mp_buf *buf) { } } -static void mp_encode_lua_type(lua_State *L, mp_buf *buf, int level); +void mp_encode_lua_type(lua_State *L, mp_buf *buf, int level); /* Convert a lua table into a message pack list. */ -static void mp_encode_lua_table_as_array(lua_State *L, mp_buf *buf, int level) { +void mp_encode_lua_table_as_array(lua_State *L, mp_buf *buf, int level) { #if LUA_VERSION_NUM < 502 size_t len = lua_objlen(L,-1), j; #else @@ -391,7 +395,7 @@ static void mp_encode_lua_table_as_array(lua_State *L, mp_buf *buf, int level) { } /* Convert a lua table into a message pack key-value map. */ -static void mp_encode_lua_table_as_map(lua_State *L, mp_buf *buf, int level) { +void mp_encode_lua_table_as_map(lua_State *L, mp_buf *buf, int level) { size_t len = 0; /* First step: count keys into table. No other way to do it with the @@ -418,7 +422,7 @@ static void mp_encode_lua_table_as_map(lua_State *L, mp_buf *buf, int level) { /* Returns true if the Lua table on top of the stack is exclusively composed * of keys from numerical keys from 1 up to N, with N being the total number * of elements, without any hole in the middle. 
*/ -static int table_is_an_array(lua_State *L) { +int table_is_an_array(lua_State *L) { int count = 0, max = 0; #if LUA_VERSION_NUM < 503 lua_Number n; @@ -461,14 +465,14 @@ static int table_is_an_array(lua_State *L) { /* If the length operator returns non-zero, that is, there is at least * an object at key '1', we serialize to message pack list. Otherwise * we use a map. */ -static void mp_encode_lua_table(lua_State *L, mp_buf *buf, int level) { +void mp_encode_lua_table(lua_State *L, mp_buf *buf, int level) { if (table_is_an_array(L)) mp_encode_lua_table_as_array(L,buf,level); else mp_encode_lua_table_as_map(L,buf,level); } -static void mp_encode_lua_null(lua_State *L, mp_buf *buf) { +void mp_encode_lua_null(lua_State *L, mp_buf *buf) { unsigned char b[1]; (void)L; @@ -476,7 +480,7 @@ static void mp_encode_lua_null(lua_State *L, mp_buf *buf) { mp_buf_append(buf,b,1); } -static void mp_encode_lua_type(lua_State *L, mp_buf *buf, int level) { +void mp_encode_lua_type(lua_State *L, mp_buf *buf, int level) { int t = lua_type(L,-1); /* Limit the encoding of nested tables to a specified maximum depth, so that @@ -506,7 +510,7 @@ static void mp_encode_lua_type(lua_State *L, mp_buf *buf, int level) { * Packs all arguments as a stream for multiple upacking later. * Returns error if no arguments provided. */ -static int mp_pack(lua_State *L) { +int mp_pack(lua_State *L) { int nargs = lua_gettop(L); int i; mp_buf *buf; @@ -687,6 +691,15 @@ void mp_decode_to_lua_type(lua_State *L, mp_cur *c) { mp_cur_consume(c,9); } break; + case 0xd9: /* raw 8 */ + mp_cur_need(c,2); + { + size_t l = c->p[1]; + mp_cur_need(c,2+l); + lua_pushlstring(L,(char*)c->p+2,l); + mp_cur_consume(c,2+l); + } + break; case 0xda: /* raw 16 */ mp_cur_need(c,3); { @@ -773,7 +786,7 @@ void mp_decode_to_lua_type(lua_State *L, mp_cur *c) { } } -static int mp_unpack_full(lua_State *L, int limit, int offset) { +int mp_unpack_full(lua_State *L, int limit, int offset) { size_t len; const char *s; mp_cur c; @@ -826,18 +839,18 @@ static int mp_unpack_full(lua_State *L, int limit, int offset) { return cnt; } -static int mp_unpack(lua_State *L) { +int mp_unpack(lua_State *L) { return mp_unpack_full(L, 0, 0); } -static int mp_unpack_one(lua_State *L) { +int mp_unpack_one(lua_State *L) { int offset = luaL_optinteger(L, 2, 0); /* Variable pop because offset may not exist */ lua_pop(L, lua_gettop(L)-1); return mp_unpack_full(L, 1, offset); } -static int mp_unpack_limit(lua_State *L) { +int mp_unpack_limit(lua_State *L) { int limit = luaL_checkinteger(L, 2); int offset = luaL_optinteger(L, 3, 0); /* Variable pop because offset may not exist */ @@ -846,7 +859,7 @@ static int mp_unpack_limit(lua_State *L) { return mp_unpack_full(L, limit, offset); } -static int mp_safe(lua_State *L) { +int mp_safe(lua_State *L) { int argc, err, total_results; argc = lua_gettop(L); @@ -869,7 +882,7 @@ static int mp_safe(lua_State *L) { } /* -------------------------------------------------------------------------- */ -static const struct luaL_Reg cmds[] = { +const struct luaL_Reg cmds[] = { {"pack", mp_pack}, {"unpack", mp_unpack}, {"unpack_one", mp_unpack_one}, @@ -877,7 +890,7 @@ static const struct luaL_Reg cmds[] = { {0} }; -static int luaopen_create(lua_State *L) { +int luaopen_create(lua_State *L) { int i; /* Manually construct our module table instead of * relying on _register or _newlib */ From fdf9d455098f54f7666c702ae464e6ea21e25411 Mon Sep 17 00:00:00 2001 From: Ben Murphy Date: Mon, 11 May 2015 23:24:24 +0100 Subject: [PATCH 0242/1928] disable loading lua 
bytecode --- deps/lua/src/ldo.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/deps/lua/src/ldo.c b/deps/lua/src/ldo.c index d1bf786cb..514f7a2a3 100644 --- a/deps/lua/src/ldo.c +++ b/deps/lua/src/ldo.c @@ -495,7 +495,7 @@ static void f_parser (lua_State *L, void *ud) { struct SParser *p = cast(struct SParser *, ud); int c = luaZ_lookahead(p->z); luaC_checkGC(L); - tf = ((c == LUA_SIGNATURE[0]) ? luaU_undump : luaY_parser)(L, p->z, + tf = (luaY_parser)(L, p->z, &p->buff, p->name); cl = luaF_newLclosure(L, tf->nups, hvalue(gt(L))); cl->l.p = tf; From ffd6637e90d816b7a17a96f144f75153c952d8cf Mon Sep 17 00:00:00 2001 From: Ben Murphy Date: Mon, 11 May 2015 23:24:37 +0100 Subject: [PATCH 0243/1928] hide access to debug table --- src/scripting.c | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff --git a/src/scripting.c b/src/scripting.c index 4f807f4e2..53c0c9ed2 100644 --- a/src/scripting.c +++ b/src/scripting.c @@ -612,11 +612,12 @@ void scriptingEnableGlobalsProtection(lua_State *lua) { /* strict.lua from: http://metalua.luaforge.net/src/lib/strict.lua.html. * Modified to be adapted to Redis. */ + s[j++]="local dbg=debug\n"; s[j++]="local mt = {}\n"; s[j++]="setmetatable(_G, mt)\n"; s[j++]="mt.__newindex = function (t, n, v)\n"; - s[j++]=" if debug.getinfo(2) then\n"; - s[j++]=" local w = debug.getinfo(2, \"S\").what\n"; + s[j++]=" if dbg.getinfo(2) then\n"; + s[j++]=" local w = dbg.getinfo(2, \"S\").what\n"; s[j++]=" if w ~= \"main\" and w ~= \"C\" then\n"; s[j++]=" error(\"Script attempted to create global variable '\"..tostring(n)..\"'\", 2)\n"; s[j++]=" end\n"; @@ -624,11 +625,12 @@ void scriptingEnableGlobalsProtection(lua_State *lua) { s[j++]=" rawset(t, n, v)\n"; s[j++]="end\n"; s[j++]="mt.__index = function (t, n)\n"; - s[j++]=" if debug.getinfo(2) and debug.getinfo(2, \"S\").what ~= \"C\" then\n"; + s[j++]=" if dbg.getinfo(2) and dbg.getinfo(2, \"S\").what ~= \"C\" then\n"; s[j++]=" error(\"Script attempted to access unexisting global variable '\"..tostring(n)..\"'\", 2)\n"; s[j++]=" end\n"; s[j++]=" return rawget(t, n)\n"; s[j++]="end\n"; + s[j++]="debug = nil\n"; s[j++]=NULL; for (j = 0; s[j] != NULL; j++) code = sdscatlen(code,s[j],strlen(s[j])); @@ -732,10 +734,11 @@ void scriptingInit(void) { * information about the caller, that's what makes sense from the point * of view of the user debugging a script. */ { - char *errh_func = "function __redis__err__handler(err)\n" - " local i = debug.getinfo(2,'nSl')\n" + char *errh_func = "local dbg = debug\n" + "function __redis__err__handler(err)\n" + " local i = dbg.getinfo(2,'nSl')\n" " if i and i.what == 'C' then\n" - " i = debug.getinfo(3,'nSl')\n" + " i = dbg.getinfo(3,'nSl')\n" " end\n" " if i then\n" " return i.source .. ':' .. i.currentline .. ': ' .. err\n" From a401a84eb2fb0b22e003cdd2c5231995f57bab8b Mon Sep 17 00:00:00 2001 From: antirez Date: Thu, 11 Jun 2015 12:29:37 +0200 Subject: [PATCH 0244/1928] Don't try to bind the source address for MIGRATE Related to issues #2609 and #2612. 
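For context, a minimal standalone sketch (not part of the patch; the address 203.0.113.10 is only a documentation-range placeholder) of the failure mode being avoided: once a socket is bound to a source address that cannot reach the target, such as the loopback interface, the later connect() has no way to succeed, which is what MIGRATE ran into when REDIS_BIND_ADDR pointed at such an interface:

    #include <stdio.h>
    #include <arpa/inet.h>
    #include <netinet/in.h>
    #include <sys/socket.h>
    #include <unistd.h>

    int main(void) {
        int s = socket(AF_INET, SOCK_STREAM, 0);

        /* Force the source address, as the old MIGRATE code path did through
         * anetTcpNonBlockBindConnect(...,REDIS_BIND_ADDR). */
        struct sockaddr_in src = { 0 };
        src.sin_family = AF_INET;
        inet_pton(AF_INET, "127.0.0.1", &src.sin_addr);
        if (bind(s, (struct sockaddr *)&src, sizeof(src)) == -1) perror("bind");

        /* Placeholder for a remote node (203.0.113.10, TEST-NET-3 range). */
        struct sockaddr_in dst = { 0 };
        dst.sin_family = AF_INET;
        dst.sin_port = htons(6379);
        inet_pton(AF_INET, "203.0.113.10", &dst.sin_addr);

        /* With the loopback source forced, this connect cannot succeed; the
         * exact errno (EINVAL, ENETUNREACH, ...) depends on the platform. */
        if (connect(s, (struct sockaddr *)&dst, sizeof(dst)) == -1) perror("connect");

        close(s);
        return 0;
    }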
--- src/cluster.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/cluster.c b/src/cluster.c index 916a4be6a..fb45bd063 100644 --- a/src/cluster.c +++ b/src/cluster.c @@ -4486,8 +4486,8 @@ migrateCachedSocket* migrateGetSocket(redisClient *c, robj *host, robj *port, lo } /* Create the socket */ - fd = anetTcpNonBlockBindConnect(server.neterr,c->argv[1]->ptr, - atoi(c->argv[2]->ptr),REDIS_BIND_ADDR); + fd = anetTcpNonBlockConnect(server.neterr,c->argv[1]->ptr, + atoi(c->argv[2]->ptr)); if (fd == -1) { sdsfree(name); addReplyErrorFormat(c,"Can't connect to target node: %s", From 8fa8b251a9766f987a7704f034e66c645f0c3afe Mon Sep 17 00:00:00 2001 From: antirez Date: Thu, 11 Jun 2015 12:46:55 +0200 Subject: [PATCH 0245/1928] anetTcpGenericConnect(), jump to error not end on error Two code paths jumped to the "ok, return the socket to the user" code path to handle error conditions. Related to issues #2609 and #2612. --- src/anet.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/anet.c b/src/anet.c index 3ef517cec..bdaba8b0f 100644 --- a/src/anet.c +++ b/src/anet.c @@ -295,7 +295,7 @@ static int anetTcpGenericConnect(char *err, char *addr, int port, if ((rv = getaddrinfo(source_addr, NULL, &hints, &bservinfo)) != 0) { anetSetError(err, "%s", gai_strerror(rv)); - goto end; + goto error; } for (b = bservinfo; b != NULL; b = b->ai_next) { if (bind(s,b->ai_addr,b->ai_addrlen) != -1) { @@ -306,7 +306,7 @@ static int anetTcpGenericConnect(char *err, char *addr, int port, freeaddrinfo(bservinfo); if (!bound) { anetSetError(err, "bind: %s", strerror(errno)); - goto end; + goto error; } } if (connect(s,p->ai_addr,p->ai_addrlen) == -1) { From a017b7ec0e68792fa79b611023f3065c4e0c9394 Mon Sep 17 00:00:00 2001 From: antirez Date: Thu, 11 Jun 2015 12:55:58 +0200 Subject: [PATCH 0246/1928] anet.c: new API anetTcpNonBlockBestEffortBindConnect() This performs a best effort source address binding attempt. If it is possible to bind the local address and still have a successful connect(), then this socket is returned. Otherwise the call is retried without source address binding attempt. Related to issues #2609 and #2612. --- src/anet.c | 24 +++++++++++++++++++++--- src/anet.h | 1 + 2 files changed, 22 insertions(+), 3 deletions(-) diff --git a/src/anet.c b/src/anet.c index bdaba8b0f..1728f3eb9 100644 --- a/src/anet.c +++ b/src/anet.c @@ -264,6 +264,7 @@ static int anetCreateSocket(char *err, int domain) { #define ANET_CONNECT_NONE 0 #define ANET_CONNECT_NONBLOCK 1 +#define ANET_CONNECT_BE_BINDING 2 /* Best effort binding. */ static int anetTcpGenericConnect(char *err, char *addr, int port, char *source_addr, int flags) { @@ -331,9 +332,17 @@ error: close(s); s = ANET_ERR; } + end: freeaddrinfo(servinfo); - return s; + + /* Handle best effort binding: if a binding address was used, but it is + * not possible to create a socket, try again without a binding address. 
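+ * The retry passes NULL as source_addr but keeps the original flags, so a
+ * non-blocking attempt stays non-blocking, and since source_addr is NULL on
+ * the second call the retry can happen at most once.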
*/ + if (s == ANET_ERR && source_addr && (flags & ANET_CONNECT_BE_BINDING)) { + return anetTcpGenericConnect(err,addr,port,NULL,flags); + } else { + return s; + } } int anetTcpConnect(char *err, char *addr, int port) @@ -346,9 +355,18 @@ int anetTcpNonBlockConnect(char *err, char *addr, int port) return anetTcpGenericConnect(err,addr,port,NULL,ANET_CONNECT_NONBLOCK); } -int anetTcpNonBlockBindConnect(char *err, char *addr, int port, char *source_addr) +int anetTcpNonBlockBindConnect(char *err, char *addr, int port, + char *source_addr) { - return anetTcpGenericConnect(err,addr,port,source_addr,ANET_CONNECT_NONBLOCK); + return anetTcpGenericConnect(err,addr,port,source_addr, + ANET_CONNECT_NONBLOCK); +} + +int anetTcpNonBlockBestEffortBindConnect(char *err, char *addr, int port, + char *source_addr) +{ + return anetTcpGenericConnect(err,addr,port,source_addr, + ANET_CONNECT_NONBLOCK|ANET_CONNECT_BE_BINDING); } int anetUnixGenericConnect(char *err, char *path, int flags) diff --git a/src/anet.h b/src/anet.h index ea9c77f2e..8740a95d0 100644 --- a/src/anet.h +++ b/src/anet.h @@ -50,6 +50,7 @@ int anetTcpConnect(char *err, char *addr, int port); int anetTcpNonBlockConnect(char *err, char *addr, int port); int anetTcpNonBlockBindConnect(char *err, char *addr, int port, char *source_addr); +int anetTcpNonBlockBestEffortBindConnect(char *err, char *addr, int port, char *source_addr); int anetUnixConnect(char *err, char *path); int anetUnixNonBlockConnect(char *err, char *path); int anetRead(int fd, char *buf, int count); From 8366907bed1f8e798889f51ce46c8035a28a6cae Mon Sep 17 00:00:00 2001 From: antirez Date: Thu, 11 Jun 2015 12:57:53 +0200 Subject: [PATCH 0247/1928] Use best effort address binding to connect to the master We usually want to reach the master using the address of the interface Redis is bound to (via the "bind" config option). That's useful since the master will get (and publish) the slave address getting the peer name of the incoming socket connection from the slave. However, when this is not possible, for example because the slave is bound to the loopback interface but repliaces from a master accessed via an external interface, we want to still connect with the master even from a different interface: in this case it is not really important that the master will provide any other address, while it is vital to be able to replicate correctly. Related to issues #2609 and #2612. --- src/replication.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/replication.c b/src/replication.c index ca263527a..c0145b4a1 100644 --- a/src/replication.c +++ b/src/replication.c @@ -1384,7 +1384,7 @@ error: int connectWithMaster(void) { int fd; - fd = anetTcpNonBlockBindConnect(NULL, + fd = anetTcpNonBlockBestEffortBindConnect(NULL, server.masterhost,server.masterport,REDIS_BIND_ADDR); if (fd == -1) { redisLog(REDIS_WARNING,"Unable to connect to MASTER: %s", From 821a986643717018cad8af9f35cba49818e60294 Mon Sep 17 00:00:00 2001 From: antirez Date: Fri, 12 Jun 2015 18:33:03 +0200 Subject: [PATCH 0248/1928] Sentinel: fix bug in config rewriting during failover We have a check to rewrite the config properly when a failover is in progress, in order to add the current (already failed over) master as slave, and don't include in the slave list the promoted slave itself. However there was an issue, the variable with the right address was computed but never used when the code was modified, and no tests are available for this feature for two reasons: 1. 
The Sentinel unit test currently does not test Sentinel ability to persist its state at all. 2. It is a very hard to trigger state since it lasts for little time in the context of the testing framework. However this feature should be covered in the test in some way. The bug was found by @badboy using the clang static analyzer. Effects of the bug on safety of Sentinel === This bug results in severe issues in the following case: 1. A Sentinel is elected leader. 2. During the failover, it persists a wrong config with a known-slave entry listing the master address. 3. The Sentinel crashes and restarts, reading invalid configuration from disk. 4. It sees that the slave now does not obey the logical configuration (should replicate from the current master), so it sends a SLAVEOF command to the master (since the slave master is the same) creating a replication loop (attempt to replicate from itself) which Redis is currently unable to detect. 5. This means that the master is no longer available because of the bug. However the lack of availability should be only transient (at least in my tests, but other states could be possible where the problem is not recovered automatically) because: 6. Sentinels treat masters reporting to be slaves as failing. 7. A new failover is triggered, and a slave is promoted to master. Bug lifetime === The bug is there forever. Commit 16237d78 actually tried to fix the bug but in the wrong way (the computed variable was never used! My fault). So this bug is there basically since the start of Sentinel. Since the bug is hard to trigger, I remember little reports matching this condition, but I remember at least a few. Also in automated tests where instances were stopped and restarted multiple times automatically I remember hitting this issue, however I was not able to reproduce nor to determine with the information I had at the time what was causing the issue. --- src/sentinel.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/sentinel.c b/src/sentinel.c index c7e7f672d..3ff8899d7 100644 --- a/src/sentinel.c +++ b/src/sentinel.c @@ -1787,7 +1787,7 @@ void rewriteConfigSentinelOption(struct rewriteConfigState *state) { slave_addr = master->addr; line = sdscatprintf(sdsempty(), "sentinel known-slave %s %s %d", - master->name, ri->addr->ip, ri->addr->port); + master->name, slave_addr->ip, slave_addr->port); rewriteConfigRewriteLine(state,"sentinel",line,1); } dictReleaseIterator(di2); From 7f4ac3d19c28e0a7a608fe94411e92bc59097e11 Mon Sep 17 00:00:00 2001 From: Matt Stancliff Date: Mon, 12 May 2014 14:38:17 -0400 Subject: [PATCH 0249/1928] [In-Progress] Add Geo Commands Current todo: - replace functions in zset.{c,h} with a new unified Redis zset access API. Once we get the zset interface fixed, we can squash relevant commits in this branch and have one nice commit to merge into unstable. This commit adds: - Geo commands - Tests; runnable with: ./runtest --single unit/geo - Geo helpers in deps/geohash-int/ - src/geo.{c,h} and src/geojson.{c,h} implementing geo commands - Updated build configurations to get everything working - TEMPORARY: src/zset.{c,h} implementing zset score and zset range reading without writing to client output buffers. 
- Modified linkage of one t_zset.c function for use in zset.c Conflicts: src/Makefile src/redis.c --- deps/Makefile | 7 + deps/geohash-int/Makefile | 23 + deps/geohash-int/geohash.c | 290 ++++++++++++ deps/geohash-int/geohash.h | 120 +++++ deps/geohash-int/geohash_helper.c | 279 +++++++++++ deps/geohash-int/geohash_helper.h | 78 ++++ src/Makefile | 11 +- src/geo.c | 749 ++++++++++++++++++++++++++++++ src/geo.h | 12 + src/geojson.c | 265 +++++++++++ src/geojson.h | 54 +++ src/redis.c | 5 + src/redis.h | 9 + src/t_zset.c | 2 +- src/zset.c | 185 ++++++++ src/zset.h | 31 ++ tests/unit/geo.tcl | 53 +++ 17 files changed, 2167 insertions(+), 6 deletions(-) create mode 100644 deps/geohash-int/Makefile create mode 100644 deps/geohash-int/geohash.c create mode 100644 deps/geohash-int/geohash.h create mode 100644 deps/geohash-int/geohash_helper.c create mode 100644 deps/geohash-int/geohash_helper.h create mode 100644 src/geo.c create mode 100644 src/geo.h create mode 100644 src/geojson.c create mode 100644 src/geojson.h create mode 100644 src/zset.c create mode 100644 src/zset.h create mode 100644 tests/unit/geo.tcl diff --git a/deps/Makefile b/deps/Makefile index 71f6d3a2c..10ae6e790 100644 --- a/deps/Makefile +++ b/deps/Makefile @@ -36,6 +36,7 @@ distclean: -(cd hiredis && $(MAKE) clean) > /dev/null || true -(cd linenoise && $(MAKE) clean) > /dev/null || true -(cd lua && $(MAKE) clean) > /dev/null || true + -(cd geohash-int && $(MAKE) clean) > /dev/null || true -(cd jemalloc && [ -f Makefile ] && $(MAKE) distclean) > /dev/null || true -(rm -f .make-*) @@ -81,3 +82,9 @@ jemalloc: .make-prerequisites cd jemalloc && $(MAKE) CFLAGS="$(JEMALLOC_CFLAGS)" LDFLAGS="$(JEMALLOC_LDFLAGS)" lib/libjemalloc.a .PHONY: jemalloc + +geohash-int: .make-prerequisites + @printf '%b %b\n' $(MAKECOLOR)MAKE$(ENDCOLOR) $(BINCOLOR)$@$(ENDCOLOR) + cd geohash-int && $(MAKE) + +.PHONY: geohash-int diff --git a/deps/geohash-int/Makefile b/deps/geohash-int/Makefile new file mode 100644 index 000000000..bf9eaebb8 --- /dev/null +++ b/deps/geohash-int/Makefile @@ -0,0 +1,23 @@ +STD= +WARN= -Wall +OPT= -Ofast + +R_CFLAGS= $(STD) $(WARN) $(OPT) $(DEBUG) $(CFLAGS) +R_LDFLAGS= $(LDFLAGS) +DEBUG= -g + +R_CC=$(CC) $(R_CFLAGS) +R_LD=$(CC) $(R_LDFLAGS) + +all: geohash.o geohash_helper.o + +.PHONY: all + +geohash.o: geohash.h geohash.c +geohash_helper.o: geohash.h geohash_helper.h geohash_helper.c + +.c.o: + $(R_CC) -c $< + +clean: + rm -f *.o diff --git a/deps/geohash-int/geohash.c b/deps/geohash-int/geohash.c new file mode 100644 index 000000000..9212736bf --- /dev/null +++ b/deps/geohash-int/geohash.c @@ -0,0 +1,290 @@ +/* + * Copyright (c) 2013-2014, yinqiwen + * Copyright (c) 2014, Matt Stancliff . + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Redis nor the names of its contributors may be used + * to endorse or promote products derived from this software without + * specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF + * THE POSSIBILITY OF SUCH DAMAGE. + */ +#include "geohash.h" + +/** + * Hashing works like this: + * Divide the world into 4 buckets. Label each one as such: + * ----------------- + * | | | + * | | | + * | 0,1 | 1,1 | + * ----------------- + * | | | + * | | | + * | 0,0 | 1,0 | + * ----------------- + */ + +bool geohashGetCoordRange(uint8_t coord_type, GeoHashRange *lat_range, + GeoHashRange *long_range) { + switch (coord_type) { + case GEO_WGS84_TYPE: { + /* These are constraints from EPSG:900913 / EPSG:3785 / OSGEO:41001 */ + /* We can't geocode at the north/south pole. */ + lat_range->max = 85.05112878; + lat_range->min = -85.05112878; + long_range->max = 180.0; + long_range->min = -180.0; + break; + } + case GEO_MERCATOR_TYPE: { + lat_range->max = 20037726.37; + lat_range->min = -20037726.37; + long_range->max = 20037726.37; + long_range->min = -20037726.37; + break; + } + default: { return false; } + } + return true; +} + +bool geohashEncode(GeoHashRange *lat_range, GeoHashRange *long_range, + double latitude, double longitude, uint8_t step, + GeoHashBits *hash) { + uint8_t i; + + if (NULL == hash || step > 32 || step == 0 || RANGEPISZERO(lat_range) || + RANGEPISZERO(long_range)) { + return false; + } + + hash->bits = 0; + hash->step = step; + + if (latitude < lat_range->min || latitude > lat_range->max || + longitude < long_range->min || longitude > long_range->max) { + return false; + } + + for (i = 0; i < step; i++) { + uint8_t lat_bit, long_bit; + + if (lat_range->max - latitude >= latitude - lat_range->min) { + lat_bit = 0; + lat_range->max = (lat_range->max + lat_range->min) / 2; + } else { + lat_bit = 1; + lat_range->min = (lat_range->max + lat_range->min) / 2; + } + if (long_range->max - longitude >= longitude - long_range->min) { + long_bit = 0; + long_range->max = (long_range->max + long_range->min) / 2; + } else { + long_bit = 1; + long_range->min = (long_range->max + long_range->min) / 2; + } + + hash->bits <<= 1; + hash->bits += long_bit; + hash->bits <<= 1; + hash->bits += lat_bit; + } + return true; +} + +bool geohashEncodeType(uint8_t coord_type, double latitude, double longitude, + uint8_t step, GeoHashBits *hash) { + GeoHashRange r[2] = { { 0 } }; + geohashGetCoordRange(coord_type, &r[0], &r[1]); + return geohashEncode(&r[0], &r[1], latitude, longitude, step, hash); +} + +bool geohashEncodeWGS84(double latitude, double longitude, uint8_t step, + GeoHashBits *hash) { + return geohashEncodeType(GEO_WGS84_TYPE, latitude, longitude, step, hash); +} + +bool geohashEncodeMercator(double latitude, double longitude, uint8_t step, + GeoHashBits *hash) { + return geohashEncodeType(GEO_MERCATOR_TYPE, latitude, longitude, step, + hash); +} + +static inline uint8_t get_bit(uint64_t bits, uint8_t pos) { + return (bits >> pos) & 
0x01; +} + +bool geohashDecode(const GeoHashRange lat_range, const GeoHashRange long_range, + const GeoHashBits hash, GeoHashArea *area) { + uint8_t i; + + if (HASHISZERO(hash) || NULL == area || RANGEISZERO(lat_range) || + RANGEISZERO(long_range)) { + return false; + } + + area->hash = hash; + area->latitude.min = lat_range.min; + area->latitude.max = lat_range.max; + area->longitude.min = long_range.min; + area->longitude.max = long_range.max; + + for (i = 0; i < hash.step; i++) { + uint8_t lat_bit, long_bit; + + long_bit = get_bit(hash.bits, (hash.step - i) * 2 - 1); + lat_bit = get_bit(hash.bits, (hash.step - i) * 2 - 2); + + if (lat_bit == 0) { + area->latitude.max = (area->latitude.max + area->latitude.min) / 2; + } else { + area->latitude.min = (area->latitude.max + area->latitude.min) / 2; + } + + if (long_bit == 0) { + area->longitude.max = + (area->longitude.max + area->longitude.min) / 2; + } else { + area->longitude.min = + (area->longitude.max + area->longitude.min) / 2; + } + } + return true; +} + +bool geohashDecodeType(uint8_t coord_type, const GeoHashBits hash, + GeoHashArea *area) { + GeoHashRange r[2] = { { 0 } }; + geohashGetCoordRange(coord_type, &r[0], &r[1]); + return geohashDecode(r[0], r[1], hash, area); +} + +bool geohashDecodeWGS84(const GeoHashBits hash, GeoHashArea *area) { + return geohashDecodeType(GEO_WGS84_TYPE, hash, area); +} + +bool geohashDecodeMercator(const GeoHashBits hash, GeoHashArea *area) { + return geohashDecodeType(GEO_MERCATOR_TYPE, hash, area); +} + +bool geohashDecodeAreaToLatLong(const GeoHashArea *area, double *latlong) { + double y, x; + + if (!latlong) + return false; + + y = (area->latitude.min + area->latitude.max) / 2; + x = (area->longitude.min + area->longitude.max) / 2; + + latlong[0] = y; + latlong[1] = x; + return true; +} + +bool geohashDecodeToLatLongType(uint8_t coord_type, const GeoHashBits hash, + double *latlong) { + GeoHashArea area = { { 0 } }; + if (!latlong || !geohashDecodeType(coord_type, hash, &area)) + return false; + return geohashDecodeAreaToLatLong(&area, latlong); +} + +bool geohashDecodeToLatLongWGS84(const GeoHashBits hash, double *latlong) { + return geohashDecodeToLatLongType(GEO_WGS84_TYPE, hash, latlong); +} + +bool geohashDecodeToLatLongMercator(const GeoHashBits hash, double *latlong) { + return geohashDecodeToLatLongType(GEO_MERCATOR_TYPE, hash, latlong); +} + +static void geohash_move_x(GeoHashBits *hash, int8_t d) { + if (d == 0) + return; + + uint64_t x = hash->bits & 0xaaaaaaaaaaaaaaaaLL; + uint64_t y = hash->bits & 0x5555555555555555LL; + + uint64_t zz = 0x5555555555555555LL >> (64 - hash->step * 2); + + if (d > 0) { + x = x + (zz + 1); + } else { + x = x | zz; + x = x - (zz + 1); + } + + x &= (0xaaaaaaaaaaaaaaaaLL >> (64 - hash->step * 2)); + hash->bits = (x | y); +} + +static void geohash_move_y(GeoHashBits *hash, int8_t d) { + if (d == 0) + return; + + uint64_t x = hash->bits & 0xaaaaaaaaaaaaaaaaLL; + uint64_t y = hash->bits & 0x5555555555555555LL; + + uint64_t zz = 0xaaaaaaaaaaaaaaaaLL >> (64 - hash->step * 2); + if (d > 0) { + y = y + (zz + 1); + } else { + y = y | zz; + y = y - (zz + 1); + } + y &= (0x5555555555555555LL >> (64 - hash->step * 2)); + hash->bits = (x | y); +} + +void geohashNeighbors(const GeoHashBits *hash, GeoHashNeighbors *neighbors) { + neighbors->east = *hash; + neighbors->west = *hash; + neighbors->north = *hash; + neighbors->south = *hash; + neighbors->south_east = *hash; + neighbors->south_west = *hash; + neighbors->north_east = *hash; + neighbors->north_west = *hash; + 
+ geohash_move_x(&neighbors->east, 1); + geohash_move_y(&neighbors->east, 0); + + geohash_move_x(&neighbors->west, -1); + geohash_move_y(&neighbors->west, 0); + + geohash_move_x(&neighbors->south, 0); + geohash_move_y(&neighbors->south, -1); + + geohash_move_x(&neighbors->north, 0); + geohash_move_y(&neighbors->north, 1); + + geohash_move_x(&neighbors->north_west, -1); + geohash_move_y(&neighbors->north_west, 1); + + geohash_move_x(&neighbors->north_east, 1); + geohash_move_y(&neighbors->north_east, 1); + + geohash_move_x(&neighbors->south_east, 1); + geohash_move_y(&neighbors->south_east, -1); + + geohash_move_x(&neighbors->south_west, -1); + geohash_move_y(&neighbors->south_west, -1); +} diff --git a/deps/geohash-int/geohash.h b/deps/geohash-int/geohash.h new file mode 100644 index 000000000..30fc17144 --- /dev/null +++ b/deps/geohash-int/geohash.h @@ -0,0 +1,120 @@ +/* + * Copyright (c) 2013-2014, yinqiwen + * Copyright (c) 2014, Matt Stancliff . + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Redis nor the names of its contributors may be used + * to endorse or promote products derived from this software without + * specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF + * THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#ifndef GEOHASH_H_ +#define GEOHASH_H_ + +#include +#include +#include + +#if defined(__cplusplus) +extern "C" { +#endif + +#define HASHISZERO(r) (!(r).bits && !(r).step) +#define RANGEISZERO(r) (!(r).max && !(r).min) +#define RANGEPISZERO(r) (r == NULL || RANGEISZERO(*r)) + +#define GEO_WGS84_TYPE 1 +#define GEO_MERCATOR_TYPE 2 + +#define GEO_STEP_MAX 26 + +typedef enum { + GEOHASH_NORTH = 0, + GEOHASH_EAST, + GEOHASH_WEST, + GEOHASH_SOUTH, + GEOHASH_SOUTH_WEST, + GEOHASH_SOUTH_EAST, + GEOHASH_NORT_WEST, + GEOHASH_NORT_EAST +} GeoDirection; + +typedef struct { + uint64_t bits; + uint8_t step; +} GeoHashBits; + +typedef struct { + double max; + double min; +} GeoHashRange; + +typedef struct { + GeoHashBits hash; + GeoHashRange latitude; + GeoHashRange longitude; +} GeoHashArea; + +typedef struct { + GeoHashBits north; + GeoHashBits east; + GeoHashBits west; + GeoHashBits south; + GeoHashBits north_east; + GeoHashBits south_east; + GeoHashBits north_west; + GeoHashBits south_west; +} GeoHashNeighbors; + +/* + * 0:success + * -1:failed + */ +bool geohashGetCoordRange(uint8_t coord_type, GeoHashRange *lat_range, + GeoHashRange *long_range); +bool geohashEncode(GeoHashRange *lat_range, GeoHashRange *long_range, + double latitude, double longitude, uint8_t step, + GeoHashBits *hash); +bool geohashEncodeType(uint8_t coord_type, double latitude, double longitude, + uint8_t step, GeoHashBits *hash); +bool geohashEncodeMercator(double latitude, double longitude, uint8_t step, + GeoHashBits *hash); +bool geohashEncodeWGS84(double latitude, double longitude, uint8_t step, + GeoHashBits *hash); +bool geohashDecode(const GeoHashRange lat_range, const GeoHashRange long_range, + const GeoHashBits hash, GeoHashArea *area); +bool geohashDecodeType(uint8_t coord_type, const GeoHashBits hash, + GeoHashArea *area); +bool geohashDecodeMercator(const GeoHashBits hash, GeoHashArea *area); +bool geohashDecodeWGS84(const GeoHashBits hash, GeoHashArea *area); +bool geohashDecodeAreaToLatLong(const GeoHashArea *area, double *latlong); +bool geohashDecodeToLatLongType(uint8_t coord_type, const GeoHashBits hash, + double *latlong); +bool geohashDecodeToLatLongWGS84(const GeoHashBits hash, double *latlong); +bool geohashDecodeToLatLongMercator(const GeoHashBits hash, double *latlong); +void geohashNeighbors(const GeoHashBits *hash, GeoHashNeighbors *neighbors); + +#if defined(__cplusplus) +} +#endif +#endif /* GEOHASH_H_ */ diff --git a/deps/geohash-int/geohash_helper.c b/deps/geohash-int/geohash_helper.c new file mode 100644 index 000000000..010ab070b --- /dev/null +++ b/deps/geohash-int/geohash_helper.c @@ -0,0 +1,279 @@ +/* + * Copyright (c) 2013-2014, yinqiwen + * Copyright (c) 2014, Matt Stancliff . + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Redis nor the names of its contributors may be used + * to endorse or promote products derived from this software without + * specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF + * THE POSSIBILITY OF SUCH DAMAGE. + */ + +/* This is a C++ to C conversion from the ardb project. + * This file started out as: + * https://github.com/yinqiwen/ardb/blob/d42503/src/geo/geohash_helper.cpp + */ + +#include "geohash_helper.h" + +#define D_R (M_PI / 180.0) +#define R_MAJOR 6378137.0 +#define R_MINOR 6356752.3142 +#define RATIO (R_MINOR / R_MAJOR) +#define ECCENT (sqrt(1.0 - (RATIO *RATIO))) +#define COM (0.5 * ECCENT) + +/// @brief The usual PI/180 constant +const double DEG_TO_RAD = 0.017453292519943295769236907684886; +/// @brief Earth's quatratic mean radius for WGS-84 +const double EARTH_RADIUS_IN_METERS = 6372797.560856; + +const double MERCATOR_MAX = 20037726.37; +const double MERCATOR_MIN = -20037726.37; + +static inline double deg_rad(double ang) { return ang * D_R; } +static inline double rad_deg(double ang) { return ang / D_R; } + +double mercator_y(double lat) { + lat = fmin(89.5, fmax(lat, -89.5)); + double phi = deg_rad(lat); + double sinphi = sin(phi); + double con = ECCENT * sinphi; + con = pow((1.0 - con) / (1.0 + con), COM); + double ts = tan(0.5 * (M_PI * 0.5 - phi)) / con; + return 0 - R_MAJOR * log(ts); +} + +double mercator_x(double lon) { return R_MAJOR * deg_rad(lon); } +double merc_lon(double x) { return rad_deg(x) / R_MAJOR; } + +double merc_lat(double y) { + double ts = exp(-y / R_MAJOR); + double phi = M_PI_2 - 2 * atan(ts); + double dphi = 1.0; + int i; + for (i = 0; fabs(dphi) > 0.000000001 && i < 15; i++) { + double con = ECCENT * sin(phi); + dphi = + M_PI_2 - 2 * atan(ts * pow((1.0 - con) / (1.0 + con), COM)) - phi; + phi += dphi; + } + return rad_deg(phi); +} + +/* You must *ONLY* estimate steps when you are encoding. + * If you are decoding, always decode to GEO_STEP_MAX (26). */ +uint8_t geohashEstimateStepsByRadius(double range_meters) { + uint8_t step = 1; + while (range_meters > 0 && range_meters < MERCATOR_MAX) { + range_meters *= 2; + step++; + } + step--; + if (!step) + step = 26; /* if range = 0, give max resolution */ + return step > 26 ? 26 : step; +} + +double geohashGetXWGS84(double x) { return merc_lon(x); } +double geohashGetYWGS84(double y) { return merc_lat(y); } + +double geohashGetXMercator(double longtitude) { + if (longtitude > 180 || longtitude < -180) { + return longtitude; + } + return mercator_x(longtitude); +} +double geohashGetYMercator(double latitude) { + if (latitude > 90 || latitude < -90) { + return latitude; + } + return mercator_y(latitude); +} + +int geohashBitsComparator(const GeoHashBits *a, const GeoHashBits *b) { + /* If step not equal, compare on step. Else, compare on bits. */ + return a->step != b->step ? 
a->step - b->step : a->bits - b->bits; +} + +bool geohashBoundingBox(double latitude, double longitude, double radius_meters, + double *bounds) { + if (!bounds) + return false; + + double latr, lonr; + latr = deg_rad(latitude); + lonr = deg_rad(longitude); + + double distance = radius_meters / EARTH_RADIUS_IN_METERS; + double min_latitude = latr - distance; + double max_latitude = latr + distance; + + /* Note: we're being lazy and not accounting for coordinates near poles */ + double min_longitude, max_longitude; + double difference_longitude = asin(sin(distance) / cos(latr)); + min_longitude = lonr - difference_longitude; + max_longitude = lonr + difference_longitude; + + bounds[0] = rad_deg(min_latitude); + bounds[1] = rad_deg(min_longitude); + bounds[2] = rad_deg(max_latitude); + bounds[3] = rad_deg(max_longitude); + + return true; +} + +GeoHashRadius geohashGetAreasByRadius(uint8_t coord_type, double latitude, + double longitude, double radius_meters) { + GeoHashRange lat_range, long_range; + GeoHashRadius radius = { { 0 } }; + GeoHashBits hash = { 0 }; + GeoHashNeighbors neighbors = { { 0 } }; + GeoHashArea area = { { 0 } }; + double delta_longitude, delta_latitude; + double min_lat, max_lat, min_lon, max_lon; + int steps; + + if (coord_type == GEO_WGS84_TYPE) { + double bounds[4]; + geohashBoundingBox(latitude, longitude, radius_meters, bounds); + min_lat = bounds[0]; + min_lon = bounds[1]; + max_lat = bounds[2]; + max_lon = bounds[3]; + } else { + delta_latitude = delta_longitude = radius_meters; + min_lat = latitude - delta_latitude; + max_lat = latitude + delta_latitude; + min_lon = longitude - delta_longitude; + max_lon = longitude + delta_longitude; + } + + steps = geohashEstimateStepsByRadius(radius_meters); + + geohashGetCoordRange(coord_type, &lat_range, &long_range); + geohashEncode(&lat_range, &long_range, latitude, longitude, steps, &hash); + geohashNeighbors(&hash, &neighbors); + geohashDecode(lat_range, long_range, hash, &area); + + if (area.latitude.min < min_lat) { + GZERO(neighbors.south); + GZERO(neighbors.south_west); + GZERO(neighbors.south_east); + } + if (area.latitude.max > max_lat) { + GZERO(neighbors.north); + GZERO(neighbors.north_east); + GZERO(neighbors.north_west); + } + if (area.longitude.min < min_lon) { + GZERO(neighbors.west); + GZERO(neighbors.south_west); + GZERO(neighbors.north_west); + } + if (area.longitude.max > max_lon) { + GZERO(neighbors.east); + GZERO(neighbors.south_east); + GZERO(neighbors.north_east); + } + radius.hash = hash; + radius.neighbors = neighbors; + radius.area = area; + return radius; +} + +GeoHashRadius geohashGetAreasByRadiusWGS84(double latitude, double longitude, + double radius_meters) { + return geohashGetAreasByRadius(GEO_WGS84_TYPE, latitude, longitude, + radius_meters); +} + +GeoHashRadius geohashGetAreasByRadiusMercator(double latitude, double longitude, + double radius_meters) { + return geohashGetAreasByRadius(GEO_MERCATOR_TYPE, latitude, longitude, + radius_meters); +} + +GeoHashFix52Bits geohashAlign52Bits(const GeoHashBits hash) { + uint64_t bits = hash.bits; + bits <<= (52 - hash.step * 2); + return bits; +} + +/* calculate distance using haversin great circle distance formula */ +double distanceEarth(double lat1d, double lon1d, double lat2d, double lon2d) { + double lat1r, lon1r, lat2r, lon2r, u, v; + lat1r = deg_rad(lat1d); + lon1r = deg_rad(lon1d); + lat2r = deg_rad(lat2d); + lon2r = deg_rad(lon2d); + u = sin((lat2r - lat1r) / 2); + v = sin((lon2r - lon1r) / 2); + return 2.0 * EARTH_RADIUS_IN_METERS * + 
asin(sqrt(u * u + cos(lat1r) * cos(lat2r) * v * v)); +} + +bool geohashGetDistanceIfInRadius(uint8_t coord_type, double x1, double y1, + double x2, double y2, double radius, + double *distance) { + if (coord_type == GEO_WGS84_TYPE) { + *distance = distanceEarth(y1, x1, y2, x2); + if (*distance > radius) { + return false; + } + } else { + double xx = (x1 - x2) * (x1 - x2); + double yy = (y1 - y2) * (y1 - y2); + double dd = xx + yy; + *distance = dd; + if (dd > (radius * radius)) { + return false; + } + } + return true; +} + +bool geohashGetDistanceIfInRadiusWGS84(double x1, double y1, double x2, + double y2, double radius, + double *distance) { + return geohashGetDistanceIfInRadius(GEO_WGS84_TYPE, x1, y1, x2, y2, radius, + distance); +} + +bool geohashGetDistanceSquaredIfInRadiusMercator(double x1, double y1, + double x2, double y2, + double radius, + double *distance) { + return geohashGetDistanceIfInRadius(GEO_MERCATOR_TYPE, x1, y1, x2, y2, + radius, distance); +} + +bool geohashVerifyCoordinates(uint8_t coord_type, double x, double y) { + GeoHashRange lat_range, long_range; + geohashGetCoordRange(coord_type, &lat_range, &long_range); + + if (x < long_range.min || x > long_range.max || y < lat_range.min || + y > lat_range.max) { + return false; + } + return true; +} diff --git a/deps/geohash-int/geohash_helper.h b/deps/geohash-int/geohash_helper.h new file mode 100644 index 000000000..b72e51442 --- /dev/null +++ b/deps/geohash-int/geohash_helper.h @@ -0,0 +1,78 @@ +/* + * Copyright (c) 2013-2014, yinqiwen + * Copyright (c) 2014, Matt Stancliff . + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Redis nor the names of its contributors may be used + * to endorse or promote products derived from this software without + * specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF + * THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#ifndef GEOHASH_HELPER_HPP_ +#define GEOHASH_HELPER_HPP_ + +#include +#include +#include "geohash.h" + +#define GZERO(s) s.bits = s.step = 0; +#define GISZERO(s) (!s.bits && !s.step) +#define GISNOTZERO(s) (s.bits || s.step) + +typedef uint64_t GeoHashFix52Bits; +typedef uint64_t GeoHashVarBits; + +typedef struct { + GeoHashBits hash; + GeoHashArea area; + GeoHashNeighbors neighbors; +} GeoHashRadius; + +int GeoHashBitsComparator(const GeoHashBits *a, const GeoHashBits *b); +uint8_t geohashEstimateStepsByRadius(double range_meters); +bool geohashBoundingBox(double latitude, double longitude, double radius_meters, + double *bounds); +GeoHashRadius geohashGetAreasByRadius(uint8_t coord_type, double latitude, + double longitude, double radius_meters); +GeoHashRadius geohashGetAreasByRadiusWGS84(double latitude, double longitude, + double radius_meters); +GeoHashRadius geohashGetAreasByRadiusMercator(double latitude, double longitude, + double radius_meters); +GeoHashFix52Bits geohashAlign52Bits(const GeoHashBits hash); +double geohashGetXMercator(double longtitude); +double geohashGetYMercator(double latitude); +double geohashGetXWGS84(double x); +double geohashGetYWGS84(double y); +bool geohashVerifyCoordinates(uint8_t coord_type, double x, double y); +bool geohashGetDistanceIfInRadius(uint8_t coord_type, double x1, double y1, + double x2, double y2, double radius, + double *distance); +bool geohashGetDistanceIfInRadiusWGS84(double x1, double y1, double x2, + double y2, double radius, + double *distance); +bool geohashGetDistanceSquaredIfInRadiusMercator(double x1, double y1, + double x2, double y2, + double radius, + double *distance); + +#endif /* GEOHASH_HELPER_HPP_ */ diff --git a/src/Makefile b/src/Makefile index 271ab34d8..054857ca5 100644 --- a/src/Makefile +++ b/src/Makefile @@ -14,8 +14,8 @@ release_hdr := $(shell sh -c './mkreleasehdr.sh') uname_S := $(shell sh -c 'uname -s 2>/dev/null || echo not') -OPTIMIZATION?=-O2 -DEPENDENCY_TARGETS=hiredis linenoise lua +#OPTIMIZATION?=-O2 +DEPENDENCY_TARGETS=hiredis linenoise lua geohash-int # Default settings STD=-std=c99 -pedantic -DREDIS_STATIC='' @@ -53,7 +53,7 @@ endif # Override default settings if possible -include .make-settings -FINAL_CFLAGS=$(STD) $(WARN) $(OPT) $(DEBUG) $(CFLAGS) $(REDIS_CFLAGS) +FINAL_CFLAGS=$(STD) $(WARN) $(OPT) $(DEBUG) $(CFLAGS) $(REDIS_CFLAGS) -I../deps/geohash-int FINAL_LDFLAGS=$(LDFLAGS) $(REDIS_LDFLAGS) $(DEBUG) FINAL_LIBS=-lm DEBUG=-g -ggdb @@ -117,7 +117,8 @@ endif REDIS_SERVER_NAME=redis-server REDIS_SENTINEL_NAME=redis-sentinel -REDIS_SERVER_OBJ=adlist.o quicklist.o ae.o anet.o dict.o redis.o sds.o zmalloc.o lzf_c.o lzf_d.o pqsort.o zipmap.o sha1.o ziplist.o release.o networking.o util.o object.o db.o replication.o rdb.o t_string.o t_list.o t_set.o t_zset.o t_hash.o config.o aof.o pubsub.o multi.o debug.o sort.o intset.o syncio.o cluster.o crc16.o endianconv.o slowlog.o scripting.o bio.o rio.o rand.o memtest.o crc64.o bitops.o sentinel.o notify.o setproctitle.o blocked.o hyperloglog.o latency.o sparkline.o redis-check-rdb.o +REDIS_SERVER_OBJ=adlist.o quicklist.o ae.o anet.o dict.o redis.o sds.o zmalloc.o lzf_c.o lzf_d.o pqsort.o zipmap.o sha1.o ziplist.o release.o networking.o util.o object.o db.o replication.o rdb.o t_string.o t_list.o t_set.o t_zset.o t_hash.o config.o aof.o pubsub.o multi.o debug.o sort.o intset.o syncio.o cluster.o crc16.o endianconv.o slowlog.o scripting.o bio.o rio.o rand.o memtest.o crc64.o bitops.o sentinel.o notify.o setproctitle.o blocked.o hyperloglog.o latency.o 
sparkline.o redis-check-rdb.o geo.o zset.o geojson.o +REDIS_GEOHASH_OBJ=../deps/geohash-int/geohash.o ../deps/geohash-int/geohash_helper.o REDIS_CLI_NAME=redis-cli REDIS_CLI_OBJ=anet.o sds.o adlist.o redis-cli.o zmalloc.o release.o anet.o ae.o crc64.o REDIS_BENCHMARK_NAME=redis-benchmark @@ -171,7 +172,7 @@ endif # redis-server $(REDIS_SERVER_NAME): $(REDIS_SERVER_OBJ) - $(REDIS_LD) -o $@ $^ ../deps/hiredis/libhiredis.a ../deps/lua/src/liblua.a $(FINAL_LIBS) + $(REDIS_LD) -o $@ $^ ../deps/hiredis/libhiredis.a ../deps/lua/src/liblua.a $(REDIS_GEOHASH_OBJ) $(FINAL_LIBS) # redis-sentinel $(REDIS_SENTINEL_NAME): $(REDIS_SERVER_NAME) diff --git a/src/geo.c b/src/geo.c new file mode 100644 index 000000000..36e87eef5 --- /dev/null +++ b/src/geo.c @@ -0,0 +1,749 @@ +/* + * Copyright (c) 2014, Matt Stancliff . + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Redis nor the names of its contributors may be used + * to endorse or promote products derived from this software without + * specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include "geo.h" +#include "geohash_helper.h" +#include "geojson.h" +#include "zset.h" + +/* ==================================================================== + * Redis Add-on Module: geo + * Provides commands: geoadd, georadius, georadiusbymember, + * geoencode, geodecode + * Behaviors: + * - geoadd - add coordinates for value to geoset + * - georadius - search radius by coordinates in geoset + * - georadiusbymember - search radius based on geoset member position + * - geoencode - encode coordinates to a geohash integer + * - geodecode - decode geohash integer to representative coordinates + * ==================================================================== */ + +/* ==================================================================== + * Helpers + * ==================================================================== */ +static inline bool decodeGeohash(double bits, double *latlong) { + GeoHashBits hash = { .bits = (uint64_t)bits, .step = GEO_STEP_MAX }; + return geohashDecodeToLatLongWGS84(hash, latlong); +} + +/* Input Argument Helper */ +/* Take a pointer to the latitude arg then use the next arg for longitude */ +static inline bool extractLatLongOrReply(redisClient *c, robj **argv, + double *latlong) { + for (int i = 0; i < 2; i++) { + if (getDoubleFromObjectOrReply(c, argv[i], latlong + i, NULL) != + REDIS_OK) { + return false; + } + } + return true; +} + +/* Input Argument Helper */ +/* Decode lat/long from a zset member's score */ +static bool latLongFromMember(robj *zobj, robj *member, double *latlong) { + double score = 0; + + if (!zsetScore(zobj, member, &score)) + return false; + + if (!decodeGeohash(score, latlong)) + return false; + + return true; +} + +/* Input Argument Helper */ +static double extractDistanceOrReply(redisClient *c, robj **argv, + double *conversion) { + double distance; + if (getDoubleFromObjectOrReply(c, argv[0], &distance, + "need numeric radius") != REDIS_OK) { + return -1; + } + + double to_meters; + sds units = argv[1]->ptr; + if (!strcmp(units, "m") || !strncmp(units, "meter", 5)) { + to_meters = 1; + } else if (!strcmp(units, "ft") || !strncmp(units, "feet", 4)) { + to_meters = 0.3048; + } else if (!strcmp(units, "mi") || !strncmp(units, "mile", 4)) { + to_meters = 1609.34; + } else if (!strcmp(units, "km") || !strncmp(units, "kilometer", 9)) { + to_meters = 1000; + } else { + addReplyError(c, "unsupported unit provided. please use meters (m), " + "kilometers (km), miles (mi), or feet (ft)"); + return -1; + } + + if (conversion) + *conversion = to_meters; + + return distance * to_meters; +} + +/* Output Reply Helper */ +static void latLongToGeojsonAndReply(redisClient *c, struct geojsonPoint *gp, + char *units) { + sds geojson = geojsonLatLongToPointFeature( + gp->latitude, gp->longitude, gp->set, gp->member, gp->dist, units); + + addReplyBulkCBuffer(c, geojson, sdslen(geojson)); + sdsfree(geojson); +} + +/* Output Reply Helper */ +static void decodeGeohashToGeojsonBoundsAndReply(redisClient *c, + uint64_t hashbits, + struct geojsonPoint *gp) { + GeoHashArea area = { { 0 } }; + GeoHashBits hash = { .bits = hashbits, .step = GEO_STEP_MAX }; + + geohashDecodeWGS84(hash, &area); + + sds geojson = geojsonBoxToPolygonFeature( + area.latitude.min, area.longitude.min, area.latitude.max, + area.longitude.max, gp->set, gp->member); + addReplyBulkCBuffer(c, geojson, sdslen(geojson)); + sdsfree(geojson); +} + +/* The defailt addReplyDouble has too much accuracy. We use this + * for returning location distances. 
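+ * (snprintf() with "%.2f" below rounds the reply to two decimal places.)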
"5.21 meters away" is nicer + * than "5.2144992818115 meters away." */ +static inline void addReplyDoubleNicer(redisClient *c, double d) { + char dbuf[128] = { 0 }; + int dlen = snprintf(dbuf, sizeof(dbuf), "%.2f", d); + addReplyBulkCBuffer(c, dbuf, dlen); +} + +/* Output Reply Helper */ +static void replyGeojsonCollection(redisClient *c, struct geojsonPoint *gp, + long result_length, char *units) { + sds geojson = geojsonFeatureCollection(gp, result_length, units); + addReplyBulkCBuffer(c, geojson, sdslen(geojson)); + sdsfree(geojson); +} + +/* geohash range+zset access helper */ +/* Obtain all members between the min/max of this geohash bounding box. */ +/* Returns list of results. List must be listRelease()'d later. */ +static list *membersOfGeoHashBox(robj *zobj, GeoHashBits hash) { + GeoHashFix52Bits min, max; + + min = geohashAlign52Bits(hash); + hash.bits++; + max = geohashAlign52Bits(hash); + + return geozrangebyscore(zobj, min, max, -1); /* -1 = no limit */ +} + +/* Search all eight neighbors + self geohash box */ +static list *membersOfAllNeighbors(robj *zobj, GeoHashRadius n, double x, + double y, double radius) { + list *l = NULL; + GeoHashBits neighbors[9]; + + neighbors[0] = n.hash; + neighbors[1] = n.neighbors.north; + neighbors[2] = n.neighbors.south; + neighbors[3] = n.neighbors.east; + neighbors[4] = n.neighbors.west; + neighbors[5] = n.neighbors.north_east; + neighbors[6] = n.neighbors.north_west; + neighbors[7] = n.neighbors.south_east; + neighbors[8] = n.neighbors.south_west; + + /* For each neighbor (*and* our own hashbox), get all the matching + * members and add them to the potential result list. */ + for (int i = 0; i < sizeof(neighbors) / sizeof(*neighbors); i++) { + list *r; + + if (HASHISZERO(neighbors[i])) + continue; + + r = membersOfGeoHashBox(zobj, neighbors[i]); + if (!r) + continue; + + if (!l) { + l = r; + } else { + listJoin(l, r); + } + } + + /* if no results across any neighbors (*and* ourself, which is unlikely), + * then just give up. */ + if (!l) + return NULL; + + /* Iterate over all matching results in the combined 9-grid search area */ + /* Remove any results outside of our search radius. */ + listIter li; + listNode *ln; + listRewind(l, &li); + while ((ln = listNext(&li))) { + struct zipresult *zr = listNodeValue(ln); + GeoHashArea area = { { 0 } }; + GeoHashBits hash = { .bits = (uint64_t)zr->score, + .step = GEO_STEP_MAX }; + + if (!geohashDecodeWGS84(hash, &area)) { + /* Perhaps we should delete this node if the decode fails? */ + continue; + } + + double neighbor_y = (area.latitude.min + area.latitude.max) / 2; + double neighbor_x = (area.longitude.min + area.longitude.max) / 2; + + double distance; + if (!geohashGetDistanceIfInRadiusWGS84(x, y, neighbor_x, neighbor_y, + radius, &distance)) { + /* If result is in the grid, but not in our radius, remove it. */ + listDelNode(l, ln); +#ifdef DEBUG + fprintf(stderr, "No match for neighbor (%f, %f) within (%f, %f) at " + "distance %f\n", + neighbor_y, neighbor_x, y, x, distance); +#endif + } else { +/* Else: bueno. */ +#ifdef DEBUG + fprintf( + stderr, + "Matched neighbor (%f, %f) within (%f, %f) at distance %f\n", + neighbor_y, neighbor_x, y, x, distance); +#endif + zr->distance = distance; + } + } + + /* We found results, but rejected all of them as out of range. Clean up. */ + if (!listLength(l)) { + listRelease(l); + l = NULL; + } + + /* Success! */ + return l; +} + +/* With no subscribers, each call of this function adds a median latency of 2 + * microseconds. 
*/ +/* We aren't participating in any keyspace/keyevent notifications other than + * what's provided by the underlying zset itself, but it's probably not useful + * for clients to get the 52-bit integer geohash as an "update" value. */ +static int publishLocationUpdate(const sds zset, const sds member, + const double latitude, + const double longitude) { + int published; + + /* event is: " " */ + sds event = sdscatprintf(sdsempty(), "%.7f %.7f", latitude, longitude); + robj *eventobj = createObject(REDIS_STRING, event); + + /* channel is: __geo:: */ + /* If you want all events for this zset then just psubscribe + * to "__geo::*" */ + sds chan = sdsnewlen("__geo:", 6); + chan = sdscatsds(chan, zset); + chan = sdscatlen(chan, ":", 1); + chan = sdscatsds(chan, member); + robj *chanobj = createObject(REDIS_STRING, chan); + + published = pubsubPublishMessage(chanobj, eventobj); + + decrRefCount(chanobj); + decrRefCount(eventobj); + + return published; +} + +/* Sort comparators for qsort() */ +static int sort_gp_asc(const void *a, const void *b) { + const struct geojsonPoint *gpa = a, *gpb = b; + /* We can't do adist - bdist because they are doubles and + * the comparator returns an int. */ + if (gpa->dist > gpb->dist) + return 1; + else if (gpa->dist == gpb->dist) + return 0; + else + return -1; +} + +static int sort_gp_desc(const void *a, const void *b) { + return -sort_gp_asc(a, b); +} + +/* ==================================================================== + * Commands + * ==================================================================== */ +void geoAddCommand(redisClient *c) { + /* args 0-4: [cmd, key, lat, lng, val]; optional 5-6: [radius, units] + * - OR - + * args 0-N: [cmd, key, lat, lng, val, lat2, lng2, val2, ...] */ + robj *cmd = c->argv[0]; + robj *key = c->argv[1]; + + /* Prepare for the three different forms of the add command. */ + double radius_meters = 0; + if (c->argc == 7) { + if ((radius_meters = extractDistanceOrReply(c, c->argv + 5, NULL)) < + 0) { + return; + } + } else if (c->argc == 6) { + addReplyError(c, "must provide units when asking for radius encode"); + return; + } else if ((c->argc - 2) % 3 != 0) { + /* Need an odd number of arguments if we got this far... */ + addReplyError(c, "format is: geoadd [key] [lat1] [long1] [member1] " + "[lat2] [long2] [member2] ... "); + return; + } + + redisClient *client = c; + int elements = (c->argc - 2) / 3; + /* elements will always be correct size (integer math floors for us if we + * have 6 or 7 total arguments) */ + if (elements > 1) { + /* We should probably use a static client and not create/free it + * for every multi-add */ + client = createClient(-1); /* fake client for multi-zadd */ + + /* Tell fake client to use the same DB as our calling client. */ + selectDb(client, c->db->id); + } + + /* Capture all lat/long components up front so if we encounter an error we + * return before making any changes to the database. 
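+ * (extractLatLongOrReply() already sends the error reply to the client when
+ * it fails, so a bare return is enough here.)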
*/ + double latlong[elements * 2]; + for (int i = 0; i < elements; i++) { + if (!extractLatLongOrReply(c, (c->argv + 2) + (i * 3), + latlong + (i * 2))) + return; + } + + /* Add all (lat, long, value) triples to the requested zset */ + for (int i = 0; i < elements; i++) { + uint8_t step = geohashEstimateStepsByRadius(radius_meters); + +#ifdef DEBUG + printf("Adding with step size: %d\n", step); +#endif + GeoHashBits hash; + int ll_offset = i * 2; + double latitude = latlong[ll_offset]; + double longitude = latlong[ll_offset + 1]; + geohashEncodeWGS84(latitude, longitude, step, &hash); + + GeoHashFix52Bits bits = geohashAlign52Bits(hash); + robj *score = createObject(REDIS_STRING, sdsfromlonglong(bits)); + robj *val = c->argv[2 + i * 3 + 2]; + /* (base args) + (offset for this triple) + (offset of value arg) */ + + rewriteClientCommandVector(client, 4, cmd, key, score, val); + decrRefCount(score); + zaddCommand(client); + publishLocationUpdate(key->ptr, val->ptr, latitude, longitude); + } + + /* If we used a fake client, return a real reply then free fake client. */ + if (client != c) { + addReplyLongLong(c, elements); + freeClient(client); + } +} + +#define SORT_NONE 0 +#define SORT_ASC 1 +#define SORT_DESC 2 + +#define RADIUS_COORDS 1 +#define RADIUS_MEMBER 2 + +static void geoRadiusGeneric(redisClient *c, int type) { + /* type == cords: [cmd, key, lat, long, radius, units, [optionals]] + * type == member: [cmd, key, member, radius, units, [optionals]] */ + robj *key = c->argv[1]; + + /* Look up the requested zset */ + robj *zobj = NULL; + if ((zobj = lookupKeyReadOrReply(c, key, shared.emptymultibulk)) == NULL || + checkType(c, zobj, REDIS_ZSET)) { + return; + } + + /* Find lat/long to use for radius search based on inquiry type */ + int base_args; + double latlong[2] = { 0 }; + if (type == RADIUS_COORDS) { + base_args = 6; + if (!extractLatLongOrReply(c, c->argv + 2, latlong)) + return; + } else if (type == RADIUS_MEMBER) { + base_args = 5; + robj *member = c->argv[2]; + if (!latLongFromMember(zobj, member, latlong)) { + addReplyError(c, "could not decode requested zset member"); + return; + } + } else { + addReplyError(c, "unknown georadius search type"); + return; + } + + /* Extract radius and units from arguments */ + double radius_meters = 0, conversion = 1; + if ((radius_meters = extractDistanceOrReply(c, c->argv + base_args - 2, + &conversion)) < 0) { + return; + } + + sds units = c->argv[base_args - 2 + 1]->ptr; + + /* Discover and populate all optional parameters. 
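+ * Recognized options: withdist, withhash, withcoord, the geojson family
+ * (withgeojson/geojson/json/withjson, withgeojsonbound, withgeojsoncollection),
+ * noprop/withoutprop, asc/sort, desc. Anything else is a syntax error.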
*/ + bool withdist = false, withhash = false, withcoords = false, + withgeojson = false, withgeojsonbounds = false, + withgeojsoncollection = false, noproperties = false; + int sort = SORT_NONE; + if (c->argc > base_args) { + int remaining = c->argc - base_args; + for (int i = 0; i < remaining; i++) { + char *arg = c->argv[base_args + i]->ptr; + if (!strncasecmp(arg, "withdist", 8)) + withdist = true; + else if (!strcasecmp(arg, "withhash")) + withhash = true; + else if (!strncasecmp(arg, "withcoord", 9)) + withcoords = true; + else if (!strncasecmp(arg, "withgeojsonbound", 16)) + withgeojsonbounds = true; + else if (!strncasecmp(arg, "withgeojsoncollection", 21)) + withgeojsoncollection = true; + else if (!strncasecmp(arg, "withgeo", 7) || + !strcasecmp(arg, "geojson") || !strcasecmp(arg, "json") || + !strcasecmp(arg, "withjson")) + withgeojson = true; + else if (!strncasecmp(arg, "noprop", 6) || + !strncasecmp(arg, "withoutprop", 11)) + noproperties = true; + else if (!strncasecmp(arg, "asc", 3) || + !strncasecmp(arg, "sort", 4)) + sort = SORT_ASC; + else if (!strncasecmp(arg, "desc", 4)) + sort = SORT_DESC; + else { + addReply(c, shared.syntaxerr); + return; + } + } + } + + bool withgeo = withgeojsonbounds || withgeojsoncollection || withgeojson; + + /* Get all neighbor geohash boxes for our radius search */ + GeoHashRadius georadius = + geohashGetAreasByRadiusWGS84(latlong[0], latlong[1], radius_meters); + +#ifdef DEBUG + printf("Searching with step size: %d\n", georadius.hash.step); +#endif + /* {Lat, Long} = {y, x} */ + double y = latlong[0]; + double x = latlong[1]; + + /* Search the zset for all matching points */ + list *found_matches = + membersOfAllNeighbors(zobj, georadius, x, y, radius_meters); + + /* If no matching results, the user gets an empty reply. */ + if (!found_matches) { + addReply(c, shared.emptymultibulk); + return; + } + + long result_length = listLength(found_matches); + long option_length = 0; + + /* Our options are self-contained nested multibulk replies, so we + * only need to track how many of those nested replies we return. */ + if (withdist) + option_length++; + + if (withcoords) + option_length++; + + if (withhash) + option_length++; + + if (withgeojson) + option_length++; + + if (withgeojsonbounds) + option_length++; + + /* The multibulk len we send is exactly result_length. The result is either + * all strings of just zset members *or* a nested multi-bulk reply + * containing the zset member string _and_ all the additional options the + * user enabled for this request. */ + addReplyMultiBulkLen(c, result_length + withgeojsoncollection); + + /* Iterate over results, populate struct used for sorting and result sending + */ + listIter li; + listRewind(found_matches, &li); + struct geojsonPoint gp[result_length]; + /* populate gp array from our results */ + for (int i = 0; i < result_length; i++) { + struct zipresult *zr = listNodeValue(listNext(&li)); + + gp[i].member = NULL; + gp[i].set = key->ptr; + gp[i].dist = zr->distance / conversion; + gp[i].userdata = zr; + + /* The layout of geojsonPoint allows us to pass the start offset + * of the struct directly to decodeGeohash. 
*/ + decodeGeohash(zr->score, (double *)(gp + i)); + } + + /* Process [optional] requested sorting */ + if (sort == SORT_ASC) { + qsort(gp, result_length, sizeof(*gp), sort_gp_asc); + } else if (sort == SORT_DESC) { + qsort(gp, result_length, sizeof(*gp), sort_gp_desc); + } + + /* Finally send results back to the caller */ + for (int i = 0; i < result_length; i++) { + struct zipresult *zr = gp[i].userdata; + + /* If we have options in option_length, return each sub-result + * as a nested multi-bulk. Add 1 to account for result value itself. */ + if (option_length) + addReplyMultiBulkLen(c, option_length + 1); + + switch (zr->type) { + case ZR_LONG: + addReplyBulkLongLong(c, zr->val.v); + if (withgeo && !noproperties) + gp[i].member = sdscatprintf(sdsempty(), "%llu", zr->val.v); + break; + case ZR_STRING: + addReplyBulkCBuffer(c, zr->val.s, sdslen(zr->val.s)); + if (withgeo && !noproperties) + gp[i].member = sdsdup(zr->val.s); + break; + } + + if (withdist) + addReplyDoubleNicer(c, gp[i].dist); + + if (withhash) + addReplyLongLong(c, zr->score); + + if (withcoords) { + addReplyMultiBulkLen(c, 2); + addReplyDouble(c, gp[i].latitude); + addReplyDouble(c, gp[i].longitude); + } + + if (withgeojson) + latLongToGeojsonAndReply(c, gp + i, units); + + if (withgeojsonbounds) + decodeGeohashToGeojsonBoundsAndReply(c, zr->score, gp + i); + } + + if (withgeojsoncollection) + replyGeojsonCollection(c, gp, result_length, units); + + if (withgeo && !noproperties) + for (int i = 0; i < result_length; i++) + sdsfree(gp[i].member); + + listRelease(found_matches); +} + +void geoRadiusCommand(redisClient *c) { + /* args 0-5: ["georadius", key, lat, long, radius, units]; + * optionals: [withdist, withcoords, asc|desc] */ + geoRadiusGeneric(c, RADIUS_COORDS); +} + +void geoRadiusByMemberCommand(redisClient *c) { + /* args 0-4: ["georadius", key, compare-against-member, radius, units]; + * optionals: [withdist, withcoords, asc|desc] */ + geoRadiusGeneric(c, RADIUS_MEMBER); +} + +void geoDecodeCommand(redisClient *c) { + /* args 0-1: ["geodecode", geohash]; + * optional: [geojson] */ + + GeoHashBits geohash; + if (getLongLongFromObjectOrReply(c, c->argv[1], (long long *)&geohash.bits, + NULL) != REDIS_OK) + return; + + bool withgeojson = false; + if (c->argc == 3) + withgeojson = true; + + GeoHashArea area; + geohash.step = GEO_STEP_MAX; + geohashDecodeWGS84(geohash, &area); + + double y = (area.latitude.min + area.latitude.max) / 2; + double x = (area.longitude.min + area.longitude.max) / 2; + + /* Returning three nested replies */ + addReplyMultiBulkLen(c, 3 + withgeojson * 2); + + /* First, the minimum corner */ + addReplyMultiBulkLen(c, 2); + addReplyDouble(c, area.latitude.min); + addReplyDouble(c, area.longitude.min); + + /* Next, the maximum corner */ + addReplyMultiBulkLen(c, 2); + addReplyDouble(c, area.latitude.max); + addReplyDouble(c, area.longitude.max); + + /* Last, the averaged center of this bounding box */ + addReplyMultiBulkLen(c, 2); + addReplyDouble(c, y); + addReplyDouble(c, x); + + if (withgeojson) { + struct geojsonPoint gp = { .latitude = y, + .longitude = x, + .member = NULL }; + + /* Return geojson Feature Point */ + latLongToGeojsonAndReply(c, &gp, NULL); + + /* Return geojson Feature Polygon */ + decodeGeohashToGeojsonBoundsAndReply(c, geohash.bits, &gp); + } +} + +void geoEncodeCommand(redisClient *c) { + /* args 0-2: ["geoencode", lat, long]; + * optionals: [radius, units] + * - AND / OR - + * optional: [geojson] */ + + bool withgeojson = false; + for (int i = 3; i < c->argc; i++) { + 
char *arg = c->argv[i]->ptr; + if (!strncasecmp(arg, "withgeo", 7) || !strcasecmp(arg, "geojson") || + !strcasecmp(arg, "json") || !strcasecmp(arg, "withjson")) { + withgeojson = true; + break; + } + } + + double radius_meters = 0; + if (c->argc >= 5) { + if ((radius_meters = extractDistanceOrReply(c, c->argv + 3, NULL)) < + 0) { + return; + } + } else if (c->argc == 4 && !withgeojson) { + addReplyError(c, "must provide units when asking for radius encode"); + return; + } + + double latlong[2]; + if (!extractLatLongOrReply(c, c->argv + 1, latlong)) + return; + + /* Encode lat/long into our geohash */ + GeoHashBits geohash; + uint8_t step = geohashEstimateStepsByRadius(radius_meters); + geohashEncodeWGS84(latlong[0], latlong[1], step, &geohash); + + /* Align the hash to a valid 52-bit integer based on step size */ + GeoHashFix52Bits bits = geohashAlign52Bits(geohash); + +/* Decode the hash so we can return its bounding box */ +#ifdef DEBUG + printf("Decoding with step size: %d\n", geohash.step); +#endif + GeoHashArea area; + geohashDecodeWGS84(geohash, &area); + + double y = (area.latitude.min + area.latitude.max) / 2; + double x = (area.longitude.min + area.longitude.max) / 2; + + /* Return four nested multibulk replies with optional geojson returns */ + addReplyMultiBulkLen(c, 4 + withgeojson * 2); + + /* Return the binary geohash we calculated as 52-bit integer */ + addReplyLongLong(c, bits); + + /* Return the minimum corner */ + addReplyMultiBulkLen(c, 2); + addReplyDouble(c, area.latitude.min); + addReplyDouble(c, area.longitude.min); + + /* Return the maximum corner */ + addReplyMultiBulkLen(c, 2); + addReplyDouble(c, area.latitude.max); + addReplyDouble(c, area.longitude.max); + + /* Return the averaged center */ + addReplyMultiBulkLen(c, 2); + addReplyDouble(c, y); + addReplyDouble(c, x); + + if (withgeojson) { + struct geojsonPoint gp = { .latitude = y, + .longitude = x, + .member = NULL }; + + /* Return geojson Feature Point */ + latLongToGeojsonAndReply(c, &gp, NULL); + + /* Return geojson Feature Polygon (bounding box for this step size) */ + /* We don't use the helper function here because we can't re-calculate + * the area if we have a non-GEO_STEP_MAX step size. */ + sds geojson = geojsonBoxToPolygonFeature( + area.latitude.min, area.longitude.min, area.latitude.max, + area.longitude.max, gp.set, gp.member); + addReplyBulkCBuffer(c, geojson, sdslen(geojson)); + sdsfree(geojson); + } +} diff --git a/src/geo.h b/src/geo.h new file mode 100644 index 000000000..f82071663 --- /dev/null +++ b/src/geo.h @@ -0,0 +1,12 @@ +#ifndef __GEO_H__ +#define __GEO_H__ + +#include "redis.h" + +void geoEncodeCommand(redisClient *c); +void geoDecodeCommand(redisClient *c); +void geoRadiusByMemberCommand(redisClient *c); +void geoRadiusCommand(redisClient *c); +void geoAddCommand(redisClient *c); + +#endif diff --git a/src/geojson.c b/src/geojson.c new file mode 100644 index 000000000..bb0befc95 --- /dev/null +++ b/src/geojson.c @@ -0,0 +1,265 @@ +/* + * Copyright (c) 2014, Matt Stancliff . + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. 
+ * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Redis nor the names of its contributors may be used + * to endorse or promote products derived from this software without + * specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include "geojson.h" + +#define L server.lua + +/* ==================================================================== + * The Encoder + * ==================================================================== */ +static sds jsonEncode() { + /* When entering this function, stack is: [1:[geojson table to encode]] */ + lua_getglobal(L, "cjson"); + lua_getfield(L, -1, "encode"); + + /* Stack is now: [1:[geojson table], 2:'cjson', 3:'encode'] */ + + /* Move current top ('encode') to bottom of stack */ + lua_insert(L, 1); + + /* Move current top ('cjson') to bottom of stack so we can 'cjson.encode' */ + lua_insert(L, 1); + + /* Stack is now: [1:'cjson', 2:'encode', 3:[table of geojson to encode]] */ + + /* Call cjson.encode on the element above it on the stack; + * obtain one return value */ + if (lua_pcall(L, 1, 1, 0) != 0) + redisLog(REDIS_WARNING, "Could not encode geojson: %s", + lua_tostring(L, -1)); + + sds geojson = sdsnew(lua_tostring(L, -1)); + + /* We're done. Remove entire stack. Drop mic. Walk away. */ + lua_pop(L, lua_gettop(L)); + + /* Return sds the caller must sdsfree() on their own */ + return geojson; +} + +/* ==================================================================== + * The Lua Helpers + * ==================================================================== */ +static inline void luaCreateFieldFromPrevious(const char *field) { + lua_setfield(L, -2, field); +} + +static inline void luaCreateFieldStr(const char *field, const char *value) { + lua_pushstring(L, value); + luaCreateFieldFromPrevious(field); +} + +/* Creates [Lat, Long] array attached to "coordinates" key */ +static void luaCreateCoordinates(const double x, const double y) { + /* Create array table with two elements */ + lua_createtable(L, 2, 0); + + lua_pushnumber(L, x); + lua_rawseti(L, -2, 1); + lua_pushnumber(L, y); + lua_rawseti(L, -2, 2); +} + +static void luaCreatePropertyNull(void) { + /* Create empty table and give it a name. This is a json {} value. 
*/ + lua_createtable(L, 0, 0); + luaCreateFieldFromPrevious("properties"); +} + +static void _luaCreateProperties(const char *k1, const char *v1, const char *k2, + const char *v2, const int noclose) { + /* we may add additional properties outside of here, so newtable instead of + * fixed-size createtable */ + lua_newtable(L); + + luaCreateFieldStr(k1, v1); + luaCreateFieldStr(k2, v2); + + if (!noclose) + luaCreateFieldFromPrevious("properties"); +} + +static void luaCreateProperties(const char *k1, const char *v1, const char *k2, + const char *v2) { + _luaCreateProperties(k1, v1, k2, v2, 0); +} + +/* ==================================================================== + * The Lua Aggregation Helpers + * ==================================================================== */ +static void attachProperties(const char *set, const char *member) { + if (member) + luaCreateProperties("set", set, "member", member); + else + luaCreatePropertyNull(); +} + +static void attachPropertiesWithDist(const char *set, const char *member, + double dist, const char *units) { + if (member) { + _luaCreateProperties("set", set, "member", member, 1); + if (units) { + /* Add units then distance. After encoding it comes + * out as distance followed by units in the json. */ + lua_pushstring(L, units); + luaCreateFieldFromPrevious("units"); + lua_pushnumber(L, dist); + luaCreateFieldFromPrevious("distance"); + } + + /* We requested to leave the properties table open, but now we + * are done and can close it. */ + luaCreateFieldFromPrevious("properties"); + } else { + luaCreatePropertyNull(); + } +} + +static void createGeometryPoint(const double x, const double y) { + lua_createtable(L, 0, 2); + + /* coordinates = [x, y] */ + luaCreateCoordinates(x, y); + luaCreateFieldFromPrevious("coordinates"); + + /* type = Point */ + luaCreateFieldStr("type", "Point"); + + /* geometry = (coordinates = [x, y]) */ + luaCreateFieldFromPrevious("geometry"); +} + +static void createGeometryBox(const double x1, const double y1, const double x2, + const double y2) { + lua_createtable(L, 0, 2); + + /* Result = [[[x1,y1],[x2,y1],[x2,y2],[x1,y2], [x1,y1]] */ + /* The end coord is the start coord to make a closed polygon */ + lua_createtable(L, 1, 0); + lua_createtable(L, 5, 0); + + /* Bottom left */ + luaCreateCoordinates(x1, y1); + lua_rawseti(L, -2, 1); + + /* Top Left */ + luaCreateCoordinates(x2, y1); + lua_rawseti(L, -2, 2); + + /* Top Right */ + luaCreateCoordinates(x2, y2); + lua_rawseti(L, -2, 3); + + /* Bottom Right */ + luaCreateCoordinates(x1, y2); + lua_rawseti(L, -2, 4); + + /* Bottom Left (Again) */ + luaCreateCoordinates(x1, y1); + lua_rawseti(L, -2, 5); + + /* Set the outer array of our inner array of the inner coords */ + lua_rawseti(L, -2, 1); + + /* Bundle those together in coordinates: [a, b, c, d] */ + luaCreateFieldFromPrevious("coordinates"); + + /* Add type field */ + luaCreateFieldStr("type", "Polygon"); + + luaCreateFieldFromPrevious("geometry"); +} + +static void createFeature() { + /* Features have three fields: type, geometry, and properties */ + lua_createtable(L, 0, 3); + + luaCreateFieldStr("type", "Feature"); + + /* You must call attachProperties on your own */ +} + +static void createCollection(size_t size) { + /* FeatureCollections have two fields: type and features */ + lua_createtable(L, 0, 2); + + luaCreateFieldStr("type", "FeatureCollection"); +} + +static void pointsToCollection(const struct geojsonPoint *pts, const size_t len, + const char *units) { + createCollection(len); + + lua_createtable(L, 
len, 0); + for (int i = 0; i < len; i++) { + createFeature(); + createGeometryPoint(pts[i].longitude, pts[i].latitude); /* x, y */ + attachPropertiesWithDist(pts[i].set, pts[i].member, pts[i].dist, units); + lua_rawseti(L, -2, i + 1); /* Attach this Feature to "features" array */ + } + luaCreateFieldFromPrevious("features"); +} + +static void latLongToPointFeature(const double latitude, + const double longitude) { + createFeature(); + createGeometryPoint(longitude, latitude); /* geojson is: x,y */ +} + +static void squareToPolygonFeature(const double x1, const double y1, + const double x2, const double y2) { + createFeature(); + createGeometryBox(x1, y1, x2, y2); +} + +/* ==================================================================== + * The Interface Functions + * ==================================================================== */ +sds geojsonFeatureCollection(const struct geojsonPoint *pts, const size_t len, + const char *units) { + pointsToCollection(pts, len, units); + return jsonEncode(); +} + +sds geojsonLatLongToPointFeature(const double latitude, const double longitude, + const char *set, const char *member, + const double dist, const char *units) { + latLongToPointFeature(latitude, longitude); + attachPropertiesWithDist(set, member, dist, units); + return jsonEncode(); +} + +sds geojsonBoxToPolygonFeature(const double y1, const double x1, + const double y2, const double x2, + const char *set, const char *member) { + squareToPolygonFeature(x1, y1, x2, y2); + attachProperties(set, member); + return jsonEncode(); +} diff --git a/src/geojson.h b/src/geojson.h new file mode 100644 index 000000000..55993beae --- /dev/null +++ b/src/geojson.h @@ -0,0 +1,54 @@ +/* + * Copyright (c) 2014, Matt Stancliff . + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Redis nor the names of its contributors may be used + * to endorse or promote products derived from this software without + * specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#ifndef __GEOJSON_H__ +#define __GEOJSON_H__ + +#include "redis.h" +#include "geohash_helper.h" + +struct geojsonPoint { + double latitude; + double longitude; + double dist; + char *set; + char *member; + void *userdata; +}; + +sds geojsonLatLongToPointFeature(const double latitude, const double longitude, + const char *set, const char *member, + const double dist, const char *units); +sds geojsonBoxToPolygonFeature(const double x1, const double y1, + const double x2, const double y2, + const char *set, const char *member); +sds geojsonFeatureCollection(const struct geojsonPoint *pts, const size_t len, + const char *units); + +#endif diff --git a/src/redis.c b/src/redis.c index 09653119a..359ccb35e 100644 --- a/src/redis.c +++ b/src/redis.c @@ -282,6 +282,11 @@ struct redisCommand redisCommandTable[] = { {"bitpos",bitposCommand,-3,"r",0,NULL,1,1,1,0,0}, {"wait",waitCommand,3,"rs",0,NULL,0,0,0,0,0}, {"command",commandCommand,0,"rlt",0,NULL,0,0,0,0,0}, + {"geoadd",geoAddCommand,-5,"wm",0,NULL,1,1,1,0,0}, + {"georadius",geoRadiusCommand,-6,"r",0,NULL,1,1,1,0,0}, + {"georadiusbymember",geoRadiusByMemberCommand,-5,"r",0,NULL,1,1,1,0,0}, + {"geoencode",geoEncodeCommand,-3,"r",0,NULL,0,0,0,0,0}, + {"geodecode",geoDecodeCommand,-2,"r",0,NULL,0,0,0,0,0}, {"pfselftest",pfselftestCommand,1,"r",0,NULL,0,0,0,0,0}, {"pfadd",pfaddCommand,-2,"wmF",0,NULL,1,1,1,0,0}, {"pfcount",pfcountCommand,-2,"r",0,NULL,1,1,1,0,0}, diff --git a/src/redis.h b/src/redis.h index 53f3967d7..2344c9eba 100644 --- a/src/redis.h +++ b/src/redis.h @@ -1556,6 +1556,11 @@ void bitcountCommand(redisClient *c); void bitposCommand(redisClient *c); void replconfCommand(redisClient *c); void waitCommand(redisClient *c); +void geoEncodeCommand(redisClient *c); +void geoDecodeCommand(redisClient *c); +void geoRadiusByMemberCommand(redisClient *c); +void geoRadiusCommand(redisClient *c); +void geoAddCommand(redisClient *c); void pfselftestCommand(redisClient *c); void pfaddCommand(redisClient *c); void pfcountCommand(redisClient *c); @@ -1588,4 +1593,8 @@ void redisLogHexDump(int level, char *descr, void *value, size_t len); #define redisDebugMark() \ printf("-- MARK %s:%d --\n", __FILE__, __LINE__) +/***** TEMPORARY *******/ +#include "zset.h" +/***** END TEMPORARY *******/ + #endif diff --git a/src/t_zset.c b/src/t_zset.c index dbc561a93..b900a9ccb 100644 --- a/src/t_zset.c +++ b/src/t_zset.c @@ -213,7 +213,7 @@ static int zslValueGteMin(double value, zrangespec *spec) { return spec->minex ? (value > spec->min) : (value >= spec->min); } -static int zslValueLteMax(double value, zrangespec *spec) { +int zslValueLteMax(double value, zrangespec *spec) { return spec->maxex ? 
(value < spec->max) : (value <= spec->max); } diff --git a/src/zset.c b/src/zset.c new file mode 100644 index 000000000..2412a1c44 --- /dev/null +++ b/src/zset.c @@ -0,0 +1,185 @@ +#include "zset.h" + +/* t_zset.c prototypes (there's no t_zset.h) */ +unsigned char *zzlFirstInRange(unsigned char *zl, zrangespec *range); +unsigned char *zzlFind(unsigned char *zl, robj *ele, double *score); +int zzlLexValueLteMax(unsigned char *p, zlexrangespec *spec); + +/* Converted from static in t_zset.c: */ +int zslValueLteMax(double value, zrangespec *spec); + +/* ==================================================================== + * Direct Redis DB Interaction + * ==================================================================== */ + +/* zset access is mostly a copy/paste from zscoreCommand() */ +bool zsetScore(robj *zobj, robj *member, double *score) { + if (!zobj || !member) + return false; + + if (zobj->encoding == REDIS_ENCODING_ZIPLIST) { + if (zzlFind(zobj->ptr, member, score) == NULL) + return false; + } else if (zobj->encoding == REDIS_ENCODING_SKIPLIST) { + zset *zs = zobj->ptr; + dictEntry *de; + + member = tryObjectEncoding(member); + de = dictFind(zs->dict, member); + if (de != NULL) { + *score = *(double *)dictGetVal(de); + } else + return false; + } else { + return false; + } + return true; +} + +/* Largely extracted from genericZrangebyscoreCommand() in t_zset.c */ +/* The zrangebyscoreCommand expects to only operate on a live redisClient, + * but we need results returned to us, not sent over an async socket. */ +list *geozrangebyscore(robj *zobj, double min, double max, int limit) { + /* minex 0 = include min in range; maxex 1 = exclude max in range */ + /* That's: min <= val < max */ + zrangespec range = { .min = min, .max = max, .minex = 0, .maxex = 1 }; + list *l = NULL; /* result list */ + + if (zobj->encoding == REDIS_ENCODING_ZIPLIST) { + unsigned char *zl = zobj->ptr; + unsigned char *eptr, *sptr; + unsigned char *vstr = NULL; + unsigned int vlen = 0; + long long vlong = 0; + double score = 0; + + if ((eptr = zzlFirstInRange(zl, &range)) == NULL) { + /* Nothing exists starting at our min. No results. */ + return NULL; + } + + l = listCreate(); + + sptr = ziplistNext(zl, eptr); + + while (eptr && limit--) { + score = zzlGetScore(sptr); + + /* If we fell out of range, break. */ + if (!zslValueLteMax(score, &range)) + break; + + /* We know the element exists. ziplistGet should always succeed */ + ziplistGet(eptr, &vstr, &vlen, &vlong); + if (vstr == NULL) { + listAddNodeTail(l, result_long(score, vlong)); + } else { + listAddNodeTail(l, result_str(score, vstr, vlen)); + } + zzlNext(zl, &eptr, &sptr); + } + } else if (zobj->encoding == REDIS_ENCODING_SKIPLIST) { + zset *zs = zobj->ptr; + zskiplist *zsl = zs->zsl; + zskiplistNode *ln; + + if ((ln = zslFirstInRange(zsl, &range)) == NULL) { + /* Nothing exists starting at our min. No results. */ + return NULL; + } + + l = listCreate(); + + while (ln && limit--) { + robj *o = ln->obj; + /* Abort when the node is no longer in range. 
*/ + if (!zslValueLteMax(ln->score, &range)) + break; + + if (o->encoding == REDIS_ENCODING_INT) { + listAddNodeTail(l, result_long(ln->score, (long)o->ptr)); + } else { + listAddNodeTail(l, + result_str(ln->score, o->ptr, sdslen(o->ptr))); + } + + ln = ln->level[0].forward; + } + } + if (l) { + listSetFreeMethod(l, (void (*)(void *ptr)) & free_zipresult); + } + + return l; +} + +/* ==================================================================== + * Helpers + * ==================================================================== */ + +/* join 'join' to 'join_to' and free 'join' container */ +void listJoin(list *join_to, list *join) { + /* If the current list has zero size, move join to become join_to. + * If not, append the new list to the current list. */ + if (join_to->len == 0) { + join_to->head = join->head; + } else { + join_to->tail->next = join->head; + join->head->prev = join_to->tail; + join_to->tail = join->tail; + } + + /* Update total element count */ + join_to->len += join->len; + + /* Release original list container. Internal nodes were transferred over. */ + zfree(join); +} + +/* A ziplist member may be either a long long or a string. We create the + * contents of our return zipresult based on what the ziplist contained. */ +static struct zipresult *result(double score, long long v, unsigned char *s, + int len) { + struct zipresult *r = zmalloc(sizeof(*r)); + + /* If string and length, become a string. */ + /* Else, if not string or no length, become a long. */ + if (s && len >= 0) + r->type = ZR_STRING; + else if (!s || len < 0) + r->type = ZR_LONG; + + r->score = score; + switch (r->type) { + case(ZR_LONG) : + r->val.v = v; + break; + case(ZR_STRING) : + r->val.s = sdsnewlen(s, len); + break; + } + return r; +} + +struct zipresult *result_long(double score, long long v) { + return result(score, v, NULL, -1); +} + +struct zipresult *result_str(double score, unsigned char *str, int len) { + return result(score, 0, str, len); +} + +void free_zipresult(struct zipresult *r) { + if (!r) + return; + + switch (r->type) { + case(ZR_LONG) : + break; + case(ZR_STRING) : + sdsfree(r->val.s); + break; + } + + zfree(r); +} diff --git a/src/zset.h b/src/zset.h new file mode 100644 index 000000000..6a78d1ed2 --- /dev/null +++ b/src/zset.h @@ -0,0 +1,31 @@ +#ifndef __ZSET_H__ +#define __ZSET_H__ + +#include "redis.h" +#include + +#define ZR_LONG 1 +#define ZR_STRING 2 +struct zipresult { + double score; + union { + long long v; + sds s; + } val; + double distance; /* distance is in meters */ + char type; /* access type for the union */ +}; + +/* Redis DB Access */ +bool zsetScore(robj *zobj, robj *member, double *score); +list *geozrangebyscore(robj *zobj, double min, double max, int limit); + +/* New list operation: append one list to another */ +void listJoin(list *join_to, list *join); + +/* Helpers for returning zrangebyscore results */ +struct zipresult *result_str(double score, unsigned char *str, int len); +struct zipresult *result_long(double score, long long v); +void free_zipresult(struct zipresult *r); + +#endif diff --git a/tests/unit/geo.tcl b/tests/unit/geo.tcl new file mode 100644 index 000000000..015fcf87c --- /dev/null +++ b/tests/unit/geo.tcl @@ -0,0 +1,53 @@ +start_server {tags {"geo"}} { + test {GEOADD create} { + r geoadd nyc 40.747533 -73.9454966 "lic market" + } {1} + + test {GEOADD update} { + r geoadd nyc 40.747533 -73.9454966 "lic market" + } {0} + + test {GEOADD multi add} { + r geoadd nyc 40.7648057 -73.9733487 "central park n/q/r" 40.7362513 -73.9903085 
"union square" 40.7126674 -74.0131604 "wtc one" 40.6428986 -73.7858139 "jfk" 40.7498929 -73.9375699 "q4" 40.7480973 -73.9564142 4545 + } {6} + + test {Check geoset values} { + r zrange nyc 0 -1 withscores + } {{wtc one} 1791873972053020 {union square} 1791875485187452 {central park n/q/r} 1791875761332224 4545 1791875796750882 {lic market} 1791875804419201 q4 1791875830079666 jfk 1791895905559723} + + test {GEORADIUS simple (sorted)} { + r georadius nyc 40.7598464 -73.9798091 3 km ascending + } {{central park n/q/r} 4545 {union square}} + + test {GEORADIUS withdistance (sorted)} { + r georadius nyc 40.7598464 -73.9798091 3 km withdistance ascending + } {{{central park n/q/r} 0.78} {4545 2.37} {{union square} 2.77}} + + test {GEORADIUSBYMEMBER simple (sorted)} { + r georadiusbymember nyc "wtc one" 7 km + } {{wtc one} {union square} {central park n/q/r} 4545 {lic market}} + + test {GEORADIUSBYMEMBER simple (sorted, json)} { + r georadiusbymember nyc "wtc one" 7 km withgeojson + } {{{wtc one} {{"type":"Feature","geometry":{"type":"Point","coordinates":[-74.01316255331,40.712667181451]},"properties":{"distance":0,"member":"wtc one","units":"km","set":"nyc"}}}}\ + {{union square} {{"type":"Feature","geometry":{"type":"Point","coordinates":[-73.990310132504,40.736250227118]},"properties":{"distance":3.2543954573354,"member":"union square","units":"km","set":"nyc"}}}}\ + {{central park n/q/r} {{"type":"Feature","geometry":{"type":"Point","coordinates":[-73.973347842693,40.764806395699]},"properties":{"distance":6.7000029092796,"member":"central park n\/q\/r","units":"km","set":"nyc"}}}}\ + {4545 {{"type":"Feature","geometry":{"type":"Point","coordinates":[-73.956412374973,40.748097513816]},"properties":{"distance":6.1975173818008,"member":"4545","units":"km","set":"nyc"}}}}\ + {{lic market} {{"type":"Feature","geometry":{"type":"Point","coordinates":[-73.945495784283,40.747532270998]},"properties":{"distance":6.8968709532081,"member":"lic market","units":"km","set":"nyc"}}}}} + + test {GEORADIUSBYMEMBER withdistance (sorted)} { + r georadiusbymember nyc "wtc one" 7 km withdist + } {{{wtc one} 0.00} {{union square} 3.25} {{central park n/q/r} 6.70} {4545 6.20} {{lic market} 6.90}} + + test {GEOENCODE simple} { + r geoencode 41.2358883 1.8063239 + } {3471579339700058 {41.235888125243704 1.8063229322433472}\ + {41.235890659964866 1.806328296661377}\ + {41.235889392604285 1.8063256144523621}} + + test {GEODECODE simple} { + r geodecode 3471579339700058 + } {{41.235888125243704 1.8063229322433472}\ + {41.235890659964866 1.806328296661377}\ + {41.235889392604285 1.8063256144523621}} +} From 5e46e8a58a3b85dae8e37a71183f8a7139ea88b9 Mon Sep 17 00:00:00 2001 From: antirez Date: Mon, 22 Jun 2015 11:02:27 +0200 Subject: [PATCH 0250/1928] Geo: removed useless functions, Marcatore coordinates, bool usage --- deps/geohash-int/geohash.c | 99 ++++++++-------------- deps/geohash-int/geohash.h | 43 ++++------ deps/geohash-int/geohash_helper.c | 136 ++++++------------------------ deps/geohash-int/geohash_helper.h | 27 +++--- 4 files changed, 92 insertions(+), 213 deletions(-) diff --git a/deps/geohash-int/geohash.c b/deps/geohash-int/geohash.c index 9212736bf..e57434efd 100644 --- a/deps/geohash-int/geohash.c +++ b/deps/geohash-int/geohash.c @@ -43,38 +43,23 @@ * ----------------- */ -bool geohashGetCoordRange(uint8_t coord_type, GeoHashRange *lat_range, - GeoHashRange *long_range) { - switch (coord_type) { - case GEO_WGS84_TYPE: { - /* These are constraints from EPSG:900913 / EPSG:3785 / OSGEO:41001 */ - /* We 
can't geocode at the north/south pole. */ - lat_range->max = 85.05112878; - lat_range->min = -85.05112878; - long_range->max = 180.0; - long_range->min = -180.0; - break; - } - case GEO_MERCATOR_TYPE: { - lat_range->max = 20037726.37; - lat_range->min = -20037726.37; - long_range->max = 20037726.37; - long_range->min = -20037726.37; - break; - } - default: { return false; } - } - return true; +void geohashGetCoordRange(GeoHashRange *lat_range, GeoHashRange *long_range) { + /* These are constraints from EPSG:900913 / EPSG:3785 / OSGEO:41001 */ + /* We can't geocode at the north/south pole. */ + lat_range->max = 85.05112878; + lat_range->min = -85.05112878; + long_range->max = 180.0; + long_range->min = -180.0; } -bool geohashEncode(GeoHashRange *lat_range, GeoHashRange *long_range, - double latitude, double longitude, uint8_t step, - GeoHashBits *hash) { +int geohashEncode(GeoHashRange *lat_range, GeoHashRange *long_range, + double latitude, double longitude, uint8_t step, + GeoHashBits *hash) { uint8_t i; if (NULL == hash || step > 32 || step == 0 || RANGEPISZERO(lat_range) || RANGEPISZERO(long_range)) { - return false; + return 0; } hash->bits = 0; @@ -82,7 +67,7 @@ bool geohashEncode(GeoHashRange *lat_range, GeoHashRange *long_range, if (latitude < lat_range->min || latitude > lat_range->max || longitude < long_range->min || longitude > long_range->max) { - return false; + return 0; } for (i = 0; i < step; i++) { @@ -108,38 +93,31 @@ bool geohashEncode(GeoHashRange *lat_range, GeoHashRange *long_range, hash->bits <<= 1; hash->bits += lat_bit; } - return true; + return 1; } -bool geohashEncodeType(uint8_t coord_type, double latitude, double longitude, - uint8_t step, GeoHashBits *hash) { +int geohashEncodeType(double latitude, double longitude, uint8_t step, GeoHashBits *hash) { GeoHashRange r[2] = { { 0 } }; - geohashGetCoordRange(coord_type, &r[0], &r[1]); + geohashGetCoordRange(&r[0], &r[1]); return geohashEncode(&r[0], &r[1], latitude, longitude, step, hash); } -bool geohashEncodeWGS84(double latitude, double longitude, uint8_t step, - GeoHashBits *hash) { - return geohashEncodeType(GEO_WGS84_TYPE, latitude, longitude, step, hash); -} - -bool geohashEncodeMercator(double latitude, double longitude, uint8_t step, - GeoHashBits *hash) { - return geohashEncodeType(GEO_MERCATOR_TYPE, latitude, longitude, step, - hash); +int geohashEncodeWGS84(double latitude, double longitude, uint8_t step, + GeoHashBits *hash) { + return geohashEncodeType(latitude, longitude, step, hash); } static inline uint8_t get_bit(uint64_t bits, uint8_t pos) { return (bits >> pos) & 0x01; } -bool geohashDecode(const GeoHashRange lat_range, const GeoHashRange long_range, +int geohashDecode(const GeoHashRange lat_range, const GeoHashRange long_range, const GeoHashBits hash, GeoHashArea *area) { uint8_t i; if (HASHISZERO(hash) || NULL == area || RANGEISZERO(lat_range) || RANGEISZERO(long_range)) { - return false; + return 0; } area->hash = hash; @@ -168,52 +146,45 @@ bool geohashDecode(const GeoHashRange lat_range, const GeoHashRange long_range, (area->longitude.max + area->longitude.min) / 2; } } - return true; + return 1; } -bool geohashDecodeType(uint8_t coord_type, const GeoHashBits hash, - GeoHashArea *area) { +int geohashDecodeType(const GeoHashBits hash, GeoHashArea *area) { GeoHashRange r[2] = { { 0 } }; - geohashGetCoordRange(coord_type, &r[0], &r[1]); + geohashGetCoordRange(&r[0], &r[1]); return geohashDecode(r[0], r[1], hash, area); } -bool geohashDecodeWGS84(const GeoHashBits hash, GeoHashArea *area) { - 
return geohashDecodeType(GEO_WGS84_TYPE, hash, area); +int geohashDecodeWGS84(const GeoHashBits hash, GeoHashArea *area) { + return geohashDecodeType(hash, area); } -bool geohashDecodeMercator(const GeoHashBits hash, GeoHashArea *area) { - return geohashDecodeType(GEO_MERCATOR_TYPE, hash, area); -} - -bool geohashDecodeAreaToLatLong(const GeoHashArea *area, double *latlong) { +int geohashDecodeAreaToLatLong(const GeoHashArea *area, double *latlong) { double y, x; - if (!latlong) - return false; + if (!latlong) return 0; y = (area->latitude.min + area->latitude.max) / 2; x = (area->longitude.min + area->longitude.max) / 2; latlong[0] = y; latlong[1] = x; - return true; + return 1; } -bool geohashDecodeToLatLongType(uint8_t coord_type, const GeoHashBits hash, - double *latlong) { +int geohashDecodeToLatLongType(const GeoHashBits hash, double *latlong) { GeoHashArea area = { { 0 } }; - if (!latlong || !geohashDecodeType(coord_type, hash, &area)) - return false; + if (!latlong || !geohashDecodeType(hash, &area)) + return 0; return geohashDecodeAreaToLatLong(&area, latlong); } -bool geohashDecodeToLatLongWGS84(const GeoHashBits hash, double *latlong) { - return geohashDecodeToLatLongType(GEO_WGS84_TYPE, hash, latlong); +int geohashDecodeToLatLongWGS84(const GeoHashBits hash, double *latlong) { + return geohashDecodeToLatLongType(hash, latlong); } -bool geohashDecodeToLatLongMercator(const GeoHashBits hash, double *latlong) { - return geohashDecodeToLatLongType(GEO_MERCATOR_TYPE, hash, latlong); +int geohashDecodeToLatLongMercator(const GeoHashBits hash, double *latlong) { + return geohashDecodeToLatLongType(hash, latlong); } static void geohash_move_x(GeoHashBits *hash, int8_t d) { diff --git a/deps/geohash-int/geohash.h b/deps/geohash-int/geohash.h index 30fc17144..78310715f 100644 --- a/deps/geohash-int/geohash.h +++ b/deps/geohash-int/geohash.h @@ -32,7 +32,7 @@ #define GEOHASH_H_ #include -#include +#include #include #if defined(__cplusplus) @@ -43,9 +43,6 @@ extern "C" { #define RANGEISZERO(r) (!(r).max && !(r).min) #define RANGEPISZERO(r) (r == NULL || RANGEISZERO(*r)) -#define GEO_WGS84_TYPE 1 -#define GEO_MERCATOR_TYPE 2 - #define GEO_STEP_MAX 26 typedef enum { @@ -90,28 +87,22 @@ typedef struct { * 0:success * -1:failed */ -bool geohashGetCoordRange(uint8_t coord_type, GeoHashRange *lat_range, - GeoHashRange *long_range); -bool geohashEncode(GeoHashRange *lat_range, GeoHashRange *long_range, - double latitude, double longitude, uint8_t step, - GeoHashBits *hash); -bool geohashEncodeType(uint8_t coord_type, double latitude, double longitude, - uint8_t step, GeoHashBits *hash); -bool geohashEncodeMercator(double latitude, double longitude, uint8_t step, - GeoHashBits *hash); -bool geohashEncodeWGS84(double latitude, double longitude, uint8_t step, - GeoHashBits *hash); -bool geohashDecode(const GeoHashRange lat_range, const GeoHashRange long_range, - const GeoHashBits hash, GeoHashArea *area); -bool geohashDecodeType(uint8_t coord_type, const GeoHashBits hash, - GeoHashArea *area); -bool geohashDecodeMercator(const GeoHashBits hash, GeoHashArea *area); -bool geohashDecodeWGS84(const GeoHashBits hash, GeoHashArea *area); -bool geohashDecodeAreaToLatLong(const GeoHashArea *area, double *latlong); -bool geohashDecodeToLatLongType(uint8_t coord_type, const GeoHashBits hash, - double *latlong); -bool geohashDecodeToLatLongWGS84(const GeoHashBits hash, double *latlong); -bool geohashDecodeToLatLongMercator(const GeoHashBits hash, double *latlong); +void geohashGetCoordRange(GeoHashRange 
*lat_range, GeoHashRange *long_range); +int geohashEncode(GeoHashRange *lat_range, GeoHashRange *long_range, + double latitude, double longitude, uint8_t step, + GeoHashBits *hash); +int geohashEncodeType(double latitude, double longitude, + uint8_t step, GeoHashBits *hash); +int geohashEncodeWGS84(double latitude, double longitude, uint8_t step, + GeoHashBits *hash); +int geohashDecode(const GeoHashRange lat_range, const GeoHashRange long_range, + const GeoHashBits hash, GeoHashArea *area); +int geohashDecodeType(const GeoHashBits hash, GeoHashArea *area); +int geohashDecodeWGS84(const GeoHashBits hash, GeoHashArea *area); +int geohashDecodeAreaToLatLong(const GeoHashArea *area, double *latlong); +int geohashDecodeToLatLongType(const GeoHashBits hash, double *latlong); +int geohashDecodeToLatLongWGS84(const GeoHashBits hash, double *latlong); +int geohashDecodeToLatLongMercator(const GeoHashBits hash, double *latlong); void geohashNeighbors(const GeoHashBits *hash, GeoHashNeighbors *neighbors); #if defined(__cplusplus) diff --git a/deps/geohash-int/geohash_helper.c b/deps/geohash-int/geohash_helper.c index 010ab070b..7271c7b31 100644 --- a/deps/geohash-int/geohash_helper.c +++ b/deps/geohash-int/geohash_helper.c @@ -53,33 +53,6 @@ const double MERCATOR_MIN = -20037726.37; static inline double deg_rad(double ang) { return ang * D_R; } static inline double rad_deg(double ang) { return ang / D_R; } -double mercator_y(double lat) { - lat = fmin(89.5, fmax(lat, -89.5)); - double phi = deg_rad(lat); - double sinphi = sin(phi); - double con = ECCENT * sinphi; - con = pow((1.0 - con) / (1.0 + con), COM); - double ts = tan(0.5 * (M_PI * 0.5 - phi)) / con; - return 0 - R_MAJOR * log(ts); -} - -double mercator_x(double lon) { return R_MAJOR * deg_rad(lon); } -double merc_lon(double x) { return rad_deg(x) / R_MAJOR; } - -double merc_lat(double y) { - double ts = exp(-y / R_MAJOR); - double phi = M_PI_2 - 2 * atan(ts); - double dphi = 1.0; - int i; - for (i = 0; fabs(dphi) > 0.000000001 && i < 15; i++) { - double con = ECCENT * sin(phi); - dphi = - M_PI_2 - 2 * atan(ts * pow((1.0 - con) / (1.0 + con), COM)) - phi; - phi += dphi; - } - return rad_deg(phi); -} - /* You must *ONLY* estimate steps when you are encoding. * If you are decoding, always decode to GEO_STEP_MAX (26). */ uint8_t geohashEstimateStepsByRadius(double range_meters) { @@ -94,31 +67,14 @@ uint8_t geohashEstimateStepsByRadius(double range_meters) { return step > 26 ? 26 : step; } -double geohashGetXWGS84(double x) { return merc_lon(x); } -double geohashGetYWGS84(double y) { return merc_lat(y); } - -double geohashGetXMercator(double longtitude) { - if (longtitude > 180 || longtitude < -180) { - return longtitude; - } - return mercator_x(longtitude); -} -double geohashGetYMercator(double latitude) { - if (latitude > 90 || latitude < -90) { - return latitude; - } - return mercator_y(latitude); -} - int geohashBitsComparator(const GeoHashBits *a, const GeoHashBits *b) { /* If step not equal, compare on step. Else, compare on bits. */ return a->step != b->step ? 
a->step - b->step : a->bits - b->bits; } -bool geohashBoundingBox(double latitude, double longitude, double radius_meters, - double *bounds) { - if (!bounds) - return false; +int geohashBoundingBox(double latitude, double longitude, double radius_meters, + double *bounds) { + if (!bounds) return 0; double latr, lonr; latr = deg_rad(latitude); @@ -139,38 +95,28 @@ bool geohashBoundingBox(double latitude, double longitude, double radius_meters, bounds[2] = rad_deg(max_latitude); bounds[3] = rad_deg(max_longitude); - return true; + return 1; } -GeoHashRadius geohashGetAreasByRadius(uint8_t coord_type, double latitude, - double longitude, double radius_meters) { +GeoHashRadius geohashGetAreasByRadius(double latitude, double longitude, double radius_meters) { GeoHashRange lat_range, long_range; GeoHashRadius radius = { { 0 } }; GeoHashBits hash = { 0 }; GeoHashNeighbors neighbors = { { 0 } }; GeoHashArea area = { { 0 } }; - double delta_longitude, delta_latitude; double min_lat, max_lat, min_lon, max_lon; + double bounds[4]; int steps; - if (coord_type == GEO_WGS84_TYPE) { - double bounds[4]; - geohashBoundingBox(latitude, longitude, radius_meters, bounds); - min_lat = bounds[0]; - min_lon = bounds[1]; - max_lat = bounds[2]; - max_lon = bounds[3]; - } else { - delta_latitude = delta_longitude = radius_meters; - min_lat = latitude - delta_latitude; - max_lat = latitude + delta_latitude; - min_lon = longitude - delta_longitude; - max_lon = longitude + delta_longitude; - } + geohashBoundingBox(latitude, longitude, radius_meters, bounds); + min_lat = bounds[0]; + min_lon = bounds[1]; + max_lat = bounds[2]; + max_lon = bounds[3]; steps = geohashEstimateStepsByRadius(radius_meters); - geohashGetCoordRange(coord_type, &lat_range, &long_range); + geohashGetCoordRange(&lat_range, &long_range); geohashEncode(&lat_range, &long_range, latitude, longitude, steps, &hash); geohashNeighbors(&hash, &neighbors); geohashDecode(lat_range, long_range, hash, &area); @@ -203,14 +149,7 @@ GeoHashRadius geohashGetAreasByRadius(uint8_t coord_type, double latitude, GeoHashRadius geohashGetAreasByRadiusWGS84(double latitude, double longitude, double radius_meters) { - return geohashGetAreasByRadius(GEO_WGS84_TYPE, latitude, longitude, - radius_meters); -} - -GeoHashRadius geohashGetAreasByRadiusMercator(double latitude, double longitude, - double radius_meters) { - return geohashGetAreasByRadius(GEO_MERCATOR_TYPE, latitude, longitude, - radius_meters); + return geohashGetAreasByRadius(latitude, longitude, radius_meters); } GeoHashFix52Bits geohashAlign52Bits(const GeoHashBits hash) { @@ -232,48 +171,27 @@ double distanceEarth(double lat1d, double lon1d, double lat2d, double lon2d) { asin(sqrt(u * u + cos(lat1r) * cos(lat2r) * v * v)); } -bool geohashGetDistanceIfInRadius(uint8_t coord_type, double x1, double y1, - double x2, double y2, double radius, - double *distance) { - if (coord_type == GEO_WGS84_TYPE) { - *distance = distanceEarth(y1, x1, y2, x2); - if (*distance > radius) { - return false; - } - } else { - double xx = (x1 - x2) * (x1 - x2); - double yy = (y1 - y2) * (y1 - y2); - double dd = xx + yy; - *distance = dd; - if (dd > (radius * radius)) { - return false; - } - } - return true; +int geohashGetDistanceIfInRadius(double x1, double y1, + double x2, double y2, double radius, + double *distance) { + *distance = distanceEarth(y1, x1, y2, x2); + if (*distance > radius) return 0; + return 1; } -bool geohashGetDistanceIfInRadiusWGS84(double x1, double y1, double x2, - double y2, double radius, - double *distance) { 
-    return geohashGetDistanceIfInRadius(GEO_WGS84_TYPE, x1, y1, x2, y2, radius,
-                                        distance);
+int geohashGetDistanceIfInRadiusWGS84(double x1, double y1, double x2,
+                                      double y2, double radius,
+                                      double *distance) {
+    return geohashGetDistanceIfInRadius(x1, y1, x2, y2, radius, distance);
 }
-bool geohashGetDistanceSquaredIfInRadiusMercator(double x1, double y1,
-                                                 double x2, double y2,
-                                                 double radius,
-                                                 double *distance) {
-    return geohashGetDistanceIfInRadius(GEO_MERCATOR_TYPE, x1, y1, x2, y2,
-                                        radius, distance);
-}
-
-bool geohashVerifyCoordinates(uint8_t coord_type, double x, double y) {
+int geohashVerifyCoordinates(double x, double y) {
     GeoHashRange lat_range, long_range;
-    geohashGetCoordRange(coord_type, &lat_range, &long_range);
+    geohashGetCoordRange(&lat_range, &long_range);
     if (x < long_range.min || x > long_range.max || y < lat_range.min ||
         y > lat_range.max) {
-        return false;
+        return 0;
     }
-    return true;
+    return 1;
 }
diff --git a/deps/geohash-int/geohash_helper.h b/deps/geohash-int/geohash_helper.h
index b72e51442..c10a02c6a 100644
--- a/deps/geohash-int/geohash_helper.h
+++ b/deps/geohash-int/geohash_helper.h
@@ -32,7 +32,6 @@
 #define GEOHASH_HELPER_HPP_
 #include
-#include
 #include "geohash.h"
 #define GZERO(s) s.bits = s.step = 0;
@@ -50,9 +49,9 @@ typedef struct {
 int GeoHashBitsComparator(const GeoHashBits *a, const GeoHashBits *b);
 uint8_t geohashEstimateStepsByRadius(double range_meters);
-bool geohashBoundingBox(double latitude, double longitude, double radius_meters,
+int geohashBoundingBox(double latitude, double longitude, double radius_meters,
                        double *bounds);
-GeoHashRadius geohashGetAreasByRadius(uint8_t coord_type, double latitude,
+GeoHashRadius geohashGetAreasByRadius(double latitude,
                                       double longitude, double radius_meters);
 GeoHashRadius geohashGetAreasByRadiusWGS84(double latitude, double longitude,
                                            double radius_meters);
@@ -63,16 +62,16 @@ double geohashGetXMercator(double longtitude);
 double geohashGetYMercator(double latitude);
 double geohashGetXWGS84(double x);
 double geohashGetYWGS84(double y);
-bool geohashVerifyCoordinates(uint8_t coord_type, double x, double y);
-bool geohashGetDistanceIfInRadius(uint8_t coord_type, double x1, double y1,
-                                  double x2, double y2, double radius,
-                                  double *distance);
-bool geohashGetDistanceIfInRadiusWGS84(double x1, double y1, double x2,
-                                       double y2, double radius,
-                                       double *distance);
-bool geohashGetDistanceSquaredIfInRadiusMercator(double x1, double y1,
-                                                 double x2, double y2,
-                                                 double radius,
-                                                 double *distance);
+int geohashVerifyCoordinates(double x, double y);
+int geohashGetDistanceIfInRadius(double x1, double y1,
+                                 double x2, double y2, double radius,
+                                 double *distance);
+int geohashGetDistanceIfInRadiusWGS84(double x1, double y1, double x2,
+                                      double y2, double radius,
+                                      double *distance);
+int geohashGetDistanceSquaredIfInRadiusMercator(double x1, double y1,
+                                                double x2, double y2,
+                                                double radius,
+                                                double *distance);
 #endif /* GEOHASH_HELPER_HPP_ */
From 73134f6a0b764ee1bc2df8d31180ed47ae55137e Mon Sep 17 00:00:00 2001
From: antirez
Date: Mon, 22 Jun 2015 11:16:36 +0200
Subject: [PATCH 0251/1928] Geo: removed JSON failing test (false positive)

Server output is matched to a pre-computed output. The last digits differ
because of rounding errors.
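If the test is ever reintroduced, one option (sketched here for illustration,
not part of this patch) is to compare the returned coordinates numerically
with a small tolerance rather than matching the full pre-computed strings:

    #include <math.h>

    /* Hypothetical helper: treat two coordinates as equal when they are
     * within 1e-7 degrees, i.e. roughly one centimeter of latitude. */
    static int coordCloseEnough(double got, double expected) {
        return fabs(got - expected) < 1e-7;
    }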
--- tests/unit/geo.tcl | 8 -------- 1 file changed, 8 deletions(-) diff --git a/tests/unit/geo.tcl b/tests/unit/geo.tcl index 015fcf87c..cdd52e861 100644 --- a/tests/unit/geo.tcl +++ b/tests/unit/geo.tcl @@ -27,14 +27,6 @@ start_server {tags {"geo"}} { r georadiusbymember nyc "wtc one" 7 km } {{wtc one} {union square} {central park n/q/r} 4545 {lic market}} - test {GEORADIUSBYMEMBER simple (sorted, json)} { - r georadiusbymember nyc "wtc one" 7 km withgeojson - } {{{wtc one} {{"type":"Feature","geometry":{"type":"Point","coordinates":[-74.01316255331,40.712667181451]},"properties":{"distance":0,"member":"wtc one","units":"km","set":"nyc"}}}}\ - {{union square} {{"type":"Feature","geometry":{"type":"Point","coordinates":[-73.990310132504,40.736250227118]},"properties":{"distance":3.2543954573354,"member":"union square","units":"km","set":"nyc"}}}}\ - {{central park n/q/r} {{"type":"Feature","geometry":{"type":"Point","coordinates":[-73.973347842693,40.764806395699]},"properties":{"distance":6.7000029092796,"member":"central park n\/q\/r","units":"km","set":"nyc"}}}}\ - {4545 {{"type":"Feature","geometry":{"type":"Point","coordinates":[-73.956412374973,40.748097513816]},"properties":{"distance":6.1975173818008,"member":"4545","units":"km","set":"nyc"}}}}\ - {{lic market} {{"type":"Feature","geometry":{"type":"Point","coordinates":[-73.945495784283,40.747532270998]},"properties":{"distance":6.8968709532081,"member":"lic market","units":"km","set":"nyc"}}}}} - test {GEORADIUSBYMEMBER withdistance (sorted)} { r georadiusbymember nyc "wtc one" 7 km withdist } {{{wtc one} 0.00} {{union square} 3.25} {{central park n/q/r} 6.70} {4545 6.20} {{lic market} 6.90}} From f193b3caa8e2de38573831d81da4bf6438c2ae79 Mon Sep 17 00:00:00 2001 From: antirez Date: Mon, 22 Jun 2015 11:24:58 +0200 Subject: [PATCH 0252/1928] Geo: removed bool usage from Geo code inside Redis --- src/geo.c | 53 +++++++++++++++++++++++++++-------------------------- src/zset.c | 12 ++++++------ src/zset.h | 3 +-- 3 files changed, 34 insertions(+), 34 deletions(-) diff --git a/src/geo.c b/src/geo.c index 36e87eef5..296cf7e04 100644 --- a/src/geo.c +++ b/src/geo.c @@ -47,36 +47,36 @@ /* ==================================================================== * Helpers * ==================================================================== */ -static inline bool decodeGeohash(double bits, double *latlong) { +static inline int decodeGeohash(double bits, double *latlong) { GeoHashBits hash = { .bits = (uint64_t)bits, .step = GEO_STEP_MAX }; return geohashDecodeToLatLongWGS84(hash, latlong); } /* Input Argument Helper */ /* Take a pointer to the latitude arg then use the next arg for longitude */ -static inline bool extractLatLongOrReply(redisClient *c, robj **argv, +static inline int extractLatLongOrReply(redisClient *c, robj **argv, double *latlong) { for (int i = 0; i < 2; i++) { if (getDoubleFromObjectOrReply(c, argv[i], latlong + i, NULL) != REDIS_OK) { - return false; + return 0; } } - return true; + return 1; } /* Input Argument Helper */ /* Decode lat/long from a zset member's score */ -static bool latLongFromMember(robj *zobj, robj *member, double *latlong) { +static int latLongFromMember(robj *zobj, robj *member, double *latlong) { double score = 0; if (!zsetScore(zobj, member, &score)) - return false; + return 0; if (!decodeGeohash(score, latlong)) - return false; + return 0; - return true; + return 1; } /* Input Argument Helper */ @@ -124,7 +124,7 @@ static void latLongToGeojsonAndReply(redisClient *c, struct geojsonPoint *gp, static 
void decodeGeohashToGeojsonBoundsAndReply(redisClient *c, uint64_t hashbits, struct geojsonPoint *gp) { - GeoHashArea area = { { 0 } }; + GeoHashArea area = {{0,0},{0,0},{0,0}}; GeoHashBits hash = { .bits = hashbits, .step = GEO_STEP_MAX }; geohashDecodeWGS84(hash, &area); @@ -171,6 +171,7 @@ static list *membersOfAllNeighbors(robj *zobj, GeoHashRadius n, double x, double y, double radius) { list *l = NULL; GeoHashBits neighbors[9]; + unsigned int i; neighbors[0] = n.hash; neighbors[1] = n.neighbors.north; @@ -184,7 +185,7 @@ static list *membersOfAllNeighbors(robj *zobj, GeoHashRadius n, double x, /* For each neighbor (*and* our own hashbox), get all the matching * members and add them to the potential result list. */ - for (int i = 0; i < sizeof(neighbors) / sizeof(*neighbors); i++) { + for (i = 0; i < sizeof(neighbors) / sizeof(*neighbors); i++) { list *r; if (HASHISZERO(neighbors[i])) @@ -213,7 +214,7 @@ static list *membersOfAllNeighbors(robj *zobj, GeoHashRadius n, double x, listRewind(l, &li); while ((ln = listNext(&li))) { struct zipresult *zr = listNodeValue(ln); - GeoHashArea area = { { 0 } }; + GeoHashArea area = {{0,0},{0,0},{0,0}}; GeoHashBits hash = { .bits = (uint64_t)zr->score, .step = GEO_STEP_MAX }; @@ -433,31 +434,31 @@ static void geoRadiusGeneric(redisClient *c, int type) { sds units = c->argv[base_args - 2 + 1]->ptr; /* Discover and populate all optional parameters. */ - bool withdist = false, withhash = false, withcoords = false, - withgeojson = false, withgeojsonbounds = false, - withgeojsoncollection = false, noproperties = false; + int withdist = 0, withhash = 0, withcoords = 0, + withgeojson = 0, withgeojsonbounds = 0, + withgeojsoncollection = 0, noproperties = 0; int sort = SORT_NONE; if (c->argc > base_args) { int remaining = c->argc - base_args; for (int i = 0; i < remaining; i++) { char *arg = c->argv[base_args + i]->ptr; if (!strncasecmp(arg, "withdist", 8)) - withdist = true; + withdist = 1; else if (!strcasecmp(arg, "withhash")) - withhash = true; + withhash = 1; else if (!strncasecmp(arg, "withcoord", 9)) - withcoords = true; + withcoords = 1; else if (!strncasecmp(arg, "withgeojsonbound", 16)) - withgeojsonbounds = true; + withgeojsonbounds = 1; else if (!strncasecmp(arg, "withgeojsoncollection", 21)) - withgeojsoncollection = true; + withgeojsoncollection = 1; else if (!strncasecmp(arg, "withgeo", 7) || !strcasecmp(arg, "geojson") || !strcasecmp(arg, "json") || !strcasecmp(arg, "withjson")) - withgeojson = true; + withgeojson = 1; else if (!strncasecmp(arg, "noprop", 6) || !strncasecmp(arg, "withoutprop", 11)) - noproperties = true; + noproperties = 1; else if (!strncasecmp(arg, "asc", 3) || !strncasecmp(arg, "sort", 4)) sort = SORT_ASC; @@ -470,7 +471,7 @@ static void geoRadiusGeneric(redisClient *c, int type) { } } - bool withgeo = withgeojsonbounds || withgeojsoncollection || withgeojson; + int withgeo = withgeojsonbounds || withgeojsoncollection || withgeojson; /* Get all neighbor geohash boxes for our radius search */ GeoHashRadius georadius = @@ -617,9 +618,9 @@ void geoDecodeCommand(redisClient *c) { NULL) != REDIS_OK) return; - bool withgeojson = false; + int withgeojson = 0; if (c->argc == 3) - withgeojson = true; + withgeojson = 1; GeoHashArea area; geohash.step = GEO_STEP_MAX; @@ -665,12 +666,12 @@ void geoEncodeCommand(redisClient *c) { * - AND / OR - * optional: [geojson] */ - bool withgeojson = false; + int withgeojson = 0; for (int i = 3; i < c->argc; i++) { char *arg = c->argv[i]->ptr; if (!strncasecmp(arg, "withgeo", 7) || 
!strcasecmp(arg, "geojson") || !strcasecmp(arg, "json") || !strcasecmp(arg, "withjson")) { - withgeojson = true; + withgeojson = 1; break; } } diff --git a/src/zset.c b/src/zset.c index 2412a1c44..1f69d3f02 100644 --- a/src/zset.c +++ b/src/zset.c @@ -13,13 +13,13 @@ int zslValueLteMax(double value, zrangespec *spec); * ==================================================================== */ /* zset access is mostly a copy/paste from zscoreCommand() */ -bool zsetScore(robj *zobj, robj *member, double *score) { +int zsetScore(robj *zobj, robj *member, double *score) { if (!zobj || !member) - return false; + return 0; if (zobj->encoding == REDIS_ENCODING_ZIPLIST) { if (zzlFind(zobj->ptr, member, score) == NULL) - return false; + return 0; } else if (zobj->encoding == REDIS_ENCODING_SKIPLIST) { zset *zs = zobj->ptr; dictEntry *de; @@ -29,11 +29,11 @@ bool zsetScore(robj *zobj, robj *member, double *score) { if (de != NULL) { *score = *(double *)dictGetVal(de); } else - return false; + return 0; } else { - return false; + return 0; } - return true; + return 1; } /* Largely extracted from genericZrangebyscoreCommand() in t_zset.c */ diff --git a/src/zset.h b/src/zset.h index 6a78d1ed2..5e02bf23a 100644 --- a/src/zset.h +++ b/src/zset.h @@ -2,7 +2,6 @@ #define __ZSET_H__ #include "redis.h" -#include #define ZR_LONG 1 #define ZR_STRING 2 @@ -17,7 +16,7 @@ struct zipresult { }; /* Redis DB Access */ -bool zsetScore(robj *zobj, robj *member, double *score); +int zsetScore(robj *zobj, robj *member, double *score); list *geozrangebyscore(robj *zobj, double min, double max, int limit); /* New list operation: append one list to another */ From b18c68aa7fa780ab0d591c91ad758246fa9fdc9e Mon Sep 17 00:00:00 2001 From: antirez Date: Mon, 22 Jun 2015 11:53:14 +0200 Subject: [PATCH 0253/1928] Geo: JSON features removed The command can only return data in the normal Redis protocol. It is up to the caller to translate to JSON if needed. 
--- src/Makefile | 2 +- src/geo.c | 153 +++-------------------------- src/geo.h | 9 ++ src/geojson.c | 265 -------------------------------------------------- src/geojson.h | 54 ---------- src/redis.c | 2 +- 6 files changed, 25 insertions(+), 460 deletions(-) delete mode 100644 src/geojson.c delete mode 100644 src/geojson.h diff --git a/src/Makefile b/src/Makefile index 054857ca5..8056f9100 100644 --- a/src/Makefile +++ b/src/Makefile @@ -117,7 +117,7 @@ endif REDIS_SERVER_NAME=redis-server REDIS_SENTINEL_NAME=redis-sentinel -REDIS_SERVER_OBJ=adlist.o quicklist.o ae.o anet.o dict.o redis.o sds.o zmalloc.o lzf_c.o lzf_d.o pqsort.o zipmap.o sha1.o ziplist.o release.o networking.o util.o object.o db.o replication.o rdb.o t_string.o t_list.o t_set.o t_zset.o t_hash.o config.o aof.o pubsub.o multi.o debug.o sort.o intset.o syncio.o cluster.o crc16.o endianconv.o slowlog.o scripting.o bio.o rio.o rand.o memtest.o crc64.o bitops.o sentinel.o notify.o setproctitle.o blocked.o hyperloglog.o latency.o sparkline.o redis-check-rdb.o geo.o zset.o geojson.o +REDIS_SERVER_OBJ=adlist.o quicklist.o ae.o anet.o dict.o redis.o sds.o zmalloc.o lzf_c.o lzf_d.o pqsort.o zipmap.o sha1.o ziplist.o release.o networking.o util.o object.o db.o replication.o rdb.o t_string.o t_list.o t_set.o t_zset.o t_hash.o config.o aof.o pubsub.o multi.o debug.o sort.o intset.o syncio.o cluster.o crc16.o endianconv.o slowlog.o scripting.o bio.o rio.o rand.o memtest.o crc64.o bitops.o sentinel.o notify.o setproctitle.o blocked.o hyperloglog.o latency.o sparkline.o redis-check-rdb.o geo.o zset.o REDIS_GEOHASH_OBJ=../deps/geohash-int/geohash.o ../deps/geohash-int/geohash_helper.o REDIS_CLI_NAME=redis-cli REDIS_CLI_OBJ=anet.o sds.o adlist.o redis-cli.o zmalloc.o release.o anet.o ae.o crc64.o diff --git a/src/geo.c b/src/geo.c index 296cf7e04..802acb919 100644 --- a/src/geo.c +++ b/src/geo.c @@ -29,7 +29,6 @@ #include "geo.h" #include "geohash_helper.h" -#include "geojson.h" #include "zset.h" /* ==================================================================== @@ -110,49 +109,15 @@ static double extractDistanceOrReply(redisClient *c, robj **argv, return distance * to_meters; } -/* Output Reply Helper */ -static void latLongToGeojsonAndReply(redisClient *c, struct geojsonPoint *gp, - char *units) { - sds geojson = geojsonLatLongToPointFeature( - gp->latitude, gp->longitude, gp->set, gp->member, gp->dist, units); - - addReplyBulkCBuffer(c, geojson, sdslen(geojson)); - sdsfree(geojson); -} - -/* Output Reply Helper */ -static void decodeGeohashToGeojsonBoundsAndReply(redisClient *c, - uint64_t hashbits, - struct geojsonPoint *gp) { - GeoHashArea area = {{0,0},{0,0},{0,0}}; - GeoHashBits hash = { .bits = hashbits, .step = GEO_STEP_MAX }; - - geohashDecodeWGS84(hash, &area); - - sds geojson = geojsonBoxToPolygonFeature( - area.latitude.min, area.longitude.min, area.latitude.max, - area.longitude.max, gp->set, gp->member); - addReplyBulkCBuffer(c, geojson, sdslen(geojson)); - sdsfree(geojson); -} - /* The defailt addReplyDouble has too much accuracy. We use this * for returning location distances. "5.21 meters away" is nicer * than "5.2144992818115 meters away." 
*/ -static inline void addReplyDoubleNicer(redisClient *c, double d) { +static inline void addReplyDoubleMeters(redisClient *c, double d) { char dbuf[128] = { 0 }; int dlen = snprintf(dbuf, sizeof(dbuf), "%.2f", d); addReplyBulkCBuffer(c, dbuf, dlen); } -/* Output Reply Helper */ -static void replyGeojsonCollection(redisClient *c, struct geojsonPoint *gp, - long result_length, char *units) { - sds geojson = geojsonFeatureCollection(gp, result_length, units); - addReplyBulkCBuffer(c, geojson, sdslen(geojson)); - sdsfree(geojson); -} - /* geohash range+zset access helper */ /* Obtain all members between the min/max of this geohash bounding box. */ /* Returns list of results. List must be listRelease()'d later. */ @@ -291,7 +256,7 @@ static int publishLocationUpdate(const sds zset, const sds member, /* Sort comparators for qsort() */ static int sort_gp_asc(const void *a, const void *b) { - const struct geojsonPoint *gpa = a, *gpb = b; + const struct geoPoint *gpa = a, *gpb = b; /* We can't do adist - bdist because they are doubles and * the comparator returns an int. */ if (gpa->dist > gpb->dist) @@ -431,12 +396,8 @@ static void geoRadiusGeneric(redisClient *c, int type) { return; } - sds units = c->argv[base_args - 2 + 1]->ptr; - /* Discover and populate all optional parameters. */ - int withdist = 0, withhash = 0, withcoords = 0, - withgeojson = 0, withgeojsonbounds = 0, - withgeojsoncollection = 0, noproperties = 0; + int withdist = 0, withhash = 0, withcoords = 0, noproperties = 0; int sort = SORT_NONE; if (c->argc > base_args) { int remaining = c->argc - base_args; @@ -448,14 +409,6 @@ static void geoRadiusGeneric(redisClient *c, int type) { withhash = 1; else if (!strncasecmp(arg, "withcoord", 9)) withcoords = 1; - else if (!strncasecmp(arg, "withgeojsonbound", 16)) - withgeojsonbounds = 1; - else if (!strncasecmp(arg, "withgeojsoncollection", 21)) - withgeojsoncollection = 1; - else if (!strncasecmp(arg, "withgeo", 7) || - !strcasecmp(arg, "geojson") || !strcasecmp(arg, "json") || - !strcasecmp(arg, "withjson")) - withgeojson = 1; else if (!strncasecmp(arg, "noprop", 6) || !strncasecmp(arg, "withoutprop", 11)) noproperties = 1; @@ -471,8 +424,6 @@ static void geoRadiusGeneric(redisClient *c, int type) { } } - int withgeo = withgeojsonbounds || withgeojsoncollection || withgeojson; - /* Get all neighbor geohash boxes for our radius search */ GeoHashRadius georadius = geohashGetAreasByRadiusWGS84(latlong[0], latlong[1], radius_meters); @@ -508,23 +459,17 @@ static void geoRadiusGeneric(redisClient *c, int type) { if (withhash) option_length++; - if (withgeojson) - option_length++; - - if (withgeojsonbounds) - option_length++; - /* The multibulk len we send is exactly result_length. The result is either * all strings of just zset members *or* a nested multi-bulk reply * containing the zset member string _and_ all the additional options the * user enabled for this request. 
*/ - addReplyMultiBulkLen(c, result_length + withgeojsoncollection); + addReplyMultiBulkLen(c, result_length); /* Iterate over results, populate struct used for sorting and result sending */ listIter li; listRewind(found_matches, &li); - struct geojsonPoint gp[result_length]; + struct geoPoint gp[result_length]; /* populate gp array from our results */ for (int i = 0; i < result_length; i++) { struct zipresult *zr = listNodeValue(listNext(&li)); @@ -534,7 +479,7 @@ static void geoRadiusGeneric(redisClient *c, int type) { gp[i].dist = zr->distance / conversion; gp[i].userdata = zr; - /* The layout of geojsonPoint allows us to pass the start offset + /* The layout of geoPoint allows us to pass the start offset * of the struct directly to decodeGeohash. */ decodeGeohash(zr->score, (double *)(gp + i)); } @@ -558,18 +503,14 @@ static void geoRadiusGeneric(redisClient *c, int type) { switch (zr->type) { case ZR_LONG: addReplyBulkLongLong(c, zr->val.v); - if (withgeo && !noproperties) - gp[i].member = sdscatprintf(sdsempty(), "%llu", zr->val.v); break; case ZR_STRING: addReplyBulkCBuffer(c, zr->val.s, sdslen(zr->val.s)); - if (withgeo && !noproperties) - gp[i].member = sdsdup(zr->val.s); break; } if (withdist) - addReplyDoubleNicer(c, gp[i].dist); + addReplyDoubleMeters(c, gp[i].dist); if (withhash) addReplyLongLong(c, zr->score); @@ -579,21 +520,7 @@ static void geoRadiusGeneric(redisClient *c, int type) { addReplyDouble(c, gp[i].latitude); addReplyDouble(c, gp[i].longitude); } - - if (withgeojson) - latLongToGeojsonAndReply(c, gp + i, units); - - if (withgeojsonbounds) - decodeGeohashToGeojsonBoundsAndReply(c, zr->score, gp + i); } - - if (withgeojsoncollection) - replyGeojsonCollection(c, gp, result_length, units); - - if (withgeo && !noproperties) - for (int i = 0; i < result_length; i++) - sdsfree(gp[i].member); - listRelease(found_matches); } @@ -610,18 +537,11 @@ void geoRadiusByMemberCommand(redisClient *c) { } void geoDecodeCommand(redisClient *c) { - /* args 0-1: ["geodecode", geohash]; - * optional: [geojson] */ - GeoHashBits geohash; if (getLongLongFromObjectOrReply(c, c->argv[1], (long long *)&geohash.bits, NULL) != REDIS_OK) return; - int withgeojson = 0; - if (c->argc == 3) - withgeojson = 1; - GeoHashArea area; geohash.step = GEO_STEP_MAX; geohashDecodeWGS84(geohash, &area); @@ -630,7 +550,7 @@ void geoDecodeCommand(redisClient *c) { double x = (area.longitude.min + area.longitude.max) / 2; /* Returning three nested replies */ - addReplyMultiBulkLen(c, 3 + withgeojson * 2); + addReplyMultiBulkLen(c, 3); /* First, the minimum corner */ addReplyMultiBulkLen(c, 2); @@ -646,50 +566,23 @@ void geoDecodeCommand(redisClient *c) { addReplyMultiBulkLen(c, 2); addReplyDouble(c, y); addReplyDouble(c, x); - - if (withgeojson) { - struct geojsonPoint gp = { .latitude = y, - .longitude = x, - .member = NULL }; - - /* Return geojson Feature Point */ - latLongToGeojsonAndReply(c, &gp, NULL); - - /* Return geojson Feature Polygon */ - decodeGeohashToGeojsonBoundsAndReply(c, geohash.bits, &gp); - } } void geoEncodeCommand(redisClient *c) { /* args 0-2: ["geoencode", lat, long]; - * optionals: [radius, units] - * - AND / OR - - * optional: [geojson] */ - - int withgeojson = 0; - for (int i = 3; i < c->argc; i++) { - char *arg = c->argv[i]->ptr; - if (!strncasecmp(arg, "withgeo", 7) || !strcasecmp(arg, "geojson") || - !strcasecmp(arg, "json") || !strcasecmp(arg, "withjson")) { - withgeojson = 1; - break; - } - } + * optionals: [radius, units] */ double radius_meters = 0; if (c->argc >= 5) { - if 
((radius_meters = extractDistanceOrReply(c, c->argv + 3, NULL)) < - 0) { + if ((radius_meters = extractDistanceOrReply(c, c->argv + 3, NULL)) < 0) return; - } - } else if (c->argc == 4 && !withgeojson) { + } else if (c->argc == 4) { addReplyError(c, "must provide units when asking for radius encode"); return; } double latlong[2]; - if (!extractLatLongOrReply(c, c->argv + 1, latlong)) - return; + if (!extractLatLongOrReply(c, c->argv + 1, latlong)) return; /* Encode lat/long into our geohash */ GeoHashBits geohash; @@ -709,8 +602,8 @@ void geoEncodeCommand(redisClient *c) { double y = (area.latitude.min + area.latitude.max) / 2; double x = (area.longitude.min + area.longitude.max) / 2; - /* Return four nested multibulk replies with optional geojson returns */ - addReplyMultiBulkLen(c, 4 + withgeojson * 2); + /* Return four nested multibulk replies. */ + addReplyMultiBulkLen(c, 4); /* Return the binary geohash we calculated as 52-bit integer */ addReplyLongLong(c, bits); @@ -729,22 +622,4 @@ void geoEncodeCommand(redisClient *c) { addReplyMultiBulkLen(c, 2); addReplyDouble(c, y); addReplyDouble(c, x); - - if (withgeojson) { - struct geojsonPoint gp = { .latitude = y, - .longitude = x, - .member = NULL }; - - /* Return geojson Feature Point */ - latLongToGeojsonAndReply(c, &gp, NULL); - - /* Return geojson Feature Polygon (bounding box for this step size) */ - /* We don't use the helper function here because we can't re-calculate - * the area if we have a non-GEO_STEP_MAX step size. */ - sds geojson = geojsonBoxToPolygonFeature( - area.latitude.min, area.longitude.min, area.latitude.max, - area.longitude.max, gp.set, gp.member); - addReplyBulkCBuffer(c, geojson, sdslen(geojson)); - sdsfree(geojson); - } } diff --git a/src/geo.h b/src/geo.h index f82071663..9aa85cf9d 100644 --- a/src/geo.h +++ b/src/geo.h @@ -9,4 +9,13 @@ void geoRadiusByMemberCommand(redisClient *c); void geoRadiusCommand(redisClient *c); void geoAddCommand(redisClient *c); +struct geoPoint { + double latitude; + double longitude; + double dist; + char *set; + char *member; + void *userdata; +}; + #endif diff --git a/src/geojson.c b/src/geojson.c deleted file mode 100644 index bb0befc95..000000000 --- a/src/geojson.c +++ /dev/null @@ -1,265 +0,0 @@ -/* - * Copyright (c) 2014, Matt Stancliff . - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * - * * Redistributions of source code must retain the above copyright notice, - * this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of Redis nor the names of its contributors may be used - * to endorse or promote products derived from this software without - * specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. - */ - -#include "geojson.h" - -#define L server.lua - -/* ==================================================================== - * The Encoder - * ==================================================================== */ -static sds jsonEncode() { - /* When entering this function, stack is: [1:[geojson table to encode]] */ - lua_getglobal(L, "cjson"); - lua_getfield(L, -1, "encode"); - - /* Stack is now: [1:[geojson table], 2:'cjson', 3:'encode'] */ - - /* Move current top ('encode') to bottom of stack */ - lua_insert(L, 1); - - /* Move current top ('cjson') to bottom of stack so we can 'cjson.encode' */ - lua_insert(L, 1); - - /* Stack is now: [1:'cjson', 2:'encode', 3:[table of geojson to encode]] */ - - /* Call cjson.encode on the element above it on the stack; - * obtain one return value */ - if (lua_pcall(L, 1, 1, 0) != 0) - redisLog(REDIS_WARNING, "Could not encode geojson: %s", - lua_tostring(L, -1)); - - sds geojson = sdsnew(lua_tostring(L, -1)); - - /* We're done. Remove entire stack. Drop mic. Walk away. */ - lua_pop(L, lua_gettop(L)); - - /* Return sds the caller must sdsfree() on their own */ - return geojson; -} - -/* ==================================================================== - * The Lua Helpers - * ==================================================================== */ -static inline void luaCreateFieldFromPrevious(const char *field) { - lua_setfield(L, -2, field); -} - -static inline void luaCreateFieldStr(const char *field, const char *value) { - lua_pushstring(L, value); - luaCreateFieldFromPrevious(field); -} - -/* Creates [Lat, Long] array attached to "coordinates" key */ -static void luaCreateCoordinates(const double x, const double y) { - /* Create array table with two elements */ - lua_createtable(L, 2, 0); - - lua_pushnumber(L, x); - lua_rawseti(L, -2, 1); - lua_pushnumber(L, y); - lua_rawseti(L, -2, 2); -} - -static void luaCreatePropertyNull(void) { - /* Create empty table and give it a name. This is a json {} value. 
*/ - lua_createtable(L, 0, 0); - luaCreateFieldFromPrevious("properties"); -} - -static void _luaCreateProperties(const char *k1, const char *v1, const char *k2, - const char *v2, const int noclose) { - /* we may add additional properties outside of here, so newtable instead of - * fixed-size createtable */ - lua_newtable(L); - - luaCreateFieldStr(k1, v1); - luaCreateFieldStr(k2, v2); - - if (!noclose) - luaCreateFieldFromPrevious("properties"); -} - -static void luaCreateProperties(const char *k1, const char *v1, const char *k2, - const char *v2) { - _luaCreateProperties(k1, v1, k2, v2, 0); -} - -/* ==================================================================== - * The Lua Aggregation Helpers - * ==================================================================== */ -static void attachProperties(const char *set, const char *member) { - if (member) - luaCreateProperties("set", set, "member", member); - else - luaCreatePropertyNull(); -} - -static void attachPropertiesWithDist(const char *set, const char *member, - double dist, const char *units) { - if (member) { - _luaCreateProperties("set", set, "member", member, 1); - if (units) { - /* Add units then distance. After encoding it comes - * out as distance followed by units in the json. */ - lua_pushstring(L, units); - luaCreateFieldFromPrevious("units"); - lua_pushnumber(L, dist); - luaCreateFieldFromPrevious("distance"); - } - - /* We requested to leave the properties table open, but now we - * are done and can close it. */ - luaCreateFieldFromPrevious("properties"); - } else { - luaCreatePropertyNull(); - } -} - -static void createGeometryPoint(const double x, const double y) { - lua_createtable(L, 0, 2); - - /* coordinates = [x, y] */ - luaCreateCoordinates(x, y); - luaCreateFieldFromPrevious("coordinates"); - - /* type = Point */ - luaCreateFieldStr("type", "Point"); - - /* geometry = (coordinates = [x, y]) */ - luaCreateFieldFromPrevious("geometry"); -} - -static void createGeometryBox(const double x1, const double y1, const double x2, - const double y2) { - lua_createtable(L, 0, 2); - - /* Result = [[[x1,y1],[x2,y1],[x2,y2],[x1,y2], [x1,y1]] */ - /* The end coord is the start coord to make a closed polygon */ - lua_createtable(L, 1, 0); - lua_createtable(L, 5, 0); - - /* Bottom left */ - luaCreateCoordinates(x1, y1); - lua_rawseti(L, -2, 1); - - /* Top Left */ - luaCreateCoordinates(x2, y1); - lua_rawseti(L, -2, 2); - - /* Top Right */ - luaCreateCoordinates(x2, y2); - lua_rawseti(L, -2, 3); - - /* Bottom Right */ - luaCreateCoordinates(x1, y2); - lua_rawseti(L, -2, 4); - - /* Bottom Left (Again) */ - luaCreateCoordinates(x1, y1); - lua_rawseti(L, -2, 5); - - /* Set the outer array of our inner array of the inner coords */ - lua_rawseti(L, -2, 1); - - /* Bundle those together in coordinates: [a, b, c, d] */ - luaCreateFieldFromPrevious("coordinates"); - - /* Add type field */ - luaCreateFieldStr("type", "Polygon"); - - luaCreateFieldFromPrevious("geometry"); -} - -static void createFeature() { - /* Features have three fields: type, geometry, and properties */ - lua_createtable(L, 0, 3); - - luaCreateFieldStr("type", "Feature"); - - /* You must call attachProperties on your own */ -} - -static void createCollection(size_t size) { - /* FeatureCollections have two fields: type and features */ - lua_createtable(L, 0, 2); - - luaCreateFieldStr("type", "FeatureCollection"); -} - -static void pointsToCollection(const struct geojsonPoint *pts, const size_t len, - const char *units) { - createCollection(len); - - lua_createtable(L, 
len, 0); - for (int i = 0; i < len; i++) { - createFeature(); - createGeometryPoint(pts[i].longitude, pts[i].latitude); /* x, y */ - attachPropertiesWithDist(pts[i].set, pts[i].member, pts[i].dist, units); - lua_rawseti(L, -2, i + 1); /* Attach this Feature to "features" array */ - } - luaCreateFieldFromPrevious("features"); -} - -static void latLongToPointFeature(const double latitude, - const double longitude) { - createFeature(); - createGeometryPoint(longitude, latitude); /* geojson is: x,y */ -} - -static void squareToPolygonFeature(const double x1, const double y1, - const double x2, const double y2) { - createFeature(); - createGeometryBox(x1, y1, x2, y2); -} - -/* ==================================================================== - * The Interface Functions - * ==================================================================== */ -sds geojsonFeatureCollection(const struct geojsonPoint *pts, const size_t len, - const char *units) { - pointsToCollection(pts, len, units); - return jsonEncode(); -} - -sds geojsonLatLongToPointFeature(const double latitude, const double longitude, - const char *set, const char *member, - const double dist, const char *units) { - latLongToPointFeature(latitude, longitude); - attachPropertiesWithDist(set, member, dist, units); - return jsonEncode(); -} - -sds geojsonBoxToPolygonFeature(const double y1, const double x1, - const double y2, const double x2, - const char *set, const char *member) { - squareToPolygonFeature(x1, y1, x2, y2); - attachProperties(set, member); - return jsonEncode(); -} diff --git a/src/geojson.h b/src/geojson.h deleted file mode 100644 index 55993beae..000000000 --- a/src/geojson.h +++ /dev/null @@ -1,54 +0,0 @@ -/* - * Copyright (c) 2014, Matt Stancliff . - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * - * * Redistributions of source code must retain the above copyright notice, - * this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of Redis nor the names of its contributors may be used - * to endorse or promote products derived from this software without - * specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. 
- */ - -#ifndef __GEOJSON_H__ -#define __GEOJSON_H__ - -#include "redis.h" -#include "geohash_helper.h" - -struct geojsonPoint { - double latitude; - double longitude; - double dist; - char *set; - char *member; - void *userdata; -}; - -sds geojsonLatLongToPointFeature(const double latitude, const double longitude, - const char *set, const char *member, - const double dist, const char *units); -sds geojsonBoxToPolygonFeature(const double x1, const double y1, - const double x2, const double y2, - const char *set, const char *member); -sds geojsonFeatureCollection(const struct geojsonPoint *pts, const size_t len, - const char *units); - -#endif diff --git a/src/redis.c b/src/redis.c index 359ccb35e..e0561183d 100644 --- a/src/redis.c +++ b/src/redis.c @@ -286,7 +286,7 @@ struct redisCommand redisCommandTable[] = { {"georadius",geoRadiusCommand,-6,"r",0,NULL,1,1,1,0,0}, {"georadiusbymember",geoRadiusByMemberCommand,-5,"r",0,NULL,1,1,1,0,0}, {"geoencode",geoEncodeCommand,-3,"r",0,NULL,0,0,0,0,0}, - {"geodecode",geoDecodeCommand,-2,"r",0,NULL,0,0,0,0,0}, + {"geodecode",geoDecodeCommand,2,"r",0,NULL,0,0,0,0,0}, {"pfselftest",pfselftestCommand,1,"r",0,NULL,0,0,0,0,0}, {"pfadd",pfaddCommand,-2,"wmF",0,NULL,1,1,1,0,0}, {"pfcount",pfcountCommand,-2,"r",0,NULL,1,1,1,0,0}, From fc03d08ee0bc7abcb5f036454458d5b085fff10c Mon Sep 17 00:00:00 2001 From: antirez Date: Mon, 22 Jun 2015 13:08:46 +0200 Subject: [PATCH 0254/1928] Geo: addReplyDoubleDistance() precision set to 4 digits Also: 1. The function was renamed. 2. An useless initialization of a buffer was removed. --- src/geo.c | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/src/geo.c b/src/geo.c index 802acb919..8ab04eb39 100644 --- a/src/geo.c +++ b/src/geo.c @@ -110,11 +110,13 @@ static double extractDistanceOrReply(redisClient *c, robj **argv, } /* The defailt addReplyDouble has too much accuracy. We use this - * for returning location distances. "5.21 meters away" is nicer - * than "5.2144992818115 meters away." */ -static inline void addReplyDoubleMeters(redisClient *c, double d) { - char dbuf[128] = { 0 }; - int dlen = snprintf(dbuf, sizeof(dbuf), "%.2f", d); + * for returning location distances. "5.2145 meters away" is nicer + * than "5.2144992818115 meters away." We provide 4 digits after the dot + * so that the returned value is decently accurate even when the unit is + * the kilometer. */ +static inline void addReplyDoubleDistance(redisClient *c, double d) { + char dbuf[128]; + int dlen = snprintf(dbuf, sizeof(dbuf), "%.4f", d); addReplyBulkCBuffer(c, dbuf, dlen); } @@ -510,7 +512,7 @@ static void geoRadiusGeneric(redisClient *c, int type) { } if (withdist) - addReplyDoubleMeters(c, gp[i].dist); + addReplyDoubleDistance(c, gp[i].dist); if (withhash) addReplyLongLong(c, zr->score); From 2f66550729924ccfc20c1418f498f21e0e4bdeca Mon Sep 17 00:00:00 2001 From: antirez Date: Mon, 22 Jun 2015 13:40:26 +0200 Subject: [PATCH 0255/1928] Geo: Pub/Sub feature removed This feature apparently is not going to be very useful, to send a GEOADD+PUBLISH combo is exactly the same. One that would make a ton of difference is the ability to subscribe to a position and a radius, and get the updates in terms of objects entering/exiting the area. 
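For illustration only (not part of this patch): a hiredis-based sketch of the GEOADD+PUBLISH combo the message above refers to, wrapped in MULTI/EXEC so the position update and the zset change are applied together. It reuses the "__geo:<zset>:<member>" channel and "<latitude> <longitude>" payload convention of the removed code, and the latitude-first GEOADD argument order this version of the command expects; key, member and coordinates are made up.

    #include <hiredis/hiredis.h>

    int main(void) {
        redisContext *ctx = redisConnect("127.0.0.1", 6379);
        if (ctx == NULL || ctx->err) return 1;

        redisReply *r;
        r = redisCommand(ctx, "MULTI");
        if (r) freeReplyObject(r);
        /* The zset update itself. %s keeps the member as one argument. */
        r = redisCommand(ctx, "GEOADD nyc 40.7648057 -73.9733487 %s",
                         "central park n/q/r");
        if (r) freeReplyObject(r);
        /* The notification the removed server-side code used to publish. */
        r = redisCommand(ctx, "PUBLISH %s %s",
                         "__geo:nyc:central park n/q/r",
                         "40.7648057 -73.9733487");
        if (r) freeReplyObject(r);
        r = redisCommand(ctx, "EXEC");
        if (r) freeReplyObject(r);

        redisFree(ctx);
        return 0;
    }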
--- src/geo.c | 32 -------------------------------- 1 file changed, 32 deletions(-) diff --git a/src/geo.c b/src/geo.c index 8ab04eb39..4663a2e50 100644 --- a/src/geo.c +++ b/src/geo.c @@ -225,37 +225,6 @@ static list *membersOfAllNeighbors(robj *zobj, GeoHashRadius n, double x, return l; } -/* With no subscribers, each call of this function adds a median latency of 2 - * microseconds. */ -/* We aren't participating in any keyspace/keyevent notifications other than - * what's provided by the underlying zset itself, but it's probably not useful - * for clients to get the 52-bit integer geohash as an "update" value. */ -static int publishLocationUpdate(const sds zset, const sds member, - const double latitude, - const double longitude) { - int published; - - /* event is: " " */ - sds event = sdscatprintf(sdsempty(), "%.7f %.7f", latitude, longitude); - robj *eventobj = createObject(REDIS_STRING, event); - - /* channel is: __geo:: */ - /* If you want all events for this zset then just psubscribe - * to "__geo::*" */ - sds chan = sdsnewlen("__geo:", 6); - chan = sdscatsds(chan, zset); - chan = sdscatlen(chan, ":", 1); - chan = sdscatsds(chan, member); - robj *chanobj = createObject(REDIS_STRING, chan); - - published = pubsubPublishMessage(chanobj, eventobj); - - decrRefCount(chanobj); - decrRefCount(eventobj); - - return published; -} - /* Sort comparators for qsort() */ static int sort_gp_asc(const void *a, const void *b) { const struct geoPoint *gpa = a, *gpb = b; @@ -343,7 +312,6 @@ void geoAddCommand(redisClient *c) { rewriteClientCommandVector(client, 4, cmd, key, score, val); decrRefCount(score); zaddCommand(client); - publishLocationUpdate(key->ptr, val->ptr, latitude, longitude); } /* If we used a fake client, return a real reply then free fake client. */ From 575e247a0e91d662163fd92032b47e7a3b1f2b6b Mon Sep 17 00:00:00 2001 From: antirez Date: Mon, 22 Jun 2015 15:00:37 +0200 Subject: [PATCH 0256/1928] Geo: fix tests after distance precision change --- tests/unit/geo.tcl | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/unit/geo.tcl b/tests/unit/geo.tcl index cdd52e861..f42928f91 100644 --- a/tests/unit/geo.tcl +++ b/tests/unit/geo.tcl @@ -21,7 +21,7 @@ start_server {tags {"geo"}} { test {GEORADIUS withdistance (sorted)} { r georadius nyc 40.7598464 -73.9798091 3 km withdistance ascending - } {{{central park n/q/r} 0.78} {4545 2.37} {{union square} 2.77}} + } {{{central park n/q/r} 0.7750} {4545 2.3651} {{union square} 2.7697}} test {GEORADIUSBYMEMBER simple (sorted)} { r georadiusbymember nyc "wtc one" 7 km @@ -29,7 +29,7 @@ start_server {tags {"geo"}} { test {GEORADIUSBYMEMBER withdistance (sorted)} { r georadiusbymember nyc "wtc one" 7 km withdist - } {{{wtc one} 0.00} {{union square} 3.25} {{central park n/q/r} 6.70} {4545 6.20} {{lic market} 6.90}} + } {{{wtc one} 0.0000} {{union square} 3.2544} {{central park n/q/r} 6.7000} {4545 6.1975} {{lic market} 6.8969}} test {GEOENCODE simple} { r geoencode 41.2358883 1.8063239 From 9fc47ddf0b8174e5e652ba11bc3d368f6536ba40 Mon Sep 17 00:00:00 2001 From: antirez Date: Mon, 22 Jun 2015 17:26:36 +0200 Subject: [PATCH 0257/1928] Geo: zsetScore refactoring Now used both in geo.c and t_zset to provide ZSCORE. 
--- src/redis.h | 1 + src/t_zset.c | 41 +++++++++++++++++++++++------------------ src/zset.c | 24 ------------------------ src/zset.h | 1 - 4 files changed, 24 insertions(+), 43 deletions(-) diff --git a/src/redis.h b/src/redis.h index 2344c9eba..3115689f5 100644 --- a/src/redis.h +++ b/src/redis.h @@ -1240,6 +1240,7 @@ void zzlNext(unsigned char *zl, unsigned char **eptr, unsigned char **sptr); void zzlPrev(unsigned char *zl, unsigned char **eptr, unsigned char **sptr); unsigned int zsetLength(robj *zobj); void zsetConvert(robj *zobj, int encoding); +int zsetScore(robj *zobj, robj *member, double *score); unsigned long zslGetRank(zskiplist *zsl, double score, robj *o); /* Core functions */ diff --git a/src/t_zset.c b/src/t_zset.c index b900a9ccb..93808a871 100644 --- a/src/t_zset.c +++ b/src/t_zset.c @@ -1166,6 +1166,26 @@ void zsetConvert(robj *zobj, int encoding) { } } +/* Return (by reference) the score of the specified member of the sorted set + * storing it into *score. If the element does not exist REDIS_ERR is returned + * otherwise REDIS_OK is returned and *score is correctly populated. + * If 'zobj' or 'member' is NULL, REDIS_ERR is returned. */ +int zsetScore(robj *zobj, robj *member, double *score) { + if (!zobj || !member) return REDIS_ERR; + + if (zobj->encoding == REDIS_ENCODING_ZIPLIST) { + if (zzlFind(zobj->ptr, member, score) == NULL) return REDIS_ERR; + } else if (zobj->encoding == REDIS_ENCODING_SKIPLIST) { + zset *zs = zobj->ptr; + dictEntry *de = dictFind(zs->dict, member); + if (de == NULL) return REDIS_ERR; + *score = *(double*)dictGetVal(de); + } else { + redisPanic("Unknown sorted set encoding"); + } + return REDIS_OK; +} + /*----------------------------------------------------------------------------- * Sorted set commands *----------------------------------------------------------------------------*/ @@ -2815,25 +2835,10 @@ void zscoreCommand(redisClient *c) { if ((zobj = lookupKeyReadOrReply(c,key,shared.nullbulk)) == NULL || checkType(c,zobj,REDIS_ZSET)) return; - if (zobj->encoding == REDIS_ENCODING_ZIPLIST) { - if (zzlFind(zobj->ptr,c->argv[2],&score) != NULL) - addReplyDouble(c,score); - else - addReply(c,shared.nullbulk); - } else if (zobj->encoding == REDIS_ENCODING_SKIPLIST) { - zset *zs = zobj->ptr; - dictEntry *de; - - c->argv[2] = tryObjectEncoding(c->argv[2]); - de = dictFind(zs->dict,c->argv[2]); - if (de != NULL) { - score = *(double*)dictGetVal(de); - addReplyDouble(c,score); - } else { - addReply(c,shared.nullbulk); - } + if (zsetScore(zobj,c->argv[2],&score) == REDIS_ERR) { + addReply(c,shared.nullbulk); } else { - redisPanic("Unknown sorted set encoding"); + addReplyDouble(c,score); } } diff --git a/src/zset.c b/src/zset.c index 1f69d3f02..7a80d3a47 100644 --- a/src/zset.c +++ b/src/zset.c @@ -12,30 +12,6 @@ int zslValueLteMax(double value, zrangespec *spec); * Direct Redis DB Interaction * ==================================================================== */ -/* zset access is mostly a copy/paste from zscoreCommand() */ -int zsetScore(robj *zobj, robj *member, double *score) { - if (!zobj || !member) - return 0; - - if (zobj->encoding == REDIS_ENCODING_ZIPLIST) { - if (zzlFind(zobj->ptr, member, score) == NULL) - return 0; - } else if (zobj->encoding == REDIS_ENCODING_SKIPLIST) { - zset *zs = zobj->ptr; - dictEntry *de; - - member = tryObjectEncoding(member); - de = dictFind(zs->dict, member); - if (de != NULL) { - *score = *(double *)dictGetVal(de); - } else - return 0; - } else { - return 0; - } - return 1; -} - /* Largely extracted from 
genericZrangebyscoreCommand() in t_zset.c */ /* The zrangebyscoreCommand expects to only operate on a live redisClient, * but we need results returned to us, not sent over an async socket. */ diff --git a/src/zset.h b/src/zset.h index 5e02bf23a..a861811e4 100644 --- a/src/zset.h +++ b/src/zset.h @@ -16,7 +16,6 @@ struct zipresult { }; /* Redis DB Access */ -int zsetScore(robj *zobj, robj *member, double *score); list *geozrangebyscore(robj *zobj, double min, double max, int limit); /* New list operation: append one list to another */ From 3d9031eda43c3018244c1e0495a4cfb2ef606974 Mon Sep 17 00:00:00 2001 From: antirez Date: Mon, 22 Jun 2015 17:28:48 +0200 Subject: [PATCH 0258/1928] Geo: compile again with optimizations For some reason the Geo PR included disabling the fact that Redis is compiled with optimizations. Apparently it was just @mattsta attempt to speedup the modify-compile-test iteration and there are no other reasons. --- src/Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Makefile b/src/Makefile index 8056f9100..d61f8010f 100644 --- a/src/Makefile +++ b/src/Makefile @@ -14,7 +14,7 @@ release_hdr := $(shell sh -c './mkreleasehdr.sh') uname_S := $(shell sh -c 'uname -s 2>/dev/null || echo not') -#OPTIMIZATION?=-O2 +OPTIMIZATION?=-O2 DEPENDENCY_TARGETS=hiredis linenoise lua geohash-int # Default settings From 0b93139048c9e541feeaeacb79a604f50f6a2149 Mon Sep 17 00:00:00 2001 From: antirez Date: Mon, 22 Jun 2015 18:08:06 +0200 Subject: [PATCH 0259/1928] Geo: big refactoring of geo.c, zset.[ch] removed. This commit simplifies the implementation in a few ways: 1. zsetScore implementation improved a bit and moved into t_zset.c where is now also used to implement the ZSCORE command. 2. Range extraction from the sorted set remains a separated implementation from the one in t_zset.c, but was hyper-specialized in order to avoid accumulating results into a list and remove the ones outside the radius. 3. A new type is introduced: geoArray, which can accumulate geoPoint structures in a vector with power of two expansion policy. This is useful since we have to call qsort() against it before returning the result to the user. 4. As a result of 1, 2, 3, the two files zset.c and zset.h are now removed, including the function to merge two lists (now handled with functions that can add elements to existing geoArray arrays) and the machinery used in order to pass zset results. 5. geoPoint structure simplified because of the general code structure simplification, so we no longer need to take references to objects. 6. Not counting the JSON removal the refactoring removes 200 lines of code for the same functionalities, with a simpler to read implementation. 7. GEORADIUS is now 2.5 times faster testing with 10k elements and a radius resulting in 124 elements returned. However this is mostly a side effect of the refactoring and simplification. More speed gains can be achieved by trying to optimize the code. 
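For illustration only (not part of this patch): a self-contained toy of the accumulate-then-qsort pattern described in points 3 and 7, using the same power-of-two growth policy as the geoArray introduced below, but with plain malloc/realloc and distance-only entries instead of the Redis allocator and full geoPoint structs.

    #include <stdio.h>
    #include <stdlib.h>

    typedef struct { double dist; } point;

    typedef struct {
        point *array;
        size_t buckets;   /* allocated slots */
        size_t used;      /* filled slots */
    } pointArray;

    /* Append one slot, doubling the backing array when it is full
     * (8, 16, 32, ... as in geoArrayAppend). Realloc failure is not
     * handled here for brevity. */
    static point *pointArrayAppend(pointArray *pa) {
        if (pa->used == pa->buckets) {
            pa->buckets = (pa->buckets == 0) ? 8 : pa->buckets * 2;
            pa->array = realloc(pa->array, sizeof(point) * pa->buckets);
        }
        return pa->array + pa->used++;
    }

    /* qsort comparator: ascending by distance, mirroring sort_gp_asc. */
    static int point_asc(const void *a, const void *b) {
        const point *pa = a, *pb = b;
        if (pa->dist > pb->dist) return 1;
        if (pa->dist < pb->dist) return -1;
        return 0;
    }

    int main(void) {
        pointArray pa = { NULL, 0, 0 };
        double sample[] = { 6.70, 0.78, 2.37, 2.77 };
        for (size_t i = 0; i < sizeof(sample)/sizeof(*sample); i++)
            pointArrayAppend(&pa)->dist = sample[i];
        qsort(pa.array, pa.used, sizeof(point), point_asc);
        for (size_t i = 0; i < pa.used; i++)
            printf("%.2f\n", pa.array[i].dist);
        free(pa.array);
        return 0;
    }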
--- deps/geohash-int/geohash.c | 4 - src/Makefile | 2 +- src/geo.c | 306 +++++++++++++++++++++---------------- src/geo.h | 15 +- src/redis.h | 4 - src/zset.c | 161 ------------------- src/zset.h | 29 ---- 7 files changed, 190 insertions(+), 331 deletions(-) delete mode 100644 src/zset.c delete mode 100644 src/zset.h diff --git a/deps/geohash-int/geohash.c b/deps/geohash-int/geohash.c index e57434efd..66cff082c 100644 --- a/deps/geohash-int/geohash.c +++ b/deps/geohash-int/geohash.c @@ -183,10 +183,6 @@ int geohashDecodeToLatLongWGS84(const GeoHashBits hash, double *latlong) { return geohashDecodeToLatLongType(hash, latlong); } -int geohashDecodeToLatLongMercator(const GeoHashBits hash, double *latlong) { - return geohashDecodeToLatLongType(hash, latlong); -} - static void geohash_move_x(GeoHashBits *hash, int8_t d) { if (d == 0) return; diff --git a/src/Makefile b/src/Makefile index d61f8010f..650d438f7 100644 --- a/src/Makefile +++ b/src/Makefile @@ -117,7 +117,7 @@ endif REDIS_SERVER_NAME=redis-server REDIS_SENTINEL_NAME=redis-sentinel -REDIS_SERVER_OBJ=adlist.o quicklist.o ae.o anet.o dict.o redis.o sds.o zmalloc.o lzf_c.o lzf_d.o pqsort.o zipmap.o sha1.o ziplist.o release.o networking.o util.o object.o db.o replication.o rdb.o t_string.o t_list.o t_set.o t_zset.o t_hash.o config.o aof.o pubsub.o multi.o debug.o sort.o intset.o syncio.o cluster.o crc16.o endianconv.o slowlog.o scripting.o bio.o rio.o rand.o memtest.o crc64.o bitops.o sentinel.o notify.o setproctitle.o blocked.o hyperloglog.o latency.o sparkline.o redis-check-rdb.o geo.o zset.o +REDIS_SERVER_OBJ=adlist.o quicklist.o ae.o anet.o dict.o redis.o sds.o zmalloc.o lzf_c.o lzf_d.o pqsort.o zipmap.o sha1.o ziplist.o release.o networking.o util.o object.o db.o replication.o rdb.o t_string.o t_list.o t_set.o t_zset.o t_hash.o config.o aof.o pubsub.o multi.o debug.o sort.o intset.o syncio.o cluster.o crc16.o endianconv.o slowlog.o scripting.o bio.o rio.o rand.o memtest.o crc64.o bitops.o sentinel.o notify.o setproctitle.o blocked.o hyperloglog.o latency.o sparkline.o redis-check-rdb.o geo.o REDIS_GEOHASH_OBJ=../deps/geohash-int/geohash.o ../deps/geohash-int/geohash_helper.o REDIS_CLI_NAME=redis-cli REDIS_CLI_OBJ=anet.o sds.o adlist.o redis-cli.o zmalloc.o release.o anet.o ae.o crc64.o diff --git a/src/geo.c b/src/geo.c index 4663a2e50..dd7a1886c 100644 --- a/src/geo.c +++ b/src/geo.c @@ -29,13 +29,15 @@ #include "geo.h" #include "geohash_helper.h" -#include "zset.h" + +/* Things exported from t_zset.c only for geo.c, since it is the only other + * part of Redis that requires close zset introspection. */ +unsigned char *zzlFirstInRange(unsigned char *zl, zrangespec *range); +int zslValueLteMax(double value, zrangespec *spec); /* ==================================================================== - * Redis Add-on Module: geo - * Provides commands: geoadd, georadius, georadiusbymember, - * geoencode, geodecode - * Behaviors: + * This file implements the following commands: + * * - geoadd - add coordinates for value to geoset * - georadius - search radius by coordinates in geoset * - georadiusbymember - search radius based on geoset member position @@ -43,6 +45,40 @@ * - geodecode - decode geohash integer to representative coordinates * ==================================================================== */ +/* ==================================================================== + * geoArray implementation + * ==================================================================== */ + +/* Create a new array of geoPoints. 
*/ +geoArray *geoArrayCreate(void) { + geoArray *ga = zmalloc(sizeof(*ga)); + /* It gets allocated on first geoArrayAppend() call. */ + ga->array = NULL; + ga->buckets = 0; + ga->used = 0; + return ga; +} + +/* Add a new entry and return its pointer so that the caller can populate + * it with data. */ +geoPoint *geoArrayAppend(geoArray *ga) { + if (ga->used == ga->buckets) { + ga->buckets = (ga->buckets == 0) ? 8 : ga->buckets*2; + ga->array = zrealloc(ga->array,sizeof(geoPoint)*ga->buckets); + } + geoPoint *gp = ga->array+ga->used; + ga->used++; + return gp; +} + +/* Destroy a geoArray created with geoArrayCreate(). */ +void geoArrayFree(geoArray *ga) { + size_t i; + for (i = 0; i < ga->used; i++) sdsfree(ga->array[i].member); + zfree(ga->array); + zfree(ga); +} + /* ==================================================================== * Helpers * ==================================================================== */ @@ -65,16 +101,13 @@ static inline int extractLatLongOrReply(redisClient *c, robj **argv, } /* Input Argument Helper */ -/* Decode lat/long from a zset member's score */ +/* Decode lat/long from a zset member's score. + * Returns non-zero on successful decoding. */ static int latLongFromMember(robj *zobj, robj *member, double *latlong) { double score = 0; - if (!zsetScore(zobj, member, &score)) - return 0; - - if (!decodeGeohash(score, latlong)) - return 0; - + if (zsetScore(zobj, member, &score) == REDIS_ERR) return 0; + if (!decodeGeohash(score, latlong)) return 0; return 1; } @@ -120,25 +153,129 @@ static inline void addReplyDoubleDistance(redisClient *c, double d) { addReplyBulkCBuffer(c, dbuf, dlen); } -/* geohash range+zset access helper */ -/* Obtain all members between the min/max of this geohash bounding box. */ -/* Returns list of results. List must be listRelease()'d later. */ -static list *membersOfGeoHashBox(robj *zobj, GeoHashBits hash) { +/* Helper function for geoGetPointsInRange(): given a sorted set score + * representing a point, and another point (the center of our search) and + * a radius, appends this entry as a geoPoint into the specified geoArray + * only if the point is within the search area. + * + * returns REDIS_OK if the point is included, or REIDS_ERR if it is outside. */ +int geoAppendIfWithinRadius(geoArray *ga, double x, double y, double radius, double score, sds member) { + GeoHashArea area = {{0,0},{0,0},{0,0}}; + GeoHashBits hash = { .bits = (uint64_t)score, .step = GEO_STEP_MAX }; + double distance; + + if (!geohashDecodeWGS84(hash, &area)) return REDIS_ERR; /* Can't decode. */ + + double neighbor_y = (area.latitude.min + area.latitude.max) / 2; + double neighbor_x = (area.longitude.min + area.longitude.max) / 2; + + if (!geohashGetDistanceIfInRadiusWGS84(x, y, neighbor_x, neighbor_y, + radius, &distance)) { + return REDIS_ERR; + } + + /* Append the new element. */ + geoPoint *gp = geoArrayAppend(ga); + gp->latitude = neighbor_y; + gp->longitude = neighbor_x; + gp->dist = distance; + gp->member = member; + gp->score = score; + return REDIS_OK; +} + +/* Query a Redis sorted set to extract all the elements between 'min' and + * 'max', appending them into the array of geoPoint structures 'gparray'. + * The command returns the number of elements added to the array. + * + * Elements which are farest than 'radius' from the specified 'x' and 'y' + * coordinates are not included. 
+ * + * The ability of this function to append to an existing set of points is + * important for good performances because querying by radius is performed + * using multiple queries to the sorted set, that we later need to sort + * via qsort. Similarly we need to be able to reject points outside the search + * radius area ASAP in order to allocate and process more points than needed. */ +int geoGetPointsInRange(robj *zobj, double min, double max, double x, double y, double radius, geoArray *ga) { + /* minex 0 = include min in range; maxex 1 = exclude max in range */ + /* That's: min <= val < max */ + zrangespec range = { .min = min, .max = max, .minex = 0, .maxex = 1 }; + size_t origincount = ga->used; + sds member; + + if (zobj->encoding == REDIS_ENCODING_ZIPLIST) { + unsigned char *zl = zobj->ptr; + unsigned char *eptr, *sptr; + unsigned char *vstr = NULL; + unsigned int vlen = 0; + long long vlong = 0; + double score = 0; + + if ((eptr = zzlFirstInRange(zl, &range)) == NULL) { + /* Nothing exists starting at our min. No results. */ + return 0; + } + + sptr = ziplistNext(zl, eptr); + while (eptr) { + score = zzlGetScore(sptr); + + /* If we fell out of range, break. */ + if (!zslValueLteMax(score, &range)) + break; + + /* We know the element exists. ziplistGet should always succeed */ + ziplistGet(eptr, &vstr, &vlen, &vlong); + member = (vstr == NULL) ? sdsfromlonglong(vlong) : + sdsnewlen(vstr,vlen); + if (geoAppendIfWithinRadius(ga,x,y,radius,score,member) + == REDIS_ERR) sdsfree(member); + zzlNext(zl, &eptr, &sptr); + } + } else if (zobj->encoding == REDIS_ENCODING_SKIPLIST) { + zset *zs = zobj->ptr; + zskiplist *zsl = zs->zsl; + zskiplistNode *ln; + + if ((ln = zslFirstInRange(zsl, &range)) == NULL) { + /* Nothing exists starting at our min. No results. */ + return 0; + } + + while (ln) { + robj *o = ln->obj; + /* Abort when the node is no longer in range. */ + if (!zslValueLteMax(ln->score, &range)) + break; + + member = (o->encoding == REDIS_ENCODING_INT) ? + sdsfromlonglong((long)o->ptr) : + sdsdup(o->ptr); + if (geoAppendIfWithinRadius(ga,x,y,radius,ln->score,member) + == REDIS_ERR) sdsfree(member); + ln = ln->level[0].forward; + } + } + return ga->used - origincount; +} + +/* Obtain all members between the min/max of this geohash bounding box. + * Populate a geoArray of GeoPoints by calling geoGetPointsInRange(). + * Return the number of points added to the array. */ +int membersOfGeoHashBox(robj *zobj, GeoHashBits hash, geoArray *ga, double x, double y, double radius) { GeoHashFix52Bits min, max; min = geohashAlign52Bits(hash); hash.bits++; max = geohashAlign52Bits(hash); - return geozrangebyscore(zobj, min, max, -1); /* -1 = no limit */ + return geoGetPointsInRange(zobj, min, max, x, y, radius, ga); } /* Search all eight neighbors + self geohash box */ -static list *membersOfAllNeighbors(robj *zobj, GeoHashRadius n, double x, - double y, double radius) { - list *l = NULL; +int membersOfAllNeighbors(robj *zobj, GeoHashRadius n, double x, double y, double radius, geoArray *ga) { GeoHashBits neighbors[9]; - unsigned int i; + unsigned int i, count = 0; neighbors[0] = n.hash; neighbors[1] = n.neighbors.north; @@ -153,76 +290,11 @@ static list *membersOfAllNeighbors(robj *zobj, GeoHashRadius n, double x, /* For each neighbor (*and* our own hashbox), get all the matching * members and add them to the potential result list. 
*/ for (i = 0; i < sizeof(neighbors) / sizeof(*neighbors); i++) { - list *r; - if (HASHISZERO(neighbors[i])) continue; - - r = membersOfGeoHashBox(zobj, neighbors[i]); - if (!r) - continue; - - if (!l) { - l = r; - } else { - listJoin(l, r); - } + count += membersOfGeoHashBox(zobj, neighbors[i], ga, x, y, radius); } - - /* if no results across any neighbors (*and* ourself, which is unlikely), - * then just give up. */ - if (!l) - return NULL; - - /* Iterate over all matching results in the combined 9-grid search area */ - /* Remove any results outside of our search radius. */ - listIter li; - listNode *ln; - listRewind(l, &li); - while ((ln = listNext(&li))) { - struct zipresult *zr = listNodeValue(ln); - GeoHashArea area = {{0,0},{0,0},{0,0}}; - GeoHashBits hash = { .bits = (uint64_t)zr->score, - .step = GEO_STEP_MAX }; - - if (!geohashDecodeWGS84(hash, &area)) { - /* Perhaps we should delete this node if the decode fails? */ - continue; - } - - double neighbor_y = (area.latitude.min + area.latitude.max) / 2; - double neighbor_x = (area.longitude.min + area.longitude.max) / 2; - - double distance; - if (!geohashGetDistanceIfInRadiusWGS84(x, y, neighbor_x, neighbor_y, - radius, &distance)) { - /* If result is in the grid, but not in our radius, remove it. */ - listDelNode(l, ln); -#ifdef DEBUG - fprintf(stderr, "No match for neighbor (%f, %f) within (%f, %f) at " - "distance %f\n", - neighbor_y, neighbor_x, y, x, distance); -#endif - } else { -/* Else: bueno. */ -#ifdef DEBUG - fprintf( - stderr, - "Matched neighbor (%f, %f) within (%f, %f) at distance %f\n", - neighbor_y, neighbor_x, y, x, distance); -#endif - zr->distance = distance; - } - } - - /* We found results, but rejected all of them as out of range. Clean up. */ - if (!listLength(l)) { - listRelease(l); - l = NULL; - } - - /* Success! */ - return l; + return count; } /* Sort comparators for qsort() */ @@ -406,16 +478,17 @@ static void geoRadiusGeneric(redisClient *c, int type) { double x = latlong[1]; /* Search the zset for all matching points */ - list *found_matches = - membersOfAllNeighbors(zobj, georadius, x, y, radius_meters); + geoArray *ga = geoArrayCreate(); + membersOfAllNeighbors(zobj, georadius, x, y, radius_meters, ga); /* If no matching results, the user gets an empty reply. */ - if (!found_matches) { + if (ga->used == 0) { addReply(c, shared.emptymultibulk); + geoArrayFree(ga); return; } - long result_length = listLength(found_matches); + long result_length = ga->used; long option_length = 0; /* Our options are self-contained nested multibulk replies, so we @@ -435,63 +508,40 @@ static void geoRadiusGeneric(redisClient *c, int type) { * user enabled for this request. */ addReplyMultiBulkLen(c, result_length); - /* Iterate over results, populate struct used for sorting and result sending - */ - listIter li; - listRewind(found_matches, &li); - struct geoPoint gp[result_length]; - /* populate gp array from our results */ - for (int i = 0; i < result_length; i++) { - struct zipresult *zr = listNodeValue(listNext(&li)); - - gp[i].member = NULL; - gp[i].set = key->ptr; - gp[i].dist = zr->distance / conversion; - gp[i].userdata = zr; - - /* The layout of geoPoint allows us to pass the start offset - * of the struct directly to decodeGeohash. 
*/ - decodeGeohash(zr->score, (double *)(gp + i)); - } - /* Process [optional] requested sorting */ if (sort == SORT_ASC) { - qsort(gp, result_length, sizeof(*gp), sort_gp_asc); + qsort(ga->array, result_length, sizeof(geoPoint), sort_gp_asc); } else if (sort == SORT_DESC) { - qsort(gp, result_length, sizeof(*gp), sort_gp_desc); + qsort(ga->array, result_length, sizeof(geoPoint), sort_gp_desc); } /* Finally send results back to the caller */ - for (int i = 0; i < result_length; i++) { - struct zipresult *zr = gp[i].userdata; + int i; + for (i = 0; i < result_length; i++) { + geoPoint *gp = ga->array+i; + gp->dist /= conversion; /* Fix according to unit. */ /* If we have options in option_length, return each sub-result * as a nested multi-bulk. Add 1 to account for result value itself. */ if (option_length) addReplyMultiBulkLen(c, option_length + 1); - switch (zr->type) { - case ZR_LONG: - addReplyBulkLongLong(c, zr->val.v); - break; - case ZR_STRING: - addReplyBulkCBuffer(c, zr->val.s, sdslen(zr->val.s)); - break; - } + addReplyBulkSds(c,gp->member); + gp->member = NULL; if (withdist) - addReplyDoubleDistance(c, gp[i].dist); + addReplyDoubleDistance(c, gp->dist); if (withhash) - addReplyLongLong(c, zr->score); + addReplyLongLong(c, gp->score); if (withcoords) { addReplyMultiBulkLen(c, 2); - addReplyDouble(c, gp[i].latitude); - addReplyDouble(c, gp[i].longitude); + addReplyDouble(c, gp->latitude); + addReplyDouble(c, gp->longitude); } } - listRelease(found_matches); + geoArrayFree(ga); } void geoRadiusCommand(redisClient *c) { diff --git a/src/geo.h b/src/geo.h index 9aa85cf9d..9cd1f56b4 100644 --- a/src/geo.h +++ b/src/geo.h @@ -9,13 +9,20 @@ void geoRadiusByMemberCommand(redisClient *c); void geoRadiusCommand(redisClient *c); void geoAddCommand(redisClient *c); -struct geoPoint { +/* Structures used inside geo.c in order to represent points and array of + * points on the earth. */ +typedef struct geoPoint { double latitude; double longitude; double dist; - char *set; + double score; char *member; - void *userdata; -}; +} geoPoint; + +typedef struct geoArray { + struct geoPoint *array; + size_t buckets; + size_t used; +} geoArray; #endif diff --git a/src/redis.h b/src/redis.h index 3115689f5..7dd2137ac 100644 --- a/src/redis.h +++ b/src/redis.h @@ -1594,8 +1594,4 @@ void redisLogHexDump(int level, char *descr, void *value, size_t len); #define redisDebugMark() \ printf("-- MARK %s:%d --\n", __FILE__, __LINE__) -/***** TEMPORARY *******/ -#include "zset.h" -/***** END TEMPORARY *******/ - #endif diff --git a/src/zset.c b/src/zset.c deleted file mode 100644 index 7a80d3a47..000000000 --- a/src/zset.c +++ /dev/null @@ -1,161 +0,0 @@ -#include "zset.h" - -/* t_zset.c prototypes (there's no t_zset.h) */ -unsigned char *zzlFirstInRange(unsigned char *zl, zrangespec *range); -unsigned char *zzlFind(unsigned char *zl, robj *ele, double *score); -int zzlLexValueLteMax(unsigned char *p, zlexrangespec *spec); - -/* Converted from static in t_zset.c: */ -int zslValueLteMax(double value, zrangespec *spec); - -/* ==================================================================== - * Direct Redis DB Interaction - * ==================================================================== */ - -/* Largely extracted from genericZrangebyscoreCommand() in t_zset.c */ -/* The zrangebyscoreCommand expects to only operate on a live redisClient, - * but we need results returned to us, not sent over an async socket. 
*/ -list *geozrangebyscore(robj *zobj, double min, double max, int limit) { - /* minex 0 = include min in range; maxex 1 = exclude max in range */ - /* That's: min <= val < max */ - zrangespec range = { .min = min, .max = max, .minex = 0, .maxex = 1 }; - list *l = NULL; /* result list */ - - if (zobj->encoding == REDIS_ENCODING_ZIPLIST) { - unsigned char *zl = zobj->ptr; - unsigned char *eptr, *sptr; - unsigned char *vstr = NULL; - unsigned int vlen = 0; - long long vlong = 0; - double score = 0; - - if ((eptr = zzlFirstInRange(zl, &range)) == NULL) { - /* Nothing exists starting at our min. No results. */ - return NULL; - } - - l = listCreate(); - - sptr = ziplistNext(zl, eptr); - - while (eptr && limit--) { - score = zzlGetScore(sptr); - - /* If we fell out of range, break. */ - if (!zslValueLteMax(score, &range)) - break; - - /* We know the element exists. ziplistGet should always succeed */ - ziplistGet(eptr, &vstr, &vlen, &vlong); - if (vstr == NULL) { - listAddNodeTail(l, result_long(score, vlong)); - } else { - listAddNodeTail(l, result_str(score, vstr, vlen)); - } - zzlNext(zl, &eptr, &sptr); - } - } else if (zobj->encoding == REDIS_ENCODING_SKIPLIST) { - zset *zs = zobj->ptr; - zskiplist *zsl = zs->zsl; - zskiplistNode *ln; - - if ((ln = zslFirstInRange(zsl, &range)) == NULL) { - /* Nothing exists starting at our min. No results. */ - return NULL; - } - - l = listCreate(); - - while (ln && limit--) { - robj *o = ln->obj; - /* Abort when the node is no longer in range. */ - if (!zslValueLteMax(ln->score, &range)) - break; - - if (o->encoding == REDIS_ENCODING_INT) { - listAddNodeTail(l, result_long(ln->score, (long)o->ptr)); - } else { - listAddNodeTail(l, - result_str(ln->score, o->ptr, sdslen(o->ptr))); - } - - ln = ln->level[0].forward; - } - } - if (l) { - listSetFreeMethod(l, (void (*)(void *ptr)) & free_zipresult); - } - - return l; -} - -/* ==================================================================== - * Helpers - * ==================================================================== */ - -/* join 'join' to 'join_to' and free 'join' container */ -void listJoin(list *join_to, list *join) { - /* If the current list has zero size, move join to become join_to. - * If not, append the new list to the current list. */ - if (join_to->len == 0) { - join_to->head = join->head; - } else { - join_to->tail->next = join->head; - join->head->prev = join_to->tail; - join_to->tail = join->tail; - } - - /* Update total element count */ - join_to->len += join->len; - - /* Release original list container. Internal nodes were transferred over. */ - zfree(join); -} - -/* A ziplist member may be either a long long or a string. We create the - * contents of our return zipresult based on what the ziplist contained. */ -static struct zipresult *result(double score, long long v, unsigned char *s, - int len) { - struct zipresult *r = zmalloc(sizeof(*r)); - - /* If string and length, become a string. */ - /* Else, if not string or no length, become a long. 
*/ - if (s && len >= 0) - r->type = ZR_STRING; - else if (!s || len < 0) - r->type = ZR_LONG; - - r->score = score; - switch (r->type) { - case(ZR_LONG) : - r->val.v = v; - break; - case(ZR_STRING) : - r->val.s = sdsnewlen(s, len); - break; - } - return r; -} - -struct zipresult *result_long(double score, long long v) { - return result(score, v, NULL, -1); -} - -struct zipresult *result_str(double score, unsigned char *str, int len) { - return result(score, 0, str, len); -} - -void free_zipresult(struct zipresult *r) { - if (!r) - return; - - switch (r->type) { - case(ZR_LONG) : - break; - case(ZR_STRING) : - sdsfree(r->val.s); - break; - } - - zfree(r); -} diff --git a/src/zset.h b/src/zset.h deleted file mode 100644 index a861811e4..000000000 --- a/src/zset.h +++ /dev/null @@ -1,29 +0,0 @@ -#ifndef __ZSET_H__ -#define __ZSET_H__ - -#include "redis.h" - -#define ZR_LONG 1 -#define ZR_STRING 2 -struct zipresult { - double score; - union { - long long v; - sds s; - } val; - double distance; /* distance is in meters */ - char type; /* access type for the union */ -}; - -/* Redis DB Access */ -list *geozrangebyscore(robj *zobj, double min, double max, int limit); - -/* New list operation: append one list to another */ -void listJoin(list *join_to, list *join); - -/* Helpers for returning zrangebyscore results */ -struct zipresult *result_str(double score, unsigned char *str, int len); -struct zipresult *result_long(double score, long long v); -void free_zipresult(struct zipresult *r); - -#endif From 51b4a4724b9570584ae1bbb0e30e3bf4d8db736a Mon Sep 17 00:00:00 2001 From: antirez Date: Tue, 23 Jun 2015 09:03:56 +0200 Subject: [PATCH 0260/1928] Geo: use the high level API to decode in geoAppendIfWithinRadius() --- src/geo.c | 19 +++++++------------ 1 file changed, 7 insertions(+), 12 deletions(-) diff --git a/src/geo.c b/src/geo.c index dd7a1886c..eb79144ea 100644 --- a/src/geo.c +++ b/src/geo.c @@ -160,24 +160,19 @@ static inline void addReplyDoubleDistance(redisClient *c, double d) { * * returns REDIS_OK if the point is included, or REIDS_ERR if it is outside. */ int geoAppendIfWithinRadius(geoArray *ga, double x, double y, double radius, double score, sds member) { - GeoHashArea area = {{0,0},{0,0},{0,0}}; - GeoHashBits hash = { .bits = (uint64_t)score, .step = GEO_STEP_MAX }; - double distance; + double distance, latlong[2]; - if (!geohashDecodeWGS84(hash, &area)) return REDIS_ERR; /* Can't decode. */ - - double neighbor_y = (area.latitude.min + area.latitude.max) / 2; - double neighbor_x = (area.longitude.min + area.longitude.max) / 2; - - if (!geohashGetDistanceIfInRadiusWGS84(x, y, neighbor_x, neighbor_y, - radius, &distance)) { + if (!decodeGeohash(score,latlong)) return REDIS_ERR; /* Can't decode. */ + if (!geohashGetDistanceIfInRadiusWGS84(x,y,latlong[1], latlong[0], + radius, &distance)) + { return REDIS_ERR; } /* Append the new element. 
*/ geoPoint *gp = geoArrayAppend(ga); - gp->latitude = neighbor_y; - gp->longitude = neighbor_x; + gp->latitude = latlong[0]; + gp->longitude = latlong[1]; gp->dist = distance; gp->member = member; gp->score = score; From a3018a215f958d42224413de04f9e34387cac4c4 Mon Sep 17 00:00:00 2001 From: antirez Date: Tue, 23 Jun 2015 09:30:14 +0200 Subject: [PATCH 0261/1928] Geo: rename x,y to lat,lon for clarity --- src/geo.c | 25 ++++++++++++------------- 1 file changed, 12 insertions(+), 13 deletions(-) diff --git a/src/geo.c b/src/geo.c index eb79144ea..992096a13 100644 --- a/src/geo.c +++ b/src/geo.c @@ -159,11 +159,13 @@ static inline void addReplyDoubleDistance(redisClient *c, double d) { * only if the point is within the search area. * * returns REDIS_OK if the point is included, or REIDS_ERR if it is outside. */ -int geoAppendIfWithinRadius(geoArray *ga, double x, double y, double radius, double score, sds member) { +int geoAppendIfWithinRadius(geoArray *ga, double lat, double lon, double radius, double score, sds member) { double distance, latlong[2]; if (!decodeGeohash(score,latlong)) return REDIS_ERR; /* Can't decode. */ - if (!geohashGetDistanceIfInRadiusWGS84(x,y,latlong[1], latlong[0], + /* Note that geohashGetDistanceIfInRadiusWGS84() takes arguments in + * reverse order: longitude first, latitude later. */ + if (!geohashGetDistanceIfInRadiusWGS84(lon,lat,latlong[1], latlong[0], radius, &distance)) { return REDIS_ERR; @@ -191,7 +193,7 @@ int geoAppendIfWithinRadius(geoArray *ga, double x, double y, double radius, dou * using multiple queries to the sorted set, that we later need to sort * via qsort. Similarly we need to be able to reject points outside the search * radius area ASAP in order to allocate and process more points than needed. */ -int geoGetPointsInRange(robj *zobj, double min, double max, double x, double y, double radius, geoArray *ga) { +int geoGetPointsInRange(robj *zobj, double min, double max, double lat, double lon, double radius, geoArray *ga) { /* minex 0 = include min in range; maxex 1 = exclude max in range */ /* That's: min <= val < max */ zrangespec range = { .min = min, .max = max, .minex = 0, .maxex = 1 }; @@ -223,7 +225,7 @@ int geoGetPointsInRange(robj *zobj, double min, double max, double x, double y, ziplistGet(eptr, &vstr, &vlen, &vlong); member = (vstr == NULL) ? sdsfromlonglong(vlong) : sdsnewlen(vstr,vlen); - if (geoAppendIfWithinRadius(ga,x,y,radius,score,member) + if (geoAppendIfWithinRadius(ga,lat,lon,radius,score,member) == REDIS_ERR) sdsfree(member); zzlNext(zl, &eptr, &sptr); } @@ -246,7 +248,7 @@ int geoGetPointsInRange(robj *zobj, double min, double max, double x, double y, member = (o->encoding == REDIS_ENCODING_INT) ? sdsfromlonglong((long)o->ptr) : sdsdup(o->ptr); - if (geoAppendIfWithinRadius(ga,x,y,radius,ln->score,member) + if (geoAppendIfWithinRadius(ga,lat,lon,radius,ln->score,member) == REDIS_ERR) sdsfree(member); ln = ln->level[0].forward; } @@ -257,18 +259,18 @@ int geoGetPointsInRange(robj *zobj, double min, double max, double x, double y, /* Obtain all members between the min/max of this geohash bounding box. * Populate a geoArray of GeoPoints by calling geoGetPointsInRange(). * Return the number of points added to the array. 
*/ -int membersOfGeoHashBox(robj *zobj, GeoHashBits hash, geoArray *ga, double x, double y, double radius) { +int membersOfGeoHashBox(robj *zobj, GeoHashBits hash, geoArray *ga, double lat, double lon, double radius) { GeoHashFix52Bits min, max; min = geohashAlign52Bits(hash); hash.bits++; max = geohashAlign52Bits(hash); - return geoGetPointsInRange(zobj, min, max, x, y, radius, ga); + return geoGetPointsInRange(zobj, min, max, lat, lon, radius, ga); } /* Search all eight neighbors + self geohash box */ -int membersOfAllNeighbors(robj *zobj, GeoHashRadius n, double x, double y, double radius, geoArray *ga) { +int membersOfAllNeighbors(robj *zobj, GeoHashRadius n, double lat, double lon, double radius, geoArray *ga) { GeoHashBits neighbors[9]; unsigned int i, count = 0; @@ -287,7 +289,7 @@ int membersOfAllNeighbors(robj *zobj, GeoHashRadius n, double x, double y, doubl for (i = 0; i < sizeof(neighbors) / sizeof(*neighbors); i++) { if (HASHISZERO(neighbors[i])) continue; - count += membersOfGeoHashBox(zobj, neighbors[i], ga, x, y, radius); + count += membersOfGeoHashBox(zobj, neighbors[i], ga, lat, lon, radius); } return count; } @@ -468,13 +470,10 @@ static void geoRadiusGeneric(redisClient *c, int type) { #ifdef DEBUG printf("Searching with step size: %d\n", georadius.hash.step); #endif - /* {Lat, Long} = {y, x} */ - double y = latlong[0]; - double x = latlong[1]; /* Search the zset for all matching points */ geoArray *ga = geoArrayCreate(); - membersOfAllNeighbors(zobj, georadius, x, y, radius_meters, ga); + membersOfAllNeighbors(zobj, georadius, latlong[0], latlong[1], radius_meters, ga); /* If no matching results, the user gets an empty reply. */ if (ga->used == 0) { From ae5fd11563230e06fbc637d63028b59a65e4bf51 Mon Sep 17 00:00:00 2001 From: antirez Date: Tue, 23 Jun 2015 09:35:43 +0200 Subject: [PATCH 0262/1928] Geo: more x,y renamed lat,lon --- src/geo.c | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/src/geo.c b/src/geo.c index 992096a13..8720413f4 100644 --- a/src/geo.c +++ b/src/geo.c @@ -560,8 +560,8 @@ void geoDecodeCommand(redisClient *c) { geohash.step = GEO_STEP_MAX; geohashDecodeWGS84(geohash, &area); - double y = (area.latitude.min + area.latitude.max) / 2; - double x = (area.longitude.min + area.longitude.max) / 2; + double lat = (area.latitude.min + area.latitude.max) / 2; + double lon = (area.longitude.min + area.longitude.max) / 2; /* Returning three nested replies */ addReplyMultiBulkLen(c, 3); @@ -578,8 +578,8 @@ void geoDecodeCommand(redisClient *c) { /* Last, the averaged center of this bounding box */ addReplyMultiBulkLen(c, 2); - addReplyDouble(c, y); - addReplyDouble(c, x); + addReplyDouble(c, lat); + addReplyDouble(c, lon); } void geoEncodeCommand(redisClient *c) { @@ -613,8 +613,8 @@ void geoEncodeCommand(redisClient *c) { GeoHashArea area; geohashDecodeWGS84(geohash, &area); - double y = (area.latitude.min + area.latitude.max) / 2; - double x = (area.longitude.min + area.longitude.max) / 2; + double lat = (area.latitude.min + area.latitude.max) / 2; + double lon = (area.longitude.min + area.longitude.max) / 2; /* Return four nested multibulk replies. 
*/ addReplyMultiBulkLen(c, 4); @@ -634,6 +634,6 @@ void geoEncodeCommand(redisClient *c) { /* Return the averaged center */ addReplyMultiBulkLen(c, 2); - addReplyDouble(c, y); - addReplyDouble(c, x); + addReplyDouble(c, lat); + addReplyDouble(c, lon); } From bb3284563c1465556f0d02ab7b4616dcc414b930 Mon Sep 17 00:00:00 2001 From: antirez Date: Tue, 23 Jun 2015 10:18:23 +0200 Subject: [PATCH 0263/1928] Geo: GEOADD implementation improved, replication fixed 1. We no longer use a fake client but just rewriting. 2. We group all the inserts into a single ZADD dispatch (big speed win). 3. As a side effect of the correct implementation, replication works. 4. The return value of the command is now correct. --- src/geo.c | 63 +++++++++++++++++++++--------------------------- src/networking.c | 10 ++++++++ src/redis.h | 1 + 3 files changed, 38 insertions(+), 36 deletions(-) diff --git a/src/geo.c b/src/geo.c index 8720413f4..ddc70dfbf 100644 --- a/src/geo.c +++ b/src/geo.c @@ -318,8 +318,6 @@ void geoAddCommand(redisClient *c) { /* args 0-4: [cmd, key, lat, lng, val]; optional 5-6: [radius, units] * - OR - * args 0-N: [cmd, key, lat, lng, val, lat2, lng2, val2, ...] */ - robj *cmd = c->argv[0]; - robj *key = c->argv[1]; /* Prepare for the three different forms of the add command. */ double radius_meters = 0; @@ -338,56 +336,49 @@ void geoAddCommand(redisClient *c) { return; } - redisClient *client = c; int elements = (c->argc - 2) / 3; - /* elements will always be correct size (integer math floors for us if we - * have 6 or 7 total arguments) */ - if (elements > 1) { - /* We should probably use a static client and not create/free it - * for every multi-add */ - client = createClient(-1); /* fake client for multi-zadd */ + int argc = 2+elements*2; /* ZADD key score ele ... */ + robj **argv = zcalloc(argc*sizeof(robj*)); + argv[0] = createRawStringObject("zadd",4); + argv[1] = c->argv[1]; /* key */ + incrRefCount(argv[1]); - /* Tell fake client to use the same DB as our calling client. */ - selectDb(client, c->db->id); - } + /* Create the argument vector to call ZADD in order to add all + * the score,value pairs to the requested zset, where score is actually + * an encoded version of lat,long. */ + uint8_t step = geohashEstimateStepsByRadius(radius_meters); + int i; + for (i = 0; i < elements; i++) { + double latlong[elements * 2]; - /* Capture all lat/long components up front so if we encounter an error we - * return before making any changes to the database. */ - double latlong[elements * 2]; - for (int i = 0; i < elements; i++) { - if (!extractLatLongOrReply(c, (c->argv + 2) + (i * 3), - latlong + (i * 2))) + if (!extractLatLongOrReply(c, (c->argv+2)+(i*3),latlong)) { + for (i = 0; i < argc; i++) + if (argv[i]) decrRefCount(argv[i]); + zfree(argv); return; - } - - /* Add all (lat, long, value) triples to the requested zset */ - for (int i = 0; i < elements; i++) { - uint8_t step = geohashEstimateStepsByRadius(radius_meters); + } #ifdef DEBUG printf("Adding with step size: %d\n", step); #endif + + /* Turn the coordinates into the score of the element. 
*/ GeoHashBits hash; - int ll_offset = i * 2; - double latitude = latlong[ll_offset]; - double longitude = latlong[ll_offset + 1]; + double latitude = latlong[0]; + double longitude = latlong[1]; geohashEncodeWGS84(latitude, longitude, step, &hash); GeoHashFix52Bits bits = geohashAlign52Bits(hash); robj *score = createObject(REDIS_STRING, sdsfromlonglong(bits)); robj *val = c->argv[2 + i * 3 + 2]; - /* (base args) + (offset for this triple) + (offset of value arg) */ - - rewriteClientCommandVector(client, 4, cmd, key, score, val); - decrRefCount(score); - zaddCommand(client); + argv[2+i*2] = score; + argv[3+i*2] = val; + incrRefCount(val); } - /* If we used a fake client, return a real reply then free fake client. */ - if (client != c) { - addReplyLongLong(c, elements); - freeClient(client); - } + /* Finally call ZADD that will do the work for us. */ + replaceClientCommandVector(c,argc,argv); + zaddCommand(c); } #define SORT_NONE 0 diff --git a/src/networking.c b/src/networking.c index 260dcda5e..eb033ae61 100644 --- a/src/networking.c +++ b/src/networking.c @@ -1527,6 +1527,16 @@ void rewriteClientCommandVector(redisClient *c, int argc, ...) { va_end(ap); } +/* Completely replace the client command vector with the provided one. */ +void replaceClientCommandVector(redisClient *c, int argc, robj **argv) { + freeClientArgv(c); + zfree(c->argv); + c->argv = argv; + c->argc = argc; + c->cmd = lookupCommandOrOriginal(c->argv[0]->ptr); + redisAssertWithInfo(c,NULL,c->cmd != NULL); +} + /* Rewrite a single item in the command vector. * The new val ref count is incremented, and the old decremented. */ void rewriteClientCommandArgument(redisClient *c, int i, robj *newval) { diff --git a/src/redis.h b/src/redis.h index 7dd2137ac..3e7641a93 100644 --- a/src/redis.h +++ b/src/redis.h @@ -1074,6 +1074,7 @@ sds catClientInfoString(sds s, redisClient *client); sds getAllClientsInfoString(void); void rewriteClientCommandVector(redisClient *c, int argc, ...); void rewriteClientCommandArgument(redisClient *c, int i, robj *newval); +void replaceClientCommandVector(redisClient *c, int argc, robj **argv); unsigned long getClientOutputBufferMemoryUsage(redisClient *c); void freeClientsInAsyncFreeQueue(void); void asyncCloseClientOnOutputBufferLimitReached(redisClient *c); From 0425c60381f9f2d1adf5a08b48f305ecf30dbfa6 Mon Sep 17 00:00:00 2001 From: antirez Date: Tue, 23 Jun 2015 10:19:40 +0200 Subject: [PATCH 0264/1928] Geo: test GEOADD with wrong input coordinates --- tests/unit/geo.tcl | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/tests/unit/geo.tcl b/tests/unit/geo.tcl index f42928f91..116659190 100644 --- a/tests/unit/geo.tcl +++ b/tests/unit/geo.tcl @@ -7,6 +7,14 @@ start_server {tags {"geo"}} { r geoadd nyc 40.747533 -73.9454966 "lic market" } {0} + test {GEOADD invalid coordinates} { + catch { + r geoadd nyc 40.747533 -73.9454966 "lic market" \ + foo bar "luck market" + } err + set err + } {*valid*} + test {GEOADD multi add} { r geoadd nyc 40.7648057 -73.9733487 "central park n/q/r" 40.7362513 -73.9903085 "union square" 40.7126674 -74.0131604 "wtc one" 40.6428986 -73.7858139 "jfk" 40.7498929 -73.9375699 "q4" 40.7480973 -73.9564142 4545 } {6} From 8d5ad19d154dde494a87ce0af29811bc2d3217e1 Mon Sep 17 00:00:00 2001 From: antirez Date: Tue, 23 Jun 2015 10:27:45 +0200 Subject: [PATCH 0265/1928] Geo: return REDIS_* where appropriate, improve commenting --- src/geo.c | 31 +++++++++++++++++++------------ 1 file changed, 19 insertions(+), 12 deletions(-) diff --git a/src/geo.c b/src/geo.c index 
ddc70dfbf..c26f76e63 100644 --- a/src/geo.c +++ b/src/geo.c @@ -88,30 +88,37 @@ static inline int decodeGeohash(double bits, double *latlong) { } /* Input Argument Helper */ -/* Take a pointer to the latitude arg then use the next arg for longitude */ +/* Take a pointer to the latitude arg then use the next arg for longitude. + * On parse error REDIS_ERR is returned, otherwise REDIS_OK. */ static inline int extractLatLongOrReply(redisClient *c, robj **argv, double *latlong) { for (int i = 0; i < 2; i++) { if (getDoubleFromObjectOrReply(c, argv[i], latlong + i, NULL) != REDIS_OK) { - return 0; + return REDIS_ERR; } } - return 1; + return REDIS_OK; } /* Input Argument Helper */ /* Decode lat/long from a zset member's score. - * Returns non-zero on successful decoding. */ + * Returns REDIS_OK on successful decoding, otherwise REDIS_ERR is returned. */ static int latLongFromMember(robj *zobj, robj *member, double *latlong) { double score = 0; - if (zsetScore(zobj, member, &score) == REDIS_ERR) return 0; - if (!decodeGeohash(score, latlong)) return 0; - return 1; + if (zsetScore(zobj, member, &score) == REDIS_ERR) return REDIS_ERR; + if (!decodeGeohash(score, latlong)) return REDIS_ERR; + return REDIS_OK; } -/* Input Argument Helper */ +/* Input Argument Helper. + * Extract the dinstance from the specified two arguments starting at 'argv' + * that shouldbe in the form: and return the dinstance in the + * specified unit on success. *conversino is populated with the coefficient + * to use in order to convert meters to the unit. + * + * On error a value less than zero is returned. */ static double extractDistanceOrReply(redisClient *c, robj **argv, double *conversion) { double distance; @@ -351,7 +358,7 @@ void geoAddCommand(redisClient *c) { for (i = 0; i < elements; i++) { double latlong[elements * 2]; - if (!extractLatLongOrReply(c, (c->argv+2)+(i*3),latlong)) { + if (extractLatLongOrReply(c, (c->argv+2)+(i*3),latlong) == REDIS_ERR) { for (i = 0; i < argc; i++) if (argv[i]) decrRefCount(argv[i]); zfree(argv); @@ -405,12 +412,12 @@ static void geoRadiusGeneric(redisClient *c, int type) { double latlong[2] = { 0 }; if (type == RADIUS_COORDS) { base_args = 6; - if (!extractLatLongOrReply(c, c->argv + 2, latlong)) + if (extractLatLongOrReply(c, c->argv + 2, latlong) == REDIS_ERR) return; } else if (type == RADIUS_MEMBER) { base_args = 5; robj *member = c->argv[2]; - if (!latLongFromMember(zobj, member, latlong)) { + if (latLongFromMember(zobj, member, latlong) == REDIS_ERR) { addReplyError(c, "could not decode requested zset member"); return; } @@ -587,7 +594,7 @@ void geoEncodeCommand(redisClient *c) { } double latlong[2]; - if (!extractLatLongOrReply(c, c->argv + 1, latlong)) return; + if (extractLatLongOrReply(c, c->argv + 1, latlong) == REDIS_ERR) return; /* Encode lat/long into our geohash */ GeoHashBits geohash; From cf89a19f162ce0550b1c3cfbaf11a64659856ded Mon Sep 17 00:00:00 2001 From: antirez Date: Tue, 23 Jun 2015 15:02:37 +0200 Subject: [PATCH 0266/1928] Geo: GEORADIUS fuzzy testing by reimplementing it in Tcl. We set random points in the world, pick a random position, and check if the returned points by Redis match the ones computed by Tcl by brute forcing all the points using the distance between two points formula. This approach is sounding since immediately resulted in finding a bug in the original implementation. 
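The fuzzy test added below brute-forces every generated point with the haversine great-circle formula and compares the resulting set against GEORADIUS. For reference, the same check as a standalone C sketch: the Earth radius constant is the one used by the Tcl helper, the deg_rad()/haversine() names and the tiny main() are illustrative, and the sample coordinates are the far/center points quoted by a later commit in this series, which states they are less than 200 km apart.

    /* Hedged C version of the brute-force distance check (build: cc haversine.c -lm). */
    #include <math.h>
    #include <stdio.h>

    #define EARTH_RADIUS_METERS 6372797.560856

    /* Degrees to radians, written like the Tcl helper: atan(1)*8 == 2*pi. */
    static double deg_rad(double deg) { return deg * (atan(1.0) * 8.0) / 360.0; }

    /* Great-circle (haversine) distance between two lat/long pairs, in meters. */
    static double haversine(double lat1d, double lon1d, double lat2d, double lon2d) {
        double lat1r = deg_rad(lat1d), lon1r = deg_rad(lon1d);
        double lat2r = deg_rad(lat2d), lon2r = deg_rad(lon2d);
        double u = sin((lat2r - lat1r) / 2);
        double v = sin((lon2r - lon1r) / 2);
        return 2.0 * EARTH_RADIUS_METERS *
               asin(sqrt(u * u + cos(lat1r) * cos(lat2r) * v * v));
    }

    int main(void) {
        /* The "far" and "center" points used later in this patch series. */
        double d = haversine(67.575457940146066, -62.001317572780565,
                             66.685439060295664, -58.925040587282297);
        printf("distance: %.3f km (within 200 km: %s)\n",
               d / 1000.0, d <= 200000.0 ? "yes" : "no");
        return 0;
    }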
--- tests/unit/geo.tcl | 63 ++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 63 insertions(+) diff --git a/tests/unit/geo.tcl b/tests/unit/geo.tcl index 116659190..09028438e 100644 --- a/tests/unit/geo.tcl +++ b/tests/unit/geo.tcl @@ -1,3 +1,28 @@ +# Helper functins to simulate search-in-radius in the Tcl side in order to +# verify the Redis implementation with a fuzzy test. +proc geo_degrad deg {expr {$deg*atan(1)*8/360}} + +proc geo_distance {lat1d lon1d lat2d lon2d} { + set lat1r [geo_degrad $lat1d] + set lon1r [geo_degrad $lon1d] + set lat2r [geo_degrad $lat2d] + set lon2r [geo_degrad $lon2d] + set u [expr {sin(($lat2r - $lat1r) / 2)}] + set v [expr {sin(($lon2r - $lon1r) / 2)}] + expr {2.0 * 6372797.560856 * \ + asin(sqrt($u * $u + cos($lat1r) * cos($lat2r) * $v * $v))} +} + +proc geo_random_point {latvar lonvar} { + upvar 1 $latvar lat + upvar 1 $lonvar lon + # Note that the actual latitude limit should be -85 to +85, we restrict + # the test to -70 to +70 since in this range the algorithm is more precise + # while outside this range occasionally some element may be missing. + set lat [expr {-70 + rand()*140}] + set lon [expr {-180 + rand()*360}] +} + start_server {tags {"geo"}} { test {GEOADD create} { r geoadd nyc 40.747533 -73.9454966 "lic market" @@ -50,4 +75,42 @@ start_server {tags {"geo"}} { } {{41.235888125243704 1.8063229322433472}\ {41.235890659964866 1.806328296661377}\ {41.235889392604285 1.8063256144523621}} + + test {GEOADD + GEORANGE randomized test} { + set attempt 10 + while {[incr attempt -1]} { + unset -nocomplain debuginfo + set srand_seed [randomInt 1000000] + lappend debuginfo "srand_seed is $srand_seed" + expr {srand($srand_seed)} ; # If you need a reproducible run + r del mypoints + set radius_km [expr {[randomInt 200]+10}] + set radius_m [expr {$radius_km*1000}] + geo_random_point search_lat search_lon + lappend debuginfo "Search area: $search_lat,$search_lon $radius_km km" + set tcl_result {} + set argv {} + for {set j 0} {$j < 20000} {incr j} { + geo_random_point lat lon + lappend argv $lat $lon "place:$j" + if {[geo_distance $lat $lon $search_lat $search_lon] < $radius_m} { + lappend tcl_result "place:$j" + lappend debuginfo "place:$j $lat $lon [expr {[geo_distance $lat $lon $search_lat $search_lon]/1000}] km" + } + } + r geoadd mypoints {*}$argv + set res [lsort [r georadius mypoints $search_lat $search_lon $radius_km km]] + set res2 [lsort $tcl_result] + set test_result OK + if {$res != $res2} { + puts "Redis: $res" + puts "Tcl : $res2" + puts [join $debuginfo "\n"] + set test_result FAIL + } + unset -nocomplain debuginfo + if {$test_result ne {OK}} break + } + set test_result + } {OK} } From 55c4a365d74c5218a9303e31f1f7b67c424c9c2c Mon Sep 17 00:00:00 2001 From: antirez Date: Wed, 24 Jun 2015 10:42:16 +0200 Subject: [PATCH 0267/1928] Geo: Fix geohashEstimateStepsByRadius() step underestimation. The returned step was in some case not enough towards normal coordinates (for example when our search position was was already near the margin of the central area, and we had to match, using the east or west neighbor, a very far point). Example: geoadd points 67.575457940146066 -62.001317572780565 far geoadd points 66.685439060295664 -58.925040587282297 center georadius points 66.685439060295664 -58.925040587282297 200 km In the above case the code failed to find a match (happens at smaller latitudes too) even if far and center are at less than 200km. 
Another fix introduced by this commit is a progressively larger area towards the poles, since meridians are a lot less far away, so we need to compensate for this. The current implementation works comparably to the Tcl brute-force stress tester implemented in the fuzzy test in the geo.tcl unit for latitudes between -70 and 70, and is pretty accurate over +/-80 too, with sporadic false negatives. A more mathematically clean implementation is possible by computing the meridian distance at the specified latitude and computing the step according to it. --- deps/geohash-int/geohash_helper.c | 25 +++++++++++++++++-------- deps/geohash-int/geohash_helper.h | 2 +- src/geo.c | 4 ++-- 3 files changed, 20 insertions(+), 11 deletions(-) diff --git a/deps/geohash-int/geohash_helper.c b/deps/geohash-int/geohash_helper.c index 7271c7b31..b6a00b7b5 100644 --- a/deps/geohash-int/geohash_helper.c +++ b/deps/geohash-int/geohash_helper.c @@ -55,16 +55,24 @@ static inline double rad_deg(double ang) { return ang / D_R; } /* You must *ONLY* estimate steps when you are encoding. * If you are decoding, always decode to GEO_STEP_MAX (26). */ -uint8_t geohashEstimateStepsByRadius(double range_meters) { - uint8_t step = 1; - while (range_meters > 0 && range_meters < MERCATOR_MAX) { +uint8_t geohashEstimateStepsByRadius(double range_meters, double lat) { + if (range_meters == 0) return 26; + int step = 1; + while (range_meters < MERCATOR_MAX) { range_meters *= 2; step++; } - step--; - if (!step) - step = 26; /* if range = 0, give max resolution */ - return step > 26 ? 26 : step; + step -= 2; /* Make sure range is included in the worst case. */ + /* Wider range torwards the poles... Note: it is possible to do better + * than this approximation by computing the distance between meridians + * at this latitude, but this does the trick for now. */ + if (lat > 67 || lat < -67) step--; + if (lat > 80 || lat < -80) step--; + + /* Frame to valid range. 
*/ + if (step < 1) step = 1; + if (step > 26) step = 25; + return step; } int geohashBitsComparator(const GeoHashBits *a, const GeoHashBits *b) { @@ -114,11 +122,12 @@ GeoHashRadius geohashGetAreasByRadius(double latitude, double longitude, double max_lat = bounds[2]; max_lon = bounds[3]; - steps = geohashEstimateStepsByRadius(radius_meters); + steps = geohashEstimateStepsByRadius(radius_meters,latitude); geohashGetCoordRange(&lat_range, &long_range); geohashEncode(&lat_range, &long_range, latitude, longitude, steps, &hash); geohashNeighbors(&hash, &neighbors); + geohashGetCoordRange(&lat_range, &long_range); geohashDecode(lat_range, long_range, hash, &area); if (area.latitude.min < min_lat) { diff --git a/deps/geohash-int/geohash_helper.h b/deps/geohash-int/geohash_helper.h index c10a02c6a..9354a3238 100644 --- a/deps/geohash-int/geohash_helper.h +++ b/deps/geohash-int/geohash_helper.h @@ -48,7 +48,7 @@ typedef struct { } GeoHashRadius; int GeoHashBitsComparator(const GeoHashBits *a, const GeoHashBits *b); -uint8_t geohashEstimateStepsByRadius(double range_meters); +uint8_t geohashEstimateStepsByRadius(double range_meters, double lat); int geohashBoundingBox(double latitude, double longitude, double radius_meters, double *bounds); GeoHashRadius geohashGetAreasByRadius(double latitude, diff --git a/src/geo.c b/src/geo.c index c26f76e63..87cd88cf4 100644 --- a/src/geo.c +++ b/src/geo.c @@ -353,7 +353,7 @@ void geoAddCommand(redisClient *c) { /* Create the argument vector to call ZADD in order to add all * the score,value pairs to the requested zset, where score is actually * an encoded version of lat,long. */ - uint8_t step = geohashEstimateStepsByRadius(radius_meters); + uint8_t step = geohashEstimateStepsByRadius(radius_meters,0); int i; for (i = 0; i < elements; i++) { double latlong[elements * 2]; @@ -598,7 +598,7 @@ void geoEncodeCommand(redisClient *c) { /* Encode lat/long into our geohash */ GeoHashBits geohash; - uint8_t step = geohashEstimateStepsByRadius(radius_meters); + uint8_t step = geohashEstimateStepsByRadius(radius_meters,0); geohashEncodeWGS84(latlong[0], latlong[1], step, &geohash); /* Align the hash to a valid 52-bit integer based on step size */ From d28c51d166f554633da2fd44a26ecec2be420ee7 Mon Sep 17 00:00:00 2001 From: Jan-Erik Rediger Date: Wed, 24 Jun 2015 12:55:00 +0200 Subject: [PATCH 0268/1928] Do not attempt to lock on Solaris --- src/cluster.c | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/src/cluster.c b/src/cluster.c index fb45bd063..6280677ae 100644 --- a/src/cluster.c +++ b/src/cluster.c @@ -358,6 +358,11 @@ void clusterSaveConfigOrDie(int do_fsync) { * On success REDIS_OK is returned, otherwise an error is logged and * the function returns REDIS_ERR to signal a lock was not acquired. */ int clusterLockConfig(char *filename) { +/* flock() does not exist on Solaris + * and a fcntl-based solution won't help, as we constantly re-open that file, + * which will release _all_ locks anyway + */ +#if !defined(__sun) /* To lock it, we need to open the file in a way it is created if * it does not exist, otherwise there is a race condition with other * processes. */ @@ -385,6 +390,8 @@ int clusterLockConfig(char *filename) { } /* Lock acquired: leak the 'fd' by not closing it, so that we'll retain the * lock to the file as long as the process exists. 
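The comment above describes the whole locking scheme: create the file if it does not exist, take a non-blocking exclusive flock(), and deliberately leak the descriptor so the advisory lock survives for the lifetime of the process, skipping all of this on Solaris where flock() is unavailable. A minimal hedged sketch of that pattern follows; the function name, the plain 0/-1 return values and the fprintf() logging are simplifications of the real clusterLockConfig(), which logs through the server facilities and returns REDIS_OK/REDIS_ERR.

    /* Hedged sketch of the non-blocking flock() pattern described above. */
    #include <errno.h>
    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/file.h>
    #include <unistd.h>

    int lock_config_file(const char *filename) {
    #if !defined(__sun)
        /* O_CREAT avoids a race with other processes creating the file. */
        int fd = open(filename, O_WRONLY | O_CREAT, 0644);
        if (fd == -1) {
            fprintf(stderr, "Can't open %s: %s\n", filename, strerror(errno));
            return -1;
        }
        if (flock(fd, LOCK_EX | LOCK_NB) == -1) {
            if (errno == EWOULDBLOCK)
                fprintf(stderr, "%s seems locked by another instance\n", filename);
            close(fd);
            return -1;
        }
        /* Success: the fd is intentionally left open so the lock is retained. */
    #else
        (void)filename;  /* flock() does not exist on Solaris: skip locking. */
    #endif
        return 0;
    }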
*/ +#endif /* __sun */ + return REDIS_OK; } From c7462ca9ffd8ccafee2605bd5c69e6a5bc5b240a Mon Sep 17 00:00:00 2001 From: Jan-Erik Rediger Date: Wed, 24 Jun 2015 14:48:48 +0200 Subject: [PATCH 0269/1928] Don't include sysctl header It's not needed (anymore) and is not available on Solaris. --- src/redis.c | 1 - 1 file changed, 1 deletion(-) diff --git a/src/redis.c b/src/redis.c index 09653119a..c2f572930 100644 --- a/src/redis.c +++ b/src/redis.c @@ -53,7 +53,6 @@ #include #include #include -#include #include /* Our shared "common" objects */ From 87521f44550a4a7825af5decb8add0eab3ff8812 Mon Sep 17 00:00:00 2001 From: antirez Date: Wed, 24 Jun 2015 16:31:14 +0200 Subject: [PATCH 0270/1928] Geo: GEOHASH command added, returning standard geohash strings. --- src/geo.c | 55 +++++++++++++++++++++++++++++++++++++++++++++++++++++ src/redis.c | 1 + src/redis.h | 1 + 3 files changed, 57 insertions(+) diff --git a/src/geo.c b/src/geo.c index 87cd88cf4..9811970e6 100644 --- a/src/geo.c +++ b/src/geo.c @@ -635,3 +635,58 @@ void geoEncodeCommand(redisClient *c) { addReplyDouble(c, lat); addReplyDouble(c, lon); } + +/* GEOHASH key ele1 ele2 ... eleN + * + * Returns an array with an 11 characters geohash representation of the + * position of the specified elements. */ +void geoHashCommand(redisClient *c) { + char *geoalphabet= "0123456789bcdefghjkmnpqrstuvwxyz"; + int j; + + /* Look up the requested zset */ + robj *zobj = NULL; + if ((zobj = lookupKeyReadOrReply(c, c->argv[1], shared.emptymultibulk)) + == NULL || checkType(c, zobj, REDIS_ZSET)) return; + + /* Geohash elements one after the other, using a null bulk reply for + * missing elements. */ + addReplyMultiBulkLen(c,c->argc-2); + for (j = 2; j < c->argc; j++) { + double score; + if (zsetScore(zobj, c->argv[j], &score) == REDIS_ERR) { + addReply(c,shared.nullbulk); + } else { + /* The internal format we use for geocoding is a bit different + * than the standard, since we use as initial latitude range + * -85,85, while the normal geohashing algorithm uses -90,90. + * So we have to decode our position and re-encode using the + * standard ranges in order to output a valid geohash string. */ + + /* Decode... 
*/ + double latlong[2]; + if (!decodeGeohash(score,latlong)) { + addReply(c,shared.nullbulk); + continue; + } + + /* Re-encode */ + GeoHashRange r[2]; + GeoHashBits hash; + r[0].min = -90; + r[0].max = 90; + r[1].min = -180; + r[1].max = 180; + geohashEncode(&r[0],&r[1],latlong[0],latlong[1],26,&hash); + + char buf[12]; + int i; + for (i = 0; i < 11; i++) { + int idx = (hash.bits >> (52-((i+1)*5))) & 0x1f; + buf[i] = geoalphabet[idx]; + } + buf[11] = '\0'; + addReplyBulkCBuffer(c,buf,11); + } + } +} diff --git a/src/redis.c b/src/redis.c index e0561183d..2221e128b 100644 --- a/src/redis.c +++ b/src/redis.c @@ -287,6 +287,7 @@ struct redisCommand redisCommandTable[] = { {"georadiusbymember",geoRadiusByMemberCommand,-5,"r",0,NULL,1,1,1,0,0}, {"geoencode",geoEncodeCommand,-3,"r",0,NULL,0,0,0,0,0}, {"geodecode",geoDecodeCommand,2,"r",0,NULL,0,0,0,0,0}, + {"geohash",geoHashCommand,-2,"r",0,NULL,0,0,0,0,0}, {"pfselftest",pfselftestCommand,1,"r",0,NULL,0,0,0,0,0}, {"pfadd",pfaddCommand,-2,"wmF",0,NULL,1,1,1,0,0}, {"pfcount",pfcountCommand,-2,"r",0,NULL,1,1,1,0,0}, diff --git a/src/redis.h b/src/redis.h index 3e7641a93..4fd643ac1 100644 --- a/src/redis.h +++ b/src/redis.h @@ -1563,6 +1563,7 @@ void geoDecodeCommand(redisClient *c); void geoRadiusByMemberCommand(redisClient *c); void geoRadiusCommand(redisClient *c); void geoAddCommand(redisClient *c); +void geoHashCommand(redisClient *c); void pfselftestCommand(redisClient *c); void pfaddCommand(redisClient *c); void pfcountCommand(redisClient *c); From 5fd756bf13e05429318ceafddb89b5f8039ff7b9 Mon Sep 17 00:00:00 2001 From: antirez Date: Wed, 24 Jun 2015 16:34:17 +0200 Subject: [PATCH 0271/1928] Geo: GEOHASH command test. --- tests/unit/geo.tcl | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/tests/unit/geo.tcl b/tests/unit/geo.tcl index 09028438e..eedc40c96 100644 --- a/tests/unit/geo.tcl +++ b/tests/unit/geo.tcl @@ -76,6 +76,13 @@ start_server {tags {"geo"}} { {41.235890659964866 1.806328296661377}\ {41.235889392604285 1.8063256144523621}} + test {GEOHASH is able to return geohash strings} { + # Example from Wikipedia. + r del points + r geoadd points 42.6 -5.6 test + lindex [r geohash points test] 0 + } {ezs42e44yx0} + test {GEOADD + GEORANGE randomized test} { set attempt 10 while {[incr attempt -1]} { From 03ce18962848fdd1b7a8427a7365096a0c7b3d4f Mon Sep 17 00:00:00 2001 From: antirez Date: Wed, 24 Jun 2015 17:37:20 +0200 Subject: [PATCH 0272/1928] Geo: explain increment magic in membersOfGeoHashBox(). --- src/geo.c | 20 ++++++++++++++++++++ 1 file changed, 20 insertions(+) diff --git a/src/geo.c b/src/geo.c index 9811970e6..eb813473e 100644 --- a/src/geo.c +++ b/src/geo.c @@ -269,6 +269,26 @@ int geoGetPointsInRange(robj *zobj, double min, double max, double lat, double l int membersOfGeoHashBox(robj *zobj, GeoHashBits hash, geoArray *ga, double lat, double lon, double radius) { GeoHashFix52Bits min, max; + /* We want to compute the sorted set scores that will include all the + * elements inside the specified Geohash 'hash', which has as many + * bits as specified by hash.step * 2. + * + * So if step is, for example, 3, and the hash value in binary + * is 101010, since our score is 52 bits we want every element which + * is in binary: 101010????????????????????????????????????????????? + * Where ? can be 0 or 1. + * + * To get the min score we just use the initial hash value left + * shifted enough to get the 52 bit value. 
Later we increment the + * 6 bit prefis (see the hash.bits++ statement), and get the new + * prefix: 101011, which we align again to 52 bits to get the maximum + * value (which is excluded from the search). So we get everything + * between the two following scores (represented in binary): + * + * 1010100000000000000000000000000000000000000000000000 (included) + * and + * 1010110000000000000000000000000000000000000000000000 (excluded). + */ min = geohashAlign52Bits(hash); hash.bits++; max = geohashAlign52Bits(hash); From fa9d62d34fd739160b2a45b713e912a6c4dabb75 Mon Sep 17 00:00:00 2001 From: antirez Date: Thu, 25 Jun 2015 18:05:45 +0200 Subject: [PATCH 0273/1928] Geo: from lat,lon API to lon,lat API according to GIS standard The GIS standard and all the major DBs implementing GIS related functions take coordinates as x,y that is longitude,latitude. It was a bad start for Redis to do things differently, so even if this means that existing users of the Geo module will be required to change their code, Redis now conforms to the standard. Usually Redis is very backward compatible, but this is not an exception to this rule, since this is the first Geo implementation entering the official Redis source code. It is not wise to try to be backward compatible with code forks... :-) Close #2637. --- deps/geohash-int/geohash.c | 48 ++++++------- deps/geohash-int/geohash.h | 24 +++---- deps/geohash-int/geohash_helper.c | 60 +++++++--------- deps/geohash-int/geohash_helper.h | 19 ++--- src/geo.c | 113 +++++++++++++++--------------- src/geo.h | 2 +- tests/unit/geo.tcl | 56 +++++++-------- 7 files changed, 146 insertions(+), 176 deletions(-) diff --git a/deps/geohash-int/geohash.c b/deps/geohash-int/geohash.c index 66cff082c..e797fbc8f 100644 --- a/deps/geohash-int/geohash.c +++ b/deps/geohash-int/geohash.c @@ -43,17 +43,17 @@ * ----------------- */ -void geohashGetCoordRange(GeoHashRange *lat_range, GeoHashRange *long_range) { +void geohashGetCoordRange(GeoHashRange *long_range, GeoHashRange *lat_range) { /* These are constraints from EPSG:900913 / EPSG:3785 / OSGEO:41001 */ /* We can't geocode at the north/south pole. 
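The membersOfGeoHashBox() comment above turns a box prefix of hash.step*2 significant bits into a half-open score range by aligning the prefix to 52 bits and then aligning prefix+1. geohashAlign52Bits() itself is not shown in this excerpt, so the shift in the sketch below only mirrors the behaviour that comment describes; the 6-bit 101010 prefix is the example used there.

    /* Hedged sketch of the [min, max) score range described above; align52()
     * is assumed to left-shift the step*2 significant bits to 52-bit width. */
    #include <stdint.h>
    #include <stdio.h>

    static uint64_t align52(uint64_t bits, unsigned int step) {
        return bits << (52 - step * 2);
    }

    int main(void) {
        unsigned int step = 3;    /* 3 bits per coordinate, 6-bit prefix.   */
        uint64_t prefix = 0x2a;   /* 101010 in binary, as in the comment.   */
        uint64_t min = align52(prefix, step);      /* included in the range */
        uint64_t max = align52(prefix + 1, step);  /* excluded from it      */
        printf("min=%llu max=%llu\n",
               (unsigned long long)min, (unsigned long long)max);
        return 0;
    }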
*/ - lat_range->max = 85.05112878; - lat_range->min = -85.05112878; long_range->max = 180.0; long_range->min = -180.0; + lat_range->max = 85.05112878; + lat_range->min = -85.05112878; } -int geohashEncode(GeoHashRange *lat_range, GeoHashRange *long_range, - double latitude, double longitude, uint8_t step, +int geohashEncode(GeoHashRange *long_range, GeoHashRange *lat_range, + double longitude, double latitude, uint8_t step, GeoHashBits *hash) { uint8_t i; @@ -96,22 +96,22 @@ int geohashEncode(GeoHashRange *lat_range, GeoHashRange *long_range, return 1; } -int geohashEncodeType(double latitude, double longitude, uint8_t step, GeoHashBits *hash) { +int geohashEncodeType(double longitude, double latitude, uint8_t step, GeoHashBits *hash) { GeoHashRange r[2] = { { 0 } }; geohashGetCoordRange(&r[0], &r[1]); - return geohashEncode(&r[0], &r[1], latitude, longitude, step, hash); + return geohashEncode(&r[0], &r[1], longitude, latitude, step, hash); } -int geohashEncodeWGS84(double latitude, double longitude, uint8_t step, +int geohashEncodeWGS84(double longitude, double latitude, uint8_t step, GeoHashBits *hash) { - return geohashEncodeType(latitude, longitude, step, hash); + return geohashEncodeType(longitude, latitude, step, hash); } static inline uint8_t get_bit(uint64_t bits, uint8_t pos) { return (bits >> pos) & 0x01; } -int geohashDecode(const GeoHashRange lat_range, const GeoHashRange long_range, +int geohashDecode(const GeoHashRange long_range, const GeoHashRange lat_range, const GeoHashBits hash, GeoHashArea *area) { uint8_t i; @@ -121,10 +121,10 @@ int geohashDecode(const GeoHashRange lat_range, const GeoHashRange long_range, } area->hash = hash; - area->latitude.min = lat_range.min; - area->latitude.max = lat_range.max; area->longitude.min = long_range.min; area->longitude.max = long_range.max; + area->latitude.min = lat_range.min; + area->latitude.max = lat_range.max; for (i = 0; i < hash.step; i++) { uint8_t lat_bit, long_bit; @@ -159,28 +159,22 @@ int geohashDecodeWGS84(const GeoHashBits hash, GeoHashArea *area) { return geohashDecodeType(hash, area); } -int geohashDecodeAreaToLatLong(const GeoHashArea *area, double *latlong) { - double y, x; - - if (!latlong) return 0; - - y = (area->latitude.min + area->latitude.max) / 2; - x = (area->longitude.min + area->longitude.max) / 2; - - latlong[0] = y; - latlong[1] = x; +int geohashDecodeAreaToLongLat(const GeoHashArea *area, double *xy) { + if (!xy) return 0; + xy[0] = (area->longitude.min + area->longitude.max) / 2; + xy[1] = (area->latitude.min + area->latitude.max) / 2; return 1; } -int geohashDecodeToLatLongType(const GeoHashBits hash, double *latlong) { +int geohashDecodeToLongLatType(const GeoHashBits hash, double *xy) { GeoHashArea area = { { 0 } }; - if (!latlong || !geohashDecodeType(hash, &area)) + if (!xy || !geohashDecodeType(hash, &area)) return 0; - return geohashDecodeAreaToLatLong(&area, latlong); + return geohashDecodeAreaToLongLat(&area, xy); } -int geohashDecodeToLatLongWGS84(const GeoHashBits hash, double *latlong) { - return geohashDecodeToLatLongType(hash, latlong); +int geohashDecodeToLongLatWGS84(const GeoHashBits hash, double *xy) { + return geohashDecodeToLongLatType(hash, xy); } static void geohash_move_x(GeoHashBits *hash, int8_t d) { diff --git a/deps/geohash-int/geohash.h b/deps/geohash-int/geohash.h index 78310715f..2bf1f5df8 100644 --- a/deps/geohash-int/geohash.h +++ b/deps/geohash-int/geohash.h @@ -62,14 +62,14 @@ typedef struct { } GeoHashBits; typedef struct { - double max; double min; + double max; } 
GeoHashRange; typedef struct { GeoHashBits hash; - GeoHashRange latitude; GeoHashRange longitude; + GeoHashRange latitude; } GeoHashArea; typedef struct { @@ -87,22 +87,22 @@ typedef struct { * 0:success * -1:failed */ -void geohashGetCoordRange(GeoHashRange *lat_range, GeoHashRange *long_range); -int geohashEncode(GeoHashRange *lat_range, GeoHashRange *long_range, - double latitude, double longitude, uint8_t step, +void geohashGetCoordRange(GeoHashRange *long_range, GeoHashRange *lat_range); +int geohashEncode(GeoHashRange *long_range, GeoHashRange *lat_range, + double longitude, double latitude, uint8_t step, GeoHashBits *hash); -int geohashEncodeType(double latitude, double longitude, +int geohashEncodeType(double longitude, double latitude, uint8_t step, GeoHashBits *hash); -int geohashEncodeWGS84(double latitude, double longitude, uint8_t step, +int geohashEncodeWGS84(double longitude, double latitude, uint8_t step, GeoHashBits *hash); -int geohashDecode(const GeoHashRange lat_range, const GeoHashRange long_range, +int geohashDecode(const GeoHashRange long_range, const GeoHashRange lat_range, const GeoHashBits hash, GeoHashArea *area); int geohashDecodeType(const GeoHashBits hash, GeoHashArea *area); int geohashDecodeWGS84(const GeoHashBits hash, GeoHashArea *area); -int geohashDecodeAreaToLatLong(const GeoHashArea *area, double *latlong); -int geohashDecodeToLatLongType(const GeoHashBits hash, double *latlong); -int geohashDecodeToLatLongWGS84(const GeoHashBits hash, double *latlong); -int geohashDecodeToLatLongMercator(const GeoHashBits hash, double *latlong); +int geohashDecodeAreaToLongLat(const GeoHashArea *area, double *xy); +int geohashDecodeToLongLatType(const GeoHashBits hash, double *xy); +int geohashDecodeToLongLatWGS84(const GeoHashBits hash, double *xy); +int geohashDecodeToLongLatMercator(const GeoHashBits hash, double *xy); void geohashNeighbors(const GeoHashBits *hash, GeoHashNeighbors *neighbors); #if defined(__cplusplus) diff --git a/deps/geohash-int/geohash_helper.c b/deps/geohash-int/geohash_helper.c index b6a00b7b5..729f010ea 100644 --- a/deps/geohash-int/geohash_helper.c +++ b/deps/geohash-int/geohash_helper.c @@ -80,13 +80,13 @@ int geohashBitsComparator(const GeoHashBits *a, const GeoHashBits *b) { return a->step != b->step ? 
a->step - b->step : a->bits - b->bits; } -int geohashBoundingBox(double latitude, double longitude, double radius_meters, +int geohashBoundingBox(double longitude, double latitude, double radius_meters, double *bounds) { if (!bounds) return 0; - double latr, lonr; - latr = deg_rad(latitude); + double lonr, latr; lonr = deg_rad(longitude); + latr = deg_rad(latitude); double distance = radius_meters / EARTH_RADIUS_IN_METERS; double min_latitude = latr - distance; @@ -98,37 +98,36 @@ int geohashBoundingBox(double latitude, double longitude, double radius_meters, min_longitude = lonr - difference_longitude; max_longitude = lonr + difference_longitude; - bounds[0] = rad_deg(min_latitude); - bounds[1] = rad_deg(min_longitude); - bounds[2] = rad_deg(max_latitude); - bounds[3] = rad_deg(max_longitude); - + bounds[0] = rad_deg(min_longitude); + bounds[1] = rad_deg(min_latitude); + bounds[2] = rad_deg(max_longitude); + bounds[3] = rad_deg(max_latitude); return 1; } -GeoHashRadius geohashGetAreasByRadius(double latitude, double longitude, double radius_meters) { - GeoHashRange lat_range, long_range; +GeoHashRadius geohashGetAreasByRadius(double longitude, double latitude, double radius_meters) { + GeoHashRange long_range, lat_range; GeoHashRadius radius = { { 0 } }; GeoHashBits hash = { 0 }; GeoHashNeighbors neighbors = { { 0 } }; GeoHashArea area = { { 0 } }; - double min_lat, max_lat, min_lon, max_lon; + double min_lon, max_lon, min_lat, max_lat; double bounds[4]; int steps; - geohashBoundingBox(latitude, longitude, radius_meters, bounds); - min_lat = bounds[0]; - min_lon = bounds[1]; - max_lat = bounds[2]; - max_lon = bounds[3]; + geohashBoundingBox(longitude, latitude, radius_meters, bounds); + min_lon = bounds[0]; + min_lat = bounds[1]; + max_lon = bounds[2]; + max_lat = bounds[3]; steps = geohashEstimateStepsByRadius(radius_meters,latitude); - geohashGetCoordRange(&lat_range, &long_range); - geohashEncode(&lat_range, &long_range, latitude, longitude, steps, &hash); + geohashGetCoordRange(&long_range, &lat_range); + geohashEncode(&long_range, &lat_range, longitude, latitude, steps, &hash); geohashNeighbors(&hash, &neighbors); - geohashGetCoordRange(&lat_range, &long_range); - geohashDecode(lat_range, long_range, hash, &area); + geohashGetCoordRange(&long_range, &lat_range); + geohashDecode(long_range, lat_range, hash, &area); if (area.latitude.min < min_lat) { GZERO(neighbors.south); @@ -156,9 +155,9 @@ GeoHashRadius geohashGetAreasByRadius(double latitude, double longitude, double return radius; } -GeoHashRadius geohashGetAreasByRadiusWGS84(double latitude, double longitude, +GeoHashRadius geohashGetAreasByRadiusWGS84(double longitude, double latitude, double radius_meters) { - return geohashGetAreasByRadius(latitude, longitude, radius_meters); + return geohashGetAreasByRadius(longitude, latitude, radius_meters); } GeoHashFix52Bits geohashAlign52Bits(const GeoHashBits hash) { @@ -167,8 +166,8 @@ GeoHashFix52Bits geohashAlign52Bits(const GeoHashBits hash) { return bits; } -/* calculate distance using haversin great circle distance formula */ -double distanceEarth(double lat1d, double lon1d, double lat2d, double lon2d) { +/* Calculate distance using haversin great circle distance formula. 
*/ +double distanceEarth(double lon1d, double lat1d, double lon2d, double lat2d) { double lat1r, lon1r, lat2r, lon2r, u, v; lat1r = deg_rad(lat1d); lon1r = deg_rad(lon1d); @@ -183,7 +182,7 @@ double distanceEarth(double lat1d, double lon1d, double lat2d, double lon2d) { int geohashGetDistanceIfInRadius(double x1, double y1, double x2, double y2, double radius, double *distance) { - *distance = distanceEarth(y1, x1, y2, x2); + *distance = distanceEarth(x1, y1, x2, y2); if (*distance > radius) return 0; return 1; } @@ -193,14 +192,3 @@ int geohashGetDistanceIfInRadiusWGS84(double x1, double y1, double x2, double *distance) { return geohashGetDistanceIfInRadius(x1, y1, x2, y2, radius, distance); } - -int geohashVerifyCoordinates(double x, double y) { - GeoHashRange lat_range, long_range; - geohashGetCoordRange(&lat_range, &long_range); - - if (x < long_range.min || x > long_range.max || y < lat_range.min || - y > lat_range.max) { - return 0; - } - return 1; -} diff --git a/deps/geohash-int/geohash_helper.h b/deps/geohash-int/geohash_helper.h index 9354a3238..0e38740de 100644 --- a/deps/geohash-int/geohash_helper.h +++ b/deps/geohash-int/geohash_helper.h @@ -49,29 +49,20 @@ typedef struct { int GeoHashBitsComparator(const GeoHashBits *a, const GeoHashBits *b); uint8_t geohashEstimateStepsByRadius(double range_meters, double lat); -int geohashBoundingBox(double latitude, double longitude, double radius_meters, +int geohashBoundingBox(double longitude, double latitude, double radius_meters, double *bounds); -GeoHashRadius geohashGetAreasByRadius(double latitude, - double longitude, double radius_meters); -GeoHashRadius geohashGetAreasByRadiusWGS84(double latitude, double longitude, +GeoHashRadius geohashGetAreasByRadius(double longitude, + double latitude, double radius_meters); +GeoHashRadius geohashGetAreasByRadiusWGS84(double longitude, double latitude, double radius_meters); -GeoHashRadius geohashGetAreasByRadiusMercator(double latitude, double longitude, +GeoHashRadius geohashGetAreasByRadiusMercator(double longitude, double latitude, double radius_meters); GeoHashFix52Bits geohashAlign52Bits(const GeoHashBits hash); -double geohashGetXMercator(double longtitude); -double geohashGetYMercator(double latitude); -double geohashGetXWGS84(double x); -double geohashGetYWGS84(double y); -int geohashVerifyCoordinates(double x, double y); int geohashGetDistanceIfInRadius(double x1, double y1, double x2, double y2, double radius, double *distance); int geohashGetDistanceIfInRadiusWGS84(double x1, double y1, double x2, double y2, double radius, double *distance); -int geohashGetDistanceSquaredIfInRadiusMercator(double x1, double y1, - double x2, double y2, - double radius, - double *distance); #endif /* GEOHASH_HELPER_HPP_ */ diff --git a/src/geo.c b/src/geo.c index eb813473e..40d962f31 100644 --- a/src/geo.c +++ b/src/geo.c @@ -82,18 +82,18 @@ void geoArrayFree(geoArray *ga) { /* ==================================================================== * Helpers * ==================================================================== */ -static inline int decodeGeohash(double bits, double *latlong) { +static inline int decodeGeohash(double bits, double *xy) { GeoHashBits hash = { .bits = (uint64_t)bits, .step = GEO_STEP_MAX }; - return geohashDecodeToLatLongWGS84(hash, latlong); + return geohashDecodeToLongLatWGS84(hash, xy); } /* Input Argument Helper */ /* Take a pointer to the latitude arg then use the next arg for longitude. * On parse error REDIS_ERR is returned, otherwise REDIS_OK. 
*/ -static inline int extractLatLongOrReply(redisClient *c, robj **argv, - double *latlong) { +static inline int extractLongLatOrReply(redisClient *c, robj **argv, + double *xy) { for (int i = 0; i < 2; i++) { - if (getDoubleFromObjectOrReply(c, argv[i], latlong + i, NULL) != + if (getDoubleFromObjectOrReply(c, argv[i], xy + i, NULL) != REDIS_OK) { return REDIS_ERR; } @@ -104,11 +104,11 @@ static inline int extractLatLongOrReply(redisClient *c, robj **argv, /* Input Argument Helper */ /* Decode lat/long from a zset member's score. * Returns REDIS_OK on successful decoding, otherwise REDIS_ERR is returned. */ -static int latLongFromMember(robj *zobj, robj *member, double *latlong) { +static int longLatFromMember(robj *zobj, robj *member, double *xy) { double score = 0; if (zsetScore(zobj, member, &score) == REDIS_ERR) return REDIS_ERR; - if (!decodeGeohash(score, latlong)) return REDIS_ERR; + if (!decodeGeohash(score, xy)) return REDIS_ERR; return REDIS_OK; } @@ -166,13 +166,13 @@ static inline void addReplyDoubleDistance(redisClient *c, double d) { * only if the point is within the search area. * * returns REDIS_OK if the point is included, or REIDS_ERR if it is outside. */ -int geoAppendIfWithinRadius(geoArray *ga, double lat, double lon, double radius, double score, sds member) { - double distance, latlong[2]; +int geoAppendIfWithinRadius(geoArray *ga, double lon, double lat, double radius, double score, sds member) { + double distance, xy[2]; - if (!decodeGeohash(score,latlong)) return REDIS_ERR; /* Can't decode. */ + if (!decodeGeohash(score,xy)) return REDIS_ERR; /* Can't decode. */ /* Note that geohashGetDistanceIfInRadiusWGS84() takes arguments in * reverse order: longitude first, latitude later. */ - if (!geohashGetDistanceIfInRadiusWGS84(lon,lat,latlong[1], latlong[0], + if (!geohashGetDistanceIfInRadiusWGS84(lon,lat, xy[0], xy[1], radius, &distance)) { return REDIS_ERR; @@ -180,8 +180,8 @@ int geoAppendIfWithinRadius(geoArray *ga, double lat, double lon, double radius, /* Append the new element. */ geoPoint *gp = geoArrayAppend(ga); - gp->latitude = latlong[0]; - gp->longitude = latlong[1]; + gp->longitude = xy[0]; + gp->latitude = xy[1]; gp->dist = distance; gp->member = member; gp->score = score; @@ -200,7 +200,7 @@ int geoAppendIfWithinRadius(geoArray *ga, double lat, double lon, double radius, * using multiple queries to the sorted set, that we later need to sort * via qsort. Similarly we need to be able to reject points outside the search * radius area ASAP in order to allocate and process more points than needed. */ -int geoGetPointsInRange(robj *zobj, double min, double max, double lat, double lon, double radius, geoArray *ga) { +int geoGetPointsInRange(robj *zobj, double min, double max, double lon, double lat, double radius, geoArray *ga) { /* minex 0 = include min in range; maxex 1 = exclude max in range */ /* That's: min <= val < max */ zrangespec range = { .min = min, .max = max, .minex = 0, .maxex = 1 }; @@ -232,7 +232,7 @@ int geoGetPointsInRange(robj *zobj, double min, double max, double lat, double l ziplistGet(eptr, &vstr, &vlen, &vlong); member = (vstr == NULL) ? sdsfromlonglong(vlong) : sdsnewlen(vstr,vlen); - if (geoAppendIfWithinRadius(ga,lat,lon,radius,score,member) + if (geoAppendIfWithinRadius(ga,lon,lat,radius,score,member) == REDIS_ERR) sdsfree(member); zzlNext(zl, &eptr, &sptr); } @@ -255,7 +255,7 @@ int geoGetPointsInRange(robj *zobj, double min, double max, double lat, double l member = (o->encoding == REDIS_ENCODING_INT) ? 
sdsfromlonglong((long)o->ptr) : sdsdup(o->ptr); - if (geoAppendIfWithinRadius(ga,lat,lon,radius,ln->score,member) + if (geoAppendIfWithinRadius(ga,lon,lat,radius,ln->score,member) == REDIS_ERR) sdsfree(member); ln = ln->level[0].forward; } @@ -266,7 +266,7 @@ int geoGetPointsInRange(robj *zobj, double min, double max, double lat, double l /* Obtain all members between the min/max of this geohash bounding box. * Populate a geoArray of GeoPoints by calling geoGetPointsInRange(). * Return the number of points added to the array. */ -int membersOfGeoHashBox(robj *zobj, GeoHashBits hash, geoArray *ga, double lat, double lon, double radius) { +int membersOfGeoHashBox(robj *zobj, GeoHashBits hash, geoArray *ga, double lon, double lat, double radius) { GeoHashFix52Bits min, max; /* We want to compute the sorted set scores that will include all the @@ -293,11 +293,11 @@ int membersOfGeoHashBox(robj *zobj, GeoHashBits hash, geoArray *ga, double lat, hash.bits++; max = geohashAlign52Bits(hash); - return geoGetPointsInRange(zobj, min, max, lat, lon, radius, ga); + return geoGetPointsInRange(zobj, min, max, lon, lat, radius, ga); } /* Search all eight neighbors + self geohash box */ -int membersOfAllNeighbors(robj *zobj, GeoHashRadius n, double lat, double lon, double radius, geoArray *ga) { +int membersOfAllNeighbors(robj *zobj, GeoHashRadius n, double lon, double lat, double radius, geoArray *ga) { GeoHashBits neighbors[9]; unsigned int i, count = 0; @@ -316,7 +316,7 @@ int membersOfAllNeighbors(robj *zobj, GeoHashRadius n, double lat, double lon, d for (i = 0; i < sizeof(neighbors) / sizeof(*neighbors); i++) { if (HASHISZERO(neighbors[i])) continue; - count += membersOfGeoHashBox(zobj, neighbors[i], ga, lat, lon, radius); + count += membersOfGeoHashBox(zobj, neighbors[i], ga, lon, lat, radius); } return count; } @@ -342,9 +342,9 @@ static int sort_gp_desc(const void *a, const void *b) { * Commands * ==================================================================== */ void geoAddCommand(redisClient *c) { - /* args 0-4: [cmd, key, lat, lng, val]; optional 5-6: [radius, units] + /* args 0-4: [cmd, key, lng, lat, val]; optional 5-6: [radius, units] * - OR - - * args 0-N: [cmd, key, lat, lng, val, lat2, lng2, val2, ...] */ + * args 0-N: [cmd, key, lng, lat, val, lng2, lat2, val2, ...] */ /* Prepare for the three different forms of the add command. */ double radius_meters = 0; @@ -358,8 +358,8 @@ void geoAddCommand(redisClient *c) { return; } else if ((c->argc - 2) % 3 != 0) { /* Need an odd number of arguments if we got this far... */ - addReplyError(c, "format is: geoadd [key] [lat1] [long1] [member1] " - "[lat2] [long2] [member2] ... "); + addReplyError(c, "format is: geoadd [key] [x1] [y1] [member1] " + "[x2] [y2] [member2] ... "); return; } @@ -376,9 +376,9 @@ void geoAddCommand(redisClient *c) { uint8_t step = geohashEstimateStepsByRadius(radius_meters,0); int i; for (i = 0; i < elements; i++) { - double latlong[elements * 2]; + double xy[2]; - if (extractLatLongOrReply(c, (c->argv+2)+(i*3),latlong) == REDIS_ERR) { + if (extractLongLatOrReply(c, (c->argv+2)+(i*3),xy) == REDIS_ERR) { for (i = 0; i < argc; i++) if (argv[i]) decrRefCount(argv[i]); zfree(argv); @@ -391,10 +391,7 @@ void geoAddCommand(redisClient *c) { /* Turn the coordinates into the score of the element. 
*/ GeoHashBits hash; - double latitude = latlong[0]; - double longitude = latlong[1]; - geohashEncodeWGS84(latitude, longitude, step, &hash); - + geohashEncodeWGS84(xy[0], xy[1], step, &hash); GeoHashFix52Bits bits = geohashAlign52Bits(hash); robj *score = createObject(REDIS_STRING, sdsfromlonglong(bits)); robj *val = c->argv[2 + i * 3 + 2]; @@ -416,7 +413,7 @@ void geoAddCommand(redisClient *c) { #define RADIUS_MEMBER 2 static void geoRadiusGeneric(redisClient *c, int type) { - /* type == cords: [cmd, key, lat, long, radius, units, [optionals]] + /* type == cords: [cmd, key, long, lat, radius, units, [optionals]] * type == member: [cmd, key, member, radius, units, [optionals]] */ robj *key = c->argv[1]; @@ -427,17 +424,17 @@ static void geoRadiusGeneric(redisClient *c, int type) { return; } - /* Find lat/long to use for radius search based on inquiry type */ + /* Find long/lat to use for radius search based on inquiry type */ int base_args; - double latlong[2] = { 0 }; + double xy[2] = { 0 }; if (type == RADIUS_COORDS) { base_args = 6; - if (extractLatLongOrReply(c, c->argv + 2, latlong) == REDIS_ERR) + if (extractLongLatOrReply(c, c->argv + 2, xy) == REDIS_ERR) return; } else if (type == RADIUS_MEMBER) { base_args = 5; robj *member = c->argv[2]; - if (latLongFromMember(zobj, member, latlong) == REDIS_ERR) { + if (longLatFromMember(zobj, member, xy) == REDIS_ERR) { addReplyError(c, "could not decode requested zset member"); return; } @@ -483,7 +480,7 @@ static void geoRadiusGeneric(redisClient *c, int type) { /* Get all neighbor geohash boxes for our radius search */ GeoHashRadius georadius = - geohashGetAreasByRadiusWGS84(latlong[0], latlong[1], radius_meters); + geohashGetAreasByRadiusWGS84(xy[0], xy[1], radius_meters); #ifdef DEBUG printf("Searching with step size: %d\n", georadius.hash.step); @@ -491,7 +488,7 @@ static void geoRadiusGeneric(redisClient *c, int type) { /* Search the zset for all matching points */ geoArray *ga = geoArrayCreate(); - membersOfAllNeighbors(zobj, georadius, latlong[0], latlong[1], radius_meters, ga); + membersOfAllNeighbors(zobj, georadius, xy[0], xy[1], radius_meters, ga); /* If no matching results, the user gets an empty reply. 
*/ if (ga->used == 0) { @@ -549,15 +546,15 @@ static void geoRadiusGeneric(redisClient *c, int type) { if (withcoords) { addReplyMultiBulkLen(c, 2); - addReplyDouble(c, gp->latitude); addReplyDouble(c, gp->longitude); + addReplyDouble(c, gp->latitude); } } geoArrayFree(ga); } void geoRadiusCommand(redisClient *c) { - /* args 0-5: ["georadius", key, lat, long, radius, units]; + /* args 0-5: ["georadius", key, long, lat, radius, units]; * optionals: [withdist, withcoords, asc|desc] */ geoRadiusGeneric(c, RADIUS_COORDS); } @@ -578,30 +575,30 @@ void geoDecodeCommand(redisClient *c) { geohash.step = GEO_STEP_MAX; geohashDecodeWGS84(geohash, &area); - double lat = (area.latitude.min + area.latitude.max) / 2; double lon = (area.longitude.min + area.longitude.max) / 2; + double lat = (area.latitude.min + area.latitude.max) / 2; /* Returning three nested replies */ addReplyMultiBulkLen(c, 3); /* First, the minimum corner */ addReplyMultiBulkLen(c, 2); - addReplyDouble(c, area.latitude.min); addReplyDouble(c, area.longitude.min); + addReplyDouble(c, area.latitude.min); /* Next, the maximum corner */ addReplyMultiBulkLen(c, 2); - addReplyDouble(c, area.latitude.max); addReplyDouble(c, area.longitude.max); + addReplyDouble(c, area.latitude.max); /* Last, the averaged center of this bounding box */ addReplyMultiBulkLen(c, 2); - addReplyDouble(c, lat); addReplyDouble(c, lon); + addReplyDouble(c, lat); } void geoEncodeCommand(redisClient *c) { - /* args 0-2: ["geoencode", lat, long]; + /* args 0-2: ["geoencode", long, lat]; * optionals: [radius, units] */ double radius_meters = 0; @@ -613,13 +610,13 @@ void geoEncodeCommand(redisClient *c) { return; } - double latlong[2]; - if (extractLatLongOrReply(c, c->argv + 1, latlong) == REDIS_ERR) return; + double xy[2]; + if (extractLongLatOrReply(c, c->argv + 1, xy) == REDIS_ERR) return; /* Encode lat/long into our geohash */ GeoHashBits geohash; uint8_t step = geohashEstimateStepsByRadius(radius_meters,0); - geohashEncodeWGS84(latlong[0], latlong[1], step, &geohash); + geohashEncodeWGS84(xy[0], xy[1], step, &geohash); /* Align the hash to a valid 52-bit integer based on step size */ GeoHashFix52Bits bits = geohashAlign52Bits(geohash); @@ -631,8 +628,8 @@ void geoEncodeCommand(redisClient *c) { GeoHashArea area; geohashDecodeWGS84(geohash, &area); - double lat = (area.latitude.min + area.latitude.max) / 2; double lon = (area.longitude.min + area.longitude.max) / 2; + double lat = (area.latitude.min + area.latitude.max) / 2; /* Return four nested multibulk replies. */ addReplyMultiBulkLen(c, 4); @@ -642,18 +639,18 @@ void geoEncodeCommand(redisClient *c) { /* Return the minimum corner */ addReplyMultiBulkLen(c, 2); - addReplyDouble(c, area.latitude.min); addReplyDouble(c, area.longitude.min); + addReplyDouble(c, area.latitude.min); /* Return the maximum corner */ addReplyMultiBulkLen(c, 2); - addReplyDouble(c, area.latitude.max); addReplyDouble(c, area.longitude.max); + addReplyDouble(c, area.latitude.max); /* Return the averaged center */ addReplyMultiBulkLen(c, 2); - addReplyDouble(c, lat); addReplyDouble(c, lon); + addReplyDouble(c, lat); } /* GEOHASH key ele1 ele2 ... eleN @@ -684,8 +681,8 @@ void geoHashCommand(redisClient *c) { * standard ranges in order to output a valid geohash string. */ /* Decode... 
*/ - double latlong[2]; - if (!decodeGeohash(score,latlong)) { + double xy[2]; + if (!decodeGeohash(score,xy)) { addReply(c,shared.nullbulk); continue; } @@ -693,11 +690,11 @@ void geoHashCommand(redisClient *c) { /* Re-encode */ GeoHashRange r[2]; GeoHashBits hash; - r[0].min = -90; - r[0].max = 90; - r[1].min = -180; - r[1].max = 180; - geohashEncode(&r[0],&r[1],latlong[0],latlong[1],26,&hash); + r[0].min = -180; + r[0].max = 180; + r[1].min = -90; + r[1].max = 90; + geohashEncode(&r[0],&r[1],xy[0],xy[1],26,&hash); char buf[12]; int i; diff --git a/src/geo.h b/src/geo.h index 9cd1f56b4..4f5c5e6fe 100644 --- a/src/geo.h +++ b/src/geo.h @@ -12,8 +12,8 @@ void geoAddCommand(redisClient *c); /* Structures used inside geo.c in order to represent points and array of * points on the earth. */ typedef struct geoPoint { - double latitude; double longitude; + double latitude; double dist; double score; char *member; diff --git a/tests/unit/geo.tcl b/tests/unit/geo.tcl index eedc40c96..a049804ae 100644 --- a/tests/unit/geo.tcl +++ b/tests/unit/geo.tcl @@ -2,46 +2,46 @@ # verify the Redis implementation with a fuzzy test. proc geo_degrad deg {expr {$deg*atan(1)*8/360}} -proc geo_distance {lat1d lon1d lat2d lon2d} { - set lat1r [geo_degrad $lat1d] +proc geo_distance {lon1d lat1d lon2d lat2d} { set lon1r [geo_degrad $lon1d] - set lat2r [geo_degrad $lat2d] + set lat1r [geo_degrad $lat1d] set lon2r [geo_degrad $lon2d] - set u [expr {sin(($lat2r - $lat1r) / 2)}] + set lat2r [geo_degrad $lat2d] set v [expr {sin(($lon2r - $lon1r) / 2)}] + set u [expr {sin(($lat2r - $lat1r) / 2)}] expr {2.0 * 6372797.560856 * \ asin(sqrt($u * $u + cos($lat1r) * cos($lat2r) * $v * $v))} } -proc geo_random_point {latvar lonvar} { - upvar 1 $latvar lat +proc geo_random_point {lonvar latvar} { upvar 1 $lonvar lon + upvar 1 $latvar lat # Note that the actual latitude limit should be -85 to +85, we restrict # the test to -70 to +70 since in this range the algorithm is more precise # while outside this range occasionally some element may be missing. 
- set lat [expr {-70 + rand()*140}] set lon [expr {-180 + rand()*360}] + set lat [expr {-70 + rand()*140}] } start_server {tags {"geo"}} { test {GEOADD create} { - r geoadd nyc 40.747533 -73.9454966 "lic market" + r geoadd nyc -73.9454966 40.747533 "lic market" } {1} test {GEOADD update} { - r geoadd nyc 40.747533 -73.9454966 "lic market" + r geoadd nyc -73.9454966 40.747533 "lic market" } {0} test {GEOADD invalid coordinates} { catch { - r geoadd nyc 40.747533 -73.9454966 "lic market" \ + r geoadd nyc -73.9454966 40.747533 "lic market" \ foo bar "luck market" } err set err } {*valid*} test {GEOADD multi add} { - r geoadd nyc 40.7648057 -73.9733487 "central park n/q/r" 40.7362513 -73.9903085 "union square" 40.7126674 -74.0131604 "wtc one" 40.6428986 -73.7858139 "jfk" 40.7498929 -73.9375699 "q4" 40.7480973 -73.9564142 4545 + r geoadd nyc -73.9733487 40.7648057 "central park n/q/r" -73.9903085 40.7362513 "union square" -74.0131604 40.7126674 "wtc one" -73.7858139 40.6428986 "jfk" -73.9375699 40.7498929 "q4" -73.9564142 40.7480973 4545 } {6} test {Check geoset values} { @@ -49,11 +49,11 @@ start_server {tags {"geo"}} { } {{wtc one} 1791873972053020 {union square} 1791875485187452 {central park n/q/r} 1791875761332224 4545 1791875796750882 {lic market} 1791875804419201 q4 1791875830079666 jfk 1791895905559723} test {GEORADIUS simple (sorted)} { - r georadius nyc 40.7598464 -73.9798091 3 km ascending + r georadius nyc -73.9798091 40.7598464 3 km ascending } {{central park n/q/r} 4545 {union square}} test {GEORADIUS withdistance (sorted)} { - r georadius nyc 40.7598464 -73.9798091 3 km withdistance ascending + r georadius nyc -73.9798091 40.7598464 3 km withdistance ascending } {{{central park n/q/r} 0.7750} {4545 2.3651} {{union square} 2.7697}} test {GEORADIUSBYMEMBER simple (sorted)} { @@ -65,21 +65,21 @@ start_server {tags {"geo"}} { } {{{wtc one} 0.0000} {{union square} 3.2544} {{central park n/q/r} 6.7000} {4545 6.1975} {{lic market} 6.8969}} test {GEOENCODE simple} { - r geoencode 41.2358883 1.8063239 - } {3471579339700058 {41.235888125243704 1.8063229322433472}\ - {41.235890659964866 1.806328296661377}\ - {41.235889392604285 1.8063256144523621}} + r geoencode 1.8063239 41.2358883 + } {3471579339700058 {1.8063229322433472 41.235888125243704}\ + {1.806328296661377 41.235890659964866}\ + {1.8063256144523621 41.235889392604285}} test {GEODECODE simple} { r geodecode 3471579339700058 - } {{41.235888125243704 1.8063229322433472}\ - {41.235890659964866 1.806328296661377}\ - {41.235889392604285 1.8063256144523621}} + } {{1.8063229322433472 41.235888125243704}\ + {1.806328296661377 41.235890659964866}\ + {1.8063256144523621 41.235889392604285}} test {GEOHASH is able to return geohash strings} { # Example from Wikipedia. 
r del points - r geoadd points 42.6 -5.6 test + r geoadd points -5.6 42.6 test lindex [r geohash points test] 0 } {ezs42e44yx0} @@ -93,20 +93,20 @@ start_server {tags {"geo"}} { r del mypoints set radius_km [expr {[randomInt 200]+10}] set radius_m [expr {$radius_km*1000}] - geo_random_point search_lat search_lon - lappend debuginfo "Search area: $search_lat,$search_lon $radius_km km" + geo_random_point search_lon search_lat + lappend debuginfo "Search area: $search_lon,$search_lat $radius_km km" set tcl_result {} set argv {} for {set j 0} {$j < 20000} {incr j} { - geo_random_point lat lon - lappend argv $lat $lon "place:$j" - if {[geo_distance $lat $lon $search_lat $search_lon] < $radius_m} { + geo_random_point lon lat + lappend argv $lon $lat "place:$j" + if {[geo_distance $lon $lat $search_lon $search_lat] < $radius_m} { lappend tcl_result "place:$j" - lappend debuginfo "place:$j $lat $lon [expr {[geo_distance $lat $lon $search_lat $search_lon]/1000}] km" + lappend debuginfo "place:$j $lon $lat [expr {[geo_distance $lon $lat $search_lon $search_lat]/1000}] km" } } r geoadd mypoints {*}$argv - set res [lsort [r georadius mypoints $search_lat $search_lon $radius_km km]] + set res [lsort [r georadius mypoints $search_lon $search_lat $radius_km km]] set res2 [lsort $tcl_result] set test_result OK if {$res != $res2} { From c2322357343756b56adefd8f2f70dc09f43b1bb8 Mon Sep 17 00:00:00 2001 From: MOON_CLJ Date: Fri, 26 Jun 2015 17:58:45 +0800 Subject: [PATCH 0274/1928] pfcount support multi keys --- src/redis.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/redis.c b/src/redis.c index 09653119a..a392da110 100644 --- a/src/redis.c +++ b/src/redis.c @@ -284,7 +284,7 @@ struct redisCommand redisCommandTable[] = { {"command",commandCommand,0,"rlt",0,NULL,0,0,0,0,0}, {"pfselftest",pfselftestCommand,1,"r",0,NULL,0,0,0,0,0}, {"pfadd",pfaddCommand,-2,"wmF",0,NULL,1,1,1,0,0}, - {"pfcount",pfcountCommand,-2,"r",0,NULL,1,1,1,0,0}, + {"pfcount",pfcountCommand,-2,"r",0,NULL,1,-1,1,0,0}, {"pfmerge",pfmergeCommand,-2,"wm",0,NULL,1,-1,1,0,0}, {"pfdebug",pfdebugCommand,-3,"w",0,NULL,0,0,0,0,0}, {"latency",latencyCommand,-2,"arslt",0,NULL,0,0,0,0,0} From 710c05ac2ab0eedc56cc19ba16ad8fc40d24e81d Mon Sep 17 00:00:00 2001 From: antirez Date: Sat, 27 Jun 2015 09:38:39 +0200 Subject: [PATCH 0275/1928] Geo: remove useless variable. geoRadiusGeneric() top comment improved. --- src/geo.c | 12 ++++-------- 1 file changed, 4 insertions(+), 8 deletions(-) diff --git a/src/geo.c b/src/geo.c index 40d962f31..72dadb885 100644 --- a/src/geo.c +++ b/src/geo.c @@ -412,9 +412,9 @@ void geoAddCommand(redisClient *c) { #define RADIUS_COORDS 1 #define RADIUS_MEMBER 2 +/* GEORADIUS key x y radius unit [WITHDIST] [WITHHASH] [WITHCOORD] [ASC|DESC] + * GEORADIUSBYMEMBER key member radius unit ... options ... */ static void geoRadiusGeneric(redisClient *c, int type) { - /* type == cords: [cmd, key, long, lat, radius, units, [optionals]] - * type == member: [cmd, key, member, radius, units, [optionals]] */ robj *key = c->argv[1]; /* Look up the requested zset */ @@ -451,7 +451,7 @@ static void geoRadiusGeneric(redisClient *c, int type) { } /* Discover and populate all optional parameters. 
*/ - int withdist = 0, withhash = 0, withcoords = 0, noproperties = 0; + int withdist = 0, withhash = 0, withcoords = 0; int sort = SORT_NONE; if (c->argc > base_args) { int remaining = c->argc - base_args; @@ -463,11 +463,7 @@ static void geoRadiusGeneric(redisClient *c, int type) { withhash = 1; else if (!strncasecmp(arg, "withcoord", 9)) withcoords = 1; - else if (!strncasecmp(arg, "noprop", 6) || - !strncasecmp(arg, "withoutprop", 11)) - noproperties = 1; - else if (!strncasecmp(arg, "asc", 3) || - !strncasecmp(arg, "sort", 4)) + else if (!strncasecmp(arg, "asc", 3)) sort = SORT_ASC; else if (!strncasecmp(arg, "desc", 4)) sort = SORT_DESC; From cd91beea1c11a37be9811260c16dfe8eb8e57e9e Mon Sep 17 00:00:00 2001 From: antirez Date: Sat, 27 Jun 2015 09:43:47 +0200 Subject: [PATCH 0276/1928] Geo: only one way to specify any given option. --- src/geo.c | 13 +++++++------ tests/unit/geo.tcl | 8 ++++---- 2 files changed, 11 insertions(+), 10 deletions(-) diff --git a/src/geo.c b/src/geo.c index 72dadb885..1a90fa87b 100644 --- a/src/geo.c +++ b/src/geo.c @@ -413,6 +413,7 @@ void geoAddCommand(redisClient *c) { #define RADIUS_MEMBER 2 /* GEORADIUS key x y radius unit [WITHDIST] [WITHHASH] [WITHCOORD] [ASC|DESC] + * [LIMIT count] * GEORADIUSBYMEMBER key member radius unit ... options ... */ static void geoRadiusGeneric(redisClient *c, int type) { robj *key = c->argv[1]; @@ -457,17 +458,17 @@ static void geoRadiusGeneric(redisClient *c, int type) { int remaining = c->argc - base_args; for (int i = 0; i < remaining; i++) { char *arg = c->argv[base_args + i]->ptr; - if (!strncasecmp(arg, "withdist", 8)) + if (!strcasecmp(arg, "withdist")) { withdist = 1; - else if (!strcasecmp(arg, "withhash")) + } else if (!strcasecmp(arg, "withhash")) { withhash = 1; - else if (!strncasecmp(arg, "withcoord", 9)) + } else if (!strcasecmp(arg, "withcoord")) { withcoords = 1; - else if (!strncasecmp(arg, "asc", 3)) + } else if (!strcasecmp(arg, "asc")) { sort = SORT_ASC; - else if (!strncasecmp(arg, "desc", 4)) + } else if (!strcasecmp(arg, "desc")) { sort = SORT_DESC; - else { + } else { addReply(c, shared.syntaxerr); return; } diff --git a/tests/unit/geo.tcl b/tests/unit/geo.tcl index a049804ae..5544d6555 100644 --- a/tests/unit/geo.tcl +++ b/tests/unit/geo.tcl @@ -49,18 +49,18 @@ start_server {tags {"geo"}} { } {{wtc one} 1791873972053020 {union square} 1791875485187452 {central park n/q/r} 1791875761332224 4545 1791875796750882 {lic market} 1791875804419201 q4 1791875830079666 jfk 1791895905559723} test {GEORADIUS simple (sorted)} { - r georadius nyc -73.9798091 40.7598464 3 km ascending + r georadius nyc -73.9798091 40.7598464 3 km asc } {{central park n/q/r} 4545 {union square}} - test {GEORADIUS withdistance (sorted)} { - r georadius nyc -73.9798091 40.7598464 3 km withdistance ascending + test {GEORADIUS withdist (sorted)} { + r georadius nyc -73.9798091 40.7598464 3 km withdist asc } {{{central park n/q/r} 0.7750} {4545 2.3651} {{union square} 2.7697}} test {GEORADIUSBYMEMBER simple (sorted)} { r georadiusbymember nyc "wtc one" 7 km } {{wtc one} {union square} {central park n/q/r} 4545 {lic market}} - test {GEORADIUSBYMEMBER withdistance (sorted)} { + test {GEORADIUSBYMEMBER withdist (sorted)} { r georadiusbymember nyc "wtc one" 7 km withdist } {{{wtc one} 0.0000} {{union square} 3.2544} {{central park n/q/r} 6.7000} {4545 6.1975} {{lic market} 6.8969}} From a3b07b1718368447f76788dd8febe01635a11f69 Mon Sep 17 00:00:00 2001 From: antirez Date: Sat, 27 Jun 2015 10:23:58 +0200 Subject: [PATCH 0277/1928] Geo: 
COUNT option for GEORADIUS. --- src/geo.c | 22 ++++++++++++++++++++-- 1 file changed, 20 insertions(+), 2 deletions(-) diff --git a/src/geo.c b/src/geo.c index 1a90fa87b..c48646635 100644 --- a/src/geo.c +++ b/src/geo.c @@ -413,7 +413,7 @@ void geoAddCommand(redisClient *c) { #define RADIUS_MEMBER 2 /* GEORADIUS key x y radius unit [WITHDIST] [WITHHASH] [WITHCOORD] [ASC|DESC] - * [LIMIT count] + * [COUNT count] * GEORADIUSBYMEMBER key member radius unit ... options ... */ static void geoRadiusGeneric(redisClient *c, int type) { robj *key = c->argv[1]; @@ -454,6 +454,7 @@ static void geoRadiusGeneric(redisClient *c, int type) { /* Discover and populate all optional parameters. */ int withdist = 0, withhash = 0, withcoords = 0; int sort = SORT_NONE; + long long count = 0; if (c->argc > base_args) { int remaining = c->argc - base_args; for (int i = 0; i < remaining; i++) { @@ -468,6 +469,14 @@ static void geoRadiusGeneric(redisClient *c, int type) { sort = SORT_ASC; } else if (!strcasecmp(arg, "desc")) { sort = SORT_DESC; + } else if (!strcasecmp(arg, "count") && remaining > 0) { + if (getLongLongFromObjectOrReply(c, c->argv[base_args+i+1], + &count, NULL) != REDIS_OK) return; + if (count <= 0) { + addReplyError(c,"COUNT must be > 0"); + return; + } + i++; } else { addReply(c, shared.syntaxerr); return; @@ -475,6 +484,10 @@ static void geoRadiusGeneric(redisClient *c, int type) { } } + /* COUNT without ordering does not make much sense, force ASC + * ordering if COUNT was specified but no sorting was requested. */ + if (count != 0 && sort == SORT_NONE) sort = SORT_ASC; + /* Get all neighbor geohash boxes for our radius search */ GeoHashRadius georadius = geohashGetAreasByRadiusWGS84(xy[0], xy[1], radius_meters); @@ -512,7 +525,8 @@ static void geoRadiusGeneric(redisClient *c, int type) { * all strings of just zset members *or* a nested multi-bulk reply * containing the zset member string _and_ all the additional options the * user enabled for this request. */ - addReplyMultiBulkLen(c, result_length); + addReplyMultiBulkLen(c, (count == 0 || result_length < count) ? + result_length : count); /* Process [optional] requested sorting */ if (sort == SORT_ASC) { @@ -546,6 +560,10 @@ static void geoRadiusGeneric(redisClient *c, int type) { addReplyDouble(c, gp->longitude); addReplyDouble(c, gp->latitude); } + + /* Stop if COUNT was specified and we already provided the + * specified number of elements. */ + if (count != 0 && count == i+1) break; } geoArrayFree(ga); } From 7d59e0a8c3fb42bcf0e362a41f8abd01c1509429 Mon Sep 17 00:00:00 2001 From: antirez Date: Mon, 29 Jun 2015 09:16:27 +0200 Subject: [PATCH 0278/1928] Geo: commands top comment as in other Redis code. --- src/geo.c | 19 +++++++------------ 1 file changed, 7 insertions(+), 12 deletions(-) diff --git a/src/geo.c b/src/geo.c index c48646635..888bf8745 100644 --- a/src/geo.c +++ b/src/geo.c @@ -341,11 +341,9 @@ static int sort_gp_desc(const void *a, const void *b) { /* ==================================================================== * Commands * ==================================================================== */ -void geoAddCommand(redisClient *c) { - /* args 0-4: [cmd, key, lng, lat, val]; optional 5-6: [radius, units] - * - OR - - * args 0-N: [cmd, key, lng, lat, val, lng2, lat2, val2, ...] */ +/* GEOADD key long lat name [long2 lat2 name2 ... longN latN nameN] */ +void geoAddCommand(redisClient *c) { /* Prepare for the three different forms of the add command. 
*/ double radius_meters = 0; if (c->argc == 7) { @@ -568,18 +566,17 @@ static void geoRadiusGeneric(redisClient *c, int type) { geoArrayFree(ga); } +/* GEORADIUS wrapper function. */ void geoRadiusCommand(redisClient *c) { - /* args 0-5: ["georadius", key, long, lat, radius, units]; - * optionals: [withdist, withcoords, asc|desc] */ geoRadiusGeneric(c, RADIUS_COORDS); } +/* GEORADIUSBYMEMBER wrapper function. */ void geoRadiusByMemberCommand(redisClient *c) { - /* args 0-4: ["georadius", key, compare-against-member, radius, units]; - * optionals: [withdist, withcoords, asc|desc] */ geoRadiusGeneric(c, RADIUS_MEMBER); } +/* GEODECODE long lat */ void geoDecodeCommand(redisClient *c) { GeoHashBits geohash; if (getLongLongFromObjectOrReply(c, c->argv[1], (long long *)&geohash.bits, @@ -612,10 +609,8 @@ void geoDecodeCommand(redisClient *c) { addReplyDouble(c, lat); } +/* GEOENCODE long lat [radius unit] */ void geoEncodeCommand(redisClient *c) { - /* args 0-2: ["geoencode", long, lat]; - * optionals: [radius, units] */ - double radius_meters = 0; if (c->argc >= 5) { if ((radius_meters = extractDistanceOrReply(c, c->argv + 3, NULL)) < 0) @@ -628,7 +623,7 @@ void geoEncodeCommand(redisClient *c) { double xy[2]; if (extractLongLatOrReply(c, c->argv + 1, xy) == REDIS_ERR) return; - /* Encode lat/long into our geohash */ + /* Encode long/lat into our geohash */ GeoHashBits geohash; uint8_t step = geohashEstimateStepsByRadius(radius_meters,0); geohashEncodeWGS84(xy[0], xy[1], step, &geohash); From 6d21027a23864500aadcc200fecfc594b98e9c88 Mon Sep 17 00:00:00 2001 From: antirez Date: Mon, 29 Jun 2015 09:20:07 +0200 Subject: [PATCH 0279/1928] Geo: GEOADD form using radius removed. Can't immagine how this is useful in the context of the API exported by Redis, and we are always in time to add more bloat if needed, later. --- src/geo.c | 24 +++++------------------- 1 file changed, 5 insertions(+), 19 deletions(-) diff --git a/src/geo.c b/src/geo.c index 888bf8745..8a07879fc 100644 --- a/src/geo.c +++ b/src/geo.c @@ -344,20 +344,11 @@ static int sort_gp_desc(const void *a, const void *b) { /* GEOADD key long lat name [long2 lat2 name2 ... longN latN nameN] */ void geoAddCommand(redisClient *c) { - /* Prepare for the three different forms of the add command. */ - double radius_meters = 0; - if (c->argc == 7) { - if ((radius_meters = extractDistanceOrReply(c, c->argv + 5, NULL)) < - 0) { - return; - } - } else if (c->argc == 6) { - addReplyError(c, "must provide units when asking for radius encode"); - return; - } else if ((c->argc - 2) % 3 != 0) { + /* Check arguments number for sanity. */ + if ((c->argc - 2) % 3 != 0) { /* Need an odd number of arguments if we got this far... */ - addReplyError(c, "format is: geoadd [key] [x1] [y1] [member1] " - "[x2] [y2] [member2] ... "); + addReplyError(c, "syntax error. Try GEOADD key [x1] [y1] [name1] " + "[x2] [y2] [name2] ... "); return; } @@ -371,7 +362,6 @@ void geoAddCommand(redisClient *c) { /* Create the argument vector to call ZADD in order to add all * the score,value pairs to the requested zset, where score is actually * an encoded version of lat,long. */ - uint8_t step = geohashEstimateStepsByRadius(radius_meters,0); int i; for (i = 0; i < elements; i++) { double xy[2]; @@ -383,13 +373,9 @@ void geoAddCommand(redisClient *c) { return; } -#ifdef DEBUG - printf("Adding with step size: %d\n", step); -#endif - /* Turn the coordinates into the score of the element. 
*/ GeoHashBits hash; - geohashEncodeWGS84(xy[0], xy[1], step, &hash); + geohashEncodeWGS84(xy[0], xy[1], GEO_STEP_MAX, &hash); GeoHashFix52Bits bits = geohashAlign52Bits(hash); robj *score = createObject(REDIS_STRING, sdsfromlonglong(bits)); robj *val = c->argv[2 + i * 3 + 2]; From db3df441844f8479cd341d1e2d7553139ee4cfec Mon Sep 17 00:00:00 2001 From: antirez Date: Mon, 29 Jun 2015 09:21:31 +0200 Subject: [PATCH 0280/1928] Geo: debugging printf calls removed. --- src/geo.c | 7 ------- 1 file changed, 7 deletions(-) diff --git a/src/geo.c b/src/geo.c index 8a07879fc..c0efa3ca2 100644 --- a/src/geo.c +++ b/src/geo.c @@ -476,10 +476,6 @@ static void geoRadiusGeneric(redisClient *c, int type) { GeoHashRadius georadius = geohashGetAreasByRadiusWGS84(xy[0], xy[1], radius_meters); -#ifdef DEBUG - printf("Searching with step size: %d\n", georadius.hash.step); -#endif - /* Search the zset for all matching points */ geoArray *ga = geoArrayCreate(); membersOfAllNeighbors(zobj, georadius, xy[0], xy[1], radius_meters, ga); @@ -618,9 +614,6 @@ void geoEncodeCommand(redisClient *c) { GeoHashFix52Bits bits = geohashAlign52Bits(geohash); /* Decode the hash so we can return its bounding box */ -#ifdef DEBUG - printf("Decoding with step size: %d\n", geohash.step); -#endif GeoHashArea area; geohashDecodeWGS84(geohash, &area); From 1884bff12d1e91983e91c27637b5fa8854f68cba Mon Sep 17 00:00:00 2001 From: antirez Date: Mon, 29 Jun 2015 09:24:22 +0200 Subject: [PATCH 0281/1928] Geo: fix comment indentation. --- src/geo.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/geo.c b/src/geo.c index c0efa3ca2..f9a12bae3 100644 --- a/src/geo.c +++ b/src/geo.c @@ -613,7 +613,7 @@ void geoEncodeCommand(redisClient *c) { /* Align the hash to a valid 52-bit integer based on step size */ GeoHashFix52Bits bits = geohashAlign52Bits(geohash); -/* Decode the hash so we can return its bounding box */ + /* Decode the hash so we can return its bounding box */ GeoHashArea area; geohashDecodeWGS84(geohash, &area); From 6a8e108e2d220e95668a14db4bc891bfe1bb9743 Mon Sep 17 00:00:00 2001 From: antirez Date: Mon, 29 Jun 2015 09:34:05 +0200 Subject: [PATCH 0282/1928] Geo: GEOENCODE now returns score ranges. If GEOENCODE must be our door to enter the Geocoding implementation details and do fancy things client side, than return the scores as well so that we can query the sorted sets directly if we wish to do the same search multiple times, or want to compute the boxes in the client side to refine our search needs. --- src/geo.c | 31 ++++++++++++++++++++++--------- 1 file changed, 22 insertions(+), 9 deletions(-) diff --git a/src/geo.c b/src/geo.c index f9a12bae3..c2e963f60 100644 --- a/src/geo.c +++ b/src/geo.c @@ -263,12 +263,10 @@ int geoGetPointsInRange(robj *zobj, double min, double max, double lon, double l return ga->used - origincount; } -/* Obtain all members between the min/max of this geohash bounding box. - * Populate a geoArray of GeoPoints by calling geoGetPointsInRange(). - * Return the number of points added to the array. */ -int membersOfGeoHashBox(robj *zobj, GeoHashBits hash, geoArray *ga, double lon, double lat, double radius) { - GeoHashFix52Bits min, max; - +/* Compute the sorted set scores min (inclusive), max (exclusive) we should + * query in order to retrieve all the elements inside the specified area + * 'hash'. The two scores are returned by reference in *min and *max. 
*/ +void scoresOfGeoHashBox(GeoHashBits hash, GeoHashFix52Bits *min, GeoHashFix52Bits *max) { /* We want to compute the sorted set scores that will include all the * elements inside the specified Geohash 'hash', which has as many * bits as specified by hash.step * 2. @@ -289,10 +287,18 @@ int membersOfGeoHashBox(robj *zobj, GeoHashBits hash, geoArray *ga, double lon, * and * 1010110000000000000000000000000000000000000000000000 (excluded). */ - min = geohashAlign52Bits(hash); + *min = geohashAlign52Bits(hash); hash.bits++; - max = geohashAlign52Bits(hash); + *max = geohashAlign52Bits(hash); +} +/* Obtain all members between the min/max of this geohash bounding box. + * Populate a geoArray of GeoPoints by calling geoGetPointsInRange(). + * Return the number of points added to the array. */ +int membersOfGeoHashBox(robj *zobj, GeoHashBits hash, geoArray *ga, double lon, double lat, double radius) { + GeoHashFix52Bits min, max; + + scoresOfGeoHashBox(hash,&min,&max); return geoGetPointsInRange(zobj, min, max, lon, lat, radius, ga); } @@ -621,7 +627,7 @@ void geoEncodeCommand(redisClient *c) { double lat = (area.latitude.min + area.latitude.max) / 2; /* Return four nested multibulk replies. */ - addReplyMultiBulkLen(c, 4); + addReplyMultiBulkLen(c, 5); /* Return the binary geohash we calculated as 52-bit integer */ addReplyLongLong(c, bits); @@ -640,6 +646,13 @@ void geoEncodeCommand(redisClient *c) { addReplyMultiBulkLen(c, 2); addReplyDouble(c, lon); addReplyDouble(c, lat); + + /* Return the two scores to query to get the range from the sorted set. */ + GeoHashFix52Bits min, max; + scoresOfGeoHashBox(geohash,&min,&max); + addReplyMultiBulkLen(c, 2); + addReplyDouble(c, min); + addReplyDouble(c, max); } /* GEOHASH key ele1 ele2 ... eleN From ddc7b85c5f28fd96009a006e5bed5c775bc7f367 Mon Sep 17 00:00:00 2001 From: antirez Date: Mon, 29 Jun 2015 09:39:34 +0200 Subject: [PATCH 0283/1928] Geo: GEOENCODE: fix command arity check. --- src/geo.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/geo.c b/src/geo.c index c2e963f60..37d632815 100644 --- a/src/geo.c +++ b/src/geo.c @@ -600,11 +600,11 @@ void geoDecodeCommand(redisClient *c) { /* GEOENCODE long lat [radius unit] */ void geoEncodeCommand(redisClient *c) { double radius_meters = 0; - if (c->argc >= 5) { + if (c->argc == 5) { if ((radius_meters = extractDistanceOrReply(c, c->argv + 3, NULL)) < 0) return; - } else if (c->argc == 4) { - addReplyError(c, "must provide units when asking for radius encode"); + } else if (c->argc == 4 || c->argc > 5) { + addReplyError(c, "syntax error, try: GEOENCODE x y [radius unit]"); return; } From 7cd2a4e19600e827afe883b36cb899b0a5473187 Mon Sep 17 00:00:00 2001 From: antirez Date: Mon, 29 Jun 2015 09:46:58 +0200 Subject: [PATCH 0284/1928] Geo: GEOENCODE test fixed for new return value. 
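The extra nested reply being tested here is the {min max} score pair added by the earlier "GEOENCODE now returns score ranges" commit. As a rough client-side illustration only (editor's sketch, not part of this patch: "mygeoset" is a placeholder key name and the two scores are simply the pair the updated test expects), the returned range maps onto a raw sorted-set query, with the max score kept exclusive:

#include <stdint.h>
#include <stdio.h>

int main(void) {
    /* Min (inclusive) and max (exclusive) scores of the geohash box, as
     * reported in the last nested reply of GEOENCODE; the concrete values
     * are the pair asserted by the test change below. */
    uint64_t min = 3471579339700058ULL;
    uint64_t max = 3471579339700059ULL;

    /* Every member inside that box can be fetched straight from the
     * underlying sorted set; the '(' prefix makes the max exclusive. */
    printf("ZRANGEBYSCORE mygeoset %llu (%llu\n",
           (unsigned long long)min, (unsigned long long)max);
    return 0;
}
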
--- tests/unit/geo.tcl | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tests/unit/geo.tcl b/tests/unit/geo.tcl index 5544d6555..cd119020a 100644 --- a/tests/unit/geo.tcl +++ b/tests/unit/geo.tcl @@ -68,7 +68,8 @@ start_server {tags {"geo"}} { r geoencode 1.8063239 41.2358883 } {3471579339700058 {1.8063229322433472 41.235888125243704}\ {1.806328296661377 41.235890659964866}\ - {1.8063256144523621 41.235889392604285}} + {1.8063256144523621 41.235889392604285}\ + {3471579339700058 3471579339700059}} test {GEODECODE simple} { r geodecode 3471579339700058 From f6edd0cb933c1c82c3e9e9dd33597654602a1ba4 Mon Sep 17 00:00:00 2001 From: antirez Date: Mon, 29 Jun 2015 09:52:23 +0200 Subject: [PATCH 0285/1928] Geo: GEORADIUS COUNT tests. --- tests/unit/geo.tcl | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/tests/unit/geo.tcl b/tests/unit/geo.tcl index cd119020a..c67334936 100644 --- a/tests/unit/geo.tcl +++ b/tests/unit/geo.tcl @@ -56,6 +56,14 @@ start_server {tags {"geo"}} { r georadius nyc -73.9798091 40.7598464 3 km withdist asc } {{{central park n/q/r} 0.7750} {4545 2.3651} {{union square} 2.7697}} + test {GEORADIUS with COUNT} { + r georadius nyc -73.9798091 40.7598464 10 km COUNT 3 + } {{central park n/q/r} 4545 {union square}} + + test {GEORADIUS with COUNT DESC} { + r georadius nyc -73.9798091 40.7598464 10 km COUNT 2 DESC + } {{wtc one} q4} + test {GEORADIUSBYMEMBER simple (sorted)} { r georadiusbymember nyc "wtc one" 7 km } {{wtc one} {union square} {central park n/q/r} 4545 {lic market}} From aae0a1f9cce0ced9e6aa2e76977d6db72f6b4edc Mon Sep 17 00:00:00 2001 From: antirez Date: Mon, 29 Jun 2015 10:47:07 +0200 Subject: [PATCH 0286/1928] Geo: GEOPOS command and tests. --- src/geo.c | 33 +++++++++++++++++++++++++++++++++ src/redis.c | 1 + src/redis.h | 1 + tests/unit/geo.tcl | 17 +++++++++++++++++ 4 files changed, 52 insertions(+) diff --git a/src/geo.c b/src/geo.c index 37d632815..18b0efac1 100644 --- a/src/geo.c +++ b/src/geo.c @@ -709,3 +709,36 @@ void geoHashCommand(redisClient *c) { } } } + +/* GEOPOS key ele1 ele2 ... eleN + * + * Returns an array of two-items arrays representing the x,y position of each + * element specified in the arguments. For missing elements NULL is returned. */ +void geoposCommand(redisClient *c) { + int j; + + /* Look up the requested zset */ + robj *zobj = NULL; + if ((zobj = lookupKeyReadOrReply(c, c->argv[1], shared.emptymultibulk)) + == NULL || checkType(c, zobj, REDIS_ZSET)) return; + + /* Report elements one after the other, using a null bulk reply for + * missing elements. */ + addReplyMultiBulkLen(c,c->argc-2); + for (j = 2; j < c->argc; j++) { + double score; + if (zsetScore(zobj, c->argv[j], &score) == REDIS_ERR) { + addReply(c,shared.nullmultibulk); + } else { + /* Decode... 
*/ + double xy[2]; + if (!decodeGeohash(score,xy)) { + addReply(c,shared.nullmultibulk); + continue; + } + addReplyMultiBulkLen(c,2); + addReplyDouble(c,xy[0]); + addReplyDouble(c,xy[1]); + } + } +} diff --git a/src/redis.c b/src/redis.c index 2221e128b..38430b82c 100644 --- a/src/redis.c +++ b/src/redis.c @@ -288,6 +288,7 @@ struct redisCommand redisCommandTable[] = { {"geoencode",geoEncodeCommand,-3,"r",0,NULL,0,0,0,0,0}, {"geodecode",geoDecodeCommand,2,"r",0,NULL,0,0,0,0,0}, {"geohash",geoHashCommand,-2,"r",0,NULL,0,0,0,0,0}, + {"geopos",geoposCommand,-2,"r",0,NULL,0,0,0,0,0}, {"pfselftest",pfselftestCommand,1,"r",0,NULL,0,0,0,0,0}, {"pfadd",pfaddCommand,-2,"wmF",0,NULL,1,1,1,0,0}, {"pfcount",pfcountCommand,-2,"r",0,NULL,1,1,1,0,0}, diff --git a/src/redis.h b/src/redis.h index 4fd643ac1..8eca36d07 100644 --- a/src/redis.h +++ b/src/redis.h @@ -1564,6 +1564,7 @@ void geoRadiusByMemberCommand(redisClient *c); void geoRadiusCommand(redisClient *c); void geoAddCommand(redisClient *c); void geoHashCommand(redisClient *c); +void geoposCommand(redisClient *c); void pfselftestCommand(redisClient *c); void pfaddCommand(redisClient *c); void pfcountCommand(redisClient *c); diff --git a/tests/unit/geo.tcl b/tests/unit/geo.tcl index c67334936..191f88c58 100644 --- a/tests/unit/geo.tcl +++ b/tests/unit/geo.tcl @@ -92,6 +92,23 @@ start_server {tags {"geo"}} { lindex [r geohash points test] 0 } {ezs42e44yx0} + test {GEOPOS simple} { + r del points + r geoadd points 10 20 a 30 40 b + lassign [lindex [r geopos points a b] 0] x1 y1 + lassign [lindex [r geopos points a b] 1] x2 y2 + assert {abs($x1 - 10) < 0.001} + assert {abs($y1 - 20) < 0.001} + assert {abs($x2 - 30) < 0.001} + assert {abs($y2 - 40) < 0.001} + } + + test {GEOPOS missing element} { + r del points + r geoadd points 10 20 a 30 40 b + lindex [r geopos points a x b] 1 + } {} + test {GEOADD + GEORANGE randomized test} { set attempt 10 while {[incr attempt -1]} { From a12192f5ff33298eb7082cc3f6e2de17957e7d26 Mon Sep 17 00:00:00 2001 From: antirez Date: Mon, 29 Jun 2015 12:07:18 +0200 Subject: [PATCH 0287/1928] Geo: command function names converted to lowercase, as elsewhere. In Redis MULTIWORDCOMMANDNAME are mapped to functions where the command name is all lowercase: multiwordcommandnameCommand(). --- src/geo.c | 18 +++++++++--------- src/geo.h | 6 ------ src/redis.c | 12 ++++++------ src/redis.h | 12 ++++++------ 4 files changed, 21 insertions(+), 27 deletions(-) diff --git a/src/geo.c b/src/geo.c index 18b0efac1..2869550ad 100644 --- a/src/geo.c +++ b/src/geo.c @@ -349,7 +349,7 @@ static int sort_gp_desc(const void *a, const void *b) { * ==================================================================== */ /* GEOADD key long lat name [long2 lat2 name2 ... longN latN nameN] */ -void geoAddCommand(redisClient *c) { +void geoaddCommand(redisClient *c) { /* Check arguments number for sanity. */ if ((c->argc - 2) % 3 != 0) { /* Need an odd number of arguments if we got this far... */ @@ -405,7 +405,7 @@ void geoAddCommand(redisClient *c) { /* GEORADIUS key x y radius unit [WITHDIST] [WITHHASH] [WITHCOORD] [ASC|DESC] * [COUNT count] * GEORADIUSBYMEMBER key member radius unit ... options ... */ -static void geoRadiusGeneric(redisClient *c, int type) { +static void georadiusGeneric(redisClient *c, int type) { robj *key = c->argv[1]; /* Look up the requested zset */ @@ -555,17 +555,17 @@ static void geoRadiusGeneric(redisClient *c, int type) { } /* GEORADIUS wrapper function. 
*/ -void geoRadiusCommand(redisClient *c) { - geoRadiusGeneric(c, RADIUS_COORDS); +void georadiusCommand(redisClient *c) { + georadiusGeneric(c, RADIUS_COORDS); } /* GEORADIUSBYMEMBER wrapper function. */ -void geoRadiusByMemberCommand(redisClient *c) { - geoRadiusGeneric(c, RADIUS_MEMBER); +void georadiusByMemberCommand(redisClient *c) { + georadiusGeneric(c, RADIUS_MEMBER); } /* GEODECODE long lat */ -void geoDecodeCommand(redisClient *c) { +void geodecodeCommand(redisClient *c) { GeoHashBits geohash; if (getLongLongFromObjectOrReply(c, c->argv[1], (long long *)&geohash.bits, NULL) != REDIS_OK) @@ -598,7 +598,7 @@ void geoDecodeCommand(redisClient *c) { } /* GEOENCODE long lat [radius unit] */ -void geoEncodeCommand(redisClient *c) { +void geoencodeCommand(redisClient *c) { double radius_meters = 0; if (c->argc == 5) { if ((radius_meters = extractDistanceOrReply(c, c->argv + 3, NULL)) < 0) @@ -659,7 +659,7 @@ void geoEncodeCommand(redisClient *c) { * * Returns an array with an 11 characters geohash representation of the * position of the specified elements. */ -void geoHashCommand(redisClient *c) { +void geohashCommand(redisClient *c) { char *geoalphabet= "0123456789bcdefghjkmnpqrstuvwxyz"; int j; diff --git a/src/geo.h b/src/geo.h index 4f5c5e6fe..cf4e42c90 100644 --- a/src/geo.h +++ b/src/geo.h @@ -3,12 +3,6 @@ #include "redis.h" -void geoEncodeCommand(redisClient *c); -void geoDecodeCommand(redisClient *c); -void geoRadiusByMemberCommand(redisClient *c); -void geoRadiusCommand(redisClient *c); -void geoAddCommand(redisClient *c); - /* Structures used inside geo.c in order to represent points and array of * points on the earth. */ typedef struct geoPoint { diff --git a/src/redis.c b/src/redis.c index 38430b82c..e6707d5f2 100644 --- a/src/redis.c +++ b/src/redis.c @@ -282,12 +282,12 @@ struct redisCommand redisCommandTable[] = { {"bitpos",bitposCommand,-3,"r",0,NULL,1,1,1,0,0}, {"wait",waitCommand,3,"rs",0,NULL,0,0,0,0,0}, {"command",commandCommand,0,"rlt",0,NULL,0,0,0,0,0}, - {"geoadd",geoAddCommand,-5,"wm",0,NULL,1,1,1,0,0}, - {"georadius",geoRadiusCommand,-6,"r",0,NULL,1,1,1,0,0}, - {"georadiusbymember",geoRadiusByMemberCommand,-5,"r",0,NULL,1,1,1,0,0}, - {"geoencode",geoEncodeCommand,-3,"r",0,NULL,0,0,0,0,0}, - {"geodecode",geoDecodeCommand,2,"r",0,NULL,0,0,0,0,0}, - {"geohash",geoHashCommand,-2,"r",0,NULL,0,0,0,0,0}, + {"geoadd",geoaddCommand,-5,"wm",0,NULL,1,1,1,0,0}, + {"georadius",georadiusCommand,-6,"r",0,NULL,1,1,1,0,0}, + {"georadiusbymember",georadiusByMemberCommand,-5,"r",0,NULL,1,1,1,0,0}, + {"geoencode",geoencodeCommand,-3,"r",0,NULL,0,0,0,0,0}, + {"geodecode",geodecodeCommand,2,"r",0,NULL,0,0,0,0,0}, + {"geohash",geohashCommand,-2,"r",0,NULL,0,0,0,0,0}, {"geopos",geoposCommand,-2,"r",0,NULL,0,0,0,0,0}, {"pfselftest",pfselftestCommand,1,"r",0,NULL,0,0,0,0,0}, {"pfadd",pfaddCommand,-2,"wmF",0,NULL,1,1,1,0,0}, diff --git a/src/redis.h b/src/redis.h index 8eca36d07..70b301a59 100644 --- a/src/redis.h +++ b/src/redis.h @@ -1558,12 +1558,12 @@ void bitcountCommand(redisClient *c); void bitposCommand(redisClient *c); void replconfCommand(redisClient *c); void waitCommand(redisClient *c); -void geoEncodeCommand(redisClient *c); -void geoDecodeCommand(redisClient *c); -void geoRadiusByMemberCommand(redisClient *c); -void geoRadiusCommand(redisClient *c); -void geoAddCommand(redisClient *c); -void geoHashCommand(redisClient *c); +void geoencodeCommand(redisClient *c); +void geodecodeCommand(redisClient *c); +void georadiusByMemberCommand(redisClient *c); +void 
georadiusCommand(redisClient *c); +void geoaddCommand(redisClient *c); +void geohashCommand(redisClient *c); void geoposCommand(redisClient *c); void pfselftestCommand(redisClient *c); void pfaddCommand(redisClient *c); From f108c687ad122d76e8468f98934255ffb51cc7e8 Mon Sep 17 00:00:00 2001 From: antirez Date: Mon, 29 Jun 2015 12:44:31 +0200 Subject: [PATCH 0288/1928] Geo: GEODIST and tests. --- deps/geohash-int/geohash_helper.c | 4 +- deps/geohash-int/geohash_helper.h | 2 + src/geo.c | 84 ++++++++++++++++++++++++------- src/redis.c | 1 + src/redis.h | 1 + tests/unit/geo.tcl | 22 ++++++++ 6 files changed, 94 insertions(+), 20 deletions(-) diff --git a/deps/geohash-int/geohash_helper.c b/deps/geohash-int/geohash_helper.c index 729f010ea..88a972b47 100644 --- a/deps/geohash-int/geohash_helper.c +++ b/deps/geohash-int/geohash_helper.c @@ -167,7 +167,7 @@ GeoHashFix52Bits geohashAlign52Bits(const GeoHashBits hash) { } /* Calculate distance using haversin great circle distance formula. */ -double distanceEarth(double lon1d, double lat1d, double lon2d, double lat2d) { +double geohashGetDistance(double lon1d, double lat1d, double lon2d, double lat2d) { double lat1r, lon1r, lat2r, lon2r, u, v; lat1r = deg_rad(lat1d); lon1r = deg_rad(lon1d); @@ -182,7 +182,7 @@ double distanceEarth(double lon1d, double lat1d, double lon2d, double lat2d) { int geohashGetDistanceIfInRadius(double x1, double y1, double x2, double y2, double radius, double *distance) { - *distance = distanceEarth(x1, y1, x2, y2); + *distance = geohashGetDistance(x1, y1, x2, y2); if (*distance > radius) return 0; return 1; } diff --git a/deps/geohash-int/geohash_helper.h b/deps/geohash-int/geohash_helper.h index 0e38740de..70c6b2095 100644 --- a/deps/geohash-int/geohash_helper.h +++ b/deps/geohash-int/geohash_helper.h @@ -58,6 +58,8 @@ GeoHashRadius geohashGetAreasByRadiusWGS84(double longitude, double latitude, GeoHashRadius geohashGetAreasByRadiusMercator(double longitude, double latitude, double radius_meters); GeoHashFix52Bits geohashAlign52Bits(const GeoHashBits hash); +double geohashGetDistance(double lon1d, double lat1d, + double lon2d, double lat2d); int geohashGetDistanceIfInRadius(double x1, double y1, double x2, double y2, double radius, double *distance); diff --git a/src/geo.c b/src/geo.c index 2869550ad..e11f5b371 100644 --- a/src/geo.c +++ b/src/geo.c @@ -112,6 +112,30 @@ static int longLatFromMember(robj *zobj, robj *member, double *xy) { return REDIS_OK; } +/* Check that the unit argument matches one of the known units, and returns + * the conversion factor to meters (you need to divide meters by the conversion + * factor to convert to the right unit). + * + * If the unit is not valid, an error is reported to the client, and a value + * less than zero is returned. */ +double extractUnitOrReply(redisClient *c, robj *unit) { + char *u = unit->ptr; + + if (!strcmp(u, "m") || !strncmp(u, "meter", 5)) { + return 1; + } else if (!strcmp(u, "ft") || !strncmp(u, "feet", 4)) { + return 0.3048; + } else if (!strcmp(u, "mi") || !strncmp(u, "mile", 4)) { + return 1609.34; + } else if (!strcmp(u, "km") || !strncmp(u, "kilometer", 9)) { + return 1000; + } else { + addReplyError(c, "unsupported unit provided. please use meters (m), " + "kilometers (km), miles (mi), or feet (ft)"); + return -1; + } +} + /* Input Argument Helper. 
* Extract the dinstance from the specified two arguments starting at 'argv' * that shouldbe in the form: and return the dinstance in the @@ -127,25 +151,10 @@ static double extractDistanceOrReply(redisClient *c, robj **argv, return -1; } - double to_meters; - sds units = argv[1]->ptr; - if (!strcmp(units, "m") || !strncmp(units, "meter", 5)) { - to_meters = 1; - } else if (!strcmp(units, "ft") || !strncmp(units, "feet", 4)) { - to_meters = 0.3048; - } else if (!strcmp(units, "mi") || !strncmp(units, "mile", 4)) { - to_meters = 1609.34; - } else if (!strcmp(units, "km") || !strncmp(units, "kilometer", 9)) { - to_meters = 1000; - } else { - addReplyError(c, "unsupported unit provided. please use meters (m), " - "kilometers (km), miles (mi), or feet (ft)"); - return -1; - } - - if (conversion) - *conversion = to_meters; + double to_meters = extractUnitOrReply(c,argv[1]); + if (to_meters < 0) return -1; + if (conversion) *conversion = to_meters; return distance * to_meters; } @@ -742,3 +751,42 @@ void geoposCommand(redisClient *c) { } } } + +/* GEODIST key ele1 ele2 [unit] + * + * Return the distance, in meters by default, otherwise accordig to "unit", + * between points ele1 and ele2. If one or more elements are missing NULL + * is returned. */ +void geodistCommand(redisClient *c) { + double to_meter = 1; + + /* Check if there is the unit to extract, otherwise assume meters. */ + if (c->argc == 5) { + to_meter = extractUnitOrReply(c,c->argv[4]); + if (to_meter < 0) return; + } else if (c->argc > 5) { + addReply(c,shared.syntaxerr); + return; + } + + /* Look up the requested zset */ + robj *zobj = NULL; + if ((zobj = lookupKeyReadOrReply(c, c->argv[1], shared.emptybulk)) + == NULL || checkType(c, zobj, REDIS_ZSET)) return; + + /* Get the scores. We need both otherwise NULL is returned. */ + double score1, score2, xyxy[4]; + if (zsetScore(zobj, c->argv[2], &score1) == REDIS_ERR || + zsetScore(zobj, c->argv[3], &score2) == REDIS_ERR) + { + addReply(c,shared.nullbulk); + return; + } + + /* Decode & compute the distance. 
*/ + if (!decodeGeohash(score1,xyxy) || !decodeGeohash(score2,xyxy+2)) + addReply(c,shared.nullbulk); + else + addReplyDouble(c, + geohashGetDistance(xyxy[0],xyxy[1],xyxy[2],xyxy[3]) / to_meter); +} diff --git a/src/redis.c b/src/redis.c index e6707d5f2..cb5c73771 100644 --- a/src/redis.c +++ b/src/redis.c @@ -289,6 +289,7 @@ struct redisCommand redisCommandTable[] = { {"geodecode",geodecodeCommand,2,"r",0,NULL,0,0,0,0,0}, {"geohash",geohashCommand,-2,"r",0,NULL,0,0,0,0,0}, {"geopos",geoposCommand,-2,"r",0,NULL,0,0,0,0,0}, + {"geodist",geodistCommand,-4,"r",0,NULL,0,0,0,0,0}, {"pfselftest",pfselftestCommand,1,"r",0,NULL,0,0,0,0,0}, {"pfadd",pfaddCommand,-2,"wmF",0,NULL,1,1,1,0,0}, {"pfcount",pfcountCommand,-2,"r",0,NULL,1,1,1,0,0}, diff --git a/src/redis.h b/src/redis.h index 70b301a59..b64a7697a 100644 --- a/src/redis.h +++ b/src/redis.h @@ -1565,6 +1565,7 @@ void georadiusCommand(redisClient *c); void geoaddCommand(redisClient *c); void geohashCommand(redisClient *c); void geoposCommand(redisClient *c); +void geodistCommand(redisClient *c); void pfselftestCommand(redisClient *c); void pfaddCommand(redisClient *c); void pfcountCommand(redisClient *c); diff --git a/tests/unit/geo.tcl b/tests/unit/geo.tcl index 191f88c58..cf6d8c614 100644 --- a/tests/unit/geo.tcl +++ b/tests/unit/geo.tcl @@ -109,6 +109,28 @@ start_server {tags {"geo"}} { lindex [r geopos points a x b] 1 } {} + test {GEODIST simple & unit} { + r del points + r geoadd points 13.361389 38.115556 "Palermo" \ + 15.087269 37.502669 "Catania" + set m [r geodist points Palermo Catania] + assert {$m > 166274 && $m < 166275} + set km [r geodist points Palermo Catania km] + assert {$km > 166.2 && $km < 166.3} + } + + test {GEODIST missing elements} { + r del points + r geoadd points 13.361389 38.115556 "Palermo" \ + 15.087269 37.502669 "Catania" + set m [r geodist points Palermo Agrigento] + assert {$m eq {}} + set m [r geodist points Ragusa Agrigento] + assert {$m eq {}} + set m [r geodist empty_key Palermo Catania] + assert {$m eq {}} + } + test {GEOADD + GEORANGE randomized test} { set attempt 10 while {[incr attempt -1]} { From 083acbebc85191314840a02a8618f051f49d0319 Mon Sep 17 00:00:00 2001 From: antirez Date: Mon, 29 Jun 2015 15:57:17 +0200 Subject: [PATCH 0289/1928] Geo: remove static declarations. Stack traces produced by Redis on crash are the most useful tool we have to fix non easily reproducible crashes, or even easily reproducible ones where the user just posts a bug report and does not collaborate furhter. By declaring functions "static" they no longer show up in the stack trace. --- src/geo.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/src/geo.c b/src/geo.c index e11f5b371..364f0d829 100644 --- a/src/geo.c +++ b/src/geo.c @@ -82,7 +82,7 @@ void geoArrayFree(geoArray *ga) { /* ==================================================================== * Helpers * ==================================================================== */ -static inline int decodeGeohash(double bits, double *xy) { +int decodeGeohash(double bits, double *xy) { GeoHashBits hash = { .bits = (uint64_t)bits, .step = GEO_STEP_MAX }; return geohashDecodeToLongLatWGS84(hash, xy); } @@ -90,7 +90,7 @@ static inline int decodeGeohash(double bits, double *xy) { /* Input Argument Helper */ /* Take a pointer to the latitude arg then use the next arg for longitude. * On parse error REDIS_ERR is returned, otherwise REDIS_OK. 
*/ -static inline int extractLongLatOrReply(redisClient *c, robj **argv, +int extractLongLatOrReply(redisClient *c, robj **argv, double *xy) { for (int i = 0; i < 2; i++) { if (getDoubleFromObjectOrReply(c, argv[i], xy + i, NULL) != @@ -104,7 +104,7 @@ static inline int extractLongLatOrReply(redisClient *c, robj **argv, /* Input Argument Helper */ /* Decode lat/long from a zset member's score. * Returns REDIS_OK on successful decoding, otherwise REDIS_ERR is returned. */ -static int longLatFromMember(robj *zobj, robj *member, double *xy) { +int longLatFromMember(robj *zobj, robj *member, double *xy) { double score = 0; if (zsetScore(zobj, member, &score) == REDIS_ERR) return REDIS_ERR; @@ -143,7 +143,7 @@ double extractUnitOrReply(redisClient *c, robj *unit) { * to use in order to convert meters to the unit. * * On error a value less than zero is returned. */ -static double extractDistanceOrReply(redisClient *c, robj **argv, +double extractDistanceOrReply(redisClient *c, robj **argv, double *conversion) { double distance; if (getDoubleFromObjectOrReply(c, argv[0], &distance, @@ -163,7 +163,7 @@ static double extractDistanceOrReply(redisClient *c, robj **argv, * than "5.2144992818115 meters away." We provide 4 digits after the dot * so that the returned value is decently accurate even when the unit is * the kilometer. */ -static inline void addReplyDoubleDistance(redisClient *c, double d) { +inline void addReplyDoubleDistance(redisClient *c, double d) { char dbuf[128]; int dlen = snprintf(dbuf, sizeof(dbuf), "%.4f", d); addReplyBulkCBuffer(c, dbuf, dlen); @@ -414,7 +414,7 @@ void geoaddCommand(redisClient *c) { /* GEORADIUS key x y radius unit [WITHDIST] [WITHHASH] [WITHCOORD] [ASC|DESC] * [COUNT count] * GEORADIUSBYMEMBER key member radius unit ... options ... */ -static void georadiusGeneric(redisClient *c, int type) { +void georadiusGeneric(redisClient *c, int type) { robj *key = c->argv[1]; /* Look up the requested zset */ From 69c5b27273272c93822a5d2bd776ddc11210e88a Mon Sep 17 00:00:00 2001 From: antirez Date: Mon, 29 Jun 2015 16:02:33 +0200 Subject: [PATCH 0290/1928] Geo: support units only in abbreviated form. I'm not a strong believer in multiple syntax for the same stuff, so now units can be specified only as m, km, ft, mi. --- src/geo.c | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/src/geo.c b/src/geo.c index 364f0d829..91cfb15a4 100644 --- a/src/geo.c +++ b/src/geo.c @@ -121,17 +121,17 @@ int longLatFromMember(robj *zobj, robj *member, double *xy) { double extractUnitOrReply(redisClient *c, robj *unit) { char *u = unit->ptr; - if (!strcmp(u, "m") || !strncmp(u, "meter", 5)) { + if (!strcmp(u, "m")) { return 1; - } else if (!strcmp(u, "ft") || !strncmp(u, "feet", 4)) { - return 0.3048; - } else if (!strcmp(u, "mi") || !strncmp(u, "mile", 4)) { - return 1609.34; - } else if (!strcmp(u, "km") || !strncmp(u, "kilometer", 9)) { + } else if (!strcmp(u, "km")) { return 1000; + } else if (!strcmp(u, "ft")) { + return 0.3048; + } else if (!strcmp(u, "mi")) { + return 1609.34; } else { - addReplyError(c, "unsupported unit provided. please use meters (m), " - "kilometers (km), miles (mi), or feet (ft)"); + addReplyError(c, + "unsupported unit provided. please use m, km, ft, mi"); return -1; } } From d308cadc8a18fe8aea41898d8d881f91103617a0 Mon Sep 17 00:00:00 2001 From: antirez Date: Mon, 29 Jun 2015 16:34:02 +0200 Subject: [PATCH 0291/1928] Geo: added my copyright notice in modified files. 
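As a quick sanity check of the GEODIST numbers asserted a few commits back, the standalone sketch below (editor's illustration, not Redis source) reuses the haversine formula of geohashGetDistance(), the Earth radius from the Tcl geo_distance helper, and the Palermo/Catania coordinates from the GEODIST test, then applies the meter/kilometer/mile factors accepted by extractUnitOrReply():

/* Build with: cc check_geodist.c -lm   (file name is just an example) */
#include <math.h>
#include <stdio.h>

#define EARTH_RADIUS_M 6372797.560856   /* radius used by the Tcl helper */

static double deg_rad(double deg) { return deg * atan(1.0) * 8.0 / 360.0; }

/* Haversine great circle distance, mirroring geohashGetDistance(). */
static double haversine(double lon1, double lat1, double lon2, double lat2) {
    double lon1r = deg_rad(lon1), lat1r = deg_rad(lat1);
    double lon2r = deg_rad(lon2), lat2r = deg_rad(lat2);
    double u = sin((lat2r - lat1r) / 2);
    double v = sin((lon2r - lon1r) / 2);
    return 2.0 * EARTH_RADIUS_M *
           asin(sqrt(u * u + cos(lat1r) * cos(lat2r) * v * v));
}

int main(void) {
    /* Palermo and Catania in the long,lat order used by GEOADD. */
    double m = haversine(13.361389, 38.115556, 15.087269, 37.502669);
    printf("m:  %.4f\n", m);             /* test expects 166274 < m < 166275 */
    printf("km: %.4f\n", m / 1000.0);    /* test expects 166.2 < km < 166.3  */
    printf("mi: %.4f\n", m / 1609.34);   /* 1609.34 is the factor in geo.c   */
    return 0;
}
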
--- deps/geohash-int/geohash.c | 1 + deps/geohash-int/geohash.h | 1 + deps/geohash-int/geohash_helper.c | 1 + deps/geohash-int/geohash_helper.h | 1 + src/geo.c | 1 + 5 files changed, 5 insertions(+) diff --git a/deps/geohash-int/geohash.c b/deps/geohash-int/geohash.c index e797fbc8f..f2b6a8de9 100644 --- a/deps/geohash-int/geohash.c +++ b/deps/geohash-int/geohash.c @@ -1,6 +1,7 @@ /* * Copyright (c) 2013-2014, yinqiwen * Copyright (c) 2014, Matt Stancliff . + * Copyright (c) 2015, Salvatore Sanfilippo . * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/deps/geohash-int/geohash.h b/deps/geohash-int/geohash.h index 2bf1f5df8..5e76c249c 100644 --- a/deps/geohash-int/geohash.h +++ b/deps/geohash-int/geohash.h @@ -1,6 +1,7 @@ /* * Copyright (c) 2013-2014, yinqiwen * Copyright (c) 2014, Matt Stancliff . + * Copyright (c) 2015, Salvatore Sanfilippo . * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/deps/geohash-int/geohash_helper.c b/deps/geohash-int/geohash_helper.c index 88a972b47..4c3762faf 100644 --- a/deps/geohash-int/geohash_helper.c +++ b/deps/geohash-int/geohash_helper.c @@ -1,6 +1,7 @@ /* * Copyright (c) 2013-2014, yinqiwen * Copyright (c) 2014, Matt Stancliff . + * Copyright (c) 2015, Salvatore Sanfilippo . * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/deps/geohash-int/geohash_helper.h b/deps/geohash-int/geohash_helper.h index 70c6b2095..bff111dbe 100644 --- a/deps/geohash-int/geohash_helper.h +++ b/deps/geohash-int/geohash_helper.h @@ -1,6 +1,7 @@ /* * Copyright (c) 2013-2014, yinqiwen * Copyright (c) 2014, Matt Stancliff . + * Copyright (c) 2015, Salvatore Sanfilippo . * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/src/geo.c b/src/geo.c index 91cfb15a4..66c17a111 100644 --- a/src/geo.c +++ b/src/geo.c @@ -1,5 +1,6 @@ /* * Copyright (c) 2014, Matt Stancliff . + * Copyright (c) 2015, Salvatore Sanfilippo . * All rights reserved. * * Redistribution and use in source and binary forms, with or without From 4160bf0448e93e25607f844a149b1418e3fa3dd4 Mon Sep 17 00:00:00 2001 From: antirez Date: Wed, 1 Jul 2015 16:12:08 +0200 Subject: [PATCH 0292/1928] Geo: sync faster decoding from krtm that synched from Ardb. Instead of successive divisions in iteration the new code uses bitwise magic to interleave / deinterleave two 32bit values into a 64bit one. All tests still passing and is measurably faster, so worth it. --- deps/geohash-int/geohash.c | 141 +++++++++++++++++++++++-------------- src/geo.c | 2 +- 2 files changed, 91 insertions(+), 52 deletions(-) diff --git a/deps/geohash-int/geohash.c b/deps/geohash-int/geohash.c index f2b6a8de9..eaad9858d 100644 --- a/deps/geohash-int/geohash.c +++ b/deps/geohash-int/geohash.c @@ -44,6 +44,71 @@ * ----------------- */ +/* Interleave lower bits of x and y, so the bits of x + * are in the even positions and bits from y in the odd; + * x and y must initially be less than 2**32 (65536). 
+ * From: https://graphics.stanford.edu/~seander/bithacks.html#InterleaveBMN + */ +static inline uint64_t interleave64(uint32_t xlo, uint32_t ylo) { + static const uint64_t B[] = {0x5555555555555555, 0x3333333333333333, + 0x0F0F0F0F0F0F0F0F, 0x00FF00FF00FF00FF, + 0x0000FFFF0000FFFF}; + static const unsigned int S[] = {1, 2, 4, 8, 16}; + + uint64_t x = xlo; + uint64_t y = ylo; + + x = (x | (x << S[4])) & B[4]; + y = (y | (y << S[4])) & B[4]; + + x = (x | (x << S[3])) & B[3]; + y = (y | (y << S[3])) & B[3]; + + x = (x | (x << S[2])) & B[2]; + y = (y | (y << S[2])) & B[2]; + + x = (x | (x << S[1])) & B[1]; + y = (y | (y << S[1])) & B[1]; + + x = (x | (x << S[0])) & B[0]; + y = (y | (y << S[0])) & B[0]; + + return x | (y << 1); +} + +/* reverse the interleave process + * derived from http://stackoverflow.com/questions/4909263 + */ +static inline uint64_t deinterleave64(uint64_t interleaved) { + static const uint64_t B[] = {0x5555555555555555, 0x3333333333333333, + 0x0F0F0F0F0F0F0F0F, 0x00FF00FF00FF00FF, + 0x0000FFFF0000FFFF, 0x00000000FFFFFFFF}; + static const unsigned int S[] = {0, 1, 2, 4, 8, 16}; + + uint64_t x = interleaved; + uint64_t y = interleaved >> 1; + + x = (x | (x >> S[0])) & B[0]; + y = (y | (y >> S[0])) & B[0]; + + x = (x | (x >> S[1])) & B[1]; + y = (y | (y >> S[1])) & B[1]; + + x = (x | (x >> S[2])) & B[2]; + y = (y | (y >> S[2])) & B[2]; + + x = (x | (x >> S[3])) & B[3]; + y = (y | (y >> S[3])) & B[3]; + + x = (x | (x >> S[4])) & B[4]; + y = (y | (y >> S[4])) & B[4]; + + x = (x | (x >> S[5])) & B[5]; + y = (y | (y >> S[5])) & B[5]; + + return x | (y << 32); +} + void geohashGetCoordRange(GeoHashRange *long_range, GeoHashRange *lat_range) { /* These are constraints from EPSG:900913 / EPSG:3785 / OSGEO:41001 */ /* We can't geocode at the north/south pole. 
*/ @@ -56,8 +121,6 @@ void geohashGetCoordRange(GeoHashRange *long_range, GeoHashRange *lat_range) { int geohashEncode(GeoHashRange *long_range, GeoHashRange *lat_range, double longitude, double latitude, uint8_t step, GeoHashBits *hash) { - uint8_t i; - if (NULL == hash || step > 32 || step == 0 || RANGEPISZERO(lat_range) || RANGEPISZERO(long_range)) { return 0; @@ -71,29 +134,15 @@ int geohashEncode(GeoHashRange *long_range, GeoHashRange *lat_range, return 0; } - for (i = 0; i < step; i++) { - uint8_t lat_bit, long_bit; + double lat_offset = + (latitude - lat_range->min) / (lat_range->max - lat_range->min); + double long_offset = + (longitude - long_range->min) / (long_range->max - long_range->min); - if (lat_range->max - latitude >= latitude - lat_range->min) { - lat_bit = 0; - lat_range->max = (lat_range->max + lat_range->min) / 2; - } else { - lat_bit = 1; - lat_range->min = (lat_range->max + lat_range->min) / 2; - } - if (long_range->max - longitude >= longitude - long_range->min) { - long_bit = 0; - long_range->max = (long_range->max + long_range->min) / 2; - } else { - long_bit = 1; - long_range->min = (long_range->max + long_range->min) / 2; - } - - hash->bits <<= 1; - hash->bits += long_bit; - hash->bits <<= 1; - hash->bits += lat_bit; - } + /* convert to fixed point based on the step size */ + lat_offset *= (1 << step); + long_offset *= (1 << step); + hash->bits = interleave64(lat_offset, long_offset); return 1; } @@ -108,45 +157,35 @@ int geohashEncodeWGS84(double longitude, double latitude, uint8_t step, return geohashEncodeType(longitude, latitude, step, hash); } -static inline uint8_t get_bit(uint64_t bits, uint8_t pos) { - return (bits >> pos) & 0x01; -} - int geohashDecode(const GeoHashRange long_range, const GeoHashRange lat_range, const GeoHashBits hash, GeoHashArea *area) { - uint8_t i; - if (HASHISZERO(hash) || NULL == area || RANGEISZERO(lat_range) || RANGEISZERO(long_range)) { return 0; } area->hash = hash; - area->longitude.min = long_range.min; - area->longitude.max = long_range.max; - area->latitude.min = lat_range.min; - area->latitude.max = lat_range.max; + uint8_t step = hash.step; + uint64_t hash_sep = deinterleave64(hash.bits); /* hash = [LAT][LONG] */ - for (i = 0; i < hash.step; i++) { - uint8_t lat_bit, long_bit; + double lat_scale = lat_range.max - lat_range.min; + double long_scale = long_range.max - long_range.min; - long_bit = get_bit(hash.bits, (hash.step - i) * 2 - 1); - lat_bit = get_bit(hash.bits, (hash.step - i) * 2 - 2); + uint32_t ilato = hash_sep; /* get lat part of deinterleaved hash */ + uint32_t ilono = hash_sep >> 32; /* shift over to get long part of hash */ - if (lat_bit == 0) { - area->latitude.max = (area->latitude.max + area->latitude.min) / 2; - } else { - area->latitude.min = (area->latitude.max + area->latitude.min) / 2; - } + /* divide by 2**step. + * Then, for 0-1 coordinate, multiply times scale and add + to the min to get the absolute coordinate. 
*/ + area->latitude.min = + lat_range.min + (ilato * 1.0 / (1ull << step)) * lat_scale; + area->latitude.max = + lat_range.min + ((ilato + 1) * 1.0 / (1ull << step)) * lat_scale; + area->longitude.min = + long_range.min + (ilono * 1.0 / (1ull << step)) * long_scale; + area->longitude.max = + long_range.min + ((ilono + 1) * 1.0 / (1ull << step)) * long_scale; - if (long_bit == 0) { - area->longitude.max = - (area->longitude.max + area->longitude.min) / 2; - } else { - area->longitude.min = - (area->longitude.max + area->longitude.min) / 2; - } - } return 1; } diff --git a/src/geo.c b/src/geo.c index 66c17a111..b56711123 100644 --- a/src/geo.c +++ b/src/geo.c @@ -164,7 +164,7 @@ double extractDistanceOrReply(redisClient *c, robj **argv, * than "5.2144992818115 meters away." We provide 4 digits after the dot * so that the returned value is decently accurate even when the unit is * the kilometer. */ -inline void addReplyDoubleDistance(redisClient *c, double d) { +void addReplyDoubleDistance(redisClient *c, double d) { char dbuf[128]; int dlen = snprintf(dbuf, sizeof(dbuf), "%.4f", d); addReplyBulkCBuffer(c, dbuf, dlen); From 5254c2d3c350f4edbb2f5dfde506fcb1ec58ee66 Mon Sep 17 00:00:00 2001 From: antirez Date: Fri, 3 Jul 2015 09:47:08 +0200 Subject: [PATCH 0293/1928] Removed useless tryObjectEncoding() call from ZRANK. --- src/t_zset.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/t_zset.c b/src/t_zset.c index 93808a871..386258da1 100644 --- a/src/t_zset.c +++ b/src/t_zset.c @@ -2886,7 +2886,7 @@ void zrankGenericCommand(redisClient *c, int reverse) { dictEntry *de; double score; - ele = c->argv[2] = tryObjectEncoding(c->argv[2]); + ele = c->argv[2]; de = dictFind(zs->dict,ele); if (de != NULL) { score = *(double*)dictGetVal(de); From 5e04189887ed9100577374cede428c62d23fabe4 Mon Sep 17 00:00:00 2001 From: antirez Date: Mon, 6 Jul 2015 18:39:25 +0200 Subject: [PATCH 0294/1928] Geo: validate long,lat passed by user via API --- deps/geohash-int/geohash.c | 20 ++++++++++++-------- deps/geohash-int/geohash.h | 8 +++++++- src/geo.c | 6 ++++++ 3 files changed, 25 insertions(+), 9 deletions(-) diff --git a/deps/geohash-int/geohash.c b/deps/geohash-int/geohash.c index eaad9858d..5a0f3263d 100644 --- a/deps/geohash-int/geohash.c +++ b/deps/geohash-int/geohash.c @@ -112,19 +112,23 @@ static inline uint64_t deinterleave64(uint64_t interleaved) { void geohashGetCoordRange(GeoHashRange *long_range, GeoHashRange *lat_range) { /* These are constraints from EPSG:900913 / EPSG:3785 / OSGEO:41001 */ /* We can't geocode at the north/south pole. */ - long_range->max = 180.0; - long_range->min = -180.0; - lat_range->max = 85.05112878; - lat_range->min = -85.05112878; + long_range->max = GEO_LONG_MAX; + long_range->min = GEO_LONG_MIN; + lat_range->max = GEO_LAT_MAX; + lat_range->min = GEO_LAT_MIN; } int geohashEncode(GeoHashRange *long_range, GeoHashRange *lat_range, double longitude, double latitude, uint8_t step, GeoHashBits *hash) { - if (NULL == hash || step > 32 || step == 0 || RANGEPISZERO(lat_range) || - RANGEPISZERO(long_range)) { - return 0; - } + /* Check basic arguments sanity. */ + if (hash == NULL || step > 32 || step == 0 || + RANGEPISZERO(lat_range) || RANGEPISZERO(long_range)) return 0; + + /* Return an error when trying to index outside the supported + * constraints. 
*/ + if (longitude > 180 || longitude < -180 || + latitude > 85.05112878 || latitude < -85.05112878) return 0; hash->bits = 0; hash->step = step; diff --git a/deps/geohash-int/geohash.h b/deps/geohash-int/geohash.h index 5e76c249c..c2f57bed0 100644 --- a/deps/geohash-int/geohash.h +++ b/deps/geohash-int/geohash.h @@ -44,7 +44,13 @@ extern "C" { #define RANGEISZERO(r) (!(r).max && !(r).min) #define RANGEPISZERO(r) (r == NULL || RANGEISZERO(*r)) -#define GEO_STEP_MAX 26 +#define GEO_STEP_MAX 26 /* 26*2 = 52 bits. */ + +/* Limits from EPSG:900913 / EPSG:3785 / OSGEO:41001 */ +#define GEO_LAT_MIN -85.05112878 +#define GEO_LAT_MAX 85.05112878 +#define GEO_LONG_MIN -180 +#define GEO_LONG_MAX 180 typedef enum { GEOHASH_NORTH = 0, diff --git a/src/geo.c b/src/geo.c index b56711123..90c59c807 100644 --- a/src/geo.c +++ b/src/geo.c @@ -98,6 +98,12 @@ int extractLongLatOrReply(redisClient *c, robj **argv, REDIS_OK) { return REDIS_ERR; } + if (xy[0] < GEO_LONG_MIN || xy[0] > GEO_LONG_MAX || + xy[1] < GEO_LAT_MIN || xy[1] > GEO_LAT_MAX) { + addReplySds(c, sdscatprintf(sdsempty(), + "-ERR invalid longitude,latitude pair %f,%f\r\n",xy[0],xy[1])); + return REDIS_ERR; + } } return REDIS_OK; } From 1e12784259483991710183d127dc3abd4bd21f56 Mon Sep 17 00:00:00 2001 From: antirez Date: Thu, 9 Jul 2015 11:25:10 +0200 Subject: [PATCH 0295/1928] Geo: -Ofast breaks builds on older GCCs. --- deps/geohash-int/Makefile | 2 +- src/geo.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/deps/geohash-int/Makefile b/deps/geohash-int/Makefile index bf9eaebb8..b7c259577 100644 --- a/deps/geohash-int/Makefile +++ b/deps/geohash-int/Makefile @@ -1,6 +1,6 @@ STD= WARN= -Wall -OPT= -Ofast +OPT= -O2 R_CFLAGS= $(STD) $(WARN) $(OPT) $(DEBUG) $(CFLAGS) R_LDFLAGS= $(LDFLAGS) diff --git a/src/geo.c b/src/geo.c index 90c59c807..35931f4f5 100644 --- a/src/geo.c +++ b/src/geo.c @@ -580,7 +580,7 @@ void georadiusByMemberCommand(redisClient *c) { georadiusGeneric(c, RADIUS_MEMBER); } -/* GEODECODE long lat */ +/* GEODECODE score */ void geodecodeCommand(redisClient *c) { GeoHashBits geohash; if (getLongLongFromObjectOrReply(c, c->argv[1], (long long *)&geohash.bits, From 965abcf10a2e0453ddc88dd96782418ff1d587f9 Mon Sep 17 00:00:00 2001 From: antirez Date: Thu, 9 Jul 2015 11:27:53 +0200 Subject: [PATCH 0296/1928] Geo: use ULL suffix for unsigned 64 bit constants. 
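A short illustration (mine, not part of the diff): a 64 bit mask with the high
bit set, like the 0xaaaa... pattern below, cannot be represented as a signed
long long, so spelling out the unsigned type states the intent directly and
avoids relying on the compiler promoting an out-of-range signed constant,
something older or stricter compilers may warn about:

    uint64_t x = 0xaaaaaaaaaaaaaaaaLL;  /* value does not fit in signed long long */
    uint64_t y = 0xaaaaaaaaaaaaaaaaULL; /* explicitly unsigned 64 bit */
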
--- deps/geohash-int/geohash.c | 28 ++++++++++++++-------------- 1 file changed, 14 insertions(+), 14 deletions(-) diff --git a/deps/geohash-int/geohash.c b/deps/geohash-int/geohash.c index 5a0f3263d..d3bc7de25 100644 --- a/deps/geohash-int/geohash.c +++ b/deps/geohash-int/geohash.c @@ -50,9 +50,9 @@ * From: https://graphics.stanford.edu/~seander/bithacks.html#InterleaveBMN */ static inline uint64_t interleave64(uint32_t xlo, uint32_t ylo) { - static const uint64_t B[] = {0x5555555555555555, 0x3333333333333333, - 0x0F0F0F0F0F0F0F0F, 0x00FF00FF00FF00FF, - 0x0000FFFF0000FFFF}; + static const uint64_t B[] = {0x5555555555555555ULL, 0x3333333333333333ULL, + 0x0F0F0F0F0F0F0F0FULL, 0x00FF00FF00FF00FFULL, + 0x0000FFFF0000FFFFULL}; static const unsigned int S[] = {1, 2, 4, 8, 16}; uint64_t x = xlo; @@ -80,9 +80,9 @@ static inline uint64_t interleave64(uint32_t xlo, uint32_t ylo) { * derived from http://stackoverflow.com/questions/4909263 */ static inline uint64_t deinterleave64(uint64_t interleaved) { - static const uint64_t B[] = {0x5555555555555555, 0x3333333333333333, - 0x0F0F0F0F0F0F0F0F, 0x00FF00FF00FF00FF, - 0x0000FFFF0000FFFF, 0x00000000FFFFFFFF}; + static const uint64_t B[] = {0x5555555555555555ULL, 0x3333333333333333ULL, + 0x0F0F0F0F0F0F0F0FULL, 0x00FF00FF00FF00FFULL, + 0x0000FFFF0000FFFFULL, 0x00000000FFFFFFFFULL}; static const unsigned int S[] = {0, 1, 2, 4, 8, 16}; uint64_t x = interleaved; @@ -225,10 +225,10 @@ static void geohash_move_x(GeoHashBits *hash, int8_t d) { if (d == 0) return; - uint64_t x = hash->bits & 0xaaaaaaaaaaaaaaaaLL; - uint64_t y = hash->bits & 0x5555555555555555LL; + uint64_t x = hash->bits & 0xaaaaaaaaaaaaaaaaULL; + uint64_t y = hash->bits & 0x5555555555555555ULL; - uint64_t zz = 0x5555555555555555LL >> (64 - hash->step * 2); + uint64_t zz = 0x5555555555555555ULL >> (64 - hash->step * 2); if (d > 0) { x = x + (zz + 1); @@ -237,7 +237,7 @@ static void geohash_move_x(GeoHashBits *hash, int8_t d) { x = x - (zz + 1); } - x &= (0xaaaaaaaaaaaaaaaaLL >> (64 - hash->step * 2)); + x &= (0xaaaaaaaaaaaaaaaaULL >> (64 - hash->step * 2)); hash->bits = (x | y); } @@ -245,17 +245,17 @@ static void geohash_move_y(GeoHashBits *hash, int8_t d) { if (d == 0) return; - uint64_t x = hash->bits & 0xaaaaaaaaaaaaaaaaLL; - uint64_t y = hash->bits & 0x5555555555555555LL; + uint64_t x = hash->bits & 0xaaaaaaaaaaaaaaaaULL; + uint64_t y = hash->bits & 0x5555555555555555ULL; - uint64_t zz = 0xaaaaaaaaaaaaaaaaLL >> (64 - hash->step * 2); + uint64_t zz = 0xaaaaaaaaaaaaaaaaULL >> (64 - hash->step * 2); if (d > 0) { y = y + (zz + 1); } else { y = y | zz; y = y - (zz + 1); } - y &= (0x5555555555555555LL >> (64 - hash->step * 2)); + y &= (0x5555555555555555ULL >> (64 - hash->step * 2)); hash->bits = (x | y); } From b96af595a5fddbbdcbf78ed3c51acd60976416f4 Mon Sep 17 00:00:00 2001 From: antirez Date: Thu, 9 Jul 2015 17:42:59 +0200 Subject: [PATCH 0297/1928] GEOENCODE / GEODECODE commands removed. Rationale: 1. The commands look like internals exposed without a real strong use case. 2. Whatever there is an use case, the client would implement the commands client side instead of paying RTT just to use a simple to reimplement library. 3. They add complexity to an otherwise quite straightforward API. 
So for now KILLED ;-) --- src/geo.c | 93 ---------------------------------------------- src/redis.c | 2 - tests/unit/geo.tcl | 13 ------- 3 files changed, 108 deletions(-) diff --git a/src/geo.c b/src/geo.c index 35931f4f5..63500bed7 100644 --- a/src/geo.c +++ b/src/geo.c @@ -42,8 +42,6 @@ int zslValueLteMax(double value, zrangespec *spec); * - geoadd - add coordinates for value to geoset * - georadius - search radius by coordinates in geoset * - georadiusbymember - search radius based on geoset member position - * - geoencode - encode coordinates to a geohash integer - * - geodecode - decode geohash integer to representative coordinates * ==================================================================== */ /* ==================================================================== @@ -580,97 +578,6 @@ void georadiusByMemberCommand(redisClient *c) { georadiusGeneric(c, RADIUS_MEMBER); } -/* GEODECODE score */ -void geodecodeCommand(redisClient *c) { - GeoHashBits geohash; - if (getLongLongFromObjectOrReply(c, c->argv[1], (long long *)&geohash.bits, - NULL) != REDIS_OK) - return; - - GeoHashArea area; - geohash.step = GEO_STEP_MAX; - geohashDecodeWGS84(geohash, &area); - - double lon = (area.longitude.min + area.longitude.max) / 2; - double lat = (area.latitude.min + area.latitude.max) / 2; - - /* Returning three nested replies */ - addReplyMultiBulkLen(c, 3); - - /* First, the minimum corner */ - addReplyMultiBulkLen(c, 2); - addReplyDouble(c, area.longitude.min); - addReplyDouble(c, area.latitude.min); - - /* Next, the maximum corner */ - addReplyMultiBulkLen(c, 2); - addReplyDouble(c, area.longitude.max); - addReplyDouble(c, area.latitude.max); - - /* Last, the averaged center of this bounding box */ - addReplyMultiBulkLen(c, 2); - addReplyDouble(c, lon); - addReplyDouble(c, lat); -} - -/* GEOENCODE long lat [radius unit] */ -void geoencodeCommand(redisClient *c) { - double radius_meters = 0; - if (c->argc == 5) { - if ((radius_meters = extractDistanceOrReply(c, c->argv + 3, NULL)) < 0) - return; - } else if (c->argc == 4 || c->argc > 5) { - addReplyError(c, "syntax error, try: GEOENCODE x y [radius unit]"); - return; - } - - double xy[2]; - if (extractLongLatOrReply(c, c->argv + 1, xy) == REDIS_ERR) return; - - /* Encode long/lat into our geohash */ - GeoHashBits geohash; - uint8_t step = geohashEstimateStepsByRadius(radius_meters,0); - geohashEncodeWGS84(xy[0], xy[1], step, &geohash); - - /* Align the hash to a valid 52-bit integer based on step size */ - GeoHashFix52Bits bits = geohashAlign52Bits(geohash); - - /* Decode the hash so we can return its bounding box */ - GeoHashArea area; - geohashDecodeWGS84(geohash, &area); - - double lon = (area.longitude.min + area.longitude.max) / 2; - double lat = (area.latitude.min + area.latitude.max) / 2; - - /* Return four nested multibulk replies. */ - addReplyMultiBulkLen(c, 5); - - /* Return the binary geohash we calculated as 52-bit integer */ - addReplyLongLong(c, bits); - - /* Return the minimum corner */ - addReplyMultiBulkLen(c, 2); - addReplyDouble(c, area.longitude.min); - addReplyDouble(c, area.latitude.min); - - /* Return the maximum corner */ - addReplyMultiBulkLen(c, 2); - addReplyDouble(c, area.longitude.max); - addReplyDouble(c, area.latitude.max); - - /* Return the averaged center */ - addReplyMultiBulkLen(c, 2); - addReplyDouble(c, lon); - addReplyDouble(c, lat); - - /* Return the two scores to query to get the range from the sorted set. 
*/ - GeoHashFix52Bits min, max; - scoresOfGeoHashBox(geohash,&min,&max); - addReplyMultiBulkLen(c, 2); - addReplyDouble(c, min); - addReplyDouble(c, max); -} - /* GEOHASH key ele1 ele2 ... eleN * * Returns an array with an 11 characters geohash representation of the diff --git a/src/redis.c b/src/redis.c index cb5c73771..020264924 100644 --- a/src/redis.c +++ b/src/redis.c @@ -285,8 +285,6 @@ struct redisCommand redisCommandTable[] = { {"geoadd",geoaddCommand,-5,"wm",0,NULL,1,1,1,0,0}, {"georadius",georadiusCommand,-6,"r",0,NULL,1,1,1,0,0}, {"georadiusbymember",georadiusByMemberCommand,-5,"r",0,NULL,1,1,1,0,0}, - {"geoencode",geoencodeCommand,-3,"r",0,NULL,0,0,0,0,0}, - {"geodecode",geodecodeCommand,2,"r",0,NULL,0,0,0,0,0}, {"geohash",geohashCommand,-2,"r",0,NULL,0,0,0,0,0}, {"geopos",geoposCommand,-2,"r",0,NULL,0,0,0,0,0}, {"geodist",geodistCommand,-4,"r",0,NULL,0,0,0,0,0}, diff --git a/tests/unit/geo.tcl b/tests/unit/geo.tcl index cf6d8c614..1c6b90b63 100644 --- a/tests/unit/geo.tcl +++ b/tests/unit/geo.tcl @@ -72,19 +72,6 @@ start_server {tags {"geo"}} { r georadiusbymember nyc "wtc one" 7 km withdist } {{{wtc one} 0.0000} {{union square} 3.2544} {{central park n/q/r} 6.7000} {4545 6.1975} {{lic market} 6.8969}} - test {GEOENCODE simple} { - r geoencode 1.8063239 41.2358883 - } {3471579339700058 {1.8063229322433472 41.235888125243704}\ - {1.806328296661377 41.235890659964866}\ - {1.8063256144523621 41.235889392604285}\ - {3471579339700058 3471579339700059}} - - test {GEODECODE simple} { - r geodecode 3471579339700058 - } {{1.8063229322433472 41.235888125243704}\ - {1.806328296661377 41.235890659964866}\ - {1.8063256144523621 41.235889392604285}} - test {GEOHASH is able to return geohash strings} { # Example from Wikipedia. r del points From 5c4fcaf3fe448c5575a9911edbcd421c6dbebb54 Mon Sep 17 00:00:00 2001 From: antirez Date: Mon, 13 Jul 2015 15:30:11 +0200 Subject: [PATCH 0298/1928] Geo: fix command table keys position indexes for three commands. GEOHASH, GEOPOS and GEODIST where declared as commands not accepting keys, so the Redis Cluster redirection did not worked. Close #2671. --- src/redis.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/redis.c b/src/redis.c index 020264924..1bcd8375e 100644 --- a/src/redis.c +++ b/src/redis.c @@ -285,9 +285,9 @@ struct redisCommand redisCommandTable[] = { {"geoadd",geoaddCommand,-5,"wm",0,NULL,1,1,1,0,0}, {"georadius",georadiusCommand,-6,"r",0,NULL,1,1,1,0,0}, {"georadiusbymember",georadiusByMemberCommand,-5,"r",0,NULL,1,1,1,0,0}, - {"geohash",geohashCommand,-2,"r",0,NULL,0,0,0,0,0}, - {"geopos",geoposCommand,-2,"r",0,NULL,0,0,0,0,0}, - {"geodist",geodistCommand,-4,"r",0,NULL,0,0,0,0,0}, + {"geohash",geohashCommand,-2,"r",0,NULL,1,1,1,0,0}, + {"geopos",geoposCommand,-2,"r",0,NULL,1,1,1,0,0}, + {"geodist",geodistCommand,-4,"r",0,NULL,1,1,1,0,0}, {"pfselftest",pfselftestCommand,1,"r",0,NULL,0,0,0,0,0}, {"pfadd",pfaddCommand,-2,"wmF",0,NULL,1,1,1,0,0}, {"pfcount",pfcountCommand,-2,"r",0,NULL,1,1,1,0,0}, From 4c7ee0d5848ab12b9d2b18bca62cffcbfac0e885 Mon Sep 17 00:00:00 2001 From: antirez Date: Mon, 13 Jul 2015 18:06:24 +0200 Subject: [PATCH 0299/1928] EXISTS is now variadic. The new return value is the number of keys existing, among the ones specified in the command line, counting the same key multiple times if given multiple times (and if it exists). See PR #2667. 
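Illustrative session only (assumes a key "foo" that exists and a key "nokey"
that does not):

    redis> EXISTS foo nokey foo
    (integer) 2

"foo" is counted twice because it is given twice, while the missing key
contributes nothing to the count.
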
--- src/db.c | 14 +++++++++----- src/redis.c | 2 +- 2 files changed, 10 insertions(+), 6 deletions(-) diff --git a/src/db.c b/src/db.c index 36650318a..1493f0a20 100644 --- a/src/db.c +++ b/src/db.c @@ -318,13 +318,17 @@ void delCommand(redisClient *c) { addReplyLongLong(c,deleted); } +/* EXISTS key1 key2 ... key_N. + * Return value is the number of keys existing. */ void existsCommand(redisClient *c) { - expireIfNeeded(c->db,c->argv[1]); - if (dbExists(c->db,c->argv[1])) { - addReply(c, shared.cone); - } else { - addReply(c, shared.czero); + long long count = 0; + int j; + + for (j = 1; j < c->argc; j++) { + expireIfNeeded(c->db,c->argv[j]); + if (dbExists(c->db,c->argv[j])) count++; } + addReplyLongLong(c,count); } void selectCommand(redisClient *c) { diff --git a/src/redis.c b/src/redis.c index 1bcd8375e..656c21ff2 100644 --- a/src/redis.c +++ b/src/redis.c @@ -132,7 +132,7 @@ struct redisCommand redisCommandTable[] = { {"append",appendCommand,3,"wm",0,NULL,1,1,1,0,0}, {"strlen",strlenCommand,2,"rF",0,NULL,1,1,1,0,0}, {"del",delCommand,-2,"w",0,NULL,1,-1,1,0,0}, - {"exists",existsCommand,2,"rF",0,NULL,1,1,1,0,0}, + {"exists",existsCommand,-2,"rF",0,NULL,1,-1,1,0,0}, {"setbit",setbitCommand,4,"wm",0,NULL,1,1,1,0,0}, {"getbit",getbitCommand,3,"rF",0,NULL,1,1,1,0,0}, {"setrange",setrangeCommand,4,"wm",0,NULL,1,1,1,0,0}, From 0f64080dcb9f44c923379f909aae82f6c2b2ed19 Mon Sep 17 00:00:00 2001 From: antirez Date: Tue, 14 Jul 2015 17:15:37 +0200 Subject: [PATCH 0300/1928] DEBUG HTSTATS added. The command reports information about the hash table internal state representing the specified database ID. This can be used in order to investigate rehashings, memory usage issues and for other debugging purposes. --- src/debug.c | 21 ++++++++ src/dict.c | 130 ++++++++++++++++-------------------------------- src/dict.h | 2 +- src/redis-cli.c | 6 +-- 4 files changed, 67 insertions(+), 92 deletions(-) diff --git a/src/debug.c b/src/debug.c index b8dcf648e..2acba1495 100644 --- a/src/debug.c +++ b/src/debug.c @@ -425,6 +425,27 @@ void debugCommand(redisClient *c) { sizes = sdscatprintf(sizes,"dictentry:%d ", (int)sizeof(dictEntry)); sizes = sdscatprintf(sizes,"sdshdr:%d", (int)sizeof(struct sdshdr)); addReplyBulkSds(c,sizes); + } else if (!strcasecmp(c->argv[1]->ptr,"htstats") && c->argc == 3) { + long dbid; + sds stats = sdsempty(); + char buf[4096]; + + if (getLongFromObjectOrReply(c, c->argv[2], &dbid, NULL) != REDIS_OK) + return; + if (dbid < 0 || dbid >= server.dbnum) { + addReplyError(c,"Out of range database"); + return; + } + + stats = sdscatprintf(stats,"[Dictionary HT]\n"); + dictGetStats(buf,sizeof(buf),server.db[dbid].dict); + stats = sdscat(stats,buf); + + stats = sdscatprintf(stats,"[Expires HT]\n"); + dictGetStats(buf,sizeof(buf),server.db[dbid].expires); + stats = sdscat(stats,buf); + + addReplyBulkSds(c,stats); } else if (!strcasecmp(c->argv[1]->ptr,"jemalloc") && c->argc == 3) { #if defined(USE_JEMALLOC) if (!strcasecmp(c->argv[2]->ptr, "info")) { diff --git a/src/dict.c b/src/dict.c index f728d381e..068262757 100644 --- a/src/dict.c +++ b/src/dict.c @@ -1002,24 +1002,21 @@ void dictDisableResize(void) { dict_can_resize = 0; } -#if 0 - -/* The following is code that we don't use for Redis currently, but that is part -of the library. 
*/ - -/* ----------------------- Debugging ------------------------*/ +/* ------------------------------- Debugging ---------------------------------*/ #define DICT_STATS_VECTLEN 50 -static void _dictPrintStatsHt(dictht *ht) { +size_t _dictGetStatsHt(char *buf, size_t bufsize, dictht *ht, int tableid) { unsigned long i, slots = 0, chainlen, maxchainlen = 0; unsigned long totchainlen = 0; unsigned long clvector[DICT_STATS_VECTLEN]; + size_t l = 0; if (ht->used == 0) { - printf("No stats available for empty dictionaries\n"); - return; + return snprintf(buf,bufsize, + "No stats available for empty dictionaries\n"); } + /* Compute stats. */ for (i = 0; i < DICT_STATS_VECTLEN; i++) clvector[i] = 0; for (i = 0; i < ht->size; i++) { dictEntry *he; @@ -1040,89 +1037,46 @@ static void _dictPrintStatsHt(dictht *ht) { if (chainlen > maxchainlen) maxchainlen = chainlen; totchainlen += chainlen; } - printf("Hash table stats:\n"); - printf(" table size: %ld\n", ht->size); - printf(" number of elements: %ld\n", ht->used); - printf(" different slots: %ld\n", slots); - printf(" max chain length: %ld\n", maxchainlen); - printf(" avg chain length (counted): %.02f\n", (float)totchainlen/slots); - printf(" avg chain length (computed): %.02f\n", (float)ht->used/slots); - printf(" Chain length distribution:\n"); + + /* Generate human readable stats. */ + l += snprintf(buf+l,bufsize-l, + "Hash table %d stats (%s):\n" + " table size: %ld\n" + " number of elements: %ld\n" + " different slots: %ld\n" + " max chain length: %ld\n" + " avg chain length (counted): %.02f\n" + " avg chain length (computed): %.02f\n" + " Chain length distribution:\n", + tableid, (tableid == 0) ? "main hash table" : "rehashing target", + ht->size, ht->used, slots, maxchainlen, + (float)totchainlen/slots, (float)ht->used/slots); + for (i = 0; i < DICT_STATS_VECTLEN-1; i++) { if (clvector[i] == 0) continue; - printf(" %s%ld: %ld (%.02f%%)\n",(i == DICT_STATS_VECTLEN-1)?">= ":"", i, clvector[i], ((float)clvector[i]/ht->size)*100); + if (l >= bufsize) break; + l += snprintf(buf+l,bufsize-l, + " %s%ld: %ld (%.02f%%)\n", + (i == DICT_STATS_VECTLEN-1)?">= ":"", + i, clvector[i], ((float)clvector[i]/ht->size)*100); } + + /* Unlike snprintf(), teturn the number of characters actually written. */ + if (bufsize) buf[bufsize-1] = '\0'; + return strlen(buf); } -void dictPrintStats(dict *d) { - _dictPrintStatsHt(&d->ht[0]); - if (dictIsRehashing(d)) { - printf("-- Rehashing into ht[1]:\n"); - _dictPrintStatsHt(&d->ht[1]); +void dictGetStats(char *buf, size_t bufsize, dict *d) { + size_t l; + char *orig_buf = buf; + size_t orig_bufsize = bufsize; + + l = _dictGetStatsHt(buf,bufsize,&d->ht[0],0); + buf += l; + bufsize -= l; + if (dictIsRehashing(d) && bufsize > 0) { + _dictGetStatsHt(buf,bufsize,&d->ht[1],1); } + /* Make sure there is a NULL term at the end. 
*/ + if (orig_bufsize) orig_buf[orig_bufsize-1] = '\0'; } - -/* ----------------------- StringCopy Hash Table Type ------------------------*/ - -static unsigned int _dictStringCopyHTHashFunction(const void *key) -{ - return dictGenHashFunction(key, strlen(key)); -} - -static void *_dictStringDup(void *privdata, const void *key) -{ - int len = strlen(key); - char *copy = zmalloc(len+1); - DICT_NOTUSED(privdata); - - memcpy(copy, key, len); - copy[len] = '\0'; - return copy; -} - -static int _dictStringCopyHTKeyCompare(void *privdata, const void *key1, - const void *key2) -{ - DICT_NOTUSED(privdata); - - return strcmp(key1, key2) == 0; -} - -static void _dictStringDestructor(void *privdata, void *key) -{ - DICT_NOTUSED(privdata); - - zfree(key); -} - -dictType dictTypeHeapStringCopyKey = { - _dictStringCopyHTHashFunction, /* hash function */ - _dictStringDup, /* key dup */ - NULL, /* val dup */ - _dictStringCopyHTKeyCompare, /* key compare */ - _dictStringDestructor, /* key destructor */ - NULL /* val destructor */ -}; - -/* This is like StringCopy but does not auto-duplicate the key. - * It's used for intepreter's shared strings. */ -dictType dictTypeHeapStrings = { - _dictStringCopyHTHashFunction, /* hash function */ - NULL, /* key dup */ - NULL, /* val dup */ - _dictStringCopyHTKeyCompare, /* key compare */ - _dictStringDestructor, /* key destructor */ - NULL /* val destructor */ -}; - -/* This is like StringCopy but also automatically handle dynamic - * allocated C strings as values. */ -dictType dictTypeHeapStringCopyKeyValue = { - _dictStringCopyHTHashFunction, /* hash function */ - _dictStringDup, /* key dup */ - _dictStringDup, /* val dup */ - _dictStringCopyHTKeyCompare, /* key compare */ - _dictStringDestructor, /* key destructor */ - _dictStringDestructor, /* val destructor */ -}; -#endif diff --git a/src/dict.h b/src/dict.h index 014d18212..e31daee2a 100644 --- a/src/dict.h +++ b/src/dict.h @@ -165,7 +165,7 @@ dictEntry *dictNext(dictIterator *iter); void dictReleaseIterator(dictIterator *iter); dictEntry *dictGetRandomKey(dict *d); unsigned int dictGetSomeKeys(dict *d, dictEntry **des, unsigned int count); -void dictPrintStats(dict *d); +void dictGetStats(char *buf, size_t bufsize, dict *d); unsigned int dictGenHashFunction(const void *key, int len); unsigned int dictGenCaseHashFunction(const unsigned char *buf, int len); void dictEmpty(dict *d, void(callback)(void*)); diff --git a/src/redis-cli.c b/src/redis-cli.c index 251e42fad..acf7c98b9 100644 --- a/src/redis-cli.c +++ b/src/redis-cli.c @@ -644,9 +644,9 @@ static int cliSendCommand(int argc, char **argv, int repeat) { output_raw = 0; if (!strcasecmp(command,"info") || - (argc == 3 && !strcasecmp(command,"debug") && - (!strcasecmp(argv[1],"jemalloc") && - !strcasecmp(argv[2],"info"))) || + (argc >= 2 && !strcasecmp(command,"debug") && + (!strcasecmp(argv[1],"jemalloc") || + !strcasecmp(argv[1],"htstats"))) || (argc == 2 && !strcasecmp(command,"cluster") && (!strcasecmp(argv[1],"nodes") || !strcasecmp(argv[1],"info"))) || From f15df8ba5db09bdf4be58c53930799d82120cc34 Mon Sep 17 00:00:00 2001 From: Oran Agra Date: Thu, 9 Apr 2015 10:37:01 +0300 Subject: [PATCH 0301/1928] sds size classes - memory optimization --- src/Makefile | 2 +- src/debug.c | 5 +- src/networking.c | 33 +++--- src/object.c | 9 +- src/redis-cli.c | 4 +- src/scripting.c | 12 +- src/sds.c | 287 ++++++++++++++++++++++++++++++++--------------- src/sds.h | 149 ++++++++++++++++++++++-- 8 files changed, 363 insertions(+), 138 deletions(-) diff --git a/src/Makefile 
b/src/Makefile index 650d438f7..106fef340 100644 --- a/src/Makefile +++ b/src/Makefile @@ -120,7 +120,7 @@ REDIS_SENTINEL_NAME=redis-sentinel REDIS_SERVER_OBJ=adlist.o quicklist.o ae.o anet.o dict.o redis.o sds.o zmalloc.o lzf_c.o lzf_d.o pqsort.o zipmap.o sha1.o ziplist.o release.o networking.o util.o object.o db.o replication.o rdb.o t_string.o t_list.o t_set.o t_zset.o t_hash.o config.o aof.o pubsub.o multi.o debug.o sort.o intset.o syncio.o cluster.o crc16.o endianconv.o slowlog.o scripting.o bio.o rio.o rand.o memtest.o crc64.o bitops.o sentinel.o notify.o setproctitle.o blocked.o hyperloglog.o latency.o sparkline.o redis-check-rdb.o geo.o REDIS_GEOHASH_OBJ=../deps/geohash-int/geohash.o ../deps/geohash-int/geohash_helper.o REDIS_CLI_NAME=redis-cli -REDIS_CLI_OBJ=anet.o sds.o adlist.o redis-cli.o zmalloc.o release.o anet.o ae.o crc64.o +REDIS_CLI_OBJ=anet.o adlist.o redis-cli.o zmalloc.o release.o anet.o ae.o crc64.o REDIS_BENCHMARK_NAME=redis-benchmark REDIS_BENCHMARK_OBJ=ae.o anet.o redis-benchmark.o sds.o adlist.o zmalloc.o redis-benchmark.o REDIS_CHECK_RDB_NAME=redis-check-rdb diff --git a/src/debug.c b/src/debug.c index 2acba1495..e071fa0b6 100644 --- a/src/debug.c +++ b/src/debug.c @@ -423,7 +423,10 @@ void debugCommand(redisClient *c) { sizes = sdscatprintf(sizes,"bits:%d ", (sizeof(void*) == 8)?64:32); sizes = sdscatprintf(sizes,"robj:%d ", (int)sizeof(robj)); sizes = sdscatprintf(sizes,"dictentry:%d ", (int)sizeof(dictEntry)); - sizes = sdscatprintf(sizes,"sdshdr:%d", (int)sizeof(struct sdshdr)); + sizes = sdscatprintf(sizes,"sdshdr8:%d", (int)sizeof(struct sdshdr8)); + sizes = sdscatprintf(sizes,"sdshdr16:%d", (int)sizeof(struct sdshdr16)); + sizes = sdscatprintf(sizes,"sdshdr32:%d", (int)sizeof(struct sdshdr32)); + sizes = sdscatprintf(sizes,"sdshdr64:%d", (int)sizeof(struct sdshdr64)); addReplyBulkSds(c,sizes); } else if (!strcasecmp(c->argv[1]->ptr,"htstats") && c->argc == 3) { long dbid; diff --git a/src/networking.c b/src/networking.c index eb033ae61..12f8d2e9f 100644 --- a/src/networking.c +++ b/src/networking.c @@ -33,21 +33,14 @@ static void setProtocolError(redisClient *c, int pos); -/* To evaluate the output buffer size of a client we need to get size of - * allocated objects, however we can't used zmalloc_size() directly on sds - * strings because of the trick they use to work (the header is before the - * returned pointer), so we use this helper function. */ -size_t zmalloc_size_sds(sds s) { - return zmalloc_size(s-sizeof(struct sdshdr)); -} /* Return the amount of memory used by the sds string at object->ptr * for a string object. */ size_t getStringObjectSdsUsedMemory(robj *o) { redisAssertWithInfo(NULL,o,o->type == REDIS_STRING); switch(o->encoding) { - case REDIS_ENCODING_RAW: return zmalloc_size_sds(o->ptr); - case REDIS_ENCODING_EMBSTR: return sdslen(o->ptr); + case REDIS_ENCODING_RAW: return sdsZmallocSize(o->ptr); + case REDIS_ENCODING_EMBSTR: return zmalloc_size(o)-sizeof(robj); default: return 0; /* Just integer encoding for now. 
*/ } } @@ -235,10 +228,10 @@ void _addReplyObjectToList(redisClient *c, robj *o) { tail->encoding == REDIS_ENCODING_RAW && sdslen(tail->ptr)+sdslen(o->ptr) <= REDIS_REPLY_CHUNK_BYTES) { - c->reply_bytes -= zmalloc_size_sds(tail->ptr); + c->reply_bytes -= sdsZmallocSize(tail->ptr); tail = dupLastObjectIfNeeded(c->reply); tail->ptr = sdscatlen(tail->ptr,o->ptr,sdslen(o->ptr)); - c->reply_bytes += zmalloc_size_sds(tail->ptr); + c->reply_bytes += sdsZmallocSize(tail->ptr); } else { incrRefCount(o); listAddNodeTail(c->reply,o); @@ -260,7 +253,7 @@ void _addReplySdsToList(redisClient *c, sds s) { if (listLength(c->reply) == 0) { listAddNodeTail(c->reply,createObject(REDIS_STRING,s)); - c->reply_bytes += zmalloc_size_sds(s); + c->reply_bytes += sdsZmallocSize(s); } else { tail = listNodeValue(listLast(c->reply)); @@ -268,14 +261,14 @@ void _addReplySdsToList(redisClient *c, sds s) { if (tail->ptr != NULL && tail->encoding == REDIS_ENCODING_RAW && sdslen(tail->ptr)+sdslen(s) <= REDIS_REPLY_CHUNK_BYTES) { - c->reply_bytes -= zmalloc_size_sds(tail->ptr); + c->reply_bytes -= sdsZmallocSize(tail->ptr); tail = dupLastObjectIfNeeded(c->reply); tail->ptr = sdscatlen(tail->ptr,s,sdslen(s)); - c->reply_bytes += zmalloc_size_sds(tail->ptr); + c->reply_bytes += sdsZmallocSize(tail->ptr); sdsfree(s); } else { listAddNodeTail(c->reply,createObject(REDIS_STRING,s)); - c->reply_bytes += zmalloc_size_sds(s); + c->reply_bytes += sdsZmallocSize(s); } } asyncCloseClientOnOutputBufferLimitReached(c); @@ -298,10 +291,10 @@ void _addReplyStringToList(redisClient *c, const char *s, size_t len) { if (tail->ptr != NULL && tail->encoding == REDIS_ENCODING_RAW && sdslen(tail->ptr)+len <= REDIS_REPLY_CHUNK_BYTES) { - c->reply_bytes -= zmalloc_size_sds(tail->ptr); + c->reply_bytes -= sdsZmallocSize(tail->ptr); tail = dupLastObjectIfNeeded(c->reply); tail->ptr = sdscatlen(tail->ptr,s,len); - c->reply_bytes += zmalloc_size_sds(tail->ptr); + c->reply_bytes += sdsZmallocSize(tail->ptr); } else { robj *o = createStringObject(s,len); @@ -440,16 +433,16 @@ void setDeferredMultiBulkLength(redisClient *c, void *node, long length) { len = listNodeValue(ln); len->ptr = sdscatprintf(sdsempty(),"*%ld\r\n",length); len->encoding = REDIS_ENCODING_RAW; /* in case it was an EMBSTR. */ - c->reply_bytes += zmalloc_size_sds(len->ptr); + c->reply_bytes += sdsZmallocSize(len->ptr); if (ln->next != NULL) { next = listNodeValue(ln->next); /* Only glue when the next node is non-NULL (an sds in this case) */ if (next->ptr != NULL) { - c->reply_bytes -= zmalloc_size_sds(len->ptr); + c->reply_bytes -= sdsZmallocSize(len->ptr); c->reply_bytes -= getStringObjectSdsUsedMemory(next); len->ptr = sdscatlen(len->ptr,next->ptr,sdslen(next->ptr)); - c->reply_bytes += zmalloc_size_sds(len->ptr); + c->reply_bytes += sdsZmallocSize(len->ptr); listDelNode(c->reply,ln->next); } } diff --git a/src/object.c b/src/object.c index dcd896917..881b1ac4b 100644 --- a/src/object.c +++ b/src/object.c @@ -58,8 +58,8 @@ robj *createRawStringObject(const char *ptr, size_t len) { * an object where the sds string is actually an unmodifiable string * allocated in the same chunk as the object itself. 
*/ robj *createEmbeddedStringObject(const char *ptr, size_t len) { - robj *o = zmalloc(sizeof(robj)+sizeof(struct sdshdr)+len+1); - struct sdshdr *sh = (void*)(o+1); + robj *o = zmalloc(sizeof(robj)+sizeof(struct sdshdr8)+len+1); + struct sdshdr8 *sh = (void*)(o+1); o->type = REDIS_STRING; o->encoding = REDIS_ENCODING_EMBSTR; @@ -68,7 +68,8 @@ robj *createEmbeddedStringObject(const char *ptr, size_t len) { o->lru = LRU_CLOCK(); sh->len = len; - sh->free = 0; + sh->alloc = len; + sh->flags = SDS_TYPE_8; if (ptr) { memcpy(sh->buf,ptr,len); sh->buf[len] = '\0'; @@ -84,7 +85,7 @@ robj *createEmbeddedStringObject(const char *ptr, size_t len) { * * The current limit of 39 is chosen so that the biggest string object * we allocate as EMBSTR will still fit into the 64 byte arena of jemalloc. */ -#define REDIS_ENCODING_EMBSTR_SIZE_LIMIT 39 +#define REDIS_ENCODING_EMBSTR_SIZE_LIMIT 44 robj *createStringObject(const char *ptr, size_t len) { if (len <= REDIS_ENCODING_EMBSTR_SIZE_LIMIT) return createEmbeddedStringObject(ptr,len); diff --git a/src/redis-cli.c b/src/redis-cli.c index acf7c98b9..64bf210bf 100644 --- a/src/redis-cli.c +++ b/src/redis-cli.c @@ -46,8 +46,8 @@ #include #include -#include "hiredis.h" -#include "sds.h" +#include +#include /* use sds.h from hiredis, so that only one set of sds functions will be present in the binary */ #include "zmalloc.h" #include "linenoise.h" #include "help.h" diff --git a/src/scripting.c b/src/scripting.c index 53c0c9ed2..e8da69f74 100644 --- a/src/scripting.c +++ b/src/scripting.c @@ -265,14 +265,11 @@ int luaRedisGenericCommand(lua_State *lua, int raise_error) { if (j < LUA_CMD_OBJCACHE_SIZE && cached_objects[j] && cached_objects_len[j] >= obj_len) { - char *s = cached_objects[j]->ptr; - struct sdshdr *sh = (void*)(s-(sizeof(struct sdshdr))); - + sds s = cached_objects[j]->ptr; argv[j] = cached_objects[j]; cached_objects[j] = NULL; memcpy(s,obj_s,obj_len+1); - sh->free += sh->len - obj_len; - sh->len = obj_len; + sdssetlen(s, obj_len); } else { argv[j] = createStringObject(obj_s, obj_len); } @@ -422,11 +419,10 @@ cleanup: o->encoding == REDIS_ENCODING_EMBSTR) && sdslen(o->ptr) <= LUA_CMD_OBJCACHE_MAX_LEN) { - struct sdshdr *sh = (void*)(((char*)(o->ptr))-(sizeof(struct sdshdr))); - + sds s = o->ptr; if (cached_objects[j]) decrRefCount(cached_objects[j]); cached_objects[j] = o; - cached_objects_len[j] = sh->free + sh->len; + cached_objects_len[j] = sdsalloc(s); } else { decrRefCount(o); } diff --git a/src/sds.c b/src/sds.c index 2ebe286d1..dbf6c64ad 100644 --- a/src/sds.c +++ b/src/sds.c @@ -36,6 +36,30 @@ #include "sds.h" #include "zmalloc.h" +static inline int sdsHdrSize(char type) { + switch(type&SDS_TYPE_MASK) { + case SDS_TYPE_8: + return sizeof(struct sdshdr8); + case SDS_TYPE_16: + return sizeof(struct sdshdr16); + case SDS_TYPE_32: + return sizeof(struct sdshdr32); + case SDS_TYPE_64: + return sizeof(struct sdshdr64); + } + return 0; +} + +static inline char sdsReqType(size_t string_size) { + if (string_size<0xff) + return SDS_TYPE_8; + if (string_size<0xffff) + return SDS_TYPE_16; + if (string_size<0xffffffff) + return SDS_TYPE_32; + return SDS_TYPE_64; +} + /* Create a new sds string with the content specified by the 'init' pointer * and 'initlen'. * If NULL is used for 'init' the string is initialized with zero bytes. @@ -49,20 +73,65 @@ * end of the string. However the string is binary safe and can contain * \0 characters in the middle, as the length is stored in the sds header. 
*/ sds sdsnewlen(const void *init, size_t initlen) { - struct sdshdr *sh; - - if (init) { - sh = zmalloc(sizeof(struct sdshdr)+initlen+1); - } else { - sh = zcalloc(sizeof(struct sdshdr)+initlen+1); - } + void *sh; + sds s; + char type = sdsReqType(initlen); + int hdrlen = sdsHdrSize(type); + + sh = zmalloc(hdrlen+initlen+1); + if (!init) + memset(sh, 0, hdrlen+initlen+1); if (sh == NULL) return NULL; - sh->len = initlen; - sh->free = 0; + s = (char*)sh+hdrlen; + switch(type) { + case SDS_TYPE_8: { + SDS_HDR_VAR(8,s); + sh->len = initlen; + sh->alloc = initlen; + break; + } + case SDS_TYPE_16: { + SDS_HDR_VAR(16,s); + sh->len = initlen; + sh->alloc = initlen; + break; + } + case SDS_TYPE_32: { + SDS_HDR_VAR(32,s); + sh->len = initlen; + sh->alloc = initlen; + break; + } + case SDS_TYPE_64: { + SDS_HDR_VAR(64,s); + sh->len = initlen; + sh->alloc = initlen; + break; + } + } + s[-1] = type; if (initlen && init) - memcpy(sh->buf, init, initlen); - sh->buf[initlen] = '\0'; - return (char*)sh->buf; + memcpy(s, init, initlen); + s[initlen] = '\0'; + return s; +} + +void sdsIncRefcount(sds s) { + unsigned char flags = s[-1]; + unsigned refs = flags>>SDS_TYPE_BITS; + assert(++refs); + s[-1] = (refs<>SDS_TYPE_BITS; + assert(refs); + if (!(--refs)) + zfree(sh); + else + s[-1] = (refs<free += (sh->len-reallen); - sh->len = reallen; + sdssetlen(s, reallen); } /* Modify an sds string in-place to make it empty (zero length). @@ -114,10 +181,8 @@ void sdsupdatelen(sds s) { * so that next append operations will not require allocations up to the * number of bytes previously available. */ void sdsclear(sds s) { - struct sdshdr *sh = (void*) (s-(sizeof(struct sdshdr))); - sh->free += sh->len; - sh->len = 0; - sh->buf[0] = '\0'; + sdssetlen(s, 0); + s[0] = '\0'; } /* Enlarge the free space at the end of the sds string so that the caller @@ -127,23 +192,41 @@ void sdsclear(sds s) { * Note: this does not change the *length* of the sds string as returned * by sdslen(), but only the free buffer space we have. */ sds sdsMakeRoomFor(sds s, size_t addlen) { - struct sdshdr *sh, *newsh; - size_t free = sdsavail(s); + void *sh, *newsh; + size_t avail = sdsavail(s); size_t len, newlen; + char type, oldtype = s[-1]; + int hdrlen; - if (free >= addlen) return s; + if (avail >= addlen) return s; len = sdslen(s); - sh = (void*) (s-(sizeof(struct sdshdr))); + sh = (char*)s-sdsHdrSize(oldtype); newlen = (len+addlen); if (newlen < SDS_MAX_PREALLOC) newlen *= 2; else newlen += SDS_MAX_PREALLOC; - newsh = zrealloc(sh, sizeof(struct sdshdr)+newlen+1); - if (newsh == NULL) return NULL; - newsh->free = newlen - len; - return newsh->buf; + assert(!(s[-1]>>SDS_TYPE_BITS));/* verify that the ref count is 0 (non ref count managed string) */ + type = sdsReqType(newlen); + hdrlen = sdsHdrSize(type); + if (oldtype==type) { + newsh = zrealloc(sh, hdrlen+newlen+1); + if (newsh == NULL) return NULL; + s = (char*)newsh+hdrlen; + } else { + /* since the header size changes, need to move the string forward, and can't use realloc */ + newsh = zmalloc(hdrlen+newlen+1); + if (newsh == NULL) return NULL; + memcpy((char*)newsh+hdrlen, s, len+1); + zfree(sh); + s = (char*)newsh+hdrlen; + s[-1] = type; + sdssetlen(s, len); + } + sdssetalloc(s, newlen); + s[-1] = type; + return s; } /* Reallocate the sds string so that it has no free space at the end. 
The @@ -153,12 +236,31 @@ sds sdsMakeRoomFor(sds s, size_t addlen) { * After the call, the passed sds string is no longer valid and all the * references must be substituted with the new pointer returned by the call. */ sds sdsRemoveFreeSpace(sds s) { - struct sdshdr *sh; + void *sh, *newsh; + char type, oldtype = s[-1]; + int hdrlen; + size_t len = sdslen(s); + sh = (char*)s-sdsHdrSize(oldtype); - sh = (void*) (s-(sizeof(struct sdshdr))); - sh = zrealloc(sh, sizeof(struct sdshdr)+sh->len+1); - sh->free = 0; - return sh->buf; + type = sdsReqType(len); + hdrlen = sdsHdrSize(type); + if (oldtype==type) { + newsh = zrealloc(sh, hdrlen+len+1); + if (newsh == NULL) return NULL; + s = (char*)newsh+hdrlen; + } else { + newsh = zmalloc(hdrlen+len+1); + if (newsh == NULL) return NULL; + memcpy((char*)newsh+hdrlen, s, len+1); + zfree(sh); + s = (char*)newsh+hdrlen; + s[-1] = type; + sdssetlen(s, len); + } + sdssetalloc(s, len); + assert(!(s[-1]>>SDS_TYPE_BITS));/* verify that the ref count is 0 (non ref count managed string) */ + s[-1] = type; + return s; } /* Return the total size of the allocation of the specifed sds string, @@ -169,9 +271,15 @@ sds sdsRemoveFreeSpace(sds s) { * 4) The implicit null term. */ size_t sdsAllocSize(sds s) { - struct sdshdr *sh = (void*) (s-(sizeof(struct sdshdr))); + size_t alloc = sdsalloc(s); + return sdsHdrSize(s[-1])+alloc+1; +} - return sizeof(*sh)+sh->len+sh->free+1; +/* Return the size consumed from the allocator, + * including internal fragmentation */ +size_t sdsZmallocSize(sds s) { + struct sdshdr *sh = (void*) (s-sdsHdrSize(s[-1])); + return zmalloc_size(sh); } /* Increment the sds length and decrements the left free space at the @@ -198,15 +306,35 @@ size_t sdsAllocSize(sds s) { * sdsIncrLen(s, nread); */ void sdsIncrLen(sds s, int incr) { - struct sdshdr *sh = (void*) (s-(sizeof(struct sdshdr))); - - if (incr >= 0) - assert(sh->free >= (unsigned int)incr); - else - assert(sh->len >= (unsigned int)(-incr)); - sh->len += incr; - sh->free -= incr; - s[sh->len] = '\0'; + char flags = s[-1]; + size_t len; + switch(flags&SDS_TYPE_MASK) { + case SDS_TYPE_8: { + SDS_HDR_VAR(8,s); + assert((incr >= 0 && sh->alloc-sh->len >= incr) || (incr < 0 && sh->len >= (unsigned int)(-incr))); + len = (sh->len += incr); + break; + } + case SDS_TYPE_16: { + SDS_HDR_VAR(16,s); + assert((incr >= 0 && sh->alloc-sh->len >= incr) || (incr < 0 && sh->len >= (unsigned int)(-incr))); + len = (sh->len += incr); + break; + } + case SDS_TYPE_32: { + SDS_HDR_VAR(32,s); + assert((incr >= 0 && sh->alloc-sh->len >= (unsigned int)incr) || (incr < 0 && sh->len >= (unsigned int)(-incr))); + len = (sh->len += incr); + break; + } + case SDS_TYPE_64: { + SDS_HDR_VAR(64,s); + assert((incr >= 0 && sh->alloc-sh->len >= (uint64_t)incr) || (incr < 0 && sh->len >= (uint64_t)(-incr))); + len = (sh->len += incr); + break; + } + } + s[len] = '\0'; } /* Grow the sds to have the specified length. Bytes that were not part of @@ -215,19 +343,15 @@ void sdsIncrLen(sds s, int incr) { * if the specified length is smaller than the current length, no operation * is performed. 
*/ sds sdsgrowzero(sds s, size_t len) { - struct sdshdr *sh = (void*)(s-(sizeof(struct sdshdr))); - size_t totlen, curlen = sh->len; + size_t curlen = sdslen(s); if (len <= curlen) return s; s = sdsMakeRoomFor(s,len-curlen); if (s == NULL) return NULL; /* Make sure added region doesn't contain garbage */ - sh = (void*)(s-(sizeof(struct sdshdr))); memset(s+curlen,0,(len-curlen+1)); /* also set trailing \0 byte */ - totlen = sh->len+sh->free; - sh->len = len; - sh->free = totlen-sh->len; + sdssetlen(s, len); return s; } @@ -237,15 +361,12 @@ sds sdsgrowzero(sds s, size_t len) { * After the call, the passed sds string is no longer valid and all the * references must be substituted with the new pointer returned by the call. */ sds sdscatlen(sds s, const void *t, size_t len) { - struct sdshdr *sh; size_t curlen = sdslen(s); s = sdsMakeRoomFor(s,len); if (s == NULL) return NULL; - sh = (void*) (s-(sizeof(struct sdshdr))); memcpy(s+curlen, t, len); - sh->len = curlen+len; - sh->free = sh->free-len; + sdssetlen(s, curlen+len); s[curlen+len] = '\0'; return s; } @@ -269,19 +390,13 @@ sds sdscatsds(sds s, const sds t) { /* Destructively modify the sds string 's' to hold the specified binary * safe string pointed by 't' of length 'len' bytes. */ sds sdscpylen(sds s, const char *t, size_t len) { - struct sdshdr *sh = (void*) (s-(sizeof(struct sdshdr))); - size_t totlen = sh->free+sh->len; - - if (totlen < len) { - s = sdsMakeRoomFor(s,len-sh->len); + if (sdsalloc(s) < len) { + s = sdsMakeRoomFor(s,len-sdslen(s)); if (s == NULL) return NULL; - sh = (void*) (s-(sizeof(struct sdshdr))); - totlen = sh->free+sh->len; } memcpy(s, t, len); s[len] = '\0'; - sh->len = len; - sh->free = totlen-len; + sdssetlen(s, len); return s; } @@ -449,7 +564,6 @@ sds sdscatprintf(sds s, const char *fmt, ...) { * %% - Verbatim "%" character. */ sds sdscatfmt(sds s, char const *fmt, ...) { - struct sdshdr *sh = (void*) (s-(sizeof(struct sdshdr))); size_t initlen = sdslen(s); const char *f = fmt; int i; @@ -460,14 +574,13 @@ sds sdscatfmt(sds s, char const *fmt, ...) { i = initlen; /* Position of the next byte to write to dest str. */ while(*f) { char next, *str; - unsigned int l; + size_t l; long long num; unsigned long long unum; /* Make sure there is always space for at least 1 char. */ - if (sh->free == 0) { + if (sdsavail(s)==0) { s = sdsMakeRoomFor(s,1); - sh = (void*) (s-(sizeof(struct sdshdr))); } switch(*f) { @@ -479,13 +592,11 @@ sds sdscatfmt(sds s, char const *fmt, ...) { case 'S': str = va_arg(ap,char*); l = (next == 's') ? strlen(str) : sdslen(str); - if (sh->free < l) { + if (sdsavail(s) < l) { s = sdsMakeRoomFor(s,l); - sh = (void*) (s-(sizeof(struct sdshdr))); } memcpy(s+i,str,l); - sh->len += l; - sh->free -= l; + sdsinclen(s,l); i += l; break; case 'i': @@ -497,13 +608,11 @@ sds sdscatfmt(sds s, char const *fmt, ...) { { char buf[SDS_LLSTR_SIZE]; l = sdsll2str(buf,num); - if (sh->free < l) { + if (sdsavail(s) < l) { s = sdsMakeRoomFor(s,l); - sh = (void*) (s-(sizeof(struct sdshdr))); } memcpy(s+i,buf,l); - sh->len += l; - sh->free -= l; + sdsinclen(s,l); i += l; } break; @@ -516,27 +625,23 @@ sds sdscatfmt(sds s, char const *fmt, ...) { { char buf[SDS_LLSTR_SIZE]; l = sdsull2str(buf,unum); - if (sh->free < l) { + if (sdsavail(s) < l) { s = sdsMakeRoomFor(s,l); - sh = (void*) (s-(sizeof(struct sdshdr))); } memcpy(s+i,buf,l); - sh->len += l; - sh->free -= l; + sdsinclen(s,l); i += l; } break; default: /* Handle %% and generally %. 
*/ s[i++] = next; - sh->len += 1; - sh->free -= 1; + sdsinclen(s,1); break; } break; default: s[i++] = *f; - sh->len += 1; - sh->free -= 1; + sdsinclen(s,1); break; } f++; @@ -563,7 +668,6 @@ sds sdscatfmt(sds s, char const *fmt, ...) { * Output will be just "Hello World". */ sds sdstrim(sds s, const char *cset) { - struct sdshdr *sh = (void*) (s-(sizeof(struct sdshdr))); char *start, *end, *sp, *ep; size_t len; @@ -572,10 +676,9 @@ sds sdstrim(sds s, const char *cset) { while(sp <= end && strchr(cset, *sp)) sp++; while(ep > sp && strchr(cset, *ep)) ep--; len = (sp > ep) ? 0 : ((ep-sp)+1); - if (sh->buf != sp) memmove(sh->buf, sp, len); - sh->buf[len] = '\0'; - sh->free = sh->free+(sh->len-len); - sh->len = len; + if (s != sp) memmove(s, sp, len); + s[len] = '\0'; + sdssetlen(s,len); return s; } @@ -596,7 +699,6 @@ sds sdstrim(sds s, const char *cset) { * sdsrange(s,1,-1); => "ello World" */ void sdsrange(sds s, int start, int end) { - struct sdshdr *sh = (void*) (s-(sizeof(struct sdshdr))); size_t newlen, len = sdslen(s); if (len == 0) return; @@ -619,10 +721,9 @@ void sdsrange(sds s, int start, int end) { } else { start = 0; } - if (start && newlen) memmove(sh->buf, sh->buf+start, newlen); - sh->buf[newlen] = 0; - sh->free = sh->free+(sh->len-newlen); - sh->len = newlen; + if (start && newlen) memmove(s, s+start, newlen); + s[newlen] = 0; + sdssetlen(s,newlen); } /* Apply tolower() to every character of the sds string 's'. */ diff --git a/src/sds.h b/src/sds.h index 93dd4f28e..9201a751c 100644 --- a/src/sds.h +++ b/src/sds.h @@ -35,32 +35,157 @@ #include #include +#include typedef char *sds; -struct sdshdr { - unsigned int len; - unsigned int free; +struct __attribute__ ((__packed__)) sdshdr8 { + uint8_t len; /* used */ + uint8_t alloc; /* excluding the header and null terminator */ + char flags; /* 2 lsb of type, and 6 msb of refcount */ + char buf[]; +}; +struct __attribute__ ((__packed__)) sdshdr16 { + uint16_t len; /* used */ + uint16_t alloc; /* excluding the header and null terminator */ + char flags; /* 2 lsb of type, and 6 msb of refcount */ + char buf[]; +}; +struct __attribute__ ((__packed__)) sdshdr32 { + uint32_t len; /* used */ + uint32_t alloc; /* excluding the header and null terminator */ + char flags; /* 2 lsb of type, and 6 msb of refcount */ + char buf[]; +}; +struct __attribute__ ((__packed__)) sdshdr64 { + uint64_t len; /* used */ + uint64_t alloc; /* excluding the header and null terminator */ + char flags; /* 2 lsb of type, and 6 msb of refcount */ char buf[]; }; +#define SDS_TYPE_8 0 +#define SDS_TYPE_16 1 +#define SDS_TYPE_32 2 +#define SDS_TYPE_64 3 +#define SDS_TYPE_MASK 3 +#define SDS_TYPE_BITS 2 +#define SDS_HDR_VAR(T,s) struct sdshdr##T *sh = (void*)((s)-(sizeof(struct sdshdr##T))); +#define SDS_HDR(T,s) ((struct sdshdr##T *)((s)-(sizeof(struct sdshdr##T)))) + static inline size_t sdslen(const sds s) { - struct sdshdr *sh = (void*)(s-(sizeof(struct sdshdr))); - return sh->len; + char flags = s[-1]; + switch(flags&SDS_TYPE_MASK) { + case SDS_TYPE_8: + return SDS_HDR(8,s)->len; + case SDS_TYPE_16: + return SDS_HDR(16,s)->len; + case SDS_TYPE_32: + return SDS_HDR(32,s)->len; + case SDS_TYPE_64: + return SDS_HDR(64,s)->len; + } + return 0; } static inline size_t sdsavail(const sds s) { - struct sdshdr *sh = (void*)(s-(sizeof(struct sdshdr))); - return sh->free; + char flags = s[-1]; + switch(flags&SDS_TYPE_MASK) { + case SDS_TYPE_8: { + SDS_HDR_VAR(8,s); + return sh->alloc - sh->len; + } + case SDS_TYPE_16: { + SDS_HDR_VAR(16,s); + return sh->alloc - sh->len; + 
} + case SDS_TYPE_32: { + SDS_HDR_VAR(32,s); + return sh->alloc - sh->len; + } + case SDS_TYPE_64: { + SDS_HDR_VAR(64,s); + return sh->alloc - sh->len; + } + } + return 0; +} + +static inline void sdssetlen(sds s, size_t newlen) { + char flags = s[-1]; + switch(flags&SDS_TYPE_MASK) { + case SDS_TYPE_8: + SDS_HDR(8,s)->len = newlen; + break; + case SDS_TYPE_16: + SDS_HDR(16,s)->len = newlen; + break; + case SDS_TYPE_32: + SDS_HDR(32,s)->len = newlen; + break; + case SDS_TYPE_64: + SDS_HDR(64,s)->len = newlen; + break; + } +} + +static inline void sdsinclen(sds s, size_t inc) { + char flags = s[-1]; + switch(flags&SDS_TYPE_MASK) { + case SDS_TYPE_8: + SDS_HDR(8,s)->len += inc; + break; + case SDS_TYPE_16: + SDS_HDR(16,s)->len += inc; + break; + case SDS_TYPE_32: + SDS_HDR(32,s)->len += inc; + break; + case SDS_TYPE_64: + SDS_HDR(64,s)->len += inc; + break; + } +} + +/* sdsalloc() = sdsavail() + sdslen() */ +static inline size_t sdsalloc(const sds s) { + char flags = s[-1]; + switch(flags&SDS_TYPE_MASK) { + case SDS_TYPE_8: + return SDS_HDR(8,s)->alloc; + case SDS_TYPE_16: + return SDS_HDR(16,s)->alloc; + case SDS_TYPE_32: + return SDS_HDR(32,s)->alloc; + case SDS_TYPE_64: + return SDS_HDR(64,s)->alloc; + } + return 0; +} + +static inline void sdssetalloc(sds s, size_t newlen) { + char flags = s[-1]; + switch(flags&SDS_TYPE_MASK) { + case SDS_TYPE_8: + SDS_HDR(8,s)->alloc = newlen; + break; + case SDS_TYPE_16: + SDS_HDR(16,s)->alloc = newlen; + break; + case SDS_TYPE_32: + SDS_HDR(32,s)->alloc = newlen; + break; + case SDS_TYPE_64: + SDS_HDR(64,s)->alloc = newlen; + break; + } } sds sdsnewlen(const void *init, size_t initlen); sds sdsnew(const char *init); sds sdsempty(void); -size_t sdslen(const sds s); sds sdsdup(const sds s); void sdsfree(sds s); -size_t sdsavail(const sds s); sds sdsgrowzero(sds s, size_t len); sds sdscatlen(sds s, const void *t, size_t len); sds sdscat(sds s, const char *t); @@ -68,6 +193,11 @@ sds sdscatsds(sds s, const sds t); sds sdscpylen(sds s, const char *t, size_t len); sds sdscpy(sds s, const char *t); +/* we can add a reference count on top of any + * existing sds. (max up to 63 references) */ +void sdsIncRefcount(sds s); +void sdsDecRefcount(sds s); + sds sdscatvprintf(sds s, const char *fmt, va_list ap); #ifdef __GNUC__ sds sdscatprintf(sds s, const char *fmt, ...) @@ -97,6 +227,7 @@ sds sdsMakeRoomFor(sds s, size_t addlen); void sdsIncrLen(sds s, int incr); sds sdsRemoveFreeSpace(sds s); size_t sdsAllocSize(sds s); +size_t sdsZmallocSize(sds s); #ifdef REDIS_TEST int sdsTest(int argc, char *argv[]); From a76b380e06d6758dcd84277003ca7af520c2b422 Mon Sep 17 00:00:00 2001 From: antirez Date: Tue, 14 Jul 2015 16:04:00 +0200 Subject: [PATCH 0302/1928] Fix DEBUG structsize output. 
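The sdshdr format strings lacked a trailing space, so the fields ran together
in the reply. Illustrative output only, assuming the packed headers are
3, 5, 9 and 17 bytes as their definitions suggest:

    before: ... sdshdr8:3sdshdr16:5sdshdr32:9sdshdr64:17
    after:  ... sdshdr8:3 sdshdr16:5 sdshdr32:9 sdshdr64:17
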
--- src/debug.c | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/src/debug.c b/src/debug.c index e071fa0b6..3f7a85357 100644 --- a/src/debug.c +++ b/src/debug.c @@ -420,13 +420,13 @@ void debugCommand(redisClient *c) { addReplySds(c,errstr); } else if (!strcasecmp(c->argv[1]->ptr,"structsize") && c->argc == 2) { sds sizes = sdsempty(); - sizes = sdscatprintf(sizes,"bits:%d ", (sizeof(void*) == 8)?64:32); - sizes = sdscatprintf(sizes,"robj:%d ", (int)sizeof(robj)); - sizes = sdscatprintf(sizes,"dictentry:%d ", (int)sizeof(dictEntry)); - sizes = sdscatprintf(sizes,"sdshdr8:%d", (int)sizeof(struct sdshdr8)); - sizes = sdscatprintf(sizes,"sdshdr16:%d", (int)sizeof(struct sdshdr16)); - sizes = sdscatprintf(sizes,"sdshdr32:%d", (int)sizeof(struct sdshdr32)); - sizes = sdscatprintf(sizes,"sdshdr64:%d", (int)sizeof(struct sdshdr64)); + sizes = sdscatprintf(sizes,"bits:%d ",(sizeof(void*) == 8)?64:32); + sizes = sdscatprintf(sizes,"robj:%d ",(int)sizeof(robj)); + sizes = sdscatprintf(sizes,"dictentry:%d ",(int)sizeof(dictEntry)); + sizes = sdscatprintf(sizes,"sdshdr8:%d ",(int)sizeof(struct sdshdr8)); + sizes = sdscatprintf(sizes,"sdshdr16:%d ",(int)sizeof(struct sdshdr16)); + sizes = sdscatprintf(sizes,"sdshdr32:%d ",(int)sizeof(struct sdshdr32)); + sizes = sdscatprintf(sizes,"sdshdr64:%d ",(int)sizeof(struct sdshdr64)); addReplyBulkSds(c,sizes); } else if (!strcasecmp(c->argv[1]->ptr,"htstats") && c->argc == 3) { long dbid; From 056a0ca199edbc9f4644684468b8833884e74cd7 Mon Sep 17 00:00:00 2001 From: antirez Date: Tue, 14 Jul 2015 17:33:30 +0200 Subject: [PATCH 0303/1928] Fix redis-benchmark sds binding. Same as redis-cli, now redis-benchmark requires to use hiredis sds copy since it is different compared to the memory optimized fork of Redis sds. --- src/Makefile | 2 +- src/redis-benchmark.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/src/Makefile b/src/Makefile index 106fef340..449b4ec26 100644 --- a/src/Makefile +++ b/src/Makefile @@ -122,7 +122,7 @@ REDIS_GEOHASH_OBJ=../deps/geohash-int/geohash.o ../deps/geohash-int/geohash_help REDIS_CLI_NAME=redis-cli REDIS_CLI_OBJ=anet.o adlist.o redis-cli.o zmalloc.o release.o anet.o ae.o crc64.o REDIS_BENCHMARK_NAME=redis-benchmark -REDIS_BENCHMARK_OBJ=ae.o anet.o redis-benchmark.o sds.o adlist.o zmalloc.o redis-benchmark.o +REDIS_BENCHMARK_OBJ=ae.o anet.o redis-benchmark.o adlist.o zmalloc.o redis-benchmark.o REDIS_CHECK_RDB_NAME=redis-check-rdb REDIS_CHECK_AOF_NAME=redis-check-aof REDIS_CHECK_AOF_OBJ=redis-check-aof.o diff --git a/src/redis-benchmark.c b/src/redis-benchmark.c index f735aeb63..e19fdce14 100644 --- a/src/redis-benchmark.c +++ b/src/redis-benchmark.c @@ -40,9 +40,9 @@ #include #include +#include /* Use hiredis sds. */ #include "ae.h" #include "hiredis.h" -#include "sds.h" #include "adlist.h" #include "zmalloc.h" From 0ab27a4594aa73ffdabf2afb935d85ab6c03f0ee Mon Sep 17 00:00:00 2001 From: antirez Date: Wed, 15 Jul 2015 12:24:49 +0200 Subject: [PATCH 0304/1928] SDS: New sds type 5 implemented. This is an attempt to use the refcount feature of the sds.c fork provided in the Pull Request #2509. A new type, SDS_TYPE_5 is introduced having a one byte header with just the string length, without information about the available additional length at the end of the string (this means that sdsMakeRoomFor() will be required each time we want to append something, since the string will always report to have 0 bytes available). 
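As a sketch of the layout (derived from the macros added below, not extra
code in this patch), a 5 byte type 5 string keeps everything in the single
flags byte:

    unsigned char flags = SDS_TYPE_5 | (5 << SDS_TYPE_BITS); /* 0 | (5 << 3) = 0x28 */
    size_t len = SDS_TYPE_5_LEN(flags);                      /* flags >> 3 == 5 */

So only strings shorter than 32 bytes can use this header, and there is no
field left to record spare allocation at the end of the buffer.
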
More work needed in order to avoid common SDS functions will pay the cost of this type. For example both sdscatprintf() and sdscatfmt() should try to upgrade to SDS_TYPE_8 ASAP when appending chars. --- src/sds.c | 72 +++++++++++++++++++++++++++---------------------------- src/sds.h | 68 ++++++++++++++++++++++++++++++++++++---------------- 2 files changed, 83 insertions(+), 57 deletions(-) diff --git a/src/sds.c b/src/sds.c index dbf6c64ad..4771b6b3e 100644 --- a/src/sds.c +++ b/src/sds.c @@ -38,6 +38,8 @@ static inline int sdsHdrSize(char type) { switch(type&SDS_TYPE_MASK) { + case SDS_TYPE_5: + return sizeof(struct sdshdr5); case SDS_TYPE_8: return sizeof(struct sdshdr8); case SDS_TYPE_16: @@ -51,11 +53,13 @@ static inline int sdsHdrSize(char type) { } static inline char sdsReqType(size_t string_size) { - if (string_size<0xff) + if (string_size < 32) + return SDS_TYPE_5; + if (string_size < 0xff) return SDS_TYPE_8; - if (string_size<0xffff) + if (string_size < 0xffff) return SDS_TYPE_16; - if (string_size<0xffffffff) + if (string_size < 0xffffffff) return SDS_TYPE_32; return SDS_TYPE_64; } @@ -77,63 +81,54 @@ sds sdsnewlen(const void *init, size_t initlen) { sds s; char type = sdsReqType(initlen); int hdrlen = sdsHdrSize(type); - + unsigned char *fp; /* flags pointer. */ + sh = zmalloc(hdrlen+initlen+1); if (!init) memset(sh, 0, hdrlen+initlen+1); if (sh == NULL) return NULL; s = (char*)sh+hdrlen; + fp = ((unsigned char*)s)-1; switch(type) { + case SDS_TYPE_5: { + *fp = type | (initlen << SDS_TYPE_BITS); + break; + } case SDS_TYPE_8: { SDS_HDR_VAR(8,s); sh->len = initlen; sh->alloc = initlen; + *fp = type; break; } case SDS_TYPE_16: { SDS_HDR_VAR(16,s); sh->len = initlen; sh->alloc = initlen; + *fp = type; break; } case SDS_TYPE_32: { SDS_HDR_VAR(32,s); sh->len = initlen; sh->alloc = initlen; + *fp = type; break; } case SDS_TYPE_64: { SDS_HDR_VAR(64,s); sh->len = initlen; sh->alloc = initlen; + *fp = type; break; } } - s[-1] = type; if (initlen && init) memcpy(s, init, initlen); s[initlen] = '\0'; return s; } -void sdsIncRefcount(sds s) { - unsigned char flags = s[-1]; - unsigned refs = flags>>SDS_TYPE_BITS; - assert(++refs); - s[-1] = (refs<>SDS_TYPE_BITS; - assert(refs); - if (!(--refs)) - zfree(sh); - else - s[-1] = (refs<= addlen) return s; @@ -207,7 +202,6 @@ sds sdsMakeRoomFor(sds s, size_t addlen) { else newlen += SDS_MAX_PREALLOC; - assert(!(s[-1]>>SDS_TYPE_BITS));/* verify that the ref count is 0 (non ref count managed string) */ type = sdsReqType(newlen); hdrlen = sdsHdrSize(type); if (oldtype==type) { @@ -215,7 +209,8 @@ sds sdsMakeRoomFor(sds s, size_t addlen) { if (newsh == NULL) return NULL; s = (char*)newsh+hdrlen; } else { - /* since the header size changes, need to move the string forward, and can't use realloc */ + /* Since the header size changes, need to move the string forward, + * and can't use realloc */ newsh = zmalloc(hdrlen+newlen+1); if (newsh == NULL) return NULL; memcpy((char*)newsh+hdrlen, s, len+1); @@ -225,7 +220,6 @@ sds sdsMakeRoomFor(sds s, size_t addlen) { sdssetlen(s, len); } sdssetalloc(s, newlen); - s[-1] = type; return s; } @@ -237,7 +231,7 @@ sds sdsMakeRoomFor(sds s, size_t addlen) { * references must be substituted with the new pointer returned by the call. 
*/ sds sdsRemoveFreeSpace(sds s) { void *sh, *newsh; - char type, oldtype = s[-1]; + char type, oldtype = s[-1] & SDS_TYPE_MASK; int hdrlen; size_t len = sdslen(s); sh = (char*)s-sdsHdrSize(oldtype); @@ -258,8 +252,6 @@ sds sdsRemoveFreeSpace(sds s) { sdssetlen(s, len); } sdssetalloc(s, len); - assert(!(s[-1]>>SDS_TYPE_BITS));/* verify that the ref count is 0 (non ref count managed string) */ - s[-1] = type; return s; } @@ -275,7 +267,7 @@ size_t sdsAllocSize(sds s) { return sdsHdrSize(s[-1])+alloc+1; } -/* Return the size consumed from the allocator, +/* Return the size consumed from the allocator, * including internal fragmentation */ size_t sdsZmallocSize(sds s) { struct sdshdr *sh = (void*) (s-sdsHdrSize(s[-1])); @@ -306,9 +298,17 @@ size_t sdsZmallocSize(sds s) { * sdsIncrLen(s, nread); */ void sdsIncrLen(sds s, int incr) { - char flags = s[-1]; + unsigned char flags = s[-1]; size_t len; switch(flags&SDS_TYPE_MASK) { + case SDS_TYPE_5: { + unsigned char *fp = ((unsigned char*)s)-1; + unsigned char oldlen = SDS_TYPE_5_LEN(flags); + assert((incr > 0 && oldlen+incr < 32) || (incr < 0 && oldlen >= (unsigned int)(-incr))); + *fp = SDS_TYPE_5 | ((oldlen+1) << SDS_TYPE_BITS); + len = oldlen+1; + break; + } case SDS_TYPE_8: { SDS_HDR_VAR(8,s); assert((incr >= 0 && sh->alloc-sh->len >= incr) || (incr < 0 && sh->len >= (unsigned int)(-incr))); @@ -1069,11 +1069,8 @@ sds sdsjoin(char **argv, int argc, char *sep) { #include "limits.h" #define UNUSED(x) (void)(x) -int sdsTest(int argc, char *argv[]) { - UNUSED(argc); - UNUSED(argv); +int sdsTest(void) { { - struct sdshdr *sh; sds x = sdsnew("foo"), y; test_cond("Create a string and obtain the length", @@ -1109,6 +1106,7 @@ int sdsTest(int argc, char *argv[]) { sdslen(x) == 60 && memcmp(x,"--Hello Hi! World -9223372036854775808," "9223372036854775807--",60) == 0) + printf("[%s]\n",x); sdsfree(x); x = sdsnew("--"); @@ -1195,6 +1193,7 @@ int sdsTest(int argc, char *argv[]) { test_cond("sdscatrepr(...data...)", memcmp(y,"\"\\a\\n\\x00foo\\r\"",15) == 0) +#if 0 { unsigned int oldfree; @@ -1215,6 +1214,7 @@ int sdsTest(int argc, char *argv[]) { sdsfree(x); } +#endif } test_report() return 0; diff --git a/src/sds.h b/src/sds.h index 9201a751c..1fcbe1155 100644 --- a/src/sds.h +++ b/src/sds.h @@ -39,43 +39,53 @@ typedef char *sds; +/* Note: sdshdr5 is never used, we just access the flags byte directly. + * However is here to document the layout of type 5 SDS strings. 
*/ +struct __attribute__ ((__packed__)) sdshdr5 { + unsigned char flags; /* 3 lsb of type, and 5 msb of string length */ + char buf[]; +}; struct __attribute__ ((__packed__)) sdshdr8 { uint8_t len; /* used */ uint8_t alloc; /* excluding the header and null terminator */ - char flags; /* 2 lsb of type, and 6 msb of refcount */ + unsigned char flags; /* 3 lsb of type, 5 unused bits */ char buf[]; }; struct __attribute__ ((__packed__)) sdshdr16 { uint16_t len; /* used */ uint16_t alloc; /* excluding the header and null terminator */ - char flags; /* 2 lsb of type, and 6 msb of refcount */ + unsigned char flags; /* 3 lsb of type, 5 unused bits */ char buf[]; }; struct __attribute__ ((__packed__)) sdshdr32 { uint32_t len; /* used */ uint32_t alloc; /* excluding the header and null terminator */ - char flags; /* 2 lsb of type, and 6 msb of refcount */ + unsigned char flags; /* 3 lsb of type, 5 unused bits */ char buf[]; }; struct __attribute__ ((__packed__)) sdshdr64 { uint64_t len; /* used */ uint64_t alloc; /* excluding the header and null terminator */ - char flags; /* 2 lsb of type, and 6 msb of refcount */ + unsigned char flags; /* 3 lsb of type, 5 unused bits */ char buf[]; }; -#define SDS_TYPE_8 0 -#define SDS_TYPE_16 1 -#define SDS_TYPE_32 2 -#define SDS_TYPE_64 3 -#define SDS_TYPE_MASK 3 -#define SDS_TYPE_BITS 2 +#define SDS_TYPE_5 0 +#define SDS_TYPE_8 1 +#define SDS_TYPE_16 2 +#define SDS_TYPE_32 3 +#define SDS_TYPE_64 4 +#define SDS_TYPE_MASK 7 +#define SDS_TYPE_BITS 3 #define SDS_HDR_VAR(T,s) struct sdshdr##T *sh = (void*)((s)-(sizeof(struct sdshdr##T))); #define SDS_HDR(T,s) ((struct sdshdr##T *)((s)-(sizeof(struct sdshdr##T)))) +#define SDS_TYPE_5_LEN(f) ((f)>>SDS_TYPE_BITS) static inline size_t sdslen(const sds s) { - char flags = s[-1]; + unsigned char flags = s[-1]; switch(flags&SDS_TYPE_MASK) { + case SDS_TYPE_5: + return SDS_TYPE_5_LEN(flags); case SDS_TYPE_8: return SDS_HDR(8,s)->len; case SDS_TYPE_16: @@ -89,8 +99,11 @@ static inline size_t sdslen(const sds s) { } static inline size_t sdsavail(const sds s) { - char flags = s[-1]; + unsigned char flags = s[-1]; switch(flags&SDS_TYPE_MASK) { + case SDS_TYPE_5: { + return 0; + } case SDS_TYPE_8: { SDS_HDR_VAR(8,s); return sh->alloc - sh->len; @@ -112,8 +125,14 @@ static inline size_t sdsavail(const sds s) { } static inline void sdssetlen(sds s, size_t newlen) { - char flags = s[-1]; + unsigned char flags = s[-1]; switch(flags&SDS_TYPE_MASK) { + case SDS_TYPE_5: + { + unsigned char *fp = ((unsigned char*)s)-1; + *fp = SDS_TYPE_5 | (newlen << SDS_TYPE_BITS); + } + break; case SDS_TYPE_8: SDS_HDR(8,s)->len = newlen; break; @@ -130,8 +149,15 @@ static inline void sdssetlen(sds s, size_t newlen) { } static inline void sdsinclen(sds s, size_t inc) { - char flags = s[-1]; + unsigned char flags = s[-1]; switch(flags&SDS_TYPE_MASK) { + case SDS_TYPE_5: + { + unsigned char *fp = ((unsigned char*)s)-1; + unsigned char newlen = SDS_TYPE_5_LEN(flags)+inc; + *fp = SDS_TYPE_5 | (newlen << SDS_TYPE_BITS); + } + break; case SDS_TYPE_8: SDS_HDR(8,s)->len += inc; break; @@ -149,8 +175,10 @@ static inline void sdsinclen(sds s, size_t inc) { /* sdsalloc() = sdsavail() + sdslen() */ static inline size_t sdsalloc(const sds s) { - char flags = s[-1]; + unsigned char flags = s[-1]; switch(flags&SDS_TYPE_MASK) { + case SDS_TYPE_5: + return SDS_TYPE_5_LEN(flags); case SDS_TYPE_8: return SDS_HDR(8,s)->alloc; case SDS_TYPE_16: @@ -164,8 +192,11 @@ static inline size_t sdsalloc(const sds s) { } static inline void sdssetalloc(sds s, size_t newlen) { - char 
flags = s[-1]; + unsigned char flags = s[-1]; switch(flags&SDS_TYPE_MASK) { + case SDS_TYPE_5: + /* Nothing to do, this type has no total allocation info. */ + break; case SDS_TYPE_8: SDS_HDR(8,s)->alloc = newlen; break; @@ -193,11 +224,6 @@ sds sdscatsds(sds s, const sds t); sds sdscpylen(sds s, const char *t, size_t len); sds sdscpy(sds s, const char *t); -/* we can add a reference count on top of any - * existing sds. (max up to 63 references) */ -void sdsIncRefcount(sds s); -void sdsDecRefcount(sds s); - sds sdscatvprintf(sds s, const char *fmt, va_list ap); #ifdef __GNUC__ sds sdscatprintf(sds s, const char *fmt, ...) From 3da97ea67f3b25097d5a57aeda9ce5d94461035e Mon Sep 17 00:00:00 2001 From: antirez Date: Thu, 16 Jul 2015 09:14:39 +0200 Subject: [PATCH 0305/1928] Add sdshdr5 to DEBUG structsize. --- src/debug.c | 1 + 1 file changed, 1 insertion(+) diff --git a/src/debug.c b/src/debug.c index 3f7a85357..0d2f24245 100644 --- a/src/debug.c +++ b/src/debug.c @@ -423,6 +423,7 @@ void debugCommand(redisClient *c) { sizes = sdscatprintf(sizes,"bits:%d ",(sizeof(void*) == 8)?64:32); sizes = sdscatprintf(sizes,"robj:%d ",(int)sizeof(robj)); sizes = sdscatprintf(sizes,"dictentry:%d ",(int)sizeof(dictEntry)); + sizes = sdscatprintf(sizes,"sdshdr5:%d ",(int)sizeof(struct sdshdr5)); sizes = sdscatprintf(sizes,"sdshdr8:%d ",(int)sizeof(struct sdshdr8)); sizes = sdscatprintf(sizes,"sdshdr16:%d ",(int)sizeof(struct sdshdr16)); sizes = sdscatprintf(sizes,"sdshdr32:%d ",(int)sizeof(struct sdshdr32)); From e0bb454a16eff5596814e5773540c7e5e5409bde Mon Sep 17 00:00:00 2001 From: antirez Date: Thu, 16 Jul 2015 09:26:36 +0200 Subject: [PATCH 0306/1928] Clarify a comment in clientsCron(). --- src/redis.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/src/redis.c b/src/redis.c index 656c21ff2..61d646363 100644 --- a/src/redis.c +++ b/src/redis.c @@ -973,11 +973,11 @@ int clientsCronResizeQueryBuffer(redisClient *c) { } void clientsCron(void) { - /* Make sure to process at least 1/(server.hz*10) of clients per call. - * Since this function is called server.hz times per second we are sure that - * in the worst case we process all the clients in 10 seconds. - * In normal conditions (a reasonable number of clients) we process - * all the clients in a shorter time. */ + /* Make sure to process at least numclients/(server.hz*10) of clients + * per call. Since this function is called server.hz times per second + * we are sure that in the worst case we process all the clients in 10 + * seconds. In normal conditions (a reasonable number of clients) we + * process all the clients in a shorter time. */ int numclients = listLength(server.clients); int iterations = numclients/(server.hz*10); From 92c146dfd3f756855a37cb50c8fe29c9fb4b12c2 Mon Sep 17 00:00:00 2001 From: Jiahao Huang Date: Tue, 14 Jul 2015 22:32:53 +0800 Subject: [PATCH 0307/1928] config tcp-keepalive should be numerical field not bool --- src/config.c | 6 +++--- src/scripting.c | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/src/config.c b/src/config.c index 6d51b1b08..fd56269ef 100644 --- a/src/config.c +++ b/src/config.c @@ -875,11 +875,11 @@ void configSetCommand(redisClient *c) { "activerehashing",server.activerehashing) { } config_set_bool_field( "stop-writes-on-bgsave-error",server.stop_writes_on_bgsave_err) { - } config_set_bool_field( - "tcp-keepalive",server.tcpkeepalive) { /* Numerical fields. 
* config_set_numerical_field(name,var,min,max) */ + } config_set_numerical_field( + "tcp-keepalive",server.tcpkeepalive,0,LLONG_MAX) { } config_set_numerical_field( "maxmemory-samples",server.maxmemory_samples,1,LLONG_MAX) { } config_set_numerical_field( @@ -1088,9 +1088,9 @@ void configGetCommand(redisClient *c) { config_get_numerical_field("cluster-migration-barrier",server.cluster_migration_barrier); config_get_numerical_field("cluster-slave-validity-factor",server.cluster_slave_validity_factor); config_get_numerical_field("repl-diskless-sync-delay",server.repl_diskless_sync_delay); + config_get_numerical_field("tcp-keepalive",server.tcpkeepalive); /* Bool (yes/no) values */ - config_get_bool_field("tcp-keepalive",server.tcpkeepalive); config_get_bool_field("cluster-require-full-coverage", server.cluster_require_full_coverage); config_get_bool_field("no-appendfsync-on-rewrite", diff --git a/src/scripting.c b/src/scripting.c index 53c0c9ed2..2819c05fb 100644 --- a/src/scripting.c +++ b/src/scripting.c @@ -880,7 +880,7 @@ void luaSetGlobalArray(lua_State *lua, char *var, robj **elev, int elec) { } /* Define a lua function with the specified function name and body. - * The function name musts be a 2 characters long string, since all the + * The function name musts be a 42 characters long string, since all the * functions we defined in the Lua context are in the form: * * f_ From 25e1cb3f040262deb13c9be6a07e19b6cac485f0 Mon Sep 17 00:00:00 2001 From: antirez Date: Thu, 16 Jul 2015 10:54:12 +0200 Subject: [PATCH 0308/1928] Client timeout handling improved. The previos attempt to process each client at least once every ten seconds was not a good idea, because: 1. Usually because of the past min iterations set to 50, you get much better processing period most of the times. 2. However when there are many clients and a normal setting for server.hz, the edge case is triggered, and waiting 10 seconds for a BLPOP that asked for 1 second is not ok. 3. Moreover, because of the high min-itereations limit of 50, when HZ was set to an high value, the actual behavior was to process a lot of clients per second. Also the function checking for timeouts called gettimeofday() at each iteration which can be costly. The new implementation will try to process each client once per second, gets the current time as argument, and does not attempt to process more than 5 clients per iteration if not needed. So now: 1. The CPU usage of an idle Redis process is the same or better. 2. The CPU usage of a busy Redis process is the same or better. 3. However a non trivial amount of work may be performed per iteration when there are many many clients. In this particular case the user may want to raise the "HZ" value if needed. Btw with 4000 clients it was still not possible to noticy any actual latency created by processing 400 clients per second, since the work performed for each client is pretty small. --- src/redis.c | 32 ++++++++++++++++++++------------ 1 file changed, 20 insertions(+), 12 deletions(-) diff --git a/src/redis.c b/src/redis.c index 61d646363..782f9f53f 100644 --- a/src/redis.c +++ b/src/redis.c @@ -912,9 +912,12 @@ long long getInstantaneousMetric(int metric) { return sum / REDIS_METRIC_SAMPLES; } -/* Check for timeouts. Returns non-zero if the client was terminated */ -int clientsCronHandleTimeout(redisClient *c) { - time_t now = server.unixtime; +/* Check for timeouts. Returns non-zero if the client was terminated. 
+ * The function gets the current time in milliseconds as argument since + * it gets called multiple times in a loop, so calling gettimeofday() for + * each iteration would be costly without any actual gain. */ +int clientsCronHandleTimeout(redisClient *c, mstime_t now_ms) { + time_t now = now_ms/1000; if (server.maxidletime && !(c->flags & REDIS_SLAVE) && /* no timeout for slaves */ @@ -930,7 +933,6 @@ int clientsCronHandleTimeout(redisClient *c) { /* Blocked OPS timeout is handled with milliseconds resolution. * However note that the actual resolution is limited by * server.hz. */ - mstime_t now_ms = mstime(); if (c->bpop.timeout != 0 && c->bpop.timeout < now_ms) { /* Handle blocking operation specific timeout. */ @@ -972,17 +974,23 @@ int clientsCronResizeQueryBuffer(redisClient *c) { return 0; } +#define CLIENTS_CRON_MIN_ITERATIONS 5 void clientsCron(void) { - /* Make sure to process at least numclients/(server.hz*10) of clients + /* Make sure to process at least numclients/server.hz of clients * per call. Since this function is called server.hz times per second - * we are sure that in the worst case we process all the clients in 10 - * seconds. In normal conditions (a reasonable number of clients) we - * process all the clients in a shorter time. */ + * we are sure that in the worst case we process all the clients in 1 + * second. */ int numclients = listLength(server.clients); - int iterations = numclients/(server.hz*10); + int iterations = numclients/server.hz; + mstime_t now = mstime(); + + /* Process at least a few clients while we are at it, even if we need + * to process less than CLIENTS_CRON_MIN_ITERATIONS to meet our contract + * of processing each client once per second. */ + if (iterations < CLIENTS_CRON_MIN_ITERATIONS) + iterations = (numclients < CLIENTS_CRON_MIN_ITERATIONS) ? + numclients : CLIENTS_CRON_MIN_ITERATIONS; - if (iterations < 50) - iterations = (numclients < 50) ? numclients : 50; while(listLength(server.clients) && iterations--) { redisClient *c; listNode *head; @@ -996,7 +1004,7 @@ void clientsCron(void) { /* The following functions do different service checks on the client. * The protocol is that they return non-zero if the client was * terminated. 
*/ - if (clientsCronHandleTimeout(c)) continue; + if (clientsCronHandleTimeout(c,now)) continue; if (clientsCronResizeQueryBuffer(c)) continue; } } From 6142ddc6ebf2bdaf75dca73c795e36d1f0470422 Mon Sep 17 00:00:00 2001 From: Tom Kiemes Date: Wed, 15 Jul 2015 16:11:40 +0200 Subject: [PATCH 0309/1928] Fix: aof_delayed_fsync is not reset aof_delayed_fsync was not set to 0 when calling CONFIG RESETSTAT --- src/redis.c | 1 + 1 file changed, 1 insertion(+) diff --git a/src/redis.c b/src/redis.c index 782f9f53f..38402de4c 100644 --- a/src/redis.c +++ b/src/redis.c @@ -1761,6 +1761,7 @@ void resetServerStats(void) { } server.stat_net_input_bytes = 0; server.stat_net_output_bytes = 0; + server.aof_delayed_fsync = 0; } void initServer(void) { From 427794d845534ea0e84e0ef9582f9ae5800c96cf Mon Sep 17 00:00:00 2001 From: Yongyue Sun Date: Fri, 10 Jul 2015 15:25:40 +0800 Subject: [PATCH 0310/1928] bugfix: errno might change before logging Signed-off-by: Yongyue Sun --- src/aof.c | 2 +- src/rdb.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/src/aof.c b/src/aof.c index dc7d11873..01f0ce320 100644 --- a/src/aof.c +++ b/src/aof.c @@ -1146,9 +1146,9 @@ int rewriteAppendOnlyFile(char *filename) { return REDIS_OK; werr: + redisLog(REDIS_WARNING,"Write error writing append only file on disk: %s", strerror(errno)); fclose(fp); unlink(tmpfile); - redisLog(REDIS_WARNING,"Write error writing append only file on disk: %s", strerror(errno)); if (di) dictReleaseIterator(di); return REDIS_ERR; } diff --git a/src/rdb.c b/src/rdb.c index e3236e12c..e4da23ba1 100644 --- a/src/rdb.c +++ b/src/rdb.c @@ -869,9 +869,9 @@ int rdbSave(char *filename) { return REDIS_OK; werr: + redisLog(REDIS_WARNING,"Write error saving DB on disk: %s", strerror(errno)); fclose(fp); unlink(tmpfile); - redisLog(REDIS_WARNING,"Write error saving DB on disk: %s", strerror(errno)); return REDIS_ERR; } From cf68f4ee6a4c466b893fbb269f6aff14c7c75e6a Mon Sep 17 00:00:00 2001 From: antirez Date: Mon, 20 Jul 2015 16:18:06 +0200 Subject: [PATCH 0311/1928] Fix SDS type 5 sdsIncrLen() bug and added test. Thanks to @oranagra for spotting this error. 
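The hunk below shows how narrow the bug was: the old type 5 branch always re-encoded the length as oldlen+1, ignoring the caller's increment. A small standalone reproduction of that arithmetic (assumed helper names, not the real Redis functions) makes the difference visible:

    #include <assert.h>
    #include <stdio.h>

    #define SDS_TYPE_5        0
    #define SDS_TYPE_BITS     3
    #define SDS_TYPE_5_LEN(f) ((f) >> SDS_TYPE_BITS)

    /* Length update as it was before this patch: always bumps the length
     * by exactly one, whatever increment the caller asked for. */
    static unsigned char incrlen_buggy(unsigned char flags, int incr) {
        unsigned char oldlen = SDS_TYPE_5_LEN(flags);
        (void)incr; /* ignored: that is the bug */
        return SDS_TYPE_5 | ((oldlen + 1) << SDS_TYPE_BITS);
    }

    /* Length update as fixed by this patch: honours the increment. */
    static unsigned char incrlen_fixed(unsigned char flags, int incr) {
        unsigned char oldlen = SDS_TYPE_5_LEN(flags);
        return SDS_TYPE_5 | ((oldlen + incr) << SDS_TYPE_BITS);
    }

    int main(void) {
        unsigned char flags = SDS_TYPE_5 | (1 << SDS_TYPE_BITS); /* len = 1 */

        /* Appending 10 bytes at once, as the new test does with step = 10. */
        assert(SDS_TYPE_5_LEN(incrlen_buggy(flags, 10)) == 2);  /* wrong  */
        assert(SDS_TYPE_5_LEN(incrlen_fixed(flags, 10)) == 11); /* right  */
        printf("buggy=%d fixed=%d\n",
               SDS_TYPE_5_LEN(incrlen_buggy(flags, 10)),
               SDS_TYPE_5_LEN(incrlen_fixed(flags, 10)));
        return 0;
    }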
--- src/sds.c | 42 +++++++++++++++++++++++++++--------------- 1 file changed, 27 insertions(+), 15 deletions(-) diff --git a/src/sds.c b/src/sds.c index 4771b6b3e..8283ec438 100644 --- a/src/sds.c +++ b/src/sds.c @@ -305,8 +305,8 @@ void sdsIncrLen(sds s, int incr) { unsigned char *fp = ((unsigned char*)s)-1; unsigned char oldlen = SDS_TYPE_5_LEN(flags); assert((incr > 0 && oldlen+incr < 32) || (incr < 0 && oldlen >= (unsigned int)(-incr))); - *fp = SDS_TYPE_5 | ((oldlen+1) << SDS_TYPE_BITS); - len = oldlen+1; + *fp = SDS_TYPE_5 | ((oldlen+incr) << SDS_TYPE_BITS); + len = oldlen+incr; break; } case SDS_TYPE_8: { @@ -1193,28 +1193,40 @@ int sdsTest(void) { test_cond("sdscatrepr(...data...)", memcmp(y,"\"\\a\\n\\x00foo\\r\"",15) == 0) -#if 0 { unsigned int oldfree; + char *p; + int step = 10, j, i; sdsfree(x); sdsfree(y); x = sdsnew("0"); - sh = (void*) (x-(sizeof(struct sdshdr))); - test_cond("sdsnew() free/len buffers", sh->len == 1 && sh->free == 0); - x = sdsMakeRoomFor(x,1); - sh = (void*) (x-(sizeof(struct sdshdr))); - test_cond("sdsMakeRoomFor()", sh->len == 1 && sh->free > 0); - oldfree = sh->free; - x[1] = '1'; - sdsIncrLen(x,1); - test_cond("sdsIncrLen() -- content", x[0] == '0' && x[1] == '1'); - test_cond("sdsIncrLen() -- len", sh->len == 2); - test_cond("sdsIncrLen() -- free", sh->free == oldfree-1); + test_cond("sdsnew() free/len buffers", sdslen(x) == 1 && sdsavail(x) == 0); + + /* Run the test a few times in order to hit the first two + * SDS header types. */ + for (i = 0; i < 10; i++) { + int oldlen = sdslen(x); + x = sdsMakeRoomFor(x,step); + int type = x[-1]&SDS_TYPE_MASK; + + test_cond("sdsMakeRoomFor() len", sdslen(x) == oldlen); + if (type != SDS_TYPE_5) { + test_cond("sdsMakeRoomFor() free", sdsavail(x) >= step); + oldfree = sdsavail(x); + } + p = x+oldlen; + for (j = 0; j < step; j++) { + p[j] = 'A'+j; + } + sdsIncrLen(x,step); + } + test_cond("sdsMakeRoomFor() content", + memcmp("0ABCDEFGHIJABCDEFGHIJABCDEFGHIJABCDEFGHIJABCDEFGHIJABCDEFGHIJABCDEFGHIJABCDEFGHIJABCDEFGHIJABCDEFGHIJ",x,101) == 0); + test_cond("sdsMakeRoomFor() final length",sdslen(x)==101); sdsfree(x); } -#endif } test_report() return 0; From ea9bd243ecf02760ac7a5e9a25bd2d067b71ee84 Mon Sep 17 00:00:00 2001 From: antirez Date: Thu, 23 Jul 2015 09:16:47 +0200 Subject: [PATCH 0312/1928] SDS: use type 8 if we are likely to append to the string. When empty strings are created, or when sdsMakeRoomFor() is called, we are likely into an appending pattern. Use at least type 8 SDS strings since TYPE 5 does not remember the free allocation size and requires to call sdsMakeRoomFor() at every new piece appended. --- src/sds.c | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/src/sds.c b/src/sds.c index 8283ec438..c9a6583ee 100644 --- a/src/sds.c +++ b/src/sds.c @@ -80,6 +80,9 @@ sds sdsnewlen(const void *init, size_t initlen) { void *sh; sds s; char type = sdsReqType(initlen); + /* Empty strings are usually created in order to append. Use type 8 + * since type 5 is not good at this. */ + if (type == SDS_TYPE_5 && initlen == 0) type = SDS_TYPE_8; int hdrlen = sdsHdrSize(type); unsigned char *fp; /* flags pointer. */ @@ -193,7 +196,9 @@ sds sdsMakeRoomFor(sds s, size_t addlen) { char type, oldtype = s[-1] & SDS_TYPE_MASK; int hdrlen; + /* Return ASAP if there is enough space left. 
*/ if (avail >= addlen) return s; + len = sdslen(s); sh = (char*)s-sdsHdrSize(oldtype); newlen = (len+addlen); @@ -203,6 +208,12 @@ sds sdsMakeRoomFor(sds s, size_t addlen) { newlen += SDS_MAX_PREALLOC; type = sdsReqType(newlen); + + /* Don't use type 5: the user is appending to the string and type 5 is + * not able to remember empty space, so sdsMakeRoomFor() must be called + * at every appending operation. */ + if (type == SDS_TYPE_5) type = SDS_TYPE_8; + hdrlen = sdsHdrSize(type); if (oldtype==type) { newsh = zrealloc(sh, hdrlen+newlen+1); From 64fcd0e6ff1f7a6b85f5432767a298a95eacfe00 Mon Sep 17 00:00:00 2001 From: antirez Date: Fri, 24 Jul 2015 09:39:12 +0200 Subject: [PATCH 0313/1928] SDS: avoid compiler warning in sdsIncrLen(). --- src/sds.c | 1 + 1 file changed, 1 insertion(+) diff --git a/src/sds.c b/src/sds.c index c9a6583ee..d0649dcf7 100644 --- a/src/sds.c +++ b/src/sds.c @@ -344,6 +344,7 @@ void sdsIncrLen(sds s, int incr) { len = (sh->len += incr); break; } + default: len = 0; /* Just to avoid compilation warnings. */ } s[len] = '\0'; } From 6b836b6b4148a3623e35807e998097865b9ebb3a Mon Sep 17 00:00:00 2001 From: antirez Date: Fri, 24 Jul 2015 10:15:04 +0200 Subject: [PATCH 0314/1928] Jemalloc: use LG_QUANTUM of 3 for AMD64 and I386. This gives us a 24 bytes size class which is dict.c dictEntry size, thus improving the memory efficiency of Redis significantly. Moreover other non 16 bytes aligned tiny classes are added that further reduce the fragmentation of the allocator. Technically speaking LG_QUANTUM should be 4 on i386 / AMD64 because of SSE types and other 16 bytes types, however we don't use those, and our jemalloc only targets Redis. New versions of Jemalloc will have an explicit configure switch in order to specify the quantum value for a platform without requiring any change to the Jemalloc source code: we'll switch to this system when available. This change was originally proposed by Oran Agra (@oranagra) as a change to the Jemalloc script to generate the size classes define. We ended doing it differently by changing LG_QUANTUM since it is apparently the supported Jemalloc method to obtain a 24 bytes size class, moreover it also provides us other potentially useful size classes. Related to issue #2510. --- .../jemalloc/include/jemalloc/internal/jemalloc_internal.h.in | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/deps/jemalloc/include/jemalloc/internal/jemalloc_internal.h.in b/deps/jemalloc/include/jemalloc/internal/jemalloc_internal.h.in index 574bbb141..df266abb7 100644 --- a/deps/jemalloc/include/jemalloc/internal/jemalloc_internal.h.in +++ b/deps/jemalloc/include/jemalloc/internal/jemalloc_internal.h.in @@ -242,7 +242,7 @@ static const bool config_ivsalloc = */ #ifndef LG_QUANTUM # if (defined(__i386__) || defined(_M_IX86)) -# define LG_QUANTUM 4 +# define LG_QUANTUM 3 # endif # ifdef __ia64__ # define LG_QUANTUM 4 @@ -254,7 +254,7 @@ static const bool config_ivsalloc = # define LG_QUANTUM 4 # endif # if (defined(__amd64__) || defined(__x86_64__) || defined(_M_X64)) -# define LG_QUANTUM 4 +# define LG_QUANTUM 3 # endif # ifdef __arm__ # define LG_QUANTUM 3 From ef29748d0d758c956f8adca70bc74902b0c2e20c Mon Sep 17 00:00:00 2001 From: Rogerio Goncalves Date: Fri, 24 Jul 2015 14:08:50 +0200 Subject: [PATCH 0315/1928] Check args before run ckquorum. 
Fix issue #2635 --- src/sentinel.c | 1 + 1 file changed, 1 insertion(+) diff --git a/src/sentinel.c b/src/sentinel.c index 3ff8899d7..9ea2ef629 100644 --- a/src/sentinel.c +++ b/src/sentinel.c @@ -3041,6 +3041,7 @@ void sentinelCommand(redisClient *c) { sentinelRedisInstance *ri; int usable; + if (c->argc != 3) goto numargserr; if ((ri = sentinelGetMasterByNameOrReplyError(c,c->argv[2])) == NULL) return; int result = sentinelIsQuorumReachable(ri,&usable); From 11425c89cf4a2c36941ac7b85c5f4d74636d4e45 Mon Sep 17 00:00:00 2001 From: antirez Date: Sat, 25 Jul 2015 17:05:20 +0200 Subject: [PATCH 0316/1928] SDS: sdsjoinsds() call ported from antirez/sds fork. --- src/sds.c | 12 ++++++++++++ src/sds.h | 1 + 2 files changed, 13 insertions(+) diff --git a/src/sds.c b/src/sds.c index d0649dcf7..c1c78e285 100644 --- a/src/sds.c +++ b/src/sds.c @@ -1075,6 +1075,18 @@ sds sdsjoin(char **argv, int argc, char *sep) { return join; } +/* Like sdsjoin, but joins an array of SDS strings. */ +sds sdsjoinsds(sds *argv, int argc, const char *sep, size_t seplen) { + sds join = sdsempty(); + int j; + + for (j = 0; j < argc; j++) { + join = sdscatsds(join, argv[j]); + if (j != argc-1) join = sdscatlen(join,sep,seplen); + } + return join; +} + #if defined(REDIS_TEST) || defined(SDS_TEST_MAIN) #include #include "testhelp.h" diff --git a/src/sds.h b/src/sds.h index 1fcbe1155..7cc8e9c14 100644 --- a/src/sds.h +++ b/src/sds.h @@ -247,6 +247,7 @@ sds sdscatrepr(sds s, const char *p, size_t len); sds *sdssplitargs(const char *line, int *argc); sds sdsmapchars(sds s, const char *from, const char *to, size_t setlen); sds sdsjoin(char **argv, int argc, char *sep); +sds sdsjoinsds(sds *argv, int argc, const char *sep, size_t seplen); /* Low level functions exposed to the user API */ sds sdsMakeRoomFor(sds s, size_t addlen); From 9894495c5a8e37a8e1dad9698e49cf842a027b2e Mon Sep 17 00:00:00 2001 From: antirez Date: Sat, 25 Jul 2015 17:08:44 +0200 Subject: [PATCH 0317/1928] SDS: Copyright notice updated. --- src/sds.c | 5 +++-- src/sds.h | 5 +++-- 2 files changed, 6 insertions(+), 4 deletions(-) diff --git a/src/sds.c b/src/sds.c index c1c78e285..72ab92ed2 100644 --- a/src/sds.c +++ b/src/sds.c @@ -1,6 +1,7 @@ -/* SDSLib, A C dynamic strings library +/* SDSLib 2.0 -- A C dynamic strings library * - * Copyright (c) 2006-2012, Salvatore Sanfilippo + * Copyright (c) 2006-2015, Salvatore Sanfilippo + * Copyright (c) 2015, Redis Labs, Inc * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/src/sds.h b/src/sds.h index 7cc8e9c14..31e04d56f 100644 --- a/src/sds.h +++ b/src/sds.h @@ -1,6 +1,7 @@ -/* SDSLib, A C dynamic strings library +/* SDSLib 2.0 -- A C dynamic strings library * - * Copyright (c) 2006-2010, Salvatore Sanfilippo + * Copyright (c) 2006-2015, Salvatore Sanfilippo + * Copyright (c) 2015, Redis Labs, Inc * All rights reserved. * * Redistribution and use in source and binary forms, with or without From cb2782c314f0af3df56853974f7ba5541c095eeb Mon Sep 17 00:00:00 2001 From: antirez Date: Sat, 25 Jul 2015 17:25:44 +0200 Subject: [PATCH 0318/1928] SDS: changes to unify Redis SDS with antirez/sds repo. 
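One practical consequence of this unification, suggested by the comment in the new sdsalloc.h added below, is that sds.c can be retargeted to a different allocator purely by editing three macros. A hypothetical sdsalloc.h for reusing sds outside Redis with the plain libc allocator could look like the following; the s_malloc/s_realloc/s_free names are the hooks sds.c calls after this patch, while the libc mapping itself is only an assumption for illustration:

    /* Hypothetical alternative sdsalloc.h: route the SDS allocation hooks
     * to the standard libc allocator instead of zmalloc. */
    #include <stdlib.h>

    #define s_malloc  malloc
    #define s_realloc realloc
    #define s_free    free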
--- src/networking.c | 7 +++++++ src/sds.c | 51 ++++++++++++++++++++++++------------------------ src/sds.h | 2 +- src/sdsalloc.h | 42 +++++++++++++++++++++++++++++++++++++++ 4 files changed, 75 insertions(+), 27 deletions(-) create mode 100644 src/sdsalloc.h diff --git a/src/networking.c b/src/networking.c index 12f8d2e9f..fc3746fd9 100644 --- a/src/networking.c +++ b/src/networking.c @@ -33,6 +33,13 @@ static void setProtocolError(redisClient *c, int pos); +/* Return the size consumed from the allocator, for the specified SDS string, + * including internal fragmentation. This function is used in order to compute + * the client output buffer size. */ +size_t sdsZmallocSize(sds s) { + void *sh = sdsAllocPtr(s); + return zmalloc_size(sh); +} /* Return the amount of memory used by the sds string at object->ptr * for a string object. */ diff --git a/src/sds.c b/src/sds.c index 72ab92ed2..aff9cd1f9 100644 --- a/src/sds.c +++ b/src/sds.c @@ -35,7 +35,7 @@ #include #include #include "sds.h" -#include "zmalloc.h" +#include "sdsalloc.h" static inline int sdsHdrSize(char type) { switch(type&SDS_TYPE_MASK) { @@ -87,7 +87,7 @@ sds sdsnewlen(const void *init, size_t initlen) { int hdrlen = sdsHdrSize(type); unsigned char *fp; /* flags pointer. */ - sh = zmalloc(hdrlen+initlen+1); + sh = s_malloc(hdrlen+initlen+1); if (!init) memset(sh, 0, hdrlen+initlen+1); if (sh == NULL) return NULL; @@ -153,7 +153,7 @@ sds sdsdup(const sds s) { /* Free an sds string. No operation is performed if 's' is NULL. */ void sdsfree(sds s) { if (s == NULL) return; - zfree((char*)s-sdsHdrSize(s[-1])); + s_free((char*)s-sdsHdrSize(s[-1])); } /* Set the sds string length to the length as obtained with strlen(), so @@ -217,16 +217,16 @@ sds sdsMakeRoomFor(sds s, size_t addlen) { hdrlen = sdsHdrSize(type); if (oldtype==type) { - newsh = zrealloc(sh, hdrlen+newlen+1); + newsh = s_realloc(sh, hdrlen+newlen+1); if (newsh == NULL) return NULL; s = (char*)newsh+hdrlen; } else { /* Since the header size changes, need to move the string forward, * and can't use realloc */ - newsh = zmalloc(hdrlen+newlen+1); + newsh = s_malloc(hdrlen+newlen+1); if (newsh == NULL) return NULL; memcpy((char*)newsh+hdrlen, s, len+1); - zfree(sh); + s_free(sh); s = (char*)newsh+hdrlen; s[-1] = type; sdssetlen(s, len); @@ -251,14 +251,14 @@ sds sdsRemoveFreeSpace(sds s) { type = sdsReqType(len); hdrlen = sdsHdrSize(type); if (oldtype==type) { - newsh = zrealloc(sh, hdrlen+len+1); + newsh = s_realloc(sh, hdrlen+len+1); if (newsh == NULL) return NULL; s = (char*)newsh+hdrlen; } else { - newsh = zmalloc(hdrlen+len+1); + newsh = s_malloc(hdrlen+len+1); if (newsh == NULL) return NULL; memcpy((char*)newsh+hdrlen, s, len+1); - zfree(sh); + s_free(sh); s = (char*)newsh+hdrlen; s[-1] = type; sdssetlen(s, len); @@ -279,11 +279,10 @@ size_t sdsAllocSize(sds s) { return sdsHdrSize(s[-1])+alloc+1; } -/* Return the size consumed from the allocator, - * including internal fragmentation */ -size_t sdsZmallocSize(sds s) { - struct sdshdr *sh = (void*) (s-sdsHdrSize(s[-1])); - return zmalloc_size(sh); +/* Return the pointer of the actual SDS allocation (normally SDS strings + * are referenced by the start of the string buffer). */ +void *sdsAllocPtr(sds s) { + return (void*) (s-sdsHdrSize(s[-1])); } /* Increment the sds length and decrements the left free space at the @@ -506,7 +505,7 @@ sds sdscatvprintf(sds s, const char *fmt, va_list ap) { /* We try to start using a static buffer for speed. * If not possible we revert to heap allocation. 
*/ if (buflen > sizeof(staticbuf)) { - buf = zmalloc(buflen); + buf = s_malloc(buflen); if (buf == NULL) return NULL; } else { buflen = sizeof(staticbuf); @@ -520,9 +519,9 @@ sds sdscatvprintf(sds s, const char *fmt, va_list ap) { vsnprintf(buf, buflen, fmt, cpy); va_end(cpy); if (buf[buflen-2] != '\0') { - if (buf != staticbuf) zfree(buf); + if (buf != staticbuf) s_free(buf); buflen *= 2; - buf = zmalloc(buflen); + buf = s_malloc(buflen); if (buf == NULL) return NULL; continue; } @@ -531,7 +530,7 @@ sds sdscatvprintf(sds s, const char *fmt, va_list ap) { /* Finally concat the obtained string to the SDS string and return it. */ t = sdscat(s, buf); - if (buf != staticbuf) zfree(buf); + if (buf != staticbuf) s_free(buf); return t; } @@ -798,7 +797,7 @@ sds *sdssplitlen(const char *s, int len, const char *sep, int seplen, int *count if (seplen < 1 || len < 0) return NULL; - tokens = zmalloc(sizeof(sds)*slots); + tokens = s_malloc(sizeof(sds)*slots); if (tokens == NULL) return NULL; if (len == 0) { @@ -811,7 +810,7 @@ sds *sdssplitlen(const char *s, int len, const char *sep, int seplen, int *count sds *newtokens; slots *= 2; - newtokens = zrealloc(tokens,sizeof(sds)*slots); + newtokens = s_realloc(tokens,sizeof(sds)*slots); if (newtokens == NULL) goto cleanup; tokens = newtokens; } @@ -835,7 +834,7 @@ cleanup: { int i; for (i = 0; i < elements; i++) sdsfree(tokens[i]); - zfree(tokens); + s_free(tokens); *count = 0; return NULL; } @@ -846,7 +845,7 @@ void sdsfreesplitres(sds *tokens, int count) { if (!tokens) return; while(count--) sdsfree(tokens[count]); - zfree(tokens); + s_free(tokens); } /* Append to the sds string "s" an escaped string representation where @@ -1020,13 +1019,13 @@ sds *sdssplitargs(const char *line, int *argc) { if (*p) p++; } /* add the token to the vector */ - vector = zrealloc(vector,((*argc)+1)*sizeof(char*)); + vector = s_realloc(vector,((*argc)+1)*sizeof(char*)); vector[*argc] = current; (*argc)++; current = NULL; } else { /* Even on empty input string return something not NULL. */ - if (vector == NULL) vector = zmalloc(sizeof(void*)); + if (vector == NULL) vector = s_malloc(sizeof(void*)); return vector; } } @@ -1034,7 +1033,7 @@ sds *sdssplitargs(const char *line, int *argc) { err: while((*argc)--) sdsfree(vector[*argc]); - zfree(vector); + s_free(vector); if (current) sdsfree(current); *argc = 0; return NULL; @@ -1088,7 +1087,7 @@ sds sdsjoinsds(sds *argv, int argc, const char *sep, size_t seplen) { return join; } -#if defined(REDIS_TEST) || defined(SDS_TEST_MAIN) +#if defined(SDS_TEST_MAIN) #include #include "testhelp.h" #include "limits.h" diff --git a/src/sds.h b/src/sds.h index 31e04d56f..20eaac0ce 100644 --- a/src/sds.h +++ b/src/sds.h @@ -255,7 +255,7 @@ sds sdsMakeRoomFor(sds s, size_t addlen); void sdsIncrLen(sds s, int incr); sds sdsRemoveFreeSpace(sds s); size_t sdsAllocSize(sds s); -size_t sdsZmallocSize(sds s); +void *sdsAllocPtr(sds s); #ifdef REDIS_TEST int sdsTest(int argc, char *argv[]); diff --git a/src/sdsalloc.h b/src/sdsalloc.h new file mode 100644 index 000000000..531d41929 --- /dev/null +++ b/src/sdsalloc.h @@ -0,0 +1,42 @@ +/* SDSLib 2.0 -- A C dynamic strings library + * + * Copyright (c) 2006-2015, Salvatore Sanfilippo + * Copyright (c) 2015, Redis Labs, Inc + * All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Redis nor the names of its contributors may be used + * to endorse or promote products derived from this software without + * specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +/* SDS allocator selection. + * + * This file is used in order to change the SDS allocator at compile time. + * Just define the following defines to what you want to use. Also add + * the include of your alternate allocator if needed (not needed in order + * to use the default libc allocator). */ + +#include "zmalloc.h" +#define s_malloc zmalloc +#define s_realloc zrealloc +#define s_free zfree From c6333def1347c104e0861d21721121824554acee Mon Sep 17 00:00:00 2001 From: antirez Date: Sat, 25 Jul 2015 17:41:56 +0200 Subject: [PATCH 0319/1928] SDS: Copyright updated further. --- src/sds.c | 1 + src/sds.h | 1 + 2 files changed, 2 insertions(+) diff --git a/src/sds.c b/src/sds.c index aff9cd1f9..f1e4f949d 100644 --- a/src/sds.c +++ b/src/sds.c @@ -1,6 +1,7 @@ /* SDSLib 2.0 -- A C dynamic strings library * * Copyright (c) 2006-2015, Salvatore Sanfilippo + * Copyright (c) 2015, Oran Agra * Copyright (c) 2015, Redis Labs, Inc * All rights reserved. * diff --git a/src/sds.h b/src/sds.h index 20eaac0ce..d7eb4c7d0 100644 --- a/src/sds.h +++ b/src/sds.h @@ -1,6 +1,7 @@ /* SDSLib 2.0 -- A C dynamic strings library * * Copyright (c) 2006-2015, Salvatore Sanfilippo + * Copyright (c) 2015, Oran Agra * Copyright (c) 2015, Redis Labs, Inc * All rights reserved. * From b684e2dad1d884af7bf1047febbb54ee6e0324ba Mon Sep 17 00:00:00 2001 From: antirez Date: Sat, 25 Jul 2015 17:50:47 +0200 Subject: [PATCH 0320/1928] deps/hiredis SDS updated to version 2.0.0. 
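The size win of the new layout over the old two-unsigned-int hiredis header can be checked with a few sizeof() calls. The structs in this sketch are copied from the diff that follows (the old one is the struct this patch removes); the overhead figures in the comment assume a GCC/Clang-style compiler that honours __attribute__((__packed__)) on a typical LP64 build:

    #include <stdint.h>
    #include <stdio.h>

    /* Old hiredis header (removed by this patch): two fixed unsigned ints. */
    struct old_sdshdr { unsigned int len; unsigned int free; char buf[]; };

    /* Two of the new packed headers, as defined in the diff below. */
    struct __attribute__ ((__packed__)) sdshdr5 { unsigned char flags; char buf[]; };
    struct __attribute__ ((__packed__)) sdshdr8 { uint8_t len; uint8_t alloc; unsigned char flags; char buf[]; };

    int main(void) {
        /* Typically prints old=8 sdshdr5=1 sdshdr8=3: short strings now pay
         * 1 or 3 bytes of header instead of 8. */
        printf("old=%zu sdshdr5=%zu sdshdr8=%zu\n",
               sizeof(struct old_sdshdr), sizeof(struct sdshdr5),
               sizeof(struct sdshdr8));
        return 0;
    }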
--- deps/hiredis/sds.c | 426 +++++++++++++++++++++++++++------------- deps/hiredis/sds.h | 186 ++++++++++++++++-- deps/hiredis/sdsalloc.h | 42 ++++ 3 files changed, 511 insertions(+), 143 deletions(-) create mode 100644 deps/hiredis/sdsalloc.h diff --git a/deps/hiredis/sds.c b/deps/hiredis/sds.c index 95454e997..f1e4f949d 100644 --- a/deps/hiredis/sds.c +++ b/deps/hiredis/sds.c @@ -1,6 +1,8 @@ -/* SDSLib, A C dynamic strings library +/* SDSLib 2.0 -- A C dynamic strings library * - * Copyright (c) 2006-2012, Salvatore Sanfilippo + * Copyright (c) 2006-2015, Salvatore Sanfilippo + * Copyright (c) 2015, Oran Agra + * Copyright (c) 2015, Redis Labs, Inc * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -34,7 +36,35 @@ #include #include #include "sds.h" -#include "zmalloc.h" +#include "sdsalloc.h" + +static inline int sdsHdrSize(char type) { + switch(type&SDS_TYPE_MASK) { + case SDS_TYPE_5: + return sizeof(struct sdshdr5); + case SDS_TYPE_8: + return sizeof(struct sdshdr8); + case SDS_TYPE_16: + return sizeof(struct sdshdr16); + case SDS_TYPE_32: + return sizeof(struct sdshdr32); + case SDS_TYPE_64: + return sizeof(struct sdshdr64); + } + return 0; +} + +static inline char sdsReqType(size_t string_size) { + if (string_size < 32) + return SDS_TYPE_5; + if (string_size < 0xff) + return SDS_TYPE_8; + if (string_size < 0xffff) + return SDS_TYPE_16; + if (string_size < 0xffffffff) + return SDS_TYPE_32; + return SDS_TYPE_64; +} /* Create a new sds string with the content specified by the 'init' pointer * and 'initlen'. @@ -43,26 +73,65 @@ * The string is always null-termined (all the sds strings are, always) so * even if you create an sds string with: * - * mystring = sdsnewlen("abc",3"); + * mystring = sdsnewlen("abc",3); * * You can print the string with printf() as there is an implicit \0 at the * end of the string. However the string is binary safe and can contain * \0 characters in the middle, as the length is stored in the sds header. */ sds sdsnewlen(const void *init, size_t initlen) { - struct sdshdr *sh; + void *sh; + sds s; + char type = sdsReqType(initlen); + /* Empty strings are usually created in order to append. Use type 8 + * since type 5 is not good at this. */ + if (type == SDS_TYPE_5 && initlen == 0) type = SDS_TYPE_8; + int hdrlen = sdsHdrSize(type); + unsigned char *fp; /* flags pointer. */ - if (init) { - sh = zmalloc(sizeof(struct sdshdr)+initlen+1); - } else { - sh = zcalloc(sizeof(struct sdshdr)+initlen+1); - } + sh = s_malloc(hdrlen+initlen+1); + if (!init) + memset(sh, 0, hdrlen+initlen+1); if (sh == NULL) return NULL; - sh->len = initlen; - sh->free = 0; + s = (char*)sh+hdrlen; + fp = ((unsigned char*)s)-1; + switch(type) { + case SDS_TYPE_5: { + *fp = type | (initlen << SDS_TYPE_BITS); + break; + } + case SDS_TYPE_8: { + SDS_HDR_VAR(8,s); + sh->len = initlen; + sh->alloc = initlen; + *fp = type; + break; + } + case SDS_TYPE_16: { + SDS_HDR_VAR(16,s); + sh->len = initlen; + sh->alloc = initlen; + *fp = type; + break; + } + case SDS_TYPE_32: { + SDS_HDR_VAR(32,s); + sh->len = initlen; + sh->alloc = initlen; + *fp = type; + break; + } + case SDS_TYPE_64: { + SDS_HDR_VAR(64,s); + sh->len = initlen; + sh->alloc = initlen; + *fp = type; + break; + } + } if (initlen && init) - memcpy(sh->buf, init, initlen); - sh->buf[initlen] = '\0'; - return (char*)sh->buf; + memcpy(s, init, initlen); + s[initlen] = '\0'; + return s; } /* Create an empty (zero length) sds string. 
Even in this case the string @@ -71,7 +140,7 @@ sds sdsempty(void) { return sdsnewlen("",0); } -/* Create a new sds string starting from a null termined C string. */ +/* Create a new sds string starting from a null terminated C string. */ sds sdsnew(const char *init) { size_t initlen = (init == NULL) ? 0 : strlen(init); return sdsnewlen(init, initlen); @@ -85,7 +154,7 @@ sds sdsdup(const sds s) { /* Free an sds string. No operation is performed if 's' is NULL. */ void sdsfree(sds s) { if (s == NULL) return; - zfree(s-sizeof(struct sdshdr)); + s_free((char*)s-sdsHdrSize(s[-1])); } /* Set the sds string length to the length as obtained with strlen(), so @@ -103,21 +172,17 @@ void sdsfree(sds s) { * the output will be "6" as the string was modified but the logical length * remains 6 bytes. */ void sdsupdatelen(sds s) { - struct sdshdr *sh = (void*) (s-(sizeof(struct sdshdr))); int reallen = strlen(s); - sh->free += (sh->len-reallen); - sh->len = reallen; + sdssetlen(s, reallen); } -/* Modify an sds string on-place to make it empty (zero length). +/* Modify an sds string in-place to make it empty (zero length). * However all the existing buffer is not discarded but set as free space * so that next append operations will not require allocations up to the * number of bytes previously available. */ void sdsclear(sds s) { - struct sdshdr *sh = (void*) (s-(sizeof(struct sdshdr))); - sh->free += sh->len; - sh->len = 0; - sh->buf[0] = '\0'; + sdssetlen(s, 0); + s[0] = '\0'; } /* Enlarge the free space at the end of the sds string so that the caller @@ -127,23 +192,48 @@ void sdsclear(sds s) { * Note: this does not change the *length* of the sds string as returned * by sdslen(), but only the free buffer space we have. */ sds sdsMakeRoomFor(sds s, size_t addlen) { - struct sdshdr *sh, *newsh; - size_t free = sdsavail(s); + void *sh, *newsh; + size_t avail = sdsavail(s); size_t len, newlen; + char type, oldtype = s[-1] & SDS_TYPE_MASK; + int hdrlen; + + /* Return ASAP if there is enough space left. */ + if (avail >= addlen) return s; - if (free >= addlen) return s; len = sdslen(s); - sh = (void*) (s-(sizeof(struct sdshdr))); + sh = (char*)s-sdsHdrSize(oldtype); newlen = (len+addlen); if (newlen < SDS_MAX_PREALLOC) newlen *= 2; else newlen += SDS_MAX_PREALLOC; - newsh = zrealloc(sh, sizeof(struct sdshdr)+newlen+1); - if (newsh == NULL) return NULL; - newsh->free = newlen - len; - return newsh->buf; + type = sdsReqType(newlen); + + /* Don't use type 5: the user is appending to the string and type 5 is + * not able to remember empty space, so sdsMakeRoomFor() must be called + * at every appending operation. */ + if (type == SDS_TYPE_5) type = SDS_TYPE_8; + + hdrlen = sdsHdrSize(type); + if (oldtype==type) { + newsh = s_realloc(sh, hdrlen+newlen+1); + if (newsh == NULL) return NULL; + s = (char*)newsh+hdrlen; + } else { + /* Since the header size changes, need to move the string forward, + * and can't use realloc */ + newsh = s_malloc(hdrlen+newlen+1); + if (newsh == NULL) return NULL; + memcpy((char*)newsh+hdrlen, s, len+1); + s_free(sh); + s = (char*)newsh+hdrlen; + s[-1] = type; + sdssetlen(s, len); + } + sdssetalloc(s, newlen); + return s; } /* Reallocate the sds string so that it has no free space at the end. The @@ -153,12 +243,29 @@ sds sdsMakeRoomFor(sds s, size_t addlen) { * After the call, the passed sds string is no longer valid and all the * references must be substituted with the new pointer returned by the call. 
*/ sds sdsRemoveFreeSpace(sds s) { - struct sdshdr *sh; + void *sh, *newsh; + char type, oldtype = s[-1] & SDS_TYPE_MASK; + int hdrlen; + size_t len = sdslen(s); + sh = (char*)s-sdsHdrSize(oldtype); - sh = (void*) (s-(sizeof(struct sdshdr))); - sh = zrealloc(sh, sizeof(struct sdshdr)+sh->len+1); - sh->free = 0; - return sh->buf; + type = sdsReqType(len); + hdrlen = sdsHdrSize(type); + if (oldtype==type) { + newsh = s_realloc(sh, hdrlen+len+1); + if (newsh == NULL) return NULL; + s = (char*)newsh+hdrlen; + } else { + newsh = s_malloc(hdrlen+len+1); + if (newsh == NULL) return NULL; + memcpy((char*)newsh+hdrlen, s, len+1); + s_free(sh); + s = (char*)newsh+hdrlen; + s[-1] = type; + sdssetlen(s, len); + } + sdssetalloc(s, len); + return s; } /* Return the total size of the allocation of the specifed sds string, @@ -169,9 +276,14 @@ sds sdsRemoveFreeSpace(sds s) { * 4) The implicit null term. */ size_t sdsAllocSize(sds s) { - struct sdshdr *sh = (void*) (s-(sizeof(struct sdshdr))); + size_t alloc = sdsalloc(s); + return sdsHdrSize(s[-1])+alloc+1; +} - return sizeof(*sh)+sh->len+sh->free+1; +/* Return the pointer of the actual SDS allocation (normally SDS strings + * are referenced by the start of the string buffer). */ +void *sdsAllocPtr(sds s) { + return (void*) (s-sdsHdrSize(s[-1])); } /* Increment the sds length and decrements the left free space at the @@ -198,15 +310,44 @@ size_t sdsAllocSize(sds s) { * sdsIncrLen(s, nread); */ void sdsIncrLen(sds s, int incr) { - struct sdshdr *sh = (void*) (s-(sizeof(struct sdshdr))); - - if (incr >= 0) - assert(sh->free >= (unsigned int)incr); - else - assert(sh->len >= (unsigned int)(-incr)); - sh->len += incr; - sh->free -= incr; - s[sh->len] = '\0'; + unsigned char flags = s[-1]; + size_t len; + switch(flags&SDS_TYPE_MASK) { + case SDS_TYPE_5: { + unsigned char *fp = ((unsigned char*)s)-1; + unsigned char oldlen = SDS_TYPE_5_LEN(flags); + assert((incr > 0 && oldlen+incr < 32) || (incr < 0 && oldlen >= (unsigned int)(-incr))); + *fp = SDS_TYPE_5 | ((oldlen+incr) << SDS_TYPE_BITS); + len = oldlen+incr; + break; + } + case SDS_TYPE_8: { + SDS_HDR_VAR(8,s); + assert((incr >= 0 && sh->alloc-sh->len >= incr) || (incr < 0 && sh->len >= (unsigned int)(-incr))); + len = (sh->len += incr); + break; + } + case SDS_TYPE_16: { + SDS_HDR_VAR(16,s); + assert((incr >= 0 && sh->alloc-sh->len >= incr) || (incr < 0 && sh->len >= (unsigned int)(-incr))); + len = (sh->len += incr); + break; + } + case SDS_TYPE_32: { + SDS_HDR_VAR(32,s); + assert((incr >= 0 && sh->alloc-sh->len >= (unsigned int)incr) || (incr < 0 && sh->len >= (unsigned int)(-incr))); + len = (sh->len += incr); + break; + } + case SDS_TYPE_64: { + SDS_HDR_VAR(64,s); + assert((incr >= 0 && sh->alloc-sh->len >= (uint64_t)incr) || (incr < 0 && sh->len >= (uint64_t)(-incr))); + len = (sh->len += incr); + break; + } + default: len = 0; /* Just to avoid compilation warnings. */ + } + s[len] = '\0'; } /* Grow the sds to have the specified length. Bytes that were not part of @@ -215,19 +356,15 @@ void sdsIncrLen(sds s, int incr) { * if the specified length is smaller than the current length, no operation * is performed. 
*/ sds sdsgrowzero(sds s, size_t len) { - struct sdshdr *sh = (void*)(s-(sizeof(struct sdshdr))); - size_t totlen, curlen = sh->len; + size_t curlen = sdslen(s); if (len <= curlen) return s; s = sdsMakeRoomFor(s,len-curlen); if (s == NULL) return NULL; /* Make sure added region doesn't contain garbage */ - sh = (void*)(s-(sizeof(struct sdshdr))); memset(s+curlen,0,(len-curlen+1)); /* also set trailing \0 byte */ - totlen = sh->len+sh->free; - sh->len = len; - sh->free = totlen-sh->len; + sdssetlen(s, len); return s; } @@ -237,15 +374,12 @@ sds sdsgrowzero(sds s, size_t len) { * After the call, the passed sds string is no longer valid and all the * references must be substituted with the new pointer returned by the call. */ sds sdscatlen(sds s, const void *t, size_t len) { - struct sdshdr *sh; size_t curlen = sdslen(s); s = sdsMakeRoomFor(s,len); if (s == NULL) return NULL; - sh = (void*) (s-(sizeof(struct sdshdr))); memcpy(s+curlen, t, len); - sh->len = curlen+len; - sh->free = sh->free-len; + sdssetlen(s, curlen+len); s[curlen+len] = '\0'; return s; } @@ -269,19 +403,13 @@ sds sdscatsds(sds s, const sds t) { /* Destructively modify the sds string 's' to hold the specified binary * safe string pointed by 't' of length 'len' bytes. */ sds sdscpylen(sds s, const char *t, size_t len) { - struct sdshdr *sh = (void*) (s-(sizeof(struct sdshdr))); - size_t totlen = sh->free+sh->len; - - if (totlen < len) { - s = sdsMakeRoomFor(s,len-sh->len); + if (sdsalloc(s) < len) { + s = sdsMakeRoomFor(s,len-sdslen(s)); if (s == NULL) return NULL; - sh = (void*) (s-(sizeof(struct sdshdr))); - totlen = sh->free+sh->len; } memcpy(s, t, len); s[len] = '\0'; - sh->len = len; - sh->free = totlen-len; + sdssetlen(s, len); return s; } @@ -295,7 +423,7 @@ sds sdscpy(sds s, const char *t) { * conversion. 's' must point to a string with room for at least * SDS_LLSTR_SIZE bytes. * - * The function returns the lenght of the null-terminated string + * The function returns the length of the null-terminated string * representation stored at 's'. */ #define SDS_LLSTR_SIZE 21 int sdsll2str(char *s, long long value) { @@ -369,7 +497,7 @@ sds sdsfromlonglong(long long value) { return sdsnewlen(buf,len); } -/* Like sdscatpritf() but gets va_list instead of being variadic. */ +/* Like sdscatprintf() but gets va_list instead of being variadic. */ sds sdscatvprintf(sds s, const char *fmt, va_list ap) { va_list cpy; char staticbuf[1024], *buf = staticbuf, *t; @@ -378,7 +506,7 @@ sds sdscatvprintf(sds s, const char *fmt, va_list ap) { /* We try to start using a static buffer for speed. * If not possible we revert to heap allocation. */ if (buflen > sizeof(staticbuf)) { - buf = zmalloc(buflen); + buf = s_malloc(buflen); if (buf == NULL) return NULL; } else { buflen = sizeof(staticbuf); @@ -390,11 +518,11 @@ sds sdscatvprintf(sds s, const char *fmt, va_list ap) { buf[buflen-2] = '\0'; va_copy(cpy,ap); vsnprintf(buf, buflen, fmt, cpy); - va_end(ap); + va_end(cpy); if (buf[buflen-2] != '\0') { - if (buf != staticbuf) zfree(buf); + if (buf != staticbuf) s_free(buf); buflen *= 2; - buf = zmalloc(buflen); + buf = s_malloc(buflen); if (buf == NULL) return NULL; continue; } @@ -403,7 +531,7 @@ sds sdscatvprintf(sds s, const char *fmt, va_list ap) { /* Finally concat the obtained string to the SDS string and return it. 
*/ t = sdscat(s, buf); - if (buf != staticbuf) zfree(buf); + if (buf != staticbuf) s_free(buf); return t; } @@ -415,7 +543,7 @@ sds sdscatvprintf(sds s, const char *fmt, va_list ap) { * * Example: * - * s = sdsempty("Sum is: "); + * s = sdsnew("Sum is: "); * s = sdscatprintf(s,"%d+%d = %d",a,b,a+b). * * Often you need to create a string from scratch with the printf-alike @@ -449,7 +577,6 @@ sds sdscatprintf(sds s, const char *fmt, ...) { * %% - Verbatim "%" character. */ sds sdscatfmt(sds s, char const *fmt, ...) { - struct sdshdr *sh = (void*) (s-(sizeof(struct sdshdr))); size_t initlen = sdslen(s); const char *f = fmt; int i; @@ -460,14 +587,13 @@ sds sdscatfmt(sds s, char const *fmt, ...) { i = initlen; /* Position of the next byte to write to dest str. */ while(*f) { char next, *str; - unsigned int l; + size_t l; long long num; unsigned long long unum; /* Make sure there is always space for at least 1 char. */ - if (sh->free == 0) { + if (sdsavail(s)==0) { s = sdsMakeRoomFor(s,1); - sh = (void*) (s-(sizeof(struct sdshdr))); } switch(*f) { @@ -479,13 +605,11 @@ sds sdscatfmt(sds s, char const *fmt, ...) { case 'S': str = va_arg(ap,char*); l = (next == 's') ? strlen(str) : sdslen(str); - if (sh->free < l) { + if (sdsavail(s) < l) { s = sdsMakeRoomFor(s,l); - sh = (void*) (s-(sizeof(struct sdshdr))); } memcpy(s+i,str,l); - sh->len += l; - sh->free -= l; + sdsinclen(s,l); i += l; break; case 'i': @@ -497,13 +621,11 @@ sds sdscatfmt(sds s, char const *fmt, ...) { { char buf[SDS_LLSTR_SIZE]; l = sdsll2str(buf,num); - if (sh->free < l) { + if (sdsavail(s) < l) { s = sdsMakeRoomFor(s,l); - sh = (void*) (s-(sizeof(struct sdshdr))); } memcpy(s+i,buf,l); - sh->len += l; - sh->free -= l; + sdsinclen(s,l); i += l; } break; @@ -516,27 +638,23 @@ sds sdscatfmt(sds s, char const *fmt, ...) { { char buf[SDS_LLSTR_SIZE]; l = sdsull2str(buf,unum); - if (sh->free < l) { + if (sdsavail(s) < l) { s = sdsMakeRoomFor(s,l); - sh = (void*) (s-(sizeof(struct sdshdr))); } memcpy(s+i,buf,l); - sh->len += l; - sh->free -= l; + sdsinclen(s,l); i += l; } break; default: /* Handle %% and generally %. */ s[i++] = next; - sh->len += 1; - sh->free -= 1; + sdsinclen(s,1); break; } break; default: s[i++] = *f; - sh->len += 1; - sh->free -= 1; + sdsinclen(s,1); break; } f++; @@ -557,25 +675,23 @@ sds sdscatfmt(sds s, char const *fmt, ...) { * Example: * * s = sdsnew("AA...AA.a.aa.aHelloWorld :::"); - * s = sdstrim(s,"A. :"); + * s = sdstrim(s,"Aa. :"); * printf("%s\n", s); * * Output will be just "Hello World". */ sds sdstrim(sds s, const char *cset) { - struct sdshdr *sh = (void*) (s-(sizeof(struct sdshdr))); char *start, *end, *sp, *ep; size_t len; sp = start = s; ep = end = s+sdslen(s)-1; while(sp <= end && strchr(cset, *sp)) sp++; - while(ep > start && strchr(cset, *ep)) ep--; + while(ep > sp && strchr(cset, *ep)) ep--; len = (sp > ep) ? 
0 : ((ep-sp)+1); - if (sh->buf != sp) memmove(sh->buf, sp, len); - sh->buf[len] = '\0'; - sh->free = sh->free+(sh->len-len); - sh->len = len; + if (s != sp) memmove(s, sp, len); + s[len] = '\0'; + sdssetlen(s,len); return s; } @@ -596,7 +712,6 @@ sds sdstrim(sds s, const char *cset) { * sdsrange(s,1,-1); => "ello World" */ void sdsrange(sds s, int start, int end) { - struct sdshdr *sh = (void*) (s-(sizeof(struct sdshdr))); size_t newlen, len = sdslen(s); if (len == 0) return; @@ -619,10 +734,9 @@ void sdsrange(sds s, int start, int end) { } else { start = 0; } - if (start && newlen) memmove(sh->buf, sh->buf+start, newlen); - sh->buf[newlen] = 0; - sh->free = sh->free+(sh->len-newlen); - sh->len = newlen; + if (start && newlen) memmove(s, s+start, newlen); + s[newlen] = 0; + sdssetlen(s,newlen); } /* Apply tolower() to every character of the sds string 's'. */ @@ -643,8 +757,8 @@ void sdstoupper(sds s) { * * Return value: * - * 1 if s1 > s2. - * -1 if s1 < s2. + * positive if s1 > s2. + * negative if s1 < s2. * 0 if s1 and s2 are exactly the same binary string. * * If two strings share exactly the same prefix, but one of the two has @@ -684,7 +798,7 @@ sds *sdssplitlen(const char *s, int len, const char *sep, int seplen, int *count if (seplen < 1 || len < 0) return NULL; - tokens = zmalloc(sizeof(sds)*slots); + tokens = s_malloc(sizeof(sds)*slots); if (tokens == NULL) return NULL; if (len == 0) { @@ -697,7 +811,7 @@ sds *sdssplitlen(const char *s, int len, const char *sep, int seplen, int *count sds *newtokens; slots *= 2; - newtokens = zrealloc(tokens,sizeof(sds)*slots); + newtokens = s_realloc(tokens,sizeof(sds)*slots); if (newtokens == NULL) goto cleanup; tokens = newtokens; } @@ -721,7 +835,7 @@ cleanup: { int i; for (i = 0; i < elements; i++) sdsfree(tokens[i]); - zfree(tokens); + s_free(tokens); *count = 0; return NULL; } @@ -732,7 +846,7 @@ void sdsfreesplitres(sds *tokens, int count) { if (!tokens) return; while(count--) sdsfree(tokens[count]); - zfree(tokens); + s_free(tokens); } /* Append to the sds string "s" an escaped string representation where @@ -906,13 +1020,13 @@ sds *sdssplitargs(const char *line, int *argc) { if (*p) p++; } /* add the token to the vector */ - vector = zrealloc(vector,((*argc)+1)*sizeof(char*)); + vector = s_realloc(vector,((*argc)+1)*sizeof(char*)); vector[*argc] = current; (*argc)++; current = NULL; } else { /* Even on empty input string return something not NULL. */ - if (vector == NULL) vector = zmalloc(sizeof(void*)); + if (vector == NULL) vector = s_malloc(sizeof(void*)); return vector; } } @@ -920,7 +1034,7 @@ sds *sdssplitargs(const char *line, int *argc) { err: while((*argc)--) sdsfree(vector[*argc]); - zfree(vector); + s_free(vector); if (current) sdsfree(current); *argc = 0; return NULL; @@ -962,14 +1076,26 @@ sds sdsjoin(char **argv, int argc, char *sep) { return join; } -#ifdef SDS_TEST_MAIN +/* Like sdsjoin, but joins an array of SDS strings. */ +sds sdsjoinsds(sds *argv, int argc, const char *sep, size_t seplen) { + sds join = sdsempty(); + int j; + + for (j = 0; j < argc; j++) { + join = sdscatsds(join, argv[j]); + if (j != argc-1) join = sdscatlen(join,sep,seplen); + } + return join; +} + +#if defined(SDS_TEST_MAIN) #include #include "testhelp.h" #include "limits.h" -int main(void) { +#define UNUSED(x) (void)(x) +int sdsTest(void) { { - struct sdshdr *sh; sds x = sdsnew("foo"), y; test_cond("Create a string and obtain the length", @@ -1005,6 +1131,7 @@ int main(void) { sdslen(x) == 60 && memcmp(x,"--Hello Hi! 
World -9223372036854775808," "9223372036854775807--",60) == 0) + printf("[%s]\n",x); sdsfree(x); x = sdsnew("--"); @@ -1013,6 +1140,18 @@ int main(void) { sdslen(x) == 35 && memcmp(x,"--4294967295,18446744073709551615--",35) == 0) + sdsfree(x); + x = sdsnew(" x "); + sdstrim(x," x"); + test_cond("sdstrim() works when all chars match", + sdslen(x) == 0) + + sdsfree(x); + x = sdsnew(" x "); + sdstrim(x," "); + test_cond("sdstrim() works when a single char remains", + sdslen(x) == 1 && x[0] == 'x') + sdsfree(x); x = sdsnew("xxciaoyyy"); sdstrim(x,"xy"); @@ -1080,24 +1219,47 @@ int main(void) { memcmp(y,"\"\\a\\n\\x00foo\\r\"",15) == 0) { - int oldfree; + unsigned int oldfree; + char *p; + int step = 10, j, i; sdsfree(x); + sdsfree(y); x = sdsnew("0"); - sh = (void*) (x-(sizeof(struct sdshdr))); - test_cond("sdsnew() free/len buffers", sh->len == 1 && sh->free == 0); - x = sdsMakeRoomFor(x,1); - sh = (void*) (x-(sizeof(struct sdshdr))); - test_cond("sdsMakeRoomFor()", sh->len == 1 && sh->free > 0); - oldfree = sh->free; - x[1] = '1'; - sdsIncrLen(x,1); - test_cond("sdsIncrLen() -- content", x[0] == '0' && x[1] == '1'); - test_cond("sdsIncrLen() -- len", sh->len == 2); - test_cond("sdsIncrLen() -- free", sh->free == oldfree-1); + test_cond("sdsnew() free/len buffers", sdslen(x) == 1 && sdsavail(x) == 0); + + /* Run the test a few times in order to hit the first two + * SDS header types. */ + for (i = 0; i < 10; i++) { + int oldlen = sdslen(x); + x = sdsMakeRoomFor(x,step); + int type = x[-1]&SDS_TYPE_MASK; + + test_cond("sdsMakeRoomFor() len", sdslen(x) == oldlen); + if (type != SDS_TYPE_5) { + test_cond("sdsMakeRoomFor() free", sdsavail(x) >= step); + oldfree = sdsavail(x); + } + p = x+oldlen; + for (j = 0; j < step; j++) { + p[j] = 'A'+j; + } + sdsIncrLen(x,step); + } + test_cond("sdsMakeRoomFor() content", + memcmp("0ABCDEFGHIJABCDEFGHIJABCDEFGHIJABCDEFGHIJABCDEFGHIJABCDEFGHIJABCDEFGHIJABCDEFGHIJABCDEFGHIJABCDEFGHIJ",x,101) == 0); + test_cond("sdsMakeRoomFor() final length",sdslen(x)==101); + + sdsfree(x); } } test_report() return 0; } #endif + +#ifdef SDS_TEST_MAIN +int main(void) { + return sdsTest(); +} +#endif diff --git a/deps/hiredis/sds.h b/deps/hiredis/sds.h index 37aaf7a28..d7eb4c7d0 100644 --- a/deps/hiredis/sds.h +++ b/deps/hiredis/sds.h @@ -1,6 +1,8 @@ -/* SDSLib, A C dynamic strings library +/* SDSLib 2.0 -- A C dynamic strings library * - * Copyright (c) 2006-2010, Salvatore Sanfilippo + * Copyright (c) 2006-2015, Salvatore Sanfilippo + * Copyright (c) 2015, Oran Agra + * Copyright (c) 2015, Redis Labs, Inc * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -35,32 +37,188 @@ #include #include +#include typedef char *sds; -struct sdshdr { - unsigned int len; - unsigned int free; +/* Note: sdshdr5 is never used, we just access the flags byte directly. + * However is here to document the layout of type 5 SDS strings. 
*/ +struct __attribute__ ((__packed__)) sdshdr5 { + unsigned char flags; /* 3 lsb of type, and 5 msb of string length */ + char buf[]; +}; +struct __attribute__ ((__packed__)) sdshdr8 { + uint8_t len; /* used */ + uint8_t alloc; /* excluding the header and null terminator */ + unsigned char flags; /* 3 lsb of type, 5 unused bits */ + char buf[]; +}; +struct __attribute__ ((__packed__)) sdshdr16 { + uint16_t len; /* used */ + uint16_t alloc; /* excluding the header and null terminator */ + unsigned char flags; /* 3 lsb of type, 5 unused bits */ + char buf[]; +}; +struct __attribute__ ((__packed__)) sdshdr32 { + uint32_t len; /* used */ + uint32_t alloc; /* excluding the header and null terminator */ + unsigned char flags; /* 3 lsb of type, 5 unused bits */ + char buf[]; +}; +struct __attribute__ ((__packed__)) sdshdr64 { + uint64_t len; /* used */ + uint64_t alloc; /* excluding the header and null terminator */ + unsigned char flags; /* 3 lsb of type, 5 unused bits */ char buf[]; }; +#define SDS_TYPE_5 0 +#define SDS_TYPE_8 1 +#define SDS_TYPE_16 2 +#define SDS_TYPE_32 3 +#define SDS_TYPE_64 4 +#define SDS_TYPE_MASK 7 +#define SDS_TYPE_BITS 3 +#define SDS_HDR_VAR(T,s) struct sdshdr##T *sh = (void*)((s)-(sizeof(struct sdshdr##T))); +#define SDS_HDR(T,s) ((struct sdshdr##T *)((s)-(sizeof(struct sdshdr##T)))) +#define SDS_TYPE_5_LEN(f) ((f)>>SDS_TYPE_BITS) + static inline size_t sdslen(const sds s) { - struct sdshdr *sh = (void*)(s-(sizeof(struct sdshdr))); - return sh->len; + unsigned char flags = s[-1]; + switch(flags&SDS_TYPE_MASK) { + case SDS_TYPE_5: + return SDS_TYPE_5_LEN(flags); + case SDS_TYPE_8: + return SDS_HDR(8,s)->len; + case SDS_TYPE_16: + return SDS_HDR(16,s)->len; + case SDS_TYPE_32: + return SDS_HDR(32,s)->len; + case SDS_TYPE_64: + return SDS_HDR(64,s)->len; + } + return 0; } static inline size_t sdsavail(const sds s) { - struct sdshdr *sh = (void*)(s-(sizeof(struct sdshdr))); - return sh->free; + unsigned char flags = s[-1]; + switch(flags&SDS_TYPE_MASK) { + case SDS_TYPE_5: { + return 0; + } + case SDS_TYPE_8: { + SDS_HDR_VAR(8,s); + return sh->alloc - sh->len; + } + case SDS_TYPE_16: { + SDS_HDR_VAR(16,s); + return sh->alloc - sh->len; + } + case SDS_TYPE_32: { + SDS_HDR_VAR(32,s); + return sh->alloc - sh->len; + } + case SDS_TYPE_64: { + SDS_HDR_VAR(64,s); + return sh->alloc - sh->len; + } + } + return 0; +} + +static inline void sdssetlen(sds s, size_t newlen) { + unsigned char flags = s[-1]; + switch(flags&SDS_TYPE_MASK) { + case SDS_TYPE_5: + { + unsigned char *fp = ((unsigned char*)s)-1; + *fp = SDS_TYPE_5 | (newlen << SDS_TYPE_BITS); + } + break; + case SDS_TYPE_8: + SDS_HDR(8,s)->len = newlen; + break; + case SDS_TYPE_16: + SDS_HDR(16,s)->len = newlen; + break; + case SDS_TYPE_32: + SDS_HDR(32,s)->len = newlen; + break; + case SDS_TYPE_64: + SDS_HDR(64,s)->len = newlen; + break; + } +} + +static inline void sdsinclen(sds s, size_t inc) { + unsigned char flags = s[-1]; + switch(flags&SDS_TYPE_MASK) { + case SDS_TYPE_5: + { + unsigned char *fp = ((unsigned char*)s)-1; + unsigned char newlen = SDS_TYPE_5_LEN(flags)+inc; + *fp = SDS_TYPE_5 | (newlen << SDS_TYPE_BITS); + } + break; + case SDS_TYPE_8: + SDS_HDR(8,s)->len += inc; + break; + case SDS_TYPE_16: + SDS_HDR(16,s)->len += inc; + break; + case SDS_TYPE_32: + SDS_HDR(32,s)->len += inc; + break; + case SDS_TYPE_64: + SDS_HDR(64,s)->len += inc; + break; + } +} + +/* sdsalloc() = sdsavail() + sdslen() */ +static inline size_t sdsalloc(const sds s) { + unsigned char flags = s[-1]; + switch(flags&SDS_TYPE_MASK) { + 
case SDS_TYPE_5: + return SDS_TYPE_5_LEN(flags); + case SDS_TYPE_8: + return SDS_HDR(8,s)->alloc; + case SDS_TYPE_16: + return SDS_HDR(16,s)->alloc; + case SDS_TYPE_32: + return SDS_HDR(32,s)->alloc; + case SDS_TYPE_64: + return SDS_HDR(64,s)->alloc; + } + return 0; +} + +static inline void sdssetalloc(sds s, size_t newlen) { + unsigned char flags = s[-1]; + switch(flags&SDS_TYPE_MASK) { + case SDS_TYPE_5: + /* Nothing to do, this type has no total allocation info. */ + break; + case SDS_TYPE_8: + SDS_HDR(8,s)->alloc = newlen; + break; + case SDS_TYPE_16: + SDS_HDR(16,s)->alloc = newlen; + break; + case SDS_TYPE_32: + SDS_HDR(32,s)->alloc = newlen; + break; + case SDS_TYPE_64: + SDS_HDR(64,s)->alloc = newlen; + break; + } } sds sdsnewlen(const void *init, size_t initlen); sds sdsnew(const char *init); sds sdsempty(void); -size_t sdslen(const sds s); sds sdsdup(const sds s); void sdsfree(sds s); -size_t sdsavail(const sds s); sds sdsgrowzero(sds s, size_t len); sds sdscatlen(sds s, const void *t, size_t len); sds sdscat(sds s, const char *t); @@ -91,11 +249,17 @@ sds sdscatrepr(sds s, const char *p, size_t len); sds *sdssplitargs(const char *line, int *argc); sds sdsmapchars(sds s, const char *from, const char *to, size_t setlen); sds sdsjoin(char **argv, int argc, char *sep); +sds sdsjoinsds(sds *argv, int argc, const char *sep, size_t seplen); /* Low level functions exposed to the user API */ sds sdsMakeRoomFor(sds s, size_t addlen); void sdsIncrLen(sds s, int incr); sds sdsRemoveFreeSpace(sds s); size_t sdsAllocSize(sds s); +void *sdsAllocPtr(sds s); + +#ifdef REDIS_TEST +int sdsTest(int argc, char *argv[]); +#endif #endif diff --git a/deps/hiredis/sdsalloc.h b/deps/hiredis/sdsalloc.h new file mode 100644 index 000000000..531d41929 --- /dev/null +++ b/deps/hiredis/sdsalloc.h @@ -0,0 +1,42 @@ +/* SDSLib 2.0 -- A C dynamic strings library + * + * Copyright (c) 2006-2015, Salvatore Sanfilippo + * Copyright (c) 2015, Redis Labs, Inc + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Redis nor the names of its contributors may be used + * to endorse or promote products derived from this software without + * specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +/* SDS allocator selection. 
+ * + * This file is used in order to change the SDS allocator at compile time. + * Just define the following defines to what you want to use. Also add + * the include of your alternate allocator if needed (not needed in order + * to use the default libc allocator). */ + +#include "zmalloc.h" +#define s_malloc zmalloc +#define s_realloc zrealloc +#define s_free zfree From cef054e86856463d3e79d4a5048507377c85eab7 Mon Sep 17 00:00:00 2001 From: antirez Date: Sun, 26 Jul 2015 15:14:57 +0200 Subject: [PATCH 0321/1928] RDMF (Redis/Disque merge friendlyness) refactoring WIP 1. --- src/Makefile | 2 +- src/Makefile.dep | 62 ++++++++++++++++---------------- src/aof.c | 2 +- src/bio.c | 2 +- src/bitops.c | 2 +- src/blocked.c | 2 +- src/cluster.c | 2 +- src/config.c | 74 +++++++++++++++++++------------------- src/crc16.c | 2 +- src/db.c | 2 +- src/debug.c | 2 +- src/geo.h | 2 +- src/hyperloglog.c | 2 +- src/latency.c | 2 +- src/multi.c | 2 +- src/networking.c | 2 +- src/notify.c | 2 +- src/object.c | 2 +- src/pubsub.c | 2 +- src/rdb.c | 2 +- src/rdb.h | 2 +- src/redis-check-rdb.c | 2 +- src/replication.c | 2 +- src/rio.c | 2 +- src/scripting.c | 2 +- src/sentinel.c | 6 ++-- src/{redis.c => server.c} | 76 +++++++++++++++++++-------------------- src/{redis.h => server.h} | 74 +++++++++++++++++++------------------- src/slowlog.c | 2 +- src/sort.c | 2 +- src/sparkline.c | 2 +- src/syncio.c | 2 +- src/t_hash.c | 2 +- src/t_list.c | 2 +- src/t_set.c | 2 +- src/t_string.c | 2 +- src/t_zset.c | 2 +- 37 files changed, 178 insertions(+), 178 deletions(-) rename src/{redis.c => server.c} (98%) rename src/{redis.h => server.h} (97%) diff --git a/src/Makefile b/src/Makefile index 449b4ec26..648127a25 100644 --- a/src/Makefile +++ b/src/Makefile @@ -117,7 +117,7 @@ endif REDIS_SERVER_NAME=redis-server REDIS_SENTINEL_NAME=redis-sentinel -REDIS_SERVER_OBJ=adlist.o quicklist.o ae.o anet.o dict.o redis.o sds.o zmalloc.o lzf_c.o lzf_d.o pqsort.o zipmap.o sha1.o ziplist.o release.o networking.o util.o object.o db.o replication.o rdb.o t_string.o t_list.o t_set.o t_zset.o t_hash.o config.o aof.o pubsub.o multi.o debug.o sort.o intset.o syncio.o cluster.o crc16.o endianconv.o slowlog.o scripting.o bio.o rio.o rand.o memtest.o crc64.o bitops.o sentinel.o notify.o setproctitle.o blocked.o hyperloglog.o latency.o sparkline.o redis-check-rdb.o geo.o +REDIS_SERVER_OBJ=adlist.o quicklist.o ae.o anet.o dict.o server.o sds.o zmalloc.o lzf_c.o lzf_d.o pqsort.o zipmap.o sha1.o ziplist.o release.o networking.o util.o object.o db.o replication.o rdb.o t_string.o t_list.o t_set.o t_zset.o t_hash.o config.o aof.o pubsub.o multi.o debug.o sort.o intset.o syncio.o cluster.o crc16.o endianconv.o slowlog.o scripting.o bio.o rio.o rand.o memtest.o crc64.o bitops.o sentinel.o notify.o setproctitle.o blocked.o hyperloglog.o latency.o sparkline.o redis-check-rdb.o geo.o REDIS_GEOHASH_OBJ=../deps/geohash-int/geohash.o ../deps/geohash-int/geohash_helper.o REDIS_CLI_NAME=redis-cli REDIS_CLI_OBJ=anet.o adlist.o redis-cli.o zmalloc.o release.o anet.o ae.o crc64.o diff --git a/src/Makefile.dep b/src/Makefile.dep index 33e89137d..ff8acb794 100644 --- a/src/Makefile.dep +++ b/src/Makefile.dep @@ -5,72 +5,72 @@ ae_evport.o: ae_evport.c ae_kqueue.o: ae_kqueue.c ae_select.o: ae_select.c anet.o: anet.c fmacros.h anet.h -aof.o: aof.c redis.h fmacros.h config.h ../deps/lua/src/lua.h \ +aof.o: aof.c server.h fmacros.h config.h ../deps/lua/src/lua.h \ ../deps/lua/src/luaconf.h ae.h sds.h dict.h adlist.h zmalloc.h anet.h \ ziplist.h intset.h version.h util.h 
latency.h sparkline.h rdb.h rio.h \ bio.h -bio.o: bio.c redis.h fmacros.h config.h ../deps/lua/src/lua.h \ +bio.o: bio.c server.h fmacros.h config.h ../deps/lua/src/lua.h \ ../deps/lua/src/luaconf.h ae.h sds.h dict.h adlist.h zmalloc.h anet.h \ ziplist.h intset.h version.h util.h latency.h sparkline.h rdb.h rio.h \ bio.h -bitops.o: bitops.c redis.h fmacros.h config.h ../deps/lua/src/lua.h \ +bitops.o: bitops.c server.h fmacros.h config.h ../deps/lua/src/lua.h \ ../deps/lua/src/luaconf.h ae.h sds.h dict.h adlist.h zmalloc.h anet.h \ ziplist.h intset.h version.h util.h latency.h sparkline.h rdb.h rio.h -blocked.o: blocked.c redis.h fmacros.h config.h ../deps/lua/src/lua.h \ +blocked.o: blocked.c server.h fmacros.h config.h ../deps/lua/src/lua.h \ ../deps/lua/src/luaconf.h ae.h sds.h dict.h adlist.h zmalloc.h anet.h \ ziplist.h intset.h version.h util.h latency.h sparkline.h rdb.h rio.h -cluster.o: cluster.c redis.h fmacros.h config.h ../deps/lua/src/lua.h \ +cluster.o: cluster.c server.h fmacros.h config.h ../deps/lua/src/lua.h \ ../deps/lua/src/luaconf.h ae.h sds.h dict.h adlist.h zmalloc.h anet.h \ ziplist.h intset.h version.h util.h latency.h sparkline.h rdb.h rio.h \ cluster.h endianconv.h -config.o: config.c redis.h fmacros.h config.h ../deps/lua/src/lua.h \ +config.o: config.c server.h fmacros.h config.h ../deps/lua/src/lua.h \ ../deps/lua/src/luaconf.h ae.h sds.h dict.h adlist.h zmalloc.h anet.h \ ziplist.h intset.h version.h util.h latency.h sparkline.h rdb.h rio.h \ cluster.h -crc16.o: crc16.c redis.h fmacros.h config.h ../deps/lua/src/lua.h \ +crc16.o: crc16.c server.h fmacros.h config.h ../deps/lua/src/lua.h \ ../deps/lua/src/luaconf.h ae.h sds.h dict.h adlist.h zmalloc.h anet.h \ ziplist.h intset.h version.h util.h latency.h sparkline.h rdb.h rio.h crc64.o: crc64.c -db.o: db.c redis.h fmacros.h config.h ../deps/lua/src/lua.h \ +db.o: db.c server.h fmacros.h config.h ../deps/lua/src/lua.h \ ../deps/lua/src/luaconf.h ae.h sds.h dict.h adlist.h zmalloc.h anet.h \ ziplist.h intset.h version.h util.h latency.h sparkline.h rdb.h rio.h \ cluster.h -debug.o: debug.c redis.h fmacros.h config.h ../deps/lua/src/lua.h \ +debug.o: debug.c server.h fmacros.h config.h ../deps/lua/src/lua.h \ ../deps/lua/src/luaconf.h ae.h sds.h dict.h adlist.h zmalloc.h anet.h \ ziplist.h intset.h version.h util.h latency.h sparkline.h rdb.h rio.h \ sha1.h crc64.h bio.h dict.o: dict.c fmacros.h dict.h zmalloc.h redisassert.h endianconv.o: endianconv.c -hyperloglog.o: hyperloglog.c redis.h fmacros.h config.h \ +hyperloglog.o: hyperloglog.c server.h fmacros.h config.h \ ../deps/lua/src/lua.h ../deps/lua/src/luaconf.h ae.h sds.h dict.h \ adlist.h zmalloc.h anet.h ziplist.h intset.h version.h util.h latency.h \ sparkline.h rdb.h rio.h intset.o: intset.c intset.h zmalloc.h endianconv.h config.h -latency.o: latency.c redis.h fmacros.h config.h ../deps/lua/src/lua.h \ +latency.o: latency.c server.h fmacros.h config.h ../deps/lua/src/lua.h \ ../deps/lua/src/luaconf.h ae.h sds.h dict.h adlist.h zmalloc.h anet.h \ ziplist.h intset.h version.h util.h latency.h sparkline.h rdb.h rio.h lzf_c.o: lzf_c.c lzfP.h lzf_d.o: lzf_d.c lzfP.h memtest.o: memtest.c config.h -multi.o: multi.c redis.h fmacros.h config.h ../deps/lua/src/lua.h \ +multi.o: multi.c server.h fmacros.h config.h ../deps/lua/src/lua.h \ ../deps/lua/src/luaconf.h ae.h sds.h dict.h adlist.h zmalloc.h anet.h \ ziplist.h intset.h version.h util.h latency.h sparkline.h rdb.h rio.h -networking.o: networking.c redis.h fmacros.h config.h \ +networking.o: networking.c 
server.h fmacros.h config.h \ ../deps/lua/src/lua.h ../deps/lua/src/luaconf.h ae.h sds.h dict.h \ adlist.h zmalloc.h anet.h ziplist.h intset.h version.h util.h latency.h \ sparkline.h rdb.h rio.h -notify.o: notify.c redis.h fmacros.h config.h ../deps/lua/src/lua.h \ +notify.o: notify.c server.h fmacros.h config.h ../deps/lua/src/lua.h \ ../deps/lua/src/luaconf.h ae.h sds.h dict.h adlist.h zmalloc.h anet.h \ ziplist.h intset.h version.h util.h latency.h sparkline.h rdb.h rio.h -object.o: object.c redis.h fmacros.h config.h ../deps/lua/src/lua.h \ +object.o: object.c server.h fmacros.h config.h ../deps/lua/src/lua.h \ ../deps/lua/src/luaconf.h ae.h sds.h dict.h adlist.h zmalloc.h anet.h \ ziplist.h intset.h version.h util.h latency.h sparkline.h rdb.h rio.h pqsort.o: pqsort.c -pubsub.o: pubsub.c redis.h fmacros.h config.h ../deps/lua/src/lua.h \ +pubsub.o: pubsub.c server.h fmacros.h config.h ../deps/lua/src/lua.h \ ../deps/lua/src/luaconf.h ae.h sds.h dict.h adlist.h zmalloc.h anet.h \ ziplist.h intset.h version.h util.h latency.h sparkline.h rdb.h rio.h rand.o: rand.c -rdb.o: rdb.c redis.h fmacros.h config.h ../deps/lua/src/lua.h \ +rdb.o: rdb.c server.h fmacros.h config.h ../deps/lua/src/lua.h \ ../deps/lua/src/luaconf.h ae.h sds.h dict.h adlist.h zmalloc.h anet.h \ ziplist.h intset.h version.h util.h latency.h sparkline.h rdb.h rio.h \ lzf.h zipmap.h endianconv.h @@ -80,59 +80,59 @@ redis-check-aof.o: redis-check-aof.c fmacros.h config.h redis-check-dump.o: redis-check-dump.c lzf.h crc64.h redis-cli.o: redis-cli.c fmacros.h version.h ../deps/hiredis/hiredis.h \ sds.h zmalloc.h ../deps/linenoise/linenoise.h help.h anet.h ae.h -redis.o: redis.c redis.h fmacros.h config.h ../deps/lua/src/lua.h \ +redis.o: redis.c server.h fmacros.h config.h ../deps/lua/src/lua.h \ ../deps/lua/src/luaconf.h ae.h sds.h dict.h adlist.h zmalloc.h anet.h \ ziplist.h intset.h version.h util.h latency.h sparkline.h rdb.h rio.h \ cluster.h slowlog.h bio.h asciilogo.h release.o: release.c release.h version.h crc64.h -replication.o: replication.c redis.h fmacros.h config.h \ +replication.o: replication.c server.h fmacros.h config.h \ ../deps/lua/src/lua.h ../deps/lua/src/luaconf.h ae.h sds.h dict.h \ adlist.h zmalloc.h anet.h ziplist.h intset.h version.h util.h latency.h \ sparkline.h rdb.h rio.h -rio.o: rio.c fmacros.h rio.h sds.h util.h crc64.h config.h redis.h \ +rio.o: rio.c fmacros.h rio.h sds.h util.h crc64.h config.h server.h \ ../deps/lua/src/lua.h ../deps/lua/src/luaconf.h ae.h dict.h adlist.h \ zmalloc.h anet.h ziplist.h intset.h version.h latency.h sparkline.h \ rdb.h -scripting.o: scripting.c redis.h fmacros.h config.h ../deps/lua/src/lua.h \ +scripting.o: scripting.c server.h fmacros.h config.h ../deps/lua/src/lua.h \ ../deps/lua/src/luaconf.h ae.h sds.h dict.h adlist.h zmalloc.h anet.h \ ziplist.h intset.h version.h util.h latency.h sparkline.h rdb.h rio.h \ sha1.h rand.h ../deps/lua/src/lauxlib.h ../deps/lua/src/lua.h \ ../deps/lua/src/lualib.h sds.o: sds.c sds.h zmalloc.h -sentinel.o: sentinel.c redis.h fmacros.h config.h ../deps/lua/src/lua.h \ +sentinel.o: sentinel.c server.h fmacros.h config.h ../deps/lua/src/lua.h \ ../deps/lua/src/luaconf.h ae.h sds.h dict.h adlist.h zmalloc.h anet.h \ ziplist.h intset.h version.h util.h latency.h sparkline.h rdb.h rio.h \ ../deps/hiredis/hiredis.h ../deps/hiredis/async.h \ ../deps/hiredis/hiredis.h setproctitle.o: setproctitle.c sha1.o: sha1.c sha1.h config.h -slowlog.o: slowlog.c redis.h fmacros.h config.h ../deps/lua/src/lua.h \ +slowlog.o: slowlog.c server.h 
fmacros.h config.h ../deps/lua/src/lua.h \ ../deps/lua/src/luaconf.h ae.h sds.h dict.h adlist.h zmalloc.h anet.h \ ziplist.h intset.h version.h util.h latency.h sparkline.h rdb.h rio.h \ slowlog.h -sort.o: sort.c redis.h fmacros.h config.h ../deps/lua/src/lua.h \ +sort.o: sort.c server.h fmacros.h config.h ../deps/lua/src/lua.h \ ../deps/lua/src/luaconf.h ae.h sds.h dict.h adlist.h zmalloc.h anet.h \ ziplist.h intset.h version.h util.h latency.h sparkline.h rdb.h rio.h \ pqsort.h -sparkline.o: sparkline.c redis.h fmacros.h config.h ../deps/lua/src/lua.h \ +sparkline.o: sparkline.c server.h fmacros.h config.h ../deps/lua/src/lua.h \ ../deps/lua/src/luaconf.h ae.h sds.h dict.h adlist.h zmalloc.h anet.h \ ziplist.h intset.h version.h util.h latency.h sparkline.h rdb.h rio.h -syncio.o: syncio.c redis.h fmacros.h config.h ../deps/lua/src/lua.h \ +syncio.o: syncio.c server.h fmacros.h config.h ../deps/lua/src/lua.h \ ../deps/lua/src/luaconf.h ae.h sds.h dict.h adlist.h zmalloc.h anet.h \ ziplist.h intset.h version.h util.h latency.h sparkline.h rdb.h rio.h -t_hash.o: t_hash.c redis.h fmacros.h config.h ../deps/lua/src/lua.h \ +t_hash.o: t_hash.c server.h fmacros.h config.h ../deps/lua/src/lua.h \ ../deps/lua/src/luaconf.h ae.h sds.h dict.h adlist.h zmalloc.h anet.h \ ziplist.h intset.h version.h util.h latency.h sparkline.h rdb.h rio.h -t_list.o: t_list.c redis.h fmacros.h config.h ../deps/lua/src/lua.h \ +t_list.o: t_list.c server.h fmacros.h config.h ../deps/lua/src/lua.h \ ../deps/lua/src/luaconf.h ae.h sds.h dict.h adlist.h zmalloc.h anet.h \ ziplist.h intset.h version.h util.h latency.h sparkline.h rdb.h rio.h -t_set.o: t_set.c redis.h fmacros.h config.h ../deps/lua/src/lua.h \ +t_set.o: t_set.c server.h fmacros.h config.h ../deps/lua/src/lua.h \ ../deps/lua/src/luaconf.h ae.h sds.h dict.h adlist.h zmalloc.h anet.h \ ziplist.h intset.h version.h util.h latency.h sparkline.h rdb.h rio.h -t_string.o: t_string.c redis.h fmacros.h config.h ../deps/lua/src/lua.h \ +t_string.o: t_string.c server.h fmacros.h config.h ../deps/lua/src/lua.h \ ../deps/lua/src/luaconf.h ae.h sds.h dict.h adlist.h zmalloc.h anet.h \ ziplist.h intset.h version.h util.h latency.h sparkline.h rdb.h rio.h -t_zset.o: t_zset.c redis.h fmacros.h config.h ../deps/lua/src/lua.h \ +t_zset.o: t_zset.c server.h fmacros.h config.h ../deps/lua/src/lua.h \ ../deps/lua/src/luaconf.h ae.h sds.h dict.h adlist.h zmalloc.h anet.h \ ziplist.h intset.h version.h util.h latency.h sparkline.h rdb.h rio.h util.o: util.c fmacros.h util.h sds.h diff --git a/src/aof.c b/src/aof.c index 01f0ce320..4a6ad6d5d 100644 --- a/src/aof.c +++ b/src/aof.c @@ -27,7 +27,7 @@ * POSSIBILITY OF SUCH DAMAGE. */ -#include "redis.h" +#include "server.h" #include "bio.h" #include "rio.h" diff --git a/src/bio.c b/src/bio.c index 27bc9abfc..a9bccff18 100644 --- a/src/bio.c +++ b/src/bio.c @@ -58,7 +58,7 @@ */ -#include "redis.h" +#include "server.h" #include "bio.h" static pthread_t bio_threads[REDIS_BIO_NUM_OPS]; diff --git a/src/bitops.c b/src/bitops.c index ec912bc24..6763bf1b0 100644 --- a/src/bitops.c +++ b/src/bitops.c @@ -28,7 +28,7 @@ * POSSIBILITY OF SUCH DAMAGE. */ -#include "redis.h" +#include "server.h" /* ----------------------------------------------------------------------------- * Helpers and low level bit functions. diff --git a/src/blocked.c b/src/blocked.c index 2ec2cf626..95b68fba1 100644 --- a/src/blocked.c +++ b/src/blocked.c @@ -63,7 +63,7 @@ * clusterRedirectBlockedClientIfNeeded() function should also be updated. 
*/ -#include "redis.h" +#include "server.h" /* Get a timeout value from an object and store it into 'timeout'. * The final timeout is always stored as milliseconds as a time where the diff --git a/src/cluster.c b/src/cluster.c index 6280677ae..f7f3da8a3 100644 --- a/src/cluster.c +++ b/src/cluster.c @@ -28,7 +28,7 @@ * POSSIBILITY OF SUCH DAMAGE. */ -#include "redis.h" +#include "server.h" #include "cluster.h" #include "endianconv.h" diff --git a/src/config.c b/src/config.c index fd56269ef..a6ea1ce7d 100644 --- a/src/config.c +++ b/src/config.c @@ -28,7 +28,7 @@ * POSSIBILITY OF SUCH DAMAGE. */ -#include "redis.h" +#include "server.h" #include "cluster.h" #include @@ -1750,60 +1750,60 @@ int rewriteConfig(char *path) { * the rewrite state. */ rewriteConfigYesNoOption(state,"daemonize",server.daemonize,0); - rewriteConfigStringOption(state,"pidfile",server.pidfile,REDIS_DEFAULT_PID_FILE); + rewriteConfigStringOption(state,"pidfile",server.pidfile,CONFIG_DEFAULT_PID_FILE); rewriteConfigNumericalOption(state,"port",server.port,REDIS_SERVERPORT); rewriteConfigNumericalOption(state,"tcp-backlog",server.tcp_backlog,REDIS_TCP_BACKLOG); rewriteConfigBindOption(state); rewriteConfigStringOption(state,"unixsocket",server.unixsocket,NULL); - rewriteConfigOctalOption(state,"unixsocketperm",server.unixsocketperm,REDIS_DEFAULT_UNIX_SOCKET_PERM); + rewriteConfigOctalOption(state,"unixsocketperm",server.unixsocketperm,CONFIG_DEFAULT_UNIX_SOCKET_PERM); rewriteConfigNumericalOption(state,"timeout",server.maxidletime,REDIS_MAXIDLETIME); - rewriteConfigNumericalOption(state,"tcp-keepalive",server.tcpkeepalive,REDIS_DEFAULT_TCP_KEEPALIVE); - rewriteConfigEnumOption(state,"loglevel",server.verbosity,loglevel_enum,REDIS_DEFAULT_VERBOSITY); - rewriteConfigStringOption(state,"logfile",server.logfile,REDIS_DEFAULT_LOGFILE); - rewriteConfigYesNoOption(state,"syslog-enabled",server.syslog_enabled,REDIS_DEFAULT_SYSLOG_ENABLED); - rewriteConfigStringOption(state,"syslog-ident",server.syslog_ident,REDIS_DEFAULT_SYSLOG_IDENT); + rewriteConfigNumericalOption(state,"tcp-keepalive",server.tcpkeepalive,CONFIG_DEFAULT_TCP_KEEPALIVE); + rewriteConfigEnumOption(state,"loglevel",server.verbosity,loglevel_enum,CONFIG_DEFAULT_VERBOSITY); + rewriteConfigStringOption(state,"logfile",server.logfile,CONFIG_DEFAULT_LOGFILE); + rewriteConfigYesNoOption(state,"syslog-enabled",server.syslog_enabled,CONFIG_DEFAULT_SYSLOG_ENABLED); + rewriteConfigStringOption(state,"syslog-ident",server.syslog_ident,CONFIG_DEFAULT_SYSLOG_IDENT); rewriteConfigSyslogfacilityOption(state); rewriteConfigSaveOption(state); - rewriteConfigNumericalOption(state,"databases",server.dbnum,REDIS_DEFAULT_DBNUM); - rewriteConfigYesNoOption(state,"stop-writes-on-bgsave-error",server.stop_writes_on_bgsave_err,REDIS_DEFAULT_STOP_WRITES_ON_BGSAVE_ERROR); - rewriteConfigYesNoOption(state,"rdbcompression",server.rdb_compression,REDIS_DEFAULT_RDB_COMPRESSION); - rewriteConfigYesNoOption(state,"rdbchecksum",server.rdb_checksum,REDIS_DEFAULT_RDB_CHECKSUM); - rewriteConfigStringOption(state,"dbfilename",server.rdb_filename,REDIS_DEFAULT_RDB_FILENAME); + rewriteConfigNumericalOption(state,"databases",server.dbnum,CONFIG_DEFAULT_DBNUM); + rewriteConfigYesNoOption(state,"stop-writes-on-bgsave-error",server.stop_writes_on_bgsave_err,CONFIG_DEFAULT_STOP_WRITES_ON_BGSAVE_ERROR); + rewriteConfigYesNoOption(state,"rdbcompression",server.rdb_compression,CONFIG_DEFAULT_RDB_COMPRESSION); + rewriteConfigYesNoOption(state,"rdbchecksum",server.rdb_checksum,CONFIG_DEFAULT_RDB_CHECKSUM); + 
rewriteConfigStringOption(state,"dbfilename",server.rdb_filename,CONFIG_DEFAULT_RDB_FILENAME); rewriteConfigDirOption(state); rewriteConfigSlaveofOption(state); rewriteConfigStringOption(state,"masterauth",server.masterauth,NULL); - rewriteConfigYesNoOption(state,"slave-serve-stale-data",server.repl_serve_stale_data,REDIS_DEFAULT_SLAVE_SERVE_STALE_DATA); - rewriteConfigYesNoOption(state,"slave-read-only",server.repl_slave_ro,REDIS_DEFAULT_SLAVE_READ_ONLY); + rewriteConfigYesNoOption(state,"slave-serve-stale-data",server.repl_serve_stale_data,CONFIG_DEFAULT_SLAVE_SERVE_STALE_DATA); + rewriteConfigYesNoOption(state,"slave-read-only",server.repl_slave_ro,CONFIG_DEFAULT_SLAVE_READ_ONLY); rewriteConfigNumericalOption(state,"repl-ping-slave-period",server.repl_ping_slave_period,REDIS_REPL_PING_SLAVE_PERIOD); rewriteConfigNumericalOption(state,"repl-timeout",server.repl_timeout,REDIS_REPL_TIMEOUT); - rewriteConfigBytesOption(state,"repl-backlog-size",server.repl_backlog_size,REDIS_DEFAULT_REPL_BACKLOG_SIZE); - rewriteConfigBytesOption(state,"repl-backlog-ttl",server.repl_backlog_time_limit,REDIS_DEFAULT_REPL_BACKLOG_TIME_LIMIT); - rewriteConfigYesNoOption(state,"repl-disable-tcp-nodelay",server.repl_disable_tcp_nodelay,REDIS_DEFAULT_REPL_DISABLE_TCP_NODELAY); - rewriteConfigYesNoOption(state,"repl-diskless-sync",server.repl_diskless_sync,REDIS_DEFAULT_REPL_DISKLESS_SYNC); - rewriteConfigNumericalOption(state,"repl-diskless-sync-delay",server.repl_diskless_sync_delay,REDIS_DEFAULT_REPL_DISKLESS_SYNC_DELAY); - rewriteConfigNumericalOption(state,"slave-priority",server.slave_priority,REDIS_DEFAULT_SLAVE_PRIORITY); - rewriteConfigNumericalOption(state,"min-slaves-to-write",server.repl_min_slaves_to_write,REDIS_DEFAULT_MIN_SLAVES_TO_WRITE); - rewriteConfigNumericalOption(state,"min-slaves-max-lag",server.repl_min_slaves_max_lag,REDIS_DEFAULT_MIN_SLAVES_MAX_LAG); + rewriteConfigBytesOption(state,"repl-backlog-size",server.repl_backlog_size,CONFIG_DEFAULT_REPL_BACKLOG_SIZE); + rewriteConfigBytesOption(state,"repl-backlog-ttl",server.repl_backlog_time_limit,CONFIG_DEFAULT_REPL_BACKLOG_TIME_LIMIT); + rewriteConfigYesNoOption(state,"repl-disable-tcp-nodelay",server.repl_disable_tcp_nodelay,CONFIG_DEFAULT_REPL_DISABLE_TCP_NODELAY); + rewriteConfigYesNoOption(state,"repl-diskless-sync",server.repl_diskless_sync,CONFIG_DEFAULT_REPL_DISKLESS_SYNC); + rewriteConfigNumericalOption(state,"repl-diskless-sync-delay",server.repl_diskless_sync_delay,CONFIG_DEFAULT_REPL_DISKLESS_SYNC_DELAY); + rewriteConfigNumericalOption(state,"slave-priority",server.slave_priority,CONFIG_DEFAULT_SLAVE_PRIORITY); + rewriteConfigNumericalOption(state,"min-slaves-to-write",server.repl_min_slaves_to_write,CONFIG_DEFAULT_MIN_SLAVES_TO_WRITE); + rewriteConfigNumericalOption(state,"min-slaves-max-lag",server.repl_min_slaves_max_lag,CONFIG_DEFAULT_MIN_SLAVES_MAX_LAG); rewriteConfigStringOption(state,"requirepass",server.requirepass,NULL); - rewriteConfigNumericalOption(state,"maxclients",server.maxclients,REDIS_MAX_CLIENTS); - rewriteConfigBytesOption(state,"maxmemory",server.maxmemory,REDIS_DEFAULT_MAXMEMORY); - rewriteConfigEnumOption(state,"maxmemory-policy",server.maxmemory_policy,maxmemory_policy_enum,REDIS_DEFAULT_MAXMEMORY_POLICY); - rewriteConfigNumericalOption(state,"maxmemory-samples",server.maxmemory_samples,REDIS_DEFAULT_MAXMEMORY_SAMPLES); + rewriteConfigNumericalOption(state,"maxclients",server.maxclients,CONFIG_DEFAULT_MAX_CLIENTS); + rewriteConfigBytesOption(state,"maxmemory",server.maxmemory,CONFIG_DEFAULT_MAXMEMORY); + 
rewriteConfigEnumOption(state,"maxmemory-policy",server.maxmemory_policy,maxmemory_policy_enum,CONFIG_DEFAULT_MAXMEMORY_POLICY); + rewriteConfigNumericalOption(state,"maxmemory-samples",server.maxmemory_samples,CONFIG_DEFAULT_MAXMEMORY_SAMPLES); rewriteConfigYesNoOption(state,"appendonly",server.aof_state != REDIS_AOF_OFF,0); - rewriteConfigStringOption(state,"appendfilename",server.aof_filename,REDIS_DEFAULT_AOF_FILENAME); - rewriteConfigEnumOption(state,"appendfsync",server.aof_fsync,aof_fsync_enum,REDIS_DEFAULT_AOF_FSYNC); - rewriteConfigYesNoOption(state,"no-appendfsync-on-rewrite",server.aof_no_fsync_on_rewrite,REDIS_DEFAULT_AOF_NO_FSYNC_ON_REWRITE); + rewriteConfigStringOption(state,"appendfilename",server.aof_filename,CONFIG_DEFAULT_AOF_FILENAME); + rewriteConfigEnumOption(state,"appendfsync",server.aof_fsync,aof_fsync_enum,CONFIG_DEFAULT_AOF_FSYNC); + rewriteConfigYesNoOption(state,"no-appendfsync-on-rewrite",server.aof_no_fsync_on_rewrite,CONFIG_DEFAULT_AOF_NO_FSYNC_ON_REWRITE); rewriteConfigNumericalOption(state,"auto-aof-rewrite-percentage",server.aof_rewrite_perc,REDIS_AOF_REWRITE_PERC); rewriteConfigBytesOption(state,"auto-aof-rewrite-min-size",server.aof_rewrite_min_size,REDIS_AOF_REWRITE_MIN_SIZE); rewriteConfigNumericalOption(state,"lua-time-limit",server.lua_time_limit,REDIS_LUA_TIME_LIMIT); rewriteConfigYesNoOption(state,"cluster-enabled",server.cluster_enabled,0); - rewriteConfigStringOption(state,"cluster-config-file",server.cluster_configfile,REDIS_DEFAULT_CLUSTER_CONFIG_FILE); + rewriteConfigStringOption(state,"cluster-config-file",server.cluster_configfile,CONFIG_DEFAULT_CLUSTER_CONFIG_FILE); rewriteConfigYesNoOption(state,"cluster-require-full-coverage",server.cluster_require_full_coverage,REDIS_CLUSTER_DEFAULT_REQUIRE_FULL_COVERAGE); rewriteConfigNumericalOption(state,"cluster-node-timeout",server.cluster_node_timeout,REDIS_CLUSTER_DEFAULT_NODE_TIMEOUT); rewriteConfigNumericalOption(state,"cluster-migration-barrier",server.cluster_migration_barrier,REDIS_CLUSTER_DEFAULT_MIGRATION_BARRIER); rewriteConfigNumericalOption(state,"cluster-slave-validity-factor",server.cluster_slave_validity_factor,REDIS_CLUSTER_DEFAULT_SLAVE_VALIDITY); rewriteConfigNumericalOption(state,"slowlog-log-slower-than",server.slowlog_log_slower_than,REDIS_SLOWLOG_LOG_SLOWER_THAN); - rewriteConfigNumericalOption(state,"latency-monitor-threshold",server.latency_monitor_threshold,REDIS_DEFAULT_LATENCY_MONITOR_THRESHOLD); + rewriteConfigNumericalOption(state,"latency-monitor-threshold",server.latency_monitor_threshold,CONFIG_DEFAULT_LATENCY_MONITOR_THRESHOLD); rewriteConfigNumericalOption(state,"slowlog-max-len",server.slowlog_max_len,REDIS_SLOWLOG_MAX_LEN); rewriteConfigNotifykeyspaceeventsOption(state); rewriteConfigNumericalOption(state,"hash-max-ziplist-entries",server.hash_max_ziplist_entries,REDIS_HASH_MAX_ZIPLIST_ENTRIES); @@ -1813,12 +1813,12 @@ int rewriteConfig(char *path) { rewriteConfigNumericalOption(state,"set-max-intset-entries",server.set_max_intset_entries,REDIS_SET_MAX_INTSET_ENTRIES); rewriteConfigNumericalOption(state,"zset-max-ziplist-entries",server.zset_max_ziplist_entries,REDIS_ZSET_MAX_ZIPLIST_ENTRIES); rewriteConfigNumericalOption(state,"zset-max-ziplist-value",server.zset_max_ziplist_value,REDIS_ZSET_MAX_ZIPLIST_VALUE); - rewriteConfigNumericalOption(state,"hll-sparse-max-bytes",server.hll_sparse_max_bytes,REDIS_DEFAULT_HLL_SPARSE_MAX_BYTES); - rewriteConfigYesNoOption(state,"activerehashing",server.activerehashing,REDIS_DEFAULT_ACTIVE_REHASHING); + 
rewriteConfigNumericalOption(state,"hll-sparse-max-bytes",server.hll_sparse_max_bytes,CONFIG_DEFAULT_HLL_SPARSE_MAX_BYTES); + rewriteConfigYesNoOption(state,"activerehashing",server.activerehashing,CONFIG_DEFAULT_ACTIVE_REHASHING); rewriteConfigClientoutputbufferlimitOption(state); - rewriteConfigNumericalOption(state,"hz",server.hz,REDIS_DEFAULT_HZ); - rewriteConfigYesNoOption(state,"aof-rewrite-incremental-fsync",server.aof_rewrite_incremental_fsync,REDIS_DEFAULT_AOF_REWRITE_INCREMENTAL_FSYNC); - rewriteConfigYesNoOption(state,"aof-load-truncated",server.aof_load_truncated,REDIS_DEFAULT_AOF_LOAD_TRUNCATED); + rewriteConfigNumericalOption(state,"hz",server.hz,CONFIG_DEFAULT_HZ); + rewriteConfigYesNoOption(state,"aof-rewrite-incremental-fsync",server.aof_rewrite_incremental_fsync,CONFIG_DEFAULT_AOF_REWRITE_INCREMENTAL_FSYNC); + rewriteConfigYesNoOption(state,"aof-load-truncated",server.aof_load_truncated,CONFIG_DEFAULT_AOF_LOAD_TRUNCATED); rewriteConfigEnumOption(state,"supervised",server.supervised_mode,supervised_mode_enum,REDIS_SUPERVISED_NONE); /* Rewrite Sentinel config if in Sentinel mode. */ diff --git a/src/crc16.c b/src/crc16.c index 1ec9161c9..7b8c1dad0 100644 --- a/src/crc16.c +++ b/src/crc16.c @@ -1,4 +1,4 @@ -#include "redis.h" +#include "server.h" /* * Copyright 2001-2010 Georges Menie (www.menie.org) diff --git a/src/db.c b/src/db.c index 1493f0a20..35481fecd 100644 --- a/src/db.c +++ b/src/db.c @@ -27,7 +27,7 @@ * POSSIBILITY OF SUCH DAMAGE. */ -#include "redis.h" +#include "server.h" #include "cluster.h" #include diff --git a/src/debug.c b/src/debug.c index 0d2f24245..fffefe6ad 100644 --- a/src/debug.c +++ b/src/debug.c @@ -27,7 +27,7 @@ * POSSIBILITY OF SUCH DAMAGE. */ -#include "redis.h" +#include "server.h" #include "sha1.h" /* SHA1 is used for DEBUG DIGEST */ #include "crc64.h" diff --git a/src/geo.h b/src/geo.h index cf4e42c90..79d0a6a4a 100644 --- a/src/geo.h +++ b/src/geo.h @@ -1,7 +1,7 @@ #ifndef __GEO_H__ #define __GEO_H__ -#include "redis.h" +#include "server.h" /* Structures used inside geo.c in order to represent points and array of * points on the earth. */ diff --git a/src/hyperloglog.c b/src/hyperloglog.c index b3542f997..74e7b8fc6 100644 --- a/src/hyperloglog.c +++ b/src/hyperloglog.c @@ -29,7 +29,7 @@ * POSSIBILITY OF SUCH DAMAGE. */ -#include "redis.h" +#include "server.h" #include #include diff --git a/src/latency.c b/src/latency.c index 54ed03778..d6261f603 100644 --- a/src/latency.c +++ b/src/latency.c @@ -33,7 +33,7 @@ * POSSIBILITY OF SUCH DAMAGE. */ -#include "redis.h" +#include "server.h" /* Dictionary type for latency events. */ int dictStringKeyCompare(void *privdata, const void *key1, const void *key2) { diff --git a/src/multi.c b/src/multi.c index c82876456..313fccd04 100644 --- a/src/multi.c +++ b/src/multi.c @@ -27,7 +27,7 @@ * POSSIBILITY OF SUCH DAMAGE. */ -#include "redis.h" +#include "server.h" /* ================================ MULTI/EXEC ============================== */ diff --git a/src/networking.c b/src/networking.c index fc3746fd9..0df37c0fa 100644 --- a/src/networking.c +++ b/src/networking.c @@ -27,7 +27,7 @@ * POSSIBILITY OF SUCH DAMAGE. */ -#include "redis.h" +#include "server.h" #include #include diff --git a/src/notify.c b/src/notify.c index f77239ecf..6c915e659 100644 --- a/src/notify.c +++ b/src/notify.c @@ -27,7 +27,7 @@ * POSSIBILITY OF SUCH DAMAGE. */ -#include "redis.h" +#include "server.h" /* This file implements keyspace events notification via Pub/Sub ad * described at http://redis.io/topics/keyspace-events. 
*/ diff --git a/src/object.c b/src/object.c index 881b1ac4b..e3f44b3d8 100644 --- a/src/object.c +++ b/src/object.c @@ -28,7 +28,7 @@ * POSSIBILITY OF SUCH DAMAGE. */ -#include "redis.h" +#include "server.h" #include #include diff --git a/src/pubsub.c b/src/pubsub.c index d6cfbdf3c..0711387c2 100644 --- a/src/pubsub.c +++ b/src/pubsub.c @@ -27,7 +27,7 @@ * POSSIBILITY OF SUCH DAMAGE. */ -#include "redis.h" +#include "server.h" /*----------------------------------------------------------------------------- * Pubsub low level API diff --git a/src/rdb.c b/src/rdb.c index e4da23ba1..bbc791046 100644 --- a/src/rdb.c +++ b/src/rdb.c @@ -27,7 +27,7 @@ * POSSIBILITY OF SUCH DAMAGE. */ -#include "redis.h" +#include "server.h" #include "lzf.h" /* LZF compression library */ #include "zipmap.h" #include "endianconv.h" diff --git a/src/rdb.h b/src/rdb.h index a72607b71..da0d50e83 100644 --- a/src/rdb.h +++ b/src/rdb.h @@ -34,7 +34,7 @@ #include "rio.h" /* TBD: include only necessary headers. */ -#include "redis.h" +#include "server.h" /* The current RDB version. When the format changes in a way that is no longer * backward compatible this number gets incremented. */ diff --git a/src/redis-check-rdb.c b/src/redis-check-rdb.c index 21f72c222..da73bd38b 100644 --- a/src/redis-check-rdb.c +++ b/src/redis-check-rdb.c @@ -29,7 +29,7 @@ */ -#include "redis.h" +#include "server.h" #include "rdb.h" #include #include diff --git a/src/replication.c b/src/replication.c index c0145b4a1..0d35ccd3d 100644 --- a/src/replication.c +++ b/src/replication.c @@ -29,7 +29,7 @@ */ -#include "redis.h" +#include "server.h" #include #include diff --git a/src/rio.c b/src/rio.c index 738e56fd0..1cc6225f7 100644 --- a/src/rio.c +++ b/src/rio.c @@ -53,7 +53,7 @@ #include "util.h" #include "crc64.h" #include "config.h" -#include "redis.h" +#include "server.h" /* ------------------------- Buffer I/O implementation ----------------------- */ diff --git a/src/scripting.c b/src/scripting.c index 228770bfa..6c1963793 100644 --- a/src/scripting.c +++ b/src/scripting.c @@ -27,7 +27,7 @@ * POSSIBILITY OF SUCH DAMAGE. */ -#include "redis.h" +#include "server.h" #include "sha1.h" #include "rand.h" #include "cluster.h" diff --git a/src/sentinel.c b/src/sentinel.c index 3ff8899d7..5fa8258cd 100644 --- a/src/sentinel.c +++ b/src/sentinel.c @@ -28,7 +28,7 @@ * POSSIBILITY OF SUCH DAMAGE. */ -#include "redis.h" +#include "server.h" #include "hiredis.h" #include "async.h" @@ -1840,7 +1840,7 @@ void sentinelFlushConfig(void) { int saved_hz = server.hz; int rewrite_status; - server.hz = REDIS_DEFAULT_HZ; + server.hz = CONFIG_DEFAULT_HZ; rewrite_status = rewriteConfig(server.configfile); server.hz = saved_hz; @@ -4288,6 +4288,6 @@ void sentinelTimer(void) { * exactly continue to stay synchronized asking to be voted at the * same time again and again (resulting in nobody likely winning the * election because of split brain voting). */ - server.hz = REDIS_DEFAULT_HZ + rand() % REDIS_DEFAULT_HZ; + server.hz = CONFIG_DEFAULT_HZ + rand() % CONFIG_DEFAULT_HZ; } diff --git a/src/redis.c b/src/server.c similarity index 98% rename from src/redis.c rename to src/server.c index b5ade925e..3459a0119 100644 --- a/src/redis.c +++ b/src/server.c @@ -27,7 +27,7 @@ * POSSIBILITY OF SUCH DAMAGE. 
*/ -#include "redis.h" +#include "server.h" #include "cluster.h" #include "slowlog.h" #include "bio.h" @@ -1416,34 +1416,34 @@ void initServerConfig(void) { getRandomHexChars(server.runid,REDIS_RUN_ID_SIZE); server.configfile = NULL; - server.hz = REDIS_DEFAULT_HZ; + server.hz = CONFIG_DEFAULT_HZ; server.runid[REDIS_RUN_ID_SIZE] = '\0'; server.arch_bits = (sizeof(long) == 8) ? 64 : 32; server.port = REDIS_SERVERPORT; server.tcp_backlog = REDIS_TCP_BACKLOG; server.bindaddr_count = 0; server.unixsocket = NULL; - server.unixsocketperm = REDIS_DEFAULT_UNIX_SOCKET_PERM; + server.unixsocketperm = CONFIG_DEFAULT_UNIX_SOCKET_PERM; server.ipfd_count = 0; server.sofd = -1; - server.dbnum = REDIS_DEFAULT_DBNUM; - server.verbosity = REDIS_DEFAULT_VERBOSITY; + server.dbnum = CONFIG_DEFAULT_DBNUM; + server.verbosity = CONFIG_DEFAULT_VERBOSITY; server.maxidletime = REDIS_MAXIDLETIME; - server.tcpkeepalive = REDIS_DEFAULT_TCP_KEEPALIVE; + server.tcpkeepalive = CONFIG_DEFAULT_TCP_KEEPALIVE; server.active_expire_enabled = 1; server.client_max_querybuf_len = REDIS_MAX_QUERYBUF_LEN; server.saveparams = NULL; server.loading = 0; - server.logfile = zstrdup(REDIS_DEFAULT_LOGFILE); - server.syslog_enabled = REDIS_DEFAULT_SYSLOG_ENABLED; - server.syslog_ident = zstrdup(REDIS_DEFAULT_SYSLOG_IDENT); + server.logfile = zstrdup(CONFIG_DEFAULT_LOGFILE); + server.syslog_enabled = CONFIG_DEFAULT_SYSLOG_ENABLED; + server.syslog_ident = zstrdup(CONFIG_DEFAULT_SYSLOG_IDENT); server.syslog_facility = LOG_LOCAL0; - server.daemonize = REDIS_DEFAULT_DAEMONIZE; + server.daemonize = CONFIG_DEFAULT_DAEMONIZE; server.supervised = 0; server.supervised_mode = REDIS_SUPERVISED_NONE; server.aof_state = REDIS_AOF_OFF; - server.aof_fsync = REDIS_DEFAULT_AOF_FSYNC; - server.aof_no_fsync_on_rewrite = REDIS_DEFAULT_AOF_NO_FSYNC_ON_REWRITE; + server.aof_fsync = CONFIG_DEFAULT_AOF_FSYNC; + server.aof_no_fsync_on_rewrite = CONFIG_DEFAULT_AOF_NO_FSYNC_ON_REWRITE; server.aof_rewrite_perc = REDIS_AOF_REWRITE_PERC; server.aof_rewrite_min_size = REDIS_AOF_REWRITE_MIN_SIZE; server.aof_rewrite_base_size = 0; @@ -1456,22 +1456,22 @@ void initServerConfig(void) { server.aof_fd = -1; server.aof_selected_db = -1; /* Make sure the first time will not match */ server.aof_flush_postponed_start = 0; - server.aof_rewrite_incremental_fsync = REDIS_DEFAULT_AOF_REWRITE_INCREMENTAL_FSYNC; - server.aof_load_truncated = REDIS_DEFAULT_AOF_LOAD_TRUNCATED; + server.aof_rewrite_incremental_fsync = CONFIG_DEFAULT_AOF_REWRITE_INCREMENTAL_FSYNC; + server.aof_load_truncated = CONFIG_DEFAULT_AOF_LOAD_TRUNCATED; server.pidfile = NULL; - server.rdb_filename = zstrdup(REDIS_DEFAULT_RDB_FILENAME); - server.aof_filename = zstrdup(REDIS_DEFAULT_AOF_FILENAME); + server.rdb_filename = zstrdup(CONFIG_DEFAULT_RDB_FILENAME); + server.aof_filename = zstrdup(CONFIG_DEFAULT_AOF_FILENAME); server.requirepass = NULL; - server.rdb_compression = REDIS_DEFAULT_RDB_COMPRESSION; - server.rdb_checksum = REDIS_DEFAULT_RDB_CHECKSUM; - server.stop_writes_on_bgsave_err = REDIS_DEFAULT_STOP_WRITES_ON_BGSAVE_ERROR; - server.activerehashing = REDIS_DEFAULT_ACTIVE_REHASHING; + server.rdb_compression = CONFIG_DEFAULT_RDB_COMPRESSION; + server.rdb_checksum = CONFIG_DEFAULT_RDB_CHECKSUM; + server.stop_writes_on_bgsave_err = CONFIG_DEFAULT_STOP_WRITES_ON_BGSAVE_ERROR; + server.activerehashing = CONFIG_DEFAULT_ACTIVE_REHASHING; server.notify_keyspace_events = 0; - server.maxclients = REDIS_MAX_CLIENTS; + server.maxclients = CONFIG_DEFAULT_MAX_CLIENTS; server.bpop_blocked_clients = 0; - server.maxmemory = 
REDIS_DEFAULT_MAXMEMORY; - server.maxmemory_policy = REDIS_DEFAULT_MAXMEMORY_POLICY; - server.maxmemory_samples = REDIS_DEFAULT_MAXMEMORY_SAMPLES; + server.maxmemory = CONFIG_DEFAULT_MAXMEMORY; + server.maxmemory_policy = CONFIG_DEFAULT_MAXMEMORY_POLICY; + server.maxmemory_samples = CONFIG_DEFAULT_MAXMEMORY_SAMPLES; server.hash_max_ziplist_entries = REDIS_HASH_MAX_ZIPLIST_ENTRIES; server.hash_max_ziplist_value = REDIS_HASH_MAX_ZIPLIST_VALUE; server.list_max_ziplist_size = REDIS_LIST_MAX_ZIPLIST_SIZE; @@ -1479,18 +1479,18 @@ void initServerConfig(void) { server.set_max_intset_entries = REDIS_SET_MAX_INTSET_ENTRIES; server.zset_max_ziplist_entries = REDIS_ZSET_MAX_ZIPLIST_ENTRIES; server.zset_max_ziplist_value = REDIS_ZSET_MAX_ZIPLIST_VALUE; - server.hll_sparse_max_bytes = REDIS_DEFAULT_HLL_SPARSE_MAX_BYTES; + server.hll_sparse_max_bytes = CONFIG_DEFAULT_HLL_SPARSE_MAX_BYTES; server.shutdown_asap = 0; server.repl_ping_slave_period = REDIS_REPL_PING_SLAVE_PERIOD; server.repl_timeout = REDIS_REPL_TIMEOUT; - server.repl_min_slaves_to_write = REDIS_DEFAULT_MIN_SLAVES_TO_WRITE; - server.repl_min_slaves_max_lag = REDIS_DEFAULT_MIN_SLAVES_MAX_LAG; + server.repl_min_slaves_to_write = CONFIG_DEFAULT_MIN_SLAVES_TO_WRITE; + server.repl_min_slaves_max_lag = CONFIG_DEFAULT_MIN_SLAVES_MAX_LAG; server.cluster_enabled = 0; server.cluster_node_timeout = REDIS_CLUSTER_DEFAULT_NODE_TIMEOUT; server.cluster_migration_barrier = REDIS_CLUSTER_DEFAULT_MIGRATION_BARRIER; server.cluster_slave_validity_factor = REDIS_CLUSTER_DEFAULT_SLAVE_VALIDITY; server.cluster_require_full_coverage = REDIS_CLUSTER_DEFAULT_REQUIRE_FULL_COVERAGE; - server.cluster_configfile = zstrdup(REDIS_DEFAULT_CLUSTER_CONFIG_FILE); + server.cluster_configfile = zstrdup(CONFIG_DEFAULT_CLUSTER_CONFIG_FILE); server.lua_caller = NULL; server.lua_time_limit = REDIS_LUA_TIME_LIMIT; server.lua_client = NULL; @@ -1514,22 +1514,22 @@ void initServerConfig(void) { server.repl_master_initial_offset = -1; server.repl_state = REDIS_REPL_NONE; server.repl_syncio_timeout = REDIS_REPL_SYNCIO_TIMEOUT; - server.repl_serve_stale_data = REDIS_DEFAULT_SLAVE_SERVE_STALE_DATA; - server.repl_slave_ro = REDIS_DEFAULT_SLAVE_READ_ONLY; + server.repl_serve_stale_data = CONFIG_DEFAULT_SLAVE_SERVE_STALE_DATA; + server.repl_slave_ro = CONFIG_DEFAULT_SLAVE_READ_ONLY; server.repl_down_since = 0; /* Never connected, repl is down since EVER. 
*/ - server.repl_disable_tcp_nodelay = REDIS_DEFAULT_REPL_DISABLE_TCP_NODELAY; - server.repl_diskless_sync = REDIS_DEFAULT_REPL_DISKLESS_SYNC; - server.repl_diskless_sync_delay = REDIS_DEFAULT_REPL_DISKLESS_SYNC_DELAY; - server.slave_priority = REDIS_DEFAULT_SLAVE_PRIORITY; + server.repl_disable_tcp_nodelay = CONFIG_DEFAULT_REPL_DISABLE_TCP_NODELAY; + server.repl_diskless_sync = CONFIG_DEFAULT_REPL_DISKLESS_SYNC; + server.repl_diskless_sync_delay = CONFIG_DEFAULT_REPL_DISKLESS_SYNC_DELAY; + server.slave_priority = CONFIG_DEFAULT_SLAVE_PRIORITY; server.master_repl_offset = 0; /* Replication partial resync backlog */ server.repl_backlog = NULL; - server.repl_backlog_size = REDIS_DEFAULT_REPL_BACKLOG_SIZE; + server.repl_backlog_size = CONFIG_DEFAULT_REPL_BACKLOG_SIZE; server.repl_backlog_histlen = 0; server.repl_backlog_idx = 0; server.repl_backlog_off = 0; - server.repl_backlog_time_limit = REDIS_DEFAULT_REPL_BACKLOG_TIME_LIMIT; + server.repl_backlog_time_limit = CONFIG_DEFAULT_REPL_BACKLOG_TIME_LIMIT; server.repl_no_slaves_since = time(NULL); /* Client output buffer limits */ @@ -1560,7 +1560,7 @@ void initServerConfig(void) { server.slowlog_max_len = REDIS_SLOWLOG_MAX_LEN; /* Latency monitor */ - server.latency_monitor_threshold = REDIS_DEFAULT_LATENCY_MONITOR_THRESHOLD; + server.latency_monitor_threshold = CONFIG_DEFAULT_LATENCY_MONITOR_THRESHOLD; /* Debugging */ server.assert_failed = ""; @@ -3455,7 +3455,7 @@ void linuxMemoryWarnings(void) { void createPidFile(void) { /* If pidfile requested, but no pidfile defined, use * default pidfile path */ - if (!server.pidfile) server.pidfile = zstrdup(REDIS_DEFAULT_PID_FILE); + if (!server.pidfile) server.pidfile = zstrdup(CONFIG_DEFAULT_PID_FILE); /* Try to write the pid file in a best-effort way. */ FILE *fp = fopen(server.pidfile,"w"); diff --git a/src/redis.h b/src/server.h similarity index 97% rename from src/redis.h rename to src/server.h index b64a7697a..7149bb4f5 100644 --- a/src/redis.h +++ b/src/server.h @@ -75,13 +75,13 @@ typedef long long mstime_t; /* millisecond time type. */ #define REDIS_ERR -1 /* Static server configuration */ -#define REDIS_DEFAULT_HZ 10 /* Time interrupt calls/sec. */ +#define CONFIG_DEFAULT_HZ 10 /* Time interrupt calls/sec. */ #define REDIS_MIN_HZ 1 #define REDIS_MAX_HZ 500 #define REDIS_SERVERPORT 6379 /* TCP port */ #define REDIS_TCP_BACKLOG 511 /* TCP listen backlog */ #define REDIS_MAXIDLETIME 0 /* default client timeout: infinite */ -#define REDIS_DEFAULT_DBNUM 16 +#define CONFIG_DEFAULT_DBNUM 16 #define REDIS_CONFIGLINE_MAX 1024 #define REDIS_DBCRON_DBS_PER_CALL 16 #define REDIS_MAX_WRITE_PER_EVENT (1024*64) @@ -94,48 +94,48 @@ typedef long long mstime_t; /* millisecond time type. 
*/ #define REDIS_AOF_REWRITE_ITEMS_PER_CMD 64 #define REDIS_SLOWLOG_LOG_SLOWER_THAN 10000 #define REDIS_SLOWLOG_MAX_LEN 128 -#define REDIS_MAX_CLIENTS 10000 +#define CONFIG_DEFAULT_MAX_CLIENTS 10000 #define REDIS_AUTHPASS_MAX_LEN 512 -#define REDIS_DEFAULT_SLAVE_PRIORITY 100 +#define CONFIG_DEFAULT_SLAVE_PRIORITY 100 #define REDIS_REPL_TIMEOUT 60 #define REDIS_REPL_PING_SLAVE_PERIOD 10 #define REDIS_RUN_ID_SIZE 40 #define REDIS_EOF_MARK_SIZE 40 -#define REDIS_DEFAULT_REPL_BACKLOG_SIZE (1024*1024) /* 1mb */ -#define REDIS_DEFAULT_REPL_BACKLOG_TIME_LIMIT (60*60) /* 1 hour */ +#define CONFIG_DEFAULT_REPL_BACKLOG_SIZE (1024*1024) /* 1mb */ +#define CONFIG_DEFAULT_REPL_BACKLOG_TIME_LIMIT (60*60) /* 1 hour */ #define REDIS_REPL_BACKLOG_MIN_SIZE (1024*16) /* 16k */ #define REDIS_BGSAVE_RETRY_DELAY 5 /* Wait a few secs before trying again. */ -#define REDIS_DEFAULT_PID_FILE "/var/run/redis.pid" -#define REDIS_DEFAULT_SYSLOG_IDENT "redis" -#define REDIS_DEFAULT_CLUSTER_CONFIG_FILE "nodes.conf" -#define REDIS_DEFAULT_DAEMONIZE 0 -#define REDIS_DEFAULT_UNIX_SOCKET_PERM 0 -#define REDIS_DEFAULT_TCP_KEEPALIVE 0 -#define REDIS_DEFAULT_LOGFILE "" -#define REDIS_DEFAULT_SYSLOG_ENABLED 0 -#define REDIS_DEFAULT_STOP_WRITES_ON_BGSAVE_ERROR 1 -#define REDIS_DEFAULT_RDB_COMPRESSION 1 -#define REDIS_DEFAULT_RDB_CHECKSUM 1 -#define REDIS_DEFAULT_RDB_FILENAME "dump.rdb" -#define REDIS_DEFAULT_REPL_DISKLESS_SYNC 0 -#define REDIS_DEFAULT_REPL_DISKLESS_SYNC_DELAY 5 -#define REDIS_DEFAULT_SLAVE_SERVE_STALE_DATA 1 -#define REDIS_DEFAULT_SLAVE_READ_ONLY 1 -#define REDIS_DEFAULT_REPL_DISABLE_TCP_NODELAY 0 -#define REDIS_DEFAULT_MAXMEMORY 0 -#define REDIS_DEFAULT_MAXMEMORY_SAMPLES 5 -#define REDIS_DEFAULT_AOF_FILENAME "appendonly.aof" -#define REDIS_DEFAULT_AOF_NO_FSYNC_ON_REWRITE 0 -#define REDIS_DEFAULT_AOF_LOAD_TRUNCATED 1 -#define REDIS_DEFAULT_ACTIVE_REHASHING 1 -#define REDIS_DEFAULT_AOF_REWRITE_INCREMENTAL_FSYNC 1 -#define REDIS_DEFAULT_MIN_SLAVES_TO_WRITE 0 -#define REDIS_DEFAULT_MIN_SLAVES_MAX_LAG 10 +#define CONFIG_DEFAULT_PID_FILE "/var/run/redis.pid" +#define CONFIG_DEFAULT_SYSLOG_IDENT "redis" +#define CONFIG_DEFAULT_CLUSTER_CONFIG_FILE "nodes.conf" +#define CONFIG_DEFAULT_DAEMONIZE 0 +#define CONFIG_DEFAULT_UNIX_SOCKET_PERM 0 +#define CONFIG_DEFAULT_TCP_KEEPALIVE 0 +#define CONFIG_DEFAULT_LOGFILE "" +#define CONFIG_DEFAULT_SYSLOG_ENABLED 0 +#define CONFIG_DEFAULT_STOP_WRITES_ON_BGSAVE_ERROR 1 +#define CONFIG_DEFAULT_RDB_COMPRESSION 1 +#define CONFIG_DEFAULT_RDB_CHECKSUM 1 +#define CONFIG_DEFAULT_RDB_FILENAME "dump.rdb" +#define CONFIG_DEFAULT_REPL_DISKLESS_SYNC 0 +#define CONFIG_DEFAULT_REPL_DISKLESS_SYNC_DELAY 5 +#define CONFIG_DEFAULT_SLAVE_SERVE_STALE_DATA 1 +#define CONFIG_DEFAULT_SLAVE_READ_ONLY 1 +#define CONFIG_DEFAULT_REPL_DISABLE_TCP_NODELAY 0 +#define CONFIG_DEFAULT_MAXMEMORY 0 +#define CONFIG_DEFAULT_MAXMEMORY_SAMPLES 5 +#define CONFIG_DEFAULT_AOF_FILENAME "appendonly.aof" +#define CONFIG_DEFAULT_AOF_NO_FSYNC_ON_REWRITE 0 +#define CONFIG_DEFAULT_AOF_LOAD_TRUNCATED 1 +#define CONFIG_DEFAULT_ACTIVE_REHASHING 1 +#define CONFIG_DEFAULT_AOF_REWRITE_INCREMENTAL_FSYNC 1 +#define CONFIG_DEFAULT_MIN_SLAVES_TO_WRITE 0 +#define CONFIG_DEFAULT_MIN_SLAVES_MAX_LAG 10 #define REDIS_IP_STR_LEN 46 /* INET6_ADDRSTRLEN is 46, but we need to be sure */ #define REDIS_PEER_ID_LEN (REDIS_IP_STR_LEN+32) /* Must be enough for ip:port */ #define REDIS_BINDADDR_MAX 16 #define REDIS_MIN_RESERVED_FDS 32 -#define REDIS_DEFAULT_LATENCY_MONITOR_THRESHOLD 0 +#define CONFIG_DEFAULT_LATENCY_MONITOR_THRESHOLD 0 #define 
ACTIVE_EXPIRE_CYCLE_LOOKUPS_PER_LOOP 20 /* Loopkups per loop. */ #define ACTIVE_EXPIRE_CYCLE_FAST_DURATION 1000 /* Microseconds */ @@ -312,7 +312,7 @@ typedef long long mstime_t; /* millisecond time type. */ #define REDIS_NOTICE 2 #define REDIS_WARNING 3 #define REDIS_LOG_RAW (1<<10) /* Modifier to log without timestamp */ -#define REDIS_DEFAULT_VERBOSITY REDIS_NOTICE +#define CONFIG_DEFAULT_VERBOSITY REDIS_NOTICE /* Supervision options */ #define REDIS_SUPERVISED_NONE 0 @@ -330,7 +330,7 @@ typedef long long mstime_t; /* millisecond time type. */ #define AOF_FSYNC_NO 0 #define AOF_FSYNC_ALWAYS 1 #define AOF_FSYNC_EVERYSEC 2 -#define REDIS_DEFAULT_AOF_FSYNC AOF_FSYNC_EVERYSEC +#define CONFIG_DEFAULT_AOF_FSYNC AOF_FSYNC_EVERYSEC /* Zip structure related defaults */ #define REDIS_HASH_MAX_ZIPLIST_ENTRIES 512 @@ -344,7 +344,7 @@ typedef long long mstime_t; /* millisecond time type. */ #define REDIS_LIST_COMPRESS_DEPTH 0 /* HyperLogLog defines */ -#define REDIS_DEFAULT_HLL_SPARSE_MAX_BYTES 3000 +#define CONFIG_DEFAULT_HLL_SPARSE_MAX_BYTES 3000 /* Sets operations codes */ #define REDIS_OP_UNION 0 @@ -358,7 +358,7 @@ typedef long long mstime_t; /* millisecond time type. */ #define REDIS_MAXMEMORY_ALLKEYS_LRU 3 #define REDIS_MAXMEMORY_ALLKEYS_RANDOM 4 #define REDIS_MAXMEMORY_NO_EVICTION 5 -#define REDIS_DEFAULT_MAXMEMORY_POLICY REDIS_MAXMEMORY_NO_EVICTION +#define CONFIG_DEFAULT_MAXMEMORY_POLICY REDIS_MAXMEMORY_NO_EVICTION /* Scripting */ #define REDIS_LUA_TIME_LIMIT 5000 /* milliseconds */ diff --git a/src/slowlog.c b/src/slowlog.c index ff6ccf472..a88967ae1 100644 --- a/src/slowlog.c +++ b/src/slowlog.c @@ -39,7 +39,7 @@ */ -#include "redis.h" +#include "server.h" #include "slowlog.h" /* Create a new slowlog entry. diff --git a/src/sort.c b/src/sort.c index 7da4de152..026f90002 100644 --- a/src/sort.c +++ b/src/sort.c @@ -29,7 +29,7 @@ */ -#include "redis.h" +#include "server.h" #include "pqsort.h" /* Partial qsort for SORT+LIMIT */ #include /* isnan() */ diff --git a/src/sparkline.c b/src/sparkline.c index 8e2764aee..0a986883d 100644 --- a/src/sparkline.c +++ b/src/sparkline.c @@ -30,7 +30,7 @@ * POSSIBILITY OF SUCH DAMAGE. */ -#include "redis.h" +#include "server.h" #include diff --git a/src/syncio.c b/src/syncio.c index ac2a4a373..f9504390b 100644 --- a/src/syncio.c +++ b/src/syncio.c @@ -28,7 +28,7 @@ * POSSIBILITY OF SUCH DAMAGE. */ -#include "redis.h" +#include "server.h" /* ----------------- Blocking sockets I/O with timeouts --------------------- */ diff --git a/src/t_hash.c b/src/t_hash.c index 2f3487f6a..ddfcf31a8 100644 --- a/src/t_hash.c +++ b/src/t_hash.c @@ -27,7 +27,7 @@ * POSSIBILITY OF SUCH DAMAGE. */ -#include "redis.h" +#include "server.h" #include /*----------------------------------------------------------------------------- diff --git a/src/t_list.c b/src/t_list.c index 232cb5c52..93f5f7058 100644 --- a/src/t_list.c +++ b/src/t_list.c @@ -27,7 +27,7 @@ * POSSIBILITY OF SUCH DAMAGE. */ -#include "redis.h" +#include "server.h" /*----------------------------------------------------------------------------- * List API diff --git a/src/t_set.c b/src/t_set.c index e8ce783c0..746d0f19c 100644 --- a/src/t_set.c +++ b/src/t_set.c @@ -27,7 +27,7 @@ * POSSIBILITY OF SUCH DAMAGE. */ -#include "redis.h" +#include "server.h" /*----------------------------------------------------------------------------- * Set Commands diff --git a/src/t_string.c b/src/t_string.c index 06c2e9ceb..c0e86d4df 100644 --- a/src/t_string.c +++ b/src/t_string.c @@ -27,7 +27,7 @@ * POSSIBILITY OF SUCH DAMAGE. 
*/ -#include "redis.h" +#include "server.h" #include /* isnan(), isinf() */ /*----------------------------------------------------------------------------- diff --git a/src/t_zset.c b/src/t_zset.c index 386258da1..3380832d9 100644 --- a/src/t_zset.c +++ b/src/t_zset.c @@ -49,7 +49,7 @@ * pointers being only at "level 1". This allows to traverse the list * from tail to head, useful for ZREVRANGE. */ -#include "redis.h" +#include "server.h" #include static int zslLexValueGteMin(robj *value, zlexrangespec *spec); From 424fe9afd9264991cddb502204276a244537c87f Mon Sep 17 00:00:00 2001 From: antirez Date: Sun, 26 Jul 2015 15:17:43 +0200 Subject: [PATCH 0322/1928] RDMF: redisLog -> serverLog. --- src/aof.c | 86 +++++++++++----------- src/bio.c | 10 +-- src/cluster.c | 140 ++++++++++++++++++------------------ src/config.c | 12 ++-- src/debug.c | 120 +++++++++++++++---------------- src/networking.c | 26 +++---- src/rdb.c | 54 +++++++------- src/redis-check-rdb.c | 14 ++-- src/replication.c | 162 +++++++++++++++++++++--------------------- src/scripting.c | 8 +-- src/sentinel.c | 24 +++---- src/server.c | 120 +++++++++++++++---------------- src/server.h | 12 ++-- src/t_hash.c | 2 +- 14 files changed, 395 insertions(+), 395 deletions(-) diff --git a/src/aof.c b/src/aof.c index 4a6ad6d5d..7d7870908 100644 --- a/src/aof.c +++ b/src/aof.c @@ -152,7 +152,7 @@ void aofRewriteBufferAppend(unsigned char *s, unsigned long len) { if (((numblocks+1) % 10) == 0) { int level = ((numblocks+1) % 100) == 0 ? REDIS_WARNING : REDIS_NOTICE; - redisLog(level,"Background AOF buffer size: %lu MB", + serverLog(level,"Background AOF buffer size: %lu MB", aofRewriteBufferSize()/(1024*1024)); } } @@ -216,7 +216,7 @@ void stopAppendOnly(void) { if (server.aof_child_pid != -1) { int statloc; - redisLog(REDIS_NOTICE,"Killing running AOF rewrite child: %ld", + serverLog(REDIS_NOTICE,"Killing running AOF rewrite child: %ld", (long) server.aof_child_pid); if (kill(server.aof_child_pid,SIGUSR1) != -1) wait3(&statloc,0,NULL); @@ -237,12 +237,12 @@ int startAppendOnly(void) { server.aof_fd = open(server.aof_filename,O_WRONLY|O_APPEND|O_CREAT,0644); redisAssert(server.aof_state == REDIS_AOF_OFF); if (server.aof_fd == -1) { - redisLog(REDIS_WARNING,"Redis needs to enable the AOF but can't open the append only file: %s",strerror(errno)); + serverLog(REDIS_WARNING,"Redis needs to enable the AOF but can't open the append only file: %s",strerror(errno)); return REDIS_ERR; } if (rewriteAppendOnlyFileBackground() == REDIS_ERR) { close(server.aof_fd); - redisLog(REDIS_WARNING,"Redis needs to enable the AOF but can't trigger a background AOF rewrite operation. Check the above logs for more info about the error."); + serverLog(REDIS_WARNING,"Redis needs to enable the AOF but can't trigger a background AOF rewrite operation. Check the above logs for more info about the error."); return REDIS_ERR; } /* We correctly switched on AOF, now wait for the rewrite to be complete @@ -298,7 +298,7 @@ void flushAppendOnlyFile(int force) { /* Otherwise fall trough, and go write since we can't wait * over two seconds. */ server.aof_delayed_fsync++; - redisLog(REDIS_NOTICE,"Asynchronous AOF fsync is taking too long (disk is busy?). Writing the AOF buffer without waiting for fsync to complete, this may slow down Redis."); + serverLog(REDIS_NOTICE,"Asynchronous AOF fsync is taking too long (disk is busy?). Writing the AOF buffer without waiting for fsync to complete, this may slow down Redis."); } } /* We want to perform a single write. 
This should be guaranteed atomic @@ -340,13 +340,13 @@ void flushAppendOnlyFile(int force) { /* Log the AOF write error and record the error code. */ if (nwritten == -1) { if (can_log) { - redisLog(REDIS_WARNING,"Error writing to the AOF file: %s", + serverLog(REDIS_WARNING,"Error writing to the AOF file: %s", strerror(errno)); server.aof_last_write_errno = errno; } } else { if (can_log) { - redisLog(REDIS_WARNING,"Short write while writing to " + serverLog(REDIS_WARNING,"Short write while writing to " "the AOF file: (nwritten=%lld, " "expected=%lld)", (long long)nwritten, @@ -355,7 +355,7 @@ void flushAppendOnlyFile(int force) { if (ftruncate(server.aof_fd, server.aof_current_size) == -1) { if (can_log) { - redisLog(REDIS_WARNING, "Could not remove short write " + serverLog(REDIS_WARNING, "Could not remove short write " "from the append-only file. Redis may refuse " "to load the AOF the next time it starts. " "ftruncate: %s", strerror(errno)); @@ -374,7 +374,7 @@ void flushAppendOnlyFile(int force) { * reply for the client is already in the output buffers, and we * have the contract with the user that on acknowledged write data * is synced on disk. */ - redisLog(REDIS_WARNING,"Can't recover from AOF write error when the AOF fsync policy is 'always'. Exiting..."); + serverLog(REDIS_WARNING,"Can't recover from AOF write error when the AOF fsync policy is 'always'. Exiting..."); exit(1); } else { /* Recover from failed write leaving data into the buffer. However @@ -394,7 +394,7 @@ void flushAppendOnlyFile(int force) { /* Successful write(2). If AOF was in error state, restore the * OK state and log the event. */ if (server.aof_last_write_status == REDIS_ERR) { - redisLog(REDIS_WARNING, + serverLog(REDIS_WARNING, "AOF write error looks solved, Redis can write again."); server.aof_last_write_status = REDIS_OK; } @@ -611,7 +611,7 @@ int loadAppendOnlyFile(char *filename) { } if (fp == NULL) { - redisLog(REDIS_WARNING,"Fatal error: can't open the append log file for reading: %s",strerror(errno)); + serverLog(REDIS_WARNING,"Fatal error: can't open the append log file for reading: %s",strerror(errno)); exit(1); } @@ -677,7 +677,7 @@ int loadAppendOnlyFile(char *filename) { /* Command lookup */ cmd = lookupCommand(argv[0]->ptr); if (!cmd) { - redisLog(REDIS_WARNING,"Unknown command '%s' reading the append only file", (char*)argv[0]->ptr); + serverLog(REDIS_WARNING,"Unknown command '%s' reading the append only file", (char*)argv[0]->ptr); exit(1); } @@ -710,40 +710,40 @@ loaded_ok: /* DB loaded, cleanup and return REDIS_OK to the caller. */ readerr: /* Read error. If feof(fp) is true, fall through to unexpected EOF. */ if (!feof(fp)) { - redisLog(REDIS_WARNING,"Unrecoverable error reading the append only file: %s", strerror(errno)); + serverLog(REDIS_WARNING,"Unrecoverable error reading the append only file: %s", strerror(errno)); exit(1); } uxeof: /* Unexpected AOF end of file. */ if (server.aof_load_truncated) { - redisLog(REDIS_WARNING,"!!! Warning: short read while loading the AOF file !!!"); - redisLog(REDIS_WARNING,"!!! Truncating the AOF at offset %llu !!!", + serverLog(REDIS_WARNING,"!!! Warning: short read while loading the AOF file !!!"); + serverLog(REDIS_WARNING,"!!! 
Truncating the AOF at offset %llu !!!", (unsigned long long) valid_up_to); if (valid_up_to == -1 || truncate(filename,valid_up_to) == -1) { if (valid_up_to == -1) { - redisLog(REDIS_WARNING,"Last valid command offset is invalid"); + serverLog(REDIS_WARNING,"Last valid command offset is invalid"); } else { - redisLog(REDIS_WARNING,"Error truncating the AOF file: %s", + serverLog(REDIS_WARNING,"Error truncating the AOF file: %s", strerror(errno)); } } else { /* Make sure the AOF file descriptor points to the end of the * file after the truncate call. */ if (server.aof_fd != -1 && lseek(server.aof_fd,0,SEEK_END) == -1) { - redisLog(REDIS_WARNING,"Can't seek the end of the AOF file: %s", + serverLog(REDIS_WARNING,"Can't seek the end of the AOF file: %s", strerror(errno)); } else { - redisLog(REDIS_WARNING, + serverLog(REDIS_WARNING, "AOF loaded anyway because aof-load-truncated is enabled"); goto loaded_ok; } } } - redisLog(REDIS_WARNING,"Unexpected end of file reading the append only file. You can: 1) Make a backup of your AOF file, then use ./redis-check-aof --fix . 2) Alternatively you can set the 'aof-load-truncated' configuration option to yes and restart the server."); + serverLog(REDIS_WARNING,"Unexpected end of file reading the append only file. You can: 1) Make a backup of your AOF file, then use ./redis-check-aof --fix . 2) Alternatively you can set the 'aof-load-truncated' configuration option to yes and restart the server."); exit(1); fmterr: /* Format error. */ - redisLog(REDIS_WARNING,"Bad file format reading the append only file: make a backup of your AOF file, then use ./redis-check-aof --fix "); + serverLog(REDIS_WARNING,"Bad file format reading the append only file: make a backup of your AOF file, then use ./redis-check-aof --fix "); exit(1); } @@ -1011,7 +1011,7 @@ int rewriteAppendOnlyFile(char *filename) { snprintf(tmpfile,256,"temp-rewriteaof-%d.aof", (int) getpid()); fp = fopen(tmpfile,"w"); if (!fp) { - redisLog(REDIS_WARNING, "Opening the temp file for AOF rewrite in rewriteAppendOnlyFile(): %s", strerror(errno)); + serverLog(REDIS_WARNING, "Opening the temp file for AOF rewrite in rewriteAppendOnlyFile(): %s", strerror(errno)); return REDIS_ERR; } @@ -1118,13 +1118,13 @@ int rewriteAppendOnlyFile(char *filename) { * the child will eventually get terminated. */ if (syncRead(server.aof_pipe_read_ack_from_parent,&byte,1,5000) != 1 || byte != '!') goto werr; - redisLog(REDIS_NOTICE,"Parent agreed to stop sending diffs. Finalizing AOF..."); + serverLog(REDIS_NOTICE,"Parent agreed to stop sending diffs. Finalizing AOF..."); /* Read the final diff if any. */ aofReadDiffFromParent(); /* Write the received diff to the file. */ - redisLog(REDIS_NOTICE, + serverLog(REDIS_NOTICE, "Concatenating %.2f MB of AOF diff received from parent.", (double) sdslen(server.aof_child_diff) / (1024*1024)); if (rioWrite(&aof,server.aof_child_diff,sdslen(server.aof_child_diff)) == 0) @@ -1138,15 +1138,15 @@ int rewriteAppendOnlyFile(char *filename) { /* Use RENAME to make sure the DB file is changed atomically only * if the generate DB file is ok. 
*/ if (rename(tmpfile,filename) == -1) { - redisLog(REDIS_WARNING,"Error moving temp append only file on the final destination: %s", strerror(errno)); + serverLog(REDIS_WARNING,"Error moving temp append only file on the final destination: %s", strerror(errno)); unlink(tmpfile); return REDIS_ERR; } - redisLog(REDIS_NOTICE,"SYNC append only file rewrite performed"); + serverLog(REDIS_NOTICE,"SYNC append only file rewrite performed"); return REDIS_OK; werr: - redisLog(REDIS_WARNING,"Write error writing append only file on disk: %s", strerror(errno)); + serverLog(REDIS_WARNING,"Write error writing append only file on disk: %s", strerror(errno)); fclose(fp); unlink(tmpfile); if (di) dictReleaseIterator(di); @@ -1167,14 +1167,14 @@ void aofChildPipeReadable(aeEventLoop *el, int fd, void *privdata, int mask) { REDIS_NOTUSED(mask); if (read(fd,&byte,1) == 1 && byte == '!') { - redisLog(REDIS_NOTICE,"AOF rewrite child asks to stop sending diffs."); + serverLog(REDIS_NOTICE,"AOF rewrite child asks to stop sending diffs."); server.aof_stop_sending_diff = 1; if (write(server.aof_pipe_write_ack_to_child,"!",1) != 1) { /* If we can't send the ack, inform the user, but don't try again * since in the other side the children will use a timeout if the * kernel can't buffer our write, or, the children was * terminated. */ - redisLog(REDIS_WARNING,"Can't send ACK to AOF child: %s", + serverLog(REDIS_WARNING,"Can't send ACK to AOF child: %s", strerror(errno)); } } @@ -1210,7 +1210,7 @@ int aofCreatePipes(void) { return REDIS_OK; error: - redisLog(REDIS_WARNING,"Error opening /setting AOF rewrite IPC pipes: %s", + serverLog(REDIS_WARNING,"Error opening /setting AOF rewrite IPC pipes: %s", strerror(errno)); for (j = 0; j < 6; j++) if(fds[j] != -1) close(fds[j]); return REDIS_ERR; @@ -1261,7 +1261,7 @@ int rewriteAppendOnlyFileBackground(void) { size_t private_dirty = zmalloc_get_private_dirty(); if (private_dirty) { - redisLog(REDIS_NOTICE, + serverLog(REDIS_NOTICE, "AOF rewrite: %zu MB of memory used by copy-on-write", private_dirty/(1024*1024)); } @@ -1275,12 +1275,12 @@ int rewriteAppendOnlyFileBackground(void) { server.stat_fork_rate = (double) zmalloc_used_memory() * 1000000 / server.stat_fork_time / (1024*1024*1024); /* GB per second. */ latencyAddSampleIfNeeded("fork",server.stat_fork_time/1000); if (childpid == -1) { - redisLog(REDIS_WARNING, + serverLog(REDIS_WARNING, "Can't rewrite append only file in background: fork: %s", strerror(errno)); return REDIS_ERR; } - redisLog(REDIS_NOTICE, + serverLog(REDIS_NOTICE, "Background append only file rewriting started by pid %d",childpid); server.aof_rewrite_scheduled = 0; server.aof_rewrite_time_start = time(NULL); @@ -1327,7 +1327,7 @@ void aofUpdateCurrentSize(void) { latencyStartMonitor(latency); if (redis_fstat(server.aof_fd,&sb) == -1) { - redisLog(REDIS_WARNING,"Unable to obtain the AOF file length. stat: %s", + serverLog(REDIS_WARNING,"Unable to obtain the AOF file length. 
stat: %s", strerror(errno)); } else { server.aof_current_size = sb.st_size; @@ -1345,7 +1345,7 @@ void backgroundRewriteDoneHandler(int exitcode, int bysignal) { long long now = ustime(); mstime_t latency; - redisLog(REDIS_NOTICE, + serverLog(REDIS_NOTICE, "Background AOF rewrite terminated with success"); /* Flush the differences accumulated by the parent to the @@ -1355,13 +1355,13 @@ void backgroundRewriteDoneHandler(int exitcode, int bysignal) { (int)server.aof_child_pid); newfd = open(tmpfile,O_WRONLY|O_APPEND); if (newfd == -1) { - redisLog(REDIS_WARNING, + serverLog(REDIS_WARNING, "Unable to open the temporary AOF produced by the child: %s", strerror(errno)); goto cleanup; } if (aofRewriteBufferWrite(newfd) == -1) { - redisLog(REDIS_WARNING, + serverLog(REDIS_WARNING, "Error trying to flush the parent diff to the rewritten AOF: %s", strerror(errno)); close(newfd); goto cleanup; @@ -1369,7 +1369,7 @@ void backgroundRewriteDoneHandler(int exitcode, int bysignal) { latencyEndMonitor(latency); latencyAddSampleIfNeeded("aof-rewrite-diff-write",latency); - redisLog(REDIS_NOTICE, + serverLog(REDIS_NOTICE, "Residual parent diff successfully flushed to the rewritten AOF (%.2f MB)", (double) aofRewriteBufferSize() / (1024*1024)); /* The only remaining thing to do is to rename the temporary file to @@ -1415,7 +1415,7 @@ void backgroundRewriteDoneHandler(int exitcode, int bysignal) { * it exists, because we reference it with "oldfd". */ latencyStartMonitor(latency); if (rename(tmpfile,server.aof_filename) == -1) { - redisLog(REDIS_WARNING, + serverLog(REDIS_WARNING, "Error trying to rename the temporary AOF file: %s", strerror(errno)); close(newfd); if (oldfd != -1) close(oldfd); @@ -1448,7 +1448,7 @@ void backgroundRewriteDoneHandler(int exitcode, int bysignal) { server.aof_lastbgrewrite_status = REDIS_OK; - redisLog(REDIS_NOTICE, "Background AOF rewrite finished successfully"); + serverLog(REDIS_NOTICE, "Background AOF rewrite finished successfully"); /* Change state from WAIT_REWRITE to ON if needed */ if (server.aof_state == REDIS_AOF_WAIT_REWRITE) server.aof_state = REDIS_AOF_ON; @@ -1456,17 +1456,17 @@ void backgroundRewriteDoneHandler(int exitcode, int bysignal) { /* Asynchronously close the overwritten AOF. */ if (oldfd != -1) bioCreateBackgroundJob(REDIS_BIO_CLOSE_FILE,(void*)(long)oldfd,NULL,NULL); - redisLog(REDIS_VERBOSE, + serverLog(REDIS_VERBOSE, "Background AOF rewrite signal handler took %lldus", ustime()-now); } else if (!bysignal && exitcode != 0) { server.aof_lastbgrewrite_status = REDIS_ERR; - redisLog(REDIS_WARNING, + serverLog(REDIS_WARNING, "Background AOF rewrite terminated with error"); } else { server.aof_lastbgrewrite_status = REDIS_ERR; - redisLog(REDIS_WARNING, + serverLog(REDIS_WARNING, "Background AOF rewrite terminated by signal %d", bysignal); } diff --git a/src/bio.c b/src/bio.c index a9bccff18..e76c8f1e6 100644 --- a/src/bio.c +++ b/src/bio.c @@ -116,7 +116,7 @@ void bioInit(void) { for (j = 0; j < REDIS_BIO_NUM_OPS; j++) { void *arg = (void*)(unsigned long) j; if (pthread_create(&thread,&attr,bioProcessBackgroundJobs,arg) != 0) { - redisLog(REDIS_WARNING,"Fatal: Can't initialize Background Jobs."); + serverLog(REDIS_WARNING,"Fatal: Can't initialize Background Jobs."); exit(1); } bio_threads[j] = thread; @@ -144,7 +144,7 @@ void *bioProcessBackgroundJobs(void *arg) { /* Check that the type is within the right interval. 
*/ if (type >= REDIS_BIO_NUM_OPS) { - redisLog(REDIS_WARNING, + serverLog(REDIS_WARNING, "Warning: bio thread started with wrong type %lu",type); return NULL; } @@ -160,7 +160,7 @@ void *bioProcessBackgroundJobs(void *arg) { sigemptyset(&sigset); sigaddset(&sigset, SIGALRM); if (pthread_sigmask(SIG_BLOCK, &sigset, NULL)) - redisLog(REDIS_WARNING, + serverLog(REDIS_WARNING, "Warning: can't mask SIGALRM in bio.c thread: %s", strerror(errno)); while(1) { @@ -215,11 +215,11 @@ void bioKillThreads(void) { for (j = 0; j < REDIS_BIO_NUM_OPS; j++) { if (pthread_cancel(bio_threads[j]) == 0) { if ((err = pthread_join(bio_threads[j],NULL)) != 0) { - redisLog(REDIS_WARNING, + serverLog(REDIS_WARNING, "Bio thread for job type #%d can be joined: %s", j, strerror(err)); } else { - redisLog(REDIS_WARNING, + serverLog(REDIS_WARNING, "Bio thread for job type #%d terminated",j); } } diff --git a/src/cluster.c b/src/cluster.c index f7f3da8a3..8fd5c9328 100644 --- a/src/cluster.c +++ b/src/cluster.c @@ -97,7 +97,7 @@ int clusterLoadConfig(char *filename) { if (errno == ENOENT) { return REDIS_ERR; } else { - redisLog(REDIS_WARNING, + serverLog(REDIS_WARNING, "Loading the cluster node config from %s: %s", filename, strerror(errno)); exit(1); @@ -146,7 +146,7 @@ int clusterLoadConfig(char *filename) { server.cluster->lastVoteEpoch = strtoull(argv[j+1],NULL,10); } else { - redisLog(REDIS_WARNING, + serverLog(REDIS_WARNING, "Skipping unknown cluster config variable '%s'", argv[j]); } @@ -264,7 +264,7 @@ int clusterLoadConfig(char *filename) { zfree(line); fclose(fp); - redisLog(REDIS_NOTICE,"Node configuration loaded, I'm %.40s", myself->name); + serverLog(REDIS_NOTICE,"Node configuration loaded, I'm %.40s", myself->name); /* Something that should never happen: currentEpoch smaller than * the max epoch found in the nodes configuration. However we handle this @@ -275,7 +275,7 @@ int clusterLoadConfig(char *filename) { return REDIS_OK; fmterr: - redisLog(REDIS_WARNING, + serverLog(REDIS_WARNING, "Unrecoverable error: corrupted cluster config file."); zfree(line); if (fp) fclose(fp); @@ -343,7 +343,7 @@ err: void clusterSaveConfigOrDie(int do_fsync) { if (clusterSaveConfig(do_fsync) == -1) { - redisLog(REDIS_WARNING,"Fatal: can't update cluster config file."); + serverLog(REDIS_WARNING,"Fatal: can't update cluster config file."); exit(1); } } @@ -368,7 +368,7 @@ int clusterLockConfig(char *filename) { * processes. */ int fd = open(filename,O_WRONLY|O_CREAT,0644); if (fd == -1) { - redisLog(REDIS_WARNING, + serverLog(REDIS_WARNING, "Can't open %s in order to acquire a lock: %s", filename, strerror(errno)); return REDIS_ERR; @@ -376,13 +376,13 @@ int clusterLockConfig(char *filename) { if (flock(fd,LOCK_EX|LOCK_NB) == -1) { if (errno == EWOULDBLOCK) { - redisLog(REDIS_WARNING, + serverLog(REDIS_WARNING, "Sorry, the cluster configuration file %s is already used " "by a different Redis Cluster node. Please make sure that " "different nodes use different cluster configuration " "files.", filename); } else { - redisLog(REDIS_WARNING, + serverLog(REDIS_WARNING, "Impossible to lock %s: %s", filename, strerror(errno)); } close(fd); @@ -429,7 +429,7 @@ void clusterInit(void) { * by the createClusterNode() function. 
*/ myself = server.cluster->myself = createClusterNode(NULL,REDIS_NODE_MYSELF|REDIS_NODE_MASTER); - redisLog(REDIS_NOTICE,"No cluster configuration found, I'm %.40s", + serverLog(REDIS_NOTICE,"No cluster configuration found, I'm %.40s", myself->name); clusterAddNode(myself); saveconf = 1; @@ -443,7 +443,7 @@ void clusterInit(void) { * The other handshake port check is triggered too late to stop * us from trying to use a too-high cluster port number. */ if (server.port > (65535-REDIS_CLUSTER_PORT_INCR)) { - redisLog(REDIS_WARNING, "Redis port number too high. " + serverLog(REDIS_WARNING, "Redis port number too high. " "Cluster communication port is 10,000 port " "numbers higher than your Redis port. " "Your Redis port number must be " @@ -522,7 +522,7 @@ void clusterReset(int hard) { server.cluster->currentEpoch = 0; server.cluster->lastVoteEpoch = 0; myself->configEpoch = 0; - redisLog(REDIS_WARNING, "configEpoch set to 0 via CLUSTER RESET HARD"); + serverLog(REDIS_WARNING, "configEpoch set to 0 via CLUSTER RESET HARD"); /* To change the Node ID we need to remove the old name from the * nodes table, change the ID, and re-add back with new name. */ @@ -587,7 +587,7 @@ void clusterAcceptHandler(aeEventLoop *el, int fd, void *privdata, int mask) { cfd = anetTcpAccept(server.neterr, fd, cip, sizeof(cip), &cport); if (cfd == ANET_ERR) { if (errno != EWOULDBLOCK) - redisLog(REDIS_VERBOSE, + serverLog(REDIS_VERBOSE, "Error accepting cluster node: %s", server.neterr); return; } @@ -595,7 +595,7 @@ void clusterAcceptHandler(aeEventLoop *el, int fd, void *privdata, int mask) { anetEnableTcpNoDelay(NULL,cfd); /* Use non-blocking I/O for cluster messages. */ - redisLog(REDIS_VERBOSE,"Accepted cluster node %s:%d", cip, cport); + serverLog(REDIS_VERBOSE,"Accepted cluster node %s:%d", cip, cport); /* Create a link object we use to handle the connection. * It gets passed to the readable handler when data is available. * Initiallly the link->node pointer is set to NULL as we don't know @@ -911,7 +911,7 @@ void clusterRenameNode(clusterNode *node, char *newname) { int retval; sds s = sdsnewlen(node->name, REDIS_CLUSTER_NAMELEN); - redisLog(REDIS_DEBUG,"Renaming node %.40s into %.40s", + serverLog(REDIS_DEBUG,"Renaming node %.40s into %.40s", node->name, newname); retval = dictDelete(server.cluster->nodes, s); sdsfree(s); @@ -980,7 +980,7 @@ int clusterBumpConfigEpochWithoutConsensus(void) { myself->configEpoch = server.cluster->currentEpoch; clusterDoBeforeSleep(CLUSTER_TODO_SAVE_CONFIG| CLUSTER_TODO_FSYNC_CONFIG); - redisLog(REDIS_WARNING, + serverLog(REDIS_WARNING, "New configEpoch set to %llu", (unsigned long long) myself->configEpoch); return REDIS_OK; @@ -1045,7 +1045,7 @@ void clusterHandleConfigEpochCollision(clusterNode *sender) { server.cluster->currentEpoch++; myself->configEpoch = server.cluster->currentEpoch; clusterSaveConfigOrDie(1); - redisLog(REDIS_VERBOSE, + serverLog(REDIS_VERBOSE, "WARNING: configEpoch collision with node %.40s." " configEpoch set to %llu", sender->name, @@ -1163,7 +1163,7 @@ void markNodeAsFailingIfNeeded(clusterNode *node) { if (nodeIsMaster(myself)) failures++; if (failures < needed_quorum) return; /* No weak agreement from masters. */ - redisLog(REDIS_NOTICE, + serverLog(REDIS_NOTICE, "Marking node %.40s as failing (quorum reached).", node->name); /* Mark the node as failing. */ @@ -1188,7 +1188,7 @@ void clearNodeFailureIfNeeded(clusterNode *node) { /* For slaves we always clear the FAIL flag if we can contact the * node again. 
*/ if (nodeIsSlave(node) || node->numslots == 0) { - redisLog(REDIS_NOTICE, + serverLog(REDIS_NOTICE, "Clear FAIL state for node %.40s: %s is reachable again.", node->name, nodeIsSlave(node) ? "slave" : "master without slots"); @@ -1204,7 +1204,7 @@ void clearNodeFailureIfNeeded(clusterNode *node) { (now - node->fail_time) > (server.cluster_node_timeout * REDIS_CLUSTER_FAIL_UNDO_TIME_MULT)) { - redisLog(REDIS_NOTICE, + serverLog(REDIS_NOTICE, "Clear FAIL state for node %.40s: is reachable again and nobody is serving its slots after some time.", node->name); node->flags &= ~REDIS_NODE_FAIL; @@ -1304,7 +1304,7 @@ void clusterProcessGossipSection(clusterMsg *hdr, clusterLink *link) { sds ci; ci = representRedisNodeFlags(sdsempty(), flags); - redisLog(REDIS_DEBUG,"GOSSIP %.40s %s:%d %s", + serverLog(REDIS_DEBUG,"GOSSIP %.40s %s:%d %s", g->nodename, g->ip, ntohs(g->port), @@ -1319,14 +1319,14 @@ void clusterProcessGossipSection(clusterMsg *hdr, clusterLink *link) { if (sender && nodeIsMaster(sender) && node != myself) { if (flags & (REDIS_NODE_FAIL|REDIS_NODE_PFAIL)) { if (clusterNodeAddFailureReport(node,sender)) { - redisLog(REDIS_VERBOSE, + serverLog(REDIS_VERBOSE, "Node %.40s reported node %.40s as not reachable.", sender->name, node->name); } markNodeAsFailingIfNeeded(node); } else { if (clusterNodeDelFailureReport(node,sender)) { - redisLog(REDIS_VERBOSE, + serverLog(REDIS_VERBOSE, "Node %.40s reported node %.40s is back online.", sender->name, node->name); } @@ -1397,7 +1397,7 @@ int nodeUpdateAddressIfNeeded(clusterNode *node, clusterLink *link, int port) { node->port = port; if (node->link) freeClusterLink(node->link); node->flags &= ~REDIS_NODE_NOADDR; - redisLog(REDIS_WARNING,"Address updated for node %.40s, now %s:%d", + serverLog(REDIS_WARNING,"Address updated for node %.40s, now %s:%d", node->name, node->ip, node->port); /* Check if this is our master and we have to change the @@ -1453,7 +1453,7 @@ void clusterUpdateSlotsConfigWith(clusterNode *sender, uint64_t senderConfigEpoc curmaster = nodeIsMaster(myself) ? myself : myself->slaveof; if (sender == myself) { - redisLog(REDIS_WARNING,"Discarding UPDATE message about myself."); + serverLog(REDIS_WARNING,"Discarding UPDATE message about myself."); return; } @@ -1504,7 +1504,7 @@ void clusterUpdateSlotsConfigWith(clusterNode *sender, uint64_t senderConfigEpoc * 2) We are a slave and our master is left without slots. We need * to replicate to the new slots owner. */ if (newmaster && curmaster->numslots == 0) { - redisLog(REDIS_WARNING, + serverLog(REDIS_WARNING, "Configuration change detected. Reconfiguring myself " "as a replica of %.40s", sender->name); clusterSetMaster(sender); @@ -1542,7 +1542,7 @@ int clusterProcessPacket(clusterLink *link) { clusterNode *sender; server.cluster->stats_bus_messages_received++; - redisLog(REDIS_DEBUG,"--- Processing packet of type %d, %lu bytes", + serverLog(REDIS_DEBUG,"--- Processing packet of type %d, %lu bytes", type, (unsigned long) totlen); /* Perform sanity checks */ @@ -1612,7 +1612,7 @@ int clusterProcessPacket(clusterLink *link) { server.cluster->mf_master_offset == 0) { server.cluster->mf_master_offset = sender->repl_offset; - redisLog(REDIS_WARNING, + serverLog(REDIS_WARNING, "Received replication offset for paused " "master manual failover: %lld", server.cluster->mf_master_offset); @@ -1621,7 +1621,7 @@ int clusterProcessPacket(clusterLink *link) { /* Initial processing of PING and MEET requests replying with a PONG. 
*/ if (type == CLUSTERMSG_TYPE_PING || type == CLUSTERMSG_TYPE_MEET) { - redisLog(REDIS_DEBUG,"Ping packet received: %p", (void*)link->node); + serverLog(REDIS_DEBUG,"Ping packet received: %p", (void*)link->node); /* We use incoming MEET messages in order to set the address * for 'myself', since only other cluster nodes will send us @@ -1641,7 +1641,7 @@ int clusterProcessPacket(clusterLink *link) { strcmp(ip,myself->ip)) { memcpy(myself->ip,ip,REDIS_IP_STR_LEN); - redisLog(REDIS_WARNING,"IP address for this node updated to %s", + serverLog(REDIS_WARNING,"IP address for this node updated to %s", myself->ip); clusterDoBeforeSleep(CLUSTER_TODO_SAVE_CONFIG); } @@ -1675,7 +1675,7 @@ int clusterProcessPacket(clusterLink *link) { if (type == CLUSTERMSG_TYPE_PING || type == CLUSTERMSG_TYPE_PONG || type == CLUSTERMSG_TYPE_MEET) { - redisLog(REDIS_DEBUG,"%s packet received: %p", + serverLog(REDIS_DEBUG,"%s packet received: %p", type == CLUSTERMSG_TYPE_PING ? "ping" : "pong", (void*)link->node); if (link->node) { @@ -1683,7 +1683,7 @@ int clusterProcessPacket(clusterLink *link) { /* If we already have this node, try to change the * IP/port of the node with the new one. */ if (sender) { - redisLog(REDIS_VERBOSE, + serverLog(REDIS_VERBOSE, "Handshake: we already know node %.40s, " "updating the address if needed.", sender->name); if (nodeUpdateAddressIfNeeded(sender,link,ntohs(hdr->port))) @@ -1700,7 +1700,7 @@ int clusterProcessPacket(clusterLink *link) { /* First thing to do is replacing the random name with the * right node name if this was a handshake stage. */ clusterRenameNode(link->node, hdr->sender); - redisLog(REDIS_DEBUG,"Handshake with node %.40s completed.", + serverLog(REDIS_DEBUG,"Handshake with node %.40s completed.", link->node->name); link->node->flags &= ~REDIS_NODE_HANDSHAKE; link->node->flags |= flags&(REDIS_NODE_MASTER|REDIS_NODE_SLAVE); @@ -1711,7 +1711,7 @@ int clusterProcessPacket(clusterLink *link) { /* If the reply has a non matching node ID we * disconnect this node and set it as not having an associated * address. 
*/ - redisLog(REDIS_DEBUG,"PONG contains mismatching sender ID"); + serverLog(REDIS_DEBUG,"PONG contains mismatching sender ID"); link->node->flags |= REDIS_NODE_NOADDR; link->node->ip[0] = '\0'; link->node->port = 0; @@ -1842,7 +1842,7 @@ int clusterProcessPacket(clusterLink *link) { if (server.cluster->slots[j]->configEpoch > senderConfigEpoch) { - redisLog(REDIS_VERBOSE, + serverLog(REDIS_VERBOSE, "Node %.40s has old slots configuration, sending " "an UPDATE message about %.40s", sender->name, server.cluster->slots[j]->name); @@ -1877,7 +1877,7 @@ int clusterProcessPacket(clusterLink *link) { if (failing && !(failing->flags & (REDIS_NODE_FAIL|REDIS_NODE_MYSELF))) { - redisLog(REDIS_NOTICE, + serverLog(REDIS_NOTICE, "FAIL message received from %.40s about %.40s", hdr->sender, hdr->data.fail.about.nodename); failing->flags |= REDIS_NODE_FAIL; @@ -1887,7 +1887,7 @@ int clusterProcessPacket(clusterLink *link) { CLUSTER_TODO_UPDATE_STATE); } } else { - redisLog(REDIS_NOTICE, + serverLog(REDIS_NOTICE, "Ignoring FAIL message from unknown node %.40s about %.40s", hdr->sender, hdr->data.fail.about.nodename); } @@ -1937,7 +1937,7 @@ int clusterProcessPacket(clusterLink *link) { server.cluster->mf_end = mstime() + REDIS_CLUSTER_MF_TIMEOUT; server.cluster->mf_slave = sender; pauseClients(mstime()+(REDIS_CLUSTER_MF_TIMEOUT*2)); - redisLog(REDIS_WARNING,"Manual failover requested by slave %.40s.", + serverLog(REDIS_WARNING,"Manual failover requested by slave %.40s.", sender->name); } else if (type == CLUSTERMSG_TYPE_UPDATE) { clusterNode *n; /* The node the update is about. */ @@ -1962,7 +1962,7 @@ int clusterProcessPacket(clusterLink *link) { clusterUpdateSlotsConfigWith(n,reportedConfigEpoch, hdr->data.update.nodecfg.slots); } else { - redisLog(REDIS_WARNING,"Received unknown packet type: %d", type); + serverLog(REDIS_WARNING,"Received unknown packet type: %d", type); } return 1; } @@ -1988,7 +1988,7 @@ void clusterWriteHandler(aeEventLoop *el, int fd, void *privdata, int mask) { nwritten = write(fd, link->sndbuf, sdslen(link->sndbuf)); if (nwritten <= 0) { - redisLog(REDIS_DEBUG,"I/O error writing to node link: %s", + serverLog(REDIS_DEBUG,"I/O error writing to node link: %s", strerror(errno)); handleLinkIOError(link); return; @@ -2025,7 +2025,7 @@ void clusterReadHandler(aeEventLoop *el, int fd, void *privdata, int mask) { if (memcmp(hdr->sig,"RCmb",4) != 0 || ntohl(hdr->totlen) < CLUSTERMSG_MIN_LEN) { - redisLog(REDIS_WARNING, + serverLog(REDIS_WARNING, "Bad message length or signature received " "from Cluster bus."); handleLinkIOError(link); @@ -2041,7 +2041,7 @@ void clusterReadHandler(aeEventLoop *el, int fd, void *privdata, int mask) { if (nread <= 0) { /* I/O error... */ - redisLog(REDIS_DEBUG,"I/O error reading from node link: %s", + serverLog(REDIS_DEBUG,"I/O error reading from node link: %s", (nread == 0) ? "connection closed" : strerror(errno)); handleLinkIOError(link); return; @@ -2471,7 +2471,7 @@ void clusterSendFailoverAuthIfNeeded(clusterNode *node, clusterMsg *request) { * our currentEpoch was updated as a side effect of receiving this * request, if the request epoch was greater. */ if (requestCurrentEpoch < server.cluster->currentEpoch) { - redisLog(REDIS_WARNING, + serverLog(REDIS_WARNING, "Failover auth denied to %.40s: reqEpoch (%llu) < curEpoch(%llu)", node->name, (unsigned long long) requestCurrentEpoch, @@ -2481,7 +2481,7 @@ void clusterSendFailoverAuthIfNeeded(clusterNode *node, clusterMsg *request) { /* I already voted for this epoch? Return ASAP. 
*/ if (server.cluster->lastVoteEpoch == server.cluster->currentEpoch) { - redisLog(REDIS_WARNING, + serverLog(REDIS_WARNING, "Failover auth denied to %.40s: already voted for epoch %llu", node->name, (unsigned long long) server.cluster->currentEpoch); @@ -2495,15 +2495,15 @@ void clusterSendFailoverAuthIfNeeded(clusterNode *node, clusterMsg *request) { (!nodeFailed(master) && !force_ack)) { if (nodeIsMaster(node)) { - redisLog(REDIS_WARNING, + serverLog(REDIS_WARNING, "Failover auth denied to %.40s: it is a master node", node->name); } else if (master == NULL) { - redisLog(REDIS_WARNING, + serverLog(REDIS_WARNING, "Failover auth denied to %.40s: I don't know its master", node->name); } else if (!nodeFailed(master)) { - redisLog(REDIS_WARNING, + serverLog(REDIS_WARNING, "Failover auth denied to %.40s: its master is up", node->name); } @@ -2515,7 +2515,7 @@ void clusterSendFailoverAuthIfNeeded(clusterNode *node, clusterMsg *request) { * of the algorithm but makes the base case more linear. */ if (mstime() - node->slaveof->voted_time < server.cluster_node_timeout * 2) { - redisLog(REDIS_WARNING, + serverLog(REDIS_WARNING, "Failover auth denied to %.40s: " "can't vote about this master before %lld milliseconds", node->name, @@ -2537,7 +2537,7 @@ void clusterSendFailoverAuthIfNeeded(clusterNode *node, clusterMsg *request) { /* If we reached this point we found a slot that in our current slots * is served by a master with a greater configEpoch than the one claimed * by the slave requesting our vote. Refuse to vote for this slave. */ - redisLog(REDIS_WARNING, + serverLog(REDIS_WARNING, "Failover auth denied to %.40s: " "slot %d epoch (%llu) > reqEpoch (%llu)", node->name, j, @@ -2550,7 +2550,7 @@ void clusterSendFailoverAuthIfNeeded(clusterNode *node, clusterMsg *request) { clusterSendFailoverAuth(node); server.cluster->lastVoteEpoch = server.cluster->currentEpoch; node->slaveof->voted_time = mstime(); - redisLog(REDIS_WARNING, "Failover auth granted to %.40s for epoch %llu", + serverLog(REDIS_WARNING, "Failover auth granted to %.40s for epoch %llu", node->name, (unsigned long long) server.cluster->currentEpoch); } @@ -2641,7 +2641,7 @@ void clusterLogCantFailover(int reason) { break; } lastlog_time = time(NULL); - redisLog(REDIS_WARNING,"Currently unable to failover: %s", msg); + serverLog(REDIS_WARNING,"Currently unable to failover: %s", msg); } /* This function implements the final part of automatic and manual failovers, @@ -2774,7 +2774,7 @@ void clusterHandleSlaveFailover(void) { server.cluster->failover_auth_time = mstime(); server.cluster->failover_auth_rank = 0; } - redisLog(REDIS_WARNING, + serverLog(REDIS_WARNING, "Start of election delayed for %lld milliseconds " "(rank #%d, offset %lld).", server.cluster->failover_auth_time - mstime(), @@ -2801,7 +2801,7 @@ void clusterHandleSlaveFailover(void) { (newrank - server.cluster->failover_auth_rank) * 1000; server.cluster->failover_auth_time += added_delay; server.cluster->failover_auth_rank = newrank; - redisLog(REDIS_WARNING, + serverLog(REDIS_WARNING, "Slave rank updated to #%d, added %lld milliseconds of delay.", newrank, added_delay); } @@ -2823,7 +2823,7 @@ void clusterHandleSlaveFailover(void) { if (server.cluster->failover_auth_sent == 0) { server.cluster->currentEpoch++; server.cluster->failover_auth_epoch = server.cluster->currentEpoch; - redisLog(REDIS_WARNING,"Starting a failover election for epoch %llu.", + serverLog(REDIS_WARNING,"Starting a failover election for epoch %llu.", (unsigned long long) server.cluster->currentEpoch); 
clusterRequestFailoverAuth(); server.cluster->failover_auth_sent = 1; @@ -2837,13 +2837,13 @@ void clusterHandleSlaveFailover(void) { if (server.cluster->failover_auth_count >= needed_quorum) { /* We have the quorum, we can finally failover the master. */ - redisLog(REDIS_WARNING, + serverLog(REDIS_WARNING, "Failover election won: I'm the new master."); /* Update my configEpoch to the epoch of the election. */ if (myself->configEpoch < server.cluster->failover_auth_epoch) { myself->configEpoch = server.cluster->failover_auth_epoch; - redisLog(REDIS_WARNING, + serverLog(REDIS_WARNING, "configEpoch set to %llu after successful failover", (unsigned long long) myself->configEpoch); } @@ -2941,7 +2941,7 @@ void clusterHandleSlaveMigration(int max_slaves) { /* Step 4: perform the migration if there is a target, and if I'm the * candidate. */ if (target && candidate == myself) { - redisLog(REDIS_WARNING,"Migrating to orphaned master %.40s", + serverLog(REDIS_WARNING,"Migrating to orphaned master %.40s", target->name); clusterSetMaster(target); } @@ -2995,7 +2995,7 @@ void resetManualFailover(void) { /* If a manual failover timed out, abort it. */ void manualFailoverCheckTimeout(void) { if (server.cluster->mf_end && server.cluster->mf_end < mstime()) { - redisLog(REDIS_WARNING,"Manual failover timed out."); + serverLog(REDIS_WARNING,"Manual failover timed out."); resetManualFailover(); } } @@ -3016,7 +3016,7 @@ void clusterHandleManualFailover(void) { /* Our replication offset matches the master replication offset * announced after clients were paused. We can start the failover. */ server.cluster->mf_can_start = 1; - redisLog(REDIS_WARNING, + serverLog(REDIS_WARNING, "All master replication stream processed, " "manual failover can start."); } @@ -3076,7 +3076,7 @@ void clusterCron(void) { * so we claim we actually sent a ping now (that will * be really sent as soon as the link is obtained). */ if (node->ping_sent == 0) node->ping_sent = mstime(); - redisLog(REDIS_DEBUG, "Unable to connect to " + serverLog(REDIS_DEBUG, "Unable to connect to " "Cluster Node [%s]:%d -> %s", node->ip, node->port+REDIS_CLUSTER_PORT_INCR, server.neterr); @@ -3109,7 +3109,7 @@ void clusterCron(void) { * normal PING packets. */ node->flags &= ~REDIS_NODE_MEET; - redisLog(REDIS_DEBUG,"Connecting with Node %.40s at %s:%d", + serverLog(REDIS_DEBUG,"Connecting with Node %.40s at %s:%d", node->name, node->ip, node->port+REDIS_CLUSTER_PORT_INCR); } } @@ -3136,7 +3136,7 @@ void clusterCron(void) { } } if (min_pong_node) { - redisLog(REDIS_DEBUG,"Pinging node %.40s", min_pong_node->name); + serverLog(REDIS_DEBUG,"Pinging node %.40s", min_pong_node->name); clusterSendPing(min_pong_node->link, CLUSTERMSG_TYPE_PING); } } @@ -3225,7 +3225,7 @@ void clusterCron(void) { /* Timeout reached. Set the node as possibly failing if it is * not already in this state. */ if (!(node->flags & (REDIS_NODE_PFAIL|REDIS_NODE_FAIL))) { - redisLog(REDIS_DEBUG,"*** NODE %.40s possibly failing", + serverLog(REDIS_DEBUG,"*** NODE %.40s possibly failing", node->name); node->flags |= REDIS_NODE_PFAIL; update_state = 1; @@ -3488,7 +3488,7 @@ void clusterUpdateState(void) { } /* Change the state and log the event. */ - redisLog(REDIS_WARNING,"Cluster state changed: %s", + serverLog(REDIS_WARNING,"Cluster state changed: %s", new_state == REDIS_CLUSTER_OK ? "ok" : "fail"); server.cluster->state = new_state; } @@ -3546,11 +3546,11 @@ int verifyClusterConfigWithData(void) { update_config++; /* Case A: slot is unassigned. Take responsibility for it. 
*/ if (server.cluster->slots[j] == NULL) { - redisLog(REDIS_WARNING, "I have keys for unassigned slot %d. " + serverLog(REDIS_WARNING, "I have keys for unassigned slot %d. " "Taking responsibility for it.",j); clusterAddSlot(myself,j); } else { - redisLog(REDIS_WARNING, "I have keys for slot %d, but the slot is " + serverLog(REDIS_WARNING, "I have keys for slot %d, but the slot is " "assigned to another node. " "Setting it to importing state.",j); server.cluster->importing_slots_from[j] = server.cluster->slots[j]; @@ -3978,7 +3978,7 @@ void clusterCommand(redisClient *c) { * configEpoch collision resolution will fix it assigning * a different epoch to each node. */ if (clusterBumpConfigEpochWithoutConsensus() == REDIS_OK) { - redisLog(REDIS_WARNING, + serverLog(REDIS_WARNING, "configEpoch updated after importing slot %d", slot); } server.cluster->importing_slots_from[slot] = NULL; @@ -4220,17 +4220,17 @@ void clusterCommand(redisClient *c) { * generates a new configuration epoch for this node without * consensus, claims the master's slots, and broadcast the new * configuration. */ - redisLog(REDIS_WARNING,"Taking over the master (user request)."); + serverLog(REDIS_WARNING,"Taking over the master (user request)."); clusterBumpConfigEpochWithoutConsensus(); clusterFailoverReplaceYourMaster(); } else if (force) { /* If this is a forced failover, we don't need to talk with our * master to agree about the offset. We just failover taking over * it without coordination. */ - redisLog(REDIS_WARNING,"Forced failover user request accepted."); + serverLog(REDIS_WARNING,"Forced failover user request accepted."); server.cluster->mf_can_start = 1; } else { - redisLog(REDIS_WARNING,"Manual failover user request accepted."); + serverLog(REDIS_WARNING,"Manual failover user request accepted."); clusterSendMFStart(myself->slaveof); } addReply(c,shared.ok); @@ -4257,7 +4257,7 @@ void clusterCommand(redisClient *c) { addReplyError(c,"Node config epoch is already non-zero"); } else { myself->configEpoch = epoch; - redisLog(REDIS_WARNING, + serverLog(REDIS_WARNING, "configEpoch set to %llu via CLUSTER SET-CONFIG-EPOCH", (unsigned long long) myself->configEpoch); diff --git a/src/config.c b/src/config.c index a6ea1ce7d..0659ee2fc 100644 --- a/src/config.c +++ b/src/config.c @@ -236,7 +236,7 @@ void loadServerConfigFromString(char *config) { } } else if (!strcasecmp(argv[0],"dir") && argc == 2) { if (chdir(argv[1]) == -1) { - redisLog(REDIS_WARNING,"Can't chdir to '%s': %s", + serverLog(REDIS_WARNING,"Can't chdir to '%s': %s", argv[1], strerror(errno)); exit(1); } @@ -647,7 +647,7 @@ void loadServerConfig(char *filename, char *options) { fp = stdin; } else { if ((fp = fopen(filename,"r")) == NULL) { - redisLog(REDIS_WARNING, + serverLog(REDIS_WARNING, "Fatal error, can't open config file '%s'", filename); exit(1); } @@ -954,7 +954,7 @@ void configSetCommand(redisClient *c) { } config_set_memory_field("maxmemory",server.maxmemory) { if (server.maxmemory) { if (server.maxmemory < zmalloc_used_memory()) { - redisLog(REDIS_WARNING,"WARNING: the new maxmemory value set via CONFIG SET is smaller than the current memory usage. This will result in keys eviction and/or inability to accept new write commands depending on the maxmemory-policy."); + serverLog(REDIS_WARNING,"WARNING: the new maxmemory value set via CONFIG SET is smaller than the current memory usage. 
This will result in keys eviction and/or inability to accept new write commands depending on the maxmemory-policy."); } freeMemoryIfNeeded(); } @@ -1657,7 +1657,7 @@ void rewriteConfigRemoveOrphaned(struct rewriteConfigState *state) { /* Don't blank lines about options the rewrite process * don't understand. */ if (dictFind(state->rewritten,option) == NULL) { - redisLog(REDIS_DEBUG,"Not rewritten option: %s", option); + serverLog(REDIS_DEBUG,"Not rewritten option: %s", option); continue; } @@ -1862,10 +1862,10 @@ void configCommand(redisClient *c) { return; } if (rewriteConfig(server.configfile) == -1) { - redisLog(REDIS_WARNING,"CONFIG REWRITE failed: %s", strerror(errno)); + serverLog(REDIS_WARNING,"CONFIG REWRITE failed: %s", strerror(errno)); addReplyErrorFormat(c,"Rewriting config file: %s", strerror(errno)); } else { - redisLog(REDIS_WARNING,"CONFIG REWRITE executed with success."); + serverLog(REDIS_WARNING,"CONFIG REWRITE executed with success."); addReply(c,shared.ok); } } else { diff --git a/src/debug.c b/src/debug.c index fffefe6ad..9c9118eb2 100644 --- a/src/debug.c +++ b/src/debug.c @@ -278,7 +278,7 @@ void debugCommand(redisClient *c) { addReplyError(c,"Error trying to load the RDB dump"); return; } - redisLog(REDIS_WARNING,"DB reloaded by DEBUG RELOAD"); + serverLog(REDIS_WARNING,"DB reloaded by DEBUG RELOAD"); addReply(c,shared.ok); } else if (!strcasecmp(c->argv[1]->ptr,"loadaof")) { emptyDb(NULL); @@ -287,7 +287,7 @@ void debugCommand(redisClient *c) { return; } server.dirty = 0; /* Prevent AOF / replication */ - redisLog(REDIS_WARNING,"Append Only File loaded by DEBUG LOADAOF"); + serverLog(REDIS_WARNING,"Append Only File loaded by DEBUG LOADAOF"); addReply(c,shared.ok); } else if (!strcasecmp(c->argv[1]->ptr,"object") && c->argc == 3) { dictEntry *de; @@ -472,13 +472,13 @@ void debugCommand(redisClient *c) { void _redisAssert(char *estr, char *file, int line) { bugReportStart(); - redisLog(REDIS_WARNING,"=== ASSERTION FAILED ==="); - redisLog(REDIS_WARNING,"==> %s:%d '%s' is not true",file,line,estr); + serverLog(REDIS_WARNING,"=== ASSERTION FAILED ==="); + serverLog(REDIS_WARNING,"==> %s:%d '%s' is not true",file,line,estr); #ifdef HAVE_BACKTRACE server.assert_failed = estr; server.assert_file = file; server.assert_line = line; - redisLog(REDIS_WARNING,"(forcing SIGSEGV to print the bug report.)"); + serverLog(REDIS_WARNING,"(forcing SIGSEGV to print the bug report.)"); #endif *((char*)-1) = 'x'; } @@ -487,10 +487,10 @@ void _redisAssertPrintClientInfo(redisClient *c) { int j; bugReportStart(); - redisLog(REDIS_WARNING,"=== ASSERTION FAILED CLIENT CONTEXT ==="); - redisLog(REDIS_WARNING,"client->flags = %d", c->flags); - redisLog(REDIS_WARNING,"client->fd = %d", c->fd); - redisLog(REDIS_WARNING,"client->argc = %d", c->argc); + serverLog(REDIS_WARNING,"=== ASSERTION FAILED CLIENT CONTEXT ==="); + serverLog(REDIS_WARNING,"client->flags = %d", c->flags); + serverLog(REDIS_WARNING,"client->fd = %d", c->fd); + serverLog(REDIS_WARNING,"client->argc = %d", c->argc); for (j=0; j < c->argc; j++) { char buf[128]; char *arg; @@ -502,39 +502,39 @@ void _redisAssertPrintClientInfo(redisClient *c) { c->argv[j]->type, c->argv[j]->encoding); arg = buf; } - redisLog(REDIS_WARNING,"client->argv[%d] = \"%s\" (refcount: %d)", + serverLog(REDIS_WARNING,"client->argv[%d] = \"%s\" (refcount: %d)", j, arg, c->argv[j]->refcount); } } -void redisLogObjectDebugInfo(robj *o) { - redisLog(REDIS_WARNING,"Object type: %d", o->type); - redisLog(REDIS_WARNING,"Object encoding: %d", o->encoding); - 
redisLog(REDIS_WARNING,"Object refcount: %d", o->refcount); +void serverLogObjectDebugInfo(robj *o) { + serverLog(REDIS_WARNING,"Object type: %d", o->type); + serverLog(REDIS_WARNING,"Object encoding: %d", o->encoding); + serverLog(REDIS_WARNING,"Object refcount: %d", o->refcount); if (o->type == REDIS_STRING && sdsEncodedObject(o)) { - redisLog(REDIS_WARNING,"Object raw string len: %zu", sdslen(o->ptr)); + serverLog(REDIS_WARNING,"Object raw string len: %zu", sdslen(o->ptr)); if (sdslen(o->ptr) < 4096) { sds repr = sdscatrepr(sdsempty(),o->ptr,sdslen(o->ptr)); - redisLog(REDIS_WARNING,"Object raw string content: %s", repr); + serverLog(REDIS_WARNING,"Object raw string content: %s", repr); sdsfree(repr); } } else if (o->type == REDIS_LIST) { - redisLog(REDIS_WARNING,"List length: %d", (int) listTypeLength(o)); + serverLog(REDIS_WARNING,"List length: %d", (int) listTypeLength(o)); } else if (o->type == REDIS_SET) { - redisLog(REDIS_WARNING,"Set size: %d", (int) setTypeSize(o)); + serverLog(REDIS_WARNING,"Set size: %d", (int) setTypeSize(o)); } else if (o->type == REDIS_HASH) { - redisLog(REDIS_WARNING,"Hash size: %d", (int) hashTypeLength(o)); + serverLog(REDIS_WARNING,"Hash size: %d", (int) hashTypeLength(o)); } else if (o->type == REDIS_ZSET) { - redisLog(REDIS_WARNING,"Sorted set size: %d", (int) zsetLength(o)); + serverLog(REDIS_WARNING,"Sorted set size: %d", (int) zsetLength(o)); if (o->encoding == REDIS_ENCODING_SKIPLIST) - redisLog(REDIS_WARNING,"Skiplist level: %d", (int) ((zset*)o->ptr)->zsl->level); + serverLog(REDIS_WARNING,"Skiplist level: %d", (int) ((zset*)o->ptr)->zsl->level); } } void _redisAssertPrintObject(robj *o) { bugReportStart(); - redisLog(REDIS_WARNING,"=== ASSERTION FAILED OBJECT CONTEXT ==="); - redisLogObjectDebugInfo(o); + serverLog(REDIS_WARNING,"=== ASSERTION FAILED OBJECT CONTEXT ==="); + serverLogObjectDebugInfo(o); } void _redisAssertWithInfo(redisClient *c, robj *o, char *estr, char *file, int line) { @@ -545,19 +545,19 @@ void _redisAssertWithInfo(redisClient *c, robj *o, char *estr, char *file, int l void _redisPanic(char *msg, char *file, int line) { bugReportStart(); - redisLog(REDIS_WARNING,"------------------------------------------------"); - redisLog(REDIS_WARNING,"!!! Software Failure. Press left mouse button to continue"); - redisLog(REDIS_WARNING,"Guru Meditation: %s #%s:%d",msg,file,line); + serverLog(REDIS_WARNING,"------------------------------------------------"); + serverLog(REDIS_WARNING,"!!! Software Failure. 
Press left mouse button to continue"); + serverLog(REDIS_WARNING,"Guru Meditation: %s #%s:%d",msg,file,line); #ifdef HAVE_BACKTRACE - redisLog(REDIS_WARNING,"(forcing SIGSEGV in order to print the stack trace)"); + serverLog(REDIS_WARNING,"(forcing SIGSEGV in order to print the stack trace)"); #endif - redisLog(REDIS_WARNING,"------------------------------------------------"); + serverLog(REDIS_WARNING,"------------------------------------------------"); *((char*)-1) = 'x'; } void bugReportStart(void) { if (server.bug_report_start == 0) { - redisLog(REDIS_WARNING, + serverLog(REDIS_WARNING, "\n\n=== REDIS BUG REPORT START: Cut & paste starting from here ==="); server.bug_report_start = 1; } @@ -602,20 +602,20 @@ void logStackContent(void **sp) { unsigned long val = (unsigned long) sp[i]; if (sizeof(long) == 4) - redisLog(REDIS_WARNING, "(%08lx) -> %08lx", addr, val); + serverLog(REDIS_WARNING, "(%08lx) -> %08lx", addr, val); else - redisLog(REDIS_WARNING, "(%016lx) -> %016lx", addr, val); + serverLog(REDIS_WARNING, "(%016lx) -> %016lx", addr, val); } } void logRegisters(ucontext_t *uc) { - redisLog(REDIS_WARNING, "--- REGISTERS"); + serverLog(REDIS_WARNING, "--- REGISTERS"); /* OSX */ #if defined(__APPLE__) && defined(MAC_OS_X_VERSION_10_6) /* OSX AMD64 */ #if defined(_STRUCT_X86_THREAD_STATE64) && !defined(__i386__) - redisLog(REDIS_WARNING, + serverLog(REDIS_WARNING, "\n" "RAX:%016lx RBX:%016lx\nRCX:%016lx RDX:%016lx\n" "RDI:%016lx RSI:%016lx\nRBP:%016lx RSP:%016lx\n" @@ -647,7 +647,7 @@ void logRegisters(ucontext_t *uc) { logStackContent((void**)uc->uc_mcontext->__ss.__rsp); #else /* OSX x86 */ - redisLog(REDIS_WARNING, + serverLog(REDIS_WARNING, "\n" "EAX:%08lx EBX:%08lx ECX:%08lx EDX:%08lx\n" "EDI:%08lx ESI:%08lx EBP:%08lx ESP:%08lx\n" @@ -676,7 +676,7 @@ void logRegisters(ucontext_t *uc) { #elif defined(__linux__) /* Linux x86 */ #if defined(__i386__) - redisLog(REDIS_WARNING, + serverLog(REDIS_WARNING, "\n" "EAX:%08lx EBX:%08lx ECX:%08lx EDX:%08lx\n" "EDI:%08lx ESI:%08lx EBP:%08lx ESP:%08lx\n" @@ -702,7 +702,7 @@ void logRegisters(ucontext_t *uc) { logStackContent((void**)uc->uc_mcontext.gregs[7]); #elif defined(__X86_64__) || defined(__x86_64__) /* Linux AMD64 */ - redisLog(REDIS_WARNING, + serverLog(REDIS_WARNING, "\n" "RAX:%016lx RBX:%016lx\nRCX:%016lx RDX:%016lx\n" "RDI:%016lx RSI:%016lx\nRBP:%016lx RSP:%016lx\n" @@ -732,7 +732,7 @@ void logRegisters(ucontext_t *uc) { logStackContent((void**)uc->uc_mcontext.gregs[15]); #endif #else - redisLog(REDIS_WARNING, + serverLog(REDIS_WARNING, " Dumping of registers not supported for this OS/arch"); #endif } @@ -774,15 +774,15 @@ void logCurrentClient(void) { sds client; int j; - redisLog(REDIS_WARNING, "--- CURRENT CLIENT INFO"); + serverLog(REDIS_WARNING, "--- CURRENT CLIENT INFO"); client = catClientInfoString(sdsempty(),cc); - redisLog(REDIS_WARNING,"client: %s", client); + serverLog(REDIS_WARNING,"client: %s", client); sdsfree(client); for (j = 0; j < cc->argc; j++) { robj *decoded; decoded = getDecodedObject(cc->argv[j]); - redisLog(REDIS_WARNING,"argv[%d]: '%s'", j, (char*)decoded->ptr); + serverLog(REDIS_WARNING,"argv[%d]: '%s'", j, (char*)decoded->ptr); decrRefCount(decoded); } /* Check if the first argument, usually a key, is found inside the @@ -795,8 +795,8 @@ void logCurrentClient(void) { de = dictFind(cc->db->dict, key->ptr); if (de) { val = dictGetVal(de); - redisLog(REDIS_WARNING,"key '%s' found in DB containing the following object:", (char*)key->ptr); - redisLogObjectDebugInfo(val); + serverLog(REDIS_WARNING,"key '%s' 
found in DB containing the following object:", (char*)key->ptr); + serverLogObjectDebugInfo(val); } decrRefCount(key); } @@ -892,25 +892,25 @@ void sigsegvHandler(int sig, siginfo_t *info, void *secret) { REDIS_NOTUSED(info); bugReportStart(); - redisLog(REDIS_WARNING, + serverLog(REDIS_WARNING, " Redis %s crashed by signal: %d", REDIS_VERSION, sig); - redisLog(REDIS_WARNING, + serverLog(REDIS_WARNING, " Failed assertion: %s (%s:%d)", server.assert_failed, server.assert_file, server.assert_line); /* Log the stack trace */ - redisLog(REDIS_WARNING, "--- STACK TRACE"); + serverLog(REDIS_WARNING, "--- STACK TRACE"); logStackTrace(uc); /* Log INFO and CLIENT LIST */ - redisLog(REDIS_WARNING, "--- INFO OUTPUT"); + serverLog(REDIS_WARNING, "--- INFO OUTPUT"); infostring = genRedisInfoString("all"); infostring = sdscatprintf(infostring, "hash_init_value: %u\n", dictGetHashFunctionSeed()); - redisLogRaw(REDIS_WARNING, infostring); - redisLog(REDIS_WARNING, "--- CLIENT LIST OUTPUT"); + serverLogRaw(REDIS_WARNING, infostring); + serverLog(REDIS_WARNING, "--- CLIENT LIST OUTPUT"); clients = getAllClientsInfoString(); - redisLogRaw(REDIS_WARNING, clients); + serverLogRaw(REDIS_WARNING, clients); sdsfree(infostring); sdsfree(clients); @@ -922,18 +922,18 @@ void sigsegvHandler(int sig, siginfo_t *info, void *secret) { #if defined(HAVE_PROC_MAPS) /* Test memory */ - redisLog(REDIS_WARNING, "--- FAST MEMORY TEST"); + serverLog(REDIS_WARNING, "--- FAST MEMORY TEST"); bioKillThreads(); if (memtest_test_linux_anonymous_maps()) { - redisLog(REDIS_WARNING, + serverLog(REDIS_WARNING, "!!! MEMORY ERROR DETECTED! Check your memory ASAP !!!"); } else { - redisLog(REDIS_WARNING, + serverLog(REDIS_WARNING, "Fast memory test PASSED, however your memory can still be broken. Please run a memory test for several hours if possible."); } #endif - redisLog(REDIS_WARNING, + serverLog(REDIS_WARNING, "\n=== REDIS BUG REPORT END. Make sure to include from START to END. 
===\n\n" " Please report the crash by opening an issue on github:\n\n" " http://github.com/antirez/redis/issues\n\n" @@ -954,12 +954,12 @@ void sigsegvHandler(int sig, siginfo_t *info, void *secret) { /* ==================== Logging functions for debugging ===================== */ -void redisLogHexDump(int level, char *descr, void *value, size_t len) { +void serverLogHexDump(int level, char *descr, void *value, size_t len) { char buf[65], *b; unsigned char *v = value; char charset[] = "0123456789abcdef"; - redisLog(level,"%s (hexdump):", descr); + serverLog(level,"%s (hexdump):", descr); b = buf; while(len) { b[0] = charset[(*v)>>4]; @@ -969,11 +969,11 @@ void redisLogHexDump(int level, char *descr, void *value, size_t len) { len--; v++; if (b-buf == 64 || len == 0) { - redisLogRaw(level|REDIS_LOG_RAW,buf); + serverLogRaw(level|REDIS_LOG_RAW,buf); b = buf; } } - redisLogRaw(level|REDIS_LOG_RAW,"\n"); + serverLogRaw(level|REDIS_LOG_RAW,"\n"); } /* =========================== Software Watchdog ============================ */ @@ -986,13 +986,13 @@ void watchdogSignalHandler(int sig, siginfo_t *info, void *secret) { REDIS_NOTUSED(info); REDIS_NOTUSED(sig); - redisLogFromHandler(REDIS_WARNING,"\n--- WATCHDOG TIMER EXPIRED ---"); + serverLogFromHandler(REDIS_WARNING,"\n--- WATCHDOG TIMER EXPIRED ---"); #ifdef HAVE_BACKTRACE logStackTrace(uc); #else - redisLogFromHandler(REDIS_WARNING,"Sorry: no support for backtrace()."); + serverLogFromHandler(REDIS_WARNING,"Sorry: no support for backtrace()."); #endif - redisLogFromHandler(REDIS_WARNING,"--------\n"); + serverLogFromHandler(REDIS_WARNING,"--------\n"); } /* Schedule a SIGALRM delivery after the specified period in milliseconds. diff --git a/src/networking.c b/src/networking.c index 0df37c0fa..ec9aef2bc 100644 --- a/src/networking.c +++ b/src/networking.c @@ -592,7 +592,7 @@ void copyClientOutputBuffer(redisClient *dst, redisClient *src) { static void acceptCommonHandler(int fd, int flags) { redisClient *c; if ((c = createClient(fd)) == NULL) { - redisLog(REDIS_WARNING, + serverLog(REDIS_WARNING, "Error registering fd event for the new client: %s (fd=%d)", strerror(errno),fd); close(fd); /* May be already closed, just ignore errors */ @@ -628,11 +628,11 @@ void acceptTcpHandler(aeEventLoop *el, int fd, void *privdata, int mask) { cfd = anetTcpAccept(server.neterr, fd, cip, sizeof(cip), &cport); if (cfd == ANET_ERR) { if (errno != EWOULDBLOCK) - redisLog(REDIS_WARNING, + serverLog(REDIS_WARNING, "Accepting client connection: %s", server.neterr); return; } - redisLog(REDIS_VERBOSE,"Accepted %s:%d", cip, cport); + serverLog(REDIS_VERBOSE,"Accepted %s:%d", cip, cport); acceptCommonHandler(cfd,0); } } @@ -647,11 +647,11 @@ void acceptUnixHandler(aeEventLoop *el, int fd, void *privdata, int mask) { cfd = anetUnixAccept(server.neterr, fd); if (cfd == ANET_ERR) { if (errno != EWOULDBLOCK) - redisLog(REDIS_WARNING, + serverLog(REDIS_WARNING, "Accepting client connection: %s", server.neterr); return; } - redisLog(REDIS_VERBOSE,"Accepted connection to %s", server.unixsocket); + serverLog(REDIS_VERBOSE,"Accepted connection to %s", server.unixsocket); acceptCommonHandler(cfd,REDIS_UNIX_SOCKET); } } @@ -700,7 +700,7 @@ void freeClient(redisClient *c) { * Note that before doing this we make sure that the client is not in * some unexpected state, by checking its flags. 
*/ if (server.master && c->flags & REDIS_MASTER) { - redisLog(REDIS_WARNING,"Connection with master lost."); + serverLog(REDIS_WARNING,"Connection with master lost."); if (!(c->flags & (REDIS_CLOSE_AFTER_REPLY| REDIS_CLOSE_ASAP| REDIS_BLOCKED| @@ -713,7 +713,7 @@ void freeClient(redisClient *c) { /* Log link disconnection with slave */ if ((c->flags & REDIS_SLAVE) && !(c->flags & REDIS_MONITOR)) { - redisLog(REDIS_WARNING,"Connection with slave %s lost.", + serverLog(REDIS_WARNING,"Connection with slave %s lost.", replicationGetSlaveName(c)); } @@ -883,7 +883,7 @@ void sendReplyToClient(aeEventLoop *el, int fd, void *privdata, int mask) { if (errno == EAGAIN) { nwritten = 0; } else { - redisLog(REDIS_VERBOSE, + serverLog(REDIS_VERBOSE, "Error writing to client: %s", strerror(errno)); freeClient(c); return; @@ -985,7 +985,7 @@ int processInlineBuffer(redisClient *c) { static void setProtocolError(redisClient *c, int pos) { if (server.verbosity <= REDIS_VERBOSE) { sds client = catClientInfoString(sdsempty(),c); - redisLog(REDIS_VERBOSE, + serverLog(REDIS_VERBOSE, "Protocol error from client: %s", client); sdsfree(client); } @@ -1205,12 +1205,12 @@ void readQueryFromClient(aeEventLoop *el, int fd, void *privdata, int mask) { if (errno == EAGAIN) { return; } else { - redisLog(REDIS_VERBOSE, "Reading from client: %s",strerror(errno)); + serverLog(REDIS_VERBOSE, "Reading from client: %s",strerror(errno)); freeClient(c); return; } } else if (nread == 0) { - redisLog(REDIS_VERBOSE, "Client closed connection"); + serverLog(REDIS_VERBOSE, "Client closed connection"); freeClient(c); return; } @@ -1223,7 +1223,7 @@ void readQueryFromClient(aeEventLoop *el, int fd, void *privdata, int mask) { sds ci = catClientInfoString(sdsempty(),c), bytes = sdsempty(); bytes = sdscatrepr(bytes,c->querybuf,64); - redisLog(REDIS_WARNING,"Closing client that reached max query buffer length: %s (qbuf initial bytes: %s)", ci, bytes); + serverLog(REDIS_WARNING,"Closing client that reached max query buffer length: %s (qbuf initial bytes: %s)", ci, bytes); sdsfree(ci); sdsfree(bytes); freeClient(c); @@ -1660,7 +1660,7 @@ void asyncCloseClientOnOutputBufferLimitReached(redisClient *c) { sds client = catClientInfoString(sdsempty(),c); freeClientAsync(c); - redisLog(REDIS_WARNING,"Client %s scheduled to be closed ASAP for overcoming of output buffer limits.", client); + serverLog(REDIS_WARNING,"Client %s scheduled to be closed ASAP for overcoming of output buffer limits.", client); sdsfree(client); } } diff --git a/src/rdb.c b/src/rdb.c index bbc791046..8e652cde5 100644 --- a/src/rdb.c +++ b/src/rdb.c @@ -47,7 +47,7 @@ #define rdbExitReportCorruptRDB(reason) rdbCheckThenExit(reason, __LINE__); void rdbCheckThenExit(char *reason, int where) { - redisLog(REDIS_WARNING, "Corrupt RDB detected at rdb.c:%d (%s). " + serverLog(REDIS_WARNING, "Corrupt RDB detected at rdb.c:%d (%s). " "Running 'redis-check-rdb %s'", where, reason, server.rdb_filename); redis_check_rdb(server.rdb_filename); @@ -839,7 +839,7 @@ int rdbSave(char *filename) { snprintf(tmpfile,256,"temp-%d.rdb", (int) getpid()); fp = fopen(tmpfile,"w"); if (!fp) { - redisLog(REDIS_WARNING, "Failed opening .rdb for saving: %s", + serverLog(REDIS_WARNING, "Failed opening .rdb for saving: %s", strerror(errno)); return REDIS_ERR; } @@ -858,18 +858,18 @@ int rdbSave(char *filename) { /* Use RENAME to make sure the DB file is changed atomically only * if the generate DB file is ok. 
*/ if (rename(tmpfile,filename) == -1) { - redisLog(REDIS_WARNING,"Error moving temp DB file on the final destination: %s", strerror(errno)); + serverLog(REDIS_WARNING,"Error moving temp DB file on the final destination: %s", strerror(errno)); unlink(tmpfile); return REDIS_ERR; } - redisLog(REDIS_NOTICE,"DB saved on disk"); + serverLog(REDIS_NOTICE,"DB saved on disk"); server.dirty = 0; server.lastsave = time(NULL); server.lastbgsave_status = REDIS_OK; return REDIS_OK; werr: - redisLog(REDIS_WARNING,"Write error saving DB on disk: %s", strerror(errno)); + serverLog(REDIS_WARNING,"Write error saving DB on disk: %s", strerror(errno)); fclose(fp); unlink(tmpfile); return REDIS_ERR; @@ -896,7 +896,7 @@ int rdbSaveBackground(char *filename) { size_t private_dirty = zmalloc_get_private_dirty(); if (private_dirty) { - redisLog(REDIS_NOTICE, + serverLog(REDIS_NOTICE, "RDB: %zu MB of memory used by copy-on-write", private_dirty/(1024*1024)); } @@ -909,11 +909,11 @@ int rdbSaveBackground(char *filename) { latencyAddSampleIfNeeded("fork",server.stat_fork_time/1000); if (childpid == -1) { server.lastbgsave_status = REDIS_ERR; - redisLog(REDIS_WARNING,"Can't save in background: fork: %s", + serverLog(REDIS_WARNING,"Can't save in background: fork: %s", strerror(errno)); return REDIS_ERR; } - redisLog(REDIS_NOTICE,"Background saving started by pid %d",childpid); + serverLog(REDIS_NOTICE,"Background saving started by pid %d",childpid); server.rdb_save_time_start = time(NULL); server.rdb_child_pid = childpid; server.rdb_child_type = REDIS_RDB_CHILD_TYPE_DISK; @@ -1250,14 +1250,14 @@ int rdbLoad(char *filename) { buf[9] = '\0'; if (memcmp(buf,"REDIS",5) != 0) { fclose(fp); - redisLog(REDIS_WARNING,"Wrong signature trying to load DB from file"); + serverLog(REDIS_WARNING,"Wrong signature trying to load DB from file"); errno = EINVAL; return REDIS_ERR; } rdbver = atoi(buf+5); if (rdbver < 1 || rdbver > REDIS_RDB_VERSION) { fclose(fp); - redisLog(REDIS_WARNING,"Can't handle RDB format version %d",rdbver); + serverLog(REDIS_WARNING,"Can't handle RDB format version %d",rdbver); errno = EINVAL; return REDIS_ERR; } @@ -1295,7 +1295,7 @@ int rdbLoad(char *filename) { if ((dbid = rdbLoadLen(&rdb,NULL)) == REDIS_RDB_LENERR) goto eoferr; if (dbid >= (unsigned)server.dbnum) { - redisLog(REDIS_WARNING, + serverLog(REDIS_WARNING, "FATAL: Data file was created with a Redis " "server configured to handle more than %d " "databases. Exiting\n", server.dbnum); @@ -1328,13 +1328,13 @@ int rdbLoad(char *filename) { /* All the fields with a name staring with '%' are considered * information fields and are logged at startup with a log * level of NOTICE. */ - redisLog(REDIS_NOTICE,"RDB '%s': %s", + serverLog(REDIS_NOTICE,"RDB '%s': %s", (char*)auxkey->ptr, (char*)auxval->ptr); } else { /* We ignore fields we don't understand, as by AUX field * contract. */ - redisLog(REDIS_DEBUG,"Unrecognized RDB AUX field: '%s'", + serverLog(REDIS_DEBUG,"Unrecognized RDB AUX field: '%s'", (char*)auxkey->ptr); } @@ -1372,9 +1372,9 @@ int rdbLoad(char *filename) { if (rioRead(&rdb,&cksum,8) == 0) goto eoferr; memrev64ifbe(&cksum); if (cksum == 0) { - redisLog(REDIS_WARNING,"RDB file was saved with checksum disabled: no check performed."); + serverLog(REDIS_WARNING,"RDB file was saved with checksum disabled: no check performed."); } else if (cksum != expected) { - redisLog(REDIS_WARNING,"Wrong RDB checksum. Aborting now."); + serverLog(REDIS_WARNING,"Wrong RDB checksum. 
Aborting now."); rdbExitReportCorruptRDB("RDB CRC error"); } } @@ -1384,7 +1384,7 @@ int rdbLoad(char *filename) { return REDIS_OK; eoferr: /* unexpected end of file is handled here with a fatal exit */ - redisLog(REDIS_WARNING,"Short read or OOM loading DB. Unrecoverable error, aborting now."); + serverLog(REDIS_WARNING,"Short read or OOM loading DB. Unrecoverable error, aborting now."); rdbExitReportCorruptRDB("Unexpected EOF reading RDB file"); return REDIS_ERR; /* Just to avoid warning */ } @@ -1393,18 +1393,18 @@ eoferr: /* unexpected end of file is handled here with a fatal exit */ * This function covers the case of actual BGSAVEs. */ void backgroundSaveDoneHandlerDisk(int exitcode, int bysignal) { if (!bysignal && exitcode == 0) { - redisLog(REDIS_NOTICE, + serverLog(REDIS_NOTICE, "Background saving terminated with success"); server.dirty = server.dirty - server.dirty_before_bgsave; server.lastsave = time(NULL); server.lastbgsave_status = REDIS_OK; } else if (!bysignal && exitcode != 0) { - redisLog(REDIS_WARNING, "Background saving error"); + serverLog(REDIS_WARNING, "Background saving error"); server.lastbgsave_status = REDIS_ERR; } else { mstime_t latency; - redisLog(REDIS_WARNING, + serverLog(REDIS_WARNING, "Background saving terminated by signal %d", bysignal); latencyStartMonitor(latency); rdbRemoveTempFile(server.rdb_child_pid); @@ -1431,12 +1431,12 @@ void backgroundSaveDoneHandlerSocket(int exitcode, int bysignal) { uint64_t *ok_slaves; if (!bysignal && exitcode == 0) { - redisLog(REDIS_NOTICE, + serverLog(REDIS_NOTICE, "Background RDB transfer terminated with success"); } else if (!bysignal && exitcode != 0) { - redisLog(REDIS_WARNING, "Background transfer error"); + serverLog(REDIS_WARNING, "Background transfer error"); } else { - redisLog(REDIS_WARNING, + serverLog(REDIS_WARNING, "Background transfer terminated by signal %d", bysignal); } server.rdb_child_pid = -1; @@ -1498,14 +1498,14 @@ void backgroundSaveDoneHandlerSocket(int exitcode, int bysignal) { } } if (j == ok_slaves[0] || errorcode != 0) { - redisLog(REDIS_WARNING, + serverLog(REDIS_WARNING, "Closing slave %s: child->slave RDB transfer failed: %s", replicationGetSlaveName(slave), (errorcode == 0) ? "RDB transfer child aborted" : strerror(errorcode)); freeClient(slave); } else { - redisLog(REDIS_WARNING, + serverLog(REDIS_WARNING, "Slave %s correctly received the streamed RDB file.", replicationGetSlaveName(slave)); /* Restore the socket as non-blocking. */ @@ -1601,7 +1601,7 @@ int rdbSaveToSlavesSockets(void) { size_t private_dirty = zmalloc_get_private_dirty(); if (private_dirty) { - redisLog(REDIS_NOTICE, + serverLog(REDIS_NOTICE, "RDB: %zu MB of memory used by copy-on-write", private_dirty/(1024*1024)); } @@ -1654,14 +1654,14 @@ int rdbSaveToSlavesSockets(void) { server.stat_fork_rate = (double) zmalloc_used_memory() * 1000000 / server.stat_fork_time / (1024*1024*1024); /* GB per second. 
*/ latencyAddSampleIfNeeded("fork",server.stat_fork_time/1000); if (childpid == -1) { - redisLog(REDIS_WARNING,"Can't save in background: fork: %s", + serverLog(REDIS_WARNING,"Can't save in background: fork: %s", strerror(errno)); zfree(fds); close(pipefds[0]); close(pipefds[1]); return REDIS_ERR; } - redisLog(REDIS_NOTICE,"Background RDB transfer started by pid %d",childpid); + serverLog(REDIS_NOTICE,"Background RDB transfer started by pid %d",childpid); server.rdb_save_time_start = time(NULL); server.rdb_child_pid = childpid; server.rdb_child_type = REDIS_RDB_CHILD_TYPE_SOCKET; diff --git a/src/redis-check-rdb.c b/src/redis-check-rdb.c index da73bd38b..459aec86e 100644 --- a/src/redis-check-rdb.c +++ b/src/redis-check-rdb.c @@ -41,7 +41,7 @@ #include "crc64.h" #define ERROR(...) { \ - redisLog(REDIS_WARNING, __VA_ARGS__); \ + serverLog(REDIS_WARNING, __VA_ARGS__); \ exit(1); \ } @@ -491,7 +491,7 @@ static void printCentered(int indent, int width, char* body) { memset(head, '=', indent); memset(tail, '=', width - 2 - indent - strlen(body)); - redisLog(REDIS_WARNING, "%s %s %s", head, body, tail); + serverLog(REDIS_WARNING, "%s %s %s", head, body, tail); } static void printValid(uint64_t ops, uint64_t bytes) { @@ -538,7 +538,7 @@ static void printErrorStack(entry *e) { /* display error stack */ for (i = 0; i < errors.level; i++) { - redisLog(REDIS_WARNING, "0x%08lx - %s", + serverLog(REDIS_WARNING, "0x%08lx - %s", (unsigned long) errors.offset[i], errors.error[i]); } } @@ -551,7 +551,7 @@ void process(void) { /* Exclude the final checksum for RDB >= 5. Will be checked at the end. */ if (dump_version >= 5) { if (positions[0].size < 8) { - redisLog(REDIS_WARNING, "RDB version >= 5 but no room for checksum."); + serverLog(REDIS_WARNING, "RDB version >= 5 but no room for checksum."); exit(1); } positions[0].size -= 8; @@ -636,13 +636,13 @@ void process(void) { if (crc != crc2) { SHIFT_ERROR(positions[0].offset, "RDB CRC64 does not match."); } else { - redisLog(REDIS_WARNING, "CRC64 checksum is OK"); + serverLog(REDIS_WARNING, "CRC64 checksum is OK"); } } /* print summary on errors */ if (num_errors) { - redisLog(REDIS_WARNING, "Total unprocessable opcodes: %llu", + serverLog(REDIS_WARNING, "Total unprocessable opcodes: %llu", (unsigned long long) num_errors); } } @@ -704,7 +704,7 @@ int redis_check_rdb_main(char **argv, int argc) { fprintf(stderr, "Usage: %s \n", argv[0]); exit(1); } - redisLog(REDIS_WARNING, "Checking RDB file %s", argv[1]); + serverLog(REDIS_WARNING, "Checking RDB file %s", argv[1]); exit(redis_check_rdb(argv[1])); return 0; } diff --git a/src/replication.c b/src/replication.c index 0d35ccd3d..5f366f189 100644 --- a/src/replication.c +++ b/src/replication.c @@ -302,32 +302,32 @@ void replicationFeedMonitors(redisClient *c, list *monitors, int dictid, robj ** long long addReplyReplicationBacklog(redisClient *c, long long offset) { long long j, skip, len; - redisLog(REDIS_DEBUG, "[PSYNC] Slave request offset: %lld", offset); + serverLog(REDIS_DEBUG, "[PSYNC] Slave request offset: %lld", offset); if (server.repl_backlog_histlen == 0) { - redisLog(REDIS_DEBUG, "[PSYNC] Backlog history len is zero"); + serverLog(REDIS_DEBUG, "[PSYNC] Backlog history len is zero"); return 0; } - redisLog(REDIS_DEBUG, "[PSYNC] Backlog size: %lld", + serverLog(REDIS_DEBUG, "[PSYNC] Backlog size: %lld", server.repl_backlog_size); - redisLog(REDIS_DEBUG, "[PSYNC] First byte: %lld", + serverLog(REDIS_DEBUG, "[PSYNC] First byte: %lld", server.repl_backlog_off); - redisLog(REDIS_DEBUG, "[PSYNC] History 
len: %lld", + serverLog(REDIS_DEBUG, "[PSYNC] History len: %lld", server.repl_backlog_histlen); - redisLog(REDIS_DEBUG, "[PSYNC] Current index: %lld", + serverLog(REDIS_DEBUG, "[PSYNC] Current index: %lld", server.repl_backlog_idx); /* Compute the amount of bytes we need to discard. */ skip = offset - server.repl_backlog_off; - redisLog(REDIS_DEBUG, "[PSYNC] Skipping: %lld", skip); + serverLog(REDIS_DEBUG, "[PSYNC] Skipping: %lld", skip); /* Point j to the oldest byte, that is actaully our * server.repl_backlog_off byte. */ j = (server.repl_backlog_idx + (server.repl_backlog_size-server.repl_backlog_histlen)) % server.repl_backlog_size; - redisLog(REDIS_DEBUG, "[PSYNC] Index of first byte: %lld", j); + serverLog(REDIS_DEBUG, "[PSYNC] Index of first byte: %lld", j); /* Discard the amount of data to seek to the specified 'offset'. */ j = (j + skip) % server.repl_backlog_size; @@ -335,13 +335,13 @@ long long addReplyReplicationBacklog(redisClient *c, long long offset) { /* Feed slave with data. Since it is a circular buffer we have to * split the reply in two parts if we are cross-boundary. */ len = server.repl_backlog_histlen - skip; - redisLog(REDIS_DEBUG, "[PSYNC] Reply total length: %lld", len); + serverLog(REDIS_DEBUG, "[PSYNC] Reply total length: %lld", len); while(len) { long long thislen = ((server.repl_backlog_size - j) < len) ? (server.repl_backlog_size - j) : len; - redisLog(REDIS_DEBUG, "[PSYNC] addReply() length: %lld", thislen); + serverLog(REDIS_DEBUG, "[PSYNC] addReply() length: %lld", thislen); addReplySds(c,sdsnewlen(server.repl_backlog + j, thislen)); len -= thislen; j = 0; @@ -366,11 +366,11 @@ int masterTryPartialResynchronization(redisClient *c) { if (strcasecmp(master_runid, server.runid)) { /* Run id "?" is used by slaves that want to force a full resync. */ if (master_runid[0] != '?') { - redisLog(REDIS_NOTICE,"Partial resynchronization not accepted: " + serverLog(REDIS_NOTICE,"Partial resynchronization not accepted: " "Runid mismatch (Client asked for runid '%s', my runid is '%s')", master_runid, server.runid); } else { - redisLog(REDIS_NOTICE,"Full resync requested by slave %s", + serverLog(REDIS_NOTICE,"Full resync requested by slave %s", replicationGetSlaveName(c)); } goto need_full_resync; @@ -383,10 +383,10 @@ int masterTryPartialResynchronization(redisClient *c) { psync_offset < server.repl_backlog_off || psync_offset > (server.repl_backlog_off + server.repl_backlog_histlen)) { - redisLog(REDIS_NOTICE, + serverLog(REDIS_NOTICE, "Unable to partial resync with slave %s for lack of backlog (Slave request was: %lld).", replicationGetSlaveName(c), psync_offset); if (psync_offset > server.master_repl_offset) { - redisLog(REDIS_WARNING, + serverLog(REDIS_WARNING, "Warning: slave %s tried to PSYNC with an offset that is greater than the master replication offset.", replicationGetSlaveName(c)); } goto need_full_resync; @@ -410,7 +410,7 @@ int masterTryPartialResynchronization(redisClient *c) { return REDIS_OK; } psync_len = addReplyReplicationBacklog(c,psync_offset); - redisLog(REDIS_NOTICE, + serverLog(REDIS_NOTICE, "Partial resynchronization request from %s accepted. Sending %lld bytes of backlog starting from offset %lld.", replicationGetSlaveName(c), psync_len, psync_offset); @@ -445,7 +445,7 @@ need_full_resync: int startBgsaveForReplication(void) { int retval; - redisLog(REDIS_NOTICE,"Starting BGSAVE for SYNC with target: %s", + serverLog(REDIS_NOTICE,"Starting BGSAVE for SYNC with target: %s", server.repl_diskless_sync ? 
"slaves sockets" : "disk"); if (server.repl_diskless_sync) @@ -480,7 +480,7 @@ void syncCommand(redisClient *c) { return; } - redisLog(REDIS_NOTICE,"Slave %s asks for synchronization", + serverLog(REDIS_NOTICE,"Slave %s asks for synchronization", replicationGetSlaveName(c)); /* Try a partial resynchronization if this is a PSYNC command. @@ -537,12 +537,12 @@ void syncCommand(redisClient *c) { * another slave. Set the right state, and copy the buffer. */ copyClientOutputBuffer(c,slave); c->replstate = REDIS_REPL_WAIT_BGSAVE_END; - redisLog(REDIS_NOTICE,"Waiting for end of BGSAVE for SYNC"); + serverLog(REDIS_NOTICE,"Waiting for end of BGSAVE for SYNC"); } else { /* No way, we need to wait for the next BGSAVE in order to * register differences. */ c->replstate = REDIS_REPL_WAIT_BGSAVE_START; - redisLog(REDIS_NOTICE,"Waiting for next BGSAVE for SYNC"); + serverLog(REDIS_NOTICE,"Waiting for next BGSAVE for SYNC"); } } else if (server.rdb_child_pid != -1 && server.rdb_child_type == REDIS_RDB_CHILD_TYPE_SOCKET) @@ -551,7 +551,7 @@ void syncCommand(redisClient *c) { * children sockets. We need to wait for the next BGSAVE * in order to synchronize. */ c->replstate = REDIS_REPL_WAIT_BGSAVE_START; - redisLog(REDIS_NOTICE,"Waiting for next BGSAVE for SYNC"); + serverLog(REDIS_NOTICE,"Waiting for next BGSAVE for SYNC"); } else { if (server.repl_diskless_sync) { /* Diskless replication RDB child is created inside @@ -559,11 +559,11 @@ void syncCommand(redisClient *c) { * few seconds to wait for more slaves to arrive. */ c->replstate = REDIS_REPL_WAIT_BGSAVE_START; if (server.repl_diskless_sync_delay) - redisLog(REDIS_NOTICE,"Delay next BGSAVE for SYNC"); + serverLog(REDIS_NOTICE,"Delay next BGSAVE for SYNC"); } else { /* Ok we don't have a BGSAVE in progress, let's start one. */ if (startBgsaveForReplication() != REDIS_OK) { - redisLog(REDIS_NOTICE,"Replication failed, can't BGSAVE"); + serverLog(REDIS_NOTICE,"Replication failed, can't BGSAVE"); addReplyError(c,"Unable to perform background save"); return; } @@ -664,12 +664,12 @@ void putSlaveOnline(redisClient *slave) { slave->repl_ack_time = server.unixtime; /* Prevent false timeout. */ if (aeCreateFileEvent(server.el, slave->fd, AE_WRITABLE, sendReplyToClient, slave) == AE_ERR) { - redisLog(REDIS_WARNING,"Unable to register writable event for slave bulk transfer: %s", strerror(errno)); + serverLog(REDIS_WARNING,"Unable to register writable event for slave bulk transfer: %s", strerror(errno)); freeClient(slave); return; } refreshGoodSlavesCount(); - redisLog(REDIS_NOTICE,"Synchronization with slave %s succeeded", + serverLog(REDIS_NOTICE,"Synchronization with slave %s succeeded", replicationGetSlaveName(slave)); } @@ -686,7 +686,7 @@ void sendBulkToSlave(aeEventLoop *el, int fd, void *privdata, int mask) { if (slave->replpreamble) { nwritten = write(fd,slave->replpreamble,sdslen(slave->replpreamble)); if (nwritten == -1) { - redisLog(REDIS_VERBOSE,"Write error sending RDB preamble to slave: %s", + serverLog(REDIS_VERBOSE,"Write error sending RDB preamble to slave: %s", strerror(errno)); freeClient(slave); return; @@ -706,14 +706,14 @@ void sendBulkToSlave(aeEventLoop *el, int fd, void *privdata, int mask) { lseek(slave->repldbfd,slave->repldboff,SEEK_SET); buflen = read(slave->repldbfd,buf,REDIS_IOBUF_LEN); if (buflen <= 0) { - redisLog(REDIS_WARNING,"Read error sending DB to slave: %s", + serverLog(REDIS_WARNING,"Read error sending DB to slave: %s", (buflen == 0) ? 
"premature EOF" : strerror(errno)); freeClient(slave); return; } if ((nwritten = write(fd,buf,buflen)) == -1) { if (errno != EAGAIN) { - redisLog(REDIS_WARNING,"Write error sending DB to slave: %s", + serverLog(REDIS_WARNING,"Write error sending DB to slave: %s", strerror(errno)); freeClient(slave); } @@ -764,7 +764,7 @@ void updateSlavesWaitingBgsave(int bgsaveerr, int type) { * diskless replication, our work is trivial, we can just put * the slave online. */ if (type == REDIS_RDB_CHILD_TYPE_SOCKET) { - redisLog(REDIS_NOTICE, + serverLog(REDIS_NOTICE, "Streamed RDB transfer with slave %s succeeded (socket). Waiting for REPLCONF ACK from slave to enable streaming", replicationGetSlaveName(slave)); /* Note: we wait for a REPLCONF ACK message from slave in @@ -778,13 +778,13 @@ void updateSlavesWaitingBgsave(int bgsaveerr, int type) { } else { if (bgsaveerr != REDIS_OK) { freeClient(slave); - redisLog(REDIS_WARNING,"SYNC failed. BGSAVE child returned an error"); + serverLog(REDIS_WARNING,"SYNC failed. BGSAVE child returned an error"); continue; } if ((slave->repldbfd = open(server.rdb_filename,O_RDONLY)) == -1 || redis_fstat(slave->repldbfd,&buf) == -1) { freeClient(slave); - redisLog(REDIS_WARNING,"SYNC failed. Can't open/stat DB after BGSAVE: %s", strerror(errno)); + serverLog(REDIS_WARNING,"SYNC failed. Can't open/stat DB after BGSAVE: %s", strerror(errno)); continue; } slave->repldboff = 0; @@ -806,7 +806,7 @@ void updateSlavesWaitingBgsave(int bgsaveerr, int type) { listIter li; listRewind(server.slaves,&li); - redisLog(REDIS_WARNING,"SYNC failed. BGSAVE failed"); + serverLog(REDIS_WARNING,"SYNC failed. BGSAVE failed"); while((ln = listNext(&li))) { redisClient *slave = ln->value; @@ -893,14 +893,14 @@ void readSyncBulkPayload(aeEventLoop *el, int fd, void *privdata, int mask) { * from the master reply. */ if (server.repl_transfer_size == -1) { if (syncReadLine(fd,buf,1024,server.repl_syncio_timeout*1000) == -1) { - redisLog(REDIS_WARNING, + serverLog(REDIS_WARNING, "I/O error reading bulk count from MASTER: %s", strerror(errno)); goto error; } if (buf[0] == '-') { - redisLog(REDIS_WARNING, + serverLog(REDIS_WARNING, "MASTER aborted replication with an error: %s", buf+1); goto error; @@ -911,7 +911,7 @@ void readSyncBulkPayload(aeEventLoop *el, int fd, void *privdata, int mask) { server.repl_transfer_lastio = server.unixtime; return; } else if (buf[0] != '$') { - redisLog(REDIS_WARNING,"Bad protocol from MASTER, the first byte is not '$' (we received '%s'), are you sure the host and port are right?", buf); + serverLog(REDIS_WARNING,"Bad protocol from MASTER, the first byte is not '$' (we received '%s'), are you sure the host and port are right?", buf); goto error; } @@ -932,12 +932,12 @@ void readSyncBulkPayload(aeEventLoop *el, int fd, void *privdata, int mask) { /* Set any repl_transfer_size to avoid entering this code path * at the next call. 
*/ server.repl_transfer_size = 0; - redisLog(REDIS_NOTICE, + serverLog(REDIS_NOTICE, "MASTER <-> SLAVE sync: receiving streamed RDB from master"); } else { usemark = 0; server.repl_transfer_size = strtol(buf+1,NULL,10); - redisLog(REDIS_NOTICE, + serverLog(REDIS_NOTICE, "MASTER <-> SLAVE sync: receiving %lld bytes from master", (long long) server.repl_transfer_size); } @@ -954,7 +954,7 @@ void readSyncBulkPayload(aeEventLoop *el, int fd, void *privdata, int mask) { nread = read(fd,buf,readlen); if (nread <= 0) { - redisLog(REDIS_WARNING,"I/O error trying to sync with MASTER: %s", + serverLog(REDIS_WARNING,"I/O error trying to sync with MASTER: %s", (nread == -1) ? strerror(errno) : "connection lost"); replicationAbortSyncTransfer(); return; @@ -979,7 +979,7 @@ void readSyncBulkPayload(aeEventLoop *el, int fd, void *privdata, int mask) { server.repl_transfer_lastio = server.unixtime; if (write(server.repl_transfer_fd,buf,nread) != nread) { - redisLog(REDIS_WARNING,"Write error or short write writing to the DB dump file needed for MASTER <-> SLAVE synchronization: %s", strerror(errno)); + serverLog(REDIS_WARNING,"Write error or short write writing to the DB dump file needed for MASTER <-> SLAVE synchronization: %s", strerror(errno)); goto error; } server.repl_transfer_read += nread; @@ -989,7 +989,7 @@ void readSyncBulkPayload(aeEventLoop *el, int fd, void *privdata, int mask) { if (ftruncate(server.repl_transfer_fd, server.repl_transfer_read - REDIS_RUN_ID_SIZE) == -1) { - redisLog(REDIS_WARNING,"Error truncating the RDB file received from the master for SYNC: %s", strerror(errno)); + serverLog(REDIS_WARNING,"Error truncating the RDB file received from the master for SYNC: %s", strerror(errno)); goto error; } } @@ -1015,11 +1015,11 @@ void readSyncBulkPayload(aeEventLoop *el, int fd, void *privdata, int mask) { if (eof_reached) { if (rename(server.repl_transfer_tmpfile,server.rdb_filename) == -1) { - redisLog(REDIS_WARNING,"Failed trying to rename the temp DB into dump.rdb in MASTER <-> SLAVE synchronization: %s", strerror(errno)); + serverLog(REDIS_WARNING,"Failed trying to rename the temp DB into dump.rdb in MASTER <-> SLAVE synchronization: %s", strerror(errno)); replicationAbortSyncTransfer(); return; } - redisLog(REDIS_NOTICE, "MASTER <-> SLAVE sync: Flushing old data"); + serverLog(REDIS_NOTICE, "MASTER <-> SLAVE sync: Flushing old data"); signalFlushedDb(-1); emptyDb(replicationEmptyDbCallback); /* Before loading the DB into memory we need to delete the readable @@ -1027,9 +1027,9 @@ void readSyncBulkPayload(aeEventLoop *el, int fd, void *privdata, int mask) { * rdbLoad() will call the event loop to process events from time to * time for non blocking loading. 
*/ aeDeleteFileEvent(server.el,server.repl_transfer_s,AE_READABLE); - redisLog(REDIS_NOTICE, "MASTER <-> SLAVE sync: Loading DB in memory"); + serverLog(REDIS_NOTICE, "MASTER <-> SLAVE sync: Loading DB in memory"); if (rdbLoad(server.rdb_filename) != REDIS_OK) { - redisLog(REDIS_WARNING,"Failed trying to load the MASTER synchronization DB from disk"); + serverLog(REDIS_WARNING,"Failed trying to load the MASTER synchronization DB from disk"); replicationAbortSyncTransfer(); return; } @@ -1037,7 +1037,7 @@ void readSyncBulkPayload(aeEventLoop *el, int fd, void *privdata, int mask) { zfree(server.repl_transfer_tmpfile); close(server.repl_transfer_fd); replicationCreateMasterClient(server.repl_transfer_s); - redisLog(REDIS_NOTICE, "MASTER <-> SLAVE sync: Finished with success"); + serverLog(REDIS_NOTICE, "MASTER <-> SLAVE sync: Finished with success"); /* Restart the AOF subsystem now that we finished the sync. This * will trigger an AOF rewrite, and when done will start appending * to the new file. */ @@ -1046,11 +1046,11 @@ void readSyncBulkPayload(aeEventLoop *el, int fd, void *privdata, int mask) { stopAppendOnly(); while (retry-- && startAppendOnly() == REDIS_ERR) { - redisLog(REDIS_WARNING,"Failed enabling the AOF after successful master synchronization! Trying it again in one second."); + serverLog(REDIS_WARNING,"Failed enabling the AOF after successful master synchronization! Trying it again in one second."); sleep(1); } if (!retry) { - redisLog(REDIS_WARNING,"FATAL: this slave instance finished the synchronization with its master, but the AOF can't be turned on. Exiting now."); + serverLog(REDIS_WARNING,"FATAL: this slave instance finished the synchronization with its master, but the AOF can't be turned on. Exiting now."); exit(1); } } @@ -1145,9 +1145,9 @@ int slaveTryPartialResynchronization(int fd) { if (server.cached_master) { psync_runid = server.cached_master->replrunid; snprintf(psync_offset,sizeof(psync_offset),"%lld", server.cached_master->reploff+1); - redisLog(REDIS_NOTICE,"Trying a partial resynchronization (request %s:%s).", psync_runid, psync_offset); + serverLog(REDIS_NOTICE,"Trying a partial resynchronization (request %s:%s).", psync_runid, psync_offset); } else { - redisLog(REDIS_NOTICE,"Partial resynchronization not possible (no cached master)"); + serverLog(REDIS_NOTICE,"Partial resynchronization not possible (no cached master)"); psync_runid = "?"; memcpy(psync_offset,"-1",3); } @@ -1167,7 +1167,7 @@ int slaveTryPartialResynchronization(int fd) { if (offset) offset++; } if (!runid || !offset || (offset-runid-1) != REDIS_RUN_ID_SIZE) { - redisLog(REDIS_WARNING, + serverLog(REDIS_WARNING, "Master replied with wrong +FULLRESYNC syntax."); /* This is an unexpected condition, actually the +FULLRESYNC * reply means that the master supports PSYNC, but the reply @@ -1178,7 +1178,7 @@ int slaveTryPartialResynchronization(int fd) { memcpy(server.repl_master_runid, runid, offset-runid-1); server.repl_master_runid[REDIS_RUN_ID_SIZE] = '\0'; server.repl_master_initial_offset = strtoll(offset,NULL,10); - redisLog(REDIS_NOTICE,"Full resync from master: %s:%lld", + serverLog(REDIS_NOTICE,"Full resync from master: %s:%lld", server.repl_master_runid, server.repl_master_initial_offset); } @@ -1190,7 +1190,7 @@ int slaveTryPartialResynchronization(int fd) { if (!strncmp(reply,"+CONTINUE",9)) { /* Partial resync was accepted, set the replication state accordingly */ - redisLog(REDIS_NOTICE, + serverLog(REDIS_NOTICE, "Successful partial resynchronization with master."); sdsfree(reply); 
replicationResurrectCachedMaster(fd); @@ -1203,10 +1203,10 @@ int slaveTryPartialResynchronization(int fd) { if (strncmp(reply,"-ERR",4)) { /* If it's not an error, log the unexpected event. */ - redisLog(REDIS_WARNING, + serverLog(REDIS_WARNING, "Unexpected reply to PSYNC from master: %s", reply); } else { - redisLog(REDIS_NOTICE, + serverLog(REDIS_NOTICE, "Master does not support PSYNC or is in " "error state (reply: %s)", reply); } @@ -1236,7 +1236,7 @@ void syncWithMaster(aeEventLoop *el, int fd, void *privdata, int mask) { sockerr = errno; if (sockerr) { aeDeleteFileEvent(server.el,fd,AE_READABLE|AE_WRITABLE); - redisLog(REDIS_WARNING,"Error condition on socket for SYNC: %s", + serverLog(REDIS_WARNING,"Error condition on socket for SYNC: %s", strerror(sockerr)); goto error; } @@ -1246,7 +1246,7 @@ void syncWithMaster(aeEventLoop *el, int fd, void *privdata, int mask) { * replication process where we have long timeouts in the order of * seconds (in the meantime the slave would block). */ if (server.repl_state == REDIS_REPL_CONNECTING) { - redisLog(REDIS_NOTICE,"Non blocking connect for SYNC fired the event."); + serverLog(REDIS_NOTICE,"Non blocking connect for SYNC fired the event."); /* Delete the writable event so that the readable event remains * registered and we can wait for the PONG reply. */ aeDeleteFileEvent(server.el,fd,AE_WRITABLE); @@ -1270,7 +1270,7 @@ void syncWithMaster(aeEventLoop *el, int fd, void *privdata, int mask) { if (syncReadLine(fd,buf,sizeof(buf), server.repl_syncio_timeout*1000) == -1) { - redisLog(REDIS_WARNING, + serverLog(REDIS_WARNING, "I/O error reading PING reply from master: %s", strerror(errno)); goto error; @@ -1285,10 +1285,10 @@ void syncWithMaster(aeEventLoop *el, int fd, void *privdata, int mask) { strncmp(buf,"-NOAUTH",7) != 0 && strncmp(buf,"-ERR operation not permitted",28) != 0) { - redisLog(REDIS_WARNING,"Error reply to PING from master: '%s'",buf); + serverLog(REDIS_WARNING,"Error reply to PING from master: '%s'",buf); goto error; } else { - redisLog(REDIS_NOTICE, + serverLog(REDIS_NOTICE, "Master replied to PING, replication can continue..."); } } @@ -1297,7 +1297,7 @@ void syncWithMaster(aeEventLoop *el, int fd, void *privdata, int mask) { if(server.masterauth) { err = sendSynchronousCommand(fd,"AUTH",server.masterauth,NULL); if (err[0] == '-') { - redisLog(REDIS_WARNING,"Unable to AUTH to MASTER: %s",err); + serverLog(REDIS_WARNING,"Unable to AUTH to MASTER: %s",err); sdsfree(err); goto error; } @@ -1314,7 +1314,7 @@ void syncWithMaster(aeEventLoop *el, int fd, void *privdata, int mask) { /* Ignore the error if any, not all the Redis versions support * REPLCONF listening-port. */ if (err[0] == '-') { - redisLog(REDIS_NOTICE,"(Non critical) Master does not understand REPLCONF listening-port: %s", err); + serverLog(REDIS_NOTICE,"(Non critical) Master does not understand REPLCONF listening-port: %s", err); } sdsfree(err); } @@ -1326,7 +1326,7 @@ void syncWithMaster(aeEventLoop *el, int fd, void *privdata, int mask) { * reconnection attempt. */ psync_result = slaveTryPartialResynchronization(fd); if (psync_result == PSYNC_CONTINUE) { - redisLog(REDIS_NOTICE, "MASTER <-> SLAVE sync: Master accepted a Partial Resynchronization."); + serverLog(REDIS_NOTICE, "MASTER <-> SLAVE sync: Master accepted a Partial Resynchronization."); return; } @@ -1334,9 +1334,9 @@ void syncWithMaster(aeEventLoop *el, int fd, void *privdata, int mask) { * and the server.repl_master_runid and repl_master_initial_offset are * already populated. 
*/ if (psync_result == PSYNC_NOT_SUPPORTED) { - redisLog(REDIS_NOTICE,"Retrying with SYNC..."); + serverLog(REDIS_NOTICE,"Retrying with SYNC..."); if (syncWrite(fd,"SYNC\r\n",6,server.repl_syncio_timeout*1000) == -1) { - redisLog(REDIS_WARNING,"I/O error writing to MASTER: %s", + serverLog(REDIS_WARNING,"I/O error writing to MASTER: %s", strerror(errno)); goto error; } @@ -1351,7 +1351,7 @@ void syncWithMaster(aeEventLoop *el, int fd, void *privdata, int mask) { sleep(1); } if (dfd == -1) { - redisLog(REDIS_WARNING,"Opening the temp file needed for MASTER <-> SLAVE synchronization: %s",strerror(errno)); + serverLog(REDIS_WARNING,"Opening the temp file needed for MASTER <-> SLAVE synchronization: %s",strerror(errno)); goto error; } @@ -1359,7 +1359,7 @@ void syncWithMaster(aeEventLoop *el, int fd, void *privdata, int mask) { if (aeCreateFileEvent(server.el,fd, AE_READABLE,readSyncBulkPayload,NULL) == AE_ERR) { - redisLog(REDIS_WARNING, + serverLog(REDIS_WARNING, "Can't create readable event for SYNC: %s (fd=%d)", strerror(errno),fd); goto error; @@ -1387,7 +1387,7 @@ int connectWithMaster(void) { fd = anetTcpNonBlockBestEffortBindConnect(NULL, server.masterhost,server.masterport,REDIS_BIND_ADDR); if (fd == -1) { - redisLog(REDIS_WARNING,"Unable to connect to MASTER: %s", + serverLog(REDIS_WARNING,"Unable to connect to MASTER: %s", strerror(errno)); return REDIS_ERR; } @@ -1396,7 +1396,7 @@ int connectWithMaster(void) { AE_ERR) { close(fd); - redisLog(REDIS_WARNING,"Can't create readable event for SYNC"); + serverLog(REDIS_WARNING,"Can't create readable event for SYNC"); return REDIS_ERR; } @@ -1491,7 +1491,7 @@ void slaveofCommand(redisClient *c) { !strcasecmp(c->argv[2]->ptr,"one")) { if (server.masterhost) { replicationUnsetMaster(); - redisLog(REDIS_NOTICE,"MASTER MODE enabled (user request)"); + serverLog(REDIS_NOTICE,"MASTER MODE enabled (user request)"); } } else { long port; @@ -1502,14 +1502,14 @@ void slaveofCommand(redisClient *c) { /* Check if we are already attached to the specified slave */ if (server.masterhost && !strcasecmp(server.masterhost,c->argv[1]->ptr) && server.masterport == port) { - redisLog(REDIS_NOTICE,"SLAVE OF would result into synchronization with the master we are already connected with. No operation performed."); + serverLog(REDIS_NOTICE,"SLAVE OF would result into synchronization with the master we are already connected with. No operation performed."); addReplySds(c,sdsnew("+OK Already connected to specified master\r\n")); return; } /* There was no previous master or the user specified a different one, * we can continue. */ replicationSetMaster(c->argv[1]->ptr, port); - redisLog(REDIS_NOTICE,"SLAVE OF %s:%d enabled (user request)", + serverLog(REDIS_NOTICE,"SLAVE OF %s:%d enabled (user request)", server.masterhost, server.masterport); } addReply(c,shared.ok); @@ -1604,7 +1604,7 @@ void replicationCacheMaster(redisClient *c) { listNode *ln; redisAssert(server.master != NULL && server.cached_master == NULL); - redisLog(REDIS_NOTICE,"Caching the disconnected master state."); + serverLog(REDIS_NOTICE,"Caching the disconnected master state."); /* Remove from the list of clients, we don't want this client to be * listed by CLIENT LIST or processed in any way by batch operations. 
*/ @@ -1642,7 +1642,7 @@ void replicationCacheMaster(redisClient *c) { void replicationDiscardCachedMaster(void) { if (server.cached_master == NULL) return; - redisLog(REDIS_NOTICE,"Discarding previously cached master state."); + serverLog(REDIS_NOTICE,"Discarding previously cached master state."); server.cached_master->flags &= ~REDIS_MASTER; freeClient(server.cached_master); server.cached_master = NULL; @@ -1667,7 +1667,7 @@ void replicationResurrectCachedMaster(int newfd) { listAddNodeTail(server.clients,server.master); if (aeCreateFileEvent(server.el, newfd, AE_READABLE, readQueryFromClient, server.master)) { - redisLog(REDIS_WARNING,"Error resurrecting the cached master, impossible to add the readable handler: %s", strerror(errno)); + serverLog(REDIS_WARNING,"Error resurrecting the cached master, impossible to add the readable handler: %s", strerror(errno)); freeClientAsync(server.master); /* Close ASAP. */ } @@ -1676,7 +1676,7 @@ void replicationResurrectCachedMaster(int newfd) { if (server.master->bufpos || listLength(server.master->reply)) { if (aeCreateFileEvent(server.el, newfd, AE_WRITABLE, sendReplyToClient, server.master)) { - redisLog(REDIS_WARNING,"Error resurrecting the cached master, impossible to add the writable handler: %s", strerror(errno)); + serverLog(REDIS_WARNING,"Error resurrecting the cached master, impossible to add the writable handler: %s", strerror(errno)); freeClientAsync(server.master); /* Close ASAP. */ } } @@ -1949,7 +1949,7 @@ void replicationCron(void) { server.repl_state == REDIS_REPL_RECEIVE_PONG) && (time(NULL)-server.repl_transfer_lastio) > server.repl_timeout) { - redisLog(REDIS_WARNING,"Timeout connecting to the MASTER..."); + serverLog(REDIS_WARNING,"Timeout connecting to the MASTER..."); undoConnectWithMaster(); } @@ -1957,7 +1957,7 @@ void replicationCron(void) { if (server.masterhost && server.repl_state == REDIS_REPL_TRANSFER && (time(NULL)-server.repl_transfer_lastio) > server.repl_timeout) { - redisLog(REDIS_WARNING,"Timeout receiving bulk data from MASTER... If the problem persists try to set the 'repl-timeout' parameter in redis.conf to a larger value."); + serverLog(REDIS_WARNING,"Timeout receiving bulk data from MASTER... 
If the problem persists try to set the 'repl-timeout' parameter in redis.conf to a larger value."); replicationAbortSyncTransfer(); } @@ -1965,16 +1965,16 @@ void replicationCron(void) { if (server.masterhost && server.repl_state == REDIS_REPL_CONNECTED && (time(NULL)-server.master->lastinteraction) > server.repl_timeout) { - redisLog(REDIS_WARNING,"MASTER timeout: no data nor PING received..."); + serverLog(REDIS_WARNING,"MASTER timeout: no data nor PING received..."); freeClient(server.master); } /* Check if we should connect to a MASTER */ if (server.repl_state == REDIS_REPL_CONNECT) { - redisLog(REDIS_NOTICE,"Connecting to MASTER %s:%d", + serverLog(REDIS_NOTICE,"Connecting to MASTER %s:%d", server.masterhost, server.masterport); if (connectWithMaster() == REDIS_OK) { - redisLog(REDIS_NOTICE,"MASTER <-> SLAVE sync started"); + serverLog(REDIS_NOTICE,"MASTER <-> SLAVE sync started"); } } @@ -2031,7 +2031,7 @@ void replicationCron(void) { if (slave->flags & REDIS_PRE_PSYNC) continue; if ((server.unixtime - slave->repl_ack_time) > server.repl_timeout) { - redisLog(REDIS_WARNING, "Disconnecting timedout slave: %s", + serverLog(REDIS_WARNING, "Disconnecting timedout slave: %s", replicationGetSlaveName(slave)); freeClient(slave); } @@ -2047,7 +2047,7 @@ void replicationCron(void) { if (idle > server.repl_backlog_time_limit) { freeReplicationBacklog(); - redisLog(REDIS_NOTICE, + serverLog(REDIS_NOTICE, "Replication backlog freed after %d seconds " "without connected slaves.", (int) server.repl_backlog_time_limit); diff --git a/src/scripting.c b/src/scripting.c index 6c1963793..79547aa4d 100644 --- a/src/scripting.c +++ b/src/scripting.c @@ -224,7 +224,7 @@ int luaRedisGenericCommand(lua_State *lua, int raise_error) { char *recursion_warning = "luaRedisGenericCommand() recursive call detected. " "Are you doing funny stuff with Lua debug hooks?"; - redisLog(REDIS_WARNING,"%s",recursion_warning); + serverLog(REDIS_WARNING,"%s",recursion_warning); luaPushError(lua,recursion_warning); return 1; } @@ -532,7 +532,7 @@ int luaLogCommand(lua_State *lua) { log = sdscatlen(log,s,len); } } - redisLogRaw(level,log); + serverLogRaw(level,log); sdsfree(log); return 0; } @@ -544,7 +544,7 @@ void luaMaskCountHook(lua_State *lua, lua_Debug *ar) { elapsed = mstime() - server.lua_time_start; if (elapsed >= server.lua_time_limit && server.lua_timedout == 0) { - redisLog(REDIS_WARNING,"Lua slow script detected: still in execution after %lld milliseconds. You can try killing the script using the SCRIPT KILL command.",elapsed); + serverLog(REDIS_WARNING,"Lua slow script detected: still in execution after %lld milliseconds. You can try killing the script using the SCRIPT KILL command.",elapsed); server.lua_timedout = 1; /* Once the script timeouts we reenter the event loop to permit others * to call SCRIPT KILL or SHUTDOWN NOSAVE if needed. 
For this reason @@ -555,7 +555,7 @@ void luaMaskCountHook(lua_State *lua, lua_Debug *ar) { } if (server.lua_timedout) processEventsWhileBlocked(); if (server.lua_kill) { - redisLog(REDIS_WARNING,"Lua script killed by user with SCRIPT KILL."); + serverLog(REDIS_WARNING,"Lua script killed by user with SCRIPT KILL."); lua_pushstring(lua,"Script killed by user with SCRIPT KILL..."); lua_error(lua); } diff --git a/src/sentinel.c b/src/sentinel.c index 5fa8258cd..bd315ccd5 100644 --- a/src/sentinel.c +++ b/src/sentinel.c @@ -477,11 +477,11 @@ void sentinelIsRunning(void) { int j; if (server.configfile == NULL) { - redisLog(REDIS_WARNING, + serverLog(REDIS_WARNING, "Sentinel started without a config file. Exiting..."); exit(1); } else if (access(server.configfile,W_OK) == -1) { - redisLog(REDIS_WARNING, + serverLog(REDIS_WARNING, "Sentinel config file %s is not writable: %s. Exiting...", server.configfile,strerror(errno)); exit(1); @@ -500,7 +500,7 @@ void sentinelIsRunning(void) { } /* Log its ID to make debugging of issues simpler. */ - redisLog(REDIS_WARNING,"Sentinel ID is %s", sentinel.myid); + serverLog(REDIS_WARNING,"Sentinel ID is %s", sentinel.myid); /* We want to generate a +monitor event for every configured master * at startup. */ @@ -614,7 +614,7 @@ void sentinelEvent(int level, char *type, sentinelRedisInstance *ri, /* Log the message if the log level allows it to be logged. */ if (level >= server.verbosity) - redisLog(level,"%s %s",type,msg); + serverLog(level,"%s %s",type,msg); /* Publish the message via Pub/Sub if it's not a debugging one. */ if (level != REDIS_DEBUG) { @@ -808,7 +808,7 @@ void sentinelCollectTerminatedScripts(void) { ln = sentinelGetScriptListNodeByPid(pid); if (ln == NULL) { - redisLog(REDIS_WARNING,"wait3() returned a pid (%ld) we can't find in our scripts execution queue!", (long)pid); + serverLog(REDIS_WARNING,"wait3() returned a pid (%ld) we can't find in our scripts execution queue!", (long)pid); continue; } sj = ln->value; @@ -1852,7 +1852,7 @@ void sentinelFlushConfig(void) { werr: if (fd != -1) close(fd); - redisLog(REDIS_WARNING,"WARNING: Sentinel was not able to save the new configuration on disk!!!: %s", strerror(errno)); + serverLog(REDIS_WARNING,"WARNING: Sentinel was not able to save the new configuration on disk!!!: %s", strerror(errno)); } /* ====================== hiredis connection handling ======================= */ @@ -2967,7 +2967,7 @@ void sentinelCommand(redisClient *c) { addReplySds(c,sdsnew("-NOGOODSLAVE No suitable slave to promote\r\n")); return; } - redisLog(REDIS_WARNING,"Executing user requested FAILOVER of '%s'", + serverLog(REDIS_WARNING,"Executing user requested FAILOVER of '%s'", ri->name); sentinelStartFailover(ri); ri->flags |= SRI_FORCE_FAILOVER; @@ -3136,13 +3136,13 @@ void sentinelCommand(redisClient *c) { if (!strcasecmp(c->argv[j]->ptr,"crash-after-election")) { sentinel.simfailure_flags |= SENTINEL_SIMFAILURE_CRASH_AFTER_ELECTION; - redisLog(REDIS_WARNING,"Failure simulation: this Sentinel " + serverLog(REDIS_WARNING,"Failure simulation: this Sentinel " "will crash after being successfully elected as failover " "leader"); } else if (!strcasecmp(c->argv[j]->ptr,"crash-after-promotion")) { sentinel.simfailure_flags |= SENTINEL_SIMFAILURE_CRASH_AFTER_PROMOTION; - redisLog(REDIS_WARNING,"Failure simulation: this Sentinel " + serverLog(REDIS_WARNING,"Failure simulation: this Sentinel " "will crash after promoting the selected slave to master"); } else if (!strcasecmp(c->argv[j]->ptr,"help")) { addReplyMultiBulkLen(c,2); @@ 
-3490,7 +3490,7 @@ void sentinelReceiveIsMasterDownReply(redisAsyncContext *c, void *reply, void *p * replied with a vote. */ sdsfree(ri->leader); if ((long long)ri->leader_epoch != r->element[2]->integer) - redisLog(REDIS_WARNING, + serverLog(REDIS_WARNING, "%s voted for %s %llu", ri->name, r->element[1]->str, (unsigned long long) r->element[2]->integer); @@ -3552,7 +3552,7 @@ void sentinelAskMasterStateToOtherSentinels(sentinelRedisInstance *master, int f /* Crash because of user request via SENTINEL simulate-failure command. */ void sentinelSimFailureCrash(void) { - redisLog(REDIS_WARNING, + serverLog(REDIS_WARNING, "Sentinel CRASH because of SENTINEL simulate-failure"); exit(99); } @@ -3793,7 +3793,7 @@ int sentinelStartFailoverIfNeeded(sentinelRedisInstance *master) { ctime_r(&clock,ctimebuf); ctimebuf[24] = '\0'; /* Remove newline. */ master->failover_delay_logged = master->failover_start_time; - redisLog(REDIS_WARNING, + serverLog(REDIS_WARNING, "Next failover delay: I will not start a failover before %s", ctimebuf); } diff --git a/src/server.c b/src/server.c index 3459a0119..b666e8d63 100644 --- a/src/server.c +++ b/src/server.c @@ -300,8 +300,8 @@ struct evictionPoolEntry *evictionPoolAlloc(void); /*============================ Utility functions ============================ */ /* Low level logging. To use only for very big messages, otherwise - * redisLog() is to prefer. */ -void redisLogRaw(int level, const char *msg) { + * serverLog() is to prefer. */ +void serverLogRaw(int level, const char *msg) { const int syslogLevelMap[] = { LOG_DEBUG, LOG_INFO, LOG_NOTICE, LOG_WARNING }; const char *c = ".-*#"; FILE *fp; @@ -342,10 +342,10 @@ void redisLogRaw(int level, const char *msg) { if (server.syslog_enabled) syslog(syslogLevelMap[level], "%s", msg); } -/* Like redisLogRaw() but with printf-alike support. This is the function that +/* Like serverLogRaw() but with printf-alike support. This is the function that * is used across the code. The raw version is only used in order to dump * the INFO output on crash. */ -void redisLog(int level, const char *fmt, ...) { +void serverLog(int level, const char *fmt, ...) { va_list ap; char msg[REDIS_MAX_LOGMSG_LEN]; @@ -355,7 +355,7 @@ void redisLog(int level, const char *fmt, ...) { vsnprintf(msg, sizeof(msg), fmt, ap); va_end(ap); - redisLogRaw(level,msg); + serverLogRaw(level,msg); } /* Log a fixed message without printf-alike capabilities, in a way that is @@ -363,8 +363,8 @@ void redisLog(int level, const char *fmt, ...) { * * We actually use this only for signals that are not fatal from the point * of view of Redis. Signals that are going to kill the server anyway and - * where we need printf-alike features are served by redisLog(). */ -void redisLogFromHandler(int level, const char *msg) { + * where we need printf-alike features are served by serverLog(). */ +void serverLogFromHandler(int level, const char *msg) { int fd; int log_to_stdout = server.logfile[0] == '\0'; char buf[64]; @@ -925,7 +925,7 @@ int clientsCronHandleTimeout(redisClient *c, mstime_t now_ms) { !(c->flags & REDIS_PUBSUB) && /* no timeout for Pub/Sub clients */ (now - c->lastinteraction > server.maxidletime)) { - redisLog(REDIS_VERBOSE,"Closing idle client"); + serverLog(REDIS_VERBOSE,"Closing idle client"); freeClient(c); return 1; } else if (c->flags & REDIS_BLOCKED) { @@ -1126,7 +1126,7 @@ int serverCron(struct aeEventLoop *eventLoop, long long id, void *clientData) { * not ok doing so inside the signal handler. 
*/ if (server.shutdown_asap) { if (prepareForShutdown(0) == REDIS_OK) exit(0); - redisLog(REDIS_WARNING,"SIGTERM received but errors trying to shut down the server, check the logs for more information"); + serverLog(REDIS_WARNING,"SIGTERM received but errors trying to shut down the server, check the logs for more information"); server.shutdown_asap = 0; } @@ -1139,7 +1139,7 @@ int serverCron(struct aeEventLoop *eventLoop, long long id, void *clientData) { used = dictSize(server.db[j].dict); vkeys = dictSize(server.db[j].expires); if (used || vkeys) { - redisLog(REDIS_VERBOSE,"DB %d: %lld keys (%lld volatile) in %lld slots HT.",j,used,vkeys,size); + serverLog(REDIS_VERBOSE,"DB %d: %lld keys (%lld volatile) in %lld slots HT.",j,used,vkeys,size); /* dictPrintStats(server.dict); */ } } @@ -1148,7 +1148,7 @@ int serverCron(struct aeEventLoop *eventLoop, long long id, void *clientData) { /* Show information about connected clients */ if (!server.sentinel_mode) { run_with_period(5000) { - redisLog(REDIS_VERBOSE, + serverLog(REDIS_VERBOSE, "%lu clients connected (%lu slaves), %zu bytes in use", listLength(server.clients)-listLength(server.slaves), listLength(server.slaves), @@ -1186,7 +1186,7 @@ int serverCron(struct aeEventLoop *eventLoop, long long id, void *clientData) { } else if (pid == server.aof_child_pid) { backgroundRewriteDoneHandler(exitcode,bysignal); } else { - redisLog(REDIS_WARNING, + serverLog(REDIS_WARNING, "Warning, detected child with unmatched pid: %ld", (long)pid); } @@ -1208,7 +1208,7 @@ int serverCron(struct aeEventLoop *eventLoop, long long id, void *clientData) { REDIS_BGSAVE_RETRY_DELAY || server.lastbgsave_status == REDIS_OK)) { - redisLog(REDIS_NOTICE,"%d changes in %d seconds. Saving...", + serverLog(REDIS_NOTICE,"%d changes in %d seconds. Saving...", sp->changes, (int)sp->seconds); rdbSaveBackground(server.rdb_filename); break; @@ -1225,7 +1225,7 @@ int serverCron(struct aeEventLoop *eventLoop, long long id, void *clientData) { server.aof_rewrite_base_size : 1; long long growth = (server.aof_current_size*100/base) - 100; if (growth >= server.aof_rewrite_perc) { - redisLog(REDIS_NOTICE,"Starting automatic rewriting of AOF on %lld%% growth",growth); + serverLog(REDIS_NOTICE,"Starting automatic rewriting of AOF on %lld%% growth",growth); rewriteAppendOnlyFileBackground(); } } @@ -1583,7 +1583,7 @@ void adjustOpenFilesLimit(void) { struct rlimit limit; if (getrlimit(RLIMIT_NOFILE,&limit) == -1) { - redisLog(REDIS_WARNING,"Unable to obtain the current NOFILE limit (%s), assuming 1024 and setting the max clients configuration accordingly.", + serverLog(REDIS_WARNING,"Unable to obtain the current NOFILE limit (%s), assuming 1024 and setting the max clients configuration accordingly.", strerror(errno)); server.maxclients = 1024-REDIS_MIN_RESERVED_FDS; } else { @@ -1620,7 +1620,7 @@ void adjustOpenFilesLimit(void) { int old_maxclients = server.maxclients; server.maxclients = bestlimit-REDIS_MIN_RESERVED_FDS; if (server.maxclients < 1) { - redisLog(REDIS_WARNING,"Your current 'ulimit -n' " + serverLog(REDIS_WARNING,"Your current 'ulimit -n' " "of %llu is not enough for Redis to start. " "Please increase your open file limit to at least " "%llu. 
Exiting.", @@ -1628,20 +1628,20 @@ void adjustOpenFilesLimit(void) { (unsigned long long) maxfiles); exit(1); } - redisLog(REDIS_WARNING,"You requested maxclients of %d " + serverLog(REDIS_WARNING,"You requested maxclients of %d " "requiring at least %llu max file descriptors.", old_maxclients, (unsigned long long) maxfiles); - redisLog(REDIS_WARNING,"Redis can't set maximum open files " + serverLog(REDIS_WARNING,"Redis can't set maximum open files " "to %llu because of OS error: %s.", (unsigned long long) maxfiles, strerror(setrlimit_error)); - redisLog(REDIS_WARNING,"Current maximum open files is %llu. " + serverLog(REDIS_WARNING,"Current maximum open files is %llu. " "maxclients has been reduced to %d to compensate for " "low ulimit. " "If you need higher maxclients increase 'ulimit -n'.", (unsigned long long) bestlimit, server.maxclients); } else { - redisLog(REDIS_NOTICE,"Increased maximum number of open files " + serverLog(REDIS_NOTICE,"Increased maximum number of open files " "to %llu (it was originally set to %llu).", (unsigned long long) maxfiles, (unsigned long long) oldlimit); @@ -1660,7 +1660,7 @@ void checkTcpBacklogSettings(void) { if (fgets(buf,sizeof(buf),fp) != NULL) { int somaxconn = atoi(buf); if (somaxconn > 0 && somaxconn < server.tcp_backlog) { - redisLog(REDIS_WARNING,"WARNING: The TCP backlog setting of %d cannot be enforced because /proc/sys/net/core/somaxconn is set to the lower value of %d.", server.tcp_backlog, somaxconn); + serverLog(REDIS_WARNING,"WARNING: The TCP backlog setting of %d cannot be enforced because /proc/sys/net/core/somaxconn is set to the lower value of %d.", server.tcp_backlog, somaxconn); } } fclose(fp); @@ -1721,7 +1721,7 @@ int listenToPort(int port, int *fds, int *count) { server.tcp_backlog); } if (fds[*count] == ANET_ERR) { - redisLog(REDIS_WARNING, + serverLog(REDIS_WARNING, "Creating Server TCP listening socket %s:%d: %s", server.bindaddr[j] ? server.bindaddr[j] : "*", port, server.neterr); @@ -1805,7 +1805,7 @@ void initServer(void) { server.sofd = anetUnixServer(server.neterr,server.unixsocket, server.unixsocketperm, server.tcp_backlog); if (server.sofd == ANET_ERR) { - redisLog(REDIS_WARNING, "Opening Unix socket: %s", server.neterr); + serverLog(REDIS_WARNING, "Opening Unix socket: %s", server.neterr); exit(1); } anetNonBlock(NULL,server.sofd); @@ -1813,7 +1813,7 @@ void initServer(void) { /* Abort if there are no listening sockets at all. */ if (server.ipfd_count == 0 && server.sofd < 0) { - redisLog(REDIS_WARNING, "Configured to not listen anywhere, exiting."); + serverLog(REDIS_WARNING, "Configured to not listen anywhere, exiting."); exit(1); } @@ -1879,7 +1879,7 @@ void initServer(void) { server.aof_fd = open(server.aof_filename, O_WRONLY|O_APPEND|O_CREAT,0644); if (server.aof_fd == -1) { - redisLog(REDIS_WARNING, "Can't open the append-only file: %s", + serverLog(REDIS_WARNING, "Can't open the append-only file: %s", strerror(errno)); exit(1); } @@ -1890,7 +1890,7 @@ void initServer(void) { * at 3 GB using maxmemory with 'noeviction' policy'. This avoids * useless crashes of the Redis instance for out of memory. */ if (server.arch_bits == 32 && server.maxmemory == 0) { - redisLog(REDIS_WARNING,"Warning: 32 bit instance detected but no memory limit set. Setting 3 GB maxmemory limit with 'noeviction' policy now."); + serverLog(REDIS_WARNING,"Warning: 32 bit instance detected but no memory limit set. 
Setting 3 GB maxmemory limit with 'noeviction' policy now."); server.maxmemory = 3072LL*(1024*1024); /* 3 GB */ server.maxmemory_policy = REDIS_MAXMEMORY_NO_EVICTION; } @@ -2372,7 +2372,7 @@ void closeListeningSockets(int unlink_unix_socket) { if (server.cluster_enabled) for (j = 0; j < server.cfd_count; j++) close(server.cfd[j]); if (unlink_unix_socket && server.unixsocket) { - redisLog(REDIS_NOTICE,"Removing the unix socket file."); + serverLog(REDIS_NOTICE,"Removing the unix socket file."); unlink(server.unixsocket); /* don't care if this fails */ } } @@ -2381,12 +2381,12 @@ int prepareForShutdown(int flags) { int save = flags & REDIS_SHUTDOWN_SAVE; int nosave = flags & REDIS_SHUTDOWN_NOSAVE; - redisLog(REDIS_WARNING,"User requested shutdown..."); + serverLog(REDIS_WARNING,"User requested shutdown..."); /* Kill the saving child if there is a background saving in progress. We want to avoid race conditions, for instance our saving child may overwrite the synchronous saving did by SHUTDOWN. */ if (server.rdb_child_pid != -1) { - redisLog(REDIS_WARNING,"There is a child saving an .rdb. Killing it!"); + serverLog(REDIS_WARNING,"There is a child saving an .rdb. Killing it!"); kill(server.rdb_child_pid,SIGUSR1); rdbRemoveTempFile(server.rdb_child_pid); } @@ -2397,19 +2397,19 @@ int prepareForShutdown(int flags) { /* If we have AOF enabled but haven't written the AOF yet, don't * shutdown or else the dataset will be lost. */ if (server.aof_state == REDIS_AOF_WAIT_REWRITE) { - redisLog(REDIS_WARNING, "Writing initial AOF, can't exit."); + serverLog(REDIS_WARNING, "Writing initial AOF, can't exit."); return REDIS_ERR; } - redisLog(REDIS_WARNING, + serverLog(REDIS_WARNING, "There is a child rewriting the AOF. Killing it!"); kill(server.aof_child_pid,SIGUSR1); } /* Append only file: fsync() the AOF and exit */ - redisLog(REDIS_NOTICE,"Calling fsync() on the AOF file."); + serverLog(REDIS_NOTICE,"Calling fsync() on the AOF file."); aof_fsync(server.aof_fd); } if ((server.saveparamslen > 0 && !nosave) || save) { - redisLog(REDIS_NOTICE,"Saving the final RDB snapshot before exiting."); + serverLog(REDIS_NOTICE,"Saving the final RDB snapshot before exiting."); /* Snapshotting. Perform a SYNC SAVE and exit */ if (rdbSave(server.rdb_filename) != REDIS_OK) { /* Ooops.. error saving! The best we can do is to continue @@ -2417,17 +2417,17 @@ int prepareForShutdown(int flags) { * in the next cron() Redis will be notified that the background * saving aborted, handling special stuff like slaves pending for * synchronization... */ - redisLog(REDIS_WARNING,"Error trying to save the DB, can't exit."); + serverLog(REDIS_WARNING,"Error trying to save the DB, can't exit."); return REDIS_ERR; } } if (server.daemonize || server.pidfile) { - redisLog(REDIS_NOTICE,"Removing the pid file."); + serverLog(REDIS_NOTICE,"Removing the pid file."); unlink(server.pidfile); } /* Close the listening sockets. Apparently this allows faster restarts. */ closeListeningSockets(1); - redisLog(REDIS_WARNING,"%s is now ready to exit, bye bye...", + serverLog(REDIS_WARNING,"%s is now ready to exit, bye bye...", server.sentinel_mode ? "Sentinel" : "Redis"); return REDIS_OK; } @@ -3444,10 +3444,10 @@ int linuxOvercommitMemoryValue(void) { void linuxMemoryWarnings(void) { if (linuxOvercommitMemoryValue() == 0) { - redisLog(REDIS_WARNING,"WARNING overcommit_memory is set to 0! Background save may fail under low memory condition. 
To fix this issue add 'vm.overcommit_memory = 1' to /etc/sysctl.conf and then reboot or run the command 'sysctl vm.overcommit_memory=1' for this to take effect."); + serverLog(REDIS_WARNING,"WARNING overcommit_memory is set to 0! Background save may fail under low memory condition. To fix this issue add 'vm.overcommit_memory = 1' to /etc/sysctl.conf and then reboot or run the command 'sysctl vm.overcommit_memory=1' for this to take effect."); } if (THPIsEnabled()) { - redisLog(REDIS_WARNING,"WARNING you have Transparent Huge Pages (THP) support enabled in your kernel. This will create latency and memory usage issues with Redis. To fix this issue run the command 'echo never > /sys/kernel/mm/transparent_hugepage/enabled' as root, and add it to your /etc/rc.local in order to retain the setting after a reboot. Redis must be restarted after THP is disabled."); + serverLog(REDIS_WARNING,"WARNING you have Transparent Huge Pages (THP) support enabled in your kernel. This will create latency and memory usage issues with Redis. To fix this issue run the command 'echo never > /sys/kernel/mm/transparent_hugepage/enabled' as root, and add it to your /etc/rc.local in order to retain the setting after a reboot. Redis must be restarted after THP is disabled."); } } #endif /* __linux__ */ @@ -3520,7 +3520,7 @@ void redisAsciiArt(void) { else mode = "standalone"; if (server.syslog_enabled) { - redisLog(REDIS_NOTICE, + serverLog(REDIS_NOTICE, "Redis %s (%s/%d) %s bit, %s mode, port %d, pid %ld ready to start.", REDIS_VERSION, redisGitSHA1(), @@ -3538,7 +3538,7 @@ void redisAsciiArt(void) { mode, server.port, (long) getpid() ); - redisLogRaw(REDIS_NOTICE|REDIS_LOG_RAW,buf); + serverLogRaw(REDIS_NOTICE|REDIS_LOG_RAW,buf); } zfree(buf); } @@ -3562,14 +3562,14 @@ static void sigShutdownHandler(int sig) { * the user really wanting to quit ASAP without waiting to persist * on disk. */ if (server.shutdown_asap && sig == SIGINT) { - redisLogFromHandler(REDIS_WARNING, "You insist... exiting now."); + serverLogFromHandler(REDIS_WARNING, "You insist... exiting now."); rdbRemoveTempFile(getpid()); exit(1); /* Exit with an error since this was not a clean shutdown. */ } else if (server.loading) { exit(0); } - redisLogFromHandler(REDIS_WARNING, msg); + serverLogFromHandler(REDIS_WARNING, msg); server.shutdown_asap = 1; } @@ -3614,20 +3614,20 @@ void loadDataFromDisk(void) { long long start = ustime(); if (server.aof_state == REDIS_AOF_ON) { if (loadAppendOnlyFile(server.aof_filename) == REDIS_OK) - redisLog(REDIS_NOTICE,"DB loaded from append only file: %.3f seconds",(float)(ustime()-start)/1000000); + serverLog(REDIS_NOTICE,"DB loaded from append only file: %.3f seconds",(float)(ustime()-start)/1000000); } else { if (rdbLoad(server.rdb_filename) == REDIS_OK) { - redisLog(REDIS_NOTICE,"DB loaded from disk: %.3f seconds", + serverLog(REDIS_NOTICE,"DB loaded from disk: %.3f seconds", (float)(ustime()-start)/1000000); } else if (errno != ENOENT) { - redisLog(REDIS_WARNING,"Fatal error loading the DB: %s. Exiting.",strerror(errno)); + serverLog(REDIS_WARNING,"Fatal error loading the DB: %s. 
Exiting.",strerror(errno)); exit(1); } } } void redisOutOfMemoryHandler(size_t allocation_size) { - redisLog(REDIS_WARNING,"Out Of Memory allocating %zu bytes!", + serverLog(REDIS_WARNING,"Out Of Memory allocating %zu bytes!", allocation_size); redisPanic("Redis aborting for OUT OF MEMORY"); } @@ -3656,12 +3656,12 @@ int redisSupervisedUpstart(void) { const char *upstart_job = getenv("UPSTART_JOB"); if (!upstart_job) { - redisLog(REDIS_WARNING, + serverLog(REDIS_WARNING, "upstart supervision requested, but UPSTART_JOB not found"); return 0; } - redisLog(REDIS_NOTICE, "supervised by upstart, will stop to signal readyness"); + serverLog(REDIS_NOTICE, "supervised by upstart, will stop to signal readyness"); raise(SIGSTOP); unsetenv("UPSTART_JOB"); return 1; @@ -3676,7 +3676,7 @@ int redisSupervisedSystemd(void) { int sendto_flags = 0; if (!notify_socket) { - redisLog(REDIS_WARNING, + serverLog(REDIS_WARNING, "systemd supervision requested, but NOTIFY_SOCKET not found"); return 0; } @@ -3685,9 +3685,9 @@ int redisSupervisedSystemd(void) { return 0; } - redisLog(REDIS_NOTICE, "supervised by systemd, will signal readyness"); + serverLog(REDIS_NOTICE, "supervised by systemd, will signal readyness"); if ((fd = socket(AF_UNIX, SOCK_DGRAM, 0)) == -1) { - redisLog(REDIS_WARNING, + serverLog(REDIS_WARNING, "Can't connect to systemd socket %s", notify_socket); return 0; } @@ -3716,7 +3716,7 @@ int redisSupervisedSystemd(void) { sendto_flags |= MSG_NOSIGNAL; #endif if (sendmsg(fd, &hdr, sendto_flags) < 0) { - redisLog(REDIS_WARNING, "Can't send notification to systemd"); + serverLog(REDIS_WARNING, "Can't send notification to systemd"); close(fd); return 0; } @@ -3847,9 +3847,9 @@ int main(int argc, char **argv) { j++; } if (server.sentinel_mode && configfile && *configfile == '-') { - redisLog(REDIS_WARNING, + serverLog(REDIS_WARNING, "Sentinel config from STDIN not allowed."); - redisLog(REDIS_WARNING, + serverLog(REDIS_WARNING, "Sentinel needs config file on disk to save state. Exiting..."); exit(1); } @@ -3858,7 +3858,7 @@ int main(int argc, char **argv) { loadServerConfig(configfile,options); sdsfree(options); } else { - redisLog(REDIS_WARNING, "Warning: no config file specified, using the default config. In order to specify a config file use %s /path/to/%s.conf", argv[0], server.sentinel_mode ? "sentinel" : "redis"); + serverLog(REDIS_WARNING, "Warning: no config file specified, using the default config. In order to specify a config file use %s /path/to/%s.conf", argv[0], server.sentinel_mode ? "sentinel" : "redis"); } server.supervised = redisIsSupervised(server.supervised_mode); @@ -3872,7 +3872,7 @@ int main(int argc, char **argv) { if (!server.sentinel_mode) { /* Things not needed when running in Sentinel mode. */ - redisLog(REDIS_WARNING,"Server started, Redis version " REDIS_VERSION); + serverLog(REDIS_WARNING,"Server started, Redis version " REDIS_VERSION); #ifdef __linux__ linuxMemoryWarnings(); #endif @@ -3880,23 +3880,23 @@ int main(int argc, char **argv) { loadDataFromDisk(); if (server.cluster_enabled) { if (verifyClusterConfigWithData() == REDIS_ERR) { - redisLog(REDIS_WARNING, + serverLog(REDIS_WARNING, "You can't have keys in a DB different than DB 0 when in " "Cluster mode. 
Exiting."); exit(1); } } if (server.ipfd_count > 0) - redisLog(REDIS_NOTICE,"The server is now ready to accept connections on port %d", server.port); + serverLog(REDIS_NOTICE,"The server is now ready to accept connections on port %d", server.port); if (server.sofd > 0) - redisLog(REDIS_NOTICE,"The server is now ready to accept connections at %s", server.unixsocket); + serverLog(REDIS_NOTICE,"The server is now ready to accept connections at %s", server.unixsocket); } else { sentinelIsRunning(); } /* Warning the user about suspicious maxmemory setting. */ if (server.maxmemory > 0 && server.maxmemory < 1024*1024) { - redisLog(REDIS_WARNING,"WARNING: You specified a maxmemory value that is less than 1MB (current value is %llu bytes). Are you sure this is what you really want?", server.maxmemory); + serverLog(REDIS_WARNING,"WARNING: You specified a maxmemory value that is less than 1MB (current value is %llu bytes). Are you sure this is what you really want?", server.maxmemory); } aeSetBeforeSleepProc(server.el,beforeSleep); diff --git a/src/server.h b/src/server.h index 7149bb4f5..2e926b989 100644 --- a/src/server.h +++ b/src/server.h @@ -1258,13 +1258,13 @@ void forceCommandPropagation(redisClient *c, int flags); void preventCommandPropagation(redisClient *c); int prepareForShutdown(); #ifdef __GNUC__ -void redisLog(int level, const char *fmt, ...) +void serverLog(int level, const char *fmt, ...) __attribute__((format(printf, 2, 3))); #else -void redisLog(int level, const char *fmt, ...); +void serverLog(int level, const char *fmt, ...); #endif -void redisLogRaw(int level, const char *msg); -void redisLogFromHandler(int level, const char *msg); +void serverLogRaw(int level, const char *msg); +void serverLogFromHandler(int level, const char *msg); void usage(void); void updateDictResizePolicy(void); int htNeedsResize(dict *dict); @@ -1585,13 +1585,13 @@ void _redisAssertWithInfo(redisClient *c, robj *o, char *estr, char *file, int l void _redisAssert(char *estr, char *file, int line); void _redisPanic(char *msg, char *file, int line); void bugReportStart(void); -void redisLogObjectDebugInfo(robj *o); +void serverLogObjectDebugInfo(robj *o); void sigsegvHandler(int sig, siginfo_t *info, void *secret); sds genRedisInfoString(char *section); void enableWatchdog(int period); void disableWatchdog(void); void watchdogScheduleSignal(int period); -void redisLogHexDump(int level, char *descr, void *value, size_t len); +void serverLogHexDump(int level, char *descr, void *value, size_t len); #define redisDebug(fmt, ...) \ printf("DEBUG %s:%d > " fmt "\n", __FILE__, __LINE__, __VA_ARGS__) diff --git a/src/t_hash.c b/src/t_hash.c index ddfcf31a8..8754f3182 100644 --- a/src/t_hash.c +++ b/src/t_hash.c @@ -452,7 +452,7 @@ void hashTypeConvertZiplist(robj *o, int enc) { value = tryObjectEncoding(value); ret = dictAdd(dict, field, value); if (ret != DICT_OK) { - redisLogHexDump(REDIS_WARNING,"ziplist with dup elements dump", + serverLogHexDump(REDIS_WARNING,"ziplist with dup elements dump", o->ptr,ziplistBlobLen(o->ptr)); redisAssert(ret == DICT_OK); } From 554bd0e7bd81715e319cafda437ed2aebd44b6e9 Mon Sep 17 00:00:00 2001 From: antirez Date: Sun, 26 Jul 2015 15:20:46 +0200 Subject: [PATCH 0323/1928] RDMF: use client instead of redisClient, like Disque. 
--- src/aof.c | 12 +- src/bitops.c | 12 +- src/blocked.c | 12 +- src/cluster.c | 26 +-- src/cluster.h | 6 +- src/config.c | 6 +- src/db.c | 60 +++--- src/debug.c | 8 +- src/geo.c | 22 +-- src/hyperloglog.c | 12 +- src/latency.c | 6 +- src/multi.c | 30 +-- src/networking.c | 118 +++++------ src/object.c | 16 +- src/pubsub.c | 28 +-- src/rdb.c | 8 +- src/replication.c | 58 +++--- src/scripting.c | 14 +- src/sentinel.c | 28 +-- src/server.c | 36 ++-- src/server.h | 484 +++++++++++++++++++++++----------------------- src/slowlog.c | 2 +- src/slowlog.h | 2 +- src/sort.c | 2 +- src/t_hash.c | 38 ++-- src/t_list.c | 52 ++--- src/t_set.c | 38 ++-- src/t_string.c | 46 ++--- src/t_zset.c | 56 +++--- 29 files changed, 619 insertions(+), 619 deletions(-) diff --git a/src/aof.c b/src/aof.c index 7d7870908..17cae875c 100644 --- a/src/aof.c +++ b/src/aof.c @@ -550,8 +550,8 @@ void feedAppendOnlyFile(struct redisCommand *cmd, int dictid, robj **argv, int a /* In Redis commands are always executed in the context of a client, so in * order to load the append only file we need to create a fake client. */ -struct redisClient *createFakeClient(void) { - struct redisClient *c = zmalloc(sizeof(*c)); +struct client *createFakeClient(void) { + struct client *c = zmalloc(sizeof(*c)); selectDb(c,0); c->fd = -1; @@ -577,7 +577,7 @@ struct redisClient *createFakeClient(void) { return c; } -void freeFakeClientArgv(struct redisClient *c) { +void freeFakeClientArgv(struct client *c) { int j; for (j = 0; j < c->argc; j++) @@ -585,7 +585,7 @@ void freeFakeClientArgv(struct redisClient *c) { zfree(c->argv); } -void freeFakeClient(struct redisClient *c) { +void freeFakeClient(struct client *c) { sdsfree(c->querybuf); listRelease(c->reply); listRelease(c->watched_keys); @@ -597,7 +597,7 @@ void freeFakeClient(struct redisClient *c) { * error (the append only file is zero-length) REDIS_ERR is returned. On * fatal error an error message is logged and the program exists. */ int loadAppendOnlyFile(char *filename) { - struct redisClient *fakeClient; + struct client *fakeClient; FILE *fp = fopen(filename,"r"); struct redis_stat sb; int old_aof_state = server.aof_state; @@ -1297,7 +1297,7 @@ int rewriteAppendOnlyFileBackground(void) { return REDIS_OK; /* unreached */ } -void bgrewriteaofCommand(redisClient *c) { +void bgrewriteaofCommand(client *c) { if (server.aof_child_pid != -1) { addReplyError(c,"Background append only file rewriting already in progress"); } else if (server.rdb_child_pid != -1) { diff --git a/src/bitops.c b/src/bitops.c index 6763bf1b0..66fa97ef5 100644 --- a/src/bitops.c +++ b/src/bitops.c @@ -37,7 +37,7 @@ /* This helper function used by GETBIT / SETBIT parses the bit offset argument * making sure an error is returned if it is negative or if it overflows * Redis 512 MB limit for the string value. 
*/ -static int getBitOffsetFromArgument(redisClient *c, robj *o, size_t *offset) { +static int getBitOffsetFromArgument(client *c, robj *o, size_t *offset) { long long loffset; char *err = "bit offset is not an integer or out of range"; @@ -209,7 +209,7 @@ long redisBitpos(void *s, unsigned long count, int bit) { #define BITOP_NOT 3 /* SETBIT key offset bitvalue */ -void setbitCommand(redisClient *c) { +void setbitCommand(client *c) { robj *o; char *err = "bit is not an integer or out of range"; size_t bitoffset; @@ -256,7 +256,7 @@ void setbitCommand(redisClient *c) { } /* GETBIT key offset */ -void getbitCommand(redisClient *c) { +void getbitCommand(client *c) { robj *o; char llbuf[32]; size_t bitoffset; @@ -283,7 +283,7 @@ void getbitCommand(redisClient *c) { } /* BITOP op_name target_key src_key1 src_key2 src_key3 ... src_keyN */ -void bitopCommand(redisClient *c) { +void bitopCommand(client *c) { char *opname = c->argv[1]->ptr; robj *o, *targetkey = c->argv[2]; unsigned long op, j, numkeys; @@ -457,7 +457,7 @@ void bitopCommand(redisClient *c) { } /* BITCOUNT key [start end] */ -void bitcountCommand(redisClient *c) { +void bitcountCommand(client *c) { robj *o; long start, end, strlen; unsigned char *p; @@ -511,7 +511,7 @@ void bitcountCommand(redisClient *c) { } /* BITPOS key bit [start [end]] */ -void bitposCommand(redisClient *c) { +void bitposCommand(client *c) { robj *o; long bit, start, end, strlen; unsigned char *p; diff --git a/src/blocked.c b/src/blocked.c index 95b68fba1..d084521a3 100644 --- a/src/blocked.c +++ b/src/blocked.c @@ -73,7 +73,7 @@ * Note that if the timeout is zero (usually from the point of view of * commands API this means no timeout) the value stored into 'timeout' * is zero. */ -int getTimeoutFromObjectOrReply(redisClient *c, robj *object, mstime_t *timeout, int unit) { +int getTimeoutFromObjectOrReply(client *c, robj *object, mstime_t *timeout, int unit) { long long tval; if (getLongLongFromObjectOrReply(c,object,&tval, @@ -97,7 +97,7 @@ int getTimeoutFromObjectOrReply(redisClient *c, robj *object, mstime_t *timeout, /* Block a client for the specific operation type. Once the REDIS_BLOCKED * flag is set client query buffer is not longer processed, but accumulated, * and will be processed when the client is unblocked. */ -void blockClient(redisClient *c, int btype) { +void blockClient(client *c, int btype) { c->flags |= REDIS_BLOCKED; c->btype = btype; server.bpop_blocked_clients++; @@ -108,7 +108,7 @@ void blockClient(redisClient *c, int btype) { * unblocked after a blocking operation. */ void processUnblockedClients(void) { listNode *ln; - redisClient *c; + client *c; while (listLength(server.unblocked_clients)) { ln = listFirst(server.unblocked_clients); @@ -131,7 +131,7 @@ void processUnblockedClients(void) { /* Unblock a client calling the right function depending on the kind * of operation the client is blocking for. */ -void unblockClient(redisClient *c) { +void unblockClient(client *c) { if (c->btype == REDIS_BLOCKED_LIST) { unblockClientWaitingData(c); } else if (c->btype == REDIS_BLOCKED_WAIT) { @@ -154,7 +154,7 @@ void unblockClient(redisClient *c) { /* This function gets called when a blocked client timed out in order to * send it a reply of some kind. 
*/ -void replyToBlockedClientTimedOut(redisClient *c) { +void replyToBlockedClientTimedOut(client *c) { if (c->btype == REDIS_BLOCKED_LIST) { addReply(c,shared.nullmultibulk); } else if (c->btype == REDIS_BLOCKED_WAIT) { @@ -177,7 +177,7 @@ void disconnectAllBlockedClients(void) { listRewind(server.clients,&li); while((ln = listNext(&li))) { - redisClient *c = listNodeValue(ln); + client *c = listNodeValue(ln); if (c->flags & REDIS_BLOCKED) { addReplySds(c,sdsnew( diff --git a/src/cluster.c b/src/cluster.c index 8fd5c9328..a294937c3 100644 --- a/src/cluster.c +++ b/src/cluster.c @@ -3722,7 +3722,7 @@ sds clusterGenNodesDescription(int filter) { * CLUSTER command * -------------------------------------------------------------------------- */ -int getSlotOrReply(redisClient *c, robj *o) { +int getSlotOrReply(client *c, robj *o) { long long slot; if (getLongLongFromObject(o,&slot) != REDIS_OK || @@ -3734,7 +3734,7 @@ int getSlotOrReply(redisClient *c, robj *o) { return (int) slot; } -void clusterReplyMultiBulkSlots(redisClient *c) { +void clusterReplyMultiBulkSlots(client *c) { /* Format: 1) 1) start slot * 2) end slot * 3) 1) master IP @@ -3804,7 +3804,7 @@ void clusterReplyMultiBulkSlots(redisClient *c) { setDeferredMultiBulkLength(c, slot_replylen, num_masters); } -void clusterCommand(redisClient *c) { +void clusterCommand(client *c) { if (server.cluster_enabled == 0) { addReplyError(c,"This instance has cluster support disabled"); return; @@ -4363,7 +4363,7 @@ int verifyDumpPayload(unsigned char *p, size_t len) { /* DUMP keyname * DUMP is actually not used by Redis Cluster but it is the obvious * complement of RESTORE and can be useful for different applications. */ -void dumpCommand(redisClient *c) { +void dumpCommand(client *c) { robj *o, *dumpobj; rio payload; @@ -4384,7 +4384,7 @@ void dumpCommand(redisClient *c) { } /* RESTORE key ttl serialized-value [REPLACE] */ -void restoreCommand(redisClient *c) { +void restoreCommand(client *c) { long long ttl; rio payload; int j, type, replace = 0; @@ -4466,7 +4466,7 @@ typedef struct migrateCachedSocket { * If the caller detects an error while using the socket, migrateCloseSocket() * should be called so that the connection will be created from scratch * the next time. */ -migrateCachedSocket* migrateGetSocket(redisClient *c, robj *host, robj *port, long timeout) { +migrateCachedSocket* migrateGetSocket(client *c, robj *host, robj *port, long timeout) { int fd; sds name = sdsempty(); migrateCachedSocket *cs; @@ -4558,7 +4558,7 @@ void migrateCloseTimedoutSockets(void) { } /* MIGRATE host port key dbid timeout [COPY | REPLACE] */ -void migrateCommand(redisClient *c) { +void migrateCommand(client *c) { migrateCachedSocket *cs; int copy, replace, j; long timeout; @@ -4723,7 +4723,7 @@ socket_rd_err: * The client should issue ASKING before to actually send the command to * the target instance. See the Redis Cluster specification for more * information. */ -void askingCommand(redisClient *c) { +void askingCommand(client *c) { if (server.cluster_enabled == 0) { addReplyError(c,"This instance has cluster support disabled"); return; @@ -4735,7 +4735,7 @@ void askingCommand(redisClient *c) { /* The READONLY command is used by clients to enter the read-only mode. * In this mode slaves will not redirect clients as long as clients access * with read-only commands to keys that are served by the slave's master. 
*/ -void readonlyCommand(redisClient *c) { +void readonlyCommand(client *c) { if (server.cluster_enabled == 0) { addReplyError(c,"This instance has cluster support disabled"); return; @@ -4745,7 +4745,7 @@ void readonlyCommand(redisClient *c) { } /* The READWRITE command just clears the READONLY command state. */ -void readwriteCommand(redisClient *c) { +void readwriteCommand(client *c) { c->flags &= ~REDIS_READONLY; addReply(c,shared.ok); } @@ -4779,7 +4779,7 @@ void readwriteCommand(redisClient *c) { * not bound to any node. In this case the cluster global state should be * already "down" but it is fragile to rely on the update of the global state, * so we also handle it here. */ -clusterNode *getNodeByQuery(redisClient *c, struct redisCommand *cmd, robj **argv, int argc, int *hashslot, int *error_code) { +clusterNode *getNodeByQuery(client *c, struct redisCommand *cmd, robj **argv, int argc, int *hashslot, int *error_code) { clusterNode *n = NULL; robj *firstkey = NULL; int multiple_keys = 0; @@ -4940,7 +4940,7 @@ clusterNode *getNodeByQuery(redisClient *c, struct redisCommand *cmd, robj **arg * are used, then the node 'n' should not be NULL, but should be the * node we want to mention in the redirection. Moreover hashslot should * be set to the hash slot that caused the redirection. */ -void clusterRedirectClient(redisClient *c, clusterNode *n, int hashslot, int error_code) { +void clusterRedirectClient(client *c, clusterNode *n, int hashslot, int error_code) { if (error_code == REDIS_CLUSTER_REDIR_CROSS_SLOT) { addReplySds(c,sdsnew("-CROSSSLOT Keys in request don't hash to the same slot\r\n")); } else if (error_code == REDIS_CLUSTER_REDIR_UNSTABLE) { @@ -4975,7 +4975,7 @@ void clusterRedirectClient(redisClient *c, clusterNode *n, int hashslot, int err * If the client is found to be blocked into an hash slot this node no * longer handles, the client is sent a redirection error, and the function * returns 1. Otherwise 0 is returned and no operation is performed. */ -int clusterRedirectBlockedClientIfNeeded(redisClient *c) { +int clusterRedirectBlockedClientIfNeeded(client *c) { if (c->flags & REDIS_BLOCKED && c->btype == REDIS_BLOCKED_LIST) { dictEntry *de; dictIterator *di; diff --git a/src/cluster.h b/src/cluster.h index bf442a222..a6c7e4462 100644 --- a/src/cluster.h +++ b/src/cluster.h @@ -249,8 +249,8 @@ typedef struct { master is up. 
*/ /* ---------------------- API exported outside cluster.c -------------------- */ -clusterNode *getNodeByQuery(redisClient *c, struct redisCommand *cmd, robj **argv, int argc, int *hashslot, int *ask); -int clusterRedirectBlockedClientIfNeeded(redisClient *c); -void clusterRedirectClient(redisClient *c, clusterNode *n, int hashslot, int error_code); +clusterNode *getNodeByQuery(client *c, struct redisCommand *cmd, robj **argv, int argc, int *hashslot, int *ask); +int clusterRedirectBlockedClientIfNeeded(client *c); +void clusterRedirectClient(client *c, clusterNode *n, int hashslot, int error_code); #endif /* __REDIS_CLUSTER_H */ diff --git a/src/config.c b/src/config.c index 0659ee2fc..167ea6fd7 100644 --- a/src/config.c +++ b/src/config.c @@ -699,7 +699,7 @@ void loadServerConfig(char *filename, char *options) { #define config_set_else } else -void configSetCommand(redisClient *c) { +void configSetCommand(client *c) { robj *o; long long ll; int err; @@ -1024,7 +1024,7 @@ badfmt: /* Bad format errors */ } \ } while(0); -void configGetCommand(redisClient *c) { +void configGetCommand(client *c) { robj *o = c->argv[2]; void *replylen = addDeferredMultiBulkLength(c); char *pattern = o->ptr; @@ -1843,7 +1843,7 @@ int rewriteConfig(char *path) { * CONFIG command entry point *----------------------------------------------------------------------------*/ -void configCommand(redisClient *c) { +void configCommand(client *c) { if (!strcasecmp(c->argv[1]->ptr,"set")) { if (c->argc != 4) goto badarity; configSetCommand(c); diff --git a/src/db.c b/src/db.c index 35481fecd..406d7b4ce 100644 --- a/src/db.c +++ b/src/db.c @@ -99,13 +99,13 @@ robj *lookupKeyWrite(redisDb *db, robj *key) { return lookupKey(db,key); } -robj *lookupKeyReadOrReply(redisClient *c, robj *key, robj *reply) { +robj *lookupKeyReadOrReply(client *c, robj *key, robj *reply) { robj *o = lookupKeyRead(c->db, key); if (!o) addReply(c,reply); return o; } -robj *lookupKeyWriteOrReply(redisClient *c, robj *key, robj *reply) { +robj *lookupKeyWriteOrReply(client *c, robj *key, robj *reply) { robj *o = lookupKeyWrite(c->db, key); if (!o) addReply(c,reply); return o; @@ -247,7 +247,7 @@ long long emptyDb(void(callback)(void*)) { return removed; } -int selectDb(redisClient *c, int id) { +int selectDb(client *c, int id) { if (id < 0 || id >= server.dbnum) return REDIS_ERR; c->db = &server.db[id]; @@ -275,7 +275,7 @@ void signalFlushedDb(int dbid) { * Type agnostic commands operating on the key space *----------------------------------------------------------------------------*/ -void flushdbCommand(redisClient *c) { +void flushdbCommand(client *c) { server.dirty += dictSize(c->db->dict); signalFlushedDb(c->db->id); dictEmpty(c->db->dict,NULL); @@ -284,7 +284,7 @@ void flushdbCommand(redisClient *c) { addReply(c,shared.ok); } -void flushallCommand(redisClient *c) { +void flushallCommand(client *c) { signalFlushedDb(-1); server.dirty += emptyDb(NULL); addReply(c,shared.ok); @@ -302,7 +302,7 @@ void flushallCommand(redisClient *c) { server.dirty++; } -void delCommand(redisClient *c) { +void delCommand(client *c) { int deleted = 0, j; for (j = 1; j < c->argc; j++) { @@ -320,7 +320,7 @@ void delCommand(redisClient *c) { /* EXISTS key1 key2 ... key_N. * Return value is the number of keys existing. 
*/ -void existsCommand(redisClient *c) { +void existsCommand(client *c) { long long count = 0; int j; @@ -331,7 +331,7 @@ void existsCommand(redisClient *c) { addReplyLongLong(c,count); } -void selectCommand(redisClient *c) { +void selectCommand(client *c) { long id; if (getLongFromObjectOrReply(c, c->argv[1], &id, @@ -349,7 +349,7 @@ void selectCommand(redisClient *c) { } } -void randomkeyCommand(redisClient *c) { +void randomkeyCommand(client *c) { robj *key; if ((key = dbRandomKey(c->db)) == NULL) { @@ -361,7 +361,7 @@ void randomkeyCommand(redisClient *c) { decrRefCount(key); } -void keysCommand(redisClient *c) { +void keysCommand(client *c) { dictIterator *di; dictEntry *de; sds pattern = c->argv[1]->ptr; @@ -423,7 +423,7 @@ void scanCallback(void *privdata, const dictEntry *de) { * if the cursor is valid, store it as unsigned integer into *cursor and * returns REDIS_OK. Otherwise return REDIS_ERR and send an error to the * client. */ -int parseScanCursorOrReply(redisClient *c, robj *o, unsigned long *cursor) { +int parseScanCursorOrReply(client *c, robj *o, unsigned long *cursor) { char *eptr; /* Use strtoul() because we need an *unsigned* long, so @@ -449,7 +449,7 @@ int parseScanCursorOrReply(redisClient *c, robj *o, unsigned long *cursor) { * * In the case of a Hash object the function returns both the field and value * of every element on the Hash. */ -void scanGenericCommand(redisClient *c, robj *o, unsigned long cursor) { +void scanGenericCommand(client *c, robj *o, unsigned long cursor) { int i, j; list *keys = listCreate(); listNode *node, *nextnode; @@ -627,21 +627,21 @@ cleanup: } /* The SCAN command completely relies on scanGenericCommand. */ -void scanCommand(redisClient *c) { +void scanCommand(client *c) { unsigned long cursor; if (parseScanCursorOrReply(c,c->argv[1],&cursor) == REDIS_ERR) return; scanGenericCommand(c,NULL,cursor); } -void dbsizeCommand(redisClient *c) { +void dbsizeCommand(client *c) { addReplyLongLong(c,dictSize(c->db->dict)); } -void lastsaveCommand(redisClient *c) { +void lastsaveCommand(client *c) { addReplyLongLong(c,server.lastsave); } -void typeCommand(redisClient *c) { +void typeCommand(client *c) { robj *o; char *type; @@ -661,7 +661,7 @@ void typeCommand(redisClient *c) { addReplyStatus(c,type); } -void shutdownCommand(redisClient *c) { +void shutdownCommand(client *c) { int flags = 0; if (c->argc > 2) { @@ -689,7 +689,7 @@ void shutdownCommand(redisClient *c) { addReplyError(c,"Errors trying to SHUTDOWN. Check logs."); } -void renameGenericCommand(redisClient *c, int nx) { +void renameGenericCommand(client *c, int nx) { robj *o; long long expire; int samekey = 0; @@ -731,15 +731,15 @@ void renameGenericCommand(redisClient *c, int nx) { addReply(c,nx ? shared.cone : shared.ok); } -void renameCommand(redisClient *c) { +void renameCommand(client *c) { renameGenericCommand(c,0); } -void renamenxCommand(redisClient *c) { +void renamenxCommand(client *c) { renameGenericCommand(c,1); } -void moveCommand(redisClient *c) { +void moveCommand(client *c) { robj *o; redisDb *src, *dst; int srcid; @@ -899,7 +899,7 @@ int expireIfNeeded(redisDb *db, robj *key) { * * unit is either UNIT_SECONDS or UNIT_MILLISECONDS, and is only used for * the argv[2] parameter. The basetime is always specified in milliseconds. 
*/ -void expireGenericCommand(redisClient *c, long long basetime, int unit) { +void expireGenericCommand(client *c, long long basetime, int unit) { robj *key = c->argv[1], *param = c->argv[2]; long long when; /* unix time in milliseconds when the key will expire. */ @@ -945,23 +945,23 @@ void expireGenericCommand(redisClient *c, long long basetime, int unit) { } } -void expireCommand(redisClient *c) { +void expireCommand(client *c) { expireGenericCommand(c,mstime(),UNIT_SECONDS); } -void expireatCommand(redisClient *c) { +void expireatCommand(client *c) { expireGenericCommand(c,0,UNIT_SECONDS); } -void pexpireCommand(redisClient *c) { +void pexpireCommand(client *c) { expireGenericCommand(c,mstime(),UNIT_MILLISECONDS); } -void pexpireatCommand(redisClient *c) { +void pexpireatCommand(client *c) { expireGenericCommand(c,0,UNIT_MILLISECONDS); } -void ttlGenericCommand(redisClient *c, int output_ms) { +void ttlGenericCommand(client *c, int output_ms) { long long expire, ttl = -1; /* If the key does not exist at all, return -2 */ @@ -983,15 +983,15 @@ void ttlGenericCommand(redisClient *c, int output_ms) { } } -void ttlCommand(redisClient *c) { +void ttlCommand(client *c) { ttlGenericCommand(c, 0); } -void pttlCommand(redisClient *c) { +void pttlCommand(client *c) { ttlGenericCommand(c, 1); } -void persistCommand(redisClient *c) { +void persistCommand(client *c) { dictEntry *de; de = dictFind(c->db->dict,c->argv[1]->ptr); diff --git a/src/debug.c b/src/debug.c index 9c9118eb2..1e87300eb 100644 --- a/src/debug.c +++ b/src/debug.c @@ -258,7 +258,7 @@ void inputCatSds(void *result, const char *str) { *info = sdscat(*info, str); } -void debugCommand(redisClient *c) { +void debugCommand(client *c) { if (!strcasecmp(c->argv[1]->ptr,"segfault")) { *((char*)-1) = 'x'; } else if (!strcasecmp(c->argv[1]->ptr,"oom")) { @@ -483,7 +483,7 @@ void _redisAssert(char *estr, char *file, int line) { *((char*)-1) = 'x'; } -void _redisAssertPrintClientInfo(redisClient *c) { +void _redisAssertPrintClientInfo(client *c) { int j; bugReportStart(); @@ -537,7 +537,7 @@ void _redisAssertPrintObject(robj *o) { serverLogObjectDebugInfo(o); } -void _redisAssertWithInfo(redisClient *c, robj *o, char *estr, char *file, int line) { +void _redisAssertWithInfo(client *c, robj *o, char *estr, char *file, int line) { if (c) _redisAssertPrintClientInfo(c); if (o) _redisAssertPrintObject(o); _redisAssert(estr,file,line); @@ -770,7 +770,7 @@ void logStackTrace(ucontext_t *uc) { void logCurrentClient(void) { if (server.current_client == NULL) return; - redisClient *cc = server.current_client; + client *cc = server.current_client; sds client; int j; diff --git a/src/geo.c b/src/geo.c index 63500bed7..a0b2ea370 100644 --- a/src/geo.c +++ b/src/geo.c @@ -89,7 +89,7 @@ int decodeGeohash(double bits, double *xy) { /* Input Argument Helper */ /* Take a pointer to the latitude arg then use the next arg for longitude. * On parse error REDIS_ERR is returned, otherwise REDIS_OK. */ -int extractLongLatOrReply(redisClient *c, robj **argv, +int extractLongLatOrReply(client *c, robj **argv, double *xy) { for (int i = 0; i < 2; i++) { if (getDoubleFromObjectOrReply(c, argv[i], xy + i, NULL) != @@ -123,7 +123,7 @@ int longLatFromMember(robj *zobj, robj *member, double *xy) { * * If the unit is not valid, an error is reported to the client, and a value * less than zero is returned. 
*/ -double extractUnitOrReply(redisClient *c, robj *unit) { +double extractUnitOrReply(client *c, robj *unit) { char *u = unit->ptr; if (!strcmp(u, "m")) { @@ -148,7 +148,7 @@ double extractUnitOrReply(redisClient *c, robj *unit) { * to use in order to convert meters to the unit. * * On error a value less than zero is returned. */ -double extractDistanceOrReply(redisClient *c, robj **argv, +double extractDistanceOrReply(client *c, robj **argv, double *conversion) { double distance; if (getDoubleFromObjectOrReply(c, argv[0], &distance, @@ -168,7 +168,7 @@ double extractDistanceOrReply(redisClient *c, robj **argv, * than "5.2144992818115 meters away." We provide 4 digits after the dot * so that the returned value is decently accurate even when the unit is * the kilometer. */ -void addReplyDoubleDistance(redisClient *c, double d) { +void addReplyDoubleDistance(client *c, double d) { char dbuf[128]; int dlen = snprintf(dbuf, sizeof(dbuf), "%.4f", d); addReplyBulkCBuffer(c, dbuf, dlen); @@ -363,7 +363,7 @@ static int sort_gp_desc(const void *a, const void *b) { * ==================================================================== */ /* GEOADD key long lat name [long2 lat2 name2 ... longN latN nameN] */ -void geoaddCommand(redisClient *c) { +void geoaddCommand(client *c) { /* Check arguments number for sanity. */ if ((c->argc - 2) % 3 != 0) { /* Need an odd number of arguments if we got this far... */ @@ -419,7 +419,7 @@ void geoaddCommand(redisClient *c) { /* GEORADIUS key x y radius unit [WITHDIST] [WITHHASH] [WITHCOORD] [ASC|DESC] * [COUNT count] * GEORADIUSBYMEMBER key member radius unit ... options ... */ -void georadiusGeneric(redisClient *c, int type) { +void georadiusGeneric(client *c, int type) { robj *key = c->argv[1]; /* Look up the requested zset */ @@ -569,12 +569,12 @@ void georadiusGeneric(redisClient *c, int type) { } /* GEORADIUS wrapper function. */ -void georadiusCommand(redisClient *c) { +void georadiusCommand(client *c) { georadiusGeneric(c, RADIUS_COORDS); } /* GEORADIUSBYMEMBER wrapper function. */ -void georadiusByMemberCommand(redisClient *c) { +void georadiusByMemberCommand(client *c) { georadiusGeneric(c, RADIUS_MEMBER); } @@ -582,7 +582,7 @@ void georadiusByMemberCommand(redisClient *c) { * * Returns an array with an 11 characters geohash representation of the * position of the specified elements. */ -void geohashCommand(redisClient *c) { +void geohashCommand(client *c) { char *geoalphabet= "0123456789bcdefghjkmnpqrstuvwxyz"; int j; @@ -637,7 +637,7 @@ void geohashCommand(redisClient *c) { * * Returns an array of two-items arrays representing the x,y position of each * element specified in the arguments. For missing elements NULL is returned. */ -void geoposCommand(redisClient *c) { +void geoposCommand(client *c) { int j; /* Look up the requested zset */ @@ -671,7 +671,7 @@ void geoposCommand(redisClient *c) { * Return the distance, in meters by default, otherwise accordig to "unit", * between points ele1 and ele2. If one or more elements are missing NULL * is returned. */ -void geodistCommand(redisClient *c) { +void geodistCommand(client *c) { double to_meter = 1; /* Check if there is the unit to extract, otherwise assume meters. */ diff --git a/src/hyperloglog.c b/src/hyperloglog.c index 74e7b8fc6..6405f5ba9 100644 --- a/src/hyperloglog.c +++ b/src/hyperloglog.c @@ -1121,7 +1121,7 @@ robj *createHLLObject(void) { /* Check if the object is a String with a valid HLL representation. 
* Return REDIS_OK if this is true, otherwise reply to the client * with an error and return REDIS_ERR. */ -int isHLLObjectOrReply(redisClient *c, robj *o) { +int isHLLObjectOrReply(client *c, robj *o) { struct hllhdr *hdr; /* Key exists, check type */ @@ -1152,7 +1152,7 @@ invalid: } /* PFADD var ele ele ele ... ele => :0 or :1 */ -void pfaddCommand(redisClient *c) { +void pfaddCommand(client *c) { robj *o = lookupKeyWrite(c->db,c->argv[1]); struct hllhdr *hdr; int updated = 0, j; @@ -1192,7 +1192,7 @@ void pfaddCommand(redisClient *c) { } /* PFCOUNT var -> approximated cardinality of set. */ -void pfcountCommand(redisClient *c) { +void pfcountCommand(client *c) { robj *o; struct hllhdr *hdr; uint64_t card; @@ -1282,7 +1282,7 @@ void pfcountCommand(redisClient *c) { } /* PFMERGE dest src1 src2 src3 ... srcN => OK */ -void pfmergeCommand(redisClient *c) { +void pfmergeCommand(client *c) { uint8_t max[HLL_REGISTERS]; struct hllhdr *hdr; int j; @@ -1348,7 +1348,7 @@ void pfmergeCommand(redisClient *c) { * This command performs a self-test of the HLL registers implementation. * Something that is not easy to test from within the outside. */ #define HLL_TEST_CYCLES 1000 -void pfselftestCommand(redisClient *c) { +void pfselftestCommand(client *c) { unsigned int j, i; sds bitcounters = sdsnewlen(NULL,HLL_DENSE_SIZE); struct hllhdr *hdr = (struct hllhdr*) bitcounters, *hdr2; @@ -1452,7 +1452,7 @@ cleanup: /* PFDEBUG ... args ... * Different debugging related operations about the HLL implementation. */ -void pfdebugCommand(redisClient *c) { +void pfdebugCommand(client *c) { char *cmd = c->argv[1]->ptr; struct hllhdr *hdr; robj *o; diff --git a/src/latency.c b/src/latency.c index d6261f603..49a01f590 100644 --- a/src/latency.c +++ b/src/latency.c @@ -474,7 +474,7 @@ sds createLatencyReport(void) { /* latencyCommand() helper to produce a time-delay reply for all the samples * in memory for the specified time series. */ -void latencyCommandReplyWithSamples(redisClient *c, struct latencyTimeSeries *ts) { +void latencyCommandReplyWithSamples(client *c, struct latencyTimeSeries *ts) { void *replylen = addDeferredMultiBulkLength(c); int samples = 0, j; @@ -492,7 +492,7 @@ void latencyCommandReplyWithSamples(redisClient *c, struct latencyTimeSeries *ts /* latencyCommand() helper to produce the reply for the LATEST subcommand, * listing the last latency sample for every event type registered so far. */ -void latencyCommandReplyWithLatestEvents(redisClient *c) { +void latencyCommandReplyWithLatestEvents(client *c) { dictIterator *di; dictEntry *de; @@ -564,7 +564,7 @@ sds latencyCommandGenSparkeline(char *event, struct latencyTimeSeries *ts) { * LATENCY DOCTOR: returns an human readable analysis of instance latency. * LATENCY GRAPH: provide an ASCII graph of the latency of the specified event. 
*/ -void latencyCommand(redisClient *c) { +void latencyCommand(client *c) { struct latencyTimeSeries *ts; if (!strcasecmp(c->argv[1]->ptr,"history") && c->argc == 3) { diff --git a/src/multi.c b/src/multi.c index 313fccd04..aff394023 100644 --- a/src/multi.c +++ b/src/multi.c @@ -32,13 +32,13 @@ /* ================================ MULTI/EXEC ============================== */ /* Client state initialization for MULTI/EXEC */ -void initClientMultiState(redisClient *c) { +void initClientMultiState(client *c) { c->mstate.commands = NULL; c->mstate.count = 0; } /* Release all the resources associated with MULTI/EXEC state */ -void freeClientMultiState(redisClient *c) { +void freeClientMultiState(client *c) { int j; for (j = 0; j < c->mstate.count; j++) { @@ -53,7 +53,7 @@ void freeClientMultiState(redisClient *c) { } /* Add a new command into the MULTI commands queue */ -void queueMultiCommand(redisClient *c) { +void queueMultiCommand(client *c) { multiCmd *mc; int j; @@ -69,7 +69,7 @@ void queueMultiCommand(redisClient *c) { c->mstate.count++; } -void discardTransaction(redisClient *c) { +void discardTransaction(client *c) { freeClientMultiState(c); initClientMultiState(c); c->flags &= ~(REDIS_MULTI|REDIS_DIRTY_CAS|REDIS_DIRTY_EXEC); @@ -78,12 +78,12 @@ void discardTransaction(redisClient *c) { /* Flag the transacation as DIRTY_EXEC so that EXEC will fail. * Should be called every time there is an error while queueing a command. */ -void flagTransaction(redisClient *c) { +void flagTransaction(client *c) { if (c->flags & REDIS_MULTI) c->flags |= REDIS_DIRTY_EXEC; } -void multiCommand(redisClient *c) { +void multiCommand(client *c) { if (c->flags & REDIS_MULTI) { addReplyError(c,"MULTI calls can not be nested"); return; @@ -92,7 +92,7 @@ void multiCommand(redisClient *c) { addReply(c,shared.ok); } -void discardCommand(redisClient *c) { +void discardCommand(client *c) { if (!(c->flags & REDIS_MULTI)) { addReplyError(c,"DISCARD without MULTI"); return; @@ -103,7 +103,7 @@ void discardCommand(redisClient *c) { /* Send a MULTI command to all the slaves and AOF file. Check the execCommand * implementation for more information. */ -void execCommandPropagateMulti(redisClient *c) { +void execCommandPropagateMulti(client *c) { robj *multistring = createStringObject("MULTI",5); propagate(server.multiCommand,c->db->id,&multistring,1, @@ -111,7 +111,7 @@ void execCommandPropagateMulti(redisClient *c) { decrRefCount(multistring); } -void execCommand(redisClient *c) { +void execCommand(client *c) { int j; robj **orig_argv; int orig_argc; @@ -199,7 +199,7 @@ typedef struct watchedKey { } watchedKey; /* Watch for the specified key */ -void watchForKey(redisClient *c, robj *key) { +void watchForKey(client *c, robj *key) { list *clients = NULL; listIter li; listNode *ln; @@ -230,7 +230,7 @@ void watchForKey(redisClient *c, robj *key) { /* Unwatch all the keys watched by this client. To clean the EXEC dirty * flag is up to the caller. 
*/ -void unwatchAllKeys(redisClient *c) { +void unwatchAllKeys(client *c) { listIter li; listNode *ln; @@ -271,7 +271,7 @@ void touchWatchedKey(redisDb *db, robj *key) { /* Check if we are already watching for this key */ listRewind(clients,&li); while((ln = listNext(&li))) { - redisClient *c = listNodeValue(ln); + client *c = listNodeValue(ln); c->flags |= REDIS_DIRTY_CAS; } @@ -288,7 +288,7 @@ void touchWatchedKeysOnFlush(int dbid) { /* For every client, check all the waited keys */ listRewind(server.clients,&li1); while((ln = listNext(&li1))) { - redisClient *c = listNodeValue(ln); + client *c = listNodeValue(ln); listRewind(c->watched_keys,&li2); while((ln = listNext(&li2))) { watchedKey *wk = listNodeValue(ln); @@ -304,7 +304,7 @@ void touchWatchedKeysOnFlush(int dbid) { } } -void watchCommand(redisClient *c) { +void watchCommand(client *c) { int j; if (c->flags & REDIS_MULTI) { @@ -316,7 +316,7 @@ void watchCommand(redisClient *c) { addReply(c,shared.ok); } -void unwatchCommand(redisClient *c) { +void unwatchCommand(client *c) { unwatchAllKeys(c); c->flags &= (~REDIS_DIRTY_CAS); addReply(c,shared.ok); diff --git a/src/networking.c b/src/networking.c index ec9aef2bc..277c400fc 100644 --- a/src/networking.c +++ b/src/networking.c @@ -31,7 +31,7 @@ #include #include -static void setProtocolError(redisClient *c, int pos); +static void setProtocolError(client *c, int pos); /* Return the size consumed from the allocator, for the specified SDS string, * including internal fragmentation. This function is used in order to compute @@ -61,8 +61,8 @@ int listMatchObjects(void *a, void *b) { return equalStringObjects(a,b); } -redisClient *createClient(int fd) { - redisClient *c = zmalloc(sizeof(redisClient)); +client *createClient(int fd) { + client *c = zmalloc(sizeof(client)); /* passing -1 as fd it is possible to create a non connected client. * This is useful since all the Redis commands needs to be executed @@ -150,7 +150,7 @@ redisClient *createClient(int fd) { * Typically gets called every time a reply is built, before adding more * data to the clients output buffers. If the function returns REDIS_ERR no * data should be appended to the output buffers. */ -int prepareClientToWrite(redisClient *c) { +int prepareClientToWrite(client *c) { /* If it's the Lua client we always return ok without installing any * handler since there is no socket at all. */ if (c->flags & REDIS_LUA_CLIENT) return REDIS_OK; @@ -201,7 +201,7 @@ robj *dupLastObjectIfNeeded(list *reply) { * Low level functions to add more data to output buffers. * -------------------------------------------------------------------------- */ -int _addReplyToBuffer(redisClient *c, const char *s, size_t len) { +int _addReplyToBuffer(client *c, const char *s, size_t len) { size_t available = sizeof(c->buf)-c->bufpos; if (c->flags & REDIS_CLOSE_AFTER_REPLY) return REDIS_OK; @@ -218,7 +218,7 @@ int _addReplyToBuffer(redisClient *c, const char *s, size_t len) { return REDIS_OK; } -void _addReplyObjectToList(redisClient *c, robj *o) { +void _addReplyObjectToList(client *c, robj *o) { robj *tail; if (c->flags & REDIS_CLOSE_AFTER_REPLY) return; @@ -250,7 +250,7 @@ void _addReplyObjectToList(redisClient *c, robj *o) { /* This method takes responsibility over the sds. When it is no longer * needed it will be free'd, otherwise it ends up in a robj. 
*/ -void _addReplySdsToList(redisClient *c, sds s) { +void _addReplySdsToList(client *c, sds s) { robj *tail; if (c->flags & REDIS_CLOSE_AFTER_REPLY) { @@ -281,7 +281,7 @@ void _addReplySdsToList(redisClient *c, sds s) { asyncCloseClientOnOutputBufferLimitReached(c); } -void _addReplyStringToList(redisClient *c, const char *s, size_t len) { +void _addReplyStringToList(client *c, const char *s, size_t len) { robj *tail; if (c->flags & REDIS_CLOSE_AFTER_REPLY) return; @@ -317,7 +317,7 @@ void _addReplyStringToList(redisClient *c, const char *s, size_t len) { * The following functions are the ones that commands implementations will call. * -------------------------------------------------------------------------- */ -void addReply(redisClient *c, robj *obj) { +void addReply(client *c, robj *obj) { if (prepareClientToWrite(c) != REDIS_OK) return; /* This is an important place where we can avoid copy-on-write @@ -353,7 +353,7 @@ void addReply(redisClient *c, robj *obj) { } } -void addReplySds(redisClient *c, sds s) { +void addReplySds(client *c, sds s) { if (prepareClientToWrite(c) != REDIS_OK) { /* The caller expects the sds to be free'd. */ sdsfree(s); @@ -367,23 +367,23 @@ void addReplySds(redisClient *c, sds s) { } } -void addReplyString(redisClient *c, const char *s, size_t len) { +void addReplyString(client *c, const char *s, size_t len) { if (prepareClientToWrite(c) != REDIS_OK) return; if (_addReplyToBuffer(c,s,len) != REDIS_OK) _addReplyStringToList(c,s,len); } -void addReplyErrorLength(redisClient *c, const char *s, size_t len) { +void addReplyErrorLength(client *c, const char *s, size_t len) { addReplyString(c,"-ERR ",5); addReplyString(c,s,len); addReplyString(c,"\r\n",2); } -void addReplyError(redisClient *c, const char *err) { +void addReplyError(client *c, const char *err) { addReplyErrorLength(c,err,strlen(err)); } -void addReplyErrorFormat(redisClient *c, const char *fmt, ...) { +void addReplyErrorFormat(client *c, const char *fmt, ...) { size_t l, j; va_list ap; va_start(ap,fmt); @@ -399,17 +399,17 @@ void addReplyErrorFormat(redisClient *c, const char *fmt, ...) { sdsfree(s); } -void addReplyStatusLength(redisClient *c, const char *s, size_t len) { +void addReplyStatusLength(client *c, const char *s, size_t len) { addReplyString(c,"+",1); addReplyString(c,s,len); addReplyString(c,"\r\n",2); } -void addReplyStatus(redisClient *c, const char *status) { +void addReplyStatus(client *c, const char *status) { addReplyStatusLength(c,status,strlen(status)); } -void addReplyStatusFormat(redisClient *c, const char *fmt, ...) { +void addReplyStatusFormat(client *c, const char *fmt, ...) { va_list ap; va_start(ap,fmt); sds s = sdscatvprintf(sdsempty(),fmt,ap); @@ -420,7 +420,7 @@ void addReplyStatusFormat(redisClient *c, const char *fmt, ...) { /* Adds an empty object to the reply list that will contain the multi bulk * length, which is not known when this function is called. */ -void *addDeferredMultiBulkLength(redisClient *c) { +void *addDeferredMultiBulkLength(client *c) { /* Note that we install the write event here even if the object is not * ready to be sent, since we are sure that before returning to the * event loop setDeferredMultiBulkLength() will be called. */ @@ -430,7 +430,7 @@ void *addDeferredMultiBulkLength(redisClient *c) { } /* Populate the length object and try gluing it to the next chunk. 
*/ -void setDeferredMultiBulkLength(redisClient *c, void *node, long length) { +void setDeferredMultiBulkLength(client *c, void *node, long length) { listNode *ln = (listNode*)node; robj *len, *next; @@ -457,7 +457,7 @@ void setDeferredMultiBulkLength(redisClient *c, void *node, long length) { } /* Add a double as a bulk reply */ -void addReplyDouble(redisClient *c, double d) { +void addReplyDouble(client *c, double d) { char dbuf[128], sbuf[128]; int dlen, slen; if (isinf(d)) { @@ -473,7 +473,7 @@ void addReplyDouble(redisClient *c, double d) { /* Add a long long as integer reply or bulk len / multi bulk count. * Basically this is used to output . */ -void addReplyLongLongWithPrefix(redisClient *c, long long ll, char prefix) { +void addReplyLongLongWithPrefix(client *c, long long ll, char prefix) { char buf[128]; int len; @@ -495,7 +495,7 @@ void addReplyLongLongWithPrefix(redisClient *c, long long ll, char prefix) { addReplyString(c,buf,len+3); } -void addReplyLongLong(redisClient *c, long long ll) { +void addReplyLongLong(client *c, long long ll) { if (ll == 0) addReply(c,shared.czero); else if (ll == 1) @@ -504,7 +504,7 @@ void addReplyLongLong(redisClient *c, long long ll) { addReplyLongLongWithPrefix(c,ll,':'); } -void addReplyMultiBulkLen(redisClient *c, long length) { +void addReplyMultiBulkLen(client *c, long length) { if (length < REDIS_SHARED_BULKHDR_LEN) addReply(c,shared.mbulkhdr[length]); else @@ -512,7 +512,7 @@ void addReplyMultiBulkLen(redisClient *c, long length) { } /* Create the length prefix of a bulk reply, example: $2234 */ -void addReplyBulkLen(redisClient *c, robj *obj) { +void addReplyBulkLen(client *c, robj *obj) { size_t len; if (sdsEncodedObject(obj)) { @@ -538,21 +538,21 @@ void addReplyBulkLen(redisClient *c, robj *obj) { } /* Add a Redis Object as a bulk reply */ -void addReplyBulk(redisClient *c, robj *obj) { +void addReplyBulk(client *c, robj *obj) { addReplyBulkLen(c,obj); addReply(c,obj); addReply(c,shared.crlf); } /* Add a C buffer as bulk reply */ -void addReplyBulkCBuffer(redisClient *c, const void *p, size_t len) { +void addReplyBulkCBuffer(client *c, const void *p, size_t len) { addReplyLongLongWithPrefix(c,len,'$'); addReplyString(c,p,len); addReply(c,shared.crlf); } /* Add sds to reply (takes ownership of sds and frees it) */ -void addReplyBulkSds(redisClient *c, sds s) { +void addReplyBulkSds(client *c, sds s) { addReplySds(c,sdscatfmt(sdsempty(),"$%u\r\n", (unsigned long)sdslen(s))); addReplySds(c,s); @@ -560,7 +560,7 @@ void addReplyBulkSds(redisClient *c, sds s) { } /* Add a C nul term string as bulk reply */ -void addReplyBulkCString(redisClient *c, const char *s) { +void addReplyBulkCString(client *c, const char *s) { if (s == NULL) { addReply(c,shared.nullbulk); } else { @@ -569,7 +569,7 @@ void addReplyBulkCString(redisClient *c, const char *s) { } /* Add a long long as a bulk reply */ -void addReplyBulkLongLong(redisClient *c, long long ll) { +void addReplyBulkLongLong(client *c, long long ll) { char buf[64]; int len; @@ -580,7 +580,7 @@ void addReplyBulkLongLong(redisClient *c, long long ll) { /* Copy 'src' client output buffers into 'dst' client output buffers. * The function takes care of freeing the old output buffers of the * destination client. 
*/ -void copyClientOutputBuffer(redisClient *dst, redisClient *src) { +void copyClientOutputBuffer(client *dst, client *src) { listRelease(dst->reply); dst->reply = listDup(src->reply); memcpy(dst->buf,src->buf,src->bufpos); @@ -590,7 +590,7 @@ void copyClientOutputBuffer(redisClient *dst, redisClient *src) { #define MAX_ACCEPTS_PER_CALL 1000 static void acceptCommonHandler(int fd, int flags) { - redisClient *c; + client *c; if ((c = createClient(fd)) == NULL) { serverLog(REDIS_WARNING, "Error registering fd event for the new client: %s (fd=%d)", @@ -656,7 +656,7 @@ void acceptUnixHandler(aeEventLoop *el, int fd, void *privdata, int mask) { } } -static void freeClientArgv(redisClient *c) { +static void freeClientArgv(client *c) { int j; for (j = 0; j < c->argc; j++) decrRefCount(c->argv[j]); @@ -670,7 +670,7 @@ static void freeClientArgv(redisClient *c) { void disconnectSlaves(void) { while (listLength(server.slaves)) { listNode *ln = listFirst(server.slaves); - freeClient((redisClient*)ln->value); + freeClient((client*)ln->value); } } @@ -688,7 +688,7 @@ void replicationHandleMasterDisconnection(void) { if (server.masterhost != NULL) disconnectSlaves(); } -void freeClient(redisClient *c) { +void freeClient(client *c) { listNode *ln; /* If this is marked as current client unset it */ @@ -804,7 +804,7 @@ void freeClient(redisClient *c) { * This function is useful when we need to terminate a client but we are in * a context where calling freeClient() is not possible, because the client * should be valid for the continuation of the flow of the program. */ -void freeClientAsync(redisClient *c) { +void freeClientAsync(client *c) { if (c->flags & REDIS_CLOSE_ASAP || c->flags & REDIS_LUA_CLIENT) return; c->flags |= REDIS_CLOSE_ASAP; listAddNodeTail(server.clients_to_close,c); @@ -813,7 +813,7 @@ void freeClientAsync(redisClient *c) { void freeClientsInAsyncFreeQueue(void) { while (listLength(server.clients_to_close)) { listNode *ln = listFirst(server.clients_to_close); - redisClient *c = listNodeValue(ln); + client *c = listNodeValue(ln); c->flags &= ~REDIS_CLOSE_ASAP; freeClient(c); @@ -822,7 +822,7 @@ void freeClientsInAsyncFreeQueue(void) { } void sendReplyToClient(aeEventLoop *el, int fd, void *privdata, int mask) { - redisClient *c = privdata; + client *c = privdata; ssize_t nwritten = 0, totwritten = 0; size_t objlen; size_t objmem; @@ -906,7 +906,7 @@ void sendReplyToClient(aeEventLoop *el, int fd, void *privdata, int mask) { } /* resetClient prepare the client to process the next command */ -void resetClient(redisClient *c) { +void resetClient(client *c) { redisCommandProc *prevcmd = c->cmd ? c->cmd->proc : NULL; freeClientArgv(c); @@ -919,7 +919,7 @@ void resetClient(redisClient *c) { c->flags &= (~REDIS_ASKING); } -int processInlineBuffer(redisClient *c) { +int processInlineBuffer(client *c) { char *newline; int argc, j; sds *argv, aux; @@ -982,7 +982,7 @@ int processInlineBuffer(redisClient *c) { /* Helper function. Trims query buffer to make the function that processes * multi bulk requests idempotent. 
*/ -static void setProtocolError(redisClient *c, int pos) { +static void setProtocolError(client *c, int pos) { if (server.verbosity <= REDIS_VERBOSE) { sds client = catClientInfoString(sdsempty(),c); serverLog(REDIS_VERBOSE, @@ -993,7 +993,7 @@ static void setProtocolError(redisClient *c, int pos) { sdsrange(c->querybuf,pos,-1); } -int processMultibulkBuffer(redisClient *c) { +int processMultibulkBuffer(client *c) { char *newline = NULL; int pos = 0, ok; long long ll; @@ -1131,7 +1131,7 @@ int processMultibulkBuffer(redisClient *c) { return REDIS_ERR; } -void processInputBuffer(redisClient *c) { +void processInputBuffer(client *c) { server.current_client = c; /* Keep processing while there is something in the input buffer */ while(sdslen(c->querybuf)) { @@ -1176,7 +1176,7 @@ void processInputBuffer(redisClient *c) { } void readQueryFromClient(aeEventLoop *el, int fd, void *privdata, int mask) { - redisClient *c = (redisClient*) privdata; + client *c = (client*) privdata; int nread, readlen; size_t qblen; REDIS_NOTUSED(el); @@ -1234,7 +1234,7 @@ void readQueryFromClient(aeEventLoop *el, int fd, void *privdata, int mask) { void getClientsMaxBuffers(unsigned long *longest_output_list, unsigned long *biggest_input_buffer) { - redisClient *c; + client *c; listNode *ln; listIter li; unsigned long lol = 0, bib = 0; @@ -1261,7 +1261,7 @@ void getClientsMaxBuffers(unsigned long *longest_output_list, * On failure the function still populates 'peerid' with the "?:0" string * in case you want to relax error checking or need to display something * anyway (see anetPeerToString implementation for more info). */ -void genClientPeerId(redisClient *client, char *peerid, +void genClientPeerId(client *client, char *peerid, size_t peerid_len) { if (client->flags & REDIS_UNIX_SOCKET) { /* Unix socket client. */ @@ -1276,7 +1276,7 @@ void genClientPeerId(redisClient *client, char *peerid, * if client->peerid is NULL, otherwise returning the cached value. * The Peer ID never changes during the life of the client, however it * is expensive to compute. */ -char *getClientPeerId(redisClient *c) { +char *getClientPeerId(client *c) { char peerid[REDIS_PEER_ID_LEN]; if (c->peerid == NULL) { @@ -1288,7 +1288,7 @@ char *getClientPeerId(redisClient *c) { /* Concatenate a string representing the state of a client in an human * readable format, into the sds string 's'. */ -sds catClientInfoString(sds s, redisClient *client) { +sds catClientInfoString(sds s, client *client) { char flags[16], events[3], *p; int emask; @@ -1341,7 +1341,7 @@ sds catClientInfoString(sds s, redisClient *client) { sds getAllClientsInfoString(void) { listNode *ln; listIter li; - redisClient *client; + client *client; sds o = sdsempty(); o = sdsMakeRoomFor(o,200*listLength(server.clients)); @@ -1354,10 +1354,10 @@ sds getAllClientsInfoString(void) { return o; } -void clientCommand(redisClient *c) { +void clientCommand(client *c) { listNode *ln; listIter li; - redisClient *client; + client *client; if (!strcasecmp(c->argv[1]->ptr,"list") && c->argc == 2) { /* CLIENT LIST */ @@ -1500,7 +1500,7 @@ void clientCommand(redisClient *c) { /* Rewrite the command vector of the client. All the new objects ref count * is incremented. The old command vector is freed, and the old objects * ref count is decremented. */ -void rewriteClientCommandVector(redisClient *c, int argc, ...) { +void rewriteClientCommandVector(client *c, int argc, ...) 
{ va_list ap; int j; robj **argv; /* The new argument vector */ @@ -1528,7 +1528,7 @@ void rewriteClientCommandVector(redisClient *c, int argc, ...) { } /* Completely replace the client command vector with the provided one. */ -void replaceClientCommandVector(redisClient *c, int argc, robj **argv) { +void replaceClientCommandVector(client *c, int argc, robj **argv) { freeClientArgv(c); zfree(c->argv); c->argv = argv; @@ -1539,7 +1539,7 @@ void replaceClientCommandVector(redisClient *c, int argc, robj **argv) { /* Rewrite a single item in the command vector. * The new val ref count is incremented, and the old decremented. */ -void rewriteClientCommandArgument(redisClient *c, int i, robj *newval) { +void rewriteClientCommandArgument(client *c, int i, robj *newval) { robj *oldval; redisAssertWithInfo(c,NULL,i < c->argc); @@ -1568,7 +1568,7 @@ void rewriteClientCommandArgument(redisClient *c, int i, robj *newval) { * Note: this function is very fast so can be called as many time as * the caller wishes. The main usage of this function currently is * enforcing the client output length limits. */ -unsigned long getClientOutputBufferMemoryUsage(redisClient *c) { +unsigned long getClientOutputBufferMemoryUsage(client *c) { unsigned long list_item_size = sizeof(listNode)+sizeof(robj); return c->reply_bytes + (list_item_size*listLength(c->reply)); @@ -1582,7 +1582,7 @@ unsigned long getClientOutputBufferMemoryUsage(redisClient *c) { * REDIS_CLIENT_TYPE_SLAVE -> Slave or client executing MONITOR command * REDIS_CLIENT_TYPE_PUBSUB -> Client subscribed to Pub/Sub channels */ -int getClientType(redisClient *c) { +int getClientType(client *c) { if ((c->flags & REDIS_SLAVE) && !(c->flags & REDIS_MONITOR)) return REDIS_CLIENT_TYPE_SLAVE; if (c->flags & REDIS_PUBSUB) @@ -1612,7 +1612,7 @@ char *getClientTypeName(int class) { * * Return value: non-zero if the client reached the soft or the hard limit. * Otherwise zero is returned. */ -int checkClientOutputBufferLimits(redisClient *c) { +int checkClientOutputBufferLimits(client *c) { int soft = 0, hard = 0, class; unsigned long used_mem = getClientOutputBufferMemoryUsage(c); @@ -1653,7 +1653,7 @@ int checkClientOutputBufferLimits(redisClient *c) { * Note: we need to close the client asynchronously because this function is * called from contexts where the client can't be freed safely, i.e. from the * lower level functions pushing data inside the client output buffers. 
*/ -void asyncCloseClientOnOutputBufferLimitReached(redisClient *c) { +void asyncCloseClientOnOutputBufferLimitReached(client *c) { redisAssert(c->reply_bytes < SIZE_MAX-(1024*64)); if (c->reply_bytes == 0 || c->flags & REDIS_CLOSE_ASAP) return; if (checkClientOutputBufferLimits(c)) { @@ -1673,7 +1673,7 @@ void flushSlavesOutputBuffers(void) { listRewind(server.slaves,&li); while((ln = listNext(&li))) { - redisClient *slave = listNodeValue(ln); + client *slave = listNodeValue(ln); int events; events = aeGetFileEvents(server.el,slave->fd); @@ -1717,7 +1717,7 @@ int clientsArePaused(void) { { listNode *ln; listIter li; - redisClient *c; + client *c; server.clients_paused = 0; diff --git a/src/object.c b/src/object.c index e3f44b3d8..d8fcf1658 100644 --- a/src/object.c +++ b/src/object.c @@ -339,7 +339,7 @@ robj *resetRefCount(robj *obj) { return obj; } -int checkType(redisClient *c, robj *o, int type) { +int checkType(client *c, robj *o, int type) { if (o->type != type) { addReply(c,shared.wrongtypeerr); return 1; @@ -562,7 +562,7 @@ int getDoubleFromObject(robj *o, double *target) { return REDIS_OK; } -int getDoubleFromObjectOrReply(redisClient *c, robj *o, double *target, const char *msg) { +int getDoubleFromObjectOrReply(client *c, robj *o, double *target, const char *msg) { double value; if (getDoubleFromObject(o, &value) != REDIS_OK) { if (msg != NULL) { @@ -600,7 +600,7 @@ int getLongDoubleFromObject(robj *o, long double *target) { return REDIS_OK; } -int getLongDoubleFromObjectOrReply(redisClient *c, robj *o, long double *target, const char *msg) { +int getLongDoubleFromObjectOrReply(client *c, robj *o, long double *target, const char *msg) { long double value; if (getLongDoubleFromObject(o, &value) != REDIS_OK) { if (msg != NULL) { @@ -638,7 +638,7 @@ int getLongLongFromObject(robj *o, long long *target) { return REDIS_OK; } -int getLongLongFromObjectOrReply(redisClient *c, robj *o, long long *target, const char *msg) { +int getLongLongFromObjectOrReply(client *c, robj *o, long long *target, const char *msg) { long long value; if (getLongLongFromObject(o, &value) != REDIS_OK) { if (msg != NULL) { @@ -652,7 +652,7 @@ int getLongLongFromObjectOrReply(redisClient *c, robj *o, long long *target, con return REDIS_OK; } -int getLongFromObjectOrReply(redisClient *c, robj *o, long *target, const char *msg) { +int getLongFromObjectOrReply(client *c, robj *o, long *target, const char *msg) { long long value; if (getLongLongFromObjectOrReply(c, o, &value, msg) != REDIS_OK) return REDIS_ERR; @@ -696,14 +696,14 @@ unsigned long long estimateObjectIdleTime(robj *o) { /* This is a helper function for the OBJECT command. We need to lookup keys * without any modification of LRU or other parameters. */ -robj *objectCommandLookup(redisClient *c, robj *key) { +robj *objectCommandLookup(client *c, robj *key) { dictEntry *de; if ((de = dictFind(c->db->dict,key->ptr)) == NULL) return NULL; return (robj*) dictGetVal(de); } -robj *objectCommandLookupOrReply(redisClient *c, robj *key, robj *reply) { +robj *objectCommandLookupOrReply(client *c, robj *key, robj *reply) { robj *o = objectCommandLookup(c,key); if (!o) addReply(c, reply); @@ -712,7 +712,7 @@ robj *objectCommandLookupOrReply(redisClient *c, robj *key, robj *reply) { /* Object command allows to inspect the internals of an Redis Object. 
* Usage: OBJECT */ -void objectCommand(redisClient *c) { +void objectCommand(client *c) { robj *o; if (!strcasecmp(c->argv[1]->ptr,"refcount") && c->argc == 3) { diff --git a/src/pubsub.c b/src/pubsub.c index 0711387c2..edb08b04b 100644 --- a/src/pubsub.c +++ b/src/pubsub.c @@ -48,14 +48,14 @@ int listMatchPubsubPattern(void *a, void *b) { } /* Return the number of channels + patterns a client is subscribed to. */ -int clientSubscriptionsCount(redisClient *c) { +int clientSubscriptionsCount(client *c) { return dictSize(c->pubsub_channels)+ listLength(c->pubsub_patterns); } /* Subscribe a client to a channel. Returns 1 if the operation succeeded, or * 0 if the client was already subscribed to that channel. */ -int pubsubSubscribeChannel(redisClient *c, robj *channel) { +int pubsubSubscribeChannel(client *c, robj *channel) { dictEntry *de; list *clients = NULL; int retval = 0; @@ -85,7 +85,7 @@ int pubsubSubscribeChannel(redisClient *c, robj *channel) { /* Unsubscribe a client from a channel. Returns 1 if the operation succeeded, or * 0 if the client was not subscribed to the specified channel. */ -int pubsubUnsubscribeChannel(redisClient *c, robj *channel, int notify) { +int pubsubUnsubscribeChannel(client *c, robj *channel, int notify) { dictEntry *de; list *clients; listNode *ln; @@ -124,7 +124,7 @@ int pubsubUnsubscribeChannel(redisClient *c, robj *channel, int notify) { } /* Subscribe a client to a pattern. Returns 1 if the operation succeeded, or 0 if the client was already subscribed to that pattern. */ -int pubsubSubscribePattern(redisClient *c, robj *pattern) { +int pubsubSubscribePattern(client *c, robj *pattern) { int retval = 0; if (listSearchKey(c->pubsub_patterns,pattern) == NULL) { @@ -147,7 +147,7 @@ int pubsubSubscribePattern(redisClient *c, robj *pattern) { /* Unsubscribe a client from a channel. Returns 1 if the operation succeeded, or * 0 if the client was not subscribed to the specified channel. */ -int pubsubUnsubscribePattern(redisClient *c, robj *pattern, int notify) { +int pubsubUnsubscribePattern(client *c, robj *pattern, int notify) { listNode *ln; pubsubPattern pat; int retval = 0; @@ -175,7 +175,7 @@ int pubsubUnsubscribePattern(redisClient *c, robj *pattern, int notify) { /* Unsubscribe from all the channels. Return the number of channels the * client was subscribed to. */ -int pubsubUnsubscribeAllChannels(redisClient *c, int notify) { +int pubsubUnsubscribeAllChannels(client *c, int notify) { dictIterator *di = dictGetSafeIterator(c->pubsub_channels); dictEntry *de; int count = 0; @@ -199,7 +199,7 @@ int pubsubUnsubscribeAllChannels(redisClient *c, int notify) { /* Unsubscribe from all the patterns. Return the number of patterns the * client was subscribed from. 
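/*
 * The pub/sub helpers in this hunk only get the client type renamed; the
 * bookkeeping they implement maps each channel to the set of clients
 * subscribed to it, and publishing fans a message out to that set and
 * reports how many receivers there were.  Below is a deliberately tiny
 * standalone model of that fan-out using fixed-size arrays instead of the
 * dict/list types in the patch; chan_t, subscribe and publish are invented
 * names for the sketch.
 */
#include <stdio.h>

#define MAX_SUBS 8

typedef struct {
    const char *name;
    int subs[MAX_SUBS];   /* subscriber ids */
    int nsubs;
} chan_t;

/* Returns 1 if added, 0 if already subscribed, -1 if the channel is full. */
static int subscribe(chan_t *ch, int client_id) {
    for (int i = 0; i < ch->nsubs; i++)
        if (ch->subs[i] == client_id) return 0;
    if (ch->nsubs == MAX_SUBS) return -1;
    ch->subs[ch->nsubs++] = client_id;
    return 1;
}

/* Returns the number of receivers, the same count PUBLISH reports. */
static int publish(chan_t *ch, const char *msg) {
    for (int i = 0; i < ch->nsubs; i++)
        printf("deliver to client %d: [%s] %s\n", ch->subs[i], ch->name, msg);
    return ch->nsubs;
}

int main(void) {
    chan_t news = { "news", {0}, 0 };
    subscribe(&news, 1);
    subscribe(&news, 2);
    printf("receivers: %d\n", publish(&news, "hello"));
    return 0;
}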
*/ -int pubsubUnsubscribeAllPatterns(redisClient *c, int notify) { +int pubsubUnsubscribeAllPatterns(client *c, int notify) { listNode *ln; listIter li; int count = 0; @@ -237,7 +237,7 @@ int pubsubPublishMessage(robj *channel, robj *message) { listRewind(list,&li); while ((ln = listNext(&li)) != NULL) { - redisClient *c = ln->value; + client *c = ln->value; addReply(c,shared.mbulkhdr[3]); addReply(c,shared.messagebulk); @@ -274,7 +274,7 @@ int pubsubPublishMessage(robj *channel, robj *message) { * Pubsub commands implementation *----------------------------------------------------------------------------*/ -void subscribeCommand(redisClient *c) { +void subscribeCommand(client *c) { int j; for (j = 1; j < c->argc; j++) @@ -282,7 +282,7 @@ void subscribeCommand(redisClient *c) { c->flags |= REDIS_PUBSUB; } -void unsubscribeCommand(redisClient *c) { +void unsubscribeCommand(client *c) { if (c->argc == 1) { pubsubUnsubscribeAllChannels(c,1); } else { @@ -294,7 +294,7 @@ void unsubscribeCommand(redisClient *c) { if (clientSubscriptionsCount(c) == 0) c->flags &= ~REDIS_PUBSUB; } -void psubscribeCommand(redisClient *c) { +void psubscribeCommand(client *c) { int j; for (j = 1; j < c->argc; j++) @@ -302,7 +302,7 @@ void psubscribeCommand(redisClient *c) { c->flags |= REDIS_PUBSUB; } -void punsubscribeCommand(redisClient *c) { +void punsubscribeCommand(client *c) { if (c->argc == 1) { pubsubUnsubscribeAllPatterns(c,1); } else { @@ -314,7 +314,7 @@ void punsubscribeCommand(redisClient *c) { if (clientSubscriptionsCount(c) == 0) c->flags &= ~REDIS_PUBSUB; } -void publishCommand(redisClient *c) { +void publishCommand(client *c) { int receivers = pubsubPublishMessage(c->argv[1],c->argv[2]); if (server.cluster_enabled) clusterPropagatePublish(c->argv[1],c->argv[2]); @@ -324,7 +324,7 @@ void publishCommand(redisClient *c) { } /* PUBSUB command for Pub/Sub introspection. 
*/ -void pubsubCommand(redisClient *c) { +void pubsubCommand(client *c) { if (!strcasecmp(c->argv[1]->ptr,"channels") && (c->argc == 2 || c->argc ==3)) { diff --git a/src/rdb.c b/src/rdb.c index 8e652cde5..6bff4fc82 100644 --- a/src/rdb.c +++ b/src/rdb.c @@ -1482,7 +1482,7 @@ void backgroundSaveDoneHandlerSocket(int exitcode, int bysignal) { listRewind(server.slaves,&li); while((ln = listNext(&li))) { - redisClient *slave = ln->value; + client *slave = ln->value; if (slave->replstate == REDIS_REPL_WAIT_BGSAVE_END) { uint64_t j; @@ -1566,7 +1566,7 @@ int rdbSaveToSlavesSockets(void) { listRewind(server.slaves,&li); while((ln = listNext(&li))) { - redisClient *slave = ln->value; + client *slave = ln->value; if (slave->replstate == REDIS_REPL_WAIT_BGSAVE_START) { clientids[numfds] = slave->id; @@ -1672,7 +1672,7 @@ int rdbSaveToSlavesSockets(void) { return REDIS_OK; /* unreached */ } -void saveCommand(redisClient *c) { +void saveCommand(client *c) { if (server.rdb_child_pid != -1) { addReplyError(c,"Background save already in progress"); return; @@ -1684,7 +1684,7 @@ void saveCommand(redisClient *c) { } } -void bgsaveCommand(redisClient *c) { +void bgsaveCommand(client *c) { if (server.rdb_child_pid != -1) { addReplyError(c,"Background save already in progress"); } else if (server.aof_child_pid != -1) { diff --git a/src/replication.c b/src/replication.c index 5f366f189..fb983218c 100644 --- a/src/replication.c +++ b/src/replication.c @@ -40,7 +40,7 @@ void replicationDiscardCachedMaster(void); void replicationResurrectCachedMaster(int newfd); void replicationSendAck(void); -void putSlaveOnline(redisClient *slave); +void putSlaveOnline(client *slave); /* --------------------------- Utility functions ---------------------------- */ @@ -48,7 +48,7 @@ void putSlaveOnline(redisClient *slave); * pair. Mostly useful for logging, since we want to log a slave using its * IP address and it's listening port which is more clear for the user, for * example: "Closing connection with slave 10.1.2.3:6380". */ -char *replicationGetSlaveName(redisClient *c) { +char *replicationGetSlaveName(client *c) { static char buf[REDIS_PEER_ID_LEN]; char ip[REDIS_IP_STR_LEN]; @@ -200,7 +200,7 @@ void replicationFeedSlaves(list *slaves, int dictid, robj **argv, int argc) { /* Send it to slaves. */ listRewind(slaves,&li); while((ln = listNext(&li))) { - redisClient *slave = ln->value; + client *slave = ln->value; addReply(slave,selectcmd); } @@ -239,7 +239,7 @@ void replicationFeedSlaves(list *slaves, int dictid, robj **argv, int argc) { /* Write the command to every slave. 
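/*
 * addReplyReplicationBacklog() and masterTryPartialResynchronization() above
 * serve a PSYNC request out of a circular backlog: the requested offset has
 * to fall inside the window of history the backlog still holds, otherwise a
 * full resync is needed.  The standalone sketch below models only that
 * feasibility check; the struct backlog fields are invented names and the
 * actual circular-buffer indexing is omitted.
 */
#include <stdio.h>

struct backlog {
    long long first_byte_offset;  /* replication offset of the oldest byte kept */
    long long histlen;            /* how many bytes of history are kept */
};

/* Returns 1 if a partial resync from 'offset' can be served, 0 otherwise. */
int backlog_can_serve(const struct backlog *b, long long offset)
{
    if (b->histlen == 0) return 0;                              /* no history at all */
    if (offset < b->first_byte_offset) return 0;                /* too old, already dropped */
    if (offset > b->first_byte_offset + b->histlen) return 0;   /* beyond what was produced */
    return 1;
}

int main(void) {
    struct backlog b = { .first_byte_offset = 1000, .histlen = 500 };
    printf("%d %d %d\n",
           backlog_can_serve(&b, 1200),   /* 1: inside the window */
           backlog_can_serve(&b, 10),     /* 0: history already discarded */
           backlog_can_serve(&b, 2000));  /* 0: past the end of the window */
    return 0;
}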
*/ listRewind(server.slaves,&li); while((ln = listNext(&li))) { - redisClient *slave = ln->value; + client *slave = ln->value; /* Don't feed slaves that are still waiting for BGSAVE to start */ if (slave->replstate == REDIS_REPL_WAIT_BGSAVE_START) continue; @@ -258,7 +258,7 @@ void replicationFeedSlaves(list *slaves, int dictid, robj **argv, int argc) { } } -void replicationFeedMonitors(redisClient *c, list *monitors, int dictid, robj **argv, int argc) { +void replicationFeedMonitors(client *c, list *monitors, int dictid, robj **argv, int argc) { listNode *ln; listIter li; int j; @@ -291,7 +291,7 @@ void replicationFeedMonitors(redisClient *c, list *monitors, int dictid, robj ** listRewind(monitors,&li); while((ln = listNext(&li))) { - redisClient *monitor = ln->value; + client *monitor = ln->value; addReply(monitor,cmdobj); } decrRefCount(cmdobj); @@ -299,7 +299,7 @@ void replicationFeedMonitors(redisClient *c, list *monitors, int dictid, robj ** /* Feed the slave 'c' with the replication backlog starting from the * specified 'offset' up to the end of the backlog. */ -long long addReplyReplicationBacklog(redisClient *c, long long offset) { +long long addReplyReplicationBacklog(client *c, long long offset) { long long j, skip, len; serverLog(REDIS_DEBUG, "[PSYNC] Slave request offset: %lld", offset); @@ -354,7 +354,7 @@ long long addReplyReplicationBacklog(redisClient *c, long long offset) { * * On success return REDIS_OK, otherwise REDIS_ERR is returned and we proceed * with the usual full resync. */ -int masterTryPartialResynchronization(redisClient *c) { +int masterTryPartialResynchronization(client *c) { long long psync_offset, psync_len; char *master_runid = c->argv[1]->ptr; char buf[128]; @@ -460,7 +460,7 @@ int startBgsaveForReplication(void) { } /* SYNC and PSYNC command implemenation. */ -void syncCommand(redisClient *c) { +void syncCommand(client *c) { /* ignore SYNC if already slave or in monitor mode */ if (c->flags & REDIS_SLAVE) return; @@ -523,7 +523,7 @@ void syncCommand(redisClient *c) { /* Ok a background save is in progress. Let's check if it is a good * one for replication, i.e. if there is another slave that is * registering differences since the server forked to save. */ - redisClient *slave; + client *slave; listNode *ln; listIter li; @@ -594,7 +594,7 @@ void syncCommand(redisClient *c) { * In the future the same command can be used in order to configure * the replication to initiate an incremental replication instead of a * full resync. */ -void replconfCommand(redisClient *c) { +void replconfCommand(client *c) { int j; if ((c->argc % 2) == 0) { @@ -658,7 +658,7 @@ void replconfCommand(redisClient *c) { * command disables it, so that we can accumulate output buffer without * sending it to the slave. * 3) Update the count of good slaves. */ -void putSlaveOnline(redisClient *slave) { +void putSlaveOnline(client *slave) { slave->replstate = REDIS_REPL_ONLINE; slave->repl_put_online_on_ack = 0; slave->repl_ack_time = server.unixtime; /* Prevent false timeout. 
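/*
 * waitCommand() and replicationCountAcksByOffset() above implement WAIT by
 * counting how many online replicas have acknowledged at least the target
 * replication offset.  A minimal standalone version of that counting step is
 * sketched below; the replica_t struct and count_acks name stand in for the
 * slave client list walked in the patch and are invented for the example.
 */
#include <stdio.h>

typedef struct {
    int online;            /* 1 if the replica finished the initial sync */
    long long ack_offset;  /* last replication offset the replica acknowledged */
} replica_t;

int count_acks(const replica_t *r, int n, long long target_offset)
{
    int acked = 0;
    for (int i = 0; i < n; i++)
        if (r[i].online && r[i].ack_offset >= target_offset) acked++;
    return acked;
}

int main(void) {
    replica_t replicas[] = { {1, 1500}, {1, 900}, {0, 2000} };
    printf("replicas acked: %d\n", count_acks(replicas, 3, 1000));  /* prints 1 */
    return 0;
}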
*/ @@ -674,7 +674,7 @@ void putSlaveOnline(redisClient *slave) { } void sendBulkToSlave(aeEventLoop *el, int fd, void *privdata, int mask) { - redisClient *slave = privdata; + client *slave = privdata; REDIS_NOTUSED(el); REDIS_NOTUSED(mask); char buf[REDIS_IOBUF_LEN]; @@ -750,7 +750,7 @@ void updateSlavesWaitingBgsave(int bgsaveerr, int type) { listRewind(server.slaves,&li); while((ln = listNext(&li))) { - redisClient *slave = ln->value; + client *slave = ln->value; if (slave->replstate == REDIS_REPL_WAIT_BGSAVE_START) { startbgsave = 1; @@ -808,7 +808,7 @@ void updateSlavesWaitingBgsave(int bgsaveerr, int type) { listRewind(server.slaves,&li); serverLog(REDIS_WARNING,"SYNC failed. BGSAVE failed"); while((ln = listNext(&li))) { - redisClient *slave = ln->value; + client *slave = ln->value; if (slave->replstate == REDIS_REPL_WAIT_BGSAVE_START) freeClient(slave); @@ -1477,7 +1477,7 @@ void replicationUnsetMaster(void) { server.repl_state = REDIS_REPL_NONE; } -void slaveofCommand(redisClient *c) { +void slaveofCommand(client *c) { /* SLAVEOF is not allowed in cluster mode as replication is automatically * configured using the current address of the master node. */ if (server.cluster_enabled) { @@ -1518,7 +1518,7 @@ void slaveofCommand(redisClient *c) { /* ROLE command: provide information about the role of the instance * (master or slave) and additional information related to replication * in an easy to process format. */ -void roleCommand(redisClient *c) { +void roleCommand(client *c) { if (server.masterhost == NULL) { listIter li; listNode *ln; @@ -1531,7 +1531,7 @@ void roleCommand(redisClient *c) { mbcount = addDeferredMultiBulkLength(c); listRewind(server.slaves,&li); while((ln = listNext(&li))) { - redisClient *slave = ln->value; + client *slave = ln->value; char ip[REDIS_IP_STR_LEN]; if (anetPeerToString(slave->fd,ip,sizeof(ip),NULL) == -1) continue; @@ -1568,7 +1568,7 @@ void roleCommand(redisClient *c) { * processed offset. If we are not connected with a master, the command has * no effects. */ void replicationSendAck(void) { - redisClient *c = server.master; + client *c = server.master; if (c != NULL) { c->flags |= REDIS_MASTER_FORCE_REPLY; @@ -1600,7 +1600,7 @@ void replicationSendAck(void) { * replicationResurrectCachedMaster() that is used after a successful PSYNC * handshake in order to reactivate the cached master. */ -void replicationCacheMaster(redisClient *c) { +void replicationCacheMaster(client *c) { listNode *ln; redisAssert(server.master != NULL && server.cached_master == NULL); @@ -1697,7 +1697,7 @@ void refreshGoodSlavesCount(void) { listRewind(server.slaves,&li); while((ln = listNext(&li))) { - redisClient *slave = ln->value; + client *slave = ln->value; time_t lag = server.unixtime - slave->repl_ack_time; if (slave->replstate == REDIS_REPL_ONLINE && @@ -1833,7 +1833,7 @@ int replicationCountAcksByOffset(long long offset) { listRewind(server.slaves,&li); while((ln = listNext(&li))) { - redisClient *slave = ln->value; + client *slave = ln->value; if (slave->replstate != REDIS_REPL_ONLINE) continue; if (slave->repl_ack_off >= offset) count++; @@ -1843,7 +1843,7 @@ int replicationCountAcksByOffset(long long offset) { /* WAIT for N replicas to acknowledge the processing of our latest * write command (and all the previous commands). */ -void waitCommand(redisClient *c) { +void waitCommand(client *c) { mstime_t timeout; long numreplicas, ackreplicas; long long offset = c->woff; @@ -1878,7 +1878,7 @@ void waitCommand(redisClient *c) { * specific cleanup. 
We just remove the client from the list of clients * waiting for replica acks. Never call it directly, call unblockClient() * instead. */ -void unblockClientWaitingReplicas(redisClient *c) { +void unblockClientWaitingReplicas(client *c) { listNode *ln = listSearchKey(server.clients_waiting_acks,c); redisAssert(ln != NULL); listDelNode(server.clients_waiting_acks,ln); @@ -1895,7 +1895,7 @@ void processClientsWaitingReplicas(void) { listRewind(server.clients_waiting_acks,&li); while((ln = listNext(&li))) { - redisClient *c = ln->value; + client *c = ln->value; /* Every time we find a client that is satisfied for a given * offset and number of replicas, we remember it so the next client @@ -2005,7 +2005,7 @@ void replicationCron(void) { * last-io timer preventing a timeout. */ listRewind(server.slaves,&li); while((ln = listNext(&li))) { - redisClient *slave = ln->value; + client *slave = ln->value; if (slave->replstate == REDIS_REPL_WAIT_BGSAVE_START || (slave->replstate == REDIS_REPL_WAIT_BGSAVE_END && @@ -2025,7 +2025,7 @@ void replicationCron(void) { listRewind(server.slaves,&li); while((ln = listNext(&li))) { - redisClient *slave = ln->value; + client *slave = ln->value; if (slave->replstate != REDIS_REPL_ONLINE) continue; if (slave->flags & REDIS_PRE_PSYNC) continue; @@ -2079,7 +2079,7 @@ void replicationCron(void) { listRewind(server.slaves,&li); while((ln = listNext(&li))) { - redisClient *slave = ln->value; + client *slave = ln->value; if (slave->replstate == REDIS_REPL_WAIT_BGSAVE_START) { idle = server.unixtime - slave->lastinteraction; if (idle > max_idle) max_idle = idle; @@ -2098,7 +2098,7 @@ void replicationCron(void) { * startBgsaveForReplication(). */ listRewind(server.slaves,&li); while((ln = listNext(&li))) { - redisClient *slave = ln->value; + client *slave = ln->value; if (slave->replstate == REDIS_REPL_WAIT_BGSAVE_START) slave->replstate = REDIS_REPL_WAIT_BGSAVE_END; } diff --git a/src/scripting.c b/src/scripting.c index 79547aa4d..6be950ace 100644 --- a/src/scripting.c +++ b/src/scripting.c @@ -206,7 +206,7 @@ void luaSortArray(lua_State *lua) { int luaRedisGenericCommand(lua_State *lua, int raise_error) { int j, argc = lua_gettop(lua); struct redisCommand *cmd; - redisClient *c = server.lua_client; + client *c = server.lua_client; sds reply; /* Cached across calls. */ @@ -798,7 +798,7 @@ void sha1hex(char *digest, char *script, size_t len) { digest[40] = '\0'; } -void luaReplyToRedisReply(redisClient *c, lua_State *lua) { +void luaReplyToRedisReply(client *c, lua_State *lua) { int t = lua_type(lua,-1); switch(t) { @@ -884,7 +884,7 @@ void luaSetGlobalArray(lua_State *lua, char *var, robj **elev, int elec) { * On success REDIS_OK is returned, and nothing is left on the Lua stack. * On error REDIS_ERR is returned and an appropriate error is set in the * client context. 
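/*
 * sha1hex() above turns a binary SHA1 into the 40 character lowercase hex
 * string (plus terminating NUL) that the scripting code uses to name and
 * look up cached scripts.  The sketch below shows only that formatting step
 * for an arbitrary 20-byte digest; the digest bytes and the to_hex40 name
 * are made up for the example, and no hashing is performed.
 */
#include <stdio.h>

/* out must have room for 41 bytes: 40 hex characters plus the NUL. */
void to_hex40(char *out, const unsigned char digest[20])
{
    static const char hex[] = "0123456789abcdef";
    for (int i = 0; i < 20; i++) {
        out[i*2]   = hex[(digest[i] >> 4) & 0x0f];
        out[i*2+1] = hex[digest[i] & 0x0f];
    }
    out[40] = '\0';
}

int main(void) {
    unsigned char digest[20] = { 0xde, 0xad, 0xbe, 0xef };  /* remaining bytes are zero */
    char hexbuf[41];
    to_hex40(hexbuf, digest);
    printf("%s\n", hexbuf);   /* "deadbeef" followed by 32 zeros */
    return 0;
}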
*/ -int luaCreateFunction(redisClient *c, lua_State *lua, char *funcname, robj *body) { +int luaCreateFunction(client *c, lua_State *lua, char *funcname, robj *body) { sds funcdef = sdsempty(); funcdef = sdscat(funcdef,"function "); @@ -920,7 +920,7 @@ int luaCreateFunction(redisClient *c, lua_State *lua, char *funcname, robj *body return REDIS_OK; } -void evalGenericCommand(redisClient *c, int evalsha) { +void evalGenericCommand(client *c, int evalsha) { lua_State *lua = server.lua; char funcname[43]; long long numkeys; @@ -1090,11 +1090,11 @@ void evalGenericCommand(redisClient *c, int evalsha) { } } -void evalCommand(redisClient *c) { +void evalCommand(client *c) { evalGenericCommand(c,0); } -void evalShaCommand(redisClient *c) { +void evalShaCommand(client *c) { if (sdslen(c->argv[1]->ptr) != 40) { /* We know that a match is not possible if the provided SHA is * not the right length. So we return an error ASAP, this way @@ -1149,7 +1149,7 @@ int redis_math_randomseed (lua_State *L) { * SCRIPT command for script environment introspection and control * ------------------------------------------------------------------------- */ -void scriptCommand(redisClient *c) { +void scriptCommand(client *c) { if (c->argc == 2 && !strcasecmp(c->argv[1]->ptr,"flush")) { scriptingReset(); addReply(c,shared.ok); diff --git a/src/sentinel.c b/src/sentinel.c index bd315ccd5..42e8eab1f 100644 --- a/src/sentinel.c +++ b/src/sentinel.c @@ -416,11 +416,11 @@ dictType leaderVotesDictType = { /* =========================== Initialization =============================== */ -void sentinelCommand(redisClient *c); -void sentinelInfoCommand(redisClient *c); -void sentinelSetCommand(redisClient *c); -void sentinelPublishCommand(redisClient *c); -void sentinelRoleCommand(redisClient *c); +void sentinelCommand(client *c); +void sentinelInfoCommand(client *c); +void sentinelSetCommand(client *c); +void sentinelPublishCommand(client *c); +void sentinelRoleCommand(client *c); struct redisCommand sentinelcmds[] = { {"ping",pingCommand,1,"",0,NULL,0,0,0,0,0}, @@ -859,7 +859,7 @@ void sentinelKillTimedoutScripts(void) { } /* Implements SENTINEL PENDING-SCRIPTS command. */ -void sentinelPendingScriptsCommand(redisClient *c) { +void sentinelPendingScriptsCommand(client *c) { listNode *ln; listIter li; @@ -2604,7 +2604,7 @@ const char *sentinelFailoverStateStr(int state) { } /* Redis instance to Redis protocol representation. */ -void addReplySentinelRedisInstance(redisClient *c, sentinelRedisInstance *ri) { +void addReplySentinelRedisInstance(client *c, sentinelRedisInstance *ri) { char *flags = sdsempty(); void *mbl; int fields = 0; @@ -2795,7 +2795,7 @@ void addReplySentinelRedisInstance(redisClient *c, sentinelRedisInstance *ri) { /* Output a number of instances contained inside a dictionary as * Redis protocol. */ -void addReplyDictOfRedisInstances(redisClient *c, dict *instances) { +void addReplyDictOfRedisInstances(client *c, dict *instances) { dictIterator *di; dictEntry *de; @@ -2812,7 +2812,7 @@ void addReplyDictOfRedisInstances(redisClient *c, dict *instances) { /* Lookup the named master into sentinel.masters. * If the master is not found reply to the client with an error and returns * NULL. 
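/*
 * evalShaCommand() above rejects a request up front when the argument is not
 * exactly 40 characters long, since no SHA1 hex digest can possibly match in
 * that case.  The standalone sketch below extends that quick precheck to
 * also verify the characters are hex digits; the is_valid_sha1hex name and
 * the extra character check are additions for the example, not part of the
 * patch.
 */
#include <ctype.h>
#include <stdio.h>
#include <string.h>

int is_valid_sha1hex(const char *s)
{
    if (strlen(s) != 40) return 0;               /* wrong length: cannot match */
    for (int i = 0; i < 40; i++)
        if (!isxdigit((unsigned char)s[i])) return 0;
    return 1;
}

int main(void) {
    printf("%d\n", is_valid_sha1hex("e0e1f9fabfc9d4800c877a703b823ac0578ff831")); /* 1 */
    printf("%d\n", is_valid_sha1hex("not-a-digest"));                             /* 0 */
    return 0;
}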
*/ -sentinelRedisInstance *sentinelGetMasterByNameOrReplyError(redisClient *c, +sentinelRedisInstance *sentinelGetMasterByNameOrReplyError(client *c, robj *name) { sentinelRedisInstance *ri; @@ -2850,7 +2850,7 @@ int sentinelIsQuorumReachable(sentinelRedisInstance *master, int *usableptr) { return result; } -void sentinelCommand(redisClient *c) { +void sentinelCommand(client *c) { if (!strcasecmp(c->argv[1]->ptr,"masters")) { /* SENTINEL MASTERS */ if (c->argc != 2) goto numargserr; @@ -3166,7 +3166,7 @@ numargserr: } /* SENTINEL INFO [section] */ -void sentinelInfoCommand(redisClient *c) { +void sentinelInfoCommand(client *c) { if (c->argc > 2) { addReply(c,shared.syntaxerr); return; @@ -3232,7 +3232,7 @@ void sentinelInfoCommand(redisClient *c) { /* Implements Sentinel verison of the ROLE command. The output is * "sentinel" and the list of currently monitored master names. */ -void sentinelRoleCommand(redisClient *c) { +void sentinelRoleCommand(client *c) { dictIterator *di; dictEntry *de; @@ -3250,7 +3250,7 @@ void sentinelRoleCommand(redisClient *c) { } /* SENTINEL SET [