From 57a38dca29d08a6bb70f23523e9e5e9c4fe3822c Mon Sep 17 00:00:00 2001 From: VivekSainiEQ Date: Mon, 1 Mar 2021 21:31:33 +0000 Subject: [PATCH 001/149] Fixed bug where make USE_SYSTEMD=yes would not pass in correct flags, issue #226 Former-commit-id: 69dc0a06bfd34516c5c18a6e22985ef01a472339 --- src/Makefile | 1 + 1 file changed, 1 insertion(+) diff --git a/src/Makefile b/src/Makefile index 9e4070d4a..75f273490 100644 --- a/src/Makefile +++ b/src/Makefile @@ -227,6 +227,7 @@ endif ifeq ($(BUILD_WITH_SYSTEMD),yes) FINAL_LIBS+=$(shell $(PKG_CONFIG) --libs libsystemd) FINAL_CFLAGS+= -DHAVE_LIBSYSTEMD + FINAL_CXXFLAGS+= -DHAVE_LIBSYSTEMD endif ifeq ($(MALLOC),tcmalloc) From 927c08aca53a1aa9ba31677c3fb4ec7585c3d23f Mon Sep 17 00:00:00 2001 From: christianEQ Date: Tue, 27 Apr 2021 20:56:37 +0000 Subject: [PATCH 002/149] added keydb-diagnostic-tool binary (copy of benchmark) Former-commit-id: a2c0bce4cc1403e01e70508b4297cfe5e76643cc --- .gitignore | 1 + src/Makefile | 12 +- src/keydb-diagnostic-tool.cpp | 1830 +++++++++++++++++++++++++++++++++ 3 files changed, 1840 insertions(+), 3 deletions(-) create mode 100644 src/keydb-diagnostic-tool.cpp diff --git a/.gitignore b/.gitignore index 21f903288..450530f79 100644 --- a/.gitignore +++ b/.gitignore @@ -30,6 +30,7 @@ redis-check-rdb keydb-check-rdb redis-check-dump keydb-check-dump +keydb-diagnostic-tool redis-cli redis-sentinel redis-server diff --git a/src/Makefile b/src/Makefile index 9e4070d4a..11516e15c 100644 --- a/src/Makefile +++ b/src/Makefile @@ -303,8 +303,9 @@ REDIS_BENCHMARK_NAME=keydb-benchmark$(PROG_SUFFIX) REDIS_BENCHMARK_OBJ=ae.o anet.o redis-benchmark.o adlist.o dict.o zmalloc.o siphash.o redis-benchmark.o storage-lite.o fastlock.o new.o $(ASM_OBJ) REDIS_CHECK_RDB_NAME=keydb-check-rdb$(PROG_SUFFIX) REDIS_CHECK_AOF_NAME=keydb-check-aof$(PROG_SUFFIX) +REDIS_DIAGNOSTIC_NAME=keydb-diagnostic-tool$(PROG_SUFFIX) -all: $(REDIS_SERVER_NAME) $(REDIS_SENTINEL_NAME) $(REDIS_CLI_NAME) $(REDIS_BENCHMARK_NAME) $(REDIS_CHECK_RDB_NAME) $(REDIS_CHECK_AOF_NAME) +all: $(REDIS_SERVER_NAME) $(REDIS_SENTINEL_NAME) $(REDIS_CLI_NAME) $(REDIS_BENCHMARK_NAME) $(REDIS_CHECK_RDB_NAME) $(REDIS_CHECK_AOF_NAME) $(REDIS_DIAGNOSTIC_NAME) @echo "" @echo "Hint: It's a good idea to run 'make test' ;)" @echo "" @@ -376,6 +377,10 @@ $(REDIS_CLI_NAME): $(REDIS_CLI_OBJ) $(REDIS_BENCHMARK_NAME): $(REDIS_BENCHMARK_OBJ) $(REDIS_LD) -o $@ $^ ../deps/hiredis/libhiredis.a $(FINAL_LIBS) +# keydb-diagnostic-tool +$(REDIS_DIAGNOSTIC_NAME): $(REDIS_BENCHMARK_OBJ) + $(REDIS_LD) -o $@ $^ ../deps/hiredis/libhiredis.a $(FINAL_LIBS) + dict-benchmark: dict.cpp zmalloc.cpp sds.c siphash.c $(REDIS_CC) $(FINAL_CFLAGS) $^ -D DICT_BENCHMARK_MAIN -o $@ $(FINAL_LIBS) @@ -395,7 +400,7 @@ DEP = $(REDIS_SERVER_OBJ:%.o=%.d) $(REDIS_CLI_OBJ:%.o=%.d) $(REDIS_BENCHMARK_OBJ $(KEYDB_AS) $< -o $@ clean: - rm -rf $(REDIS_SERVER_NAME) $(REDIS_SENTINEL_NAME) $(REDIS_CLI_NAME) $(REDIS_BENCHMARK_NAME) $(REDIS_CHECK_RDB_NAME) $(REDIS_CHECK_AOF_NAME) *.o *.gcda *.gcno *.gcov KeyDB.info lcov-html Makefile.dep dict-benchmark + rm -rf $(REDIS_SERVER_NAME) $(REDIS_SENTINEL_NAME) $(REDIS_CLI_NAME) $(REDIS_BENCHMARK_NAME) $(REDIS_CHECK_RDB_NAME) $(REDIS_CHECK_AOF_NAME) $(REDIS_DIAGNOSTIC_NAME) *.o *.gcda *.gcno *.gcov KeyDB.info lcov-html Makefile.dep dict-benchmark rm -f $(DEP) .PHONY: clean @@ -459,7 +464,8 @@ install: all $(REDIS_INSTALL) $(REDIS_CLI_NAME) $(INSTALL_BIN) $(REDIS_INSTALL) $(REDIS_CHECK_RDB_NAME) $(INSTALL_BIN) $(REDIS_INSTALL) $(REDIS_CHECK_AOF_NAME) $(INSTALL_BIN) + $(REDIS_INSTALL) 
$(REDIS_DIAGNOSTIC_NAME) $(INSTALL_BIN) @ln -sf $(REDIS_SERVER_NAME) $(INSTALL_BIN)/$(REDIS_SENTINEL_NAME) uninstall: - rm -f $(INSTALL_BIN)/{$(REDIS_SERVER_NAME),$(REDIS_BENCHMARK_NAME),$(REDIS_CLI_NAME),$(REDIS_CHECK_RDB_NAME),$(REDIS_CHECK_AOF_NAME),$(REDIS_SENTINEL_NAME)} + rm -f $(INSTALL_BIN)/{$(REDIS_SERVER_NAME),$(REDIS_BENCHMARK_NAME),$(REDIS_CLI_NAME),$(REDIS_CHECK_RDB_NAME),$(REDIS_CHECK_AOF_NAME),$(REDIS_SENTINEL_NAME),$(REDIS_DIAGNOSTIC_NAME)} diff --git a/src/keydb-diagnostic-tool.cpp b/src/keydb-diagnostic-tool.cpp new file mode 100644 index 000000000..8dea6cdbf --- /dev/null +++ b/src/keydb-diagnostic-tool.cpp @@ -0,0 +1,1830 @@ +/* KeyDB diagnostic utility. + * + * Copyright (c) 2009-2012, Salvatore Sanfilippo + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Redis nor the names of its contributors may be used + * to endorse or promote products derived from this software without + * specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include "fmacros.h" + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +extern "C" { +#include /* Use hiredis sds. */ +#include "hiredis.h" +} +#include "ae.h" +#include "adlist.h" +#include "dict.h" +#include "zmalloc.h" +#include "storage.h" +#include "atomicvar.h" +#include "crc16_slottable.h" + +#define UNUSED(V) ((void) V) +#define RANDPTR_INITIAL_SIZE 8 +#define MAX_LATENCY_PRECISION 3 +#define MAX_THREADS 500 +#define CLUSTER_SLOTS 16384 + +#define CLIENT_GET_EVENTLOOP(c) \ + (c->thread_id >= 0 ? 
config.threads[c->thread_id]->el : config.el) + +struct benchmarkThread; +struct clusterNode; +struct redisConfig; + +int g_fTestMode = false; + +static struct config { + aeEventLoop *el; + const char *hostip; + int hostport; + const char *hostsocket; + int numclients; + int liveclients; + int requests; + int requests_issued; + int requests_finished; + int keysize; + int datasize; + int randomkeys; + int randomkeys_keyspacelen; + int keepalive; + int pipeline; + int showerrors; + long long start; + long long totlatency; + long long *latency; + const char *title; + list *clients; + int quiet; + int csv; + int loop; + int idlemode; + int dbnum; + sds dbnumstr; + char *tests; + char *auth; + const char *user; + int precision; + int num_threads; + struct benchmarkThread **threads; + int cluster_mode; + int cluster_node_count; + struct clusterNode **cluster_nodes; + struct redisConfig *redis_config; + int is_fetching_slots; + int is_updating_slots; + int slots_last_update; + int enable_tracking; + /* Thread mutexes to be used as fallbacks by atomicvar.h */ + pthread_mutex_t requests_issued_mutex; + pthread_mutex_t requests_finished_mutex; + pthread_mutex_t liveclients_mutex; + pthread_mutex_t is_fetching_slots_mutex; + pthread_mutex_t is_updating_slots_mutex; + pthread_mutex_t updating_slots_mutex; + pthread_mutex_t slots_last_update_mutex; +} config; + +typedef struct _client { + redisContext *context; + sds obuf; + char **randptr; /* Pointers to :rand: strings inside the command buf */ + size_t randlen; /* Number of pointers in client->randptr */ + size_t randfree; /* Number of unused pointers in client->randptr */ + char **stagptr; /* Pointers to slot hashtags (cluster mode only) */ + size_t staglen; /* Number of pointers in client->stagptr */ + size_t stagfree; /* Number of unused pointers in client->stagptr */ + size_t written; /* Bytes of 'obuf' already written */ + long long start; /* Start time of a request */ + long long latency; /* Request latency */ + int pending; /* Number of pending requests (replies to consume) */ + int prefix_pending; /* If non-zero, number of pending prefix commands. Commands + such as auth and select are prefixed to the pipeline of + benchmark commands and discarded after the first send. */ + int prefixlen; /* Size in bytes of the pending prefix commands */ + int thread_id; + struct clusterNode *cluster_node; + int slots_last_update; +} *client; + +/* Threads. */ + +typedef struct benchmarkThread { + int index; + pthread_t thread; + aeEventLoop *el; +} benchmarkThread; + +/* Cluster. */ +typedef struct clusterNode { + char *ip; + int port; + sds name; + int flags; + sds replicate; /* Master ID if node is a replica */ + int *slots; + int slots_count; + int current_slot_index; + int *updated_slots; /* Used by updateClusterSlotsConfiguration */ + int updated_slots_count; /* Used by updateClusterSlotsConfiguration */ + int replicas_count; + sds *migrating; /* An array of sds where even strings are slots and odd + * strings are the destination node IDs. */ + sds *importing; /* An array of sds where even strings are slots and odd + * strings are the source node IDs. 
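+ * Both arrays are filled in by fetchClusterConfiguration from the
+ * [slot->-dst] / [slot-<-src] entries of the CLUSTER NODES output.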
*/ + int migrating_count; /* Length of the migrating array (migrating slots*2) */ + int importing_count; /* Length of the importing array (importing slots*2) */ + struct redisConfig *redis_config; +} clusterNode; + +typedef struct redisConfig { + sds save; + sds appendonly; +} redisConfig; + +int g_fInCrash = false; + +/* Prototypes */ +static void writeHandler(aeEventLoop *el, int fd, void *privdata, int mask); +static void createMissingClients(client c); +static benchmarkThread *createBenchmarkThread(int index); +static void freeBenchmarkThread(benchmarkThread *thread); +static void freeBenchmarkThreads(); +static void *execBenchmarkThread(void *ptr); +static clusterNode *createClusterNode(char *ip, int port); +static redisConfig *getRedisConfig(const char *ip, int port, + const char *hostsocket); +static redisContext *getRedisContext(const char *ip, int port, + const char *hostsocket); +static void freeRedisConfig(redisConfig *cfg); +static int fetchClusterSlotsConfiguration(client c); +static void updateClusterSlotsConfiguration(); +int showThroughput(struct aeEventLoop *eventLoop, long long id, + void *clientData); + +/* Dict callbacks */ +static uint64_t dictSdsHash(const void *key); +static int dictSdsKeyCompare(void *privdata, const void *key1, + const void *key2); + +/* Implementation */ +static long long ustime(void) { + struct timeval tv; + long long ust; + + gettimeofday(&tv, NULL); + ust = ((long)tv.tv_sec)*1000000; + ust += tv.tv_usec; + return ust; +} + +static long long mstime(void) { + struct timeval tv; + long long mst; + + gettimeofday(&tv, NULL); + mst = ((long long)tv.tv_sec)*1000; + mst += tv.tv_usec/1000; + return mst; +} + +static uint64_t dictSdsHash(const void *key) { + return dictGenHashFunction((unsigned char*)key, sdslen((char*)key)); +} + +static int dictSdsKeyCompare(void *privdata, const void *key1, + const void *key2) +{ + int l1,l2; + DICT_NOTUSED(privdata); + + l1 = sdslen((sds)key1); + l2 = sdslen((sds)key2); + if (l1 != l2) return 0; + return memcmp(key1, key2, l1) == 0; +} + +/* _serverAssert is needed by dict */ +extern "C" void _serverAssert(const char *estr, const char *file, int line) { + fprintf(stderr, "=== ASSERTION FAILED ==="); + fprintf(stderr, "==> %s:%d '%s' is not true",file,line,estr); + *((char*)-1) = 'x'; +} + +static redisContext *getRedisContext(const char *ip, int port, + const char *hostsocket) +{ + redisContext *ctx = NULL; + redisReply *reply = NULL; + if (hostsocket == NULL) + ctx = redisConnect(ip, port); + else + ctx = redisConnectUnix(hostsocket); + if (ctx == NULL || ctx->err) { + fprintf(stderr,"Could not connect to Redis at "); + const char *err = (ctx != NULL ? 
ctx->errstr : ""); + if (hostsocket == NULL) + fprintf(stderr,"%s:%d: %s\n",ip,port,err); + else + fprintf(stderr,"%s: %s\n",hostsocket,err); + goto cleanup; + } + if (config.auth == NULL) + return ctx; + if (config.user == NULL) + reply = (redisReply*)redisCommand(ctx,"AUTH %s", config.auth); + else + reply = (redisReply*)redisCommand(ctx,"AUTH %s %s", config.user, config.auth); + if (reply != NULL) { + if (reply->type == REDIS_REPLY_ERROR) { + if (hostsocket == NULL) + fprintf(stderr, "Node %s:%d replied with error:\n%s\n", ip, port, reply->str); + else + fprintf(stderr, "Node %s replied with error:\n%s\n", hostsocket, reply->str); + goto cleanup; + } + freeReplyObject(reply); + return ctx; + } + fprintf(stderr, "ERROR: failed to fetch reply from "); + if (hostsocket == NULL) + fprintf(stderr, "%s:%d\n", ip, port); + else + fprintf(stderr, "%s\n", hostsocket); +cleanup: + freeReplyObject(reply); + redisFree(ctx); + return NULL; +} + +static redisConfig *getRedisConfig(const char *ip, int port, + const char *hostsocket) +{ + redisConfig *cfg = (redisConfig*)zcalloc(sizeof(*cfg)); + if (!cfg) return NULL; + redisContext *c = NULL; + redisReply *reply = NULL, *sub_reply = NULL; + c = getRedisContext(ip, port, hostsocket); + if (c == NULL) { + freeRedisConfig(cfg); + return NULL; + } + redisAppendCommand(c, "CONFIG GET %s", "save"); + redisAppendCommand(c, "CONFIG GET %s", "appendonly"); + + void *r; + for (int i=0; i < 2; i++) { + int res = redisGetReply(c, &r); + if (reply) freeReplyObject(reply); + reply = res == REDIS_OK ? ((redisReply *) r) : NULL; + if (res != REDIS_OK || !r) goto fail; + if (reply->type == REDIS_REPLY_ERROR) { + fprintf(stderr, "ERROR: %s\n", reply->str); + goto fail; + } + if (reply->type != REDIS_REPLY_ARRAY || reply->elements < 2) goto fail; + sub_reply = reply->element[1]; + const char *value = sub_reply->str; + if (!value) value = ""; + switch (i) { + case 0: cfg->save = sdsnew(value); break; + case 1: cfg->appendonly = sdsnew(value); break; + } + } + freeReplyObject(reply); + redisFree(c); + return cfg; +fail: + fprintf(stderr, "ERROR: failed to fetch CONFIG from "); + if (hostsocket == NULL) fprintf(stderr, "%s:%d\n", ip, port); + else fprintf(stderr, "%s\n", hostsocket); + freeReplyObject(reply); + redisFree(c); + freeRedisConfig(cfg); + return NULL; +} +static void freeRedisConfig(redisConfig *cfg) { + if (cfg->save) sdsfree(cfg->save); + if (cfg->appendonly) sdsfree(cfg->appendonly); + zfree(cfg); +} + +static void freeClient(client c) { + aeEventLoop *el = CLIENT_GET_EVENTLOOP(c); + listNode *ln; + aeDeleteFileEvent(el,c->context->fd,AE_WRITABLE); + aeDeleteFileEvent(el,c->context->fd,AE_READABLE); + if (c->thread_id >= 0) { + int requests_finished = 0; + atomicGet(config.requests_finished, requests_finished); + if (requests_finished >= config.requests) { + aeStop(el); + } + } + redisFree(c->context); + sdsfree(c->obuf); + zfree(c->randptr); + zfree(c->stagptr); + zfree(c); + if (config.num_threads) pthread_mutex_lock(&(config.liveclients_mutex)); + config.liveclients--; + ln = listSearchKey(config.clients,c); + assert(ln != NULL); + listDelNode(config.clients,ln); + if (config.num_threads) pthread_mutex_unlock(&(config.liveclients_mutex)); +} + +static void freeAllClients(void) { + listNode *ln = config.clients->head, *next; + + while(ln) { + next = ln->next; + freeClient((client)ln->value); + ln = next; + } +} + +static void resetClient(client c) { + aeEventLoop *el = CLIENT_GET_EVENTLOOP(c); + aeDeleteFileEvent(el,c->context->fd,AE_WRITABLE); + 
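+    /* Drop the read registration as well, then re-arm only the write handler:
+     * the next request re-sends obuf from the start, and readHandler is
+     * re-installed by writeHandler once the buffer has been fully written. */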
aeDeleteFileEvent(el,c->context->fd,AE_READABLE); + aeCreateFileEvent(el,c->context->fd,AE_WRITABLE,writeHandler,c); + c->written = 0; + c->pending = config.pipeline; +} + +static void randomizeClientKey(client c) { + size_t i; + + for (i = 0; i < c->randlen; i++) { + char *p = c->randptr[i]+11; + size_t r = 0; + if (config.randomkeys_keyspacelen != 0) + r = random() % config.randomkeys_keyspacelen; + size_t j; + + for (j = 0; j < 12; j++) { + *p = '0'+r%10; + r/=10; + p--; + } + } +} + +static void setClusterKeyHashTag(client c) { + assert(c->thread_id >= 0); + clusterNode *node = c->cluster_node; + assert(node); + assert(node->current_slot_index < node->slots_count); + int is_updating_slots = 0; + atomicGet(config.is_updating_slots, is_updating_slots); + /* If updateClusterSlotsConfiguration is updating the slots array, + * call updateClusterSlotsConfiguration is order to block the thread + * since the mutex is locked. When the slots will be updated by the + * thread that's actually performing the update, the execution of + * updateClusterSlotsConfiguration won't actually do anything, since + * the updated_slots_count array will be already NULL. */ + if (is_updating_slots) updateClusterSlotsConfiguration(); + int slot = node->slots[node->current_slot_index]; + const char *tag = crc16_slot_table[slot]; + int taglen = strlen(tag); + size_t i; + for (i = 0; i < c->staglen; i++) { + char *p = c->stagptr[i] + 1; + p[0] = tag[0]; + p[1] = (taglen >= 2 ? tag[1] : '}'); + p[2] = (taglen == 3 ? tag[2] : '}'); + } +} + +static void clientDone(client c) { + int requests_finished = 0; + atomicGet(config.requests_finished, requests_finished); + if (requests_finished >= config.requests) { + freeClient(c); + if (!config.num_threads && config.el) aeStop(config.el); + return; + } + if (config.keepalive) { + resetClient(c); + } else { + if (config.num_threads) pthread_mutex_lock(&(config.liveclients_mutex)); + config.liveclients--; + createMissingClients(c); + config.liveclients++; + if (config.num_threads) + pthread_mutex_unlock(&(config.liveclients_mutex)); + freeClient(c); + } +} + +static void readHandler(aeEventLoop *el, int fd, void *privdata, int mask) { + client c = (client)privdata; + void *reply = NULL; + UNUSED(el); + UNUSED(fd); + UNUSED(mask); + + /* Calculate latency only for the first read event. This means that the + * server already sent the reply and we need to parse it. Parsing overhead + * is not part of the latency, so calculate it only once, here. 
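+ * (writeHandler resets c->latency to -1 whenever a request is sent, so a
+ * negative value marks the first read event for the current request.)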
*/ + if (c->latency < 0) c->latency = ustime()-(c->start); + + if (redisBufferRead(c->context) != REDIS_OK) { + fprintf(stderr,"Error: %s\n",c->context->errstr); + exit(1); + } else { + while(c->pending) { + if (redisGetReply(c->context,&reply) != REDIS_OK) { + fprintf(stderr,"Error: %s\n",c->context->errstr); + exit(1); + } + if (reply != NULL) { + if (reply == (void*)REDIS_REPLY_ERROR) { + fprintf(stderr,"Unexpected error reply, exiting...\n"); + exit(1); + } + redisReply *r = (redisReply*)reply; + int is_err = (r->type == REDIS_REPLY_ERROR); + + if (is_err && config.showerrors) { + /* TODO: static lasterr_time not thread-safe */ + static time_t lasterr_time = 0; + time_t now = time(NULL); + if (lasterr_time != now) { + lasterr_time = now; + if (c->cluster_node) { + printf("Error from server %s:%d: %s\n", + c->cluster_node->ip, + c->cluster_node->port, + r->str); + } else printf("Error from server: %s\n", r->str); + } + } + + /* Try to update slots configuration if reply error is + * MOVED/ASK/CLUSTERDOWN and the key(s) used by the command + * contain(s) the slot hash tag. */ + if (is_err && c->cluster_node && c->staglen) { + int fetch_slots = 0, do_wait = 0; + if (!strncmp(r->str,"MOVED",5) || !strncmp(r->str,"ASK",3)) + fetch_slots = 1; + else if (!strncmp(r->str,"CLUSTERDOWN",11)) { + /* Usually the cluster is able to recover itself after + * a CLUSTERDOWN error, so try to sleep one second + * before requesting the new configuration. */ + fetch_slots = 1; + do_wait = 1; + printf("Error from server %s:%d: %s\n", + c->cluster_node->ip, + c->cluster_node->port, + r->str); + } + if (do_wait) sleep(1); + if (fetch_slots && !fetchClusterSlotsConfiguration(c)) + exit(1); + } + + freeReplyObject(reply); + /* This is an OK for prefix commands such as auth and select.*/ + if (c->prefix_pending > 0) { + c->prefix_pending--; + c->pending--; + /* Discard prefix commands on first response.*/ + if (c->prefixlen > 0) { + size_t j; + sdsrange(c->obuf, c->prefixlen, -1); + /* We also need to fix the pointers to the strings + * we need to randomize. */ + for (j = 0; j < c->randlen; j++) + c->randptr[j] -= c->prefixlen; + c->prefixlen = 0; + } + continue; + } + int requests_finished = 0; + atomicGetIncr(config.requests_finished, requests_finished, 1); + if (requests_finished < config.requests) + config.latency[requests_finished] = c->latency; + c->pending--; + if (c->pending == 0) { + clientDone(c); + break; + } + } else { + break; + } + } + } +} + +static void writeHandler(aeEventLoop *el, int fd, void *privdata, int mask) { + client c = (client)privdata; + UNUSED(el); + UNUSED(fd); + UNUSED(mask); + + /* Initialize request when nothing was written. */ + if (c->written == 0) { + /* Enforce upper bound to number of requests. */ + int requests_issued = 0; + atomicGetIncr(config.requests_issued, requests_issued, 1); + if (requests_issued >= config.requests) { + freeClient(c); + return; + } + + /* Really initialize: randomize keys and set start time. 
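+ * That is: substitute fresh digits into every __rand_int__ placeholder,
+ * rewrite the {tag} hashtags in cluster mode, and record the send timestamp
+ * that readHandler later uses to compute the request latency.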
*/ + if (config.randomkeys) randomizeClientKey(c); + if (config.cluster_mode && c->staglen > 0) setClusterKeyHashTag(c); + atomicGet(config.slots_last_update, c->slots_last_update); + c->start = ustime(); + c->latency = -1; + } + if (sdslen(c->obuf) > c->written) { + void *ptr = c->obuf+c->written; + ssize_t nwritten = write(c->context->fd,ptr,sdslen(c->obuf)-c->written); + if (nwritten == -1) { + if (errno != EPIPE) + fprintf(stderr, "Writing to socket: %s\n", strerror(errno)); + freeClient(c); + return; + } + c->written += nwritten; + if (sdslen(c->obuf) == c->written) { + aeDeleteFileEvent(el,c->context->fd,AE_WRITABLE); + aeCreateFileEvent(el,c->context->fd,AE_READABLE,readHandler,c); + } + } +} + +/* Create a benchmark client, configured to send the command passed as 'cmd' of + * 'len' bytes. + * + * The command is copied N times in the client output buffer (that is reused + * again and again to send the request to the server) accordingly to the configured + * pipeline size. + * + * Also an initial SELECT command is prepended in order to make sure the right + * database is selected, if needed. The initial SELECT will be discarded as soon + * as the first reply is received. + * + * To create a client from scratch, the 'from' pointer is set to NULL. If instead + * we want to create a client using another client as reference, the 'from' pointer + * points to the client to use as reference. In such a case the following + * information is take from the 'from' client: + * + * 1) The command line to use. + * 2) The offsets of the __rand_int__ elements inside the command line, used + * for arguments randomization. + * + * Even when cloning another client, prefix commands are applied if needed.*/ +static client createClient(const char *cmd, size_t len, client from, int thread_id) { + int j; + int is_cluster_client = (config.cluster_mode && thread_id >= 0); + client c = (client)zmalloc(sizeof(struct _client), MALLOC_LOCAL); + + const char *ip = NULL; + int port = 0; + c->cluster_node = NULL; + if (config.hostsocket == NULL || is_cluster_client) { + if (!is_cluster_client) { + ip = config.hostip; + port = config.hostport; + } else { + int node_idx = 0; + if (config.num_threads < config.cluster_node_count) + node_idx = config.liveclients % config.cluster_node_count; + else + node_idx = thread_id % config.cluster_node_count; + clusterNode *node = config.cluster_nodes[node_idx]; + assert(node != NULL); + ip = (const char *) node->ip; + port = node->port; + c->cluster_node = node; + } + c->context = redisConnectNonBlock(ip,port); + } else { + c->context = redisConnectUnixNonBlock(config.hostsocket); + } + if (c->context->err) { + fprintf(stderr,"Could not connect to Redis at "); + if (config.hostsocket == NULL || is_cluster_client) + fprintf(stderr,"%s:%d: %s\n",ip,port,c->context->errstr); + else + fprintf(stderr,"%s: %s\n",config.hostsocket,c->context->errstr); + exit(1); + } + c->thread_id = thread_id; + /* Suppress hiredis cleanup of unused buffers for max speed. */ + c->context->reader->maxbuf = 0; + + /* Build the request buffer: + * Queue N requests accordingly to the pipeline size, or simply clone + * the example client buffer. */ + c->obuf = sdsempty(); + /* Prefix the request buffer with AUTH and/or SELECT commands, if applicable. + * These commands are discarded after the first response, so if the client is + * reused the commands will not be used again. 
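+ * prefix_pending counts how many of these prefix replies must be consumed,
+ * and prefixlen records how many bytes readHandler strips from obuf once
+ * they have been answered.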
*/ + c->prefix_pending = 0; + if (config.auth) { + char *buf = NULL; + int len; + if (config.user == NULL) + len = redisFormatCommand(&buf, "AUTH %s", config.auth); + else + len = redisFormatCommand(&buf, "AUTH %s %s", + config.user, config.auth); + c->obuf = sdscatlen(c->obuf, buf, len); + free(buf); + c->prefix_pending++; + } + + if (config.enable_tracking) { + char *buf = NULL; + int len = redisFormatCommand(&buf, "CLIENT TRACKING on"); + c->obuf = sdscatlen(c->obuf, buf, len); + free(buf); + c->prefix_pending++; + } + + /* If a DB number different than zero is selected, prefix our request + * buffer with the SELECT command, that will be discarded the first + * time the replies are received, so if the client is reused the + * SELECT command will not be used again. */ + if (config.dbnum != 0 && !is_cluster_client) { + c->obuf = sdscatprintf(c->obuf,"*2\r\n$6\r\nSELECT\r\n$%d\r\n%s\r\n", + (int)sdslen(config.dbnumstr),config.dbnumstr); + c->prefix_pending++; + } + c->prefixlen = sdslen(c->obuf); + /* Append the request itself. */ + if (from) { + c->obuf = sdscatlen(c->obuf, + from->obuf+from->prefixlen, + sdslen(from->obuf)-from->prefixlen); + } else { + for (j = 0; j < config.pipeline; j++) + c->obuf = sdscatlen(c->obuf,cmd,len); + } + + c->written = 0; + c->pending = config.pipeline+c->prefix_pending; + c->randptr = NULL; + c->randlen = 0; + c->stagptr = NULL; + c->staglen = 0; + + /* Find substrings in the output buffer that need to be randomized. */ + if (config.randomkeys) { + if (from) { + c->randlen = from->randlen; + c->randfree = 0; + c->randptr = (char**)zmalloc(sizeof(char*)*c->randlen, MALLOC_LOCAL); + /* copy the offsets. */ + for (j = 0; j < (int)c->randlen; j++) { + c->randptr[j] = c->obuf + (from->randptr[j]-from->obuf); + /* Adjust for the different select prefix length. */ + c->randptr[j] += c->prefixlen - from->prefixlen; + } + } else { + char *p = c->obuf; + + c->randlen = 0; + c->randfree = RANDPTR_INITIAL_SIZE; + c->randptr = (char**)zmalloc(sizeof(char*)*c->randfree, MALLOC_LOCAL); + while ((p = strstr(p,"__rand_int__")) != NULL) { + if (c->randfree == 0) { + c->randptr = (char**)zrealloc(c->randptr,sizeof(char*)*c->randlen*2, MALLOC_LOCAL); + c->randfree += c->randlen; + } + c->randptr[c->randlen++] = p; + c->randfree--; + p += 12; /* 12 is strlen("__rand_int__). */ + } + } + } + /* If cluster mode is enabled, set slot hashtags pointers. */ + if (config.cluster_mode) { + if (from) { + c->staglen = from->staglen; + c->stagfree = 0; + c->stagptr = (char**)zmalloc(sizeof(char*)*c->staglen, MALLOC_LOCAL); + /* copy the offsets. */ + for (j = 0; j < (int)c->staglen; j++) { + c->stagptr[j] = c->obuf + (from->stagptr[j]-from->obuf); + /* Adjust for the different select prefix length. */ + c->stagptr[j] += c->prefixlen - from->prefixlen; + } + } else { + char *p = c->obuf; + + c->staglen = 0; + c->stagfree = RANDPTR_INITIAL_SIZE; + c->stagptr = (char**)zmalloc(sizeof(char*)*c->stagfree, MALLOC_LOCAL); + while ((p = strstr(p,"{tag}")) != NULL) { + if (c->stagfree == 0) { + c->stagptr = (char**)zrealloc(c->stagptr, + sizeof(char*) * c->staglen*2, MALLOC_LOCAL); + c->stagfree += c->staglen; + } + c->stagptr[c->staglen++] = p; + c->stagfree--; + p += 5; /* 5 is strlen("{tag}"). 
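+ * Each occurrence recorded in stagptr is overwritten per request by
+ * setClusterKeyHashTag with a hashtag taken from crc16_slot_table.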
*/ + } + } + } + aeEventLoop *el = NULL; + if (thread_id < 0) el = config.el; + else { + benchmarkThread *thread = config.threads[thread_id]; + el = thread->el; + } + if (config.idlemode == 0) + aeCreateFileEvent(el,c->context->fd,AE_WRITABLE,writeHandler,c); + listAddNodeTail(config.clients,c); + atomicIncr(config.liveclients, 1); + atomicGet(config.slots_last_update, c->slots_last_update); + return c; +} + +static void createMissingClients(client c) { + int n = 0; + while(config.liveclients < config.numclients) { + int thread_id = -1; + if (config.num_threads) + thread_id = config.liveclients % config.num_threads; + createClient(NULL,0,c,thread_id); + + /* Listen backlog is quite limited on most systems */ + if (++n > 64) { + usleep(50000); + n = 0; + } + } +} + +static int compareLatency(const void *a, const void *b) { + return (*(long long*)a)-(*(long long*)b); +} + +static int ipow(int base, int exp) { + int result = 1; + while (exp) { + if (exp & 1) result *= base; + exp /= 2; + base *= base; + } + return result; +} + +static void showLatencyReport(void) { + int i, curlat = 0; + int usbetweenlat = ipow(10, MAX_LATENCY_PRECISION-config.precision); + float perc, reqpersec; + + reqpersec = (float)config.requests_finished/((float)config.totlatency/1000); + if (!config.quiet && !config.csv) { + printf("====== %s ======\n", config.title); + printf(" %d requests completed in %.2f seconds\n", config.requests_finished, + (float)config.totlatency/1000); + printf(" %d parallel clients\n", config.numclients); + printf(" %d bytes payload\n", config.datasize); + printf(" keep alive: %d\n", config.keepalive); + if (config.cluster_mode) { + printf(" cluster mode: yes (%d masters)\n", + config.cluster_node_count); + int m ; + for (m = 0; m < config.cluster_node_count; m++) { + clusterNode *node = config.cluster_nodes[m]; + redisConfig *cfg = node->redis_config; + if (cfg == NULL) continue; + printf(" node [%d] configuration:\n",m ); + printf(" save: %s\n", + sdslen(cfg->save) ? cfg->save : "NONE"); + printf(" appendonly: %s\n", cfg->appendonly); + } + } else { + if (config.redis_config) { + printf(" host configuration \"save\": %s\n", + config.redis_config->save); + printf(" host configuration \"appendonly\": %s\n", + config.redis_config->appendonly); + } + } + printf(" multi-thread: %s\n", (config.num_threads ? "yes" : "no")); + if (config.num_threads) + printf(" threads: %d\n", config.num_threads); + + printf("\n"); + + qsort(config.latency,config.requests,sizeof(long long),compareLatency); + for (i = 0; i < config.requests; i++) { + if (config.latency[i]/usbetweenlat != curlat || + i == (config.requests-1)) + { + /* After the 2 milliseconds latency to have percentages split + * by decimals will just add a lot of noise to the output. 
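+ * (Latencies are stored in microseconds, so once a sample reaches 2000 us
+ * the precision is forced to 0 and the remaining buckets are printed in
+ * whole milliseconds.)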
*/ + if (config.latency[i] >= 2000) { + config.precision = 0; + usbetweenlat = ipow(10, + MAX_LATENCY_PRECISION-config.precision); + } + + curlat = config.latency[i]/usbetweenlat; + perc = ((float)(i+1)*100)/config.requests; + printf("%.2f%% <= %.*f milliseconds\n", perc, config.precision, + curlat/pow(10.0, config.precision)); + } + } + printf("%.2f requests per second\n\n", reqpersec); + } else if (config.csv) { + printf("\"%s\",\"%.2f\"\n", config.title, reqpersec); + } else { + printf("%s: %.2f requests per second\n", config.title, reqpersec); + } +} + +static void initBenchmarkThreads() { + int i; + if (config.threads) freeBenchmarkThreads(); + config.threads = (benchmarkThread**)zmalloc(config.num_threads * sizeof(benchmarkThread*), MALLOC_LOCAL); + for (i = 0; i < config.num_threads; i++) { + benchmarkThread *thread = createBenchmarkThread(i); + config.threads[i] = thread; + } +} + +static void startBenchmarkThreads() { + int i; + for (i = 0; i < config.num_threads; i++) { + benchmarkThread *t = config.threads[i]; + if (pthread_create(&(t->thread), NULL, execBenchmarkThread, t)){ + fprintf(stderr, "FATAL: Failed to start thread %d.\n", i); + exit(1); + } + } + for (i = 0; i < config.num_threads; i++) + pthread_join(config.threads[i]->thread, NULL); +} + +static void benchmark(const char *title, const char *cmd, int len) { + client c; + + config.title = title; + config.requests_issued = 0; + config.requests_finished = 0; + + if (config.num_threads) initBenchmarkThreads(); + + int thread_id = config.num_threads > 0 ? 0 : -1; + c = createClient(cmd,len,NULL,thread_id); + createMissingClients(c); + + config.start = mstime(); + if (!config.num_threads) aeMain(config.el); + else startBenchmarkThreads(); + config.totlatency = mstime()-config.start; + + showLatencyReport(); + freeAllClients(); + if (config.threads) freeBenchmarkThreads(); +} + +/* Thread functions. */ + +static benchmarkThread *createBenchmarkThread(int index) { + benchmarkThread *thread = (benchmarkThread*)zmalloc(sizeof(*thread), MALLOC_LOCAL); + if (thread == NULL) return NULL; + thread->index = index; + thread->el = aeCreateEventLoop(1024*10); + aeCreateTimeEvent(thread->el,1,showThroughput,NULL,NULL); + return thread; +} + +static void freeBenchmarkThread(benchmarkThread *thread) { + if (thread->el) aeDeleteEventLoop(thread->el); + zfree(thread); +} + +static void freeBenchmarkThreads() { + int i = 0; + for (; i < config.num_threads; i++) { + benchmarkThread *thread = config.threads[i]; + if (thread) freeBenchmarkThread(thread); + } + zfree(config.threads); + config.threads = NULL; +} + +static void *execBenchmarkThread(void *ptr) { + benchmarkThread *thread = (benchmarkThread *) ptr; + aeMain(thread->el); + return NULL; +} + +/* Cluster helper functions. 
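+ * createClusterNode/freeClusterNode own the per-master slot arrays,
+ * fetchClusterConfiguration discovers the masters by parsing CLUSTER NODES,
+ * and addClusterNode appends them to config.cluster_nodes.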
*/ + +static clusterNode *createClusterNode(char *ip, int port) { + clusterNode *node = (clusterNode*)zmalloc(sizeof(*node), MALLOC_LOCAL); + if (!node) return NULL; + node->ip = ip; + node->port = port; + node->name = NULL; + node->flags = 0; + node->replicate = NULL; + node->replicas_count = 0; + node->slots = (int*)zmalloc(CLUSTER_SLOTS * sizeof(int), MALLOC_LOCAL); + node->slots_count = 0; + node->current_slot_index = 0; + node->updated_slots = NULL; + node->updated_slots_count = 0; + node->migrating = NULL; + node->importing = NULL; + node->migrating_count = 0; + node->importing_count = 0; + node->redis_config = NULL; + return node; +} + +static void freeClusterNode(clusterNode *node) { + int i; + if (node->name) sdsfree(node->name); + if (node->replicate) sdsfree(node->replicate); + if (node->migrating != NULL) { + for (i = 0; i < node->migrating_count; i++) sdsfree(node->migrating[i]); + zfree(node->migrating); + } + if (node->importing != NULL) { + for (i = 0; i < node->importing_count; i++) sdsfree(node->importing[i]); + zfree(node->importing); + } + /* If the node is not the reference node, that uses the address from + * config.hostip and config.hostport, then the node ip has been + * allocated by fetchClusterConfiguration, so it must be freed. */ + if (node->ip && strcmp(node->ip, config.hostip) != 0) sdsfree(node->ip); + if (node->redis_config != NULL) freeRedisConfig(node->redis_config); + zfree(node->slots); + zfree(node); +} + +static void freeClusterNodes() { + int i = 0; + for (; i < config.cluster_node_count; i++) { + clusterNode *n = config.cluster_nodes[i]; + if (n) freeClusterNode(n); + } + zfree(config.cluster_nodes); + config.cluster_nodes = NULL; +} + +static clusterNode **addClusterNode(clusterNode *node) { + int count = config.cluster_node_count + 1; + config.cluster_nodes = (clusterNode**)zrealloc(config.cluster_nodes, + count * sizeof(*node), MALLOC_LOCAL); + if (!config.cluster_nodes) return NULL; + config.cluster_nodes[config.cluster_node_count++] = node; + return config.cluster_nodes; +} + +static int fetchClusterConfiguration() { + int success = 1; + redisContext *ctx = NULL; + redisReply *reply = NULL; + char *lines = NULL; + char *line = NULL; + char *p = NULL; + ctx = getRedisContext(config.hostip, config.hostport, config.hostsocket); + if (ctx == NULL) { + exit(1); + } + clusterNode *firstNode = createClusterNode((char *) config.hostip, + config.hostport); + if (!firstNode) {success = 0; goto cleanup;} + reply = (redisReply*)redisCommand(ctx, "CLUSTER NODES"); + success = (reply != NULL); + if (!success) goto cleanup; + success = (reply->type != REDIS_REPLY_ERROR); + if (!success) { + if (config.hostsocket == NULL) { + fprintf(stderr, "Cluster node %s:%d replied with error:\n%s\n", + config.hostip, config.hostport, reply->str); + } else { + fprintf(stderr, "Cluster node %s replied with error:\n%s\n", + config.hostsocket, reply->str); + } + goto cleanup; + } + lines = reply->str; + while ((p = strstr(lines, "\n")) != NULL) { + *p = '\0'; + line = lines; + lines = p + 1; + char *name = NULL, *addr = NULL, *flags = NULL, *master_id = NULL; + int i = 0; + while ((p = strchr(line, ' ')) != NULL) { + *p = '\0'; + char *token = line; + line = p + 1; + switch(i++){ + case 0: name = token; break; + case 1: addr = token; break; + case 2: flags = token; break; + case 3: master_id = token; break; + } + if (i == 8) break; // Slots + } + if (!flags) { + fprintf(stderr, "Invalid CLUSTER NODES reply: missing flags.\n"); + success = 0; + goto cleanup; + } + int myself = 
(strstr(flags, "myself") != NULL); + int is_replica = (strstr(flags, "slave") != NULL || + (master_id != NULL && master_id[0] != '-')); + if (is_replica) continue; + if (addr == NULL) { + fprintf(stderr, "Invalid CLUSTER NODES reply: missing addr.\n"); + success = 0; + goto cleanup; + } + clusterNode *node = NULL; + char *ip = NULL; + int port = 0; + char *paddr = strchr(addr, ':'); + if (paddr != NULL) { + *paddr = '\0'; + ip = addr; + addr = paddr + 1; + /* If internal bus is specified, then just drop it. */ + if ((paddr = strchr(addr, '@')) != NULL) *paddr = '\0'; + port = atoi(addr); + } + if (myself) { + node = firstNode; + if (node->ip == NULL && ip != NULL) { + node->ip = ip; + node->port = port; + } + } else { + node = createClusterNode(sdsnew(ip), port); + } + if (node == NULL) { + success = 0; + goto cleanup; + } + if (name != NULL) node->name = sdsnew(name); + if (i == 8) { + int remaining = strlen(line); + while (remaining > 0) { + p = strchr(line, ' '); + if (p == NULL) p = line + remaining; + remaining -= (p - line); + + char *slotsdef = line; + *p = '\0'; + if (remaining) { + line = p + 1; + remaining--; + } else line = p; + char *dash = NULL; + if (slotsdef[0] == '[') { + slotsdef++; + if ((p = strstr(slotsdef, "->-"))) { // Migrating + *p = '\0'; + p += 3; + char *closing_bracket = strchr(p, ']'); + if (closing_bracket) *closing_bracket = '\0'; + sds slot = sdsnew(slotsdef); + sds dst = sdsnew(p); + node->migrating_count += 2; + node->migrating = + (char**)zrealloc(node->migrating, + (node->migrating_count * sizeof(sds)), MALLOC_LOCAL); + node->migrating[node->migrating_count - 2] = + slot; + node->migrating[node->migrating_count - 1] = + dst; + } else if ((p = strstr(slotsdef, "-<-"))) {//Importing + *p = '\0'; + p += 3; + char *closing_bracket = strchr(p, ']'); + if (closing_bracket) *closing_bracket = '\0'; + sds slot = sdsnew(slotsdef); + sds src = sdsnew(p); + node->importing_count += 2; + node->importing = (char**)zrealloc(node->importing, + (node->importing_count * sizeof(sds)), MALLOC_LOCAL); + node->importing[node->importing_count - 2] = + slot; + node->importing[node->importing_count - 1] = + src; + } + } else if ((dash = strchr(slotsdef, '-')) != NULL) { + p = dash; + int start, stop; + *p = '\0'; + start = atoi(slotsdef); + stop = atoi(p + 1); + while (start <= stop) { + int slot = start++; + node->slots[node->slots_count++] = slot; + } + } else if (p > slotsdef) { + int slot = atoi(slotsdef); + node->slots[node->slots_count++] = slot; + } + } + } + if (node->slots_count == 0) { + printf("WARNING: master node %s:%d has no slots, skipping...\n", + node->ip, node->port); + continue; + } + if (!addClusterNode(node)) { + success = 0; + goto cleanup; + } + } +cleanup: + if (ctx) redisFree(ctx); + if (!success) { + if (config.cluster_nodes) freeClusterNodes(); + } + if (reply) freeReplyObject(reply); + return success; +} + +/* Request the current cluster slots configuration by calling CLUSTER SLOTS + * and atomically update the slots after a successful reply. */ +static int fetchClusterSlotsConfiguration(client c) { + UNUSED(c); + int success = 1, is_fetching_slots = 0, last_update = 0; + size_t i; + atomicGet(config.slots_last_update, last_update); + if (c->slots_last_update < last_update) { + c->slots_last_update = last_update; + return -1; + } + redisReply *reply = NULL; + atomicGetIncr(config.is_fetching_slots, is_fetching_slots, 1); + if (is_fetching_slots) return -1; //TODO: use other codes || errno ? 
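+    /* Only the first caller to bump is_fetching_slots performs the refresh;
+     * concurrent callers bail out above and keep using the current slot map
+     * until updateClusterSlotsConfiguration advances slots_last_update. */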
+ atomicSet(config.is_fetching_slots, 1); + if (config.showerrors) + printf("Cluster slots configuration changed, fetching new one...\n"); + const char *errmsg = "Failed to update cluster slots configuration"; + static dictType dtype = { + dictSdsHash, /* hash function */ + NULL, /* key dup */ + NULL, /* val dup */ + dictSdsKeyCompare, /* key compare */ + NULL, /* key destructor */ + NULL /* val destructor */ + }; + /* printf("[%d] fetchClusterSlotsConfiguration\n", c->thread_id); */ + dict *masters = dictCreate(&dtype, NULL); + redisContext *ctx = NULL; + for (i = 0; i < (size_t) config.cluster_node_count; i++) { + clusterNode *node = config.cluster_nodes[i]; + assert(node->ip != NULL); + assert(node->name != NULL); + assert(node->port); + /* Use first node as entry point to connect to. */ + if (ctx == NULL) { + ctx = getRedisContext(node->ip, node->port, NULL); + if (!ctx) { + success = 0; + goto cleanup; + } + } + if (node->updated_slots != NULL) + zfree(node->updated_slots); + node->updated_slots = NULL; + node->updated_slots_count = 0; + dictReplace(masters, node->name, node) ; + } + reply = (redisReply*)redisCommand(ctx, "CLUSTER SLOTS"); + if (reply == NULL || reply->type == REDIS_REPLY_ERROR) { + success = 0; + if (reply) + fprintf(stderr,"%s\nCLUSTER SLOTS ERROR: %s\n",errmsg,reply->str); + goto cleanup; + } + assert(reply->type == REDIS_REPLY_ARRAY); + for (i = 0; i < reply->elements; i++) { + redisReply *r = reply->element[i]; + assert(r->type == REDIS_REPLY_ARRAY); + assert(r->elements >= 3); + int from, to, slot; + from = r->element[0]->integer; + to = r->element[1]->integer; + redisReply *nr = r->element[2]; + assert(nr->type == REDIS_REPLY_ARRAY && nr->elements >= 3); + assert(nr->element[2]->str != NULL); + sds name = sdsnew(nr->element[2]->str); + dictEntry *entry = dictFind(masters, name); + if (entry == NULL) { + success = 0; + fprintf(stderr, "%s: could not find node with ID %s in current " + "configuration.\n", errmsg, name); + if (name) sdsfree(name); + goto cleanup; + } + sdsfree(name); + clusterNode *node = (clusterNode*)dictGetVal(entry); + if (node->updated_slots == NULL) + node->updated_slots = (int*)zcalloc(CLUSTER_SLOTS * sizeof(int), MALLOC_LOCAL); + for (slot = from; slot <= to; slot++) + node->updated_slots[node->updated_slots_count++] = slot; + } + updateClusterSlotsConfiguration(); +cleanup: + freeReplyObject(reply); + redisFree(ctx); + dictRelease(masters); + atomicSet(config.is_fetching_slots, 0); + return success; +} + +/* Atomically update the new slots configuration. */ +static void updateClusterSlotsConfiguration() { + pthread_mutex_lock(&config.is_updating_slots_mutex); + atomicSet(config.is_updating_slots, 1); + int i; + for (i = 0; i < config.cluster_node_count; i++) { + clusterNode *node = config.cluster_nodes[i]; + if (node->updated_slots != NULL) { + int *oldslots = node->slots; + node->slots = node->updated_slots; + node->slots_count = node->updated_slots_count; + node->current_slot_index = 0; + node->updated_slots = NULL; + node->updated_slots_count = 0; + zfree(oldslots); + } + } + atomicSet(config.is_updating_slots, 0); + atomicIncr(config.slots_last_update, 1); + pthread_mutex_unlock(&config.is_updating_slots_mutex); +} + +/* Generate random data for redis benchmark. See #7196. */ +static void genBenchmarkRandomData(char *data, int count) { + static uint32_t state = 1234; + int i = 0; + + while (count--) { + state = (state*1103515245+12345); + data[i++] = '0'+((state>>16)&63); + } +} + +/* Returns number of consumed options. 
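+ * main() advances argc/argv by this count and treats whatever remains as an
+ * ad-hoc command line to benchmark instead of the default test suite.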
*/ +int parseOptions(int argc, const char **argv) { + int i; + int lastarg; + int exit_status = 1; + + for (i = 1; i < argc; i++) { + lastarg = (i == (argc-1)); + + if (!strcmp(argv[i],"-c")) { + if (lastarg) goto invalid; + config.numclients = atoi(argv[++i]); + } else if (!strcmp(argv[i],"-n")) { + if (lastarg) goto invalid; + config.requests = atoi(argv[++i]); + } else if (!strcmp(argv[i],"-k")) { + if (lastarg) goto invalid; + config.keepalive = atoi(argv[++i]); + } else if (!strcmp(argv[i],"-h")) { + if (lastarg) goto invalid; + config.hostip = strdup(argv[++i]); + } else if (!strcmp(argv[i],"-p")) { + if (lastarg) goto invalid; + config.hostport = atoi(argv[++i]); + } else if (!strcmp(argv[i],"-s")) { + if (lastarg) goto invalid; + config.hostsocket = strdup(argv[++i]); + } else if (!strcmp(argv[i],"-a") ) { + if (lastarg) goto invalid; + config.auth = strdup(argv[++i]); + } else if (!strcmp(argv[i],"--user")) { + if (lastarg) goto invalid; + config.user = argv[++i]; + } else if (!strcmp(argv[i],"-d")) { + if (lastarg) goto invalid; + config.datasize = atoi(argv[++i]); + if (config.datasize < 1) config.datasize=1; + if (config.datasize > 1024*1024*1024) config.datasize = 1024*1024*1024; + } else if (!strcmp(argv[i],"-P")) { + if (lastarg) goto invalid; + config.pipeline = atoi(argv[++i]); + if (config.pipeline <= 0) config.pipeline=1; + } else if (!strcmp(argv[i],"-r")) { + if (lastarg) goto invalid; + const char *next = argv[++i], *p = next; + if (*p == '-') { + p++; + if (*p < '0' || *p > '9') goto invalid; + } + config.randomkeys = 1; + config.randomkeys_keyspacelen = atoi(next); + if (config.randomkeys_keyspacelen < 0) + config.randomkeys_keyspacelen = 0; + } else if (!strcmp(argv[i],"-q")) { + config.quiet = 1; + } else if (!strcmp(argv[i],"--csv")) { + config.csv = 1; + } else if (!strcmp(argv[i],"-l")) { + config.loop = 1; + } else if (!strcmp(argv[i],"-I")) { + config.idlemode = 1; + } else if (!strcmp(argv[i],"-e")) { + config.showerrors = 1; + } else if (!strcmp(argv[i],"-t")) { + if (lastarg) goto invalid; + /* We get the list of tests to run as a string in the form + * get,set,lrange,...,test_N. Then we add a comma before and + * after the string in order to make sure that searching + * for ",testname," will always get a match if the test is + * enabled. */ + config.tests = sdsnew(","); + config.tests = sdscat(config.tests,(char*)argv[++i]); + config.tests = sdscat(config.tests,","); + sdstolower(config.tests); + } else if (!strcmp(argv[i],"--dbnum")) { + if (lastarg) goto invalid; + config.dbnum = atoi(argv[++i]); + config.dbnumstr = sdsfromlonglong(config.dbnum); + } else if (!strcmp(argv[i],"--precision")) { + if (lastarg) goto invalid; + config.precision = atoi(argv[++i]); + if (config.precision < 0) config.precision = 0; + if (config.precision > MAX_LATENCY_PRECISION) config.precision = MAX_LATENCY_PRECISION; + } else if (!strcmp(argv[i],"--threads")) { + if (lastarg) goto invalid; + config.num_threads = atoi(argv[++i]); + if (config.num_threads > MAX_THREADS) { + printf("WARNING: too many threads, limiting threads to %d.\n", + MAX_THREADS); + config.num_threads = MAX_THREADS; + } else if (config.num_threads < 0) config.num_threads = 0; + } else if (!strcmp(argv[i],"--cluster")) { + config.cluster_mode = 1; + } else if (!strcmp(argv[i],"--enable-tracking")) { + config.enable_tracking = 1; + } else if (!strcmp(argv[i],"--help")) { + exit_status = 0; + goto usage; + } else { + /* Assume the user meant to provide an option when the arg starts + * with a dash. 
We're done otherwise and should use the remainder + * as the command and arguments for running the benchmark. */ + if (argv[i][0] == '-') goto invalid; + return i; + } + } + + return i; + +invalid: + printf("Invalid option \"%s\" or option argument missing\n\n",argv[i]); + +usage: + printf( +"Usage: keydb-benchmark [-h ] [-p ] [-c ] [-n ] [-k ]\n\n" +" -h Server hostname (default 127.0.0.1)\n" +" -p Server port (default 6379)\n" +" -s Server socket (overrides host and port)\n" +" -a Password for Redis Auth\n" +" --user Used to send ACL style 'AUTH username pass'. Needs -a.\n" +" -c Number of parallel connections (default 50)\n" +" -n Total number of requests (default 100000)\n" +" -d Data size of SET/GET value in bytes (default 3)\n" +" --dbnum SELECT the specified db number (default 0)\n" +" --threads Enable multi-thread mode.\n" +" --cluster Enable cluster mode.\n" +" --enable-tracking Send CLIENT TRACKING on before starting benchmark.\n" +" -k 1=keep alive 0=reconnect (default 1)\n" +" -r Use random keys for SET/GET/INCR, random values for SADD,\n" +" random members and scores for ZADD.\n" +" Using this option the benchmark will expand the string __rand_int__\n" +" inside an argument with a 12 digits number in the specified range\n" +" from 0 to keyspacelen-1. The substitution changes every time a command\n" +" is executed. Default tests use this to hit random keys in the\n" +" specified range.\n" +" -P Pipeline requests. Default 1 (no pipeline).\n" +" -e If server replies with errors, show them on stdout.\n" +" (no more than 1 error per second is displayed)\n" +" -q Quiet. Just show query/sec values\n" +" --precision Number of decimal places to display in latency output (default 0)\n" +" --csv Output in CSV format\n" +" -l Loop. Run the tests forever\n" +" -t Only run the comma separated list of tests. The test\n" +" names are the same as the ones produced as output.\n" +" -I Idle mode. Just open N idle connections and wait.\n\n" +"Examples:\n\n" +" Run the benchmark with the default configuration against 127.0.0.1:6379:\n" +" $ keydb-benchmark\n\n" +" Use 20 parallel clients, for a total of 100k requests, against 192.168.1.1:\n" +" $ keydb-benchmark -h 192.168.1.1 -p 6379 -n 100000 -c 20\n\n" +" Fill 127.0.0.1:6379 with about 1 million keys only using the SET test:\n" +" $ keydb-benchmark -t set -n 1000000 -r 100000000\n\n" +" Benchmark 127.0.0.1:6379 for a few commands producing CSV output:\n" +" $ keydb-benchmark -t ping,set,get -n 100000 --csv\n\n" +" Benchmark a specific command line:\n" +" $ keydb-benchmark -r 10000 -n 10000 eval 'return redis.call(\"ping\")' 0\n\n" +" Fill a list with 10000 random elements:\n" +" $ keydb-benchmark -r 10000 -n 10000 lpush mylist __rand_int__\n\n" +" On user specified command lines __rand_int__ is replaced with a random integer\n" +" with a range of values selected by the -r option.\n" + ); + exit(exit_status); +} + +int showThroughput(struct aeEventLoop *eventLoop, long long id, void *clientData) { + UNUSED(eventLoop); + UNUSED(id); + UNUSED(clientData); + int liveclients = 0; + int requests_finished = 0; + atomicGet(config.liveclients, liveclients); + atomicGet(config.requests_finished, requests_finished); + + if (liveclients == 0 && requests_finished != config.requests) { + fprintf(stderr,"All clients disconnected... 
aborting.\n"); + exit(1); + } + if (config.num_threads && requests_finished >= config.requests) { + aeStop(eventLoop); + return AE_NOMORE; + } + if (config.csv) return 250; + if (config.idlemode == 1) { + printf("clients: %d\r", config.liveclients); + fflush(stdout); + return 250; + } + float dt = (float)(mstime()-config.start)/1000.0; + float rps = (float)requests_finished/dt; + printf("%s: %.2f\r", config.title, rps); + fflush(stdout); + return 250; /* every 250ms */ +} + +/* Return true if the named test was selected using the -t command line + * switch, or if all the tests are selected (no -t passed by user). */ +int test_is_selected(const char *name) { + char buf[256]; + int l = strlen(name); + + if (config.tests == NULL) return 1; + buf[0] = ','; + memcpy(buf+1,name,l); + buf[l+1] = ','; + buf[l+2] = '\0'; + return strstr(config.tests,buf) != NULL; +} + +int main(int argc, const char **argv) { + int i; + char *data, *cmd; + const char *tag; + int len; + + client c; + + storage_init(NULL, 0); + + srandom(time(NULL)); + signal(SIGHUP, SIG_IGN); + signal(SIGPIPE, SIG_IGN); + + config.numclients = 50; + config.requests = 100000; + config.liveclients = 0; + config.el = aeCreateEventLoop(1024*10); + aeCreateTimeEvent(config.el,1,showThroughput,NULL,NULL); + config.keepalive = 1; + config.datasize = 3; + config.pipeline = 1; + config.showerrors = 0; + config.randomkeys = 0; + config.randomkeys_keyspacelen = 0; + config.quiet = 0; + config.csv = 0; + config.loop = 0; + config.idlemode = 0; + config.latency = NULL; + config.clients = listCreate(); + config.hostip = "127.0.0.1"; + config.hostport = 6379; + config.hostsocket = NULL; + config.tests = NULL; + config.dbnum = 0; + config.auth = NULL; + config.precision = 1; + config.num_threads = 0; + config.threads = NULL; + config.cluster_mode = 0; + config.cluster_node_count = 0; + config.cluster_nodes = NULL; + config.redis_config = NULL; + config.is_fetching_slots = 0; + config.is_updating_slots = 0; + config.slots_last_update = 0; + config.enable_tracking = 0; + + i = parseOptions(argc,argv); + argc -= i; + argv += i; + + config.latency = (long long*)zmalloc(sizeof(long long)*config.requests, MALLOC_LOCAL); + + tag = ""; + + if (config.cluster_mode) { + // We only include the slot placeholder {tag} if cluster mode is enabled + tag = ":{tag}"; + + /* Fetch cluster configuration. */ + if (!fetchClusterConfiguration() || !config.cluster_nodes) { + if (!config.hostsocket) { + fprintf(stderr, "Failed to fetch cluster configuration from " + "%s:%d\n", config.hostip, config.hostport); + } else { + fprintf(stderr, "Failed to fetch cluster configuration from " + "%s\n", config.hostsocket); + } + exit(1); + } + if (config.cluster_node_count <= 1) { + fprintf(stderr, "Invalid cluster: %d node(s).\n", + config.cluster_node_count); + exit(1); + } + printf("Cluster has %d master nodes:\n\n", config.cluster_node_count); + int i = 0; + for (; i < config.cluster_node_count; i++) { + clusterNode *node = config.cluster_nodes[i]; + if (!node) { + fprintf(stderr, "Invalid cluster node #%d\n", i); + exit(1); + } + printf("Master %d: ", i); + if (node->name) printf("%s ", node->name); + printf("%s:%d\n", node->ip, node->port); + node->redis_config = getRedisConfig(node->ip, node->port, NULL); + if (node->redis_config == NULL) { + fprintf(stderr, "WARN: could not fetch node CONFIG %s:%d\n", + node->ip, node->port); + } + } + printf("\n"); + /* Automatically set thread number to node count if not specified + * by the user. 
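+ * (With one thread per master, createClient assigns nodes by
+ * thread_id % cluster_node_count, so each master gets a dedicated
+ * event loop.)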
*/ + if (config.num_threads == 0) + config.num_threads = config.cluster_node_count; + } else { + config.redis_config = + getRedisConfig(config.hostip, config.hostport, config.hostsocket); + if (config.redis_config == NULL) + fprintf(stderr, "WARN: could not fetch server CONFIG\n"); + } + + if (config.num_threads > 0) { + int err = 0; + err |= pthread_mutex_init(&(config.requests_issued_mutex), NULL); + err |= pthread_mutex_init(&(config.requests_finished_mutex), NULL); + err |= pthread_mutex_init(&(config.liveclients_mutex), NULL); + err |= pthread_mutex_init(&(config.is_fetching_slots_mutex), NULL); + err |= pthread_mutex_init(&(config.is_updating_slots_mutex), NULL); + err |= pthread_mutex_init(&(config.updating_slots_mutex), NULL); + err |= pthread_mutex_init(&(config.slots_last_update_mutex), NULL); + if (err != 0) + { + perror("Failed to initialize mutex"); + exit(EXIT_FAILURE); + } + } + + if (config.keepalive == 0) { + printf("WARNING: keepalive disabled, you probably need 'echo 1 > /proc/sys/net/ipv4/tcp_tw_reuse' for Linux and 'sudo sysctl -w net.inet.tcp.msl=1000' for Mac OS X in order to use a lot of clients/requests\n"); + } + + if (config.idlemode) { + printf("Creating %d idle connections and waiting forever (Ctrl+C when done)\n", config.numclients); + int thread_id = -1, use_threads = (config.num_threads > 0); + if (use_threads) { + thread_id = 0; + initBenchmarkThreads(); + } + c = createClient("",0,NULL,thread_id); /* will never receive a reply */ + createMissingClients(c); + if (use_threads) startBenchmarkThreads(); + else aeMain(config.el); + /* and will wait for every */ + } + + /* Run benchmark with command in the remainder of the arguments. */ + if (argc) { + sds title = sdsnew(argv[0]); + for (i = 1; i < argc; i++) { + title = sdscatlen(title, " ", 1); + title = sdscatlen(title, (char*)argv[i], strlen(argv[i])); + } + + do { + len = redisFormatCommandArgv(&cmd,argc,argv,NULL); + benchmark(title,cmd,len); + free(cmd); + } while(config.loop); + + if (config.redis_config != NULL) freeRedisConfig(config.redis_config); + return 0; + } + + /* Run default benchmark suite. 
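+ * Each iteration regenerates the random payload and runs the PING, SET/GET,
+ * INCR, list/set/hash/zset, LRANGE and MSET tests that pass the -t filter
+ * (test_is_selected), repeating forever when -l was given.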
*/ + data = (char*)zmalloc(config.datasize+1, MALLOC_LOCAL); + do { + genBenchmarkRandomData(data, config.datasize); + data[config.datasize] = '\0'; + + if (test_is_selected("ping_inline") || test_is_selected("ping")) + benchmark("PING_INLINE","PING\r\n",6); + + if (test_is_selected("ping_mbulk") || test_is_selected("ping")) { + len = redisFormatCommand(&cmd,"PING"); + benchmark("PING_BULK",cmd,len); + free(cmd); + } + + if (test_is_selected("set")) { + len = redisFormatCommand(&cmd,"SET key%s:__rand_int__ %s",tag,data); + benchmark("SET",cmd,len); + free(cmd); + } + + if (test_is_selected("get")) { + len = redisFormatCommand(&cmd,"GET key%s:__rand_int__",tag); + benchmark("GET",cmd,len); + free(cmd); + } + + if (test_is_selected("incr")) { + len = redisFormatCommand(&cmd,"INCR counter%s:__rand_int__",tag); + benchmark("INCR",cmd,len); + free(cmd); + } + + if (test_is_selected("lpush")) { + len = redisFormatCommand(&cmd,"LPUSH mylist%s %s",tag,data); + benchmark("LPUSH",cmd,len); + free(cmd); + } + + if (test_is_selected("rpush")) { + len = redisFormatCommand(&cmd,"RPUSH mylist%s %s",tag,data); + benchmark("RPUSH",cmd,len); + free(cmd); + } + + if (test_is_selected("lpop")) { + len = redisFormatCommand(&cmd,"LPOP mylist%s",tag); + benchmark("LPOP",cmd,len); + free(cmd); + } + + if (test_is_selected("rpop")) { + len = redisFormatCommand(&cmd,"RPOP mylist%s",tag); + benchmark("RPOP",cmd,len); + free(cmd); + } + + if (test_is_selected("sadd")) { + len = redisFormatCommand(&cmd, + "SADD myset%s element:__rand_int__",tag); + benchmark("SADD",cmd,len); + free(cmd); + } + + if (test_is_selected("hset")) { + len = redisFormatCommand(&cmd, + "HSET myhash%s element:__rand_int__ %s",tag,data); + benchmark("HSET",cmd,len); + free(cmd); + } + + if (test_is_selected("spop")) { + len = redisFormatCommand(&cmd,"SPOP myset%s",tag); + benchmark("SPOP",cmd,len); + free(cmd); + } + + if (test_is_selected("zadd")) { + const char *score = "0"; + if (config.randomkeys) score = "__rand_int__"; + len = redisFormatCommand(&cmd, + "ZADD myzset%s %s element:__rand_int__",tag,score); + benchmark("ZADD",cmd,len); + free(cmd); + } + + if (test_is_selected("zpopmin")) { + len = redisFormatCommand(&cmd,"ZPOPMIN myzset%s",tag); + benchmark("ZPOPMIN",cmd,len); + free(cmd); + } + + if (test_is_selected("lrange") || + test_is_selected("lrange_100") || + test_is_selected("lrange_300") || + test_is_selected("lrange_500") || + test_is_selected("lrange_600")) + { + len = redisFormatCommand(&cmd,"LPUSH mylist%s %s",tag,data); + benchmark("LPUSH (needed to benchmark LRANGE)",cmd,len); + free(cmd); + } + + if (test_is_selected("lrange") || test_is_selected("lrange_100")) { + len = redisFormatCommand(&cmd,"LRANGE mylist%s 0 99",tag); + benchmark("LRANGE_100 (first 100 elements)",cmd,len); + free(cmd); + } + + if (test_is_selected("lrange") || test_is_selected("lrange_300")) { + len = redisFormatCommand(&cmd,"LRANGE mylist%s 0 299",tag); + benchmark("LRANGE_300 (first 300 elements)",cmd,len); + free(cmd); + } + + if (test_is_selected("lrange") || test_is_selected("lrange_500")) { + len = redisFormatCommand(&cmd,"LRANGE mylist%s 0 449",tag); + benchmark("LRANGE_500 (first 450 elements)",cmd,len); + free(cmd); + } + + if (test_is_selected("lrange") || test_is_selected("lrange_600")) { + len = redisFormatCommand(&cmd,"LRANGE mylist%s 0 599",tag); + benchmark("LRANGE_600 (first 600 elements)",cmd,len); + free(cmd); + } + + if (test_is_selected("mset")) { + const char *cmd_argv[21]; + cmd_argv[0] = "MSET"; + sds key_placeholder = 
sdscatprintf(sdsnew(""),"key%s:__rand_int__",tag); + for (i = 1; i < 21; i += 2) { + cmd_argv[i] = key_placeholder; + cmd_argv[i+1] = data; + } + len = redisFormatCommandArgv(&cmd,21,cmd_argv,NULL); + benchmark("MSET (10 keys)",cmd,len); + free(cmd); + sdsfree(key_placeholder); + } + + if (!config.csv) printf("\n"); + } while(config.loop); + + if (config.redis_config != NULL) freeRedisConfig(config.redis_config); + + return 0; +} From 57836424e7f8d125a95d7358f061ad06d93ee990 Mon Sep 17 00:00:00 2001 From: christianEQ Date: Tue, 27 Apr 2021 21:14:05 +0000 Subject: [PATCH 003/149] updated makefile vars to keydb names Former-commit-id: e6ab823473f7f215dcd61c3101b7c9ad310a0483 --- src/Makefile | 116 +++++++++++++++++++++++++-------------------------- 1 file changed, 58 insertions(+), 58 deletions(-) diff --git a/src/Makefile b/src/Makefile index 11516e15c..40f1be7cf 100644 --- a/src/Makefile +++ b/src/Makefile @@ -6,8 +6,8 @@ # what is needed for Redis plus the standard CFLAGS and LDFLAGS passed. # However when building the dependencies (Jemalloc, Lua, Hiredis, ...) # CFLAGS and LDFLAGS are propagated to the dependencies, so to pass -# flags only to be used when compiling / linking Redis itself REDIS_CFLAGS -# and REDIS_LDFLAGS are used instead (this is the case of 'make gcov'). +# flags only to be used when compiling / linking Redis itself KEYDB_CFLAGS +# and KEYDB_LDFLAGS are used instead (this is the case of 'make gcov'). # # Dependencies are stored in the Makefile.dep file. To rebuild this file # Just use 'make dep', but this is only needed by developers. @@ -20,7 +20,7 @@ DEPENDENCY_TARGETS=hiredis linenoise lua NODEPS:=clean distclean # Default settings -STD=-std=c11 -pedantic -DREDIS_STATIC='' +STD=-std=c11 -pedantic -DKEYDB_STATIC='' CXX_STD=-std=c++14 -pedantic -fno-rtti -D__STDC_FORMAT_MACROS ifneq (,$(findstring clang,$(CC))) ifneq (,$(findstring FreeBSD,$(uname_S))) @@ -104,9 +104,9 @@ endif # Override default settings if possible -include .make-settings -FINAL_CFLAGS=$(STD) $(WARN) $(OPT) $(DEBUG) $(CFLAGS) $(REDIS_CFLAGS) -FINAL_CXXFLAGS=$(CXX_STD) $(WARN) $(OPT) $(DEBUG) $(CXXFLAGS) $(REDIS_CFLAGS) -FINAL_LDFLAGS=$(LDFLAGS) $(REDIS_LDFLAGS) $(DEBUG) +FINAL_CFLAGS=$(STD) $(WARN) $(OPT) $(DEBUG) $(CFLAGS) $(KEYDB_CFLAGS) +FINAL_CXXFLAGS=$(CXX_STD) $(WARN) $(OPT) $(DEBUG) $(CXXFLAGS) $(KEYDB_CFLAGS) +FINAL_LDFLAGS=$(LDFLAGS) $(KEYDB_LDFLAGS) $(DEBUG) FINAL_LIBS+=-lm DEBUG=-g -ggdb @@ -275,11 +275,11 @@ endif FINAL_LIBS += ../deps/hiredis/libhiredis_ssl.a $(LIBSSL_LIBS) $(LIBCRYPTO_LIBS) endif -REDIS_CC=$(QUIET_CC)$(CC) $(FINAL_CFLAGS) -REDIS_CXX=$(QUIET_CC)$(CXX) $(FINAL_CXXFLAGS) +KEYDB_CC=$(QUIET_CC)$(CC) $(FINAL_CFLAGS) +KEYDB_CXX=$(QUIET_CC)$(CXX) $(FINAL_CXXFLAGS) KEYDB_AS=$(QUIET_CC) as --64 -g -REDIS_LD=$(QUIET_LINK)$(CXX) $(FINAL_LDFLAGS) -REDIS_INSTALL=$(QUIET_INSTALL)$(INSTALL) +KEYDB_LD=$(QUIET_LINK)$(CXX) $(FINAL_LDFLAGS) +KEYDB_INSTALL=$(QUIET_INSTALL)$(INSTALL) CCCOLOR="\033[34m" LINKCOLOR="\033[34;1m" @@ -294,24 +294,24 @@ QUIET_LINK = @printf ' %b %b\n' $(LINKCOLOR)LINK$(ENDCOLOR) $(BINCOLOR)$@$(EN QUIET_INSTALL = @printf ' %b %b\n' $(LINKCOLOR)INSTALL$(ENDCOLOR) $(BINCOLOR)$@$(ENDCOLOR); endif -REDIS_SERVER_NAME=keydb-server$(PROG_SUFFIX) -REDIS_SENTINEL_NAME=keydb-sentinel$(PROG_SUFFIX) -REDIS_SERVER_OBJ=adlist.o quicklist.o ae.o anet.o dict.o server.o sds.o zmalloc.o lzf_c.o lzf_d.o pqsort.o zipmap.o sha1.o ziplist.o release.o networking.o util.o object.o db.o replication.o rdb.o t_string.o t_list.o t_set.o t_zset.o t_hash.o t_nhash.o config.o aof.o pubsub.o 
multi.o debug.o sort.o intset.o syncio.o cluster.o crc16.o endianconv.o slowlog.o scripting.o bio.o rio.o rand.o memtest.o crcspeed.o crc64.o bitops.o sentinel.o notify.o setproctitle.o blocked.o hyperloglog.o latency.o sparkline.o redis-check-rdb.o redis-check-aof.o geo.o lazyfree.o module.o evict.o expire.o geohash.o geohash_helper.o childinfo.o defrag.o siphash.o rax.o t_stream.o listpack.o localtime.o acl.o storage.o rdb-s3.o fastlock.o new.o tracking.o cron.o connection.o tls.o sha256.o motd.o timeout.o setcpuaffinity.o $(ASM_OBJ) -REDIS_CLI_NAME=keydb-cli$(PROG_SUFFIX) -REDIS_CLI_OBJ=anet.o adlist.o dict.o redis-cli.o redis-cli-cpphelper.o zmalloc.o release.o anet.o ae.o crcspeed.o crc64.o siphash.o crc16.o storage-lite.o fastlock.o new.o motd.o $(ASM_OBJ) -REDIS_BENCHMARK_NAME=keydb-benchmark$(PROG_SUFFIX) -REDIS_BENCHMARK_OBJ=ae.o anet.o redis-benchmark.o adlist.o dict.o zmalloc.o siphash.o redis-benchmark.o storage-lite.o fastlock.o new.o $(ASM_OBJ) -REDIS_CHECK_RDB_NAME=keydb-check-rdb$(PROG_SUFFIX) -REDIS_CHECK_AOF_NAME=keydb-check-aof$(PROG_SUFFIX) -REDIS_DIAGNOSTIC_NAME=keydb-diagnostic-tool$(PROG_SUFFIX) +KEYDB_SERVER_NAME=keydb-server$(PROG_SUFFIX) +KEYDB_SENTINEL_NAME=keydb-sentinel$(PROG_SUFFIX) +KEYDB_SERVER_OBJ=adlist.o quicklist.o ae.o anet.o dict.o server.o sds.o zmalloc.o lzf_c.o lzf_d.o pqsort.o zipmap.o sha1.o ziplist.o release.o networking.o util.o object.o db.o replication.o rdb.o t_string.o t_list.o t_set.o t_zset.o t_hash.o t_nhash.o config.o aof.o pubsub.o multi.o debug.o sort.o intset.o syncio.o cluster.o crc16.o endianconv.o slowlog.o scripting.o bio.o rio.o rand.o memtest.o crcspeed.o crc64.o bitops.o sentinel.o notify.o setproctitle.o blocked.o hyperloglog.o latency.o sparkline.o redis-check-rdb.o redis-check-aof.o geo.o lazyfree.o module.o evict.o expire.o geohash.o geohash_helper.o childinfo.o defrag.o siphash.o rax.o t_stream.o listpack.o localtime.o acl.o storage.o rdb-s3.o fastlock.o new.o tracking.o cron.o connection.o tls.o sha256.o motd.o timeout.o setcpuaffinity.o $(ASM_OBJ) +KEYDB_CLI_NAME=keydb-cli$(PROG_SUFFIX) +KEYDB_CLI_OBJ=anet.o adlist.o dict.o redis-cli.o redis-cli-cpphelper.o zmalloc.o release.o anet.o ae.o crcspeed.o crc64.o siphash.o crc16.o storage-lite.o fastlock.o new.o motd.o $(ASM_OBJ) +KEYDB_BENCHMARK_NAME=keydb-benchmark$(PROG_SUFFIX) +KEYDB_BENCHMARK_OBJ=ae.o anet.o redis-benchmark.o adlist.o dict.o zmalloc.o siphash.o redis-benchmark.o storage-lite.o fastlock.o new.o $(ASM_OBJ) +KEYDB_CHECK_RDB_NAME=keydb-check-rdb$(PROG_SUFFIX) +KEYDB_CHECK_AOF_NAME=keydb-check-aof$(PROG_SUFFIX) +KEYDB_DIAGNOSTIC_NAME=keydb-diagnostic-tool$(PROG_SUFFIX) -all: $(REDIS_SERVER_NAME) $(REDIS_SENTINEL_NAME) $(REDIS_CLI_NAME) $(REDIS_BENCHMARK_NAME) $(REDIS_CHECK_RDB_NAME) $(REDIS_CHECK_AOF_NAME) $(REDIS_DIAGNOSTIC_NAME) +all: $(KEYDB_SERVER_NAME) $(KEYDB_SENTINEL_NAME) $(KEYDB_CLI_NAME) $(KEYDB_BENCHMARK_NAME) $(KEYDB_CHECK_RDB_NAME) $(KEYDB_CHECK_AOF_NAME) $(KEYDB_DIAGNOSTIC_NAME) @echo "" @echo "Hint: It's a good idea to run 'make test' ;)" @echo "" Makefile.dep: - -$(REDIS_CC) -MM *.c > Makefile.dep 2> /dev/null || true + -$(KEYDB_CC) -MM *.c > Makefile.dep 2> /dev/null || true ifeq (0, $(words $(findstring $(MAKECMDGOALS), $(NODEPS)))) -include Makefile.dep @@ -329,9 +329,9 @@ persist-settings: distclean echo CFLAGS=$(CFLAGS) >> .make-settings echo CXXFLAGS=$(CXXFLAGS) >> .make-settings echo LDFLAGS=$(LDFLAGS) >> .make-settings - echo REDIS_CFLAGS=$(REDIS_CFLAGS) >> .make-settings - echo REDIS_CXXFLAGS=$(REDIS_CXXFLAGS) >> .make-settings - echo 
REDIS_LDFLAGS=$(REDIS_LDFLAGS) >> .make-settings + echo KEYDB_CFLAGS=$(KEYDB_CFLAGS) >> .make-settings + echo KEYDB_CXXFLAGS=$(KEYDB_CXXFLAGS) >> .make-settings + echo KEYDB_LDFLAGS=$(KEYDB_LDFLAGS) >> .make-settings echo PREV_FINAL_CFLAGS=$(FINAL_CFLAGS) >> .make-settings echo PREV_FINAL_CXXFLAGS=$(FINAL_CXXFLAGS) >> .make-settings echo PREV_FINAL_LDFLAGS=$(FINAL_LDFLAGS) >> .make-settings @@ -354,53 +354,53 @@ endif @touch $@ # keydb-server -$(REDIS_SERVER_NAME): $(REDIS_SERVER_OBJ) - $(REDIS_LD) -o $@ $^ ../deps/hiredis/libhiredis.a ../deps/lua/src/liblua.a $(FINAL_LIBS) +$(KEYDB_SERVER_NAME): $(KEYDB_SERVER_OBJ) + $(KEYDB_LD) -o $@ $^ ../deps/hiredis/libhiredis.a ../deps/lua/src/liblua.a $(FINAL_LIBS) # keydb-sentinel -$(REDIS_SENTINEL_NAME): $(REDIS_SERVER_NAME) - $(REDIS_INSTALL) $(REDIS_SERVER_NAME) $(REDIS_SENTINEL_NAME) +$(KEYDB_SENTINEL_NAME): $(KEYDB_SERVER_NAME) + $(KEYDB_INSTALL) $(KEYDB_SERVER_NAME) $(KEYDB_SENTINEL_NAME) # keydb-check-rdb -$(REDIS_CHECK_RDB_NAME): $(REDIS_SERVER_NAME) - $(REDIS_INSTALL) $(REDIS_SERVER_NAME) $(REDIS_CHECK_RDB_NAME) +$(KEYDB_CHECK_RDB_NAME): $(KEYDB_SERVER_NAME) + $(KEYDB_INSTALL) $(KEYDB_SERVER_NAME) $(KEYDB_CHECK_RDB_NAME) # keydb-check-aof -$(REDIS_CHECK_AOF_NAME): $(REDIS_SERVER_NAME) - $(REDIS_INSTALL) $(REDIS_SERVER_NAME) $(REDIS_CHECK_AOF_NAME) +$(KEYDB_CHECK_AOF_NAME): $(KEYDB_SERVER_NAME) + $(KEYDB_INSTALL) $(KEYDB_SERVER_NAME) $(KEYDB_CHECK_AOF_NAME) # keydb-cli -$(REDIS_CLI_NAME): $(REDIS_CLI_OBJ) - $(REDIS_LD) -o $@ $^ ../deps/hiredis/libhiredis.a ../deps/linenoise/linenoise.o $(FINAL_LIBS) +$(KEYDB_CLI_NAME): $(KEYDB_CLI_OBJ) + $(KEYDB_LD) -o $@ $^ ../deps/hiredis/libhiredis.a ../deps/linenoise/linenoise.o $(FINAL_LIBS) # keydb-benchmark -$(REDIS_BENCHMARK_NAME): $(REDIS_BENCHMARK_OBJ) - $(REDIS_LD) -o $@ $^ ../deps/hiredis/libhiredis.a $(FINAL_LIBS) +$(KEYDB_BENCHMARK_NAME): $(KEYDB_BENCHMARK_OBJ) + $(KEYDB_LD) -o $@ $^ ../deps/hiredis/libhiredis.a $(FINAL_LIBS) # keydb-diagnostic-tool -$(REDIS_DIAGNOSTIC_NAME): $(REDIS_BENCHMARK_OBJ) - $(REDIS_LD) -o $@ $^ ../deps/hiredis/libhiredis.a $(FINAL_LIBS) +$(KEYDB_DIAGNOSTIC_NAME): $(KEYDB_BENCHMARK_OBJ) + $(KEYDB_LD) -o $@ $^ ../deps/hiredis/libhiredis.a $(FINAL_LIBS) dict-benchmark: dict.cpp zmalloc.cpp sds.c siphash.c - $(REDIS_CC) $(FINAL_CFLAGS) $^ -D DICT_BENCHMARK_MAIN -o $@ $(FINAL_LIBS) + $(KEYDB_CC) $(FINAL_CFLAGS) $^ -D DICT_BENCHMARK_MAIN -o $@ $(FINAL_LIBS) -DEP = $(REDIS_SERVER_OBJ:%.o=%.d) $(REDIS_CLI_OBJ:%.o=%.d) $(REDIS_BENCHMARK_OBJ:%.o=%.d) +DEP = $(KEYDB_SERVER_OBJ:%.o=%.d) $(KEYDB_CLI_OBJ:%.o=%.d) $(KEYDB_BENCHMARK_OBJ:%.o=%.d) -include $(DEP) # Because the jemalloc.h header is generated as a part of the jemalloc build, # building it should complete before building any other object. Instead of # depending on a single artifact, build all dependencies first. 
%.o: %.c .make-prerequisites - $(REDIS_CC) -MMD -o $@ -c $< + $(KEYDB_CC) -MMD -o $@ -c $< %.o: %.cpp .make-prerequisites - $(REDIS_CXX) -MMD -o $@ -c $< + $(KEYDB_CXX) -MMD -o $@ -c $< %.o: %.asm .make-prerequisites $(KEYDB_AS) $< -o $@ clean: - rm -rf $(REDIS_SERVER_NAME) $(REDIS_SENTINEL_NAME) $(REDIS_CLI_NAME) $(REDIS_BENCHMARK_NAME) $(REDIS_CHECK_RDB_NAME) $(REDIS_CHECK_AOF_NAME) $(REDIS_DIAGNOSTIC_NAME) *.o *.gcda *.gcno *.gcov KeyDB.info lcov-html Makefile.dep dict-benchmark + rm -rf $(KEYDB_SERVER_NAME) $(KEYDB_SENTINEL_NAME) $(KEYDB_CLI_NAME) $(KEYDB_BENCHMARK_NAME) $(KEYDB_CHECK_RDB_NAME) $(KEYDB_CHECK_AOF_NAME) $(KEYDB_DIAGNOSTIC_NAME) *.o *.gcda *.gcno *.gcov KeyDB.info lcov-html Makefile.dep dict-benchmark rm -f $(DEP) .PHONY: clean @@ -412,10 +412,10 @@ distclean: clean .PHONY: distclean -test: $(REDIS_SERVER_NAME) $(REDIS_CHECK_AOF_NAME) +test: $(KEYDB_SERVER_NAME) $(KEYDB_CHECK_AOF_NAME) @(cd ..; ./runtest) -test-sentinel: $(REDIS_SENTINEL_NAME) +test-sentinel: $(KEYDB_SENTINEL_NAME) @(cd ..; ./runtest-sentinel) check: test @@ -428,13 +428,13 @@ lcov: @genhtml --legend -o lcov-html KeyDB.info | grep lines | awk '{print $$2;}' | sed 's/%//g' test-sds: sds.c sds.h - $(REDIS_CC) sds.c zmalloc.cpp -DSDS_TEST_MAIN $(FINAL_LIBS) -o /tmp/sds_test + $(KEYDB_CC) sds.c zmalloc.cpp -DSDS_TEST_MAIN $(FINAL_LIBS) -o /tmp/sds_test /tmp/sds_test .PHONY: lcov -bench: $(REDIS_BENCHMARK_NAME) - ./$(REDIS_BENCHMARK_NAME) +bench: $(KEYDB_BENCHMARK_NAME) + ./$(KEYDB_BENCHMARK_NAME) 32bit: @echo "" @@ -443,7 +443,7 @@ bench: $(REDIS_BENCHMARK_NAME) $(MAKE) CXXFLAGS="-m32" CFLAGS="-m32" LDFLAGS="-m32" gcov: - $(MAKE) REDIS_CXXFLAGS="-fprofile-arcs -ftest-coverage -DCOVERAGE_TEST" REDIS_CFLAGS="-fprofile-arcs -ftest-coverage -DCOVERAGE_TEST" REDIS_LDFLAGS="-fprofile-arcs -ftest-coverage" + $(MAKE) KEYDB_CXXFLAGS="-fprofile-arcs -ftest-coverage -DCOVERAGE_TEST" KEYDB_CFLAGS="-fprofile-arcs -ftest-coverage -DCOVERAGE_TEST" KEYDB_LDFLAGS="-fprofile-arcs -ftest-coverage" noopt: $(MAKE) OPTIMIZATION="-O0" @@ -459,13 +459,13 @@ src/help.h: install: all @mkdir -p $(INSTALL_BIN) - $(REDIS_INSTALL) $(REDIS_SERVER_NAME) $(INSTALL_BIN) - $(REDIS_INSTALL) $(REDIS_BENCHMARK_NAME) $(INSTALL_BIN) - $(REDIS_INSTALL) $(REDIS_CLI_NAME) $(INSTALL_BIN) - $(REDIS_INSTALL) $(REDIS_CHECK_RDB_NAME) $(INSTALL_BIN) - $(REDIS_INSTALL) $(REDIS_CHECK_AOF_NAME) $(INSTALL_BIN) - $(REDIS_INSTALL) $(REDIS_DIAGNOSTIC_NAME) $(INSTALL_BIN) - @ln -sf $(REDIS_SERVER_NAME) $(INSTALL_BIN)/$(REDIS_SENTINEL_NAME) + $(KEYDB_INSTALL) $(KEYDB_SERVER_NAME) $(INSTALL_BIN) + $(KEYDB_INSTALL) $(KEYDB_BENCHMARK_NAME) $(INSTALL_BIN) + $(KEYDB_INSTALL) $(KEYDB_CLI_NAME) $(INSTALL_BIN) + $(KEYDB_INSTALL) $(KEYDB_CHECK_RDB_NAME) $(INSTALL_BIN) + $(KEYDB_INSTALL) $(KEYDB_CHECK_AOF_NAME) $(INSTALL_BIN) + $(KEYDB_INSTALL) $(KEYDB_DIAGNOSTIC_NAME) $(INSTALL_BIN) + @ln -sf $(KEYDB_SERVER_NAME) $(INSTALL_BIN)/$(KEYDB_SENTINEL_NAME) uninstall: - rm -f $(INSTALL_BIN)/{$(REDIS_SERVER_NAME),$(REDIS_BENCHMARK_NAME),$(REDIS_CLI_NAME),$(REDIS_CHECK_RDB_NAME),$(REDIS_CHECK_AOF_NAME),$(REDIS_SENTINEL_NAME),$(REDIS_DIAGNOSTIC_NAME)} + rm -f $(INSTALL_BIN)/{$(KEYDB_SERVER_NAME),$(KEYDB_BENCHMARK_NAME),$(KEYDB_CLI_NAME),$(KEYDB_CHECK_RDB_NAME),$(KEYDB_CHECK_AOF_NAME),$(KEYDB_SENTINEL_NAME),$(KEYDB_DIAGNOSTIC_NAME)} From 060b9192505d78a16d6da4ae1bf6d1b576617632 Mon Sep 17 00:00:00 2001 From: christianEQ Date: Wed, 28 Apr 2021 16:02:44 +0000 Subject: [PATCH 004/149] fixed diagnostic tool to use correct obj files Former-commit-id: 66547bd28ab025c4d118e6b8d35e2aa0b1f42f10 
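Background for this fix: the previous rule linked $(KEYDB_DIAGNOSTIC_NAME) from $(KEYDB_BENCHMARK_OBJ), whose object list includes redis-benchmark.o, so the resulting binary was effectively another copy of keydb-benchmark. The dedicated $(KEYDB_DIAGNOSTIC_OBJ) list below swaps in keydb-diagnostic-tool.o instead. A quick way to sanity-check the link line after rebuilding (a sketch only; it assumes the Makefile's quiet-output toggle, where V=1 prints full commands instead of the short LINK summary, and an empty PROG_SUFFIX):

    cd src
    make V=1 keydb-diagnostic-tool    # target name assumes an empty PROG_SUFFIX

The verbose output should show keydb-diagnostic-tool.o, not redis-benchmark.o, in the $(KEYDB_LD) invocation.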
--- src/Makefile | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/Makefile b/src/Makefile index 40f1be7cf..5781c2a7d 100644 --- a/src/Makefile +++ b/src/Makefile @@ -304,6 +304,7 @@ KEYDB_BENCHMARK_OBJ=ae.o anet.o redis-benchmark.o adlist.o dict.o zmalloc.o siph KEYDB_CHECK_RDB_NAME=keydb-check-rdb$(PROG_SUFFIX) KEYDB_CHECK_AOF_NAME=keydb-check-aof$(PROG_SUFFIX) KEYDB_DIAGNOSTIC_NAME=keydb-diagnostic-tool$(PROG_SUFFIX) +KEYDB_DIAGNOSTIC_OBJ=ae.o anet.o keydb-diagnostic-tool.o adlist.o dict.o zmalloc.o siphash.o keydb-diagnostic-tool.o storage-lite.o fastlock.o new.o $(ASM_OBJ) all: $(KEYDB_SERVER_NAME) $(KEYDB_SENTINEL_NAME) $(KEYDB_CLI_NAME) $(KEYDB_BENCHMARK_NAME) $(KEYDB_CHECK_RDB_NAME) $(KEYDB_CHECK_AOF_NAME) $(KEYDB_DIAGNOSTIC_NAME) @echo "" @@ -378,7 +379,7 @@ $(KEYDB_BENCHMARK_NAME): $(KEYDB_BENCHMARK_OBJ) $(KEYDB_LD) -o $@ $^ ../deps/hiredis/libhiredis.a $(FINAL_LIBS) # keydb-diagnostic-tool -$(KEYDB_DIAGNOSTIC_NAME): $(KEYDB_BENCHMARK_OBJ) +$(KEYDB_DIAGNOSTIC_NAME): $(KEYDB_DIAGNOSTIC_OBJ) $(KEYDB_LD) -o $@ $^ ../deps/hiredis/libhiredis.a $(FINAL_LIBS) dict-benchmark: dict.cpp zmalloc.cpp sds.c siphash.c From 68cdb2f1ad90a7280bf41051cf55c9c92ea9bb7e Mon Sep 17 00:00:00 2001 From: VivekSainiEQ Date: Tue, 25 May 2021 16:55:47 +0000 Subject: [PATCH 005/149] Initialized serverTL in more places in module.cpp Former-commit-id: 8d81592bec0a62b8f3eae6b8c924887839909e2c --- src/module.cpp | 26 ++++++++++++++++++-------- 1 file changed, 18 insertions(+), 8 deletions(-) diff --git a/src/module.cpp b/src/module.cpp index 183269c16..2877cdeb7 100644 --- a/src/module.cpp +++ b/src/module.cpp @@ -568,6 +568,19 @@ int moduleDelKeyIfEmpty(RedisModuleKey *key) { } } +/* This function is used to set the thread local variables (serverTL) for + * arbitrary module threads. All incoming module threads share the same set of + * thread local variables (modulethreadvar). + * + * This is needed as some KeyDB functions use thread local variables to do things, + * and we don't want to share the thread local variables of existing server threads */ +void moduleSetThreadVariablesIfNeeded(void) { + if (serverTL == nullptr) { + serverTL = &g_pserver->modulethreadvar; + g_fModuleThread = true; + } +} + /* -------------------------------------------------------------------------- * Service API exported to modules * @@ -2113,6 +2126,7 @@ int RM_GetContextFlags(RedisModuleCtx *ctx) { * periodically in timer callbacks or other periodic callbacks. */ int RM_AvoidReplicaTraffic() { + moduleSetThreadVariablesIfNeeded(); return clientsArePaused(); } @@ -2181,9 +2195,11 @@ void *RM_OpenKey(RedisModuleCtx *ctx, robj *keyname, int mode) { /* Destroy a RedisModuleKey struct (freeing is the responsibility of the caller). */ static void moduleCloseKey(RedisModuleKey *key) { int signal = SHOULD_SIGNAL_MODIFIED_KEYS(key->ctx); + moduleAcquireGIL(false); if ((key->mode & REDISMODULE_WRITE) && signal) signalModifiedKey(key->ctx->client,key->db,key->key); /* TODO: if (key->iter) RM_KeyIteratorStop(kp); */ + moduleReleaseGIL(false); RM_ZsetRangeStop(key); decrRefCount(key->key); } @@ -4773,10 +4789,7 @@ int moduleClientIsBlockedOnKeys(client *c) { * RedisModule_BlockClientOnKeys() is accessible from the timeout * callback via RM_GetBlockedClientPrivateData). 
*/ int RM_UnblockClient(RedisModuleBlockedClient *bc, void *privdata) { - if (serverTL == nullptr) { - serverTL = &g_pserver->modulethreadvar; - g_fModuleThread = true; - } + moduleSetThreadVariablesIfNeeded(); if (bc->blocked_on_keys) { /* In theory the user should always pass the timeout handler as an * argument, but better to be safe than sorry. */ @@ -5056,10 +5069,7 @@ void RM_FreeThreadSafeContext(RedisModuleCtx *ctx) { * a blocked client connected to the thread safe context. */ void RM_ThreadSafeContextLock(RedisModuleCtx *ctx) { UNUSED(ctx); - if (serverTL == nullptr) { - serverTL = &g_pserver->modulethreadvar; - g_fModuleThread = true; - } + moduleSetThreadVariablesIfNeeded(); moduleAcquireGIL(FALSE /*fServerThread*/, true /*fExclusive*/); } From 70602db1662de9ba3fceb4fa9002f2fe243ca048 Mon Sep 17 00:00:00 2001 From: christianEQ Date: Tue, 1 Jun 2021 20:54:48 +0000 Subject: [PATCH 006/149] working diag tool prototype Former-commit-id: fefbe96816f6a18ca6f8c8d3794502e6e610650f --- src/keydb-diagnostic-tool.cpp | 1073 +++------------------------------ 1 file changed, 98 insertions(+), 975 deletions(-) diff --git a/src/keydb-diagnostic-tool.cpp b/src/keydb-diagnostic-tool.cpp index 8dea6cdbf..c41995284 100644 --- a/src/keydb-diagnostic-tool.cpp +++ b/src/keydb-diagnostic-tool.cpp @@ -100,7 +100,7 @@ static struct config { char *auth; const char *user; int precision; - int num_threads; + int max_threads; struct benchmarkThread **threads; int cluster_mode; int cluster_node_count; @@ -140,6 +140,7 @@ typedef struct _client { int thread_id; struct clusterNode *cluster_node; int slots_last_update; + redisReply *lastReply; } *client; /* Threads. */ @@ -181,26 +182,11 @@ int g_fInCrash = false; /* Prototypes */ static void writeHandler(aeEventLoop *el, int fd, void *privdata, int mask); -static void createMissingClients(client c); static benchmarkThread *createBenchmarkThread(int index); static void freeBenchmarkThread(benchmarkThread *thread); static void freeBenchmarkThreads(); -static void *execBenchmarkThread(void *ptr); -static clusterNode *createClusterNode(char *ip, int port); -static redisConfig *getRedisConfig(const char *ip, int port, - const char *hostsocket); static redisContext *getRedisContext(const char *ip, int port, const char *hostsocket); -static void freeRedisConfig(redisConfig *cfg); -static int fetchClusterSlotsConfiguration(client c); -static void updateClusterSlotsConfiguration(); -int showThroughput(struct aeEventLoop *eventLoop, long long id, - void *clientData); - -/* Dict callbacks */ -static uint64_t dictSdsHash(const void *key); -static int dictSdsKeyCompare(void *privdata, const void *key1, - const void *key2); /* Implementation */ static long long ustime(void) { @@ -213,32 +199,6 @@ static long long ustime(void) { return ust; } -static long long mstime(void) { - struct timeval tv; - long long mst; - - gettimeofday(&tv, NULL); - mst = ((long long)tv.tv_sec)*1000; - mst += tv.tv_usec/1000; - return mst; -} - -static uint64_t dictSdsHash(const void *key) { - return dictGenHashFunction((unsigned char*)key, sdslen((char*)key)); -} - -static int dictSdsKeyCompare(void *privdata, const void *key1, - const void *key2) -{ - int l1,l2; - DICT_NOTUSED(privdata); - - l1 = sdslen((sds)key1); - l2 = sdslen((sds)key2); - if (l1 != l2) return 0; - return memcmp(key1, key2, l1) == 0; -} - /* _serverAssert is needed by dict */ extern "C" void _serverAssert(const char *estr, const char *file, int line) { fprintf(stderr, "=== ASSERTION FAILED ==="); @@ -292,58 +252,6 @@ cleanup: 
return NULL; } -static redisConfig *getRedisConfig(const char *ip, int port, - const char *hostsocket) -{ - redisConfig *cfg = (redisConfig*)zcalloc(sizeof(*cfg)); - if (!cfg) return NULL; - redisContext *c = NULL; - redisReply *reply = NULL, *sub_reply = NULL; - c = getRedisContext(ip, port, hostsocket); - if (c == NULL) { - freeRedisConfig(cfg); - return NULL; - } - redisAppendCommand(c, "CONFIG GET %s", "save"); - redisAppendCommand(c, "CONFIG GET %s", "appendonly"); - - void *r; - for (int i=0; i < 2; i++) { - int res = redisGetReply(c, &r); - if (reply) freeReplyObject(reply); - reply = res == REDIS_OK ? ((redisReply *) r) : NULL; - if (res != REDIS_OK || !r) goto fail; - if (reply->type == REDIS_REPLY_ERROR) { - fprintf(stderr, "ERROR: %s\n", reply->str); - goto fail; - } - if (reply->type != REDIS_REPLY_ARRAY || reply->elements < 2) goto fail; - sub_reply = reply->element[1]; - const char *value = sub_reply->str; - if (!value) value = ""; - switch (i) { - case 0: cfg->save = sdsnew(value); break; - case 1: cfg->appendonly = sdsnew(value); break; - } - } - freeReplyObject(reply); - redisFree(c); - return cfg; -fail: - fprintf(stderr, "ERROR: failed to fetch CONFIG from "); - if (hostsocket == NULL) fprintf(stderr, "%s:%d\n", ip, port); - else fprintf(stderr, "%s\n", hostsocket); - freeReplyObject(reply); - redisFree(c); - freeRedisConfig(cfg); - return NULL; -} -static void freeRedisConfig(redisConfig *cfg) { - if (cfg->save) sdsfree(cfg->save); - if (cfg->appendonly) sdsfree(cfg->appendonly); - zfree(cfg); -} - static void freeClient(client c) { aeEventLoop *el = CLIENT_GET_EVENTLOOP(c); listNode *ln; @@ -361,12 +269,12 @@ static void freeClient(client c) { zfree(c->randptr); zfree(c->stagptr); zfree(c); - if (config.num_threads) pthread_mutex_lock(&(config.liveclients_mutex)); + if (config.max_threads) pthread_mutex_lock(&(config.liveclients_mutex)); config.liveclients--; ln = listSearchKey(config.clients,c); assert(ln != NULL); listDelNode(config.clients,ln); - if (config.num_threads) pthread_mutex_unlock(&(config.liveclients_mutex)); + if (config.max_threads) pthread_mutex_unlock(&(config.liveclients_mutex)); } static void freeAllClients(void) { @@ -406,53 +314,6 @@ static void randomizeClientKey(client c) { } } -static void setClusterKeyHashTag(client c) { - assert(c->thread_id >= 0); - clusterNode *node = c->cluster_node; - assert(node); - assert(node->current_slot_index < node->slots_count); - int is_updating_slots = 0; - atomicGet(config.is_updating_slots, is_updating_slots); - /* If updateClusterSlotsConfiguration is updating the slots array, - * call updateClusterSlotsConfiguration is order to block the thread - * since the mutex is locked. When the slots will be updated by the - * thread that's actually performing the update, the execution of - * updateClusterSlotsConfiguration won't actually do anything, since - * the updated_slots_count array will be already NULL. */ - if (is_updating_slots) updateClusterSlotsConfiguration(); - int slot = node->slots[node->current_slot_index]; - const char *tag = crc16_slot_table[slot]; - int taglen = strlen(tag); - size_t i; - for (i = 0; i < c->staglen; i++) { - char *p = c->stagptr[i] + 1; - p[0] = tag[0]; - p[1] = (taglen >= 2 ? tag[1] : '}'); - p[2] = (taglen == 3 ? 
tag[2] : '}'); - } -} - -static void clientDone(client c) { - int requests_finished = 0; - atomicGet(config.requests_finished, requests_finished); - if (requests_finished >= config.requests) { - freeClient(c); - if (!config.num_threads && config.el) aeStop(config.el); - return; - } - if (config.keepalive) { - resetClient(c); - } else { - if (config.num_threads) pthread_mutex_lock(&(config.liveclients_mutex)); - config.liveclients--; - createMissingClients(c); - config.liveclients++; - if (config.num_threads) - pthread_mutex_unlock(&(config.liveclients_mutex)); - freeClient(c); - } -} - static void readHandler(aeEventLoop *el, int fd, void *privdata, int mask) { client c = (client)privdata; void *reply = NULL; @@ -497,29 +358,6 @@ static void readHandler(aeEventLoop *el, int fd, void *privdata, int mask) { } } - /* Try to update slots configuration if reply error is - * MOVED/ASK/CLUSTERDOWN and the key(s) used by the command - * contain(s) the slot hash tag. */ - if (is_err && c->cluster_node && c->staglen) { - int fetch_slots = 0, do_wait = 0; - if (!strncmp(r->str,"MOVED",5) || !strncmp(r->str,"ASK",3)) - fetch_slots = 1; - else if (!strncmp(r->str,"CLUSTERDOWN",11)) { - /* Usually the cluster is able to recover itself after - * a CLUSTERDOWN error, so try to sleep one second - * before requesting the new configuration. */ - fetch_slots = 1; - do_wait = 1; - printf("Error from server %s:%d: %s\n", - c->cluster_node->ip, - c->cluster_node->port, - r->str); - } - if (do_wait) sleep(1); - if (fetch_slots && !fetchClusterSlotsConfiguration(c)) - exit(1); - } - freeReplyObject(reply); /* This is an OK for prefix commands such as auth and select.*/ if (c->prefix_pending > 0) { @@ -543,7 +381,7 @@ static void readHandler(aeEventLoop *el, int fd, void *privdata, int mask) { config.latency[requests_finished] = c->latency; c->pending--; if (c->pending == 0) { - clientDone(c); + resetClient(c); break; } } else { @@ -571,7 +409,6 @@ static void writeHandler(aeEventLoop *el, int fd, void *privdata, int mask) { /* Really initialize: randomize keys and set start time. 
*/ if (config.randomkeys) randomizeClientKey(c); - if (config.cluster_mode && c->staglen > 0) setClusterKeyHashTag(c); atomicGet(config.slots_last_update, c->slots_last_update); c->start = ustime(); c->latency = -1; @@ -628,7 +465,7 @@ static client createClient(const char *cmd, size_t len, client from, int thread_ port = config.hostport; } else { int node_idx = 0; - if (config.num_threads < config.cluster_node_count) + if (config.max_threads < config.cluster_node_count) node_idx = config.liveclients % config.cluster_node_count; else node_idx = thread_id % config.cluster_node_count; @@ -783,149 +620,16 @@ static client createClient(const char *cmd, size_t len, client from, int thread_ return c; } -static void createMissingClients(client c) { - int n = 0; - while(config.liveclients < config.numclients) { - int thread_id = -1; - if (config.num_threads) - thread_id = config.liveclients % config.num_threads; - createClient(NULL,0,c,thread_id); - - /* Listen backlog is quite limited on most systems */ - if (++n > 64) { - usleep(50000); - n = 0; - } - } -} - -static int compareLatency(const void *a, const void *b) { - return (*(long long*)a)-(*(long long*)b); -} - -static int ipow(int base, int exp) { - int result = 1; - while (exp) { - if (exp & 1) result *= base; - exp /= 2; - base *= base; - } - return result; -} - -static void showLatencyReport(void) { - int i, curlat = 0; - int usbetweenlat = ipow(10, MAX_LATENCY_PRECISION-config.precision); - float perc, reqpersec; - - reqpersec = (float)config.requests_finished/((float)config.totlatency/1000); - if (!config.quiet && !config.csv) { - printf("====== %s ======\n", config.title); - printf(" %d requests completed in %.2f seconds\n", config.requests_finished, - (float)config.totlatency/1000); - printf(" %d parallel clients\n", config.numclients); - printf(" %d bytes payload\n", config.datasize); - printf(" keep alive: %d\n", config.keepalive); - if (config.cluster_mode) { - printf(" cluster mode: yes (%d masters)\n", - config.cluster_node_count); - int m ; - for (m = 0; m < config.cluster_node_count; m++) { - clusterNode *node = config.cluster_nodes[m]; - redisConfig *cfg = node->redis_config; - if (cfg == NULL) continue; - printf(" node [%d] configuration:\n",m ); - printf(" save: %s\n", - sdslen(cfg->save) ? cfg->save : "NONE"); - printf(" appendonly: %s\n", cfg->appendonly); - } - } else { - if (config.redis_config) { - printf(" host configuration \"save\": %s\n", - config.redis_config->save); - printf(" host configuration \"appendonly\": %s\n", - config.redis_config->appendonly); - } - } - printf(" multi-thread: %s\n", (config.num_threads ? "yes" : "no")); - if (config.num_threads) - printf(" threads: %d\n", config.num_threads); - - printf("\n"); - - qsort(config.latency,config.requests,sizeof(long long),compareLatency); - for (i = 0; i < config.requests; i++) { - if (config.latency[i]/usbetweenlat != curlat || - i == (config.requests-1)) - { - /* After the 2 milliseconds latency to have percentages split - * by decimals will just add a lot of noise to the output. 
*/ - if (config.latency[i] >= 2000) { - config.precision = 0; - usbetweenlat = ipow(10, - MAX_LATENCY_PRECISION-config.precision); - } - - curlat = config.latency[i]/usbetweenlat; - perc = ((float)(i+1)*100)/config.requests; - printf("%.2f%% <= %.*f milliseconds\n", perc, config.precision, - curlat/pow(10.0, config.precision)); - } - } - printf("%.2f requests per second\n\n", reqpersec); - } else if (config.csv) { - printf("\"%s\",\"%.2f\"\n", config.title, reqpersec); - } else { - printf("%s: %.2f requests per second\n", config.title, reqpersec); - } -} - static void initBenchmarkThreads() { int i; if (config.threads) freeBenchmarkThreads(); - config.threads = (benchmarkThread**)zmalloc(config.num_threads * sizeof(benchmarkThread*), MALLOC_LOCAL); - for (i = 0; i < config.num_threads; i++) { + config.threads = (benchmarkThread**)zmalloc(config.max_threads * sizeof(benchmarkThread*), MALLOC_LOCAL); + for (i = 0; i < config.max_threads; i++) { benchmarkThread *thread = createBenchmarkThread(i); config.threads[i] = thread; } } -static void startBenchmarkThreads() { - int i; - for (i = 0; i < config.num_threads; i++) { - benchmarkThread *t = config.threads[i]; - if (pthread_create(&(t->thread), NULL, execBenchmarkThread, t)){ - fprintf(stderr, "FATAL: Failed to start thread %d.\n", i); - exit(1); - } - } - for (i = 0; i < config.num_threads; i++) - pthread_join(config.threads[i]->thread, NULL); -} - -static void benchmark(const char *title, const char *cmd, int len) { - client c; - - config.title = title; - config.requests_issued = 0; - config.requests_finished = 0; - - if (config.num_threads) initBenchmarkThreads(); - - int thread_id = config.num_threads > 0 ? 0 : -1; - c = createClient(cmd,len,NULL,thread_id); - createMissingClients(c); - - config.start = mstime(); - if (!config.num_threads) aeMain(config.el); - else startBenchmarkThreads(); - config.totlatency = mstime()-config.start; - - showLatencyReport(); - freeAllClients(); - if (config.threads) freeBenchmarkThreads(); -} - /* Thread functions. */ static benchmarkThread *createBenchmarkThread(int index) { @@ -933,7 +637,6 @@ static benchmarkThread *createBenchmarkThread(int index) { if (thread == NULL) return NULL; thread->index = index; thread->el = aeCreateEventLoop(1024*10); - aeCreateTimeEvent(thread->el,1,showThroughput,NULL,NULL); return thread; } @@ -944,7 +647,7 @@ static void freeBenchmarkThread(benchmarkThread *thread) { static void freeBenchmarkThreads() { int i = 0; - for (; i < config.num_threads; i++) { + for (; i < config.max_threads; i++) { benchmarkThread *thread = config.threads[i]; if (thread) freeBenchmarkThread(thread); } @@ -958,360 +661,40 @@ static void *execBenchmarkThread(void *ptr) { return NULL; } -/* Cluster helper functions. 
*/ - -static clusterNode *createClusterNode(char *ip, int port) { - clusterNode *node = (clusterNode*)zmalloc(sizeof(*node), MALLOC_LOCAL); - if (!node) return NULL; - node->ip = ip; - node->port = port; - node->name = NULL; - node->flags = 0; - node->replicate = NULL; - node->replicas_count = 0; - node->slots = (int*)zmalloc(CLUSTER_SLOTS * sizeof(int), MALLOC_LOCAL); - node->slots_count = 0; - node->current_slot_index = 0; - node->updated_slots = NULL; - node->updated_slots_count = 0; - node->migrating = NULL; - node->importing = NULL; - node->migrating_count = 0; - node->importing_count = 0; - node->redis_config = NULL; - return node; -} - -static void freeClusterNode(clusterNode *node) { - int i; - if (node->name) sdsfree(node->name); - if (node->replicate) sdsfree(node->replicate); - if (node->migrating != NULL) { - for (i = 0; i < node->migrating_count; i++) sdsfree(node->migrating[i]); - zfree(node->migrating); - } - if (node->importing != NULL) { - for (i = 0; i < node->importing_count; i++) sdsfree(node->importing[i]); - zfree(node->importing); - } - /* If the node is not the reference node, that uses the address from - * config.hostip and config.hostport, then the node ip has been - * allocated by fetchClusterConfiguration, so it must be freed. */ - if (node->ip && strcmp(node->ip, config.hostip) != 0) sdsfree(node->ip); - if (node->redis_config != NULL) freeRedisConfig(node->redis_config); - zfree(node->slots); - zfree(node); -} - -static void freeClusterNodes() { - int i = 0; - for (; i < config.cluster_node_count; i++) { - clusterNode *n = config.cluster_nodes[i]; - if (n) freeClusterNode(n); - } - zfree(config.cluster_nodes); +void initConfigDefaults() { + config.numclients = 50; + config.requests = 100000; + config.liveclients = 0; + config.el = aeCreateEventLoop(1024*10); + config.keepalive = 1; + config.datasize = 3; + config.pipeline = 1; + config.showerrors = 0; + config.randomkeys = 0; + config.randomkeys_keyspacelen = 0; + config.quiet = 0; + config.csv = 0; + config.loop = 0; + config.idlemode = 0; + config.latency = NULL; + config.clients = listCreate(); + config.hostip = "127.0.0.1"; + config.hostport = 6379; + config.hostsocket = NULL; + config.tests = NULL; + config.dbnum = 0; + config.auth = NULL; + config.precision = 1; + config.max_threads = MAX_THREADS; + config.threads = NULL; + config.cluster_mode = 0; + config.cluster_node_count = 0; config.cluster_nodes = NULL; -} - -static clusterNode **addClusterNode(clusterNode *node) { - int count = config.cluster_node_count + 1; - config.cluster_nodes = (clusterNode**)zrealloc(config.cluster_nodes, - count * sizeof(*node), MALLOC_LOCAL); - if (!config.cluster_nodes) return NULL; - config.cluster_nodes[config.cluster_node_count++] = node; - return config.cluster_nodes; -} - -static int fetchClusterConfiguration() { - int success = 1; - redisContext *ctx = NULL; - redisReply *reply = NULL; - char *lines = NULL; - char *line = NULL; - char *p = NULL; - ctx = getRedisContext(config.hostip, config.hostport, config.hostsocket); - if (ctx == NULL) { - exit(1); - } - clusterNode *firstNode = createClusterNode((char *) config.hostip, - config.hostport); - if (!firstNode) {success = 0; goto cleanup;} - reply = (redisReply*)redisCommand(ctx, "CLUSTER NODES"); - success = (reply != NULL); - if (!success) goto cleanup; - success = (reply->type != REDIS_REPLY_ERROR); - if (!success) { - if (config.hostsocket == NULL) { - fprintf(stderr, "Cluster node %s:%d replied with error:\n%s\n", - config.hostip, config.hostport, reply->str); - 
} else { - fprintf(stderr, "Cluster node %s replied with error:\n%s\n", - config.hostsocket, reply->str); - } - goto cleanup; - } - lines = reply->str; - while ((p = strstr(lines, "\n")) != NULL) { - *p = '\0'; - line = lines; - lines = p + 1; - char *name = NULL, *addr = NULL, *flags = NULL, *master_id = NULL; - int i = 0; - while ((p = strchr(line, ' ')) != NULL) { - *p = '\0'; - char *token = line; - line = p + 1; - switch(i++){ - case 0: name = token; break; - case 1: addr = token; break; - case 2: flags = token; break; - case 3: master_id = token; break; - } - if (i == 8) break; // Slots - } - if (!flags) { - fprintf(stderr, "Invalid CLUSTER NODES reply: missing flags.\n"); - success = 0; - goto cleanup; - } - int myself = (strstr(flags, "myself") != NULL); - int is_replica = (strstr(flags, "slave") != NULL || - (master_id != NULL && master_id[0] != '-')); - if (is_replica) continue; - if (addr == NULL) { - fprintf(stderr, "Invalid CLUSTER NODES reply: missing addr.\n"); - success = 0; - goto cleanup; - } - clusterNode *node = NULL; - char *ip = NULL; - int port = 0; - char *paddr = strchr(addr, ':'); - if (paddr != NULL) { - *paddr = '\0'; - ip = addr; - addr = paddr + 1; - /* If internal bus is specified, then just drop it. */ - if ((paddr = strchr(addr, '@')) != NULL) *paddr = '\0'; - port = atoi(addr); - } - if (myself) { - node = firstNode; - if (node->ip == NULL && ip != NULL) { - node->ip = ip; - node->port = port; - } - } else { - node = createClusterNode(sdsnew(ip), port); - } - if (node == NULL) { - success = 0; - goto cleanup; - } - if (name != NULL) node->name = sdsnew(name); - if (i == 8) { - int remaining = strlen(line); - while (remaining > 0) { - p = strchr(line, ' '); - if (p == NULL) p = line + remaining; - remaining -= (p - line); - - char *slotsdef = line; - *p = '\0'; - if (remaining) { - line = p + 1; - remaining--; - } else line = p; - char *dash = NULL; - if (slotsdef[0] == '[') { - slotsdef++; - if ((p = strstr(slotsdef, "->-"))) { // Migrating - *p = '\0'; - p += 3; - char *closing_bracket = strchr(p, ']'); - if (closing_bracket) *closing_bracket = '\0'; - sds slot = sdsnew(slotsdef); - sds dst = sdsnew(p); - node->migrating_count += 2; - node->migrating = - (char**)zrealloc(node->migrating, - (node->migrating_count * sizeof(sds)), MALLOC_LOCAL); - node->migrating[node->migrating_count - 2] = - slot; - node->migrating[node->migrating_count - 1] = - dst; - } else if ((p = strstr(slotsdef, "-<-"))) {//Importing - *p = '\0'; - p += 3; - char *closing_bracket = strchr(p, ']'); - if (closing_bracket) *closing_bracket = '\0'; - sds slot = sdsnew(slotsdef); - sds src = sdsnew(p); - node->importing_count += 2; - node->importing = (char**)zrealloc(node->importing, - (node->importing_count * sizeof(sds)), MALLOC_LOCAL); - node->importing[node->importing_count - 2] = - slot; - node->importing[node->importing_count - 1] = - src; - } - } else if ((dash = strchr(slotsdef, '-')) != NULL) { - p = dash; - int start, stop; - *p = '\0'; - start = atoi(slotsdef); - stop = atoi(p + 1); - while (start <= stop) { - int slot = start++; - node->slots[node->slots_count++] = slot; - } - } else if (p > slotsdef) { - int slot = atoi(slotsdef); - node->slots[node->slots_count++] = slot; - } - } - } - if (node->slots_count == 0) { - printf("WARNING: master node %s:%d has no slots, skipping...\n", - node->ip, node->port); - continue; - } - if (!addClusterNode(node)) { - success = 0; - goto cleanup; - } - } -cleanup: - if (ctx) redisFree(ctx); - if (!success) { - if (config.cluster_nodes) 
freeClusterNodes(); - } - if (reply) freeReplyObject(reply); - return success; -} - -/* Request the current cluster slots configuration by calling CLUSTER SLOTS - * and atomically update the slots after a successful reply. */ -static int fetchClusterSlotsConfiguration(client c) { - UNUSED(c); - int success = 1, is_fetching_slots = 0, last_update = 0; - size_t i; - atomicGet(config.slots_last_update, last_update); - if (c->slots_last_update < last_update) { - c->slots_last_update = last_update; - return -1; - } - redisReply *reply = NULL; - atomicGetIncr(config.is_fetching_slots, is_fetching_slots, 1); - if (is_fetching_slots) return -1; //TODO: use other codes || errno ? - atomicSet(config.is_fetching_slots, 1); - if (config.showerrors) - printf("Cluster slots configuration changed, fetching new one...\n"); - const char *errmsg = "Failed to update cluster slots configuration"; - static dictType dtype = { - dictSdsHash, /* hash function */ - NULL, /* key dup */ - NULL, /* val dup */ - dictSdsKeyCompare, /* key compare */ - NULL, /* key destructor */ - NULL /* val destructor */ - }; - /* printf("[%d] fetchClusterSlotsConfiguration\n", c->thread_id); */ - dict *masters = dictCreate(&dtype, NULL); - redisContext *ctx = NULL; - for (i = 0; i < (size_t) config.cluster_node_count; i++) { - clusterNode *node = config.cluster_nodes[i]; - assert(node->ip != NULL); - assert(node->name != NULL); - assert(node->port); - /* Use first node as entry point to connect to. */ - if (ctx == NULL) { - ctx = getRedisContext(node->ip, node->port, NULL); - if (!ctx) { - success = 0; - goto cleanup; - } - } - if (node->updated_slots != NULL) - zfree(node->updated_slots); - node->updated_slots = NULL; - node->updated_slots_count = 0; - dictReplace(masters, node->name, node) ; - } - reply = (redisReply*)redisCommand(ctx, "CLUSTER SLOTS"); - if (reply == NULL || reply->type == REDIS_REPLY_ERROR) { - success = 0; - if (reply) - fprintf(stderr,"%s\nCLUSTER SLOTS ERROR: %s\n",errmsg,reply->str); - goto cleanup; - } - assert(reply->type == REDIS_REPLY_ARRAY); - for (i = 0; i < reply->elements; i++) { - redisReply *r = reply->element[i]; - assert(r->type == REDIS_REPLY_ARRAY); - assert(r->elements >= 3); - int from, to, slot; - from = r->element[0]->integer; - to = r->element[1]->integer; - redisReply *nr = r->element[2]; - assert(nr->type == REDIS_REPLY_ARRAY && nr->elements >= 3); - assert(nr->element[2]->str != NULL); - sds name = sdsnew(nr->element[2]->str); - dictEntry *entry = dictFind(masters, name); - if (entry == NULL) { - success = 0; - fprintf(stderr, "%s: could not find node with ID %s in current " - "configuration.\n", errmsg, name); - if (name) sdsfree(name); - goto cleanup; - } - sdsfree(name); - clusterNode *node = (clusterNode*)dictGetVal(entry); - if (node->updated_slots == NULL) - node->updated_slots = (int*)zcalloc(CLUSTER_SLOTS * sizeof(int), MALLOC_LOCAL); - for (slot = from; slot <= to; slot++) - node->updated_slots[node->updated_slots_count++] = slot; - } - updateClusterSlotsConfiguration(); -cleanup: - freeReplyObject(reply); - redisFree(ctx); - dictRelease(masters); - atomicSet(config.is_fetching_slots, 0); - return success; -} - -/* Atomically update the new slots configuration. 
*/ -static void updateClusterSlotsConfiguration() { - pthread_mutex_lock(&config.is_updating_slots_mutex); - atomicSet(config.is_updating_slots, 1); - int i; - for (i = 0; i < config.cluster_node_count; i++) { - clusterNode *node = config.cluster_nodes[i]; - if (node->updated_slots != NULL) { - int *oldslots = node->slots; - node->slots = node->updated_slots; - node->slots_count = node->updated_slots_count; - node->current_slot_index = 0; - node->updated_slots = NULL; - node->updated_slots_count = 0; - zfree(oldslots); - } - } - atomicSet(config.is_updating_slots, 0); - atomicIncr(config.slots_last_update, 1); - pthread_mutex_unlock(&config.is_updating_slots_mutex); -} - -/* Generate random data for redis benchmark. See #7196. */ -static void genBenchmarkRandomData(char *data, int count) { - static uint32_t state = 1234; - int i = 0; - - while (count--) { - state = (state*1103515245+12345); - data[i++] = '0'+((state>>16)&63); - } + config.redis_config = NULL; + config.is_fetching_slots = 0; + config.is_updating_slots = 0; + config.slots_last_update = 0; + config.enable_tracking = 0; } /* Returns number of consumed options. */ @@ -1399,12 +782,12 @@ int parseOptions(int argc, const char **argv) { if (config.precision > MAX_LATENCY_PRECISION) config.precision = MAX_LATENCY_PRECISION; } else if (!strcmp(argv[i],"--threads")) { if (lastarg) goto invalid; - config.num_threads = atoi(argv[++i]); - if (config.num_threads > MAX_THREADS) { + config.max_threads = atoi(argv[++i]); + if (config.max_threads > MAX_THREADS) { printf("WARNING: too many threads, limiting threads to %d.\n", MAX_THREADS); - config.num_threads = MAX_THREADS; - } else if (config.num_threads < 0) config.num_threads = 0; + config.max_threads = MAX_THREADS; + } else if (config.max_threads < 0) config.max_threads = 0; } else if (!strcmp(argv[i],"--cluster")) { config.cluster_mode = 1; } else if (!strcmp(argv[i],"--enable-tracking")) { @@ -1478,98 +861,26 @@ usage: exit(exit_status); } -int showThroughput(struct aeEventLoop *eventLoop, long long id, void *clientData) { - UNUSED(eventLoop); - UNUSED(id); - UNUSED(clientData); - int liveclients = 0; - int requests_finished = 0; - atomicGet(config.liveclients, liveclients); - atomicGet(config.requests_finished, requests_finished); - - if (liveclients == 0 && requests_finished != config.requests) { - fprintf(stderr,"All clients disconnected... aborting.\n"); - exit(1); - } - if (config.num_threads && requests_finished >= config.requests) { - aeStop(eventLoop); - return AE_NOMORE; - } - if (config.csv) return 250; - if (config.idlemode == 1) { - printf("clients: %d\r", config.liveclients); - fflush(stdout); - return 250; - } - float dt = (float)(mstime()-config.start)/1000.0; - float rps = (float)requests_finished/dt; - printf("%s: %.2f\r", config.title, rps); - fflush(stdout); - return 250; /* every 250ms */ -} - -/* Return true if the named test was selected using the -t command line - * switch, or if all the tests are selected (no -t passed by user). 
*/ -int test_is_selected(const char *name) { - char buf[256]; - int l = strlen(name); - - if (config.tests == NULL) return 1; - buf[0] = ','; - memcpy(buf+1,name,l); - buf[l+1] = ','; - buf[l+2] = '\0'; - return strstr(config.tests,buf) != NULL; +int extractPropertyFromInfo(const char *info, const char *key, double &val) { + char *line = strstr((char*)info, key); + if (line == nullptr) return 1; + line += strlen(key) + 1; // Skip past key name and following colon + char *newline = strchr(line, '\n'); + *newline = 0; // Terminate string after relevant line + val = strtod(line, nullptr); + return 0; } int main(int argc, const char **argv) { int i; - char *data, *cmd; - const char *tag; - int len; - - client c; - + storage_init(NULL, 0); srandom(time(NULL)); signal(SIGHUP, SIG_IGN); signal(SIGPIPE, SIG_IGN); - config.numclients = 50; - config.requests = 100000; - config.liveclients = 0; - config.el = aeCreateEventLoop(1024*10); - aeCreateTimeEvent(config.el,1,showThroughput,NULL,NULL); - config.keepalive = 1; - config.datasize = 3; - config.pipeline = 1; - config.showerrors = 0; - config.randomkeys = 0; - config.randomkeys_keyspacelen = 0; - config.quiet = 0; - config.csv = 0; - config.loop = 0; - config.idlemode = 0; - config.latency = NULL; - config.clients = listCreate(); - config.hostip = "127.0.0.1"; - config.hostport = 6379; - config.hostsocket = NULL; - config.tests = NULL; - config.dbnum = 0; - config.auth = NULL; - config.precision = 1; - config.num_threads = 0; - config.threads = NULL; - config.cluster_mode = 0; - config.cluster_node_count = 0; - config.cluster_nodes = NULL; - config.redis_config = NULL; - config.is_fetching_slots = 0; - config.is_updating_slots = 0; - config.slots_last_update = 0; - config.enable_tracking = 0; + initConfigDefaults(); i = parseOptions(argc,argv); argc -= i; @@ -1577,58 +888,7 @@ int main(int argc, const char **argv) { config.latency = (long long*)zmalloc(sizeof(long long)*config.requests, MALLOC_LOCAL); - tag = ""; - - if (config.cluster_mode) { - // We only include the slot placeholder {tag} if cluster mode is enabled - tag = ":{tag}"; - - /* Fetch cluster configuration. */ - if (!fetchClusterConfiguration() || !config.cluster_nodes) { - if (!config.hostsocket) { - fprintf(stderr, "Failed to fetch cluster configuration from " - "%s:%d\n", config.hostip, config.hostport); - } else { - fprintf(stderr, "Failed to fetch cluster configuration from " - "%s\n", config.hostsocket); - } - exit(1); - } - if (config.cluster_node_count <= 1) { - fprintf(stderr, "Invalid cluster: %d node(s).\n", - config.cluster_node_count); - exit(1); - } - printf("Cluster has %d master nodes:\n\n", config.cluster_node_count); - int i = 0; - for (; i < config.cluster_node_count; i++) { - clusterNode *node = config.cluster_nodes[i]; - if (!node) { - fprintf(stderr, "Invalid cluster node #%d\n", i); - exit(1); - } - printf("Master %d: ", i); - if (node->name) printf("%s ", node->name); - printf("%s:%d\n", node->ip, node->port); - node->redis_config = getRedisConfig(node->ip, node->port, NULL); - if (node->redis_config == NULL) { - fprintf(stderr, "WARN: could not fetch node CONFIG %s:%d\n", - node->ip, node->port); - } - } - printf("\n"); - /* Automatically set thread number to node count if not specified - * by the user. 
*/ - if (config.num_threads == 0) - config.num_threads = config.cluster_node_count; - } else { - config.redis_config = - getRedisConfig(config.hostip, config.hostport, config.hostsocket); - if (config.redis_config == NULL) - fprintf(stderr, "WARN: could not fetch server CONFIG\n"); - } - - if (config.num_threads > 0) { + if (config.max_threads > 0) { int err = 0; err |= pthread_mutex_init(&(config.requests_issued_mutex), NULL); err |= pthread_mutex_init(&(config.requests_finished_mutex), NULL); @@ -1644,187 +904,50 @@ int main(int argc, const char **argv) { } } - if (config.keepalive == 0) { - printf("WARNING: keepalive disabled, you probably need 'echo 1 > /proc/sys/net/ipv4/tcp_tw_reuse' for Linux and 'sudo sysctl -w net.inet.tcp.msl=1000' for Mac OS X in order to use a lot of clients/requests\n"); + const char *set_value = "abcdefghijklmnopqrstuvwxyz"; + int threads_used = 0; + char command[63]; + + initBenchmarkThreads(); + redisContext *ctx = getRedisContext(config.hostip, config.hostport, config.hostsocket); + double cpu_usage; + + while (threads_used < config.max_threads) { + printf("Creating clients for thread %d...\n", threads_used); + for (int i = 0; i < config.numclients; i++) { + sprintf(command, "SET %d %s\r\n", threads_used * config.numclients + i, set_value); + createClient(command, strlen(command), NULL,threads_used); + } + + printf("Starting thread %d\n", threads_used); + + benchmarkThread *t = config.threads[threads_used]; + if (pthread_create(&(t->thread), NULL, execBenchmarkThread, t)){ + fprintf(stderr, "FATAL: Failed to start thread %d.\n", threads_used); + exit(1); + } + threads_used++; + + sleep(1); + + redisReply *reply = (redisReply*)redisCommand(ctx, "INFO"); + if (reply->type != REDIS_REPLY_STRING) { + freeReplyObject(reply); + printf("Error executing INFO command. Exiting.\r\n"); + break; + } + if (extractPropertyFromInfo(reply->str, "used_cpu_sys", cpu_usage)) { + printf("Error reading CPU usage from INFO command. Exiting.\r\n"); + break; + } + printf("CPU Usage: %f\r\n", cpu_usage); + freeReplyObject(reply); } - if (config.idlemode) { - printf("Creating %d idle connections and waiting forever (Ctrl+C when done)\n", config.numclients); - int thread_id = -1, use_threads = (config.num_threads > 0); - if (use_threads) { - thread_id = 0; - initBenchmarkThreads(); - } - c = createClient("",0,NULL,thread_id); /* will never receive a reply */ - createMissingClients(c); - if (use_threads) startBenchmarkThreads(); - else aeMain(config.el); - /* and will wait for every */ - } + printf("Done.\n"); - /* Run benchmark with command in the remainder of the arguments. */ - if (argc) { - sds title = sdsnew(argv[0]); - for (i = 1; i < argc; i++) { - title = sdscatlen(title, " ", 1); - title = sdscatlen(title, (char*)argv[i], strlen(argv[i])); - } - - do { - len = redisFormatCommandArgv(&cmd,argc,argv,NULL); - benchmark(title,cmd,len); - free(cmd); - } while(config.loop); - - if (config.redis_config != NULL) freeRedisConfig(config.redis_config); - return 0; - } - - /* Run default benchmark suite. 
*/ - data = (char*)zmalloc(config.datasize+1, MALLOC_LOCAL); - do { - genBenchmarkRandomData(data, config.datasize); - data[config.datasize] = '\0'; - - if (test_is_selected("ping_inline") || test_is_selected("ping")) - benchmark("PING_INLINE","PING\r\n",6); - - if (test_is_selected("ping_mbulk") || test_is_selected("ping")) { - len = redisFormatCommand(&cmd,"PING"); - benchmark("PING_BULK",cmd,len); - free(cmd); - } - - if (test_is_selected("set")) { - len = redisFormatCommand(&cmd,"SET key%s:__rand_int__ %s",tag,data); - benchmark("SET",cmd,len); - free(cmd); - } - - if (test_is_selected("get")) { - len = redisFormatCommand(&cmd,"GET key%s:__rand_int__",tag); - benchmark("GET",cmd,len); - free(cmd); - } - - if (test_is_selected("incr")) { - len = redisFormatCommand(&cmd,"INCR counter%s:__rand_int__",tag); - benchmark("INCR",cmd,len); - free(cmd); - } - - if (test_is_selected("lpush")) { - len = redisFormatCommand(&cmd,"LPUSH mylist%s %s",tag,data); - benchmark("LPUSH",cmd,len); - free(cmd); - } - - if (test_is_selected("rpush")) { - len = redisFormatCommand(&cmd,"RPUSH mylist%s %s",tag,data); - benchmark("RPUSH",cmd,len); - free(cmd); - } - - if (test_is_selected("lpop")) { - len = redisFormatCommand(&cmd,"LPOP mylist%s",tag); - benchmark("LPOP",cmd,len); - free(cmd); - } - - if (test_is_selected("rpop")) { - len = redisFormatCommand(&cmd,"RPOP mylist%s",tag); - benchmark("RPOP",cmd,len); - free(cmd); - } - - if (test_is_selected("sadd")) { - len = redisFormatCommand(&cmd, - "SADD myset%s element:__rand_int__",tag); - benchmark("SADD",cmd,len); - free(cmd); - } - - if (test_is_selected("hset")) { - len = redisFormatCommand(&cmd, - "HSET myhash%s element:__rand_int__ %s",tag,data); - benchmark("HSET",cmd,len); - free(cmd); - } - - if (test_is_selected("spop")) { - len = redisFormatCommand(&cmd,"SPOP myset%s",tag); - benchmark("SPOP",cmd,len); - free(cmd); - } - - if (test_is_selected("zadd")) { - const char *score = "0"; - if (config.randomkeys) score = "__rand_int__"; - len = redisFormatCommand(&cmd, - "ZADD myzset%s %s element:__rand_int__",tag,score); - benchmark("ZADD",cmd,len); - free(cmd); - } - - if (test_is_selected("zpopmin")) { - len = redisFormatCommand(&cmd,"ZPOPMIN myzset%s",tag); - benchmark("ZPOPMIN",cmd,len); - free(cmd); - } - - if (test_is_selected("lrange") || - test_is_selected("lrange_100") || - test_is_selected("lrange_300") || - test_is_selected("lrange_500") || - test_is_selected("lrange_600")) - { - len = redisFormatCommand(&cmd,"LPUSH mylist%s %s",tag,data); - benchmark("LPUSH (needed to benchmark LRANGE)",cmd,len); - free(cmd); - } - - if (test_is_selected("lrange") || test_is_selected("lrange_100")) { - len = redisFormatCommand(&cmd,"LRANGE mylist%s 0 99",tag); - benchmark("LRANGE_100 (first 100 elements)",cmd,len); - free(cmd); - } - - if (test_is_selected("lrange") || test_is_selected("lrange_300")) { - len = redisFormatCommand(&cmd,"LRANGE mylist%s 0 299",tag); - benchmark("LRANGE_300 (first 300 elements)",cmd,len); - free(cmd); - } - - if (test_is_selected("lrange") || test_is_selected("lrange_500")) { - len = redisFormatCommand(&cmd,"LRANGE mylist%s 0 449",tag); - benchmark("LRANGE_500 (first 450 elements)",cmd,len); - free(cmd); - } - - if (test_is_selected("lrange") || test_is_selected("lrange_600")) { - len = redisFormatCommand(&cmd,"LRANGE mylist%s 0 599",tag); - benchmark("LRANGE_600 (first 600 elements)",cmd,len); - free(cmd); - } - - if (test_is_selected("mset")) { - const char *cmd_argv[21]; - cmd_argv[0] = "MSET"; - sds key_placeholder = 
sdscatprintf(sdsnew(""),"key%s:__rand_int__",tag); - for (i = 1; i < 21; i += 2) { - cmd_argv[i] = key_placeholder; - cmd_argv[i+1] = data; - } - len = redisFormatCommandArgv(&cmd,21,cmd_argv,NULL); - benchmark("MSET (10 keys)",cmd,len); - free(cmd); - sdsfree(key_placeholder); - } - - if (!config.csv) printf("\n"); - } while(config.loop); - - if (config.redis_config != NULL) freeRedisConfig(config.redis_config); + freeAllClients(); + freeBenchmarkThreads(); return 0; } From 46592a4089b1ec3a2717a255e2dad18b102925c2 Mon Sep 17 00:00:00 2001 From: christianEQ Date: Fri, 11 Jun 2021 16:28:29 +0000 Subject: [PATCH 007/149] don't enforce upper limit for requests issued; tool will run indefinitely Former-commit-id: 239d22ed722357f0973c971b998b21f4f7b3b1da --- src/keydb-diagnostic-tool.cpp | 10 +--------- 1 file changed, 1 insertion(+), 9 deletions(-) diff --git a/src/keydb-diagnostic-tool.cpp b/src/keydb-diagnostic-tool.cpp index c41995284..3dced8523 100644 --- a/src/keydb-diagnostic-tool.cpp +++ b/src/keydb-diagnostic-tool.cpp @@ -399,14 +399,6 @@ static void writeHandler(aeEventLoop *el, int fd, void *privdata, int mask) { /* Initialize request when nothing was written. */ if (c->written == 0) { - /* Enforce upper bound to number of requests. */ - int requests_issued = 0; - atomicGetIncr(config.requests_issued, requests_issued, 1); - if (requests_issued >= config.requests) { - freeClient(c); - return; - } - /* Really initialize: randomize keys and set start time. */ if (config.randomkeys) randomizeClientKey(c); atomicGet(config.slots_last_update, c->slots_last_update); @@ -913,7 +905,7 @@ int main(int argc, const char **argv) { double cpu_usage; while (threads_used < config.max_threads) { - printf("Creating clients for thread %d...\n", threads_used); + printf("Creating %d clients for thread %d...\n", config.numclients, threads_used); for (int i = 0; i < config.numclients; i++) { sprintf(command, "SET %d %s\r\n", threads_used * config.numclients + i, set_value); createClient(command, strlen(command), NULL,threads_used); From 448a1a3879eb719f3ad4f99c396e740bf585373e Mon Sep 17 00:00:00 2001 From: christianEQ Date: Fri, 11 Jun 2021 17:04:33 +0000 Subject: [PATCH 008/149] fixed flag var names in ci Former-commit-id: e632c3db1b6c8b0294075e60533f40812c554b47 --- .github/workflows/ci.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index bce8f20b8..9086bb701 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -12,7 +12,7 @@ jobs: run: | sudo apt-get update sudo apt-get -y install uuid-dev libcurl4-openssl-dev - make KEYDB_CFLAGS='-Werror' KEYDB_CXXFLAGS='-Werror' BUILD_TLS=yes -j2 + make REDIS_CFLAGS='-Werror' REDIS_CXXFLAGS='-Werror' BUILD_TLS=yes -j2 - name: gen-cert run: ./utils/gen-test-certs.sh - name: test-tls @@ -44,7 +44,7 @@ jobs: steps: - uses: actions/checkout@v2 - name: make - run: make KEYDB_CFLAGS='-Werror' KEYDB_CXXFLAGS='-Werror' -j2 + run: make REDIS_CFLAGS='-Werror' REDIS_CXXFLAGS='-Werror' -j2 build-libc-malloc: runs-on: ubuntu-latest @@ -54,5 +54,5 @@ jobs: run: | sudo apt-get update sudo apt-get -y install uuid-dev libcurl4-openssl-dev - make KEYDB_CFLAGS='-Werror' KEYDB_CXXFLAGS='-Werror' MALLOC=libc -j2 + make REDIS_CFLAGS='-Werror' REDIS_CXXFLAGS='-Werror' MALLOC=libc -j2 From 20b5272f6034ed92c690b244b794e990cdf20f5d Mon Sep 17 00:00:00 2001 From: christianEQ Date: Fri, 11 Jun 2021 18:08:19 +0000 Subject: [PATCH 009/149] removed the question issue template - doesn't seem 
necessary for us Former-commit-id: ce64f469f404f79ae0028627e7ebfcc47781eeab --- .github/ISSUE_TEMPLATE/question.md | 21 --------------------- 1 file changed, 21 deletions(-) delete mode 100644 .github/ISSUE_TEMPLATE/question.md diff --git a/.github/ISSUE_TEMPLATE/question.md b/.github/ISSUE_TEMPLATE/question.md deleted file mode 100644 index 6acde0d06..000000000 --- a/.github/ISSUE_TEMPLATE/question.md +++ /dev/null @@ -1,21 +0,0 @@ ---- -name: Question -about: Ask the Redis developers -title: '[QUESTION]' -labels: '' -assignees: '' - ---- - -Please keep in mind that this issue tracker should be used for reporting bugs or proposing improvements to the Redis server. - -Generally, questions about using Redis should be directed to the [community](https://redis.io/community): - -* [the mailing list](https://groups.google.com/forum/#!forum/redis-db) -* [the `redis` tag at StackOverflow](http://stackoverflow.com/questions/tagged/redis) -* [/r/redis subreddit](http://www.reddit.com/r/redis) -* [the irc channel #redis](http://webchat.freenode.net/?channels=redis) on freenode - -It is also possible that your question was already asked here, so please do a quick issues search before submitting. Lastly, if your question is about one of Redis' [clients](https://redis.io/clients), you may to contact your client's developers for help. - -That said, please feel free to replace all this with your question :) \ No newline at end of file From c2655387fca5e4b4469f8aef509b4307b36e018a Mon Sep 17 00:00:00 2001 From: christianEQ Date: Fri, 11 Jun 2021 18:15:50 +0000 Subject: [PATCH 010/149] updated pkg/deb/conf/keydb.conf to use keydb name throughout Former-commit-id: 308cf77437374f01cc40101f2f35768db7a16302 --- pkg/deb/conf/keydb.conf | 315 ++++++++++++++++++++-------------------- 1 file changed, 157 insertions(+), 158 deletions(-) diff --git a/pkg/deb/conf/keydb.conf b/pkg/deb/conf/keydb.conf index 8e1552b9a..b54d1bb36 100644 --- a/pkg/deb/conf/keydb.conf +++ b/pkg/deb/conf/keydb.conf @@ -1,6 +1,6 @@ -# Redis configuration file example. +# KeyDB configuration file example. # -# Note that in order to read the configuration file, Redis must be +# Note that in order to read the configuration file, KeyDB must be # started with the file path as first argument: # # ./keydb-server /path/to/keydb.conf @@ -20,12 +20,12 @@ ################################## INCLUDES ################################### # Include one or more other config files here. This is useful if you -# have a standard template that goes to all Redis servers but also need +# have a standard template that goes to all KeyDB servers but also need # to customize a few per-server settings. Include files can include # other files, so use this wisely. # # Note that option "include" won't be rewritten by command "CONFIG REWRITE" -# from admin or Redis Sentinel. Since Redis always uses the last processed +# from admin or KeyDB Sentinel. Since KeyDB always uses the last processed # line as value of a configuration directive, you'd better put includes # at the beginning of this file to avoid overwriting config change at runtime. # @@ -45,7 +45,7 @@ ################################## NETWORK ##################################### -# By default, if no "bind" configuration directive is specified, Redis listens +# By default, if no "bind" configuration directive is specified, KeyDB listens # for connections from all available network interfaces on the host machine. 
# It is possible to listen to just one or multiple selected interfaces using # the "bind" configuration directive, followed by one or more IP addresses. @@ -61,11 +61,11 @@ # bind 127.0.0.1 ::1 # listens on loopback IPv4 and IPv6 # bind * -::* # like the default, all available interfaces # -# ~~~ WARNING ~~~ If the computer running Redis is directly exposed to the +# ~~~ WARNING ~~~ If the computer running KeyDB is directly exposed to the # internet, binding to all the interfaces is dangerous and will expose the # instance to everybody on the internet. So by default we uncomment the -# following bind directive, that will force Redis to listen only on the -# IPv4 and IPv6 (if available) loopback interface addresses (this means Redis +# following bind directive, that will force KeyDB to listen only on the +# IPv4 and IPv6 (if available) loopback interface addresses (this means KeyDB # will only be able to accept client connections from the same host that it is # running on). # @@ -75,7 +75,7 @@ bind 127.0.0.1 ::1 # Protected mode is a layer of security protection, in order to avoid that -# Redis instances left open on the internet are accessed and exploited. +# KeyDB instances left open on the internet are accessed and exploited. # # When protected mode is on and if: # @@ -88,13 +88,13 @@ bind 127.0.0.1 ::1 # sockets. # # By default protected mode is enabled. You should disable it only if -# you are sure you want clients from other hosts to connect to Redis +# you are sure you want clients from other hosts to connect to KeyDB # even if no authentication is configured, nor a specific set of interfaces # are explicitly listed using the "bind" directive. protected-mode yes # Accept connections on the specified port, default is 6379 (IANA #815344). -# If port 0 is specified Redis will not listen on a TCP socket. +# If port 0 is specified KeyDB will not listen on a TCP socket. port 6379 # TCP listen() backlog. @@ -109,7 +109,7 @@ tcp-backlog 511 # Unix socket. # # Specify the path for the Unix socket that will be used to listen for -# incoming connections. There is no default, so Redis will not listen +# incoming connections. There is no default, so KeyDB will not listen # on a unix socket when not specified. # # unixsocket /run/keydb.sock @@ -131,8 +131,7 @@ timeout 0 # Note that to close the connection the double of the time is needed. # On other kernels the period depends on the kernel configuration. # -# A reasonable value for this option is 300 seconds, which is the new -# Redis default starting with Redis 3.2.1. +# A reasonable value for this option is 300 seconds, which is the default. tcp-keepalive 300 ################################# TLS/SSL ##################################### @@ -156,7 +155,7 @@ tcp-keepalive 300 # # tls-key-file-pass secret -# Normally Redis uses the same certificate for both server functions (accepting +# Normally KeyDB uses the same certificate for both server functions (accepting # connections) and client functions (replicating from a master, establishing # cluster bus connections, etc.). # @@ -178,7 +177,7 @@ tcp-keepalive 300 # tls-dh-params-file keydb.dh # Configure a CA certificate(s) bundle or directory to authenticate TLS/SSL -# clients and peers. Redis requires an explicit configuration of at least one +# clients and peers. KeyDB requires an explicit configuration of at least one # of these, and will not implicitly use the system wide configuration. 
# # tls-ca-cert-file ca.crt @@ -194,14 +193,14 @@ tcp-keepalive 300 # tls-auth-clients no # tls-auth-clients optional -# By default, a Redis replica does not attempt to establish a TLS connection +# By default, a KeyDB replica does not attempt to establish a TLS connection # with its master. # # Use the following directive to enable TLS on replication links. # # tls-replication yes -# By default, the Redis Cluster bus uses a plain TCP connection. To enable +# By default, the KeyDB Cluster bus uses a plain TCP connection. To enable # TLS for the bus protocol, use the following directive: # # tls-cluster yes @@ -251,18 +250,18 @@ tcp-keepalive 300 ################################# GENERAL ##################################### -# By default Redis does not run as a daemon. Use 'yes' if you need it. -# Note that Redis will write a pid file in /var/run/keydb.pid when daemonized. -# When Redis is supervised by upstart or systemd, this parameter has no impact. +# By default KeyDB does not run as a daemon. Use 'yes' if you need it. +# Note that KeyDB will write a pid file in /var/run/keydb.pid when daemonized. +# When KeyDB is supervised by upstart or systemd, this parameter has no impact. daemonize no -# If you run Redis from upstart or systemd, Redis can interact with your +# If you run KeyDB from upstart or systemd, KeyDB can interact with your # supervision tree. Options: # supervised no - no supervision interaction -# supervised upstart - signal upstart by putting Redis into SIGSTOP mode +# supervised upstart - signal upstart by putting KeyDB into SIGSTOP mode # requires "expect stop" in your upstart job config # supervised systemd - signal systemd by writing READY=1 to $NOTIFY_SOCKET -# on startup, and updating Redis status on a regular +# on startup, and updating KeyDB status on a regular # basis. # supervised auto - detect upstart or systemd method based on # UPSTART_JOB or NOTIFY_SOCKET environment variables @@ -274,14 +273,14 @@ daemonize no # # supervised auto -# If a pid file is specified, Redis writes it where specified at startup +# If a pid file is specified, KeyDB writes it where specified at startup # and removes it at exit. # # When the server runs non daemonized, no pid file is created if none is # specified in the configuration. When the server is daemonized, the pid file # is used even if not specified, defaulting to "/var/run/keydb.pid". # -# Creating a pid file is best effort: if Redis is not able to create it +# Creating a pid file is best effort: if KeyDB is not able to create it # nothing bad happens, the server will start and run normally. # # Note that on modern Linux systems "/run/keydb.pid" is more conforming @@ -297,7 +296,7 @@ pidfile /var/run/keydb_6379.pid loglevel notice # Specify the log file name. Also the empty string can be used to force -# Redis to log on the standard output. Note that if you use standard +# KeyDB to log on the standard output. 
Note that if you use standard # output for logging but daemonize, logs will be sent to /dev/null logfile /var/log/keydb/keydb-server.log @@ -317,7 +316,7 @@ logfile /var/log/keydb/keydb-server.log # crash-log-enabled no # To disable the fast memory check that's run as part of the crash log, which -# will possibly let keydb terminate sooner, uncomment the following: +# will possibly let KeyDB terminate sooner, uncomment the following: # # crash-memcheck-enabled no @@ -326,7 +325,7 @@ logfile /var/log/keydb/keydb-server.log # dbid is a number between 0 and 'databases'-1 databases 16 -# By default Redis shows an ASCII art logo only when started to log to the +# By default KeyDB shows an ASCII art logo only when started to log to the # standard output and if the standard output is a TTY and syslog logging is # disabled. Basically this means that normally a logo is displayed only in # interactive sessions. @@ -335,12 +334,12 @@ databases 16 # ASCII art logo in startup logs by setting the following option to yes. always-show-logo no -# By default, Redis modifies the process title (as seen in 'top' and 'ps') to +# By default, KeyDB modifies the process title (as seen in 'top' and 'ps') to # provide some runtime information. It is possible to disable this and leave # the process name as executed by setting the following to no. set-proc-title yes -# When changing the process title, Redis uses the following template to construct +# When changing the process title, KeyDB uses the following template to construct # the modified title. # # Template variables are specified in curly brackets. The following variables are @@ -363,7 +362,7 @@ proc-title-template "{title} {listen-addr} {server-mode}" # # save # -# Redis will save the DB if both the given number of seconds and the given +# KeyDB will save the DB if both the given number of seconds and the given # number of write operations against the DB occurred. # # Snapshotting can be completely disabled with a single empty string argument @@ -371,7 +370,7 @@ proc-title-template "{title} {listen-addr} {server-mode}" # # save "" # -# Unless specified otherwise, by default Redis will save the DB: +# Unless specified otherwise, by default KeyDB will save the DB: # * After 3600 seconds (an hour) if at least 1 key changed # * After 300 seconds (5 minutes) if at least 100 keys changed # * After 60 seconds if at least 10000 keys changed @@ -382,17 +381,17 @@ proc-title-template "{title} {listen-addr} {server-mode}" # save 300 100 # save 60 10000 -# By default Redis will stop accepting writes if RDB snapshots are enabled +# By default KeyDB will stop accepting writes if RDB snapshots are enabled # (at least one save point) and the latest background save failed. # This will make the user aware (in a hard way) that data is not persisting # on disk properly, otherwise chances are that no one will notice and some # disaster will happen. # -# If the background saving process will start working again Redis will +# If the background saving process will start working again KeyDB will # automatically allow writes again. # -# However if you have setup your proper monitoring of the Redis server -# and persistence, you may want to disable this feature so that Redis will +# However if you have setup your proper monitoring of the KeyDB server +# and persistence, you may want to disable this feature so that KeyDB will # continue to work as usual even if there are problems with disk, # permissions, and so forth. 
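As an aside to the comment block above: a client sees this condition as a -MISCONF error on write commands, and the outcome of the last background save is reported under INFO persistence. The sketch below is illustrative only and not part of this patch; it assumes a system-wide hiredis install and an instance on 127.0.0.1:6379, and the key name probe:key is made up.

    // Illustrative sketch only: how a client observes the condition described
    // above. Assumes hiredis installed system-wide and KeyDB on 127.0.0.1:6379.
    #include <hiredis/hiredis.h>
    #include <cstdio>
    #include <cstring>

    int main() {
        redisContext *c = redisConnect("127.0.0.1", 6379);
        if (c == nullptr || c->err) { fprintf(stderr, "connect failed\n"); return 1; }

        // A write refused because the last background save failed comes back
        // as an error reply starting with "MISCONF".
        redisReply *r = (redisReply *)redisCommand(c, "SET probe:key 1");
        if (r && r->type == REDIS_REPLY_ERROR && strncmp(r->str, "MISCONF", 7) == 0)
            printf("writes refused: %s\n", r->str);
        freeReplyObject(r);

        // INFO persistence reports the status of the last background save.
        r = (redisReply *)redisCommand(c, "INFO persistence");
        if (r && r->type == REDIS_REPLY_STRING &&
            strstr(r->str, "rdb_last_bgsave_status:err") != NULL)
            printf("last background save failed\n");
        freeReplyObject(r);

        redisFree(c);
        return 0;
    }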
stop-writes-on-bgsave-error yes @@ -455,18 +454,18 @@ dir /var/lib/keydb ################################# REPLICATION ################################# -# Master-Replica replication. Use replicaof to make a Redis instance a copy of -# another Redis server. A few things to understand ASAP about Redis replication. +# Master-Replica replication. Use replicaof to make a KeyDB instance a copy of +# another KeyDB server. A few things to understand ASAP about KeyDB replication. # # +------------------+ +---------------+ # | Master | ---> | Replica | # | (receive writes) | | (exact copy) | # +------------------+ +---------------+ # -# 1) Redis replication is asynchronous, but you can configure a master to +# 1) KeyDB replication is asynchronous, but you can configure a master to # stop accepting writes if it appears to be not connected with at least # a given number of replicas. -# 2) Redis replicas are able to perform a partial resynchronization with the +# 2) KeyDB replicas are able to perform a partial resynchronization with the # master if the replication link is lost for a relatively small amount of # time. You may want to configure the replication backlog size (see the next # sections of this file) with a sensible value depending on your needs. @@ -483,7 +482,7 @@ dir /var/lib/keydb # # masterauth # -# However this is not enough if you are using Redis ACLs (for Redis version +# However this is not enough if you are using KeyDB ACLs (for KeyDB version # 6 or greater), and the default user is not capable of running the PSYNC # command and/or other commands needed for replication. In this case it's # better to configure a special user to use with replication, and specify the @@ -515,7 +514,7 @@ replica-serve-stale-data yes # may also cause problems if clients are writing to it because of a # misconfiguration. # -# Since Redis 2.6 by default replicas are read-only. +# Since KeyDB 2.6 by default replicas are read-only. # # Note: read only replicas are not designed to be exposed to untrusted clients # on the internet. It's just a protection layer against misuse of the instance. @@ -536,10 +535,10 @@ replica-read-only yes # synchronization". An RDB file is transmitted from the master to the replicas. # The transmission can happen in two different ways: # -# 1) Disk-backed: The Redis master creates a new process that writes the RDB +# 1) Disk-backed: The KeyDB master creates a new process that writes the RDB # file on disk. Later the file is transferred by the parent # process to the replicas incrementally. -# 2) Diskless: The Redis master creates a new process that directly writes the +# 2) Diskless: The KeyDB master creates a new process that directly writes the # RDB file to replica sockets, without touching the disk at all. # # With disk-backed replication, while the RDB file is generated, more replicas @@ -571,8 +570,8 @@ repl-diskless-sync-delay 5 # ----------------------------------------------------------------------------- # WARNING: RDB diskless load is experimental. Since in this setup the replica # does not immediately store an RDB on disk, it may cause data loss during -# failovers. RDB diskless load + Redis modules not handling I/O reads may also -# cause Redis to abort in case of I/O errors during the initial synchronization +# failovers. RDB diskless load + KeyDB modules not handling I/O reads may also +# cause KeyDB to abort in case of I/O errors during the initial synchronization # stage with the master. Use only if you know what you are doing. 
# ----------------------------------------------------------------------------- # @@ -615,7 +614,7 @@ repl-diskless-load disabled # Disable TCP_NODELAY on the replica socket after SYNC? # -# If you select "yes" Redis will use a smaller number of TCP packets and +# If you select "yes" KeyDB will use a smaller number of TCP packets and # less bandwidth to send data to replicas. But this can add a delay for # the data to appear on the replica side, up to 40 milliseconds with # Linux kernels using a default configuration. @@ -654,8 +653,8 @@ repl-disable-tcp-nodelay no # # repl-backlog-ttl 3600 -# The replica priority is an integer number published by Redis in the INFO output. -# It is used by Redis Sentinel in order to select a replica to promote into a +# The replica priority is an integer number published by KeyDB in the INFO output. +# It is used by KeyDB Sentinel in order to select a replica to promote into a # master if the master is no longer working correctly. # # A replica with a low priority number is considered better for promotion, so @@ -664,16 +663,16 @@ repl-disable-tcp-nodelay no # # However a special priority of 0 marks the replica as not able to perform the # role of master, so a replica with priority of 0 will never be selected by -# Redis Sentinel for promotion. +# KeyDB Sentinel for promotion. # # By default the priority is 100. replica-priority 100 # ----------------------------------------------------------------------------- -# By default, Redis Sentinel includes all replicas in its reports. A replica -# can be excluded from Redis Sentinel's announcements. An unannounced replica +# By default, KeyDB Sentinel includes all replicas in its reports. A replica +# can be excluded from KeyDB Sentinel's announcements. An unannounced replica # will be ignored by the 'sentinel replicas ' command and won't be -# exposed to Redis Sentinel's clients. +# exposed to KeyDB Sentinel's clients. # # This option does not change the behavior of replica-priority. Even with # replica-announced set to 'no', the replica can be promoted to master. To @@ -703,10 +702,10 @@ replica-priority 100 # By default min-replicas-to-write is set to 0 (feature disabled) and # min-replicas-max-lag is set to 10. -# A Redis master is able to list the address and port of the attached +# A KeyDB master is able to list the address and port of the attached # replicas in different ways. For example the "INFO replication" section # offers this information, which is used, among other tools, by -# Redis Sentinel in order to discover replica instances. +# KeyDB Sentinel in order to discover replica instances. # Another place where this info is available is in the output of the # "ROLE" command of a master. # @@ -734,7 +733,7 @@ replica-priority 100 ############################### KEYS TRACKING ################################# -# Redis implements server assisted support for client side caching of values. +# KeyDB implements server assisted support for client side caching of values. # This is implemented using an invalidation table that remembers, using # a radix key indexed by key name, what clients have which keys. In turn # this is used in order to send invalidation messages to clients. 
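A rough sketch of how a client opts into the tracking described above (illustrative only, not part of this patch): it assumes hiredis 1.x with RESP3 support and an instance on 127.0.0.1:6379; the key name mykey is made up, and a real client would also install a push handler to consume the invalidation messages, which is omitted here.

    // Illustrative sketch only: opting a connection into server-assisted
    // client side caching. Assumes hiredis >= 1.0 and KeyDB on 127.0.0.1:6379.
    #include <hiredis/hiredis.h>
    #include <cstdio>

    int main() {
        redisContext *c = redisConnect("127.0.0.1", 6379);
        if (c == nullptr || c->err) { fprintf(stderr, "connect failed\n"); return 1; }

        // Tracking requires the RESP3 protocol.
        redisReply *r = (redisReply *)redisCommand(c, "HELLO 3");
        freeReplyObject(r);

        // From now on the server remembers which keys this connection read and
        // sends an invalidation push message when one of them is modified.
        r = (redisReply *)redisCommand(c, "CLIENT TRACKING ON");
        freeReplyObject(r);

        // This read adds "mykey" to the server-side invalidation table for us.
        r = (redisReply *)redisCommand(c, "GET mykey");
        freeReplyObject(r);

        redisFree(c);
        return 0;
    }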
Please @@ -743,22 +742,22 @@ replica-priority 100 # https://redis.io/topics/client-side-caching # # When tracking is enabled for a client, all the read only queries are assumed -# to be cached: this will force Redis to store information in the invalidation +# to be cached: this will force KeyDB to store information in the invalidation # table. When keys are modified, such information is flushed away, and # invalidation messages are sent to the clients. However if the workload is -# heavily dominated by reads, Redis could use more and more memory in order +# heavily dominated by reads, KeyDB could use more and more memory in order # to track the keys fetched by many clients. # # For this reason it is possible to configure a maximum fill value for the # invalidation table. By default it is set to 1M of keys, and once this limit -# is reached, Redis will start to evict keys in the invalidation table +# is reached, KeyDB will start to evict keys in the invalidation table # even if they were not modified, just to reclaim memory: this will in turn # force the clients to invalidate the cached values. Basically the table # maximum size is a trade off between the memory you want to spend server # side to track information about who cached what, and the ability of clients # to retain cached objects in memory. # -# If you set the value to 0, it means there are no limits, and Redis will +# If you set the value to 0, it means there are no limits, and KeyDB will # retain as many keys as needed in the invalidation table. # In the "stats" INFO section, you can find information about the number of # keys in the invalidation table at every given moment. @@ -770,7 +769,7 @@ replica-priority 100 ################################## SECURITY ################################### -# Warning: since Redis is pretty fast, an outside user can try up to +# Warning: since KeyDB is pretty fast, an outside user can try up to # 1 million passwords per second against a modern box. This means that you # should use very strong passwords, otherwise they will be very easy to break. # Note that because the password is really a shared secret between the client @@ -778,7 +777,7 @@ replica-priority 100 # can be easily a long string from /dev/urandom or whatever, so by using a # long and unguessable password no brute force attack will be possible. -# Redis ACL users are defined in the following format: +# KeyDB ACL users are defined in the following format: # # user ... acl rules ... # @@ -807,7 +806,7 @@ replica-priority 100 # +@ Allow the execution of all the commands in such category # with valid categories are like @admin, @set, @sortedset, ... # and so forth, see the full list in the server.c file where -# the Redis command table is described and defined. +# the KeyDB command table is described and defined. # The special category @all means all the commands, but currently # present in the server, and that will be loaded in the future # via modules. @@ -891,7 +890,7 @@ acllog-max-len 128 # # aclfile /etc/keydb/users.acl -# IMPORTANT NOTE: starting with Redis 6 "requirepass" is just a compatibility +# IMPORTANT NOTE: starting with KeyDB 6 "requirepass" is just a compatibility # layer on top of the new ACL system. The option effect will be just setting # the password for the default user. 
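For illustration of the authentication modes this section describes, a minimal hiredis sketch follows (not part of the patch): the password reuses the file's own "foobared" example, "default" is the built-in default user, and the server is assumed to be on 127.0.0.1:6379.

    // Illustrative sketch only: the two AUTH forms discussed in this section.
    #include <hiredis/hiredis.h>
    #include <cstdio>

    int main() {
        redisContext *c = redisConnect("127.0.0.1", 6379);
        if (c == nullptr || c->err) { fprintf(stderr, "connect failed\n"); return 1; }

        // requirepass compatibility mode: password only, applies to "default".
        redisReply *r = (redisReply *)redisCommand(c, "AUTH %s", "foobared");
        if (r && r->type == REDIS_REPLY_ERROR) fprintf(stderr, "AUTH: %s\n", r->str);
        freeReplyObject(r);

        // ACL style: user name plus that user's password.
        r = (redisReply *)redisCommand(c, "AUTH %s %s", "default", "foobared");
        if (r && r->type == REDIS_REPLY_ERROR) fprintf(stderr, "AUTH: %s\n", r->str);
        freeReplyObject(r);

        redisFree(c);
        return 0;
    }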
Clients will still authenticate using # AUTH as usually, or more explicitly with AUTH default @@ -903,7 +902,7 @@ acllog-max-len 128 # requirepass foobared # New users are initialized with restrictive permissions by default, via the -# equivalent of this ACL rule 'off resetkeys -@all'. Starting with Redis 6.2, it +# equivalent of this ACL rule 'off resetkeys -@all'. Starting with KeyDB 6.2, it # is possible to manage access to Pub/Sub channels with ACL rules as well. The # default Pub/Sub channels permission if new users is controlled by the # acl-pubsub-default configuration directive, which accepts one of these values: @@ -911,10 +910,10 @@ acllog-max-len 128 # allchannels: grants access to all Pub/Sub channels # resetchannels: revokes access to all Pub/Sub channels # -# To ensure backward compatibility while upgrading Redis 6.0, acl-pubsub-default +# To ensure backward compatibility while upgrading KeyDB 6.0, acl-pubsub-default # defaults to the 'allchannels' permission. # -# Future compatibility note: it is very likely that in a future version of Redis +# Future compatibility note: it is very likely that in a future version of KeyDB # the directive's default of 'allchannels' will be changed to 'resetchannels' in # order to provide better out-of-the-box Pub/Sub security. Therefore, it is # recommended that you explicitly define Pub/Sub permissions for all users @@ -951,15 +950,15 @@ acllog-max-len 128 ################################### CLIENTS #################################### # Set the max number of connected clients at the same time. By default -# this limit is set to 10000 clients, however if the Redis server is not +# this limit is set to 10000 clients, however if the KeyDB server is not # able to configure the process file limit to allow for the specified limit # the max number of allowed clients is set to the current file limit -# minus 32 (as Redis reserves a few file descriptors for internal uses). +# minus 32 (as KeyDB reserves a few file descriptors for internal uses). # -# Once the limit is reached Redis will close all the new connections sending +# Once the limit is reached KeyDB will close all the new connections sending # an error 'max number of clients reached'. # -# IMPORTANT: When Redis Cluster is used, the max number of connections is also +# IMPORTANT: When KeyDB Cluster is used, the max number of connections is also # shared with the cluster bus: every node in the cluster will use two # connections, one incoming and another outgoing. It is important to size the # limit accordingly in case of very large clusters. @@ -969,15 +968,15 @@ acllog-max-len 128 ############################## MEMORY MANAGEMENT ################################ # Set a memory usage limit to the specified amount of bytes. -# When the memory limit is reached Redis will try to remove keys +# When the memory limit is reached KeyDB will try to remove keys # according to the eviction policy selected (see maxmemory-policy). # -# If Redis can't remove keys according to the policy, or if the policy is -# set to 'noeviction', Redis will start to reply with errors to commands +# If KeyDB can't remove keys according to the policy, or if the policy is +# set to 'noeviction', KeyDB will start to reply with errors to commands # that would use more memory, like SET, LPUSH, and so on, and will continue # to reply to read-only commands like GET. 
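A small sketch of the write-error behaviour described above (illustrative only, not part of the patch): the 1mb limit, the filler:* key names and the payload are made up, and the limit is put back to 0 (unlimited) at the end. It assumes hiredis and an instance on 127.0.0.1:6379.

    // Illustrative sketch: with the noeviction policy, writes past maxmemory
    // are answered with an OOM error instead of evicting keys.
    #include <hiredis/hiredis.h>
    #include <cstdio>
    #include <cstring>

    int main() {
        redisContext *c = redisConnect("127.0.0.1", 6379);
        if (c == nullptr || c->err) { fprintf(stderr, "connect failed\n"); return 1; }

        freeReplyObject(redisCommand(c, "CONFIG SET maxmemory 1mb"));
        freeReplyObject(redisCommand(c, "CONFIG SET maxmemory-policy noeviction"));

        char key[64];
        for (int i = 0; i < 100000; i++) {
            snprintf(key, sizeof(key), "filler:%d", i);
            redisReply *r = (redisReply *)redisCommand(c, "SET %s %s", key, "payload");
            bool oom = r && r->type == REDIS_REPLY_ERROR &&
                       strncmp(r->str, "OOM", 3) == 0;
            freeReplyObject(r);
            if (oom) { printf("write refused at %s\n", key); break; }
        }

        // Put the limit back to "no limit" so the sketch leaves no trace.
        freeReplyObject(redisCommand(c, "CONFIG SET maxmemory 0"));
        redisFree(c);
        return 0;
    }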
# -# This option is usually useful when using Redis as an LRU or LFU cache, or to +# This option is usually useful when using KeyDB as an LRU or LFU cache, or to # set a hard memory limit for an instance (using the 'noeviction' policy). # # WARNING: If you have replicas attached to an instance with maxmemory on, @@ -993,7 +992,7 @@ acllog-max-len 128 # # maxmemory -# MAXMEMORY POLICY: how Redis will select what to remove when maxmemory +# MAXMEMORY POLICY: how KeyDB will select what to remove when maxmemory # is reached. You can select among five behaviors: # # volatile-lru -> Evict using approximated LRU among the keys with an expire set. @@ -1012,7 +1011,7 @@ acllog-max-len 128 # randomized algorithms. # # Note: with any of the above policies, when there are no suitable keys for -# eviction, Redis will return an error on write operations that require +# eviction, KeyDB will return an error on write operations that require # more memory. These are usually commands that create new keys, add data or # modify existing keys. A few examples are: SET, INCR, HSET, LPUSH, SUNIONSTORE, # SORT (due to the STORE argument), and EXEC (if the transaction includes any @@ -1024,7 +1023,7 @@ acllog-max-len 128 # LRU, LFU and minimal TTL algorithms are not precise algorithms but approximated # algorithms (in order to save memory), so you can tune it for speed or -# accuracy. By default Redis will check five keys and pick the one that was +# accuracy. By default KeyDB will check five keys and pick the one that was # used least recently, you can change the sample size using the following # configuration directive. # @@ -1041,7 +1040,7 @@ acllog-max-len 128 # # maxmemory-eviction-tenacity 10 -# Starting from Redis 5, by default a replica will ignore its maxmemory setting +# Starting from KeyDB 5, by default a replica will ignore its maxmemory setting # (unless it is promoted to master after a failover or manually). It means # that the eviction of keys will be just handled by the master, sending the # DEL commands to the replica as keys evict in the master side. @@ -1061,7 +1060,7 @@ acllog-max-len 128 # # replica-ignore-maxmemory yes -# Redis reclaims expired keys in two ways: upon access when those keys are +# KeyDB reclaims expired keys in two ways: upon access when those keys are # found to be expired, and also in background, in what is called the # "active expire key". The key space is slowly and interactively scanned # looking for expired keys to reclaim, so that it is possible to free memory @@ -1080,16 +1079,16 @@ acllog-max-len 128 ############################# LAZY FREEING #################################### -# Redis has two primitives to delete keys. One is called DEL and is a blocking +# KeyDB has two primitives to delete keys. One is called DEL and is a blocking # deletion of the object. It means that the server stops processing new commands # in order to reclaim all the memory associated with an object in a synchronous # way. If the key deleted is associated with a small object, the time needed # in order to execute the DEL command is very small and comparable to most other -# O(1) or O(log_N) commands in Redis. However if the key is associated with an +# O(1) or O(log_N) commands in KeyDB. However if the key is associated with an # aggregated value containing millions of elements, the server can block for # a long time (even seconds) in order to complete the operation. 
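To make the contrast concrete, here is a sketch that frees a large key with the non-blocking UNLINK primitive introduced in the next paragraph (illustrative only, not part of the patch; the biglist key and the element count are made up; assumes hiredis and an instance on 127.0.0.1:6379).

    // Illustrative sketch: deleting a large aggregate without blocking the
    // server, using UNLINK instead of DEL.
    #include <hiredis/hiredis.h>
    #include <cstdio>

    int main() {
        redisContext *c = redisConnect("127.0.0.1", 6379);
        if (c == nullptr || c->err) { fprintf(stderr, "connect failed\n"); return 1; }

        // Build a reasonably large list.
        for (int i = 0; i < 100000; i++) {
            redisReply *r = (redisReply *)redisCommand(c, "LPUSH biglist element");
            freeReplyObject(r);
        }

        // DEL would reclaim the memory synchronously; UNLINK only unlinks the
        // key from the keyspace and lets a background thread free the elements.
        redisReply *r = (redisReply *)redisCommand(c, "UNLINK biglist");
        if (r && r->type == REDIS_REPLY_INTEGER)
            printf("unlinked %lld key(s)\n", r->integer);
        freeReplyObject(r);

        redisFree(c);
        return 0;
    }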
# -# For the above reasons Redis also offers non blocking deletion primitives +# For the above reasons KeyDB also offers non blocking deletion primitives # such as UNLINK (non blocking DEL) and the ASYNC option of FLUSHALL and # FLUSHDB commands, in order to reclaim memory in background. Those commands # are executed in constant time. Another thread will incrementally free the @@ -1097,9 +1096,9 @@ acllog-max-len 128 # # DEL, UNLINK and ASYNC option of FLUSHALL and FLUSHDB are user-controlled. # It's up to the design of the application to understand when it is a good -# idea to use one or the other. However the Redis server sometimes has to +# idea to use one or the other. However the KeyDB server sometimes has to # delete keys or flush the whole database as a side effect of other operations. -# Specifically Redis deletes objects independently of a user call in the +# Specifically KeyDB deletes objects independently of a user call in the # following scenarios: # # 1) On eviction, because of the maxmemory and maxmemory policy configurations, @@ -1143,21 +1142,21 @@ lazyfree-lazy-user-flush no ################################ THREADED I/O ################################# -# Redis is mostly single threaded, however there are certain threaded +# KeyDB is mostly single threaded, however there are certain threaded # operations such as UNLINK, slow I/O accesses and other things that are # performed on side threads. # -# Now it is also possible to handle Redis clients socket reads and writes +# Now it is also possible to handle KeyDB clients socket reads and writes # in different I/O threads. Since especially writing is so slow, normally -# Redis users use pipelining in order to speed up the Redis performances per +# KeyDB users use pipelining in order to speed up the KeyDB performances per # core, and spawn multiple instances in order to scale more. Using I/O -# threads it is possible to easily speedup two times Redis without resorting +# threads it is possible to easily speedup two times KeyDB without resorting # to pipelining nor sharding of the instance. # # By default threading is disabled, we suggest enabling it only in machines # that have at least 4 or more cores, leaving at least one spare core. # Using more than 8 threads is unlikely to help much. We also recommend using -# threaded I/O only if you actually have performance problems, with Redis +# threaded I/O only if you actually have performance problems, with KeyDB # instances being able to use a quite big percentage of CPU time, otherwise # there is no point in using this feature. # @@ -1182,9 +1181,9 @@ lazyfree-lazy-user-flush no # CONFIG SET. Aso this feature currently does not work when SSL is # enabled. # -# NOTE 2: If you want to test the Redis speedup using keydb-benchmark, make +# NOTE 2: If you want to test the KeyDB speedup using keydb-benchmark, make # sure you also run the benchmark itself in threaded mode, using the -# --threads option to match the number of Redis threads, otherwise you'll not +# --threads option to match the number of KeyDB threads, otherwise you'll not # be able to notice the improvements. ############################ KERNEL OOM CONTROL ############################## @@ -1192,12 +1191,12 @@ lazyfree-lazy-user-flush no # On Linux, it is possible to hint the kernel OOM killer on what processes # should be killed first when out of memory. 
# -# Enabling this feature makes Redis actively control the oom_score_adj value +# Enabling this feature makes KeyDB actively control the oom_score_adj value # for all its processes, depending on their role. The default scores will # attempt to have background child processes killed before all others, and # replicas killed before masters. # -# Redis supports three options: +# KeyDB supports three options: # # no: Don't make changes to oom-score-adj (default). # yes: Alias to "relative" see below. @@ -1224,7 +1223,7 @@ oom-score-adj-values 0 200 800 # Usually the kernel Transparent Huge Pages control is set to "madvise" or # or "never" by default (/sys/kernel/mm/transparent_hugepage/enabled), in which # case this config has no effect. On systems in which it is set to "always", -# keydb will attempt to disable it specifically for the keydb process in order +# KeyDB will attempt to disable it specifically for the keydb process in order # to avoid latency problems specifically with fork(2) and CoW. # If for some reason you prefer to keep it enabled, you can set this config to # "no" and the kernel global to "always". @@ -1233,20 +1232,20 @@ disable-thp yes ############################## APPEND ONLY MODE ############################### -# By default Redis asynchronously dumps the dataset on disk. This mode is -# good enough in many applications, but an issue with the Redis process or +# By default KeyDB asynchronously dumps the dataset on disk. This mode is +# good enough in many applications, but an issue with the KeyDB process or # a power outage may result into a few minutes of writes lost (depending on # the configured save points). # # The Append Only File is an alternative persistence mode that provides # much better durability. For instance using the default data fsync policy -# (see later in the config file) Redis can lose just one second of writes in a +# (see later in the config file) KeyDB can lose just one second of writes in a # dramatic event like a server power outage, or a single write if something -# wrong with the Redis process itself happens, but the operating system is +# wrong with the KeyDB process itself happens, but the operating system is # still running correctly. # # AOF and RDB persistence can be enabled at the same time without problems. -# If the AOF is enabled on startup Redis will load the AOF, that is the file +# If the AOF is enabled on startup KeyDB will load the AOF, that is the file # with the better durability guarantees. # # Please check https://redis.io/topics/persistence for more information. @@ -1261,7 +1260,7 @@ appendfilename "appendonly.aof" # instead of waiting for more data in the output buffer. Some OS will really flush # data on disk, some other OS will just try to do it ASAP. # -# Redis supports three different modes: +# KeyDB supports three different modes: # # no: don't fsync, just let the OS flush the data when it wants. Faster. # always: fsync after every write to the append only log. Slow, Safest. @@ -1287,7 +1286,7 @@ appendfsync everysec # When the AOF fsync policy is set to always or everysec, and a background # saving process (a background save or AOF log background rewriting) is # performing a lot of I/O against the disk, in some Linux configurations -# Redis may block too long on the fsync() call. Note that there is no fix for +# KeyDB may block too long on the fsync() call. Note that there is no fix for # this currently, as even performing fsync in a different thread will block # our synchronous write(2) call. 
# @@ -1295,7 +1294,7 @@ appendfsync everysec # that will prevent fsync() from being called in the main process while a # BGSAVE or BGREWRITEAOF is in progress. # -# This means that while another child is saving, the durability of Redis is +# This means that while another child is saving, the durability of KeyDB is # the same as "appendfsync none". In practical terms, this means that it is # possible to lose up to 30 seconds of log in the worst scenario (with the # default Linux settings). @@ -1306,10 +1305,10 @@ appendfsync everysec no-appendfsync-on-rewrite no # Automatic rewrite of the append only file. -# Redis is able to automatically rewrite the log file implicitly calling +# KeyDB is able to automatically rewrite the log file implicitly calling # BGREWRITEAOF when the AOF log size grows by the specified percentage. # -# This is how it works: Redis remembers the size of the AOF file after the +# This is how it works: KeyDB remembers the size of the AOF file after the # latest rewrite (if no rewrite has happened since the restart, the size of # the AOF at startup is used). # @@ -1325,19 +1324,19 @@ no-appendfsync-on-rewrite no auto-aof-rewrite-percentage 100 auto-aof-rewrite-min-size 64mb -# An AOF file may be found to be truncated at the end during the Redis +# An AOF file may be found to be truncated at the end during the KeyDB # startup process, when the AOF data gets loaded back into memory. -# This may happen when the system where Redis is running +# This may happen when the system where KeyDB is running # crashes, especially when an ext4 filesystem is mounted without the -# data=ordered option (however this can't happen when Redis itself +# data=ordered option (however this can't happen when KeyDB itself # crashes or aborts but the operating system still works correctly). # -# Redis can either exit with an error when this happens, or load as much +# KeyDB can either exit with an error when this happens, or load as much # data as possible (the default now) and start if the AOF file is found # to be truncated at the end. The following option controls this behavior. # # If aof-load-truncated is set to yes, a truncated AOF file is loaded and -# the Redis server starts emitting a log to inform the user of the event. +# the KeyDB server starts emitting a log to inform the user of the event. # Otherwise if the option is set to no, the server aborts with an error # and refuses to start. When the option is set to no, the user requires # to fix the AOF file using the "keydb-check-aof" utility before to restart @@ -1345,17 +1344,17 @@ auto-aof-rewrite-min-size 64mb # # Note that if the AOF file will be found to be corrupted in the middle # the server will still exit with an error. This option only applies when -# Redis will try to read more data from the AOF file but not enough bytes +# KeyDB will try to read more data from the AOF file but not enough bytes # will be found. aof-load-truncated yes -# When rewriting the AOF file, Redis is able to use an RDB preamble in the +# When rewriting the AOF file, KeyDB is able to use an RDB preamble in the # AOF file for faster rewrites and recoveries. When this option is turned # on the rewritten AOF file is composed of two different stanzas: # # [RDB file][AOF tail] # -# When loading, Redis recognizes that the AOF file starts with the "REDIS" +# When loading, KeyDB recognizes that the AOF file starts with the "REDIS" # string and loads the prefixed RDB file, then continues loading the AOF # tail. 
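A sketch of how the preamble described above can be observed (illustrative only, not part of the patch): it triggers a rewrite and then looks for the "REDIS" magic at the start of the AOF. The path /var/lib/keydb/appendonly.aof is an assumption derived from the dir and appendfilename values in this file, and the check is only meaningful once the background rewrite has finished.

    // Illustrative sketch: after a rewrite with the preamble enabled, the AOF
    // begins with the same "REDIS" magic string used by RDB files.
    #include <hiredis/hiredis.h>
    #include <cstdio>
    #include <cstring>

    int main() {
        redisContext *c = redisConnect("127.0.0.1", 6379);
        if (c == nullptr || c->err) { fprintf(stderr, "connect failed\n"); return 1; }

        redisReply *r = (redisReply *)redisCommand(c, "BGREWRITEAOF");
        if (r && r->str) printf("%s\n", r->str);   // e.g. rewrite scheduled/started
        freeReplyObject(r);
        redisFree(c);

        // Assumed location: <dir>/<appendfilename> from this configuration.
        FILE *f = fopen("/var/lib/keydb/appendonly.aof", "rb");
        if (f) {
            char magic[6] = {0};
            size_t n = fread(magic, 1, 5, f);
            printf("RDB preamble: %s\n",
                   (n == 5 && strncmp(magic, "REDIS", 5) == 0) ? "yes" : "no");
            fclose(f);
        }
        return 0;
    }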
aof-use-rdb-preamble yes @@ -1364,7 +1363,7 @@ aof-use-rdb-preamble yes # Max execution time of a Lua script in milliseconds. # -# If the maximum execution time is reached Redis will log that a script is +# If the maximum execution time is reached KeyDB will log that a script is # still in execution after the maximum allowed time and will start to # reply to queries with an error. # @@ -1378,23 +1377,23 @@ aof-use-rdb-preamble yes # Set it to 0 or a negative value for unlimited execution without warnings. lua-time-limit 5000 -################################ REDIS CLUSTER ############################### +################################ KEYDB CLUSTER ############################### # # ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ -# WARNING EXPERIMENTAL: Redis Cluster is considered to be stable code, however +# WARNING EXPERIMENTAL: KeyDB Cluster is considered to be stable code, however # in order to mark it as "mature" we need to wait for a non trivial percentage # of users to deploy it in production. # ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ # -# Normal Redis instances can't be part of a Redis Cluster; only nodes that are -# started as cluster nodes can. In order to start a Redis instance as a +# Normal KeyDB instances can't be part of a KeyDB Cluster; only nodes that are +# started as cluster nodes can. In order to start a KeyDB instance as a # cluster node enable the cluster support uncommenting the following: # # cluster-enabled yes # Every cluster node has a cluster configuration file. This file is not -# intended to be edited by hand. It is created and updated by Redis nodes. -# Every Redis Cluster node requires a different cluster configuration file. +# intended to be edited by hand. It is created and updated by KeyDB nodes. +# Every KeyDB Cluster node requires a different cluster configuration file. # Make sure that instances running in the same system do not have # overlapping cluster configuration file names. # @@ -1479,7 +1478,7 @@ lua-time-limit 5000 # # cluster-allow-replica-migration yes -# By default Redis Cluster nodes stop accepting queries if they detect there +# By default KeyDB Cluster nodes stop accepting queries if they detect there # is at least a hash slot uncovered (no available node is serving it). # This way if the cluster is partially down (for example a range of hash slots # are no longer covered) all the cluster becomes, eventually, unavailable. @@ -1507,11 +1506,11 @@ lua-time-limit 5000 ########################## CLUSTER DOCKER/NAT support ######################## -# In certain deployments, Redis Cluster nodes address discovery fails, because +# In certain deployments, KeyDB Cluster nodes address discovery fails, because # addresses are NAT-ted or because ports are forwarded (the typical case is # Docker and other containers). # -# In order to make Redis Cluster working in such environments, a static +# In order to make KeyDB Cluster working in such environments, a static # configuration where each node knows its public address is needed. The # following four options are used for this scope, and are: # @@ -1529,7 +1528,7 @@ lua-time-limit 5000 # to zero, then cluster-announce-port refers to the TLS port. Note also that # cluster-announce-tls-port has no effect if cluster-tls is set to no. # -# If the above options are not used, the normal Redis Cluster auto-detection +# If the above options are not used, the normal KeyDB Cluster auto-detection # will be used instead. 
# # Note that when remapped, the bus port may not be at the fixed offset of @@ -1546,14 +1545,14 @@ lua-time-limit 5000 ################################## SLOW LOG ################################### -# The Redis Slow Log is a system to log queries that exceeded a specified +# The KeyDB Slow Log is a system to log queries that exceeded a specified # execution time. The execution time does not include the I/O operations # like talking with the client, sending the reply and so forth, # but just the time needed to actually execute the command (this is the only # stage of command execution where the thread is blocked and can not serve # other requests in the meantime). # -# You can configure the slow log with two parameters: one tells Redis +# You can configure the slow log with two parameters: one tells KeyDB # what is the execution time, in microseconds, to exceed in order for the # command to get logged, and the other parameter is the length of the # slow log. When a new command is logged the oldest one is removed from the @@ -1570,9 +1569,9 @@ slowlog-max-len 128 ################################ LATENCY MONITOR ############################## -# The Redis latency monitoring subsystem samples different operations +# The KeyDB latency monitoring subsystem samples different operations # at runtime in order to collect data related to possible sources of -# latency of a Redis instance. +# latency of a KeyDB instance. # # Via the LATENCY command this information is available to the user that can # print graphs and obtain reports. @@ -1591,7 +1590,7 @@ latency-monitor-threshold 0 ############################# EVENT NOTIFICATION ############################## -# Redis can notify Pub/Sub clients about events happening in the key space. +# KeyDB can notify Pub/Sub clients about events happening in the key space. # This feature is documented at https://redis.io/topics/notifications # # For instance if keyspace events notification is enabled, and a client @@ -1601,7 +1600,7 @@ latency-monitor-threshold 0 # PUBLISH __keyspace@0__:foo del # PUBLISH __keyevent@0__:del foo # -# It is possible to select the events that Redis will notify among a set +# It is possible to select the events that KeyDB will notify among a set # of classes. Every class is identified by a single character: # # K Keyspace events, published with __keyspace@__ prefix. @@ -1642,12 +1641,12 @@ notify-keyspace-events "" ############################### GOPHER SERVER ################################# -# Redis contains an implementation of the Gopher protocol, as specified in +# KeyDB contains an implementation of the Gopher protocol, as specified in # the RFC 1436 (https://www.ietf.org/rfc/rfc1436.txt). # # The Gopher protocol was very popular in the late '90s. It is an alternative # to the web, and the implementation both server and client side is so simple -# that the Redis server has just 100 lines of code in order to implement this +# that the KeyDB server has just 100 lines of code in order to implement this # support. # # What do you do with Gopher nowadays? Well Gopher never *really* died, and @@ -1657,18 +1656,18 @@ notify-keyspace-events "" # controlled, and it's cool to create an alternative space for people that # want a bit of fresh air. # -# Anyway for the 10nth birthday of the Redis, we gave it the Gopher protocol +# Anyway for the 10nth birthday of the KeyDB, we gave it the Gopher protocol # as a gift. # # --- HOW IT WORKS? 
--- # -# The Redis Gopher support uses the inline protocol of Redis, and specifically +# The KeyDB Gopher support uses the inline protocol of KeyDB, and specifically # two kind of inline requests that were anyway illegal: an empty request -# or any request that starts with "/" (there are no Redis commands starting +# or any request that starts with "/" (there are no KeyDB commands starting # with such a slash). Normal RESP2/RESP3 requests are completely out of the # path of the Gopher protocol implementation and are served as usual as well. # -# If you open a connection to Redis when Gopher is enabled and send it +# If you open a connection to KeyDB when Gopher is enabled and send it # a string like "/foo", if there is a key named "/foo" it is served via the # Gopher protocol. # @@ -1679,7 +1678,7 @@ notify-keyspace-events "" # # --- SECURITY WARNING --- # -# If you plan to put Redis on the internet in a publicly accessible address +# If you plan to put KeyDB on the internet in a publicly accessible address # to server Gopher pages MAKE SURE TO SET A PASSWORD to the instance. # Once a password is set: # @@ -1776,8 +1775,8 @@ stream-node-max-bytes 4096 stream-node-max-entries 100 # Active rehashing uses 1 millisecond every 100 milliseconds of CPU time in -# order to help rehashing the main Redis hash table (the one mapping top-level -# keys to values). The hash table implementation Redis uses (see dict.c) +# order to help rehashing the main KeyDB hash table (the one mapping top-level +# keys to values). The hash table implementation KeyDB uses (see dict.c) # performs a lazy rehashing: the more operation you run into a hash table # that is rehashing, the more rehashing "steps" are performed, so if the # server is idle the rehashing is never complete and some more memory is used @@ -1788,7 +1787,7 @@ stream-node-max-entries 100 # # If unsure: # use "activerehashing no" if you have hard latency requirements and it is -# not a good thing in your environment that Redis can reply from time to time +# not a good thing in your environment that KeyDB can reply from time to time # to queries with 2 milliseconds delay. # # use "activerehashing yes" if you don't have such hard requirements but @@ -1840,21 +1839,21 @@ client-output-buffer-limit pubsub 32mb 8mb 60 # # client-query-buffer-limit 1gb -# In the Redis protocol, bulk requests, that are, elements representing single +# In the KeyDB protocol, bulk requests, that are, elements representing single # strings, are normally limited to 512 mb. However you can change this limit # here, but must be 1mb or greater # # proto-max-bulk-len 512mb -# Redis calls an internal function to perform many background tasks, like +# KeyDB calls an internal function to perform many background tasks, like # closing connections of clients in timeout, purging expired keys that are # never requested, and so forth. # -# Not all tasks are performed with the same frequency, but Redis checks for +# Not all tasks are performed with the same frequency, but KeyDB checks for # tasks to perform according to the specified "hz" value. # # By default "hz" is set to 10. Raising the value will use more CPU when -# Redis is idle, but at the same time will make Redis more responsive when +# KeyDB is idle, but at the same time will make KeyDB more responsive when # there are many keys expiring at the same time, and timeouts may be # handled with more precision. 
# @@ -1868,7 +1867,7 @@ hz 10 # avoid too many clients are processed for each background task invocation # in order to avoid latency spikes. # -# Since the default HZ value by default is conservatively set to 10, Redis +# Since the default HZ value by default is conservatively set to 10, KeyDB # offers, and enables by default, the ability to use an adaptive HZ value # which will temporarily raise when there are many connected clients. # @@ -1891,16 +1890,16 @@ aof-rewrite-incremental-fsync yes # big latency spikes. rdb-save-incremental-fsync yes -# Redis LFU eviction (see maxmemory setting) can be tuned. However it is a good +# KeyDB LFU eviction (see maxmemory setting) can be tuned. However it is a good # idea to start with the default settings and only change them after investigating # how to improve the performances and how the keys LFU change over time, which # is possible to inspect via the OBJECT FREQ command. # -# There are two tunable parameters in the Redis LFU implementation: the +# There are two tunable parameters in the KeyDB LFU implementation: the # counter logarithm factor and the counter decay time. It is important to # understand what the two parameters mean before changing them. # -# The LFU counter is just 8 bits per key, it's maximum value is 255, so Redis +# The LFU counter is just 8 bits per key, it's maximum value is 255, so KeyDB # uses a probabilistic increment with logarithmic behavior. Given the value # of the old counter, when a key is accessed, the counter is incremented in # this way: @@ -1952,7 +1951,7 @@ rdb-save-incremental-fsync yes # What is active defragmentation? # ------------------------------- # -# Active (online) defragmentation allows a Redis server to compact the +# Active (online) defragmentation allows a KeyDB server to compact the # spaces left between small allocations and deallocations of data in memory, # thus allowing to reclaim back memory. # @@ -1960,11 +1959,11 @@ rdb-save-incremental-fsync yes # less so with Jemalloc, fortunately) and certain workloads. Normally a server # restart is needed in order to lower the fragmentation, or at least to flush # away all the data and create it again. However thanks to this feature -# implemented by Oran Agra for Redis 4.0 this process can happen at runtime +# implemented by Oran Agra for KeyDB 4.0 this process can happen at runtime # in a "hot" way, while the server is running. # # Basically when the fragmentation is over a certain level (see the -# configuration options below) Redis will start to create new copies of the +# configuration options below) KeyDB will start to create new copies of the # values in contiguous memory regions by exploiting certain specific Jemalloc # features (in order to understand if an allocation is causing fragmentation # and to allocate it in a better place), and at the same time, will release the @@ -1973,8 +1972,8 @@ rdb-save-incremental-fsync yes # # Important things to understand: # -# 1. This feature is disabled by default, and only works if you compiled Redis -# to use the copy of Jemalloc we ship with the source code of Redis. +# 1. This feature is disabled by default, and only works if you compiled KeyDB +# to use the copy of Jemalloc we ship with the source code of KeyDB. # This is the default with Linux builds. # # 2. 
You never need to enable this feature if you don't have fragmentation @@ -2012,14 +2011,14 @@ rdb-save-incremental-fsync yes # Jemalloc background thread for purging will be enabled by default jemalloc-bg-thread yes -# It is possible to pin different threads and processes of Redis to specific +# It is possible to pin different threads and processes of KeyDB to specific # CPUs in your system, in order to maximize the performances of the server. -# This is useful both in order to pin different Redis threads in different -# CPUs, but also in order to make sure that multiple Redis instances running +# This is useful both in order to pin different KeyDB threads in different +# CPUs, but also in order to make sure that multiple KeyDB instances running # in the same host will be pinned to different CPUs. # # Normally you can do this using the "taskset" command, however it is also -# possible to this via Redis configuration directly, both in Linux and FreeBSD. +# possible to this via KeyDB configuration directly, both in Linux and FreeBSD. # # You can pin the server/IO threads, bio threads, aof rewrite child process, and # the bgsave child process. The syntax to specify the cpu list is the same as From 45480d3aaf935e1a7d4aa636ce663ac8e036b534 Mon Sep 17 00:00:00 2001 From: christianEQ Date: Fri, 11 Jun 2021 18:17:18 +0000 Subject: [PATCH 011/149] removed outdated comment in server.cpp Former-commit-id: 98f08034a13341059b9b1690989e7ef0207ac920 --- src/server.cpp | 4 ---- 1 file changed, 4 deletions(-) diff --git a/src/server.cpp b/src/server.cpp index 3d5c70faa..045938657 100644 --- a/src/server.cpp +++ b/src/server.cpp @@ -4635,10 +4635,6 @@ int prepareForShutdown(int flags) { overwrite the synchronous saving did by SHUTDOWN. */ if (g_pserver->child_type == CHILD_TYPE_RDB) { serverLog(LL_WARNING,"There is a child saving an .rdb. Killing it!"); - /* Note that, in killRDBChild, we call rdbRemoveTempFile that will - * do close fd(in order to unlink file actully) in background thread. - * The temp rdb file fd may won't be closed when redis exits quickly, - * but OS will close this fd when process exits. */ killRDBChild(); /* Note that, in killRDBChild normally has backgroundSaveDoneHandler * doing it's cleanup, but in this case this code will not be reached, From 39a3238a8f1cf28e1a128aeec4d11bf3480d8432 Mon Sep 17 00:00:00 2001 From: christianEQ Date: Fri, 11 Jun 2021 18:19:02 +0000 Subject: [PATCH 012/149] changed redis.pid to keydb.pid where necessary Former-commit-id: 76d9c7c82abe7aecc7301d82e755953a37513138 --- pkg/rpm/keydb_build/keydb_rpm/etc/keydb/keydb.conf | 6 +++--- tests/assets/default.conf | 4 ++-- tests/assets/minimal.conf | 2 +- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/pkg/rpm/keydb_build/keydb_rpm/etc/keydb/keydb.conf b/pkg/rpm/keydb_build/keydb_rpm/etc/keydb/keydb.conf index 9cf1773c7..d3e6ce9c7 100644 --- a/pkg/rpm/keydb_build/keydb_rpm/etc/keydb/keydb.conf +++ b/pkg/rpm/keydb_build/keydb_rpm/etc/keydb/keydb.conf @@ -131,8 +131,8 @@ tcp-keepalive 300 ################################# GENERAL ##################################### -# By default Redis does not run as a daemon. Use 'yes' if you need it. -# Note that Redis will write a pid file in /var/run/redis.pid when daemonized. +# By default KeyDB does not run as a daemon. Use 'yes' if you need it. +# Note that KeyDB will write a pid file in /var/run/keydb.pid when daemonized. 
daemonize yes # If you run Redis from upstart or systemd, Redis can interact with your @@ -151,7 +151,7 @@ supervised no # # When the server runs non daemonized, no pid file is created if none is # specified in the configuration. When the server is daemonized, the pid file -# is used even if not specified, defaulting to "/var/run/redis.pid". +# is used even if not specified, defaulting to "/var/run/keydb.pid". # # Creating a pid file is best effort: if Redis is not able to create it # nothing bad happens, the server will start and run normally. diff --git a/tests/assets/default.conf b/tests/assets/default.conf index d7b8a75c6..995d5206c 100644 --- a/tests/assets/default.conf +++ b/tests/assets/default.conf @@ -1,9 +1,9 @@ -# Redis configuration for testing. +# KeyDB configuration for testing. always-show-logo yes notify-keyspace-events KEA daemonize no -pidfile /var/run/redis.pid +pidfile /var/run/keydb.pid port 6379 timeout 0 bind 127.0.0.1 diff --git a/tests/assets/minimal.conf b/tests/assets/minimal.conf index ae14ae87b..ed49223c9 100644 --- a/tests/assets/minimal.conf +++ b/tests/assets/minimal.conf @@ -1,5 +1,5 @@ # Minimal configuration for testing. always-show-logo yes daemonize no -pidfile /var/run/redis.pid +pidfile /var/run/keydb.pid loglevel verbose From f8289cebcc764288d19a9a999234cfe15a1614c4 Mon Sep 17 00:00:00 2001 From: christianEQ Date: Fri, 11 Jun 2021 18:19:59 +0000 Subject: [PATCH 013/149] removed unreliable musl test and left only accurate new one Former-commit-id: 386be8990a83fcc5d57aa20a268544a877c2cfd7 --- tests/integration/logging.tcl | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/tests/integration/logging.tcl b/tests/integration/logging.tcl index 742471e0b..40713ed2e 100644 --- a/tests/integration/logging.tcl +++ b/tests/integration/logging.tcl @@ -1,12 +1,10 @@ set system_name [string tolower [exec uname -s]] -# ldd --version returns 1 under musl for unknown reasons. 
If this check stops working, that may be why -set is_musl [catch {exec ldd --version}] set system_supported 0 # We only support darwin or Linux with glibc if {$system_name eq {darwin}} { set system_supported 1 -} elseif {$system_name eq {linux} && $is_musl eq 0} { +} elseif {$system_name eq {linux}} { # Avoid the test on libmusl, which does not support backtrace set ldd [exec ldd src/keydb-server] if {![string match {*libc.musl*} $ldd]} { From cb3f3d1b7ee0e0d4001132605d70f4408fbd83ae Mon Sep 17 00:00:00 2001 From: christianEQ Date: Fri, 11 Jun 2021 19:09:40 +0000 Subject: [PATCH 014/149] renamed redis test files Former-commit-id: 1c77104b5efcdfd1fce6a4a946e8a1ead35dc7f0 --- tests/helpers/bg_block_op.tcl | 2 +- tests/helpers/bg_complex_data.tcl | 2 +- tests/helpers/gen_write_load.tcl | 2 +- tests/instances.tcl | 2 +- .../{redis-benchmark.tcl => keydb-benchmark.tcl} | 0 tests/integration/{redis-cli.tcl => keydb-cli.tcl} | 0 tests/support/{redis.tcl => keydb.tcl} | 0 tests/test_helper.tcl | 6 +++--- utils/speed-regression.tcl | 2 +- 9 files changed, 8 insertions(+), 8 deletions(-) rename tests/integration/{redis-benchmark.tcl => keydb-benchmark.tcl} (100%) rename tests/integration/{redis-cli.tcl => keydb-cli.tcl} (100%) rename tests/support/{redis.tcl => keydb.tcl} (100%) diff --git a/tests/helpers/bg_block_op.tcl b/tests/helpers/bg_block_op.tcl index c8b323308..f76c22381 100644 --- a/tests/helpers/bg_block_op.tcl +++ b/tests/helpers/bg_block_op.tcl @@ -1,4 +1,4 @@ -source tests/support/redis.tcl +source tests/support/keydb.tcl source tests/support/util.tcl set ::tlsdir "tests/tls" diff --git a/tests/helpers/bg_complex_data.tcl b/tests/helpers/bg_complex_data.tcl index e888748a7..606ed8f8b 100644 --- a/tests/helpers/bg_complex_data.tcl +++ b/tests/helpers/bg_complex_data.tcl @@ -1,4 +1,4 @@ -source tests/support/redis.tcl +source tests/support/keydb.tcl source tests/support/util.tcl set ::tlsdir "tests/tls" diff --git a/tests/helpers/gen_write_load.tcl b/tests/helpers/gen_write_load.tcl index cbf6651bd..200026af6 100644 --- a/tests/helpers/gen_write_load.tcl +++ b/tests/helpers/gen_write_load.tcl @@ -1,4 +1,4 @@ -source tests/support/redis.tcl +source tests/support/keydb.tcl set ::tlsdir "tests/tls" diff --git a/tests/instances.tcl b/tests/instances.tcl index e494e741c..ce5518231 100644 --- a/tests/instances.tcl +++ b/tests/instances.tcl @@ -10,7 +10,7 @@ package require Tcl 8.5 set tcl_precision 17 -source ../support/redis.tcl +source ../support/keydb.tcl source ../support/util.tcl source ../support/server.tcl source ../support/test.tcl diff --git a/tests/integration/redis-benchmark.tcl b/tests/integration/keydb-benchmark.tcl similarity index 100% rename from tests/integration/redis-benchmark.tcl rename to tests/integration/keydb-benchmark.tcl diff --git a/tests/integration/redis-cli.tcl b/tests/integration/keydb-cli.tcl similarity index 100% rename from tests/integration/redis-cli.tcl rename to tests/integration/keydb-cli.tcl diff --git a/tests/support/redis.tcl b/tests/support/keydb.tcl similarity index 100% rename from tests/support/redis.tcl rename to tests/support/keydb.tcl diff --git a/tests/test_helper.tcl b/tests/test_helper.tcl index f942ec838..c4abcf2f3 100644 --- a/tests/test_helper.tcl +++ b/tests/test_helper.tcl @@ -5,7 +5,7 @@ package require Tcl 8.5 set tcl_precision 17 -source tests/support/redis.tcl +source tests/support/keydb.tcl source tests/support/server.tcl source tests/support/tmpfile.tcl source tests/support/test.tcl @@ -57,8 +57,8 @@ set ::all_tests { 
integration/psync2-reg integration/psync2-pingoff integration/failover - integration/redis-cli - integration/redis-benchmark + integration/keydb-cli + integration/keydb-benchmark unit/pubsub unit/slowlog unit/scripting diff --git a/utils/speed-regression.tcl b/utils/speed-regression.tcl index 8d5220c75..1e9181d41 100755 --- a/utils/speed-regression.tcl +++ b/utils/speed-regression.tcl @@ -2,7 +2,7 @@ # Copyright (C) 2011 Salvatore Sanfilippo # Released under the BSD license like Redis itself -source ../tests/support/redis.tcl +source ../tests/support/keydb.tcl set ::port 12123 set ::tests {PING,SET,GET,INCR,LPUSH,LPOP,SADD,SPOP,LRANGE_100,LRANGE_600,MSET} set ::datasize 16 From ac9fadc3dd603d2edcfb5d2c83a3dd44b1ca0852 Mon Sep 17 00:00:00 2001 From: christianEQ Date: Fri, 11 Jun 2021 19:10:12 +0000 Subject: [PATCH 015/149] renamed workspace gitignore Former-commit-id: 046f5756325992752b51d7ac601fd84d4faa53ce --- .gitignore | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.gitignore b/.gitignore index 2fa8255df..6c2d3a16e 100644 --- a/.gitignore +++ b/.gitignore @@ -58,4 +58,4 @@ Makefile.dep .ccls .ccls-cache/* compile_commands.json -redis.code-workspace +keydb.code-workspace From 53be1b7fba86d3b4023f1e8a655a0a858d60f15a Mon Sep 17 00:00:00 2001 From: christianEQ Date: Fri, 11 Jun 2021 19:10:35 +0000 Subject: [PATCH 016/149] renamed redis mention in readme Former-commit-id: 34b0209f05d4f44e4b14278ad746f1eda679b32a --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 9e7506e64..14c0dddf3 100644 --- a/README.md +++ b/README.md @@ -179,7 +179,7 @@ To compile against jemalloc on Mac OS X systems, use: Monotonic clock --------------- -By default, Redis will build using the POSIX clock_gettime function as the +By default, KeyDB will build using the POSIX clock_gettime function as the monotonic clock source. On most modern systems, the internal processor clock can be used to improve performance. 
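As a minimal illustration of the default time source described above (a sketch assuming a POSIX/glibc target where clock_gettime and CLOCK_MONOTONIC are available; it is not taken from the KeyDB sources):

    #include <stdio.h>
    #include <time.h>

    int main() {
        struct timespec ts;
        /* Query the POSIX monotonic clock, the source the default build uses. */
        if (clock_gettime(CLOCK_MONOTONIC, &ts) != 0) {
            perror("clock_gettime");
            return 1;
        }
        /* Collapse seconds + nanoseconds into one monotonic nanosecond count. */
        long long ns = (long long)ts.tv_sec * 1000000000LL + ts.tv_nsec;
        printf("monotonic time: %lld ns\n", ns);
        return 0;
    }

Whether the processor's internal clock outperforms this call depends on the hardware and workload, which is why the caution below matters.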
Cautions can be found here: http://oliveryang.net/2015/09/pitfalls-of-TSC-usage/ From 974dc4918a0c22f780b7c44fd6b8eabb03749117 Mon Sep 17 00:00:00 2001 From: christianEQ Date: Fri, 11 Jun 2021 19:11:12 +0000 Subject: [PATCH 017/149] renamed redis mentions in runtest files Former-commit-id: 2e894a512b30a8317619e55fa30a218306f8514f --- runtest-cluster | 2 +- runtest-moduleapi | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/runtest-cluster b/runtest-cluster index a86e93141..3a6d20978 100755 --- a/runtest-cluster +++ b/runtest-cluster @@ -8,7 +8,7 @@ done if [ -z $TCLSH ] then - echo "You need tcl 8.5 or newer in order to run the Redis Cluster test" + echo "You need tcl 8.5 or newer in order to run the KeyDB Cluster test" exit 1 fi $TCLSH tests/cluster/run.tcl $* diff --git a/runtest-moduleapi b/runtest-moduleapi index dc4c9e1ea..154818ed8 100755 --- a/runtest-moduleapi +++ b/runtest-moduleapi @@ -9,7 +9,7 @@ done if [ -z $TCLSH ] then - echo "You need tcl 8.5 or newer in order to run the Redis ModuleApi test" + echo "You need tcl 8.5 or newer in order to run the KeyDB ModuleApi test" exit 1 fi From 2c78e8c046d04559fcc3aa741dc8f7074f3d17a3 Mon Sep 17 00:00:00 2001 From: christianEQ Date: Fri, 11 Jun 2021 19:16:33 +0000 Subject: [PATCH 018/149] several redis->keydb changes in sentinel.conf Former-commit-id: d2ef0594711c83ec9223d2e3b26fce9b5a4f685f --- sentinel.conf | 48 ++++++++++++++++++++++++------------------------ 1 file changed, 24 insertions(+), 24 deletions(-) diff --git a/sentinel.conf b/sentinel.conf index 5ab5a8b5c..917fe8a6e 100644 --- a/sentinel.conf +++ b/sentinel.conf @@ -20,12 +20,12 @@ # The port that this sentinel instance will run on port 26379 -# By default Redis Sentinel does not run as a daemon. Use 'yes' if you need it. -# Note that Redis will write a pid file in /var/run/keydb-sentinel.pid when +# By default KeyDB Sentinel does not run as a daemon. Use 'yes' if you need it. +# Note that KeyDB will write a pid file in /var/run/keydb-sentinel.pid when # daemonized. daemonize no -# When running daemonized, Redis Sentinel writes a pid file in +# When running daemonized, KeyDB Sentinel writes a pid file in # /var/run/keydb-sentinel.pid by default. You can specify a custom pid file # location here. pidfile /var/run/keydb-sentinel.pid @@ -59,7 +59,7 @@ logfile "" # dir # Every long running process should have a well-defined working directory. -# For Redis Sentinel to chdir to /tmp at startup is the simplest thing +# For KeyDB Sentinel to chdir to /tmp at startup is the simplest thing # for the process to don't interfere with administrative tasks such as # unmounting filesystems. dir /tmp @@ -86,16 +86,16 @@ sentinel monitor mymaster 127.0.0.1 6379 2 # sentinel auth-pass # # Set the password to use to authenticate with the master and replicas. -# Useful if there is a password set in the Redis instances to monitor. +# Useful if there is a password set in the KeyDB instances to monitor. # # Note that the master password is also used for replicas, so it is not # possible to set a different password in masters and replicas instances # if you want to be able to monitor these instances with Sentinel. 
# -# However you can have Redis instances without the authentication enabled -# mixed with Redis instances requiring the authentication (as long as the +# However you can have KeyDB instances without the authentication enabled +# mixed with KeyDB instances requiring the authentication (as long as the # password set is the same for all the instances requiring the password) as -# the AUTH command will have no effect in Redis instances with authentication +# the AUTH command will have no effect in KeyDB instances with authentication # switched off. # # Example: @@ -105,10 +105,10 @@ sentinel monitor mymaster 127.0.0.1 6379 2 # sentinel auth-user # # This is useful in order to authenticate to instances having ACL capabilities, -# that is, running Redis 6.0 or greater. When just auth-pass is provided the -# Sentinel instance will authenticate to Redis using the old "AUTH " +# that is, running KeyDB 6.0 or greater. When just auth-pass is provided the +# Sentinel instance will authenticate to KeyDB using the old "AUTH " # method. When also an username is provided, it will use "AUTH ". -# In the Redis servers side, the ACL to provide just minimal access to +# In the KeyDB servers side, the ACL to provide just minimal access to # Sentinel instances, should be configured along the following lines: # # user sentinel-user >somepassword +client +subscribe +publish \ @@ -125,7 +125,7 @@ sentinel monitor mymaster 127.0.0.1 6379 2 sentinel down-after-milliseconds mymaster 30000 # IMPORTANT NOTE: starting with KeyDB 6.2 ACL capability is supported for -# Sentinel mode, please refer to the KeyDB website https://redis.io/topics/acl +# Sentinel mode, please refer to the Redis website https://redis.io/topics/acl # for more details. # Sentinel's ACL users are defined in the following format: @@ -137,8 +137,8 @@ sentinel down-after-milliseconds mymaster 30000 # user worker +@admin +@connection ~* on >ffa9203c493aa99 # # For more information about ACL configuration please refer to the Redis -# website at https://redis.io/topics/acl and redis server configuration -# template redis.conf. +# website at https://redis.io/topics/acl and KeyDB server configuration +# template keydb.conf. # ACL LOG # @@ -156,9 +156,9 @@ acllog-max-len 128 # ACL file, the server will refuse to start. # # The format of the external ACL user file is exactly the same as the -# format that is used inside redis.conf to describe users. +# format that is used inside keydb.conf to describe users. # -# aclfile /etc/redis/sentinel-users.acl +# aclfile /etc/keydb/sentinel-users.acl # requirepass # @@ -168,7 +168,7 @@ acllog-max-len 128 # group with the same "requirepass" password. Check the following documentation # for more info: https://redis.io/topics/sentinel # -# IMPORTANT NOTE: starting with Redis 6.2 "requirepass" is a compatibility +# IMPORTANT NOTE: starting with KeyDB 6.2 "requirepass" is a compatibility # layer on top of the ACL system. The option effect will be just setting # the password for the default user. Clients will still authenticate using # AUTH as usually, or more explicitly with AUTH default @@ -251,7 +251,7 @@ sentinel failover-timeout mymaster 180000 # generated in the WARNING level (for instance -sdown, -odown, and so forth). # This script should notify the system administrator via email, SMS, or any # other messaging system, that there is something wrong with the monitored -# Redis systems. +# KeyDB systems. 
# # The script is called with just two arguments: the first is the event type # and the second the event description. @@ -261,7 +261,7 @@ sentinel failover-timeout mymaster 180000 # # Example: # -# sentinel notification-script mymaster /var/redis/notify.sh +# sentinel notification-script mymaster /var/keydb/notify.sh # CLIENTS RECONFIGURATION SCRIPT # @@ -286,7 +286,7 @@ sentinel failover-timeout mymaster 180000 # # Example: # -# sentinel client-reconfig-script mymaster /var/redis/reconfig.sh +# sentinel client-reconfig-script mymaster /var/keydb/reconfig.sh # SECURITY # @@ -297,11 +297,11 @@ sentinel failover-timeout mymaster 180000 sentinel deny-scripts-reconfig yes -# REDIS COMMANDS RENAMING +# KEYDB COMMANDS RENAMING # -# Sometimes the Redis server has certain commands, that are needed for Sentinel +# Sometimes the KeyDB server has certain commands, that are needed for Sentinel # to work correctly, renamed to unguessable strings. This is often the case -# of CONFIG and SLAVEOF in the context of providers that provide Redis as +# of CONFIG and SLAVEOF in the context of providers that provide KeyDB as # a service, and don't want the customers to reconfigure the instances outside # of the administration console. # @@ -325,7 +325,7 @@ sentinel deny-scripts-reconfig yes # HOSTNAMES SUPPORT # # Normally Sentinel uses only IP addresses and requires SENTINEL MONITOR -# to specify an IP address. Also, it requires the Redis replica-announce-ip +# to specify an IP address. Also, it requires the KeyDB replica-announce-ip # keyword to specify only IP addresses. # # You may enable hostnames support by enabling resolve-hostnames. Note From b35847e7093dd9dd2bb3bb43e5c4af6105eeae69 Mon Sep 17 00:00:00 2001 From: christianEQ Date: Mon, 14 Jun 2021 16:09:42 +0000 Subject: [PATCH 019/149] changed redis_flags names to keydb in makefile Former-commit-id: 07273936271536e4a57efdb7a31bc34e5b8d50f1 --- .github/workflows/ci.yml | 6 +++--- src/Makefile | 24 ++++++++++++------------ 2 files changed, 15 insertions(+), 15 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 9086bb701..bce8f20b8 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -12,7 +12,7 @@ jobs: run: | sudo apt-get update sudo apt-get -y install uuid-dev libcurl4-openssl-dev - make REDIS_CFLAGS='-Werror' REDIS_CXXFLAGS='-Werror' BUILD_TLS=yes -j2 + make KEYDB_CFLAGS='-Werror' KEYDB_CXXFLAGS='-Werror' BUILD_TLS=yes -j2 - name: gen-cert run: ./utils/gen-test-certs.sh - name: test-tls @@ -44,7 +44,7 @@ jobs: steps: - uses: actions/checkout@v2 - name: make - run: make REDIS_CFLAGS='-Werror' REDIS_CXXFLAGS='-Werror' -j2 + run: make KEYDB_CFLAGS='-Werror' KEYDB_CXXFLAGS='-Werror' -j2 build-libc-malloc: runs-on: ubuntu-latest @@ -54,5 +54,5 @@ jobs: run: | sudo apt-get update sudo apt-get -y install uuid-dev libcurl4-openssl-dev - make REDIS_CFLAGS='-Werror' REDIS_CXXFLAGS='-Werror' MALLOC=libc -j2 + make KEYDB_CFLAGS='-Werror' KEYDB_CXXFLAGS='-Werror' MALLOC=libc -j2 diff --git a/src/Makefile b/src/Makefile index f0dea3dac..c7dfeea4d 100644 --- a/src/Makefile +++ b/src/Makefile @@ -3,11 +3,11 @@ # This file is released under the BSD license, see the COPYING file # # The Makefile composes the final FINAL_CFLAGS and FINAL_LDFLAGS using -# what is needed for Redis plus the standard CFLAGS and LDFLAGS passed. +# what is needed for KeyDB plus the standard CFLAGS and LDFLAGS passed. # However when building the dependencies (Jemalloc, Lua, Hiredis, ...) 
# CFLAGS and LDFLAGS are propagated to the dependencies, so to pass -# flags only to be used when compiling / linking Redis itself REDIS_CFLAGS -# and REDIS_LDFLAGS are used instead (this is the case of 'make gcov'). +# flags only to be used when compiling / linking KeyDB itself KEYDB_CFLAGS +# and KEYDB_LDFLAGS are used instead (this is the case of 'make gcov'). # # Dependencies are stored in the Makefile.dep file. To rebuild this file # Just use 'make dep', but this is only needed by developers. @@ -85,7 +85,7 @@ ifeq ($(COMPILER_NAME),clang) LDFLAGS+= -latomic endif -# To get ARM stack traces if Redis crashes we need a special C flag. +# To get ARM stack traces if KeyDB crashes we need a special C flag. ifneq (,$(filter aarch64 armv,$(uname_M))) CFLAGS+=-funwind-tables CXXFLAGS+=-funwind-tables @@ -116,9 +116,9 @@ endif # Override default settings if possible -include .make-settings -FINAL_CFLAGS=$(STD) $(WARN) $(OPT) $(DEBUG) $(CFLAGS) $(REDIS_CFLAGS) -FINAL_CXXFLAGS=$(CXX_STD) $(WARN) $(OPT) $(DEBUG) $(CXXFLAGS) $(REDIS_CFLAGS) -FINAL_LDFLAGS=$(LDFLAGS) $(REDIS_LDFLAGS) $(DEBUG) +FINAL_CFLAGS=$(STD) $(WARN) $(OPT) $(DEBUG) $(CFLAGS) $(KEYDB_CFLAGS) +FINAL_CXXFLAGS=$(CXX_STD) $(WARN) $(OPT) $(DEBUG) $(CXXFLAGS) $(KEYDB_CFLAGS) +FINAL_LDFLAGS=$(LDFLAGS) $(KEYDB_LDFLAGS) $(DEBUG) FINAL_LIBS+=-lm DEBUG=-g -ggdb @@ -365,9 +365,9 @@ persist-settings: distclean echo CFLAGS=$(CFLAGS) >> .make-settings echo CXXFLAGS=$(CXXFLAGS) >> .make-settings echo LDFLAGS=$(LDFLAGS) >> .make-settings - echo REDIS_CFLAGS=$(REDIS_CFLAGS) >> .make-settings - echo REDIS_CXXFLAGS=$(REDIS_CXXFLAGS) >> .make-settings - echo REDIS_LDFLAGS=$(REDIS_LDFLAGS) >> .make-settings + echo KEYDB_CFLAGS=$(KEYDB_CFLAGS) >> .make-settings + echo KEYDB_CXXFLAGS=$(KEYDB_CXXFLAGS) >> .make-settings + echo KEYDB_LDFLAGS=$(KEYDB_LDFLAGS) >> .make-settings echo PREV_FINAL_CFLAGS=$(FINAL_CFLAGS) >> .make-settings echo PREV_FINAL_CXXFLAGS=$(FINAL_CXXFLAGS) >> .make-settings echo PREV_FINAL_LDFLAGS=$(FINAL_LDFLAGS) >> .make-settings @@ -468,7 +468,7 @@ bench: $(REDIS_BENCHMARK_NAME) $(MAKE) CXXFLAGS="-m32" CFLAGS="-m32" LDFLAGS="-m32" gcov: - $(MAKE) REDIS_CXXFLAGS="-fprofile-arcs -ftest-coverage -DCOVERAGE_TEST" REDIS_CFLAGS="-fprofile-arcs -ftest-coverage -DCOVERAGE_TEST" REDIS_LDFLAGS="-fprofile-arcs -ftest-coverage" + $(MAKE) KEYDB_CXXFLAGS="-fprofile-arcs -ftest-coverage -DCOVERAGE_TEST" KEYDB_CFLAGS="-fprofile-arcs -ftest-coverage -DCOVERAGE_TEST" KEYDB_LDFLAGS="-fprofile-arcs -ftest-coverage" noopt: $(MAKE) OPTIMIZATION="-O0" @@ -477,7 +477,7 @@ valgrind: $(MAKE) OPTIMIZATION="-O0" USEASM="false" MALLOC="libc" CFLAGS="-DSANITIZE" CXXFLAGS="-DSANITIZE" helgrind: - $(MAKE) OPTIMIZATION="-O0" MALLOC="libc" CFLAGS="-D__ATOMIC_VAR_FORCE_SYNC_MACROS" REDIS_CFLAGS="-I/usr/local/include" REDIS_LDFLAGS="-L/usr/local/lib" + $(MAKE) OPTIMIZATION="-O0" MALLOC="libc" CFLAGS="-D__ATOMIC_VAR_FORCE_SYNC_MACROS" KEYDB_CFLAGS="-I/usr/local/include" KEYDB_LDFLAGS="-L/usr/local/lib" src/help.h: @../utils/generate-command-help.rb > help.h From ebe799f8c014cf60cabbbee269b6a5adc720a4c4 Mon Sep 17 00:00:00 2001 From: malavan Date: Thu, 17 Jun 2021 08:44:29 +0000 Subject: [PATCH 020/149] add CI for gitlab Former-commit-id: 37ba6d28030f4cf1866cc9c182833fe247348f38 --- .gitlab-ci.yml | 22 ++++++++++++++++++++++ 1 file changed, 22 insertions(+) create mode 100644 .gitlab-ci.yml diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml new file mode 100644 index 000000000..0be416426 --- /dev/null +++ b/.gitlab-ci.yml @@ -0,0 +1,22 @@ +build: + stage: build + script: + - 
make + - make install + +make-test: + stage: test + script: + - make test + +node-redis-test: + stage: test + script: + - mkdir node-redis-test + - cd node-redis-test + - git clone https://gitlab.eqalpha.com/keydb-dev/node-redis.git + - cd node-redis + - npm install + - npm run test + - cd ../.. + - rm -rf node-redis-test \ No newline at end of file From 73f522d854bc020b2031947b107a446a2068bb36 Mon Sep 17 00:00:00 2001 From: malavan Date: Thu, 17 Jun 2021 18:30:11 +0000 Subject: [PATCH 021/149] clean before build Former-commit-id: 49805ee385381486476d2986cca9e9c3d50d68b5 --- .gitlab-ci.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 0be416426..e6f9a624d 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -1,6 +1,7 @@ build: stage: build script: + - make distclean - make - make install From 88149ed9e733bd678ab13ef678e3b46f96615af1 Mon Sep 17 00:00:00 2001 From: christianEQ Date: Thu, 17 Jun 2021 18:32:11 +0000 Subject: [PATCH 022/149] accurate cpu usage in diagnostic tool Former-commit-id: 8ee7584cffc5c5cacfb7ad20fc964112974683e4 --- src/keydb-diagnostic-tool.cpp | 42 ++++++++++++++++++++++++----------- 1 file changed, 29 insertions(+), 13 deletions(-) diff --git a/src/keydb-diagnostic-tool.cpp b/src/keydb-diagnostic-tool.cpp index 3dced8523..bc5778139 100644 --- a/src/keydb-diagnostic-tool.cpp +++ b/src/keydb-diagnostic-tool.cpp @@ -863,6 +863,27 @@ int extractPropertyFromInfo(const char *info, const char *key, double &val) { return 0; } +double getServerCpuTime(redisContext *ctx) { + redisReply *reply = (redisReply*)redisCommand(ctx, "INFO"); + if (reply->type != REDIS_REPLY_STRING) { + freeReplyObject(reply); + printf("Error executing INFO command. Exiting.\r\n"); + return -1; + } + + double used_cpu_user, used_cpu_sys; + if (extractPropertyFromInfo(reply->str, "used_cpu_user", used_cpu_user)) { + printf("Error reading user CPU usage from INFO command. Exiting.\r\n"); + return -1; + } + if (extractPropertyFromInfo(reply->str, "used_cpu_sys", used_cpu_sys)) { + printf("Error reading system CPU usage from INFO command. Exiting.\r\n"); + return -1; + } + freeReplyObject(reply); + return used_cpu_user + used_cpu_sys; +} + int main(int argc, const char **argv) { int i; @@ -898,11 +919,12 @@ int main(int argc, const char **argv) { const char *set_value = "abcdefghijklmnopqrstuvwxyz"; int threads_used = 0; + unsigned int period = 5; char command[63]; initBenchmarkThreads(); redisContext *ctx = getRedisContext(config.hostip, config.hostport, config.hostsocket); - double cpu_usage; + double cpu_usage, last_cpu_usage = getServerCpuTime(ctx); while (threads_used < config.max_threads) { printf("Creating %d clients for thread %d...\n", config.numclients, threads_used); @@ -920,20 +942,14 @@ int main(int argc, const char **argv) { } threads_used++; - sleep(1); - - redisReply *reply = (redisReply*)redisCommand(ctx, "INFO"); - if (reply->type != REDIS_REPLY_STRING) { - freeReplyObject(reply); - printf("Error executing INFO command. Exiting.\r\n"); + sleep(period); + + cpu_usage = getServerCpuTime(ctx); + if (cpu_usage < 0) { break; } - if (extractPropertyFromInfo(reply->str, "used_cpu_sys", cpu_usage)) { - printf("Error reading CPU usage from INFO command. 
Exiting.\r\n"); - break; - } - printf("CPU Usage: %f\r\n", cpu_usage); - freeReplyObject(reply); + printf("CPU Usage: %.1f%%\r\n", (cpu_usage - last_cpu_usage) * 100 / period); + last_cpu_usage = cpu_usage; } printf("Done.\n"); From 35d9cd933768f475722b8d86d3b29f741806dfce Mon Sep 17 00:00:00 2001 From: malavan Date: Thu, 17 Jun 2021 18:52:36 +0000 Subject: [PATCH 023/149] init git submodule Former-commit-id: e96906d095fed6b37c9ece79526cfd0a88819e4d --- .gitlab-ci.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index e6f9a624d..af283591a 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -1,6 +1,7 @@ build: stage: build script: + - git submodule init && git submodule update - make distclean - make - make install From 940565fe21c766d143fd6daa39142a43e2ac8972 Mon Sep 17 00:00:00 2001 From: malavan Date: Thu, 17 Jun 2021 20:38:56 +0000 Subject: [PATCH 024/149] uninstall before install Former-commit-id: f30afe82d799ad680343cdcb7b80e8c257026ff7 --- .gitlab-ci.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index af283591a..57246f581 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -4,6 +4,7 @@ build: - git submodule init && git submodule update - make distclean - make + - make uninstall - make install make-test: From ef3ba1699db28af0898baf935cc66d7018d92429 Mon Sep 17 00:00:00 2001 From: christianEQ Date: Thu, 17 Jun 2021 20:40:10 +0000 Subject: [PATCH 025/149] detect client cpu usage in diagnostic tool Former-commit-id: 693450393c848679b60c5a9bf55428ae1d4f769f --- src/keydb-diagnostic-tool.cpp | 25 ++++++++++++++++++++----- 1 file changed, 20 insertions(+), 5 deletions(-) diff --git a/src/keydb-diagnostic-tool.cpp b/src/keydb-diagnostic-tool.cpp index bc5778139..176939bb8 100644 --- a/src/keydb-diagnostic-tool.cpp +++ b/src/keydb-diagnostic-tool.cpp @@ -36,6 +36,7 @@ #include #include #include +#include #include #include #include @@ -863,6 +864,13 @@ int extractPropertyFromInfo(const char *info, const char *key, double &val) { return 0; } +double getSelfCpuTime(struct rusage *self_ru) { + getrusage(RUSAGE_SELF, self_ru); + double user_time = self_ru->ru_utime.tv_sec + (self_ru->ru_utime.tv_usec / (double)1000000); + double system_time = self_ru->ru_stime.tv_sec + (self_ru->ru_stime.tv_usec / (double)1000000); + return user_time + system_time; +} + double getServerCpuTime(redisContext *ctx) { redisReply *reply = (redisReply*)redisCommand(ctx, "INFO"); if (reply->type != REDIS_REPLY_STRING) { @@ -924,7 +932,10 @@ int main(int argc, const char **argv) { initBenchmarkThreads(); redisContext *ctx = getRedisContext(config.hostip, config.hostport, config.hostsocket); - double cpu_usage, last_cpu_usage = getServerCpuTime(ctx); + double server_cpu_time, last_server_cpu_time = getServerCpuTime(ctx); + struct rusage self_ru; + double self_cpu_time, last_self_cpu_time = getSelfCpuTime(&self_ru); + while (threads_used < config.max_threads) { printf("Creating %d clients for thread %d...\n", config.numclients, threads_used); @@ -944,12 +955,16 @@ int main(int argc, const char **argv) { sleep(period); - cpu_usage = getServerCpuTime(ctx); - if (cpu_usage < 0) { + server_cpu_time = getServerCpuTime(ctx); + self_cpu_time = getSelfCpuTime(&self_ru); + if (server_cpu_time < 0) { break; } - printf("CPU Usage: %.1f%%\r\n", (cpu_usage - last_cpu_usage) * 100 / period); - last_cpu_usage = cpu_usage; + printf("CPU Usage Self: %.1f%%, Server: %.1f%%\r\n", + (self_cpu_time - last_self_cpu_time) * 100 / period, + (server_cpu_time - 
last_server_cpu_time) * 100 / period); + last_server_cpu_time = server_cpu_time; + last_self_cpu_time = self_cpu_time; } printf("Done.\n"); From d55e6ce9acdd570811c6d887fc47c54613301282 Mon Sep 17 00:00:00 2001 From: malavan Date: Fri, 18 Jun 2021 15:23:42 +0000 Subject: [PATCH 026/149] parallel make, order tests Former-commit-id: d01afd8324e415b92a292c199703999df1f72457 --- .gitlab-ci.yml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 57246f581..95a85cee8 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -3,7 +3,7 @@ build: script: - git submodule init && git submodule update - make distclean - - make + - make -j - make uninstall - make install @@ -13,6 +13,7 @@ make-test: - make test node-redis-test: + requires: make-test stage: test script: - mkdir node-redis-test From 2f184712ab0ba9f53abe790ac6391ad77e32a926 Mon Sep 17 00:00:00 2001 From: malavan Date: Fri, 18 Jun 2021 15:27:30 +0000 Subject: [PATCH 027/149] wrong syntax on last commit Former-commit-id: 587ed953fa8c6a9ed9faf1c406e813ba79e9b7bf --- .gitlab-ci.yml | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 95a85cee8..b8123ce46 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -8,13 +8,12 @@ build: - make install make-test: - stage: test + stage: make test script: - make test node-redis-test: - requires: make-test - stage: test + stage: node redis test script: - mkdir node-redis-test - cd node-redis-test From 954dfe46a16ac0b833bca7fa4bd41f5631135e3d Mon Sep 17 00:00:00 2001 From: malavan Date: Fri, 18 Jun 2021 15:32:58 +0000 Subject: [PATCH 028/149] add src/ as an artifact Former-commit-id: 315bd739a9db97c4e680212dfd31580828c3fcc6 --- .gitlab-ci.yml | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index b8123ce46..6d71c9f32 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -6,14 +6,19 @@ build: - make -j - make uninstall - make install + artifacts: + paths: + - src/ make-test: - stage: make test + needs: ["build"] + stage: test script: - make test node-redis-test: - stage: node redis test + needs: ["make-test"] + stage: test script: - mkdir node-redis-test - cd node-redis-test From 6e01abb2563d4cea6f854d712297e5c448a081f8 Mon Sep 17 00:00:00 2001 From: malavan Date: Fri, 18 Jun 2021 15:34:18 +0000 Subject: [PATCH 029/149] syntax error in last commit Former-commit-id: 4638526c0e68d9b153a5ea11f8ef83b0ad2f42de --- .gitlab-ci.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 6d71c9f32..13d82f930 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -17,7 +17,7 @@ make-test: - make test node-redis-test: - needs: ["make-test"] + needs: ["build"] stage: test script: - mkdir node-redis-test From 063e7f3ab2d109355aebda15f601043a05427e63 Mon Sep 17 00:00:00 2001 From: malavan Date: Fri, 18 Jun 2021 16:16:19 +0000 Subject: [PATCH 030/149] force test to run even if build hangs Former-commit-id: e7a68417ae133402e1f53a6dbbdabbbfc2b3edf7 --- .gitlab-ci.yml | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 13d82f930..2a104e9e9 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -9,15 +9,14 @@ build: artifacts: paths: - src/ + allow_failure: true make-test: - needs: ["build"] stage: test script: - make test node-redis-test: - needs: ["build"] stage: test script: - mkdir node-redis-test From 63f42ae39ae03a683efdcdf77530446ed5683250 Mon Sep 17 00:00:00 2001 
From: malavan Date: Fri, 18 Jun 2021 16:19:11 +0000 Subject: [PATCH 031/149] remove allow_failure Former-commit-id: 55f62ea9f6ea4c6adefc81ad90a8bd1567406b72 --- .gitlab-ci.yml | 1 - 1 file changed, 1 deletion(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 2a104e9e9..0551498ac 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -9,7 +9,6 @@ build: artifacts: paths: - src/ - allow_failure: true make-test: stage: test From 1c22752f45b1d6a45958ef1c96285e4c3ef10d18 Mon Sep 17 00:00:00 2001 From: malavan Date: Fri, 18 Jun 2021 16:32:15 +0000 Subject: [PATCH 032/149] remove artifacts Former-commit-id: dd62d85dd101b1f9947af0b335874fd28fee21bc --- .gitlab-ci.yml | 3 --- 1 file changed, 3 deletions(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 0551498ac..c6675be5c 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -6,9 +6,6 @@ build: - make -j - make uninstall - make install - artifacts: - paths: - - src/ make-test: stage: test From e6900c37c12e1d772907d3ac321bad644a87b378 Mon Sep 17 00:00:00 2001 From: christianEQ Date: Fri, 18 Jun 2021 16:49:25 +0000 Subject: [PATCH 033/149] detect full load + server threads (diagnostic tool) Former-commit-id: fd4ed1e425e32b628c5850e83c9ea9411c471bbe --- src/keydb-diagnostic-tool.cpp | 70 ++++++++++++++++++++++++++++------- 1 file changed, 57 insertions(+), 13 deletions(-) diff --git a/src/keydb-diagnostic-tool.cpp b/src/keydb-diagnostic-tool.cpp index 176939bb8..9c4d1299b 100644 --- a/src/keydb-diagnostic-tool.cpp +++ b/src/keydb-diagnostic-tool.cpp @@ -864,6 +864,16 @@ int extractPropertyFromInfo(const char *info, const char *key, double &val) { return 0; } +int extractPropertyFromInfo(const char *info, const char *key, unsigned int &val) { + char *line = strstr((char*)info, key); + if (line == nullptr) return 1; + line += strlen(key) + 1; // Skip past key name and following colon + char *newline = strchr(line, '\n'); + *newline = 0; // Terminate string after relevant line + val = atoi(line); + return 0; +} + double getSelfCpuTime(struct rusage *self_ru) { getrusage(RUSAGE_SELF, self_ru); double user_time = self_ru->ru_utime.tv_sec + (self_ru->ru_utime.tv_usec / (double)1000000); @@ -872,7 +882,7 @@ double getSelfCpuTime(struct rusage *self_ru) { } double getServerCpuTime(redisContext *ctx) { - redisReply *reply = (redisReply*)redisCommand(ctx, "INFO"); + redisReply *reply = (redisReply*)redisCommand(ctx, "INFO CPU"); if (reply->type != REDIS_REPLY_STRING) { freeReplyObject(reply); printf("Error executing INFO command. Exiting.\r\n"); @@ -892,6 +902,10 @@ double getServerCpuTime(redisContext *ctx) { return used_cpu_user + used_cpu_sys; } +bool isAtFullLoad(double cpuPercent, unsigned int threads) { + return cpuPercent / threads >= 96; +} + int main(int argc, const char **argv) { int i; @@ -926,7 +940,7 @@ int main(int argc, const char **argv) { } const char *set_value = "abcdefghijklmnopqrstuvwxyz"; - int threads_used = 0; + int self_threads = 0; unsigned int period = 5; char command[63]; @@ -935,36 +949,66 @@ int main(int argc, const char **argv) { double server_cpu_time, last_server_cpu_time = getServerCpuTime(ctx); struct rusage self_ru; double self_cpu_time, last_self_cpu_time = getSelfCpuTime(&self_ru); + double server_cpu_load, last_server_cpu_load, self_cpu_load, server_cpu_gain, last_server_cpu_gain; + + redisReply *reply = (redisReply*)redisCommand(ctx, "INFO CPU"); + if (reply->type != REDIS_REPLY_STRING) { + freeReplyObject(reply); + printf("Error executing INFO command. 
Exiting.\r\n"); + return 1; + } + unsigned int server_threads; + if (extractPropertyFromInfo(reply->str, "server_threads", server_threads)) { + printf("Error reading server threads from INFO command. Exiting.\r\n"); + return 1; + } + freeReplyObject(reply); + + printf("Server has %d threads.\n", server_threads); - while (threads_used < config.max_threads) { - printf("Creating %d clients for thread %d...\n", config.numclients, threads_used); + while (self_threads < config.max_threads) { + printf("Creating %d clients for thread %d...\n", config.numclients, self_threads); for (int i = 0; i < config.numclients; i++) { - sprintf(command, "SET %d %s\r\n", threads_used * config.numclients + i, set_value); - createClient(command, strlen(command), NULL,threads_used); + sprintf(command, "SET %d %s\r\n", self_threads * config.numclients + i, set_value); + createClient(command, strlen(command), NULL,self_threads); } - printf("Starting thread %d\n", threads_used); + printf("Starting thread %d\n", self_threads); - benchmarkThread *t = config.threads[threads_used]; + benchmarkThread *t = config.threads[self_threads]; if (pthread_create(&(t->thread), NULL, execBenchmarkThread, t)){ - fprintf(stderr, "FATAL: Failed to start thread %d.\n", threads_used); + fprintf(stderr, "FATAL: Failed to start thread %d.\n", self_threads); exit(1); } - threads_used++; + self_threads++; sleep(period); server_cpu_time = getServerCpuTime(ctx); self_cpu_time = getSelfCpuTime(&self_ru); + server_cpu_load = (server_cpu_time - last_server_cpu_time) * 100 / period; + self_cpu_load = (self_cpu_time - last_self_cpu_time) * 100 / period; if (server_cpu_time < 0) { break; } - printf("CPU Usage Self: %.1f%%, Server: %.1f%%\r\n", - (self_cpu_time - last_self_cpu_time) * 100 / period, - (server_cpu_time - last_server_cpu_time) * 100 / period); + printf("CPU Usage Self: %.1f%%, Server: %.1f%%\r\n", self_cpu_load, server_cpu_load); + server_cpu_gain = server_cpu_load - last_server_cpu_load; last_server_cpu_time = server_cpu_time; last_self_cpu_time = self_cpu_time; + last_server_cpu_load = server_cpu_load; + + + + if (isAtFullLoad(server_cpu_load, server_threads)) { + printf("Server is at full CPU load.\n"); + break; + } + + if (isAtFullLoad(self_cpu_load, self_threads)) { + printf("Diagnostic tool is at full CPU load.\n"); + break; + } } printf("Done.\n"); From 6ca00c68f4e632ed20ee3f8b05bd83092b45234f Mon Sep 17 00:00:00 2001 From: christianEQ Date: Fri, 18 Jun 2021 16:58:16 +0000 Subject: [PATCH 034/149] added config option for time to spin up new client threads (diagnostic tool) Former-commit-id: 3d0f729572b175457d4874b6e381754ac47e9055 --- src/keydb-diagnostic-tool.cpp | 65 +++++++++++++++++++---------------- 1 file changed, 35 insertions(+), 30 deletions(-) diff --git a/src/keydb-diagnostic-tool.cpp b/src/keydb-diagnostic-tool.cpp index 9c4d1299b..b5986127a 100644 --- a/src/keydb-diagnostic-tool.cpp +++ b/src/keydb-diagnostic-tool.cpp @@ -76,6 +76,7 @@ static struct config { const char *hostsocket; int numclients; int liveclients; + int period_ms; int requests; int requests_issued; int requests_finished; @@ -662,6 +663,7 @@ void initConfigDefaults() { config.keepalive = 1; config.datasize = 3; config.pipeline = 1; + config.period_ms = 5000; config.showerrors = 0; config.randomkeys = 0; config.randomkeys_keyspacelen = 0; @@ -708,6 +710,9 @@ int parseOptions(int argc, const char **argv) { } else if (!strcmp(argv[i],"-k")) { if (lastarg) goto invalid; config.keepalive = atoi(argv[++i]); + } else if (!strcmp(argv[i],"--ms")) { + if 
(lastarg) goto invalid; + config.period_ms = atoi(argv[++i]); } else if (!strcmp(argv[i],"-h")) { if (lastarg) goto invalid; config.hostip = strdup(argv[++i]); @@ -805,36 +810,37 @@ invalid: usage: printf( "Usage: keydb-benchmark [-h ] [-p ] [-c ] [-n ] [-k ]\n\n" -" -h Server hostname (default 127.0.0.1)\n" -" -p Server port (default 6379)\n" -" -s Server socket (overrides host and port)\n" -" -a Password for Redis Auth\n" -" --user Used to send ACL style 'AUTH username pass'. Needs -a.\n" -" -c Number of parallel connections (default 50)\n" -" -n Total number of requests (default 100000)\n" -" -d Data size of SET/GET value in bytes (default 3)\n" -" --dbnum SELECT the specified db number (default 0)\n" -" --threads Enable multi-thread mode.\n" -" --cluster Enable cluster mode.\n" -" --enable-tracking Send CLIENT TRACKING on before starting benchmark.\n" -" -k 1=keep alive 0=reconnect (default 1)\n" -" -r Use random keys for SET/GET/INCR, random values for SADD,\n" -" random members and scores for ZADD.\n" +" -h Server hostname (default 127.0.0.1)\n" +" -p Server port (default 6379)\n" +" -s Server socket (overrides host and port)\n" +" --ms Time between spinning up new client threads\n" +" -a Password for Redis Auth\n" +" --user Used to send ACL style 'AUTH username pass'. Needs -a.\n" +" -c Number of parallel connections (default 50)\n" +" -n Total number of requests (default 100000)\n" +" -d Data size of SET/GET value in bytes (default 3)\n" +" --dbnum SELECT the specified db number (default 0)\n" +" --threads Enable multi-thread mode.\n" +" --cluster Enable cluster mode.\n" +" --enable-tracking Send CLIENT TRACKING on before starting benchmark.\n" +" -k 1=keep alive 0=reconnect (default 1)\n" +" -r Use random keys for SET/GET/INCR, random values for SADD,\n" +" random members and scores for ZADD.\n" " Using this option the benchmark will expand the string __rand_int__\n" " inside an argument with a 12 digits number in the specified range\n" " from 0 to keyspacelen-1. The substitution changes every time a command\n" " is executed. Default tests use this to hit random keys in the\n" " specified range.\n" -" -P Pipeline requests. Default 1 (no pipeline).\n" -" -e If server replies with errors, show them on stdout.\n" -" (no more than 1 error per second is displayed)\n" -" -q Quiet. Just show query/sec values\n" -" --precision Number of decimal places to display in latency output (default 0)\n" -" --csv Output in CSV format\n" -" -l Loop. Run the tests forever\n" -" -t Only run the comma separated list of tests. The test\n" -" names are the same as the ones produced as output.\n" -" -I Idle mode. Just open N idle connections and wait.\n\n" +" -P Pipeline requests. Default 1 (no pipeline).\n" +" -e If server replies with errors, show them on stdout.\n" +" (no more than 1 error per second is displayed)\n" +" -q Quiet. Just show query/sec values\n" +" --precision Number of decimal places to display in latency output (default 0)\n" +" --csv Output in CSV format\n" +" -l Loop. Run the tests forever\n" +" -t Only run the comma separated list of tests. The test\n" +" names are the same as the ones produced as output.\n" +" -I Idle mode. 
Just open N idle connections and wait.\n\n" "Examples:\n\n" " Run the benchmark with the default configuration against 127.0.0.1:6379:\n" " $ keydb-benchmark\n\n" @@ -941,7 +947,6 @@ int main(int argc, const char **argv) { const char *set_value = "abcdefghijklmnopqrstuvwxyz"; int self_threads = 0; - unsigned int period = 5; char command[63]; initBenchmarkThreads(); @@ -983,12 +988,12 @@ int main(int argc, const char **argv) { } self_threads++; - sleep(period); + usleep(config.period_ms * 1000); server_cpu_time = getServerCpuTime(ctx); self_cpu_time = getSelfCpuTime(&self_ru); - server_cpu_load = (server_cpu_time - last_server_cpu_time) * 100 / period; - self_cpu_load = (self_cpu_time - last_self_cpu_time) * 100 / period; + server_cpu_load = (server_cpu_time - last_server_cpu_time) * 100000 / config.period_ms; + self_cpu_load = (self_cpu_time - last_self_cpu_time) * 100000 / config.period_ms; if (server_cpu_time < 0) { break; } @@ -998,7 +1003,7 @@ int main(int argc, const char **argv) { last_self_cpu_time = self_cpu_time; last_server_cpu_load = server_cpu_load; - + if (isAtFullLoad(server_cpu_load, server_threads)) { printf("Server is at full CPU load.\n"); From ca92c3303d97bcd64d08cd253a14808d049a2aad Mon Sep 17 00:00:00 2001 From: malavan Date: Fri, 18 Jun 2021 17:15:49 +0000 Subject: [PATCH 035/149] use runner login for clone Former-commit-id: 64cc31184d3a91386b689870de7d96e423fb58af --- .gitlab-ci.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index c6675be5c..532ee6b6b 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -17,7 +17,7 @@ node-redis-test: script: - mkdir node-redis-test - cd node-redis-test - - git clone https://gitlab.eqalpha.com/keydb-dev/node-redis.git + - git clone https://gitlab-ci-token:${CI_JOB_TOKEN}@gitlab.eqalpha.com/keydb-dev/node-redis.git - cd node-redis - npm install - npm run test From e8a43181385bd7dee9309224fd38f8a6cf02fd6e Mon Sep 17 00:00:00 2001 From: malavan Date: Fri, 18 Jun 2021 17:50:35 +0000 Subject: [PATCH 036/149] add -j to make test Former-commit-id: 7509875974938f1087ace2437ceb95d8baeeeffa --- .gitlab-ci.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 532ee6b6b..cbd3ce388 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -10,7 +10,7 @@ build: make-test: stage: test script: - - make test + - make test -j node-redis-test: stage: test From 7d11673ba9bc8df51e13a2b3a875ba90c9894b3f Mon Sep 17 00:00:00 2001 From: christianEQ Date: Fri, 18 Jun 2021 19:01:51 +0000 Subject: [PATCH 037/149] detect stagnating server load before 100% (diagnostic tool) Former-commit-id: 534b70643b8f39303331048d3e86475caa08b864 --- src/keydb-diagnostic-tool.cpp | 57 ++++++++++++++++++++++++++--------- 1 file changed, 42 insertions(+), 15 deletions(-) diff --git a/src/keydb-diagnostic-tool.cpp b/src/keydb-diagnostic-tool.cpp index b5986127a..506cc3ebe 100644 --- a/src/keydb-diagnostic-tool.cpp +++ b/src/keydb-diagnostic-tool.cpp @@ -42,6 +42,7 @@ #include #include #include +#include extern "C" { #include /* Use hiredis sds. */ #include "hiredis.h" @@ -891,23 +892,31 @@ double getServerCpuTime(redisContext *ctx) { redisReply *reply = (redisReply*)redisCommand(ctx, "INFO CPU"); if (reply->type != REDIS_REPLY_STRING) { freeReplyObject(reply); - printf("Error executing INFO command. Exiting.\r\n"); + printf("Error executing INFO command. 
Exiting.\n"); return -1; } double used_cpu_user, used_cpu_sys; if (extractPropertyFromInfo(reply->str, "used_cpu_user", used_cpu_user)) { - printf("Error reading user CPU usage from INFO command. Exiting.\r\n"); + printf("Error reading user CPU usage from INFO command. Exiting.\n"); return -1; } if (extractPropertyFromInfo(reply->str, "used_cpu_sys", used_cpu_sys)) { - printf("Error reading system CPU usage from INFO command. Exiting.\r\n"); + printf("Error reading system CPU usage from INFO command. Exiting.\n"); return -1; } freeReplyObject(reply); return used_cpu_user + used_cpu_sys; } +double getMean(std::deque *q) { + double sum = 0; + for (long unsigned int i = 0; i < q->size(); i++) { + sum += (*q)[i]; + } + return sum / q->size(); +} + bool isAtFullLoad(double cpuPercent, unsigned int threads) { return cpuPercent / threads >= 96; } @@ -954,7 +963,9 @@ int main(int argc, const char **argv) { double server_cpu_time, last_server_cpu_time = getServerCpuTime(ctx); struct rusage self_ru; double self_cpu_time, last_self_cpu_time = getSelfCpuTime(&self_ru); - double server_cpu_load, last_server_cpu_load, self_cpu_load, server_cpu_gain, last_server_cpu_gain; + double server_cpu_load, last_server_cpu_load = 0, self_cpu_load, server_cpu_gain; + std::deque load_gain_history = {}; + double current_gain_avg, peak_gain_avg = 0; redisReply *reply = (redisReply*)redisCommand(ctx, "INFO CPU"); if (reply->type != REDIS_REPLY_STRING) { @@ -971,19 +982,15 @@ int main(int argc, const char **argv) { printf("Server has %d threads.\n", server_threads); - while (self_threads < config.max_threads) { - printf("Creating %d clients for thread %d...\n", config.numclients, self_threads); for (int i = 0; i < config.numclients; i++) { sprintf(command, "SET %d %s\r\n", self_threads * config.numclients + i, set_value); createClient(command, strlen(command), NULL,self_threads); } - printf("Starting thread %d\n", self_threads); - benchmarkThread *t = config.threads[self_threads]; if (pthread_create(&(t->thread), NULL, execBenchmarkThread, t)){ - fprintf(stderr, "FATAL: Failed to start thread %d.\n", self_threads); + fprintf(stderr, "FATAL: Failed to start thread %d. Exiting.\n", self_threads); exit(1); } self_threads++; @@ -997,23 +1004,42 @@ int main(int argc, const char **argv) { if (server_cpu_time < 0) { break; } - printf("CPU Usage Self: %.1f%%, Server: %.1f%%\r\n", self_cpu_load, server_cpu_load); + printf("%d threads, %d total clients. CPU Usage Self: %.1f%% (%.1f%% per thread), Server: %.1f%% (%.1f%% per thread)\r", + self_threads, + self_threads * config.numclients, + self_cpu_load, + self_cpu_load / self_threads, + server_cpu_load, + server_cpu_load / server_threads); + fflush(stdout); server_cpu_gain = server_cpu_load - last_server_cpu_load; + load_gain_history.push_back(server_cpu_gain); + if (load_gain_history.size() > 5) { + load_gain_history.pop_front(); + } + current_gain_avg = getMean(&load_gain_history); + if (current_gain_avg > peak_gain_avg) { + peak_gain_avg = current_gain_avg; + } last_server_cpu_time = server_cpu_time; last_self_cpu_time = self_cpu_time; last_server_cpu_load = server_cpu_load; - - if (isAtFullLoad(server_cpu_load, server_threads)) { - printf("Server is at full CPU load.\n"); + printf("\nServer is at full CPU load. 
If higher performance is expected, check server configuration.\n"); break; } - if (isAtFullLoad(self_cpu_load, self_threads)) { - printf("Diagnostic tool is at full CPU load.\n"); + if (current_gain_avg <= 0.05 * peak_gain_avg) { + printf("\nServer CPU load appears to have stagnated with increasing clients.\n" + "Server does not appear to be at full load. Check network for throughput.\n"); break; } + + if (self_threads * config.numclients > 2000) { + printf("\nClient limit of 2000 reached. Server is not at full load and appears to be increasing.\n" + "2000 clients should be more than enough to reach a bottleneck. Check all configuration.\n"); + } } printf("Done.\n"); @@ -1023,3 +1049,4 @@ int main(int argc, const char **argv) { return 0; } + \ No newline at end of file From 1db672e81a50ee897a4107a12bd27ea89e73f6d4 Mon Sep 17 00:00:00 2001 From: christianEQ Date: Fri, 18 Jun 2021 20:21:47 +0000 Subject: [PATCH 038/149] added starting line so diagnostic tool doesnt look frozen at first Former-commit-id: 2b5e5cfa4cf1478682c46d74436025c63ac6c217 --- src/keydb-diagnostic-tool.cpp | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/keydb-diagnostic-tool.cpp b/src/keydb-diagnostic-tool.cpp index 53a612b1b..4e39785c0 100644 --- a/src/keydb-diagnostic-tool.cpp +++ b/src/keydb-diagnostic-tool.cpp @@ -981,7 +981,8 @@ int main(int argc, const char **argv) { } freeReplyObject(reply); - printf("Server has %d threads.\n", server_threads); + printf("Server has %d threads.\nStarting...\n", server_threads); + fflush(stdout); while (self_threads < config.max_threads) { for (int i = 0; i < config.numclients; i++) { From 70f215f69dbfdceb4439f56263bf327dfe2fdb62 Mon Sep 17 00:00:00 2001 From: malavan Date: Sat, 19 Jun 2021 06:55:41 +0000 Subject: [PATCH 039/149] try to maintain combined files between jobs Former-commit-id: acf5161c353ddcbbdfc45b4e85ae978bb442e507 --- .gitlab-ci.yml | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index cbd3ce388..1e082fdd1 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -4,8 +4,9 @@ build: - git submodule init && git submodule update - make distclean - make -j - - make uninstall - - make install + artifacts: + paths: + - src/ make-test: stage: test @@ -15,6 +16,7 @@ make-test: node-redis-test: stage: test script: + - make install - mkdir node-redis-test - cd node-redis-test - git clone https://gitlab-ci-token:${CI_JOB_TOKEN}@gitlab.eqalpha.com/keydb-dev/node-redis.git From 6489c1349d614a898bf900394baedb8b2916ff4d Mon Sep 17 00:00:00 2001 From: malavan Date: Sat, 19 Jun 2021 07:01:35 +0000 Subject: [PATCH 040/149] copy entire directory not just src Former-commit-id: f75c88039383e1ebb83ea3254c07541b8575e869 --- .gitlab-ci.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 1e082fdd1..8164c6bce 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -6,7 +6,7 @@ build: - make -j artifacts: paths: - - src/ + - ./ make-test: stage: test From 9408bbb699783c79fb92d2e2bd17e6bc2c0ed963 Mon Sep 17 00:00:00 2001 From: malavan Date: Sat, 19 Jun 2021 07:07:28 +0000 Subject: [PATCH 041/149] redo build instead of upload and download Former-commit-id: 841cdb2a429576ef2a6fb299aef2156ec56df243 --- .gitlab-ci.yml | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 8164c6bce..6432fd01a 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -4,18 +4,21 @@ build: - git submodule init && git submodule update - make 
distclean - make -j - artifacts: - paths: - - ./ make-test: stage: test script: + - git submodule init && git submodule update + - make distclean + - make -j - make test -j node-redis-test: stage: test script: + - git submodule init && git submodule update + - make distclean + - make -j - make install - mkdir node-redis-test - cd node-redis-test From 6b66b56c17489fcdac77c0a1f48fc496dda16784 Mon Sep 17 00:00:00 2001 From: christianEQ Date: Mon, 21 Jun 2021 18:36:21 +0000 Subject: [PATCH 042/149] -t threads option Former-commit-id: 0181b0e7a17ad5f83a544401110a9eac2b292aa4 --- src/keydb-diagnostic-tool.cpp | 27 +++++++++------------------ 1 file changed, 9 insertions(+), 18 deletions(-) diff --git a/src/keydb-diagnostic-tool.cpp b/src/keydb-diagnostic-tool.cpp index 4e39785c0..d71cdb865 100644 --- a/src/keydb-diagnostic-tool.cpp +++ b/src/keydb-diagnostic-tool.cpp @@ -760,17 +760,6 @@ int parseOptions(int argc, const char **argv) { config.idlemode = 1; } else if (!strcmp(argv[i],"-e")) { config.showerrors = 1; - } else if (!strcmp(argv[i],"-t")) { - if (lastarg) goto invalid; - /* We get the list of tests to run as a string in the form - * get,set,lrange,...,test_N. Then we add a comma before and - * after the string in order to make sure that searching - * for ",testname," will always get a match if the test is - * enabled. */ - config.tests = sdsnew(","); - config.tests = sdscat(config.tests,(char*)argv[++i]); - config.tests = sdscat(config.tests,","); - sdstolower(config.tests); } else if (!strcmp(argv[i],"--dbnum")) { if (lastarg) goto invalid; config.dbnum = atoi(argv[++i]); @@ -780,14 +769,16 @@ int parseOptions(int argc, const char **argv) { config.precision = atoi(argv[++i]); if (config.precision < 0) config.precision = 0; if (config.precision > MAX_LATENCY_PRECISION) config.precision = MAX_LATENCY_PRECISION; - } else if (!strcmp(argv[i],"--threads")) { - if (lastarg) goto invalid; - config.max_threads = atoi(argv[++i]); - if (config.max_threads > MAX_THREADS) { - printf("WARNING: too many threads, limiting threads to %d.\n", - MAX_THREADS); + } else if (!strcmp(argv[i],"-t") || !strcmp(argv[i],"--threads")) { + if (lastarg) goto invalid; + config.max_threads = atoi(argv[++i]); + if (config.max_threads > MAX_THREADS) { + printf("WARNING: too many threads, limiting threads to %d.\n", MAX_THREADS); config.max_threads = MAX_THREADS; - } else if (config.max_threads < 0) config.max_threads = 0; + } else if (config.max_threads <= 0) { + printf("Warning: Invalid value for max threads. 
Defaulting to %d.\n", MAX_THREADS); + config.max_threads = MAX_THREADS; + } } else if (!strcmp(argv[i],"--cluster")) { config.cluster_mode = 1; } else if (!strcmp(argv[i],"--enable-tracking")) { From 45c4220b7f6eab4a281645fc910df2f610f8cfb1 Mon Sep 17 00:00:00 2001 From: christianEQ Date: Mon, 21 Jun 2021 18:40:37 +0000 Subject: [PATCH 043/149] added more verbose options Former-commit-id: fd2ac1fcfc94285cad683528f3d209b204ccfd2b --- src/keydb-diagnostic-tool.cpp | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) diff --git a/src/keydb-diagnostic-tool.cpp b/src/keydb-diagnostic-tool.cpp index d71cdb865..b782b6a91 100644 --- a/src/keydb-diagnostic-tool.cpp +++ b/src/keydb-diagnostic-tool.cpp @@ -703,22 +703,19 @@ int parseOptions(int argc, const char **argv) { for (i = 1; i < argc; i++) { lastarg = (i == (argc-1)); - if (!strcmp(argv[i],"-c")) { + if (!strcmp(argv[i],"-c") || !strcmp(argv[i],"--clients")) { if (lastarg) goto invalid; config.numclients = atoi(argv[++i]); - } else if (!strcmp(argv[i],"-n")) { - if (lastarg) goto invalid; - config.requests = atoi(argv[++i]); } else if (!strcmp(argv[i],"-k")) { if (lastarg) goto invalid; config.keepalive = atoi(argv[++i]); } else if (!strcmp(argv[i],"--ms")) { if (lastarg) goto invalid; config.period_ms = atoi(argv[++i]); - } else if (!strcmp(argv[i],"-h")) { + } else if (!strcmp(argv[i],"-h") || !strcmp(argv[i],"--host")) { if (lastarg) goto invalid; config.hostip = strdup(argv[++i]); - } else if (!strcmp(argv[i],"-p")) { + } else if (!strcmp(argv[i],"-p") || !strcmp(argv[i],"--port")) { if (lastarg) goto invalid; config.hostport = atoi(argv[++i]); } else if (!strcmp(argv[i],"-s")) { From 1b0a1f8fc7f6422aeafcf474f11ac960a6296ce3 Mon Sep 17 00:00:00 2001 From: christianEQ Date: Mon, 21 Jun 2021 19:57:14 +0000 Subject: [PATCH 044/149] changed --ms to --time Former-commit-id: d7cada0f38668f67eb307172a3e91599b1f00a24 --- src/keydb-diagnostic-tool.cpp | 36 ++++++++++------------------------- 1 file changed, 10 insertions(+), 26 deletions(-) diff --git a/src/keydb-diagnostic-tool.cpp b/src/keydb-diagnostic-tool.cpp index b782b6a91..163311646 100644 --- a/src/keydb-diagnostic-tool.cpp +++ b/src/keydb-diagnostic-tool.cpp @@ -706,12 +706,13 @@ int parseOptions(int argc, const char **argv) { if (!strcmp(argv[i],"-c") || !strcmp(argv[i],"--clients")) { if (lastarg) goto invalid; config.numclients = atoi(argv[++i]); - } else if (!strcmp(argv[i],"-k")) { - if (lastarg) goto invalid; - config.keepalive = atoi(argv[++i]); - } else if (!strcmp(argv[i],"--ms")) { + } else if (!strcmp(argv[i],"--time")) { if (lastarg) goto invalid; config.period_ms = atoi(argv[++i]); + if (config.period_ms <= 0) { + printf("Warning: Invalid value for thread time. Defaulting to 5000ms.\n"); + config.period_ms = 5000; + } } else if (!strcmp(argv[i],"-h") || !strcmp(argv[i],"--host")) { if (lastarg) goto invalid; config.hostip = strdup(argv[++i]); @@ -770,7 +771,7 @@ int parseOptions(int argc, const char **argv) { if (lastarg) goto invalid; config.max_threads = atoi(argv[++i]); if (config.max_threads > MAX_THREADS) { - printf("WARNING: too many threads, limiting threads to %d.\n", MAX_THREADS); + printf("Warning: Too many threads, limiting threads to %d.\n", MAX_THREADS); config.max_threads = MAX_THREADS; } else if (config.max_threads <= 0) { printf("Warning: Invalid value for max threads. 
Defaulting to %d.\n", MAX_THREADS); @@ -800,17 +801,16 @@ invalid: usage: printf( "Usage: keydb-benchmark [-h ] [-p ] [-c ] [-n ] [-k ]\n\n" -" -h Server hostname (default 127.0.0.1)\n" -" -p Server port (default 6379)\n" +" -h, --host Server hostname (default 127.0.0.1)\n" +" -p, --port Server port (default 6379)\n" " -s Server socket (overrides host and port)\n" -" --ms Time between spinning up new client threads\n" +" --time