Merge origin/unstable into 6.2

This commit is contained in:
Oran Agra 2021-02-22 15:48:42 +02:00
commit e8b616c8ce
87 changed files with 1778 additions and 648 deletions

View File

@ -14,7 +14,7 @@ jobs:
run: make REDIS_CFLAGS='-Werror' BUILD_TLS=yes
- name: test
run: |
sudo apt-get install tcl8.5
sudo apt-get install tcl8.6
./runtest --verbose
- name: module api test
run: ./runtest-moduleapi --verbose
@ -58,14 +58,3 @@ jobs:
run: |
yum -y install gcc make
make REDIS_CFLAGS='-Werror'
build-freebsd:
runs-on: macos-latest
steps:
- uses: actions/checkout@v2
- name: make
uses: vmactions/freebsd-vm@v0.1.0
with:
usesh: true
prepare: pkg install -y gmake
run: gmake

View File

@ -20,8 +20,8 @@ jobs:
run: make
- name: test
run: |
sudo apt-get install tcl8.5
./runtest --accurate --verbose
sudo apt-get install tcl8.6
./runtest --accurate --verbose --dump-logs
- name: module api test
run: ./runtest-moduleapi --verbose
- name: sentinel tests
@ -39,8 +39,8 @@ jobs:
run: make MALLOC=libc
- name: test
run: |
sudo apt-get install tcl8.5
./runtest --accurate --verbose
sudo apt-get install tcl8.6
./runtest --accurate --verbose --dump-logs
- name: module api test
run: ./runtest-moduleapi --verbose
- name: sentinel tests
@ -60,8 +60,8 @@ jobs:
make 32bit
- name: test
run: |
sudo apt-get install tcl8.5
./runtest --accurate --verbose
sudo apt-get install tcl8.6
./runtest --accurate --verbose --dump-logs
- name: module api test
run: |
make -C tests/modules 32bit # the script below doesn't have an argument, we must build manually ahead of time
@ -82,10 +82,10 @@ jobs:
make BUILD_TLS=yes
- name: test
run: |
sudo apt-get install tcl8.5 tcl-tls
sudo apt-get install tcl8.6 tcl-tls
./utils/gen-test-certs.sh
./runtest --accurate --verbose --tls
./runtest --accurate --verbose
./runtest --accurate --verbose --tls --dump-logs
./runtest --accurate --verbose --dump-logs
- name: module api test
run: |
./runtest-moduleapi --verbose --tls
@ -110,8 +110,8 @@ jobs:
make
- name: test
run: |
sudo apt-get install tcl8.5 tcl-tls
./runtest --config io-threads 4 --config io-threads-do-reads yes --accurate --verbose --tags network
sudo apt-get install tcl8.6 tcl-tls
./runtest --config io-threads 4 --config io-threads-do-reads yes --accurate --verbose --tags network --dump-logs
- name: cluster tests
run: |
./runtest-cluster --config io-threads 4 --config io-threads-do-reads yes
@ -127,10 +127,10 @@ jobs:
- name: test
run: |
sudo apt-get update
sudo apt-get install tcl8.5 valgrind -y
./runtest --valgrind --verbose --clients 1
sudo apt-get install tcl8.6 valgrind -y
./runtest --valgrind --verbose --clients 1 --dump-logs
- name: module api test
run: ./runtest-moduleapi --valgrind --verbose --clients 1
run: ./runtest-moduleapi --valgrind --no-latency --verbose --clients 1
test-centos7-jemalloc:
runs-on: ubuntu-latest
@ -146,7 +146,7 @@ jobs:
- name: test
run: |
yum -y install which tcl
./runtest --accurate --verbose
./runtest --accurate --verbose --dump-logs
- name: module api test
run: ./runtest-moduleapi --verbose
- name: sentinel tests
@ -170,8 +170,8 @@ jobs:
run: |
yum -y install tcl tcltls
./utils/gen-test-certs.sh
./runtest --accurate --verbose --tls
./runtest --accurate --verbose
./runtest --accurate --verbose --tls --dump-logs
./runtest --accurate --verbose --dump-logs
- name: module api test
run: |
./runtest-moduleapi --verbose --tls
@ -195,7 +195,7 @@ jobs:
run: make
- name: test
run: |
./runtest --accurate --verbose --no-latency
./runtest --accurate --verbose --no-latency --dump-logs
- name: module api test
run: ./runtest-moduleapi --verbose
- name: sentinel tests
@ -210,13 +210,14 @@ jobs:
steps:
- uses: actions/checkout@v2
- name: test
uses: vmactions/freebsd-vm@v0.1.0
uses: vmactions/freebsd-vm@v0.1.2
with:
usesh: true
prepare: pkg install -y gmake lang/tcl85
run: |
gmake
./runtest --accurate --verbose --no-latency
MAKE=gmake ./runtest-moduleapi --verbose
./runtest-sentinel
sync: rsync
prepare: pkg install -y bash gmake lang/tcl86
run: >
gmake &&
./runtest --accurate --verbose --no-latency --dump-logs &&
MAKE=gmake ./runtest-moduleapi --verbose &&
./runtest-sentinel &&
./runtest-cluster

96
CONDUCT Normal file
View File

@ -0,0 +1,96 @@
Contributor Covenant Code of Conduct
Our Pledge
We as members, contributors, and leaders pledge to make participation in our
community a harassment-free experience for everyone, regardless of age, body
size, visible or invisible disability, ethnicity, sex characteristics, gender
identity and expression, level of experience, education, socio-economic status,
nationality, personal appearance, race, religion, or sexual identity
and orientation.
We pledge to act and interact in ways that contribute to an open, welcoming,
diverse, inclusive, and healthy community.
Our Standards
Examples of behavior that contributes to a positive environment for our
community include:
* Demonstrating empathy and kindness toward other people
* Being respectful of differing opinions, viewpoints, and experiences
* Giving and gracefully accepting constructive feedback
* Accepting responsibility and apologizing to those affected by our mistakes,
and learning from the experience
* Focusing on what is best not just for us as individuals, but for the
overall community
Examples of unacceptable behavior include:
* The use of sexualized language or imagery, and sexual attention or
advances of any kind
* Trolling, insulting or derogatory comments, and personal or political attacks
* Public or private harassment
* Publishing others' private information, such as a physical or email
address, without their explicit permission
* Other conduct which could reasonably be considered inappropriate in a
professional setting
Enforcement Responsibilities
Community leaders are responsible for clarifying and enforcing our standards of
acceptable behavior and will take appropriate and fair corrective action in
response to any behavior that they deem inappropriate, threatening, offensive,
or harmful.
Community leaders have the right and responsibility to remove, edit, or reject
comments, commits, code, wiki edits, issues, and other contributions that are
not aligned to this Code of Conduct, and will communicate reasons for moderation
decisions when appropriate.
Scope
This Code of Conduct applies within all community spaces, and also applies when
an individual is officially representing the community in public spaces.
Examples of representing our community include using an official e-mail address,
posting via an official social media account, or acting as an appointed
representative at an online or offline event.
Enforcement
Instances of abusive, harassing, or otherwise unacceptable behavior may be
reported to the community leaders responsible for enforcement at
this email address: redis@redis.io.
All complaints will be reviewed and investigated promptly and fairly.
All community leaders are obligated to respect the privacy and security of the
reporter of any incident.
Enforcement Guidelines
Community leaders will follow these Community Impact Guidelines in determining
the consequences for any action they deem in violation of this Code of Conduct:
1. Correction
Community Impact: Use of inappropriate language or other behavior deemed
unprofessional or unwelcome in the community.
Consequence: A private, written warning from community leaders, providing
clarity around the nature of the violation and an explanation of why the
behavior was inappropriate. A public apology may be requested.
2. Warning
Community Impact: A violation through a single incident or series
of actions.
Consequence: A warning with consequences for continued behavior. No
interaction with the people involved, including unsolicited interaction with
those enforcing the Code of Conduct, for a specified period of time. This
includes avoiding interactions in community spaces as well as external channels
like social media. Violating these terms may lead to a temporary or
permanent ban.
3. Temporary Ban
Community Impact: A serious violation of community standards, including
sustained inappropriate behavior.
Consequence: A temporary ban from any sort of interaction or public
communication with the community for a specified period of time. No public or
private interaction with the people involved, including unsolicited interaction
with those enforcing the Code of Conduct, is allowed during this period.
Violating these terms may lead to a permanent ban.
4. Permanent Ban
Community Impact: Demonstrating a pattern of violation of community
standards, including sustained inappropriate behavior, harassment of an
individual, or aggression toward or disparagement of classes of individuals.
Consequence: A permanent ban from any sort of public interaction within
the community.
Attribution
This Code of Conduct is adapted from the Contributor Covenant,
version 2.0, available at
https://www.contributor-covenant.org/version/2/0/code_of_conduct.html.
Community Impact Guidelines were inspired by Mozilla's code of conduct
enforcement ladder.
For answers to common questions about this code of conduct, see the FAQ at
https://www.contributor-covenant.org/faq. Translations are available at
https://www.contributor-covenant.org/translations.

View File

@ -32,5 +32,7 @@ $TCLSH tests/test_helper.tcl \
--single unit/moduleapi/getkeys \
--single unit/moduleapi/test_lazyfree \
--single unit/moduleapi/defrag \
--single unit/moduleapi/hash \
--single unit/moduleapi/zset \
--single unit/moduleapi/stream \
"${@}"

View File

@ -814,8 +814,6 @@ void ACLAddAllowedSubcommand(user *u, unsigned long id, const char *sub) {
* invalid (contains non allowed characters).
* ENOENT: The command name or command category provided with + or - is not
* known.
* EBUSY: The subcommand you want to add is about a command that is currently
* fully added.
* EEXIST: You are adding a key pattern after "*" was already added. This is
* almost surely an error on the user side.
* EISDIR: You are adding a channel pattern after "*" was already added. This is
@ -976,22 +974,12 @@ int ACLSetUser(user *u, const char *op, ssize_t oplen) {
return C_ERR;
}
/* The command should not be set right now in the command
* bitmap, because adding a subcommand of a fully added
* command is probably an error on the user side. */
unsigned long id = ACLGetCommandID(copy);
if (ACLGetUserCommandBit(u,id) == 1) {
zfree(copy);
errno = EBUSY;
return C_ERR;
/* Add the subcommand to the list of valid ones, if the command is not set. */
if (ACLGetUserCommandBit(u,id) == 0) {
ACLAddAllowedSubcommand(u,id,sub);
}
/* Add the subcommand to the list of valid ones. */
ACLAddAllowedSubcommand(u,id,sub);
/* We have to clear the command bit so that we force the
* subcommand check. */
ACLSetUserCommandBit(u,id,0);
zfree(copy);
}
} else if (op[0] == '-' && op[1] != '@') {
@ -1030,10 +1018,6 @@ const char *ACLSetUserStringError(void) {
errmsg = "Unknown command or category name in ACL";
else if (errno == EINVAL)
errmsg = "Syntax error";
else if (errno == EBUSY)
errmsg = "Adding a subcommand of a command already fully "
"added is not allowed. Remove the command to start. "
"Example: -DEBUG +DEBUG|DIGEST";
else if (errno == EEXIST)
errmsg = "Adding a pattern after the * pattern (or the "
"'allkeys' flag) is not valid and does not have any "
@ -2253,7 +2237,7 @@ void authCommand(client *c) {
return;
}
username = createStringObject("default",7);
username = shared.default_username;
password = c->argv[1];
} else {
username = c->argv[1];
@ -2265,9 +2249,5 @@ void authCommand(client *c) {
} else {
addReplyError(c,"-WRONGPASS invalid username-password pair or user is disabled.");
}
/* Free the "default" string object we created for the two
* arguments form. */
if (c->argc == 2) decrRefCount(username);
}

View File

@ -588,11 +588,10 @@ sds catAppendOnlyExpireAtCommand(sds buf, struct redisCommand *cmd, robj *key, r
}
decrRefCount(seconds);
argv[0] = createStringObject("PEXPIREAT",9);
argv[0] = shared.pexpireat;
argv[1] = key;
argv[2] = createStringObjectFromLongLong(when);
buf = catAppendOnlyGenericCommand(buf, 3, argv);
decrRefCount(argv[0]);
decrRefCount(argv[2]);
return buf;
}
@ -1441,7 +1440,7 @@ int rewriteAppendOnlyFileRio(rio *aof) {
size_t processed = 0;
int j;
long key_count = 0;
long long cow_updated_time = 0;
long long updated_time = 0;
for (j = 0; j < server.dbnum; j++) {
char selectcmd[] = "*2\r\n$6\r\nSELECT\r\n";
@ -1502,18 +1501,16 @@ int rewriteAppendOnlyFileRio(rio *aof) {
aofReadDiffFromParent();
}
/* Update COW info every 1 second (approximately).
/* Update info every 1 second (approximately).
* in order to avoid calling mstime() on each iteration, we will
* check the diff every 1024 keys */
if ((key_count & 1023) == 0) {
key_count = 0;
if ((key_count++ & 1023) == 0) {
long long now = mstime();
if (now - cow_updated_time >= 1000) {
sendChildCOWInfo(CHILD_TYPE_AOF, 0, "AOF rewrite");
cow_updated_time = now;
if (now - updated_time >= 1000) {
sendChildInfo(CHILD_INFO_TYPE_CURRENT_INFO, key_count, "AOF rewrite");
updated_time = now;
}
}
key_count++;
}
dictReleaseIterator(di);
di = NULL;
@ -1614,7 +1611,7 @@ int rewriteAppendOnlyFile(char *filename) {
size_t bytes_to_write = sdslen(server.aof_child_diff);
const char *buf = server.aof_child_diff;
long long cow_updated_time = mstime();
long long key_count = dbTotalServerKeyCount();
while (bytes_to_write) {
/* We write the AOF buffer in chunk of 8MB so that we can check the time in between them */
size_t chunk_size = bytes_to_write < (8<<20) ? bytes_to_write : (8<<20);
@ -1628,7 +1625,7 @@ int rewriteAppendOnlyFile(char *filename) {
/* Update COW info */
long long now = mstime();
if (now - cow_updated_time >= 1000) {
sendChildCOWInfo(CHILD_TYPE_AOF, 0, "AOF rewrite");
sendChildInfo(CHILD_INFO_TYPE_CURRENT_INFO, key_count, "AOF rewrite");
cow_updated_time = now;
}
}
@ -1762,7 +1759,7 @@ int rewriteAppendOnlyFileBackground(void) {
redisSetCpuAffinity(server.aof_rewrite_cpulist);
snprintf(tmpfile,256,"temp-rewriteaof-bg-%d.aof", (int) getpid());
if (rewriteAppendOnlyFile(tmpfile) == C_OK) {
sendChildCOWInfo(CHILD_TYPE_AOF, 1, "AOF rewrite");
sendChildCowInfo(CHILD_INFO_TYPE_AOF_COW_SIZE, "AOF rewrite");
exitFromChild(0);
} else {
exitFromChild(1);

View File

@ -31,9 +31,10 @@
#include <unistd.h>
typedef struct {
int process_type; /* AOF or RDB child? */
int on_exit; /* COW size of active or exited child */
size_t cow_size; /* Copy on write size. */
size_t keys;
size_t cow;
double progress;
childInfoType information_type; /* Type of information */
} child_info_data;
/* Open a child-parent channel used in order to move information about the
@ -64,39 +65,49 @@ void closeChildInfoPipe(void) {
}
}
/* Send COW data to parent. */
void sendChildInfo(int process_type, int on_exit, size_t cow_size) {
/* Send save data to parent. */
void sendChildInfoGeneric(childInfoType info_type, size_t keys, double progress, char *pname) {
if (server.child_info_pipe[1] == -1) return;
child_info_data buffer = {.process_type = process_type, .on_exit = on_exit, .cow_size = cow_size};
ssize_t wlen = sizeof(buffer);
child_info_data data = {0}; /* zero everything, including padding to sattisfy valgrind */
data.information_type = info_type;
data.keys = keys;
data.cow = zmalloc_get_private_dirty(-1);
data.progress = progress;
if (write(server.child_info_pipe[1],&buffer,wlen) != wlen) {
if (data.cow) {
serverLog((info_type == CHILD_INFO_TYPE_CURRENT_INFO) ? LL_VERBOSE : LL_NOTICE,
"%s: %zu MB of memory used by copy-on-write",
pname, data.cow/(1024*1024));
}
ssize_t wlen = sizeof(data);
if (write(server.child_info_pipe[1], &data, wlen) != wlen) {
/* Nothing to do on error, this will be detected by the other side. */
}
}
/* Update COW data. */
void updateChildInfo(int process_type, int on_exit, size_t cow_size) {
if (!on_exit) {
server.stat_current_cow_bytes = cow_size;
return;
}
if (process_type == CHILD_TYPE_RDB) {
server.stat_rdb_cow_bytes = cow_size;
} else if (process_type == CHILD_TYPE_AOF) {
server.stat_aof_cow_bytes = cow_size;
} else if (process_type == CHILD_TYPE_MODULE) {
server.stat_module_cow_bytes = cow_size;
/* Update Child info. */
void updateChildInfo(childInfoType information_type, size_t cow, size_t keys, double progress) {
if (information_type == CHILD_INFO_TYPE_CURRENT_INFO) {
server.stat_current_cow_bytes = cow;
server.stat_current_save_keys_processed = keys;
if (progress != -1) server.stat_module_progress = progress;
} else if (information_type == CHILD_INFO_TYPE_AOF_COW_SIZE) {
server.stat_aof_cow_bytes = cow;
} else if (information_type == CHILD_INFO_TYPE_RDB_COW_SIZE) {
server.stat_rdb_cow_bytes = cow;
} else if (information_type == CHILD_INFO_TYPE_MODULE_COW_SIZE) {
server.stat_module_cow_bytes = cow;
}
}
/* Read COW info data from the pipe.
* if complete data read into the buffer, process type, copy-on-write type and copy-on-write size
* are stored into *process_type, *on_exit and *cow_size respectively and returns 1.
/* Read child info data from the pipe.
* if complete data read into the buffer,
* data is stored into *buffer, and returns 1.
* otherwise, the partial data is left in the buffer, waiting for the next read, and returns 0. */
int readChildInfo(int *process_type, int *on_exit, size_t *cow_size) {
int readChildInfo(childInfoType *information_type, size_t *cow, size_t *keys, double* progress) {
/* We are using here a static buffer in combination with the server.child_info_nread to handle short reads */
static child_info_data buffer;
ssize_t wlen = sizeof(buffer);
@ -111,25 +122,27 @@ int readChildInfo(int *process_type, int *on_exit, size_t *cow_size) {
/* We have complete child info */
if (server.child_info_nread == wlen) {
*process_type = buffer.process_type;
*on_exit = buffer.on_exit;
*cow_size = buffer.cow_size;
*information_type = buffer.information_type;
*cow = buffer.cow;
*keys = buffer.keys;
*progress = buffer.progress;
return 1;
} else {
return 0;
}
}
/* Receive COW data from child. */
/* Receive info data from child. */
void receiveChildInfo(void) {
if (server.child_info_pipe[0] == -1) return;
int process_type;
int on_exit;
size_t cow_size;
size_t cow;
size_t keys;
double progress;
childInfoType information_type;
/* Drain the pipe and update child info so that we get the final message. */
while (readChildInfo(&process_type, &on_exit, &cow_size)) {
updateChildInfo(process_type, on_exit, cow_size);
while (readChildInfo(&information_type, &cow, &keys, &progress)) {
updateChildInfo(information_type, cow, keys, progress);
}
}

View File

@ -54,7 +54,7 @@ int cliSecureConnection(redisContext *c, cliSSLconfig config, const char **err)
goto error;
}
SSL_CTX_set_options(ssl_ctx, SSL_OP_NO_SSLv2 | SSL_OP_NO_SSLv3);
SSL_CTX_set_verify(ssl_ctx, SSL_VERIFY_PEER, NULL);
SSL_CTX_set_verify(ssl_ctx, config.skip_cert_verify ? SSL_VERIFY_NONE : SSL_VERIFY_PEER, NULL);
if (config.cacert || config.cacertdir) {
if (!SSL_CTX_load_verify_locations(ssl_ctx, config.cacert, config.cacertdir)) {

View File

@ -10,6 +10,8 @@ typedef struct cliSSLconfig {
char *cacert;
/* Directory where trusted CA certificates are stored, or NULL */
char *cacertdir;
/* Skip server certificate verification. */
int skip_cert_verify;
/* Client certificate to authenticate with, or NULL */
char *cert;
/* Private key file to authenticate with, or NULL */

View File

@ -2126,7 +2126,7 @@ int clusterProcessPacket(clusterLink *link) {
/* Don't bother creating useless objects if there are no
* Pub/Sub subscribers. */
if (dictSize(server.pubsub_channels) ||
listLength(server.pubsub_patterns))
dictSize(server.pubsub_patterns))
{
channel_len = ntohl(hdr->data.publish.msg.channel_len);
message_len = ntohl(hdr->data.publish.msg.message_len);
@ -2815,7 +2815,7 @@ void clusterPropagatePublish(robj *channel, robj *message) {
* SLAVE node specific functions
* -------------------------------------------------------------------------- */
/* This function sends a FAILOVE_AUTH_REQUEST message to every node in order to
/* This function sends a FAILOVER_AUTH_REQUEST message to every node in order to
* see if there is the quorum for this slave instance to failover its failing
* master.
*

View File

@ -2517,7 +2517,7 @@ standardConfig configs[] = {
createLongLongConfig("cluster-node-timeout", NULL, MODIFIABLE_CONFIG, 0, LLONG_MAX, server.cluster_node_timeout, 15000, INTEGER_CONFIG, NULL, NULL),
createLongLongConfig("slowlog-log-slower-than", NULL, MODIFIABLE_CONFIG, -1, LLONG_MAX, server.slowlog_log_slower_than, 10000, INTEGER_CONFIG, NULL, NULL),
createLongLongConfig("latency-monitor-threshold", NULL, MODIFIABLE_CONFIG, 0, LLONG_MAX, server.latency_monitor_threshold, 0, INTEGER_CONFIG, NULL, NULL),
createLongLongConfig("proto-max-bulk-len", NULL, MODIFIABLE_CONFIG, 1024*1024, LLONG_MAX, server.proto_max_bulk_len, 512ll*1024*1024, MEMORY_CONFIG, NULL, NULL), /* Bulk request max size */
createLongLongConfig("proto-max-bulk-len", NULL, MODIFIABLE_CONFIG, 1024*1024, LONG_MAX, server.proto_max_bulk_len, 512ll*1024*1024, MEMORY_CONFIG, NULL, NULL), /* Bulk request max size */
createLongLongConfig("stream-node-max-entries", NULL, MODIFIABLE_CONFIG, 0, LLONG_MAX, server.stream_node_max_entries, 100, INTEGER_CONFIG, NULL, NULL),
createLongLongConfig("repl-backlog-size", NULL, MODIFIABLE_CONFIG, 1, LLONG_MAX, server.repl_backlog_size, 1024*1024, MEMORY_CONFIG, NULL, updateReplBacklogSize), /* Default: 1mb */

View File

@ -106,9 +106,8 @@ robj *lookupKeyReadWithFlags(redisDb *db, robj *key, int flags) {
robj *val;
if (expireIfNeeded(db,key) == 1) {
/* Key expired. If we are in the context of a master, expireIfNeeded()
* returns 0 only when the key does not exist at all, so it's safe
* to return NULL ASAP. */
/* If we are in the context of a master, expireIfNeeded() returns 1
* when the key is no longer valid, so we can return NULL ASAP. */
if (server.masterhost == NULL)
goto keymiss;

View File

@ -638,7 +638,7 @@ int defragRaxNode(raxNode **noderef) {
}
/* returns 0 if no more work needs to be been done, and 1 if time is up and more work is needed. */
int scanLaterStraemListpacks(robj *ob, unsigned long *cursor, long long endtime, long long *defragged) {
int scanLaterStreamListpacks(robj *ob, unsigned long *cursor, long long endtime, long long *defragged) {
static unsigned char last[sizeof(streamID)];
raxIterator ri;
long iterations = 0;
@ -958,7 +958,7 @@ int defragLaterItem(dictEntry *de, unsigned long *cursor, long long endtime) {
} else if (ob->type == OBJ_HASH) {
server.stat_active_defrag_hits += scanLaterHash(ob, cursor);
} else if (ob->type == OBJ_STREAM) {
return scanLaterStraemListpacks(ob, cursor, endtime, &server.stat_active_defrag_hits);
return scanLaterStreamListpacks(ob, cursor, endtime, &server.stat_active_defrag_hits);
} else if (ob->type == OBJ_MODULE) {
return moduleLateDefrag(dictGetKey(de), ob, cursor, endtime, &server.stat_active_defrag_hits);
} else {

View File

@ -126,7 +126,7 @@ int _dictInit(dict *d, dictType *type,
d->type = type;
d->privdata = privDataPtr;
d->rehashidx = -1;
d->iterators = 0;
d->pauserehash = 0;
return DICT_OK;
}
@ -264,7 +264,7 @@ long long timeInMilliseconds(void) {
* than 0, and is smaller than 1 in most cases. The exact upper bound
* depends on the running time of dictRehash(d,100).*/
int dictRehashMilliseconds(dict *d, int ms) {
if (d->iterators > 0) return 0;
if (d->pauserehash > 0) return 0;
long long start = timeInMilliseconds();
int rehashes = 0;
@ -276,8 +276,8 @@ int dictRehashMilliseconds(dict *d, int ms) {
return rehashes;
}
/* This function performs just a step of rehashing, and only if there are
* no safe iterators bound to our hash table. When we have iterators in the
/* This function performs just a step of rehashing, and only if hashing has
* not been paused for our hash table. When we have iterators in the
* middle of a rehashing we can't mess with the two hash tables otherwise
* some element can be missed or duplicated.
*
@ -285,7 +285,7 @@ int dictRehashMilliseconds(dict *d, int ms) {
* dictionary so that the hash table automatically migrates from H1 to H2
* while it is actively used. */
static void _dictRehashStep(dict *d) {
if (d->iterators == 0) dictRehash(d,1);
if (d->pauserehash == 0) dictRehash(d,1);
}
/* Add an element to the target hash table */
@ -301,7 +301,7 @@ int dictAdd(dict *d, void *key, void *val)
/* Low level add or find:
* This function adds the entry but instead of setting a value returns the
* dictEntry structure to the user, that will make sure to fill the value
* field as he wishes.
* field as they wish.
*
* This function is also directly exposed to the user API to be called
* mainly in order to store non-pointers inside the hash value, example:
@ -593,7 +593,7 @@ dictEntry *dictNext(dictIterator *iter)
dictht *ht = &iter->d->ht[iter->table];
if (iter->index == -1 && iter->table == 0) {
if (iter->safe)
iter->d->iterators++;
dictPauseRehashing(iter->d);
else
iter->fingerprint = dictFingerprint(iter->d);
}
@ -625,7 +625,7 @@ void dictReleaseIterator(dictIterator *iter)
{
if (!(iter->index == -1 && iter->table == 0)) {
if (iter->safe)
iter->d->iterators--;
dictResumeRehashing(iter->d);
else
assert(iter->fingerprint == dictFingerprint(iter->d));
}
@ -896,9 +896,8 @@ unsigned long dictScan(dict *d,
if (dictSize(d) == 0) return 0;
/* Having a safe iterator means no rehashing can happen, see _dictRehashStep.
* This is needed in case the scan callback tries to do dictFind or alike. */
d->iterators++;
/* This is needed in case the scan callback tries to do dictFind or alike. */
dictPauseRehashing(d);
if (!dictIsRehashing(d)) {
t0 = &(d->ht[0]);
@ -966,8 +965,7 @@ unsigned long dictScan(dict *d,
} while (v & (m0 ^ m1));
}
/* undo the ++ at the top */
d->iterators--;
dictResumeRehashing(d);
return v;
}
@ -1056,7 +1054,7 @@ void dictEmpty(dict *d, void(callback)(void*)) {
_dictClear(d,&d->ht[0],callback);
_dictClear(d,&d->ht[1],callback);
d->rehashidx = -1;
d->iterators = 0;
d->pauserehash = 0;
}
void dictEnableResize(void) {

View File

@ -82,7 +82,7 @@ typedef struct dict {
void *privdata;
dictht ht[2];
long rehashidx; /* rehashing not in progress if rehashidx == -1 */
unsigned long iterators; /* number of iterators currently running */
int16_t pauserehash; /* If >0 rehashing is paused (<0 indicates coding error) */
} dict;
/* If safe is set to 1 this is a safe iterator, that means, you can call
@ -150,6 +150,8 @@ typedef void (dictScanBucketFunction)(void *privdata, dictEntry **bucketref);
#define dictSlots(d) ((d)->ht[0].size+(d)->ht[1].size)
#define dictSize(d) ((d)->ht[0].used+(d)->ht[1].used)
#define dictIsRehashing(d) ((d)->rehashidx != -1)
#define dictPauseRehashing(d) (d)->pauserehash++
#define dictResumeRehashing(d) (d)->pauserehash--
/* If our unsigned long type can store a 64 bit number, use a 64 bit PRNG. */
#if ULONG_MAX >= 0xffffffffffffffff

View File

@ -135,7 +135,7 @@ void evictionPoolAlloc(void) {
/* This is an helper function for performEvictions(), it is used in order
* to populate the evictionPool with a few entries every time we want to
* expire a key. Keys with idle time smaller than one of the current
* expire a key. Keys with idle time bigger than one of the current
* keys are added. Keys are always added if there are free entries.
*
* We insert keys on place in ascending order, so keys with the smaller

View File

@ -83,9 +83,8 @@ int activeExpireCycleTryExpire(redisDb *db, dictEntry *de, long long now) {
* keys that can be removed from the keyspace.
*
* Every expire cycle tests multiple databases: the next call will start
* again from the next db, with the exception of exists for time limit: in that
* case we restart again from the last database we were processing. Anyway
* no more than CRON_DBS_PER_CALL databases are tested at every iteration.
* again from the next db. No more than CRON_DBS_PER_CALL databases are
* tested at every iteration.
*
* The function can perform more or less work, depending on the "type"
* argument. It can execute a "fast cycle" or a "slow cycle". The slow
@ -141,7 +140,7 @@ void activeExpireCycle(int type) {
/* This function has some global state in order to continue the work
* incrementally across calls. */
static unsigned int current_db = 0; /* Last DB tested. */
static unsigned int current_db = 0; /* Next DB to test. */
static int timelimit_exit = 0; /* Time limit hit in previous call? */
static long long last_fast_cycle = 0; /* When last fast cycle ran. */
@ -507,10 +506,15 @@ void expireGenericCommand(client *c, long long basetime, int unit) {
if (getLongLongFromObjectOrReply(c, param, &when, NULL) != C_OK)
return;
int negative_when = when < 0;
if (unit == UNIT_SECONDS) when *= 1000;
when += basetime;
if (((when < 0) && !negative_when) || ((when-basetime > 0) && negative_when)) {
/* EXPIRE allows negative numbers, but we can at least detect an
* overflow by either unit conversion or basetime addition. */
addReplyErrorFormat(c, "invalid expire time in %s", c->cmd->name);
return;
}
/* No key, return zero. */
if (lookupKeyWrite(c->db,key) == NULL) {
addReply(c,shared.czero);

View File

@ -31,6 +31,7 @@
#include "geo.h"
#include "geohash_helper.h"
#include "debugmacro.h"
#include "pqsort.h"
/* Things exported from t_zset.c only for geo.c, since it is the only other
* part of Redis that requires close zset introspection. */
@ -174,10 +175,10 @@ int extractDistanceOrReply(client *c, robj **argv,
* that should be in the form: <number> <number> <unit>, and return C_OK or C_ERR means success or failure
* *conversions is populated with the coefficient to use in order to convert meters to the unit.*/
int extractBoxOrReply(client *c, robj **argv, double *conversion,
double *height, double *width) {
double *width, double *height) {
double h, w;
if ((getDoubleFromObjectOrReply(c, argv[0], &h, "need numeric height") != C_OK) ||
(getDoubleFromObjectOrReply(c, argv[1], &w, "need numeric width") != C_OK)) {
if ((getDoubleFromObjectOrReply(c, argv[0], &w, "need numeric width") != C_OK) ||
(getDoubleFromObjectOrReply(c, argv[1], &h, "need numeric height") != C_OK)) {
return C_ERR;
}
@ -223,8 +224,10 @@ int geoAppendIfWithinShape(geoArray *ga, GeoShape *shape, double score, sds memb
if (!geohashGetDistanceIfInRadiusWGS84(shape->xy[0], shape->xy[1], xy[0], xy[1],
shape->t.radius*shape->conversion, &distance)) return C_ERR;
} else if (shape->type == RECTANGLE_TYPE) {
if (!geohashGetDistanceIfInRectangle(shape->bounds, shape->xy[0], shape->xy[1],
xy[0], xy[1], &distance)) return C_ERR;
if (!geohashGetDistanceIfInRectangle(shape->t.r.width * shape->conversion,
shape->t.r.height * shape->conversion,
shape->xy[0], shape->xy[1], xy[0], xy[1], &distance))
return C_ERR;
}
/* Append the new element. */
@ -634,8 +637,8 @@ void georadiusGeneric(client *c, int srcKeyIndex, int flags) {
flags & GEOSEARCH &&
!byradius)
{
if (extractBoxOrReply(c, c->argv+base_args+i+1, &shape.conversion, &shape.t.r.height,
&shape.t.r.width) != C_OK) return;
if (extractBoxOrReply(c, c->argv+base_args+i+1, &shape.conversion, &shape.t.r.width,
&shape.t.r.height) != C_OK) return;
shape.type = RECTANGLE_TYPE;
bybox = 1;
i += 3;
@ -699,10 +702,20 @@ void georadiusGeneric(client *c, int srcKeyIndex, int flags) {
long option_length = 0;
/* Process [optional] requested sorting */
if (sort == SORT_ASC) {
qsort(ga->array, result_length, sizeof(geoPoint), sort_gp_asc);
} else if (sort == SORT_DESC) {
qsort(ga->array, result_length, sizeof(geoPoint), sort_gp_desc);
if (sort != SORT_NONE) {
int (*sort_gp_callback)(const void *a, const void *b) = NULL;
if (sort == SORT_ASC) {
sort_gp_callback = sort_gp_asc;
} else if (sort == SORT_DESC) {
sort_gp_callback = sort_gp_desc;
}
if (returned_items == result_length) {
qsort(ga->array, result_length, sizeof(geoPoint), sort_gp_callback);
} else {
pqsort(ga->array, result_length, sizeof(geoPoint), sort_gp_callback,
0, (returned_items - 1));
}
}
if (storekey == NULL) {

View File

@ -85,20 +85,16 @@ uint8_t geohashEstimateStepsByRadius(double range_meters, double lat) {
/* Return the bounding box of the search area by shape (see geohash.h GeoShape)
* bounds[0] - bounds[2] is the minimum and maximum longitude
* while bounds[1] - bounds[3] is the minimum and maximum latitude.
* since the higher the latitude, the shorter the arc length, the box shape is as follows
* (left and right edges are actually bent), as shown in the following diagram:
*
* This function does not behave correctly with very large radius values, for
* instance for the coordinates 81.634948934258375 30.561509253718668 and a
* radius of 7083 kilometers, it reports as bounding boxes:
*
* min_lon 7.680495, min_lat -33.119473, max_lon 155.589402, max_lat 94.242491
*
* However, for instance, a min_lon of 7.680495 is not correct, because the
* point -1.27579540014266968 61.33421815228281559 is at less than 7000
* kilometers away.
*
* Since this function is currently only used as an optimization, the
* optimization is not used for very big radiuses, however the function
* should be fixed. */
* \-----------------/ -------- \-----------------/
* \ / / \ \ /
* \ (long,lat) / / (long,lat) \ \ (long,lat) /
* \ / / \ / \
* --------- /----------------\ /--------------\
* Northern Hemisphere Southern Hemisphere Around the equator
*/
int geohashBoundingBox(GeoShape *shape, double *bounds) {
if (!bounds) return 0;
double longitude = shape->xy[0];
@ -106,10 +102,14 @@ int geohashBoundingBox(GeoShape *shape, double *bounds) {
double height = shape->conversion * (shape->type == CIRCULAR_TYPE ? shape->t.radius : shape->t.r.height/2);
double width = shape->conversion * (shape->type == CIRCULAR_TYPE ? shape->t.radius : shape->t.r.width/2);
const double long_delta = rad_deg(width/EARTH_RADIUS_IN_METERS/cos(deg_rad(latitude)));
const double lat_delta = rad_deg(height/EARTH_RADIUS_IN_METERS);
bounds[0] = longitude - long_delta;
bounds[2] = longitude + long_delta;
const double long_delta_top = rad_deg(width/EARTH_RADIUS_IN_METERS/cos(deg_rad(latitude+lat_delta)));
const double long_delta_bottom = rad_deg(width/EARTH_RADIUS_IN_METERS/cos(deg_rad(latitude-lat_delta)));
/* The directions of the northern and southern hemispheres
* are opposite, so we choice different points as min/max long/lat */
int southern_hemisphere = latitude < 0 ? 1 : 0;
bounds[0] = southern_hemisphere ? longitude-long_delta_bottom : longitude-long_delta_top;
bounds[2] = southern_hemisphere ? longitude+long_delta_bottom : longitude+long_delta_top;
bounds[1] = latitude - lat_delta;
bounds[3] = latitude + lat_delta;
return 1;
@ -137,12 +137,10 @@ GeoHashRadius geohashCalculateAreasByShapeWGS84(GeoShape *shape) {
double latitude = shape->xy[1];
/* radius_meters is calculated differently in different search types:
* 1) CIRCULAR_TYPE, just use radius.
* 2) RECTANGLE_TYPE, in order to calculate accurately, we should use
* sqrt((width/2)^2 + (height/2)^2), so that the box is bound by a circle,
* But the current code a simpler approach resulting in a smaller circle,
* which is safe because we search the 8 nearby boxes anyway. */
* 2) RECTANGLE_TYPE, we use sqrt((width/2)^2 + (height/2)^2) to
* calculate the distance from the center point to the corner */
double radius_meters = shape->type == CIRCULAR_TYPE ? shape->t.radius :
shape->t.r.width > shape->t.r.height ? shape->t.r.width/2 : shape->t.r.height/2;
sqrt((shape->t.r.width/2)*(shape->t.r.width/2) + (shape->t.r.height/2)*(shape->t.r.height/2));
radius_meters *= shape->conversion;
steps = geohashEstimateStepsByRadius(radius_meters,latitude);
@ -245,14 +243,21 @@ int geohashGetDistanceIfInRadiusWGS84(double x1, double y1, double x2,
return geohashGetDistanceIfInRadius(x1, y1, x2, y2, radius, distance);
}
/* Judge whether a point is in the axis-aligned rectangle.
* bounds : see geohash.h GeoShape::bounds
/* Judge whether a point is in the axis-aligned rectangle, when the distance
* between a searched point and the center point is less than or equal to
* height/2 or width/2 in height and width, the point is in the rectangle.
*
* width_m, height_m: the rectangle
* x1, y1 : the center of the box
* x2, y2 : the point to be searched
*/
int geohashGetDistanceIfInRectangle(double *bounds, double x1, double y1,
int geohashGetDistanceIfInRectangle(double width_m, double height_m, double x1, double y1,
double x2, double y2, double *distance) {
if (x2 < bounds[0] || x2 > bounds[2] || y2 < bounds[1] || y2 > bounds[3]) return 0;
double lon_distance = geohashGetDistance(x2, y2, x1, y2);
double lat_distance = geohashGetDistance(x2, y2, x2, y1);
if (lon_distance > width_m/2 || lat_distance > height_m/2) {
return 0;
}
*distance = geohashGetDistance(x1, y1, x2, y2);
return 1;
}

View File

@ -60,7 +60,7 @@ int geohashGetDistanceIfInRadius(double x1, double y1,
int geohashGetDistanceIfInRadiusWGS84(double x1, double y1, double x2,
double y2, double radius,
double *distance);
int geohashGetDistanceIfInRectangle(double *bounds, double x1, double y1,
int geohashGetDistanceIfInRectangle(double width_m, double height_m, double x1, double y1,
double x2, double y2, double *distance);
#endif /* GEOHASH_HELPER_HPP_ */

View File

@ -458,6 +458,11 @@ struct commandHelp {
"Set the expiration for a key as a UNIX timestamp",
0,
"1.2.0" },
{ "FAILOVER",
"[TO host port [FORCE]] [ABORT] [TIMEOUT milliseconds]",
"Start a coordinated failover between this server and one of its replicas.",
9,
"6.2.0" },
{ "FLUSHALL",
"[ASYNC|SYNC]",
"Remove all keys from all databases",

View File

@ -219,9 +219,12 @@ int lpStringToInt64(const char *s, unsigned long slen, int64_t *value) {
}
/* Create a new, empty listpack.
* On success the new listpack is returned, otherwise an error is returned. */
unsigned char *lpNew(void) {
unsigned char *lp = lp_malloc(LP_HDR_SIZE+1);
* On success the new listpack is returned, otherwise an error is returned.
* Pre-allocate at least `capacity` bytes of memory,
* over-allocated memory can be shrinked by `lpShrinkToFit`.
* */
unsigned char *lpNew(size_t capacity) {
unsigned char *lp = lp_malloc(capacity > LP_HDR_SIZE+1 ? capacity : LP_HDR_SIZE+1);
if (lp == NULL) return NULL;
lpSetTotalBytes(lp,LP_HDR_SIZE+1);
lpSetNumElements(lp,0);
@ -234,6 +237,16 @@ void lpFree(unsigned char *lp) {
lp_free(lp);
}
/* Shrink the memory to fit. */
unsigned char* lpShrinkToFit(unsigned char *lp) {
size_t size = lpGetTotalBytes(lp);
if (size < lp_malloc_size(lp)) {
return lp_realloc(lp, size);
} else {
return lp;
}
}
/* Given an element 'ele' of size 'size', determine if the element can be
* represented inside the listpack encoded as integer, and returns
* LP_ENCODING_INT if so. Otherwise returns LP_ENCODING_STR if no integer
@ -702,7 +715,8 @@ unsigned char *lpInsert(unsigned char *lp, unsigned char *ele, uint32_t size, un
unsigned char *dst = lp + poff; /* May be updated after reallocation. */
/* Realloc before: we need more room. */
if (new_listpack_bytes > old_listpack_bytes) {
if (new_listpack_bytes > old_listpack_bytes &&
new_listpack_bytes > lp_malloc_size(lp)) {
if ((lp = lp_realloc(lp,new_listpack_bytes)) == NULL) return NULL;
dst = lp + poff;
}

View File

@ -35,6 +35,7 @@
#ifndef __LISTPACK_H
#define __LISTPACK_H
#include <stdlib.h>
#include <stdint.h>
#define LP_INTBUF_SIZE 21 /* 20 digits of -2^63 + 1 null term = 21. */
@ -44,8 +45,9 @@
#define LP_AFTER 1
#define LP_REPLACE 2
unsigned char *lpNew(void);
unsigned char *lpNew(size_t capacity);
void lpFree(unsigned char *lp);
unsigned char* lpShrinkToFit(unsigned char *lp);
unsigned char *lpInsert(unsigned char *lp, unsigned char *ele, uint32_t size, unsigned char *p, int where, unsigned char **newp);
unsigned char *lpAppend(unsigned char *lp, unsigned char *ele, uint32_t size);
unsigned char *lpDelete(unsigned char *lp, unsigned char *p, unsigned char **newp);

View File

@ -42,4 +42,5 @@
#define lp_malloc zmalloc
#define lp_realloc zrealloc
#define lp_free zfree
#define lp_malloc_size zmalloc_usable_size
#endif

View File

@ -2612,6 +2612,7 @@ int RM_ZsetRem(RedisModuleKey *key, RedisModuleString *ele, int *deleted) {
if (key->value && key->value->type != OBJ_ZSET) return REDISMODULE_ERR;
if (key->value != NULL && zsetDel(key->value,ele->ptr)) {
if (deleted) *deleted = 1;
moduleDelKeyIfEmpty(key);
} else {
if (deleted) *deleted = 0;
}
@ -2657,6 +2658,7 @@ void RM_ZsetRangeStop(RedisModuleKey *key) {
/* Return the "End of range" flag value to signal the end of the iteration. */
int RM_ZsetRangeEndReached(RedisModuleKey *key) {
if (!key->value || key->value->type != OBJ_ZSET) return 1;
return key->u.zset.er;
}
@ -2974,6 +2976,10 @@ int RM_ZsetRangePrev(RedisModuleKey *key) {
* are created.
* REDISMODULE_HASH_CFIELDS: The field names passed are null terminated C
* strings instead of RedisModuleString objects.
* REDISMODULE_HASH_COUNT_ALL: Include the number of inserted fields in the
* returned number, in addition to the number of
* updated and deleted fields. (Added in Redis
* 6.2.)
*
* Unless NX is specified, the command overwrites the old field value with
* the new one.
@ -2987,21 +2993,43 @@ int RM_ZsetRangePrev(RedisModuleKey *key) {
*
* Return value:
*
* The number of fields updated (that may be less than the number of fields
* specified because of the XX or NX options).
* The number of fields existing in the hash prior to the call, which have been
* updated (its old value has been replaced by a new value) or deleted. If the
* flag REDISMODULE_HASH_COUNT_ALL is set, insterted fields not previously
* existing in the hash are also counted.
*
* In the following case the return value is always zero:
* If the return value is zero, `errno` is set (since Redis 6.2) as follows:
*
* * The key was not open for writing.
* * The key was associated with a non Hash value.
* - EINVAL if any unknown flags are set or if key is NULL.
* - ENOTSUP if the key is associated with a non Hash value.
* - EBADF if the key was not opened for writing.
* - ENOENT if no fields were counted as described under Return value above.
* This is not actually an error. The return value can be zero if all fields
* were just created and the COUNT_ALL flag was unset, or if changes were held
* back due to the NX and XX flags.
*
* NOTICE: The return value semantics of this function are very different
* between Redis 6.2 and older versions. Modules that use it should determine
* the Redis version and handle it accordingly.
*/
int RM_HashSet(RedisModuleKey *key, int flags, ...) {
va_list ap;
if (!(key->mode & REDISMODULE_WRITE)) return 0;
if (key->value && key->value->type != OBJ_HASH) return 0;
if (!key || (flags & ~(REDISMODULE_HASH_NX |
REDISMODULE_HASH_XX |
REDISMODULE_HASH_CFIELDS |
REDISMODULE_HASH_COUNT_ALL))) {
errno = EINVAL;
return 0;
} else if (key->value && key->value->type != OBJ_HASH) {
errno = ENOTSUP;
return 0;
} else if (!(key->mode & REDISMODULE_WRITE)) {
errno = EBADF;
return 0;
}
if (key->value == NULL) moduleCreateEmptyKey(key,REDISMODULE_KEYTYPE_HASH);
int updated = 0;
int count = 0;
va_start(ap, flags);
while(1) {
RedisModuleString *field, *value;
@ -3029,7 +3057,7 @@ int RM_HashSet(RedisModuleKey *key, int flags, ...) {
/* Handle deletion if value is REDISMODULE_HASH_DELETE. */
if (value == REDISMODULE_HASH_DELETE) {
updated += hashTypeDelete(key->value, field->ptr);
count += hashTypeDelete(key->value, field->ptr);
if (flags & REDISMODULE_HASH_CFIELDS) decrRefCount(field);
continue;
}
@ -3043,7 +3071,8 @@ int RM_HashSet(RedisModuleKey *key, int flags, ...) {
robj *argv[2] = {field,value};
hashTypeTryConversion(key->value,argv,0,1);
updated += hashTypeSet(key->value, field->ptr, value->ptr, low_flags);
int updated = hashTypeSet(key->value, field->ptr, value->ptr, low_flags);
count += (flags & REDISMODULE_HASH_COUNT_ALL) ? 1 : updated;
/* If CFIELDS is active, SDS string ownership is now of hashTypeSet(),
* however we still have to release the 'field' object shell. */
@ -3054,7 +3083,8 @@ int RM_HashSet(RedisModuleKey *key, int flags, ...) {
}
va_end(ap);
moduleDelKeyIfEmpty(key);
return updated;
if (count == 0) errno = ENOENT;
return count;
}
/* Get fields from an hash value. This function is called using a variable
@ -5120,6 +5150,7 @@ RedisModuleBlockedClient *moduleBlockClient(RedisModuleCtx *ctx, RedisModuleCmdF
bc->dbid = c->db->id;
bc->blocked_on_keys = keys != NULL;
bc->unblocked = 0;
bc->background_timer = 0;
bc->background_duration = 0;
c->bpop.timeout = timeout;
@ -6728,10 +6759,13 @@ int RM_InfoBeginDictField(RedisModuleInfoCtx *ctx, char *name) {
/* Implicitly end dicts, instead of returning an error which is likely un checked. */
if (ctx->in_dict_field)
RM_InfoEndDictField(ctx);
char *tmpmodname, *tmpname;
ctx->info = sdscatfmt(ctx->info,
"%s_%s:",
ctx->module->name,
name);
getSafeInfoString(ctx->module->name, strlen(ctx->module->name), &tmpmodname),
getSafeInfoString(name, strlen(name), &tmpname));
if (tmpmodname != NULL) zfree(tmpmodname);
if (tmpname != NULL) zfree(tmpname);
ctx->in_dict_field = 1;
return REDISMODULE_OK;
}
@ -7662,16 +7696,18 @@ int RM_Fork(RedisModuleForkDoneHandler cb, void *user_data) {
}
/* The module is advised to call this function from the fork child once in a while,
* so that it can report COW memory to the parent which will be reported in INFO */
void RM_SendChildCOWInfo(void) {
sendChildCOWInfo(CHILD_TYPE_MODULE, 0, "Module fork");
* so that it can report progress and COW memory to the parent which will be
* reported in INFO.
* The `progress` argument should between 0 and 1, or -1 when not available. */
void RM_SendChildHeartbeat(double progress) {
sendChildInfoGeneric(CHILD_INFO_TYPE_CURRENT_INFO, 0, progress, "Module fork");
}
/* Call from the child process when you want to terminate it.
* retcode will be provided to the done handler executed on the parent process.
*/
int RM_ExitFromChild(int retcode) {
sendChildCOWInfo(CHILD_TYPE_MODULE, 1, "Module fork");
sendChildCowInfo(CHILD_INFO_TYPE_MODULE_COW_SIZE, "Module fork");
exitFromChild(retcode);
return REDISMODULE_OK;
}
@ -9205,7 +9241,7 @@ void moduleRegisterCoreAPI(void) {
REGISTER_API(CommandFilterArgReplace);
REGISTER_API(CommandFilterArgDelete);
REGISTER_API(Fork);
REGISTER_API(SendChildCOWInfo);
REGISTER_API(SendChildHeartbeat);
REGISTER_API(ExitFromChild);
REGISTER_API(KillForkChild);
REGISTER_API(RegisterInfoFunc);

View File

@ -155,7 +155,7 @@ client *createClient(connection *conn) {
c->repl_ack_off = 0;
c->repl_ack_time = 0;
c->slave_listening_port = 0;
c->slave_ip[0] = '\0';
c->slave_addr = NULL;
c->slave_capa = SLAVE_CAPA_NONE;
c->reply = listCreate();
c->reply_bytes = 0;
@ -527,7 +527,7 @@ void trimReplyUnusedTailSpace(client *c) {
listNode *ln = listLast(c->reply);
clientReplyBlock *tail = ln? listNodeValue(ln): NULL;
/* Note that 'tail' may be NULL even if we have a tail node, becuase when
/* Note that 'tail' may be NULL even if we have a tail node, because when
* addReplyDeferredLen() is used */
if (!tail) return;
@ -717,10 +717,7 @@ void addReplyLongLong(client *c, long long ll) {
void addReplyAggregateLen(client *c, long length, int prefix) {
serverAssert(length >= 0);
if (prefix == '*' && length < OBJ_SHARED_BULKHDR_LEN)
addReply(c,shared.mbulkhdr[length]);
else
addReplyLongLongWithPrefix(c,length,prefix);
addReplyLongLongWithPrefix(c,length,prefix);
}
void addReplyArrayLen(client *c, long length) {
@ -781,10 +778,7 @@ void addReplyNullArray(client *c) {
void addReplyBulkLen(client *c, robj *obj) {
size_t len = stringObjectLen(obj);
if (len < OBJ_SHARED_BULKHDR_LEN)
addReply(c,shared.bulkhdr[len]);
else
addReplyLongLongWithPrefix(c,len,'$');
addReplyLongLongWithPrefix(c,len,'$');
}
/* Add a Redis Object as a bulk reply */
@ -1406,6 +1400,7 @@ void freeClient(client *c) {
freeClientMultiState(c);
sdsfree(c->peerid);
sdsfree(c->sockname);
sdsfree(c->slave_addr);
zfree(c);
}
@ -1432,7 +1427,7 @@ void freeClientAsync(client *c) {
pthread_mutex_unlock(&async_free_queue_mutex);
}
/* Free the clietns marked as CLOSE_ASAP, return the number of clients
/* Free the clients marked as CLOSE_ASAP, return the number of clients
* freed. */
int freeClientsInAsyncFreeQueue(void) {
int freed = 0;
@ -3525,7 +3520,7 @@ int handleClientsWithPendingWritesUsingThreads(void) {
if (processed == 0) return 0; /* Return ASAP if there are no clients. */
/* If I/O threads are disabled or we have few clients to serve, don't
* use I/O threads, but thejboring synchronous code. */
* use I/O threads, but the boring synchronous code. */
if (server.io_threads_num == 1 || stopThreadedIOIfNeeded()) {
return handleClientsWithPendingWrites();
}

View File

@ -384,23 +384,6 @@ void decrRefCountVoid(void *o) {
decrRefCount(o);
}
/* This function set the ref count to zero without freeing the object.
* It is useful in order to pass a new object to functions incrementing
* the ref count of the received object. Example:
*
* functionThatWillIncrementRefCount(resetRefCount(CreateObject(...)));
*
* Otherwise you need to resort to the less elegant pattern:
*
* *obj = createObject(...);
* functionThatWillIncrementRefCount(obj);
* decrRefCount(obj);
*/
robj *resetRefCount(robj *obj) {
obj->refcount = 0;
return obj;
}
int checkType(client *c, robj *o, int type) {
/* A NULL is considered an empty key */
if (o && o->type != type) {

View File

@ -124,20 +124,6 @@ void addReplyPubsubPatUnsubscribed(client *c, robj *pattern) {
* Pubsub low level API
*----------------------------------------------------------------------------*/
void freePubsubPattern(void *p) {
pubsubPattern *pat = p;
decrRefCount(pat->pattern);
zfree(pat);
}
int listMatchPubsubPattern(void *a, void *b) {
pubsubPattern *pa = a, *pb = b;
return (pa->client == pb->client) &&
(equalStringObjects(pa->pattern,pb->pattern));
}
/* Return the number of channels + patterns a client is subscribed to. */
int clientSubscriptionsCount(client *c) {
return dictSize(c->pubsub_channels)+
@ -212,18 +198,13 @@ int pubsubSubscribePattern(client *c, robj *pattern) {
if (listSearchKey(c->pubsub_patterns,pattern) == NULL) {
retval = 1;
pubsubPattern *pat;
listAddNodeTail(c->pubsub_patterns,pattern);
incrRefCount(pattern);
pat = zmalloc(sizeof(*pat));
pat->pattern = getDecodedObject(pattern);
pat->client = c;
listAddNodeTail(server.pubsub_patterns,pat);
/* Add the client to the pattern -> list of clients hash table */
de = dictFind(server.pubsub_patterns_dict,pattern);
de = dictFind(server.pubsub_patterns,pattern);
if (de == NULL) {
clients = listCreate();
dictAdd(server.pubsub_patterns_dict,pattern,clients);
dictAdd(server.pubsub_patterns,pattern,clients);
incrRefCount(pattern);
} else {
clients = dictGetVal(de);
@ -241,19 +222,14 @@ int pubsubUnsubscribePattern(client *c, robj *pattern, int notify) {
dictEntry *de;
list *clients;
listNode *ln;
pubsubPattern pat;
int retval = 0;
incrRefCount(pattern); /* Protect the object. May be the same we remove */
if ((ln = listSearchKey(c->pubsub_patterns,pattern)) != NULL) {
retval = 1;
listDelNode(c->pubsub_patterns,ln);
pat.client = c;
pat.pattern = pattern;
ln = listSearchKey(server.pubsub_patterns,&pat);
listDelNode(server.pubsub_patterns,ln);
/* Remove the client from the pattern -> clients list hash table */
de = dictFind(server.pubsub_patterns_dict,pattern);
de = dictFind(server.pubsub_patterns,pattern);
serverAssertWithInfo(c,NULL,de != NULL);
clients = dictGetVal(de);
ln = listSearchKey(clients,c);
@ -262,7 +238,7 @@ int pubsubUnsubscribePattern(client *c, robj *pattern, int notify) {
if (listLength(clients) == 0) {
/* Free the list and associated hash entry at all if this was
* the latest client. */
dictDelete(server.pubsub_patterns_dict,pattern);
dictDelete(server.pubsub_patterns,pattern);
}
}
/* Notify the client */
@ -329,7 +305,7 @@ int pubsubPublishMessage(robj *channel, robj *message) {
}
}
/* Send to clients listening to matching channels */
di = dictGetIterator(server.pubsub_patterns_dict);
di = dictGetIterator(server.pubsub_patterns);
if (di) {
channel = getDecodedObject(channel);
while((de = dictNext(di)) != NULL) {
@ -502,7 +478,7 @@ NULL
}
} else if (!strcasecmp(c->argv[1]->ptr,"numpat") && c->argc == 2) {
/* PUBSUB NUMPAT */
addReplyLongLong(c,listLength(server.pubsub_patterns));
addReplyLongLong(c,dictSize(server.pubsub_patterns));
} else {
addReplySubcommandSyntaxError(c);
}

View File

@ -680,8 +680,7 @@ int quicklistReplaceAtIndex(quicklist *quicklist, long index, void *data,
quicklistEntry entry;
if (likely(quicklistIndex(quicklist, index, &entry))) {
/* quicklistIndex provides an uncompressed node */
entry.node->zl = ziplistDelete(entry.node->zl, &entry.zi);
entry.node->zl = ziplistInsert(entry.node->zl, entry.zi, data, sz);
entry.node->zl = ziplistReplace(entry.node->zl, entry.zi, data, sz);
quicklistNodeUpdateSz(entry.node);
quicklistCompress(quicklist, entry.node);
return 1;
@ -1427,7 +1426,7 @@ void quicklistPush(quicklist *quicklist, void *value, const size_t sz,
* Returns 1 on success (creation of new bookmark or override of an existing one).
* Returns 0 on failure (reached the maximum supported number of bookmarks).
* NOTE: use short simple names, so that string compare on find is quick.
* NOTE: bookmakrk creation may re-allocate the quicklist, so the input pointer
* NOTE: bookmark creation may re-allocate the quicklist, so the input pointer
may change and it's the caller responsibilty to update the reference.
*/
int quicklistBookmarkCreate(quicklist **ql_ref, const char *name, quicklistNode *node) {

View File

@ -1223,7 +1223,8 @@ int rdbSaveRio(rio *rdb, int *error, int rdbflags, rdbSaveInfo *rsi) {
size_t processed = 0;
int j;
long key_count = 0;
long long cow_updated_time = 0;
long long info_updated_time = 0;
char *pname = (rdbflags & RDBFLAGS_AOF_PREAMBLE) ? "AOF rewrite" : "RDB";
if (server.rdb_checksum)
rdb->update_cksum = rioGenericUpdateChecksum;
@ -1270,22 +1271,16 @@ int rdbSaveRio(rio *rdb, int *error, int rdbflags, rdbSaveInfo *rsi) {
aofReadDiffFromParent();
}
/* Update COW info every 1 second (approximately).
/* Update child info every 1 second (approximately).
* in order to avoid calling mstime() on each iteration, we will
* check the diff every 1024 keys */
if ((key_count & 1023) == 0) {
key_count = 0;
if ((key_count++ & 1023) == 0) {
long long now = mstime();
if (now - cow_updated_time >= 1000) {
if (rdbflags & RDBFLAGS_AOF_PREAMBLE) {
sendChildCOWInfo(CHILD_TYPE_AOF, 0, "AOF rewrite");
} else {
sendChildCOWInfo(CHILD_TYPE_RDB, 0, "RDB");
}
cow_updated_time = now;
if (now - info_updated_time >= 1000) {
sendChildInfo(CHILD_INFO_TYPE_CURRENT_INFO, key_count, pname);
info_updated_time = now;
}
}
key_count++;
}
dictReleaseIterator(di);
di = NULL; /* So that we don't release it again on error. */
@ -1438,7 +1433,7 @@ int rdbSaveBackground(char *filename, rdbSaveInfo *rsi) {
redisSetCpuAffinity(server.bgsave_cpulist);
retval = rdbSave(filename,rsi);
if (retval == C_OK) {
sendChildCOWInfo(CHILD_TYPE_RDB, 1, "RDB");
sendChildCowInfo(CHILD_INFO_TYPE_RDB_COW_SIZE, "RDB");
}
exitFromChild((retval == C_OK) ? 0 : 1);
} else {
@ -2805,7 +2800,7 @@ int rdbSaveToSlavesSockets(rdbSaveInfo *rsi) {
retval = C_ERR;
if (retval == C_OK) {
sendChildCOWInfo(CHILD_TYPE_RDB, 1, "RDB");
sendChildCowInfo(CHILD_INFO_TYPE_RDB_COW_SIZE, "RDB");
}
rioFreeFd(&rdb);

View File

@ -45,7 +45,7 @@
* the first byte to interpreter the length:
*
* 00|XXXXXX => if the two MSB are 00 the len is the 6 bits of this byte
* 01|XXXXXX XXXXXXXX => 01, the len is 14 byes, 6 bits + 8 bits of next byte
* 01|XXXXXX XXXXXXXX => 01, the len is 14 bits, 6 bits + 8 bits of next byte
* 10|000000 [32 bit integer] => A full 32 bit len in net byte order will follow
* 10|000001 [64 bit integer] => A full 64 bit len in net byte order will follow
* 11|OBKIND this means: specially encoded object will follow. The six bits

View File

@ -567,6 +567,9 @@ static void readHandler(aeEventLoop *el, int fd, void *privdata, int mask) {
* we need to randomize. */
for (j = 0; j < c->randlen; j++)
c->randptr[j] -= c->prefixlen;
/* Fix the pointers to the slot hash tags */
for (j = 0; j < c->staglen; j++)
c->stagptr[j] -= c->prefixlen;
c->prefixlen = 0;
}
continue;
@ -1516,6 +1519,8 @@ int parseOptions(int argc, const char **argv) {
} else if (!strcmp(argv[i],"--cacert")) {
if (lastarg) goto invalid;
config.sslconfig.cacert = strdup(argv[++i]);
} else if (!strcmp(argv[i],"--insecure")) {
config.sslconfig.skip_cert_verify = 1;
} else if (!strcmp(argv[i],"--cert")) {
if (lastarg) goto invalid;
config.sslconfig.cert = strdup(argv[++i]);
@ -1585,6 +1590,7 @@ usage:
" --cacertdir <dir> Directory where trusted CA certificates are stored.\n"
" If neither cacert nor cacertdir are specified, the default\n"
" system-wide trusted root certs configuration will apply.\n"
" --insecure Allow insecure TLS connection by skipping cert validation.\n"
" --cert <file> Client certificate to authenticate with.\n"
" --key <file> Private key file to authenticate with.\n"
" --tls-ciphers <list> Sets the list of prefered ciphers (TLSv1.2 and below)\n"
@ -1682,6 +1688,7 @@ int main(int argc, const char **argv) {
signal(SIGHUP, SIG_IGN);
signal(SIGPIPE, SIG_IGN);
memset(&config.sslconfig, 0, sizeof(config.sslconfig));
config.numclients = 50;
config.requests = 100000;
config.liveclients = 0;

View File

@ -1695,6 +1695,8 @@ static int parseOptions(int argc, char **argv) {
config.sslconfig.key = argv[++i];
} else if (!strcmp(argv[i],"--tls-ciphers") && !lastarg) {
config.sslconfig.ciphers = argv[++i];
} else if (!strcmp(argv[i],"--insecure")) {
config.sslconfig.skip_cert_verify = 1;
#ifdef TLS1_3_VERSION
} else if (!strcmp(argv[i],"--tls-ciphersuites") && !lastarg) {
config.sslconfig.ciphersuites = argv[++i];
@ -1820,6 +1822,7 @@ static void usage(void) {
" --cacertdir <dir> Directory where trusted CA certificates are stored.\n"
" If neither cacert nor cacertdir are specified, the default\n"
" system-wide trusted root certs configuration will apply.\n"
" --insecure Allow insecure TLS connection by skipping cert validation.\n"
" --cert <file> Client certificate to authenticate with.\n"
" --key <file> Private key file to authenticate with.\n"
" --tls-ciphers <list> Sets the list of prefered ciphers (TLSv1.2 and below)\n"
@ -1926,13 +1929,20 @@ static int confirmWithYes(char *msg, int ignore_force) {
/* Turn the plain C strings into Sds strings */
static char **convertToSds(int count, char** args) {
int j;
char **sds = zmalloc(sizeof(char*)*count);
int j;
char **sds = zmalloc(sizeof(char*)*count);
for(j = 0; j < count; j++)
sds[j] = sdsnew(args[j]);
for(j = 0; j < count; j++)
sds[j] = sdsnew(args[j]);
return sds;
return sds;
}
static void freeConvertedSds(int count, char **sds) {
int j;
for (j = 0; j < count; j++)
sdsfree(sds[j]);
zfree(sds);
}
static int issueCommandRepeat(int argc, char **argv, long repeat) {
@ -2165,13 +2175,17 @@ static void repl(void) {
static int noninteractive(int argc, char **argv) {
int retval = 0;
argv = convertToSds(argc, argv);
if (config.stdinarg) {
argv = zrealloc(argv, (argc+1)*sizeof(char*));
argv[argc] = readArgFromStdin();
retval = issueCommand(argc+1, argv);
sdsfree(argv[argc]);
} else {
retval = issueCommand(argc, argv);
}
freeConvertedSds(argc, argv);
return retval;
}
@ -8131,6 +8145,7 @@ int main(int argc, char **argv) {
int firstarg;
struct timeval tv;
memset(&config.sslconfig, 0, sizeof(config.sslconfig));
config.hostip = sdsnew("127.0.0.1");
config.hostport = 6379;
config.hostsocket = NULL;
@ -8327,6 +8342,6 @@ int main(int argc, char **argv) {
if (config.eval) {
return evalMode(argc,argv);
} else {
return noninteractive(argc,convertToSds(argc,argv));
return noninteractive(argc,argv);
}
}

View File

@ -68,6 +68,7 @@
#define REDISMODULE_HASH_XX (1<<1)
#define REDISMODULE_HASH_CFIELDS (1<<2)
#define REDISMODULE_HASH_EXISTS (1<<3)
#define REDISMODULE_HASH_COUNT_ALL (1<<4)
/* StreamID type. */
typedef struct RedisModuleStreamID {
@ -812,7 +813,7 @@ REDISMODULE_API int (*RedisModule_CommandFilterArgInsert)(RedisModuleCommandFilt
REDISMODULE_API int (*RedisModule_CommandFilterArgReplace)(RedisModuleCommandFilterCtx *fctx, int pos, RedisModuleString *arg) REDISMODULE_ATTR;
REDISMODULE_API int (*RedisModule_CommandFilterArgDelete)(RedisModuleCommandFilterCtx *fctx, int pos) REDISMODULE_ATTR;
REDISMODULE_API int (*RedisModule_Fork)(RedisModuleForkDoneHandler cb, void *user_data) REDISMODULE_ATTR;
REDISMODULE_API void (*RedisModule_SendChildCOWInfo)(void) REDISMODULE_ATTR;
REDISMODULE_API void (*RedisModule_SendChildHeartbeat)(double progress) REDISMODULE_ATTR;
REDISMODULE_API int (*RedisModule_ExitFromChild)(int retcode) REDISMODULE_ATTR;
REDISMODULE_API int (*RedisModule_KillForkChild)(int child_pid) REDISMODULE_ATTR;
REDISMODULE_API float (*RedisModule_GetUsedMemoryRatio)() REDISMODULE_ATTR;
@ -1081,7 +1082,7 @@ static int RedisModule_Init(RedisModuleCtx *ctx, const char *name, int ver, int
REDISMODULE_GET_API(CommandFilterArgReplace);
REDISMODULE_GET_API(CommandFilterArgDelete);
REDISMODULE_GET_API(Fork);
REDISMODULE_GET_API(SendChildCOWInfo);
REDISMODULE_GET_API(SendChildHeartbeat);
REDISMODULE_GET_API(ExitFromChild);
REDISMODULE_GET_API(KillForkChild);
REDISMODULE_GET_API(GetUsedMemoryRatio);

View File

@ -57,21 +57,19 @@ int RDBGeneratedByReplication = 0;
* IP address and its listening port which is more clear for the user, for
* example: "Closing connection with replica 10.1.2.3:6380". */
char *replicationGetSlaveName(client *c) {
static char buf[NET_ADDR_STR_LEN];
static char buf[NET_HOST_PORT_STR_LEN];
char ip[NET_IP_STR_LEN];
ip[0] = '\0';
buf[0] = '\0';
if (c->slave_ip[0] != '\0' ||
if (c->slave_addr ||
connPeerToString(c->conn,ip,sizeof(ip),NULL) != -1)
{
/* Note that the 'ip' buffer is always larger than 'c->slave_ip' */
if (c->slave_ip[0] != '\0') memcpy(ip,c->slave_ip,sizeof(c->slave_ip));
char *addr = c->slave_addr ? c->slave_addr : ip;
if (c->slave_listening_port)
anetFormatAddr(buf,sizeof(buf),ip,c->slave_listening_port);
anetFormatAddr(buf,sizeof(buf),addr,c->slave_listening_port);
else
snprintf(buf,sizeof(buf),"%s:<unknown-replica-port>",ip);
snprintf(buf,sizeof(buf),"%s:<unknown-replica-port>",addr);
} else {
snprintf(buf,sizeof(buf),"client id #%llu",
(unsigned long long) c->id);
@ -925,12 +923,13 @@ void replconfCommand(client *c) {
return;
c->slave_listening_port = port;
} else if (!strcasecmp(c->argv[j]->ptr,"ip-address")) {
sds ip = c->argv[j+1]->ptr;
if (sdslen(ip) < sizeof(c->slave_ip)) {
memcpy(c->slave_ip,ip,sdslen(ip)+1);
sds addr = c->argv[j+1]->ptr;
if (sdslen(addr) < NET_HOST_STR_LEN) {
if (c->slave_addr) sdsfree(c->slave_addr);
c->slave_addr = sdsdup(addr);
} else {
addReplyErrorFormat(c,"REPLCONF ip-address provided by "
"replica instance is too long: %zd bytes", sdslen(ip));
"replica instance is too long: %zd bytes", sdslen(addr));
return;
}
} else if (!strcasecmp(c->argv[j]->ptr,"capa")) {
@ -2797,16 +2796,16 @@ void roleCommand(client *c) {
listRewind(server.slaves,&li);
while((ln = listNext(&li))) {
client *slave = ln->value;
char ip[NET_IP_STR_LEN], *slaveip = slave->slave_ip;
char ip[NET_IP_STR_LEN], *slaveaddr = slave->slave_addr;
if (slaveip[0] == '\0') {
if (!slaveaddr) {
if (connPeerToString(slave->conn,ip,sizeof(ip),NULL) == -1)
continue;
slaveip = ip;
slaveaddr = ip;
}
if (slave->replstate != SLAVE_STATE_ONLINE) continue;
addReplyArrayLen(c,3);
addReplyBulkCString(c,slaveip);
addReplyBulkCString(c,slaveaddr);
addReplyBulkLongLong(c,slave->slave_listening_port);
addReplyBulkLongLong(c,slave->repl_ack_off);
slaves++;
@ -3333,10 +3332,9 @@ void replicationCron(void) {
checkClientPauseTimeoutAndReturnIfPaused();
if (!manual_failover_in_progress) {
ping_argv[0] = createStringObject("PING",4);
ping_argv[0] = shared.ping;
replicationFeedSlaves(server.slaves, server.slaveseldb,
ping_argv, 1);
decrRefCount(ping_argv[0]);
}
}
@ -3493,9 +3491,9 @@ static client *findReplica(char *host, int port) {
listRewind(server.slaves,&li);
while((ln = listNext(&li))) {
replica = ln->value;
char ip[NET_IP_STR_LEN], *replicaip = replica->slave_ip;
char ip[NET_IP_STR_LEN], *replicaip = replica->slave_addr;
if (replicaip[0] == '\0') {
if (!replicaip) {
if (connPeerToString(replica->conn, ip, sizeof(ip), NULL) == -1)
continue;
replicaip = ip;
@ -3724,16 +3722,16 @@ void updateFailoverStatus(void) {
while((ln = listNext(&li))) {
replica = ln->value;
if (replica->repl_ack_off == server.master_repl_offset) {
char ip[NET_IP_STR_LEN], *replicaip = replica->slave_ip;
char ip[NET_IP_STR_LEN], *replicaaddr = replica->slave_addr;
if (replicaip[0] == '\0') {
if (!replicaaddr) {
if (connPeerToString(replica->conn,ip,sizeof(ip),NULL) == -1)
continue;
replicaip = ip;
replicaaddr = ip;
}
/* We are now failing over to this specific node */
server.target_replica_host = zstrdup(replicaip);
server.target_replica_host = zstrdup(replicaaddr);
server.target_replica_port = replica->slave_listening_port;
break;
}

View File

@ -1668,12 +1668,11 @@ void evalGenericCommand(client *c, int evalsha) {
* or just running a CPU costly read-only script on the slaves. */
if (server.dirty == initial_server_dirty) {
rewriteClientCommandVector(c,3,
resetRefCount(createStringObject("SCRIPT",6)),
resetRefCount(createStringObject("LOAD",4)),
shared.script,
shared.load,
script);
} else {
rewriteClientCommandArgument(c,0,
resetRefCount(createStringObject("EVAL",4)));
rewriteClientCommandArgument(c,0,shared.eval);
rewriteClientCommandArgument(c,1,script);
}
forceCommandPropagation(c,PROPAGATE_REPL|PROPAGATE_AOF);

View File

@ -111,6 +111,7 @@ sds _sdsnewlen(const void *init, size_t initlen, int trymalloc) {
unsigned char *fp; /* flags pointer. */
size_t usable;
assert(initlen + hdrlen + 1 > initlen); /* Catch size_t overflow */
sh = trymalloc?
s_trymalloc_usable(hdrlen+initlen+1, &usable) :
s_malloc_usable(hdrlen+initlen+1, &usable);
@ -243,6 +244,7 @@ sds sdsMakeRoomFor(sds s, size_t addlen) {
len = sdslen(s);
sh = (char*)s-sdsHdrSize(oldtype);
newlen = (len+addlen);
assert(newlen > len); /* Catch size_t overflow */
if (newlen < SDS_MAX_PREALLOC)
newlen *= 2;
else
@ -256,6 +258,7 @@ sds sdsMakeRoomFor(sds s, size_t addlen) {
if (type == SDS_TYPE_5) type = SDS_TYPE_8;
hdrlen = sdsHdrSize(type);
assert(hdrlen + newlen + 1 > len); /* Catch size_t overflow */
if (oldtype==type) {
newsh = s_realloc_usable(sh, hdrlen+newlen+1, &usable);
if (newsh == NULL) return NULL;

View File

@ -649,6 +649,17 @@ const char *announceSentinelAddr(const sentinelAddr *a) {
return sentinel.announce_hostnames ? a->hostname : a->ip;
}
/* Return an allocated sds with hostname/address:port. IPv6
* addresses are bracketed the same way anetFormatAddr() does.
*/
sds announceSentinelAddrAndPort(const sentinelAddr *a) {
const char *addr = announceSentinelAddr(a);
if (strchr(addr, ':') != NULL)
return sdscatprintf(sdsempty(), "[%s]:%d", addr, a->port);
else
return sdscatprintf(sdsempty(), "%s:%d", addr, a->port);
}
/* =========================== Events notification ========================== */
/* Send an event to log, pub/sub, user notification script.
@ -1273,7 +1284,7 @@ sentinelRedisInstance *createSentinelRedisInstance(char *name, int flags, char *
sentinelRedisInstance *ri;
sentinelAddr *addr;
dict *table = NULL;
char slavename[NET_ADDR_STR_LEN], *sdsname;
sds sdsname;
serverAssert(flags & (SRI_MASTER|SRI_SLAVE|SRI_SENTINEL));
serverAssert((flags & SRI_MASTER) || master != NULL);
@ -1282,11 +1293,11 @@ sentinelRedisInstance *createSentinelRedisInstance(char *name, int flags, char *
addr = createSentinelAddr(hostname,port);
if (addr == NULL) return NULL;
/* For slaves use ip:port as name. */
if (flags & SRI_SLAVE) {
anetFormatAddr(slavename, sizeof(slavename), addr->ip, port);
name = slavename;
}
/* For slaves use ip/host:port as name. */
if (flags & SRI_SLAVE)
sdsname = announceSentinelAddrAndPort(addr);
else
sdsname = sdsnew(name);
/* Make sure the entry is not duplicated. This may happen when the same
* name for a master is used multiple times inside the configuration or
@ -1295,7 +1306,6 @@ sentinelRedisInstance *createSentinelRedisInstance(char *name, int flags, char *
if (flags & SRI_MASTER) table = sentinel.masters;
else if (flags & SRI_SLAVE) table = master->slaves;
else if (flags & SRI_SENTINEL) table = master->sentinels;
sdsname = sdsnew(name);
if (dictFind(table,sdsname)) {
releaseSentinelAddr(addr);
sdsfree(sdsname);
@ -1399,7 +1409,6 @@ sentinelRedisInstance *sentinelRedisInstanceLookupSlave(
{
sds key;
sentinelRedisInstance *slave;
char buf[NET_ADDR_STR_LEN];
sentinelAddr *addr;
serverAssert(ri->flags & SRI_MASTER);
@ -1410,11 +1419,9 @@ sentinelRedisInstance *sentinelRedisInstanceLookupSlave(
*/
addr = createSentinelAddr(slave_addr, port);
if (!addr) return NULL;
anetFormatAddr(buf,sizeof(buf),addr->ip,addr->port);
key = announceSentinelAddrAndPort(addr);
releaseSentinelAddr(addr);
key = sdsnew(buf);
slave = dictFetchValue(ri->slaves,key);
sdsfree(key);
return slave;
@ -2077,13 +2084,13 @@ void rewriteConfigSentinelOption(struct rewriteConfigState *state) {
*/
line = sdscatprintf(sdsempty(), "sentinel resolve-hostnames %s",
sentinel.resolve_hostnames ? "yes" : "no");
rewriteConfigRewriteLine(state,"sentinel",line,
rewriteConfigRewriteLine(state,"sentinel resolve-hostnames",line,
sentinel.resolve_hostnames != SENTINEL_DEFAULT_RESOLVE_HOSTNAMES);
/* sentinel announce-hostnames. */
line = sdscatprintf(sdsempty(), "sentinel announce-hostnames %s",
sentinel.announce_hostnames ? "yes" : "no");
rewriteConfigRewriteLine(state,"sentinel",line,
rewriteConfigRewriteLine(state,"sentinel announce-hostnames",line,
sentinel.announce_hostnames != SENTINEL_DEFAULT_ANNOUNCE_HOSTNAMES);
/* For every master emit a "sentinel monitor" config entry. */

View File

@ -1621,6 +1621,9 @@ void resetChildState() {
server.child_type = CHILD_TYPE_NONE;
server.child_pid = -1;
server.stat_current_cow_bytes = 0;
server.stat_current_save_keys_processed = 0;
server.stat_module_progress = 0;
server.stat_current_save_keys_total = 0;
updateDictResizePolicy();
closeChildInfoPipe();
moduleFireServerEvent(REDISMODULE_EVENT_FORK_CHILD,
@ -2418,13 +2421,10 @@ void beforeSleep(struct aeEventLoop *eventLoop) {
if (server.get_ack_from_slaves && !checkClientPauseTimeoutAndReturnIfPaused()) {
robj *argv[3];
argv[0] = createStringObject("REPLCONF",8);
argv[1] = createStringObject("GETACK",6);
argv[2] = createStringObject("*",1); /* Not used argument. */
argv[0] = shared.replconf;
argv[1] = shared.getack;
argv[2] = shared.special_asterick; /* Not used argument. */
replicationFeedSlaves(server.slaves, server.slaveseldb, argv, 3);
decrRefCount(argv[0]);
decrRefCount(argv[1]);
decrRefCount(argv[2]);
server.get_ack_from_slaves = 0;
}
@ -2565,6 +2565,8 @@ void createSharedObjects(void) {
shared.unsubscribebulk = createStringObject("$11\r\nunsubscribe\r\n",18);
shared.psubscribebulk = createStringObject("$10\r\npsubscribe\r\n",17);
shared.punsubscribebulk = createStringObject("$12\r\npunsubscribe\r\n",19);
/* Shared command names */
shared.del = createStringObject("DEL",3);
shared.unlink = createStringObject("UNLINK",6);
shared.rpop = createStringObject("RPOP",4);
@ -2577,15 +2579,38 @@ void createSharedObjects(void) {
shared.zpopmax = createStringObject("ZPOPMAX",7);
shared.multi = createStringObject("MULTI",5);
shared.exec = createStringObject("EXEC",4);
/* Used in the LMOVE/BLMOVE commands */
shared.left = createStringObject("left",4);
shared.right = createStringObject("right",5);
shared.hset = createStringObject("HSET",4);
shared.srem = createStringObject("SREM",4);
shared.xgroup = createStringObject("XGROUP",6);
shared.xclaim = createStringObject("XCLAIM",6);
shared.script = createStringObject("SCRIPT",6);
shared.replconf = createStringObject("REPLCONF",8);
shared.pexpireat = createStringObject("PEXPIREAT",9);
shared.pexpire = createStringObject("PEXPIRE",7);
shared.persist = createStringObject("PERSIST",7);
shared.set = createStringObject("SET",3);
shared.eval = createStringObject("EVAL",4);
/* Shared command argument */
shared.left = createStringObject("left",4);
shared.right = createStringObject("right",5);
shared.pxat = createStringObject("PXAT", 4);
shared.px = createStringObject("PX",2);
shared.time = createStringObject("TIME",4);
shared.retrycount = createStringObject("RETRYCOUNT",10);
shared.force = createStringObject("FORCE",5);
shared.justid = createStringObject("JUSTID",6);
shared.lastid = createStringObject("LASTID",6);
shared.default_username = createStringObject("default",7);
shared.ping = createStringObject("ping",4);
shared.setid = createStringObject("SETID",5);
shared.keepttl = createStringObject("KEEPTTL",7);
shared.load = createStringObject("LOAD",4);
shared.createconsumer = createStringObject("CREATECONSUMER",14);
shared.getack = createStringObject("GETACK",6);
shared.special_asterick = createStringObject("*",1);
shared.special_equals = createStringObject("=",1);
for (j = 0; j < OBJ_SHARED_INTEGERS; j++) {
shared.integers[j] =
makeObjectShared(createObject(OBJ_STRING,(void*)(long)j));
@ -3181,10 +3206,7 @@ void initServer(void) {
}
evictionPoolAlloc(); /* Initialize the LRU keys pool. */
server.pubsub_channels = dictCreate(&keylistDictType,NULL);
server.pubsub_patterns = listCreate();
server.pubsub_patterns_dict = dictCreate(&keylistDictType,NULL);
listSetFreeMethod(server.pubsub_patterns,freePubsubPattern);
listSetMatchMethod(server.pubsub_patterns,listMatchPubsubPattern);
server.pubsub_patterns = dictCreate(&keylistDictType,NULL);
server.cronloops = 0;
server.in_eval = 0;
server.in_exec = 0;
@ -3214,9 +3236,12 @@ void initServer(void) {
server.stat_starttime = time(NULL);
server.stat_peak_memory = 0;
server.stat_current_cow_bytes = 0;
server.stat_current_save_keys_processed = 0;
server.stat_current_save_keys_total = 0;
server.stat_rdb_cow_bytes = 0;
server.stat_aof_cow_bytes = 0;
server.stat_module_cow_bytes = 0;
server.stat_module_progress = 0;
for (int j = 0; j < CLIENT_TYPE_COUNT; j++)
server.stat_clients_type_memory[j] = 0;
server.cron_malloc_stats.zmalloc_used = 0;
@ -4479,6 +4504,25 @@ void bytesToHuman(char *s, unsigned long long n) {
}
}
/* Characters we sanitize on INFO output to maintain expected format. */
static char unsafe_info_chars[] = "#:\n\r";
static char unsafe_info_chars_substs[] = "____"; /* Must be same length as above */
/* Returns a sanitized version of s that contains no unsafe info string chars.
* If no unsafe characters are found, simply returns s. Caller needs to
* free tmp if it is non-null on return.
*/
const char *getSafeInfoString(const char *s, size_t len, char **tmp) {
*tmp = NULL;
if (mempbrk(s, len, unsafe_info_chars,sizeof(unsafe_info_chars)-1)
== NULL) return s;
char *new = *tmp = zmalloc(len + 1);
memcpy(new, s, len);
new[len] = '\0';
return memmapchars(new, len, unsafe_info_chars, unsafe_info_chars_substs,
sizeof(unsafe_info_chars)-1);
}
/* Create the string returned by the INFO command. This is decoupled
* by the INFO command itself as we need to report the same information
* on memory corruption problems. */
@ -4728,10 +4772,20 @@ sds genRedisInfoString(const char *section) {
/* Persistence */
if (allsections || defsections || !strcasecmp(section,"persistence")) {
if (sections++) info = sdscat(info,"\r\n");
double fork_perc = 0;
if (server.stat_module_progress) {
fork_perc = server.stat_module_progress * 100;
} else if (server.stat_current_save_keys_total) {
fork_perc = ((double)server.stat_current_save_keys_processed / server.stat_current_save_keys_total) * 100;
}
info = sdscatprintf(info,
"# Persistence\r\n"
"loading:%d\r\n"
"current_cow_size:%zu\r\n"
"current_fork_perc:%.2f%%\r\n"
"current_save_keys_processed:%zu\r\n"
"current_save_keys_total:%zu\r\n"
"rdb_changes_since_last_save:%lld\r\n"
"rdb_bgsave_in_progress:%d\r\n"
"rdb_last_save_time:%jd\r\n"
@ -4751,6 +4805,9 @@ sds genRedisInfoString(const char *section) {
"module_fork_last_cow_size:%zu\r\n",
(int)server.loading,
server.stat_current_cow_bytes,
fork_perc,
server.stat_current_save_keys_processed,
server.stat_current_save_keys_total,
server.dirty,
server.child_type == CHILD_TYPE_RDB,
(intmax_t)server.lastsave,
@ -4899,7 +4956,7 @@ sds genRedisInfoString(const char *section) {
server.stat_keyspace_hits,
server.stat_keyspace_misses,
dictSize(server.pubsub_channels),
listLength(server.pubsub_patterns),
dictSize(server.pubsub_patterns),
server.stat_fork_time,
server.stat_total_forks,
dictSize(server.migrate_cached_sockets),
@ -5005,11 +5062,11 @@ sds genRedisInfoString(const char *section) {
while((ln = listNext(&li))) {
client *slave = listNodeValue(ln);
char *state = NULL;
char ip[NET_IP_STR_LEN], *slaveip = slave->slave_ip;
char ip[NET_IP_STR_LEN], *slaveip = slave->slave_addr;
int port;
long lag = 0;
if (slaveip[0] == '\0') {
if (!slaveip) {
if (connPeerToString(slave->conn,ip,sizeof(ip),&port) == -1)
continue;
slaveip = ip;
@ -5104,15 +5161,17 @@ sds genRedisInfoString(const char *section) {
dictIterator *di;
di = dictGetSafeIterator(server.commands);
while((de = dictNext(di)) != NULL) {
char *tmpsafe;
c = (struct redisCommand *) dictGetVal(de);
if (!c->calls && !c->failed_calls && !c->rejected_calls)
continue;
info = sdscatprintf(info,
"cmdstat_%s:calls=%lld,usec=%lld,usec_per_call=%.2f"
",rejected_calls=%lld,failed_calls=%lld\r\n",
c->name, c->calls, c->microseconds,
getSafeInfoString(c->name, strlen(c->name), &tmpsafe), c->calls, c->microseconds,
(c->calls == 0) ? 0 : ((float)c->microseconds/c->calls),
c->rejected_calls, c->failed_calls);
if (tmpsafe != NULL) zfree(tmpsafe);
}
dictReleaseIterator(di);
}
@ -5125,10 +5184,12 @@ sds genRedisInfoString(const char *section) {
raxSeek(&ri,"^",NULL,0);
struct redisError *e;
while(raxNext(&ri)) {
char *tmpsafe;
e = (struct redisError *) ri.data;
info = sdscatprintf(info,
"errorstat_%.*s:count=%lld\r\n",
(int)ri.key_len, ri.key, e->count);
(int)ri.key_len, getSafeInfoString((char *) ri.key, ri.key_len, &tmpsafe), e->count);
if (tmpsafe != NULL) zfree(tmpsafe);
}
raxStop(&ri);
}
@ -5617,6 +5678,9 @@ int redisFork(int purpose) {
server.child_pid = childpid;
server.child_type = purpose;
server.stat_current_cow_bytes = 0;
server.stat_current_save_keys_processed = 0;
server.stat_module_progress = 0;
server.stat_current_save_keys_total = dbTotalServerKeyCount();
}
updateDictResizePolicy();
@ -5627,16 +5691,12 @@ int redisFork(int purpose) {
return childpid;
}
void sendChildCOWInfo(int ptype, int on_exit, char *pname) {
size_t private_dirty = zmalloc_get_private_dirty(-1);
void sendChildCowInfo(childInfoType info_type, char *pname) {
sendChildInfoGeneric(info_type, 0, -1, pname);
}
if (private_dirty) {
serverLog(on_exit ? LL_NOTICE : LL_VERBOSE,
"%s: %zu MB of memory used by copy-on-write",
pname, private_dirty/(1024*1024));
}
sendChildInfo(ptype, on_exit, private_dirty);
void sendChildInfo(childInfoType info_type, size_t keys, char *pname) {
sendChildInfoGeneric(info_type, keys, -1, pname);
}
void memtest(size_t megabytes, int passes);

View File

@ -111,8 +111,10 @@ typedef long long ustime_t; /* microsecond time type. */
#define CONFIG_DEFAULT_CLUSTER_CONFIG_FILE "nodes.conf"
#define CONFIG_DEFAULT_UNIX_SOCKET_PERM 0
#define CONFIG_DEFAULT_LOGFILE ""
#define NET_HOST_STR_LEN 256 /* Longest valid hostname */
#define NET_IP_STR_LEN 46 /* INET6_ADDRSTRLEN is 46, but we need to be sure */
#define NET_ADDR_STR_LEN (NET_IP_STR_LEN+32) /* Must be enough for ip:port */
#define NET_HOST_PORT_STR_LEN (NET_HOST_STR_LEN+32) /* Must be enough for hostname:port */
#define CONFIG_BINDADDR_MAX 16
#define CONFIG_MIN_RESERVED_FDS 32
#define CONFIG_DEFAULT_PROC_TITLE_TEMPLATE "{title} {listen-addr} {server-mode}"
@ -901,7 +903,7 @@ typedef struct client {
should use. */
char replid[CONFIG_RUN_ID_SIZE+1]; /* Master replication ID (if master). */
int slave_listening_port; /* As configured with: REPLCONF listening-port */
char slave_ip[NET_IP_STR_LEN]; /* Optionally given by REPLCONF ip-address */
char *slave_addr; /* Optionally given by REPLCONF ip-address */
int slave_capa; /* Slave capabilities: SLAVE_CAPA_* bitwise OR. */
multiState mstate; /* MULTI/EXEC state */
int btype; /* Type of blocking op if CLIENT_BLOCKED. */
@ -976,8 +978,11 @@ struct sharedObjectsStruct {
*busykeyerr, *oomerr, *plus, *messagebulk, *pmessagebulk, *subscribebulk,
*unsubscribebulk, *psubscribebulk, *punsubscribebulk, *del, *unlink,
*rpop, *lpop, *lpush, *rpoplpush, *lmove, *blmove, *zpopmin, *zpopmax,
*emptyscan, *multi, *exec, *left, *right, *persist, *set, *pexpireat,
*pexpire, *pxat, *px,
*emptyscan, *multi, *exec, *left, *right, *hset, *srem, *xgroup, *xclaim,
*script, *replconf, *eval, *persist, *set, *pexpireat, *pexpire,
*time, *pxat, *px, *retrycount, *force, *justid,
*lastid, *ping, *setid, *keepttl, *load, *createconsumer,
*getack, *special_asterick, *special_equals, *default_username,
*select[PROTO_SHARED_SELECT_CMDS],
*integers[OBJ_SHARED_INTEGERS],
*mbulkhdr[OBJ_SHARED_BULKHDR_LEN], /* "*<value>\r\n" */
@ -1139,6 +1144,13 @@ struct clusterState;
#define CHILD_TYPE_LDB 3
#define CHILD_TYPE_MODULE 4
typedef enum childInfoType {
CHILD_INFO_TYPE_CURRENT_INFO,
CHILD_INFO_TYPE_AOF_COW_SIZE,
CHILD_INFO_TYPE_RDB_COW_SIZE,
CHILD_INFO_TYPE_MODULE_COW_SIZE
} childInfoType;
struct redisServer {
/* General */
pid_t pid; /* Main process pid. */
@ -1267,9 +1279,12 @@ struct redisServer {
redisAtomic long long stat_net_input_bytes; /* Bytes read from network. */
redisAtomic long long stat_net_output_bytes; /* Bytes written to network. */
size_t stat_current_cow_bytes; /* Copy on write bytes while child is active. */
size_t stat_current_save_keys_processed; /* Processed keys while child is active. */
size_t stat_current_save_keys_total; /* Number of keys when child started. */
size_t stat_rdb_cow_bytes; /* Copy on write bytes during RDB saving. */
size_t stat_aof_cow_bytes; /* Copy on write bytes during AOF rewrite. */
size_t stat_module_cow_bytes; /* Copy on write bytes during module fork. */
double stat_module_progress; /* Module save progress. */
uint64_t stat_clients_type_memory[CLIENT_TYPE_COUNT];/* Mem usage by type */
long long stat_unexpected_error_replies; /* Number of unexpected (aof-loading, replica to master, etc.) error replies */
long long stat_total_error_replies; /* Total number of issued error replies ( command + rejected errors ) */
@ -1508,8 +1523,7 @@ struct redisServer {
long long blocked_last_cron; /* Indicate the mstime of the last time we did cron jobs from a blocking operation */
/* Pubsub */
dict *pubsub_channels; /* Map channels to list of subscribed clients */
list *pubsub_patterns; /* A list of pubsub_patterns */
dict *pubsub_patterns_dict; /* A dict of pubsub_patterns */
dict *pubsub_patterns; /* A dict of pubsub_patterns */
int notify_keyspace_events; /* Events to propagate via Pub/Sub. This is an
xor of NOTIFY_... flags. */
/* Cluster */
@ -1596,11 +1610,6 @@ struct redisServer {
int failover_state; /* Failover state */
};
typedef struct pubsubPattern {
client *client;
robj *pattern;
} pubsubPattern;
#define MAX_KEYS_BUFFER 256
/* A result structure for the various getkeys function calls. It lists the
@ -2057,7 +2066,9 @@ void restartAOFAfterSYNC();
/* Child info */
void openChildInfoPipe(void);
void closeChildInfoPipe(void);
void sendChildInfo(int process_type, int on_exit, size_t cow_size);
void sendChildInfoGeneric(childInfoType info_type, size_t keys, double progress, char *pname);
void sendChildCowInfo(childInfoType info_type, char *pname);
void sendChildInfo(childInfoType info_type, size_t keys, char *pname);
void receiveChildInfo(void);
/* Fork helpers */
@ -2065,7 +2076,6 @@ int redisFork(int type);
int hasActiveChildProcess();
void resetChildState();
int isMutuallyExclusiveChildType(int type);
void sendChildCOWInfo(int ptype, int on_exit, char *pname);
/* acl.c -- Authentication related prototypes. */
extern rax *Users;
@ -2266,8 +2276,6 @@ int hashZiplistValidateIntegrity(unsigned char *zl, size_t size, int deep);
/* Pub / Sub */
int pubsubUnsubscribeAllChannels(client *c, int notify);
int pubsubUnsubscribeAllPatterns(client *c, int notify);
void freePubsubPattern(void *p);
int listMatchPubsubPattern(void *a, void *b);
int pubsubPublishMessage(robj *channel, robj *message);
void addReplyPubsubMessage(client *c, robj *channel, robj *msg);
@ -2682,6 +2690,7 @@ void _serverPanic(const char *file, int line, const char *msg, ...);
#endif
void serverLogObjectDebugInfo(const robj *o);
void sigsegvHandler(int sig, siginfo_t *info, void *secret);
const char *getSafeInfoString(const char *s, size_t len, char **tmp);
sds genRedisInfoString(const char *section);
sds genModulesInfoString(sds info);
void enableWatchdog(int period);

View File

@ -215,11 +215,8 @@ int hashTypeSet(robj *o, sds field, sds value, int flags) {
serverAssert(vptr != NULL);
update = 1;
/* Delete value */
zl = ziplistDelete(zl, &vptr);
/* Insert new value */
zl = ziplistInsert(zl, vptr, (unsigned char*)value,
/* Replace value */
zl = ziplistReplace(zl, vptr, (unsigned char*)value,
sdslen(value));
}
}
@ -759,11 +756,9 @@ void hincrbyfloatCommand(client *c) {
/* Always replicate HINCRBYFLOAT as an HSET command with the final value
* in order to make sure that differences in float precision or formatting
* will not create differences in replicas or after an AOF restart. */
robj *aux, *newobj;
aux = createStringObject("HSET",4);
robj *newobj;
newobj = createRawStringObject(buf,len);
rewriteClientCommandArgument(c,0,aux);
decrRefCount(aux);
rewriteClientCommandArgument(c,0,shared.hset);
rewriteClientCommandArgument(c,3,newobj);
decrRefCount(newobj);
}
@ -959,11 +954,33 @@ void hscanCommand(client *c) {
scanGenericCommand(c,o,cursor);
}
static void harndfieldReplyWithZiplist(client *c, unsigned int count, ziplistEntry *keys, ziplistEntry *vals) {
for (unsigned long i = 0; i < count; i++) {
if (vals && c->resp > 2)
addReplyArrayLen(c,2);
if (keys[i].sval)
addReplyBulkCBuffer(c, keys[i].sval, keys[i].slen);
else
addReplyBulkLongLong(c, keys[i].lval);
if (vals) {
if (vals[i].sval)
addReplyBulkCBuffer(c, vals[i].sval, vals[i].slen);
else
addReplyBulkLongLong(c, vals[i].lval);
}
}
}
/* How many times bigger should be the hash compared to the requested size
* for us to not use the "remove elements" strategy? Read later in the
* implementation for more info. */
#define HRANDFIELD_SUB_STRATEGY_MUL 3
/* If client is trying to ask for a very large number of random elements,
* queuing may consume an unlimited amount of memory, so we want to limit
* the number of randoms per time. */
#define HRANDFIELD_RANDOM_SAMPLE_LIMIT 1000
void hrandfieldWithCountCommand(client *c, long l, int withvalues) {
unsigned long count, size;
int uniq = 1;
@ -999,7 +1016,7 @@ void hrandfieldWithCountCommand(client *c, long l, int withvalues) {
if (hash->encoding == OBJ_ENCODING_HT) {
sds key, value;
while (count--) {
dictEntry *de = dictGetRandomKey(hash->ptr);
dictEntry *de = dictGetFairRandomKey(hash->ptr);
key = dictGetKey(de);
value = dictGetVal(de);
if (withvalues && c->resp > 2)
@ -1010,23 +1027,16 @@ void hrandfieldWithCountCommand(client *c, long l, int withvalues) {
}
} else if (hash->encoding == OBJ_ENCODING_ZIPLIST) {
ziplistEntry *keys, *vals = NULL;
keys = zmalloc(sizeof(ziplistEntry)*count);
unsigned long limit, sample_count;
limit = count > HRANDFIELD_RANDOM_SAMPLE_LIMIT ? HRANDFIELD_RANDOM_SAMPLE_LIMIT : count;
keys = zmalloc(sizeof(ziplistEntry)*limit);
if (withvalues)
vals = zmalloc(sizeof(ziplistEntry)*count);
ziplistRandomPairs(hash->ptr, count, keys, vals);
for (unsigned long i = 0; i < count; i++) {
if (withvalues && c->resp > 2)
addReplyArrayLen(c,2);
if (keys[i].sval)
addReplyBulkCBuffer(c, keys[i].sval, keys[i].slen);
else
addReplyBulkLongLong(c, keys[i].lval);
if (withvalues) {
if (vals[i].sval)
addReplyBulkCBuffer(c, vals[i].sval, vals[i].slen);
else
addReplyBulkLongLong(c, vals[i].lval);
}
vals = zmalloc(sizeof(ziplistEntry)*limit);
while (count) {
sample_count = count > limit ? limit : count;
count -= sample_count;
ziplistRandomPairs(hash->ptr, sample_count, keys, vals);
harndfieldReplyWithZiplist(c, sample_count, keys, vals);
}
zfree(keys);
zfree(vals);
@ -1068,6 +1078,7 @@ void hrandfieldWithCountCommand(client *c, long l, int withvalues) {
* used into CASE 4 is highly inefficient. */
if (count*HRANDFIELD_SUB_STRATEGY_MUL > size) {
dict *d = dictCreate(&sdsReplyDictType, NULL);
dictExpand(d, size);
hashTypeIterator *hi = hashTypeInitIterator(hash);
/* Add all the elements into the temporary dictionary. */
@ -1119,9 +1130,25 @@ void hrandfieldWithCountCommand(client *c, long l, int withvalues) {
* to the temporary hash, trying to eventually get enough unique elements
* to reach the specified count. */
else {
if (hash->encoding == OBJ_ENCODING_ZIPLIST) {
/* it is inefficient to repeatedly pick one random element from a
* ziplist. so we use this instead: */
ziplistEntry *keys, *vals = NULL;
keys = zmalloc(sizeof(ziplistEntry)*count);
if (withvalues)
vals = zmalloc(sizeof(ziplistEntry)*count);
serverAssert(ziplistRandomPairsUnique(hash->ptr, count, keys, vals) == count);
harndfieldReplyWithZiplist(c, count, keys, vals);
zfree(keys);
zfree(vals);
return;
}
/* Hashtable encoding (generic implementation) */
unsigned long added = 0;
ziplistEntry key, value;
dict *d = dictCreate(&hashDictType, NULL);
dictExpand(d, count);
while(added < count) {
hashTypeRandomElement(hash, size, &key, withvalues? &value : NULL);

View File

@ -41,10 +41,13 @@
void listTypePush(robj *subject, robj *value, int where) {
if (subject->encoding == OBJ_ENCODING_QUICKLIST) {
int pos = (where == LIST_HEAD) ? QUICKLIST_HEAD : QUICKLIST_TAIL;
value = getDecodedObject(value);
size_t len = sdslen(value->ptr);
quicklistPush(subject->ptr, value->ptr, len, pos);
decrRefCount(value);
if (value->encoding == OBJ_ENCODING_INT) {
char buf[32];
ll2string(buf, 32, (long)value->ptr);
quicklistPush(subject->ptr, buf, strlen(buf), pos);
} else {
quicklistPush(subject->ptr, value->ptr, sdslen(value->ptr), pos);
}
} else {
serverPanic("Unknown list encoding");
}
@ -324,7 +327,6 @@ void lindexCommand(client *c) {
robj *o = lookupKeyReadOrReply(c,c->argv[1],shared.null[c->resp]);
if (o == NULL || checkType(c,o,OBJ_LIST)) return;
long index;
robj *value = NULL;
if ((getLongFromObjectOrReply(c, c->argv[2], &index, NULL) != C_OK))
return;
@ -333,12 +335,10 @@ void lindexCommand(client *c) {
quicklistEntry entry;
if (quicklistIndex(o->ptr, index, &entry)) {
if (entry.value) {
value = createStringObject((char*)entry.value,entry.sz);
addReplyBulkCBuffer(c, entry.value, entry.sz);
} else {
value = createStringObjectFromLongLong(entry.longval);
addReplyBulkLongLong(c, entry.longval);
}
addReplyBulk(c,value);
decrRefCount(value);
} else {
addReplyNull(c);
}

View File

@ -499,7 +499,7 @@ void spopWithCountCommand(client *c) {
* Prepare our replication argument vector. Also send the array length
* which is common to both the code paths. */
robj *propargv[3];
propargv[0] = createStringObject("SREM",4);
propargv[0] = shared.srem;
propargv[1] = c->argv[1];
addReplySetLen(c,count);
@ -590,13 +590,12 @@ void spopWithCountCommand(client *c) {
* dirty counter. We don't want to propagate an SPOP command since
* we propagated the command as a set of SREMs operations using
* the alsoPropagate() API. */
decrRefCount(propargv[0]);
preventCommandPropagation(c);
signalModifiedKey(c,c->db,c->argv[1]);
}
void spopCommand(client *c) {
robj *set, *ele, *aux;
robj *set, *ele;
sds sdsele;
int64_t llele;
int encoding;
@ -629,9 +628,7 @@ void spopCommand(client *c) {
notifyKeyspaceEvent(NOTIFY_SET,"spop",c->argv[1],c->db->id);
/* Replicate/AOF this command as an SREM operation */
aux = createStringObject("SREM",4);
rewriteClientCommandVector(c,3,aux,c->argv[1],ele);
decrRefCount(aux);
rewriteClientCommandVector(c,3,shared.srem,c->argv[1],ele);
/* Add the element to the reply */
addReplyBulk(c,ele);
@ -677,13 +674,13 @@ void srandmemberWithCountCommand(client *c) {
uniq = 0;
}
if ((set = lookupKeyReadOrReply(c,c->argv[1],shared.emptyset[c->resp]))
if ((set = lookupKeyReadOrReply(c,c->argv[1],shared.emptyarray))
== NULL || checkType(c,set,OBJ_SET)) return;
size = setTypeSize(set);
/* If count is zero, serve it ASAP to avoid special cases later. */
if (count == 0) {
addReply(c,shared.emptyset[c->resp]);
addReply(c,shared.emptyarray);
return;
}
@ -693,7 +690,7 @@ void srandmemberWithCountCommand(client *c) {
* structures. This case is the only one that also needs to return the
* elements in random order. */
if (!uniq || count == 1) {
addReplySetLen(c,count);
addReplyArrayLen(c,count);
while(count--) {
encoding = setTypeRandomElement(set,&ele,&llele);
if (encoding == OBJ_ENCODING_INTSET) {
@ -709,7 +706,19 @@ void srandmemberWithCountCommand(client *c) {
* The number of requested elements is greater than the number of
* elements inside the set: simply return the whole set. */
if (count >= size) {
sunionDiffGenericCommand(c,c->argv+1,1,NULL,SET_OP_UNION);
setTypeIterator *si;
addReplyArrayLen(c,size);
si = setTypeInitIterator(set);
while ((encoding = setTypeNext(si,&ele,&llele)) != -1) {
if (encoding == OBJ_ENCODING_INTSET) {
addReplyBulkLongLong(c,llele);
} else {
addReplyBulkCBuffer(c,ele,sdslen(ele));
}
size--;
}
setTypeReleaseIterator(si);
serverAssert(size==0);
return;
}
@ -730,6 +739,7 @@ void srandmemberWithCountCommand(client *c) {
/* Add all the elements into the temporary dictionary. */
si = setTypeInitIterator(set);
dictExpand(d, size);
while ((encoding = setTypeNext(si,&ele,&llele)) != -1) {
int retval = DICT_ERR;
@ -762,6 +772,7 @@ void srandmemberWithCountCommand(client *c) {
unsigned long added = 0;
sds sdsele;
dictExpand(d, count);
while (added < count) {
encoding = setTypeRandomElement(set,&ele,&llele);
if (encoding == OBJ_ENCODING_INTSET) {
@ -784,7 +795,7 @@ void srandmemberWithCountCommand(client *c) {
dictIterator *di;
dictEntry *de;
addReplySetLen(c,count);
addReplyArrayLen(c,count);
di = dictGetIterator(d);
while((de = dictNext(di)) != NULL)
addReplyBulkSds(c,dictGetKey(de));

View File

@ -43,6 +43,10 @@
* avoid malloc allocation.*/
#define STREAMID_STATIC_VECTOR_LEN 8
/* Max pre-allocation for listpack. This is done to avoid abuse of a user
* setting stream_node_max_bytes to a huge number. */
#define STREAM_LISTPACK_MAX_PRE_ALLOCATE 4096
void streamFreeCG(streamCG *cg);
void streamFreeNACK(streamNACK *na);
size_t streamReplyWithRangeFromConsumerPEL(client *c, stream *s, streamID *start, streamID *end, size_t count, streamConsumer *consumer);
@ -508,8 +512,16 @@ int streamAppendItem(stream *s, robj **argv, int64_t numfields, streamID *added_
{
lp = NULL;
} else if (server.stream_node_max_entries) {
int64_t count = lpGetInteger(lpFirst(lp));
if (count >= server.stream_node_max_entries) lp = NULL;
unsigned char *lp_ele = lpFirst(lp);
/* Count both live entries and deleted ones. */
int64_t count = lpGetInteger(lp_ele) + lpGetInteger(lpNext(lp,lp_ele));
if (count >= server.stream_node_max_entries) {
/* Shrink extra pre-allocated memory */
lp = lpShrinkToFit(lp);
if (ri.data != lp)
raxInsert(s->rax,ri.key,ri.key_len,lp,NULL);
lp = NULL;
}
}
}
@ -517,8 +529,17 @@ int streamAppendItem(stream *s, robj **argv, int64_t numfields, streamID *added_
if (lp == NULL) {
master_id = id;
streamEncodeID(rax_key,&id);
/* Create the listpack having the master entry ID and fields. */
lp = lpNew();
/* Create the listpack having the master entry ID and fields.
* Pre-allocate some bytes when creating listpack to avoid realloc on
* every XADD. Since listpack.c uses malloc_size, it'll grow in steps,
* and won't realloc on every XADD.
* When listpack reaches max number of entries, we'll shrink the
* allocation to fit the data. */
size_t prealloc = STREAM_LISTPACK_MAX_PRE_ALLOCATE;
if (server.stream_node_max_bytes > 0 && server.stream_node_max_bytes < prealloc) {
prealloc = server.stream_node_max_bytes;
}
lp = lpNew(prealloc);
lp = lpAppendInteger(lp,1); /* One item, the one we are adding. */
lp = lpAppendInteger(lp,0); /* Zero deleted so far. */
lp = lpAppendInteger(lp,numfields);
@ -1328,19 +1349,19 @@ void streamPropagateXCLAIM(client *c, robj *key, streamCG *group, robj *groupnam
* Note that JUSTID is useful in order to avoid that XCLAIM will do
* useless work in the slave side, trying to fetch the stream item. */
robj *argv[14];
argv[0] = createStringObject("XCLAIM",6);
argv[0] = shared.xclaim;
argv[1] = key;
argv[2] = groupname;
argv[3] = createStringObject(nack->consumer->name,sdslen(nack->consumer->name));
argv[4] = createStringObjectFromLongLong(0);
argv[4] = shared.integers[0];
argv[5] = id;
argv[6] = createStringObject("TIME",4);
argv[6] = shared.time;
argv[7] = createStringObjectFromLongLong(nack->delivery_time);
argv[8] = createStringObject("RETRYCOUNT",10);
argv[8] = shared.retrycount;
argv[9] = createStringObjectFromLongLong(nack->delivery_count);
argv[10] = createStringObject("FORCE",5);
argv[11] = createStringObject("JUSTID",6);
argv[12] = createStringObject("LASTID",6);
argv[10] = shared.force;
argv[11] = shared.justid;
argv[12] = shared.lastid;
argv[13] = createObjectFromStreamID(&group->last_id);
/* We use progagate() because this code path is not always called from
@ -1348,16 +1369,9 @@ void streamPropagateXCLAIM(client *c, robj *key, streamCG *group, robj *groupnam
* consumer group state, and we don't need MULTI/EXEC wrapping because
* there is no message state cross-message atomicity required. */
propagate(server.xclaimCommand,c->db->id,argv,14,PROPAGATE_AOF|PROPAGATE_REPL);
decrRefCount(argv[0]);
decrRefCount(argv[3]);
decrRefCount(argv[4]);
decrRefCount(argv[6]);
decrRefCount(argv[7]);
decrRefCount(argv[8]);
decrRefCount(argv[9]);
decrRefCount(argv[10]);
decrRefCount(argv[11]);
decrRefCount(argv[12]);
decrRefCount(argv[13]);
}
@ -1369,8 +1383,8 @@ void streamPropagateXCLAIM(client *c, robj *key, streamCG *group, robj *groupnam
*/
void streamPropagateGroupID(client *c, robj *key, streamCG *group, robj *groupname) {
robj *argv[5];
argv[0] = createStringObject("XGROUP",6);
argv[1] = createStringObject("SETID",5);
argv[0] = shared.xgroup;
argv[1] = shared.setid;
argv[2] = key;
argv[3] = groupname;
argv[4] = createObjectFromStreamID(&group->last_id);
@ -1380,8 +1394,6 @@ void streamPropagateGroupID(client *c, robj *key, streamCG *group, robj *groupna
* consumer group state, and we don't need MULTI/EXEC wrapping because
* there is no message state cross-message atomicity required. */
propagate(server.xgroupCommand,c->db->id,argv,5,PROPAGATE_AOF|PROPAGATE_REPL);
decrRefCount(argv[0]);
decrRefCount(argv[1]);
decrRefCount(argv[4]);
}
@ -1393,8 +1405,8 @@ void streamPropagateGroupID(client *c, robj *key, streamCG *group, robj *groupna
*/
void streamPropagateConsumerCreation(client *c, robj *key, robj *groupname, sds consumername) {
robj *argv[5];
argv[0] = createStringObject("XGROUP",6);
argv[1] = createStringObject("CREATECONSUMER",14);
argv[0] = shared.xgroup;
argv[1] = shared.createconsumer;
argv[2] = key;
argv[3] = groupname;
argv[4] = createObject(OBJ_STRING,sdsdup(consumername));
@ -1404,8 +1416,6 @@ void streamPropagateConsumerCreation(client *c, robj *key, robj *groupname, sds
* consumer group state, and we don't need MULTI/EXEC wrapping because
* there is no message state cross-message atomicity required. */
propagate(server.xgroupCommand,c->db->id,argv,5,PROPAGATE_AOF|PROPAGATE_REPL);
decrRefCount(argv[0]);
decrRefCount(argv[1]);
decrRefCount(argv[4]);
}
@ -1725,9 +1735,7 @@ int streamParseIntervalIDOrReply(client *c, robj *o, streamID *id, int *exclude,
}
void streamRewriteApproxSpecifier(client *c, int idx) {
robj *equal_obj = createStringObject("=",1);
rewriteClientCommandArgument(c,idx,equal_obj);
decrRefCount(equal_obj);
rewriteClientCommandArgument(c,idx,shared.special_equals);
}
/* We propagate MAXLEN/MINID ~ <count> as MAXLEN/MINID = <resulting-len-of-stream>
@ -3471,7 +3479,7 @@ NULL
key = c->argv[2];
/* Lookup the key now, this is common for all the subcommands but HELP. */
robj *o = lookupKeyWriteOrReply(c,key,shared.nokeyerr);
robj *o = lookupKeyReadOrReply(c,key,shared.nokeyerr);
if (o == NULL || checkType(c,o,OBJ_STREAM)) return;
s = o->ptr;

View File

@ -73,16 +73,25 @@ static int checkStringLength(client *c, long long size) {
#define OBJ_PERSIST (1<<8) /* Set if we need to remove the ttl */
void setGenericCommand(client *c, int flags, robj *key, robj *val, robj *expire, int unit, robj *ok_reply, robj *abort_reply) {
long long milliseconds = 0; /* initialized to avoid any harmness warning */
long long milliseconds = 0, when = 0; /* initialized to avoid any harmness warning */
if (expire) {
if (getLongLongFromObjectOrReply(c, expire, &milliseconds, NULL) != C_OK)
return;
if (milliseconds <= 0) {
addReplyErrorFormat(c,"invalid expire time in %s",c->cmd->name);
if (milliseconds <= 0 || (unit == UNIT_SECONDS && milliseconds > LLONG_MAX / 1000)) {
/* Negative value provided or multiplication is gonna overflow. */
addReplyErrorFormat(c, "invalid expire time in %s", c->cmd->name);
return;
}
if (unit == UNIT_SECONDS) milliseconds *= 1000;
when = milliseconds;
if ((flags & OBJ_PX) || (flags & OBJ_EX))
when += mstime();
if (when <= 0) {
/* Overflow detected. */
addReplyErrorFormat(c, "invalid expire time in %s", c->cmd->name);
return;
}
}
if ((flags & OBJ_SET_NX && lookupKeyWrite(c->db,key) != NULL) ||
@ -100,14 +109,7 @@ void setGenericCommand(client *c, int flags, robj *key, robj *val, robj *expire,
server.dirty++;
notifyKeyspaceEvent(NOTIFY_STRING,"set",key,c->db->id);
if (expire) {
robj *exp = shared.pxat;
if ((flags & OBJ_PX) || (flags & OBJ_EX)) {
setExpire(c,c->db,key,milliseconds + mstime());
exp = shared.px;
} else {
setExpire(c,c->db,key,milliseconds);
}
setExpire(c,c->db,key,when);
notifyKeyspaceEvent(NOTIFY_GENERIC,"expire",key,c->db->id);
/* Propagate as SET Key Value PXAT millisecond-timestamp if there is EXAT/PXAT or
@ -119,6 +121,7 @@ void setGenericCommand(client *c, int flags, robj *key, robj *val, robj *expire,
* Additional care is required while modifying the argument order. AOF relies on the
* exp argument being at index 3. (see feedAppendOnlyFile)
* */
robj *exp = (flags & OBJ_PXAT) || (flags & OBJ_EXAT) ? shared.pxat : shared.px;
robj *millisecondObj = createStringObjectFromLongLong(milliseconds);
rewriteClientCommandVector(c,5,shared.set,key,val,exp,millisecondObj);
decrRefCount(millisecondObj);
@ -335,17 +338,26 @@ void getexCommand(client *c) {
return;
}
long long milliseconds = 0;
long long milliseconds = 0, when = 0;
/* Validate the expiration time value first */
if (expire) {
if (getLongLongFromObjectOrReply(c, expire, &milliseconds, NULL) != C_OK)
return;
if (milliseconds <= 0) {
addReplyErrorFormat(c,"invalid expire time in %s",c->cmd->name);
if (milliseconds <= 0 || (unit == UNIT_SECONDS && milliseconds > LLONG_MAX / 1000)) {
/* Negative value provided or multiplication is gonna overflow. */
addReplyErrorFormat(c, "invalid expire time in %s", c->cmd->name);
return;
}
if (unit == UNIT_SECONDS) milliseconds *= 1000;
when = milliseconds;
if ((flags & OBJ_PX) || (flags & OBJ_EX))
when += mstime();
if (when <= 0) {
/* Overflow detected. */
addReplyErrorFormat(c, "invalid expire time in %s", c->cmd->name);
return;
}
}
/* We need to do this before we expire the key or delete it */
@ -365,14 +377,9 @@ void getexCommand(client *c) {
notifyKeyspaceEvent(NOTIFY_GENERIC, "del", c->argv[1], c->db->id);
server.dirty++;
} else if (expire) {
robj *exp = shared.pexpireat;
if ((flags & OBJ_PX) || (flags & OBJ_EX)) {
setExpire(c,c->db,c->argv[1],milliseconds + mstime());
exp = shared.pexpire;
} else {
setExpire(c,c->db,c->argv[1],milliseconds);
}
setExpire(c,c->db,c->argv[1],when);
/* Propagate */
robj *exp = (flags & OBJ_PXAT) || (flags & OBJ_EXAT) ? shared.pexpireat : shared.pexpire;
robj* millisecondObj = createStringObjectFromLongLong(milliseconds);
rewriteClientCommandVector(c,3,exp,c->argv[1],millisecondObj);
decrRefCount(millisecondObj);
@ -631,7 +638,7 @@ void decrbyCommand(client *c) {
void incrbyfloatCommand(client *c) {
long double incr, value;
robj *o, *new, *aux;
robj *o, *new;
o = lookupKeyWrite(c->db,c->argv[1]);
if (checkType(c,o,OBJ_STRING)) return;
@ -659,9 +666,7 @@ void incrbyfloatCommand(client *c) {
* will not create differences in replicas or after an AOF restart. */
rewriteClientCommandArgument(c,0,shared.set);
rewriteClientCommandArgument(c,2,new);
aux = createStringObject("KEEPTTL",7);
rewriteClientCommandArgument(c,3,aux);
decrRefCount(aux);
rewriteClientCommandArgument(c,3,shared.keepttl);
}
void appendCommand(client *c) {
@ -764,7 +769,7 @@ void stralgoLCS(client *c) {
addReplyError(c,
"The specified keys must contain string values");
/* Don't cleanup the objects, we need to do that
* only after callign getDecodedObject(). */
* only after calling getDecodedObject(). */
obja = NULL;
objb = NULL;
goto cleanup;

View File

@ -3956,11 +3956,33 @@ void bzpopmaxCommand(client *c) {
blockingGenericZpopCommand(c,ZSET_MAX);
}
/* Reply to the client with 'count' sampled members taken from a
 * ziplist-encoded sorted set, as collected into the 'keys' array (and the
 * matching scores into 'vals', when 'vals' is not NULL).
 * Each ziplistEntry holds either a string (sval/slen set) or an integer
 * (lval); scores stored as strings are converted to double via zzlStrtod()
 * before being emitted. */
static void zarndmemberReplyWithZiplist(client *c, unsigned int count, ziplistEntry *keys, ziplistEntry *vals) {
    for (unsigned long i = 0; i < count; i++) {
        /* In RESP3 each member/score couple is wrapped in a 2-element array;
         * in RESP2 members and scores are emitted as a flat sequence. */
        if (vals && c->resp > 2)
            addReplyArrayLen(c,2);
        if (keys[i].sval)
            addReplyBulkCBuffer(c, keys[i].sval, keys[i].slen);
        else
            addReplyBulkLongLong(c, keys[i].lval);
        if (vals) {
            if (vals[i].sval) {
                addReplyDouble(c, zzlStrtod(vals[i].sval,vals[i].slen));
            } else
                addReplyDouble(c, vals[i].lval);
        }
    }
}
/* How many times bigger should be the zset compared to the requested size
* for us to not use the "remove elements" strategy? Read later in the
* implementation for more info. */
#define ZRANDMEMBER_SUB_STRATEGY_MUL 3
/* If client is trying to ask for a very large number of random elements,
* queuing may consume an unlimited amount of memory, so we want to limit
* the number of randoms per time. */
#define ZRANDMEMBER_RANDOM_SAMPLE_LIMIT 1000
void zrandmemberWithCountCommand(client *c, long l, int withscores) {
unsigned long count, size;
int uniq = 1;
@ -4006,23 +4028,16 @@ void zrandmemberWithCountCommand(client *c, long l, int withscores) {
}
} else if (zsetobj->encoding == OBJ_ENCODING_ZIPLIST) {
ziplistEntry *keys, *vals = NULL;
keys = zmalloc(sizeof(ziplistEntry)*count);
unsigned long limit, sample_count;
limit = count > ZRANDMEMBER_RANDOM_SAMPLE_LIMIT ? ZRANDMEMBER_RANDOM_SAMPLE_LIMIT : count;
keys = zmalloc(sizeof(ziplistEntry)*limit);
if (withscores)
vals = zmalloc(sizeof(ziplistEntry)*count);
ziplistRandomPairs(zsetobj->ptr, count, keys, vals);
for (unsigned long i = 0; i < count; i++) {
if (withscores && c->resp > 2)
addReplyArrayLen(c,2);
if (keys[i].sval)
addReplyBulkCBuffer(c, keys[i].sval, keys[i].slen);
else
addReplyBulkLongLong(c, keys[i].lval);
if (withscores) {
if (vals[i].sval) {
addReplyDouble(c, zzlStrtod(vals[i].sval,vals[i].slen));
} else
addReplyDouble(c, vals[i].lval);
}
vals = zmalloc(sizeof(ziplistEntry)*limit);
while (count) {
sample_count = count > limit ? limit : count;
count -= sample_count;
ziplistRandomPairs(zsetobj->ptr, sample_count, keys, vals);
zarndmemberReplyWithZiplist(c, sample_count, keys, vals);
}
zfree(keys);
zfree(vals);
@ -4070,6 +4085,7 @@ void zrandmemberWithCountCommand(client *c, long l, int withscores) {
* used into CASE 4 is highly inefficient. */
if (count*ZRANDMEMBER_SUB_STRATEGY_MUL > size) {
dict *d = dictCreate(&sdsReplyDictType, NULL);
dictExpand(d, size);
/* Add all the elements into the temporary dictionary. */
while (zuiNext(&src, &zval)) {
sds key = zuiNewSdsFromValue(&zval);
@ -4111,8 +4127,24 @@ void zrandmemberWithCountCommand(client *c, long l, int withscores) {
* to the temporary set, trying to eventually get enough unique elements
* to reach the specified count. */
else {
if (zsetobj->encoding == OBJ_ENCODING_ZIPLIST) {
/* it is inefficient to repeatedly pick one random element from a
* ziplist. so we use this instead: */
ziplistEntry *keys, *vals = NULL;
keys = zmalloc(sizeof(ziplistEntry)*count);
if (withscores)
vals = zmalloc(sizeof(ziplistEntry)*count);
serverAssert(ziplistRandomPairsUnique(zsetobj->ptr, count, keys, vals) == count);
zarndmemberReplyWithZiplist(c, count, keys, vals);
zfree(keys);
zfree(vals);
return;
}
/* Hashtable encoding (generic implementation) */
unsigned long added = 0;
dict *d = dictCreate(&hashDictType, NULL);
dictExpand(d, count);
while (added < count) {
ziplistEntry key;

View File

@ -350,7 +350,7 @@ ConnectionType CT_TLS;
* socket operation.
*
* When this happens, we need to do two things:
* 1. Make sure we register for the even.
* 1. Make sure we register for the event.
* 2. Make sure we know which handler needs to execute when the
* event fires. That is, if we notify the caller of a write operation
* that it blocks, and SSL asks for a read, we need to trigger the

View File

@ -147,7 +147,7 @@ int checkPrefixCollisionsOrReply(client *c, robj **prefixes, size_t numprefix) {
}
}
}
return -1;
return 1;
}
/* Set the client 'c' to track the prefix 'prefix'. If the client 'c' is
@ -269,7 +269,7 @@ void sendTrackingMessage(client *c, char *keyname, size_t keylen, int proto) {
* are unable to send invalidation messages to the redirected
* connection, because the client no longer exist. */
if (c->resp > 2) {
addReplyPushLen(c,3);
addReplyPushLen(c,2);
addReplyBulkCBuffer(c,"tracking-redir-broken",21);
addReplyLongLong(c,c->client_tracking_redirection);
}

View File

@ -244,6 +244,33 @@ long long memtoll(const char *p, int *err) {
return val*mul;
}
/* Scan the 'len' bytes at 's' for the first byte that belongs to the set
 * 'chars' (of 'charslen' bytes), like strpbrk() but for non NUL-terminated
 * buffers. Returns a pointer to the first matching byte, or NULL when no
 * byte of the set occurs in the buffer. */
const char *mempbrk(const char *s, size_t len, const char *chars, size_t charslen) {
    const char *end = s + len;
    for (const char *p = s; p < end; p++) {
        for (const char *c = chars; c < chars + charslen; c++) {
            if (*p == *c) return p;
        }
    }
    return NULL;
}
/* In-place translate the 'len' bytes at 's': every byte equal to from[i]
 * (for some i < setlen) is rewritten as to[i], similar to the tr(1) utility.
 * When a byte matches several entries of 'from', the first match wins.
 * Always returns 's'. */
char *memmapchars(char *s, size_t len, const char *from, const char *to, size_t setlen) {
    for (size_t pos = 0; pos < len; pos++) {
        for (size_t k = 0; k < setlen; k++) {
            if (s[pos] != from[k]) continue;
            s[pos] = to[k];
            break; /* First matching 'from' entry wins. */
        }
    }
    return s;
}
/* Return the number of digits of 'v' when converted to string in radix 10.
* See ll2string() for more information. */
uint32_t digits10(uint64_t v) {

View File

@ -49,6 +49,8 @@ int stringmatchlen(const char *p, int plen, const char *s, int slen, int nocase)
int stringmatch(const char *p, const char *s, int nocase);
int stringmatchlen_fuzz_test(void);
long long memtoll(const char *p, int *err);
const char *mempbrk(const char *s, size_t len, const char *chars, size_t charslen);
char *memmapchars(char *s, size_t len, const char *from, const char *to, size_t setlen);
uint32_t digits10(uint64_t v);
uint32_t sdigits10(int64_t v);
int ll2string(char *s, size_t len, long long value);

View File

@ -1265,6 +1265,42 @@ unsigned char *ziplistDeleteRange(unsigned char *zl, int index, unsigned int num
return (p == NULL) ? zl : __ziplistDelete(zl,p,num);
}
/* Replaces the entry at p. This is equivalent to a delete and an insert,
 * but avoids some overhead when replacing a value of the same size.
 * Returns the ziplist, which may have been reallocated on the fallback
 * (delete+insert) path. */
unsigned char *ziplistReplace(unsigned char *zl, unsigned char *p, unsigned char *s, unsigned int slen) {

    /* get metadata of the current entry */
    zlentry entry;
    zipEntry(p, &entry);

    /* compute length of entry to store, excluding prevlen */
    unsigned int reqlen;
    unsigned char encoding = 0;
    long long value = 123456789; /* initialized to avoid warning. */
    if (zipTryEncoding(s,slen,&value,&encoding)) {
        reqlen = zipIntSize(encoding); /* encoding is set */
    } else {
        reqlen = slen; /* encoding == 0 */
    }
    /* Add the bytes needed to store the encoding header itself. */
    reqlen += zipStoreEntryEncoding(NULL,encoding,slen);

    if (reqlen == entry.lensize + entry.len) {
        /* Simply overwrite the element: the stored size (encoding header +
         * payload) is unchanged, so the prevlen of the following entry stays
         * valid and no reallocation is needed. */
        p += entry.prevrawlensize;
        p += zipStoreEntryEncoding(p,encoding,slen);
        if (ZIP_IS_STR(encoding)) {
            memcpy(p,s,slen);
        } else {
            zipSaveInteger(p,value,encoding);
        }
    } else {
        /* Fallback. Sizes differ: delete the old entry and insert the new
         * one at the same position. */
        zl = ziplistDelete(zl,&p);
        zl = ziplistInsert(zl,p,s,slen);
    }
    return zl;
}
/* Compare entry pointer to by 'p' with 'sstr' of length 'slen'. */
/* Return 1 if equal. */
unsigned int ziplistCompare(unsigned char *p, unsigned char *sstr, unsigned int slen) {
@ -1523,8 +1559,8 @@ void ziplistRandomPair(unsigned char *zl, unsigned long total_count, ziplistEntr
}
/* int compare for qsort */
int intCompare(const void *a, const void *b) {
return (*(int *) a - *(int *) b);
int uintCompare(const void *a, const void *b) {
return (*(unsigned int *) a - *(unsigned int *) b);
}
/* Helper method to store a string into from val or lval into dest */
@ -1534,39 +1570,42 @@ static inline void ziplistSaveValue(unsigned char *val, unsigned int len, long l
dest->lval = lval;
}
/* Randomly select unique count of key value pairs and store into 'keys' and
* 'vals' args. The order of the picked entries is random.
/* Randomly select count of key value pairs and store into 'keys' and
* 'vals' args. The order of the picked entries is random, and the selections
* are non-unique (repetitions are possible).
* The 'vals' arg can be NULL in which case we skip these. */
void ziplistRandomPairs(unsigned char *zl, int count, ziplistEntry *keys, ziplistEntry *vals) {
void ziplistRandomPairs(unsigned char *zl, unsigned int count, ziplistEntry *keys, ziplistEntry *vals) {
unsigned char *p, *key, *value;
unsigned int klen, vlen;
long long klval, vlval;
unsigned int klen = 0, vlen = 0;
long long klval = 0, vlval = 0;
/* Notice: the index member must be first due to the use in uintCompare */
typedef struct {
int index;
int order;
unsigned int index;
unsigned int order;
} rand_pick;
rand_pick *picks = zmalloc(sizeof(rand_pick)*count);
unsigned long total_size = ziplistLen(zl)/2;
unsigned int total_size = ziplistLen(zl)/2;
/* Avoid div by zero on corrupt ziplist */
assert(total_size);
/* create a pool of random indexes (some may be duplicate). */
for (int i = 0; i < count; i++) {
for (unsigned int i = 0; i < count; i++) {
picks[i].index = (rand() % total_size) * 2; /* Generate even indexes */
/* keep track of the order we picked them */
picks[i].order = i;
}
/* sort by indexes. */
qsort(picks, count, sizeof(rand_pick), intCompare);
qsort(picks, count, sizeof(rand_pick), uintCompare);
/* fetch the elements form the ziplist into a output array respecting the original order. */
int zipindex = 0, pickindex = 0;
unsigned int zipindex = 0, pickindex = 0;
p = ziplistIndex(zl, 0);
while (ziplistGet(p, &key, &klen, &klval) && pickindex < count) {
p = ziplistNext(zl, p);
ziplistGet(p, &value, &vlen, &vlval);
assert(ziplistGet(p, &value, &vlen, &vlval));
while (pickindex < count && zipindex == picks[pickindex].index) {
int storeorder = picks[pickindex].order;
ziplistSaveValue(key, klen, klval, &keys[storeorder]);
@ -1581,6 +1620,51 @@ void ziplistRandomPairs(unsigned char *zl, int count, ziplistEntry *keys, ziplis
zfree(picks);
}
/* Randomly select count of key value pairs and store into 'keys' and
 * 'vals' args. The selections are unique (no repetitions), and the order of
 * the picked entries is NOT-random (it follows the ziplist order).
 * The 'vals' arg can be NULL in which case we skip these.
 * The return value is the number of items picked which can be lower than the
 * requested count if the ziplist doesn't hold enough pairs. */
unsigned int ziplistRandomPairsUnique(unsigned char *zl, unsigned int count, ziplistEntry *keys, ziplistEntry *vals) {
    unsigned char *p, *key;
    unsigned int klen = 0;
    long long klval = 0;
    unsigned int total_size = ziplistLen(zl)/2; /* Entries are key/value couples. */
    unsigned int index = 0;
    /* We can never pick more couples than the ziplist contains. */
    if (count > total_size)
        count = total_size;

    /* To only iterate once, every time we try to pick a member, the probability
     * we pick it is the quotient of the count left we want to pick and the
     * count still we haven't visited in the dict, this way, we could make every
     * member be equally picked.*/
    p = ziplistIndex(zl, 0);
    unsigned int picked = 0, remaining = count;
    while (picked < count && p) {
        double randomDouble = ((double)rand()) / RAND_MAX;
        double threshold = ((double)remaining) / (total_size - index);
        if (randomDouble <= threshold) {
            /* Pick this couple: store the key... */
            assert(ziplistGet(p, &key, &klen, &klval));
            ziplistSaveValue(key, klen, klval, &keys[picked]);
            /* ...then advance to the value element of the couple. */
            p = ziplistNext(zl, p);
            assert(p);
            if (vals) {
                assert(ziplistGet(p, &key, &klen, &klval));
                ziplistSaveValue(key, klen, klval, &vals[picked]);
            }
            remaining--;
            picked++;
        } else {
            /* Skip this couple: step over its key element only; the value
             * element is skipped by the common advance below. */
            p = ziplistNext(zl, p);
            assert(p);
        }
        /* Move past the value element to the next couple's key (NULL at the
         * ziplist end, terminating the loop). */
        p = ziplistNext(zl, p);
        index++;
    }
    return picked;
}
#ifdef REDIS_TEST
#include <sys/time.h>
#include "adlist.h"
@ -2022,6 +2106,41 @@ int ziplistTest(int argc, char **argv) {
zfree(zl);
}
printf("Replace with same size:\n");
{
zl = createList(); /* "hello", "foo", "quux", "1024" */
unsigned char *orig_zl = zl;
p = ziplistIndex(zl, 0);
zl = ziplistReplace(zl, p, (unsigned char*)"zoink", 5);
p = ziplistIndex(zl, 3);
zl = ziplistReplace(zl, p, (unsigned char*)"yy", 2);
p = ziplistIndex(zl, 1);
zl = ziplistReplace(zl, p, (unsigned char*)"65536", 5);
p = ziplistIndex(zl, 0);
assert(!memcmp((char*)p,
"\x00\x05zoink"
"\x07\xf0\x00\x00\x01" /* 65536 as int24 */
"\x05\x04quux" "\x06\x02yy" "\xff",
23));
assert(zl == orig_zl); /* no reallocations have happened */
zfree(zl);
printf("SUCCESS\n\n");
}
printf("Replace with different size:\n");
{
zl = createList(); /* "hello", "foo", "quux", "1024" */
p = ziplistIndex(zl, 1);
zl = ziplistReplace(zl, p, (unsigned char*)"squirrel", 8);
p = ziplistIndex(zl, 0);
assert(!strncmp((char*)p,
"\x00\x05hello" "\x07\x08squirrel" "\x0a\x04quux"
"\x06\xc0\x00\x04" "\xff",
28));
zfree(zl);
printf("SUCCESS\n\n");
}
printf("Regression test for >255 byte strings:\n");
{
char v1[257] = {0}, v2[257] = {0};

View File

@ -53,6 +53,7 @@ unsigned int ziplistGet(unsigned char *p, unsigned char **sval, unsigned int *sl
unsigned char *ziplistInsert(unsigned char *zl, unsigned char *p, unsigned char *s, unsigned int slen);
unsigned char *ziplistDelete(unsigned char *zl, unsigned char **p);
unsigned char *ziplistDeleteRange(unsigned char *zl, int index, unsigned int num);
unsigned char *ziplistReplace(unsigned char *zl, unsigned char *p, unsigned char *s, unsigned int slen);
unsigned int ziplistCompare(unsigned char *p, unsigned char *s, unsigned int slen);
unsigned char *ziplistFind(unsigned char *zl, unsigned char *p, unsigned char *vstr, unsigned int vlen, unsigned int skip);
unsigned int ziplistLen(unsigned char *zl);
@ -62,7 +63,8 @@ typedef int (*ziplistValidateEntryCB)(unsigned char* p, void* userdata);
int ziplistValidateIntegrity(unsigned char *zl, size_t size, int deep,
ziplistValidateEntryCB entry_cb, void *cb_userdata);
void ziplistRandomPair(unsigned char *zl, unsigned long total_count, ziplistEntry *key, ziplistEntry *val);
void ziplistRandomPairs(unsigned char *zl, int count, ziplistEntry *keys, ziplistEntry *vals);
void ziplistRandomPairs(unsigned char *zl, unsigned int count, ziplistEntry *keys, ziplistEntry *vals);
unsigned int ziplistRandomPairsUnique(unsigned char *zl, unsigned int count, ziplistEntry *keys, ziplistEntry *vals);
#ifdef REDIS_TEST
int ziplistTest(int argc, char *argv[]);

View File

@ -57,6 +57,12 @@ void zlibc_free(void *ptr) {
#endif
#endif
#if PREFIX_SIZE > 0
#define ASSERT_NO_SIZE_OVERFLOW(sz) assert((sz) + PREFIX_SIZE > (sz))
#else
#define ASSERT_NO_SIZE_OVERFLOW(sz)
#endif
/* Explicitly override malloc/free etc when using tcmalloc. */
#if defined(USE_TCMALLOC)
#define malloc(size) tc_malloc(size)
@ -89,6 +95,7 @@ static void (*zmalloc_oom_handler)(size_t) = zmalloc_default_oom;
/* Try allocating memory, and return NULL if failed.
* '*usable' is set to the usable size if non NULL. */
void *ztrymalloc_usable(size_t size, size_t *usable) {
ASSERT_NO_SIZE_OVERFLOW(size);
void *ptr = malloc(size+PREFIX_SIZE);
if (!ptr) return NULL;
@ -131,6 +138,7 @@ void *zmalloc_usable(size_t size, size_t *usable) {
* Currently implemented only for jemalloc. Used for online defragmentation. */
#ifdef HAVE_DEFRAG
void *zmalloc_no_tcache(size_t size) {
ASSERT_NO_SIZE_OVERFLOW(size);
void *ptr = mallocx(size+PREFIX_SIZE, MALLOCX_TCACHE_NONE);
if (!ptr) zmalloc_oom_handler(size);
update_zmalloc_stat_alloc(zmalloc_size(ptr));
@ -147,6 +155,7 @@ void zfree_no_tcache(void *ptr) {
/* Try allocating memory and zero it, and return NULL if failed.
* '*usable' is set to the usable size if non NULL. */
void *ztrycalloc_usable(size_t size, size_t *usable) {
ASSERT_NO_SIZE_OVERFLOW(size);
void *ptr = calloc(1, size+PREFIX_SIZE);
if (ptr == NULL) return NULL;
@ -187,6 +196,7 @@ void *zcalloc_usable(size_t size, size_t *usable) {
/* Try reallocating memory, and return NULL if failed.
* '*usable' is set to the usable size if non NULL. */
void *ztryrealloc_usable(void *ptr, size_t size, size_t *usable) {
ASSERT_NO_SIZE_OVERFLOW(size);
#ifndef HAVE_MALLOC_SIZE
void *realptr;
#endif

View File

@ -29,6 +29,7 @@ set ::sentinel_base_port 20000
set ::redis_base_port 30000
set ::redis_port_count 1024
set ::host "127.0.0.1"
set ::leaked_fds_file [file normalize "tmp/leaked_fds.txt"]
set ::pids {} ; # We kill everything at exit
set ::dirs {} ; # We remove all the temp dirs at exit
set ::run_matching {} ; # If non empty, only tests matching pattern are run.
@ -410,13 +411,13 @@ proc check_leaks instance_types {
# Execute all the units inside the 'tests' directory.
proc run_tests {} {
set sentinel_fd_leaks_file "sentinel_fd_leaks"
if { [file exists $sentinel_fd_leaks_file] } {
file delete $sentinel_fd_leaks_file
}
set tests [lsort [glob ../tests/*]]
foreach test $tests {
# Remove leaked_fds file before starting
if {$::leaked_fds_file != "" && [file exists $::leaked_fds_file]} {
file delete $::leaked_fds_file
}
if {$::run_matching ne {} && [string match $::run_matching $test] == 0} {
continue
}
@ -424,19 +425,19 @@ proc run_tests {} {
puts [colorstr yellow "Testing unit: [lindex [file split $test] end]"]
source $test
check_leaks {redis sentinel}
# Check if a leaked fds file was created and abort the test.
if {$::leaked_fds_file != "" && [file exists $::leaked_fds_file]} {
puts [colorstr red "ERROR: Sentinel has leaked fds to scripts:"]
puts [exec cat $::leaked_fds_file]
puts "----"
incr ::failed
}
}
}
# Print a message and exists with 0 / 1 according to zero or more failures.
proc end_tests {} {
set sentinel_fd_leaks_file "sentinel_fd_leaks"
if { [file exists $sentinel_fd_leaks_file] } {
# temporarily disabling this error from failing the tests until leaks are fixed.
#puts [colorstr red "WARNING: sentinel test(s) failed, there are leaked fds in sentinel:"]
#puts [exec cat $sentinel_fd_leaks_file]
#exit 1
}
if {$::failed == 0 } {
puts "GOOD! No errors."
exit 0

View File

@ -71,6 +71,13 @@ foreach sanitize_dump {no yes} {
set min_cycles 10 ; # run at least 10 cycles
}
# Don't execute this on FreeBSD due to a yet-undiscovered memory issue
# which causes tclsh to bloat.
if {[exec uname] == "FreeBSD"} {
set min_cycles 1
set min_duration 1
}
test "Fuzzer corrupt restore payloads - sanitize_dump: $sanitize_dump" {
if {$min_duration * 2 > $::timeout} {
fail "insufficient timeout"

View File

@ -206,10 +206,13 @@ set system_name [string tolower [exec uname -s]]
if {$system_name eq {linux}} {
start_server {overrides {save ""}} {
test {Test child sending COW info} {
test {Test child sending info} {
# make sure that rdb_last_cow_size and current_cow_size are zero (the test using new server),
# so that the comparisons during the test will be valid
assert {[s current_cow_size] == 0}
assert {[s current_save_keys_processed] == 0}
assert {[s current_save_keys_total] == 0}
assert {[s rdb_last_cow_size] == 0}
# using a 200us delay, the bgsave is empirically taking about 10 seconds.
@ -234,23 +237,35 @@ start_server {overrides {save ""}} {
# start background rdb save
r bgsave
set current_save_keys_total [s current_save_keys_total]
if {$::verbose} {
puts "Keys before bgsave start: current_save_keys_total"
}
# on each iteration, we will write some key to the server to trigger copy-on-write, and
# wait to see that it reflected in INFO.
set iteration 1
while 1 {
# take a sample before writing new data to the server
# take samples before writing new data to the server
set cow_size [s current_cow_size]
if {$::verbose} {
puts "COW info before copy-on-write: $cow_size"
}
set keys_processed [s current_save_keys_processed]
if {$::verbose} {
puts "current_save_keys_processed info : $keys_processed"
}
# trigger copy-on-write
r setrange key$iteration 0 [string repeat B $size]
# wait to see that current_cow_size value updated (as long as the child is in progress)
wait_for_condition 80 100 {
[s rdb_bgsave_in_progress] == 0 ||
[s current_cow_size] >= $cow_size + $size
[s current_cow_size] >= $cow_size + $size &&
[s current_save_keys_processed] > $keys_processed &&
[s current_fork_perc] > 0
} else {
if {$::verbose} {
puts "COW info on fail: [s current_cow_size]"
@ -259,6 +274,9 @@ start_server {overrides {save ""}} {
fail "COW info wasn't reported"
}
# assert that $keys_processed is not greater than total keys.
assert_morethan_equal $current_save_keys_total $keys_processed
# for no accurate, stop after 2 iterations
if {!$::accurate && $iteration == 2} {
break

View File

@ -109,7 +109,7 @@ start_server {tags {"cli"}} {
test_interactive_cli "INFO response should be printed raw" {
set lines [split [run_command $fd info] "\n"]
foreach line $lines {
assert [regexp {^$|^#|^[a-z0-9_]+:.+} $line]
assert [regexp {^$|^#|^[^#:]+:} $line]
}
}

View File

@ -39,17 +39,19 @@ start_server {tags {"repl"}} {
}
test {No write if min-slaves-max-lag is > of the slave lag} {
r -1 deferred 1
r config set min-slaves-to-write 1
r config set min-slaves-max-lag 2
r -1 debug sleep 6
exec kill -SIGSTOP [srv -1 pid]
assert {[r set foo 12345] eq {OK}}
after 4000
wait_for_condition 100 100 {
[catch {r set foo 12345}] != 0
} else {
fail "Master didn't become readonly"
}
catch {r set foo 12345} err
assert {[r -1 read] eq {OK}}
r -1 deferred 0
set err
} {NOREPLICAS*}
assert_match {NOREPLICAS*} $err
}
exec kill -SIGCONT [srv -1 pid]
test {min-slaves-to-write is ignored by slaves} {
r config set min-slaves-to-write 1

View File

@ -10,6 +10,12 @@ else # Linux, others
SHOBJ_LDFLAGS ?= -shared
endif
# Needed to satisfy __stack_chk_fail_local on Linux with -m32, due to gcc
# -fstack-protector by default. Breaks on FreeBSD so we exclude it.
ifneq ($(uname_S),FreeBSD)
LIBS = -lc
endif
TEST_MODULES = \
commandfilter.so \
testrdb.so \
@ -29,6 +35,8 @@ TEST_MODULES = \
test_lazyfree.so \
timer.so \
defragtest.so \
hash.so \
zset.so \
stream.so
@ -43,7 +51,7 @@ all: $(TEST_MODULES)
$(CC) -I../../src $(CFLAGS) $(SHOBJ_CFLAGS) -fPIC -c $< -o $@
%.so: %.xo
$(LD) -o $@ $< $(SHOBJ_LDFLAGS) $(LDFLAGS) $(LIBS) -lc
$(LD) -o $@ $< $(SHOBJ_LDFLAGS) $(LDFLAGS) $(LIBS)
.PHONY: clean

View File

@ -1,10 +1,10 @@
#define REDISMODULE_EXPERIMENTAL_API
#define _XOPEN_SOURCE 700
#include "redismodule.h"
#include <stdio.h>
#include <stdlib.h>
#include <pthread.h>
#include <time.h>
#include "assert.h"
#define UNUSED(x) (void)(x)
@ -21,7 +21,7 @@ int HelloBlock_Timeout(RedisModuleCtx *ctx, RedisModuleString **argv, int argc)
UNUSED(argv);
UNUSED(argc);
RedisModuleBlockedClient *bc = RedisModule_GetBlockedClientHandle(ctx);
assert(RedisModule_BlockedClientMeasureTimeEnd(bc)==REDISMODULE_OK);
RedisModule_BlockedClientMeasureTimeEnd(bc);
return RedisModule_ReplyWithSimpleString(ctx,"Request timedout");
}
@ -39,7 +39,7 @@ void *BlockDebug_ThreadMain(void *arg) {
long long delay = (unsigned long)targ[1];
long long enable_time_track = (unsigned long)targ[2];
if (enable_time_track)
assert(RedisModule_BlockedClientMeasureTimeStart(bc)==REDISMODULE_OK);
RedisModule_BlockedClientMeasureTimeStart(bc);
RedisModule_Free(targ);
struct timespec ts;
@ -49,18 +49,18 @@ void *BlockDebug_ThreadMain(void *arg) {
int *r = RedisModule_Alloc(sizeof(int));
*r = rand();
if (enable_time_track)
assert(RedisModule_BlockedClientMeasureTimeEnd(bc)==REDISMODULE_OK);
RedisModule_BlockedClientMeasureTimeEnd(bc);
RedisModule_UnblockClient(bc,r);
return NULL;
}
/* The thread entry point that actually executes the blocking part
* of the command BLOCK.DEBUG. */
* of the command BLOCK.DOUBLE_DEBUG. */
void *DoubleBlock_ThreadMain(void *arg) {
void **targ = arg;
RedisModuleBlockedClient *bc = targ[0];
long long delay = (unsigned long)targ[1];
assert(RedisModule_BlockedClientMeasureTimeStart(bc)==REDISMODULE_OK);
RedisModule_BlockedClientMeasureTimeStart(bc);
RedisModule_Free(targ);
struct timespec ts;
ts.tv_sec = delay / 1000;
@ -72,7 +72,7 @@ void *DoubleBlock_ThreadMain(void *arg) {
/* call again RedisModule_BlockedClientMeasureTimeStart() and
* RedisModule_BlockedClientMeasureTimeEnd and ensure that the
* total execution time is 2x the delay. */
assert(RedisModule_BlockedClientMeasureTimeStart(bc)==REDISMODULE_OK);
RedisModule_BlockedClientMeasureTimeStart(bc);
nanosleep(&ts, NULL);
RedisModule_BlockedClientMeasureTimeEnd(bc);
@ -173,14 +173,13 @@ int HelloBlockNoTracking_RedisCommand(RedisModuleCtx *ctx, RedisModuleString **a
int HelloDoubleBlock_RedisCommand(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
if (argc != 2) return RedisModule_WrongArity(ctx);
long long delay;
long long timeout;
if (RedisModule_StringToLongLong(argv[1],&delay) != REDISMODULE_OK) {
return RedisModule_ReplyWithError(ctx,"ERR invalid count");
}
pthread_t tid;
RedisModuleBlockedClient *bc = RedisModule_BlockClient(ctx,HelloBlock_Reply,HelloBlock_Timeout,HelloBlock_FreeData,timeout);
RedisModuleBlockedClient *bc = RedisModule_BlockClient(ctx,HelloBlock_Reply,HelloBlock_Timeout,HelloBlock_FreeData,0);
/* Now that we setup a blocking client, we need to pass the control
* to the thread. However we need to pass arguments to the thread:

90
tests/modules/hash.c Normal file
View File

@ -0,0 +1,90 @@
#include "redismodule.h"
#include <strings.h>
#include <errno.h>
#include <stdlib.h>
/* If a string is ":delete:", the special value for deleted hash fields is
 * returned; otherwise the input string is returned.
 *
 * Note: the comparison uses strcasecmp(), so the marker is matched
 * case-insensitively (":DELETE:" works too). */
static RedisModuleString *value_or_delete(RedisModuleString *s) {
    if (!strcasecmp(RedisModule_StringPtrLen(s, NULL), ":delete:"))
        return REDISMODULE_HASH_DELETE;
    else
        return s;
}
/* HASH.SET key flags field1 value1 [field2 value2 ..]
 *
 * Sets 1-4 fields. Returns the same as RedisModule_HashSet().
 * Flags is a string of "nxa" where n = NX, x = XX, a = COUNT_ALL.
 * To delete a field, use the value ":delete:".
 */
int hash_set(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
    /* Arity: key + flags + 1..4 field/value pairs (odd argc, 5..11). */
    if (argc < 5 || argc % 2 == 0 || argc > 11)
        return RedisModule_WrongArity(ctx);

    RedisModule_AutoMemory(ctx);
    RedisModuleKey *key = RedisModule_OpenKey(ctx, argv[1], REDISMODULE_WRITE);

    /* Translate the flag characters into REDISMODULE_HASH_* bits. Unknown
     * characters are silently ignored. */
    size_t nflags;
    const char *fstr = RedisModule_StringPtrLen(argv[2], &nflags);
    int flags = REDISMODULE_HASH_NONE;
    while (nflags--) {
        char c = *fstr++;
        if (c == 'n') flags |= REDISMODULE_HASH_NX;
        else if (c == 'x') flags |= REDISMODULE_HASH_XX;
        else if (c == 'a') flags |= REDISMODULE_HASH_COUNT_ALL;
    }

    /* Test some varargs. (In real-world, use a loop and set one at a time.) */
    int result;
    errno = 0;
    switch (argc) {
    case 5:
        result = RedisModule_HashSet(key, flags,
                                     argv[3], value_or_delete(argv[4]),
                                     NULL);
        break;
    case 7:
        result = RedisModule_HashSet(key, flags,
                                     argv[3], value_or_delete(argv[4]),
                                     argv[5], value_or_delete(argv[6]),
                                     NULL);
        break;
    case 9:
        result = RedisModule_HashSet(key, flags,
                                     argv[3], value_or_delete(argv[4]),
                                     argv[5], value_or_delete(argv[6]),
                                     argv[7], value_or_delete(argv[8]),
                                     NULL);
        break;
    case 11:
        result = RedisModule_HashSet(key, flags,
                                     argv[3], value_or_delete(argv[4]),
                                     argv[5], value_or_delete(argv[6]),
                                     argv[7], value_or_delete(argv[8]),
                                     argv[9], value_or_delete(argv[10]),
                                     NULL);
        break;
    default:
        /* Unreachable given the arity check above. */
        return RedisModule_ReplyWithError(ctx, "ERR too many fields");
    }

    /* Check errno */
    if (result == 0) {
        if (errno == ENOTSUP)
            return RedisModule_ReplyWithError(ctx, REDISMODULE_ERRORMSG_WRONGTYPE);
        RedisModule_Assert(errno == ENOENT);
    }

    return RedisModule_ReplyWithLongLong(ctx, result);
}
/* Module entry point: registers the "hash" module and the HASH.SET command. */
int RedisModule_OnLoad(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
    REDISMODULE_NOT_USED(argv);
    REDISMODULE_NOT_USED(argc);

    if (RedisModule_Init(ctx, "hash", 1, REDISMODULE_APIVER_1) != REDISMODULE_OK)
        return REDISMODULE_ERR;
    if (RedisModule_CreateCommand(ctx, "hash.set", hash_set, "",
                                  1, 1, 1) != REDISMODULE_OK)
        return REDISMODULE_ERR;
    return REDISMODULE_OK;
}

View File

@ -21,6 +21,11 @@ void InfoFunc(RedisModuleInfoCtx *ctx, int for_crash_report) {
RedisModule_InfoAddFieldLongLong(ctx, "expires", 1);
RedisModule_InfoEndDictField(ctx);
RedisModule_InfoAddSection(ctx, "unsafe");
RedisModule_InfoBeginDictField(ctx, "unsafe:field");
RedisModule_InfoAddFieldLongLong(ctx, "value", 1);
RedisModule_InfoEndDictField(ctx);
if (for_crash_report) {
RedisModule_InfoAddSection(ctx, "Klingon");
RedisModule_InfoAddFieldCString(ctx, "one", "wa");

30
tests/modules/zset.c Normal file
View File

@ -0,0 +1,30 @@
#include "redismodule.h"
/* ZSET.REM key element
 *
 * Removes an occurrence of an element from a sorted set. Replies with the
 * number of removed elements (0 or 1).
 */
int zset_rem(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
    if (argc != 3) return RedisModule_WrongArity(ctx);
    RedisModule_AutoMemory(ctx);

    /* The key is opened for both reading and writing. */
    RedisModuleKey *key =
        RedisModule_OpenKey(ctx, argv[1], REDISMODULE_READ | REDISMODULE_WRITE);

    int deleted;
    if (RedisModule_ZsetRem(key, argv[2], &deleted) != REDISMODULE_OK)
        return RedisModule_ReplyWithError(ctx, "ERR ZsetRem failed");
    return RedisModule_ReplyWithLongLong(ctx, deleted);
}
/* Module entry point: registers the "zset" module and the ZSET.REM command. */
int RedisModule_OnLoad(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
    REDISMODULE_NOT_USED(argv);
    REDISMODULE_NOT_USED(argc);

    if (RedisModule_Init(ctx, "zset", 1, REDISMODULE_APIVER_1) != REDISMODULE_OK)
        return REDISMODULE_ERR;
    if (RedisModule_CreateCommand(ctx, "zset.rem", zset_rem, "",
                                  1, 1, 1) != REDISMODULE_OK)
        return REDISMODULE_ERR;
    return REDISMODULE_OK;
}

View File

@ -10,6 +10,9 @@ set ::tlsdir "../../tls"
proc main {} {
parse_options
if {$::leaked_fds_file != ""} {
set ::env(LEAKED_FDS_FILE) $::leaked_fds_file
}
spawn_instance sentinel $::sentinel_base_port $::instances_count [list "sentinel deny-scripts-reconfig no"] "../tests/includes/sentinel.conf"
spawn_instance redis $::redis_base_port $::instances_count
run_tests

View File

@ -0,0 +1,79 @@
#!/usr/bin/env tclsh
#
# This script detects file descriptors that have leaked from a parent process.
#
# Our goal is to detect file descriptors that were opened by the parent and
# not cleaned up prior to exec(), but not file descriptors that were inherited
# from the grandparent which the parent knows nothing about. To do that, we
# look up every potential leak and try to match it against open files by the
# grandparent process.
# Look up the parent PID of process _pid by parsing the PPid field of its
# /proc/<pid>/status file. Raises an error when the field cannot be found.
proc get_parent_pid {_pid} {
    set statusfile "/proc/$_pid/status"
    set f [open $statusfile "r"]
    set status [read $f]
    close $f

    if {![regexp {\nPPid:\s+(\d+)} $status -> parent]} {
        error "failed to get parent pid"
    }
    return $parent
}
# Resolve the symlink for fd number `fd` of process `_pid`.
# The link target names the open file, or is an arbitrary string that
# identifies it. An empty string is returned when the link cannot be read.
proc get_fdlink {_pid fd} {
    set linkpath "/proc/$_pid/fd/$fd"
    if {[catch {file readlink $linkpath} target]} {
        return ""
    }
    return $target
}
# Linux only: everything below depends on the /proc filesystem.
set os [exec uname]
if {$os != "Linux"} {
    puts "Only Linux is supported."
    exit 0
}

# The report destination is handed to us by the test suite via the
# environment; without it there is nothing useful we can do.
if {![info exists env(LEAKED_FDS_FILE)]} {
    puts "Missing LEAKED_FDS_FILE environment variable."
    exit 0
}

set outfile $::env(LEAKED_FDS_FILE)

# Walk up the process tree: our parent is the process under test
# (Sentinel), its parent is the test harness (tcl).
set parent_pid [get_parent_pid [pid]]
set grandparent_pid [get_parent_pid $parent_pid]

set leaked_fds {}

# Look for fds that were directly inherited from our parent but not from
# our grandparent (tcl)
foreach fd [glob -tails -directory "/proc/self/fd" *] {
    # Ignore stdin/stdout/stderr
    if {$fd == 0 || $fd == 1 || $fd == 2} {
        continue
    }

    # An unreadable link means the fd vanished or is inaccessible; skip it.
    set fdlink [get_fdlink "self" $fd]
    if {$fdlink == ""} {
        continue
    }

    # We ignore fds that existed in the grandparent, or fds that don't exist
    # in our parent (Sentinel process).
    if {[get_fdlink $grandparent_pid $fd] == $fdlink ||
        [get_fdlink $parent_pid $fd] != $fdlink} {
        continue
    }

    lappend leaked_fds [list $fd $fdlink]
}

# Produce report only if we found leaks
if {[llength $leaked_fds] > 0} {
    set fd [open $outfile "w"]
    puts $fd [join $leaked_fds "\n"]
    close $fd
}

View File

@ -41,8 +41,10 @@ test "(init) Sentinels can start monitoring a master" {
S $id SENTINEL SET mymaster down-after-milliseconds 2000
S $id SENTINEL SET mymaster failover-timeout 20000
S $id SENTINEL SET mymaster parallel-syncs 10
S $id SENTINEL SET mymaster notification-script ../../tests/includes/notify.sh
S $id SENTINEL SET mymaster client-reconfig-script ../../tests/includes/notify.sh
if {$::leaked_fds_file != "" && [exec uname] == "Linux"} {
S $id SENTINEL SET mymaster notification-script ../../tests/helpers/check_leaked_fds.tcl
S $id SENTINEL SET mymaster client-reconfig-script ../../tests/helpers/check_leaked_fds.tcl
}
}
}

View File

@ -1,21 +0,0 @@
#!/usr/bin/env bash
OS=`uname -s`
if [ ${OS} != "Linux" ]
then
exit 0
fi
# fd 3 is meant to catch the actual access to /proc/pid/fd,
# in case there's an fd leak by the sentinel,
# it can take 3, but then the access to /proc will take another fd, and we'll catch that.
leaked_fd_count=`ls /proc/self/fd | grep -vE '^[0|1|2|3]$' | wc -l`
if [ $leaked_fd_count -gt 0 ]
then
sentinel_fd_leaks_file="../sentinel_fd_leaks"
if [ ! -f $sentinel_fd_leaks_file ]
then
ls -l /proc/self/fd | cat >> $sentinel_fd_leaks_file
lsof -p $$ | cat >> $sentinel_fd_leaks_file
fi
fi

View File

@ -248,6 +248,7 @@ proc ::redis::redis_read_reply {id fd} {
- {return -code error [redis_read_line $fd]}
$ {redis_bulk_read $fd}
> -
~ -
* {redis_multi_bulk_read $id $fd}
% {redis_read_map $id $fd}
default {

View File

@ -259,6 +259,13 @@ proc wait_server_started {config_file stdout pid} {
return $port_busy
}
# Print the full log of server instance `srv` (a server dict) to stdout,
# wrapped in start/end markers carrying the server's pid.
proc dump_server_log {srv} {
    set pid [dict get $srv "pid"]
    set logfile [dict get $srv "stdout"]
    puts "\n===== Start of server log (pid $pid) =====\n"
    puts [exec cat $logfile]
    puts "===== End of server log (pid $pid) =====\n"
}
proc start_server {options {code undefined}} {
# setup defaults
set baseconfig "default.conf"
@ -492,6 +499,9 @@ proc start_server {options {code undefined}} {
# connect client (after server dict is put on the stack)
reconnect
# remember previous num_failed to catch new errors
set prev_num_failed $::num_failed
# execute provided block
set num_tests $::num_tests
if {[catch { uplevel 1 $code } error]} {
@ -529,6 +539,10 @@ proc start_server {options {code undefined}} {
# Re-raise, let handler up the stack take care of this.
error $error $backtrace
}
} else {
if {$::dump_logs && $prev_num_failed != $::num_failed} {
dump_server_log $srv
}
}
# fetch srv back from the server list, in case it was restarted by restart_server (new PID)

View File

@ -681,3 +681,21 @@ proc string2printable s {
set res "\"$res\""
return $res
}
# Check that the sample list {res} is roughly uniform: the observed
# proportion of every distinct element must lie within
# [{min_prop}, {max_prop}]. Returns true/false.
proc check_histogram_distribution {res min_prop max_prop} {
    set total [llength $res]

    # Tally occurrences per distinct element.
    unset -nocomplain counts
    foreach elem $res {
        dict incr counts $elem 1
    }

    dict for {elem occurrences} $counts {
        set proportion [expr {double($occurrences) / $total}]
        if {$proportion < $min_prop || $proportion > $max_prop} {
            return false
        }
    }
    return true
}

View File

@ -110,6 +110,7 @@ set ::active_servers {} ; # Pids of active Redis instances.
set ::dont_clean 0
set ::wait_server 0
set ::stop_on_failure 0
set ::dump_logs 0
set ::loop 0
set ::tlsdir "tests/tls"
@ -555,6 +556,7 @@ proc print_help_screen {} {
"--stop Blocks once the first test fails."
"--loop Execute the specified set of tests forever."
"--wait-server Wait after server is started (so that you can attach a debugger)."
"--dump-logs Dump server log on test failure."
"--tls Run tests in TLS mode."
"--host <addr> Run tests against an external host."
"--port <port> TCP port to use against external host."
@ -657,6 +659,8 @@ for {set j 0} {$j < [llength $argv]} {incr j} {
set ::no_latency 1
} elseif {$opt eq {--wait-server}} {
set ::wait_server 1
} elseif {$opt eq {--dump-logs}} {
set ::dump_logs 1
} elseif {$opt eq {--stop}} {
set ::stop_on_failure 1
} elseif {$opt eq {--loop}} {

View File

@ -217,6 +217,25 @@ start_server {tags {"acl"}} {
set e
} {*NOPERM*}
test {ACLs set can include subcommands, if already full command exists} {
r ACL setuser bob +memory|doctor
set cmdstr [dict get [r ACL getuser bob] commands]
assert_equal {-@all +memory|doctor} $cmdstr
# Validate the commands have got engulfed to +memory.
r ACL setuser bob +memory
set cmdstr [dict get [r ACL getuser bob] commands]
assert_equal {-@all +memory} $cmdstr
# Appending to the existing access string of bob.
r ACL setuser bob +@all +client|id
# Validate the new commands has got engulfed to +@all.
set cmdstr [dict get [r ACL getuser bob] commands]
assert_equal {+@all} $cmdstr
r CLIENT ID; # Should not fail
r MEMORY DOCTOR; # Should not fail
}
# Note that the order of the generated ACL rules is not stable in Redis
# so we need to match the different parts and not as a whole string.
test {ACL GETUSER is able to translate back command permissions} {

View File

@ -209,6 +209,58 @@ start_server {tags {"expire"}} {
set e
} {*not an integer*}
test {SET with EX with big integer should report an error} {
catch {r set foo bar EX 10000000000000000} e
set e
} {ERR invalid expire time in set}
test {SET with EX with smallest integer should report an error} {
catch {r SET foo bar EX -9999999999999999} e
set e
} {ERR invalid expire time in set}
test {GETEX with big integer should report an error} {
r set foo bar
catch {r GETEX foo EX 10000000000000000} e
set e
} {ERR invalid expire time in getex}
test {GETEX with smallest integer should report an error} {
r set foo bar
catch {r GETEX foo EX -9999999999999999} e
set e
} {ERR invalid expire time in getex}
test {EXPIRE with big integer overflows when converted to milliseconds} {
r set foo bar
catch {r EXPIRE foo 10000000000000000} e
set e
} {ERR invalid expire time in expire}
test {PEXPIRE with big integer overflow when basetime is added} {
r set foo bar
catch {r PEXPIRE foo 9223372036854770000} e
set e
} {ERR invalid expire time in pexpire}
test {EXPIRE with big negative integer} {
r set foo bar
catch {r EXPIRE foo -9999999999999999} e
assert_match {ERR invalid expire time in expire} $e
r ttl foo
} {-1}
test {PEXPIREAT with big integer works} {
r set foo bar
r PEXPIREAT foo 9223372036854770000
} {1}
test {PEXPIREAT with big negative integer works} {
r set foo bar
r PEXPIREAT foo -9223372036854770000
r ttl foo
} {-2}
test {EXPIRE and SET/GETEX EX/PX/EXAT/PXAT option, TTL should not be reset after loadaof} {
# This test makes sure that expire times are propagated as absolute
# times to the AOF file and not as relative time, so that when the AOF

View File

@ -1,6 +1,7 @@
# Helper functions to simulate search-in-radius in the Tcl side in order to
# verify the Redis implementation with a fuzzy test.
proc geo_degrad deg {expr {$deg*atan(1)*8/360}}
proc geo_degrad deg {expr {$deg*(atan(1)*8/360)}}
proc geo_raddeg rad {expr {$rad/(atan(1)*8/360)}}
proc geo_distance {lon1d lat1d lon2d lat2d} {
set lon1r [geo_degrad $lon1d]
@ -42,6 +43,34 @@ proc compare_lists {List1 List2} {
return $DiffList
}
# Return true if the point (lon, lat) lies inside the circle of radius
# {radius_km} centered at (search_lon, search_lat).
proc pointInCircle {radius_km lon lat search_lon search_lat} {
    # geo_distance works in meters, so convert the radius.
    set dist_m [geo_distance $lon $lat $search_lon $search_lat]
    if {$dist_m >= $radius_km * 1000} {
        return false
    }
    return true
}
# Return true if the point (lon, lat) lies inside the axis-aligned rectangle
# of size {width_km} x {height_km} centered at (search_lon, search_lat).
# {error} scales the rectangle to tolerate floating point imprecision
# (e.g. 1.001 enlarges each side by 0.1%).
proc pointInRectangle {width_km height_km lon lat search_lon search_lat error} {
    # Half extents in meters, scaled by the tolerance factor.
    set half_width_m [expr {$width_km * 1000 * $error / 2}]
    set half_height_m [expr {$height_km * 1000 * $error / 2}]

    # Distance components along the longitude and latitude axes.
    set lon_dist_m [geo_distance $lon $lat $search_lon $lat]
    set lat_dist_m [geo_distance $lon $lat $lon $search_lat]

    if {$lon_dist_m <= $half_width_m && $lat_dist_m <= $half_height_m} {
        return true
    }
    return false
}
# The following list represents sets of random seed, search position
# and radius that caused bugs in the past. It is used by the randomized
# test later as a starting point. When the regression vectors are scanned
@ -225,19 +254,26 @@ start_server {tags {"geo"}} {
test {GEOSEARCH non square, long and narrow} {
r del Sicily
r geoadd Sicily 12.75 37.00 "test1"
r geoadd Sicily 12.75 36.995 "test1"
r geoadd Sicily 12.75 36.50 "test2"
r geoadd Sicily 13.00 36.50 "test3"
# box height=2km width=400km
set ret1 [r geosearch Sicily fromlonlat 15 37 bybox 2 400 km]
set ret1 [r geosearch Sicily fromlonlat 15 37 bybox 400 2 km]
assert_equal $ret1 {test1}
# Add a western Hemisphere point
r geoadd Sicily -1 37.00 "test3"
set ret2 [r geosearch Sicily fromlonlat 15 37 bybox 2 3000 km asc]
set ret2 [r geosearch Sicily fromlonlat 15 37 bybox 3000 2 km asc]
assert_equal $ret2 {test1 test3}
}
test {GEOSEARCH corner point test} {
r del Sicily
r geoadd Sicily 12.758489 38.788135 edge1 17.241510 38.788135 edge2 17.250000 35.202000 edge3 12.750000 35.202000 edge4 12.748489955781654 37 edge5 15 38.798135872540925 edge6 17.251510044218346 37 edge7 15 35.201864127459075 edge8 12.692799634687903 38.798135872540925 corner1 12.692799634687903 38.798135872540925 corner2 17.200560937451133 35.201864127459075 corner3 12.799439062548865 35.201864127459075 corner4
set ret [lsort [r geosearch Sicily fromlonlat 15 37 bybox 400 400 km asc]]
assert_equal $ret {edge1 edge2 edge5 edge7}
}
test {GEORADIUSBYMEMBER withdist (sorted)} {
r georadiusbymember nyc "wtc one" 7 km withdist
} {{{wtc one} 0.0000} {{union square} 3.2544} {{central park n/q/r} 6.7000} {4545 6.1975} {{lic market} 6.8969}}
@ -360,12 +396,22 @@ start_server {tags {"geo"}} {
assert {[lindex $res 0] eq "Catania"}
}
test {GEOADD + GEORANGE randomized test} {
set attempt 30
test {GEOSEARCH the box spans -180° or 180°} {
r del points
r geoadd points 179.5 36 point1
r geoadd points -179.5 36 point2
assert_equal {point1 point2} [r geosearch points fromlonlat 179 37 bybox 400 400 km asc]
assert_equal {point2 point1} [r geosearch points fromlonlat -179 37 bybox 400 400 km asc]
}
foreach {type} {byradius bybox} {
test "GEOSEARCH fuzzy test - $type" {
if {$::accurate} { set attempt 300 } else { set attempt 30 }
while {[incr attempt -1]} {
set rv [lindex $regression_vectors $rv_idx]
incr rv_idx
set radius_km 0; set width_km 0; set height_km 0
unset -nocomplain debuginfo
set srand_seed [clock milliseconds]
if {$rv ne {}} {set srand_seed [lindex $rv 0]}
@ -375,33 +421,55 @@ start_server {tags {"geo"}} {
if {[randomInt 10] == 0} {
# From time to time use very big radiuses
set radius_km [expr {[randomInt 50000]+10}]
if {$type == "byradius"} {
set radius_km [expr {[randomInt 5000]+10}]
} elseif {$type == "bybox"} {
set width_km [expr {[randomInt 5000]+10}]
set height_km [expr {[randomInt 5000]+10}]
}
} else {
# Normally use a few - ~200km radiuses to stress
# test the code the most in edge cases.
set radius_km [expr {[randomInt 200]+10}]
if {$type == "byradius"} {
set radius_km [expr {[randomInt 200]+10}]
} elseif {$type == "bybox"} {
set width_km [expr {[randomInt 200]+10}]
set height_km [expr {[randomInt 200]+10}]
}
}
if {$rv ne {}} {
set radius_km [lindex $rv 1]
set width_km [lindex $rv 1]
set height_km [lindex $rv 1]
}
if {$rv ne {}} {set radius_km [lindex $rv 1]}
set radius_m [expr {$radius_km*1000}]
geo_random_point search_lon search_lat
if {$rv ne {}} {
set search_lon [lindex $rv 2]
set search_lat [lindex $rv 3]
}
lappend debuginfo "Search area: $search_lon,$search_lat $radius_km km"
lappend debuginfo "Search area: $search_lon,$search_lat $radius_km $width_km $height_km km"
set tcl_result {}
set argv {}
for {set j 0} {$j < 20000} {incr j} {
geo_random_point lon lat
lappend argv $lon $lat "place:$j"
set distance [geo_distance $lon $lat $search_lon $search_lat]
if {$distance < $radius_m} {
lappend tcl_result "place:$j"
if {$type == "byradius"} {
if {[pointInCircle $radius_km $lon $lat $search_lon $search_lat]} {
lappend tcl_result "place:$j"
}
} elseif {$type == "bybox"} {
if {[pointInRectangle $width_km $height_km $lon $lat $search_lon $search_lat 1]} {
lappend tcl_result "place:$j"
}
}
lappend debuginfo "place:$j $lon $lat [expr {$distance/1000}] km"
lappend debuginfo "place:$j $lon $lat"
}
r geoadd mypoints {*}$argv
set res [lsort [r georadius mypoints $search_lon $search_lat $radius_km km]]
if {$type == "byradius"} {
set res [lsort [r geosearch mypoints fromlonlat $search_lon $search_lat byradius $radius_km km]]
} elseif {$type == "bybox"} {
set res [lsort [r geosearch mypoints fromlonlat $search_lon $search_lat bybox $width_km $height_km km]]
}
set res2 [lsort $tcl_result]
set test_result OK
@ -409,18 +477,27 @@ start_server {tags {"geo"}} {
set rounding_errors 0
set diff [compare_lists $res $res2]
foreach place $diff {
lassign [lindex [r geopos mypoints $place] 0] lon lat
set mydist [geo_distance $lon $lat $search_lon $search_lat]
set mydist [expr $mydist/1000]
if {($mydist / $radius_km) > 0.999} {
incr rounding_errors
continue
}
if {$mydist < $radius_m} {
# This is a false positive for redis since given the
# same points the higher precision calculation provided
# by TCL shows the point within range
incr rounding_errors
continue
if {$type == "byradius"} {
if {($mydist / $radius_km) > 0.999} {
incr rounding_errors
continue
}
if {$mydist < [expr {$radius_km*1000}]} {
# This is a false positive for redis since given the
# same points the higher precision calculation provided
# by TCL shows the point within range
incr rounding_errors
continue
}
} elseif {$type == "bybox"} {
# we add 0.1% error for floating point calculation error
if {[pointInRectangle $width_km $height_km $lon $lat $search_lon $search_lat 1.001]} {
incr rounding_errors
continue
}
}
}
@ -447,7 +524,6 @@ start_server {tags {"geo"}} {
set mydist [geo_distance $lon $lat $search_lon $search_lat]
set mydist [expr $mydist/1000]
puts "$place -> [r geopos mypoints $place] $mydist $where"
if {($mydist / $radius_km) > 0.999} {incr rounding_errors}
}
set test_result FAIL
}
@ -456,4 +532,91 @@ start_server {tags {"geo"}} {
}
set test_result
} {OK}
}
test {GEOSEARCH box edges fuzzy test} {
if {$::accurate} { set attempt 300 } else { set attempt 30 }
while {[incr attempt -1]} {
unset -nocomplain debuginfo
set srand_seed [clock milliseconds]
lappend debuginfo "srand_seed is $srand_seed"
expr {srand($srand_seed)} ; # If you need a reproducible run
r del mypoints
geo_random_point search_lon search_lat
set width_m [expr {[randomInt 10000]+10}]
set height_m [expr {[randomInt 10000]+10}]
set lat_delta [geo_raddeg [expr {$height_m/2/6372797.560856}]]
set long_delta_top [geo_raddeg [expr {$width_m/2/6372797.560856/cos([geo_degrad [expr {$search_lat+$lat_delta}]])}]]
set long_delta_middle [geo_raddeg [expr {$width_m/2/6372797.560856/cos([geo_degrad $search_lat])}]]
set long_delta_bottom [geo_raddeg [expr {$width_m/2/6372797.560856/cos([geo_degrad [expr {$search_lat-$lat_delta}]])}]]
# Total of 8 points are generated, which are located at each vertex and the center of each side
set points(north) [list $search_lon [expr {$search_lat+$lat_delta}]]
set points(south) [list $search_lon [expr {$search_lat-$lat_delta}]]
set points(east) [list [expr {$search_lon+$long_delta_middle}] $search_lat]
set points(west) [list [expr {$search_lon-$long_delta_middle}] $search_lat]
set points(north_east) [list [expr {$search_lon+$long_delta_top}] [expr {$search_lat+$lat_delta}]]
set points(north_west) [list [expr {$search_lon-$long_delta_top}] [expr {$search_lat+$lat_delta}]]
set points(south_east) [list [expr {$search_lon+$long_delta_bottom}] [expr {$search_lat-$lat_delta}]]
set points(south_west) [list [expr {$search_lon-$long_delta_bottom}] [expr {$search_lat-$lat_delta}]]
lappend debuginfo "Search area: geosearch mypoints fromlonlat $search_lon $search_lat bybox $width_m $height_m m"
set tcl_result {}
foreach name [array names points] {
set x [lindex $points($name) 0]
set y [lindex $points($name) 1]
# If longitude crosses -180° or 180°, we need to convert it.
# latitude doesn't have this problem, because its range is -70~70, see geo_random_point
if {$x > 180} {
set x [expr {$x-360}]
} elseif {$x < -180} {
set x [expr {$x+360}]
}
r geoadd mypoints $x $y place:$name
lappend tcl_result "place:$name"
lappend debuginfo "geoadd mypoints $x $y place:$name"
}
set res2 [lsort $tcl_result]
# make the box larger by two meter in each direction to put the coordinate slightly inside the box.
set height_new [expr {$height_m+4}]
set width_new [expr {$width_m+4}]
set res [lsort [r geosearch mypoints fromlonlat $search_lon $search_lat bybox $width_new $height_new m]]
if {$res != $res2} {
set diff [compare_lists $res $res2]
lappend debuginfo "res: $res, res2: $res2, diff: $diff"
fail "place should be found, debuginfo: $debuginfo, height_new: $height_new width_new: $width_new"
}
# The width decreases and the height increases. Only north and south are found
set width_new [expr {$width_m-4}]
set height_new [expr {$height_m+4}]
set res [lsort [r geosearch mypoints fromlonlat $search_lon $search_lat bybox $width_new $height_new m]]
if {$res != {place:north place:south}} {
lappend debuginfo "res: $res"
fail "place should not be found, debuginfo: $debuginfo, height_new: $height_new width_new: $width_new"
}
# The width increases and the height decreases. Only east and west are found
set width_new [expr {$width_m+4}]
set height_new [expr {$height_m-4}]
set res [lsort [r geosearch mypoints fromlonlat $search_lon $search_lat bybox $width_new $height_new m]]
if {$res != {place:east place:west}} {
lappend debuginfo "res: $res"
fail "place should not be found, debuginfo: $debuginfo, height_new: $height_new width_new: $width_new"
}
# make the box smaller by two meter in each direction to put the coordinate slightly outside the box.
set height_new [expr {$height_m-4}]
set width_new [expr {$width_m-4}]
set res [r geosearch mypoints fromlonlat $search_lon $search_lat bybox $width_new $height_new m]
if {$res != ""} {
lappend debuginfo "res: $res"
fail "place should not be found, debuginfo: $debuginfo, height_new: $height_new width_new: $width_new"
}
unset -nocomplain debuginfo
}
}
}

View File

@ -150,4 +150,12 @@ start_server {tags {"info"}} {
assert_match {} [errorstat NOPERM]
}
}
start_server {} {
test {Unsafe command names are sanitized in INFO output} {
catch {r host:} e
set info [r info commandstats]
assert_match {*cmdstat_host_:calls=1*} $info
}
}
}

View File

@ -8,12 +8,18 @@ start_server {tags {"modules"}} {
test { blocked clients time tracking - check blocked command that uses RedisModule_BlockedClientMeasureTimeStart() is tracking background time} {
r slowlog reset
r config set slowlog-log-slower-than 200000
assert_equal [r slowlog len] 0
if {!$::no_latency} {
assert_equal [r slowlog len] 0
}
r block.debug 0 10000
assert_equal [r slowlog len] 0
if {!$::no_latency} {
assert_equal [r slowlog len] 0
}
r config resetstat
r block.debug 200 10000
assert_equal [r slowlog len] 1
if {!$::no_latency} {
assert_equal [r slowlog len] 1
}
set cmdstatline [cmdrstat block.debug r]
@ -25,30 +31,41 @@ start_server {tags {"modules"}} {
test { blocked clients time tracking - check blocked command that uses RedisModule_BlockedClientMeasureTimeStart() is tracking background time even in timeout } {
r slowlog reset
r config set slowlog-log-slower-than 200000
assert_equal [r slowlog len] 0
if {!$::no_latency} {
assert_equal [r slowlog len] 0
}
r block.debug 0 20000
assert_equal [r slowlog len] 0
if {!$::no_latency} {
assert_equal [r slowlog len] 0
}
r config resetstat
r block.debug 20000 200
assert_equal [r slowlog len] 1
r block.debug 20000 500
if {!$::no_latency} {
assert_equal [r slowlog len] 1
}
set cmdstatline [cmdrstat block.debug r]
regexp "calls=1,usec=(.*?),usec_per_call=(.*?),rejected_calls=0,failed_calls=0" $cmdstatline usec usec_per_call
assert {$usec >= 100000}
assert {$usec_per_call >= 100000}
assert {$usec >= 250000}
assert {$usec_per_call >= 250000}
}
test { blocked clients time tracking - check blocked command with multiple calls RedisModule_BlockedClientMeasureTimeStart() is tracking the total background time } {
r slowlog reset
r config set slowlog-log-slower-than 200000
assert_equal [r slowlog len] 0
if {!$::no_latency} {
assert_equal [r slowlog len] 0
}
r block.double_debug 0
assert_equal [r slowlog len] 0
if {!$::no_latency} {
assert_equal [r slowlog len] 0
}
r config resetstat
r block.double_debug 100
assert_equal [r slowlog len] 1
if {!$::no_latency} {
assert_equal [r slowlog len] 1
}
set cmdstatline [cmdrstat block.double_debug r]
regexp "calls=1,usec=(.*?),usec_per_call=(.*?),rejected_calls=0,failed_calls=0" $cmdstatline usec usec_per_call
@ -59,9 +76,13 @@ start_server {tags {"modules"}} {
test { blocked clients time tracking - check blocked command without calling RedisModule_BlockedClientMeasureTimeStart() is not reporting background time } {
r slowlog reset
r config set slowlog-log-slower-than 200000
assert_equal [r slowlog len] 0
if {!$::no_latency} {
assert_equal [r slowlog len] 0
}
r block.debug_no_track 200 1000
# ensure slowlog is still empty
assert_equal [r slowlog len] 0
if {!$::no_latency} {
assert_equal [r slowlog len] 0
}
}
}

View File

@ -0,0 +1,23 @@
set testmodule [file normalize tests/modules/hash.so]
start_server {tags {"modules"}} {
r module load $testmodule
test {Module hash set} {
r set k mystring
assert_error "WRONGTYPE*" {r hash.set k "" hello world}
r del k
# "" = count updates and deletes of existing fields only
assert_equal 0 [r hash.set k "" squirrel yes]
# "a" = COUNT_ALL = count inserted, modified and deleted fields
assert_equal 2 [r hash.set k "a" banana no sushi whynot]
# "n" = NX = only add fields not already existing in the hash
# "x" = XX = only replace the value for existing fields
assert_equal 0 [r hash.set k "n" squirrel hoho what nothing]
assert_equal 1 [r hash.set k "na" squirrel hoho something nice]
assert_equal 0 [r hash.set k "xa" new stuff not inserted]
assert_equal 1 [r hash.set k "x" squirrel ofcourse]
assert_equal 1 [r hash.set k "" sushi :delete: none :delete:]
r hgetall k
} {squirrel ofcourse banana no what nothing something nice}
}

View File

@ -85,5 +85,10 @@ start_server {tags {"modules"}} {
set keys [scan [regexp -inline {keys\=([\d]*)} $keyspace] keys=%d]
} {3}
test {module info unsafe fields} {
set info [r info infotest_unsafe]
assert_match {*infotest_unsafe_field:value=1*} $info
}
# TODO: test crash report.
}

View File

@ -0,0 +1,16 @@
set testmodule [file normalize tests/modules/zset.so]
start_server {tags {"modules"}} {
r module load $testmodule
test {Module zset rem} {
r del k
r zadd k 100 hello 200 world
assert_equal 1 [r zset.rem k hello]
assert_equal 0 [r zset.rem k hello]
assert_equal 1 [r exists k]
# Check that removing the last element deletes the key
assert_equal 1 [r zset.rem k world]
assert_equal 0 [r exists k]
}
}

View File

@ -39,7 +39,12 @@ if {$system_name eq {linux}} {
r bgsave
set child_pid [get_child_pid 0]
assert_equal [get_oom_score_adj $child_pid] [expr $base + 30]
# Wait until background child process to setOOMScoreAdj success.
wait_for_condition 100 10 {
[get_oom_score_adj $child_pid] == [expr $base + 30]
} else {
fail "Set oom-score-adj of background child process is not ok"
}
}
# Failed oom-score-adj tests can only run unprivileged

View File

@ -55,7 +55,7 @@ start_server {tags {"other"}} {
} {*index is out of range*}
tags {consistency} {
if {![catch {package require sha1}]} {
if {true} {
if {$::accurate} {set numops 10000} else {set numops 1000}
test {Check consistency of different data types after a reload} {
r flushdb

View File

@ -468,6 +468,7 @@ start_server {tags {"scripting"}} {
r slaveof no one
set res
} {102}
r config set aof-use-rdb-preamble yes
test {EVAL timeout from AOF} {
# generate a long running script that is propagated to the AOF as script
@ -505,8 +506,6 @@ start_server {tags {"scripting"}} {
$rd close
r get x
} {y}
r config set aof-use-rdb-preamble yes
r config set lua-replicate-commands yes
test {We can call scripts rewriting client->argv from Lua} {
r del myset

View File

@ -17,6 +17,10 @@ start_server {tags {"tracking network"}} {
proc clean_all {} {
uplevel {
# We should turn TRACKING off on r first. If r is in RESP3,
# FLUSHALL will send us tracking-redir-broken or other push
# messages that would otherwise be left unconsumed.
r CLIENT TRACKING off
$rd QUIT
$rd_redirection QUIT
set rd [redis_deferring_client]
@ -26,7 +30,6 @@ start_server {tags {"tracking network"}} {
$rd_redirection subscribe __redis__:invalidate
$rd_redirection read ; # Consume the SUBSCRIBE reply.
r FLUSHALL
r CLIENT TRACKING off
r HELLO 2
r config set tracking-table-max-keys 1000000
}
@ -52,7 +55,7 @@ start_server {tags {"tracking network"}} {
# so that we can check for leaks, as a side effect.
r MGET a b c d e f g
r CLIENT TRACKING off
}
} {*OK}
test {Clients can enable the BCAST mode with the empty prefix} {
r CLIENT TRACKING on BCAST REDIRECT $redir_id
@ -72,6 +75,8 @@ start_server {tags {"tracking network"}} {
r INCR a:2
r INCR b:1
r INCR b:2
# we should not get this key
r INCR c:1
r EXEC
# Because of the internals, we know we are going to receive
# two separated notifications for the two different prefixes.
@ -96,7 +101,7 @@ start_server {tags {"tracking network"}} {
r SET loopkey 1 ; # We should not get this
$rd_sg SET otherkey2 1; # We should get this
# Because of the internals, we know we are going to receive
# two separated notifications for the two different prefixes.
# two separated notifications for the two different keys.
set keys1 [lsort [lindex [$rd_redirection read] 2]]
set keys2 [lsort [lindex [$rd_redirection read] 2]]
set keys [lsort [list {*}$keys1 {*}$keys2]]
@ -109,8 +114,8 @@ start_server {tags {"tracking network"}} {
$rd_sg SET otherkey1 1; # We should get this
r SET loopkey 1 ; # We should not get this
$rd_sg SET otherkey2 1; # We should get this
# Because of the internals, we know we are going to receive
# two separated notifications for the two different prefixes.
# Because $rd_sg sends its commands synchronously, we know we
# are going to receive two separate notifications.
set keys1 [lsort [lindex [$rd_redirection read] 2]]
set keys2 [lsort [lindex [$rd_redirection read] 2]]
set keys [lsort [list {*}$keys1 {*}$keys2]]
@ -123,10 +128,7 @@ start_server {tags {"tracking network"}} {
r SET mykey myval px 1
r SET mykeyotherkey myval ; # We should not get it
after 1000
# Because of the internals, we know we are going to receive
# two separated notifications for the two different prefixes.
set keys1 [lsort [lindex [$rd_redirection read] 2]]
set keys [lsort [list {*}$keys1]]
set keys [lsort [lindex [$rd_redirection read] 2]]
assert {$keys eq {mykey}}
}
@ -172,6 +174,7 @@ start_server {tags {"tracking network"}} {
}
test {Invalidations of previous keys can be redirected after switching to RESP3} {
r HELLO 2
$rd_sg SET key1 1
r GET key1
r HELLO 3
@ -194,28 +197,33 @@ start_server {tags {"tracking network"}} {
$rd_sg SET key1 1
r GET key1
$rd_redirection QUIT
assert_equal OK [$rd_redirection read]
$rd_sg SET key1 2
set MAX_TRIES 100
for {set i 0} {$i <= $MAX_TRIES && $res <= 0} {incr i} {
set res -1
for {set i 0} {$i <= $MAX_TRIES && $res < 0} {incr i} {
set res [lsearch -exact [r PING] "tracking-redir-broken"]
}
assert {$res >= 0}
}
# Consume PING reply
assert_equal PONG [r read]
test {Different clients can redirect to the same connection} {
# Reinstantiating after QUIT
set rd_redirection [redis_deferring_client]
$rd_redirection CLIENT ID
set redir_id [$rd_redirection read]
$rd_redirection SUBSCRIBE __redis__:invalidate
$rd_redirection read ; # Consume the SUBSCRIBE reply
}
test {Different clients can redirect to the same connection} {
r CLIENT TRACKING on REDIRECT $redir_id
$rd CLIENT TRACKING on REDIRECT $redir_id
$rd read ; # Consume the TRACKING reply
assert_equal OK [$rd read] ; # Consume the TRACKING reply
$rd_sg MSET key1 1 key2 1
r GET key1
$rd GET key2
$rd read ; # Consume the GET reply
assert_equal 1 [$rd read] ; # Consume the GET reply
$rd_sg INCR key1
$rd_sg INCR key2
set res1 [lindex [$rd_redirection read] 2]
@ -226,18 +234,19 @@ start_server {tags {"tracking network"}} {
test {Different clients using different protocols can track the same key} {
$rd HELLO 3
$rd read ; # Consume the HELLO reply
set reply [$rd read] ; # Consume the HELLO reply
assert_equal 3 [dict get $reply proto]
$rd CLIENT TRACKING on
$rd read ; # Consume the TRACKING reply
assert_equal OK [$rd read] ; # Consume the TRACKING reply
$rd_sg set key1 1
r GET key1
$rd GET key1
$rd read ; # Consume the GET reply
assert_equal 1 [$rd read] ; # Consume the GET reply
$rd_sg INCR key1
set res1 [lindex [$rd_redirection read] 2]
$rd PING ; # Non redirecting client has to talk to the server in order to get invalidation message
set res2 [lindex [split [$rd read] " "] 1]
$rd read ; # Consume the PING reply, which comes together with the invalidation message
assert_equal PONG [$rd read] ; # Consume the PING reply, which comes together with the invalidation message
assert {$res1 eq {key1}}
assert {$res2 eq {key1}}
}
@ -292,35 +301,36 @@ start_server {tags {"tracking network"}} {
test {Able to redirect to a RESP3 client} {
$rd_redirection UNSUBSCRIBE __redis__:invalidate ; # Need to unsub first before we can do HELLO 3
$rd_redirection read ; # Consume the UNSUBSCRIBE reply
set res [$rd_redirection read] ; # Consume the UNSUBSCRIBE reply
assert_equal {__redis__:invalidate} [lindex $res 1]
$rd_redirection HELLO 3
$rd_redirection read ; # Consume the HELLO reply
set res [$rd_redirection read] ; # Consume the HELLO reply
assert_equal [dict get $reply proto] 3
$rd_redirection SUBSCRIBE __redis__:invalidate
$rd_redirection read ; # Consume the SUBSCRIBE reply
set res [$rd_redirection read] ; # Consume the SUBSCRIBE reply
assert_equal {__redis__:invalidate} [lindex $res 1]
r CLIENT TRACKING on REDIRECT $redir_id
$rd_sg SET key1 1
r GET key1
$rd_sg INCR key1
set res [lindex [$rd_redirection read] 1]
assert {$res eq {key1}}
$rd_redirection HELLO 2
set res [$rd_redirection read] ; # Consume the HELLO reply
assert_equal [dict get $res proto] 2
}
test {After switching from normal tracking to BCAST mode, no invalidation message is produced for pre-BCAST keys} {
$rd HELLO 3
$rd read ; # Consume the HELLO reply
$rd CLIENT TRACKING on
$rd read ; # Consume the TRACKING reply
r CLIENT TRACKING off
r HELLO 3
r CLIENT TRACKING on
$rd_sg SET key1 1
$rd GET key1
$rd read ; # Consume the GET reply
$rd CLIENT TRACKING off
$rd read ; # Consume the TRACKING reply
$rd CLIENT TRACKING on BCAST
$rd read ; # Consume the TRACKING reply
r GET key1
r CLIENT TRACKING off
r CLIENT TRACKING on BCAST
$rd_sg INCR key1
$rd PING
set inv_msg [$rd read]
set ping_reply [$rd read]
set inv_msg [r PING]
set ping_reply [r read]
assert {$inv_msg eq {invalidate key1}}
assert {$ping_reply eq {PONG}}
}
@ -344,7 +354,6 @@ start_server {tags {"tracking network"}} {
}
test {Tracking gets notification on tracking table key eviction} {
$rd_redirection HELLO 2
r CLIENT TRACKING off
r CLIENT TRACKING on REDIRECT $redir_id NOLOOP
r MSET key1 1 key2 2
@ -368,7 +377,6 @@ start_server {tags {"tracking network"}} {
test {Invalidation message received for flushall} {
clean_all
r CLIENT TRACKING off
r CLIENT TRACKING on REDIRECT $redir_id
$rd_sg SET key1 1
r GET key1
@ -379,7 +387,6 @@ start_server {tags {"tracking network"}} {
test {Invalidation message received for flushdb} {
clean_all
r CLIENT TRACKING off
r CLIENT TRACKING on REDIRECT $redir_id
$rd_sg SET key1 1
r GET key1
@ -422,15 +429,29 @@ start_server {tags {"tracking network"}} {
r GET key1
r GET key2
$rd CLIENT TRACKING on BCAST PREFIX prefix:
assert [string match *OK* [$rd read]]
$rd_sg SET prefix:key1 1
$rd_sg SET prefix:key2 2
set info [r info]
regexp "\r\ntracking_total_items:(.*?)\r\n" $info _ total_items
regexp "\r\ntracking_total_keys:(.*?)\r\n" $info _ total_keys
regexp "\r\ntracking_total_prefixes:(.*?)\r\n" $info _ total_prefixes
regexp "\r\ntracking_clients:(.*?)\r\n" $info _ tracking_clients
assert {$total_items == 2}
assert {$total_keys == 2}
assert {$total_prefixes == 1}
assert {$tracking_clients == 2}
}
test {CLIENT GETREDIR provides correct client id} {
set res [r CLIENT GETREDIR]
assert_equal $redir_id $res
r CLIENT TRACKING off
set res [r CLIENT GETREDIR]
assert_equal -1 $res
r CLIENT TRACKING on
set res [r CLIENT GETREDIR]
assert_equal 0 $res
}
test {CLIENT TRACKINGINFO provides reasonable results when tracking off} {
@ -510,6 +531,32 @@ start_server {tags {"tracking network"}} {
assert_equal {0} $redirect
set prefixes [lsort [dict get $res prefixes]]
assert_equal {bar foo} $prefixes
r CLIENT TRACKING off
r CLIENT TRACKING on BCAST
set res [r client trackinginfo]
set prefixes [dict get $res prefixes]
assert_equal {{}} $prefixes
}
test {CLIENT TRACKINGINFO provides reasonable results when tracking redir broken} {
clean_all
r HELLO 3
r CLIENT TRACKING on REDIRECT $redir_id
$rd_sg SET key1 1
r GET key1
$rd_redirection QUIT
assert_equal OK [$rd_redirection read]
$rd_sg SET key1 2
set res [lsearch -exact [r read] "tracking-redir-broken"]
assert {$res >= 0}
set res [r client trackinginfo]
set flags [dict get $res flags]
assert_equal {on broken_redirect} $flags
set redirect [dict get $res redirect]
assert_equal $redir_id $redirect
set prefixes [dict get $res prefixes]
assert_equal {} $prefixes
}
$rd_redirection close

View File

@ -96,9 +96,17 @@ start_server {tags {"hash"}} {
# 1) Check that it returns repeated elements with and without values.
set res [r hrandfield myhash -20]
assert_equal [llength $res] 20
set res [r hrandfield myhash -1001]
assert_equal [llength $res] 1001
# again with WITHVALUES
set res [r hrandfield myhash -20 withvalues]
assert_equal [llength $res] 40
set res [r hrandfield myhash -1001 withvalues]
assert_equal [llength $res] 2002
# Test random uniform distribution
set res [r hrandfield myhash -1000]
assert_equal [check_histogram_distribution $res 0.05 0.15] true
# 2) Check that all the elements actually belong to the original hash.
foreach {key val} $res {
@ -167,26 +175,31 @@ start_server {tags {"hash"}} {
# 2) Check that eventually all the elements are returned.
# Use both WITHVALUES and without
unset -nocomplain auxset
set iterations 1000
unset -nocomplain allkey
set iterations [expr {1000 / $size}]
set all_ele_return false
while {$iterations != 0} {
incr iterations -1
if {[expr {$iterations % 2}] == 0} {
set res [r hrandfield myhash $size withvalues]
foreach {key value} $res {
dict append auxset $key $value
lappend allkey $key
}
} else {
set res [r hrandfield myhash $size]
foreach key $res {
dict append auxset $key
lappend allkey $key
}
}
if {[lsort [dict keys $mydict]] eq
[lsort [dict keys $auxset]]} {
break;
set all_ele_return true
}
}
assert {$iterations != 0}
assert_equal $all_ele_return true
assert_equal [check_histogram_distribution $allkey 0.05 0.15] true
}
}
r config set hash-max-ziplist-value $original_max_value

View File

@ -515,6 +515,43 @@ start_server {
}
}
# Check that SRANDMEMBER returns a roughly uniform distribution of
# members, for both set encodings (hashtable and intset).
foreach {type contents} {
    hashtable {
        1 5 10 50 125
        MARY PATRICIA LINDA BARBARA ELIZABETH
    }
    intset {
        0 1 2 3 4 5 6 7 8 9
    }
} {
    test "SRANDMEMBER histogram distribution - $type" {
        create_set myset $contents
        # Build a Tcl-side lookup array of the set's members.
        # NOTE(review): this myset array is never read later in this test;
        # looks like leftover scaffolding — confirm before removing.
        unset -nocomplain myset
        array set myset {}
        foreach ele [r smembers myset] {
            set myset($ele) 1
        }

        # Use negative count (PATH 1).
        set res [r srandmember myset -1000]
        assert_equal [check_histogram_distribution $res 0.05 0.15] true

        # Use positive count (both PATH 3 and PATH 4).
        foreach size {8 2} {
            unset -nocomplain allkey
            # Keep the total number of sampled elements comparable
            # (roughly 1000) regardless of the per-call count.
            set iterations [expr {1000 / $size}]
            while {$iterations != 0} {
                incr iterations -1
                set res [r srandmember myset $size]
                foreach ele $res {
                    lappend allkey $ele
                }
            }
            assert_equal [check_histogram_distribution $allkey 0.05 0.15] true
        }
    }
}
proc setup_move {} {
r del myset3 myset4
create_set myset1 {1 a b}

View File

@ -1646,9 +1646,17 @@ start_server {tags {"zset"}} {
# 1) Check that it returns repeated elements with and without values.
set res [r zrandmember myzset -20]
assert_equal [llength $res] 20
set res [r zrandmember myzset -1001]
assert_equal [llength $res] 1001
# again with WITHSCORES
set res [r zrandmember myzset -20 withscores]
assert_equal [llength $res] 40
set res [r zrandmember myzset -1001 withscores]
assert_equal [llength $res] 2002
# Test random uniform distribution
set res [r zrandmember myzset -1000]
assert_equal [check_histogram_distribution $res 0.05 0.15] true
# 2) Check that all the elements actually belong to the original zset.
foreach {key val} $res {
@ -1717,26 +1725,31 @@ start_server {tags {"zset"}} {
# 2) Check that eventually all the elements are returned.
# Use both WITHSCORES and without
unset -nocomplain auxset
set iterations 1000
unset -nocomplain allkey
set iterations [expr {1000 / $size}]
set all_ele_return false
while {$iterations != 0} {
incr iterations -1
if {[expr {$iterations % 2}] == 0} {
set res [r zrandmember myzset $size withscores]
foreach {key value} $res {
dict append auxset $key $value
lappend allkey $key
}
} else {
set res [r zrandmember myzset $size]
foreach key $res {
dict append auxset $key
lappend allkey $key
}
}
if {[lsort [dict keys $mydict]] eq
[lsort [dict keys $auxset]]} {
break;
set all_ele_return true
}
}
assert {$iterations != 0}
assert_equal $all_ele_return true
assert_equal [check_histogram_distribution $allkey 0.05 0.15] true
}
}
r config set zset-max-ziplist-value $original_max_value