Compare commits

163 commits:

f35f36a265 3e720bbcf5 8263146227 e351099e11 c1d92a69c6 6573acbd73 8ada737f0a 7f486ea6ee
49dbedb1d5 0f14d32792 4d67bb6afa 37599fe75a ea1bc6f623 646069a900 8e73f9d348 f90ecfb1f7
bd1dac0c6e 25f610fc29 eb64a97d33 2ba8de9d5e 1d2839a830 c340fd5a39 88682ca305 f6a7c9f9ec
391aa4079c 6b17d824c5 5656cc82a6 863fcfbf52 90f489b0a4 1788568425 1c1bd618c9 f651708a19
6956d15b77 66ff5e6974 8869589430 f8ae7a414c 17181517ec a39032214d 86920532f7 2a2a582e7c
0825552565 7091b495a0 ddf984d058 feb796d312 c2bedf2dd5 2db720591b ab05b28b22 ca0b6caeed
5aaa1a271c af80a4a554 c0e064ef16 3a6f00329a 4d5a4e4b36 1c75ab062d 3f1f02034c 6c25c6b7da
4537830ea1 5fa7d9a272 3e82bdf738 574a49b96c 61a1d4540d f9f48ef674 b7b78a2db2 fcfb046d91
b73de0d9c2 41bff94b43 c0924a8361 e2665c6fc0 3c525fab6a e1557e6c7a b5784feabd a43c51b297
d86408b7b9 dca63da432 4a5aba16c9 7275935641 98410d0da5 9ccb8dfd47 05c9378b3f b322a77e99
5e02338e5e 6a6d8806a4 77570e3965 60ef6d2896 669a8dba37 7c4f6e179b fba49b107d 3d206f0fcf
81e72e8e29 cffabb641f 4972f5768c d81c2a5f33 a26ac7ebbc 7ad786db2e abf83bc682 7c956d5cdf
19d01b62ca 9fc20f4f9d f95af7785b 92ad0b5c8b 6cf24fa437 b1ede2120a 5b2119c639 929ab58a12
79414d45fb b8fcd322c6 d279ec2532 a221fc85ed 1571907ea0 a06b68b917 7c0d160536 b2a250ce15
fa6815e14e a1ec0caed0 b8beda61a2 c9eabbf9b1 842fd1fbdf 01358df34e 2f6537dc7f 6edbf70760
7fc1a8c5a0 8c702f8ddb 63db10ef6f a2a28b8034 1e2876e016 dbb7337e49 fa60cd1369 4dfc487f96
ab524282e8 d1b3276b55 631eb3837e 5632bbdcbd ba6ed445a3 ddf1bcf788 5da7fdb785 c039428627
61a59b53a5 babe90c90f 9e3dfa96f7 d28314cb03 a8b6efea25 888effe4be 15ae4e29e5 21fd252a62
76b9c13d1d 85abb7cf2a 7b9fc6fca0 05833959e3 1973558b63 5267d3803f 2667c41235 595e725db4
a5d2df3473 d375595d5e c1f3020631 20618c713c fb4e0d400f 8b242ef977 d7971f96b3 d2b5a579dd
10dc57ab22 a83e36637a d5915a167f
Files changed:

.github/workflows
00-RELEASENOTES
deps
runtest-moduleapi
src:
  acl.c, anet.c, aof.c, bio.c, bio.h, childinfo.c, cluster.c, cluster.h, commands.c
  commands: bitfield.json, bitfield_ro.json, bzmpop.json, client-kill.json, client-list.json, cluster-addslots.json, cluster-addslotsrange.json, cluster-countkeysinslot.json, cluster-delslots.json, cluster-delslotsrange.json, cluster-failover.json, cluster-flushslots.json, cluster-forget.json, cluster-keyslot.json, cluster-meet.json, cluster-myid.json, cluster-replicate.json, cluster-reset.json, cluster-saveconfig.json, cluster-set-config-epoch.json, cluster-setslot.json, eval_ro.json, evalsha_ro.json, fcall.json, fcall_ro.json, georadius.json, georadiusbymember.json, geosearch.json, geosearchstore.json, randomkey.json, replicaof.json, scan.json, slaveof.json, xclaim.json, xsetid.json, zmpop.json
  config.c, config.h, connection.c, db.c, debug.c, dict.c, dict.h, eval.c, evict.c, expire.c, function_lua.c, functions.c, functions.h, geo.c, geohash_helper.c, help.h, listpack_malloc.h, module.c, multi.c, networking.c, object.c, pubsub.c, quicklist.c, quicklist.h, rdb.c, redis-benchmark.c, redis-cli.c, redismodule.h, replication.c, script.c, script.h, script_lua.c, sds.c, sds.h, sentinel.c, server.c, server.h, sha1.c, sort.c, syscheck.c, t_hash.c, t_set.c, t_stream.c, t_string.c, t_zset.c, timeout.c, tls.c
.github/workflows/daily.yml (vendored): 12 lines changed

@@ -674,7 +674,7 @@ jobs:
run: ./runtest-cluster ${{github.event.inputs.cluster_test_args}}

test-freebsd:
runs-on: macos-10.15
runs-on: macos-12
if: |
(github.event_name == 'workflow_dispatch' || (github.event_name != 'workflow_dispatch' && github.repository == 'redis/redis')) &&
!contains(github.event.inputs.skipjobs, 'freebsd') && !(contains(github.event.inputs.skiptests, 'redis') && contains(github.event.inputs.skiptests, 'modules'))
@@ -690,7 +690,7 @@ jobs:
repository: ${{ env.GITHUB_REPOSITORY }}
ref: ${{ env.GITHUB_HEAD_REF }}
- name: test
uses: vmactions/freebsd-vm@v0.1.6
uses: vmactions/freebsd-vm@v0.3.0
with:
usesh: true
sync: rsync
@@ -702,7 +702,7 @@ jobs:
if echo "${{github.event.inputs.skiptests}}" | grep -vq modules ; then MAKE=gmake ./runtest-moduleapi --verbose --timeout 2400 --no-latency --dump-logs ${{github.event.inputs.test_args}} || exit 1 ; fi ;

test-freebsd-sentinel:
runs-on: macos-10.15
runs-on: macos-12
if: |
(github.event_name == 'workflow_dispatch' || (github.event_name != 'workflow_dispatch' && github.repository == 'redis/redis')) &&
!contains(github.event.inputs.skipjobs, 'freebsd') && !contains(github.event.inputs.skiptests, 'sentinel')
@@ -718,7 +718,7 @@ jobs:
repository: ${{ env.GITHUB_REPOSITORY }}
ref: ${{ env.GITHUB_HEAD_REF }}
- name: test
uses: vmactions/freebsd-vm@v0.1.6
uses: vmactions/freebsd-vm@v0.3.0
with:
usesh: true
sync: rsync
@@ -729,7 +729,7 @@ jobs:
if echo "${{github.event.inputs.skiptests}}" | grep -vq sentinel ; then ./runtest-sentinel ${{github.event.inputs.cluster_test_args}} || exit 1 ; fi ;

test-freebsd-cluster:
runs-on: macos-10.15
runs-on: macos-12
if: |
(github.event_name == 'workflow_dispatch' || (github.event_name != 'workflow_dispatch' && github.repository == 'redis/redis')) &&
!contains(github.event.inputs.skipjobs, 'freebsd') && !contains(github.event.inputs.skiptests, 'cluster')
@@ -745,7 +745,7 @@ jobs:
repository: ${{ env.GITHUB_REPOSITORY }}
ref: ${{ env.GITHUB_HEAD_REF }}
- name: test
uses: vmactions/freebsd-vm@v0.1.6
uses: vmactions/freebsd-vm@v0.3.0
with:
usesh: true
sync: rsync
00-RELEASENOTES: 1089 lines changed (file diff suppressed because it is too large)
deps/Makefile (vendored): 6 lines changed

@@ -3,6 +3,7 @@
uname_S:= $(shell sh -c 'uname -s 2>/dev/null || echo not')

LUA_DEBUG?=no
LUA_COVERAGE?=no

CCCOLOR="\033[34m"
LINKCOLOR="\033[34;1m"
@@ -78,6 +79,11 @@ ifeq ($(LUA_DEBUG),yes)
else
LUA_CFLAGS+= -O2
endif
ifeq ($(LUA_COVERAGE),yes)
LUA_CFLAGS += -fprofile-arcs -ftest-coverage
LUA_LDFLAGS += -fprofile-arcs -ftest-coverage
endif

# lua's Makefile defines AR="ar rcu", which is unusual, and makes it more
# challenging to cross-compile lua (and redis). These defines make it easier
# to fit redis into cross-compilation environments, which typically set AR.
deps/lua/src/lua_cjson.c (vendored): 9 lines changed

@@ -39,6 +39,7 @@
#include <assert.h>
#include <string.h>
#include <math.h>
#include <stdint.h>
#include <limits.h>
#include "lua.h"
#include "lauxlib.h"
@@ -141,13 +142,13 @@ typedef struct {

typedef struct {
json_token_type_t type;
int index;
size_t index;
union {
const char *string;
double number;
int boolean;
} value;
int string_len;
size_t string_len;
} json_token_t;

static const char *char2escape[256] = {
@@ -473,6 +474,8 @@ static void json_append_string(lua_State *l, strbuf_t *json, int lindex)
* This buffer is reused constantly for small strings
* If there are any excess pages, they won't be hit anyway.
* This gains ~5% speedup. */
if (len > SIZE_MAX / 6 - 3)
abort(); /* Overflow check */
strbuf_ensure_empty_length(json, len * 6 + 2);

strbuf_append_char_unsafe(json, '\"');
@@ -706,7 +709,7 @@ static int json_encode(lua_State *l)
strbuf_t local_encode_buf;
strbuf_t *encode_buf;
char *json;
int len;
size_t len;

luaL_argcheck(l, lua_gettop(l) == 1, 1, "expected 1 argument");

deps/lua/src/lua_cmsgpack.c (vendored): 31 lines changed

@@ -117,7 +117,9 @@ mp_buf *mp_buf_new(lua_State *L) {

void mp_buf_append(lua_State *L, mp_buf *buf, const unsigned char *s, size_t len) {
if (buf->free < len) {
size_t newsize = (buf->len+len)*2;
size_t newsize = buf->len+len;
if (newsize < buf->len || newsize >= SIZE_MAX/2) abort();
newsize *= 2;

buf->b = (unsigned char*)mp_realloc(L, buf->b, buf->len + buf->free, newsize);
buf->free = newsize - buf->len;
@@ -173,7 +175,7 @@ void mp_cur_init(mp_cur *cursor, const unsigned char *s, size_t len) {

void mp_encode_bytes(lua_State *L, mp_buf *buf, const unsigned char *s, size_t len) {
unsigned char hdr[5];
int hdrlen;
size_t hdrlen;

if (len < 32) {
hdr[0] = 0xa0 | (len&0xff); /* fix raw */
@@ -220,7 +222,7 @@ void mp_encode_double(lua_State *L, mp_buf *buf, double d) {

void mp_encode_int(lua_State *L, mp_buf *buf, int64_t n) {
unsigned char b[9];
int enclen;
size_t enclen;

if (n >= 0) {
if (n <= 127) {
@@ -290,9 +292,9 @@ void mp_encode_int(lua_State *L, mp_buf *buf, int64_t n) {
mp_buf_append(L,buf,b,enclen);
}

void mp_encode_array(lua_State *L, mp_buf *buf, int64_t n) {
void mp_encode_array(lua_State *L, mp_buf *buf, uint64_t n) {
unsigned char b[5];
int enclen;
size_t enclen;

if (n <= 15) {
b[0] = 0x90 | (n & 0xf); /* fix array */
@@ -313,7 +315,7 @@ void mp_encode_array(lua_State *L, mp_buf *buf, int64_t n) {
mp_buf_append(L,buf,b,enclen);
}

void mp_encode_map(lua_State *L, mp_buf *buf, int64_t n) {
void mp_encode_map(lua_State *L, mp_buf *buf, uint64_t n) {
unsigned char b[5];
int enclen;

@@ -791,7 +793,7 @@ void mp_decode_to_lua_type(lua_State *L, mp_cur *c) {
}
}

int mp_unpack_full(lua_State *L, int limit, int offset) {
int mp_unpack_full(lua_State *L, lua_Integer limit, lua_Integer offset) {
size_t len;
const char *s;
mp_cur c;
@@ -803,10 +805,10 @@ int mp_unpack_full(lua_State *L, int limit, int offset) {
if (offset < 0 || limit < 0) /* requesting negative off or lim is invalid */
return luaL_error(L,
"Invalid request to unpack with offset of %d and limit of %d.",
offset, len);
(int) offset, (int) len);
else if (offset > len)
return luaL_error(L,
"Start offset %d greater than input length %d.", offset, len);
"Start offset %d greater than input length %d.", (int) offset, (int) len);

if (decode_all) limit = INT_MAX;

@@ -828,12 +830,13 @@ int mp_unpack_full(lua_State *L, int limit, int offset) {
/* c->left is the remaining size of the input buffer.
* subtract the entire buffer size from the unprocessed size
* to get our next start offset */
int offset = len - c.left;
size_t new_offset = len - c.left;
if (new_offset > LONG_MAX) abort();

luaL_checkstack(L, 1, "in function mp_unpack_full");

/* Return offset -1 when we have have processed the entire buffer. */
lua_pushinteger(L, c.left == 0 ? -1 : offset);
lua_pushinteger(L, c.left == 0 ? -1 : (lua_Integer) new_offset);
/* Results are returned with the arg elements still
* in place. Lua takes care of only returning
* elements above the args for us.
@@ -852,15 +855,15 @@ int mp_unpack(lua_State *L) {
}

int mp_unpack_one(lua_State *L) {
int offset = luaL_optinteger(L, 2, 0);
lua_Integer offset = luaL_optinteger(L, 2, 0);
/* Variable pop because offset may not exist */
lua_pop(L, lua_gettop(L)-1);
return mp_unpack_full(L, 1, offset);
}

int mp_unpack_limit(lua_State *L) {
int limit = luaL_checkinteger(L, 2);
int offset = luaL_optinteger(L, 3, 0);
lua_Integer limit = luaL_checkinteger(L, 2);
lua_Integer offset = luaL_optinteger(L, 3, 0);
/* Variable pop because offset may not exist */
lua_pop(L, lua_gettop(L)-1);

deps/lua/src/strbuf.c (vendored): 109 lines changed

@@ -26,6 +26,7 @@
#include <stdlib.h>
#include <stdarg.h>
#include <string.h>
#include <stdint.h>

#include "strbuf.h"

@@ -38,22 +39,22 @@ static void die(const char *fmt, ...)
va_end(arg);
fprintf(stderr, "\n");

exit(-1);
abort();
}

void strbuf_init(strbuf_t *s, int len)
void strbuf_init(strbuf_t *s, size_t len)
{
int size;
size_t size;

if (len <= 0)
if (!len)
size = STRBUF_DEFAULT_SIZE;
else
size = len + 1; /* \0 terminator */

size = len + 1;
if (size < len)
die("Overflow, len: %zu", len);
s->buf = NULL;
s->size = size;
s->length = 0;
s->increment = STRBUF_DEFAULT_INCREMENT;
s->dynamic = 0;
s->reallocs = 0;
s->debug = 0;
@@ -65,7 +66,7 @@ void strbuf_init(strbuf_t *s, int len)
strbuf_ensure_null(s);
}

strbuf_t *strbuf_new(int len)
strbuf_t *strbuf_new(size_t len)
{
strbuf_t *s;

@@ -81,20 +82,10 @@ strbuf_t *strbuf_new(int len)
return s;
}

void strbuf_set_increment(strbuf_t *s, int increment)
{
/* Increment > 0: Linear buffer growth rate
* Increment < -1: Exponential buffer growth rate */
if (increment == 0 || increment == -1)
die("BUG: Invalid string increment");

s->increment = increment;
}

static inline void debug_stats(strbuf_t *s)
{
if (s->debug) {
fprintf(stderr, "strbuf(%lx) reallocs: %d, length: %d, size: %d\n",
fprintf(stderr, "strbuf(%lx) reallocs: %d, length: %zd, size: %zd\n",
(long)s, s->reallocs, s->length, s->size);
}
}
@@ -113,7 +104,7 @@ void strbuf_free(strbuf_t *s)
free(s);
}

char *strbuf_free_to_string(strbuf_t *s, int *len)
char *strbuf_free_to_string(strbuf_t *s, size_t *len)
{
char *buf;

@@ -131,57 +122,62 @@ char *strbuf_free_to_string(strbuf_t *s, int *len)
return buf;
}

static int calculate_new_size(strbuf_t *s, int len)
static size_t calculate_new_size(strbuf_t *s, size_t len)
{
int reqsize, newsize;
size_t reqsize, newsize;

if (len <= 0)
die("BUG: Invalid strbuf length requested");

/* Ensure there is room for optional NULL termination */
reqsize = len + 1;
if (reqsize < len)
die("Overflow, len: %zu", len);

/* If the user has requested to shrink the buffer, do it exactly */
if (s->size > reqsize)
return reqsize;

newsize = s->size;
if (s->increment < 0) {
if (reqsize >= SIZE_MAX / 2) {
newsize = reqsize;
} else {
/* Exponential sizing */
while (newsize < reqsize)
newsize *= -s->increment;
} else {
/* Linear sizing */
newsize = ((newsize + s->increment - 1) / s->increment) * s->increment;
newsize *= 2;
}

if (newsize < reqsize)
die("BUG: strbuf length would overflow, len: %zu", len);

return newsize;
}


/* Ensure strbuf can handle a string length bytes long (ignoring NULL
* optional termination). */
void strbuf_resize(strbuf_t *s, int len)
void strbuf_resize(strbuf_t *s, size_t len)
{
int newsize;
size_t newsize;

newsize = calculate_new_size(s, len);

if (s->debug > 1) {
fprintf(stderr, "strbuf(%lx) resize: %d => %d\n",
fprintf(stderr, "strbuf(%lx) resize: %zd => %zd\n",
(long)s, s->size, newsize);
}

s->size = newsize;
s->buf = realloc(s->buf, s->size);
if (!s->buf)
die("Out of memory");
die("Out of memory, len: %zu", len);
s->reallocs++;
}

void strbuf_append_string(strbuf_t *s, const char *str)
{
int space, i;
int i;
size_t space;

space = strbuf_empty_length(s);

@@ -197,55 +193,6 @@ void strbuf_append_string(strbuf_t *s, const char *str)
}
}

/* strbuf_append_fmt() should only be used when an upper bound
* is known for the output string. */
void strbuf_append_fmt(strbuf_t *s, int len, const char *fmt, ...)
{
va_list arg;
int fmt_len;

strbuf_ensure_empty_length(s, len);

va_start(arg, fmt);
fmt_len = vsnprintf(s->buf + s->length, len, fmt, arg);
va_end(arg);

if (fmt_len < 0)
die("BUG: Unable to convert number"); /* This should never happen.. */

s->length += fmt_len;
}

/* strbuf_append_fmt_retry() can be used when the there is no known
* upper bound for the output string. */
void strbuf_append_fmt_retry(strbuf_t *s, const char *fmt, ...)
{
va_list arg;
int fmt_len, try;
int empty_len;

/* If the first attempt to append fails, resize the buffer appropriately
* and try again */
for (try = 0; ; try++) {
va_start(arg, fmt);
/* Append the new formatted string */
/* fmt_len is the length of the string required, excluding the
* trailing NULL */
empty_len = strbuf_empty_length(s);
/* Add 1 since there is also space to store the terminating NULL. */
fmt_len = vsnprintf(s->buf + s->length, empty_len + 1, fmt, arg);
va_end(arg);

if (fmt_len <= empty_len)
break; /* SUCCESS */
if (try > 0)
die("BUG: length of formatted string changed");

strbuf_resize(s, s->length + fmt_len);
}

s->length += fmt_len;
}

/* vi:ai et sw=4 ts=4:
*/
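As a side note, the hardening in strbuf.c and lua_cmsgpack.c above relies on the standard unsigned wrap-around check. The following is a minimal, self-contained sketch (not part of the diff) of that idiom: when a size_t addition overflows, the result is smaller than either operand, so comparing against an operand detects the wrap.

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Grow a requested length by one byte for the terminator, aborting on
     * size_t wrap-around -- the same idiom the strbuf.c changes use. */
    static size_t checked_plus_one(size_t len) {
        size_t reqsize = len + 1;   /* wraps to 0 when len == SIZE_MAX */
        if (reqsize < len) {        /* wrapped: result is now smaller than len */
            fprintf(stderr, "Overflow, len: %zu\n", len);
            abort();
        }
        return reqsize;
    }

    int main(void) {
        printf("%zu\n", checked_plus_one(1023));  /* prints 1024 */
        /* checked_plus_one(SIZE_MAX) would abort instead of returning 0. */
        return 0;
    }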
deps/lua/src/strbuf.h (vendored): 46 lines changed

@@ -27,15 +27,13 @@

/* Size: Total bytes allocated to *buf
* Length: String length, excluding optional NULL terminator.
* Increment: Allocation increments when resizing the string buffer.
* Dynamic: True if created via strbuf_new()
*/

typedef struct {
char *buf;
int size;
int length;
int increment;
size_t size;
size_t length;
int dynamic;
int reallocs;
int debug;
@@ -44,32 +42,26 @@ typedef struct {
#ifndef STRBUF_DEFAULT_SIZE
#define STRBUF_DEFAULT_SIZE 1023
#endif
#ifndef STRBUF_DEFAULT_INCREMENT
#define STRBUF_DEFAULT_INCREMENT -2
#endif

/* Initialise */
extern strbuf_t *strbuf_new(int len);
extern void strbuf_init(strbuf_t *s, int len);
extern void strbuf_set_increment(strbuf_t *s, int increment);
extern strbuf_t *strbuf_new(size_t len);
extern void strbuf_init(strbuf_t *s, size_t len);

/* Release */
extern void strbuf_free(strbuf_t *s);
extern char *strbuf_free_to_string(strbuf_t *s, int *len);
extern char *strbuf_free_to_string(strbuf_t *s, size_t *len);

/* Management */
extern void strbuf_resize(strbuf_t *s, int len);
static int strbuf_empty_length(strbuf_t *s);
static int strbuf_length(strbuf_t *s);
static char *strbuf_string(strbuf_t *s, int *len);
static void strbuf_ensure_empty_length(strbuf_t *s, int len);
extern void strbuf_resize(strbuf_t *s, size_t len);
static size_t strbuf_empty_length(strbuf_t *s);
static size_t strbuf_length(strbuf_t *s);
static char *strbuf_string(strbuf_t *s, size_t *len);
static void strbuf_ensure_empty_length(strbuf_t *s, size_t len);
static char *strbuf_empty_ptr(strbuf_t *s);
static void strbuf_extend_length(strbuf_t *s, int len);
static void strbuf_extend_length(strbuf_t *s, size_t len);

/* Update */
extern void strbuf_append_fmt(strbuf_t *s, int len, const char *fmt, ...);
extern void strbuf_append_fmt_retry(strbuf_t *s, const char *format, ...);
static void strbuf_append_mem(strbuf_t *s, const char *c, int len);
static void strbuf_append_mem(strbuf_t *s, const char *c, size_t len);
extern void strbuf_append_string(strbuf_t *s, const char *str);
static void strbuf_append_char(strbuf_t *s, const char c);
static void strbuf_ensure_null(strbuf_t *s);
@@ -87,12 +79,12 @@ static inline int strbuf_allocated(strbuf_t *s)

/* Return bytes remaining in the string buffer
* Ensure there is space for a NULL terminator. */
static inline int strbuf_empty_length(strbuf_t *s)
static inline size_t strbuf_empty_length(strbuf_t *s)
{
return s->size - s->length - 1;
}

static inline void strbuf_ensure_empty_length(strbuf_t *s, int len)
static inline void strbuf_ensure_empty_length(strbuf_t *s, size_t len)
{
if (len > strbuf_empty_length(s))
strbuf_resize(s, s->length + len);
@@ -103,12 +95,12 @@ static inline char *strbuf_empty_ptr(strbuf_t *s)
return s->buf + s->length;
}

static inline void strbuf_extend_length(strbuf_t *s, int len)
static inline void strbuf_extend_length(strbuf_t *s, size_t len)
{
s->length += len;
}

static inline int strbuf_length(strbuf_t *s)
static inline size_t strbuf_length(strbuf_t *s)
{
return s->length;
}
@@ -124,14 +116,14 @@ static inline void strbuf_append_char_unsafe(strbuf_t *s, const char c)
s->buf[s->length++] = c;
}

static inline void strbuf_append_mem(strbuf_t *s, const char *c, int len)
static inline void strbuf_append_mem(strbuf_t *s, const char *c, size_t len)
{
strbuf_ensure_empty_length(s, len);
memcpy(s->buf + s->length, c, len);
s->length += len;
}

static inline void strbuf_append_mem_unsafe(strbuf_t *s, const char *c, int len)
static inline void strbuf_append_mem_unsafe(strbuf_t *s, const char *c, size_t len)
{
memcpy(s->buf + s->length, c, len);
s->length += len;
@@ -142,7 +134,7 @@ static inline void strbuf_ensure_null(strbuf_t *s)
s->buf[s->length] = 0;
}

static inline char *strbuf_string(strbuf_t *s, int *len)
static inline char *strbuf_string(strbuf_t *s, size_t *len)
{
if (len)
*len = s->length;
runtest-moduleapi:

@@ -50,4 +50,5 @@ $TCLSH tests/test_helper.tcl \
--single unit/moduleapi/eventloop \
--single unit/moduleapi/timer \
--single unit/moduleapi/publish \
--single unit/moduleapi/usercall \
"${@}"
src/acl.c: 162 lines changed

@@ -385,6 +385,7 @@ user *ACLCreateUser(const char *name, size_t namelen) {
u->name = sdsnewlen(name,namelen);
u->flags = USER_FLAG_DISABLED;
u->passwords = listCreate();
u->acl_string = NULL;
listSetMatchMethod(u->passwords,ACLListMatchSds);
listSetFreeMethod(u->passwords,ACLListFreeSds);
listSetDupMethod(u->passwords,ACLListDupSds);
@@ -422,6 +423,10 @@ user *ACLCreateUnlinkedUser(void) {
* will not remove the user from the Users global radix tree. */
void ACLFreeUser(user *u) {
sdsfree(u->name);
if (u->acl_string) {
decrRefCount(u->acl_string);
u->acl_string = NULL;
}
listRelease(u->passwords);
listRelease(u->selectors);
zfree(u);
@@ -466,6 +471,14 @@ void ACLCopyUser(user *dst, user *src) {
dst->passwords = listDup(src->passwords);
dst->selectors = listDup(src->selectors);
dst->flags = src->flags;
if (dst->acl_string) {
decrRefCount(dst->acl_string);
}
dst->acl_string = src->acl_string;
if (dst->acl_string) {
/* if src is NULL, we set it to NULL, if not, need to increment reference count */
incrRefCount(dst->acl_string);
}
}

/* Free all the users registered in the radix tree 'users' and free the
@@ -802,7 +815,12 @@ sds ACLDescribeSelector(aclSelector *selector) {
* the ACLDescribeSelectorCommandRules() function. This is the function we call
* when we want to rewrite the configuration files describing ACLs and
* in order to show users with ACL LIST. */
sds ACLDescribeUser(user *u) {
robj *ACLDescribeUser(user *u) {
if (u->acl_string) {
incrRefCount(u->acl_string);
return u->acl_string;
}

sds res = sdsempty();

/* Flags. */
@@ -836,7 +854,12 @@ sds ACLDescribeUser(user *u) {
}
sdsfree(default_perm);
}
return res;

u->acl_string = createObject(OBJ_STRING, res);
/* because we are returning it, have to increase count */
incrRefCount(u->acl_string);

return u->acl_string;
}

/* Get a command from the original command table, that is not affected
@@ -1208,6 +1231,12 @@ int ACLSetSelector(aclSelector *selector, const char* op, size_t oplen) {
* ECHILD: Attempt to allow a specific first argument of a subcommand
*/
int ACLSetUser(user *u, const char *op, ssize_t oplen) {
/* as we are changing the ACL, the old generated string is now invalid */
if (u->acl_string) {
decrRefCount(u->acl_string);
u->acl_string = NULL;
}

if (oplen == -1) oplen = strlen(op);
if (oplen == 0) return C_OK; /* Empty string is a no-operation. */
if (!strcasecmp(op,"on")) {
@@ -1815,6 +1844,13 @@ void ACLKillPubsubClientsIfNeeded(user *new, user *original) {
listRewind(original->selectors,&li);
while((ln = listNext(&li)) && match) {
aclSelector *s = (aclSelector *) listNodeValue(ln);
/* If any of the original selectors has the all-channels permission, but
* the new ones don't (this is checked earlier in this function), then the
* new list is not a strict superset of the original. */
if (s->flags & SELECTOR_FLAG_ALLCHANNELS) {
match = 0;
break;
}
listRewind(s->channels, &lpi);
while((lpn = listNext(&lpi)) && match) {
if (!listSearchKey(upcoming, listNodeValue(lpn))) {
@@ -1937,6 +1973,68 @@ sds *ACLMergeSelectorArguments(sds *argv, int argc, int *merged_argc, int *inval
return acl_args;
}

/* takes an acl string already split on spaces and adds it to the given user
* if the user object is NULL, will create a user with the given username
*
* Returns an error as an sds string if the ACL string is not parsable
*/
sds ACLStringSetUser(user *u, sds username, sds *argv, int argc) {
serverAssert(u != NULL || username != NULL);

sds error = NULL;

int merged_argc = 0, invalid_idx = 0;
sds *acl_args = ACLMergeSelectorArguments(argv, argc, &merged_argc, &invalid_idx);

if (!acl_args) {
return sdscatfmt(sdsempty(),
"Unmatched parenthesis in acl selector starting "
"at '%s'.", (char *) argv[invalid_idx]);
}

/* Create a temporary user to validate and stage all changes against
* before applying to an existing user or creating a new user. If all
* arguments are valid the user parameters will all be applied together.
* If there are any errors then none of the changes will be applied. */
user *tempu = ACLCreateUnlinkedUser();
if (u) {
ACLCopyUser(tempu, u);
}

for (int j = 0; j < merged_argc; j++) {
if (ACLSetUser(tempu,acl_args[j],(ssize_t) sdslen(acl_args[j])) != C_OK) {
const char *errmsg = ACLSetUserStringError();
error = sdscatfmt(sdsempty(),
"Error in ACL SETUSER modifier '%s': %s",
(char*)acl_args[j], errmsg);
goto cleanup;
}
}

/* Existing pub/sub clients authenticated with the user may need to be
* disconnected if (some of) their channel permissions were revoked. */
if (u) {
ACLKillPubsubClientsIfNeeded(tempu, u);
}

/* Overwrite the user with the temporary user we modified above. */
if (!u) {
u = ACLCreateUser(username,sdslen(username));
}
serverAssert(u != NULL);

ACLCopyUser(u, tempu);

cleanup:
ACLFreeUser(tempu);
for (int i = 0; i < merged_argc; i++) {
sdsfree(acl_args[i]);
}
zfree(acl_args);

return error;
}

/* Given an argument vector describing a user in the form:
*
* user <username> ... ACL rules and flags ...
@@ -2252,9 +2350,9 @@ int ACLSaveToFile(const char *filename) {
sds user = sdsnew("user ");
user = sdscatsds(user,u->name);
user = sdscatlen(user," ",1);
sds descr = ACLDescribeUser(u);
user = sdscatsds(user,descr);
sdsfree(descr);
robj *descr = ACLDescribeUser(u);
user = sdscatsds(user,descr->ptr);
decrRefCount(descr);
acl = sdscatsds(acl,user);
acl = sdscatlen(acl,"\n",1);
sdsfree(user);
@@ -2571,50 +2669,18 @@ void aclCommand(client *c) {
return;
}

int merged_argc = 0, invalid_idx = 0;
user *u = ACLGetUserByName(username,sdslen(username));

sds *temp_argv = zmalloc(c->argc * sizeof(sds));
for (int i = 3; i < c->argc; i++) temp_argv[i-3] = c->argv[i]->ptr;
sds *acl_args = ACLMergeSelectorArguments(temp_argv, c->argc - 3, &merged_argc, &invalid_idx);

sds error = ACLStringSetUser(u, username, temp_argv, c->argc - 3);
zfree(temp_argv);

if (!acl_args) {
addReplyErrorFormat(c,
"Unmatched parenthesis in acl selector starting "
"at '%s'.", (char *) c->argv[invalid_idx]->ptr);
return;
if (error == NULL) {
addReply(c,shared.ok);
} else {
addReplyErrorSdsSafe(c, error);
}

/* Create a temporary user to validate and stage all changes against
* before applying to an existing user or creating a new user. If all
* arguments are valid the user parameters will all be applied together.
* If there are any errors then none of the changes will be applied. */
user *tempu = ACLCreateUnlinkedUser();
user *u = ACLGetUserByName(username,sdslen(username));
if (u) ACLCopyUser(tempu, u);

for (int j = 0; j < merged_argc; j++) {
if (ACLSetUser(tempu,acl_args[j],sdslen(acl_args[j])) != C_OK) {
const char *errmsg = ACLSetUserStringError();
addReplyErrorFormat(c,
"Error in ACL SETUSER modifier '%s': %s",
(char*)acl_args[j], errmsg);
goto setuser_cleanup;
}
}

/* Existing pub/sub clients authenticated with the user may need to be
* disconnected if (some of) their channel permissions were revoked. */
if (u) ACLKillPubsubClientsIfNeeded(tempu, u);

/* Overwrite the user with the temporary user we modified above. */
if (!u) u = ACLCreateUser(username,sdslen(username));
serverAssert(u != NULL);
ACLCopyUser(u, tempu);
addReply(c,shared.ok);
setuser_cleanup:
ACLFreeUser(tempu);
for (int i = 0; i < merged_argc; i++) sdsfree(acl_args[i]);
zfree(acl_args);
return;
} else if (!strcasecmp(sub,"deluser") && c->argc >= 3) {
int deleted = 0;
@@ -2701,9 +2767,9 @@ setuser_cleanup:
sds config = sdsnew("user ");
config = sdscatsds(config,u->name);
config = sdscatlen(config," ",1);
sds descr = ACLDescribeUser(u);
config = sdscatsds(config,descr);
sdsfree(descr);
robj *descr = ACLDescribeUser(u);
config = sdscatsds(config,descr->ptr);
decrRefCount(descr);
addReplyBulkSds(c,config);
}
}

src/anet.c: 11 lines changed

@@ -407,13 +407,16 @@ int anetUnixGenericConnect(char *err, const char *path, int flags)
return s;
}

static int anetListen(char *err, int s, struct sockaddr *sa, socklen_t len, int backlog) {
static int anetListen(char *err, int s, struct sockaddr *sa, socklen_t len, int backlog, mode_t perm) {
if (bind(s,sa,len) == -1) {
anetSetError(err, "bind: %s", strerror(errno));
close(s);
return ANET_ERR;
}

if (sa->sa_family == AF_LOCAL && perm)
chmod(((struct sockaddr_un *) sa)->sun_path, perm);

if (listen(s, backlog) == -1) {
anetSetError(err, "listen: %s", strerror(errno));
close(s);
@@ -457,7 +460,7 @@ static int _anetTcpServer(char *err, int port, char *bindaddr, int af, int backl

if (af == AF_INET6 && anetV6Only(err,s) == ANET_ERR) goto error;
if (anetSetReuseAddr(err,s) == ANET_ERR) goto error;
if (anetListen(err,s,p->ai_addr,p->ai_addrlen,backlog) == ANET_ERR) s = ANET_ERR;
if (anetListen(err,s,p->ai_addr,p->ai_addrlen,backlog,0) == ANET_ERR) s = ANET_ERR;
goto end;
}
if (p == NULL) {
@@ -498,10 +501,8 @@ int anetUnixServer(char *err, char *path, mode_t perm, int backlog)
memset(&sa,0,sizeof(sa));
sa.sun_family = AF_LOCAL;
strncpy(sa.sun_path,path,sizeof(sa.sun_path)-1);
if (anetListen(err,s,(struct sockaddr*)&sa,sizeof(sa),backlog) == ANET_ERR)
if (anetListen(err,s,(struct sockaddr*)&sa,sizeof(sa),backlog,perm) == ANET_ERR)
return ANET_ERR;
if (perm)
chmod(sa.sun_path, perm);
return s;
}

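The practical effect of the anet.c change above is that the unix-socket permission fix now happens between bind() and listen(), rather than after the socket is already accepting connections. A minimal standalone sketch of that ordering, with assumed names that are not part of the diff:

    #include <sys/socket.h>
    #include <sys/stat.h>
    #include <sys/un.h>
    #include <string.h>
    #include <unistd.h>

    /* Bind a unix socket, fix its permissions, then start listening.
     * Returns the listening fd or -1 on error. */
    static int unix_listen_with_perm(const char *path, mode_t perm, int backlog) {
        int s = socket(AF_UNIX, SOCK_STREAM, 0);
        if (s == -1) return -1;

        struct sockaddr_un sa;
        memset(&sa, 0, sizeof(sa));
        sa.sun_family = AF_UNIX;
        strncpy(sa.sun_path, path, sizeof(sa.sun_path) - 1);

        if (bind(s, (struct sockaddr *)&sa, sizeof(sa)) == -1) { close(s); return -1; }
        if (perm) chmod(sa.sun_path, perm);  /* before listen(): no window with default perms */
        if (listen(s, backlog) == -1) { close(s); return -1; }
        return s;
    }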
src/aof.c: 51 lines changed

@@ -49,6 +49,7 @@ int aofFileExist(char *filename);
int rewriteAppendOnlyFile(char *filename);
aofManifest *aofLoadManifestFromFile(sds am_filepath);
void aofManifestFreeAndUpdate(aofManifest *am);
void aof_background_fsync_and_close(int fd);

/* ----------------------------------------------------------------------------
* AOF Manifest file implementation.
@@ -757,6 +758,7 @@ void aofOpenIfNeededOnServerStart(void) {
}

server.aof_last_incr_size = getAppendOnlyFileSize(aof_name, NULL);
server.aof_last_incr_fsync_offset = server.aof_last_incr_size;

if (incr_aof_len) {
serverLog(LL_NOTICE, "Opening AOF incr file %s on server start", aof_name);
@@ -826,12 +828,19 @@ int openNewIncrAofForAppend(void) {
/* If reaches here, we can safely modify the `server.aof_manifest`
* and `server.aof_fd`. */

/* Close old aof_fd if needed. */
if (server.aof_fd != -1) bioCreateCloseJob(server.aof_fd);
/* fsync and close old aof_fd if needed. In fsync everysec it's ok to delay
* the fsync as long as we grantee it happens, and in fsync always the file
* is already synced at this point so fsync doesn't matter. */
if (server.aof_fd != -1) {
aof_background_fsync_and_close(server.aof_fd);
server.aof_last_fsync = server.unixtime;
}
server.aof_fd = newfd;

/* Reset the aof_last_incr_size. */
server.aof_last_incr_size = 0;
/* Reset the aof_last_incr_fsync_offset. */
server.aof_last_incr_fsync_offset = 0;
/* Update `server.aof_manifest`. */
if (temp_am) aofManifestFreeAndUpdate(temp_am);
return C_OK;
@@ -904,6 +913,9 @@ int aofRewriteLimited(void) {
/* Return true if an AOf fsync is currently already in progress in a
* BIO thread. */
int aofFsyncInProgress(void) {
/* Note that we don't care about aof_background_fsync_and_close because
* server.aof_fd has been replaced by the new INCR AOF file fd,
* see openNewIncrAofForAppend. */
return bioPendingJobsOfType(BIO_AOF_FSYNC) != 0;
}

@@ -913,6 +925,11 @@ void aof_background_fsync(int fd) {
bioCreateFsyncJob(fd);
}

/* Close the fd on the basis of aof_background_fsync. */
void aof_background_fsync_and_close(int fd) {
bioCreateCloseJob(fd, 1);
}

/* Kills an AOFRW child process if exists */
void killAppendOnlyChild(void) {
int statloc;
@@ -937,7 +954,6 @@ void stopAppendOnly(void) {
if (redis_fsync(server.aof_fd) == -1) {
serverLog(LL_WARNING,"Fail to fsync the AOF file: %s",strerror(errno));
} else {
server.aof_fsync_offset = server.aof_current_size;
server.aof_last_fsync = server.unixtime;
}
close(server.aof_fd);
@@ -947,6 +963,7 @@ void stopAppendOnly(void) {
server.aof_state = AOF_OFF;
server.aof_rewrite_scheduled = 0;
server.aof_last_incr_size = 0;
server.aof_last_incr_fsync_offset = 0;
killAppendOnlyChild();
sdsfree(server.aof_buf);
server.aof_buf = sdsempty();
@@ -1054,10 +1071,19 @@ void flushAppendOnlyFile(int force) {
* stop write commands before fsync called in one second,
* the data in page cache cannot be flushed in time. */
if (server.aof_fsync == AOF_FSYNC_EVERYSEC &&
server.aof_fsync_offset != server.aof_current_size &&
server.aof_last_incr_fsync_offset != server.aof_last_incr_size &&
server.unixtime > server.aof_last_fsync &&
!(sync_in_progress = aofFsyncInProgress())) {
goto try_fsync;

/* Check if we need to do fsync even the aof buffer is empty,
* the reason is described in the previous AOF_FSYNC_EVERYSEC block,
* and AOF_FSYNC_ALWAYS is also checked here to handle a case where
* aof_fsync is changed from everysec to always. */
} else if (server.aof_fsync == AOF_FSYNC_ALWAYS &&
server.aof_last_incr_fsync_offset != server.aof_last_incr_size)
{
goto try_fsync;
} else {
return;
}
@@ -1224,13 +1250,13 @@ try_fsync:
}
latencyEndMonitor(latency);
latencyAddSampleIfNeeded("aof-fsync-always",latency);
server.aof_fsync_offset = server.aof_current_size;
server.aof_last_incr_fsync_offset = server.aof_last_incr_size;
server.aof_last_fsync = server.unixtime;
} else if ((server.aof_fsync == AOF_FSYNC_EVERYSEC &&
server.unixtime > server.aof_last_fsync)) {
} else if (server.aof_fsync == AOF_FSYNC_EVERYSEC &&
server.unixtime > server.aof_last_fsync) {
if (!sync_in_progress) {
aof_background_fsync(server.aof_fd);
server.aof_fsync_offset = server.aof_current_size;
server.aof_last_incr_fsync_offset = server.aof_last_incr_size;
}
server.aof_last_fsync = server.unixtime;
}
@@ -1670,6 +1696,7 @@ int loadAppendOnlyFiles(aofManifest *am) {
/* If the truncated file is not the last file, we consider this to be a fatal error. */
if (ret == AOF_TRUNCATED && !last_file) {
ret = AOF_FAILED;
serverLog(LL_WARNING, "Fatal error: the truncated file is not the last file");
}

if (ret == AOF_OPEN_ERR || ret == AOF_FAILED) {
@@ -1700,8 +1727,10 @@ int loadAppendOnlyFiles(aofManifest *am) {
* so empty incr AOF file doesn't count as a AOF_EMPTY result */
if (ret == AOF_EMPTY) ret = AOF_OK;

/* If the truncated file is not the last file, we consider this to be a fatal error. */
if (ret == AOF_TRUNCATED && !last_file) {
ret = AOF_FAILED;
serverLog(LL_WARNING, "Fatal error: the truncated file is not the last file");
}

if (ret == AOF_OPEN_ERR || ret == AOF_FAILED) {
@@ -1721,7 +1750,6 @@ int loadAppendOnlyFiles(aofManifest *am) {
* executed early, but that shouldn't be a problem since everything will be
* fine after the first AOFRW. */
server.aof_rewrite_base_size = base_size;
server.aof_fsync_offset = server.aof_current_size;

cleanup:
stopLoading(ret == AOF_OK || ret == AOF_TRUNCATED);
@@ -2642,13 +2670,10 @@ void backgroundRewriteDoneHandler(int exitcode, int bysignal) {
/* We can safely let `server.aof_manifest` point to 'temp_am' and free the previous one. */
aofManifestFreeAndUpdate(temp_am);

if (server.aof_fd != -1) {
if (server.aof_state != AOF_OFF) {
/* AOF enabled. */
server.aof_selected_db = -1; /* Make sure SELECT is re-issued */
server.aof_current_size = getAppendOnlyFileSize(new_base_filename, NULL) + server.aof_last_incr_size;
server.aof_rewrite_base_size = server.aof_current_size;
server.aof_fsync_offset = server.aof_current_size;
server.aof_last_fsync = server.unixtime;
}

/* We don't care about the return value of `aofDelHistoryFiles`, because the history
src/bio.c: 47 lines changed

@@ -76,12 +76,19 @@ static unsigned long long bio_pending[BIO_NUM_OPS];

/* This structure represents a background Job. It is only used locally to this
* file as the API does not expose the internals at all. */
struct bio_job {
typedef union bio_job {
/* Job specific arguments.*/
int fd; /* Fd for file based background jobs */
lazy_free_fn *free_fn; /* Function that will free the provided arguments */
void *free_args[]; /* List of arguments to be passed to the free function */
};
struct {
int fd; /* Fd for file based background jobs */
unsigned need_fsync:1; /* A flag to indicate that a fsync is required before
* the file is closed. */
} fd_args;

struct {
lazy_free_fn *free_fn; /* Function that will free the provided arguments */
void *free_args[]; /* List of arguments to be passed to the free function */
} free_args;
} bio_job;

void *bioProcessBackgroundJobs(void *arg);

@@ -125,7 +132,7 @@ void bioInit(void) {
}
}

void bioSubmitJob(int type, struct bio_job *job) {
void bioSubmitJob(int type, bio_job *job) {
pthread_mutex_lock(&bio_mutex[type]);
listAddNodeTail(bio_jobs[type],job);
bio_pending[type]++;
@@ -137,33 +144,34 @@ void bioCreateLazyFreeJob(lazy_free_fn free_fn, int arg_count, ...) {
va_list valist;
/* Allocate memory for the job structure and all required
* arguments */
struct bio_job *job = zmalloc(sizeof(*job) + sizeof(void *) * (arg_count));
job->free_fn = free_fn;
bio_job *job = zmalloc(sizeof(*job) + sizeof(void *) * (arg_count));
job->free_args.free_fn = free_fn;

va_start(valist, arg_count);
for (int i = 0; i < arg_count; i++) {
job->free_args[i] = va_arg(valist, void *);
job->free_args.free_args[i] = va_arg(valist, void *);
}
va_end(valist);
bioSubmitJob(BIO_LAZY_FREE, job);
}

void bioCreateCloseJob(int fd) {
struct bio_job *job = zmalloc(sizeof(*job));
job->fd = fd;
void bioCreateCloseJob(int fd, int need_fsync) {
bio_job *job = zmalloc(sizeof(*job));
job->fd_args.fd = fd;
job->fd_args.need_fsync = need_fsync;

bioSubmitJob(BIO_CLOSE_FILE, job);
}

void bioCreateFsyncJob(int fd) {
struct bio_job *job = zmalloc(sizeof(*job));
job->fd = fd;
bio_job *job = zmalloc(sizeof(*job));
job->fd_args.fd = fd;

bioSubmitJob(BIO_AOF_FSYNC, job);
}

void *bioProcessBackgroundJobs(void *arg) {
struct bio_job *job;
bio_job *job;
unsigned long type = (unsigned long) arg;
sigset_t sigset;

@@ -216,12 +224,15 @@ void *bioProcessBackgroundJobs(void *arg) {

/* Process the job accordingly to its type. */
if (type == BIO_CLOSE_FILE) {
close(job->fd);
if (job->fd_args.need_fsync) {
redis_fsync(job->fd_args.fd);
}
close(job->fd_args.fd);
} else if (type == BIO_AOF_FSYNC) {
/* The fd may be closed by main thread and reused for another
* socket, pipe, or file. We just ignore these errno because
* aof fsync did not really fail. */
if (redis_fsync(job->fd) == -1 &&
if (redis_fsync(job->fd_args.fd) == -1 &&
errno != EBADF && errno != EINVAL)
{
int last_status;
@@ -236,7 +247,7 @@ void *bioProcessBackgroundJobs(void *arg) {
atomicSet(server.aof_bio_fsync_status,C_OK);
}
} else if (type == BIO_LAZY_FREE) {
job->free_fn(job->free_args);
job->free_args.free_fn(job->free_args.free_args);
} else {
serverPanic("Wrong job type in bioProcessBackgroundJobs().");
}
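The bio.c change above turns the background job into a tagged-by-queue union: file-descriptor jobs and lazy-free jobs never coexist in one allocation, so a union keeps the job small while adding the need_fsync flag. A minimal, self-contained sketch of the same layout (assumed names, not the Redis implementation):

    #include <stdlib.h>
    #include <unistd.h>

    typedef void (lazy_free_fn)(void *args[]);

    /* One allocation, two interpretations, selected by the queue type. */
    typedef union bio_job_sketch {
        struct {
            int fd;
            unsigned need_fsync:1;   /* fsync before close when set */
        } fd_args;
        struct {
            lazy_free_fn *free_fn;
            void *free_args[];       /* flexible array, sized at allocation time */
        } free_args;
    } bio_job_sketch;

    /* Worker-side handling of a close job, mirroring BIO_CLOSE_FILE. */
    static void process_close_job(bio_job_sketch *job) {
        if (job->fd_args.need_fsync) fsync(job->fd_args.fd);
        close(job->fd_args.fd);
        free(job);
    }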
src/bio.h:

@@ -37,7 +37,7 @@ void bioInit(void);
unsigned long long bioPendingJobsOfType(int type);
unsigned long long bioWaitStepOfType(int type);
void bioKillThreads(void);
void bioCreateCloseJob(int fd);
void bioCreateCloseJob(int fd, int need_fsync);
void bioCreateFsyncJob(int fd);
void bioCreateLazyFreeJob(lazy_free_fn free_fn, int arg_count, ...);

src/childinfo.c:

@@ -112,7 +112,9 @@ void sendChildInfoGeneric(childInfoType info_type, size_t keys, double progress,
ssize_t wlen = sizeof(data);

if (write(server.child_info_pipe[1], &data, wlen) != wlen) {
/* Nothing to do on error, this will be detected by the other side. */
/* Failed writing to parent, it could have been killed, exit. */
serverLog(LL_WARNING,"Child failed reporting info to parent, exiting. %s", strerror(errno));
exitFromChild(1);
}
}

164
src/cluster.c
164
src/cluster.c
@ -66,6 +66,8 @@ void clusterSetMaster(clusterNode *n);
|
||||
void clusterHandleSlaveFailover(void);
|
||||
void clusterHandleSlaveMigration(int max_slaves);
|
||||
int bitmapTestBit(unsigned char *bitmap, int pos);
|
||||
void bitmapSetBit(unsigned char *bitmap, int pos);
|
||||
void bitmapClearBit(unsigned char *bitmap, int pos);
|
||||
void clusterDoBeforeSleep(int flags);
|
||||
void clusterSendUpdate(clusterLink *link, clusterNode *node);
|
||||
void resetManualFailover(void);
|
||||
@ -91,6 +93,10 @@ unsigned int delKeysInSlot(unsigned int hashslot);
|
||||
#define dictEntryPrevInSlot(de) \
|
||||
(((clusterDictEntryMetadata *)dictMetadata(de))->prev)
|
||||
|
||||
#define isSlotUnclaimed(slot) \
|
||||
(server.cluster->slots[slot] == NULL || \
|
||||
bitmapTestBit(server.cluster->owner_not_claiming_slot, slot))
|
||||
|
||||
#define RCVBUF_INIT_LEN 1024
|
||||
#define RCVBUF_MAX_PREALLOC (1<<20) /* 1MB */
|
||||
|
||||
@ -556,6 +562,14 @@ void clusterUpdateMyselfFlags(void) {
|
||||
}
|
||||
|
||||
|
||||
/* We want to take myself->port/cport/pport in sync with the
|
||||
* cluster-announce-port/cluster-announce-bus-port/cluster-announce-tls-port option.
|
||||
* The option can be set at runtime via CONFIG SET. */
|
||||
void clusterUpdateMyselfAnnouncedPorts(void) {
|
||||
if (!myself) return;
|
||||
deriveAnnouncedPorts(&myself->port,&myself->pport,&myself->cport);
|
||||
}
|
||||
|
||||
/* We want to take myself->ip in sync with the cluster-announce-ip option.
|
||||
* The option can be set at runtime via CONFIG SET. */
|
||||
void clusterUpdateMyselfIp(void) {
|
||||
@ -813,10 +827,15 @@ void setClusterNodeToInboundClusterLink(clusterNode *node, clusterLink *link) {
|
||||
/* A peer may disconnect and then reconnect with us, and it's not guaranteed that
|
||||
* we would always process the disconnection of the existing inbound link before
|
||||
* accepting a new existing inbound link. Therefore, it's possible to have more than
|
||||
* one inbound link from the same node at the same time. */
|
||||
* one inbound link from the same node at the same time. Our cleanup logic assumes
|
||||
* a one to one relationship between nodes and inbound links, so we need to kill
|
||||
* one of the links. The existing link is more likely the outdated one, but it's
|
||||
* possible the the other node may need to open another link. */
|
||||
serverLog(LL_DEBUG, "Replacing inbound link fd %d from node %.40s with fd %d",
|
||||
node->inbound_link->conn->fd, node->name, link->conn->fd);
|
||||
freeClusterLink(node->inbound_link);
|
||||
}
|
||||
serverAssert(!node->inbound_link);
|
||||
node->inbound_link = link;
|
||||
link->node = node;
|
||||
}
|
||||
@ -878,7 +897,7 @@ void clusterAcceptHandler(aeEventLoop *el, int fd, void *privdata, int mask) {
|
||||
return;
|
||||
}
|
||||
connEnableTcpNoDelay(conn);
|
||||
connKeepAlive(conn,server.cluster_node_timeout * 2);
|
||||
connKeepAlive(conn,server.cluster_node_timeout / 1000 * 2);
|
||||
|
||||
/* Use non-blocking I/O for cluster messages. */
|
||||
serverLog(LL_VERBOSE,"Accepting cluster node connection from %s:%d", cip, cport);
|
||||
@ -1747,12 +1766,18 @@ void clusterProcessGossipSection(clusterMsg *hdr, clusterLink *link) {
|
||||
/* IP -> string conversion. 'buf' is supposed to at least be 46 bytes.
|
||||
* If 'announced_ip' length is non-zero, it is used instead of extracting
|
||||
* the IP from the socket peer address. */
|
||||
void nodeIp2String(char *buf, clusterLink *link, char *announced_ip) {
|
||||
int nodeIp2String(char *buf, clusterLink *link, char *announced_ip) {
|
||||
if (announced_ip[0] != '\0') {
|
||||
memcpy(buf,announced_ip,NET_IP_STR_LEN);
|
||||
buf[NET_IP_STR_LEN-1] = '\0'; /* We are not sure the input is sane. */
|
||||
return C_OK;
|
||||
} else {
|
||||
connPeerToString(link->conn, buf, NET_IP_STR_LEN, NULL);
|
||||
if (connPeerToString(link->conn, buf, NET_IP_STR_LEN, NULL) == C_ERR) {
|
||||
serverLog(LL_NOTICE, "Error converting peer IP to string: %s",
|
||||
link->conn ? connGetLastError(link->conn) : "no link");
|
||||
return C_ERR;
|
||||
}
|
||||
return C_OK;
|
||||
}
|
||||
}
|
||||
|
||||
@ -1784,7 +1809,11 @@ int nodeUpdateAddressIfNeeded(clusterNode *node, clusterLink *link,
|
||||
* it is safe to call during packet processing. */
|
||||
if (link == node->link) return 0;
|
||||
|
||||
nodeIp2String(ip,link,hdr->myip);
|
||||
/* If the peer IP is unavailable for some reasons like invalid fd or closed
|
||||
* link, just give up the update this time, and the update will be retried
|
||||
* in the next round of PINGs */
|
||||
if (nodeIp2String(ip,link,hdr->myip) == C_ERR) return 0;
|
||||
|
||||
if (node->port == port && node->cport == cport && node->pport == pport &&
|
||||
strcmp(ip,node->ip) == 0) return 0;
|
||||
|
||||
@ -1869,7 +1898,10 @@ void clusterUpdateSlotsConfigWith(clusterNode *sender, uint64_t senderConfigEpoc
|
||||
sender_slots++;
|
||||
|
||||
/* The slot is already bound to the sender of this message. */
|
||||
if (server.cluster->slots[j] == sender) continue;
|
||||
if (server.cluster->slots[j] == sender) {
|
||||
bitmapClearBit(server.cluster->owner_not_claiming_slot, j);
|
||||
continue;
|
||||
}
|
||||
|
||||
/* The slot is in importing state, it should be modified only
|
||||
* manually via redis-cli (example: a resharding is in progress
|
||||
@ -1878,10 +1910,10 @@ void clusterUpdateSlotsConfigWith(clusterNode *sender, uint64_t senderConfigEpoc
|
||||
if (server.cluster->importing_slots_from[j]) continue;
|
||||
|
||||
/* We rebind the slot to the new node claiming it if:
|
||||
* 1) The slot was unassigned or the new node claims it with a
|
||||
* greater configEpoch.
|
||||
* 1) The slot was unassigned or the previous owner no longer owns the slot or
|
||||
* the new node claims it with a greater configEpoch.
|
||||
* 2) We are not currently importing the slot. */
|
||||
if (server.cluster->slots[j] == NULL ||
|
||||
if (isSlotUnclaimed(j) ||
|
||||
server.cluster->slots[j]->configEpoch < senderConfigEpoch)
|
||||
{
|
||||
/* Was this slot mine, and still contains keys? Mark it as
|
||||
@ -1900,10 +1932,20 @@ void clusterUpdateSlotsConfigWith(clusterNode *sender, uint64_t senderConfigEpoc
|
||||
}
|
||||
clusterDelSlot(j);
|
||||
clusterAddSlot(sender,j);
|
||||
bitmapClearBit(server.cluster->owner_not_claiming_slot, j);
|
||||
clusterDoBeforeSleep(CLUSTER_TODO_SAVE_CONFIG|
|
||||
CLUSTER_TODO_UPDATE_STATE|
|
||||
CLUSTER_TODO_FSYNC_CONFIG);
|
||||
}
|
||||
} else if (server.cluster->slots[j] == sender) {
|
||||
/* The slot is currently bound to the sender but the sender is no longer
|
||||
* claiming it. We don't want to unbind the slot yet as it can cause the cluster
|
||||
* to move to FAIL state and also throw client error. Keeping the slot bound to
|
||||
* the previous owner will cause a few client side redirects, but won't throw
|
||||
* any errors. We will keep track of the uncertainty in ownership to avoid
|
||||
* propagating misinformation about this slot's ownership using UPDATE
|
||||
* messages. */
|
||||
bitmapSetBit(server.cluster->owner_not_claiming_slot, j);
|
||||
}
|
||||
}
|
||||
|
||||
@ -1930,7 +1972,13 @@ void clusterUpdateSlotsConfigWith(clusterNode *sender, uint64_t senderConfigEpoc
clusterDoBeforeSleep(CLUSTER_TODO_SAVE_CONFIG|
CLUSTER_TODO_UPDATE_STATE|
CLUSTER_TODO_FSYNC_CONFIG);
} else if (myself->slaveof && myself->slaveof->slaveof) {
} else if (myself->slaveof && myself->slaveof->slaveof &&
/* In some rare case when CLUSTER FAILOVER TAKEOVER is used, it
* can happen that myself is a replica of a replica of myself. If
* this happens, we do nothing to avoid a crash and wait for the
* admin to repair the cluster. */
myself->slaveof->slaveof != myself)
{
/* Safeguard against sub-replicas. A replica's master can turn itself
* into a replica if its last slot is removed. If no other node takes
* over the slot, there is nothing else to trigger replica migration. */
@ -2237,7 +2285,7 @@ int clusterProcessPacket(clusterLink *link) {
clusterNode *node;

node = createClusterNode(NULL,CLUSTER_NODE_HANDSHAKE);
nodeIp2String(node->ip,link,hdr->myip);
serverAssert(nodeIp2String(node->ip,link,hdr->myip) == C_OK);
node->port = ntohs(hdr->port);
node->pport = ntohs(hdr->pport);
node->cport = ntohs(hdr->cport);
@ -2437,7 +2485,7 @@ int clusterProcessPacket(clusterLink *link) {
for (j = 0; j < CLUSTER_SLOTS; j++) {
if (bitmapTestBit(hdr->myslots,j)) {
if (server.cluster->slots[j] == sender ||
server.cluster->slots[j] == NULL) continue;
isSlotUnclaimed(j)) continue;
if (server.cluster->slots[j]->configEpoch >
senderConfigEpoch)
{
@ -2745,6 +2793,9 @@ void clusterReadHandler(connection *conn) {
* the link to be invalidated, so it is safe to call this function
* from event handlers that will do stuff with the same link later. */
void clusterSendMessage(clusterLink *link, unsigned char *msg, size_t msglen) {
if (!link) {
return;
}
if (sdslen(link->sndbuf) == 0 && msglen != 0)
connSetWriteHandlerWithBarrier(link->conn, clusterWriteHandler, 1);

@ -2771,7 +2822,6 @@ void clusterBroadcastMessage(void *buf, size_t len) {
while((de = dictNext(di)) != NULL) {
clusterNode *node = dictGetVal(de);

if (!node->link) continue;
if (node->flags & (CLUSTER_NODE_MYSELF|CLUSTER_NODE_HANDSHAKE))
continue;
clusterSendMessage(node->link,buf,len);
@ -3062,13 +3112,18 @@ void clusterBroadcastPong(int target) {
* the 'bulk_data', sanitizer generates an out-of-bounds error which is a false
* positive in this context. */
REDIS_NO_SANITIZE("bounds")
void clusterSendPublish(clusterLink *link, robj *channel, robj *message, uint16_t type) {
void clusterSendPublish(clusterLink *link, robj *channel, robj *message, uint16_t type, int bcast) {
unsigned char *payload;
clusterMsg buf[1];
clusterMsg *hdr = (clusterMsg*) buf;
uint32_t totlen;
uint32_t channel_len, message_len;

/* In case we are not going to broadcast we have no point trying to publish on a missing
* clusterbus link. */
if (!bcast && !link)
return;

channel = getDecodedObject(channel);
message = getDecodedObject(message);
channel_len = sdslen(channel->ptr);
@ -3094,7 +3149,7 @@ void clusterSendPublish(clusterLink *link, robj *channel, robj *message, uint16_
memcpy(hdr->data.publish.msg.bulk_data+sdslen(channel->ptr),
message->ptr,sdslen(message->ptr));

if (link)
if (!bcast)
clusterSendMessage(link,payload,totlen);
else
clusterBroadcastMessage(payload,totlen);
@ -3130,6 +3185,11 @@ void clusterSendUpdate(clusterLink *link, clusterNode *node) {
memcpy(hdr->data.update.nodecfg.nodename,node->name,CLUSTER_NAMELEN);
hdr->data.update.nodecfg.configEpoch = htonu64(node->configEpoch);
memcpy(hdr->data.update.nodecfg.slots,node->slots,sizeof(node->slots));
for (unsigned int i = 0; i < sizeof(node->slots); i++) {
/* Don't advertise slots that the node stopped claiming */
hdr->data.update.nodecfg.slots[i] = hdr->data.update.nodecfg.slots[i] & (~server.cluster->owner_not_claiming_slot[i]);
}

clusterSendMessage(link,(unsigned char*)buf,ntohl(hdr->totlen));
}

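To make the masking in clusterSendUpdate() concrete, a small illustration (not part of the patch) of clearing a no-longer-claimed slot from one byte of the advertised bitmap:

    /* Each byte of nodecfg.slots covers 8 slots. If the node owns slots 0-7
     * (0xFF) but has stopped claiming slot 3 (owner_not_claiming_slot byte
     * 0x08), the UPDATE message advertises 0xFF & ~0x08 = 0xF7, so slot 3 is
     * dropped from the claim instead of being propagated as still owned. */
    unsigned char claimed      = 0xFF;  /* slots 0..7 currently owned */
    unsigned char not_claiming = 0x08;  /* bit 3: ownership handed off */
    unsigned char advertised   = claimed & (unsigned char)~not_claiming;  /* 0xF7 */
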
@ -3201,7 +3261,7 @@ int clusterSendModuleMessageToTarget(const char *target, uint64_t module_id, uin
* -------------------------------------------------------------------------- */
void clusterPropagatePublish(robj *channel, robj *message, int sharded) {
if (!sharded) {
clusterSendPublish(NULL, channel, message, CLUSTERMSG_TYPE_PUBLISH);
clusterSendPublish(NULL, channel, message, CLUSTERMSG_TYPE_PUBLISH, 1);
return;
}

@ -3212,9 +3272,9 @@ void clusterPropagatePublish(robj *channel, robj *message, int sharded) {
listRewind(nodes_for_slot, &li);
while((ln = listNext(&li))) {
clusterNode *node = listNodeValue(ln);
if (node != myself) {
clusterSendPublish(node->link, channel, message, CLUSTERMSG_TYPE_PUBLISHSHARD);
}
if (node->flags & (CLUSTER_NODE_MYSELF|CLUSTER_NODE_HANDSHAKE))
continue;
clusterSendPublish(node->link, channel, message, CLUSTERMSG_TYPE_PUBLISHSHARD, 0);
}
}
listRelease(nodes_for_slot);
@ -3349,7 +3409,7 @@ void clusterSendFailoverAuthIfNeeded(clusterNode *node, clusterMsg *request) {
* slots in the current configuration. */
for (j = 0; j < CLUSTER_SLOTS; j++) {
if (bitmapTestBit(claimed_slots, j) == 0) continue;
if (server.cluster->slots[j] == NULL ||
if (isSlotUnclaimed(j) ||
server.cluster->slots[j]->configEpoch <= requestConfigEpoch)
{
continue;
@ -3923,7 +3983,7 @@ static void resizeClusterLinkBuffer(clusterLink *link) {
/* If unused space is a lot bigger than the used portion of the buffer then free up unused space.
* We use a factor of 4 because of the greediness of sdsMakeRoomFor (used by sdscatlen). */
if (link != NULL && sdsavail(link->sndbuf) / 4 > sdslen(link->sndbuf)) {
link->sndbuf = sdsRemoveFreeSpace(link->sndbuf);
link->sndbuf = sdsRemoveFreeSpace(link->sndbuf, 1);
}
}

@ -4942,13 +5002,17 @@ void addNodeToNodeReply(client *c, clusterNode *node) {
if (server.cluster_preferred_endpoint_type == CLUSTER_ENDPOINT_TYPE_IP) {
addReplyBulkCString(c, node->ip);
} else if (server.cluster_preferred_endpoint_type == CLUSTER_ENDPOINT_TYPE_HOSTNAME) {
addReplyBulkCString(c, sdslen(node->hostname) != 0 ? node->hostname : "?");
if (sdslen(node->hostname) != 0) {
addReplyBulkCBuffer(c, node->hostname, sdslen(node->hostname));
} else {
addReplyBulkCString(c, "?");
}
} else if (server.cluster_preferred_endpoint_type == CLUSTER_ENDPOINT_TYPE_UNKNOWN_ENDPOINT) {
addReplyNull(c);
} else {
serverPanic("Unrecognized preferred endpoint type");
}


/* Report non-TLS ports to non-TLS client in TLS cluster if available. */
int use_pport = (server.tls_cluster &&
c->conn && connGetType(c->conn) != CONN_TYPE_TLS);
@ -4956,40 +5020,55 @@ void addNodeToNodeReply(client *c, clusterNode *node) {
addReplyBulkCBuffer(c, node->name, CLUSTER_NAMELEN);

/* Add the additional endpoint information, this is all the known networking information
* that is not the preferred endpoint. */
void *deflen = addReplyDeferredLen(c);
* that is not the preferred endpoint. Note the logic is evaluated twice so we can
* correctly report the number of additional network arguments without using a deferred
* map, an assertion is made at the end to check we set the right length. */
int length = 0;
if (server.cluster_preferred_endpoint_type != CLUSTER_ENDPOINT_TYPE_IP) {
addReplyBulkCString(c, "ip");
addReplyBulkCString(c, node->ip);
length++;
}
if (server.cluster_preferred_endpoint_type != CLUSTER_ENDPOINT_TYPE_HOSTNAME
&& sdslen(node->hostname) != 0)
{
addReplyBulkCString(c, "hostname");
addReplyBulkCString(c, node->hostname);
length++;
}
setDeferredMapLen(c, deflen, length);
addReplyMapLen(c, length);

if (server.cluster_preferred_endpoint_type != CLUSTER_ENDPOINT_TYPE_IP) {
addReplyBulkCString(c, "ip");
addReplyBulkCString(c, node->ip);
length--;
}
if (server.cluster_preferred_endpoint_type != CLUSTER_ENDPOINT_TYPE_HOSTNAME
&& sdslen(node->hostname) != 0)
{
addReplyBulkCString(c, "hostname");
addReplyBulkCBuffer(c, node->hostname, sdslen(node->hostname));
length--;
}
serverAssert(length == 0);
}

void addNodeReplyForClusterSlot(client *c, clusterNode *node, int start_slot, int end_slot) {
int i, nested_elements = 3; /* slots (2) + master addr (1) */
void *nested_replylen = addReplyDeferredLen(c);
for (i = 0; i < node->numslaves; i++) {
if (!isReplicaAvailable(node->slaves[i])) continue;
nested_elements++;
}
addReplyArrayLen(c, nested_elements);
addReplyLongLong(c, start_slot);
addReplyLongLong(c, end_slot);
addNodeToNodeReply(c, node);


/* Remaining nodes in reply are replicas for slot range */
for (i = 0; i < node->numslaves; i++) {
/* This loop is copy/pasted from clusterGenNodeDescription()
* with modifications for per-slot node aggregation. */
if (!isReplicaAvailable(node->slaves[i])) continue;
addNodeToNodeReply(c, node->slaves[i]);
nested_elements++;
nested_elements--;
}
setDeferredArrayLen(c, nested_replylen, nested_elements);
serverAssert(nested_elements == 3); /* Original 3 elements */
}

/* Add detailed information of a node to the output buffer of the given client. */
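The rewritten addNodeToNodeReply() trades the deferred-length reply node for evaluating the endpoint conditions twice. The general pattern, sketched here only for illustration (include_ip and include_hostname stand in for the endpoint-type checks used above), is: count first, emit a fixed map length, then emit the entries and assert both passes agree:

    /* Sketch: count-then-emit instead of addReplyDeferredLen()/setDeferredMapLen(). */
    int length = 0;
    if (include_ip) length++;
    if (include_hostname) length++;
    addReplyMapLen(c, length);   /* fixed-size map header, no deferred node */
    if (include_ip) {
        addReplyBulkCString(c, "ip");
        addReplyBulkCString(c, node->ip);
        length--;
    }
    if (include_hostname) {
        addReplyBulkCString(c, "hostname");
        addReplyBulkCBuffer(c, node->hostname, sdslen(node->hostname));
        length--;
    }
    serverAssert(length == 0);   /* both passes must have counted the same entries */
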
@ -5024,9 +5103,9 @@ void addNodeDetailsToShardReply(client *c, clusterNode *node) {
addReplyBulkCString(c, getPreferredEndpoint(node));
reply_count++;

if (node->hostname) {
if (sdslen(node->hostname) != 0) {
addReplyBulkCString(c, "hostname");
addReplyBulkCString(c, node->hostname);
addReplyBulkCBuffer(c, node->hostname, sdslen(node->hostname));
reply_count++;
}

@ -6692,7 +6771,7 @@ clusterNode *getNodeByQuery(client *c, struct redisCommand *cmd, robj **argv, in
* slot migration, the channel will be served from the source
* node until the migration completes with CLUSTER SETSLOT <slot>
* NODE <node-id>. */
int flags = LOOKUP_NOTOUCH | LOOKUP_NOSTATS | LOOKUP_NONOTIFY;
int flags = LOOKUP_NOTOUCH | LOOKUP_NOSTATS | LOOKUP_NONOTIFY | LOOKUP_NOEXPIRE;
if ((migrating_slot || importing_slot) && !is_pubsubshard)
{
if (lookupKeyReadWithFlags(&server.db[0], thiskey, flags) == NULL) missing_keys++;
@ -6706,6 +6785,7 @@ clusterNode *getNodeByQuery(client *c, struct redisCommand *cmd, robj **argv, in
* without redirections or errors in all the cases. */
if (n == NULL) return myself;

uint64_t cmd_flags = getCommandFlags(c);
/* Cluster is globally down but we got keys? We only serve the request
* if it is a read command and when allow_reads_when_down is enabled. */
if (server.cluster->state != CLUSTER_OK) {
@ -6719,7 +6799,7 @@ clusterNode *getNodeByQuery(client *c, struct redisCommand *cmd, robj **argv, in
* cluster is down. */
if (error_code) *error_code = CLUSTER_REDIR_DOWN_STATE;
return NULL;
} else if (cmd->flags & CMD_WRITE) {
} else if (cmd_flags & CMD_WRITE) {
/* The cluster is configured to allow read only commands */
if (error_code) *error_code = CLUSTER_REDIR_DOWN_RO_STATE;
return NULL;
@ -6757,7 +6837,7 @@ clusterNode *getNodeByQuery(client *c, struct redisCommand *cmd, robj **argv, in
* involves multiple keys and we don't have them all, the only option is
* to send a TRYAGAIN error. */
if (importing_slot &&
(c->flags & CLIENT_ASKING || cmd->flags & CMD_ASKING))
(c->flags & CLIENT_ASKING || cmd_flags & CMD_ASKING))
{
if (multiple_keys && missing_keys) {
if (error_code) *error_code = CLUSTER_REDIR_UNSTABLE;
@ -6770,7 +6850,7 @@ clusterNode *getNodeByQuery(client *c, struct redisCommand *cmd, robj **argv, in
/* Handle the read-only client case reading from a slave: if this
* node is a slave and the request is about a hash slot our master
* is serving, we can reply without redirection. */
int is_write_command = (c->cmd->flags & CMD_WRITE) ||
int is_write_command = (cmd_flags & CMD_WRITE) ||
(c->cmd->proc == execCommand && (c->mstate.cmd_flags & CMD_WRITE));
if (((c->flags & CLIENT_READONLY) || is_pubsubshard) &&
!is_write_command &&
@ -6854,8 +6934,8 @@ int clusterRedirectBlockedClientIfNeeded(client *c) {
return 1;
}

/* If the client is blocked on module, but ont on a specific key,
* don't unblock it (except for the CLSUTER_FAIL case above). */
/* If the client is blocked on module, but not on a specific key,
* don't unblock it (except for the CLUSTER_FAIL case above). */
if (c->btype == BLOCKED_MODULE && !moduleClientIsBlockedOnKeys(c))
return 0;

src/cluster.h

@ -206,6 +206,13 @@ typedef struct clusterState {
long long stats_pfail_nodes; /* Number of nodes in PFAIL status,
excluding nodes without address. */
unsigned long long stat_cluster_links_buffer_limit_exceeded; /* Total number of cluster links freed due to exceeding buffer limit */

/* Bit map for slots that are no longer claimed by the owner in cluster PING
* messages. During slot migration, the owner will stop claiming the slot after
* the ownership transfer. Set the bit corresponding to the slot when a node
* stops claiming the slot. This prevents spreading incorrect information (that
* source still owns the slot) using UPDATE messages. */
unsigned char owner_not_claiming_slot[CLUSTER_SLOTS / 8];
} clusterState;

/* Redis cluster messages header */
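owner_not_claiming_slot is a packed bitmap with one bit per slot (CLUSTER_SLOTS / 8 bytes). For readers unfamiliar with the helpers used on it throughout this patch, a minimal sketch of what bitmapSetBit/bitmapClearBit/bitmapTestBit typically look like; the real definitions live in cluster.c and may differ in detail:

    static void bitmapSetBit(unsigned char *bitmap, int pos) {
        bitmap[pos / 8] |= 1 << (pos & 7);      /* mark slot 'pos' */
    }
    static void bitmapClearBit(unsigned char *bitmap, int pos) {
        bitmap[pos / 8] &= ~(1 << (pos & 7));   /* unmark slot 'pos' */
    }
    static int bitmapTestBit(unsigned char *bitmap, int pos) {
        return (bitmap[pos / 8] & (1 << (pos & 7))) != 0;
    }
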
@ -398,5 +405,7 @@ void clusterUpdateMyselfIp(void);
void slotToChannelAdd(sds channel);
void slotToChannelDel(sds channel);
void clusterUpdateMyselfHostname(void);
void clusterUpdateMyselfAnnouncedPorts(void);
void freeClusterLink(clusterLink *link);

#endif /* __CLUSTER_H */

206 src/commands.c
@ -102,7 +102,7 @@ struct redisCommandArg BITFIELD_operation_Subargs[] = {
|
||||
/* BITFIELD argument table */
|
||||
struct redisCommandArg BITFIELD_Args[] = {
|
||||
{"key",ARG_TYPE_KEY,0,NULL,NULL,NULL,CMD_ARG_NONE},
|
||||
{"operation",ARG_TYPE_ONEOF,-1,NULL,NULL,NULL,CMD_ARG_MULTIPLE,.subargs=BITFIELD_operation_Subargs},
|
||||
{"operation",ARG_TYPE_ONEOF,-1,NULL,NULL,NULL,CMD_ARG_OPTIONAL|CMD_ARG_MULTIPLE,.subargs=BITFIELD_operation_Subargs},
|
||||
{0}
|
||||
};
|
||||
|
||||
@ -124,7 +124,7 @@ struct redisCommandArg BITFIELD_RO_encoding_offset_Subargs[] = {
|
||||
/* BITFIELD_RO argument table */
|
||||
struct redisCommandArg BITFIELD_RO_Args[] = {
|
||||
{"key",ARG_TYPE_KEY,0,NULL,NULL,NULL,CMD_ARG_NONE},
|
||||
{"encoding_offset",ARG_TYPE_BLOCK,-1,"GET",NULL,NULL,CMD_ARG_MULTIPLE|CMD_ARG_MULTIPLE_TOKEN,.subargs=BITFIELD_RO_encoding_offset_Subargs},
|
||||
{"encoding_offset",ARG_TYPE_BLOCK,-1,"GET",NULL,NULL,CMD_ARG_OPTIONAL|CMD_ARG_MULTIPLE|CMD_ARG_MULTIPLE_TOKEN,.subargs=BITFIELD_RO_encoding_offset_Subargs},
|
||||
{0}
|
||||
};
|
||||
|
||||
@ -229,10 +229,7 @@ struct redisCommandArg SETBIT_Args[] = {
|
||||
#define CLUSTER_ADDSLOTS_History NULL
|
||||
|
||||
/* CLUSTER ADDSLOTS tips */
|
||||
const char *CLUSTER_ADDSLOTS_tips[] = {
|
||||
"nondeterministic_output",
|
||||
NULL
|
||||
};
|
||||
#define CLUSTER_ADDSLOTS_tips NULL
|
||||
|
||||
/* CLUSTER ADDSLOTS argument table */
|
||||
struct redisCommandArg CLUSTER_ADDSLOTS_Args[] = {
|
||||
@ -246,10 +243,7 @@ struct redisCommandArg CLUSTER_ADDSLOTS_Args[] = {
|
||||
#define CLUSTER_ADDSLOTSRANGE_History NULL
|
||||
|
||||
/* CLUSTER ADDSLOTSRANGE tips */
|
||||
const char *CLUSTER_ADDSLOTSRANGE_tips[] = {
|
||||
"nondeterministic_output",
|
||||
NULL
|
||||
};
|
||||
#define CLUSTER_ADDSLOTSRANGE_tips NULL
|
||||
|
||||
/* CLUSTER ADDSLOTSRANGE start_slot_end_slot argument table */
|
||||
struct redisCommandArg CLUSTER_ADDSLOTSRANGE_start_slot_end_slot_Subargs[] = {
|
||||
@ -298,10 +292,7 @@ struct redisCommandArg CLUSTER_COUNT_FAILURE_REPORTS_Args[] = {
|
||||
#define CLUSTER_COUNTKEYSINSLOT_History NULL
|
||||
|
||||
/* CLUSTER COUNTKEYSINSLOT tips */
|
||||
const char *CLUSTER_COUNTKEYSINSLOT_tips[] = {
|
||||
"nondeterministic_output",
|
||||
NULL
|
||||
};
|
||||
#define CLUSTER_COUNTKEYSINSLOT_tips NULL
|
||||
|
||||
/* CLUSTER COUNTKEYSINSLOT argument table */
|
||||
struct redisCommandArg CLUSTER_COUNTKEYSINSLOT_Args[] = {
|
||||
@ -315,10 +306,7 @@ struct redisCommandArg CLUSTER_COUNTKEYSINSLOT_Args[] = {
|
||||
#define CLUSTER_DELSLOTS_History NULL
|
||||
|
||||
/* CLUSTER DELSLOTS tips */
|
||||
const char *CLUSTER_DELSLOTS_tips[] = {
|
||||
"nondeterministic_output",
|
||||
NULL
|
||||
};
|
||||
#define CLUSTER_DELSLOTS_tips NULL
|
||||
|
||||
/* CLUSTER DELSLOTS argument table */
|
||||
struct redisCommandArg CLUSTER_DELSLOTS_Args[] = {
|
||||
@ -332,10 +320,7 @@ struct redisCommandArg CLUSTER_DELSLOTS_Args[] = {
|
||||
#define CLUSTER_DELSLOTSRANGE_History NULL
|
||||
|
||||
/* CLUSTER DELSLOTSRANGE tips */
|
||||
const char *CLUSTER_DELSLOTSRANGE_tips[] = {
|
||||
"nondeterministic_output",
|
||||
NULL
|
||||
};
|
||||
#define CLUSTER_DELSLOTSRANGE_tips NULL
|
||||
|
||||
/* CLUSTER DELSLOTSRANGE start_slot_end_slot argument table */
|
||||
struct redisCommandArg CLUSTER_DELSLOTSRANGE_start_slot_end_slot_Subargs[] = {
|
||||
@ -356,10 +341,7 @@ struct redisCommandArg CLUSTER_DELSLOTSRANGE_Args[] = {
|
||||
#define CLUSTER_FAILOVER_History NULL
|
||||
|
||||
/* CLUSTER FAILOVER tips */
|
||||
const char *CLUSTER_FAILOVER_tips[] = {
|
||||
"nondeterministic_output",
|
||||
NULL
|
||||
};
|
||||
#define CLUSTER_FAILOVER_tips NULL
|
||||
|
||||
/* CLUSTER FAILOVER options argument table */
|
||||
struct redisCommandArg CLUSTER_FAILOVER_options_Subargs[] = {
|
||||
@ -380,10 +362,7 @@ struct redisCommandArg CLUSTER_FAILOVER_Args[] = {
|
||||
#define CLUSTER_FLUSHSLOTS_History NULL
|
||||
|
||||
/* CLUSTER FLUSHSLOTS tips */
|
||||
const char *CLUSTER_FLUSHSLOTS_tips[] = {
|
||||
"nondeterministic_output",
|
||||
NULL
|
||||
};
|
||||
#define CLUSTER_FLUSHSLOTS_tips NULL
|
||||
|
||||
/********** CLUSTER FORGET ********************/
|
||||
|
||||
@ -391,10 +370,7 @@ NULL
|
||||
#define CLUSTER_FORGET_History NULL
|
||||
|
||||
/* CLUSTER FORGET tips */
|
||||
const char *CLUSTER_FORGET_tips[] = {
|
||||
"nondeterministic_output",
|
||||
NULL
|
||||
};
|
||||
#define CLUSTER_FORGET_tips NULL
|
||||
|
||||
/* CLUSTER FORGET argument table */
|
||||
struct redisCommandArg CLUSTER_FORGET_Args[] = {
|
||||
@ -445,10 +421,7 @@ NULL
|
||||
#define CLUSTER_KEYSLOT_History NULL
|
||||
|
||||
/* CLUSTER KEYSLOT tips */
|
||||
const char *CLUSTER_KEYSLOT_tips[] = {
|
||||
"nondeterministic_output",
|
||||
NULL
|
||||
};
|
||||
#define CLUSTER_KEYSLOT_tips NULL
|
||||
|
||||
/* CLUSTER KEYSLOT argument table */
|
||||
struct redisCommandArg CLUSTER_KEYSLOT_Args[] = {
|
||||
@ -476,10 +449,7 @@ commandHistory CLUSTER_MEET_History[] = {
|
||||
};
|
||||
|
||||
/* CLUSTER MEET tips */
|
||||
const char *CLUSTER_MEET_tips[] = {
|
||||
"nondeterministic_output",
|
||||
NULL
|
||||
};
|
||||
#define CLUSTER_MEET_tips NULL
|
||||
|
||||
/* CLUSTER MEET argument table */
|
||||
struct redisCommandArg CLUSTER_MEET_Args[] = {
|
||||
@ -495,10 +465,7 @@ struct redisCommandArg CLUSTER_MEET_Args[] = {
|
||||
#define CLUSTER_MYID_History NULL
|
||||
|
||||
/* CLUSTER MYID tips */
|
||||
const char *CLUSTER_MYID_tips[] = {
|
||||
"nondeterministic_output",
|
||||
NULL
|
||||
};
|
||||
#define CLUSTER_MYID_tips NULL
|
||||
|
||||
/********** CLUSTER NODES ********************/
|
||||
|
||||
@ -534,10 +501,7 @@ struct redisCommandArg CLUSTER_REPLICAS_Args[] = {
|
||||
#define CLUSTER_REPLICATE_History NULL
|
||||
|
||||
/* CLUSTER REPLICATE tips */
|
||||
const char *CLUSTER_REPLICATE_tips[] = {
|
||||
"nondeterministic_output",
|
||||
NULL
|
||||
};
|
||||
#define CLUSTER_REPLICATE_tips NULL
|
||||
|
||||
/* CLUSTER REPLICATE argument table */
|
||||
struct redisCommandArg CLUSTER_REPLICATE_Args[] = {
|
||||
@ -551,10 +515,7 @@ struct redisCommandArg CLUSTER_REPLICATE_Args[] = {
|
||||
#define CLUSTER_RESET_History NULL
|
||||
|
||||
/* CLUSTER RESET tips */
|
||||
const char *CLUSTER_RESET_tips[] = {
|
||||
"nondeterministic_output",
|
||||
NULL
|
||||
};
|
||||
#define CLUSTER_RESET_tips NULL
|
||||
|
||||
/* CLUSTER RESET hard_soft argument table */
|
||||
struct redisCommandArg CLUSTER_RESET_hard_soft_Subargs[] = {
|
||||
@ -575,10 +536,7 @@ struct redisCommandArg CLUSTER_RESET_Args[] = {
|
||||
#define CLUSTER_SAVECONFIG_History NULL
|
||||
|
||||
/* CLUSTER SAVECONFIG tips */
|
||||
const char *CLUSTER_SAVECONFIG_tips[] = {
|
||||
"nondeterministic_output",
|
||||
NULL
|
||||
};
|
||||
#define CLUSTER_SAVECONFIG_tips NULL
|
||||
|
||||
/********** CLUSTER SET_CONFIG_EPOCH ********************/
|
||||
|
||||
@ -586,10 +544,7 @@ NULL
|
||||
#define CLUSTER_SET_CONFIG_EPOCH_History NULL
|
||||
|
||||
/* CLUSTER SET_CONFIG_EPOCH tips */
|
||||
const char *CLUSTER_SET_CONFIG_EPOCH_tips[] = {
|
||||
"nondeterministic_output",
|
||||
NULL
|
||||
};
|
||||
#define CLUSTER_SET_CONFIG_EPOCH_tips NULL
|
||||
|
||||
/* CLUSTER SET_CONFIG_EPOCH argument table */
|
||||
struct redisCommandArg CLUSTER_SET_CONFIG_EPOCH_Args[] = {
|
||||
@ -603,10 +558,7 @@ struct redisCommandArg CLUSTER_SET_CONFIG_EPOCH_Args[] = {
|
||||
#define CLUSTER_SETSLOT_History NULL
|
||||
|
||||
/* CLUSTER SETSLOT tips */
|
||||
const char *CLUSTER_SETSLOT_tips[] = {
|
||||
"nondeterministic_output",
|
||||
NULL
|
||||
};
|
||||
#define CLUSTER_SETSLOT_tips NULL
|
||||
|
||||
/* CLUSTER SETSLOT subcommand argument table */
|
||||
struct redisCommandArg CLUSTER_SETSLOT_subcommand_Subargs[] = {
|
||||
@ -820,8 +772,8 @@ commandHistory CLIENT_KILL_History[] = {
|
||||
/* CLIENT KILL tips */
|
||||
#define CLIENT_KILL_tips NULL
|
||||
|
||||
/* CLIENT KILL normal_master_slave_pubsub argument table */
|
||||
struct redisCommandArg CLIENT_KILL_normal_master_slave_pubsub_Subargs[] = {
|
||||
/* CLIENT KILL filter new_format normal_master_slave_pubsub argument table */
|
||||
struct redisCommandArg CLIENT_KILL_filter_new_format_normal_master_slave_pubsub_Subargs[] = {
|
||||
{"normal",ARG_TYPE_PURE_TOKEN,-1,"NORMAL",NULL,NULL,CMD_ARG_NONE},
|
||||
{"master",ARG_TYPE_PURE_TOKEN,-1,"MASTER",NULL,"3.2.0",CMD_ARG_NONE},
|
||||
{"slave",ARG_TYPE_PURE_TOKEN,-1,"SLAVE",NULL,NULL,CMD_ARG_NONE},
|
||||
@ -830,11 +782,10 @@ struct redisCommandArg CLIENT_KILL_normal_master_slave_pubsub_Subargs[] = {
|
||||
{0}
|
||||
};
|
||||
|
||||
/* CLIENT KILL argument table */
|
||||
struct redisCommandArg CLIENT_KILL_Args[] = {
|
||||
{"ip:port",ARG_TYPE_STRING,-1,NULL,NULL,NULL,CMD_ARG_OPTIONAL},
|
||||
/* CLIENT KILL filter new_format argument table */
|
||||
struct redisCommandArg CLIENT_KILL_filter_new_format_Subargs[] = {
|
||||
{"client-id",ARG_TYPE_INTEGER,-1,"ID",NULL,"2.8.12",CMD_ARG_OPTIONAL},
|
||||
{"normal_master_slave_pubsub",ARG_TYPE_ONEOF,-1,"TYPE",NULL,"2.8.12",CMD_ARG_OPTIONAL,.subargs=CLIENT_KILL_normal_master_slave_pubsub_Subargs},
|
||||
{"normal_master_slave_pubsub",ARG_TYPE_ONEOF,-1,"TYPE",NULL,"2.8.12",CMD_ARG_OPTIONAL,.subargs=CLIENT_KILL_filter_new_format_normal_master_slave_pubsub_Subargs},
|
||||
{"username",ARG_TYPE_STRING,-1,"USER",NULL,NULL,CMD_ARG_OPTIONAL},
|
||||
{"ip:port",ARG_TYPE_STRING,-1,"ADDR",NULL,NULL,CMD_ARG_OPTIONAL},
|
||||
{"ip:port",ARG_TYPE_STRING,-1,"LADDR",NULL,"6.2.0",CMD_ARG_OPTIONAL},
|
||||
@ -842,13 +793,29 @@ struct redisCommandArg CLIENT_KILL_Args[] = {
|
||||
{0}
|
||||
};
|
||||
|
||||
/* CLIENT KILL filter argument table */
|
||||
struct redisCommandArg CLIENT_KILL_filter_Subargs[] = {
|
||||
{"ip:port",ARG_TYPE_STRING,-1,NULL,NULL,NULL,CMD_ARG_NONE,.deprecated_since="2.8.12"},
|
||||
{"new-format",ARG_TYPE_ONEOF,-1,NULL,NULL,NULL,CMD_ARG_MULTIPLE,.subargs=CLIENT_KILL_filter_new_format_Subargs},
|
||||
{0}
|
||||
};
|
||||
|
||||
/* CLIENT KILL argument table */
|
||||
struct redisCommandArg CLIENT_KILL_Args[] = {
|
||||
{"filter",ARG_TYPE_ONEOF,-1,NULL,NULL,NULL,CMD_ARG_NONE,.subargs=CLIENT_KILL_filter_Subargs},
|
||||
{0}
|
||||
};
|
||||
|
||||
/********** CLIENT LIST ********************/
|
||||
|
||||
/* CLIENT LIST history */
|
||||
commandHistory CLIENT_LIST_History[] = {
|
||||
{"2.8.12","Added unique client `id` field."},
|
||||
{"5.0.0","Added optional `TYPE` filter."},
|
||||
{"6.2.0","Added `laddr` field and the optional `ID` filter."},
|
||||
{"6.0.0","Added `user` field."},
|
||||
{"6.2.0","Added `argv-mem`, `tot-mem`, `laddr` and `redir` fields and the optional `ID` filter."},
|
||||
{"7.0.0","Added `resp`, `multi-mem`, `rbs` and `rbp` fields."},
|
||||
{"7.0.3","Added `ssub` field."},
|
||||
{0}
|
||||
};
|
||||
|
||||
@ -1922,6 +1889,7 @@ struct redisCommandArg GEOPOS_Args[] = {
|
||||
/* GEORADIUS history */
|
||||
commandHistory GEORADIUS_History[] = {
|
||||
{"6.2.0","Added the `ANY` option for `COUNT`."},
|
||||
{"7.0.0","Added support for uppercase unit names."},
|
||||
{0}
|
||||
};
|
||||
|
||||
@ -1971,7 +1939,10 @@ struct redisCommandArg GEORADIUS_Args[] = {
|
||||
/********** GEORADIUSBYMEMBER ********************/
|
||||
|
||||
/* GEORADIUSBYMEMBER history */
|
||||
#define GEORADIUSBYMEMBER_History NULL
|
||||
commandHistory GEORADIUSBYMEMBER_History[] = {
|
||||
{"7.0.0","Added support for uppercase unit names."},
|
||||
{0}
|
||||
};
|
||||
|
||||
/* GEORADIUSBYMEMBER tips */
|
||||
#define GEORADIUSBYMEMBER_tips NULL
|
||||
@ -2112,7 +2083,10 @@ struct redisCommandArg GEORADIUS_RO_Args[] = {
|
||||
/********** GEOSEARCH ********************/
|
||||
|
||||
/* GEOSEARCH history */
|
||||
#define GEOSEARCH_History NULL
|
||||
commandHistory GEOSEARCH_History[] = {
|
||||
{"7.0.0","Added support for uppercase unit names."},
|
||||
{0}
|
||||
};
|
||||
|
||||
/* GEOSEARCH tips */
|
||||
#define GEOSEARCH_tips NULL
|
||||
@ -2201,7 +2175,10 @@ struct redisCommandArg GEOSEARCH_Args[] = {
|
||||
/********** GEOSEARCHSTORE ********************/
|
||||
|
||||
/* GEOSEARCHSTORE history */
|
||||
#define GEOSEARCHSTORE_History NULL
|
||||
commandHistory GEOSEARCHSTORE_History[] = {
|
||||
{"7.0.0","Added support for uppercase unit names."},
|
||||
{0}
|
||||
};
|
||||
|
||||
/* GEOSEARCHSTORE tips */
|
||||
#define GEOSEARCHSTORE_tips NULL
|
||||
@ -3318,8 +3295,8 @@ struct redisCommandArg EVALSHA_Args[] = {
|
||||
struct redisCommandArg EVALSHA_RO_Args[] = {
|
||||
{"sha1",ARG_TYPE_STRING,-1,NULL,NULL,NULL,CMD_ARG_NONE},
|
||||
{"numkeys",ARG_TYPE_INTEGER,-1,NULL,NULL,NULL,CMD_ARG_NONE},
|
||||
{"key",ARG_TYPE_KEY,0,NULL,NULL,NULL,CMD_ARG_MULTIPLE},
|
||||
{"arg",ARG_TYPE_STRING,-1,NULL,NULL,NULL,CMD_ARG_MULTIPLE},
|
||||
{"key",ARG_TYPE_KEY,0,NULL,NULL,NULL,CMD_ARG_OPTIONAL|CMD_ARG_MULTIPLE},
|
||||
{"arg",ARG_TYPE_STRING,-1,NULL,NULL,NULL,CMD_ARG_OPTIONAL|CMD_ARG_MULTIPLE},
|
||||
{0}
|
||||
};
|
||||
|
||||
@ -3335,8 +3312,8 @@ struct redisCommandArg EVALSHA_RO_Args[] = {
|
||||
struct redisCommandArg EVAL_RO_Args[] = {
|
||||
{"script",ARG_TYPE_STRING,-1,NULL,NULL,NULL,CMD_ARG_NONE},
|
||||
{"numkeys",ARG_TYPE_INTEGER,-1,NULL,NULL,NULL,CMD_ARG_NONE},
|
||||
{"key",ARG_TYPE_KEY,0,NULL,NULL,NULL,CMD_ARG_MULTIPLE},
|
||||
{"arg",ARG_TYPE_STRING,-1,NULL,NULL,NULL,CMD_ARG_MULTIPLE},
|
||||
{"key",ARG_TYPE_KEY,0,NULL,NULL,NULL,CMD_ARG_OPTIONAL|CMD_ARG_MULTIPLE},
|
||||
{"arg",ARG_TYPE_STRING,-1,NULL,NULL,NULL,CMD_ARG_OPTIONAL|CMD_ARG_MULTIPLE},
|
||||
{0}
|
||||
};
|
||||
|
||||
@ -3352,8 +3329,8 @@ struct redisCommandArg EVAL_RO_Args[] = {
|
||||
struct redisCommandArg FCALL_Args[] = {
|
||||
{"function",ARG_TYPE_STRING,-1,NULL,NULL,NULL,CMD_ARG_NONE},
|
||||
{"numkeys",ARG_TYPE_INTEGER,-1,NULL,NULL,NULL,CMD_ARG_NONE},
|
||||
{"key",ARG_TYPE_KEY,0,NULL,NULL,NULL,CMD_ARG_MULTIPLE},
|
||||
{"arg",ARG_TYPE_STRING,-1,NULL,NULL,NULL,CMD_ARG_MULTIPLE},
|
||||
{"key",ARG_TYPE_KEY,0,NULL,NULL,NULL,CMD_ARG_OPTIONAL|CMD_ARG_MULTIPLE},
|
||||
{"arg",ARG_TYPE_STRING,-1,NULL,NULL,NULL,CMD_ARG_OPTIONAL|CMD_ARG_MULTIPLE},
|
||||
{0}
|
||||
};
|
||||
|
||||
@ -3369,8 +3346,8 @@ struct redisCommandArg FCALL_Args[] = {
|
||||
struct redisCommandArg FCALL_RO_Args[] = {
|
||||
{"function",ARG_TYPE_STRING,-1,NULL,NULL,NULL,CMD_ARG_NONE},
|
||||
{"numkeys",ARG_TYPE_INTEGER,-1,NULL,NULL,NULL,CMD_ARG_NONE},
|
||||
{"key",ARG_TYPE_KEY,0,NULL,NULL,NULL,CMD_ARG_MULTIPLE},
|
||||
{"arg",ARG_TYPE_STRING,-1,NULL,NULL,NULL,CMD_ARG_MULTIPLE},
|
||||
{"key",ARG_TYPE_KEY,0,NULL,NULL,NULL,CMD_ARG_OPTIONAL|CMD_ARG_MULTIPLE},
|
||||
{"arg",ARG_TYPE_STRING,-1,NULL,NULL,NULL,CMD_ARG_OPTIONAL|CMD_ARG_MULTIPLE},
|
||||
{0}
|
||||
};
|
||||
|
||||
@ -4940,13 +4917,33 @@ struct redisCommandArg PSYNC_Args[] = {
|
||||
/* REPLICAOF tips */
|
||||
#define REPLICAOF_tips NULL
|
||||
|
||||
/* REPLICAOF argument table */
|
||||
struct redisCommandArg REPLICAOF_Args[] = {
|
||||
/* REPLICAOF args host_port argument table */
|
||||
struct redisCommandArg REPLICAOF_args_host_port_Subargs[] = {
|
||||
{"host",ARG_TYPE_STRING,-1,NULL,NULL,NULL,CMD_ARG_NONE},
|
||||
{"port",ARG_TYPE_INTEGER,-1,NULL,NULL,NULL,CMD_ARG_NONE},
|
||||
{0}
|
||||
};
|
||||
|
||||
/* REPLICAOF args no_one argument table */
|
||||
struct redisCommandArg REPLICAOF_args_no_one_Subargs[] = {
|
||||
{"no",ARG_TYPE_PURE_TOKEN,-1,"NO",NULL,NULL,CMD_ARG_NONE},
|
||||
{"one",ARG_TYPE_PURE_TOKEN,-1,"ONE",NULL,NULL,CMD_ARG_NONE},
|
||||
{0}
|
||||
};
|
||||
|
||||
/* REPLICAOF args argument table */
|
||||
struct redisCommandArg REPLICAOF_args_Subargs[] = {
|
||||
{"host-port",ARG_TYPE_BLOCK,-1,NULL,NULL,NULL,CMD_ARG_NONE,.subargs=REPLICAOF_args_host_port_Subargs},
|
||||
{"no-one",ARG_TYPE_BLOCK,-1,NULL,NULL,NULL,CMD_ARG_NONE,.subargs=REPLICAOF_args_no_one_Subargs},
|
||||
{0}
|
||||
};
|
||||
|
||||
/* REPLICAOF argument table */
|
||||
struct redisCommandArg REPLICAOF_Args[] = {
|
||||
{"args",ARG_TYPE_ONEOF,-1,NULL,NULL,NULL,CMD_ARG_NONE,.subargs=REPLICAOF_args_Subargs},
|
||||
{0}
|
||||
};
|
||||
|
||||
/********** RESTORE_ASKING ********************/
|
||||
|
||||
/* RESTORE_ASKING history */
|
||||
@ -5023,13 +5020,33 @@ struct redisCommandArg SHUTDOWN_Args[] = {
|
||||
/* SLAVEOF tips */
|
||||
#define SLAVEOF_tips NULL
|
||||
|
||||
/* SLAVEOF argument table */
|
||||
struct redisCommandArg SLAVEOF_Args[] = {
|
||||
/* SLAVEOF args host_port argument table */
|
||||
struct redisCommandArg SLAVEOF_args_host_port_Subargs[] = {
|
||||
{"host",ARG_TYPE_STRING,-1,NULL,NULL,NULL,CMD_ARG_NONE},
|
||||
{"port",ARG_TYPE_INTEGER,-1,NULL,NULL,NULL,CMD_ARG_NONE},
|
||||
{0}
|
||||
};
|
||||
|
||||
/* SLAVEOF args no_one argument table */
|
||||
struct redisCommandArg SLAVEOF_args_no_one_Subargs[] = {
|
||||
{"no",ARG_TYPE_PURE_TOKEN,-1,"NO",NULL,NULL,CMD_ARG_NONE},
|
||||
{"one",ARG_TYPE_PURE_TOKEN,-1,"ONE",NULL,NULL,CMD_ARG_NONE},
|
||||
{0}
|
||||
};
|
||||
|
||||
/* SLAVEOF args argument table */
|
||||
struct redisCommandArg SLAVEOF_args_Subargs[] = {
|
||||
{"host-port",ARG_TYPE_BLOCK,-1,NULL,NULL,NULL,CMD_ARG_NONE,.subargs=SLAVEOF_args_host_port_Subargs},
|
||||
{"no-one",ARG_TYPE_BLOCK,-1,NULL,NULL,NULL,CMD_ARG_NONE,.subargs=SLAVEOF_args_no_one_Subargs},
|
||||
{0}
|
||||
};
|
||||
|
||||
/* SLAVEOF argument table */
|
||||
struct redisCommandArg SLAVEOF_Args[] = {
|
||||
{"args",ARG_TYPE_ONEOF,-1,NULL,NULL,NULL,CMD_ARG_NONE,.subargs=SLAVEOF_args_Subargs},
|
||||
{0}
|
||||
};
|
||||
|
||||
/********** SLOWLOG GET ********************/
|
||||
|
||||
/* SLOWLOG GET history */
|
||||
@ -6286,6 +6303,7 @@ struct redisCommandArg XCLAIM_Args[] = {
|
||||
{"count",ARG_TYPE_INTEGER,-1,"RETRYCOUNT",NULL,NULL,CMD_ARG_OPTIONAL},
|
||||
{"force",ARG_TYPE_PURE_TOKEN,-1,"FORCE",NULL,NULL,CMD_ARG_OPTIONAL},
|
||||
{"justid",ARG_TYPE_PURE_TOKEN,-1,"JUSTID",NULL,NULL,CMD_ARG_OPTIONAL},
|
||||
{"id",ARG_TYPE_STRING,-1,"LASTID",NULL,NULL,CMD_ARG_OPTIONAL},
|
||||
{0}
|
||||
};
|
||||
|
||||
@ -6674,8 +6692,8 @@ commandHistory XSETID_History[] = {
|
||||
struct redisCommandArg XSETID_Args[] = {
|
||||
{"key",ARG_TYPE_KEY,0,NULL,NULL,NULL,CMD_ARG_NONE},
|
||||
{"last-id",ARG_TYPE_STRING,-1,NULL,NULL,NULL,CMD_ARG_NONE},
|
||||
{"entries_added",ARG_TYPE_INTEGER,-1,"ENTRIESADDED",NULL,NULL,CMD_ARG_OPTIONAL},
|
||||
{"max_deleted_entry_id",ARG_TYPE_STRING,-1,"MAXDELETEDID",NULL,NULL,CMD_ARG_OPTIONAL},
|
||||
{"entries_added",ARG_TYPE_INTEGER,-1,"ENTRIESADDED",NULL,"7.0.0",CMD_ARG_OPTIONAL},
|
||||
{"max_deleted_entry_id",ARG_TYPE_STRING,-1,"MAXDELETEDID",NULL,"7.0.0",CMD_ARG_OPTIONAL},
|
||||
{0}
|
||||
};
|
||||
|
||||
@ -7202,11 +7220,11 @@ struct redisCommand redisCommandTable[] = {
|
||||
{"pexpireat","Set the expiration for a key as a UNIX timestamp specified in milliseconds","O(1)","2.6.0",CMD_DOC_NONE,NULL,NULL,COMMAND_GROUP_GENERIC,PEXPIREAT_History,PEXPIREAT_tips,pexpireatCommand,-3,CMD_WRITE|CMD_FAST,ACL_CATEGORY_KEYSPACE,{{NULL,CMD_KEY_RW|CMD_KEY_UPDATE,KSPEC_BS_INDEX,.bs.index={1},KSPEC_FK_RANGE,.fk.range={0,1,0}}},.args=PEXPIREAT_Args},
|
||||
{"pexpiretime","Get the expiration Unix timestamp for a key in milliseconds","O(1)","7.0.0",CMD_DOC_NONE,NULL,NULL,COMMAND_GROUP_GENERIC,PEXPIRETIME_History,PEXPIRETIME_tips,pexpiretimeCommand,2,CMD_READONLY|CMD_FAST,ACL_CATEGORY_KEYSPACE,{{NULL,CMD_KEY_RO|CMD_KEY_ACCESS,KSPEC_BS_INDEX,.bs.index={1},KSPEC_FK_RANGE,.fk.range={0,1,0}}},.args=PEXPIRETIME_Args},
|
||||
{"pttl","Get the time to live for a key in milliseconds","O(1)","2.6.0",CMD_DOC_NONE,NULL,NULL,COMMAND_GROUP_GENERIC,PTTL_History,PTTL_tips,pttlCommand,2,CMD_READONLY|CMD_FAST,ACL_CATEGORY_KEYSPACE,{{NULL,CMD_KEY_RO|CMD_KEY_ACCESS,KSPEC_BS_INDEX,.bs.index={1},KSPEC_FK_RANGE,.fk.range={0,1,0}}},.args=PTTL_Args},
|
||||
{"randomkey","Return a random key from the keyspace","O(1)","1.0.0",CMD_DOC_NONE,NULL,NULL,COMMAND_GROUP_GENERIC,RANDOMKEY_History,RANDOMKEY_tips,randomkeyCommand,1,CMD_READONLY,ACL_CATEGORY_KEYSPACE},
|
||||
{"randomkey","Return a random key from the keyspace","O(1)","1.0.0",CMD_DOC_NONE,NULL,NULL,COMMAND_GROUP_GENERIC,RANDOMKEY_History,RANDOMKEY_tips,randomkeyCommand,1,CMD_READONLY|CMD_TOUCHES_ARBITRARY_KEYS,ACL_CATEGORY_KEYSPACE},
|
||||
{"rename","Rename a key","O(1)","1.0.0",CMD_DOC_NONE,NULL,NULL,COMMAND_GROUP_GENERIC,RENAME_History,RENAME_tips,renameCommand,3,CMD_WRITE,ACL_CATEGORY_KEYSPACE,{{NULL,CMD_KEY_RW|CMD_KEY_ACCESS|CMD_KEY_DELETE,KSPEC_BS_INDEX,.bs.index={1},KSPEC_FK_RANGE,.fk.range={0,1,0}},{NULL,CMD_KEY_OW|CMD_KEY_UPDATE,KSPEC_BS_INDEX,.bs.index={2},KSPEC_FK_RANGE,.fk.range={0,1,0}}},.args=RENAME_Args},
|
||||
{"renamenx","Rename a key, only if the new key does not exist","O(1)","1.0.0",CMD_DOC_NONE,NULL,NULL,COMMAND_GROUP_GENERIC,RENAMENX_History,RENAMENX_tips,renamenxCommand,3,CMD_WRITE|CMD_FAST,ACL_CATEGORY_KEYSPACE,{{NULL,CMD_KEY_RW|CMD_KEY_ACCESS|CMD_KEY_DELETE,KSPEC_BS_INDEX,.bs.index={1},KSPEC_FK_RANGE,.fk.range={0,1,0}},{NULL,CMD_KEY_OW|CMD_KEY_INSERT,KSPEC_BS_INDEX,.bs.index={2},KSPEC_FK_RANGE,.fk.range={0,1,0}}},.args=RENAMENX_Args},
|
||||
{"restore","Create a key using the provided serialized value, previously obtained using DUMP.","O(1) to create the new key and additional O(N*M) to reconstruct the serialized value, where N is the number of Redis objects composing the value and M their average size. For small string values the time complexity is thus O(1)+O(1*M) where M is small, so simply O(1). However for sorted set values the complexity is O(N*M*log(N)) because inserting values into sorted sets is O(log(N)).","2.6.0",CMD_DOC_NONE,NULL,NULL,COMMAND_GROUP_GENERIC,RESTORE_History,RESTORE_tips,restoreCommand,-4,CMD_WRITE|CMD_DENYOOM,ACL_CATEGORY_KEYSPACE|ACL_CATEGORY_DANGEROUS,{{NULL,CMD_KEY_OW|CMD_KEY_UPDATE,KSPEC_BS_INDEX,.bs.index={1},KSPEC_FK_RANGE,.fk.range={0,1,0}}},.args=RESTORE_Args},
|
||||
{"scan","Incrementally iterate the keys space","O(1) for every call. O(N) for a complete iteration, including enough command calls for the cursor to return back to 0. N is the number of elements inside the collection.","2.8.0",CMD_DOC_NONE,NULL,NULL,COMMAND_GROUP_GENERIC,SCAN_History,SCAN_tips,scanCommand,-2,CMD_READONLY,ACL_CATEGORY_KEYSPACE,.args=SCAN_Args},
|
||||
{"scan","Incrementally iterate the keys space","O(1) for every call. O(N) for a complete iteration, including enough command calls for the cursor to return back to 0. N is the number of elements inside the collection.","2.8.0",CMD_DOC_NONE,NULL,NULL,COMMAND_GROUP_GENERIC,SCAN_History,SCAN_tips,scanCommand,-2,CMD_READONLY|CMD_TOUCHES_ARBITRARY_KEYS,ACL_CATEGORY_KEYSPACE,.args=SCAN_Args},
|
||||
{"sort","Sort the elements in a list, set or sorted set","O(N+M*log(M)) where N is the number of elements in the list or set to sort, and M the number of returned elements. When the elements are not sorted, complexity is O(N).","1.0.0",CMD_DOC_NONE,NULL,NULL,COMMAND_GROUP_GENERIC,SORT_History,SORT_tips,sortCommand,-2,CMD_WRITE|CMD_DENYOOM,ACL_CATEGORY_SET|ACL_CATEGORY_SORTEDSET|ACL_CATEGORY_LIST|ACL_CATEGORY_DANGEROUS,{{NULL,CMD_KEY_RO|CMD_KEY_ACCESS,KSPEC_BS_INDEX,.bs.index={1},KSPEC_FK_RANGE,.fk.range={0,1,0}},{"For the optional BY/GET keyword. It is marked 'unknown' because the key names derive from the content of the key we sort",CMD_KEY_RO|CMD_KEY_ACCESS,KSPEC_BS_UNKNOWN,{{0}},KSPEC_FK_UNKNOWN,{{0}}},{"For the optional STORE keyword. It is marked 'unknown' because the keyword can appear anywhere in the argument array",CMD_KEY_OW|CMD_KEY_UPDATE,KSPEC_BS_UNKNOWN,{{0}},KSPEC_FK_UNKNOWN,{{0}}}},sortGetKeys,.args=SORT_Args},
|
||||
{"sort_ro","Sort the elements in a list, set or sorted set. Read-only variant of SORT.","O(N+M*log(M)) where N is the number of elements in the list or set to sort, and M the number of returned elements. When the elements are not sorted, complexity is O(N).","7.0.0",CMD_DOC_NONE,NULL,NULL,COMMAND_GROUP_GENERIC,SORT_RO_History,SORT_RO_tips,sortroCommand,-2,CMD_READONLY,ACL_CATEGORY_SET|ACL_CATEGORY_SORTEDSET|ACL_CATEGORY_LIST|ACL_CATEGORY_DANGEROUS,{{NULL,CMD_KEY_RO|CMD_KEY_ACCESS,KSPEC_BS_INDEX,.bs.index={1},KSPEC_FK_RANGE,.fk.range={0,1,0}},{"For the optional BY/GET keyword. It is marked 'unknown' because the key names derive from the content of the key we sort",CMD_KEY_RO|CMD_KEY_ACCESS,KSPEC_BS_UNKNOWN,{{0}},KSPEC_FK_UNKNOWN,{{0}}}},sortROGetKeys,.args=SORT_RO_Args},
|
||||
{"touch","Alters the last access time of a key(s). Returns the number of existing keys specified.","O(N) where N is the number of keys that will be touched.","3.2.1",CMD_DOC_NONE,NULL,NULL,COMMAND_GROUP_GENERIC,TOUCH_History,TOUCH_tips,touchCommand,-2,CMD_READONLY|CMD_FAST,ACL_CATEGORY_KEYSPACE,{{NULL,CMD_KEY_RO,KSPEC_BS_INDEX,.bs.index={1},KSPEC_FK_RANGE,.fk.range={-1,1,0}}},.args=TOUCH_Args},
|
||||
@ -7341,7 +7359,7 @@ struct redisCommand redisCommandTable[] = {
|
||||
{"sunion","Add multiple sets","O(N) where N is the total number of elements in all given sets.","1.0.0",CMD_DOC_NONE,NULL,NULL,COMMAND_GROUP_SET,SUNION_History,SUNION_tips,sunionCommand,-2,CMD_READONLY,ACL_CATEGORY_SET,{{NULL,CMD_KEY_RO|CMD_KEY_ACCESS,KSPEC_BS_INDEX,.bs.index={1},KSPEC_FK_RANGE,.fk.range={-1,1,0}}},.args=SUNION_Args},
|
||||
{"sunionstore","Add multiple sets and store the resulting set in a key","O(N) where N is the total number of elements in all given sets.","1.0.0",CMD_DOC_NONE,NULL,NULL,COMMAND_GROUP_SET,SUNIONSTORE_History,SUNIONSTORE_tips,sunionstoreCommand,-3,CMD_WRITE|CMD_DENYOOM,ACL_CATEGORY_SET,{{NULL,CMD_KEY_OW|CMD_KEY_UPDATE,KSPEC_BS_INDEX,.bs.index={1},KSPEC_FK_RANGE,.fk.range={0,1,0}},{NULL,CMD_KEY_RO|CMD_KEY_ACCESS,KSPEC_BS_INDEX,.bs.index={2},KSPEC_FK_RANGE,.fk.range={-1,1,0}}},.args=SUNIONSTORE_Args},
|
||||
/* sorted_set */
|
||||
{"bzmpop","Remove and return members with scores in a sorted set or block until one is available","O(K) + O(N*log(M)) where K is the number of provided keys, N being the number of elements in the sorted set, and M being the number of elements popped.","7.0.0",CMD_DOC_NONE,NULL,NULL,COMMAND_GROUP_SORTED_SET,BZMPOP_History,BZMPOP_tips,bzmpopCommand,-5,CMD_WRITE|CMD_BLOCKING,ACL_CATEGORY_SORTEDSET,{{NULL,CMD_KEY_RW|CMD_KEY_ACCESS|CMD_KEY_DELETE,KSPEC_BS_INDEX,.bs.index={2},KSPEC_FK_KEYNUM,.fk.keynum={0,1,1}}},blmpopGetKeys,.args=BZMPOP_Args},
|
||||
{"bzmpop","Remove and return members with scores in a sorted set or block until one is available","O(K) + O(M*log(N)) where K is the number of provided keys, N being the number of elements in the sorted set, and M being the number of elements popped.","7.0.0",CMD_DOC_NONE,NULL,NULL,COMMAND_GROUP_SORTED_SET,BZMPOP_History,BZMPOP_tips,bzmpopCommand,-5,CMD_WRITE|CMD_BLOCKING,ACL_CATEGORY_SORTEDSET,{{NULL,CMD_KEY_RW|CMD_KEY_ACCESS|CMD_KEY_DELETE,KSPEC_BS_INDEX,.bs.index={2},KSPEC_FK_KEYNUM,.fk.keynum={0,1,1}}},blmpopGetKeys,.args=BZMPOP_Args},
|
||||
{"bzpopmax","Remove and return the member with the highest score from one or more sorted sets, or block until one is available","O(log(N)) with N being the number of elements in the sorted set.","5.0.0",CMD_DOC_NONE,NULL,NULL,COMMAND_GROUP_SORTED_SET,BZPOPMAX_History,BZPOPMAX_tips,bzpopmaxCommand,-3,CMD_WRITE|CMD_NOSCRIPT|CMD_FAST|CMD_BLOCKING,ACL_CATEGORY_SORTEDSET,{{NULL,CMD_KEY_RW|CMD_KEY_ACCESS|CMD_KEY_DELETE,KSPEC_BS_INDEX,.bs.index={1},KSPEC_FK_RANGE,.fk.range={-2,1,0}}},.args=BZPOPMAX_Args},
|
||||
{"bzpopmin","Remove and return the member with the lowest score from one or more sorted sets, or block until one is available","O(log(N)) with N being the number of elements in the sorted set.","5.0.0",CMD_DOC_NONE,NULL,NULL,COMMAND_GROUP_SORTED_SET,BZPOPMIN_History,BZPOPMIN_tips,bzpopminCommand,-3,CMD_WRITE|CMD_NOSCRIPT|CMD_FAST|CMD_BLOCKING,ACL_CATEGORY_SORTEDSET,{{NULL,CMD_KEY_RW|CMD_KEY_ACCESS|CMD_KEY_DELETE,KSPEC_BS_INDEX,.bs.index={1},KSPEC_FK_RANGE,.fk.range={-2,1,0}}},.args=BZPOPMIN_Args},
|
||||
{"zadd","Add one or more members to a sorted set, or update its score if it already exists","O(log(N)) for each item added, where N is the number of elements in the sorted set.","1.2.0",CMD_DOC_NONE,NULL,NULL,COMMAND_GROUP_SORTED_SET,ZADD_History,ZADD_tips,zaddCommand,-4,CMD_WRITE|CMD_DENYOOM|CMD_FAST,ACL_CATEGORY_SORTEDSET,{{NULL,CMD_KEY_RW|CMD_KEY_UPDATE,KSPEC_BS_INDEX,.bs.index={1},KSPEC_FK_RANGE,.fk.range={0,1,0}}},.args=ZADD_Args},
|
||||
@ -7354,7 +7372,7 @@ struct redisCommand redisCommandTable[] = {
|
||||
{"zintercard","Intersect multiple sorted sets and return the cardinality of the result","O(N*K) worst case with N being the smallest input sorted set, K being the number of input sorted sets.","7.0.0",CMD_DOC_NONE,NULL,NULL,COMMAND_GROUP_SORTED_SET,ZINTERCARD_History,ZINTERCARD_tips,zinterCardCommand,-3,CMD_READONLY,ACL_CATEGORY_SORTEDSET,{{NULL,CMD_KEY_RO|CMD_KEY_ACCESS,KSPEC_BS_INDEX,.bs.index={1},KSPEC_FK_KEYNUM,.fk.keynum={0,1,1}}},zunionInterDiffGetKeys,.args=ZINTERCARD_Args},
|
||||
{"zinterstore","Intersect multiple sorted sets and store the resulting sorted set in a new key","O(N*K)+O(M*log(M)) worst case with N being the smallest input sorted set, K being the number of input sorted sets and M being the number of elements in the resulting sorted set.","2.0.0",CMD_DOC_NONE,NULL,NULL,COMMAND_GROUP_SORTED_SET,ZINTERSTORE_History,ZINTERSTORE_tips,zinterstoreCommand,-4,CMD_WRITE|CMD_DENYOOM,ACL_CATEGORY_SORTEDSET,{{NULL,CMD_KEY_OW|CMD_KEY_UPDATE,KSPEC_BS_INDEX,.bs.index={1},KSPEC_FK_RANGE,.fk.range={0,1,0}},{NULL,CMD_KEY_RO|CMD_KEY_ACCESS,KSPEC_BS_INDEX,.bs.index={2},KSPEC_FK_KEYNUM,.fk.keynum={0,1,1}}},zunionInterDiffStoreGetKeys,.args=ZINTERSTORE_Args},
|
||||
{"zlexcount","Count the number of members in a sorted set between a given lexicographical range","O(log(N)) with N being the number of elements in the sorted set.","2.8.9",CMD_DOC_NONE,NULL,NULL,COMMAND_GROUP_SORTED_SET,ZLEXCOUNT_History,ZLEXCOUNT_tips,zlexcountCommand,4,CMD_READONLY|CMD_FAST,ACL_CATEGORY_SORTEDSET,{{NULL,CMD_KEY_RO|CMD_KEY_ACCESS,KSPEC_BS_INDEX,.bs.index={1},KSPEC_FK_RANGE,.fk.range={0,1,0}}},.args=ZLEXCOUNT_Args},
|
||||
{"zmpop","Remove and return members with scores in a sorted set","O(K) + O(N*log(M)) where K is the number of provided keys, N being the number of elements in the sorted set, and M being the number of elements popped.","7.0.0",CMD_DOC_NONE,NULL,NULL,COMMAND_GROUP_SORTED_SET,ZMPOP_History,ZMPOP_tips,zmpopCommand,-4,CMD_WRITE,ACL_CATEGORY_SORTEDSET,{{NULL,CMD_KEY_RW|CMD_KEY_ACCESS|CMD_KEY_DELETE,KSPEC_BS_INDEX,.bs.index={1},KSPEC_FK_KEYNUM,.fk.keynum={0,1,1}}},zmpopGetKeys,.args=ZMPOP_Args},
|
||||
{"zmpop","Remove and return members with scores in a sorted set","O(K) + O(M*log(N)) where K is the number of provided keys, N being the number of elements in the sorted set, and M being the number of elements popped.","7.0.0",CMD_DOC_NONE,NULL,NULL,COMMAND_GROUP_SORTED_SET,ZMPOP_History,ZMPOP_tips,zmpopCommand,-4,CMD_WRITE,ACL_CATEGORY_SORTEDSET,{{NULL,CMD_KEY_RW|CMD_KEY_ACCESS|CMD_KEY_DELETE,KSPEC_BS_INDEX,.bs.index={1},KSPEC_FK_KEYNUM,.fk.keynum={0,1,1}}},zmpopGetKeys,.args=ZMPOP_Args},
|
||||
{"zmscore","Get the score associated with the given members in a sorted set","O(N) where N is the number of members being requested.","6.2.0",CMD_DOC_NONE,NULL,NULL,COMMAND_GROUP_SORTED_SET,ZMSCORE_History,ZMSCORE_tips,zmscoreCommand,-3,CMD_READONLY|CMD_FAST,ACL_CATEGORY_SORTEDSET,{{NULL,CMD_KEY_RO|CMD_KEY_ACCESS,KSPEC_BS_INDEX,.bs.index={1},KSPEC_FK_RANGE,.fk.range={0,1,0}}},.args=ZMSCORE_Args},
|
||||
{"zpopmax","Remove and return members with the highest scores in a sorted set","O(log(N)*M) with N being the number of elements in the sorted set, and M being the number of elements popped.","5.0.0",CMD_DOC_NONE,NULL,NULL,COMMAND_GROUP_SORTED_SET,ZPOPMAX_History,ZPOPMAX_tips,zpopmaxCommand,-2,CMD_WRITE|CMD_FAST,ACL_CATEGORY_SORTEDSET,{{NULL,CMD_KEY_RW|CMD_KEY_ACCESS|CMD_KEY_DELETE,KSPEC_BS_INDEX,.bs.index={1},KSPEC_FK_RANGE,.fk.range={0,1,0}}},.args=ZPOPMAX_Args},
|
||||
{"zpopmin","Remove and return members with the lowest scores in a sorted set","O(log(N)*M) with N being the number of elements in the sorted set, and M being the number of elements popped.","5.0.0",CMD_DOC_NONE,NULL,NULL,COMMAND_GROUP_SORTED_SET,ZPOPMIN_History,ZPOPMIN_tips,zpopminCommand,-2,CMD_WRITE|CMD_FAST,ACL_CATEGORY_SORTEDSET,{{NULL,CMD_KEY_RW|CMD_KEY_ACCESS|CMD_KEY_DELETE,KSPEC_BS_INDEX,.bs.index={1},KSPEC_FK_RANGE,.fk.range={0,1,0}}},.args=ZPOPMIN_Args},
|
||||
|
@ -47,6 +47,7 @@
|
||||
"name": "operation",
|
||||
"type": "oneof",
|
||||
"multiple": true,
|
||||
"optional": true,
|
||||
"arguments": [
|
||||
{
|
||||
"token": "GET",
|
||||
|
@ -43,6 +43,7 @@
|
||||
"token": "GET",
|
||||
"name": "encoding_offset",
|
||||
"type": "block",
|
||||
"optional": true,
|
||||
"multiple": true,
|
||||
"multiple_token": true,
|
||||
"arguments": [
|
||||
|
@ -1,7 +1,7 @@
|
||||
{
|
||||
"BZMPOP": {
|
||||
"summary": "Remove and return members with scores in a sorted set or block until one is available",
|
||||
"complexity": "O(K) + O(N*log(M)) where K is the number of provided keys, N being the number of elements in the sorted set, and M being the number of elements popped.",
|
||||
"complexity": "O(K) + O(M*log(N)) where K is the number of provided keys, N being the number of elements in the sorted set, and M being the number of elements popped.",
|
||||
"group": "sorted_set",
|
||||
"since": "7.0.0",
|
||||
"arity": -5,
|
||||
|
@ -41,77 +41,90 @@
|
||||
],
|
||||
"arguments": [
|
||||
{
|
||||
"name": "ip:port",
|
||||
"type": "string",
|
||||
"optional": true
|
||||
},
|
||||
{
|
||||
"token": "ID",
|
||||
"name": "client-id",
|
||||
"type": "integer",
|
||||
"optional": true,
|
||||
"since": "2.8.12"
|
||||
},
|
||||
{
|
||||
"token": "TYPE",
|
||||
"name": "normal_master_slave_pubsub",
|
||||
"name": "filter",
|
||||
"type": "oneof",
|
||||
"optional": true,
|
||||
"since": "2.8.12",
|
||||
"arguments": [
|
||||
{
|
||||
"name": "normal",
|
||||
"type": "pure-token",
|
||||
"token": "normal"
|
||||
"name": "ip:port",
|
||||
"type": "string",
|
||||
"deprecated_since": "2.8.12"
|
||||
},
|
||||
{
|
||||
"name": "master",
|
||||
"type": "pure-token",
|
||||
"token": "master",
|
||||
"since": "3.2.0"
|
||||
},
|
||||
{
|
||||
"name": "slave",
|
||||
"type": "pure-token",
|
||||
"token": "slave"
|
||||
},
|
||||
{
|
||||
"name": "replica",
|
||||
"type": "pure-token",
|
||||
"token": "replica",
|
||||
"since": "5.0.0"
|
||||
},
|
||||
{
|
||||
"name": "pubsub",
|
||||
"type": "pure-token",
|
||||
"token": "pubsub"
|
||||
"name": "new-format",
|
||||
"type": "oneof",
|
||||
"multiple": true,
|
||||
"arguments": [
|
||||
{
|
||||
"token": "ID",
|
||||
"name": "client-id",
|
||||
"type": "integer",
|
||||
"optional": true,
|
||||
"since": "2.8.12"
|
||||
},
|
||||
{
|
||||
"token": "TYPE",
|
||||
"name": "normal_master_slave_pubsub",
|
||||
"type": "oneof",
|
||||
"optional": true,
|
||||
"since": "2.8.12",
|
||||
"arguments": [
|
||||
{
|
||||
"name": "normal",
|
||||
"type": "pure-token",
|
||||
"token": "normal"
|
||||
},
|
||||
{
|
||||
"name": "master",
|
||||
"type": "pure-token",
|
||||
"token": "master",
|
||||
"since": "3.2.0"
|
||||
},
|
||||
{
|
||||
"name": "slave",
|
||||
"type": "pure-token",
|
||||
"token": "slave"
|
||||
},
|
||||
{
|
||||
"name": "replica",
|
||||
"type": "pure-token",
|
||||
"token": "replica",
|
||||
"since": "5.0.0"
|
||||
},
|
||||
{
|
||||
"name": "pubsub",
|
||||
"type": "pure-token",
|
||||
"token": "pubsub"
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"token": "USER",
|
||||
"name": "username",
|
||||
"type": "string",
|
||||
"optional": true
|
||||
},
|
||||
{
|
||||
"token": "ADDR",
|
||||
"name": "ip:port",
|
||||
"type": "string",
|
||||
"optional": true
|
||||
},
|
||||
{
|
||||
"token": "LADDR",
|
||||
"name": "ip:port",
|
||||
"type": "string",
|
||||
"optional": true,
|
||||
"since": "6.2.0"
|
||||
},
|
||||
{
|
||||
"token": "SKIPME",
|
||||
"name": "yes/no",
|
||||
"type": "string",
|
||||
"optional": true
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"token": "USER",
|
||||
"name": "username",
|
||||
"type": "string",
|
||||
"optional": true
|
||||
},
|
||||
{
|
||||
"token": "ADDR",
|
||||
"name": "ip:port",
|
||||
"type": "string",
|
||||
"optional": true
|
||||
},
|
||||
{
|
||||
"token": "LADDR",
|
||||
"name": "ip:port",
|
||||
"type": "string",
|
||||
"optional": true,
|
||||
"since": "6.2.0"
|
||||
},
|
||||
{
|
||||
"token": "SKIPME",
|
||||
"name": "yes/no",
|
||||
"type": "string",
|
||||
"optional": true
|
||||
}
|
||||
]
|
||||
}
|
||||
|
@ -16,9 +16,21 @@
|
||||
"5.0.0",
|
||||
"Added optional `TYPE` filter."
|
||||
],
|
||||
[
|
||||
"6.0.0",
|
||||
"Added `user` field."
|
||||
],
|
||||
[
|
||||
"6.2.0",
|
||||
"Added `laddr` field and the optional `ID` filter."
|
||||
"Added `argv-mem`, `tot-mem`, `laddr` and `redir` fields and the optional `ID` filter."
|
||||
],
|
||||
[
|
||||
"7.0.0",
|
||||
"Added `resp`, `multi-mem`, `rbs` and `rbp` fields."
|
||||
],
|
||||
[
|
||||
"7.0.3",
|
||||
"Added `ssub` field."
|
||||
]
|
||||
],
|
||||
"command_flags": [
|
||||
|
@ -12,9 +12,6 @@
|
||||
"ADMIN",
|
||||
"STALE"
|
||||
],
|
||||
"command_tips": [
|
||||
"NONDETERMINISTIC_OUTPUT"
|
||||
],
|
||||
"arguments": [
|
||||
{
|
||||
"name": "slot",
|
||||
|
@ -12,9 +12,6 @@
|
||||
"ADMIN",
|
||||
"STALE"
|
||||
],
|
||||
"command_tips": [
|
||||
"NONDETERMINISTIC_OUTPUT"
|
||||
],
|
||||
"arguments": [
|
||||
{
|
||||
"name": "start-slot_end-slot",
|
||||
|
@ -10,9 +10,6 @@
|
||||
"command_flags": [
|
||||
"STALE"
|
||||
],
|
||||
"command_tips": [
|
||||
"NONDETERMINISTIC_OUTPUT"
|
||||
],
|
||||
"arguments": [
|
||||
{
|
||||
"name": "slot",
|
||||
|
@ -12,9 +12,6 @@
|
||||
"ADMIN",
|
||||
"STALE"
|
||||
],
|
||||
"command_tips": [
|
||||
"NONDETERMINISTIC_OUTPUT"
|
||||
],
|
||||
"arguments": [
|
||||
{
|
||||
"name": "slot",
|
||||
|
@ -12,9 +12,6 @@
|
||||
"ADMIN",
|
||||
"STALE"
|
||||
],
|
||||
"command_tips": [
|
||||
"NONDETERMINISTIC_OUTPUT"
|
||||
],
|
||||
"arguments": [
|
||||
{
|
||||
"name": "start-slot_end-slot",
|
||||
|
@ -12,9 +12,6 @@
|
||||
"ADMIN",
|
||||
"STALE"
|
||||
],
|
||||
"command_tips": [
|
||||
"NONDETERMINISTIC_OUTPUT"
|
||||
],
|
||||
"arguments": [
|
||||
{
|
||||
"name": "options",
|
||||
|
@ -11,9 +11,6 @@
|
||||
"NO_ASYNC_LOADING",
|
||||
"ADMIN",
|
||||
"STALE"
|
||||
],
|
||||
"command_tips": [
|
||||
"NONDETERMINISTIC_OUTPUT"
|
||||
]
|
||||
}
|
||||
}
|
||||
|
@ -12,9 +12,6 @@
|
||||
"ADMIN",
|
||||
"STALE"
|
||||
],
|
||||
"command_tips": [
|
||||
"NONDETERMINISTIC_OUTPUT"
|
||||
],
|
||||
"arguments": [
|
||||
{
|
||||
"name": "node-id",
|
||||
|
@ -10,9 +10,6 @@
|
||||
"command_flags": [
|
||||
"STALE"
|
||||
],
|
||||
"command_tips": [
|
||||
"NONDETERMINISTIC_OUTPUT"
|
||||
],
|
||||
"arguments": [
|
||||
{
|
||||
"name": "key",
|
||||
|
@ -18,9 +18,6 @@
|
||||
"ADMIN",
|
||||
"STALE"
|
||||
],
|
||||
"command_tips": [
|
||||
"NONDETERMINISTIC_OUTPUT"
|
||||
],
|
||||
"arguments": [
|
||||
{
|
||||
"name": "ip",
|
||||
|
@ -9,9 +9,6 @@
|
||||
"function": "clusterCommand",
|
||||
"command_flags": [
|
||||
"STALE"
|
||||
],
|
||||
"command_tips": [
|
||||
"NONDETERMINISTIC_OUTPUT"
|
||||
]
|
||||
}
|
||||
}
|
||||
|
@ -12,9 +12,6 @@
|
||||
"ADMIN",
|
||||
"STALE"
|
||||
],
|
||||
"command_tips": [
|
||||
"NONDETERMINISTIC_OUTPUT"
|
||||
],
|
||||
"arguments": [
|
||||
{
|
||||
"name": "node-id",
|
||||
|
@ -12,9 +12,6 @@
|
||||
"STALE",
|
||||
"NOSCRIPT"
|
||||
],
|
||||
"command_tips": [
|
||||
"NONDETERMINISTIC_OUTPUT"
|
||||
],
|
||||
"arguments": [
|
||||
{
|
||||
"name": "hard_soft",
|
||||
|
@ -11,9 +11,6 @@
|
||||
"NO_ASYNC_LOADING",
|
||||
"ADMIN",
|
||||
"STALE"
|
||||
],
|
||||
"command_tips": [
|
||||
"NONDETERMINISTIC_OUTPUT"
|
||||
]
|
||||
}
|
||||
}
|
@@ -12,9 +12,6 @@
"ADMIN",
"STALE"
],
"command_tips": [
"NONDETERMINISTIC_OUTPUT"
],
"arguments": [
{
"name": "config-epoch",

@@ -12,9 +12,6 @@
"ADMIN",
"STALE"
],
"command_tips": [
"NONDETERMINISTIC_OUTPUT"
],
"arguments": [
{
"name": "slot",

@@ -51,11 +51,13 @@
"name": "key",
"type": "key",
"key_spec_index": 0,
"optional":true,
"multiple": true
},
{
"name": "arg",
"type": "string",
"optional":true,
"multiple": true
}
]

@@ -50,11 +50,13 @@
"name": "key",
"type": "key",
"key_spec_index": 0,
"optional":true,
"multiple": true
},
{
"name": "arg",
"type": "string",
"optional":true,
"multiple": true
}
]

@@ -52,11 +52,13 @@
"name": "key",
"type": "key",
"key_spec_index": 0,
"optional": true,
"multiple": true
},
{
"name": "arg",
"type": "string",
"optional": true,
"multiple": true
}
]

@@ -51,11 +51,13 @@
"name": "key",
"type": "key",
"key_spec_index": 0,
"optional": true,
"multiple": true
},
{
"name": "arg",
"type": "string",
"optional": true,
"multiple": true
}
]

@@ -11,6 +11,10 @@
[
"6.2.0",
"Added the `ANY` option for `COUNT`."
],
[
"7.0.0",
"Added support for uppercase unit names."
]
],
"deprecated_since": "6.2.0",

@@ -7,6 +7,12 @@
"arity": -5,
"function": "georadiusbymemberCommand",
"get_keys_function": "georadiusGetKeys",
"history": [
[
"7.0.0",
"Added support for uppercase unit names."
]
],
"deprecated_since": "6.2.0",
"replaced_by": "`GEOSEARCH` and `GEOSEARCHSTORE` with the `BYRADIUS` and `FROMMEMBER` arguments",
"doc_flags": [

@@ -6,6 +6,12 @@
"since": "6.2.0",
"arity": -7,
"function": "geosearchCommand",
"history": [
[
"7.0.0",
"Added support for uppercase unit names."
]
],
"command_flags": [
"READONLY"
],
@@ -203,4 +209,4 @@
}
]
}
}
}

@@ -6,6 +6,12 @@
"since": "6.2.0",
"arity": -8,
"function": "geosearchstoreCommand",
"history": [
[
"7.0.0",
"Added support for uppercase unit names."
]
],
"command_flags": [
"WRITE",
"DENYOOM"
@@ -215,4 +221,4 @@
}
]
}
}
}

@@ -7,7 +7,8 @@
"arity": 1,
"function": "randomkeyCommand",
"command_flags": [
"READONLY"
"READONLY",
"TOUCHES_ARBITRARY_KEYS"
],
"acl_categories": [
"KEYSPACE"

@@ -14,12 +14,40 @@
],
"arguments": [
{
"name": "host",
"type": "string"
},
{
"name": "port",
"type": "integer"
"name": "args",
"type": "oneof",
"arguments": [
{
"name": "host-port",
"type": "block",
"arguments": [
{
"name": "host",
"type": "string"
},
{
"name": "port",
"type": "integer"
}
]
},
{
"name": "no-one",
"type": "block",
"arguments": [
{
"name": "no",
"type": "pure-token",
"token": "NO"
},
{
"name": "one",
"type": "pure-token",
"token": "ONE"
}
]
}
]
}
]
}

@@ -13,7 +13,8 @@
]
],
"command_flags": [
"READONLY"
"READONLY",
"TOUCHES_ARBITRARY_KEYS"
],
"acl_categories": [
"KEYSPACE"

@@ -19,12 +19,40 @@
],
"arguments": [
{
"name": "host",
"type": "string"
},
{
"name": "port",
"type": "integer"
"name": "args",
"type": "oneof",
"arguments": [
{
"name": "host-port",
"type": "block",
"arguments": [
{
"name": "host",
"type": "string"
},
{
"name": "port",
"type": "integer"
}
]
},
{
"name": "no-one",
"type": "block",
"arguments": [
{
"name": "no",
"type": "pure-token",
"token": "NO"
},
{
"name": "one",
"type": "pure-token",
"token": "ONE"
}
]
}
]
}
]
}

@@ -88,6 +88,12 @@
"token": "JUSTID",
"type": "pure-token",
"optional": true
},
{
"name": "id",
"token": "LASTID",
"type": "string",
"optional": true
}
]
}

@@ -54,13 +54,15 @@
"name": "entries_added",
"token": "ENTRIESADDED",
"type": "integer",
"optional": true
"optional": true,
"since": "7.0.0"
},
{
"name": "max_deleted_entry_id",
"token": "MAXDELETEDID",
"type": "string",
"optional": true
"optional": true,
"since": "7.0.0"
}
]
}

@@ -1,7 +1,7 @@
{
"ZMPOP": {
"summary": "Remove and return members with scores in a sorted set",
"complexity": "O(K) + O(N*log(M)) where K is the number of provided keys, N being the number of elements in the sorted set, and M being the number of elements popped.",
"complexity": "O(K) + O(M*log(N)) where K is the number of provided keys, N being the number of elements in the sorted set, and M being the number of elements popped.",
"group": "sorted_set",
"since": "7.0.0",
"arity": -4,
51  src/config.c

@@ -1416,9 +1416,9 @@ void rewriteConfigUserOption(struct rewriteConfigState *state) {
sds line = sdsnew("user ");
line = sdscatsds(line,u->name);
line = sdscatlen(line," ",1);
sds descr = ACLDescribeUser(u);
line = sdscatsds(line,descr);
sdsfree(descr);
robj *descr = ACLDescribeUser(u);
line = sdscatsds(line,descr->ptr);
decrRefCount(descr);
rewriteConfigRewriteLine(state,"user",line,1);
}
raxStop(&ri);
@@ -2550,6 +2550,12 @@ int updateClusterFlags(const char **err) {
return 1;
}

static int updateClusterAnnouncedPort(const char **err) {
UNUSED(err);
clusterUpdateMyselfAnnouncedPorts();
return 1;
}

static int updateClusterIp(const char **err) {
UNUSED(err);
clusterUpdateMyselfIp();
@@ -2926,6 +2932,37 @@ void rewriteConfigLatencyTrackingInfoPercentilesOutputOption(standardConfig *con
rewriteConfigRewriteLine(state,name,line,1);
}

static int applyClientMaxMemoryUsage(const char **err) {
UNUSED(err);
listIter li;
listNode *ln;

/* server.client_mem_usage_buckets is an indication that the previous config
 * was non-zero, in which case we can exit and no apply is needed. */
if(server.maxmemory_clients !=0 && server.client_mem_usage_buckets)
return 1;
if (server.maxmemory_clients != 0)
initServerClientMemUsageBuckets();

/* When client eviction is enabled update memory buckets for all clients.
 * When disabled, clear that data structure. */
listRewind(server.clients, &li);
while ((ln = listNext(&li)) != NULL) {
client *c = listNodeValue(ln);
if (server.maxmemory_clients == 0) {
/* Remove client from memory usage bucket. */
removeClientFromMemUsageBucket(c, 0);
} else {
/* Update each client(s) memory usage and add to appropriate bucket. */
updateClientMemUsageAndBucket(c);
}
}

if (server.maxmemory_clients == 0)
freeServerClientMemUsageBuckets();
return 1;
}

standardConfig static_configs[] = {
/* Bool configs */
createBoolConfig("rdbchecksum", NULL, IMMUTABLE_CONFIG, server.rdb_checksum, 1, NULL, NULL),
@@ -3042,9 +3079,9 @@ standardConfig static_configs[] = {
createIntConfig("replica-announce-port", "slave-announce-port", MODIFIABLE_CONFIG, 0, 65535, server.slave_announce_port, 0, INTEGER_CONFIG, NULL, NULL),
createIntConfig("tcp-backlog", NULL, IMMUTABLE_CONFIG, 0, INT_MAX, server.tcp_backlog, 511, INTEGER_CONFIG, NULL, NULL), /* TCP listen backlog. */
createIntConfig("cluster-port", NULL, IMMUTABLE_CONFIG, 0, 65535, server.cluster_port, 0, INTEGER_CONFIG, NULL, NULL),
createIntConfig("cluster-announce-bus-port", NULL, MODIFIABLE_CONFIG, 0, 65535, server.cluster_announce_bus_port, 0, INTEGER_CONFIG, NULL, NULL), /* Default: Use +10000 offset. */
createIntConfig("cluster-announce-port", NULL, MODIFIABLE_CONFIG, 0, 65535, server.cluster_announce_port, 0, INTEGER_CONFIG, NULL, NULL), /* Use server.port */
createIntConfig("cluster-announce-tls-port", NULL, MODIFIABLE_CONFIG, 0, 65535, server.cluster_announce_tls_port, 0, INTEGER_CONFIG, NULL, NULL), /* Use server.tls_port */
createIntConfig("cluster-announce-bus-port", NULL, MODIFIABLE_CONFIG, 0, 65535, server.cluster_announce_bus_port, 0, INTEGER_CONFIG, NULL, updateClusterAnnouncedPort), /* Default: Use +10000 offset. */
createIntConfig("cluster-announce-port", NULL, MODIFIABLE_CONFIG, 0, 65535, server.cluster_announce_port, 0, INTEGER_CONFIG, NULL, updateClusterAnnouncedPort), /* Use server.port */
createIntConfig("cluster-announce-tls-port", NULL, MODIFIABLE_CONFIG, 0, 65535, server.cluster_announce_tls_port, 0, INTEGER_CONFIG, NULL, updateClusterAnnouncedPort), /* Use server.tls_port */
createIntConfig("repl-timeout", NULL, MODIFIABLE_CONFIG, 1, INT_MAX, server.repl_timeout, 60, INTEGER_CONFIG, NULL, NULL),
createIntConfig("repl-ping-replica-period", "repl-ping-slave-period", MODIFIABLE_CONFIG, 1, INT_MAX, server.repl_ping_slave_period, 10, INTEGER_CONFIG, NULL, NULL),
createIntConfig("list-compress-depth", NULL, DEBUG_CONFIG | MODIFIABLE_CONFIG, 0, INT_MAX, server.list_compress_depth, 0, INTEGER_CONFIG, NULL, NULL),
@@ -3092,7 +3129,7 @@ standardConfig static_configs[] = {
createSizeTConfig("hll-sparse-max-bytes", NULL, MODIFIABLE_CONFIG, 0, LONG_MAX, server.hll_sparse_max_bytes, 3000, MEMORY_CONFIG, NULL, NULL),
createSizeTConfig("tracking-table-max-keys", NULL, MODIFIABLE_CONFIG, 0, LONG_MAX, server.tracking_table_max_keys, 1000000, INTEGER_CONFIG, NULL, NULL), /* Default: 1 million keys max. */
createSizeTConfig("client-query-buffer-limit", NULL, DEBUG_CONFIG | MODIFIABLE_CONFIG, 1024*1024, LONG_MAX, server.client_max_querybuf_len, 1024*1024*1024, MEMORY_CONFIG, NULL, NULL), /* Default: 1GB max query buffer. */
createSSizeTConfig("maxmemory-clients", NULL, MODIFIABLE_CONFIG, -100, SSIZE_MAX, server.maxmemory_clients, 0, MEMORY_CONFIG | PERCENT_CONFIG, NULL, NULL),
createSSizeTConfig("maxmemory-clients", NULL, MODIFIABLE_CONFIG, -100, SSIZE_MAX, server.maxmemory_clients, 0, MEMORY_CONFIG | PERCENT_CONFIG, NULL, applyClientMaxMemoryUsage),

/* Other configs */
createTimeTConfig("repl-backlog-ttl", NULL, MODIFIABLE_CONFIG, 0, LONG_MAX, server.repl_backlog_time_limit, 60*60, INTEGER_CONFIG, NULL, NULL), /* Default: 1 hour */
10  src/config.h

@@ -40,8 +40,12 @@
#include <fcntl.h>
#endif

#if defined(__APPLE__) && defined(__MAC_OS_X_VERSION_MAX_ALLOWED) && __MAC_OS_X_VERSION_MAX_ALLOWED >= 1060
#define MAC_OS_10_6_DETECTED
#endif

/* Define redis_fstat to fstat or fstat64() */
#if defined(__APPLE__) && !defined(MAC_OS_X_VERSION_10_6)
#if defined(__APPLE__) && !defined(MAC_OS_10_6_DETECTED)
#define redis_fstat fstat64
#define redis_stat stat64
#else
@@ -96,7 +100,7 @@
#define HAVE_ACCEPT4 1
#endif

#if (defined(__APPLE__) && defined(MAC_OS_X_VERSION_10_6)) || defined(__FreeBSD__) || defined(__OpenBSD__) || defined (__NetBSD__)
#if (defined(__APPLE__) && defined(MAC_OS_10_6_DETECTED)) || defined(__FreeBSD__) || defined(__OpenBSD__) || defined (__NetBSD__)
#define HAVE_KQUEUE 1
#endif

@@ -293,7 +297,7 @@ void setproctitle(const char *fmt, ...);
#include <kernel/OS.h>
#define redis_set_thread_title(name) rename_thread(find_thread(0), name)
#else
#if (defined __APPLE__ && defined(MAC_OS_X_VERSION_10_7))
#if (defined __APPLE__ && defined(__MAC_OS_X_VERSION_MAX_ALLOWED) && __MAC_OS_X_VERSION_MAX_ALLOWED >= 1070)
int pthread_setname_np(const char *name);
#include <pthread.h>
#define redis_set_thread_title(name) pthread_setname_np(name)
@@ -389,7 +389,11 @@ int connGetSocketError(connection *conn) {
}

int connPeerToString(connection *conn, char *ip, size_t ip_len, int *port) {
return anetFdToString(conn ? conn->fd : -1, ip, ip_len, port, FD_TO_PEER_NAME);
if (anetFdToString(conn ? conn->fd : -1, ip, ip_len, port, FD_TO_PEER_NAME) == -1) {
if (conn) conn->last_errno = errno;
return C_ERR;
}
return C_OK;
}

int connSockName(connection *conn, char *ip, size_t ip_len, int *port) {
116  src/db.c

@@ -41,7 +41,11 @@
 * C-level DB API
 *----------------------------------------------------------------------------*/

int expireIfNeeded(redisDb *db, robj *key, int force_delete_expired);
/* Flags for expireIfNeeded */
#define EXPIRE_FORCE_DELETE_EXPIRED 1
#define EXPIRE_AVOID_DELETE_EXPIRED 2

int expireIfNeeded(redisDb *db, robj *key, int flags);
int keyIsExpired(redisDb *db, robj *key);

/* Update LFU when an object is accessed.
@@ -72,6 +76,8 @@ void updateLFU(robj *val) {
 * LOOKUP_NOSTATS: Don't increment key hits/misses counters.
 * LOOKUP_WRITE: Prepare the key for writing (delete expired keys even on
 *               replicas, use separate keyspace stats and events (TODO)).
 * LOOKUP_NOEXPIRE: Perform expiration check, but avoid deleting the key,
 *                  so that we don't have to propagate the deletion.
 *
 * Note: this function also returns NULL if the key is logically expired but
 * still existing, in case this is a replica and the LOOKUP_WRITE is not set.
@@ -92,8 +98,12 @@ robj *lookupKey(redisDb *db, robj *key, int flags) {
 * command, since the command may trigger events that cause modules to
 * perform additional writes. */
int is_ro_replica = server.masterhost && server.repl_slave_ro;
int force_delete_expired = flags & LOOKUP_WRITE && !is_ro_replica;
if (expireIfNeeded(db, key, force_delete_expired)) {
int expire_flags = 0;
if (flags & LOOKUP_WRITE && !is_ro_replica)
expire_flags |= EXPIRE_FORCE_DELETE_EXPIRED;
if (flags & LOOKUP_NOEXPIRE)
expire_flags |= EXPIRE_AVOID_DELETE_EXPIRED;
if (expireIfNeeded(db, key, expire_flags)) {
/* The key is no longer valid. */
val = NULL;
}
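A minimal, self-contained sketch of the flag-composition idea in the lookupKey() hunk above. The LOOKUP_* and EXPIRE_* values here are illustrative stand-ins, not the actual Redis definitions, and the function names are hypothetical:

    #include <stdio.h>

    /* Illustrative stand-ins for the flags used in the hunk above. */
    #define EXPIRE_FORCE_DELETE_EXPIRED 1   /* delete even on replicas */
    #define EXPIRE_AVOID_DELETE_EXPIRED 2   /* report expiry, but never delete */
    #define LOOKUP_WRITE    (1 << 0)
    #define LOOKUP_NOEXPIRE (1 << 1)

    /* Translate lookup flags into expire flags, as lookupKey() does in the diff. */
    static int expire_flags_from_lookup(int lookup_flags, int is_ro_replica) {
        int expire_flags = 0;
        if ((lookup_flags & LOOKUP_WRITE) && !is_ro_replica)
            expire_flags |= EXPIRE_FORCE_DELETE_EXPIRED;
        if (lookup_flags & LOOKUP_NOEXPIRE)
            expire_flags |= EXPIRE_AVOID_DELETE_EXPIRED;
        return expire_flags;
    }

    int main(void) {
        printf("%d\n", expire_flags_from_lookup(LOOKUP_WRITE, 0));    /* 1 */
        printf("%d\n", expire_flags_from_lookup(LOOKUP_NOEXPIRE, 0)); /* 2 */
        printf("%d\n", expire_flags_from_lookup(LOOKUP_WRITE, 1));    /* 0: read-only replica */
        return 0;
    }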
@@ -747,6 +757,8 @@ void keysCommand(client *c) {
}
decrRefCount(keyobj);
}
if (c->flags & CLIENT_CLOSE_ASAP)
break;
}
dictReleaseIterator(di);
setDeferredArrayLen(c,replylen,numkeys);
@@ -1578,8 +1590,8 @@ void deleteExpiredKeyAndPropagate(redisDb *db, robj *keyobj) {
 *    because call() handles server.also_propagate(); or
 * 2. Outside of call(): Example: Active-expire, eviction.
 *    In this the caller must remember to call
 *    propagatePendingCommands, preferably at the end of
 *    the deletion batch, so that DELs will be wrapped
 *    postExecutionUnitOperations, preferably just after a
 *    single deletion batch, so that DELs will NOT be wrapped
 *    in MULTI/EXEC */
void propagateDeletion(redisDb *db, robj *key, int lazy) {
robj *argv[2];
@@ -1657,13 +1669,17 @@ int keyIsExpired(redisDb *db, robj *key) {
 *
 * On replicas, this function does not delete expired keys by default, but
 * it still returns 1 if the key is logically expired. To force deletion
 * of logically expired keys even on replicas, set force_delete_expired to
 * a non-zero value. Note though that if the current client is executing
 * of logically expired keys even on replicas, use the EXPIRE_FORCE_DELETE_EXPIRED
 * flag. Note though that if the current client is executing
 * replicated commands from the master, keys are never considered expired.
 *
 * On the other hand, if you just want expiration check, but need to avoid
 * the actual key deletion and propagation of the deletion, use the
 * EXPIRE_AVOID_DELETE_EXPIRED flag.
 *
 * The return value of the function is 0 if the key is still valid,
 * otherwise the function returns 1 if the key is expired. */
int expireIfNeeded(redisDb *db, robj *key, int force_delete_expired) {
int expireIfNeeded(redisDb *db, robj *key, int flags) {
if (!keyIsExpired(db,key)) return 0;

/* If we are running in the context of a replica, instead of
@@ -1681,9 +1697,14 @@ int expireIfNeeded(redisDb *db, robj *key, int force_delete_expired) {
 * expired. */
if (server.masterhost != NULL) {
if (server.current_client == server.master) return 0;
if (!force_delete_expired) return 1;
if (!(flags & EXPIRE_FORCE_DELETE_EXPIRED)) return 1;
}

/* In some cases we're explicitly instructed to return an indication of a
 * missing key without actually deleting it, even on masters. */
if (flags & EXPIRE_AVOID_DELETE_EXPIRED)
return 1;

/* If clients are paused, we keep the current dataset constant,
 * but return to the client what we believe is the right state. Typically,
 * at the end of the pause we will properly expire the key OR we will
@@ -1749,8 +1770,9 @@ int64_t getAllKeySpecsFlags(struct redisCommand *cmd, int inv) {
 * found in other valid keyspecs.
 */
int getKeysUsingKeySpecs(struct redisCommand *cmd, robj **argv, int argc, int search_flags, getKeysResult *result) {
int j, i, k = 0, last, first, step;
int j, i, last, first, step;
keyReference *keys;
result->numkeys = 0;

for (j = 0; j < cmd->key_specs_num; j++) {
keySpec *spec = cmd->key_specs + j;
@@ -1815,7 +1837,7 @@ int getKeysUsingKeySpecs(struct redisCommand *cmd, robj **argv, int argc, int se
}

int count = ((last - first)+1);
keys = getKeysPrepareResult(result, count);
keys = getKeysPrepareResult(result, result->numkeys + count);

/* First or last is out of bounds, which indicates a syntax error */
if (last >= argc || last < first || first >= argc) {
@@ -1836,8 +1858,9 @@ int getKeysUsingKeySpecs(struct redisCommand *cmd, robj **argv, int argc, int se
serverPanic("Redis built-in command declared keys positions not matching the arity requirements.");
}
}
keys[k].pos = i;
keys[k++].flags = spec->flags;
keys[result->numkeys].pos = i;
keys[result->numkeys].flags = spec->flags;
result->numkeys++;
}

/* Handle incomplete specs (only after we added the current spec
@@ -1858,8 +1881,7 @@ invalid_spec:
}
}

result->numkeys = k;
return k;
return result->numkeys;
}

/* Return all the arguments that are keys in the command passed via argc / argv.
@@ -1880,14 +1902,6 @@ int getKeysFromCommandWithSpecs(struct redisCommand *cmd, robj **argv, int argc,
/* The command has at least one key-spec marked as VARIABLE_FLAGS */
int has_varflags = (getAllKeySpecsFlags(cmd, 0) & CMD_KEY_VARIABLE_FLAGS);

/* Flags indicating that we have a getkeys callback */
int has_module_getkeys = cmd->flags & CMD_MODULE_GETKEYS;

/* The key-spec that's auto generated by RM_CreateCommand sets VARIABLE_FLAGS since no flags are given.
 * If the module provides getkeys callback, we'll prefer it, but if it didn't, we'll use key-spec anyway. */
if ((cmd->flags & CMD_MODULE) && has_varflags && !has_module_getkeys)
has_varflags = 0;

/* We prefer key-specs if there are any, and their flags are reliable. */
if (has_keyspec && !has_varflags) {
int ret = getKeysUsingKeySpecs(cmd,argv,argc,search_flags,result);
@@ -1898,7 +1912,7 @@ int getKeysFromCommandWithSpecs(struct redisCommand *cmd, robj **argv, int argc,
}

/* Resort to getkeys callback methods. */
if (has_module_getkeys)
if (cmd->flags & CMD_MODULE_GETKEYS)
return moduleGetCommandKeysViaAPI(cmd,argv,argc,result);

/* We use native getkeys as a last resort, since not all these native getkeys provide
@@ -2179,7 +2193,8 @@ int sortROGetKeys(struct redisCommand *cmd, robj **argv, int argc, getKeysResult
keys = getKeysPrepareResult(result, 1);
keys[0].pos = 1; /* <sort-key> is always present. */
keys[0].flags = CMD_KEY_RO | CMD_KEY_ACCESS;
return 1;
result->numkeys = 1;
return result->numkeys;
}

/* Helper function to extract keys from the SORT command.
@@ -2237,7 +2252,7 @@ int sortGetKeys(struct redisCommand *cmd, robj **argv, int argc, getKeysResult *

/* This command declares incomplete keys, so the flags are correctly set for this function */
int migrateGetKeys(struct redisCommand *cmd, robj **argv, int argc, getKeysResult *result) {
int i, num, first;
int i, j, num, first;
keyReference *keys;
UNUSED(cmd);

@@ -2246,15 +2261,35 @@ int migrateGetKeys(struct redisCommand *cmd, robj **argv, int argc, getKeysResul
num = 1;

/* But check for the extended one with the KEYS option. */
struct {
char* name;
int skip;
} skip_keywords[] = {
{"copy", 0},
{"replace", 0},
{"auth", 1},
{"auth2", 2},
{NULL, 0}
};
if (argc > 6) {
for (i = 6; i < argc; i++) {
if (!strcasecmp(argv[i]->ptr,"keys") &&
sdslen(argv[3]->ptr) == 0)
{
first = i+1;
num = argc-first;
if (!strcasecmp(argv[i]->ptr, "keys")) {
if (sdslen(argv[3]->ptr) > 0) {
/* This is a syntax error. So ignore the keys and leave
 * the syntax error to be handled by migrateCommand. */
num = 0;
} else {
first = i + 1;
num = argc - first;
}
break;
}
for (j = 0; skip_keywords[j].name != NULL; j++) {
if (!strcasecmp(argv[i]->ptr, skip_keywords[j].name)) {
i += skip_keywords[j].skip;
break;
}
}
}
}
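The skip table added in the MIGRATE hunk above exists so that option values (an AUTH password, AUTH2 credentials) are never mistaken for the KEYS token. A rough standalone illustration of that scan over plain C strings; it deliberately omits the empty-key check from the real code and uses hypothetical names:

    #include <stdio.h>
    #include <strings.h>   /* strcasecmp */

    /* Keywords after the timeout argument and how many values each consumes. */
    struct skip_kw { const char *name; int skip; };
    static const struct skip_kw skip_keywords[] = {
        {"copy", 0}, {"replace", 0}, {"auth", 1}, {"auth2", 2}, {NULL, 0}
    };

    /* Return the index of the first key after the KEYS token, or -1 if absent. */
    static int find_keys_start(int argc, const char **argv) {
        for (int i = 6; i < argc; i++) {
            if (!strcasecmp(argv[i], "keys")) return i + 1;
            for (int j = 0; skip_keywords[j].name != NULL; j++) {
                if (!strcasecmp(argv[i], skip_keywords[j].name)) {
                    i += skip_keywords[j].skip;   /* jump over the keyword's values */
                    break;
                }
            }
        }
        return -1;
    }

    int main(void) {
        const char *argv[] = { "migrate", "host", "6379", "", "0", "5000",
                               "auth2", "user", "keys", "keys", "k1", "k2" };
        /* "user"/"keys" after AUTH2 are credentials, not the KEYS token. */
        printf("keys start at argv[%d]\n", find_keys_start(12, argv)); /* prints 10 */
        return 0;
    }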
@@ -2389,6 +2424,7 @@ int setGetKeys(struct redisCommand *cmd, robj **argv, int argc, getKeysResult *r
 * read-only if the BITFIELD GET subcommand is used. */
int bitfieldGetKeys(struct redisCommand *cmd, robj **argv, int argc, getKeysResult *result) {
keyReference *keys;
int readonly = 1;
UNUSED(cmd);

keys = getKeysPrepareResult(result, 1);
@@ -2399,11 +2435,23 @@ int bitfieldGetKeys(struct redisCommand *cmd, robj **argv, int argc, getKeysResu
int remargs = argc - i - 1; /* Remaining args other than current. */
char *arg = argv[i]->ptr;
if (!strcasecmp(arg, "get") && remargs >= 2) {
keys[0].flags = CMD_KEY_RO | CMD_KEY_ACCESS;
return 1;
i += 2;
} else if ((!strcasecmp(arg, "set") || !strcasecmp(arg, "incrby")) && remargs >= 3) {
readonly = 0;
i += 3;
break;
} else if (!strcasecmp(arg, "overflow") && remargs >= 1) {
i += 1;
} else {
readonly = 0; /* Syntax error. safer to assume non-RO. */
break;
}
}

keys[0].flags = CMD_KEY_RW | CMD_KEY_ACCESS | CMD_KEY_UPDATE;
if (readonly) {
keys[0].flags = CMD_KEY_RO | CMD_KEY_ACCESS;
} else {
keys[0].flags = CMD_KEY_RW | CMD_KEY_ACCESS | CMD_KEY_UPDATE;
}
return 1;
}
129  src/debug.c

@@ -34,6 +34,7 @@
#include "crc64.h"
#include "bio.h"
#include "quicklist.h"
#include "cluster.h"

#include <arpa/inet.h>
#include <signal.h>
@@ -466,7 +467,7 @@ void debugCommand(client *c) {
"    default.",
"QUICKLIST-PACKED-THRESHOLD <size>",
"    Sets the threshold for elements to be inserted as plain vs packed nodes",
"    Default value is 1GB, allows values up to 4GB",
"    Default value is 1GB, allows values up to 4GB. Setting to 0 restores to default.",
"SET-SKIP-CHECKSUM-VALIDATION <0|1>",
"    Enables or disables checksum checks for RDB files and RESTORE's payload.",
"SLEEP <seconds>",
@@ -489,7 +490,9 @@ void debugCommand(client *c) {
"    In case NEVER is provided the last observed peak will never be reset",
"    In case RESET is provided the peak reset time will be restored to the default value",
"REPLYBUFFER RESIZING <0|1>",
"    Enable or disable the replay buffer resize cron job",
"    Enable or disable the reply buffer resize cron job",
"CLUSTERLINK KILL <to|from|all> <node-id>",
"    Kills the link based on the direction to/from (both) with the provided node." ,
NULL
};
addReplyHelp(c, help);
@@ -516,7 +519,7 @@ NULL
restartServer(flags,delay);
addReplyError(c,"failed to restart the server. Check server logs.");
} else if (!strcasecmp(c->argv[1]->ptr,"oom")) {
void *ptr = zmalloc(ULONG_MAX); /* Should trigger an out of memory. */
void *ptr = zmalloc(SIZE_MAX/2); /* Should trigger an out of memory. */
zfree(ptr);
addReply(c,shared.ok);
} else if (!strcasecmp(c->argv[1]->ptr,"assert")) {
@@ -801,9 +804,12 @@ NULL
addReplyError(c,"RESP2 is not supported by this command");
return;
}
uint64_t old_flags = c->flags;
c->flags |= CLIENT_PUSHING;
addReplyPushLen(c,2);
addReplyBulkCString(c,"server-cpu-usage");
addReplyLongLong(c,42);
if (!(old_flags & CLIENT_PUSHING)) c->flags &= ~CLIENT_PUSHING;
/* Push replies are not synchronous replies, so we emit also a
 * normal reply in order for blocking clients just discarding the
 * push reply, to actually consume the reply and continue. */
@@ -946,6 +952,10 @@ NULL
else
addReply(c, shared.ok);
} else if(!strcasecmp(c->argv[1]->ptr,"client-eviction") && c->argc == 2) {
if (!server.client_mem_usage_buckets) {
addReplyError(c,"maxmemory-clients is disabled.");
return;
}
sds bucket_info = sdsempty();
for (int j = 0; j < CLIENT_MEM_USAGE_BUCKETS; j++) {
if (j == 0)
@@ -991,6 +1001,33 @@ NULL
return;
}
addReply(c, shared.ok);
} else if(!strcasecmp(c->argv[1]->ptr,"CLUSTERLINK") &&
           !strcasecmp(c->argv[2]->ptr,"KILL") &&
           c->argc == 5) {
if (!server.cluster_enabled) {
addReplyError(c, "Debug option only available for cluster mode enabled setup!");
return;
}

/* Find the node. */
clusterNode *n = clusterLookupNode(c->argv[4]->ptr, sdslen(c->argv[4]->ptr));
if (!n) {
addReplyErrorFormat(c,"Unknown node %s", (char*)c->argv[4]->ptr);
return;
}

/* Terminate the link based on the direction or all. */
if (!strcasecmp(c->argv[3]->ptr,"from")) {
freeClusterLink(n->inbound_link);
} else if (!strcasecmp(c->argv[3]->ptr,"to")) {
freeClusterLink(n->link);
} else if (!strcasecmp(c->argv[3]->ptr,"all")) {
freeClusterLink(n->link);
freeClusterLink(n->inbound_link);
} else {
addReplyErrorFormat(c, "Unknown direction %s", (char*) c->argv[3]->ptr);
}
addReply(c,shared.ok);
} else {
addReplySubcommandSyntaxError(c);
return;
@@ -1123,73 +1160,88 @@ void bugReportStart(void) {
}

#ifdef HAVE_BACKTRACE
static void *getMcontextEip(ucontext_t *uc) {

/* Returns the current eip and set it to the given new value (if its not NULL) */
static void* getAndSetMcontextEip(ucontext_t *uc, void *eip) {
#define NOT_SUPPORTED() do {\
    UNUSED(uc);\
    UNUSED(eip);\
    return NULL;\
} while(0)
#if defined(__APPLE__) && !defined(MAC_OS_X_VERSION_10_6)
#define GET_SET_RETURN(target_var, new_val) do {\
    void *old_val = (void*)target_var; \
    if (new_val) { \
        void **temp = (void**)&target_var; \
        *temp = new_val; \
    } \
    return old_val; \
} while(0)
#if defined(__APPLE__) && !defined(MAC_OS_10_6_DETECTED)
    /* OSX < 10.6 */
    #if defined(__x86_64__)
    return (void*) uc->uc_mcontext->__ss.__rip;
    GET_SET_RETURN(uc->uc_mcontext->__ss.__rip, eip);
    #elif defined(__i386__)
    return (void*) uc->uc_mcontext->__ss.__eip;
    GET_SET_RETURN(uc->uc_mcontext->__ss.__eip, eip);
    #else
    return (void*) uc->uc_mcontext->__ss.__srr0;
    GET_SET_RETURN(uc->uc_mcontext->__ss.__srr0, eip);
    #endif
#elif defined(__APPLE__) && defined(MAC_OS_X_VERSION_10_6)
#elif defined(__APPLE__) && defined(MAC_OS_10_6_DETECTED)
    /* OSX >= 10.6 */
    #if defined(_STRUCT_X86_THREAD_STATE64) && !defined(__i386__)
    return (void*) uc->uc_mcontext->__ss.__rip;
    GET_SET_RETURN(uc->uc_mcontext->__ss.__rip, eip);
    #elif defined(__i386__)
    return (void*) uc->uc_mcontext->__ss.__eip;
    GET_SET_RETURN(uc->uc_mcontext->__ss.__eip, eip);
    #else
    /* OSX ARM64 */
    return (void*) arm_thread_state64_get_pc(uc->uc_mcontext->__ss);
    void *old_val = (void*)arm_thread_state64_get_pc(uc->uc_mcontext->__ss);
    if (eip) {
        arm_thread_state64_set_pc_fptr(uc->uc_mcontext->__ss, eip);
    }
    return old_val;
    #endif
#elif defined(__linux__)
    /* Linux */
    #if defined(__i386__) || ((defined(__X86_64__) || defined(__x86_64__)) && defined(__ILP32__))
    return (void*) uc->uc_mcontext.gregs[14]; /* Linux 32 */
    GET_SET_RETURN(uc->uc_mcontext.gregs[14], eip);
    #elif defined(__X86_64__) || defined(__x86_64__)
    return (void*) uc->uc_mcontext.gregs[16]; /* Linux 64 */
    GET_SET_RETURN(uc->uc_mcontext.gregs[16], eip);
    #elif defined(__ia64__) /* Linux IA64 */
    return (void*) uc->uc_mcontext.sc_ip;
    GET_SET_RETURN(uc->uc_mcontext.sc_ip, eip);
    #elif defined(__arm__) /* Linux ARM */
    return (void*) uc->uc_mcontext.arm_pc;
    GET_SET_RETURN(uc->uc_mcontext.arm_pc, eip);
    #elif defined(__aarch64__) /* Linux AArch64 */
    return (void*) uc->uc_mcontext.pc;
    GET_SET_RETURN(uc->uc_mcontext.pc, eip);
    #else
    NOT_SUPPORTED();
    #endif
#elif defined(__FreeBSD__)
    /* FreeBSD */
    #if defined(__i386__)
    return (void*) uc->uc_mcontext.mc_eip;
    GET_SET_RETURN(uc->uc_mcontext.mc_eip, eip);
    #elif defined(__x86_64__)
    return (void*) uc->uc_mcontext.mc_rip;
    GET_SET_RETURN(uc->uc_mcontext.mc_rip, eip);
    #else
    NOT_SUPPORTED();
    #endif
#elif defined(__OpenBSD__)
    /* OpenBSD */
    #if defined(__i386__)
    return (void*) uc->sc_eip;
    GET_SET_RETURN(uc->sc_eip, eip);
    #elif defined(__x86_64__)
    return (void*) uc->sc_rip;
    GET_SET_RETURN(uc->sc_rip, eip);
    #else
    NOT_SUPPORTED();
    #endif
#elif defined(__NetBSD__)
    #if defined(__i386__)
    return (void*) uc->uc_mcontext.__gregs[_REG_EIP];
    GET_SET_RETURN(uc->uc_mcontext.__gregs[_REG_EIP], eip);
    #elif defined(__x86_64__)
    return (void*) uc->uc_mcontext.__gregs[_REG_RIP];
    GET_SET_RETURN(uc->uc_mcontext.__gregs[_REG_RIP], eip);
    #else
    NOT_SUPPORTED();
    #endif
#elif defined(__DragonFly__)
    return (void*) uc->uc_mcontext.mc_rip;
    GET_SET_RETURN(uc->uc_mcontext.mc_rip, eip);
#else
    NOT_SUPPORTED();
#endif
@@ -1220,7 +1272,7 @@ void logRegisters(ucontext_t *uc) {
} while(0)

/* OSX */
#if defined(__APPLE__) && defined(MAC_OS_X_VERSION_10_6)
#if defined(__APPLE__) && defined(MAC_OS_10_6_DETECTED)
    /* OSX AMD64 */
    #if defined(_STRUCT_X86_THREAD_STATE64) && !defined(__i386__)
    serverLog(LL_WARNING,
@@ -1951,6 +2003,10 @@ void dumpCodeAroundEIP(void *eip) {
}
}

void invalidFunctionWasCalled() {}

typedef void (*invalidFunctionWasCalledType)();

void sigsegvHandler(int sig, siginfo_t *info, void *secret) {
UNUSED(secret);
UNUSED(info);
@@ -1968,13 +2024,30 @@ void sigsegvHandler(int sig, siginfo_t *info, void *secret) {

#ifdef HAVE_BACKTRACE
ucontext_t *uc = (ucontext_t*) secret;
void *eip = getMcontextEip(uc);
void *eip = getAndSetMcontextEip(uc, NULL);
if (eip != NULL) {
serverLog(LL_WARNING,
"Crashed running the instruction at: %p", eip);
}

logStackTrace(getMcontextEip(uc), 1);
if (eip == info->si_addr) {
/* When eip matches the bad address, it's an indication that we crashed when calling a non-mapped
 * function pointer. In that case the call to backtrace will crash trying to access that address and we
 * won't get a crash report logged. Set it to a valid point to avoid that crash. */

/* This trick allow to avoid compiler warning */
void *ptr;
invalidFunctionWasCalledType *ptr_ptr = (invalidFunctionWasCalledType*)&ptr;
*ptr_ptr = invalidFunctionWasCalled;
getAndSetMcontextEip(uc, ptr);
}

logStackTrace(eip, 1);

if (eip == info->si_addr) {
/* Restore old eip */
getAndSetMcontextEip(uc, eip);
}

logRegisters(uc);
#endif
@@ -2079,7 +2152,7 @@ void watchdogSignalHandler(int sig, siginfo_t *info, void *secret) {

serverLogFromHandler(LL_WARNING,"\n--- WATCHDOG TIMER EXPIRED ---");
#ifdef HAVE_BACKTRACE
logStackTrace(getMcontextEip(uc), 1);
logStackTrace(getAndSetMcontextEip(uc, NULL), 1);
#else
serverLogFromHandler(LL_WARNING,"Sorry: no support for backtrace().");
#endif
74  src/dict.c

@@ -47,15 +47,15 @@
#include "zmalloc.h"
#include "redisassert.h"

/* Using dictEnableResize() / dictDisableResize() we make possible to
 * enable/disable resizing of the hash table as needed. This is very important
/* Using dictEnableResize() / dictDisableResize() we make possible to disable
 * resizing and rehashing of the hash table as needed. This is very important
 * for Redis, as we use copy-on-write and don't want to move too much memory
 * around when there is a child performing saving operations.
 *
 * Note that even when dict_can_resize is set to 0, not all resizes are
 * prevented: a hash table is still allowed to grow if the ratio between
 * the number of elements and the buckets > dict_force_resize_ratio. */
static int dict_can_resize = 1;
static dictResizeEnable dict_can_resize = DICT_RESIZE_ENABLE;
static unsigned int dict_force_resize_ratio = 5;

/* -------------------------- private prototypes ---------------------------- */
@@ -127,7 +127,7 @@ int dictResize(dict *d)
{
unsigned long minimal;

if (!dict_can_resize || dictIsRehashing(d)) return DICT_ERR;
if (dict_can_resize != DICT_RESIZE_ENABLE || dictIsRehashing(d)) return DICT_ERR;
minimal = d->ht_used[0];
if (minimal < DICT_HT_INITIAL_SIZE)
minimal = DICT_HT_INITIAL_SIZE;
@@ -210,7 +210,15 @@ int dictTryExpand(dict *d, unsigned long size) {
 * work it does would be unbound and the function may block for a long time. */
int dictRehash(dict *d, int n) {
int empty_visits = n*10; /* Max number of empty buckets to visit. */
if (!dictIsRehashing(d)) return 0;
unsigned long s0 = DICTHT_SIZE(d->ht_size_exp[0]);
unsigned long s1 = DICTHT_SIZE(d->ht_size_exp[1]);
if (dict_can_resize == DICT_RESIZE_FORBID || !dictIsRehashing(d)) return 0;
if (dict_can_resize == DICT_RESIZE_AVOID &&
    ((s1 > s0 && s1 / s0 < dict_force_resize_ratio) ||
     (s1 < s0 && s0 / s1 < dict_force_resize_ratio)))
{
    return 0;
}

while(n-- && d->ht_used[0] != 0) {
dictEntry *de, *nextde;
@@ -754,19 +762,30 @@ unsigned int dictGetSomeKeys(dict *d, dictEntry **des, unsigned int count) {
} else {
emptylen = 0;
while (he) {
/* Collect all the elements of the buckets found non
 * empty while iterating. */
*des = he;
des++;
/* Collect all the elements of the buckets found non empty while iterating.
 * To avoid the issue of being unable to sample the end of a long chain,
 * we utilize the Reservoir Sampling algorithm to optimize the sampling process.
 * This means that even when the maximum number of samples has been reached,
 * we continue sampling until we reach the end of the chain.
 * See https://en.wikipedia.org/wiki/Reservoir_sampling. */
if (stored < count) {
    des[stored] = he;
} else {
    unsigned long r = randomULong() % (stored + 1);
    if (r < count) des[r] = he;
}

he = he->next;
stored++;
if (stored == count) return stored;
}
if (stored >= count) goto end;
}
}
i = (i+1) & maxsizemask;
}
return stored;

end:
return stored > count ? count : stored;
}

/* This is like dictGetRandomKey() from the POV of the API, but will do more
@@ -1000,28 +1019,25 @@ static int _dictExpandIfNeeded(dict *d)
 * table (global setting) or we should avoid it but the ratio between
 * elements/buckets is over the "safe" threshold, we resize doubling
 * the number of buckets. */
if (d->ht_used[0] >= DICTHT_SIZE(d->ht_size_exp[0]) &&
    (dict_can_resize ||
     d->ht_used[0]/ DICTHT_SIZE(d->ht_size_exp[0]) > dict_force_resize_ratio) &&
    dictTypeExpandAllowed(d))
if ((dict_can_resize == DICT_RESIZE_ENABLE &&
     d->ht_used[0] >= DICTHT_SIZE(d->ht_size_exp[0])) ||
    (dict_can_resize != DICT_RESIZE_FORBID &&
     d->ht_used[0] / DICTHT_SIZE(d->ht_size_exp[0]) > dict_force_resize_ratio))
{
    if (!dictTypeExpandAllowed(d))
        return DICT_OK;
    return dictExpand(d, d->ht_used[0] + 1);
}
return DICT_OK;
}

/* TODO: clz optimization */
/* Our hash table capability is a power of two */
static signed char _dictNextExp(unsigned long size)
{
unsigned char e = DICT_HT_INITIAL_EXP;

if (size <= DICT_HT_INITIAL_SIZE) return DICT_HT_INITIAL_EXP;
if (size >= LONG_MAX) return (8*sizeof(long)-1);
while(1) {
    if (((unsigned long)1<<e) >= size)
        return e;
    e++;
}

return 8*sizeof(long) - __builtin_clzl(size-1);
}

/* Returns the index of a free slot that can be populated with
@@ -1063,12 +1079,8 @@ void dictEmpty(dict *d, void(callback)(dict*)) {
d->pauserehash = 0;
}

void dictEnableResize(void) {
    dict_can_resize = 1;
}

void dictDisableResize(void) {
    dict_can_resize = 0;
void dictSetResizeEnabled(dictResizeEnable enable) {
    dict_can_resize = enable;
}

uint64_t dictGetHash(dict *d, const void *key) {
@@ -1111,7 +1123,9 @@ size_t _dictGetStatsHt(char *buf, size_t bufsize, dict *d, int htidx) {

if (d->ht_used[htidx] == 0) {
    return snprintf(buf,bufsize,
        "No stats available for empty dictionaries\n");
        "Hash table %d stats (%s):\n"
        "No stats available for empty dictionaries\n",
        htidx, (htidx == 0) ? "main hash table" : "rehashing target");
}

/* Compute stats. */

@@ -169,6 +169,12 @@ typedef void (dictScanBucketFunction)(dict *d, dictEntry **bucketref);
#define randomULong() random()
#endif

typedef enum {
    DICT_RESIZE_ENABLE,
    DICT_RESIZE_AVOID,
    DICT_RESIZE_FORBID,
} dictResizeEnable;

/* API */
dict *dictCreate(dictType *type);
int dictExpand(dict *d, unsigned long size);
@@ -195,8 +201,7 @@ void dictGetStats(char *buf, size_t bufsize, dict *d);
uint64_t dictGenHashFunction(const void *key, size_t len);
uint64_t dictGenCaseHashFunction(const unsigned char *buf, size_t len);
void dictEmpty(dict *d, void(callback)(dict*));
void dictEnableResize(void);
void dictDisableResize(void);
void dictSetResizeEnabled(dictResizeEnable enable);
int dictRehash(dict *d, int n);
int dictRehashMilliseconds(dict *d, int ms);
void dictSetHashFunctionSeed(uint8_t *seed);
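The dictGetSomeKeys hunk above switches bucket-chain sampling to reservoir sampling so that long chains can contribute elements past the first `count` entries. A minimal generic sketch of that algorithm over a plain array (not the actual dict chain walk; function and variable names are illustrative):

    #include <stdio.h>
    #include <stdlib.h>

    /* Keep a uniform sample of up to k items from a sequence of unknown length. */
    static size_t reservoir_sample(const int *items, size_t n, int *out, size_t k) {
        size_t stored = 0;
        for (size_t i = 0; i < n; i++) {
            if (stored < k) {
                out[stored] = items[i];            /* fill the reservoir first */
            } else {
                size_t r = (size_t)rand() % (stored + 1);
                if (r < k) out[r] = items[i];      /* replace with probability k/(stored+1) */
            }
            stored++;
        }
        return stored < k ? stored : k;            /* samples actually produced */
    }

    int main(void) {
        int chain[] = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10};
        int sample[3];
        size_t got = reservoir_sample(chain, 10, sample, 3);
        for (size_t i = 0; i < got; i++) printf("%d ", sample[i]);
        printf("\n");
        return 0;
    }

Each element of the walked chain ends up in the output with equal probability, which is exactly the property the new `randomULong() % (stored + 1)` replacement step relies on.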
19  src/eval.c

@@ -385,17 +385,17 @@ uint64_t evalGetCommandFlags(client *c, uint64_t cmd_flags) {
int evalsha = c->cmd->proc == evalShaCommand || c->cmd->proc == evalShaRoCommand;
if (evalsha && sdslen(c->argv[1]->ptr) != 40)
return cmd_flags;
uint64_t script_flags;
evalCalcFunctionName(evalsha, c->argv[1]->ptr, funcname);
char *lua_cur_script = funcname + 2;
dictEntry *de = dictFind(lctx.lua_scripts, lua_cur_script);
uint64_t script_flags;
if (!de) {
c->cur_script = dictFind(lctx.lua_scripts, lua_cur_script);
if (!c->cur_script) {
if (evalsha)
return cmd_flags;
if (evalExtractShebangFlags(c->argv[1]->ptr, &script_flags, NULL, NULL) == C_ERR)
return cmd_flags;
} else {
luaScript *l = dictGetVal(de);
luaScript *l = dictGetVal(c->cur_script);
script_flags = l->flags;
}
if (script_flags & SCRIPT_FLAG_EVAL_COMPAT_MODE)
@@ -502,7 +502,12 @@ void evalGenericCommand(client *c, int evalsha) {
return;
}

evalCalcFunctionName(evalsha, c->argv[1]->ptr, funcname);
if (c->cur_script) {
funcname[0] = 'f', funcname[1] = '_';
memcpy(funcname+2, dictGetKey(c->cur_script), 40);
funcname[42] = '\0';
} else
evalCalcFunctionName(evalsha, c->argv[1]->ptr, funcname);

/* Push the pcall error handler function on the stack. */
lua_getglobal(lua, "__redis__err__handler");
@@ -531,7 +536,9 @@ void evalGenericCommand(client *c, int evalsha) {
}

char *lua_cur_script = funcname + 2;
dictEntry *de = dictFind(lctx.lua_scripts, lua_cur_script);
dictEntry *de = c->cur_script;
if (!de)
de = dictFind(lctx.lua_scripts, lua_cur_script);
luaScript *l = dictGetVal(de);
int ro = c->cmd->proc == evalRoCommand || c->cmd->proc == evalShaRoCommand;
12  src/evict.c

@@ -590,7 +590,7 @@ int performEvictions(void) {
{
struct evictionPoolEntry *pool = EvictionPoolLRU;

while(bestkey == NULL) {
while (bestkey == NULL) {
unsigned long total_keys = 0, keys;

/* We don't want to make local-db choices when expiring keys,
@@ -732,12 +732,18 @@ cant_free:
/* At this point, we have run out of evictable items. It's possible
 * that some items are being freed in the lazyfree thread. Perform a
 * short wait here if such jobs exist, but don't wait long. */
if (bioPendingJobsOfType(BIO_LAZY_FREE)) {
usleep(eviction_time_limit_us);
mstime_t lazyfree_latency;
latencyStartMonitor(lazyfree_latency);
while (bioPendingJobsOfType(BIO_LAZY_FREE) &&
       elapsedUs(evictionTimer) < eviction_time_limit_us) {
if (getMaxmemoryState(NULL,NULL,NULL,NULL) == C_OK) {
result = EVICT_OK;
break;
}
usleep(eviction_time_limit_us < 1000 ? eviction_time_limit_us : 1000);
}
latencyEndMonitor(lazyfree_latency);
latencyAddSampleIfNeeded("eviction-lazyfree",lazyfree_latency);
}

serverAssert(server.core_propagates); /* This function should not be re-entrant */
21  src/expire.c

@@ -425,6 +425,10 @@ void expireSlaveKeys(void) {
if ((cycles % 64) == 0 && mstime()-start > 1) break;
if (dictSize(slaveKeysWithExpire) == 0) break;
}

/* Propagate the DEL (writable replicas do not propagate anything to other replicas,
 * but they might propagate to AOF) and trigger module hooks. */
propagatePendingCommands();
}

/* Track keys that received an EXPIRE or similar command in the context
@@ -651,10 +655,19 @@ void expireGenericCommand(client *c, long long basetime, int unit) {
} else {
setExpire(c,c->db,key,when);
addReply(c,shared.cone);
/* Propagate as PEXPIREAT millisecond-timestamp */
robj *when_obj = createStringObjectFromLongLong(when);
rewriteClientCommandVector(c, 3, shared.pexpireat, key, when_obj);
decrRefCount(when_obj);
/* Propagate as PEXPIREAT millisecond-timestamp
 * Only rewrite the command arg if not already PEXPIREAT */
if (c->cmd->proc != pexpireatCommand) {
    rewriteClientCommandArgument(c,0,shared.pexpireat);
}

/* Avoid creating a string object when it's the same as argv[2] parameter */
if (basetime != 0 || unit == UNIT_SECONDS) {
    robj *when_obj = createStringObjectFromLongLong(when);
    rewriteClientCommandArgument(c,2,when_obj);
    decrRefCount(when_obj);
}

signalModifiedKey(c,c->db,key);
notifyKeyspaceEvent(NOTIFY_GENERIC,"expire",key,c->db->id);
server.dirty++;

@@ -51,7 +51,6 @@
#define REGISTRY_LOAD_CTX_NAME "__LIBRARY_CTX__"
#define LIBRARY_API_NAME "__LIBRARY_API__"
#define GLOBALS_API_NAME "__GLOBALS_API__"
#define LOAD_TIMEOUT_MS 500

/* Lua engine ctx */
typedef struct luaEngineCtx {
@@ -67,6 +66,7 @@ typedef struct luaFunctionCtx {
typedef struct loadCtx {
functionLibInfo *li;
monotime start_time;
size_t timeout;
} loadCtx;

typedef struct registerFunctionArgs {
@@ -84,7 +84,7 @@ static void luaEngineLoadHook(lua_State *lua, lua_Debug *ar) {
UNUSED(ar);
loadCtx *load_ctx = luaGetFromRegistry(lua, REGISTRY_LOAD_CTX_NAME);
uint64_t duration = elapsedMs(load_ctx->start_time);
if (duration > LOAD_TIMEOUT_MS) {
if (load_ctx->timeout > 0 && duration > load_ctx->timeout) {
lua_sethook(lua, luaEngineLoadHook, LUA_MASKLINE, 0);

luaPushError(lua,"FUNCTION LOAD timeout");
@@ -99,7 +99,7 @@ static void luaEngineLoadHook(lua_State *lua, lua_Debug *ar) {
 *
 * Return NULL on compilation error and set the error to the err variable
 */
static int luaEngineCreate(void *engine_ctx, functionLibInfo *li, sds blob, sds *err) {
static int luaEngineCreate(void *engine_ctx, functionLibInfo *li, sds blob, size_t timeout, sds *err) {
int ret = C_ERR;
luaEngineCtx *lua_engine_ctx = engine_ctx;
lua_State *lua = lua_engine_ctx->lua;
@@ -123,6 +123,7 @@ static int luaEngineCreate(void *engine_ctx, functionLibInfo *li, sds blob, sds
loadCtx load_ctx = {
    .li = li,
    .start_time = getMonotonicUs(),
    .timeout = timeout,
};
luaSaveOnRegistry(lua, REGISTRY_LOAD_CTX_NAME, &load_ctx);

@@ -33,6 +33,8 @@
#include "adlist.h"
#include "atomicvar.h"

#define LOAD_TIMEOUT_MS 500

typedef enum {
    restorePolicy_Flush, restorePolicy_Append, restorePolicy_Replace
} restorePolicy;
@@ -245,7 +247,7 @@ functionsLibCtx* functionsLibCtxCreate() {
 */
int functionLibCreateFunction(sds name, void *function, functionLibInfo *li, sds desc, uint64_t f_flags, sds *err) {
if (functionsVerifyName(name) != C_OK) {
*err = sdsnew("Function names can only contain letters and numbers and must be at least one character long");
*err = sdsnew("Library names can only contain letters, numbers, or underscores(_) and must be at least one character long");
return C_ERR;
}

@@ -608,20 +610,27 @@ void functionKillCommand(client *c) {
 * Note that it does not guarantee the command arguments are right. */
uint64_t fcallGetCommandFlags(client *c, uint64_t cmd_flags) {
robj *function_name = c->argv[1];
functionInfo *fi = dictFetchValue(curr_functions_lib_ctx->functions, function_name->ptr);
if (!fi)
c->cur_script = dictFind(curr_functions_lib_ctx->functions, function_name->ptr);
if (!c->cur_script)
return cmd_flags;
functionInfo *fi = dictGetVal(c->cur_script);
uint64_t script_flags = fi->f_flags;
return scriptFlagsToCmdFlags(cmd_flags, script_flags);
}

static void fcallCommandGeneric(client *c, int ro) {
/* Functions need to be fed to monitors before the commands they execute. */
replicationFeedMonitors(c,server.monitors,c->db->id,c->argv,c->argc);

robj *function_name = c->argv[1];
functionInfo *fi = dictFetchValue(curr_functions_lib_ctx->functions, function_name->ptr);
if (!fi) {
dictEntry *de = c->cur_script;
if (!de)
    de = dictFind(curr_functions_lib_ctx->functions, function_name->ptr);
if (!de) {
addReplyError(c, "Function not found");
return;
}
functionInfo *fi = dictGetVal(de);
engine *engine = fi->li->ei->engine;

long long numkeys;
@@ -819,7 +828,7 @@ void functionFlushCommand(client *c) {
/* FUNCTION HELP */
void functionHelpCommand(client *c) {
const char *help[] = {
"LOAD <ENGINE NAME> <LIBRARY NAME> [REPLACE] [DESCRIPTION <LIBRARY DESCRIPTION>] <LIBRARY CODE>",
"LOAD [REPLACE] <FUNCTION CODE>",
"    Create a new library with the given library name and code.",
"DELETE <LIBRARY NAME>",
"    Delete the given library.",
@@ -847,7 +856,7 @@ void functionHelpCommand(client *c) {
"    * ASYNC: Asynchronously flush the libraries.",
"    * SYNC: Synchronously flush the libraries.",
"DUMP",
"    Returns a serialized payload representing the current libraries, can be restored using FUNCTION RESTORE command",
"    Return a serialized payload representing the current libraries, can be restored using FUNCTION RESTORE command",
"RESTORE <PAYLOAD> [FLUSH|APPEND|REPLACE]",
"    Restore the libraries represented by the given payload, it is possible to give a restore policy to",
"    control how to handle existing libraries (default APPEND):",
@@ -950,7 +959,7 @@ void functionFreeLibMetaData(functionsLibMataData *md) {

/* Compile and save the given library, return the loaded library name on success
 * and NULL on failure. In case on failure the err out param is set with relevant error message */
sds functionsCreateWithLibraryCtx(sds code, int replace, sds* err, functionsLibCtx *lib_ctx) {
sds functionsCreateWithLibraryCtx(sds code, int replace, sds* err, functionsLibCtx *lib_ctx, size_t timeout) {
dictIterator *iter = NULL;
dictEntry *entry = NULL;
functionLibInfo *new_li = NULL;
@@ -961,7 +970,7 @@ sds functionsCreateWithLibraryCtx(sds code, int replace, sds* err, functionsLibC
}

if (functionsVerifyName(md.name)) {
*err = sdsnew("Library names can only contain letters and numbers and must be at least one character long");
*err = sdsnew("Library names can only contain letters, numbers, or underscores(_) and must be at least one character long");
goto error;
}

@@ -984,7 +993,7 @@ sds functionsCreateWithLibraryCtx(sds code, int replace, sds* err, functionsLibC
}

new_li = engineLibraryCreate(md.name, ei, code);
if (engine->create(engine->engine_ctx, new_li, md.code, err) != C_OK) {
if (engine->create(engine->engine_ctx, new_li, md.code, timeout, err) != C_OK) {
goto error;
}

@@ -1052,7 +1061,11 @@ void functionLoadCommand(client *c) {
robj *code = c->argv[argc_pos];
sds err = NULL;
sds library_name = NULL;
if (!(library_name = functionsCreateWithLibraryCtx(code->ptr, replace, &err, curr_functions_lib_ctx)))
size_t timeout = LOAD_TIMEOUT_MS;
if (mustObeyClient(c)) {
    timeout = 0;
}
if (!(library_name = functionsCreateWithLibraryCtx(code->ptr, replace, &err, curr_functions_lib_ctx, timeout)))
{
addReplyErrorSds(c, err);
return;

@@ -53,9 +53,14 @@ typedef struct engine {
/* engine specific context */
void *engine_ctx;

/* Create function callback, get the engine_ctx, and function code.
/* Create function callback, get the engine_ctx, and function code
 * engine_ctx - opaque struct that was created on engine initialization
 * li - library information that need to be provided and when add functions
 * code - the library code
 * timeout - timeout for the library creation (0 for no timeout)
 * err - description of error (if occurred)
 * returns NULL on error and set sds to be the error message */
int (*create)(void *engine_ctx, functionLibInfo *li, sds code, sds *err);
int (*create)(void *engine_ctx, functionLibInfo *li, sds code, size_t timeout, sds *err);

/* Invoking a function, r_ctx is an opaque object (from engine POV).
 * The r_ctx should be used by the engine to interaction with Redis,
@@ -109,7 +114,7 @@ struct functionLibInfo {
};

int functionsRegisterEngine(const char *engine_name, engine *engine_ctx);
sds functionsCreateWithLibraryCtx(sds code, int replace, sds* err, functionsLibCtx *lib_ctx);
sds functionsCreateWithLibraryCtx(sds code, int replace, sds* err, functionsLibCtx *lib_ctx, size_t timeout);
unsigned long functionsMemory();
unsigned long functionsMemoryOverhead();
unsigned long functionsNum();
70
src/geo.c
70
src/geo.c
@ -60,14 +60,20 @@ geoArray *geoArrayCreate(void) {
|
||||
return ga;
|
||||
}
|
||||
|
||||
/* Add a new entry and return its pointer so that the caller can populate
|
||||
* it with data. */
|
||||
geoPoint *geoArrayAppend(geoArray *ga) {
|
||||
/* Add and populate with data a new entry to the geoArray. */
|
||||
geoPoint *geoArrayAppend(geoArray *ga, double *xy, double dist,
|
||||
double score, char *member)
|
||||
{
|
||||
if (ga->used == ga->buckets) {
|
||||
ga->buckets = (ga->buckets == 0) ? 8 : ga->buckets*2;
|
||||
ga->array = zrealloc(ga->array,sizeof(geoPoint)*ga->buckets);
|
||||
}
|
||||
geoPoint *gp = ga->array+ga->used;
|
||||
gp->longitude = xy[0];
|
||||
gp->latitude = xy[1];
|
||||
gp->dist = dist;
|
||||
gp->member = member;
|
||||
gp->score = score;
|
||||
ga->used++;
|
||||
return gp;
|
||||
}
|
||||
@ -205,38 +211,38 @@ int extractBoxOrReply(client *c, robj **argv, double *conversion,
|
||||
* the kilometer. */
|
||||
void addReplyDoubleDistance(client *c, double d) {
|
||||
char dbuf[128];
|
||||
int dlen = snprintf(dbuf, sizeof(dbuf), "%.4f", d);
const int dlen = fixedpoint_d2string(dbuf, sizeof(dbuf), d, 4);
addReplyBulkCBuffer(c, dbuf, dlen);
}

/* Helper function for geoGetPointsInRange(): given a sorted set score
* representing a point, and a GeoShape, appends this entry as a geoPoint
* into the specified geoArray only if the point is within the search area.
* representing a point, and a GeoShape, checks if the point is within the search area.
*
* returns C_OK if the point is included, or C_ERR if it is outside. */
int geoAppendIfWithinShape(geoArray *ga, GeoShape *shape, double score, sds member) {
double distance = 0, xy[2];

* shape: the rectangle
* score: the encoded version of lat,long
* xy: output variable, the decoded lat,long
* distance: output variable, the distance between the center of the shape and the point
*
* Return values:
*
* The return value is C_OK if the point is within search area, or C_ERR if it is outside.
* "*xy" is populated with the decoded lat,long.
* "*distance" is populated with the distance between the center of the shape and the point.
*/
int geoWithinShape(GeoShape *shape, double score, double *xy, double *distance) {
if (!decodeGeohash(score,xy)) return C_ERR; /* Can't decode. */
/* Note that geohashGetDistanceIfInRadiusWGS84() takes arguments in
* reverse order: longitude first, latitude later. */
if (shape->type == CIRCULAR_TYPE) {
if (!geohashGetDistanceIfInRadiusWGS84(shape->xy[0], shape->xy[1], xy[0], xy[1],
shape->t.radius*shape->conversion, &distance)) return C_ERR;
shape->t.radius*shape->conversion, distance))
return C_ERR;
} else if (shape->type == RECTANGLE_TYPE) {
if (!geohashGetDistanceIfInRectangle(shape->t.r.width * shape->conversion,
shape->t.r.height * shape->conversion,
shape->xy[0], shape->xy[1], xy[0], xy[1], &distance))
shape->xy[0], shape->xy[1], xy[0], xy[1], distance))
return C_ERR;
}

/* Append the new element. */
geoPoint *gp = geoArrayAppend(ga);
gp->longitude = xy[0];
gp->latitude = xy[1];
gp->dist = distance;
gp->member = member;
gp->score = score;
return C_OK;
}

@@ -257,8 +263,6 @@ int geoGetPointsInRange(robj *zobj, double min, double max, GeoShape *shape, geo
/* That's: min <= val < max */
zrangespec range = { .min = min, .max = max, .minex = 0, .maxex = 1 };
size_t origincount = ga->used;
sds member;

if (zobj->encoding == OBJ_ENCODING_LISTPACK) {
unsigned char *zl = zobj->ptr;
unsigned char *eptr, *sptr;
@@ -274,6 +278,8 @@ int geoGetPointsInRange(robj *zobj, double min, double max, GeoShape *shape, geo

sptr = lpNext(zl, eptr);
while (eptr) {
double xy[2];
double distance = 0;
score = zzlGetScore(sptr);

/* If we fell out of range, break. */
@@ -281,10 +287,11 @@ int geoGetPointsInRange(robj *zobj, double min, double max, GeoShape *shape, geo
break;

vstr = lpGetValue(eptr, &vlen, &vlong);
member = (vstr == NULL) ? sdsfromlonglong(vlong) :
sdsnewlen(vstr,vlen);
if (geoAppendIfWithinShape(ga,shape,score,member)
== C_ERR) sdsfree(member);
if (geoWithinShape(shape, score, xy, &distance) == C_OK) {
/* Append the new element. */
char *member = (vstr == NULL) ? sdsfromlonglong(vlong) : sdsnewlen(vstr, vlen);
geoArrayAppend(ga, xy, distance, score, member);
}
if (ga->used && limit && ga->used >= limit) break;
zzlNext(zl, &eptr, &sptr);
}
@@ -299,14 +306,15 @@ int geoGetPointsInRange(robj *zobj, double min, double max, GeoShape *shape, geo
}

while (ln) {
sds ele = ln->ele;
double xy[2];
double distance = 0;
/* Abort when the node is no longer in range. */
if (!zslValueLteMax(ln->score, &range))
break;

ele = sdsdup(ele);
if (geoAppendIfWithinShape(ga,shape,ln->score,ele)
== C_ERR) sdsfree(ele);
if (geoWithinShape(shape, ln->score, xy, &distance) == C_OK) {
/* Append the new element. */
geoArrayAppend(ga, xy, distance, ln->score, sdsdup(ln->ele));
}
if (ga->used && limit && ga->used >= limit) break;
ln = ln->level[0].forward;
}
@@ -216,17 +216,29 @@ GeoHashFix52Bits geohashAlign52Bits(const GeoHashBits hash) {
return bits;
}

/* Calculate distance using simplified haversine great circle distance formula.
* Given longitude diff is 0 the asin(sqrt(a)) on the haversine is asin(sin(abs(u))).
* arcsin(sin(x)) equal to x when x ∈[−𝜋/2,𝜋/2]. Given latitude is between [−𝜋/2,𝜋/2]
* we can simplify arcsin(sin(x)) to x.
*/
double geohashGetLatDistance(double lat1d, double lat2d) {
return EARTH_RADIUS_IN_METERS * fabs(deg_rad(lat2d) - deg_rad(lat1d));
}

/* Calculate distance using haversine great circle distance formula. */
double geohashGetDistance(double lon1d, double lat1d, double lon2d, double lat2d) {
double lat1r, lon1r, lat2r, lon2r, u, v;
lat1r = deg_rad(lat1d);
double lat1r, lon1r, lat2r, lon2r, u, v, a;
lon1r = deg_rad(lon1d);
lat2r = deg_rad(lat2d);
lon2r = deg_rad(lon2d);
u = sin((lat2r - lat1r) / 2);
v = sin((lon2r - lon1r) / 2);
return 2.0 * EARTH_RADIUS_IN_METERS *
asin(sqrt(u * u + cos(lat1r) * cos(lat2r) * v * v));
/* if v == 0 we can avoid doing expensive math when lons are practically the same */
if (v == 0.0)
return geohashGetLatDistance(lat1d, lat2d);
lat1r = deg_rad(lat1d);
lat2r = deg_rad(lat2d);
u = sin((lat2r - lat1r) / 2);
a = u * u + cos(lat1r) * cos(lat2r) * v * v;
return 2.0 * EARTH_RADIUS_IN_METERS * asin(sqrt(a));
}
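/* A minimal standalone sketch, not part of the patch above, that checks the
 * simplification behind geohashGetLatDistance(): when the two longitudes are
 * equal the haversine term v is 0, asin(sqrt(u*u)) collapses to |delta_lat|/2,
 * and the distance reduces to radius * |delta_lat| in radians. The radius and
 * helpers below are local stand-ins, not the real constants from the source. */
#include <math.h>
#include <stdio.h>
static const double RADIUS_M = 6372797.560856; /* approximate Earth radius, meters */
static double to_rad(double deg) { return deg * 3.14159265358979323846 / 180.0; }
static double haversine_m(double lon1, double lat1, double lon2, double lat2) {
    double u = sin((to_rad(lat2) - to_rad(lat1)) / 2);
    double v = sin((to_rad(lon2) - to_rad(lon1)) / 2);
    return 2.0 * RADIUS_M * asin(sqrt(u * u + cos(to_rad(lat1)) * cos(to_rad(lat2)) * v * v));
}
int main(void) {
    double full = haversine_m(13.36, 38.11, 13.36, 40.00);        /* same longitude */
    double fast = RADIUS_M * fabs(to_rad(40.00) - to_rad(38.11)); /* simplified form */
    printf("full=%.3f fast=%.3f\n", full, fast);                  /* prints two equal values */
    return 0;
}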
int geohashGetDistanceIfInRadius(double x1, double y1,
@@ -253,9 +265,14 @@ int geohashGetDistanceIfInRadiusWGS84(double x1, double y1, double x2,
*/
int geohashGetDistanceIfInRectangle(double width_m, double height_m, double x1, double y1,
double x2, double y2, double *distance) {
/* latitude distance is less expensive to compute than longitude distance
* so we check first for the latitude condition */
double lat_distance = geohashGetLatDistance(y2, y1);
if (lat_distance > height_m/2) {
return 0;
}
double lon_distance = geohashGetDistance(x2, y2, x1, y2);
double lat_distance = geohashGetDistance(x2, y2, x2, y1);
if (lon_distance > width_m/2 || lat_distance > height_m/2) {
if (lon_distance > width_m/2) {
return 0;
}
*distance = geohashGetDistance(x1, y1, x2, y2);
src/help.h
@ -225,7 +225,7 @@ struct commandHelp {
|
||||
8,
|
||||
"6.2.0" },
|
||||
{ "CLIENT KILL",
|
||||
"[ip:port] [ID client-id] [TYPE NORMAL|MASTER|SLAVE|REPLICA|PUBSUB] [USER username] [ADDR ip:port] [LADDR ip:port] [SKIPME yes/no]",
|
||||
"ip:port|[ID client-id]|[TYPE NORMAL|MASTER|SLAVE|REPLICA|PUBSUB]|[USER username]|[ADDR ip:port]|[LADDR ip:port]|[SKIPME yes/no] [[ID client-id]|[TYPE NORMAL|MASTER|SLAVE|REPLICA|PUBSUB]|[USER username]|[ADDR ip:port]|[LADDR ip:port]|[SKIPME yes/no] ...]",
|
||||
"Kill the connection of a client",
|
||||
8,
|
||||
"2.4.0" },
|
||||
@ -540,12 +540,12 @@ struct commandHelp {
|
||||
10,
|
||||
"2.6.0" },
|
||||
{ "EVALSHA_RO",
|
||||
"sha1 numkeys key [key ...] arg [arg ...]",
|
||||
"sha1 numkeys [key [key ...]] [arg [arg ...]]",
|
||||
"Execute a read-only Lua script server side",
|
||||
10,
|
||||
"7.0.0" },
|
||||
{ "EVAL_RO",
|
||||
"script numkeys key [key ...] arg [arg ...]",
|
||||
"script numkeys [key [key ...]] [arg [arg ...]]",
|
||||
"Execute a read-only Lua script server side",
|
||||
10,
|
||||
"7.0.0" },
|
||||
@ -580,12 +580,12 @@ struct commandHelp {
|
||||
9,
|
||||
"6.2.0" },
|
||||
{ "FCALL",
|
||||
"function numkeys key [key ...] arg [arg ...]",
|
||||
"function numkeys [key [key ...]] [arg [arg ...]]",
|
||||
"Invoke a function",
|
||||
10,
|
||||
"7.0.0" },
|
||||
{ "FCALL_RO",
|
||||
"function numkeys key [key ...] arg [arg ...]",
|
||||
"function numkeys [key [key ...]] [arg [arg ...]]",
|
||||
"Invoke a read-only function",
|
||||
10,
|
||||
"7.0.0" },
|
||||
@ -1590,7 +1590,7 @@ struct commandHelp {
|
||||
14,
|
||||
"6.2.0" },
|
||||
{ "XCLAIM",
|
||||
"key group consumer min-idle-time id [id ...] [IDLE ms] [TIME unix-time-milliseconds] [RETRYCOUNT count] [FORCE] [JUSTID]",
|
||||
"key group consumer min-idle-time id [id ...] [IDLE ms] [TIME unix-time-milliseconds] [RETRYCOUNT count] [FORCE] [JUSTID] [LASTID id]",
|
||||
"Changes (or acquires) ownership of a message in a consumer group, as if the message was delivered to the specified consumer.",
|
||||
14,
|
||||
"5.0.0" },
|
||||
|
@@ -39,8 +39,11 @@
#ifndef LISTPACK_ALLOC_H
#define LISTPACK_ALLOC_H
#include "zmalloc.h"
#define lp_malloc zmalloc
#define lp_realloc zrealloc
/* We use zmalloc_usable/zrealloc_usable instead of zmalloc/zrealloc
* to ensure the safe invocation of 'zmalloc_usable_size().
* See comment in zmalloc_usable_size(). */
#define lp_malloc(sz) zmalloc_usable(sz,NULL)
#define lp_realloc(ptr,sz) zrealloc_usable(ptr,sz,NULL)
#define lp_free zfree
#define lp_malloc_size zmalloc_usable_size
#endif
src/module.c
@ -137,6 +137,7 @@ typedef struct RedisModulePoolAllocBlock {
|
||||
* but only the fields needed in a given context. */
|
||||
|
||||
struct RedisModuleBlockedClient;
|
||||
struct RedisModuleUser;
|
||||
|
||||
struct RedisModuleCtx {
|
||||
void *getapifuncptr; /* NOTE: Must be the first field. */
|
||||
@ -161,6 +162,9 @@ struct RedisModuleCtx {
|
||||
|
||||
struct RedisModulePoolAllocBlock *pa_head;
|
||||
long long next_yield_time;
|
||||
|
||||
const struct RedisModuleUser *user; /* RedisModuleUser commands executed via
|
||||
RM_Call should be executed as, if set */
|
||||
};
|
||||
typedef struct RedisModuleCtx RedisModuleCtx;
|
||||
|
||||
@ -318,6 +322,7 @@ typedef struct RedisModuleDictIter {
|
||||
|
||||
typedef struct RedisModuleCommandFilterCtx {
|
||||
RedisModuleString **argv;
|
||||
int argv_len;
|
||||
int argc;
|
||||
} RedisModuleCommandFilterCtx;
|
||||
|
||||
@ -352,7 +357,7 @@ typedef struct RedisModuleServerInfoData {
|
||||
#define REDISMODULE_ARGV_NO_REPLICAS (1<<2)
|
||||
#define REDISMODULE_ARGV_RESP_3 (1<<3)
|
||||
#define REDISMODULE_ARGV_RESP_AUTO (1<<4)
|
||||
#define REDISMODULE_ARGV_CHECK_ACL (1<<5)
|
||||
#define REDISMODULE_ARGV_RUN_AS_USER (1<<5)
|
||||
#define REDISMODULE_ARGV_SCRIPT_MODE (1<<6)
|
||||
#define REDISMODULE_ARGV_NO_WRITES (1<<7)
|
||||
#define REDISMODULE_ARGV_CALL_REPLIES_AS_ERRORS (1<<8)
|
||||
@@ -469,13 +474,20 @@ static int moduleConvertArgFlags(int flags);
* You should avoid using malloc().
* This function panics if unable to allocate enough memory. */
void *RM_Alloc(size_t bytes) {
return zmalloc(bytes);
/* Use 'zmalloc_usable()' instead of 'zmalloc()' to allow the compiler
* to recognize the additional memory size, which means that modules can
* use the memory reported by 'RM_MallocUsableSize()' safely. In theory this
* isn't really needed since this API can't be inlined (not even for embedded
* modules like TLS (we use function pointers for module APIs), and the API doesn't
* have the malloc_size attribute, but it's hard to predict how smart future compilers
* will be, so better safe than sorry. */
return zmalloc_usable(bytes,NULL);
}

/* Similar to RM_Alloc, but returns NULL in case of allocation failure, instead
* of panicking. */
void *RM_TryAlloc(size_t bytes) {
return ztrymalloc(bytes);
return ztrymalloc_usable(bytes,NULL);
}

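/* A hedged illustration, not from the patch, of why these wrappers switched to
 * the *_usable() allocators: a module may use every byte reported by
 * RedisModule_MallocUsableSize(), which can exceed the requested size because
 * of allocator bucket rounding. The helper name below is made up for the
 * example; it assumes "redismodule.h" and <string.h> are included. */
static void *alloc_and_zero_whole_bucket(size_t requested) {
    void *p = RedisModule_Alloc(requested);            /* panics on OOM, per the doc above */
    size_t usable = RedisModule_MallocUsableSize(p);   /* often larger than 'requested' */
    memset(p, 0, usable);                              /* safe only because RM_Alloc goes
                                                        * through zmalloc_usable() above */
    return p;
}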
/* Use like calloc(). Memory allocated with this function is reported in
|
||||
@ -483,12 +495,12 @@ void *RM_TryAlloc(size_t bytes) {
|
||||
* and in general is taken into account as memory allocated by Redis.
|
||||
* You should avoid using calloc() directly. */
|
||||
void *RM_Calloc(size_t nmemb, size_t size) {
|
||||
return zcalloc(nmemb*size);
|
||||
return zcalloc_usable(nmemb*size,NULL);
|
||||
}
|
||||
|
||||
/* Use like realloc() for memory obtained with RedisModule_Alloc(). */
|
||||
void* RM_Realloc(void *ptr, size_t bytes) {
|
||||
return zrealloc(ptr,bytes);
|
||||
return zrealloc_usable(ptr,bytes,NULL);
|
||||
}
|
||||
|
||||
/* Use like free() for memory obtained by RedisModule_Alloc() and
|
||||
@ -565,7 +577,7 @@ void *RM_PoolAlloc(RedisModuleCtx *ctx, size_t bytes) {
|
||||
* Helpers for modules API implementation
|
||||
* -------------------------------------------------------------------------- */
|
||||
|
||||
client *moduleAllocTempClient() {
|
||||
client *moduleAllocTempClient(void) {
|
||||
client *c = NULL;
|
||||
|
||||
if (moduleTempClientCount > 0) {
|
||||
@ -592,6 +604,7 @@ void moduleReleaseTempClient(client *c) {
|
||||
c->bufpos = 0;
|
||||
c->flags = CLIENT_MODULE;
|
||||
c->user = NULL; /* Root user */
|
||||
c->cmd = c->lastcmd = c->realcmd = NULL;
|
||||
moduleTempClients[moduleTempClientCount++] = c;
|
||||
}
|
||||
|
||||
@ -1147,7 +1160,9 @@ RedisModuleCommand *moduleCreateCommandProxy(struct RedisModule *module, sds dec
|
||||
cp->rediscmd->key_specs = cp->rediscmd->key_specs_static;
|
||||
if (firstkey != 0) {
|
||||
cp->rediscmd->key_specs_num = 1;
|
||||
cp->rediscmd->key_specs[0].flags = CMD_KEY_FULL_ACCESS | CMD_KEY_VARIABLE_FLAGS;
|
||||
cp->rediscmd->key_specs[0].flags = CMD_KEY_FULL_ACCESS;
|
||||
if (flags & CMD_MODULE_GETKEYS)
|
||||
cp->rediscmd->key_specs[0].flags |= CMD_KEY_VARIABLE_FLAGS;
|
||||
cp->rediscmd->key_specs[0].begin_search_type = KSPEC_BS_INDEX;
|
||||
cp->rediscmd->key_specs[0].bs.index.pos = firstkey;
|
||||
cp->rediscmd->key_specs[0].find_keys_type = KSPEC_FK_RANGE;
|
||||
@ -3867,7 +3882,7 @@ int RM_SetAbsExpire(RedisModuleKey *key, mstime_t expire) {
|
||||
* When async is set to true, db contents will be freed by a background thread. */
|
||||
void RM_ResetDataset(int restart_aof, int async) {
|
||||
if (restart_aof && server.aof_state != AOF_OFF) stopAppendOnly();
|
||||
flushAllDataAndResetRDB(async? EMPTYDB_ASYNC: EMPTYDB_NO_FLAGS);
|
||||
flushAllDataAndResetRDB((async? EMPTYDB_ASYNC: EMPTYDB_NO_FLAGS) | EMPTYDB_NOFUNCTIONS);
|
||||
if (server.aof_enabled && restart_aof) restartAOFAfterSYNC();
|
||||
}
|
||||
|
||||
@ -4008,7 +4023,7 @@ int RM_StringTruncate(RedisModuleKey *key, size_t newlen) {
|
||||
sdssubstr(key->value->ptr,0,newlen);
|
||||
/* If the string is too wasteful, reallocate it. */
|
||||
if (sdslen(key->value->ptr) < sdsavail(key->value->ptr))
|
||||
key->value->ptr = sdsRemoveFreeSpace(key->value->ptr);
|
||||
key->value->ptr = sdsRemoveFreeSpace(key->value->ptr, 0);
|
||||
}
|
||||
}
|
||||
return REDISMODULE_OK;
|
||||
@ -4115,10 +4130,7 @@ int RM_ListPush(RedisModuleKey *key, int where, RedisModuleString *ele) {
|
||||
|
||||
if (!(key->mode & REDISMODULE_WRITE)) return REDISMODULE_ERR;
|
||||
if (key->value && key->value->type != OBJ_LIST) return REDISMODULE_ERR;
|
||||
if (key->iter) {
|
||||
listTypeReleaseIterator(key->iter);
|
||||
key->iter = NULL;
|
||||
}
|
||||
if (key->iter) moduleFreeKeyIterator(key);
|
||||
if (key->value == NULL) moduleCreateEmptyKey(key,REDISMODULE_KEYTYPE_LIST);
|
||||
listTypePush(key->value, ele,
|
||||
(where == REDISMODULE_LIST_HEAD) ? LIST_HEAD : LIST_TAIL);
|
||||
@ -4148,10 +4160,7 @@ RedisModuleString *RM_ListPop(RedisModuleKey *key, int where) {
|
||||
errno = EBADF;
|
||||
return NULL;
|
||||
}
|
||||
if (key->iter) {
|
||||
listTypeReleaseIterator(key->iter);
|
||||
key->iter = NULL;
|
||||
}
|
||||
if (key->iter) moduleFreeKeyIterator(key);
|
||||
robj *ele = listTypePop(key->value,
|
||||
(where == REDISMODULE_LIST_HEAD) ? LIST_HEAD : LIST_TAIL);
|
||||
robj *decoded = getDecodedObject(ele);
|
||||
@ -4214,8 +4223,7 @@ int RM_ListSet(RedisModuleKey *key, long index, RedisModuleString *value) {
|
||||
listTypeReplace(&key->u.list.entry, value);
|
||||
/* A note in quicklist.c forbids use of iterator after insert, so
|
||||
* probably also after replace. */
|
||||
listTypeReleaseIterator(key->iter);
|
||||
key->iter = NULL;
|
||||
moduleFreeKeyIterator(key);
|
||||
return REDISMODULE_OK;
|
||||
} else {
|
||||
return REDISMODULE_ERR;
|
||||
@ -4260,8 +4268,7 @@ int RM_ListInsert(RedisModuleKey *key, long index, RedisModuleString *value) {
|
||||
int where = index < 0 ? LIST_TAIL : LIST_HEAD;
|
||||
listTypeInsert(&key->u.list.entry, value, where);
|
||||
/* A note in quicklist.c forbids use of iterator after insert. */
|
||||
listTypeReleaseIterator(key->iter);
|
||||
key->iter = NULL;
|
||||
moduleFreeKeyIterator(key);
|
||||
return REDISMODULE_OK;
|
||||
} else {
|
||||
return REDISMODULE_ERR;
|
||||
@@ -4282,7 +4289,24 @@ int RM_ListInsert(RedisModuleKey *key, long index, RedisModuleString *value) {
int RM_ListDelete(RedisModuleKey *key, long index) {
if (moduleListIteratorSeek(key, index, REDISMODULE_WRITE)) {
listTypeDelete(key->iter, &key->u.list.entry);
moduleDelKeyIfEmpty(key);
if (moduleDelKeyIfEmpty(key)) return REDISMODULE_OK;
if (listTypeNext(key->iter, &key->u.list.entry)) {
/* After delete entry at position 'index', we need to update
* 'key->u.list.index' according to the following cases:
* 1) [1, 2, 3] => dir: forward, index: 0 => [2, 3] => index: still 0
* 2) [1, 2, 3] => dir: forward, index: -3 => [2, 3] => index: -2
* 3) [1, 2, 3] => dir: reverse, index: 2 => [1, 2] => index: 1
* 4) [1, 2, 3] => dir: reverse, index: -1 => [1, 2] => index: still -1 */
listTypeIterator *li = key->iter;
int reverse = li->direction == LIST_HEAD;
if (key->u.list.index < 0)
key->u.list.index += reverse ? 0 : 1;
else
key->u.list.index += reverse ? -1 : 0;
} else {
/* Reset list iterator if the next entry doesn't exist. */
moduleFreeKeyIterator(key);
}
return REDISMODULE_OK;
} else {
return REDISMODULE_ERR;
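/* A small standalone check, not part of the patch, of the index fix-up rule
 * documented in the four cases above: after deleting the entry at 'index', a
 * forward iterator only needs an adjustment for negative indexes and a
 * reverse iterator only for non-negative ones. */
#include <assert.h>
static long fixup_index(long index, int reverse) {
    if (index < 0) return index + (reverse ? 0 : 1);
    return index + (reverse ? -1 : 0);
}
int main(void) {
    assert(fixup_index(0, 0) == 0);    /* case 1: forward, 0 stays 0   */
    assert(fixup_index(-3, 0) == -2);  /* case 2: forward, -3 -> -2    */
    assert(fixup_index(2, 1) == 1);    /* case 3: reverse, 2 -> 1      */
    assert(fixup_index(-1, 1) == -1);  /* case 4: reverse, -1 stays -1 */
    return 0;
}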
@ -5595,6 +5619,11 @@ RedisModuleString *RM_CreateStringFromCallReply(RedisModuleCallReply *reply) {
|
||||
}
|
||||
}
|
||||
|
||||
/* Modifies the user that RM_Call will use (e.g. for ACL checks) */
|
||||
void RM_SetContextUser(RedisModuleCtx *ctx, const RedisModuleUser *user) {
|
||||
ctx->user = user;
|
||||
}
|
||||
|
||||
/* Returns an array of robj pointers, by parsing the format specifier "fmt" as described for
|
||||
* the RM_Call(), RM_Replicate() and other module APIs. Populates *argcp with the number of
|
||||
* items and *argvlenp with the length of the allocated argv.
|
||||
@ -5607,7 +5636,7 @@ RedisModuleString *RM_CreateStringFromCallReply(RedisModuleCallReply *reply) {
|
||||
* "R" -> REDISMODULE_ARGV_NO_REPLICAS
|
||||
* "3" -> REDISMODULE_ARGV_RESP_3
|
||||
* "0" -> REDISMODULE_ARGV_RESP_AUTO
|
||||
* "C" -> REDISMODULE_ARGV_CHECK_ACL
|
||||
* "C" -> REDISMODULE_ARGV_RUN_AS_USER
|
||||
*
|
||||
* On error (format specifier error) NULL is returned and nothing is
|
||||
* allocated. On success the argument vector is returned. */
|
||||
@ -5671,7 +5700,7 @@ robj **moduleCreateArgvFromUserFormat(const char *cmdname, const char *fmt, int
|
||||
} else if (*p == '0') {
|
||||
if (flags) (*flags) |= REDISMODULE_ARGV_RESP_AUTO;
|
||||
} else if (*p == 'C') {
|
||||
if (flags) (*flags) |= REDISMODULE_ARGV_CHECK_ACL;
|
||||
if (flags) (*flags) |= REDISMODULE_ARGV_RUN_AS_USER;
|
||||
} else if (*p == 'S') {
|
||||
if (flags) (*flags) |= REDISMODULE_ARGV_SCRIPT_MODE;
|
||||
} else if (*p == 'W') {
|
||||
@ -5718,7 +5747,17 @@ fmterr:
|
||||
* * `0` -- Return the reply in auto mode, i.e. the reply format will be the
|
||||
* same as the client attached to the given RedisModuleCtx. This will
|
||||
* probably used when you want to pass the reply directly to the client.
|
||||
* * `C` -- Check if command can be executed according to ACL rules.
|
||||
* * `C` -- Run a command as the user attached to the context.
|
||||
* User is either attached automatically via the client that directly
|
||||
* issued the command and created the context or via RM_SetContextUser.
|
||||
* If the context is not directly created by an issued command (such as a
|
||||
* background context and no user was set on it via RM_SetContextUser,
|
||||
* RM_Call will fail.
|
||||
* Checks if the command can be executed according to ACL rules and causes
|
||||
* the command to run as the determined user, so that any future user
|
||||
* dependent activity, such as ACL checks within scripts will proceed as
|
||||
* expected.
|
||||
* Otherwise, the command will run as the Redis unrestricted user.
|
||||
* * `S` -- Run the command in a script mode, this means that it will raise
|
||||
* an error if a command which are not allowed inside a script
|
||||
* (flagged with the `deny-script` flag) is invoked (like SHUTDOWN).
|
||||
@ -5749,7 +5788,7 @@ fmterr:
|
||||
* * ESPIPE: Command not allowed on script mode
|
||||
*
|
||||
* Example code fragment:
|
||||
*
|
||||
*
|
||||
* reply = RedisModule_Call(ctx,"INCRBY","sc",argv[1],"10");
|
||||
* if (RedisModule_CallReplyType(reply) == REDISMODULE_REPLY_INTEGER) {
|
||||
* long long myval = RedisModule_CallReplyInteger(reply);
|
||||
@ -5759,7 +5798,6 @@ fmterr:
|
||||
* This API is documented here: https://redis.io/topics/modules-intro
|
||||
*/
|
||||
RedisModuleCallReply *RM_Call(RedisModuleCtx *ctx, const char *cmdname, const char *fmt, ...) {
|
||||
struct redisCommand *cmd;
|
||||
client *c = NULL;
|
||||
robj **argv = NULL;
|
||||
int argc = 0, argv_len = 0, flags = 0;
|
||||
@ -5767,6 +5805,7 @@ RedisModuleCallReply *RM_Call(RedisModuleCtx *ctx, const char *cmdname, const ch
|
||||
RedisModuleCallReply *reply = NULL;
|
||||
int replicate = 0; /* Replicate this command? */
|
||||
int error_as_call_replies = 0; /* return errors as RedisModuleCallReply object */
|
||||
uint64_t cmd_flags;
|
||||
|
||||
/* Handle arguments. */
|
||||
va_start(ap, fmt);
|
||||
@ -5792,6 +5831,20 @@ RedisModuleCallReply *RM_Call(RedisModuleCtx *ctx, const char *cmdname, const ch
|
||||
}
|
||||
if (ctx->module) ctx->module->in_call++;
|
||||
|
||||
user *user = NULL;
|
||||
if (flags & REDISMODULE_ARGV_RUN_AS_USER) {
|
||||
user = ctx->user ? ctx->user->user : ctx->client->user;
|
||||
if (!user) {
|
||||
errno = ENOTSUP;
|
||||
if (error_as_call_replies) {
|
||||
sds msg = sdsnew("cannot run as user, no user directly attached to context or context's client");
|
||||
reply = callReplyCreateError(msg, ctx);
|
||||
}
|
||||
goto cleanup;
|
||||
}
|
||||
c->user = user;
|
||||
}
|
||||
|
||||
/* We handle the above format error only when the client is setup so that
|
||||
* we can free it normally. */
|
||||
if (argv == NULL) {
|
||||
@ -5809,7 +5862,7 @@ RedisModuleCallReply *RM_Call(RedisModuleCtx *ctx, const char *cmdname, const ch
|
||||
/* Lookup command now, after filters had a chance to make modifications
|
||||
* if necessary.
|
||||
*/
|
||||
cmd = c->cmd = c->lastcmd = c->realcmd = lookupCommand(c->argv,c->argc);
|
||||
c->cmd = c->lastcmd = c->realcmd = lookupCommand(c->argv,c->argc);
|
||||
sds err;
|
||||
if (!commandCheckExistence(c, error_as_call_replies? &err : NULL)) {
|
||||
errno = ENOENT;
|
||||
@ -5824,10 +5877,12 @@ RedisModuleCallReply *RM_Call(RedisModuleCtx *ctx, const char *cmdname, const ch
|
||||
goto cleanup;
|
||||
}
|
||||
|
||||
cmd_flags = getCommandFlags(c);
|
||||
|
||||
if (flags & REDISMODULE_ARGV_SCRIPT_MODE) {
|
||||
/* Basically on script mode we want to only allow commands that can
|
||||
* be executed on scripts (CMD_NOSCRIPT is not set on the command flags) */
|
||||
if (cmd->flags & CMD_NOSCRIPT) {
|
||||
if (cmd_flags & CMD_NOSCRIPT) {
|
||||
errno = ESPIPE;
|
||||
if (error_as_call_replies) {
|
||||
sds msg = sdscatfmt(sdsempty(), "command '%S' is not allowed on script mode", c->cmd->fullname);
|
||||
@ -5837,8 +5892,8 @@ RedisModuleCallReply *RM_Call(RedisModuleCtx *ctx, const char *cmdname, const ch
|
||||
}
|
||||
}
|
||||
|
||||
if (flags & REDISMODULE_ARGV_RESPECT_DENY_OOM) {
|
||||
if (cmd->flags & CMD_DENYOOM) {
|
||||
if (flags & REDISMODULE_ARGV_RESPECT_DENY_OOM && server.maxmemory) {
|
||||
if (cmd_flags & CMD_DENYOOM) {
|
||||
int oom_state;
|
||||
if (ctx->flags & REDISMODULE_CTX_THREAD_SAFE) {
|
||||
/* On background thread we can not count on server.pre_command_oom_state.
|
||||
@ -5860,7 +5915,7 @@ RedisModuleCallReply *RM_Call(RedisModuleCtx *ctx, const char *cmdname, const ch
|
||||
}
|
||||
|
||||
if (flags & REDISMODULE_ARGV_NO_WRITES) {
|
||||
if (cmd->flags & CMD_WRITE) {
|
||||
if (cmd_flags & CMD_WRITE) {
|
||||
errno = ENOSPC;
|
||||
if (error_as_call_replies) {
|
||||
sds msg = sdscatfmt(sdsempty(), "Write command '%S' was "
|
||||
@ -5873,7 +5928,7 @@ RedisModuleCallReply *RM_Call(RedisModuleCtx *ctx, const char *cmdname, const ch
|
||||
|
||||
/* Script mode tests */
|
||||
if (flags & REDISMODULE_ARGV_SCRIPT_MODE) {
|
||||
if (cmd->flags & CMD_WRITE) {
|
||||
if (cmd_flags & CMD_WRITE) {
|
||||
/* on script mode, if a command is a write command,
|
||||
* We will not run it if we encounter disk error
|
||||
* or we do not have enough replicas */
|
||||
@ -5910,7 +5965,7 @@ RedisModuleCallReply *RM_Call(RedisModuleCtx *ctx, const char *cmdname, const ch
|
||||
}
|
||||
|
||||
if (server.masterhost && server.repl_state != REPL_STATE_CONNECTED &&
|
||||
server.repl_serve_stale_data == 0 && !(cmd->flags & CMD_STALE)) {
|
||||
server.repl_serve_stale_data == 0 && !(cmd_flags & CMD_STALE)) {
|
||||
errno = ESPIPE;
|
||||
if (error_as_call_replies) {
|
||||
sds msg = sdsdup(shared.masterdownerr->ptr);
|
||||
@ -5921,20 +5976,16 @@ RedisModuleCallReply *RM_Call(RedisModuleCtx *ctx, const char *cmdname, const ch
|
||||
}
|
||||
|
||||
/* Check if the user can run this command according to the current
|
||||
* ACLs. */
|
||||
if (flags & REDISMODULE_ARGV_CHECK_ACL) {
|
||||
* ACLs.
|
||||
*
|
||||
* If RM_SetContextUser has set a user, that user is used, otherwise
|
||||
* use the attached client's user. If there is no attached client user and no manually
|
||||
* set user, an error will be returned */
|
||||
if (flags & REDISMODULE_ARGV_RUN_AS_USER) {
|
||||
int acl_errpos;
|
||||
int acl_retval;
|
||||
|
||||
if (ctx->client->user == NULL) {
|
||||
errno = ENOTSUP;
|
||||
if (error_as_call_replies) {
|
||||
sds msg = sdsnew("acl verification failed, context is not attached to a client.");
|
||||
reply = callReplyCreateError(msg, ctx);
|
||||
}
|
||||
goto cleanup;
|
||||
}
|
||||
acl_retval = ACLCheckAllUserCommandPerm(ctx->client->user,c->cmd,c->argv,c->argc,&acl_errpos);
|
||||
acl_retval = ACLCheckAllUserCommandPerm(user,c->cmd,c->argv,c->argc,&acl_errpos);
|
||||
if (acl_retval != ACL_OK) {
|
||||
sds object = (acl_retval == ACL_DENIED_CMD) ? sdsdup(c->cmd->fullname) : sdsdup(c->argv[acl_errpos]->ptr);
|
||||
addACLLogEntry(ctx->client, acl_retval, ACL_LOG_CTX_MODULE, -1, ctx->client->user->name, object);
|
||||
@ -7508,6 +7559,11 @@ void moduleHandleBlockedClients(void) {
|
||||
* properly unblocked by the module. */
|
||||
bc->disconnect_callback = NULL;
|
||||
unblockClient(c);
|
||||
|
||||
/* Update the wait offset, we don't know if this blocked client propagated anything,
|
||||
* currently we rather not add any API for that, so we just assume it did. */
|
||||
c->woff = server.master_repl_offset;
|
||||
|
||||
/* Put the client in the list of clients that need to write
|
||||
* if there are pending replies here. This is needed since
|
||||
* during a non blocking command the client may receive output. */
|
||||
@ -7930,7 +7986,6 @@ void moduleCallClusterReceivers(const char *sender_id, uint64_t module_id, uint8
|
||||
if (r->module_id == module_id) {
|
||||
RedisModuleCtx ctx;
|
||||
moduleCreateContext(&ctx, r->module, REDISMODULE_CTX_TEMP_CLIENT);
|
||||
selectDb(ctx.client, 0);
|
||||
r->callback(&ctx,sender_id,type,payload,len);
|
||||
moduleFreeContext(&ctx);
|
||||
return;
|
||||
@ -8659,6 +8714,46 @@ int RM_SetModuleUserACL(RedisModuleUser *user, const char* acl) {
|
||||
return ACLSetUser(user->user, acl, -1);
|
||||
}
|
||||
|
||||
/* Sets the permission of a user with a complete ACL string, such as one
|
||||
* would use on the redis ACL SETUSER command line API. This differs from
|
||||
* RM_SetModuleUserACL, which only takes single ACL operations at a time.
|
||||
*
|
||||
* Returns REDISMODULE_OK on success and REDISMODULE_ERR on failure
|
||||
* if a RedisModuleString is provided in error, a string describing the error
|
||||
* will be returned */
|
||||
int RM_SetModuleUserACLString(RedisModuleCtx *ctx, RedisModuleUser *user, const char *acl, RedisModuleString **error) {
|
||||
serverAssert(user != NULL);
|
||||
|
||||
int argc;
|
||||
sds *argv = sdssplitargs(acl, &argc);
|
||||
|
||||
sds err = ACLStringSetUser(user->user, NULL, argv, argc);
|
||||
|
||||
sdsfreesplitres(argv, argc);
|
||||
|
||||
if (err) {
|
||||
if (error) {
|
||||
*error = createObject(OBJ_STRING, err);
|
||||
if (ctx != NULL) autoMemoryAdd(ctx, REDISMODULE_AM_STRING, *error);
|
||||
} else {
|
||||
sdsfree(err);
|
||||
}
|
||||
|
||||
return REDISMODULE_ERR;
|
||||
}
|
||||
|
||||
return REDISMODULE_OK;
|
||||
}
|
||||
|
||||
/* Get the ACL string for a given user
|
||||
* Returns a RedisModuleString
|
||||
*/
|
||||
RedisModuleString *RM_GetModuleUserACLString(RedisModuleUser *user) {
|
||||
serverAssert(user != NULL);
|
||||
|
||||
return ACLDescribeUser(user->user);
|
||||
}
|
||||
|
||||
/* Retrieve the user name of the client connection behind the current context.
|
||||
* The user name can be used later, in order to get a RedisModuleUser.
|
||||
* See more information in RM_GetModuleUserFromUserName.
|
||||
@ -9760,6 +9855,7 @@ void moduleCallCommandFilters(client *c) {
|
||||
|
||||
RedisModuleCommandFilterCtx filter = {
|
||||
.argv = c->argv,
|
||||
.argv_len = c->argv_len,
|
||||
.argc = c->argc
|
||||
};
|
||||
|
||||
@ -9776,6 +9872,7 @@ void moduleCallCommandFilters(client *c) {
|
||||
}
|
||||
|
||||
c->argv = filter.argv;
|
||||
c->argv_len = filter.argv_len;
|
||||
c->argc = filter.argc;
|
||||
}
|
||||
|
||||
@ -9807,7 +9904,10 @@ int RM_CommandFilterArgInsert(RedisModuleCommandFilterCtx *fctx, int pos, RedisM
|
||||
|
||||
if (pos < 0 || pos > fctx->argc) return REDISMODULE_ERR;
|
||||
|
||||
fctx->argv = zrealloc(fctx->argv, (fctx->argc+1)*sizeof(RedisModuleString *));
|
||||
if (fctx->argv_len < fctx->argc+1) {
|
||||
fctx->argv_len = fctx->argc+1;
|
||||
fctx->argv = zrealloc(fctx->argv, fctx->argv_len*sizeof(RedisModuleString *));
|
||||
}
|
||||
for (i = fctx->argc; i > pos; i--) {
|
||||
fctx->argv[i] = fctx->argv[i-1];
|
||||
}
|
||||
@ -9862,6 +9962,9 @@ size_t RM_MallocSize(void* ptr) {
|
||||
/* Similar to RM_MallocSize, the difference is that RM_MallocUsableSize
|
||||
* returns the usable size of memory by the module. */
|
||||
size_t RM_MallocUsableSize(void *ptr) {
|
||||
/* It is safe to use 'zmalloc_usable_size()' to manipulate additional
|
||||
* memory space, as we guarantee that the compiler can recognize this
|
||||
* after 'RM_Alloc', 'RM_TryAlloc', 'RM_Realloc', or 'RM_Calloc'. */
|
||||
return zmalloc_usable_size(ptr);
|
||||
}
|
||||
|
||||
@ -10703,11 +10806,6 @@ void moduleFireServerEvent(uint64_t eid, int subid, void *data) {
|
||||
RedisModuleClientInfoV1 civ1;
|
||||
RedisModuleReplicationInfoV1 riv1;
|
||||
RedisModuleModuleChangeV1 mcv1;
|
||||
/* Start at DB zero by default when calling the handler. It's
|
||||
* up to the specific event setup to change it when it makes
|
||||
* sense. For instance for FLUSHDB events we select the correct
|
||||
* DB automatically. */
|
||||
selectDb(ctx.client, 0);
|
||||
|
||||
/* Event specific context and data pointer setup. */
|
||||
if (eid == REDISMODULE_EVENT_CLIENT_CHANGE) {
|
||||
@ -11027,6 +11125,21 @@ void moduleFreeModuleStructure(struct RedisModule *module) {
|
||||
zfree(module);
|
||||
}
|
||||
|
||||
void moduleFreeArgs(struct redisCommandArg *args, int num_args) {
|
||||
for (int j = 0; j < num_args; j++) {
|
||||
zfree((char *)args[j].name);
|
||||
zfree((char *)args[j].token);
|
||||
zfree((char *)args[j].summary);
|
||||
zfree((char *)args[j].since);
|
||||
zfree((char *)args[j].deprecated_since);
|
||||
|
||||
if (args[j].subargs) {
|
||||
moduleFreeArgs(args[j].subargs, args[j].num_args);
|
||||
}
|
||||
}
|
||||
zfree(args);
|
||||
}
|
||||
|
||||
/* Free the command registered with the specified module.
|
||||
* On success C_OK is returned, otherwise C_ERR is returned.
|
||||
*
|
||||
@ -11052,10 +11165,12 @@ int moduleFreeCommand(struct RedisModule *module, struct redisCommand *cmd) {
|
||||
zfree(cmd->key_specs);
|
||||
for (int j = 0; cmd->tips && cmd->tips[j]; j++)
|
||||
zfree((char *)cmd->tips[j]);
|
||||
zfree(cmd->tips);
|
||||
for (int j = 0; cmd->history && cmd->history[j].since; j++) {
|
||||
zfree((char *)cmd->history[j].since);
|
||||
zfree((char *)cmd->history[j].changes);
|
||||
}
|
||||
zfree(cmd->history);
|
||||
zfree((char *)cmd->summary);
|
||||
zfree((char *)cmd->since);
|
||||
zfree((char *)cmd->deprecated_since);
|
||||
@ -11064,7 +11179,7 @@ int moduleFreeCommand(struct RedisModule *module, struct redisCommand *cmd) {
|
||||
hdr_close(cmd->latency_histogram);
|
||||
cmd->latency_histogram = NULL;
|
||||
}
|
||||
zfree(cmd->args);
|
||||
moduleFreeArgs(cmd->args, cmd->num_args);
|
||||
zfree(cp);
|
||||
|
||||
if (cmd->subcommands_dict) {
|
||||
@ -11174,7 +11289,6 @@ int moduleLoad(const char *path, void **module_argv, int module_argc, int is_loa
|
||||
}
|
||||
RedisModuleCtx ctx;
|
||||
moduleCreateContext(&ctx, NULL, REDISMODULE_CTX_TEMP_CLIENT); /* We pass NULL since we don't have a module yet. */
|
||||
selectDb(ctx.client, 0);
|
||||
if (onload((void*)&ctx,module_argv,module_argc) == REDISMODULE_ERR) {
|
||||
serverLog(LL_WARNING,
|
||||
"Module %s initialization failed. Module not loaded",path);
|
||||
@ -12484,6 +12598,8 @@ void moduleRegisterCoreAPI(void) {
|
||||
REGISTER_API(StringTruncate);
|
||||
REGISTER_API(SetExpire);
|
||||
REGISTER_API(GetExpire);
|
||||
REGISTER_API(SetAbsExpire);
|
||||
REGISTER_API(GetAbsExpire);
|
||||
REGISTER_API(ResetDataset);
|
||||
REGISTER_API(DbSize);
|
||||
REGISTER_API(RandomKey);
|
||||
@ -12682,7 +12798,10 @@ void moduleRegisterCoreAPI(void) {
|
||||
REGISTER_API(Scan);
|
||||
REGISTER_API(ScanKey);
|
||||
REGISTER_API(CreateModuleUser);
|
||||
REGISTER_API(SetContextUser);
|
||||
REGISTER_API(SetModuleUserACL);
|
||||
REGISTER_API(SetModuleUserACLString);
|
||||
REGISTER_API(GetModuleUserACLString);
|
||||
REGISTER_API(GetCurrentUserName);
|
||||
REGISTER_API(GetModuleUserFromUserName);
|
||||
REGISTER_API(ACLCheckCommandPermissions);
|
||||
|
@@ -438,9 +438,9 @@ void touchAllWatchedKeysInDb(redisDb *emptied, redisDb *replaced_with) {
}
client *c = wk->client;
c->flags |= CLIENT_DIRTY_CAS;
/* As the client is marked as dirty, there is no point in getting here
* again for others keys (or keep the memory overhead till EXEC). */
unwatchAllKeys(c);
/* Note - we could potentially call unwatchAllKeys for this specific client in order to reduce
* the total number of iterations. BUT this could also free the current next entry pointer
* held by the iterator and can lead to use-after-free. */
}
}
}
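/* An illustrative sketch, not from the patch, of the use-after-free the note
 * above guards against: while walking a linked list, freeing other entries of
 * the same list from inside the per-entry callback can leave the iterator's
 * saved 'next' pointer dangling. */
#include <stdlib.h>
struct node { struct node *next; int key; };
static void visit_all(struct node *head, void (*cb)(struct node *)) {
    struct node *n = head;
    while (n) {
        struct node *next = n->next; /* captured before the callback runs */
        cb(n);                       /* if cb() frees 'next', the step below
                                      * walks into freed memory */
        n = next;
    }
}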
@ -131,7 +131,7 @@ client *createClient(connection *conn) {
|
||||
connSetReadHandler(conn, readQueryFromClient);
|
||||
connSetPrivateData(conn, c);
|
||||
}
|
||||
c->buf = zmalloc(PROTO_REPLY_CHUNK_BYTES);
|
||||
c->buf = zmalloc_usable(PROTO_REPLY_CHUNK_BYTES, &c->buf_usable_size);
|
||||
selectDb(c,0);
|
||||
uint64_t client_id;
|
||||
atomicGetIncr(server.next_client_id, client_id, 1);
|
||||
@ -140,7 +140,6 @@ client *createClient(connection *conn) {
|
||||
c->conn = conn;
|
||||
c->name = NULL;
|
||||
c->bufpos = 0;
|
||||
c->buf_usable_size = zmalloc_usable_size(c->buf);
|
||||
c->buf_peak = c->buf_usable_size;
|
||||
c->buf_peak_last_reset_time = server.unixtime;
|
||||
c->ref_repl_buf_node = NULL;
|
||||
@ -156,6 +155,7 @@ client *createClient(connection *conn) {
|
||||
c->original_argc = 0;
|
||||
c->original_argv = NULL;
|
||||
c->cmd = c->lastcmd = c->realcmd = NULL;
|
||||
c->cur_script = NULL;
|
||||
c->multibulklen = 0;
|
||||
c->bulklen = -1;
|
||||
c->sentlen = 0;
|
||||
@@ -289,8 +289,10 @@ int prepareClientToWrite(client *c) {
/* If CLIENT_CLOSE_ASAP flag is set, we need not write anything. */
if (c->flags & CLIENT_CLOSE_ASAP) return C_ERR;

/* CLIENT REPLY OFF / SKIP handling: don't send replies. */
if (c->flags & (CLIENT_REPLY_OFF|CLIENT_REPLY_SKIP)) return C_ERR;
/* CLIENT REPLY OFF / SKIP handling: don't send replies.
* CLIENT_PUSHING handling: disables the reply silencing flags. */
if ((c->flags & (CLIENT_REPLY_OFF|CLIENT_REPLY_SKIP)) &&
!(c->flags & CLIENT_PUSHING)) return C_ERR;

/* Masters don't receive replies, unless CLIENT_MASTER_FORCE_REPLY flag
* is set. */
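/* A standalone sketch, not part of the patch, of the new suppression rule:
 * CLIENT REPLY OFF/SKIP silence replies unless the reply is a push message,
 * which is what CLIENT_PUSHING marks. The flag values here are illustrative,
 * not the real bits from server.h. */
#include <assert.h>
#include <stdint.h>
#define F_REPLY_OFF  (1ULL << 0)
#define F_REPLY_SKIP (1ULL << 1)
#define F_PUSHING    (1ULL << 2)
static int reply_suppressed(uint64_t flags) {
    return (flags & (F_REPLY_OFF | F_REPLY_SKIP)) && !(flags & F_PUSHING);
}
int main(void) {
    assert(reply_suppressed(F_REPLY_OFF));              /* CLIENT REPLY OFF: silent    */
    assert(!reply_suppressed(F_REPLY_OFF | F_PUSHING)); /* push messages still go out  */
    assert(!reply_suppressed(0));                       /* normal client: not silenced */
    return 0;
}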
@ -600,6 +602,13 @@ void addReplyErrorSds(client *c, sds err) {
|
||||
addReplyErrorSdsEx(c, err, 0);
|
||||
}
|
||||
|
||||
/* See addReplyErrorLength for expectations from the input string. */
|
||||
/* As a side effect the SDS string is freed. */
|
||||
void addReplyErrorSdsSafe(client *c, sds err) {
|
||||
err = sdsmapchars(err, "\r\n", " ", 2);
|
||||
addReplyErrorSdsEx(c, err, 0);
|
||||
}
|
||||
|
||||
/* Internal function used by addReplyErrorFormat and addReplyErrorFormatEx.
|
||||
* Refer to afterErrorReply for more information about the flags. */
|
||||
static void addReplyErrorFormatInternal(client *c, int flags, const char *fmt, va_list ap) {
|
||||
@ -680,11 +689,12 @@ void trimReplyUnusedTailSpace(client *c) {
|
||||
if (tail->size - tail->used > tail->size / 4 &&
|
||||
tail->used < PROTO_REPLY_CHUNK_BYTES)
|
||||
{
|
||||
size_t usable_size;
|
||||
size_t old_size = tail->size;
|
||||
tail = zrealloc(tail, tail->used + sizeof(clientReplyBlock));
|
||||
tail = zrealloc_usable(tail, tail->used + sizeof(clientReplyBlock), &usable_size);
|
||||
/* take over the allocation's internal fragmentation (at least for
|
||||
* memory usage tracking) */
|
||||
tail->size = zmalloc_usable_size(tail) - sizeof(clientReplyBlock);
|
||||
tail->size = usable_size - sizeof(clientReplyBlock);
|
||||
c->reply_bytes = c->reply_bytes + tail->size - old_size;
|
||||
listNodeValue(ln) = tail;
|
||||
}
|
||||
@ -760,9 +770,10 @@ void setDeferredReply(client *c, void *node, const char *s, size_t length) {
|
||||
listDelNode(c->reply,ln);
|
||||
} else {
|
||||
/* Create a new node */
|
||||
clientReplyBlock *buf = zmalloc(length + sizeof(clientReplyBlock));
|
||||
size_t usable_size;
|
||||
clientReplyBlock *buf = zmalloc_usable(length + sizeof(clientReplyBlock), &usable_size);
|
||||
/* Take over the allocation's internal fragmentation */
|
||||
buf->size = zmalloc_usable_size(buf) - sizeof(clientReplyBlock);
|
||||
buf->size = usable_size - sizeof(clientReplyBlock);
|
||||
buf->used = length;
|
||||
memcpy(buf->buf, s, length);
|
||||
listNodeValue(ln) = buf;
|
||||
@@ -840,13 +851,29 @@ void addReplyDouble(client *c, double d) {
d > 0 ? 6 : 7);
}
} else {
char dbuf[MAX_LONG_DOUBLE_CHARS+3],
sbuf[MAX_LONG_DOUBLE_CHARS+32];
int dlen, slen;
char dbuf[MAX_LONG_DOUBLE_CHARS+32];
int dlen = 0;
if (c->resp == 2) {
dlen = snprintf(dbuf,sizeof(dbuf),"%.17g",d);
slen = snprintf(sbuf,sizeof(sbuf),"$%d\r\n%s\r\n",dlen,dbuf);
addReplyProto(c,sbuf,slen);
/* In order to prepend the string length before the formatted number,
* but still avoid an extra memcpy of the whole number, we reserve space
* for maximum header `$0000\r\n`, print double, add the resp header in
* front of it, and then send the buffer with the right `start` offset. */
int dlen = snprintf(dbuf+7,sizeof(dbuf) - 7,"%.17g",d);
int digits = digits10(dlen);
int start = 4 - digits;
dbuf[start] = '$';

/* Convert `dlen` to string, putting it's digits after '$' and before the
* formatted double string. */
for(int i = digits, val = dlen; val && i > 0 ; --i, val /= 10) {
dbuf[start + i] = "0123456789"[val % 10];
}

dbuf[5] = '\r';
dbuf[6] = '\n';
dbuf[dlen+7] = '\r';
dbuf[dlen+8] = '\n';
addReplyProto(c,dbuf+start,dlen+9-start);
} else {
dlen = snprintf(dbuf,sizeof(dbuf),",%.17g\r\n",d);
addReplyProto(c,dbuf,dlen);
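/* A standalone sketch, not part of the patch, of the header trick used above
 * for RESP2: format the number at a fixed offset, then write the "$<len>\r\n"
 * header backwards just in front of it, so the whole bulk string is sent from
 * one buffer with no extra memcpy of the payload. Assumes the payload length
 * fits in 4 digits, as it does for "%.17g" output; digits10_demo stands in
 * for the real digits10() helper. */
#include <stdio.h>
static int digits10_demo(int v) { int n = 1; while (v >= 10) { v /= 10; n++; } return n; }
static int build_bulk(char *out, size_t outlen, double d, int *start) {
    int dlen = snprintf(out + 7, outlen - 7, "%.17g", d); /* payload after reserved header */
    int digits = digits10_demo(dlen);
    *start = 4 - digits;                                  /* header occupies out[*start..6] */
    out[*start] = '$';
    for (int i = digits, val = dlen; val && i > 0; --i, val /= 10)
        out[*start + i] = "0123456789"[val % 10];
    out[5] = '\r'; out[6] = '\n';
    out[dlen + 7] = '\r'; out[dlen + 8] = '\n';
    return dlen + 9 - *start;                             /* bytes to send from out + *start */
}
int main(void) {
    char buf[64]; int start;
    int n = build_bulk(buf, sizeof(buf), 0.5, &start);
    fwrite(buf + start, 1, n, stdout); /* prints "$3\r\n0.5\r\n" */
    return 0;
}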
@ -949,6 +976,7 @@ void addReplyAttributeLen(client *c, long length) {
|
||||
|
||||
void addReplyPushLen(client *c, long length) {
|
||||
serverAssert(c->resp >= 3);
|
||||
serverAssertWithInfo(c, NULL, c->flags & CLIENT_PUSHING);
|
||||
addReplyAggregateLen(c,length,'>');
|
||||
}
|
||||
|
||||
@ -1996,7 +2024,7 @@ int writeToClient(client *c, int handler_installed) {
|
||||
* Since this isn't thread safe we do this conditionally. In case of threaded writes this is done in
|
||||
* handleClientsWithPendingWritesUsingThreads(). */
|
||||
if (io_threads_op == IO_THREADS_OP_IDLE)
|
||||
updateClientMemUsage(c);
|
||||
updateClientMemUsageAndBucket(c);
|
||||
return C_OK;
|
||||
}
|
||||
|
||||
@ -2045,6 +2073,7 @@ void resetClient(client *c) {
|
||||
redisCommandProc *prevcmd = c->cmd ? c->cmd->proc : NULL;
|
||||
|
||||
freeClientArgv(c);
|
||||
c->cur_script = NULL;
|
||||
c->reqtype = 0;
|
||||
c->multibulklen = 0;
|
||||
c->bulklen = -1;
|
||||
@ -2445,7 +2474,7 @@ int processCommandAndResetClient(client *c) {
|
||||
commandProcessed(c);
|
||||
/* Update the client's memory to include output buffer growth following the
|
||||
* processed command. */
|
||||
updateClientMemUsage(c);
|
||||
updateClientMemUsageAndBucket(c);
|
||||
}
|
||||
|
||||
if (server.current_client == NULL) deadclient = 1;
|
||||
@ -2582,7 +2611,7 @@ int processInputBuffer(client *c) {
|
||||
* important in case the query buffer is big and wasn't drained during
|
||||
* the above loop (because of partially sent big commands). */
|
||||
if (io_threads_op == IO_THREADS_OP_IDLE)
|
||||
updateClientMemUsage(c);
|
||||
updateClientMemUsageAndBucket(c);
|
||||
|
||||
return C_OK;
|
||||
}
|
||||
@ -3020,9 +3049,11 @@ NULL
|
||||
/* CLIENT NO-EVICT ON|OFF */
|
||||
if (!strcasecmp(c->argv[2]->ptr,"on")) {
|
||||
c->flags |= CLIENT_NO_EVICT;
|
||||
removeClientFromMemUsageBucket(c, 0);
|
||||
addReply(c,shared.ok);
|
||||
} else if (!strcasecmp(c->argv[2]->ptr,"off")) {
|
||||
c->flags &= ~CLIENT_NO_EVICT;
|
||||
updateClientMemUsageAndBucket(c);
|
||||
addReply(c,shared.ok);
|
||||
} else {
|
||||
addReplyErrorObject(c,shared.syntaxerr);
|
||||
@ -4004,7 +4035,13 @@ void processEventsWhileBlocked(void) {
|
||||
* ========================================================================== */
|
||||
|
||||
#define IO_THREADS_MAX_NUM 128
|
||||
#ifndef CACHE_LINE_SIZE
|
||||
#if defined(__aarch64__) && defined(__APPLE__)
|
||||
#define CACHE_LINE_SIZE 128
|
||||
#else
|
||||
#define CACHE_LINE_SIZE 64
|
||||
#endif
|
||||
#endif
|
||||
|
||||
typedef struct __attribute__((aligned(CACHE_LINE_SIZE))) threads_pending {
|
||||
redisAtomic unsigned long value;
|
||||
@ -4251,7 +4288,7 @@ int handleClientsWithPendingWritesUsingThreads(void) {
|
||||
client *c = listNodeValue(ln);
|
||||
|
||||
/* Update the client in the mem usage after we're done processing it in the io-threads */
|
||||
updateClientMemUsage(c);
|
||||
updateClientMemUsageAndBucket(c);
|
||||
|
||||
/* Install the write handler if there are pending writes in some
|
||||
* of the clients. */
|
||||
@ -4358,7 +4395,7 @@ int handleClientsWithPendingReadsUsingThreads(void) {
|
||||
}
|
||||
|
||||
/* Once io-threads are idle we can update the client in the mem usage */
|
||||
updateClientMemUsage(c);
|
||||
updateClientMemUsageAndBucket(c);
|
||||
|
||||
if (processPendingCommandAndInputBuffer(c) == C_ERR) {
|
||||
/* If the client is no longer valid, we avoid
|
||||
@ -4405,6 +4442,8 @@ size_t getClientEvictionLimit(void) {
|
||||
}
|
||||
|
||||
void evictClients(void) {
|
||||
if (!server.client_mem_usage_buckets)
|
||||
return;
|
||||
/* Start eviction from topmost bucket (largest clients) */
|
||||
int curr_bucket = CLIENT_MEM_USAGE_BUCKETS-1;
|
||||
listIter bucket_iter;
|
||||
|
10
src/object.c
10
src/object.c
@ -595,7 +595,7 @@ void trimStringObjectIfNeeded(robj *o) {
|
||||
if (o->encoding == OBJ_ENCODING_RAW &&
|
||||
sdsavail(o->ptr) > sdslen(o->ptr)/10)
|
||||
{
|
||||
o->ptr = sdsRemoveFreeSpace(o->ptr);
|
||||
o->ptr = sdsRemoveFreeSpace(o->ptr, 0);
|
||||
}
|
||||
}
|
||||
|
||||
@ -1071,7 +1071,8 @@ size_t objectComputeSize(robj *key, robj *o, size_t sample_size, int dbid) {
|
||||
size_t lpsize = 0, samples = 0;
|
||||
while(samples < sample_size && raxNext(&ri)) {
|
||||
unsigned char *lp = ri.data;
|
||||
lpsize += lpBytes(lp);
|
||||
/* Use the allocated size, since we overprovision the node initially. */
|
||||
lpsize += zmalloc_size(lp);
|
||||
samples++;
|
||||
}
|
||||
if (s->rax->numele <= samples) {
|
||||
@ -1083,7 +1084,8 @@ size_t objectComputeSize(robj *key, robj *o, size_t sample_size, int dbid) {
|
||||
* if there are a few elements in the radix tree. */
|
||||
raxSeek(&ri,"$",NULL,0);
|
||||
raxNext(&ri);
|
||||
asize += lpBytes(ri.data);
|
||||
/* Use the allocated size, since we overprovision the node initially. */
|
||||
asize += zmalloc_size(ri.data);
|
||||
}
|
||||
raxStop(&ri);
|
||||
|
||||
@ -1187,7 +1189,7 @@ struct redisMemOverhead *getMemoryOverheadData(void) {
|
||||
|
||||
/* Computing the memory used by the clients would be O(N) if done
|
||||
* here online. We use our values computed incrementally by
|
||||
* updateClientMemUsage(). */
|
||||
* updateClientMemoryUsage(). */
|
||||
mh->clients_normal = server.stat_clients_type_memory[CLIENT_TYPE_MASTER]+
|
||||
server.stat_clients_type_memory[CLIENT_TYPE_PUBSUB]+
|
||||
server.stat_clients_type_memory[CLIENT_TYPE_NORMAL];
|
||||
|
22
src/pubsub.c
22
src/pubsub.c
@ -105,6 +105,8 @@ pubsubtype pubSubShardType = {
|
||||
* to send a special message (for instance an Array type) by using the
|
||||
* addReply*() API family. */
|
||||
void addReplyPubsubMessage(client *c, robj *channel, robj *msg, robj *message_bulk) {
|
||||
uint64_t old_flags = c->flags;
|
||||
c->flags |= CLIENT_PUSHING;
|
||||
if (c->resp == 2)
|
||||
addReply(c,shared.mbulkhdr[3]);
|
||||
else
|
||||
@ -112,12 +114,15 @@ void addReplyPubsubMessage(client *c, robj *channel, robj *msg, robj *message_bu
|
||||
addReply(c,message_bulk);
|
||||
addReplyBulk(c,channel);
|
||||
if (msg) addReplyBulk(c,msg);
|
||||
if (!(old_flags & CLIENT_PUSHING)) c->flags &= ~CLIENT_PUSHING;
|
||||
}
|
||||
|
||||
/* Send a pubsub message of type "pmessage" to the client. The difference
|
||||
* with the "message" type delivered by addReplyPubsubMessage() is that
|
||||
* this message format also includes the pattern that matched the message. */
|
||||
void addReplyPubsubPatMessage(client *c, robj *pat, robj *channel, robj *msg) {
|
||||
uint64_t old_flags = c->flags;
|
||||
c->flags |= CLIENT_PUSHING;
|
||||
if (c->resp == 2)
|
||||
addReply(c,shared.mbulkhdr[4]);
|
||||
else
|
||||
@ -126,10 +131,13 @@ void addReplyPubsubPatMessage(client *c, robj *pat, robj *channel, robj *msg) {
|
||||
addReplyBulk(c,pat);
|
||||
addReplyBulk(c,channel);
|
||||
addReplyBulk(c,msg);
|
||||
if (!(old_flags & CLIENT_PUSHING)) c->flags &= ~CLIENT_PUSHING;
|
||||
}
|
||||
|
||||
/* Send the pubsub subscription notification to the client. */
|
||||
void addReplyPubsubSubscribed(client *c, robj *channel, pubsubtype type) {
|
||||
uint64_t old_flags = c->flags;
|
||||
c->flags |= CLIENT_PUSHING;
|
||||
if (c->resp == 2)
|
||||
addReply(c,shared.mbulkhdr[3]);
|
||||
else
|
||||
@ -137,6 +145,7 @@ void addReplyPubsubSubscribed(client *c, robj *channel, pubsubtype type) {
|
||||
addReply(c,*type.subscribeMsg);
|
||||
addReplyBulk(c,channel);
|
||||
addReplyLongLong(c,type.subscriptionCount(c));
|
||||
if (!(old_flags & CLIENT_PUSHING)) c->flags &= ~CLIENT_PUSHING;
|
||||
}
|
||||
|
||||
/* Send the pubsub unsubscription notification to the client.
|
||||
@ -144,6 +153,8 @@ void addReplyPubsubSubscribed(client *c, robj *channel, pubsubtype type) {
|
||||
* unsubscribe command but there are no channels to unsubscribe from: we
|
||||
* still send a notification. */
|
||||
void addReplyPubsubUnsubscribed(client *c, robj *channel, pubsubtype type) {
|
||||
uint64_t old_flags = c->flags;
|
||||
c->flags |= CLIENT_PUSHING;
|
||||
if (c->resp == 2)
|
||||
addReply(c,shared.mbulkhdr[3]);
|
||||
else
|
||||
@ -154,10 +165,13 @@ void addReplyPubsubUnsubscribed(client *c, robj *channel, pubsubtype type) {
|
||||
else
|
||||
addReplyNull(c);
|
||||
addReplyLongLong(c,type.subscriptionCount(c));
|
||||
if (!(old_flags & CLIENT_PUSHING)) c->flags &= ~CLIENT_PUSHING;
|
||||
}
|
||||
|
||||
/* Send the pubsub pattern subscription notification to the client. */
|
||||
void addReplyPubsubPatSubscribed(client *c, robj *pattern) {
|
||||
uint64_t old_flags = c->flags;
|
||||
c->flags |= CLIENT_PUSHING;
|
||||
if (c->resp == 2)
|
||||
addReply(c,shared.mbulkhdr[3]);
|
||||
else
|
||||
@ -165,6 +179,7 @@ void addReplyPubsubPatSubscribed(client *c, robj *pattern) {
|
||||
addReply(c,shared.psubscribebulk);
|
||||
addReplyBulk(c,pattern);
|
||||
addReplyLongLong(c,clientSubscriptionsCount(c));
|
||||
if (!(old_flags & CLIENT_PUSHING)) c->flags &= ~CLIENT_PUSHING;
|
||||
}
|
||||
|
||||
/* Send the pubsub pattern unsubscription notification to the client.
|
||||
@ -172,6 +187,8 @@ void addReplyPubsubPatSubscribed(client *c, robj *pattern) {
|
||||
* punsubscribe command but there are no pattern to unsubscribe from: we
|
||||
* still send a notification. */
|
||||
void addReplyPubsubPatUnsubscribed(client *c, robj *pattern) {
|
||||
uint64_t old_flags = c->flags;
|
||||
c->flags |= CLIENT_PUSHING;
|
||||
if (c->resp == 2)
|
||||
addReply(c,shared.mbulkhdr[3]);
|
||||
else
|
||||
@ -182,6 +199,7 @@ void addReplyPubsubPatUnsubscribed(client *c, robj *pattern) {
|
||||
else
|
||||
addReplyNull(c);
|
||||
addReplyLongLong(c,clientSubscriptionsCount(c));
|
||||
if (!(old_flags & CLIENT_PUSHING)) c->flags &= ~CLIENT_PUSHING;
|
||||
}
|
||||
|
||||
/*-----------------------------------------------------------------------------
|
||||
@ -465,7 +483,7 @@ int pubsubPublishMessageInternal(robj *channel, robj *message, pubsubtype type)
|
||||
while ((ln = listNext(&li)) != NULL) {
|
||||
client *c = ln->value;
|
||||
addReplyPubsubMessage(c,channel,message,*type.messageBulk);
|
||||
updateClientMemUsage(c);
|
||||
updateClientMemUsageAndBucket(c);
|
||||
receivers++;
|
||||
}
|
||||
}
|
||||
@ -491,7 +509,7 @@ int pubsubPublishMessageInternal(robj *channel, robj *message, pubsubtype type)
|
||||
while ((ln = listNext(&li)) != NULL) {
|
||||
client *c = listNodeValue(ln);
|
||||
addReplyPubsubPatMessage(c,pattern,channel,message);
|
||||
updateClientMemUsage(c);
|
||||
updateClientMemUsageAndBucket(c);
|
||||
receivers++;
|
||||
}
|
||||
}
|
||||
|
@ -57,6 +57,8 @@ int quicklistisSetPackedThreshold(size_t sz) {
|
||||
/* Don't allow threshold to be set above or even slightly below 4GB */
|
||||
if (sz > (1ull<<32) - (1<<20)) {
|
||||
return 0;
|
||||
} else if (sz == 0) { /* 0 means restore threshold */
|
||||
sz = (1 << 30);
|
||||
}
|
||||
packed_threshold = sz;
|
||||
return 1;
|
||||
@ -177,6 +179,7 @@ REDIS_STATIC quicklistNode *quicklistCreateNode(void) {
|
||||
node->encoding = QUICKLIST_NODE_ENCODING_RAW;
|
||||
node->container = QUICKLIST_NODE_CONTAINER_PACKED;
|
||||
node->recompress = 0;
|
||||
node->dont_compress = 0;
|
||||
return node;
|
||||
}
|
||||
|
||||
@ -212,6 +215,7 @@ REDIS_STATIC int __quicklistCompressNode(quicklistNode *node) {
|
||||
#ifdef REDIS_TEST
|
||||
node->attempted_compress = 1;
|
||||
#endif
|
||||
if (node->dont_compress) return 0;
|
||||
|
||||
/* validate that the node is neither
|
||||
* tail nor head (it has prev and next)*/
|
||||
@ -748,12 +752,15 @@ void quicklistReplaceEntry(quicklistIter *iter, quicklistEntry *entry,
|
||||
__quicklistDelNode(quicklist, entry->node);
|
||||
}
|
||||
} else {
|
||||
entry->node->dont_compress = 1; /* Prevent compression in quicklistInsertAfter() */
|
||||
quicklistInsertAfter(iter, entry, data, sz);
|
||||
if (entry->node->count == 1) {
|
||||
__quicklistDelNode(quicklist, entry->node);
|
||||
} else {
|
||||
unsigned char *p = lpSeek(entry->node->entry, -1);
|
||||
quicklistDelIndex(quicklist, entry->node, &p);
|
||||
entry->node->dont_compress = 0; /* Re-enable compression */
|
||||
quicklistCompress(quicklist, entry->node);
|
||||
quicklistCompress(quicklist, entry->node->next);
|
||||
}
|
||||
}
|
||||
@ -905,6 +912,9 @@ REDIS_STATIC quicklistNode *_quicklistSplitNode(quicklistNode *node, int offset,
|
||||
/* Copy original listpack so we can split it */
|
||||
memcpy(new_node->entry, node->entry, zl_sz);
|
||||
|
||||
/* Need positive offset for calculating extent below. */
|
||||
if (offset < 0) offset = node->count + offset;
|
||||
|
||||
/* Ranges to be trimmed: -1 here means "continue deleting until the list ends" */
|
||||
int orig_start = after ? offset + 1 : 0;
|
||||
int orig_extent = after ? -1 : offset;
|
||||
@ -1608,10 +1618,11 @@ void quicklistRepr(unsigned char *ql, int full) {
|
||||
|
||||
while(node != NULL) {
|
||||
printf("{quicklist node(%d)\n", i++);
|
||||
printf("{container : %s, encoding: %s, size: %zu, recompress: %d, attempted_compress: %d}\n",
|
||||
printf("{container : %s, encoding: %s, size: %zu, count: %d, recompress: %d, attempted_compress: %d}\n",
|
||||
QL_NODE_IS_PLAIN(node) ? "PLAIN": "PACKED",
|
||||
(node->encoding == QUICKLIST_NODE_ENCODING_RAW) ? "RAW": "LZF",
|
||||
node->sz,
|
||||
node->count,
|
||||
node->recompress,
|
||||
node->attempted_compress);
|
||||
|
||||
|
@ -53,7 +53,8 @@ typedef struct quicklistNode {
|
||||
unsigned int container : 2; /* PLAIN==1 or PACKED==2 */
|
||||
unsigned int recompress : 1; /* was this node previous compressed? */
|
||||
unsigned int attempted_compress : 1; /* node can't compress; too small */
|
||||
unsigned int extra : 10; /* more bits to steal for future usage */
|
||||
unsigned int dont_compress : 1; /* prevent compression of entry that will be used later */
|
||||
unsigned int extra : 9; /* more bits to steal for future usage */
|
||||
} quicklistNode;
|
||||
|
||||
/* quicklistLZF is a 8+N byte struct holding 'sz' followed by 'compressed'.
|
||||
|
@ -2856,7 +2856,7 @@ cleanup:
|
||||
|
||||
if (lib_ctx) {
|
||||
sds library_name = NULL;
|
||||
if (!(library_name = functionsCreateWithLibraryCtx(final_payload, rdbflags & RDBFLAGS_ALLOW_DUP, &error, lib_ctx))) {
|
||||
if (!(library_name = functionsCreateWithLibraryCtx(final_payload, rdbflags & RDBFLAGS_ALLOW_DUP, &error, lib_ctx, 0))) {
|
||||
if (!error) {
|
||||
error = sdsnew("Failed creating the library");
|
||||
}
|
||||
@ -3418,6 +3418,10 @@ int rdbSaveToSlavesSockets(int req, rdbSaveInfo *rsi) {
|
||||
|
||||
rioInitWithFd(&rdb,rdb_pipe_write);
|
||||
|
||||
/* Close the reading part, so that if the parent crashes, the child will
|
||||
* get a write error and exit. */
|
||||
close(server.rdb_pipe_read);
|
||||
|
||||
redisSetProcTitle("redis-rdb-to-slaves");
|
||||
redisSetCpuAffinity(server.bgsave_cpulist);
|
||||
|
||||
|
@ -330,6 +330,7 @@ static redisConfig *getRedisConfig(const char *ip, int port,
|
||||
}
|
||||
redisAppendCommand(c, "CONFIG GET %s", "save");
|
||||
redisAppendCommand(c, "CONFIG GET %s", "appendonly");
|
||||
int abort_test = 0;
|
||||
int i = 0;
|
||||
void *r = NULL;
|
||||
for (; i < 2; i++) {
|
||||
@ -338,7 +339,6 @@ static redisConfig *getRedisConfig(const char *ip, int port,
|
||||
reply = res == REDIS_OK ? ((redisReply *) r) : NULL;
|
||||
if (res != REDIS_OK || !r) goto fail;
|
||||
if (reply->type == REDIS_REPLY_ERROR) {
|
||||
fprintf(stderr, "ERROR: %s\n", reply->str);
|
||||
goto fail;
|
||||
}
|
||||
if (reply->type != REDIS_REPLY_ARRAY || reply->elements < 2) goto fail;
|
||||
@ -354,15 +354,14 @@ static redisConfig *getRedisConfig(const char *ip, int port,
|
||||
redisFree(c);
|
||||
return cfg;
|
||||
fail:
|
||||
fprintf(stderr, "ERROR: failed to fetch CONFIG from ");
|
||||
if (hostsocket == NULL) fprintf(stderr, "%s:%d\n", ip, port);
|
||||
else fprintf(stderr, "%s\n", hostsocket);
|
||||
int abort_test = 0;
|
||||
if (reply && reply->type == REDIS_REPLY_ERROR &&
|
||||
(!strncmp(reply->str,"NOAUTH",6) ||
|
||||
!strncmp(reply->str,"WRONGPASS",9) ||
|
||||
!strncmp(reply->str,"NOPERM",6)))
|
||||
!strncmp(reply->str,"NOAUTH",6)) {
|
||||
if (hostsocket == NULL)
|
||||
fprintf(stderr, "Node %s:%d replied with error:\n%s\n", ip, port, reply->str);
|
||||
else
|
||||
fprintf(stderr, "Node %s replied with error:\n%s\n", hostsocket, reply->str);
|
||||
abort_test = 1;
|
||||
}
|
||||
freeReplyObject(reply);
|
||||
redisFree(c);
|
||||
freeRedisConfig(cfg);
|
||||
|
@ -6331,10 +6331,10 @@ assign_replicas:
|
||||
* So if (bus_port == 0) or (bus_port == port + CLUSTER_MANAGER_PORT_INCR),
|
||||
* we just call CLUSTER MEET with 2 arguments, using the old form. */
|
||||
reply = CLUSTER_MANAGER_COMMAND(node, "cluster meet %s %d",
|
||||
first->ip, first->port);
|
||||
first_ip, first->port);
|
||||
} else {
|
||||
reply = CLUSTER_MANAGER_COMMAND(node, "cluster meet %s %d %d",
|
||||
first->ip, first->port, first->bus_port);
|
||||
first_ip, first->port, first->bus_port);
|
||||
}
|
||||
int is_err = 0;
|
||||
if (reply != NULL) {
|
||||
@ -6525,7 +6525,7 @@ static int clusterManagerCommandAddNode(int argc, char **argv) {
|
||||
first_ip, first->port);
|
||||
} else {
|
||||
reply = CLUSTER_MANAGER_COMMAND(new_node, "CLUSTER MEET %s %d %d",
|
||||
first->ip, first->port, first->bus_port);
|
||||
first_ip, first->port, first->bus_port);
|
||||
}
|
||||
|
||||
if (!(success = clusterManagerCheckRedisReply(new_node, reply, NULL)))
|
||||
|
@ -1172,7 +1172,10 @@ REDISMODULE_API size_t (*RedisModule_MallocSizeString)(RedisModuleString* str) R
|
||||
REDISMODULE_API size_t (*RedisModule_MallocSizeDict)(RedisModuleDict* dict) REDISMODULE_ATTR;
|
||||
REDISMODULE_API RedisModuleUser * (*RedisModule_CreateModuleUser)(const char *name) REDISMODULE_ATTR;
|
||||
REDISMODULE_API void (*RedisModule_FreeModuleUser)(RedisModuleUser *user) REDISMODULE_ATTR;
|
||||
REDISMODULE_API void (*RedisModule_SetContextUser)(RedisModuleCtx *ctx, const RedisModuleUser *user) REDISMODULE_ATTR;
|
||||
REDISMODULE_API int (*RedisModule_SetModuleUserACL)(RedisModuleUser *user, const char* acl) REDISMODULE_ATTR;
|
||||
REDISMODULE_API int (*RedisModule_SetModuleUserACLString)(RedisModuleCtx * ctx, RedisModuleUser *user, const char* acl, RedisModuleString **error) REDISMODULE_ATTR;
|
||||
REDISMODULE_API RedisModuleString * (*RedisModule_GetModuleUserACLString)(RedisModuleUser *user) REDISMODULE_ATTR;
|
||||
REDISMODULE_API RedisModuleString * (*RedisModule_GetCurrentUserName)(RedisModuleCtx *ctx) REDISMODULE_ATTR;
|
||||
REDISMODULE_API RedisModuleUser * (*RedisModule_GetModuleUserFromUserName)(RedisModuleString *name) REDISMODULE_ATTR;
|
||||
REDISMODULE_API int (*RedisModule_ACLCheckCommandPermissions)(RedisModuleUser *user, RedisModuleString **argv, int argc) REDISMODULE_ATTR;
|
||||
@ -1510,7 +1513,10 @@ static int RedisModule_Init(RedisModuleCtx *ctx, const char *name, int ver, int
|
||||
REDISMODULE_GET_API(MallocSizeDict);
|
||||
REDISMODULE_GET_API(CreateModuleUser);
|
||||
REDISMODULE_GET_API(FreeModuleUser);
|
||||
REDISMODULE_GET_API(SetContextUser);
|
||||
REDISMODULE_GET_API(SetModuleUserACL);
|
||||
REDISMODULE_GET_API(SetModuleUserACLString);
|
||||
REDISMODULE_GET_API(GetModuleUserACLString);
|
||||
REDISMODULE_GET_API(GetCurrentUserName);
|
||||
REDISMODULE_GET_API(GetModuleUserFromUserName);
|
||||
REDISMODULE_GET_API(ACLCheckCommandPermissions);
|
||||
|
@ -44,7 +44,7 @@
|
||||
void replicationDiscardCachedMaster(void);
|
||||
void replicationResurrectCachedMaster(connection *conn);
|
||||
void replicationSendAck(void);
|
||||
void replicaPutOnline(client *slave);
|
||||
int replicaPutOnline(client *slave);
|
||||
void replicaStartCommandStream(client *slave);
|
||||
int cancelReplicationHandshake(int reconnect);
|
||||
|
||||
@ -103,7 +103,7 @@ int bg_unlink(const char *filename) {
|
||||
errno = old_errno;
|
||||
return -1;
|
||||
}
|
||||
bioCreateCloseJob(fd);
|
||||
bioCreateCloseJob(fd, 0);
|
||||
return 0; /* Success. */
|
||||
}
|
||||
}
|
||||
@ -324,89 +324,100 @@ void feedReplicationBuffer(char *s, size_t len) {
|
||||
static long long repl_block_id = 0;
|
||||
|
||||
if (server.repl_backlog == NULL) return;
|
||||
server.master_repl_offset += len;
|
||||
server.repl_backlog->histlen += len;
|
||||
|
||||
size_t start_pos = 0; /* The position of referenced block to start sending. */
|
||||
listNode *start_node = NULL; /* Replica/backlog starts referenced node. */
|
||||
int add_new_block = 0; /* Create new block if current block is total used. */
|
||||
listNode *ln = listLast(server.repl_buffer_blocks);
|
||||
replBufBlock *tail = ln ? listNodeValue(ln) : NULL;
|
||||
while(len > 0) {
|
||||
size_t start_pos = 0; /* The position of referenced block to start sending. */
|
||||
listNode *start_node = NULL; /* Replica/backlog starts referenced node. */
|
||||
int add_new_block = 0; /* Create new block if current block is total used. */
|
||||
listNode *ln = listLast(server.repl_buffer_blocks);
|
||||
replBufBlock *tail = ln ? listNodeValue(ln) : NULL;
|
||||
|
||||
/* Append to tail string when possible. */
|
||||
if (tail && tail->size > tail->used) {
|
||||
start_node = listLast(server.repl_buffer_blocks);
|
||||
start_pos = tail->used;
|
||||
/* Copy the part we can fit into the tail, and leave the rest for a
|
||||
* new node */
|
||||
size_t avail = tail->size - tail->used;
|
||||
size_t copy = (avail >= len) ? len : avail;
|
||||
memcpy(tail->buf + tail->used, s, copy);
|
||||
tail->used += copy;
|
||||
s += copy;
|
||||
len -= copy;
|
||||
}
|
||||
if (len) {
|
||||
/* Create a new node, make sure it is allocated to at
|
||||
* least PROTO_REPLY_CHUNK_BYTES */
|
||||
size_t usable_size;
|
||||
size_t size = (len < PROTO_REPLY_CHUNK_BYTES) ? PROTO_REPLY_CHUNK_BYTES : len;
|
||||
tail = zmalloc_usable(size + sizeof(replBufBlock), &usable_size);
|
||||
/* Take over the allocation's internal fragmentation */
|
||||
tail->size = usable_size - sizeof(replBufBlock);
|
||||
tail->used = len;
|
||||
tail->refcount = 0;
|
||||
tail->repl_offset = server.master_repl_offset - tail->used + 1;
|
||||
tail->id = repl_block_id++;
|
||||
memcpy(tail->buf, s, len);
|
||||
listAddNodeTail(server.repl_buffer_blocks, tail);
|
||||
/* We also count the list node memory into replication buffer memory. */
|
||||
server.repl_buffer_mem += (usable_size + sizeof(listNode));
|
||||
add_new_block = 1;
|
||||
if (start_node == NULL) {
|
||||
/* Append to tail string when possible. */
|
||||
if (tail && tail->size > tail->used) {
|
||||
start_node = listLast(server.repl_buffer_blocks);
|
||||
start_pos = 0;
|
||||
start_pos = tail->used;
|
||||
/* Copy the part we can fit into the tail, and leave the rest for a
|
||||
* new node */
|
||||
size_t avail = tail->size - tail->used;
|
||||
size_t copy = (avail >= len) ? len : avail;
|
||||
memcpy(tail->buf + tail->used, s, copy);
|
||||
tail->used += copy;
|
||||
s += copy;
|
||||
len -= copy;
|
||||
server.master_repl_offset += copy;
|
||||
server.repl_backlog->histlen += copy;
|
||||
}
|
||||
if (len) {
|
||||
/* Create a new node, make sure it is allocated to at
|
||||
* least PROTO_REPLY_CHUNK_BYTES */
|
||||
size_t usable_size;
|
||||
/* Avoid creating nodes smaller than PROTO_REPLY_CHUNK_BYTES, so that we can append more data into them,
|
||||
* and also avoid creating nodes bigger than repl_backlog_size / 16, so that we won't have huge nodes that can't
|
||||
* be trimmed when we still need to hold only a small portion of them. */
|
||||
size_t limit = max((size_t)server.repl_backlog_size / 16, (size_t)PROTO_REPLY_CHUNK_BYTES);
|
||||
size_t size = min(max(len, (size_t)PROTO_REPLY_CHUNK_BYTES), limit);
|
||||
tail = zmalloc_usable(size + sizeof(replBufBlock), &usable_size);
|
||||
/* Take over the allocation's internal fragmentation */
|
||||
tail->size = usable_size - sizeof(replBufBlock);
|
||||
size_t copy = (tail->size >= len) ? len : tail->size;
|
||||
tail->used = copy;
|
||||
tail->refcount = 0;
|
||||
tail->repl_offset = server.master_repl_offset + 1;
|
||||
tail->id = repl_block_id++;
|
||||
memcpy(tail->buf, s, copy);
|
||||
listAddNodeTail(server.repl_buffer_blocks, tail);
|
||||
/* We also count the list node memory into replication buffer memory. */
|
||||
server.repl_buffer_mem += (usable_size + sizeof(listNode));
|
||||
add_new_block = 1;
|
||||
if (start_node == NULL) {
|
||||
start_node = listLast(server.repl_buffer_blocks);
|
||||
start_pos = 0;
|
||||
}
|
||||
s += copy;
|
||||
len -= copy;
|
||||
server.master_repl_offset += copy;
|
||||
server.repl_backlog->histlen += copy;
|
||||
}
|
||||
}
|
||||
|
||||
/* For output buffer of replicas. */
|
||||
listIter li;
|
||||
listRewind(server.slaves,&li);
|
||||
while((ln = listNext(&li))) {
|
||||
client *slave = ln->value;
|
||||
if (!canFeedReplicaReplBuffer(slave)) continue;
|
||||
/* For output buffer of replicas. */
|
||||
listIter li;
|
||||
listRewind(server.slaves,&li);
|
||||
while((ln = listNext(&li))) {
|
||||
client *slave = ln->value;
|
||||
if (!canFeedReplicaReplBuffer(slave)) continue;
|
||||
|
||||
/* Update shared replication buffer start position. */
|
||||
if (slave->ref_repl_buf_node == NULL) {
|
||||
slave->ref_repl_buf_node = start_node;
|
||||
slave->ref_block_pos = start_pos;
|
||||
/* Update shared replication buffer start position. */
|
||||
if (slave->ref_repl_buf_node == NULL) {
|
||||
slave->ref_repl_buf_node = start_node;
|
||||
slave->ref_block_pos = start_pos;
|
||||
/* Only increase the start block reference count. */
|
||||
((replBufBlock *)listNodeValue(start_node))->refcount++;
|
||||
}
|
||||
|
||||
/* Check output buffer limit only when add new block. */
|
||||
if (add_new_block) closeClientOnOutputBufferLimitReached(slave, 1);
|
||||
}
|
||||
|
||||
/* For replication backlog */
|
||||
if (server.repl_backlog->ref_repl_buf_node == NULL) {
|
||||
server.repl_backlog->ref_repl_buf_node = start_node;
|
||||
/* Only increase the start block reference count. */
|
||||
((replBufBlock *)listNodeValue(start_node))->refcount++;
|
||||
|
||||
/* Replication buffer must be empty before adding replication stream
|
||||
* into replication backlog. */
|
||||
serverAssert(add_new_block == 1 && start_pos == 0);
|
||||
}
|
||||
if (add_new_block) {
|
||||
createReplicationBacklogIndex(listLast(server.repl_buffer_blocks));
|
||||
|
||||
/* Check output buffer limit only when add new block. */
|
||||
if (add_new_block) closeClientOnOutputBufferLimitReached(slave, 1);
|
||||
/* It is important to trim after adding replication data to keep the backlog size close to
|
||||
* repl_backlog_size in the common case. We wait until we add a new block to avoid repeated
|
||||
* unnecessary trimming attempts when small amounts of data are added. See comments in
|
||||
* freeMemoryGetNotCountedMemory() for details on replication backlog memory tracking. */
|
||||
incrementalTrimReplicationBacklog(REPL_BACKLOG_TRIM_BLOCKS_PER_CALL);
|
||||
}
|
||||
}
|
||||
|
||||
/* For replication backlog */
|
||||
if (server.repl_backlog->ref_repl_buf_node == NULL) {
|
||||
server.repl_backlog->ref_repl_buf_node = start_node;
|
||||
/* Only increase the start block reference count. */
|
||||
((replBufBlock *)listNodeValue(start_node))->refcount++;
|
||||
|
||||
/* Replication buffer must be empty before adding replication stream
|
||||
* into replication backlog. */
|
||||
serverAssert(add_new_block == 1 && start_pos == 0);
|
||||
}
|
||||
if (add_new_block) {
|
||||
createReplicationBacklogIndex(listLast(server.repl_buffer_blocks));
|
||||
}
|
||||
/* Try to trim replication backlog since replication backlog may exceed
|
||||
* our setting when we add replication stream. Note that it is important to
|
||||
* try to trim at least one node since in the common case this is where one
|
||||
* new backlog node is added and one should be removed. See also comments
|
||||
* in freeMemoryGetNotCountedMemory for details. */
|
||||
incrementalTrimReplicationBacklog(REPL_BACKLOG_TRIM_BLOCKS_PER_CALL);
|
||||
}
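The rewritten loop above clamps every new replication buffer block between PROTO_REPLY_CHUNK_BYTES and repl_backlog_size / 16 and keeps copying until the payload is consumed. Below is a minimal stand-alone sketch of that clamping and chunking; the constants and helper names are illustrative, not the ones Redis uses.

/* sketch: clamp block size between a floor and backlog/16, then chunk */
#include <stdio.h>
#include <stddef.h>

#define CHUNK_FLOOR (16 * 1024)   /* stand-in for PROTO_REPLY_CHUNK_BYTES */

static size_t clamp_block_size(size_t len, size_t backlog_size) {
    size_t limit = backlog_size / 16;
    if (limit < CHUNK_FLOOR) limit = CHUNK_FLOOR;          /* max(backlog/16, floor) */
    size_t size = len < CHUNK_FLOOR ? CHUNK_FLOOR : len;   /* max(len, floor) */
    return size > limit ? limit : size;                    /* min(size, limit) */
}

int main(void) {
    size_t backlog = 1024 * 1024;    /* 1 MiB backlog -> 64 KiB block cap */
    size_t remaining = 300 * 1024;   /* payload larger than one block */
    while (remaining > 0) {
        size_t block = clamp_block_size(remaining, backlog);
        size_t copy = remaining < block ? remaining : block;
        printf("allocate %zu, copy %zu\n", block, copy);
        remaining -= copy;
    }
    return 0;
}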
|
||||
|
||||
/* Propagate write commands to replication stream.
|
||||
@ -585,7 +596,6 @@ void replicationFeedMonitors(client *c, list *monitors, int dictid, robj **argv,
|
||||
while((ln = listNext(&li))) {
|
||||
client *monitor = ln->value;
|
||||
addReply(monitor,cmdobj);
|
||||
updateClientMemUsage(c);
|
||||
}
|
||||
decrRefCount(cmdobj);
|
||||
}
|
||||
@ -1252,12 +1262,19 @@ void replconfCommand(client *c) {
|
||||
* It does a few things:
|
||||
* 1) Put the slave in ONLINE state.
|
||||
* 2) Update the count of "good replicas".
|
||||
* 3) Trigger the module event. */
|
||||
void replicaPutOnline(client *slave) {
|
||||
* 3) Trigger the module event.
|
||||
*
|
||||
* The return value indicates whether the replica may be kept: a zero return means the caller should disconnect it.
|
||||
* */
|
||||
int replicaPutOnline(client *slave) {
|
||||
if (slave->flags & CLIENT_REPL_RDBONLY) {
|
||||
return;
|
||||
slave->replstate = SLAVE_STATE_RDB_TRANSMITTED;
|
||||
/* The client asked for RDB only so we should close it ASAP */
|
||||
serverLog(LL_NOTICE,
|
||||
"RDB transfer completed, rdb only replica (%s) should be disconnected asap",
|
||||
replicationGetSlaveName(slave));
|
||||
return 0;
|
||||
}
|
||||
|
||||
slave->replstate = SLAVE_STATE_ONLINE;
|
||||
slave->repl_ack_time = server.unixtime; /* Prevent false timeout. */
|
||||
|
||||
@ -1268,6 +1285,7 @@ void replicaPutOnline(client *slave) {
|
||||
NULL);
|
||||
serverLog(LL_NOTICE,"Synchronization with replica %s succeeded",
|
||||
replicationGetSlaveName(slave));
|
||||
return 1;
|
||||
}
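replicaPutOnline() now reports whether the replica may stay connected, so callers can free RDB-only replicas instead of starting a command stream for them. A simplified sketch of the caller-side pattern, using stand-in types rather than the real client struct:

/* sketch: a zero return from put_online() means "close this peer" */
#include <stdio.h>

struct replica { int rdb_only; };

static int put_online(struct replica *r) {
    if (r->rdb_only) return 0;   /* caller should disconnect */
    /* ... mark online, reset ack time ... */
    return 1;
}

int main(void) {
    struct replica full = {0}, rdb_only = {1};
    struct replica *peers[] = { &full, &rdb_only };
    for (int i = 0; i < 2; i++) {
        if (!put_online(peers[i])) {
            printf("replica %d: close connection\n", i);
            continue;            /* freeClient()/freeClientAsync() in the real code */
        }
        printf("replica %d: start command stream\n", i);
    }
    return 0;
}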
|
||||
|
||||
/* This function should be called just after a replica received the RDB file
|
||||
@ -1282,14 +1300,8 @@ void replicaPutOnline(client *slave) {
|
||||
* accumulate output buffer data without sending it to the replica so it
|
||||
* won't get mixed with the RDB stream. */
|
||||
void replicaStartCommandStream(client *slave) {
|
||||
serverAssert(!(slave->flags & CLIENT_REPL_RDBONLY));
|
||||
slave->repl_start_cmd_stream_on_ack = 0;
|
||||
if (slave->flags & CLIENT_REPL_RDBONLY) {
|
||||
serverLog(LL_NOTICE,
|
||||
"Close the connection with replica %s as RDB transfer is complete",
|
||||
replicationGetSlaveName(slave));
|
||||
freeClientAsync(slave);
|
||||
return;
|
||||
}
|
||||
|
||||
putClientInPendingWriteQueue(slave);
|
||||
}
|
||||
@ -1392,7 +1404,10 @@ void sendBulkToSlave(connection *conn) {
|
||||
close(slave->repldbfd);
|
||||
slave->repldbfd = -1;
|
||||
connSetWriteHandler(slave->conn,NULL);
|
||||
replicaPutOnline(slave);
|
||||
if (!replicaPutOnline(slave)) {
|
||||
freeClient(slave);
|
||||
return;
|
||||
}
|
||||
replicaStartCommandStream(slave);
|
||||
}
|
||||
}
|
||||
@ -1595,7 +1610,10 @@ void updateSlavesWaitingBgsave(int bgsaveerr, int type) {
|
||||
* after such final EOF. So we don't want to glue the end of
|
||||
* the RDB transfer with the start of the other replication
|
||||
* data. */
|
||||
replicaPutOnline(slave);
|
||||
if (!replicaPutOnline(slave)) {
|
||||
freeClientAsync(slave);
|
||||
continue;
|
||||
}
|
||||
slave->repl_start_cmd_stream_on_ack = 1;
|
||||
} else {
|
||||
if ((slave->repldbfd = open(server.rdb_filename,O_RDONLY)) == -1 ||
|
||||
@ -1821,7 +1839,7 @@ void readSyncBulkPayload(connection *conn) {
|
||||
if (nread == -1) {
|
||||
serverLog(LL_WARNING,
|
||||
"I/O error reading bulk count from MASTER: %s",
|
||||
strerror(errno));
|
||||
connGetLastError(conn));
|
||||
goto error;
|
||||
} else {
|
||||
/* nread here is returned by connSyncReadLine(), which calls syncReadLine() and
|
||||
@ -1893,7 +1911,7 @@ void readSyncBulkPayload(connection *conn) {
|
||||
return;
|
||||
}
|
||||
serverLog(LL_WARNING,"I/O error trying to sync with MASTER: %s",
|
||||
(nread == -1) ? strerror(errno) : "connection lost");
|
||||
(nread == -1) ? connGetLastError(conn) : "connection lost");
|
||||
cancelReplicationHandshake(1);
|
||||
return;
|
||||
}
|
||||
@ -2149,7 +2167,7 @@ void readSyncBulkPayload(connection *conn) {
|
||||
return;
|
||||
}
|
||||
/* Close old rdb asynchronously. */
|
||||
if (old_rdb_fd != -1) bioCreateCloseJob(old_rdb_fd);
|
||||
if (old_rdb_fd != -1) bioCreateCloseJob(old_rdb_fd, 0);
|
||||
|
||||
/* Sync the directory to ensure rename is persisted */
|
||||
if (fsyncFileDir(server.rdb_filename) == -1) {
|
||||
@ -2238,7 +2256,7 @@ char *receiveSynchronousResponse(connection *conn) {
|
||||
/* Read the reply from the server. */
|
||||
if (connSyncReadLine(conn,buf,sizeof(buf),server.repl_syncio_timeout*1000) == -1)
|
||||
{
|
||||
serverLog(LL_WARNING, "Failed to read response from the server: %s", strerror(errno));
|
||||
serverLog(LL_WARNING, "Failed to read response from the server: %s", connGetLastError(conn));
|
||||
return NULL;
|
||||
}
|
||||
server.repl_transfer_lastio = server.unixtime;
|
||||
@ -2799,7 +2817,7 @@ void syncWithMaster(connection *conn) {
|
||||
serverLog(LL_NOTICE,"Retrying with SYNC...");
|
||||
if (connSyncWrite(conn,"SYNC\r\n",6,server.repl_syncio_timeout*1000) == -1) {
|
||||
serverLog(LL_WARNING,"I/O error writing to MASTER: %s",
|
||||
strerror(errno));
|
||||
connGetLastError(conn));
|
||||
goto error;
|
||||
}
|
||||
}
|
||||
|
11
src/script.c
@ -299,6 +299,7 @@ void scriptKill(client *c, int is_eval) {
|
||||
if (mustObeyClient(curr_run_ctx->original_client)) {
|
||||
addReplyError(c,
|
||||
"-UNKILLABLE The busy script was sent by a master instance in the context of replication and cannot be killed.");
|
||||
return;
|
||||
}
|
||||
if (curr_run_ctx->flags & SCRIPT_WRITE_DIRTY) {
|
||||
addReplyError(c,
|
||||
@ -514,22 +515,18 @@ static int scriptVerifyAllowStale(client *c, sds *err) {
|
||||
* up to the engine to take and parse.
|
||||
* The err out variable is set only if an error occurs, and it describes the error.
|
||||
* If err is set, no reply is written to the run_ctx client. */
|
||||
void scriptCall(scriptRunCtx *run_ctx, robj* *argv, int argc, sds *err) {
|
||||
void scriptCall(scriptRunCtx *run_ctx, sds *err) {
|
||||
client *c = run_ctx->c;
|
||||
|
||||
/* Setup our fake client for command execution */
|
||||
c->argv = argv;
|
||||
c->argc = argc;
|
||||
c->user = run_ctx->original_client->user;
|
||||
|
||||
/* Process module hooks */
|
||||
moduleCallCommandFilters(c);
|
||||
argv = c->argv;
|
||||
argc = c->argc;
|
||||
|
||||
struct redisCommand *cmd = lookupCommand(argv, argc);
|
||||
struct redisCommand *cmd = lookupCommand(c->argv, c->argc);
|
||||
c->cmd = c->lastcmd = c->realcmd = cmd;
|
||||
if (scriptVerifyCommandArity(cmd, argc, err) != C_OK) {
|
||||
if (scriptVerifyCommandArity(cmd, c->argc, err) != C_OK) {
|
||||
goto error;
|
||||
}
|
||||
|
||||
|
@ -98,7 +98,7 @@ int scriptPrepareForRun(scriptRunCtx *r_ctx, client *engine_client, client *call
|
||||
void scriptResetRun(scriptRunCtx *r_ctx);
|
||||
int scriptSetResp(scriptRunCtx *r_ctx, int resp);
|
||||
int scriptSetRepl(scriptRunCtx *r_ctx, int repl);
|
||||
void scriptCall(scriptRunCtx *r_ctx, robj **argv, int argc, sds *err);
|
||||
void scriptCall(scriptRunCtx *r_ctx, sds *err);
|
||||
int scriptInterrupt(scriptRunCtx *r_ctx);
|
||||
void scriptKill(client *c, int is_eval);
|
||||
int scriptIsRunning();
|
||||
|
119
src/script_lua.c
@ -334,7 +334,7 @@ static void redisProtocolToLuaType_Error(void *ctx, const char *str, size_t len,
|
||||
/* push a field indicating that the error stats should not be updated for this error
|
||||
* because it was already updated when executing the command. */
|
||||
lua_pushstring(lua,"ignore_error_stats_update");
|
||||
lua_pushboolean(lua, true);
|
||||
lua_pushboolean(lua, 1);
|
||||
lua_settable(lua,-3);
|
||||
}
|
||||
|
||||
@ -628,7 +628,7 @@ static void luaReplyToRedisReply(client *c, client* script_client, lua_State *lu
|
||||
/* Handle error reply. */
|
||||
/* we took care of the stack size on function start */
|
||||
lua_pushstring(lua,"err");
|
||||
lua_gettable(lua,-2);
|
||||
lua_rawget(lua,-2);
|
||||
t = lua_type(lua,-1);
|
||||
if (t == LUA_TSTRING) {
|
||||
lua_pop(lua, 1); /* pop the error message, we will use luaExtractErrorInformation to get error information */
|
||||
@ -646,12 +646,12 @@ static void luaReplyToRedisReply(client *c, client* script_client, lua_State *lu
|
||||
|
||||
/* Handle status reply. */
|
||||
lua_pushstring(lua,"ok");
|
||||
lua_gettable(lua,-2);
|
||||
lua_rawget(lua,-2);
|
||||
t = lua_type(lua,-1);
|
||||
if (t == LUA_TSTRING) {
|
||||
sds ok = sdsnew(lua_tostring(lua,-1));
|
||||
sdsmapchars(ok,"\r\n"," ",2);
|
||||
addReplySds(c,sdscatprintf(sdsempty(),"+%s\r\n",ok));
|
||||
addReplyStatusLength(c, ok, sdslen(ok));
|
||||
sdsfree(ok);
|
||||
lua_pop(lua,2);
|
||||
return;
|
||||
@ -660,7 +660,7 @@ static void luaReplyToRedisReply(client *c, client* script_client, lua_State *lu
|
||||
|
||||
/* Handle double reply. */
|
||||
lua_pushstring(lua,"double");
|
||||
lua_gettable(lua,-2);
|
||||
lua_rawget(lua,-2);
|
||||
t = lua_type(lua,-1);
|
||||
if (t == LUA_TNUMBER) {
|
||||
addReplyDouble(c,lua_tonumber(lua,-1));
|
||||
@ -671,7 +671,7 @@ static void luaReplyToRedisReply(client *c, client* script_client, lua_State *lu
|
||||
|
||||
/* Handle big number reply. */
|
||||
lua_pushstring(lua,"big_number");
|
||||
lua_gettable(lua,-2);
|
||||
lua_rawget(lua,-2);
|
||||
t = lua_type(lua,-1);
|
||||
if (t == LUA_TSTRING) {
|
||||
sds big_num = sdsnewlen(lua_tostring(lua,-1), lua_strlen(lua,-1));
|
||||
@ -685,16 +685,16 @@ static void luaReplyToRedisReply(client *c, client* script_client, lua_State *lu
|
||||
|
||||
/* Handle verbatim reply. */
|
||||
lua_pushstring(lua,"verbatim_string");
|
||||
lua_gettable(lua,-2);
|
||||
lua_rawget(lua,-2);
|
||||
t = lua_type(lua,-1);
|
||||
if (t == LUA_TTABLE) {
|
||||
lua_pushstring(lua,"format");
|
||||
lua_gettable(lua,-2);
|
||||
lua_rawget(lua,-2);
|
||||
t = lua_type(lua,-1);
|
||||
if (t == LUA_TSTRING){
|
||||
char* format = (char*)lua_tostring(lua,-1);
|
||||
lua_pushstring(lua,"string");
|
||||
lua_gettable(lua,-3);
|
||||
lua_rawget(lua,-3);
|
||||
t = lua_type(lua,-1);
|
||||
if (t == LUA_TSTRING){
|
||||
size_t len;
|
||||
@ -711,7 +711,7 @@ static void luaReplyToRedisReply(client *c, client* script_client, lua_State *lu
|
||||
|
||||
/* Handle map reply. */
|
||||
lua_pushstring(lua,"map");
|
||||
lua_gettable(lua,-2);
|
||||
lua_rawget(lua,-2);
|
||||
t = lua_type(lua,-1);
|
||||
if (t == LUA_TTABLE) {
|
||||
int maplen = 0;
|
||||
@ -734,7 +734,7 @@ static void luaReplyToRedisReply(client *c, client* script_client, lua_State *lu
|
||||
|
||||
/* Handle set reply. */
|
||||
lua_pushstring(lua,"set");
|
||||
lua_gettable(lua,-2);
|
||||
lua_rawget(lua,-2);
|
||||
t = lua_type(lua,-1);
|
||||
if (t == LUA_TTABLE) {
|
||||
int setlen = 0;
|
||||
@ -761,7 +761,7 @@ static void luaReplyToRedisReply(client *c, client* script_client, lua_State *lu
|
||||
while(1) {
|
||||
/* we took care of the stack size on function start */
|
||||
lua_pushnumber(lua,j++);
|
||||
lua_gettable(lua,-2);
|
||||
lua_rawget(lua,-2);
|
||||
t = lua_type(lua,-1);
|
||||
if (t == LUA_TNIL) {
|
||||
lua_pop(lua,1);
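The switch from lua_gettable() to lua_rawget() in these hunks means field lookups on script reply tables no longer invoke __index metamethods that a script could attach. A small illustration of the raw lookup, assuming a Lua 5.1-style C API as embedded by Redis (link with -llua); the helper name is made up for the example:

/* sketch: raw lookup of a string key in the table on top of the stack */
#include <stdio.h>
#include <lua.h>
#include <lauxlib.h>
#include <lualib.h>

static int has_raw_field(lua_State *lua, const char *name) {
    lua_pushstring(lua, name);
    lua_rawget(lua, -2);          /* raw lookup: no metamethods run */
    int found = !lua_isnil(lua, -1);
    lua_pop(lua, 1);              /* leave only the table on the stack */
    return found;
}

int main(void) {
    lua_State *lua = luaL_newstate();
    lua_newtable(lua);
    lua_pushstring(lua, "ok");
    lua_pushstring(lua, "fine");
    lua_settable(lua, -3);        /* t.ok = "fine" */
    printf("ok present: %d, err present: %d\n",
           has_raw_field(lua, "ok"), has_raw_field(lua, "err"));
    lua_close(lua);
    return 0;
}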
|
||||
@ -781,8 +781,19 @@ static void luaReplyToRedisReply(client *c, client* script_client, lua_State *lu
|
||||
/* ---------------------------------------------------------------------------
|
||||
* Lua redis.* functions implementations.
|
||||
* ------------------------------------------------------------------------- */
|
||||
void freeLuaRedisArgv(robj **argv, int argc, int argv_len);
|
||||
|
||||
static robj **luaArgsToRedisArgv(lua_State *lua, int *argc) {
|
||||
/* Cached argv array across calls. */
|
||||
static robj **lua_argv = NULL;
|
||||
static int lua_argv_size = 0;
|
||||
|
||||
/* Cache of recently used small arguments to avoid malloc calls. */
|
||||
#define LUA_CMD_OBJCACHE_SIZE 32
|
||||
#define LUA_CMD_OBJCACHE_MAX_LEN 64
|
||||
static robj *lua_args_cached_objects[LUA_CMD_OBJCACHE_SIZE];
|
||||
static size_t lua_args_cached_objects_len[LUA_CMD_OBJCACHE_SIZE];
|
||||
|
||||
static robj **luaArgsToRedisArgv(lua_State *lua, int *argc, int *argv_len) {
|
||||
int j;
|
||||
/* Require at least one argument */
|
||||
*argc = lua_gettop(lua);
|
||||
@ -791,8 +802,12 @@ static robj **luaArgsToRedisArgv(lua_State *lua, int *argc) {
|
||||
return NULL;
|
||||
}
|
||||
|
||||
/* Build the arguments vector */
|
||||
robj **argv = zcalloc(sizeof(robj*) * *argc);
|
||||
/* Build the arguments vector (reuse a cached argv from last call) */
|
||||
if (lua_argv_size < *argc) {
|
||||
lua_argv = zrealloc(lua_argv,sizeof(robj*)* *argc);
|
||||
lua_argv_size = *argc;
|
||||
}
|
||||
*argv_len = lua_argv_size;
|
||||
|
||||
for (j = 0; j < *argc; j++) {
|
||||
char *obj_s;
|
||||
@ -810,8 +825,18 @@ static robj **luaArgsToRedisArgv(lua_State *lua, int *argc) {
|
||||
obj_s = (char*)lua_tolstring(lua,j+1,&obj_len);
|
||||
if (obj_s == NULL) break; /* Not a string. */
|
||||
}
|
||||
|
||||
argv[j] = createStringObject(obj_s, obj_len);
|
||||
/* Try to use a cached object. */
|
||||
if (j < LUA_CMD_OBJCACHE_SIZE && lua_args_cached_objects[j] &&
|
||||
lua_args_cached_objects_len[j] >= obj_len)
|
||||
{
|
||||
sds s = lua_args_cached_objects[j]->ptr;
|
||||
lua_argv[j] = lua_args_cached_objects[j];
|
||||
lua_args_cached_objects[j] = NULL;
|
||||
memcpy(s,obj_s,obj_len+1);
|
||||
sdssetlen(s, obj_len);
|
||||
} else {
|
||||
lua_argv[j] = createStringObject(obj_s, obj_len);
|
||||
}
|
||||
}
|
||||
|
||||
/* Pop all arguments from the stack, we do not need them anymore
|
||||
@ -822,17 +847,42 @@ static robj **luaArgsToRedisArgv(lua_State *lua, int *argc) {
|
||||
* is not a string or an integer (lua_isstring() returns true for
|
||||
* integers as well). */
|
||||
if (j != *argc) {
|
||||
j--;
|
||||
while (j >= 0) {
|
||||
decrRefCount(argv[j]);
|
||||
j--;
|
||||
}
|
||||
zfree(argv);
|
||||
freeLuaRedisArgv(lua_argv, j, lua_argv_size);
|
||||
luaPushError(lua, "Lua redis lib command arguments must be strings or integers");
|
||||
return NULL;
|
||||
}
|
||||
|
||||
return argv;
|
||||
return lua_argv;
|
||||
}
|
||||
|
||||
void freeLuaRedisArgv(robj **argv, int argc, int argv_len) {
|
||||
int j;
|
||||
for (j = 0; j < argc; j++) {
|
||||
robj *o = argv[j];
|
||||
|
||||
/* Try to cache the object in the lua_args_cached_objects array.
|
||||
* The object must be small, SDS-encoded, and with refcount = 1
|
||||
* (we must be the only owner) for us to cache it. */
|
||||
if (j < LUA_CMD_OBJCACHE_SIZE &&
|
||||
o->refcount == 1 &&
|
||||
(o->encoding == OBJ_ENCODING_RAW ||
|
||||
o->encoding == OBJ_ENCODING_EMBSTR) &&
|
||||
sdslen(o->ptr) <= LUA_CMD_OBJCACHE_MAX_LEN)
|
||||
{
|
||||
sds s = o->ptr;
|
||||
if (lua_args_cached_objects[j]) decrRefCount(lua_args_cached_objects[j]);
|
||||
lua_args_cached_objects[j] = o;
|
||||
lua_args_cached_objects_len[j] = sdsalloc(s);
|
||||
} else {
|
||||
decrRefCount(o);
|
||||
}
|
||||
}
|
||||
if (argv != lua_argv || argv_len != lua_argv_size) {
|
||||
/* The command changed argv, scrap the cache and start over. */
|
||||
zfree(argv);
|
||||
lua_argv = NULL;
|
||||
lua_argv_size = 0;
|
||||
}
|
||||
}
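The cached lua_argv array and the lua_args_cached_objects slots above reuse allocations across redis.call() invocations instead of allocating and freeing an argv on every call. A generic, simplified sketch of that per-slot reuse idea, using plain C buffers rather than robj objects; all names and constants here are illustrative:

/* sketch: keep up to N recently freed small buffers, indexed by argument
 * position, and hand them back to the next call that fits */
#include <stdlib.h>
#include <string.h>
#include <stdio.h>

#define CACHE_SLOTS 4
#define CACHE_MAX_LEN 64

static char *slot_buf[CACHE_SLOTS];
static size_t slot_cap[CACHE_SLOTS];

static char *arg_acquire(int slot, const char *s, size_t len) {
    if (slot < CACHE_SLOTS && slot_buf[slot] && slot_cap[slot] >= len + 1) {
        char *buf = slot_buf[slot];          /* reuse cached buffer */
        slot_buf[slot] = NULL;
        memcpy(buf, s, len + 1);
        return buf;
    }
    return strdup(s);                        /* fall back to allocating */
}

static void arg_release(int slot, char *buf, size_t cap) {
    if (slot < CACHE_SLOTS && !slot_buf[slot] && cap <= CACHE_MAX_LEN) {
        slot_buf[slot] = buf;                /* cache instead of freeing */
        slot_cap[slot] = cap;
        return;
    }
    free(buf);
}

int main(void) {
    char *a = arg_acquire(0, "GET", 3);
    arg_release(0, a, 4);
    char *b = arg_acquire(0, "SET", 3);      /* reuses the cached buffer */
    printf("%s (reused: %s)\n", b, a == b ? "yes" : "no");
    arg_release(0, b, 4);
    return 0;
}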
|
||||
|
||||
static int luaRedisGenericCommand(lua_State *lua, int raise_error) {
|
||||
@ -846,9 +896,8 @@ static int luaRedisGenericCommand(lua_State *lua, int raise_error) {
|
||||
client* c = rctx->c;
|
||||
sds reply;
|
||||
|
||||
int argc;
|
||||
robj **argv = luaArgsToRedisArgv(lua, &argc);
|
||||
if (argv == NULL) {
|
||||
c->argv = luaArgsToRedisArgv(lua, &c->argc, &c->argv_len);
|
||||
if (c->argv == NULL) {
|
||||
return raise_error ? luaError(lua) : 1;
|
||||
}
|
||||
|
||||
@ -884,14 +933,14 @@ static int luaRedisGenericCommand(lua_State *lua, int raise_error) {
|
||||
ldbLog(cmdlog);
|
||||
}
|
||||
|
||||
scriptCall(rctx, argv, argc, &err);
|
||||
scriptCall(rctx, &err);
|
||||
if (err) {
|
||||
luaPushError(lua, err);
|
||||
sdsfree(err);
|
||||
/* push a field indicating that the error stats should not be updated for this error
|
||||
* because it was already updated when executing the command. */
|
||||
lua_pushstring(lua,"ignore_error_stats_update");
|
||||
lua_pushboolean(lua, true);
|
||||
lua_pushboolean(lua, 1);
|
||||
lua_settable(lua,-3);
|
||||
goto cleanup;
|
||||
}
|
||||
@ -929,8 +978,11 @@ static int luaRedisGenericCommand(lua_State *lua, int raise_error) {
|
||||
cleanup:
|
||||
/* Clean up. Command code may have changed argv/argc so we use the
|
||||
* argv/argc of the client instead of the local variables. */
|
||||
freeClientArgv(c);
|
||||
freeLuaRedisArgv(c->argv, c->argc, c->argv_len);
|
||||
c->argc = c->argv_len = 0;
|
||||
c->user = NULL;
|
||||
c->argv = NULL;
|
||||
freeClientArgv(c);
|
||||
inuse--;
|
||||
|
||||
if (raise_error) {
|
||||
@ -1083,8 +1135,8 @@ static int luaRedisAclCheckCmdPermissionsCommand(lua_State *lua) {
|
||||
}
|
||||
int raise_error = 0;
|
||||
|
||||
int argc;
|
||||
robj **argv = luaArgsToRedisArgv(lua, &argc);
|
||||
int argc, argv_len;
|
||||
robj **argv = luaArgsToRedisArgv(lua, &argc, &argv_len);
|
||||
|
||||
/* Require at least one argument */
|
||||
if (argv == NULL) return luaError(lua);
|
||||
@ -1103,8 +1155,7 @@ static int luaRedisAclCheckCmdPermissionsCommand(lua_State *lua) {
|
||||
}
|
||||
}
|
||||
|
||||
while (argc--) decrRefCount(argv[argc]);
|
||||
zfree(argv);
|
||||
freeLuaRedisArgv(argv, argc, argv_len);
|
||||
if (raise_error)
|
||||
return luaError(lua);
|
||||
else
|
||||
|
87
src/sds.c
@ -306,46 +306,20 @@ sds sdsMakeRoomForNonGreedy(sds s, size_t addlen) {
|
||||
*
|
||||
* After the call, the passed sds string is no longer valid and all the
|
||||
* references must be substituted with the new pointer returned by the call. */
|
||||
sds sdsRemoveFreeSpace(sds s) {
|
||||
void *sh, *newsh;
|
||||
char type, oldtype = s[-1] & SDS_TYPE_MASK;
|
||||
int hdrlen, oldhdrlen = sdsHdrSize(oldtype);
|
||||
size_t len = sdslen(s);
|
||||
size_t avail = sdsavail(s);
|
||||
sh = (char*)s-oldhdrlen;
|
||||
|
||||
/* Return ASAP if there is no space left. */
|
||||
if (avail == 0) return s;
|
||||
|
||||
/* Check what would be the minimum SDS header that is just good enough to
|
||||
* fit this string. */
|
||||
type = sdsReqType(len);
|
||||
hdrlen = sdsHdrSize(type);
|
||||
|
||||
/* If the type is the same, or at least a large enough type is still
|
||||
* required, we just realloc(), letting the allocator to do the copy
|
||||
* only if really needed. Otherwise if the change is huge, we manually
|
||||
* reallocate the string to use the different header type. */
|
||||
if (oldtype==type || type > SDS_TYPE_8) {
|
||||
newsh = s_realloc(sh, oldhdrlen+len+1);
|
||||
if (newsh == NULL) return NULL;
|
||||
s = (char*)newsh+oldhdrlen;
|
||||
} else {
|
||||
newsh = s_malloc(hdrlen+len+1);
|
||||
if (newsh == NULL) return NULL;
|
||||
memcpy((char*)newsh+hdrlen, s, len+1);
|
||||
s_free(sh);
|
||||
s = (char*)newsh+hdrlen;
|
||||
s[-1] = type;
|
||||
sdssetlen(s, len);
|
||||
}
|
||||
sdssetalloc(s, len);
|
||||
return s;
|
||||
sds sdsRemoveFreeSpace(sds s, int would_regrow) {
|
||||
return sdsResize(s, sdslen(s), would_regrow);
|
||||
}
|
||||
|
||||
/* Resize the allocation, this can make the allocation bigger or smaller,
|
||||
* if the size is smaller than currently used len, the data will be truncated */
|
||||
sds sdsResize(sds s, size_t size) {
|
||||
* if the size is smaller than currently used len, the data will be truncated.
|
||||
*
|
||||
* When the would_regrow argument is set to 1, it prevents the use of
|
||||
* SDS_TYPE_5, which is desired when the sds is likely to be changed again.
|
||||
*
|
||||
* The sdsAlloc size will be set to the requested size regardless of the actual
|
||||
* allocation size, this is done in order to avoid repeated calls to this
|
||||
* function when the caller detects that it has excess space. */
|
||||
sds sdsResize(sds s, size_t size, int would_regrow) {
|
||||
void *sh, *newsh;
|
||||
char type, oldtype = s[-1] & SDS_TYPE_MASK;
|
||||
int hdrlen, oldhdrlen = sdsHdrSize(oldtype);
|
||||
@ -361,8 +335,10 @@ sds sdsResize(sds s, size_t size) {
|
||||
/* Check what would be the minimum SDS header that is just good enough to
|
||||
* fit this string. */
|
||||
type = sdsReqType(size);
|
||||
/* Don't use type 5, it is not good for strings that are resized. */
|
||||
if (type == SDS_TYPE_5) type = SDS_TYPE_8;
|
||||
if (would_regrow) {
|
||||
/* Don't use type 5, it is not good for strings that are expected to grow back. */
|
||||
if (type == SDS_TYPE_5) type = SDS_TYPE_8;
|
||||
}
|
||||
hdrlen = sdsHdrSize(type);
|
||||
|
||||
/* If the type is the same, or can hold the size in it with low overhead
|
||||
@ -370,12 +346,25 @@ sds sdsResize(sds s, size_t size) {
|
||||
* to do the copy only if really needed. Otherwise if the change is
|
||||
* huge, we manually reallocate the string to use the different header
|
||||
* type. */
|
||||
if (oldtype==type || (type < oldtype && type > SDS_TYPE_8)) {
|
||||
newsh = s_realloc(sh, oldhdrlen+size+1);
|
||||
if (newsh == NULL) return NULL;
|
||||
s = (char*)newsh+oldhdrlen;
|
||||
int use_realloc = (oldtype==type || (type < oldtype && type > SDS_TYPE_8));
|
||||
size_t newlen = use_realloc ? oldhdrlen+size+1 : hdrlen+size+1;
|
||||
|
||||
if (use_realloc) {
|
||||
int alloc_already_optimal = 0;
|
||||
#if defined(USE_JEMALLOC)
|
||||
/* je_nallocx returns the expected allocation size for the newlen.
|
||||
* We aim to avoid calling realloc() when using Jemalloc if there is no
|
||||
* change in the allocation size, as it incurs a cost even if the
|
||||
* allocation size stays the same. */
|
||||
alloc_already_optimal = (je_nallocx(newlen, 0) == zmalloc_size(sh));
|
||||
#endif
|
||||
if (!alloc_already_optimal) {
|
||||
newsh = s_realloc(sh, newlen);
|
||||
if (newsh == NULL) return NULL;
|
||||
s = (char*)newsh+oldhdrlen;
|
||||
}
|
||||
} else {
|
||||
newsh = s_malloc(hdrlen+size+1);
|
||||
newsh = s_malloc(newlen);
|
||||
if (newsh == NULL) return NULL;
|
||||
memcpy((char*)newsh+hdrlen, s, len);
|
||||
s_free(sh);
|
||||
@ -1551,27 +1540,27 @@ int sdsTest(int argc, char **argv, int flags) {
|
||||
|
||||
/* Test sdsresize - extend */
|
||||
x = sdsnew("1234567890123456789012345678901234567890");
|
||||
x = sdsResize(x, 200);
|
||||
x = sdsResize(x, 200, 1);
|
||||
test_cond("sdsrezie() expand len", sdslen(x) == 40);
|
||||
test_cond("sdsrezie() expand strlen", strlen(x) == 40);
|
||||
test_cond("sdsrezie() expand alloc", sdsalloc(x) == 200);
|
||||
/* Test sdsresize - trim free space */
|
||||
x = sdsResize(x, 80);
|
||||
x = sdsResize(x, 80, 1);
|
||||
test_cond("sdsrezie() shrink len", sdslen(x) == 40);
|
||||
test_cond("sdsrezie() shrink strlen", strlen(x) == 40);
|
||||
test_cond("sdsrezie() shrink alloc", sdsalloc(x) == 80);
|
||||
/* Test sdsresize - crop used space */
|
||||
x = sdsResize(x, 30);
|
||||
x = sdsResize(x, 30, 1);
|
||||
test_cond("sdsrezie() crop len", sdslen(x) == 30);
|
||||
test_cond("sdsrezie() crop strlen", strlen(x) == 30);
|
||||
test_cond("sdsrezie() crop alloc", sdsalloc(x) == 30);
|
||||
/* Test sdsresize - extend to different class */
|
||||
x = sdsResize(x, 400);
|
||||
x = sdsResize(x, 400, 1);
|
||||
test_cond("sdsrezie() expand len", sdslen(x) == 30);
|
||||
test_cond("sdsrezie() expand strlen", strlen(x) == 30);
|
||||
test_cond("sdsrezie() expand alloc", sdsalloc(x) == 400);
|
||||
/* Test sdsresize - shrink to different class */
|
||||
x = sdsResize(x, 4);
|
||||
x = sdsResize(x, 4, 1);
|
||||
test_cond("sdsrezie() crop len", sdslen(x) == 4);
|
||||
test_cond("sdsrezie() crop strlen", strlen(x) == 4);
|
||||
test_cond("sdsrezie() crop alloc", sdsalloc(x) == 4);
|
||||
|
@ -267,8 +267,8 @@ sds sdstemplate(const char *template, sdstemplate_callback_t cb_func, void *cb_a
|
||||
sds sdsMakeRoomFor(sds s, size_t addlen);
|
||||
sds sdsMakeRoomForNonGreedy(sds s, size_t addlen);
|
||||
void sdsIncrLen(sds s, ssize_t incr);
|
||||
sds sdsRemoveFreeSpace(sds s);
|
||||
sds sdsResize(sds s, size_t size);
|
||||
sds sdsRemoveFreeSpace(sds s, int would_regrow);
|
||||
sds sdsResize(sds s, size_t size, int would_regrow);
|
||||
size_t sdsAllocSize(sds s);
|
||||
void *sdsAllocPtr(sds s);
|
||||
|
||||
|
@ -598,11 +598,6 @@ void releaseSentinelAddr(sentinelAddr *sa) {
|
||||
zfree(sa);
|
||||
}
|
||||
|
||||
/* Return non-zero if two addresses are equal. */
|
||||
int sentinelAddrIsEqual(sentinelAddr *a, sentinelAddr *b) {
|
||||
return a->port == b->port && !strcasecmp(a->ip,b->ip);
|
||||
}
|
||||
|
||||
/* Return non-zero if the two addresses are equal, either by address
|
||||
* or by hostname if they could not have been resolved.
|
||||
*/
|
||||
@ -616,10 +611,16 @@ int sentinelAddrOrHostnameEqual(sentinelAddr *a, sentinelAddr *b) {
|
||||
int sentinelAddrEqualsHostname(sentinelAddr *a, char *hostname) {
|
||||
char ip[NET_IP_STR_LEN];
|
||||
|
||||
/* We always resolve the hostname and compare it to the address */
|
||||
/* Try to resolve the hostname and compare it to the address */
|
||||
if (anetResolve(NULL, hostname, ip, sizeof(ip),
|
||||
sentinel.resolve_hostnames ? ANET_NONE : ANET_IP_ONLY) == ANET_ERR)
|
||||
return 0;
|
||||
sentinel.resolve_hostnames ? ANET_NONE : ANET_IP_ONLY) == ANET_ERR) {
|
||||
|
||||
/* If resolving failed, compare based on hostnames. That is our best effort as
|
||||
* long as the server is unavailable for some reason. It is fine since Redis
|
||||
* instance cannot have multiple hostnames for a given setup */
|
||||
return !strcasecmp(sentinel.resolve_hostnames ? a->hostname : a->ip, hostname);
|
||||
}
|
||||
/* Compare based on address */
|
||||
return !strcasecmp(a->ip, ip);
|
||||
}
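With this change a failed DNS resolution no longer makes the comparison return false; Sentinel falls back to comparing the stored hostname against the given one. A simplified sketch of that fallback, with the resolver stubbed out and plain strings instead of sentinelAddr (the helper names are hypothetical):

/* sketch: compare by resolved IP when possible, by hostname otherwise */
#include <stdio.h>
#include <string.h>
#include <strings.h>

static int resolve(const char *hostname, char *ip, size_t iplen) {
    (void)hostname; (void)ip; (void)iplen;
    return -1;                        /* pretend DNS is unavailable */
}

static int addr_equals_hostname(const char *addr_ip, const char *addr_hostname,
                                const char *hostname) {
    char ip[46];
    if (resolve(hostname, ip, sizeof(ip)) != 0) {
        /* best effort while the name can't be resolved */
        return !strcasecmp(addr_hostname, hostname);
    }
    return !strcasecmp(addr_ip, ip);
}

int main(void) {
    printf("%d\n", addr_equals_hostname("10.0.0.1", "replica-1.local", "REPLICA-1.local"));
    return 0;
}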
|
||||
|
||||
@ -1610,7 +1611,7 @@ int sentinelResetMasterAndChangeAddress(sentinelRedisInstance *master, char *hos
|
||||
while((de = dictNext(di)) != NULL) {
|
||||
sentinelRedisInstance *slave = dictGetVal(de);
|
||||
|
||||
if (sentinelAddrIsEqual(slave->addr,newaddr)) continue;
|
||||
if (sentinelAddrOrHostnameEqual(slave->addr,newaddr)) continue;
|
||||
slaves[numslaves++] = dupSentinelAddr(slave->addr);
|
||||
}
|
||||
dictReleaseIterator(di);
|
||||
@ -1618,7 +1619,7 @@ int sentinelResetMasterAndChangeAddress(sentinelRedisInstance *master, char *hos
|
||||
/* If we are switching to a different address, include the old address
|
||||
* as a slave as well, so that we'll be able to sense / reconfigure
|
||||
* the old master. */
|
||||
if (!sentinelAddrIsEqual(newaddr,master->addr)) {
|
||||
if (!sentinelAddrOrHostnameEqual(newaddr,master->addr)) {
|
||||
slaves[numslaves++] = dupSentinelAddr(master->addr);
|
||||
}
|
||||
|
||||
@ -2169,7 +2170,7 @@ void rewriteConfigSentinelOption(struct rewriteConfigState *state) {
|
||||
* slave's address, a failover is in progress and the slave was
|
||||
* already successfully promoted. So as the address of this slave
|
||||
* we use the old master address instead. */
|
||||
if (sentinelAddrIsEqual(slave_addr,master_addr))
|
||||
if (sentinelAddrOrHostnameEqual(slave_addr,master_addr))
|
||||
slave_addr = master->addr;
|
||||
line = sdscatprintf(sdsempty(),
|
||||
"sentinel known-replica %s %s %d",
|
||||
|
249
src/server.c
@ -594,13 +594,15 @@ int incrementallyRehash(int dbid) {
|
||||
* as we want to avoid resizing the hash tables when there is a child in order
|
||||
* to play well with copy-on-write (otherwise when a resize happens lots of
|
||||
* memory pages are copied). The goal of this function is to update the ability
|
||||
* for dict.c to resize the hash tables accordingly to the fact we have an
|
||||
* for dict.c to resize or rehash the tables accordingly to the fact we have an
|
||||
* active fork child running. */
|
||||
void updateDictResizePolicy(void) {
|
||||
if (!hasActiveChildProcess())
|
||||
dictEnableResize();
|
||||
if (server.in_fork_child != CHILD_TYPE_NONE)
|
||||
dictSetResizeEnabled(DICT_RESIZE_FORBID);
|
||||
else if (hasActiveChildProcess())
|
||||
dictSetResizeEnabled(DICT_RESIZE_AVOID);
|
||||
else
|
||||
dictDisableResize();
|
||||
dictSetResizeEnabled(DICT_RESIZE_ENABLE);
|
||||
}
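The resize policy becomes three-valued: fully enabled, avoided while a background child exists (to limit copy-on-write), and forbidden inside the fork child itself. A sketch of how a consumer of that policy might act on each state; the expansion thresholds below are illustrative, not the ones dict.c uses:

/* sketch: three-state resize policy consumer */
#include <stdio.h>

typedef enum { DICT_RESIZE_ENABLE, DICT_RESIZE_AVOID, DICT_RESIZE_FORBID } dictResizeEnable;

static dictResizeEnable dict_can_resize = DICT_RESIZE_ENABLE;

static int should_expand(unsigned long used, unsigned long size) {
    if (dict_can_resize == DICT_RESIZE_FORBID) return 0;              /* never in the fork child */
    if (dict_can_resize == DICT_RESIZE_AVOID) return used > size * 5; /* only when badly overloaded */
    return used >= size;                                              /* normal growth */
}

int main(void) {
    dict_can_resize = DICT_RESIZE_AVOID;
    printf("%d %d\n", should_expand(6, 4), should_expand(30, 4));
    return 0;
}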
|
||||
|
||||
const char *strChildType(int type) {
|
||||
@ -695,7 +697,7 @@ int clientsCronResizeQueryBuffer(client *c) {
|
||||
/* There are two conditions to resize the query buffer: */
|
||||
if (idletime > 2) {
|
||||
/* 1) Query is idle for a long time. */
|
||||
c->querybuf = sdsRemoveFreeSpace(c->querybuf);
|
||||
c->querybuf = sdsRemoveFreeSpace(c->querybuf, 1);
|
||||
} else if (querybuf_size > PROTO_RESIZE_THRESHOLD && querybuf_size/2 > c->querybuf_peak) {
|
||||
/* 2) Query buffer is too big for latest peak and is larger than
|
||||
* resize threshold. Trim excess space but only up to a limit,
|
||||
@ -705,7 +707,7 @@ int clientsCronResizeQueryBuffer(client *c) {
|
||||
size_t resize = sdslen(c->querybuf);
|
||||
if (resize < c->querybuf_peak) resize = c->querybuf_peak;
|
||||
if (c->bulklen != -1 && resize < (size_t)c->bulklen) resize = c->bulklen;
|
||||
c->querybuf = sdsResize(c->querybuf, resize);
|
||||
c->querybuf = sdsResize(c->querybuf, resize, 1);
|
||||
}
|
||||
}
|
||||
|
||||
@ -815,37 +817,44 @@ static inline clientMemUsageBucket *getMemUsageBucket(size_t mem) {
|
||||
return &server.client_mem_usage_buckets[bucket_idx];
|
||||
}
|
||||
|
||||
/* This is called both on explicit clients when something changed their buffers,
|
||||
* so we can track clients' memory and enforce clients' maxmemory in real time,
|
||||
* and also from the clientsCron. We call it from the cron so we have updated
|
||||
* stats for non CLIENT_TYPE_NORMAL/PUBSUB clients and in case a configuration
|
||||
* change requires us to evict a non-active client.
|
||||
/*
|
||||
* This method updates the client memory usage and updates the
|
||||
* server stats for client type.
|
||||
*
|
||||
* This also adds the client to the correct memory usage bucket. Each bucket contains
|
||||
* all clients with roughly the same amount of memory. This way we group
|
||||
* together clients consuming about the same amount of memory and can quickly
|
||||
* free them in case we reach maxmemory-clients (client eviction).
|
||||
* This method is called from the clientsCron to have updated
|
||||
* stats for non CLIENT_TYPE_NORMAL/PUBSUB clients to accurately
|
||||
* provide information around clients memory usage.
|
||||
*
|
||||
* It is also used in updateClientMemUsageAndBucket to have latest
|
||||
* client memory usage information to place it into appropriate client memory
|
||||
* usage bucket.
|
||||
*/
|
||||
int updateClientMemUsage(client *c) {
|
||||
serverAssert(io_threads_op == IO_THREADS_OP_IDLE);
|
||||
void updateClientMemoryUsage(client *c) {
|
||||
size_t mem = getClientMemoryUsage(c, NULL);
|
||||
int type = getClientType(c);
|
||||
/* Now that we have the memory used by the client, remove the old
|
||||
* value from the old category, and add it back. */
|
||||
server.stat_clients_type_memory[c->last_memory_type] -= c->last_memory_usage;
|
||||
server.stat_clients_type_memory[type] += mem;
|
||||
/* Remember what we added and where, to remove it next time. */
|
||||
c->last_memory_type = type;
|
||||
c->last_memory_usage = mem;
|
||||
}
|
||||
|
||||
/* Remove the old value of the memory used by the client from the old
|
||||
* category, and add it back. */
|
||||
if (type != c->last_memory_type) {
|
||||
server.stat_clients_type_memory[c->last_memory_type] -= c->last_memory_usage;
|
||||
server.stat_clients_type_memory[type] += mem;
|
||||
c->last_memory_type = type;
|
||||
} else {
|
||||
server.stat_clients_type_memory[type] += mem - c->last_memory_usage;
|
||||
int clientEvictionAllowed(client *c) {
|
||||
if (server.maxmemory_clients == 0 || c->flags & CLIENT_NO_EVICT) {
|
||||
return 0;
|
||||
}
|
||||
int type = getClientType(c);
|
||||
return (type == CLIENT_TYPE_NORMAL || type == CLIENT_TYPE_PUBSUB);
|
||||
}
|
||||
|
||||
int allow_eviction =
|
||||
(type == CLIENT_TYPE_NORMAL || type == CLIENT_TYPE_PUBSUB) &&
|
||||
!(c->flags & CLIENT_NO_EVICT);
|
||||
|
||||
/* Update the client in the mem usage buckets */
|
||||
/* This function is used to cleanup the client's previously tracked memory usage.
|
||||
* This is called during incremental client memory usage tracking as well as
|
||||
* used to reset the client's bucket assignment when it is not required because
|
||||
* client eviction is disabled. */
|
||||
void removeClientFromMemUsageBucket(client *c, int allow_eviction) {
|
||||
if (c->mem_usage_bucket) {
|
||||
c->mem_usage_bucket->mem_usage_sum -= c->last_memory_usage;
|
||||
/* If this client can't be evicted then remove it from the mem usage
|
||||
@ -856,23 +865,48 @@ int updateClientMemUsage(client *c) {
|
||||
c->mem_usage_bucket_node = NULL;
|
||||
}
|
||||
}
|
||||
if (allow_eviction) {
|
||||
clientMemUsageBucket *bucket = getMemUsageBucket(mem);
|
||||
bucket->mem_usage_sum += mem;
|
||||
if (bucket != c->mem_usage_bucket) {
|
||||
if (c->mem_usage_bucket)
|
||||
listDelNode(c->mem_usage_bucket->clients,
|
||||
c->mem_usage_bucket_node);
|
||||
c->mem_usage_bucket = bucket;
|
||||
listAddNodeTail(bucket->clients, c);
|
||||
c->mem_usage_bucket_node = listLast(bucket->clients);
|
||||
}
|
||||
}
|
||||
|
||||
/* This is called only for explicit clients when something changed their buffers,
|
||||
* so we can track clients' memory and enforce clients' maxmemory in real time.
|
||||
*
|
||||
* This also adds the client to the correct memory usage bucket. Each bucket contains
|
||||
* all clients with roughly the same amount of memory. This way we group
|
||||
* together clients consuming about the same amount of memory and can quickly
|
||||
* free them in case we reach maxmemory-clients (client eviction).
|
||||
*
|
||||
* Note: This function filters clients of type monitor, master or replica regardless
|
||||
* of whether the eviction is enabled or not, so the memory usage we get from these
|
||||
* types of clients via the INFO command may be out of date. If someday we wanna
|
||||
* improve that to make monitors' memory usage more accurate, we need to re-add this
|
||||
* function call to `replicationFeedMonitors()`.
|
||||
*
|
||||
* returns 1 if client eviction for this client is allowed, 0 otherwise.
|
||||
*/
|
||||
int updateClientMemUsageAndBucket(client *c) {
|
||||
serverAssert(io_threads_op == IO_THREADS_OP_IDLE);
|
||||
int allow_eviction = clientEvictionAllowed(c);
|
||||
removeClientFromMemUsageBucket(c, allow_eviction);
|
||||
|
||||
if (!allow_eviction) {
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* Remember what we added, to remove it next time. */
|
||||
c->last_memory_usage = mem;
|
||||
/* Update client memory usage. */
|
||||
updateClientMemoryUsage(c);
|
||||
|
||||
return 0;
|
||||
/* Update the client in the mem usage buckets */
|
||||
clientMemUsageBucket *bucket = getMemUsageBucket(c->last_memory_usage);
|
||||
bucket->mem_usage_sum += c->last_memory_usage;
|
||||
if (bucket != c->mem_usage_bucket) {
|
||||
if (c->mem_usage_bucket)
|
||||
listDelNode(c->mem_usage_bucket->clients,
|
||||
c->mem_usage_bucket_node);
|
||||
c->mem_usage_bucket = bucket;
|
||||
listAddNodeTail(bucket->clients, c);
|
||||
c->mem_usage_bucket_node = listLast(bucket->clients);
|
||||
}
|
||||
return 1;
|
||||
}
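After this split, updateClientMemUsageAndBucket() only places evictable clients into memory-usage buckets, while updateClientMemoryUsage() keeps the per-type totals for everyone else. The bucketing groups clients with similar usage so eviction can free the heaviest groups first; a toy sketch of such logarithmic bucketing (the constants are assumptions, not the values Redis uses):

/* sketch: assign clients to log-spaced memory-usage buckets */
#include <stdio.h>
#include <stddef.h>

#define BUCKETS 8
#define MIN_BUCKET_BYTES 1024   /* everything below this shares bucket 0 */

static int mem_usage_bucket(size_t mem) {
    int idx = 0;
    while (idx < BUCKETS - 1 && mem >= ((size_t)MIN_BUCKET_BYTES << (idx + 1)))
        idx++;
    return idx;
}

int main(void) {
    size_t samples[] = { 512, 2048, 70000, 5u << 20 };
    for (size_t i = 0; i < sizeof(samples)/sizeof(samples[0]); i++)
        printf("%zu bytes -> bucket %d\n", samples[i], mem_usage_bucket(samples[i]));
    return 0;
}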
|
||||
|
||||
/* Return the max samples in the memory usage of clients tracked by
|
||||
@ -960,8 +994,11 @@ void clientsCron(void) {
|
||||
* in turn would make the INFO command too slow. So we perform this
|
||||
* computation incrementally and track the (not instantaneous but updated
|
||||
* to the second) total memory used by clients using clientsCron() in
|
||||
* a more incremental way (depending on server.hz). */
|
||||
if (updateClientMemUsage(c)) continue;
|
||||
* a more incremental way (depending on server.hz).
|
||||
* If client eviction is enabled, update the bucket as well. */
|
||||
if (!updateClientMemUsageAndBucket(c))
|
||||
updateClientMemoryUsage(c);
|
||||
|
||||
if (closeClientOnOutputBufferLimitReached(c, 0)) continue;
|
||||
}
|
||||
}
|
||||
@ -1842,6 +1879,25 @@ void createSharedObjects(void) {
|
||||
shared.maxstring = sdsnew("maxstring");
|
||||
}
|
||||
|
||||
void initServerClientMemUsageBuckets() {
|
||||
if (server.client_mem_usage_buckets)
|
||||
return;
|
||||
server.client_mem_usage_buckets = zmalloc(sizeof(clientMemUsageBucket)*CLIENT_MEM_USAGE_BUCKETS);
|
||||
for (int j = 0; j < CLIENT_MEM_USAGE_BUCKETS; j++) {
|
||||
server.client_mem_usage_buckets[j].mem_usage_sum = 0;
|
||||
server.client_mem_usage_buckets[j].clients = listCreate();
|
||||
}
|
||||
}
|
||||
|
||||
void freeServerClientMemUsageBuckets() {
|
||||
if (!server.client_mem_usage_buckets)
|
||||
return;
|
||||
for (int j = 0; j < CLIENT_MEM_USAGE_BUCKETS; j++)
|
||||
listRelease(server.client_mem_usage_buckets[j].clients);
|
||||
zfree(server.client_mem_usage_buckets);
|
||||
server.client_mem_usage_buckets = NULL;
|
||||
}
|
||||
|
||||
void initServerConfig(void) {
|
||||
int j;
|
||||
char *default_bindaddr[CONFIG_DEFAULT_BINDADDR_COUNT] = CONFIG_DEFAULT_BINDADDR;
|
||||
@ -1886,6 +1942,7 @@ void initServerConfig(void) {
|
||||
server.aof_selected_db = -1; /* Make sure the first time will not match */
|
||||
server.aof_flush_postponed_start = 0;
|
||||
server.aof_last_incr_size = 0;
|
||||
server.aof_last_incr_fsync_offset = 0;
|
||||
server.active_defrag_running = 0;
|
||||
server.notify_keyspace_events = 0;
|
||||
server.blocked_clients = 0;
|
||||
@ -2439,6 +2496,7 @@ void initServer(void) {
|
||||
server.cluster_drop_packet_filter = -1;
|
||||
server.reply_buffer_peak_reset_time = REPLY_BUFFER_DEFAULT_PEAK_RESET_TIME;
|
||||
server.reply_buffer_resizing_enabled = 1;
|
||||
server.client_mem_usage_buckets = NULL;
|
||||
resetReplicationBuffer();
|
||||
|
||||
if ((server.tls_port || server.tls_replication || server.tls_cluster)
|
||||
@ -2447,11 +2505,6 @@ void initServer(void) {
|
||||
exit(1);
|
||||
}
|
||||
|
||||
for (j = 0; j < CLIENT_MEM_USAGE_BUCKETS; j++) {
|
||||
server.client_mem_usage_buckets[j].mem_usage_sum = 0;
|
||||
server.client_mem_usage_buckets[j].clients = listCreate();
|
||||
}
|
||||
|
||||
createSharedObjects();
|
||||
adjustOpenFilesLimit();
|
||||
const char *clk_msg = monotonicInit();
|
||||
@ -2624,6 +2677,9 @@ void initServer(void) {
|
||||
ACLUpdateDefaultUserPassword(server.requirepass);
|
||||
|
||||
applyWatchdogPeriod();
|
||||
|
||||
if (server.maxmemory_clients != 0)
|
||||
initServerClientMemUsageBuckets();
|
||||
}
|
||||
|
||||
/* Some steps in server initialization need to be done last (after modules
|
||||
@ -3004,6 +3060,7 @@ struct redisCommand *lookupCommandBySdsLogic(dict *commands, sds s) {
|
||||
return NULL;
|
||||
}
|
||||
|
||||
serverAssert(argc > 0); /* Avoid warning `-Wmaybe-uninitialized` in lookupCommandLogic() */
|
||||
robj objects[argc];
|
||||
robj *argv[argc];
|
||||
for (j = 0; j < argc; j++) {
|
||||
@ -3181,7 +3238,6 @@ void propagatePendingCommands() {
|
||||
|
||||
int j;
|
||||
redisOp *rop;
|
||||
int multi_emitted = 0;
|
||||
|
||||
/* Wrap the commands in server.also_propagate array,
|
||||
* but don't wrap it if we are already in MULTI context,
|
||||
@ -3189,12 +3245,23 @@ void propagatePendingCommands() {
|
||||
*
|
||||
* And if the array contains only one command, no need to
|
||||
* wrap it, since the single command is atomic. */
|
||||
if (server.also_propagate.numops > 1 && !server.propagate_no_multi) {
|
||||
int transaction = server.also_propagate.numops > 1 && !server.propagate_no_multi;
|
||||
|
||||
/* In case a command that may modify random keys was run *directly*
|
||||
* (i.e. not from within a script, MULTI/EXEC, RM_Call, etc.) we want
|
||||
* to avoid using a transaction (much like active-expire) */
|
||||
if (server.current_client &&
|
||||
server.current_client->cmd &&
|
||||
server.current_client->cmd->flags & CMD_TOUCHES_ARBITRARY_KEYS)
|
||||
{
|
||||
transaction = 0;
|
||||
}
|
||||
|
||||
if (transaction) {
|
||||
/* We use the first command-to-propagate to set the dbid for MULTI,
|
||||
* so that the SELECT will be propagated beforehand */
|
||||
int multi_dbid = server.also_propagate.ops[0].dbid;
|
||||
propagateNow(multi_dbid,&shared.multi,1,PROPAGATE_AOF|PROPAGATE_REPL);
|
||||
multi_emitted = 1;
|
||||
}
|
||||
|
||||
for (j = 0; j < server.also_propagate.numops; j++) {
|
||||
@ -3203,7 +3270,7 @@ void propagatePendingCommands() {
|
||||
propagateNow(rop->dbid,rop->argv,rop->argc,rop->target);
|
||||
}
|
||||
|
||||
if (multi_emitted) {
|
||||
if (transaction) {
|
||||
/* We take the dbid from last command so that propagateNow() won't inject another SELECT */
|
||||
int exec_dbid = server.also_propagate.ops[server.also_propagate.numops-1].dbid;
|
||||
propagateNow(exec_dbid,&shared.exec,1,PROPAGATE_AOF|PROPAGATE_REPL);
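Propagated commands are now wrapped in MULTI/EXEC only when there is more than one of them, we are not already inside a MULTI context, and the executing command is not flagged CMD_TOUCHES_ARBITRARY_KEYS. The decision reduced to a stand-alone predicate, with simplified field and flag names:

/* sketch: should the propagated ops be wrapped in a transaction? */
#include <stdio.h>

#define FLAG_TOUCHES_ARBITRARY_KEYS (1 << 0)

struct ctx {
    int numops;              /* commands queued for propagation */
    int propagate_no_multi;  /* already inside a MULTI context */
    int current_cmd_flags;   /* flags of the command being executed */
};

static int should_wrap_in_multi(const struct ctx *c) {
    int transaction = c->numops > 1 && !c->propagate_no_multi;
    /* Commands that may touch random keys (much like active expiration)
     * are propagated without a wrapping transaction. */
    if (c->current_cmd_flags & FLAG_TOUCHES_ARBITRARY_KEYS) transaction = 0;
    return transaction;
}

int main(void) {
    struct ctx a = { 3, 0, 0 };
    struct ctx b = { 3, 0, FLAG_TOUCHES_ARBITRARY_KEYS };
    printf("%d %d\n", should_wrap_in_multi(&a), should_wrap_in_multi(&b));
    return 0;
}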
|
||||
@ -3584,6 +3651,23 @@ int commandCheckArity(client *c, sds *err) {
|
||||
return 1;
|
||||
}
|
||||
|
||||
/* If we're executing a script, try to extract a set of command flags from
|
||||
* it, in case it declared them. Note this is just an attempt, we don't yet
|
||||
* know the script command is well formed.*/
|
||||
uint64_t getCommandFlags(client *c) {
|
||||
uint64_t cmd_flags = c->cmd->flags;
|
||||
|
||||
if (c->cmd->proc == fcallCommand || c->cmd->proc == fcallroCommand) {
|
||||
cmd_flags = fcallGetCommandFlags(c, cmd_flags);
|
||||
} else if (c->cmd->proc == evalCommand || c->cmd->proc == evalRoCommand ||
|
||||
c->cmd->proc == evalShaCommand || c->cmd->proc == evalShaRoCommand)
|
||||
{
|
||||
cmd_flags = evalGetCommandFlags(c, cmd_flags);
|
||||
}
|
||||
|
||||
return cmd_flags;
|
||||
}
|
||||
|
||||
/* If this function gets called we already read a whole
|
||||
* command, arguments are in the client argv/argc fields.
|
||||
* processCommand() execute the command or prepare the
|
||||
@ -3648,19 +3732,7 @@ int processCommand(client *c) {
|
||||
}
|
||||
}
|
||||
|
||||
/* If we're executing a script, try to extract a set of command flags from
|
||||
* it, in case it declared them. Note this is just an attempt, we don't yet
|
||||
* know the script command is well formed.*/
|
||||
uint64_t cmd_flags = c->cmd->flags;
|
||||
if (c->cmd->proc == evalCommand || c->cmd->proc == evalShaCommand ||
|
||||
c->cmd->proc == evalRoCommand || c->cmd->proc == evalShaRoCommand ||
|
||||
c->cmd->proc == fcallCommand || c->cmd->proc == fcallroCommand)
|
||||
{
|
||||
if (c->cmd->proc == fcallCommand || c->cmd->proc == fcallroCommand)
|
||||
cmd_flags = fcallGetCommandFlags(c, cmd_flags);
|
||||
else
|
||||
cmd_flags = evalGetCommandFlags(c, cmd_flags);
|
||||
}
|
||||
uint64_t cmd_flags = getCommandFlags(c);
|
||||
|
||||
int is_read_command = (cmd_flags & CMD_READONLY) ||
|
||||
(c->cmd->proc == execCommand && (c->mstate.cmd_flags & CMD_READONLY));
|
||||
@ -3947,7 +4019,7 @@ int processCommand(client *c) {
|
||||
} else {
|
||||
call(c,CMD_CALL_FULL);
|
||||
c->woff = server.master_repl_offset;
|
||||
if (listLength(server.ready_keys))
|
||||
if (listLength(server.ready_keys) && !isInsideYieldingLongCommand())
|
||||
handleClientsBlockedOnKeys();
|
||||
}
|
||||
|
||||
@ -4218,10 +4290,13 @@ int finishShutdown(void) {
|
||||
/* Close the listening sockets. Apparently this allows faster restarts. */
|
||||
closeListeningSockets(1);
|
||||
|
||||
#if !defined(__sun)
|
||||
/* Unlock the cluster config file before shutdown */
|
||||
if (server.cluster_enabled && server.cluster_config_file_lock_fd != -1) {
|
||||
flock(server.cluster_config_file_lock_fd, LOCK_UN|LOCK_NB);
|
||||
}
|
||||
#endif /* __sun */
|
||||
|
||||
|
||||
serverLog(LL_WARNING,"%s is now ready to exit, bye bye...",
|
||||
server.sentinel_mode ? "Sentinel" : "Redis");
|
||||
@ -4369,6 +4444,7 @@ void addReplyFlagsForCommand(client *c, struct redisCommand *cmd) {
|
||||
{CMD_NO_MULTI, "no_multi"},
|
||||
{CMD_MOVABLE_KEYS, "movablekeys"},
|
||||
{CMD_ALLOW_BUSY, "allow_busy"},
|
||||
/* {CMD_TOUCHES_ARBITRARY_KEYS, "TOUCHES_ARBITRARY_KEYS"}, Hidden on purpose */
|
||||
{0,NULL}
|
||||
};
|
||||
addReplyCommandFlags(c, cmd->flags, flagNames);
|
||||
@ -5969,9 +6045,13 @@ sds genRedisInfoString(dict *section_dict, int all_sections, int everything) {
|
||||
}
|
||||
|
||||
/* Get info from modules.
|
||||
* if user asked for "everything" or "modules", or a specific section
|
||||
* that's not found yet. */
|
||||
if (everything || dictFind(section_dict, "modules") != NULL || sections < (int)dictSize(section_dict)) {
|
||||
* Returned when the user asked for "everything", "modules", or a specific module section.
|
||||
* We're not aware of the module section names here, and we'd rather avoid the search when we can,
|
||||
* so we proceed if there's a requested section name that's not found yet, or when the user asked
|
||||
* for "all" with any additional section names. */
|
||||
if (everything || dictFind(section_dict, "modules") != NULL || sections < (int)dictSize(section_dict) ||
|
||||
(all_sections && dictSize(section_dict)))
|
||||
{
|
||||
|
||||
info = modulesCollectInfo(info,
|
||||
everything || dictFind(section_dict, "modules") != NULL ? NULL: section_dict,
|
||||
@ -6352,8 +6432,13 @@ int redisFork(int purpose) {
|
||||
server.in_fork_child = purpose;
|
||||
setupChildSignalHandlers();
|
||||
setOOMScoreAdj(CONFIG_OOM_BGCHILD);
|
||||
updateDictResizePolicy();
|
||||
dismissMemoryInChild();
|
||||
closeChildUnusedResourceAfterFork();
|
||||
/* Close the reading part, so that if the parent crashes, the child will
|
||||
* get a write error and exit. */
|
||||
if (server.child_info_pipe[0] != -1)
|
||||
close(server.child_info_pipe[0]);
|
||||
} else {
|
||||
/* Parent */
|
||||
if (childpid == -1) {
|
||||
@ -6507,6 +6592,7 @@ void loadDataFromDisk(void) {
|
||||
serverLog(LL_NOTICE, "DB loaded from append only file: %.3f seconds", (float)(ustime()-start)/1000000);
|
||||
} else {
|
||||
rdbSaveInfo rsi = RDB_SAVE_INFO_INIT;
|
||||
int rsi_is_valid = 0;
|
||||
errno = 0; /* Prevent a stale value from affecting error checking */
|
||||
int rdb_flags = RDBFLAGS_NONE;
|
||||
if (iAmMaster()) {
|
||||
@ -6527,6 +6613,7 @@ void loadDataFromDisk(void) {
|
||||
* information in function rdbPopulateSaveInfo. */
|
||||
rsi.repl_stream_db != -1)
|
||||
{
|
||||
rsi_is_valid = 1;
|
||||
if (!iAmMaster()) {
|
||||
memcpy(server.replid,rsi.repl_id,sizeof(server.replid));
|
||||
server.master_repl_offset = rsi.repl_offset;
|
||||
@ -6560,7 +6647,7 @@ void loadDataFromDisk(void) {
|
||||
* if RDB doesn't have replication info or there is no rdb, it is not
|
||||
* possible to support partial resynchronization, to avoid extra memory
|
||||
* of replication backlog, we drop it. */
|
||||
if (server.master_repl_offset == 0 && server.repl_backlog)
|
||||
if (!rsi_is_valid && server.repl_backlog)
|
||||
freeReplicationBacklog();
|
||||
}
|
||||
}
|
||||
@ -6945,6 +7032,24 @@ int main(int argc, char **argv) {
|
||||
* so it will become `--save ""` and will follow the same reset thing. */
|
||||
options = sdscat(options, "\"\"");
|
||||
}
|
||||
else if ((j != argc-1) && argv[j+1][0] == '-' && argv[j+1][1] == '-' &&
|
||||
!strcasecmp(argv[j], "--sentinel"))
|
||||
{
|
||||
/* Special case: handle some things like `--sentinel --config value`.
|
||||
* It is a pseudo config option with no value. In this case, if next
|
||||
* argument starts with `--`, we will reset handled_last_config_arg flag.
|
||||
* We are doing it to be compatible with pre 7.0 behavior (which we
|
||||
* broke in #10660, 7.0.1). */
|
||||
options = sdscat(options, "");
|
||||
handled_last_config_arg = 1;
|
||||
}
|
||||
else if ((j == argc-1) && !strcasecmp(argv[j], "--sentinel")) {
|
||||
/* Special case: when --sentinel is the last argument.
|
||||
* It is a pseudo config option with no value. In this case, do nothing.
|
||||
* We are doing it to be compatible with pre 7.0 behavior (which we
|
||||
* broke in #10660, 7.0.1). */
|
||||
options = sdscat(options, "");
|
||||
}
|
||||
} else {
|
||||
/* Means that we are passing both config name and it's value in the same arg,
|
||||
* like "--port 6380", so we need to reset handled_last_config_arg flag. */
|
||||
|
34
src/server.h
@@ -219,6 +219,7 @@ extern int configOOMScoreAdjValuesDefaults[CONFIG_OOM_COUNT];
 * Populated by populateCommandLegacyRangeSpec. */
#define CMD_ALLOW_BUSY ((1ULL<<26))
#define CMD_MODULE_GETCHANNELS (1ULL<<27) /* Use the modules getchannels interface. */
#define CMD_TOUCHES_ARBITRARY_KEYS (1ULL<<28)

/* Command flags that describe ACLs categories. */
#define ACL_CATEGORY_KEYSPACE (1ULL<<0)
@@ -363,6 +364,7 @@ extern int configOOMScoreAdjValuesDefaults[CONFIG_OOM_COUNT];
RDB without replication buffer. */
#define CLIENT_NO_EVICT (1ULL<<43) /* This client is protected against client
memory eviction. */
#define CLIENT_PUSHING (1ULL<<44) /* This client is pushing notifications. */

/* Client block type (btype field in client structure)
 * if CLIENT_BLOCKED flag is set. */
@@ -427,6 +429,8 @@ typedef enum {
#define SLAVE_STATE_WAIT_BGSAVE_END 7 /* Waiting RDB file creation to finish. */
#define SLAVE_STATE_SEND_BULK 8 /* Sending RDB file to slave. */
#define SLAVE_STATE_ONLINE 9 /* RDB file transmitted, sending just updates. */
#define SLAVE_STATE_RDB_TRANSMITTED 10 /* RDB file transmitted - This state is used only for
 * a replica that only wants RDB without replication buffer */

/* Slave capabilities. */
#define SLAVE_CAPA_NONE 0
@@ -890,7 +894,7 @@ typedef struct clientReplyBlock {
 * | / \
 * | / \
 * | / \
 * Repl Backlog Replia_A Replia_B
 * Repl Backlog Replica_A Replica_B
 *
 * Each replica or replication backlog increments only the refcount of the
 * 'ref_repl_buf_node' which it points to. So when replica walks to the next
@@ -1049,6 +1053,7 @@ typedef struct {
list *selectors; /* A list of selectors this user validates commands
against. This list will always contain at least
one selector for backwards compatibility. */
robj *acl_string; /* cached string represent of ACLs */
} user;

/* With multiplexing we need to take per-client state.
@@ -1115,6 +1120,7 @@ typedef struct client {
time_t ctime; /* Client creation time. */
long duration; /* Current command duration. Used for measuring latency of blocking/non-blocking cmds */
int slot; /* The slot the client is executing against. Set to -1 if no slot is being used */
dictEntry *cur_script; /* Cached pointer to the dictEntry of the script being executed. */
time_t lastinteraction; /* Time of the last interaction, used for timeout */
time_t obuf_soft_limit_reached_time;
int authenticated; /* Needed when the default user requires auth. */
@@ -1168,7 +1174,7 @@ typedef struct client {
rax *client_tracking_prefixes; /* A dictionary of prefixes we are already
subscribed to in BCAST mode, in the
context of client side caching. */
/* In updateClientMemUsage() we track the memory usage of
/* In updateClientMemoryUsage() we track the memory usage of
 * each client and add it to the sum of all the clients of a given type,
 * however we need to remember what was the old contribution of each
 * client, and in which category the client was, in order to remove it
@@ -1520,7 +1526,7 @@ struct redisServer {
client *current_client; /* Current client executing the command. */

/* Stuff for client mem eviction */
clientMemUsageBucket client_mem_usage_buckets[CLIENT_MEM_USAGE_BUCKETS];
clientMemUsageBucket* client_mem_usage_buckets;

rax *clients_timeout_table; /* Radix tree for blocked clients timeouts. */
long fixed_time_expire; /* If > 0, expire keys against server.mstime. */
@@ -1660,7 +1666,8 @@ struct redisServer {
off_t aof_rewrite_base_size; /* AOF size on latest startup or rewrite. */
off_t aof_current_size; /* AOF current size (Including BASE + INCRs). */
off_t aof_last_incr_size; /* The size of the latest incr AOF. */
off_t aof_fsync_offset; /* AOF offset which is already synced to disk. */
off_t aof_last_incr_fsync_offset; /* AOF offset which is already requested to be synced to disk.
 * Compare with the aof_last_incr_size. */
int aof_flush_sleep; /* Micros to sleep before flush. (used by tests) */
int aof_rewrite_scheduled; /* Rewrite once BGSAVE terminates. */
sds aof_buf; /* AOF buffer, written before entering the event loop */
@@ -2196,6 +2203,12 @@ typedef int redisGetKeysProc(struct redisCommand *cmd, robj **argv, int argc, ge
*
* CMD_NO_MULTI: The command is not allowed inside a transaction
*
* CMD_ALLOW_BUSY: The command can run while another command is running for
* a long time (timedout script, module command that yields)
*
* CMD_TOUCHES_ARBITRARY_KEYS: The command may touch (and cause lazy-expire)
* arbitrary key (i.e not provided in argv)
*
* The following additional flags are only used in order to put commands
* in a specific ACL category. Commands can have multiple ACL categories.
* See redis.conf for the exact meaning of each.
@@ -2463,6 +2476,7 @@ void addReplyBulkCString(client *c, const char *s);
void addReplyBulkCBuffer(client *c, const void *p, size_t len);
void addReplyBulkLongLong(client *c, long long ll);
void addReply(client *c, robj *obj);
void addReplyStatusLength(client *c, const char *s, size_t len);
void addReplySds(client *c, sds s);
void addReplyBulkSds(client *c, sds s);
void setDeferredReplyBulkSds(client *c, void *node, sds s);
@@ -2471,6 +2485,7 @@ void addReplyOrErrorObject(client *c, robj *reply);
void afterErrorReply(client *c, const char *s, size_t len, int flags);
void addReplyErrorSdsEx(client *c, sds err, int flags);
void addReplyErrorSds(client *c, sds err);
void addReplyErrorSdsSafe(client *c, sds err);
void addReplyError(client *c, const char *err);
void addReplyErrorArity(client *c);
void addReplyErrorExpireTime(client *c);
@@ -2530,8 +2545,8 @@ int handleClientsWithPendingReadsUsingThreads(void);
int stopThreadedIOIfNeeded(void);
int clientHasPendingReplies(client *c);
int islocalClient(client *c);
int updateClientMemUsage(client *c);
void updateClientMemUsageBucket(client *c);
int updateClientMemUsageAndBucket(client *c);
void removeClientFromMemUsageBucket(client *c, int allow_eviction);
void unlinkClient(client *c);
int writeToClient(client *c, int handler_installed);
void linkClient(client *c);
@@ -2791,11 +2806,12 @@ int ACLCheckAllUserCommandPerm(user *u, struct redisCommand *cmd, robj **argv, i
int ACLUserCheckCmdWithUnrestrictedKeyAccess(user *u, struct redisCommand *cmd, robj **argv, int argc, int flags);
int ACLCheckAllPerm(client *c, int *idxptr);
int ACLSetUser(user *u, const char *op, ssize_t oplen);
sds ACLStringSetUser(user *u, sds username, sds *argv, int argc);
uint64_t ACLGetCommandCategoryFlagByName(const char *name);
int ACLAppendUserForLoading(sds *argv, int argc, int *argc_err);
const char *ACLSetUserStringError(void);
int ACLLoadConfiguredUsers(void);
sds ACLDescribeUser(user *u);
robj *ACLDescribeUser(user *u);
void ACLLoadUsersAtStartup(void);
void addReplyCommandCategories(client *c, struct redisCommand *cmd);
user *ACLCreateUnlinkedUser();
@@ -2876,6 +2892,7 @@ int zslLexValueLteMax(sds value, zlexrangespec *spec);
int getMaxmemoryState(size_t *total, size_t *logical, size_t *tofree, float *level);
size_t freeMemoryGetNotCountedMemory();
int overMaxmemoryAfterAlloc(size_t moremem);
uint64_t getCommandFlags(client *c);
int processCommand(client *c);
int processPendingCommandAndInputBuffer(client *c);
void setupSignalHandlers(void);
@@ -3056,6 +3073,8 @@ void initConfigValues();
void removeConfig(sds name);
sds getConfigDebugInfo();
int allowProtectedAction(int config, client *c);
void initServerClientMemUsageBuckets();
void freeServerClientMemUsageBuckets();

/* Module Configuration */
typedef struct ModuleConfig ModuleConfig;
@@ -3099,6 +3118,7 @@ int objectSetLRUOrLFU(robj *val, long long lfu_freq, long long lru_idle,
#define LOOKUP_NONOTIFY (1<<1) /* Don't trigger keyspace event on key misses. */
#define LOOKUP_NOSTATS (1<<2) /* Don't update keyspace hits/misses counters. */
#define LOOKUP_WRITE (1<<3) /* Delete expired keys even in replicas. */
#define LOOKUP_NOEXPIRE (1<<4) /* Avoid deleting lazy expired keys. */

void dbAdd(redisDb *db, robj *key, robj *val);
int dbAddRDBLoad(redisDb *db, sds key, robj *val);
11 src/sha1.c
@@ -125,6 +125,14 @@ void SHA1Init(SHA1_CTX* context)
context->count[0] = context->count[1] = 0;
}

/* This source code is referenced from
 * https://github.com/libevent/libevent/commit/e1d7d3e40a7fd50348d849046fbfd9bf976e643c */
#if defined(__GNUC__) && __GNUC__ >= 12
#pragma GCC diagnostic push
/* Ignore the case when SHA1Transform() called with 'char *', that code passed
 * buffer of 64 bytes anyway (at least now) */
#pragma GCC diagnostic ignored "-Wstringop-overread"
#endif

/* Run your data through this. */

@@ -149,6 +157,9 @@ void SHA1Update(SHA1_CTX* context, const unsigned char* data, uint32_t len)
memcpy(&context->buffer[j], &data[i], len - i);
}

#if defined(__GNUC__) && __GNUC__ >= 12
#pragma GCC diagnostic pop
#endif

/* Add padding and return the message digest. */
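The sha1.c hunks above wrap a known-safe region in a push/ignored/pop sequence so GCC 12's -Wstringop-overread does not fire, without disabling the warning for the rest of the file. A minimal standalone sketch of the same suppression pattern (the function names here are purely illustrative, not Redis code):

#include <string.h>

static void consume64(const unsigned char buf[64]) {
    (void)buf; /* pretend to process a fixed 64-byte block */
}

void feed(const char *p) {
#if defined(__GNUC__) && __GNUC__ >= 12
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wstringop-overread"
#endif
    /* The cast is assumed safe because callers always pass >= 64 bytes. */
    consume64((const unsigned char *)p);
#if defined(__GNUC__) && __GNUC__ >= 12
#pragma GCC diagnostic pop
#endif
}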
@@ -328,8 +328,10 @@ void sortCommandGeneric(client *c, int readonly) {
default: vectorlen = 0; serverPanic("Bad SORT type"); /* Avoid GCC warning */
}

/* Perform LIMIT start,count sanity checking. */
start = (limit_start < 0) ? 0 : limit_start;
/* Perform LIMIT start,count sanity checking.
 * And avoid integer overflow by limiting inputs to object sizes. */
start = min(max(limit_start, 0), vectorlen);
limit_count = min(max(limit_count, -1), vectorlen);
end = (limit_count < 0) ? vectorlen-1 : start+limit_count-1;
if (start >= vectorlen) {
start = vectorlen-1;
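The sortCommandGeneric() change above clamps both LIMIT arguments into the size of the sorted object before computing end, so start+limit_count-1 can no longer overflow. A small self-contained sketch of the same clamping idea (the helper name and macros are illustrative, not the Redis helpers):

#include <limits.h>
#include <stdio.h>

#define min(a, b) ((a) < (b) ? (a) : (b))
#define max(a, b) ((a) > (b) ? (a) : (b))

/* Clamp user-supplied LIMIT start/count to the object size; afterwards
 * start+count-1 stays well inside the range of a long. */
static void clamp_limit(long vectorlen, long *start, long *count) {
    *start = min(max(*start, 0), vectorlen);
    *count = min(max(*count, -1), vectorlen);
}

int main(void) {
    long start = LONG_MAX, count = LONG_MAX;
    clamp_limit(100, &start, &count);
    printf("start=%ld count=%ld\n", start, count); /* start=100 count=100 */
    return 0;
}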
@@ -150,9 +150,12 @@ int checkOvercommit(sds *error_msg) {
}
fclose(fp);

if (strtol(buf, NULL, 10) == 0) {
if (strtol(buf, NULL, 10) != 1) {
*error_msg = sdsnew(
"overcommit_memory is set to 0! Background save may fail under low memory condition. "
"Memory overcommit must be enabled! Without it, a background save or replication may fail under low memory condition. "
#if defined(USE_JEMALLOC)
"Being disabled, it can can also cause failures without low memory condition, see https://github.com/jemalloc/jemalloc/issues/1328. "
#endif
"To fix this issue add 'vm.overcommit_memory = 1' to /etc/sysctl.conf and then reboot or run the "
"command 'sysctl vm.overcommit_memory=1' for this to take effect.");
return -1;
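The check above now treats any value other than 1 in vm.overcommit_memory as a problem, since mode 2, like mode 0, can make fork-based background saves fail. A minimal standalone reader for that sysctl, for illustration only (the helper name is hypothetical):

#include <stdio.h>
#include <stdlib.h>

/* Returns 1 when vm.overcommit_memory == 1, 0 otherwise (including when the
 * file cannot be read, e.g. on non-Linux systems). */
static int overcommit_enabled(void) {
    FILE *fp = fopen("/proc/sys/vm/overcommit_memory", "r");
    char buf[64];
    if (!fp) return 0;
    if (!fgets(buf, sizeof(buf), fp)) { fclose(fp); return 0; }
    fclose(fp);
    return strtol(buf, NULL, 10) == 1;
}

int main(void) {
    printf("memory overcommit %s\n", overcommit_enabled() ? "enabled" : "not enabled");
    return 0;
}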
17 src/t_hash.c
@@ -670,6 +670,10 @@ void hincrbyfloatCommand(client *c) {
unsigned int vlen;

if (getLongDoubleFromObjectOrReply(c,c->argv[3],&incr,NULL) != C_OK) return;
if (isnan(incr) || isinf(incr)) {
addReplyError(c,"value is NaN or Infinity");
return;
}
if ((o = hashTypeLookupWriteOrCreate(c,c->argv[1])) == NULL) return;
if (hashTypeGetValue(o,c->argv[2]->ptr,&vstr,&vlen,&ll) == C_OK) {
if (vstr) {
@@ -956,6 +960,8 @@ void hrandfieldWithCountCommand(client *c, long l, int withvalues) {
addReplyBulkCBuffer(c, key, sdslen(key));
if (withvalues)
addReplyBulkCBuffer(c, value, sdslen(value));
if (c->flags & CLIENT_CLOSE_ASAP)
break;
}
} else if (hash->encoding == OBJ_ENCODING_LISTPACK) {
listpackEntry *keys, *vals = NULL;
@@ -970,6 +976,8 @@ void hrandfieldWithCountCommand(client *c, long l, int withvalues) {
count -= sample_count;
lpRandomPairs(hash->ptr, sample_count, keys, vals);
hrandfieldReplyWithListpack(c, sample_count, keys, vals);
if (c->flags & CLIENT_CLOSE_ASAP)
break;
}
zfree(keys);
zfree(vals);
@@ -1116,12 +1124,17 @@ void hrandfieldCommand(client *c) {
listpackEntry ele;

if (c->argc >= 3) {
if (getLongFromObjectOrReply(c,c->argv[2],&l,NULL) != C_OK) return;
if (getRangeLongFromObjectOrReply(c,c->argv[2],-LONG_MAX,LONG_MAX,&l,NULL) != C_OK) return;
if (c->argc > 4 || (c->argc == 4 && strcasecmp(c->argv[3]->ptr,"withvalues"))) {
addReplyErrorObject(c,shared.syntaxerr);
return;
} else if (c->argc == 4)
} else if (c->argc == 4) {
withvalues = 1;
if (l < -LONG_MAX/2 || l > LONG_MAX/2) {
addReplyError(c,"value is out of range");
return;
}
}
hrandfieldWithCountCommand(c, l, withvalues);
return;
}
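The added range checks matter because a negative count is later negated and, with WITHVALUES, doubled (one reply element for the field and one for the value); bounding it to ±LONG_MAX/2 keeps that arithmetic inside a long. A tiny illustration of the guard (standalone, with a hypothetical helper name, not the Redis reply code):

#include <limits.h>
#include <stdio.h>

/* With WITHVALUES every sampled field produces two reply elements, so the
 * requested count must leave room for l*2 (and for negating l) in a long. */
static int validate_rand_count(long l, int withvalues) {
    if (withvalues && (l < -LONG_MAX/2 || l > LONG_MAX/2))
        return 0; /* "value is out of range" */
    return 1;
}

int main(void) {
    printf("%d\n", validate_rand_count(-LONG_MAX, 1)); /* 0: would overflow */
    printf("%d\n", validate_rand_count(-10, 1));        /* 1: fine, 20 reply elements */
    return 0;
}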
@@ -665,7 +665,7 @@ void srandmemberWithCountCommand(client *c) {

dict *d;

if (getLongFromObjectOrReply(c,c->argv[2],&l,NULL) != C_OK) return;
if (getRangeLongFromObjectOrReply(c,c->argv[2],-LONG_MAX,LONG_MAX,&l,NULL) != C_OK) return;
if (l >= 0) {
count = (unsigned long) l;
} else {
@@ -699,6 +699,8 @@ void srandmemberWithCountCommand(client *c) {
} else {
addReplyBulkCBuffer(c,ele,sdslen(ele));
}
if (c->flags & CLIENT_CLOSE_ASAP)
break;
}
return;
}
@@ -528,22 +528,25 @@ int streamAppendItem(stream *s, robj **argv, int64_t numfields, streamID *added_
 * if we need to switch to the next one. 'lp' will be set to NULL if
 * the current node is full. */
if (lp != NULL) {
int new_node = 0;
size_t node_max_bytes = server.stream_node_max_bytes;
if (node_max_bytes == 0 || node_max_bytes > STREAM_LISTPACK_MAX_SIZE)
node_max_bytes = STREAM_LISTPACK_MAX_SIZE;
if (lp_bytes + totelelen >= node_max_bytes) {
lp = NULL;
new_node = 1;
} else if (server.stream_node_max_entries) {
unsigned char *lp_ele = lpFirst(lp);
/* Count both live entries and deleted ones. */
int64_t count = lpGetInteger(lp_ele) + lpGetInteger(lpNext(lp,lp_ele));
if (count >= server.stream_node_max_entries) {
/* Shrink extra pre-allocated memory */
lp = lpShrinkToFit(lp);
if (ri.data != lp)
raxInsert(s->rax,ri.key,ri.key_len,lp,NULL);
lp = NULL;
}
if (count >= server.stream_node_max_entries) new_node = 1;
}

if (new_node) {
/* Shrink extra pre-allocated memory */
lp = lpShrinkToFit(lp);
if (ri.data != lp)
raxInsert(s->rax,ri.key,ri.key_len,lp,NULL);
lp = NULL;
}
}
@@ -3334,6 +3337,7 @@ void xautoclaimCommand(client *c) {
robj *o = lookupKeyRead(c->db,c->argv[1]);
long long minidle; /* Minimum idle time argument, in milliseconds. */
long count = 100; /* Maximum entries to claim. */
const unsigned attempts_factor = 10;
streamID startid;
int startex;
int justid = 0;
@@ -3356,7 +3360,8 @@ void xautoclaimCommand(client *c) {
int moreargs = (c->argc-1) - j; /* Number of additional arguments. */
char *opt = c->argv[j]->ptr;
if (!strcasecmp(opt,"COUNT") && moreargs) {
if (getRangeLongFromObjectOrReply(c,c->argv[j+1],1,LONG_MAX,&count,"COUNT must be > 0") != C_OK)
long max_count = LONG_MAX / (max(sizeof(streamID), attempts_factor));
if (getRangeLongFromObjectOrReply(c,c->argv[j+1],1,max_count,&count,"COUNT must be > 0") != C_OK)
return;
j++;
} else if (!strcasecmp(opt,"JUSTID")) {
@@ -3383,9 +3388,15 @@ void xautoclaimCommand(client *c) {
return;
}

streamID *deleted_ids = ztrymalloc(count * sizeof(streamID));
if (!deleted_ids) {
addReplyError(c, "Insufficient memory, failed allocating transient memory, COUNT too high.");
return;
}

/* Do the actual claiming. */
streamConsumer *consumer = NULL;
long long attempts = count*10;
long long attempts = count * attempts_factor;

addReplyArrayLen(c, 3); /* We add another reply later */
void *endidptr = addReplyDeferredLen(c); /* reply[0] */
@@ -3399,7 +3410,6 @@ void xautoclaimCommand(client *c) {
size_t arraylen = 0;
mstime_t now = mstime();
sds name = c->argv[3]->ptr;
streamID *deleted_ids = zmalloc(count * sizeof(streamID));
int deleted_id_num = 0;
while (attempts-- && count && raxNext(&ri)) {
streamNACK *nack = ri.data;
@@ -3421,6 +3431,7 @@ void xautoclaimCommand(client *c) {
/* Remember the ID for later */
deleted_ids[deleted_id_num++] = id;
raxSeek(&ri,">=",ri.key,ri.key_len);
count--; /* Count is a limit of the command response size. */
continue;
}
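In the XAUTOCLAIM hunks above, COUNT is capped so that neither count * attempts_factor nor the count * sizeof(streamID) allocation can overflow, and the scratch array moves to ztrymalloc() so an oversized COUNT produces an error instead of an out-of-memory abort. A condensed sketch of that bounding logic (types and messages are illustrative, not the Redis stream code):

#include <limits.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define max(a, b) ((a) > (b) ? (a) : (b))

typedef struct { uint64_t ms, seq; } stream_id;

int main(void) {
    const unsigned attempts_factor = 10;
    /* Largest COUNT for which both count*attempts_factor and
     * count*sizeof(stream_id) stay in range. */
    long max_count = LONG_MAX / (max(sizeof(stream_id), attempts_factor));
    long count = 1000; /* pretend this came from the COUNT argument */
    if (count < 1 || count > max_count) {
        fprintf(stderr, "COUNT must be > 0\n");
        return 1;
    }
    /* Try-allocate the scratch array; a huge COUNT fails gracefully. */
    stream_id *deleted_ids = malloc((size_t)count * sizeof(stream_id));
    if (!deleted_ids) {
        fprintf(stderr, "COUNT too high\n");
        return 1;
    }
    long long attempts = (long long)count * attempts_factor;
    printf("will scan at most %lld PEL entries\n", attempts);
    free(deleted_ids);
    return 0;
}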
@@ -37,8 +37,14 @@ int getGenericCommand(client *c);
* String Commands
*----------------------------------------------------------------------------*/

static int checkStringLength(client *c, long long size) {
if (!mustObeyClient(c) && size > server.proto_max_bulk_len) {
static int checkStringLength(client *c, long long size, long long append) {
if (mustObeyClient(c))
return C_OK;
/* 'uint64_t' cast is there just to prevent undefined behavior on overflow */
long long total = (uint64_t)size + append;
/* Test configured max-bulk-len represending a limit of the biggest string object,
 * and also test for overflow. */
if (total > server.proto_max_bulk_len || total < size || total < append) {
addReplyError(c,"string exceeds maximum allowed size (proto-max-bulk-len)");
return C_ERR;
}
@@ -456,7 +462,7 @@ void setrangeCommand(client *c) {
}

/* Return when the resulting string exceeds allowed size */
if (checkStringLength(c,offset+sdslen(value)) != C_OK)
if (checkStringLength(c,offset,sdslen(value)) != C_OK)
return;

o = createObject(OBJ_STRING,sdsnewlen(NULL, offset+sdslen(value)));
@@ -476,7 +482,7 @@ void setrangeCommand(client *c) {
}

/* Return when the resulting string exceeds allowed size */
if (checkStringLength(c,offset+sdslen(value)) != C_OK)
if (checkStringLength(c,offset,sdslen(value)) != C_OK)
return;

/* Create a copy when the object is shared or encoded. */
@@ -574,7 +580,7 @@ void msetGenericCommand(client *c, int nx) {

for (j = 1; j < c->argc; j += 2) {
c->argv[j+1] = tryObjectEncoding(c->argv[j+1]);
setKey(c,c->db,c->argv[j],c->argv[j+1],0);
setKey(c, c->db, c->argv[j], c->argv[j + 1], 0);
notifyKeyspaceEvent(NOTIFY_STRING,"set",c->argv[j],c->db->id);
}
server.dirty += (c->argc-1)/2;
@@ -703,8 +709,7 @@ void appendCommand(client *c) {

/* "append" is an argument, so always an sds */
append = c->argv[2];
totlen = stringObjectLen(o)+sdslen(append->ptr);
if (checkStringLength(c,totlen) != C_OK)
if (checkStringLength(c,stringObjectLen(o),sdslen(append->ptr)) != C_OK)
return;

/* Append the value */
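The reworked checkStringLength() takes the current size and the appended length separately, adds them through a uint64_t cast so the addition itself cannot trigger signed-overflow undefined behavior, and rejects the result if it exceeds proto-max-bulk-len or wrapped around. A standalone sketch of that overflow-safe check (the helper name and limit value are only examples):

#include <stdint.h>
#include <stdio.h>

/* Returns 0 when size+append would exceed the limit or wrap around,
 * 1 when the combined length is acceptable. */
static int length_ok(long long size, long long append, long long limit) {
    /* The uint64_t cast keeps the addition free of signed-overflow UB. */
    long long total = (long long)((uint64_t)size + (uint64_t)append);
    if (total > limit || total < size || total < append) return 0;
    return 1;
}

int main(void) {
    long long limit = 512LL * 1024 * 1024;          /* e.g. a 512MB bulk limit */
    printf("%d\n", length_ok(100, 200, limit));     /* 1 */
    printf("%d\n", length_ok(INT64_MAX, 8, limit)); /* 0: sum wrapped around */
    return 0;
}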
13 src/t_zset.c
@@ -4126,6 +4126,8 @@ void zrandmemberWithCountCommand(client *c, long l, int withscores) {
addReplyBulkCBuffer(c, key, sdslen(key));
if (withscores)
addReplyDouble(c, *(double*)dictGetVal(de));
if (c->flags & CLIENT_CLOSE_ASAP)
break;
}
} else if (zsetobj->encoding == OBJ_ENCODING_LISTPACK) {
listpackEntry *keys, *vals = NULL;
@@ -4139,6 +4141,8 @@ void zrandmemberWithCountCommand(client *c, long l, int withscores) {
count -= sample_count;
lpRandomPairs(zsetobj->ptr, sample_count, keys, vals);
zrandmemberReplyWithListpack(c, sample_count, keys, vals);
if (c->flags & CLIENT_CLOSE_ASAP)
break;
}
zfree(keys);
zfree(vals);
@@ -4285,12 +4289,17 @@ void zrandmemberCommand(client *c) {
listpackEntry ele;

if (c->argc >= 3) {
if (getLongFromObjectOrReply(c,c->argv[2],&l,NULL) != C_OK) return;
if (getRangeLongFromObjectOrReply(c,c->argv[2],-LONG_MAX,LONG_MAX,&l,NULL) != C_OK) return;
if (c->argc > 4 || (c->argc == 4 && strcasecmp(c->argv[3]->ptr,"withscores"))) {
addReplyErrorObject(c,shared.syntaxerr);
return;
} else if (c->argc == 4)
} else if (c->argc == 4) {
withscores = 1;
if (l < -LONG_MAX/2 || l > LONG_MAX/2) {
addReplyError(c,"value is out of range");
return;
}
}
zrandmemberWithCountCommand(c, l, withscores);
return;
}
@@ -29,6 +29,8 @@
#include "server.h"
#include "cluster.h"

#include <math.h>

/* ========================== Clients timeouts ============================= */

/* Check if this blocked client timedout (does nothing if the client is
@@ -169,7 +171,7 @@ int getTimeoutFromObjectOrReply(client *c, robj *object, mstime_t *timeout, int
if (getLongDoubleFromObjectOrReply(c,object,&ftval,
"timeout is not a float or out of range") != C_OK)
return C_ERR;
tval = (long long) (ftval * 1000.0);
tval = (long long) ceill(ftval * 1000.0);
} else {
if (getLongLongFromObjectOrReply(c,object,&tval,
"timeout is not an integer or out of range") != C_OK)
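Converting a fractional timeout with a plain cast truncates toward zero, so a value such as 0.0005 seconds became 0 ms, which Redis blocking commands interpret as "block forever"; rounding up with ceill() keeps any positive timeout at least 1 ms. A small illustration of the difference (standalone, link with -lm):

#include <math.h>
#include <stdio.h>

int main(void) {
    long double seconds = 0.0005L;               /* e.g. BLPOP mylist 0.0005 */
    long long truncated = (long long)(seconds * 1000.0L);
    long long rounded_up = (long long)ceill(seconds * 1000.0L);
    printf("truncated: %lld ms, rounded up: %lld ms\n", truncated, rounded_up);
    /* truncated: 0 ms (looks like "no timeout"), rounded up: 1 ms */
    return 0;
}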
115 src/tls.c
@@ -515,6 +515,7 @@ connection *connCreateAcceptedTLS(int fd, int require_auth) {
}

static void tlsEventHandler(struct aeEventLoop *el, int fd, void *clientData, int mask);
static void updateSSLEvent(tls_connection *conn);

/* Process the return code received from OpenSSL>
 * Update the want parameter with expected I/O.
@@ -548,7 +549,48 @@ static int handleSSLReturnCode(tls_connection *conn, int ret_value, WantIOType *
return 0;
}

void registerSSLEvent(tls_connection *conn, WantIOType want) {
/* Handle OpenSSL return code following SSL_write() or SSL_read():
 *
 * - Updates conn state and last_errno.
 * - If update_event is nonzero, calls updateSSLEvent() when necessary.
 *
 * Returns ret_value, or -1 on error or dropped connection.
 */
static int updateStateAfterSSLIO(tls_connection *conn, int ret_value, int update_event) {
/* If system call was interrupted, there's no need to go through the full
 * OpenSSL error handling and just report this for the caller to retry the
 * operation.
 */
if (errno == EINTR) {
conn->c.last_errno = EINTR;
return -1;
}

if (ret_value <= 0) {
WantIOType want = 0;
int ssl_err;
if (!(ssl_err = handleSSLReturnCode(conn, ret_value, &want))) {
if (want == WANT_READ) conn->flags |= TLS_CONN_FLAG_WRITE_WANT_READ;
if (want == WANT_WRITE) conn->flags |= TLS_CONN_FLAG_READ_WANT_WRITE;
if (update_event) updateSSLEvent(conn);
errno = EAGAIN;
return -1;
} else {
if (ssl_err == SSL_ERROR_ZERO_RETURN ||
((ssl_err == SSL_ERROR_SYSCALL && !errno))) {
conn->c.state = CONN_STATE_CLOSED;
return -1;
} else {
conn->c.state = CONN_STATE_ERROR;
return -1;
}
}
}

return ret_value;
}

static void registerSSLEvent(tls_connection *conn, WantIOType want) {
int mask = aeGetFileEvents(server.el, conn->c.fd);

switch (want) {
@@ -787,39 +829,12 @@ static int connTLSConnect(connection *conn_, const char *addr, int port, const c

static int connTLSWrite(connection *conn_, const void *data, size_t data_len) {
tls_connection *conn = (tls_connection *) conn_;
int ret, ssl_err;
int ret;

if (conn->c.state != CONN_STATE_CONNECTED) return -1;
ERR_clear_error();
ret = SSL_write(conn->ssl, data, data_len);
/* If system call was interrupted, there's no need to go through the full
 * OpenSSL error handling and just report this for the caller to retry the
 * operation.
 */
if (errno == EINTR) {
conn->c.last_errno = EINTR;
return -1;
}
if (ret <= 0) {
WantIOType want = 0;
if (!(ssl_err = handleSSLReturnCode(conn, ret, &want))) {
if (want == WANT_READ) conn->flags |= TLS_CONN_FLAG_WRITE_WANT_READ;
updateSSLEvent(conn);
errno = EAGAIN;
return -1;
} else {
if (ssl_err == SSL_ERROR_ZERO_RETURN ||
((ssl_err == SSL_ERROR_SYSCALL && !errno))) {
conn->c.state = CONN_STATE_CLOSED;
return -1;
} else {
conn->c.state = CONN_STATE_ERROR;
return -1;
}
}
}

return ret;
return updateStateAfterSSLIO(conn, ret, 1);
}

static int connTLSWritev(connection *conn_, const struct iovec *iov, int iovcnt) {
@@ -862,40 +877,11 @@ static int connTLSWritev(connection *conn_, const struct iovec *iov, int iovcnt)
static int connTLSRead(connection *conn_, void *buf, size_t buf_len) {
tls_connection *conn = (tls_connection *) conn_;
int ret;
int ssl_err;

if (conn->c.state != CONN_STATE_CONNECTED) return -1;
ERR_clear_error();
ret = SSL_read(conn->ssl, buf, buf_len);
/* If system call was interrupted, there's no need to go through the full
 * OpenSSL error handling and just report this for the caller to retry the
 * operation.
 */
if (errno == EINTR) {
conn->c.last_errno = EINTR;
return -1;
}
if (ret <= 0) {
WantIOType want = 0;
if (!(ssl_err = handleSSLReturnCode(conn, ret, &want))) {
if (want == WANT_WRITE) conn->flags |= TLS_CONN_FLAG_READ_WANT_WRITE;
updateSSLEvent(conn);

errno = EAGAIN;
return -1;
} else {
if (ssl_err == SSL_ERROR_ZERO_RETURN ||
((ssl_err == SSL_ERROR_SYSCALL) && !errno)) {
conn->c.state = CONN_STATE_CLOSED;
return 0;
} else {
conn->c.state = CONN_STATE_ERROR;
return -1;
}
}
}

return ret;
return updateStateAfterSSLIO(conn, ret, 1);
}

static const char *connTLSGetLastError(connection *conn_) {
@@ -962,7 +948,9 @@ static ssize_t connTLSSyncWrite(connection *conn_, char *ptr, ssize_t size, long

setBlockingTimeout(conn, timeout);
SSL_clear_mode(conn->ssl, SSL_MODE_ENABLE_PARTIAL_WRITE);
ERR_clear_error();
int ret = SSL_write(conn->ssl, ptr, size);
ret = updateStateAfterSSLIO(conn, ret, 0);
SSL_set_mode(conn->ssl, SSL_MODE_ENABLE_PARTIAL_WRITE);
unsetBlockingTimeout(conn);

@@ -973,7 +961,9 @@ static ssize_t connTLSSyncRead(connection *conn_, char *ptr, ssize_t size, long
tls_connection *conn = (tls_connection *) conn_;

setBlockingTimeout(conn, timeout);
ERR_clear_error();
int ret = SSL_read(conn->ssl, ptr, size);
ret = updateStateAfterSSLIO(conn, ret, 0);
unsetBlockingTimeout(conn);

return ret;
@@ -989,7 +979,10 @@ static ssize_t connTLSSyncReadLine(connection *conn_, char *ptr, ssize_t size, l
while(size) {
char c;

if (SSL_read(conn->ssl,&c,1) <= 0) {
ERR_clear_error();
int ret = SSL_read(conn->ssl, &c, 1);
ret = updateStateAfterSSLIO(conn, ret, 0);
if (ret <= 0) {
nread = -1;
goto exit;
}
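Every tls.c call site above now follows the same three-step pattern: clear OpenSSL's error queue, perform the SSL_read()/SSL_write(), then run the shared post-processing that maps the result onto connection state and errno. A compressed, illustrative sketch of that calling convention, assuming the Redis-specific types are reduced to a minimal shape (demo_conn, after_ssl_io and demo_write are hypothetical names; only the OpenSSL calls are real):

#include <errno.h>
#include <openssl/err.h>
#include <openssl/ssl.h>

typedef struct { SSL *ssl; int state; int last_errno; } demo_conn;

/* Stand-in for updateStateAfterSSLIO(): interpret the OpenSSL result once. */
static int after_ssl_io(demo_conn *c, int ret) {
    if (errno == EINTR) { c->last_errno = EINTR; return -1; }
    if (ret <= 0) {
        int err = SSL_get_error(c->ssl, ret);
        if (err == SSL_ERROR_WANT_READ || err == SSL_ERROR_WANT_WRITE) {
            errno = EAGAIN;     /* caller retries when the socket is ready */
            return -1;
        }
        c->state = -1;          /* treat everything else as closed/errored */
        return -1;
    }
    return ret;
}

int demo_write(demo_conn *c, const void *buf, int len) {
    ERR_clear_error();                       /* step 1: clean error queue  */
    int ret = SSL_write(c->ssl, buf, len);   /* step 2: the actual I/O     */
    return after_ssl_io(c, ret);             /* step 3: shared bookkeeping */
}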
Some files were not shown because too many files have changed in this diff.