refactor server global into a pointer (so that threadsafe commands can set this to NULL to prove they don't rely on it)

Former-commit-id: 37b2d046bef12c7ee7cac4883f0b64ddde236d74
John Sully 2019-04-21 14:01:10 -04:00
parent e2e3acf04c
commit 7b87123505
39 changed files with 2650 additions and 2647 deletions
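
A minimal sketch of the refactor's shape, assuming only the g_pserver naming visible in the diff; the NULL-swapping harness below is a hypothetical illustration, not code from this commit:

    /* Before: every access went through a global instance.
     *     struct redisServer server;     // server.dirty++, server.clients, ...
     * After: accesses go through a global pointer. */
    struct redisServer *g_pserver;        /* g_pserver->dirty++, g_pserver->clients, ... */

    /* Because it is a pointer, a test can temporarily null it out to prove
     * that a command claiming to be threadsafe never touches shared server
     * state (any stray g_pserver-> dereference faults immediately). */
    void assertNoGlobalServerUse(void (*cmdproc)(client *c), client *c) {
        struct redisServer *saved = g_pserver;
        g_pserver = NULL;
        cmdproc(c);
        g_pserver = saved;
    }

Note in the hunks below that per-process config is routed through cserver and per-thread state through serverTL, so not every former server.* field moves behind g_pserver.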

src/acl.c

@ -227,7 +227,7 @@ void ACLFreeUser(user *u) {
void ACLFreeUserAndKillClients(user *u) {
listIter li;
listNode *ln;
listRewind(server.clients,&li);
listRewind(g_pserver->clients,&li);
while ((ln = listNext(&li)) != NULL) {
client *c = (client*)listNodeValue(ln);
if (c->puser == u) {
@ -334,7 +334,7 @@ void ACLSetUserCommandBit(user *u, unsigned long id, int value) {
int ACLSetUserCommandBitsForCategory(user *u, const char *category, int value) {
uint64_t cflag = ACLGetCommandCategoryFlagByName(category);
if (!cflag) return C_ERR;
dictIterator *di = dictGetIterator(server.orig_commands);
dictIterator *di = dictGetIterator(g_pserver->orig_commands);
dictEntry *de;
while ((de = dictNext(di)) != NULL) {
struct redisCommand *cmd = (redisCommand*)dictGetVal(de);
@ -359,7 +359,7 @@ int ACLCountCategoryBitsForUser(user *u, unsigned long *on, unsigned long *off,
if (!cflag) return C_ERR;
*on = *off = 0;
dictIterator *di = dictGetIterator(server.orig_commands);
dictIterator *di = dictGetIterator(g_pserver->orig_commands);
dictEntry *de;
while ((de = dictNext(di)) != NULL) {
struct redisCommand *cmd = (redisCommand*)dictGetVal(de);
@ -428,7 +428,7 @@ sds ACLDescribeUserCommandRules(user *u) {
}
/* Fix the final ACLs with single commands differences. */
dictIterator *di = dictGetIterator(server.orig_commands);
dictIterator *di = dictGetIterator(g_pserver->orig_commands);
dictEntry *de;
while ((de = dictNext(di)) != NULL) {
struct redisCommand *cmd = (redisCommand*)dictGetVal(de);
@ -533,7 +533,7 @@ sds ACLDescribeUser(user *u) {
struct redisCommand *ACLLookupCommand(const char *name) {
struct redisCommand *cmd;
sds sdsname = sdsnew(name);
cmd = (redisCommand*)dictFetchValue(server.orig_commands, sdsname);
cmd = (redisCommand*)dictFetchValue(g_pserver->orig_commands, sdsname);
sdsfree(sdsname);
return cmd;
}
@ -1194,7 +1194,7 @@ sds ACLLoadFromFile(const char *filename) {
if (argv == NULL) {
errors = sdscatprintf(errors,
"%s:%d: unbalanced quotes in acl line. ",
server.acl_filename, linenum);
g_pserver->acl_filename, linenum);
continue;
}
@ -1208,7 +1208,7 @@ sds ACLLoadFromFile(const char *filename) {
if (strcmp(argv[0],"user") || argc < 2) {
errors = sdscatprintf(errors,
"%s:%d should start with user keyword followed "
"by the username. ", server.acl_filename,
"by the username. ", g_pserver->acl_filename,
linenum);
sdsfreesplitres(argv,argc);
continue;
@ -1223,7 +1223,7 @@ sds ACLLoadFromFile(const char *filename) {
const char *errmsg = ACLSetUserStringError();
errors = sdscatprintf(errors,
"%s:%d: %s. ",
server.acl_filename, linenum, errmsg);
g_pserver->acl_filename, linenum, errmsg);
continue;
}
}
@ -1349,7 +1349,7 @@ cleanup:
* The function will just exit with an error if the user is trying to mix
* both the loading methods. */
void ACLLoadUsersAtStartup(void) {
if (server.acl_filename[0] != '\0' && listLength(UsersToLoad) != 0) {
if (g_pserver->acl_filename[0] != '\0' && listLength(UsersToLoad) != 0) {
serverLog(LL_WARNING,
"Configuring Redis with users defined in redis.conf and at "
"the same setting an ACL file path is invalid. This setup "
@ -1365,8 +1365,8 @@ void ACLLoadUsersAtStartup(void) {
exit(1);
}
if (server.acl_filename[0] != '\0') {
sds errors = ACLLoadFromFile(server.acl_filename);
if (g_pserver->acl_filename[0] != '\0') {
sds errors = ACLLoadFromFile(g_pserver->acl_filename);
if (errors) {
serverLog(LL_WARNING,
"Aborting Redis startup because of ACL errors: %s", errors);
@ -1526,13 +1526,13 @@ void aclCommand(client *c) {
} else {
addReplyNull(c);
}
} else if (server.acl_filename[0] == '\0' &&
} else if (g_pserver->acl_filename[0] == '\0' &&
(!strcasecmp(sub,"load") || !strcasecmp(sub,"save")))
{
addReplyError(c,"This Redis instance is not configured to use an ACL file. You may want to specify users via the ACL SETUSER command and then issue a CONFIG REWRITE (assuming you have a Redis configuration file set) in order to store users in the Redis configuration.");
return;
} else if (!strcasecmp(sub,"load") && c->argc == 2) {
sds errors = ACLLoadFromFile(server.acl_filename);
sds errors = ACLLoadFromFile(g_pserver->acl_filename);
if (errors == NULL) {
addReply(c,shared.ok);
} else {
@ -1540,7 +1540,7 @@ void aclCommand(client *c) {
sdsfree(errors);
}
} else if (!strcasecmp(sub,"save") && c->argc == 2) {
if (ACLSaveToFile(server.acl_filename) == C_OK) {
if (ACLSaveToFile(g_pserver->acl_filename) == C_OK) {
addReply(c,shared.ok);
} else {
addReplyError(c,"There was an error trying to save the ACLs. "
@ -1561,7 +1561,7 @@ void aclCommand(client *c) {
}
int arraylen = 0;
void *dl = addReplyDeferredLen(c);
dictIterator *di = dictGetIterator(server.orig_commands);
dictIterator *di = dictGetIterator(g_pserver->orig_commands);
dictEntry *de;
while ((de = dictNext(di)) != NULL) {
struct redisCommand *cmd = (redisCommand*)dictGetVal(de);

src/aof.c

@ -65,14 +65,14 @@ typedef struct aofrwblock {
} aofrwblock;
/* This function frees the old AOF rewrite buffer if needed, and initializes
* a fresh new one. It tests for server.aof_rewrite_buf_blocks equal to NULL
* a fresh new one. It tests for g_pserver->aof_rewrite_buf_blocks equal to NULL
* so it can be used for the first initialization as well. */
void aofRewriteBufferReset(void) {
if (server.aof_rewrite_buf_blocks)
listRelease(server.aof_rewrite_buf_blocks);
if (g_pserver->aof_rewrite_buf_blocks)
listRelease(g_pserver->aof_rewrite_buf_blocks);
server.aof_rewrite_buf_blocks = listCreate();
listSetFreeMethod(server.aof_rewrite_buf_blocks,zfree);
g_pserver->aof_rewrite_buf_blocks = listCreate();
listSetFreeMethod(g_pserver->aof_rewrite_buf_blocks,zfree);
}
/* Return the current size of the AOF rewrite buffer. */
@ -81,7 +81,7 @@ unsigned long aofRewriteBufferSize(void) {
listIter li;
unsigned long size = 0;
listRewind(server.aof_rewrite_buf_blocks,&li);
listRewind(g_pserver->aof_rewrite_buf_blocks,&li);
while((ln = listNext(&li))) {
aofrwblock *block = (aofrwblock*)listNodeValue(ln);
size += block->used;
@ -104,28 +104,28 @@ void aofChildWriteDiffData(aeEventLoop *el, int fd, void *privdata, int mask) {
UNUSED(mask);
while(1) {
ln = listFirst(server.aof_rewrite_buf_blocks);
ln = listFirst(g_pserver->aof_rewrite_buf_blocks);
block = (aofrwblock*)(ln ? ln->value : NULL);
if (server.aof_stop_sending_diff || !block) {
aeDeleteFileEvent(el,server.aof_pipe_write_data_to_child,
if (g_pserver->aof_stop_sending_diff || !block) {
aeDeleteFileEvent(el,g_pserver->aof_pipe_write_data_to_child,
AE_WRITABLE);
return;
}
if (block->used > 0) {
nwritten = write(server.aof_pipe_write_data_to_child,
nwritten = write(g_pserver->aof_pipe_write_data_to_child,
block->buf,block->used);
if (nwritten <= 0) return;
memmove(block->buf,block->buf+nwritten,block->used-nwritten);
block->used -= nwritten;
block->free += nwritten;
}
if (block->used == 0) listDelNode(server.aof_rewrite_buf_blocks,ln);
if (block->used == 0) listDelNode(g_pserver->aof_rewrite_buf_blocks,ln);
}
}
/* Append data to the AOF rewrite buffer, allocating new blocks if needed. */
void aofRewriteBufferAppend(unsigned char *s, unsigned long len) {
listNode *ln = listLast(server.aof_rewrite_buf_blocks);
listNode *ln = listLast(g_pserver->aof_rewrite_buf_blocks);
aofrwblock *block = (aofrwblock*)(ln ? ln->value : NULL);
while(len) {
@ -148,11 +148,11 @@ void aofRewriteBufferAppend(unsigned char *s, unsigned long len) {
block = (aofrwblock*)zmalloc(sizeof(*block), MALLOC_LOCAL);
block->free = AOF_RW_BUF_BLOCK_SIZE;
block->used = 0;
listAddNodeTail(server.aof_rewrite_buf_blocks,block);
listAddNodeTail(g_pserver->aof_rewrite_buf_blocks,block);
/* Log every time we cross another 10 or 100 blocks, respectively
* as a notice or warning. */
numblocks = listLength(server.aof_rewrite_buf_blocks);
numblocks = listLength(g_pserver->aof_rewrite_buf_blocks);
if (((numblocks+1) % 10) == 0) {
int level = ((numblocks+1) % 100) == 0 ? LL_WARNING :
LL_NOTICE;
@ -164,8 +164,8 @@ void aofRewriteBufferAppend(unsigned char *s, unsigned long len) {
/* Install a file event to send data to the rewrite child if there is
* not one already. */
if (aeGetFileEvents(serverTL->el,server.aof_pipe_write_data_to_child) == 0) {
aeCreateFileEvent(serverTL->el, server.aof_pipe_write_data_to_child,
if (aeGetFileEvents(serverTL->el,g_pserver->aof_pipe_write_data_to_child) == 0) {
aeCreateFileEvent(serverTL->el, g_pserver->aof_pipe_write_data_to_child,
AE_WRITABLE, aofChildWriteDiffData, NULL);
}
}
@ -178,7 +178,7 @@ ssize_t aofRewriteBufferWrite(int fd) {
listIter li;
ssize_t count = 0;
listRewind(server.aof_rewrite_buf_blocks,&li);
listRewind(g_pserver->aof_rewrite_buf_blocks,&li);
while((ln = listNext(&li))) {
aofrwblock *block = (aofrwblock*)listNodeValue(ln);
ssize_t nwritten;
@ -209,18 +209,18 @@ void aof_background_fsync(int fd) {
void killAppendOnlyChild(void) {
int statloc;
/* No AOFRW child? return. */
if (server.aof_child_pid == -1) return;
if (g_pserver->aof_child_pid == -1) return;
/* Kill AOFRW child, wait for child exit. */
serverLog(LL_NOTICE,"Killing running AOF rewrite child: %ld",
(long) server.aof_child_pid);
if (kill(server.aof_child_pid,SIGUSR1) != -1) {
while(wait3(&statloc,0,NULL) != server.aof_child_pid);
(long) g_pserver->aof_child_pid);
if (kill(g_pserver->aof_child_pid,SIGUSR1) != -1) {
while(wait3(&statloc,0,NULL) != g_pserver->aof_child_pid);
}
/* Reset the buffer accumulating changes while the child saves. */
aofRewriteBufferReset();
aofRemoveTempFile(server.aof_child_pid);
server.aof_child_pid = -1;
server.aof_rewrite_time_start = -1;
aofRemoveTempFile(g_pserver->aof_child_pid);
g_pserver->aof_child_pid = -1;
g_pserver->aof_rewrite_time_start = -1;
/* Close pipes used for IPC between the two processes. */
aofClosePipes();
closeChildInfoPipe();
@ -230,14 +230,14 @@ void killAppendOnlyChild(void) {
/* Called when the user switches from "appendonly yes" to "appendonly no"
* at runtime using the CONFIG command. */
void stopAppendOnly(void) {
serverAssert(server.aof_state != AOF_OFF);
serverAssert(g_pserver->aof_state != AOF_OFF);
flushAppendOnlyFile(1);
redis_fsync(server.aof_fd);
close(server.aof_fd);
redis_fsync(g_pserver->aof_fd);
close(g_pserver->aof_fd);
server.aof_fd = -1;
server.aof_selected_db = -1;
server.aof_state = AOF_OFF;
g_pserver->aof_fd = -1;
g_pserver->aof_selected_db = -1;
g_pserver->aof_state = AOF_OFF;
killAppendOnlyChild();
}
@ -247,27 +247,27 @@ int startAppendOnly(void) {
char cwd[MAXPATHLEN]; /* Current working dir path for error messages. */
int newfd;
newfd = open(server.aof_filename,O_WRONLY|O_APPEND|O_CREAT,0644);
serverAssert(server.aof_state == AOF_OFF);
newfd = open(g_pserver->aof_filename,O_WRONLY|O_APPEND|O_CREAT,0644);
serverAssert(g_pserver->aof_state == AOF_OFF);
if (newfd == -1) {
char *cwdp = getcwd(cwd,MAXPATHLEN);
serverLog(LL_WARNING,
"Redis needs to enable the AOF but can't open the "
"append only file %s (in server root dir %s): %s",
server.aof_filename,
g_pserver->aof_filename,
cwdp ? cwdp : "unknown",
strerror(errno));
return C_ERR;
}
if (server.rdb_child_pid != -1) {
server.aof_rewrite_scheduled = 1;
if (g_pserver->rdb_child_pid != -1) {
g_pserver->aof_rewrite_scheduled = 1;
serverLog(LL_WARNING,"AOF was enabled but there is already a child process saving an RDB file on disk. An AOF background was scheduled to start when possible.");
} else {
/* If there is a pending AOF rewrite, we need to switch it off and
* start a new one: the old one cannot be reused because it is not
* accumulating the AOF buffer. */
if (server.aof_child_pid != -1) {
if (g_pserver->aof_child_pid != -1) {
serverLog(LL_WARNING,"AOF was enabled but there is already an AOF rewriting in background. Stopping background AOF and starting a rewrite now.");
killAppendOnlyChild();
}
@ -279,9 +279,9 @@ int startAppendOnly(void) {
}
/* We correctly switched on AOF, now wait for the rewrite to be complete
* in order to append data on disk. */
server.aof_state = AOF_WAIT_REWRITE;
server.aof_last_fsync = server.unixtime;
server.aof_fd = newfd;
g_pserver->aof_state = AOF_WAIT_REWRITE;
g_pserver->aof_last_fsync = g_pserver->unixtime;
g_pserver->aof_fd = newfd;
return C_OK;
}
@ -337,29 +337,29 @@ void flushAppendOnlyFile(int force) {
int sync_in_progress = 0;
mstime_t latency;
if (sdslen(server.aof_buf) == 0) return;
if (sdslen(g_pserver->aof_buf) == 0) return;
if (server.aof_fsync == AOF_FSYNC_EVERYSEC)
if (g_pserver->aof_fsync == AOF_FSYNC_EVERYSEC)
sync_in_progress = bioPendingJobsOfType(BIO_AOF_FSYNC) != 0;
if (server.aof_fsync == AOF_FSYNC_EVERYSEC && !force) {
if (g_pserver->aof_fsync == AOF_FSYNC_EVERYSEC && !force) {
/* With this append fsync policy we do background fsyncing.
* If the fsync is still in progress we can try to delay
* the write for a couple of seconds. */
if (sync_in_progress) {
if (server.aof_flush_postponed_start == 0) {
if (g_pserver->aof_flush_postponed_start == 0) {
/* No previous write postponing, remember that we are
* postponing the flush and return. */
server.aof_flush_postponed_start = server.unixtime;
g_pserver->aof_flush_postponed_start = g_pserver->unixtime;
return;
} else if (server.unixtime - server.aof_flush_postponed_start < 2) {
} else if (g_pserver->unixtime - g_pserver->aof_flush_postponed_start < 2) {
/* We were already waiting for fsync to finish, but for less
* than two seconds this is still ok. Postpone again. */
return;
}
/* Otherwise fall through, and go write since we can't wait
* over two seconds. */
server.aof_delayed_fsync++;
g_pserver->aof_delayed_fsync++;
serverLog(LL_NOTICE,"Asynchronous AOF fsync is taking too long (disk is busy?). Writing the AOF buffer without waiting for fsync to complete, this may slow down Redis.");
}
}
@ -370,7 +370,7 @@ void flushAppendOnlyFile(int force) {
* or alike */
latencyStartMonitor(latency);
nwritten = aofWrite(server.aof_fd,server.aof_buf,sdslen(server.aof_buf));
nwritten = aofWrite(g_pserver->aof_fd,g_pserver->aof_buf,sdslen(g_pserver->aof_buf));
latencyEndMonitor(latency);
/* We want to capture different events for delayed writes:
* when the delay happens with a pending fsync, or with a saving child
@ -379,7 +379,7 @@ void flushAppendOnlyFile(int force) {
* useful for graphing / monitoring purposes. */
if (sync_in_progress) {
latencyAddSampleIfNeeded("aof-write-pending-fsync",latency);
} else if (server.aof_child_pid != -1 || server.rdb_child_pid != -1) {
} else if (g_pserver->aof_child_pid != -1 || g_pserver->rdb_child_pid != -1) {
latencyAddSampleIfNeeded("aof-write-active-child",latency);
} else {
latencyAddSampleIfNeeded("aof-write-alone",latency);
@ -387,16 +387,16 @@ void flushAppendOnlyFile(int force) {
latencyAddSampleIfNeeded("aof-write",latency);
/* We performed the write so reset the postponed flush sentinel to zero. */
server.aof_flush_postponed_start = 0;
g_pserver->aof_flush_postponed_start = 0;
if (nwritten != (ssize_t)sdslen(server.aof_buf)) {
if (nwritten != (ssize_t)sdslen(g_pserver->aof_buf)) {
static time_t last_write_error_log = 0;
int can_log = 0;
/* Limit logging rate to 1 line per AOF_WRITE_LOG_ERROR_RATE seconds. */
if ((server.unixtime - last_write_error_log) > AOF_WRITE_LOG_ERROR_RATE) {
if ((g_pserver->unixtime - last_write_error_log) > AOF_WRITE_LOG_ERROR_RATE) {
can_log = 1;
last_write_error_log = server.unixtime;
last_write_error_log = g_pserver->unixtime;
}
/* Log the AOF write error and record the error code. */
@ -404,7 +404,7 @@ void flushAppendOnlyFile(int force) {
if (can_log) {
serverLog(LL_WARNING,"Error writing to the AOF file: %s",
strerror(errno));
server.aof_last_write_errno = errno;
g_pserver->aof_last_write_errno = errno;
}
} else {
if (can_log) {
@ -412,10 +412,10 @@ void flushAppendOnlyFile(int force) {
"the AOF file: (nwritten=%lld, "
"expected=%lld)",
(long long)nwritten,
(long long)sdslen(server.aof_buf));
(long long)sdslen(g_pserver->aof_buf));
}
if (ftruncate(server.aof_fd, server.aof_current_size) == -1) {
if (ftruncate(g_pserver->aof_fd, g_pserver->aof_current_size) == -1) {
if (can_log) {
serverLog(LL_WARNING, "Could not remove short write "
"from the append-only file. Redis may refuse "
@ -427,11 +427,11 @@ void flushAppendOnlyFile(int force) {
* -1 since there is no longer partial data into the AOF. */
nwritten = -1;
}
server.aof_last_write_errno = ENOSPC;
g_pserver->aof_last_write_errno = ENOSPC;
}
/* Handle the AOF write error. */
if (server.aof_fsync == AOF_FSYNC_ALWAYS) {
if (g_pserver->aof_fsync == AOF_FSYNC_ALWAYS) {
/* We can't recover when the fsync policy is ALWAYS since the
* reply for the client is already in the output buffers, and we
* have the contract with the user that on acknowledged write data
@ -442,55 +442,55 @@ void flushAppendOnlyFile(int force) {
/* Recover from failed write leaving data into the buffer. However
* set an error to stop accepting writes as long as the error
* condition is not cleared. */
server.aof_last_write_status = C_ERR;
g_pserver->aof_last_write_status = C_ERR;
/* Trim the sds buffer if there was a partial write, and there
* was no way to undo it with ftruncate(2). */
if (nwritten > 0) {
server.aof_current_size += nwritten;
sdsrange(server.aof_buf,nwritten,-1);
g_pserver->aof_current_size += nwritten;
sdsrange(g_pserver->aof_buf,nwritten,-1);
}
return; /* We'll try again on the next call... */
}
} else {
/* Successful write(2). If AOF was in error state, restore the
* OK state and log the event. */
if (server.aof_last_write_status == C_ERR) {
if (g_pserver->aof_last_write_status == C_ERR) {
serverLog(LL_WARNING,
"AOF write error looks solved, Redis can write again.");
server.aof_last_write_status = C_OK;
g_pserver->aof_last_write_status = C_OK;
}
}
server.aof_current_size += nwritten;
g_pserver->aof_current_size += nwritten;
/* Re-use AOF buffer when it is small enough. The maximum comes from the
* arena size of 4k minus some overhead (but is otherwise arbitrary). */
if ((sdslen(server.aof_buf)+sdsavail(server.aof_buf)) < 4000) {
sdsclear(server.aof_buf);
if ((sdslen(g_pserver->aof_buf)+sdsavail(g_pserver->aof_buf)) < 4000) {
sdsclear(g_pserver->aof_buf);
} else {
sdsfree(server.aof_buf);
server.aof_buf = sdsempty();
sdsfree(g_pserver->aof_buf);
g_pserver->aof_buf = sdsempty();
}
/* Don't fsync if no-appendfsync-on-rewrite is set to yes and there are
* children doing I/O in the background. */
if (server.aof_no_fsync_on_rewrite &&
(server.aof_child_pid != -1 || server.rdb_child_pid != -1))
if (g_pserver->aof_no_fsync_on_rewrite &&
(g_pserver->aof_child_pid != -1 || g_pserver->rdb_child_pid != -1))
return;
/* Perform the fsync if needed. */
if (server.aof_fsync == AOF_FSYNC_ALWAYS) {
if (g_pserver->aof_fsync == AOF_FSYNC_ALWAYS) {
/* redis_fsync is defined as fdatasync() for Linux in order to avoid
* flushing metadata. */
latencyStartMonitor(latency);
redis_fsync(server.aof_fd); /* Let's try to get this data on the disk */
redis_fsync(g_pserver->aof_fd); /* Let's try to get this data on the disk */
latencyEndMonitor(latency);
latencyAddSampleIfNeeded("aof-fsync-always",latency);
server.aof_last_fsync = server.unixtime;
} else if ((server.aof_fsync == AOF_FSYNC_EVERYSEC &&
server.unixtime > server.aof_last_fsync)) {
if (!sync_in_progress) aof_background_fsync(server.aof_fd);
server.aof_last_fsync = server.unixtime;
g_pserver->aof_last_fsync = g_pserver->unixtime;
} else if ((g_pserver->aof_fsync == AOF_FSYNC_EVERYSEC &&
g_pserver->unixtime > g_pserver->aof_last_fsync)) {
if (!sync_in_progress) aof_background_fsync(g_pserver->aof_fd);
g_pserver->aof_last_fsync = g_pserver->unixtime;
}
}
@ -562,13 +562,13 @@ void feedAppendOnlyFile(struct redisCommand *cmd, int dictid, robj **argv, int a
/* The DB this command was targeting is not the same as the last command
* we appended, so a SELECT command needs to be issued. */
if (dictid != server.aof_selected_db) {
if (dictid != g_pserver->aof_selected_db) {
char seldb[64];
snprintf(seldb,sizeof(seldb),"%d",dictid);
buf = sdscatprintf(buf,"*2\r\n$6\r\nSELECT\r\n$%lu\r\n%s\r\n",
(unsigned long)strlen(seldb),seldb);
server.aof_selected_db = dictid;
g_pserver->aof_selected_db = dictid;
}
if (cmd->proc == expireCommand || cmd->proc == pexpireCommand ||
@ -609,14 +609,14 @@ void feedAppendOnlyFile(struct redisCommand *cmd, int dictid, robj **argv, int a
/* Append to the AOF buffer. This will be flushed to disk just before
* re-entering the event loop, so before the client gets a
* positive reply about the operation performed. */
if (server.aof_state == AOF_ON)
server.aof_buf = sdscatlen(server.aof_buf,buf,sdslen(buf));
if (g_pserver->aof_state == AOF_ON)
g_pserver->aof_buf = sdscatlen(g_pserver->aof_buf,buf,sdslen(buf));
/* If a background append only file rewriting is in progress we want to
* accumulate the differences between the child DB and the current one
* in a buffer, so that when the child process finishes its work we
* can append the differences to the new append only file. */
if (server.aof_child_pid != -1)
if (g_pserver->aof_child_pid != -1)
aofRewriteBufferAppend((unsigned char*)buf,sdslen(buf));
sdsfree(buf);
@ -686,7 +686,7 @@ int loadAppendOnlyFile(char *filename) {
struct client *fakeClient;
FILE *fp = fopen(filename,"r");
struct redis_stat sb;
int old_aof_state = server.aof_state;
int old_aof_state = g_pserver->aof_state;
long loops = 0;
off_t valid_up_to = 0; /* Offset of latest well-formed command loaded. */
off_t valid_before_multi = 0; /* Offset before MULTI command loaded. */
@ -702,14 +702,14 @@ int loadAppendOnlyFile(char *filename) {
* a zero length file at startup, that will remain like that if no write
* operation is received. */
if (fp && redis_fstat(fileno(fp),&sb) != -1 && sb.st_size == 0) {
server.aof_current_size = 0;
g_pserver->aof_current_size = 0;
fclose(fp);
return C_ERR;
}
/* Temporarily disable AOF, to prevent EXEC from feeding a MULTI
* to the same file we're about to read. */
server.aof_state = AOF_OFF;
g_pserver->aof_state = AOF_OFF;
fakeClient = createFakeClient();
startLoading(fp);
@ -748,7 +748,7 @@ int loadAppendOnlyFile(char *filename) {
/* Serve the clients from time to time */
if (!(loops++ % 1000)) {
loadingProgress(ftello(fp));
processEventsWhileBlocked(serverTL - server.rgthreadvar);
processEventsWhileBlocked(serverTL - g_pserver->rgthreadvar);
}
if (fgets(buf,sizeof(buf),fp) == NULL) {
@ -821,7 +821,7 @@ int loadAppendOnlyFile(char *filename) {
* argv/argc of the client instead of the local variables. */
freeFakeClientArgv(fakeClient);
fakeClient->cmd = NULL;
if (server.aof_load_truncated) valid_up_to = ftello(fp);
if (g_pserver->aof_load_truncated) valid_up_to = ftello(fp);
}
/* This point can only be reached when EOF is reached without errors.
@ -838,10 +838,10 @@ int loadAppendOnlyFile(char *filename) {
loaded_ok: /* DB loaded, cleanup and return C_OK to the caller. */
fclose(fp);
freeFakeClient(fakeClient);
server.aof_state = old_aof_state;
g_pserver->aof_state = old_aof_state;
stopLoading();
aofUpdateCurrentSize();
server.aof_rewrite_base_size = server.aof_current_size;
g_pserver->aof_rewrite_base_size = g_pserver->aof_current_size;
return C_OK;
readerr: /* Read error. If feof(fp) is true, fall through to unexpected EOF. */
@ -852,7 +852,7 @@ readerr: /* Read error. If feof(fp) is true, fall through to unexpected EOF. */
}
uxeof: /* Unexpected AOF end of file. */
if (server.aof_load_truncated) {
if (g_pserver->aof_load_truncated) {
serverLog(LL_WARNING,"!!! Warning: short read while loading the AOF file !!!");
serverLog(LL_WARNING,"!!! Truncating the AOF at offset %llu !!!",
(unsigned long long) valid_up_to);
@ -866,7 +866,7 @@ uxeof: /* Unexpected AOF end of file. */
} else {
/* Make sure the AOF file descriptor points to the end of the
* file after the truncate call. */
if (server.aof_fd != -1 && lseek(server.aof_fd,0,SEEK_END) == -1) {
if (g_pserver->aof_fd != -1 && lseek(g_pserver->aof_fd,0,SEEK_END) == -1) {
serverLog(LL_WARNING,"Can't seek the end of the AOF file: %s",
strerror(errno));
} else {
@ -877,7 +877,7 @@ uxeof: /* Unexpected AOF end of file. */
}
}
if (fakeClient) freeFakeClient(fakeClient); /* avoid valgrind warning */
serverLog(LL_WARNING,"Unexpected end of file reading the append only file. You can: 1) Make a backup of your AOF file, then use ./keydb-check-aof --fix <filename>. 2) Alternatively you can set the 'aof-load-truncated' configuration option to yes and restart the server.");
serverLog(LL_WARNING,"Unexpected end of file reading the append only file. You can: 1) Make a backup of your AOF file, then use ./keydb-check-aof --fix <filename>. 2) Alternatively you can set the 'aof-load-truncated' configuration option to yes and restart the g_pserver->");
exit(1);
fmterr: /* Format error. */
@ -891,7 +891,7 @@ fmterr: /* Format error. */
* ------------------------------------------------------------------------- */
/* Delegate writing an object to writing a bulk string or bulk long long.
* This is not placed in rio.c since that adds the server.h dependency. */
int rioWriteBulkObject(rio *r, robj *obj) {
/* Avoid using getDecodedObject to help copy-on-write (we are often
* in a child process when this function is called). */
@ -1266,8 +1266,8 @@ ssize_t aofReadDiffFromParent(void) {
ssize_t nread, total = 0;
while ((nread =
read(server.aof_pipe_read_data_from_parent,buf,sizeof(buf))) > 0) {
server.aof_child_diff = sdscatlen(server.aof_child_diff,buf,nread);
read(g_pserver->aof_pipe_read_data_from_parent,buf,sizeof(buf))) > 0) {
g_pserver->aof_child_diff = sdscatlen(g_pserver->aof_child_diff,buf,nread);
total += nread;
}
return total;
@ -1281,7 +1281,7 @@ int rewriteAppendOnlyFileRio(rio *aof) {
for (j = 0; j < cserver.dbnum; j++) {
char selectcmd[] = "*2\r\n$6\r\nSELECT\r\n";
redisDb *db = server.db+j;
redisDb *db = g_pserver->db+j;
dict *d = db->pdict;
if (dictSize(d) == 0) continue;
di = dictGetSafeIterator(d);
@ -1372,13 +1372,13 @@ int rewriteAppendOnlyFile(char *filename) {
return C_ERR;
}
server.aof_child_diff = sdsempty();
g_pserver->aof_child_diff = sdsempty();
rioInitWithFile(&aof,fileno(fp));
if (server.aof_rewrite_incremental_fsync)
if (g_pserver->aof_rewrite_incremental_fsync)
rioSetAutoSync(&aof,REDIS_AUTOSYNC_BYTES);
if (server.aof_use_rdb_preamble) {
if (g_pserver->aof_use_rdb_preamble) {
int error;
if (rdbSaveRio(&aof,&error,RDB_SAVE_AOF_PREAMBLE,NULL) == C_ERR) {
errno = error;
@ -1401,7 +1401,7 @@ int rewriteAppendOnlyFile(char *filename) {
* happens after 20 ms without new data). */
start = mstime();
while(mstime()-start < 1000 && nodata < 20) {
if (aeWait(server.aof_pipe_read_data_from_parent, AE_READABLE, 1) <= 0)
if (aeWait(g_pserver->aof_pipe_read_data_from_parent, AE_READABLE, 1) <= 0)
{
nodata++;
continue;
@ -1412,13 +1412,13 @@ int rewriteAppendOnlyFile(char *filename) {
}
/* Ask the master to stop sending diffs. */
if (write(server.aof_pipe_write_ack_to_parent,"!",1) != 1) goto werr;
if (anetNonBlock(NULL,server.aof_pipe_read_ack_from_parent) != ANET_OK)
if (write(g_pserver->aof_pipe_write_ack_to_parent,"!",1) != 1) goto werr;
if (anetNonBlock(NULL,g_pserver->aof_pipe_read_ack_from_parent) != ANET_OK)
goto werr;
/* We read the ACK from the parent using a 5 second timeout. Normally
* it should reply ASAP, but just in case we lose its reply, we are sure
* the child will eventually get terminated. */
if (syncRead(server.aof_pipe_read_ack_from_parent,&byte,1,5000) != 1 ||
if (syncRead(g_pserver->aof_pipe_read_ack_from_parent,&byte,1,5000) != 1 ||
byte != '!') goto werr;
serverLog(LL_NOTICE,"Parent agreed to stop sending diffs. Finalizing AOF...");
@ -1428,8 +1428,8 @@ int rewriteAppendOnlyFile(char *filename) {
/* Write the received diff to the file. */
serverLog(LL_NOTICE,
"Concatenating %.2f MB of AOF diff received from parent.",
(double) sdslen(server.aof_child_diff) / (1024*1024));
if (rioWrite(&aof,server.aof_child_diff,sdslen(server.aof_child_diff)) == 0)
(double) sdslen(g_pserver->aof_child_diff) / (1024*1024));
if (rioWrite(&aof,g_pserver->aof_child_diff,sdslen(g_pserver->aof_child_diff)) == 0)
goto werr;
/* Make sure data will not remain on the OS's output buffers */
@ -1469,8 +1469,8 @@ void aofChildPipeReadable(aeEventLoop *el, int fd, void *privdata, int mask) {
if (read(fd,&byte,1) == 1 && byte == '!') {
serverLog(LL_NOTICE,"AOF rewrite child asks to stop sending diffs.");
server.aof_stop_sending_diff = 1;
if (write(server.aof_pipe_write_ack_to_child,"!",1) != 1) {
g_pserver->aof_stop_sending_diff = 1;
if (write(g_pserver->aof_pipe_write_ack_to_child,"!",1) != 1) {
/* If we can't send the ack, inform the user, but don't try again
* since in the other side the children will use a timeout if the
* kernel can't buffer our write, or, the children was
@ -1481,7 +1481,7 @@ void aofChildPipeReadable(aeEventLoop *el, int fd, void *privdata, int mask) {
}
/* Remove the handler since this can be called only one time during a
* rewrite. */
aeDeleteFileEventAsync(server.rgthreadvar[IDX_EVENT_LOOP_MAIN].el,server.aof_pipe_read_ack_from_child,AE_READABLE);
aeDeleteFileEventAsync(g_pserver->rgthreadvar[IDX_EVENT_LOOP_MAIN].el,g_pserver->aof_pipe_read_ack_from_child,AE_READABLE);
}
/* Create the pipes used for parent - child process IPC during rewrite.
@ -1501,14 +1501,14 @@ int aofCreatePipes(void) {
if (anetNonBlock(NULL,fds[1]) != ANET_OK) goto error;
if (aeCreateFileEvent(serverTL->el, fds[2], AE_READABLE, aofChildPipeReadable, NULL) == AE_ERR) goto error;
server.aof_pipe_write_data_to_child = fds[1];
server.aof_pipe_read_data_from_parent = fds[0];
server.aof_pipe_write_ack_to_parent = fds[3];
server.aof_pipe_read_ack_from_child = fds[2];
server.el_alf_pip_read_ack_from_child = serverTL->el;
server.aof_pipe_write_ack_to_child = fds[5];
server.aof_pipe_read_ack_from_parent = fds[4];
server.aof_stop_sending_diff = 0;
g_pserver->aof_pipe_write_data_to_child = fds[1];
g_pserver->aof_pipe_read_data_from_parent = fds[0];
g_pserver->aof_pipe_write_ack_to_parent = fds[3];
g_pserver->aof_pipe_read_ack_from_child = fds[2];
g_pserver->el_alf_pip_read_ack_from_child = serverTL->el;
g_pserver->aof_pipe_write_ack_to_child = fds[5];
g_pserver->aof_pipe_read_ack_from_parent = fds[4];
g_pserver->aof_stop_sending_diff = 0;
return C_OK;
error:
@ -1519,14 +1519,14 @@ error:
}
void aofClosePipes(void) {
aeDeleteFileEventAsync(server.el_alf_pip_read_ack_from_child,server.aof_pipe_read_ack_from_child,AE_READABLE);
aeDeleteFileEventAsync(server.rgthreadvar[IDX_EVENT_LOOP_MAIN].el,server.aof_pipe_write_data_to_child,AE_WRITABLE);
close(server.aof_pipe_write_data_to_child);
close(server.aof_pipe_read_data_from_parent);
close(server.aof_pipe_write_ack_to_parent);
close(server.aof_pipe_read_ack_from_child);
close(server.aof_pipe_write_ack_to_child);
close(server.aof_pipe_read_ack_from_parent);
aeDeleteFileEventAsync(g_pserver->el_alf_pip_read_ack_from_child,g_pserver->aof_pipe_read_ack_from_child,AE_READABLE);
aeDeleteFileEventAsync(g_pserver->rgthreadvar[IDX_EVENT_LOOP_MAIN].el,g_pserver->aof_pipe_write_data_to_child,AE_WRITABLE);
close(g_pserver->aof_pipe_write_data_to_child);
close(g_pserver->aof_pipe_read_data_from_parent);
close(g_pserver->aof_pipe_write_ack_to_parent);
close(g_pserver->aof_pipe_read_ack_from_child);
close(g_pserver->aof_pipe_write_ack_to_child);
close(g_pserver->aof_pipe_read_ack_from_parent);
}
/* ----------------------------------------------------------------------------
@ -1538,10 +1538,10 @@ void aofClosePipes(void) {
* 1) The user calls BGREWRITEAOF
* 2) Redis calls this function, that forks():
* 2a) the child rewrites the append only file in a temp file.
* 2b) the parent accumulates differences in server.aof_rewrite_buf.
* 2b) the parent accumulates differences in g_pserver->aof_rewrite_buf.
* 3) When the child has finished '2a', it exits.
* 4) The parent will trap the exit code, if it's OK, will append the
* data accumulated into server.aof_rewrite_buf into the temp file, and
* data accumulated into g_pserver->aof_rewrite_buf into the temp file, and
* finally will rename(2) the temp file to the actual file name.
* Then the new file is reopened as the new append only file. Profit!
*/
@ -1549,7 +1549,7 @@ int rewriteAppendOnlyFileBackground(void) {
pid_t childpid;
long long start;
if (server.aof_child_pid != -1 || server.rdb_child_pid != -1) return C_ERR;
if (g_pserver->aof_child_pid != -1 || g_pserver->rdb_child_pid != -1) return C_ERR;
if (aofCreatePipes() != C_OK) return C_ERR;
openChildInfoPipe();
start = ustime();
@ -1569,7 +1569,7 @@ int rewriteAppendOnlyFileBackground(void) {
private_dirty/(1024*1024));
}
server.child_info_data.cow_size = private_dirty;
g_pserver->child_info_data.cow_size = private_dirty;
sendChildInfo(CHILD_INFO_TYPE_AOF);
exitFromChild(0);
} else {
@ -1577,9 +1577,9 @@ int rewriteAppendOnlyFileBackground(void) {
}
} else {
/* Parent */
server.stat_fork_time = ustime()-start;
server.stat_fork_rate = (double) zmalloc_used_memory() * 1000000 / server.stat_fork_time / (1024*1024*1024); /* GB per second. */
latencyAddSampleIfNeeded("fork",server.stat_fork_time/1000);
g_pserver->stat_fork_time = ustime()-start;
g_pserver->stat_fork_rate = (double) zmalloc_used_memory() * 1000000 / g_pserver->stat_fork_time / (1024*1024*1024); /* GB per second. */
latencyAddSampleIfNeeded("fork",g_pserver->stat_fork_time/1000);
if (childpid == -1) {
closeChildInfoPipe();
serverLog(LL_WARNING,
@ -1590,15 +1590,15 @@ int rewriteAppendOnlyFileBackground(void) {
}
serverLog(LL_NOTICE,
"Background append only file rewriting started by pid %d",childpid);
server.aof_rewrite_scheduled = 0;
server.aof_rewrite_time_start = time(NULL);
server.aof_child_pid = childpid;
g_pserver->aof_rewrite_scheduled = 0;
g_pserver->aof_rewrite_time_start = time(NULL);
g_pserver->aof_child_pid = childpid;
updateDictResizePolicy();
/* We set appendseldb to -1 in order to force the next call to the
* feedAppendOnlyFile() to issue a SELECT command, so the differences
* accumulated by the parent into server.aof_rewrite_buf will start
* accumulated by the parent into g_pserver->aof_rewrite_buf will start
* with a SELECT statement and it will be safe to merge. */
server.aof_selected_db = -1;
g_pserver->aof_selected_db = -1;
replicationScriptCacheFlush();
return C_OK;
}
@ -1606,10 +1606,10 @@ int rewriteAppendOnlyFileBackground(void) {
}
void bgrewriteaofCommand(client *c) {
if (server.aof_child_pid != -1) {
if (g_pserver->aof_child_pid != -1) {
addReplyError(c,"Background append only file rewriting already in progress");
} else if (server.rdb_child_pid != -1) {
server.aof_rewrite_scheduled = 1;
} else if (g_pserver->rdb_child_pid != -1) {
g_pserver->aof_rewrite_scheduled = 1;
addReplyStatus(c,"Background append only file rewriting scheduled");
} else if (rewriteAppendOnlyFileBackground() == C_OK) {
addReplyStatus(c,"Background append only file rewriting started");
@ -1628,7 +1628,7 @@ void aofRemoveTempFile(pid_t childpid) {
unlink(tmpfile);
}
/* Update the server.aof_current_size field explicitly using stat(2)
/* Update the g_pserver->aof_current_size field explicitly using stat(2)
* to check the size of the file. This is useful after a rewrite or after
* a restart; normally the size is updated by just adding the write length
* to the current length, which is much faster.
@ -1637,11 +1637,11 @@ void aofUpdateCurrentSize(void) {
mstime_t latency;
latencyStartMonitor(latency);
if (redis_fstat(server.aof_fd,&sb) == -1) {
if (redis_fstat(g_pserver->aof_fd,&sb) == -1) {
serverLog(LL_WARNING,"Unable to obtain the AOF file length. stat: %s",
strerror(errno));
} else {
server.aof_current_size = sb.st_size;
g_pserver->aof_current_size = sb.st_size;
}
latencyEndMonitor(latency);
latencyAddSampleIfNeeded("aof-fstat",latency);
@ -1663,7 +1663,7 @@ void backgroundRewriteDoneHandler(int exitcode, int bysignal) {
* rewritten AOF. */
latencyStartMonitor(latency);
snprintf(tmpfile,256,"temp-rewriteaof-bg-%d.aof",
(int)server.aof_child_pid);
(int)g_pserver->aof_child_pid);
newfd = open(tmpfile,O_WRONLY|O_APPEND);
if (newfd == -1) {
serverLog(LL_WARNING,
@ -1692,14 +1692,14 @@ void backgroundRewriteDoneHandler(int exitcode, int bysignal) {
*
* 1) AOF is DISABLED and this was a one time rewrite. The temporary
* file will be renamed to the configured file. When this file already
* exists, it will be unlinked, which may block the server.
*
* 2) AOF is ENABLED and the rewritten AOF will immediately start
* receiving writes. After the temporary file is renamed to the
* configured file, the original AOF file descriptor will be closed.
* Since this will be the last reference to that file, closing it
* causes the underlying file to be unlinked, which may block the
* server.
*
* To mitigate the blocking effect of the unlink operation (either
* caused by rename(2) in scenario 1, or by close(2) in scenario 2), we
@ -1710,13 +1710,13 @@ void backgroundRewriteDoneHandler(int exitcode, int bysignal) {
* guarantee atomicity for this switch has already happened by then, so
* we don't care what the outcome or duration of that close operation
* is, as long as the file descriptor is released again. */
if (server.aof_fd == -1) {
if (g_pserver->aof_fd == -1) {
/* AOF disabled */
/* Don't care if this fails: oldfd will be -1 and we handle that.
* One notable case of -1 return is if the old file does
* not exist. */
oldfd = open(server.aof_filename,O_RDONLY|O_NONBLOCK);
oldfd = open(g_pserver->aof_filename,O_RDONLY|O_NONBLOCK);
} else {
/* AOF enabled */
oldfd = -1; /* We'll set this to the current AOF filedes later. */
@ -1725,11 +1725,11 @@ void backgroundRewriteDoneHandler(int exitcode, int bysignal) {
/* Rename the temporary file. This will not unlink the target file if
* it exists, because we reference it with "oldfd". */
latencyStartMonitor(latency);
if (rename(tmpfile,server.aof_filename) == -1) {
if (rename(tmpfile,g_pserver->aof_filename) == -1) {
serverLog(LL_WARNING,
"Error trying to rename the temporary AOF file %s into %s: %s",
tmpfile,
server.aof_filename,
g_pserver->aof_filename,
strerror(errno));
close(newfd);
if (oldfd != -1) close(oldfd);
@ -1738,34 +1738,34 @@ void backgroundRewriteDoneHandler(int exitcode, int bysignal) {
latencyEndMonitor(latency);
latencyAddSampleIfNeeded("aof-rename",latency);
if (server.aof_fd == -1) {
if (g_pserver->aof_fd == -1) {
/* AOF disabled, we don't need to set the AOF file descriptor
* to this new file, so we can close it. */
close(newfd);
} else {
/* AOF enabled, replace the old fd with the new one. */
oldfd = server.aof_fd;
server.aof_fd = newfd;
if (server.aof_fsync == AOF_FSYNC_ALWAYS)
oldfd = g_pserver->aof_fd;
g_pserver->aof_fd = newfd;
if (g_pserver->aof_fsync == AOF_FSYNC_ALWAYS)
redis_fsync(newfd);
else if (server.aof_fsync == AOF_FSYNC_EVERYSEC)
else if (g_pserver->aof_fsync == AOF_FSYNC_EVERYSEC)
aof_background_fsync(newfd);
server.aof_selected_db = -1; /* Make sure SELECT is re-issued */
g_pserver->aof_selected_db = -1; /* Make sure SELECT is re-issued */
aofUpdateCurrentSize();
server.aof_rewrite_base_size = server.aof_current_size;
g_pserver->aof_rewrite_base_size = g_pserver->aof_current_size;
/* Clear regular AOF buffer since its contents were just written to
* the new AOF from the background rewrite buffer. */
sdsfree(server.aof_buf);
server.aof_buf = sdsempty();
sdsfree(g_pserver->aof_buf);
g_pserver->aof_buf = sdsempty();
}
server.aof_lastbgrewrite_status = C_OK;
g_pserver->aof_lastbgrewrite_status = C_OK;
serverLog(LL_NOTICE, "Background AOF rewrite finished successfully");
/* Change state from WAIT_REWRITE to ON if needed */
if (server.aof_state == AOF_WAIT_REWRITE)
server.aof_state = AOF_ON;
if (g_pserver->aof_state == AOF_WAIT_REWRITE)
g_pserver->aof_state = AOF_ON;
/* Asynchronously close the overwritten AOF. */
if (oldfd != -1) bioCreateBackgroundJob(BIO_CLOSE_FILE,(void*)(long)oldfd,NULL,NULL);
@ -1776,11 +1776,11 @@ void backgroundRewriteDoneHandler(int exitcode, int bysignal) {
/* SIGUSR1 is whitelisted, so we have a way to kill a child without
* triggering an error condition. */
if (bysignal != SIGUSR1)
server.aof_lastbgrewrite_status = C_ERR;
g_pserver->aof_lastbgrewrite_status = C_ERR;
serverLog(LL_WARNING,
"Background AOF rewrite terminated with error");
} else {
server.aof_lastbgrewrite_status = C_ERR;
g_pserver->aof_lastbgrewrite_status = C_ERR;
serverLog(LL_WARNING,
"Background AOF rewrite terminated by signal %d", bysignal);
@ -1789,11 +1789,11 @@ void backgroundRewriteDoneHandler(int exitcode, int bysignal) {
cleanup:
aofClosePipes();
aofRewriteBufferReset();
aofRemoveTempFile(server.aof_child_pid);
server.aof_child_pid = -1;
server.aof_rewrite_time_last = time(NULL)-server.aof_rewrite_time_start;
server.aof_rewrite_time_start = -1;
aofRemoveTempFile(g_pserver->aof_child_pid);
g_pserver->aof_child_pid = -1;
g_pserver->aof_rewrite_time_last = time(NULL)-g_pserver->aof_rewrite_time_start;
g_pserver->aof_rewrite_time_start = -1;
/* Schedule a new rewrite if we are waiting for it to switch the AOF ON. */
if (server.aof_state == AOF_WAIT_REWRITE)
server.aof_rewrite_scheduled = 1;
if (g_pserver->aof_state == AOF_WAIT_REWRITE)
g_pserver->aof_rewrite_scheduled = 1;
}
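
One idiom worth noting in the aof.c hunks above is serverTL - g_pserver->rgthreadvar (see the processEventsWhileBlocked call): serverTL points at this thread's slot inside the rgthreadvar array, so pointer subtraction recovers the event-loop index. A toy sketch of that arithmetic, with invented stand-in types rather than KeyDB's real ones:

    #include <assert.h>

    #define MAX_EVENT_LOOPS 16
    struct threadVars { int placeholder; };           /* stand-in per-thread state */

    struct threadVars rgthreadvar[MAX_EVENT_LOOPS];   /* one slot per event loop */
    __thread struct threadVars *serverTL;             /* this thread's slot */

    void initThread(int iel) {
        serverTL = &rgthreadvar[iel];
        /* Element pointer minus array base yields the element's index. */
        assert(serverTL - rgthreadvar == iel);
    }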

src/bio.c

@ -4,7 +4,7 @@
* Currently there is only a single operation, that is a background close(2)
* system call. This is needed as when the process is the last owner of a
* reference to a file, closing it means unlinking it, and the deletion of the
* file is slow, blocking the server.
*
* In the future we'll either continue implementing new things we need or
* we'll switch to libeio. However there are probably long term uses for this

src/bitops.c

@ -556,7 +556,7 @@ void setbitCommand(client *c) {
((uint8_t*)ptrFromObj(o))[byte] = byteval;
signalModifiedKey(c->db,c->argv[1]);
notifyKeyspaceEvent(NOTIFY_STRING,"setbit",c->argv[1],c->db->id);
server.dirty++;
g_pserver->dirty++;
addReply(c, bitval ? shared.cone : shared.czero);
}
@ -762,7 +762,7 @@ void bitopCommand(client *c) {
signalModifiedKey(c->db,targetkey);
notifyKeyspaceEvent(NOTIFY_GENERIC,"del",targetkey,c->db->id);
}
server.dirty++;
g_pserver->dirty++;
addReplyLongLong(c,maxlen); /* Return the output string length in bytes. */
}
@ -1120,7 +1120,7 @@ void bitfieldCommand(client *c) {
if (changes) {
signalModifiedKey(c->db,c->argv[1]);
notifyKeyspaceEvent(NOTIFY_STRING,"setbit",c->argv[1],c->db->id);
server.dirty += changes;
g_pserver->dirty += changes;
}
zfree(ops);
}

src/blocked.c

@ -110,8 +110,8 @@ void blockClient(client *c, int btype) {
serverAssert(GlobalLocksAcquired());
c->flags |= CLIENT_BLOCKED;
c->btype = btype;
server.blocked_clients++;
server.blocked_clients_by_type[btype]++;
g_pserver->blocked_clients++;
g_pserver->blocked_clients_by_type[btype]++;
}
/* This function is called in the beforeSleep() function of the event loop
@ -122,8 +122,8 @@ void processUnblockedClients(int iel) {
listNode *ln;
client *c;
list *unblocked_clients = server.rgthreadvar[iel].unblocked_clients;
serverAssert(iel == (serverTL - server.rgthreadvar));
list *unblocked_clients = g_pserver->rgthreadvar[iel].unblocked_clients;
serverAssert(iel == (serverTL - g_pserver->rgthreadvar));
while (listLength(unblocked_clients)) {
ln = listFirst(unblocked_clients);
@ -171,7 +171,7 @@ void queueClientForReprocessing(client *c) {
fastlock_lock(&c->lock);
if (!(c->flags & CLIENT_UNBLOCKED)) {
c->flags |= CLIENT_UNBLOCKED;
listAddNodeTail(server.rgthreadvar[c->iel].unblocked_clients,c);
listAddNodeTail(g_pserver->rgthreadvar[c->iel].unblocked_clients,c);
}
fastlock_unlock(&c->lock);
}
@ -193,8 +193,8 @@ void unblockClient(client *c) {
}
/* Clear the flags, and put the client in the unblocked list so that
* we'll process new commands in its query buffer ASAP. */
server.blocked_clients--;
server.blocked_clients_by_type[c->btype]--;
g_pserver->blocked_clients--;
g_pserver->blocked_clients_by_type[c->btype]--;
c->flags &= ~CLIENT_BLOCKED;
c->btype = BLOCKED_NONE;
queueClientForReprocessing(c);
@ -229,7 +229,7 @@ void disconnectAllBlockedClients(void) {
listNode *ln;
listIter li;
listRewind(server.clients,&li);
listRewind(g_pserver->clients,&li);
while((ln = listNext(&li))) {
client *c = (client*)listNodeValue(ln);
@ -252,7 +252,7 @@ void disconnectAllBlockedClients(void) {
*
* All the keys with at least one client blocked that received at least
* one new element via some write operation are accumulated into
* the server.ready_keys list. This function will run the list and will
* the g_pserver->ready_keys list. This function will run the list and will
* serve clients accordingly. Note that the function will iterate again and
* again: as a result of serving BRPOPLPUSH we can have new blocking clients
* to serve because of the PUSH side of BRPOPLPUSH.
@ -268,15 +268,15 @@ void disconnectAllBlockedClients(void) {
* do, the function is already fair. */
void handleClientsBlockedOnKeys(void) {
serverAssert(GlobalLocksAcquired());
while(listLength(server.ready_keys) != 0) {
while(listLength(g_pserver->ready_keys) != 0) {
list *l;
/* Point server.ready_keys to a fresh list and save the current one
/* Point g_pserver->ready_keys to a fresh list and save the current one
* locally. This way as we run the old list we are free to call
* signalKeyAsReady() that may push new elements in server.ready_keys
* signalKeyAsReady() that may push new elements in g_pserver->ready_keys
* when handling clients blocked into BRPOPLPUSH. */
l = server.ready_keys;
server.ready_keys = listCreate();
l = g_pserver->ready_keys;
g_pserver->ready_keys = listCreate();
while(listLength(l) != 0) {
listNode *ln = listFirst(l);
@ -613,7 +613,7 @@ void unblockClientWaitingData(client *c) {
}
/* If the specified key has clients blocked waiting for list pushes, this
* function will put the key reference into the server.ready_keys list.
* function will put the key reference into the g_pserver->ready_keys list.
* Note that db->ready_keys is a hash table that allows us to avoid putting
* the same key again and again in the list in case of multiple pushes
* made by a script or in the context of MULTI/EXEC.
@ -628,12 +628,12 @@ void signalKeyAsReady(redisDb *db, robj *key) {
/* Key was already signaled? No need to queue it again. */
if (dictFind(db->ready_keys,key) != NULL) return;
/* Ok, we need to queue this key into server.ready_keys. */
/* Ok, we need to queue this key into g_pserver->ready_keys. */
rl = (readyList*)zmalloc(sizeof(*rl), MALLOC_SHARED);
rl->key = key;
rl->db = db;
incrRefCount(key);
listAddNodeTail(server.ready_keys,rl);
listAddNodeTail(g_pserver->ready_keys,rl);
/* We also add the key in the db->ready_keys dictionary in order
* to avoid adding it multiple times into a list with a simple O(1)

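The handleClientsBlockedOnKeys() hunk above uses a swap-and-drain pattern: g_pserver->ready_keys is detached into a local list before serving it, so handlers that call signalKeyAsReady() append to a fresh list rather than the one being iterated. A stripped-down sketch of the shape, where serveReadyKey() is a hypothetical stand-in for the real per-key handling:

    while (listLength(g_pserver->ready_keys) != 0) {
        /* Detach the current batch; new signals go to a fresh list. */
        list *l = g_pserver->ready_keys;
        g_pserver->ready_keys = listCreate();

        while (listLength(l) != 0) {
            listNode *ln = listFirst(l);
            serveReadyKey((readyList*)listNodeValue(ln)); /* may signal new keys */
            listDelNode(l, ln);
        }
        listRelease(l);
    }
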
src/childinfo.c

@ -34,52 +34,52 @@
* RDB / AOF saving process from the child to the parent (for instance
* the amount of copy on write memory used) */
void openChildInfoPipe(void) {
if (pipe(server.child_info_pipe) == -1) {
if (pipe(g_pserver->child_info_pipe) == -1) {
/* On error our two file descriptors should still be set to -1,
* but we call closeChildInfoPipe() anyway since it can't hurt. */
closeChildInfoPipe();
} else if (anetNonBlock(NULL,server.child_info_pipe[0]) != ANET_OK) {
} else if (anetNonBlock(NULL,g_pserver->child_info_pipe[0]) != ANET_OK) {
closeChildInfoPipe();
} else {
memset(&server.child_info_data,0,sizeof(server.child_info_data));
memset(&g_pserver->child_info_data,0,sizeof(g_pserver->child_info_data));
}
}
/* Close the pipes opened with openChildInfoPipe(). */
void closeChildInfoPipe(void) {
if (server.child_info_pipe[0] != -1 ||
server.child_info_pipe[1] != -1)
if (g_pserver->child_info_pipe[0] != -1 ||
g_pserver->child_info_pipe[1] != -1)
{
close(server.child_info_pipe[0]);
close(server.child_info_pipe[1]);
server.child_info_pipe[0] = -1;
server.child_info_pipe[1] = -1;
close(g_pserver->child_info_pipe[0]);
close(g_pserver->child_info_pipe[1]);
g_pserver->child_info_pipe[0] = -1;
g_pserver->child_info_pipe[1] = -1;
}
}
/* Send COW data to parent. The child should call this function after populating
* the corresponding fields it wants to send (according to the process type). */
void sendChildInfo(int ptype) {
if (server.child_info_pipe[1] == -1) return;
server.child_info_data.magic = CHILD_INFO_MAGIC;
server.child_info_data.process_type = ptype;
ssize_t wlen = sizeof(server.child_info_data);
if (write(server.child_info_pipe[1],&server.child_info_data,wlen) != wlen) {
if (g_pserver->child_info_pipe[1] == -1) return;
g_pserver->child_info_data.magic = CHILD_INFO_MAGIC;
g_pserver->child_info_data.process_type = ptype;
ssize_t wlen = sizeof(g_pserver->child_info_data);
if (write(g_pserver->child_info_pipe[1],&g_pserver->child_info_data,wlen) != wlen) {
/* Nothing to do on error, this will be detected by the other side. */
}
}
/* Receive COW data from parent. */
void receiveChildInfo(void) {
if (server.child_info_pipe[0] == -1) return;
ssize_t wlen = sizeof(server.child_info_data);
if (read(server.child_info_pipe[0],&server.child_info_data,wlen) == wlen &&
server.child_info_data.magic == CHILD_INFO_MAGIC)
if (g_pserver->child_info_pipe[0] == -1) return;
ssize_t wlen = sizeof(g_pserver->child_info_data);
if (read(g_pserver->child_info_pipe[0],&g_pserver->child_info_data,wlen) == wlen &&
g_pserver->child_info_data.magic == CHILD_INFO_MAGIC)
{
if (server.child_info_data.process_type == CHILD_INFO_TYPE_RDB) {
server.stat_rdb_cow_bytes = server.child_info_data.cow_size;
} else if (server.child_info_data.process_type == CHILD_INFO_TYPE_AOF) {
server.stat_aof_cow_bytes = server.child_info_data.cow_size;
if (g_pserver->child_info_data.process_type == CHILD_INFO_TYPE_RDB) {
g_pserver->stat_rdb_cow_bytes = g_pserver->child_info_data.cow_size;
} else if (g_pserver->child_info_data.process_type == CHILD_INFO_TYPE_AOF) {
g_pserver->stat_aof_cow_bytes = g_pserver->child_info_data.cow_size;
}
}
}
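
The childinfo.c code above is a small instance of a common IPC pattern: a fixed-size struct written in one shot over a pipe and accepted on the other side only if the read is complete and a magic field matches. A generic sketch with a toy struct and toy magic value (not the real child_info_data layout or CHILD_INFO_MAGIC):

    #include <stdint.h>
    #include <unistd.h>

    #define TOY_MAGIC 0x12345678ABCDEF00ULL   /* placeholder magic */

    typedef struct {
        uint64_t magic;          /* guards against short or garbage reads */
        int process_type;        /* which kind of child produced this */
        size_t cow_size;         /* copy-on-write bytes observed */
    } toyChildInfo;

    /* Writer (child): one write of the whole struct. */
    int toySendInfo(int fd, toyChildInfo *ci) {
        ci->magic = TOY_MAGIC;
        return (write(fd, ci, sizeof(*ci)) == (ssize_t)sizeof(*ci)) ? 0 : -1;
    }

    /* Reader (parent): accept only a full read with the right magic. */
    int toyRecvInfo(int fd, toyChildInfo *ci) {
        if (read(fd, ci, sizeof(*ci)) != (ssize_t)sizeof(*ci)) return -1;
        return (ci->magic == TOY_MAGIC) ? 0 : -1;
    }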

File diff suppressed because it is too large

File diff suppressed because it is too large

src/db.c

@ -60,11 +60,11 @@ static robj *lookupKey(redisDb *db, robj *key, int flags) {
/* Update the access time for the ageing algorithm.
* Don't do it if we have a saving child, as this will trigger
* a copy on write madness. */
if (server.rdb_child_pid == -1 &&
server.aof_child_pid == -1 &&
if (g_pserver->rdb_child_pid == -1 &&
g_pserver->aof_child_pid == -1 &&
!(flags & LOOKUP_NOTOUCH))
{
if (server.maxmemory_policy & MAXMEMORY_FLAG_LFU) {
if (g_pserver->maxmemory_policy & MAXMEMORY_FLAG_LFU) {
updateLFU(val);
} else {
val->lru = LRU_CLOCK();
@ -110,8 +110,8 @@ robj_roptr lookupKeyReadWithFlags(redisDb *db, robj *key, int flags) {
/* Key expired. If we are in the context of a master, expireIfNeeded()
* returns 0 only when the key does not exist at all, so it's safe
* to return NULL ASAP. */
if (listLength(server.masters) == 0) {
server.stat_keyspace_misses++;
if (listLength(g_pserver->masters) == 0) {
g_pserver->stat_keyspace_misses++;
notifyKeyspaceEvent(NOTIFY_KEY_MISS, "keymiss", key, db->id);
return NULL;
}
@ -133,18 +133,18 @@ robj_roptr lookupKeyReadWithFlags(redisDb *db, robj *key, int flags) {
serverTL->current_client->cmd &&
serverTL->current_client->cmd->flags & CMD_READONLY)
{
server.stat_keyspace_misses++;
g_pserver->stat_keyspace_misses++;
notifyKeyspaceEvent(NOTIFY_KEY_MISS, "keymiss", key, db->id);
return NULL;
}
}
val = lookupKey(db,key,flags);
if (val == NULL) {
server.stat_keyspace_misses++;
g_pserver->stat_keyspace_misses++;
notifyKeyspaceEvent(NOTIFY_KEY_MISS, "keymiss", key, db->id);
}
else
server.stat_keyspace_hits++;
g_pserver->stat_keyspace_hits++;
return val;
}
@ -186,7 +186,7 @@ int dbAddCore(redisDb *db, robj *key, robj *val) {
if (val->type == OBJ_LIST ||
val->type == OBJ_ZSET)
signalKeyAsReady(db, key);
if (server.cluster_enabled) slotToKeyAdd(key);
if (g_pserver->cluster_enabled) slotToKeyAdd(key);
}
else
{
@ -210,7 +210,7 @@ void dbOverwriteCore(redisDb *db, dictEntry *de, robj *val, bool fUpdateMvcc)
{
dictEntry auxentry = *de;
robj *old = (robj*)dictGetVal(de);
if (server.maxmemory_policy & MAXMEMORY_FLAG_LFU) {
if (g_pserver->maxmemory_policy & MAXMEMORY_FLAG_LFU) {
val->lru = old->lru;
}
if (fUpdateMvcc)
@ -218,7 +218,7 @@ void dbOverwriteCore(redisDb *db, dictEntry *de, robj *val, bool fUpdateMvcc)
dictSetVal(db->pdict, de, val);
if (server.lazyfree_lazy_server_del) {
if (g_pserver->lazyfree_lazy_server_del) {
freeObjAsync(old);
dictSetVal(db->pdict, &auxentry, NULL);
}
@ -304,7 +304,7 @@ robj *dbRandomKey(redisDb *db) {
key = (sds)dictGetKey(de);
keyobj = createStringObject(key,sdslen(key));
if (dictFind(db->expires,key)) {
if (allvolatile && listLength(server.masters) && --maxtries == 0) {
if (allvolatile && listLength(g_pserver->masters) && --maxtries == 0) {
/* If the DB is composed only of keys with an expire set,
* it could happen that all the keys are already logically
* expired in the slave, so the function cannot stop because
@ -330,7 +330,7 @@ int dbSyncDelete(redisDb *db, robj *key) {
* the key, because it is shared with the main dictionary. */
if (dictSize(db->expires) > 0) dictDelete(db->expires,ptrFromObj(key));
if (dictDelete(db->pdict,ptrFromObj(key)) == DICT_OK) {
if (server.cluster_enabled) slotToKeyDel(key);
if (g_pserver->cluster_enabled) slotToKeyDel(key);
return 1;
} else {
return 0;
@ -340,7 +340,7 @@ int dbSyncDelete(redisDb *db, robj *key) {
/* This is a wrapper whose behavior depends on the Redis lazy free
* configuration. Deletes the key synchronously or asynchronously. */
int dbDelete(redisDb *db, robj *key) {
return server.lazyfree_lazy_server_del ? dbAsyncDelete(db,key) :
return g_pserver->lazyfree_lazy_server_del ? dbAsyncDelete(db,key) :
dbSyncDelete(db,key);
}
@ -382,7 +382,7 @@ robj *dbUnshareStringValue(redisDb *db, robj *key, robj *o) {
return o;
}
/* Remove all keys from all the databases in a Redis server.
* If callback is given the function is called from time to time to
* signal that work is in progress.
*
@ -414,15 +414,15 @@ long long emptyDb(int dbnum, int flags, void(callback)(void*)) {
}
for (int j = startdb; j <= enddb; j++) {
removed += dictSize(server.db[j].pdict);
removed += dictSize(g_pserver->db[j].pdict);
if (async) {
emptyDbAsync(&server.db[j]);
emptyDbAsync(&g_pserver->db[j]);
} else {
dictEmpty(server.db[j].pdict,callback);
dictEmpty(server.db[j].expires,callback);
dictEmpty(g_pserver->db[j].pdict,callback);
dictEmpty(g_pserver->db[j].expires,callback);
}
}
if (server.cluster_enabled) {
if (g_pserver->cluster_enabled) {
if (async) {
slotToKeyFlushAsync();
} else {
@ -436,7 +436,7 @@ long long emptyDb(int dbnum, int flags, void(callback)(void*)) {
int selectDb(client *c, int id) {
if (id < 0 || id >= cserver.dbnum)
return C_ERR;
c->db = &server.db[id];
c->db = &g_pserver->db[id];
return C_OK;
}
@ -491,7 +491,7 @@ void flushdbCommand(client *c) {
if (getFlushCommandFlags(c,&flags) == C_ERR) return;
signalFlushedDb(c->db->id);
server.dirty += emptyDb(c->db->id,flags,NULL);
g_pserver->dirty += emptyDb(c->db->id,flags,NULL);
addReply(c,shared.ok);
}
@ -503,19 +503,19 @@ void flushallCommand(client *c) {
if (getFlushCommandFlags(c,&flags) == C_ERR) return;
signalFlushedDb(-1);
server.dirty += emptyDb(-1,flags,NULL);
g_pserver->dirty += emptyDb(-1,flags,NULL);
addReply(c,shared.ok);
if (server.rdb_child_pid != -1) killRDBChild();
if (server.saveparamslen > 0) {
if (g_pserver->rdb_child_pid != -1) killRDBChild();
if (g_pserver->saveparamslen > 0) {
/* Normally rdbSave() will reset dirty, but we don't want this here
* as otherwise FLUSHALL will not be replicated nor put into the AOF. */
int saved_dirty = server.dirty;
int saved_dirty = g_pserver->dirty;
rdbSaveInfo rsi, *rsiptr;
rsiptr = rdbPopulateSaveInfo(&rsi);
rdbSave(rsiptr);
server.dirty = saved_dirty;
g_pserver->dirty = saved_dirty;
}
server.dirty++;
g_pserver->dirty++;
}
/* This command implements DEL and LAZYDEL. */
@ -530,7 +530,7 @@ void delGenericCommand(client *c, int lazy) {
signalModifiedKey(c->db,c->argv[j]);
notifyKeyspaceEvent(NOTIFY_GENERIC,
"del",c->argv[j],c->db->id);
server.dirty++;
g_pserver->dirty++;
numdel++;
}
}
@ -564,7 +564,7 @@ void selectCommand(client *c) {
"invalid DB index") != C_OK)
return;
if (server.cluster_enabled && id != 0) {
if (g_pserver->cluster_enabled && id != 0) {
addReplyError(c,"SELECT is not allowed in cluster mode");
return;
}
@ -864,7 +864,7 @@ void dbsizeCommand(client *c) {
}
void lastsaveCommand(client *c) {
addReplyLongLong(c,server.lastsave);
addReplyLongLong(c,g_pserver->lastsave);
}
void typeCommand(client *c) {
@ -913,7 +913,7 @@ void shutdownCommand(client *c) {
* with half-read data).
*
* Also when in Sentinel mode clear the SAVE flag and force NOSAVE. */
if (server.loading || server.sentinel_mode)
if (g_pserver->loading || g_pserver->sentinel_mode)
flags = (flags & ~SHUTDOWN_SAVE) | SHUTDOWN_NOSAVE;
if (prepareForShutdown(flags) == C_OK) exit(0);
addReplyError(c,"Errors trying to SHUTDOWN. Check logs.");
@ -957,7 +957,7 @@ void renameGenericCommand(client *c, int nx) {
c->argv[1],c->db->id);
notifyKeyspaceEvent(NOTIFY_GENERIC,"rename_to",
c->argv[2],c->db->id);
server.dirty++;
g_pserver->dirty++;
addReply(c,nx ? shared.cone : shared.ok);
}
@ -975,7 +975,7 @@ void moveCommand(client *c) {
int srcid;
long long dbid, expire;
if (server.cluster_enabled) {
if (g_pserver->cluster_enabled) {
addReplyError(c,"MOVE is not allowed in cluster mode");
return;
}
@ -1020,7 +1020,7 @@ void moveCommand(client *c) {
/* OK! key moved, free the entry in the source DB */
dbDelete(src,c->argv[1]);
server.dirty++;
g_pserver->dirty++;
addReply(c,shared.cone);
}
@ -1054,8 +1054,8 @@ int dbSwapDatabases(int id1, int id2) {
if (id1 < 0 || id1 >= cserver.dbnum ||
id2 < 0 || id2 >= cserver.dbnum) return C_ERR;
if (id1 == id2) return C_OK;
redisDb aux = server.db[id1];
redisDb *db1 = &server.db[id1], *db2 = &server.db[id2];
redisDb aux = g_pserver->db[id1];
redisDb *db1 = &g_pserver->db[id1], *db2 = &g_pserver->db[id2];
/* Swap hash tables. Note that we don't swap blocking_keys,
* ready_keys and watched_keys, since we want clients to
@ -1087,7 +1087,7 @@ void swapdbCommand(client *c) {
long id1, id2;
/* Not allowed in cluster mode: we have just DB 0 there. */
if (server.cluster_enabled) {
if (g_pserver->cluster_enabled) {
addReplyError(c,"SWAPDB is not allowed in cluster mode");
return;
}
@ -1106,7 +1106,7 @@ void swapdbCommand(client *c) {
addReplyError(c,"DB index is out of range");
return;
} else {
server.dirty++;
g_pserver->dirty++;
addReply(c,shared.ok);
}
}
@ -1136,7 +1136,7 @@ void setExpire(client *c, redisDb *db, robj *key, long long when) {
de = dictAddOrFind(db->expires,dictGetKey(kde));
dictSetSignedIntegerVal(de,when);
int writable_slave = listLength(server.masters) && server.repl_slave_ro == 0;
int writable_slave = listLength(g_pserver->masters) && g_pserver->repl_slave_ro == 0;
if (c && writable_slave && !(c->flags & CLIENT_MASTER))
rememberSlaveKeyWithExpire(db,key);
}
@ -1173,9 +1173,9 @@ void propagateExpire(redisDb *db, robj *key, int lazy) {
incrRefCount(argv[0]);
incrRefCount(argv[1]);
if (server.aof_state != AOF_OFF)
if (g_pserver->aof_state != AOF_OFF)
feedAppendOnlyFile(cserver.delCommand,db->id,argv,2);
replicationFeedSlaves(server.slaves,db->id,argv,2);
replicationFeedSlaves(g_pserver->slaves,db->id,argv,2);
decrRefCount(argv[0]);
decrRefCount(argv[1]);
@ -1188,14 +1188,14 @@ int keyIsExpired(redisDb *db, robj *key) {
if (when < 0) return 0; /* No expire for this key */
/* Don't expire anything while loading. It will be done later. */
if (server.loading) return 0;
if (g_pserver->loading) return 0;
/* If we are in the context of a Lua script, we pretend that time is
* frozen at the time the Lua script started. This way a key can expire
* only the first time it is accessed and not in the middle of the
* script execution, making propagation to slaves / AOF consistent.
* See issue #1525 on Github for more information. */
mstime_t now = server.lua_caller ? server.lua_time_start : mstime();
mstime_t now = g_pserver->lua_caller ? g_pserver->lua_time_start : mstime();
return now > when;
}
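The frozen-clock rule keeps expiry deterministic inside scripts: every check during one EVAL sees the same "now", so a key can flip to expired at most once per run. A self-contained sketch of the same decision (the two globals are assumed stand-ins for the server fields):

#include <chrono>
#include <cstdint>

typedef int64_t mstime_t;

static void    *lua_caller     = nullptr; // non-null while a script runs (assumed)
static mstime_t lua_time_start = 0;       // wall clock captured at script start

static mstime_t nowMs() {
    using namespace std::chrono;
    return duration_cast<milliseconds>(
        system_clock::now().time_since_epoch()).count();
}

// Same test as keyIsExpired(): time stands still for the whole script.
static bool logicallyExpired(mstime_t when) {
    mstime_t now = lua_caller ? lua_time_start : nowMs();
    return now > when;
}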
@ -1230,14 +1230,14 @@ int expireIfNeeded(redisDb *db, robj *key) {
* Still we try to return the right information to the caller,
* that is, 0 if we think the key should be still valid, 1 if
* we think the key is expired at this time. */
if (listLength(server.masters)) return 1;
if (listLength(g_pserver->masters)) return 1;
/* Delete the key */
server.stat_expiredkeys++;
propagateExpire(db,key,server.lazyfree_lazy_expire);
g_pserver->stat_expiredkeys++;
propagateExpire(db,key,g_pserver->lazyfree_lazy_expire);
notifyKeyspaceEvent(NOTIFY_EXPIRED,
"expired",key,db->id);
return server.lazyfree_lazy_expire ? dbAsyncDelete(db,key) :
return g_pserver->lazyfree_lazy_expire ? dbAsyncDelete(db,key) :
dbSyncDelete(db,key);
}
@ -1528,15 +1528,15 @@ void slotToKeyUpdateKey(robj *key, int add) {
unsigned char buf[64];
unsigned char *indexed = buf;
server.cluster->slots_keys_count[hashslot] += add ? 1 : -1;
g_pserver->cluster->slots_keys_count[hashslot] += add ? 1 : -1;
if (keylen+2 > 64) indexed = (unsigned char*)zmalloc(keylen+2, MALLOC_SHARED);
indexed[0] = (hashslot >> 8) & 0xff;
indexed[1] = hashslot & 0xff;
memcpy(indexed+2,ptrFromObj(key),keylen);
if (add) {
raxInsert(server.cluster->slots_to_keys,indexed,keylen+2,NULL,NULL);
raxInsert(g_pserver->cluster->slots_to_keys,indexed,keylen+2,NULL,NULL);
} else {
raxRemove(server.cluster->slots_to_keys,indexed,keylen+2,NULL);
raxRemove(g_pserver->cluster->slots_to_keys,indexed,keylen+2,NULL);
}
if (indexed != buf) zfree(indexed);
}
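The two-byte prefix is the whole trick behind the slot index: entries sort by slot first, so a rax range scan starting at {hi, lo} enumerates exactly one slot's keys. A minimal sketch of building such an entry (standalone, assumed plain byte keys):

#include <cstdint>
#include <cstring>
#include <vector>

// Index entry layout used above: 2 big-endian hashslot bytes, then the key.
// Big-endian keeps radix-tree ordering grouped by slot.
static std::vector<unsigned char> slotIndexEntry(unsigned hashslot,
                                                 const char *key,
                                                 std::size_t keylen) {
    std::vector<unsigned char> indexed(keylen + 2);
    indexed[0] = (hashslot >> 8) & 0xff;
    indexed[1] = hashslot & 0xff;
    std::memcpy(indexed.data() + 2, key, keylen);
    return indexed;
}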
@ -1550,10 +1550,10 @@ void slotToKeyDel(robj *key) {
}
void slotToKeyFlush(void) {
raxFree(server.cluster->slots_to_keys);
server.cluster->slots_to_keys = raxNew();
memset(server.cluster->slots_keys_count,0,
sizeof(server.cluster->slots_keys_count));
raxFree(g_pserver->cluster->slots_to_keys);
g_pserver->cluster->slots_to_keys = raxNew();
memset(g_pserver->cluster->slots_keys_count,0,
sizeof(g_pserver->cluster->slots_keys_count));
}
/* Populate the specified array of objects with keys in the specified slot.
@ -1566,7 +1566,7 @@ unsigned int getKeysInSlot(unsigned int hashslot, robj **keys, unsigned int coun
indexed[0] = (hashslot >> 8) & 0xff;
indexed[1] = hashslot & 0xff;
raxStart(&iter,server.cluster->slots_to_keys);
raxStart(&iter,g_pserver->cluster->slots_to_keys);
raxSeek(&iter,">=",indexed,2);
while(count-- && raxNext(&iter)) {
if (iter.key[0] != indexed[0] || iter.key[1] != indexed[1]) break;
@ -1585,13 +1585,13 @@ unsigned int delKeysInSlot(unsigned int hashslot) {
indexed[0] = (hashslot >> 8) & 0xff;
indexed[1] = hashslot & 0xff;
raxStart(&iter,server.cluster->slots_to_keys);
while(server.cluster->slots_keys_count[hashslot]) {
raxStart(&iter,g_pserver->cluster->slots_to_keys);
while(g_pserver->cluster->slots_keys_count[hashslot]) {
raxSeek(&iter,">=",indexed,2);
raxNext(&iter);
robj *key = createStringObject((char*)iter.key+2,iter.key_len-2);
dbDelete(&server.db[0],key);
dbDelete(&g_pserver->db[0],key);
decrRefCount(key);
j++;
}
@ -1600,5 +1600,5 @@ unsigned int delKeysInSlot(unsigned int hashslot) {
}
unsigned int countKeysInSlot(unsigned int hashslot) {
return server.cluster->slots_keys_count[hashslot];
return g_pserver->cluster->slots_keys_count[hashslot];
}

View File

@ -267,7 +267,7 @@ void computeDatasetDigest(unsigned char *final) {
memset(final,0,20); /* Start with a clean result */
for (j = 0; j < cserver.dbnum; j++) {
redisDb *db = server.db+j;
redisDb *db = g_pserver->db+j;
if (dictSize(db->pdict) == 0) continue;
di = dictGetSafeIterator(db->pdict);
@ -345,7 +345,7 @@ NULL
(RESTART_SERVER_GRACEFULLY|RESTART_SERVER_CONFIG_REWRITE) :
RESTART_SERVER_NONE;
restartServer(flags,delay);
addReplyError(c,"failed to restart the server. Check server logs.");
addReplyError(c,"failed to restart the g_pserver-> Check server logs.");
} else if (!strcasecmp(szFromObj(c->argv[1]),"oom")) {
void *ptr = zmalloc(ULONG_MAX, MALLOC_LOCAL); /* Should trigger an out of memory. */
zfree(ptr);
@ -374,16 +374,16 @@ NULL
serverLog(LL_WARNING,"DB reloaded by DEBUG RELOAD");
addReply(c,shared.ok);
} else if (!strcasecmp(szFromObj(c->argv[1]),"loadaof")) {
if (server.aof_state != AOF_OFF) flushAppendOnlyFile(1);
if (g_pserver->aof_state != AOF_OFF) flushAppendOnlyFile(1);
emptyDb(-1,EMPTYDB_NO_FLAGS,NULL);
protectClient(c);
int ret = loadAppendOnlyFile(server.aof_filename);
int ret = loadAppendOnlyFile(g_pserver->aof_filename);
unprotectClient(c);
if (ret != C_OK) {
addReply(c,shared.err);
return;
}
server.dirty = 0; /* Prevent AOF / replication */
g_pserver->dirty = 0; /* Prevent AOF / replication */
serverLog(LL_WARNING,"Append Only File loaded by DEBUG LOADAOF");
addReply(c,shared.ok);
} else if (!strcasecmp(szFromObj(c->argv[1]),"object") && c->argc == 3) {
@ -596,12 +596,12 @@ NULL
} else if (!strcasecmp(szFromObj(c->argv[1]),"set-active-expire") &&
c->argc == 3)
{
server.active_expire_enabled = atoi(szFromObj(c->argv[2]));
g_pserver->active_expire_enabled = atoi(szFromObj(c->argv[2]));
addReply(c,shared.ok);
} else if (!strcasecmp(szFromObj(c->argv[1]),"lua-always-replicate-commands") &&
c->argc == 3)
{
server.lua_always_replicate_commands = atoi(szFromObj(c->argv[2]));
g_pserver->lua_always_replicate_commands = atoi(szFromObj(c->argv[2]));
addReply(c,shared.ok);
} else if (!strcasecmp(szFromObj(c->argv[1]),"error") && c->argc == 3) {
sds errstr = sdsnewlen("-",1);
@ -634,11 +634,11 @@ NULL
}
stats = sdscatprintf(stats,"[Dictionary HT]\n");
dictGetStats(buf,sizeof(buf),server.db[dbid].pdict);
dictGetStats(buf,sizeof(buf),g_pserver->db[dbid].pdict);
stats = sdscat(stats,buf);
stats = sdscatprintf(stats,"[Expires HT]\n");
dictGetStats(buf,sizeof(buf),server.db[dbid].expires);
dictGetStats(buf,sizeof(buf),g_pserver->db[dbid].expires);
stats = sdscat(stats,buf);
addReplyBulkSds(c,stats);
@ -692,9 +692,9 @@ void _serverAssert(const char *estr, const char *file, int line) {
serverLog(LL_WARNING,"=== ASSERTION FAILED ===");
serverLog(LL_WARNING,"==> %s:%d '%s' is not true",file,line,estr);
#ifdef HAVE_BACKTRACE
server.assert_failed = estr;
server.assert_file = file;
server.assert_line = line;
g_pserver->assert_failed = estr;
g_pserver->assert_file = file;
g_pserver->assert_line = line;
serverLog(LL_WARNING,"(forcing SIGSEGV to print the bug report.)");
#endif
*((char*)-1) = 'x';
@ -779,10 +779,10 @@ void _serverPanic(const char *file, int line, const char *msg, ...) {
}
void bugReportStart(void) {
if (server.bug_report_start == 0) {
if (g_pserver->bug_report_start == 0) {
serverLogRaw(LL_WARNING|LL_RAW,
"\n\n=== REDIS BUG REPORT START: Cut & paste starting from here ===\n");
server.bug_report_start = 1;
g_pserver->bug_report_start = 1;
}
}
@ -1126,16 +1126,16 @@ void logRegisters(ucontext_t *uc) {
*
* Close it with closeDirectLogFiledes(). */
int openDirectLogFiledes(void) {
int log_to_stdout = server.logfile[0] == '\0';
int log_to_stdout = g_pserver->logfile[0] == '\0';
int fd = log_to_stdout ?
STDOUT_FILENO :
open(server.logfile, O_APPEND|O_CREAT|O_WRONLY, 0644);
open(g_pserver->logfile, O_APPEND|O_CREAT|O_WRONLY, 0644);
return fd;
}
/* Used to close what closeDirectLogFiledes() returns. */
void closeDirectLogFiledes(int fd) {
int log_to_stdout = server.logfile[0] == '\0';
int log_to_stdout = g_pserver->logfile[0] == '\0';
if (!log_to_stdout) close(fd);
}
@ -1372,8 +1372,8 @@ void sigsegvHandler(int sig, siginfo_t *info, void *secret) {
"Accessing address: %p", (void*)info->si_addr);
}
serverLog(LL_WARNING,
"Failed assertion: %s (%s:%d)", server.assert_failed,
server.assert_file, server.assert_line);
"Failed assertion: %s (%s:%d)", g_pserver->assert_failed,
g_pserver->assert_file, g_pserver->assert_line);
/* Log the stack trace */
serverLogRaw(LL_WARNING|LL_RAW, "\n------ STACK TRACE ------\n");
@ -1521,7 +1521,7 @@ void watchdogScheduleSignal(int period) {
void enableWatchdog(int period) {
int min_period;
if (server.watchdog_period == 0) {
if (g_pserver->watchdog_period == 0) {
struct sigaction act;
/* Watchdog was actually disabled, so we have to set up the signal
@ -1534,16 +1534,16 @@ void enableWatchdog(int period) {
/* If the configured period is smaller than twice the timer period, it is
* too short for the software watchdog to work reliably. Fix it now
* if needed. */
min_period = (1000/server.hz)*2;
min_period = (1000/g_pserver->hz)*2;
if (period < min_period) period = min_period;
watchdogScheduleSignal(period); /* Adjust the current timer. */
server.watchdog_period = period;
g_pserver->watchdog_period = period;
}
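The min_period clamp is plain arithmetic: the watchdog timer ticks every 1000/hz milliseconds, and a period shorter than two ticks cannot be detected reliably. With hz=10 the tick is 100 ms, so anything below 200 ms is rounded up. A one-function sketch (hz assumed positive):

// Clamp a requested watchdog period (ms) to at least two timer ticks.
// Example: clampWatchdogPeriod(50, 10) returns 200.
static int clampWatchdogPeriod(int period_ms, int hz) {
    int min_period = (1000 / hz) * 2;
    return period_ms < min_period ? min_period : period_ms;
}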
/* Disable the software watchdog. */
void disableWatchdog(void) {
struct sigaction act;
if (server.watchdog_period == 0) return; /* Already disabled. */
if (g_pserver->watchdog_period == 0) return; /* Already disabled. */
watchdogScheduleSignal(0); /* Stop the current timer. */
/* Set the signal handler to SIG_IGN, this will also remove pending
@ -1552,5 +1552,5 @@ void disableWatchdog(void) {
act.sa_flags = 0;
act.sa_handler = SIG_IGN;
sigaction(SIGALRM, &act, NULL);
server.watchdog_period = 0;
g_pserver->watchdog_period = 0;
}

View File

@ -59,14 +59,14 @@ void* activeDefragAlloc(void *ptr) {
size_t size;
void *newptr;
if(!je_get_defrag_hint(ptr, &bin_util, &run_util)) {
server.stat_active_defrag_misses++;
g_pserver->stat_active_defrag_misses++;
return NULL;
}
/* if this run is more utilized than the average utilization in this bin
* (or it is full), skip it. This will eventually move all the allocations
* from relatively empty runs into relatively full runs. */
if (run_util > bin_util || run_util == 1<<16) {
server.stat_active_defrag_misses++;
g_pserver->stat_active_defrag_misses++;
return NULL;
}
/* move this allocation to a new allocation.
@ -442,7 +442,7 @@ long scanLaterList(robj *ob) {
quicklist *ql = (quicklist*)ptrFromObj(ob);
if (ob->type != OBJ_LIST || ob->encoding != OBJ_ENCODING_QUICKLIST)
return 0;
server.stat_active_defrag_scanned+=ql->len;
g_pserver->stat_active_defrag_scanned+=ql->len;
return activeDefragQuickListNodes(ql);
}
@ -455,7 +455,7 @@ void scanLaterZsetCallback(void *privdata, const dictEntry *_de) {
dictEntry *de = (dictEntry*)_de;
scanLaterZsetData *data = (scanLaterZsetData*)privdata;
data->defragged += activeDefragZsetEntry(data->zs, de);
server.stat_active_defrag_scanned++;
g_pserver->stat_active_defrag_scanned++;
}
long scanLaterZset(robj *ob, unsigned long *cursor) {
@ -474,7 +474,7 @@ void scanLaterSetCallback(void *privdata, const dictEntry *_de) {
sds sdsele = (sds)dictGetKey(de), newsds;
if ((newsds = activeDefragSds(sdsele)))
(*defragged)++, de->key = newsds;
server.stat_active_defrag_scanned++;
g_pserver->stat_active_defrag_scanned++;
}
long scanLaterSet(robj *ob, unsigned long *cursor) {
@ -495,7 +495,7 @@ void scanLaterHashCallback(void *privdata, const dictEntry *_de) {
sdsele = (sds)dictGetVal(de);
if ((newsds = activeDefragSds(sdsele)))
(*defragged)++, de->v.val = newsds;
server.stat_active_defrag_scanned++;
g_pserver->stat_active_defrag_scanned++;
}
long scanLaterHash(robj *ob, unsigned long *cursor) {
@ -837,12 +837,12 @@ long defragKey(redisDb *db, dictEntry *de) {
/* Defrag scan callback for the main db dictionary. */
void defragScanCallback(void *privdata, const dictEntry *de) {
long defragged = defragKey((redisDb*)privdata, (dictEntry*)de);
server.stat_active_defrag_hits += defragged;
g_pserver->stat_active_defrag_hits += defragged;
if(defragged)
server.stat_active_defrag_key_hits++;
g_pserver->stat_active_defrag_key_hits++;
else
server.stat_active_defrag_key_misses++;
server.stat_active_defrag_scanned++;
g_pserver->stat_active_defrag_key_misses++;
g_pserver->stat_active_defrag_scanned++;
}
/* Defrag scan callback for each hash table bucket,
@ -887,8 +887,8 @@ long defragOtherGlobals() {
/* there are many more pointers to defrag (e.g. client argv, output / aof buffers, etc.
* but we assume most of these are short lived, we only need to defrag allocations
* that remain static for a long time */
defragged += activeDefragSdsDict(server.lua_scripts, DEFRAG_SDS_DICT_VAL_IS_STROB);
defragged += activeDefragSdsListAndDict(server.repl_scriptcache_fifo, server.repl_scriptcache_dict, DEFRAG_SDS_DICT_NO_VAL);
defragged += activeDefragSdsDict(g_pserver->lua_scripts, DEFRAG_SDS_DICT_VAL_IS_STROB);
defragged += activeDefragSdsListAndDict(g_pserver->repl_scriptcache_fifo, g_pserver->repl_scriptcache_dict, DEFRAG_SDS_DICT_NO_VAL);
return defragged;
}
@ -898,16 +898,16 @@ int defragLaterItem(dictEntry *de, unsigned long *cursor, long long endtime) {
if (de) {
robj *ob = (robj*)dictGetVal(de);
if (ob->type == OBJ_LIST) {
server.stat_active_defrag_hits += scanLaterList(ob);
g_pserver->stat_active_defrag_hits += scanLaterList(ob);
*cursor = 0; /* list has no scan, we must finish it in one go */
} else if (ob->type == OBJ_SET) {
server.stat_active_defrag_hits += scanLaterSet(ob, cursor);
g_pserver->stat_active_defrag_hits += scanLaterSet(ob, cursor);
} else if (ob->type == OBJ_ZSET) {
server.stat_active_defrag_hits += scanLaterZset(ob, cursor);
g_pserver->stat_active_defrag_hits += scanLaterZset(ob, cursor);
} else if (ob->type == OBJ_HASH) {
server.stat_active_defrag_hits += scanLaterHash(ob, cursor);
g_pserver->stat_active_defrag_hits += scanLaterHash(ob, cursor);
} else if (ob->type == OBJ_STREAM) {
return scanLaterStraemListpacks(ob, cursor, endtime, &server.stat_active_defrag_hits);
return scanLaterStraemListpacks(ob, cursor, endtime, &g_pserver->stat_active_defrag_hits);
} else {
*cursor = 0; /* object type may have changed since we schedule it for later */
}
@ -922,8 +922,8 @@ int defragLaterStep(redisDb *db, long long endtime) {
static sds current_key = NULL;
static unsigned long cursor = 0;
unsigned int iterations = 0;
unsigned long long prev_defragged = server.stat_active_defrag_hits;
unsigned long long prev_scanned = server.stat_active_defrag_scanned;
unsigned long long prev_defragged = g_pserver->stat_active_defrag_hits;
unsigned long long prev_scanned = g_pserver->stat_active_defrag_scanned;
long long key_defragged;
do {
@ -952,7 +952,7 @@ int defragLaterStep(redisDb *db, long long endtime) {
/* each time we enter this function we need to fetch the key from the dict again (if it still exists) */
dictEntry *de = dictFind(db->pdict, current_key);
key_defragged = server.stat_active_defrag_hits;
key_defragged = g_pserver->stat_active_defrag_hits;
do {
int quit = 0;
if (defragLaterItem(de, &cursor, endtime))
@ -967,24 +967,24 @@ int defragLaterStep(redisDb *db, long long endtime) {
* (if we have a lot of pointers in one hash bucket, or rehashing),
* check if we reached the time limit. */
if (quit || (++iterations > 16 ||
server.stat_active_defrag_hits - prev_defragged > 512 ||
server.stat_active_defrag_scanned - prev_scanned > 64)) {
g_pserver->stat_active_defrag_hits - prev_defragged > 512 ||
g_pserver->stat_active_defrag_scanned - prev_scanned > 64)) {
if (quit || ustime() > endtime) {
if(key_defragged != server.stat_active_defrag_hits)
server.stat_active_defrag_key_hits++;
if(key_defragged != g_pserver->stat_active_defrag_hits)
g_pserver->stat_active_defrag_key_hits++;
else
server.stat_active_defrag_key_misses++;
g_pserver->stat_active_defrag_key_misses++;
return 1;
}
iterations = 0;
prev_defragged = server.stat_active_defrag_hits;
prev_scanned = server.stat_active_defrag_scanned;
prev_defragged = g_pserver->stat_active_defrag_hits;
prev_scanned = g_pserver->stat_active_defrag_scanned;
}
} while(cursor);
if(key_defragged != server.stat_active_defrag_hits)
server.stat_active_defrag_key_hits++;
if(key_defragged != g_pserver->stat_active_defrag_hits)
g_pserver->stat_active_defrag_key_hits++;
else
server.stat_active_defrag_key_misses++;
g_pserver->stat_active_defrag_key_misses++;
} while(1);
}
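Note how the loop amortizes clock reads: ustime() is consulted only after 16 iterations, 512 defrag hits, or 64 scanned entries, whichever comes first. A condensed sketch of that budgeting idiom (field names are assumptions, not the server's):

#include <cstdint>

struct DefragBudget {
    unsigned iterations = 0;
    uint64_t prev_hits = 0, prev_scanned = 0;
};

// Returns true when enough work accumulated to justify one ustime() call.
static bool timeCheckDue(DefragBudget &b, uint64_t hits, uint64_t scanned) {
    if (++b.iterations > 16 || hits - b.prev_hits > 512 ||
        scanned - b.prev_scanned > 64) {
        b.iterations = 0;
        b.prev_hits = hits;
        b.prev_scanned = scanned;
        return true;
    }
    return false;
}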
@ -996,7 +996,7 @@ void computeDefragCycles() {
size_t frag_bytes;
float frag_pct = getAllocatorFragmentation(&frag_bytes);
/* If we're not already running, and below the threshold, exit. */
if (!server.active_defrag_running) {
if (!g_pserver->active_defrag_running) {
if(frag_pct < cserver.active_defrag_threshold_lower || frag_bytes < cserver.active_defrag_ignore_bytes)
return;
}
@ -1012,10 +1012,10 @@ void computeDefragCycles() {
cserver.active_defrag_cycle_max);
/* We allow increasing the aggressiveness during a scan, but don't
* reduce it. */
if (!server.active_defrag_running ||
cpu_pct > server.active_defrag_running)
if (!g_pserver->active_defrag_running ||
cpu_pct > g_pserver->active_defrag_running)
{
server.active_defrag_running = cpu_pct;
g_pserver->active_defrag_running = cpu_pct;
serverLog(LL_VERBOSE,
"Starting active defrag, frag=%.0f%%, frag_bytes=%zu, cpu=%d%%",
frag_pct, frag_bytes, cpu_pct);
@ -1031,13 +1031,13 @@ void activeDefragCycle(void) {
static redisDb *db = NULL;
static long long start_scan, start_stat;
unsigned int iterations = 0;
unsigned long long prev_defragged = server.stat_active_defrag_hits;
unsigned long long prev_scanned = server.stat_active_defrag_scanned;
unsigned long long prev_defragged = g_pserver->stat_active_defrag_hits;
unsigned long long prev_scanned = g_pserver->stat_active_defrag_scanned;
long long start, timelimit, endtime;
mstime_t latency;
int quit = 0;
if (server.aof_child_pid!=-1 || server.rdb_child_pid!=-1)
if (g_pserver->aof_child_pid!=-1 || g_pserver->rdb_child_pid!=-1)
return; /* Defragging memory while there's a fork will just do damage. */
/* Once a second, check if the fragmentation justifies starting a scan
@ -1045,12 +1045,12 @@ void activeDefragCycle(void) {
run_with_period(1000) {
computeDefragCycles();
}
if (!server.active_defrag_running)
if (!g_pserver->active_defrag_running)
return;
/* See activeExpireCycle for how timelimit is handled. */
start = ustime();
timelimit = 1000000*server.active_defrag_running/server.hz/100;
timelimit = 1000000*g_pserver->active_defrag_running/g_pserver->hz/100;
if (timelimit <= 0) timelimit = 1;
endtime = start + timelimit;
latencyStartMonitor(latency);
@ -1074,26 +1074,26 @@ void activeDefragCycle(void) {
float frag_pct = getAllocatorFragmentation(&frag_bytes);
serverLog(LL_VERBOSE,
"Active defrag done in %dms, reallocated=%d, frag=%.0f%%, frag_bytes=%zu",
(int)((now - start_scan)/1000), (int)(server.stat_active_defrag_hits - start_stat), frag_pct, frag_bytes);
(int)((now - start_scan)/1000), (int)(g_pserver->stat_active_defrag_hits - start_stat), frag_pct, frag_bytes);
start_scan = now;
current_db = -1;
cursor = 0;
db = NULL;
server.active_defrag_running = 0;
g_pserver->active_defrag_running = 0;
computeDefragCycles(); /* if another scan is needed, start it right away */
if (server.active_defrag_running != 0 && ustime() < endtime)
if (g_pserver->active_defrag_running != 0 && ustime() < endtime)
continue;
break;
}
else if (current_db==0) {
/* Start a scan from the first database. */
start_scan = ustime();
start_stat = server.stat_active_defrag_hits;
start_stat = g_pserver->stat_active_defrag_hits;
}
db = &server.db[current_db];
db = &g_pserver->db[current_db];
cursor = 0;
}
@ -1112,15 +1112,15 @@ void activeDefragCycle(void) {
* But regardless, don't start a new db in this loop, this is because after
* the last db we call defragOtherGlobals, which must be done in one cycle */
if (!cursor || (++iterations > 16 ||
server.stat_active_defrag_hits - prev_defragged > 512 ||
server.stat_active_defrag_scanned - prev_scanned > 64)) {
g_pserver->stat_active_defrag_hits - prev_defragged > 512 ||
g_pserver->stat_active_defrag_scanned - prev_scanned > 64)) {
if (!cursor || ustime() > endtime) {
quit = 1;
break;
}
iterations = 0;
prev_defragged = server.stat_active_defrag_hits;
prev_scanned = server.stat_active_defrag_scanned;
prev_defragged = g_pserver->stat_active_defrag_hits;
prev_scanned = g_pserver->stat_active_defrag_scanned;
}
} while(cursor && !quit);
} while(!quit);

View File

@ -77,8 +77,8 @@ unsigned int getLRUClock(void) {
* precomputed value, otherwise we need to resort to a system call. */
unsigned int LRU_CLOCK(void) {
unsigned int lruclock;
if (1000/server.hz <= LRU_CLOCK_RESOLUTION) {
atomicGet(server.lruclock,lruclock);
if (1000/g_pserver->hz <= LRU_CLOCK_RESOLUTION) {
atomicGet(g_pserver->lruclock,lruclock);
} else {
lruclock = getLRUClock();
}
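The test reads: if the cron refreshes the cached clock at least once per LRU_CLOCK_RESOLUTION milliseconds, the cached value is already precise enough and the syscall path is skipped. A rough standalone sketch of the same branch (time(NULL) is a coarse stand-in for mstime()):

#include <atomic>
#include <ctime>

static const int LRU_CLOCK_RESOLUTION = 1000;     // ms, as in the real define
static std::atomic<unsigned> cached_lruclock{0};  // refreshed by a cron tick

static unsigned lruClockSketch(int hz) {
    if (1000 / hz <= LRU_CLOCK_RESOLUTION)
        return cached_lruclock.load(std::memory_order_relaxed);
    // Cron too slow for the resolution: pay for a real time read.
    return (unsigned)(time(nullptr) * 1000 / LRU_CLOCK_RESOLUTION);
}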
@ -111,7 +111,7 @@ unsigned long long estimateObjectIdleTime(robj *o) {
* If all the bytes needed to return back under the limit were freed the
* function returns C_OK, otherwise C_ERR is returned, and the caller
* should block the execution of commands that will result in more memory
* used by the server.
*
* ------------------------------------------------------------------------
*
@ -161,9 +161,9 @@ void evictionPoolAlloc(void) {
void evictionPoolPopulate(int dbid, dict *sampledict, dict *keydict, struct evictionPoolEntry *pool) {
int j, k, count;
dictEntry **samples = (dictEntry**)alloca(server.maxmemory_samples * sizeof(dictEntry*));
dictEntry **samples = (dictEntry**)alloca(g_pserver->maxmemory_samples * sizeof(dictEntry*));
count = dictGetSomeKeys(sampledict,samples,server.maxmemory_samples);
count = dictGetSomeKeys(sampledict,samples,g_pserver->maxmemory_samples);
for (j = 0; j < count; j++) {
unsigned long long idle;
sds key;
@ -176,7 +176,7 @@ void evictionPoolPopulate(int dbid, dict *sampledict, dict *keydict, struct evic
/* If the dictionary we are sampling from is not the main
* dictionary (but the expires one) we need to lookup the key
* again in the key dictionary to obtain the value object. */
if (server.maxmemory_policy != MAXMEMORY_VOLATILE_TTL) {
if (g_pserver->maxmemory_policy != MAXMEMORY_VOLATILE_TTL) {
if (sampledict != keydict) de = dictFind(keydict, key);
o = (robj*)dictGetVal(de);
}
@ -184,9 +184,9 @@ void evictionPoolPopulate(int dbid, dict *sampledict, dict *keydict, struct evic
/* Calculate the idle time according to the policy. This is called
* idle just because the code initially handled LRU, but is in fact
* just a score where a higher score means a better candidate. */
if (server.maxmemory_policy & MAXMEMORY_FLAG_LRU) {
if (g_pserver->maxmemory_policy & MAXMEMORY_FLAG_LRU) {
idle = (o != nullptr) ? estimateObjectIdleTime(o) : 0;
} else if (server.maxmemory_policy & MAXMEMORY_FLAG_LFU) {
} else if (g_pserver->maxmemory_policy & MAXMEMORY_FLAG_LFU) {
/* When we use an LRU policy, we sort the keys by idle time
* so that we expire keys starting from greater idle time.
* However when the policy is an LFU one, we have a frequency
@ -195,7 +195,7 @@ void evictionPoolPopulate(int dbid, dict *sampledict, dict *keydict, struct evic
* frequency, subtracting the actual frequency from the maximum
* frequency of 255. */
idle = 255-LFUDecrAndReturn(o);
} else if (server.maxmemory_policy == MAXMEMORY_VOLATILE_TTL) {
} else if (g_pserver->maxmemory_policy == MAXMEMORY_VOLATILE_TTL) {
/* In this case the sooner the expire the better. */
idle = ULLONG_MAX - (long)dictGetVal(de);
} else {
@ -297,7 +297,7 @@ void evictionPoolPopulate(int dbid, dict *sampledict, dict *keydict, struct evic
* 16 bits. The returned time is suitable to be stored as LDT (last decrement
* time) for the LFU implementation. */
unsigned long LFUGetTimeInMinutes(void) {
return (server.unixtime/60) & 65535;
return (g_pserver->unixtime/60) & 65535;
}
/* Given an object last access time, compute the minimum number of minutes
@ -317,7 +317,7 @@ uint8_t LFULogIncr(uint8_t counter) {
double r = (double)rand()/RAND_MAX;
double baseval = counter - LFU_INIT_VAL;
if (baseval < 0) baseval = 0;
double p = 1.0/(baseval*server.lfu_log_factor+1);
double p = 1.0/(baseval*g_pserver->lfu_log_factor+1);
if (r < p) counter++;
return counter;
}
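LFULogIncr is why an 8-bit counter can rank hot keys: the bump probability 1/(baseval*lfu_log_factor+1) shrinks as the counter grows, so the scale is logarithmic. Standalone sketch, assuming the upstream LFU_INIT_VAL of 5:

#include <cstdint>
#include <cstdlib>

static const int LFU_INIT_VAL = 5;  // assumed, matches upstream Redis

// With factor 10: at counter 5 a bump is certain (p=1); at counter 105
// the odds per access are 1/1001, so reaching 255 takes heavy traffic.
static uint8_t lfuLogIncr(uint8_t counter, int lfu_log_factor) {
    if (counter == 255) return 255;  // saturated
    double r = (double)rand() / RAND_MAX;
    double baseval = counter - LFU_INIT_VAL;
    if (baseval < 0) baseval = 0;
    double p = 1.0 / (baseval * lfu_log_factor + 1);
    return r < p ? counter + 1 : counter;
}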
@ -326,7 +326,7 @@ uint8_t LFULogIncr(uint8_t counter) {
* do not update LFU fields of the object, we update the access time
* and counter in an explicit way when the object is really accessed.
* And we decrement the counter by one for each period of
* server.lfu_decay_time that has elapsed.
* g_pserver->lfu_decay_time that has elapsed.
* Return the object frequency counter.
*
* This function is used in order to scan the dataset for the best object
@ -335,7 +335,7 @@ uint8_t LFULogIncr(uint8_t counter) {
unsigned long LFUDecrAndReturn(robj *o) {
unsigned long ldt = o->lru >> 8;
unsigned long counter = o->lru & 255;
unsigned long num_periods = server.lfu_decay_time ? LFUTimeElapsed(ldt) / server.lfu_decay_time : 0;
unsigned long num_periods = g_pserver->lfu_decay_time ? LFUTimeElapsed(ldt) / g_pserver->lfu_decay_time : 0;
if (num_periods)
counter = (num_periods > counter) ? 0 : counter - num_periods;
return counter;
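The decay side is linear: one counter point is lost per lfu_decay_time minutes since the stored 16-bit last-decrement timestamp, floored at zero. A worked sketch (wrapping minutes clock as above):

// Minutes elapsed since 'ldt' on the 16-bit wrapping minutes clock.
static unsigned long minutesElapsed(unsigned long now16, unsigned long ldt) {
    return now16 >= ldt ? now16 - ldt : 65536 + now16 - ldt;
}

// Example: counter=10, 30 minutes elapsed, decay_time=10 -> 3 periods -> 7.
static unsigned long lfuDecaySketch(unsigned long counter,
                                    unsigned long elapsed_min,
                                    unsigned long decay_time) {
    unsigned long periods = decay_time ? elapsed_min / decay_time : 0;
    return periods > counter ? 0 : counter - periods;
}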
@ -352,20 +352,20 @@ unsigned long LFUDecrAndReturn(robj *o) {
size_t freeMemoryGetNotCountedMemory(void) {
serverAssert(GlobalLocksAcquired());
size_t overhead = 0;
int slaves = listLength(server.slaves);
int slaves = listLength(g_pserver->slaves);
if (slaves) {
listIter li;
listNode *ln;
listRewind(server.slaves,&li);
listRewind(g_pserver->slaves,&li);
while((ln = listNext(&li))) {
client *slave = (client*)listNodeValue(ln);
overhead += getClientOutputBufferMemoryUsage(slave);
}
}
if (server.aof_state != AOF_OFF) {
overhead += sdsalloc(server.aof_buf)+aofRewriteBufferSize();
if (g_pserver->aof_state != AOF_OFF) {
overhead += sdsalloc(g_pserver->aof_buf)+aofRewriteBufferSize();
}
return overhead;
}
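Excluding replica output buffers and the AOF buffer matters because evicting keys cannot shrink them; counting them would make eviction chase unreclaimable bytes. A small sketch of the downstream arithmetic in getMaxmemoryState() terms (plain numbers, no server state):

#include <cstddef>

struct MemState { float level; std::size_t tofree; bool over; };

// level = used/maxmemory after overhead is excluded; tofree is what key
// eviction would actually need to reclaim.
static MemState maxmemoryState(std::size_t reported, std::size_t overhead,
                               std::size_t maxmemory) {
    std::size_t used = reported > overhead ? reported - overhead : 0;
    MemState s;
    s.level  = maxmemory ? (float)used / (float)maxmemory : 0.0f;
    s.over   = maxmemory && used > maxmemory;
    s.tofree = s.over ? used - maxmemory : 0;
    return s;
}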
@ -403,7 +403,7 @@ int getMaxmemoryState(size_t *total, size_t *logical, size_t *tofree, float *lev
if (total) *total = mem_reported;
/* We may return ASAP if there is no need to compute the level. */
int return_ok_asap = !server.maxmemory || mem_reported <= server.maxmemory;
int return_ok_asap = !g_pserver->maxmemory || mem_reported <= g_pserver->maxmemory;
if (return_ok_asap && !level) return C_OK;
/* Remove the size of slaves output buffers and AOF buffer from the
@ -414,20 +414,20 @@ int getMaxmemoryState(size_t *total, size_t *logical, size_t *tofree, float *lev
/* Compute the ratio of memory usage. */
if (level) {
if (!server.maxmemory) {
if (!g_pserver->maxmemory) {
*level = 0;
} else {
*level = (float)mem_used / (float)server.maxmemory;
*level = (float)mem_used / (float)g_pserver->maxmemory;
}
}
if (return_ok_asap) return C_OK;
/* Check if we are still over the memory limit. */
if (mem_used <= server.maxmemory) return C_OK;
if (mem_used <= g_pserver->maxmemory) return C_OK;
/* Compute how much memory we need to free. */
mem_tofree = mem_used - server.maxmemory;
mem_tofree = mem_used - g_pserver->maxmemory;
if (logical) *logical = mem_used;
if (tofree) *tofree = mem_tofree;
@ -448,12 +448,12 @@ int freeMemoryIfNeeded(void) {
serverAssert(GlobalLocksAcquired());
/* By default replicas should ignore maxmemory
* and just be masters' exact copies. */
if (listLength(server.masters) && server.repl_slave_ignore_maxmemory) return C_OK;
if (listLength(g_pserver->masters) && g_pserver->repl_slave_ignore_maxmemory) return C_OK;
size_t mem_reported, mem_tofree, mem_freed;
mstime_t latency, eviction_latency;
long long delta;
int slaves = listLength(server.slaves);
int slaves = listLength(g_pserver->slaves);
/* When clients are paused the dataset should be static not just from the
* POV of clients not being able to write, but also from the POV of
@ -464,7 +464,7 @@ int freeMemoryIfNeeded(void) {
mem_freed = 0;
if (server.maxmemory_policy == MAXMEMORY_NO_EVICTION)
if (g_pserver->maxmemory_policy == MAXMEMORY_NO_EVICTION)
goto cant_free; /* We need to free memory, but policy forbids. */
latencyStartMonitor(latency);
@ -477,8 +477,8 @@ int freeMemoryIfNeeded(void) {
dict *dict;
dictEntry *de;
if (server.maxmemory_policy & (MAXMEMORY_FLAG_LRU|MAXMEMORY_FLAG_LFU) ||
server.maxmemory_policy == MAXMEMORY_VOLATILE_TTL)
if (g_pserver->maxmemory_policy & (MAXMEMORY_FLAG_LRU|MAXMEMORY_FLAG_LFU) ||
g_pserver->maxmemory_policy == MAXMEMORY_VOLATILE_TTL)
{
struct evictionPoolEntry *pool = EvictionPoolLRU;
@ -489,8 +489,8 @@ int freeMemoryIfNeeded(void) {
* so to start we populate the eviction pool by sampling keys from
* every DB. */
for (i = 0; i < cserver.dbnum; i++) {
db = server.db+i;
dict = (server.maxmemory_policy & MAXMEMORY_FLAG_ALLKEYS) ?
db = g_pserver->db+i;
dict = (g_pserver->maxmemory_policy & MAXMEMORY_FLAG_ALLKEYS) ?
db->pdict : db->expires;
if ((keys = dictSize(dict)) != 0) {
evictionPoolPopulate(i, dict, db->pdict, pool);
@ -504,11 +504,11 @@ int freeMemoryIfNeeded(void) {
if (pool[k].key == NULL) continue;
bestdbid = pool[k].dbid;
if (server.maxmemory_policy & MAXMEMORY_FLAG_ALLKEYS) {
de = dictFind(server.db[pool[k].dbid].pdict,
if (g_pserver->maxmemory_policy & MAXMEMORY_FLAG_ALLKEYS) {
de = dictFind(g_pserver->db[pool[k].dbid].pdict,
pool[k].key);
} else {
de = dictFind(server.db[pool[k].dbid].expires,
de = dictFind(g_pserver->db[pool[k].dbid].expires,
pool[k].key);
}
@ -531,16 +531,16 @@ int freeMemoryIfNeeded(void) {
}
/* volatile-random and allkeys-random policy */
else if (server.maxmemory_policy == MAXMEMORY_ALLKEYS_RANDOM ||
server.maxmemory_policy == MAXMEMORY_VOLATILE_RANDOM)
else if (g_pserver->maxmemory_policy == MAXMEMORY_ALLKEYS_RANDOM ||
g_pserver->maxmemory_policy == MAXMEMORY_VOLATILE_RANDOM)
{
/* When evicting a random key, we try to evict a key for
* each DB, so we use the static 'next_db' variable to
* incrementally visit all DBs. */
for (i = 0; i < cserver.dbnum; i++) {
j = (++next_db) % cserver.dbnum;
db = server.db+j;
dict = (server.maxmemory_policy == MAXMEMORY_ALLKEYS_RANDOM) ?
db = g_pserver->db+j;
dict = (g_pserver->maxmemory_policy == MAXMEMORY_ALLKEYS_RANDOM) ?
db->pdict : db->expires;
if (dictSize(dict) != 0) {
de = dictGetRandomKey(dict);
@ -553,9 +553,9 @@ int freeMemoryIfNeeded(void) {
/* Finally remove the selected key. */
if (bestkey) {
db = server.db+bestdbid;
db = g_pserver->db+bestdbid;
robj *keyobj = createStringObject(bestkey,sdslen(bestkey));
propagateExpire(db,keyobj,server.lazyfree_lazy_eviction);
propagateExpire(db,keyobj,g_pserver->lazyfree_lazy_eviction);
/* We compute the amount of memory freed by db*Delete() alone.
* It is possible that actually the memory needed to propagate
* the DEL in AOF and replication link is greater than the one
@ -566,7 +566,7 @@ int freeMemoryIfNeeded(void) {
* we only care about memory used by the key space. */
delta = (long long) zmalloc_used_memory();
latencyStartMonitor(eviction_latency);
if (server.lazyfree_lazy_eviction)
if (g_pserver->lazyfree_lazy_eviction)
dbAsyncDelete(db,keyobj);
else
dbSyncDelete(db,keyobj);
@ -575,7 +575,7 @@ int freeMemoryIfNeeded(void) {
latencyRemoveNestedEvent(latency,eviction_latency);
delta -= (long long) zmalloc_used_memory();
mem_freed += delta;
server.stat_evictedkeys++;
g_pserver->stat_evictedkeys++;
notifyKeyspaceEvent(NOTIFY_EVICTED, "evicted",
keyobj, db->id);
decrRefCount(keyobj);
@ -594,7 +594,7 @@ int freeMemoryIfNeeded(void) {
* memory, since the "mem_freed" amount is computed only
* across the dbAsyncDelete() call, while the thread can
* release the memory all the time. */
if (server.lazyfree_lazy_eviction && !(keys_freed % 16)) {
if (g_pserver->lazyfree_lazy_eviction && !(keys_freed % 16)) {
if (getMaxmemoryState(NULL,NULL,NULL,NULL) == C_OK) {
/* Let's satisfy our stop condition. */
mem_freed = mem_tofree;
@ -632,6 +632,6 @@ cant_free:
*
*/
int freeMemoryIfNeededAndSafe(void) {
if (server.lua_timedout || server.loading) return C_OK;
if (g_pserver->lua_timedout || g_pserver->loading) return C_OK;
return freeMemoryIfNeeded();
}

View File

@ -47,7 +47,7 @@
* If the key is found to be expired, it is removed from the database and
* 1 is returned. Otherwise no operation is performed and 0 is returned.
*
* When a key is expired, server.stat_expiredkeys is incremented.
* When a key is expired, g_pserver->stat_expiredkeys is incremented.
*
* The parameter 'now' is the current time in milliseconds and is passed
* to the function to avoid too many gettimeofday() syscalls. */
@ -57,15 +57,15 @@ int activeExpireCycleTryExpire(redisDb *db, dictEntry *de, long long now) {
sds key = (sds)dictGetKey(de);
robj *keyobj = createStringObject(key,sdslen(key));
propagateExpire(db,keyobj,server.lazyfree_lazy_expire);
if (server.lazyfree_lazy_expire)
propagateExpire(db,keyobj,g_pserver->lazyfree_lazy_expire);
if (g_pserver->lazyfree_lazy_expire)
dbAsyncDelete(db,keyobj);
else
dbSyncDelete(db,keyobj);
notifyKeyspaceEvent(NOTIFY_EXPIRED,
"expired",keyobj,db->id);
decrRefCount(keyobj);
server.stat_expiredkeys++;
g_pserver->stat_expiredkeys++;
return 1;
} else {
return 0;
@ -131,9 +131,9 @@ void activeExpireCycle(int type) {
/* We can use at max ACTIVE_EXPIRE_CYCLE_SLOW_TIME_PERC percentage of CPU time
* per iteration. Since this function gets called with a frequency of
* server.hz times per second, the following is the max amount of
* g_pserver->hz times per second, the following is the max amount of
* microseconds we can spend in this function. */
timelimit = 1000000*ACTIVE_EXPIRE_CYCLE_SLOW_TIME_PERC/server.hz/100;
timelimit = 1000000*ACTIVE_EXPIRE_CYCLE_SLOW_TIME_PERC/g_pserver->hz/100;
timelimit_exit = 0;
if (timelimit <= 0) timelimit = 1;
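The budget works out to 1e6 * PERC / hz / 100 microseconds per call: assuming the upstream slow-cycle percentage of 25 and the default hz of 10, that is 1,000,000*25/10/100 = 25,000 us, i.e. 25 ms out of each 100 ms cron tick. Sketch:

// CPU budget for one slow expire cycle, in microseconds.
// expireCycleTimeLimitUs(25, 10) == 25000; degenerate configs floor to 1.
static long long expireCycleTimeLimitUs(int perc, int hz) {
    long long timelimit = 1000000LL * perc / hz / 100;
    return timelimit > 0 ? timelimit : 1;
}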
@ -148,7 +148,7 @@ void activeExpireCycle(int type) {
for (j = 0; j < dbs_per_call && timelimit_exit == 0; j++) {
int expired;
redisDb *db = server.db+(current_db % cserver.dbnum);
redisDb *db = g_pserver->db+(current_db % cserver.dbnum);
/* Increment the DB now so we are sure if we run out of time
* in the current DB we'll restart from the next. This allows to
@ -220,7 +220,7 @@ void activeExpireCycle(int type) {
elapsed = ustime()-start;
if (elapsed > timelimit) {
timelimit_exit = 1;
server.stat_expired_time_cap_reached_count++;
g_pserver->stat_expired_time_cap_reached_count++;
break;
}
}
@ -239,8 +239,8 @@ void activeExpireCycle(int type) {
current_perc = (double)total_expired/total_sampled;
} else
current_perc = 0;
server.stat_expired_stale_perc = (current_perc*0.05)+
(server.stat_expired_stale_perc*0.95);
g_pserver->stat_expired_stale_perc = (current_perc*0.05)+
(g_pserver->stat_expired_stale_perc*0.95);
}
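The 0.05/0.95 blend is an exponential moving average: each cycle's sample contributes 5% and older samples decay by 0.95 per cycle, giving a half-life of roughly 13 cycles. Minimal sketch:

// EWMA of the stale-key ratio: new = 0.05*sample + 0.95*old.
static double updateStalePerc(double old_est, long long expired,
                              long long sampled) {
    double sample = sampled ? (double)expired / (double)sampled : 0.0;
    return sample * 0.05 + old_est * 0.95;
}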
/*-----------------------------------------------------------------------------
@ -299,12 +299,12 @@ void expireSlaveKeys(void) {
int dbid = 0;
while(dbids && dbid < cserver.dbnum) {
if ((dbids & 1) != 0) {
redisDb *db = server.db+dbid;
redisDb *db = g_pserver->db+dbid;
dictEntry *expire = dictFind(db->expires,keyname);
int expired = 0;
if (expire &&
activeExpireCycleTryExpire(server.db+dbid,expire,start))
activeExpireCycleTryExpire(g_pserver->db+dbid,expire,start))
{
expired = 1;
}
@ -377,7 +377,7 @@ size_t getSlaveKeyWithExpireCount(void) {
}
/* Remove the keys in the hash table. We need to do that when data is
* flushed from the server. We may receive new keys from the master with
* the same name/db and it is no longer a good idea to expire them.
*
* Note: technically we should handle the case of a single DB being flushed
@ -424,16 +424,16 @@ void expireGenericCommand(client *c, long long basetime, int unit) {
*
* Instead we take the other branch of the IF statement setting an expire
* (possibly in the past) and wait for an explicit DEL from the master. */
if (when <= mstime() && !server.loading && !listLength(server.masters)) {
if (when <= mstime() && !g_pserver->loading && !listLength(g_pserver->masters)) {
robj *aux;
int deleted = server.lazyfree_lazy_expire ? dbAsyncDelete(c->db,key) :
int deleted = g_pserver->lazyfree_lazy_expire ? dbAsyncDelete(c->db,key) :
dbSyncDelete(c->db,key);
serverAssertWithInfo(c,key,deleted);
server.dirty++;
g_pserver->dirty++;
/* Replicate/AOF this as an explicit DEL or UNLINK. */
aux = server.lazyfree_lazy_expire ? shared.unlink : shared.del;
aux = g_pserver->lazyfree_lazy_expire ? shared.unlink : shared.del;
rewriteClientCommandVector(c,2,aux,key);
signalModifiedKey(c->db,key);
notifyKeyspaceEvent(NOTIFY_GENERIC,"del",key,c->db->id);
@ -444,7 +444,7 @@ void expireGenericCommand(client *c, long long basetime, int unit) {
addReply(c,shared.cone);
signalModifiedKey(c->db,key);
notifyKeyspaceEvent(NOTIFY_GENERIC,"expire",key,c->db->id);
server.dirty++;
g_pserver->dirty++;
return;
}
}
@ -507,7 +507,7 @@ void persistCommand(client *c) {
if (lookupKeyWrite(c->db,c->argv[1])) {
if (removeExpire(c->db,c->argv[1])) {
addReply(c,shared.cone);
server.dirty++;
g_pserver->dirty++;
} else {
addReply(c,shared.czero);
}

View File

@ -661,11 +661,11 @@ void georadiusGeneric(client *c, int flags) {
decrRefCount(zobj);
notifyKeyspaceEvent(NOTIFY_ZSET,"georadiusstore",storekey,
c->db->id);
server.dirty += returned_items;
g_pserver->dirty += returned_items;
} else if (dbDelete(c->db,storekey)) {
signalModifiedKey(c->db,storekey);
notifyKeyspaceEvent(NOTIFY_GENERIC,"del",storekey,c->db->id);
server.dirty++;
g_pserver->dirty++;
}
addReplyLongLong(c, returned_items);
}

View File

@ -176,7 +176,7 @@
* involved in updating the sparse representation is not justified by the
* memory savings. The exact maximum length of the sparse representation
* when this implementation switches to the dense representation is
* configured via the define server.hll_sparse_max_bytes.
* configured via the define g_pserver->hll_sparse_max_bytes.
*/
struct hllhdr {
@ -652,7 +652,7 @@ int hllSparseToDense(robj *o) {
* As a side effect the function may promote the HLL representation from
* sparse to dense: this happens when a register requires to be set to a value
* not representable with the sparse representation, or when the resulting
* size would be greater than server.hll_sparse_max_bytes. */
* size would be greater than g_pserver->hll_sparse_max_bytes. */
int hllSparseSet(robj *o, long index, uint8_t count) {
struct hllhdr *hdr;
uint8_t oldcount, *sparse, *end, *p, *prev, *next;
@ -837,7 +837,7 @@ int hllSparseSet(robj *o, long index, uint8_t count) {
deltalen = seqlen-oldlen;
if (deltalen > 0 &&
sdslen(szFromObj(o))+deltalen > server.hll_sparse_max_bytes) goto promote;
sdslen(szFromObj(o))+deltalen > g_pserver->hll_sparse_max_bytes) goto promote;
if (deltalen && next) memmove(next+deltalen,next,end-next);
sdsIncrLen(szFromObj(o),deltalen);
memcpy(p,seq,seqlen);
@ -1221,7 +1221,7 @@ void pfaddCommand(client *c) {
if (updated) {
signalModifiedKey(c->db,c->argv[1]);
notifyKeyspaceEvent(NOTIFY_STRING,"pfadd",c->argv[1],c->db->id);
server.dirty++;
g_pserver->dirty++;
HLL_INVALIDATE_CACHE(hdr);
}
addReply(c, updated ? shared.cone : shared.czero);
@ -1311,7 +1311,7 @@ void pfcountCommand(client *c) {
* may be modified and given that the HLL is a Redis string
* we need to propagate the change. */
signalModifiedKey(c->db,c->argv[1]);
server.dirty++;
g_pserver->dirty++;
}
addReplyLongLong(c,card);
}
@ -1387,7 +1387,7 @@ void pfmergeCommand(client *c) {
/* We generate a PFADD event for PFMERGE for semantic simplicity
* since in theory this is a mass-add of elements. */
notifyKeyspaceEvent(NOTIFY_STRING,"pfadd",c->argv[1],c->db->id);
server.dirty++;
g_pserver->dirty++;
addReply(c,shared.ok);
}
@ -1457,7 +1457,7 @@ void pfselftestCommand(client *c) {
/* Make sure that for small cardinalities we use sparse
* encoding. */
if (j == checkpoint && j < server.hll_sparse_max_bytes/2) {
if (j == checkpoint && j < g_pserver->hll_sparse_max_bytes/2) {
hdr2 = (hllhdr*)ptrFromObj(o);
if (hdr2->encoding != HLL_SPARSE) {
addReplyError(c, "TESTFAILED sparse encoding not used");
@ -1528,7 +1528,7 @@ void pfdebugCommand(client *c) {
addReplySds(c,sdsnew(invalid_hll_err));
return;
}
server.dirty++; /* Force propagation on encoding change. */
g_pserver->dirty++; /* Force propagation on encoding change. */
}
hdr = (hllhdr*)ptrFromObj(o);
@ -1593,7 +1593,7 @@ void pfdebugCommand(client *c) {
return;
}
conv = 1;
server.dirty++; /* Force propagation on encoding change. */
g_pserver->dirty++; /* Force propagation on encoding change. */
}
addReply(c,conv ? shared.cone : shared.czero);
} else {

View File

@ -88,15 +88,15 @@ int THPGetAnonHugePagesSize(void) {
* of time series; each time series is created on demand in order to avoid
* having a fixed list to maintain. */
void latencyMonitorInit(void) {
server.latency_events = dictCreate(&latencyTimeSeriesDictType,NULL);
g_pserver->latency_events = dictCreate(&latencyTimeSeriesDictType,NULL);
}
/* Add the specified sample to the specified time series "event".
* This function is usually called via latencyAddSampleIfNeeded(), that
* is a macro that only adds the sample if the latency is higher than
* server.latency_monitor_threshold. */
* g_pserver->latency_monitor_threshold. */
void latencyAddSample(const char *event, mstime_t latency) {
struct latencyTimeSeries *ts = (latencyTimeSeries*)dictFetchValue(server.latency_events,event);
struct latencyTimeSeries *ts = (latencyTimeSeries*)dictFetchValue(g_pserver->latency_events,event);
time_t now = time(NULL);
int prev;
@ -106,7 +106,7 @@ void latencyAddSample(const char *event, mstime_t latency) {
ts->idx = 0;
ts->max = 0;
memset(ts->samples,0,sizeof(ts->samples));
dictAdd(server.latency_events,zstrdup(event),ts);
dictAdd(g_pserver->latency_events,zstrdup(event),ts);
}
if (latency > ts->max) ts->max = latency;
@ -137,12 +137,12 @@ int latencyResetEvent(char *event_to_reset) {
dictEntry *de;
int resets = 0;
di = dictGetSafeIterator(server.latency_events);
di = dictGetSafeIterator(g_pserver->latency_events);
while((de = dictNext(di)) != NULL) {
char *event = (char*)dictGetKey(de);
if (event_to_reset == NULL || strcasecmp(event,event_to_reset) == 0) {
dictDelete(server.latency_events, event);
dictDelete(g_pserver->latency_events, event);
resets++;
}
}
@ -158,7 +158,7 @@ int latencyResetEvent(char *event_to_reset) {
* If the specified event has no elements the structure is populated with
* zero values. */
void analyzeLatencyForEvent(char *event, struct latencyStats *ls) {
struct latencyTimeSeries *ts = (latencyTimeSeries*)dictFetchValue(server.latency_events,event);
struct latencyTimeSeries *ts = (latencyTimeSeries*)dictFetchValue(g_pserver->latency_events,event);
int j;
uint64_t sum;
@ -236,8 +236,8 @@ sds createLatencyReport(void) {
/* Return ASAP if the latency engine is disabled and it looks like it
* was never enabled so far. */
if (dictSize(server.latency_events) == 0 &&
server.latency_monitor_threshold == 0)
if (dictSize(g_pserver->latency_events) == 0 &&
g_pserver->latency_monitor_threshold == 0)
{
report = sdscat(report,"I'm sorry, Dave, I can't do that. Latency monitoring is disabled in this Redis instance. You may use \"CONFIG SET latency-monitor-threshold <milliseconds>.\" in order to enable it. If we weren't in a deep space mission I'd suggest to take a look at http://redis.io/topics/latency-monitor.\n");
return report;
@ -249,7 +249,7 @@ sds createLatencyReport(void) {
dictEntry *de;
int eventnum = 0;
di = dictGetSafeIterator(server.latency_events);
di = dictGetSafeIterator(g_pserver->latency_events);
while((de = dictNext(di)) != NULL) {
char *event = (char*)dictGetKey(de);
struct latencyTimeSeries *ts = (latencyTimeSeries*)dictGetVal(de);
@ -274,31 +274,31 @@ sds createLatencyReport(void) {
/* Fork */
if (!strcasecmp(event,"fork")) {
const char *fork_quality;
if (server.stat_fork_rate < 10) {
if (g_pserver->stat_fork_rate < 10) {
fork_quality = "terrible";
advise_better_vm = 1;
advices++;
} else if (server.stat_fork_rate < 25) {
} else if (g_pserver->stat_fork_rate < 25) {
fork_quality = "poor";
advise_better_vm = 1;
advices++;
} else if (server.stat_fork_rate < 100) {
} else if (g_pserver->stat_fork_rate < 100) {
fork_quality = "good";
} else {
fork_quality = "excellent";
}
report = sdscatprintf(report,
" Fork rate is %.2f GB/sec (%s).", server.stat_fork_rate,
" Fork rate is %.2f GB/sec (%s).", g_pserver->stat_fork_rate,
fork_quality);
}
/* Potentially commands. */
if (!strcasecmp(event,"command")) {
if (server.slowlog_log_slower_than < 0) {
if (g_pserver->slowlog_log_slower_than < 0) {
advise_slowlog_enabled = 1;
advices++;
} else if (server.slowlog_log_slower_than/1000 >
server.latency_monitor_threshold)
} else if (g_pserver->slowlog_log_slower_than/1000 >
g_pserver->latency_monitor_threshold)
{
advise_slowlog_tuning = 1;
advices++;
@ -401,11 +401,11 @@ sds createLatencyReport(void) {
/* Slow log. */
if (advise_slowlog_enabled) {
report = sdscatprintf(report,"- There are latency issues with potentially slow commands you are using. Try to enable the Slow Log Redis feature using the command 'CONFIG SET slowlog-log-slower-than %llu'. If the Slow log is disabled Redis is not able to log slow commands execution for you.\n", (unsigned long long)server.latency_monitor_threshold*1000);
report = sdscatprintf(report,"- There are latency issues with potentially slow commands you are using. Try to enable the Slow Log Redis feature using the command 'CONFIG SET slowlog-log-slower-than %llu'. If the Slow log is disabled Redis is not able to log slow commands execution for you.\n", (unsigned long long)g_pserver->latency_monitor_threshold*1000);
}
if (advise_slowlog_tuning) {
report = sdscatprintf(report,"- Your current Slow Log configuration only logs events that are slower than your configured latency monitor threshold. Please use 'CONFIG SET slowlog-log-slower-than %llu'.\n", (unsigned long long)server.latency_monitor_threshold*1000);
report = sdscatprintf(report,"- Your current Slow Log configuration only logs events that are slower than your configured latency monitor threshold. Please use 'CONFIG SET slowlog-log-slower-than %llu'.\n", (unsigned long long)g_pserver->latency_monitor_threshold*1000);
}
if (advise_slowlog_inspect) {
@ -443,7 +443,7 @@ sds createLatencyReport(void) {
report = sdscat(report,"- Assuming from the point of view of data safety this is viable in your environment, you could try to enable the 'no-appendfsync-on-rewrite' option, so that fsync will not be performed while there is a child rewriting the AOF file or producing an RDB file (the moment where there is high disk contention).\n");
}
if (advise_relax_fsync_policy && server.aof_fsync == AOF_FSYNC_ALWAYS) {
if (advise_relax_fsync_policy && g_pserver->aof_fsync == AOF_FSYNC_ALWAYS) {
report = sdscat(report,"- Your fsync policy is set to 'always'. It is very hard to get good performances with such a setup, if possible try to relax the fsync policy to 'onesec'.\n");
}
@ -451,7 +451,7 @@ sds createLatencyReport(void) {
report = sdscat(report,"- Latency during the AOF atomic rename operation or when the final difference is flushed to the AOF file at the end of the rewrite, sometimes is caused by very high write load, causing the AOF buffer to get very large. If possible try to send less commands to accomplish the same work, or use Lua scripts to group multiple operations into a single EVALSHA call.\n");
}
if (advise_hz && server.hz < 100) {
if (advise_hz && g_pserver->hz < 100) {
report = sdscat(report,"- In order to make the Redis keys expiring process more incremental, try to set the 'hz' configuration parameter to 100 using 'CONFIG SET hz 100'.\n");
}
@ -497,8 +497,8 @@ void latencyCommandReplyWithLatestEvents(client *c) {
dictIterator *di;
dictEntry *de;
addReplyArrayLen(c,dictSize(server.latency_events));
di = dictGetIterator(server.latency_events);
addReplyArrayLen(c,dictSize(g_pserver->latency_events));
di = dictGetIterator(g_pserver->latency_events);
while((de = dictNext(di)) != NULL) {
char *event = (char*)dictGetKey(de);
struct latencyTimeSeries *ts = (latencyTimeSeries*)dictGetVal(de);
@ -581,7 +581,7 @@ NULL
if (!strcasecmp(szFromObj(c->argv[1]),"history") && c->argc == 3) {
/* LATENCY HISTORY <event> */
ts = (latencyTimeSeries*)dictFetchValue(server.latency_events,ptrFromObj(c->argv[2]));
ts = (latencyTimeSeries*)dictFetchValue(g_pserver->latency_events,ptrFromObj(c->argv[2]));
if (ts == NULL) {
addReplyArrayLen(c,0);
} else {
@ -593,7 +593,7 @@ NULL
dictEntry *de;
char *event;
de = dictFind(server.latency_events,ptrFromObj(c->argv[2]));
de = dictFind(g_pserver->latency_events,ptrFromObj(c->argv[2]));
if (de == NULL) goto nodataerr;
ts = (latencyTimeSeries*)dictGetVal(de);
event = (char*)dictGetKey(de);

View File

@ -72,7 +72,7 @@ int THPIsEnabled(void);
/* Latency monitoring macros. */
/* Start monitoring an event. We just set the current time. */
#define latencyStartMonitor(var) if (server.latency_monitor_threshold) { \
#define latencyStartMonitor(var) if (g_pserver->latency_monitor_threshold) { \
var = mstime(); \
} else { \
var = 0; \
@ -80,14 +80,14 @@ int THPIsEnabled(void);
/* End monitoring an event, compute the difference with the current time
* to check the amount of time elapsed. */
#define latencyEndMonitor(var) if (server.latency_monitor_threshold) { \
#define latencyEndMonitor(var) if (g_pserver->latency_monitor_threshold) { \
var = mstime() - var; \
}
/* Add the sample only if the elapsed time is >= to the configured threshold. */
#define latencyAddSampleIfNeeded(event,var) \
if (server.latency_monitor_threshold && \
(var) >= server.latency_monitor_threshold) \
if (g_pserver->latency_monitor_threshold && \
(var) >= g_pserver->latency_monitor_threshold) \
latencyAddSample((event),(var));
/* Remove time from a nested event. */

View File

@ -83,7 +83,7 @@ int dbAsyncDelete(redisDb *db, robj *key) {
* field to NULL in order to lazy free it later. */
if (de) {
dictFreeUnlinkedEntry(db->pdict,de);
if (server.cluster_enabled) slotToKeyDel(key);
if (g_pserver->cluster_enabled) slotToKeyDel(key);
return 1;
} else {
return 0;
@ -115,11 +115,11 @@ void emptyDbAsync(redisDb *db) {
/* Empty the slots-keys map of Redis Cluster by creating a new empty one
* and scheduling the old for lazy freeing. */
void slotToKeyFlushAsync(void) {
rax *old = server.cluster->slots_to_keys;
rax *old = g_pserver->cluster->slots_to_keys;
server.cluster->slots_to_keys = raxNew();
memset(server.cluster->slots_keys_count,0,
sizeof(server.cluster->slots_keys_count));
g_pserver->cluster->slots_to_keys = raxNew();
memset(g_pserver->cluster->slots_keys_count,0,
sizeof(g_pserver->cluster->slots_keys_count));
atomicIncr(lazyfree_objects,old->numele);
bioCreateBackgroundJob(BIO_LAZY_FREE,NULL,NULL,old);
}
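The move here is swap-then-free: install a fresh empty structure while holding the global lock, then hand the old pointer to a background BIO thread so the O(N) raxFree never stalls the event loop. A generic sketch of the same pattern (std::thread stands in for the BIO queue; assume the swap runs under the caller's lock):

#include <map>
#include <string>
#include <thread>

static std::map<std::string, int> *live_index = new std::map<std::string, int>();

static void flushIndexAsync() {
    auto *old = live_index;
    live_index = new std::map<std::string, int>(); // readers now see empty
    std::thread([old] { delete old; }).detach();   // O(N) teardown off-thread
}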

View File

@ -55,7 +55,7 @@ struct RedisModule {
typedef struct RedisModule RedisModule;
/* This represents a shared API. Shared APIs will be used to populate
* the server.sharedapi dictionary, mapping names of APIs exported by
* the g_pserver->sharedapi dictionary, mapping names of APIs exported by
* modules for other modules to use, to their structure specifying the
* function pointer that can be called. */
struct RedisModuleSharedAPI {
@ -427,8 +427,8 @@ int moduleCreateEmptyKey(RedisModuleKey *key, int type) {
switch(type) {
case REDISMODULE_KEYTYPE_LIST:
obj = createQuicklistObject();
quicklistSetOptions((quicklist*)obj->m_ptr, server.list_max_ziplist_size,
server.list_compress_depth);
quicklistSetOptions((quicklist*)obj->m_ptr, g_pserver->list_max_ziplist_size,
g_pserver->list_compress_depth);
break;
case REDISMODULE_KEYTYPE_ZSET:
obj = createZsetZiplistObject();
@ -492,7 +492,7 @@ int moduleDelKeyIfEmpty(RedisModuleKey *key) {
* This function is not meant to be used by module developers; it is only
* used implicitly by including redismodule.h. */
int RM_GetApi(const char *funcname, void **targetPtrPtr) {
dictEntry *he = dictFind(server.moduleapi, funcname);
dictEntry *he = dictFind(g_pserver->moduleapi, funcname);
if (!he) return REDISMODULE_ERR;
*targetPtrPtr = dictGetVal(he);
return REDISMODULE_OK;
@ -700,7 +700,7 @@ int commandFlagsFromString(char *s) {
int RM_CreateCommand(RedisModuleCtx *ctx, const char *name, RedisModuleCmdFunc cmdfunc, const char *strflags, int firstkey, int lastkey, int keystep) {
int flags = strflags ? commandFlagsFromString((char*)strflags) : 0;
if (flags == -1) return REDISMODULE_ERR;
if ((flags & CMD_MODULE_NO_CLUSTER) && server.cluster_enabled)
if ((flags & CMD_MODULE_NO_CLUSTER) && g_pserver->cluster_enabled)
return REDISMODULE_ERR;
struct redisCommand *rediscmd;
@ -734,8 +734,8 @@ int RM_CreateCommand(RedisModuleCtx *ctx, const char *name, RedisModuleCmdFunc c
cp->rediscmd->keystep = keystep;
cp->rediscmd->microseconds = 0;
cp->rediscmd->calls = 0;
dictAdd(server.commands,sdsdup(cmdname),cp->rediscmd);
dictAdd(server.orig_commands,sdsdup(cmdname),cp->rediscmd);
dictAdd(g_pserver->commands,sdsdup(cmdname),cp->rediscmd);
dictAdd(g_pserver->orig_commands,sdsdup(cmdname),cp->rediscmd);
cp->rediscmd->id = ACLGetCommandID(cmdname); /* ID used for ACL. */
return REDISMODULE_OK;
}
@ -1355,7 +1355,7 @@ int RM_Replicate(RedisModuleCtx *ctx, const char *cmdname, const char *fmt, ...)
/* Release the argv. */
for (j = 0; j < argc; j++) decrRefCount(argv[j]);
zfree(argv);
server.dirty++;
g_pserver->dirty++;
return REDISMODULE_OK;
}
@ -1374,7 +1374,7 @@ int RM_ReplicateVerbatim(RedisModuleCtx *ctx) {
alsoPropagate(ctx->client->cmd,ctx->client->db->id,
ctx->client->argv,ctx->client->argc,
PROPAGATE_AOF|PROPAGATE_REPL);
server.dirty++;
g_pserver->dirty++;
return REDISMODULE_OK;
}
@ -1454,29 +1454,29 @@ int RM_GetContextFlags(RedisModuleCtx *ctx) {
flags |= REDISMODULE_CTX_FLAGS_REPLICATED;
}
if (server.cluster_enabled)
if (g_pserver->cluster_enabled)
flags |= REDISMODULE_CTX_FLAGS_CLUSTER;
/* Maxmemory and eviction policy */
if (server.maxmemory > 0) {
if (g_pserver->maxmemory > 0) {
flags |= REDISMODULE_CTX_FLAGS_MAXMEMORY;
if (server.maxmemory_policy != MAXMEMORY_NO_EVICTION)
if (g_pserver->maxmemory_policy != MAXMEMORY_NO_EVICTION)
flags |= REDISMODULE_CTX_FLAGS_EVICT;
}
/* Persistence flags */
if (server.aof_state != AOF_OFF)
if (g_pserver->aof_state != AOF_OFF)
flags |= REDISMODULE_CTX_FLAGS_AOF;
if (server.saveparamslen > 0)
if (g_pserver->saveparamslen > 0)
flags |= REDISMODULE_CTX_FLAGS_RDB;
/* Replication flags */
if (listLength(server.masters) == 0) {
if (listLength(g_pserver->masters) == 0) {
flags |= REDISMODULE_CTX_FLAGS_MASTER;
} else {
flags |= REDISMODULE_CTX_FLAGS_SLAVE;
if (server.repl_slave_ro)
if (g_pserver->repl_slave_ro)
flags |= REDISMODULE_CTX_FLAGS_READONLY;
}
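Modules typically branch on these bits instead of poking at replication state directly; a minimal sketch:
/* Sketch: refuse work on a read-only replica. */
int ctxflags = RedisModule_GetContextFlags(ctx);
if ((ctxflags & REDISMODULE_CTX_FLAGS_SLAVE) &&
    (ctxflags & REDISMODULE_CTX_FLAGS_READONLY))
    return RedisModule_ReplyWithError(ctx, "ERR replica is read only");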
@ -2792,12 +2792,12 @@ RedisModuleCallReply *RM_Call(RedisModuleCtx *ctx, const char *cmdname, const ch
/* If this is a Redis Cluster node, we need to make sure the module is not
* trying to access non-local keys, with the exception of commands
* received from our master. */
if (server.cluster_enabled && !(ctx->client->flags & CLIENT_MASTER)) {
if (g_pserver->cluster_enabled && !(ctx->client->flags & CLIENT_MASTER)) {
/* Duplicate relevant flags in the module client. */
c->flags &= ~(CLIENT_READONLY|CLIENT_ASKING);
c->flags |= ctx->client->flags & (CLIENT_READONLY|CLIENT_ASKING);
if (getNodeByQuery(c,c->cmd,c->argv,c->argc,NULL,NULL) !=
server.cluster->myself)
g_pserver->cluster->myself)
{
errno = EPERM;
goto cleanup;
@ -3663,7 +3663,7 @@ int RM_UnblockClient(RedisModuleBlockedClient *bc, void *privdata) {
pthread_mutex_lock(&moduleUnblockedClientsMutex);
bc->privdata = privdata;
listAddNodeTail(moduleUnblockedClients,bc);
if (write(server.module_blocked_pipe[1],"A",1) != 1) {
if (write(g_pserver->module_blocked_pipe[1],"A",1) != 1) {
/* Ignore the error, this is best-effort. */
}
pthread_mutex_unlock(&moduleUnblockedClientsMutex);
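The byte itself carries no information; it only wakes the event loop, which drains the pipe and walks the whole unblocked list (see moduleHandleBlockedClients below). The self-pipe pattern in miniature, with hypothetical names:
/* Self-pipe wakeup, sketched. Writer side (any thread): */
if (write(wakeup_pipe[1], "A", 1) != 1) { /* best effort; bytes may coalesce */ }
/* Reader side (event loop): drain every pending byte, then run the queue. */
char b;
while (read(wakeup_pipe[0], &b, 1) == 1);
process_unblocked_clients();   /* hypothetical */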
@ -3715,7 +3715,7 @@ void moduleHandleBlockedClients(void) {
/* Here we unblock all the pending clients blocked in module operations
* so we can read every pending "awake byte" in the pipe. */
char buf[1];
while (read(server.module_blocked_pipe[0],buf,1) == 1);
while (read(g_pserver->module_blocked_pipe[0],buf,1) == 1);
while (listLength(moduleUnblockedClients)) {
ln = listFirst(moduleUnblockedClients);
bc = (RedisModuleBlockedClient*)ln->value;
@ -3788,9 +3788,9 @@ void moduleHandleBlockedClients(void) {
c->flags |= CLIENT_PENDING_WRITE;
AssertCorrectThread(c);
fastlock_lock(&server.rgthreadvar[c->iel].lockPendingWrite);
listAddNodeHead(server.rgthreadvar[c->iel].clients_pending_write,c);
fastlock_unlock(&server.rgthreadvar[c->iel].lockPendingWrite);
fastlock_lock(&g_pserver->rgthreadvar[c->iel].lockPendingWrite);
listAddNodeHead(g_pserver->rgthreadvar[c->iel].clients_pending_write,c);
fastlock_unlock(&g_pserver->rgthreadvar[c->iel].lockPendingWrite);
}
}
@ -4107,7 +4107,7 @@ void moduleCallClusterReceivers(const char *sender_id, uint64_t module_id, uint8
* is already a callback for this function, the callback is unregistered
* (so this API call is also used in order to delete the receiver). */
void RM_RegisterClusterMessageReceiver(RedisModuleCtx *ctx, uint8_t type, RedisModuleClusterMessageReceiver callback) {
if (!server.cluster_enabled) return;
if (!g_pserver->cluster_enabled) return;
uint64_t module_id = moduleTypeEncodeId(ctx->module->name,0);
moduleClusterReceiver *r = clusterReceivers[type], *prev = NULL;
@ -4151,7 +4151,7 @@ void RM_RegisterClusterMessageReceiver(RedisModuleCtx *ctx, uint8_t type, RedisM
* otherwise if the node is not connected or such node ID does not map to any
* known cluster node, REDISMODULE_ERR is returned. */
int RM_SendClusterMessage(RedisModuleCtx *ctx, char *target_id, uint8_t type, unsigned char *msg, uint32_t len) {
if (!server.cluster_enabled) return REDISMODULE_ERR;
if (!g_pserver->cluster_enabled) return REDISMODULE_ERR;
uint64_t module_id = moduleTypeEncodeId(ctx->module->name,0);
if (clusterSendModuleMessageToTarget(target_id,module_id,type,msg,len) == C_OK)
return REDISMODULE_OK;
@ -4184,10 +4184,10 @@ int RM_SendClusterMessage(RedisModuleCtx *ctx, char *target_id, uint8_t type, un
char **RM_GetClusterNodesList(RedisModuleCtx *ctx, size_t *numnodes) {
UNUSED(ctx);
if (!server.cluster_enabled) return NULL;
size_t count = dictSize(server.cluster->nodes);
if (!g_pserver->cluster_enabled) return NULL;
size_t count = dictSize(g_pserver->cluster->nodes);
char **ids = (char**)zmalloc((count+1)*REDISMODULE_NODE_ID_LEN, MALLOC_LOCAL);
dictIterator *di = dictGetIterator(server.cluster->nodes);
dictIterator *di = dictGetIterator(g_pserver->cluster->nodes);
dictEntry *de;
int j = 0;
while((de = dictNext(di)) != NULL) {
@ -4214,8 +4214,8 @@ void RM_FreeClusterNodesList(char **ids) {
/* Return this node ID (REDISMODULE_CLUSTER_ID_LEN bytes) or NULL if the cluster
* is disabled. */
const char *RM_GetMyClusterID(void) {
if (!server.cluster_enabled) return NULL;
return server.cluster->myself->name;
if (!g_pserver->cluster_enabled) return NULL;
return g_pserver->cluster->myself->name;
}
/* Return the number of nodes in the cluster, regardless of their state
@ -4223,8 +4223,8 @@ const char *RM_GetMyClusterID(void) {
* be smaller, but not greater than this number. If the instance is not in
* cluster mode, zero is returned. */
size_t RM_GetClusterSize(void) {
if (!server.cluster_enabled) return 0;
return dictSize(server.cluster->nodes);
if (!g_pserver->cluster_enabled) return 0;
return dictSize(g_pserver->cluster->nodes);
}
/* Populate the specified info for the node whose ID is the specified 'id',
@ -4304,9 +4304,9 @@ int RM_GetClusterNodeInfo(RedisModuleCtx *ctx, const char *id, char *ip, char *m
void RM_SetClusterFlags(RedisModuleCtx *ctx, uint64_t flags) {
UNUSED(ctx);
if (flags & REDISMODULE_CLUSTER_FLAG_NO_FAILOVER)
server.cluster_module_flags |= CLUSTER_MODULE_FLAG_NO_FAILOVER;
g_pserver->cluster_module_flags |= CLUSTER_MODULE_FLAG_NO_FAILOVER;
if (flags & REDISMODULE_CLUSTER_FLAG_NO_REDIRECTION)
server.cluster_module_flags |= CLUSTER_MODULE_FLAG_NO_REDIRECTION;
g_pserver->cluster_module_flags |= CLUSTER_MODULE_FLAG_NO_REDIRECTION;
}
/* --------------------------------------------------------------------------
@ -4415,7 +4415,7 @@ RedisModuleTimerID RM_CreateTimer(RedisModuleCtx *ctx, mstime_t period, RedisMod
if (memcmp(ri.key,&key,sizeof(key)) == 0) {
/* This is the first key; we need to re-install the timer according
* to the just-added event. */
aeDeleteTimeEvent(server.rgthreadvar[IDX_EVENT_LOOP_MAIN].el,aeTimer);
aeDeleteTimeEvent(g_pserver->rgthreadvar[IDX_EVENT_LOOP_MAIN].el,aeTimer);
aeTimer = -1;
}
raxStop(&ri);
@ -4424,7 +4424,7 @@ RedisModuleTimerID RM_CreateTimer(RedisModuleCtx *ctx, mstime_t period, RedisMod
/* If we have no main timer (the old one was invalidated, or this is the
* first module timer we have), install one. */
if (aeTimer == -1)
aeTimer = aeCreateTimeEvent(server.rgthreadvar[IDX_EVENT_LOOP_MAIN].el,period,moduleTimerHandler,NULL,NULL);
aeTimer = aeCreateTimeEvent(g_pserver->rgthreadvar[IDX_EVENT_LOOP_MAIN].el,period,moduleTimerHandler,NULL,NULL);
return key;
}
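Module-side usage, for reference (period and payload illustrative):
/* Hypothetical module timer firing once after 100 ms. */
void TimerHandler(RedisModuleCtx *ctx, void *data) {
    RedisModule_Log(ctx, "notice", "timer fired: %s", (const char *)data);
}
RedisModuleTimerID tid =
    RedisModule_CreateTimer(ctx, 100, TimerHandler, (void *)"payload");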
@ -4752,7 +4752,7 @@ int RM_ExportSharedAPI(RedisModuleCtx *ctx, const char *apiname, void *func) {
RedisModuleSharedAPI *sapi = (RedisModuleSharedAPI*)zmalloc(sizeof(*sapi), MALLOC_LOCAL);
sapi->module = ctx->module;
sapi->func = func;
if (dictAdd(server.sharedapi, (char*)apiname, sapi) != DICT_OK) {
if (dictAdd(g_pserver->sharedapi, (char*)apiname, sapi) != DICT_OK) {
zfree(sapi);
return REDISMODULE_ERR;
}
@ -4793,7 +4793,7 @@ int RM_ExportSharedAPI(RedisModuleCtx *ctx, const char *apiname, void *func) {
* }
*/
void *RM_GetSharedAPI(RedisModuleCtx *ctx, const char *apiname) {
dictEntry *de = dictFind(server.sharedapi, apiname);
dictEntry *de = dictFind(g_pserver->sharedapi, apiname);
if (de == NULL) return NULL;
RedisModuleSharedAPI *sapi = (RedisModuleSharedAPI*)dictGetVal(de);
if (listSearchKey(sapi->module->usedby,ctx->module) == NULL) {
@ -4811,13 +4811,13 @@ void *RM_GetSharedAPI(RedisModuleCtx *ctx, const char *apiname) {
* The number of unregistered APIs is returned. */
int moduleUnregisterSharedAPI(RedisModule *module) {
int count = 0;
dictIterator *di = dictGetSafeIterator(server.sharedapi);
dictIterator *di = dictGetSafeIterator(g_pserver->sharedapi);
dictEntry *de;
while ((de = dictNext(di)) != NULL) {
const char *apiname = (const char*)dictGetKey(de);
RedisModuleSharedAPI *sapi = (RedisModuleSharedAPI*)dictGetVal(de);
if (sapi->module == module) {
dictDelete(server.sharedapi,apiname);
dictDelete(g_pserver->sharedapi,apiname);
zfree(sapi);
count++;
}
@ -5056,7 +5056,7 @@ int RM_CommandFilterArgDelete(RedisModuleCommandFilterCtx *fctx, int pos)
* Modules API internals
* -------------------------------------------------------------------------- */
/* server.moduleapi dictionary type. Only uses plain C strings since
/* g_pserver->moduleapi dictionary type. Only uses plain C strings since
* it gets queried by modules. */
uint64_t dictCStringKeyHash(const void *key) {
@ -5078,7 +5078,7 @@ dictType moduleAPIDictType = {
};
extern "C" int moduleRegisterApi(const char *funcname, void *funcptr) {
return dictAdd(server.moduleapi, (char*)funcname, funcptr);
return dictAdd(g_pserver->moduleapi, (char*)funcname, funcptr);
}
#define REGISTER_API(name) \
@ -5089,7 +5089,7 @@ void moduleRegisterCoreAPI(void);
void moduleInitModulesSystem(void) {
moduleUnblockedClients = listCreate();
server.loadmodule_queue = listCreate();
g_pserver->loadmodule_queue = listCreate();
modules = dictCreate(&modulesDictType,NULL);
/* Set up the keyspace notification subscriber list and static client */
@ -5102,7 +5102,7 @@ void moduleInitModulesSystem(void) {
moduleCommandFilters = listCreate();
moduleRegisterCoreAPI();
if (pipe(server.module_blocked_pipe) == -1) {
if (pipe(g_pserver->module_blocked_pipe) == -1) {
serverLog(LL_WARNING,
"Can't create the pipe for module blocking commands: %s",
strerror(errno));
@ -5110,8 +5110,8 @@ void moduleInitModulesSystem(void) {
}
/* Make the pipe non-blocking. This is just a best-effort mechanism
* and we do not want to block in either the read or the write half. */
anetNonBlock(NULL,server.module_blocked_pipe[0]);
anetNonBlock(NULL,server.module_blocked_pipe[1]);
anetNonBlock(NULL,g_pserver->module_blocked_pipe[0]);
anetNonBlock(NULL,g_pserver->module_blocked_pipe[1]);
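anetNonBlock wraps the usual fcntl dance; the POSIX equivalent, sketched:
/* POSIX equivalent of anetNonBlock (sketch). */
#include <fcntl.h>
static int makeNonBlocking(int fd) {
    int flags = fcntl(fd, F_GETFL);
    if (flags == -1) return -1;
    return fcntl(fd, F_SETFL, flags | O_NONBLOCK);
}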
/* Create the timers radix tree. */
Timers = raxNew();
@ -5122,7 +5122,7 @@ void moduleInitModulesSystem(void) {
pthread_rwlock_rdlock(&moduleGIL);
}
/* Load all the modules in the server.loadmodule_queue list, which is
/* Load all the modules in the g_pserver->loadmodule_queue list, which is
* populated by `loadmodule` directives in the configuration file.
* We can't load modules directly when processing the configuration file
* because the server must be fully initialized before loading modules.
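The queue is filled by redis.conf lines like the following (path and arguments illustrative):
# redis.conf (illustrative)
loadmodule /path/to/mymodule.so arg1 arg2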
@ -5135,7 +5135,7 @@ void moduleLoadFromQueue(void) {
listIter li;
listNode *ln;
listRewind(server.loadmodule_queue,&li);
listRewind(g_pserver->loadmodule_queue,&li);
while((ln = listNext(&li))) {
struct moduleLoadQueueEntry *loadmod = (moduleLoadQueueEntry*)ln->value;
if (moduleLoad(loadmod->path,(void **)loadmod->argv,loadmod->argc)
@ -5158,7 +5158,7 @@ void moduleFreeModuleStructure(struct RedisModule *module) {
void moduleUnregisterCommands(struct RedisModule *module) {
/* Unregister all the commands registered by this module. */
dictIterator *di = dictGetSafeIterator(server.commands);
dictIterator *di = dictGetSafeIterator(g_pserver->commands);
dictEntry *de;
while ((de = dictNext(di)) != NULL) {
struct redisCommand *cmd = (redisCommand*)dictGetVal(de);
@ -5167,8 +5167,8 @@ void moduleUnregisterCommands(struct RedisModule *module) {
(RedisModuleCommandProxy*)(unsigned long)cmd->getkeys_proc;
sds cmdname = (sds)cp->rediscmd->name;
if (cp->module == module) {
dictDelete(server.commands,cmdname);
dictDelete(server.orig_commands,cmdname);
dictDelete(g_pserver->commands,cmdname);
dictDelete(g_pserver->orig_commands,cmdname);
sdsfree(cmdname);
zfree(cp->rediscmd);
zfree(cp);
@ -5353,8 +5353,8 @@ size_t moduleCount(void) {
/* Register all the APIs we export. Keep this function at the end of the
* file so that it is easy to find when adding new entries. */
void moduleRegisterCoreAPI(void) {
server.moduleapi = dictCreate(&moduleAPIDictType,NULL);
server.sharedapi = dictCreate(&moduleAPIDictType,NULL);
g_pserver->moduleapi = dictCreate(&moduleAPIDictType,NULL);
g_pserver->sharedapi = dictCreate(&moduleAPIDictType,NULL);
REGISTER_API(Alloc);
REGISTER_API(Calloc);
REGISTER_API(Realloc);

View File

@ -122,7 +122,7 @@ void execCommand(client *c) {
int orig_argc;
struct redisCommand *orig_cmd;
int must_propagate = 0; /* Need to propagate MULTI/EXEC to AOF / slaves? */
int was_master = listLength(server.masters) == 0;
int was_master = listLength(g_pserver->masters) == 0;
if (!(c->flags & CLIENT_MULTI)) {
addReplyError(c,"EXEC without MULTI");
@ -147,7 +147,7 @@ void execCommand(client *c) {
* was initiated when the instance was a master or a writable replica and
* then the configuration changed (for example the instance was turned
* into a replica). */
if (!server.loading && listLength(server.masters) && server.repl_slave_ro &&
if (!g_pserver->loading && listLength(g_pserver->masters) && g_pserver->repl_slave_ro &&
!(c->flags & CLIENT_MASTER) && c->mstate.cmd_flags & CMD_WRITE)
{
addReplyError(c,
@ -178,7 +178,7 @@ void execCommand(client *c) {
must_propagate = 1;
}
call(c,server.loading ? CMD_CALL_NONE : CMD_CALL_FULL);
call(c,g_pserver->loading ? CMD_CALL_NONE : CMD_CALL_FULL);
/* Commands may alter argc/argv, restore mstate. */
c->mstate.commands[j].argc = c->argc;
@ -193,14 +193,14 @@ void execCommand(client *c) {
/* Make sure the EXEC command will be propagated as well if MULTI
* was already propagated. */
if (must_propagate) {
int is_master = listLength(server.masters) == 0;
server.dirty++;
int is_master = listLength(g_pserver->masters) == 0;
g_pserver->dirty++;
/* If inside the MULTI/EXEC block this instance was suddenly
* switched from master to slave (using the SLAVEOF command), the
* initial MULTI was propagated into the replication backlog, but the
* rest was not. We need to make sure to at least terminate the
* backlog with the final EXEC. */
if (server.repl_backlog && was_master && !is_master) {
if (g_pserver->repl_backlog && was_master && !is_master) {
const char *execcmd = "*1\r\n$4\r\nEXEC\r\n";
feedReplicationBacklog(execcmd,strlen(execcmd));
}
@ -212,8 +212,8 @@ handle_monitor:
* MULTI, EXEC, ... commands inside transaction ...
* Instead EXEC is flagged as CMD_SKIP_MONITOR in the command
* table, and we do it here with correct ordering. */
if (listLength(server.monitors) && !server.loading)
replicationFeedMonitors(c,server.monitors,c->db->id,c->argv,c->argc);
if (listLength(g_pserver->monitors) && !g_pserver->loading)
replicationFeedMonitors(c,g_pserver->monitors,c->db->id,c->argv,c->argc);
}
/* ===================== WATCH (CAS alike for MULTI/EXEC) ===================
@ -323,7 +323,7 @@ void touchWatchedKeysOnFlush(int dbid) {
serverAssert(GlobalLocksAcquired());
/* For every client, check all the watched keys */
listRewind(server.clients,&li1);
listRewind(g_pserver->clients,&li1);
while((ln = listNext(&li1))) {
client *c = (client*)listNodeValue(ln);
listRewind(c->watched_keys,&li2);

View File

@ -133,14 +133,14 @@ int listMatchObjects(void *a, void *b) {
/* This function links the client to the global linked list of clients.
* unlinkClient() does the opposite, among other things. */
void linkClient(client *c) {
listAddNodeTail(server.clients,c);
listAddNodeTail(g_pserver->clients,c);
/* Note that we remember the linked list node where the client is stored:
* this way removing the client in unlinkClient() will not require
* a linear scan, just a constant-time operation. */
c->client_list_node = listLast(server.clients);
if (c->fd != -1) atomicIncr(server.rgthreadvar[c->iel].cclients, 1);
c->client_list_node = listLast(g_pserver->clients);
if (c->fd != -1) atomicIncr(g_pserver->rgthreadvar[c->iel].cclients, 1);
uint64_t id = htonu64(c->id);
raxInsert(server.clients_index,(unsigned char*)&id,sizeof(id),c,NULL);
raxInsert(g_pserver->clients_index,(unsigned char*)&id,sizeof(id),c,NULL);
}
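The same idiom in STL terms, purely for intuition (not project code): insertion hands back a stable handle, so later removal needs no search.
std::list<client*> clients;
auto it = clients.insert(clients.end(), c);  // remember the node...
clients.erase(it);                           // ...erase in O(1) later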
client *createClient(int fd, int iel) {
@ -156,7 +156,7 @@ client *createClient(int fd, int iel) {
anetEnableTcpNoDelay(NULL,fd);
if (cserver.tcpkeepalive)
anetKeepAlive(NULL,fd,cserver.tcpkeepalive);
if (aeCreateFileEvent(server.rgthreadvar[iel].el,fd,AE_READABLE|AE_READ_THREADSAFE,
if (aeCreateFileEvent(g_pserver->rgthreadvar[iel].el,fd,AE_READABLE|AE_READ_THREADSAFE,
readQueryFromClient, c) == AE_ERR)
{
close(fd);
@ -167,7 +167,7 @@ client *createClient(int fd, int iel) {
selectDb(c,0);
uint64_t client_id;
atomicGetIncr(server.next_client_id,client_id,1);
atomicGetIncr(g_pserver->next_client_id,client_id,1);
c->iel = iel;
fastlock_init(&c->lock);
c->id = client_id;
@ -190,7 +190,7 @@ client *createClient(int fd, int iel) {
c->sentlenAsync = 0;
c->flags = 0;
c->fPendingAsyncWrite = FALSE;
c->ctime = c->lastinteraction = server.unixtime;
c->ctime = c->lastinteraction = g_pserver->unixtime;
/* If the default user does not require authentication, the user is
* directly authenticated. */
c->authenticated = (c->puser->flags & USER_FLAG_NOPASS) != 0;
@ -260,8 +260,8 @@ void clientInstallWriteHandler(client *c) {
* a system call. We'll only really install the write handler if
* we won't be able to write the whole reply at once. */
c->flags |= CLIENT_PENDING_WRITE;
std::unique_lock<fastlock> lockf(server.rgthreadvar[c->iel].lockPendingWrite);
listAddNodeHead(server.rgthreadvar[c->iel].clients_pending_write,c);
std::unique_lock<fastlock> lockf(g_pserver->rgthreadvar[c->iel].lockPendingWrite);
listAddNodeHead(g_pserver->rgthreadvar[c->iel].clients_pending_write,c);
}
}
@ -1056,14 +1056,14 @@ static void acceptCommonHandler(int fd, int flags, char *ip, int iel) {
* connection. Note that we create the client first and check this
* condition afterwards, since the socket is already set in non-blocking
* mode and we can send the error for free using the kernel I/O */
if (listLength(server.clients) > server.maxclients) {
if (listLength(g_pserver->clients) > g_pserver->maxclients) {
const char *err = "-ERR max number of clients reached\r\n";
/* That's a best effort error message, don't check write errors */
if (write(c->fd,err,strlen(err)) == -1) {
/* Nothing to do, Just to avoid the warning... */
}
server.stat_rejected_conn++;
g_pserver->stat_rejected_conn++;
freeClient(c);
return;
}
@ -1072,8 +1072,8 @@ static void acceptCommonHandler(int fd, int flags, char *ip, int iel) {
* is no password set, nor a specific interface is bound, we don't accept
* requests from non-loopback interfaces. Instead we try to explain to the
* user what to do to fix it if needed. */
if (server.protected_mode &&
server.bindaddr_count == 0 &&
if (g_pserver->protected_mode &&
g_pserver->bindaddr_count == 0 &&
DefaultUser->flags & USER_FLAG_NOPASS &&
!(flags & CLIENT_UNIX_SOCKET) &&
ip != NULL)
@ -1094,7 +1094,7 @@ static void acceptCommonHandler(int fd, int flags, char *ip, int iel) {
"change permanent. "
"2) Alternatively you can just disable the protected mode by "
"editing the Redis configuration file, and setting the protected "
"mode option to 'no', and then restarting the server. "
"mode option to 'no', and then restarting the g_pserver-> "
"3) If you started the server manually just for testing, restart "
"it with the '--protected-mode no' option. "
"4) Setup a bind address or an authentication password. "
@ -1103,13 +1103,13 @@ static void acceptCommonHandler(int fd, int flags, char *ip, int iel) {
if (write(c->fd,err,strlen(err)) == -1) {
/* Nothing to do, Just to avoid the warning... */
}
server.stat_rejected_conn++;
g_pserver->stat_rejected_conn++;
freeClient(c);
return;
}
}
server.stat_numconnections++;
g_pserver->stat_numconnections++;
c->flags |= flags;
}
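Any one of the following redis.conf settings clears the condition the message describes (values illustrative):
# redis.conf (illustrative values)
protected-mode no            # or:
bind 127.0.0.1 10.0.0.5      # or:
requirepass a-long-password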
@ -1120,11 +1120,11 @@ void acceptTcpHandler(aeEventLoop *el, int fd, void *privdata, int mask) {
UNUSED(privdata);
while(max--) {
cfd = anetTcpAccept(server.neterr, fd, cip, sizeof(cip), &cport);
cfd = anetTcpAccept(g_pserver->neterr, fd, cip, sizeof(cip), &cport);
if (cfd == ANET_ERR) {
if (errno != EWOULDBLOCK)
serverLog(LL_WARNING,
"Accepting client connection: %s", server.neterr);
"Accepting client connection: %s", g_pserver->neterr);
return;
}
serverLog(LL_VERBOSE,"Accepted %s:%d", cip, cport);
@ -1144,15 +1144,15 @@ void acceptUnixHandler(aeEventLoop *el, int fd, void *privdata, int mask) {
UNUSED(privdata);
while(max--) {
cfd = anetUnixAccept(server.neterr, fd);
cfd = anetUnixAccept(g_pserver->neterr, fd);
if (cfd == ANET_ERR) {
if (errno != EWOULDBLOCK)
serverLog(LL_WARNING,
"Accepting client connection: %s", server.neterr);
"Accepting client connection: %s", g_pserver->neterr);
return;
}
int ielCur = ielFromEventLoop(el);
serverLog(LL_VERBOSE,"Accepted connection to %s", server.unixsocket);
serverLog(LL_VERBOSE,"Accepted connection to %s", g_pserver->unixsocket);
aeAcquireLock();
acceptCommonHandler(cfd,CLIENT_UNIX_SOCKET,NULL, ielCur);
@ -1175,7 +1175,7 @@ void disconnectSlavesExcept(unsigned char *uuid)
listIter li;
listNode *ln;
listRewind(server.slaves, &li);
listRewind(g_pserver->slaves, &li);
while ((ln = listNext(&li))) {
client *c = (client*)listNodeValue(ln);
if (uuid == nullptr || !FUuidEqual(c->uuid, uuid))
@ -1209,8 +1209,8 @@ void unlinkClient(client *c) {
/* Remove from the list of active clients. */
if (c->client_list_node) {
uint64_t id = htonu64(c->id);
raxRemove(server.clients_index,(unsigned char*)&id,sizeof(id),NULL);
listDelNode(server.clients,c->client_list_node);
raxRemove(g_pserver->clients_index,(unsigned char*)&id,sizeof(id),NULL);
listDelNode(g_pserver->clients,c->client_list_node);
c->client_list_node = NULL;
}
@ -1225,29 +1225,29 @@ void unlinkClient(client *c) {
}
/* Unregister async I/O handlers and close the socket. */
aeDeleteFileEvent(server.rgthreadvar[c->iel].el,c->fd,AE_READABLE);
aeDeleteFileEvent(server.rgthreadvar[c->iel].el,c->fd,AE_WRITABLE);
aeDeleteFileEvent(g_pserver->rgthreadvar[c->iel].el,c->fd,AE_READABLE);
aeDeleteFileEvent(g_pserver->rgthreadvar[c->iel].el,c->fd,AE_WRITABLE);
close(c->fd);
c->fd = -1;
atomicDecr(server.rgthreadvar[c->iel].cclients, 1);
atomicDecr(g_pserver->rgthreadvar[c->iel].cclients, 1);
}
/* Remove from the list of pending writes if needed. */
if (c->flags & CLIENT_PENDING_WRITE) {
std::unique_lock<fastlock> lockf(server.rgthreadvar[c->iel].lockPendingWrite);
ln = listSearchKey(server.rgthreadvar[c->iel].clients_pending_write,c);
std::unique_lock<fastlock> lockf(g_pserver->rgthreadvar[c->iel].lockPendingWrite);
ln = listSearchKey(g_pserver->rgthreadvar[c->iel].clients_pending_write,c);
serverAssert(ln != NULL);
listDelNode(server.rgthreadvar[c->iel].clients_pending_write,ln);
listDelNode(g_pserver->rgthreadvar[c->iel].clients_pending_write,ln);
c->flags &= ~CLIENT_PENDING_WRITE;
}
/* When the client was just unblocked because of a blocking operation,
* remove it from the list of unblocked clients. */
if (c->flags & CLIENT_UNBLOCKED) {
ln = listSearchKey(server.rgthreadvar[c->iel].unblocked_clients,c);
ln = listSearchKey(g_pserver->rgthreadvar[c->iel].unblocked_clients,c);
serverAssert(ln != NULL);
listDelNode(server.rgthreadvar[c->iel].unblocked_clients,ln);
listDelNode(g_pserver->rgthreadvar[c->iel].unblocked_clients,ln);
c->flags &= ~CLIENT_UNBLOCKED;
}
@ -1256,11 +1256,11 @@ void unlinkClient(client *c) {
bool fFound = false;
for (int iel = 0; iel < cserver.cthreads; ++iel)
{
ln = listSearchKey(server.rgthreadvar[iel].clients_pending_asyncwrite,c);
ln = listSearchKey(g_pserver->rgthreadvar[iel].clients_pending_asyncwrite,c);
if (ln)
{
fFound = true;
listDelNode(server.rgthreadvar[iel].clients_pending_asyncwrite,ln);
listDelNode(g_pserver->rgthreadvar[iel].clients_pending_asyncwrite,ln);
}
}
serverAssert(fFound);
@ -1338,15 +1338,15 @@ void freeClient(client *c) {
if (c->repldbfd != -1) close(c->repldbfd);
if (c->replpreamble) sdsfree(c->replpreamble);
}
list *l = (c->flags & CLIENT_MONITOR) ? server.monitors : server.slaves;
list *l = (c->flags & CLIENT_MONITOR) ? g_pserver->monitors : g_pserver->slaves;
ln = listSearchKey(l,c);
serverAssert(ln != NULL);
listDelNode(l,ln);
/* We need to remember the time when we started to have zero
* attached slaves, as after some time we'll free the replication
* backlog. */
if (c->flags & CLIENT_SLAVE && listLength(server.slaves) == 0)
server.repl_no_slaves_since = server.unixtime;
if (c->flags & CLIENT_SLAVE && listLength(g_pserver->slaves) == 0)
g_pserver->repl_no_slaves_since = g_pserver->unixtime;
refreshGoodSlavesCount();
}
@ -1357,9 +1357,9 @@ void freeClient(client *c) {
/* If this client was scheduled for async freeing we need to remove it
* from the queue. */
if (c->flags & CLIENT_CLOSE_ASAP) {
ln = listSearchKey(server.clients_to_close,c);
ln = listSearchKey(g_pserver->clients_to_close,c);
serverAssert(ln != NULL);
listDelNode(server.clients_to_close,ln);
listDelNode(g_pserver->clients_to_close,ln);
}
/* Release other dynamically allocated client structure fields,
@ -1384,13 +1384,13 @@ void freeClientAsync(client *c) {
lock.arm(nullptr);
std::lock_guard<decltype(c->lock)> clientlock(c->lock);
c->flags |= CLIENT_CLOSE_ASAP;
listAddNodeTail(server.clients_to_close,c);
listAddNodeTail(g_pserver->clients_to_close,c);
}
void freeClientsInAsyncFreeQueue(int iel) {
listIter li;
listNode *ln;
listRewind(server.clients_to_close,&li);
listRewind(g_pserver->clients_to_close,&li);
while((ln = listNext(&li))) {
client *c = (client*)listNodeValue(ln);
@ -1399,8 +1399,8 @@ void freeClientsInAsyncFreeQueue(int iel) {
c->flags &= ~CLIENT_CLOSE_ASAP;
freeClient(c);
listDelNode(server.clients_to_close,ln);
listRewind(server.clients_to_close,&li);
listDelNode(g_pserver->clients_to_close,ln);
listRewind(g_pserver->clients_to_close,&li);
}
}
@ -1409,7 +1409,7 @@ void freeClientsInAsyncFreeQueue(int iel) {
* are not registered clients. */
client *lookupClientByID(uint64_t id) {
id = htonu64(id);
client *c = (client*)raxFind(server.clients_index,(unsigned char*)&id,sizeof(id));
client *c = (client*)raxFind(g_pserver->clients_index,(unsigned char*)&id,sizeof(id));
return (c == raxNotFound) ? NULL : c;
}
@ -1475,12 +1475,12 @@ int writeToClient(int fd, client *c, int handler_installed) {
* a slave (otherwise, on high-speed traffic, the replication
* buffer will grow indefinitely) */
if (totwritten > NET_MAX_WRITES_PER_EVENT &&
(server.maxmemory == 0 ||
zmalloc_used_memory() < server.maxmemory) &&
(g_pserver->maxmemory == 0 ||
zmalloc_used_memory() < g_pserver->maxmemory) &&
!(c->flags & CLIENT_SLAVE)) break;
}
__atomic_fetch_add(&server.stat_net_output_bytes, totwritten, __ATOMIC_RELAXED);
__atomic_fetch_add(&g_pserver->stat_net_output_bytes, totwritten, __ATOMIC_RELAXED);
if (nwritten == -1) {
if (errno == EAGAIN) {
nwritten = 0;
@ -1506,11 +1506,11 @@ int writeToClient(int fd, client *c, int handler_installed) {
* as an interaction, since we always send REPLCONF ACK commands
* that take some time to just fill the socket output buffer.
* We just rely on data / pings received for timeout detection. */
if (!(c->flags & CLIENT_MASTER)) c->lastinteraction = server.unixtime;
if (!(c->flags & CLIENT_MASTER)) c->lastinteraction = g_pserver->unixtime;
}
if (!clientHasPendingReplies(c)) {
c->sentlen = 0;
if (handler_installed) aeDeleteFileEvent(server.rgthreadvar[c->iel].el,c->fd,AE_WRITABLE);
if (handler_installed) aeDeleteFileEvent(g_pserver->rgthreadvar[c->iel].el,c->fd,AE_WRITABLE);
/* Close connection after entire reply has been sent. */
if (c->flags & CLIENT_CLOSE_AFTER_REPLY) {
@ -1578,8 +1578,8 @@ void ProcessPendingAsyncWrites()
* so that in the middle of receiving the query, and serving it
* to the client, we'll call beforeSleep() that will do the
* actual fsync of AOF to disk. AE_BARRIER ensures that. */
if (server.aof_state == AOF_ON &&
server.aof_fsync == AOF_FSYNC_ALWAYS)
if (g_pserver->aof_state == AOF_ON &&
g_pserver->aof_fsync == AOF_FSYNC_ALWAYS)
{
ae_flags |= AE_BARRIER;
}
@ -1589,7 +1589,7 @@ void ProcessPendingAsyncWrites()
continue;
asyncCloseClientOnOutputBufferLimitReached(c);
if (aeCreateRemoteFileEvent(server.rgthreadvar[c->iel].el, c->fd, ae_flags, sendReplyToClient, c, FALSE) == AE_ERR)
if (aeCreateRemoteFileEvent(g_pserver->rgthreadvar[c->iel].el, c->fd, ae_flags, sendReplyToClient, c, FALSE) == AE_ERR)
continue; // We can retry later in the cron
}
}
@ -1602,10 +1602,10 @@ int handleClientsWithPendingWrites(int iel) {
listIter li;
listNode *ln;
std::unique_lock<fastlock> lockf(server.rgthreadvar[iel].lockPendingWrite);
list *list = server.rgthreadvar[iel].clients_pending_write;
std::unique_lock<fastlock> lockf(g_pserver->rgthreadvar[iel].lockPendingWrite);
list *list = g_pserver->rgthreadvar[iel].clients_pending_write;
int processed = listLength(list);
serverAssert(iel == (serverTL - server.rgthreadvar));
serverAssert(iel == (serverTL - g_pserver->rgthreadvar));
listRewind(list,&li);
while((ln = listNext(&li))) {
@ -1635,13 +1635,13 @@ int handleClientsWithPendingWrites(int iel) {
* so that in the middle of receiving the query, and serving it
* to the client, we'll call beforeSleep() that will do the
* actual fsync of AOF to disk. AE_BARRIER ensures that. */
if (server.aof_state == AOF_ON &&
server.aof_fsync == AOF_FSYNC_ALWAYS)
if (g_pserver->aof_state == AOF_ON &&
g_pserver->aof_fsync == AOF_FSYNC_ALWAYS)
{
ae_flags |= AE_BARRIER;
}
if (aeCreateFileEvent(server.rgthreadvar[c->iel].el, c->fd, ae_flags, sendReplyToClient, c) == AE_ERR)
if (aeCreateFileEvent(g_pserver->rgthreadvar[c->iel].el, c->fd, ae_flags, sendReplyToClient, c) == AE_ERR)
freeClientAsync(c);
}
}
@ -1693,8 +1693,8 @@ void resetClient(client *c) {
void protectClient(client *c) {
c->flags |= CLIENT_PROTECTED;
AssertCorrectThread(c);
aeDeleteFileEvent(server.rgthreadvar[c->iel].el,c->fd,AE_READABLE);
aeDeleteFileEvent(server.rgthreadvar[c->iel].el,c->fd,AE_WRITABLE);
aeDeleteFileEvent(g_pserver->rgthreadvar[c->iel].el,c->fd,AE_READABLE);
aeDeleteFileEvent(g_pserver->rgthreadvar[c->iel].el,c->fd,AE_WRITABLE);
}
/* This will undo the client protection done by protectClient() */
@ -1702,7 +1702,7 @@ void unprotectClient(client *c) {
AssertCorrectThread(c);
if (c->flags & CLIENT_PROTECTED) {
c->flags &= ~CLIENT_PROTECTED;
aeCreateFileEvent(server.rgthreadvar[c->iel].el,c->fd,AE_READABLE|AE_READ_THREADSAFE,readQueryFromClient,c);
aeCreateFileEvent(g_pserver->rgthreadvar[c->iel].el,c->fd,AE_READABLE|AE_READ_THREADSAFE,readQueryFromClient,c);
if (clientHasPendingReplies(c)) clientInstallWriteHandler(c);
}
}
@ -1751,7 +1751,7 @@ int processInlineBuffer(client *c) {
* This is useful for a slave to ping back while loading a big
* RDB file. */
if (querylen == 0 && c->flags & CLIENT_SLAVE)
c->repl_ack_time = server.unixtime;
c->repl_ack_time = g_pserver->unixtime;
/* Move querybuffer position to the next query in the buffer. */
c->qb_pos += querylen+linefeed_chars;
@ -1888,7 +1888,7 @@ int processMultibulkBuffer(client *c) {
}
ok = string2ll(c->querybuf+c->qb_pos+1,newline-(c->querybuf+c->qb_pos+1),&ll);
if (!ok || ll < 0 || ll > server.proto_max_bulk_len) {
if (!ok || ll < 0 || ll > g_pserver->proto_max_bulk_len) {
addReplyError(c,"Protocol error: invalid bulk length");
setProtocolError("invalid bulk length",c);
return C_ERR;
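For reference, the framing being validated here: every bulk length `$<len>` must parse as an integer in [0, proto-max-bulk-len]. A `SET key val` request arrives on the wire as:
*3\r\n$3\r\nSET\r\n$3\r\nkey\r\n$3\r\nval\r\n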
@ -1971,7 +1971,7 @@ void processInputBuffer(client *c, int callFlags) {
* condition on the slave. We want just to accumulate the replication
* stream (instead of replying -BUSY like we do with other clients) and
* later resume the processing. */
if (server.lua_timedout && c->flags & CLIENT_MASTER) break;
if (g_pserver->lua_timedout && c->flags & CLIENT_MASTER) break;
/* CLIENT_CLOSE_AFTER_REPLY closes the connection once the reply is
* written to the client. Make sure to not let the reply grow after
@ -2049,10 +2049,10 @@ void processInputBufferAndReplicate(client *c) {
processInputBuffer(c, CMD_CALL_FULL);
size_t applied = c->reploff - prev_offset;
if (applied) {
if (!server.fActiveReplica)
if (!g_pserver->fActiveReplica)
{
aeAcquireLock();
replicationFeedSlavesFromMasterStream(server.slaves,
replicationFeedSlavesFromMasterStream(g_pserver->slaves,
c->pending_querybuf, applied);
aeReleaseLock();
}
@ -2124,9 +2124,9 @@ void readQueryFromClient(aeEventLoop *el, int fd, void *privdata, int mask) {
}
sdsIncrLen(c->querybuf,nread);
c->lastinteraction = server.unixtime;
c->lastinteraction = g_pserver->unixtime;
if (c->flags & CLIENT_MASTER) c->read_reploff += nread;
server.stat_net_input_bytes += nread;
g_pserver->stat_net_input_bytes += nread;
if (sdslen(c->querybuf) > cserver.client_max_querybuf_len) {
sds ci = catClientInfoString(sdsempty(),c), bytes = sdsempty();
@ -2158,7 +2158,7 @@ void getClientsMaxBuffers(unsigned long *longest_output_list,
listIter li;
unsigned long lol = 0, bib = 0;
listRewind(server.clients,&li);
listRewind(g_pserver->clients,&li);
while ((ln = listNext(&li)) != NULL) {
c = (client*)listNodeValue(ln);
@ -2184,7 +2184,7 @@ void genClientPeerId(client *client, char *peerid,
size_t peerid_len) {
if (client->flags & CLIENT_UNIX_SOCKET) {
/* Unix socket client. */
snprintf(peerid,peerid_len,"%s:0",server.unixsocket);
snprintf(peerid,peerid_len,"%s:0",g_pserver->unixsocket);
} else {
/* TCP client. */
anetFormatPeer(client->fd,peerid,peerid_len);
@ -2231,7 +2231,7 @@ sds catClientInfoString(sds s, client *client) {
if (p == flags) *p++ = 'N';
*p++ = '\0';
emask = client->fd == -1 ? 0 : aeGetFileEvents(server.rgthreadvar[client->iel].el,client->fd);
emask = client->fd == -1 ? 0 : aeGetFileEvents(g_pserver->rgthreadvar[client->iel].el,client->fd);
p = events;
if (emask & AE_READABLE) *p++ = 'r';
if (emask & AE_WRITABLE) *p++ = 'w';
@ -2242,8 +2242,8 @@ sds catClientInfoString(sds s, client *client) {
getClientPeerId(client),
client->fd,
client->name ? (char*)ptrFromObj(client->name) : "",
(long long)(server.unixtime - client->ctime),
(long long)(server.unixtime - client->lastinteraction),
(long long)(g_pserver->unixtime - client->ctime),
(long long)(g_pserver->unixtime - client->lastinteraction),
flags,
client->db->id,
(int) dictSize(client->pubsub_channels),
@ -2262,9 +2262,9 @@ sds getAllClientsInfoString(int type) {
listNode *ln;
listIter li;
client *client;
sds o = sdsnewlen(SDS_NOINIT,200*listLength(server.clients));
sds o = sdsnewlen(SDS_NOINIT,200*listLength(g_pserver->clients));
sdsclear(o);
listRewind(server.clients,&li);
listRewind(g_pserver->clients,&li);
while ((ln = listNext(&li)) != NULL) {
client = reinterpret_cast<struct client*>(listNodeValue(ln));
if (type != -1 && getClientType(client) != type) continue;
@ -2426,7 +2426,7 @@ NULL
}
/* Iterate clients killing all the matching clients. */
listRewind(server.clients,&li);
listRewind(g_pserver->clients,&li);
while ((ln = listNext(&li)) != NULL) {
client = (struct client*)listNodeValue(ln);
if (addr && strcmp(getClientPeerId(client),addr) != 0) continue;
@ -2566,13 +2566,13 @@ void helloCommand(client *c) {
addReplyLongLong(c,c->id);
addReplyBulkCString(c,"mode");
if (server.sentinel_mode) addReplyBulkCString(c,"sentinel");
if (server.cluster_enabled) addReplyBulkCString(c,"cluster");
if (g_pserver->sentinel_mode) addReplyBulkCString(c,"sentinel");
if (g_pserver->cluster_enabled) addReplyBulkCString(c,"cluster");
else addReplyBulkCString(c,"standalone");
if (!server.sentinel_mode) {
if (!g_pserver->sentinel_mode) {
addReplyBulkCString(c,"role");
addReplyBulkCString(c,listLength(server.masters) ? "replica" : "master");
addReplyBulkCString(c,listLength(g_pserver->masters) ? "replica" : "master");
}
addReplyBulkCString(c,"modules");
@ -2750,10 +2750,10 @@ int checkClientOutputBufferLimits(client *c) {
* specified number of seconds. */
if (soft) {
if (c->obuf_soft_limit_reached_time == 0) {
c->obuf_soft_limit_reached_time = server.unixtime;
c->obuf_soft_limit_reached_time = g_pserver->unixtime;
soft = 0; /* First time we see the soft limit reached */
} else {
time_t elapsed = server.unixtime - c->obuf_soft_limit_reached_time;
time_t elapsed = g_pserver->unixtime - c->obuf_soft_limit_reached_time;
if (elapsed <=
cserver.client_obuf_limits[clientType].soft_limit_seconds) {
@ -2797,7 +2797,7 @@ void flushSlavesOutputBuffers(void) {
listIter li;
listNode *ln;
listRewind(server.slaves,&li);
listRewind(g_pserver->slaves,&li);
while((ln = listNext(&li))) {
client *slave = (client*)listNodeValue(ln);
int events;
@ -2811,7 +2811,7 @@ void flushSlavesOutputBuffers(void) {
* of put_online_on_ack is to postpone the moment it is installed.
* This is what we want since slaves in this state should not receive
* writes before the first ACK. */
events = aeGetFileEvents(server.rgthreadvar[slave->iel].el,slave->fd);
events = aeGetFileEvents(g_pserver->rgthreadvar[slave->iel].el,slave->fd);
if (events & AE_WRITABLE &&
slave->replstate == SLAVE_STATE_ONLINE &&
clientHasPendingReplies(slave))
@ -2839,27 +2839,27 @@ void flushSlavesOutputBuffers(void) {
* than the time left for the previous pause, no change is made to the
* remaining duration. */
void pauseClients(mstime_t end) {
if (!server.clients_paused || end > server.clients_pause_end_time)
server.clients_pause_end_time = end;
server.clients_paused = 1;
if (!g_pserver->clients_paused || end > g_pserver->clients_pause_end_time)
g_pserver->clients_pause_end_time = end;
g_pserver->clients_paused = 1;
}
/* Return non-zero if clients are currently paused. As a side effect the
* function checks if the pause time was reached and clears it. */
int clientsArePaused(void) {
if (server.clients_paused &&
server.clients_pause_end_time < server.mstime)
if (g_pserver->clients_paused &&
g_pserver->clients_pause_end_time < g_pserver->mstime)
{
aeAcquireLock();
listNode *ln;
listIter li;
client *c;
server.clients_paused = 0;
g_pserver->clients_paused = 0;
/* Put all the clients in the unblocked clients queue in order to
* force the re-processing of the input buffer if any. */
listRewind(server.clients,&li);
listRewind(g_pserver->clients,&li);
while ((ln = listNext(&li)) != NULL) {
c = (client*)listNodeValue(ln);
@ -2870,7 +2870,7 @@ int clientsArePaused(void) {
}
aeReleaseLock();
}
return server.clients_paused;
return g_pserver->clients_paused;
}
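The pause end time is set via the CLIENT PAUSE command; for example (duration illustrative):
redis-cli CLIENT PAUSE 5000   # suspend command processing for up to 5000 ms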
/* This function is called by Redis in order to process a few events from
@ -2892,7 +2892,7 @@ int processEventsWhileBlocked(int iel) {
aeReleaseLock();
while (iterations--) {
int events = 0;
events += aeProcessEvents(server.rgthreadvar[iel].el, AE_FILE_EVENTS|AE_DONT_WAIT);
events += aeProcessEvents(g_pserver->rgthreadvar[iel].el, AE_FILE_EVENTS|AE_DONT_WAIT);
events += handleClientsWithPendingWrites(iel);
if (!events) break;
count += events;

View File

@ -109,12 +109,12 @@ void notifyKeyspaceEvent(int type, const char *event, robj *key, int dbid) {
moduleNotifyKeyspaceEvent(type, event, key, dbid);
/* If notifications for this class of events are off, return ASAP. */
if (!(server.notify_keyspace_events & type)) return;
if (!(g_pserver->notify_keyspace_events & type)) return;
eventobj = createStringObject(event,strlen(event));
/* __keyspace@<db>__:<key> <event> notifications. */
if (server.notify_keyspace_events & NOTIFY_KEYSPACE) {
if (g_pserver->notify_keyspace_events & NOTIFY_KEYSPACE) {
chan = sdsnewlen("__keyspace@",11);
len = ll2string(buf,sizeof(buf),dbid);
chan = sdscatlen(chan, buf, len);
@ -126,7 +126,7 @@ void notifyKeyspaceEvent(int type, const char *event, robj *key, int dbid) {
}
/* __keyevent@<db>__:<event> <key> notifications. */
if (server.notify_keyspace_events & NOTIFY_KEYEVENT) {
if (g_pserver->notify_keyspace_events & NOTIFY_KEYEVENT) {
chan = sdsnewlen("__keyevent@",11);
if (len == -1) len = ll2string(buf,sizeof(buf),dbid);
chan = sdscatlen(chan, buf, len);
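Concretely, with notifications enabled, a SET of mykey in db 0 publishes on both channel families (session illustrative):
# assuming: CONFIG SET notify-keyspace-events KEA
SUBSCRIBE __keyspace@0__:mykey    # receives message "set"
SUBSCRIBE __keyevent@0__:set      # receives message "mykey"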

View File

@ -48,7 +48,7 @@ robj *createObject(int type, void *ptr) {
/* Set the LRU to the current lruclock (minutes resolution), or
* alternatively the LFU counter. */
if (server.maxmemory_policy & MAXMEMORY_FLAG_LFU) {
if (g_pserver->maxmemory_policy & MAXMEMORY_FLAG_LFU) {
o->lru = (LFUGetTimeInMinutes()<<8) | LFU_INIT_VAL;
} else {
o->lru = LRU_CLOCK();
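The single 24-bit lru field is overloaded by policy; under LFU it packs a minutes clock above a log-scale counter. A decoding sketch (field widths per the shifts above):
/* o->lru under an LFU policy, 24 bits total:
 *   bits 23..8  last touch time, minutes resolution (LFUGetTimeInMinutes())
 *   bits  7..0  logarithmic hit counter, seeded with LFU_INIT_VAL
 * Under LRU policies the same field simply holds LRU_CLOCK(). */
uint32_t lru     = (LFUGetTimeInMinutes() << 8) | LFU_INIT_VAL;
uint16_t minutes = lru >> 8;     /* time half    */
uint8_t  counter = lru & 0xFF;   /* counter half */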
@ -94,7 +94,7 @@ robj *createEmbeddedStringObject(const char *ptr, size_t len) {
o->refcount = 1;
o->mvcc_tstamp = OBJ_MVCC_INVALID;
if (server.maxmemory_policy & MAXMEMORY_FLAG_LFU) {
if (g_pserver->maxmemory_policy & MAXMEMORY_FLAG_LFU) {
o->lru = (LFUGetTimeInMinutes()<<8) | LFU_INIT_VAL;
} else {
o->lru = LRU_CLOCK();
@ -138,8 +138,8 @@ robj *createStringObject(const char *ptr, size_t len) {
robj *createStringObjectFromLongLongWithOptions(long long value, int valueobj) {
robj *o;
if (server.maxmemory == 0 ||
!(server.maxmemory_policy & MAXMEMORY_FLAG_NO_SHARED_INTEGERS))
if (g_pserver->maxmemory == 0 ||
!(g_pserver->maxmemory_policy & MAXMEMORY_FLAG_NO_SHARED_INTEGERS))
{
/* If the maxmemory policy permits, we can still return shared integers
* even if valueobj is true. */
@ -463,8 +463,8 @@ robj *tryObjectEncoding(robj *o) {
* Note that we avoid using shared integers when maxmemory is used
* because every object needs to have a private LRU field for the LRU
* algorithm to work well. */
if ((server.maxmemory == 0 ||
!(server.maxmemory_policy & MAXMEMORY_FLAG_NO_SHARED_INTEGERS)) &&
if ((g_pserver->maxmemory == 0 ||
!(g_pserver->maxmemory_policy & MAXMEMORY_FLAG_NO_SHARED_INTEGERS)) &&
value >= 0 &&
value < OBJ_SHARED_INTEGERS)
{
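When that guard passes, the caller falls through to the preallocated shared.integers pool instead of allocating; sketched:
/* Shared-integer fast path (sketch of what follows the guard above). */
if (value >= 0 && value < OBJ_SHARED_INTEGERS) {
    incrRefCount(shared.integers[value]);   /* refcounted singleton,   */
    return shared.integers[value];          /* no per-value allocation */
}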
@ -966,39 +966,39 @@ struct redisMemOverhead *getMemoryOverheadData(void) {
struct redisMemOverhead *mh = (redisMemOverhead*)zcalloc(sizeof(*mh), MALLOC_LOCAL);
mh->total_allocated = zmalloc_used;
mh->startup_allocated = server.initial_memory_usage;
mh->peak_allocated = server.stat_peak_memory;
mh->startup_allocated = g_pserver->initial_memory_usage;
mh->peak_allocated = g_pserver->stat_peak_memory;
mh->total_frag =
(float)server.cron_malloc_stats.process_rss / server.cron_malloc_stats.zmalloc_used;
(float)g_pserver->cron_malloc_stats.process_rss / g_pserver->cron_malloc_stats.zmalloc_used;
mh->total_frag_bytes =
server.cron_malloc_stats.process_rss - server.cron_malloc_stats.zmalloc_used;
g_pserver->cron_malloc_stats.process_rss - g_pserver->cron_malloc_stats.zmalloc_used;
mh->allocator_frag =
(float)server.cron_malloc_stats.allocator_active / server.cron_malloc_stats.allocator_allocated;
(float)g_pserver->cron_malloc_stats.allocator_active / g_pserver->cron_malloc_stats.allocator_allocated;
mh->allocator_frag_bytes =
server.cron_malloc_stats.allocator_active - server.cron_malloc_stats.allocator_allocated;
g_pserver->cron_malloc_stats.allocator_active - g_pserver->cron_malloc_stats.allocator_allocated;
mh->allocator_rss =
(float)server.cron_malloc_stats.allocator_resident / server.cron_malloc_stats.allocator_active;
(float)g_pserver->cron_malloc_stats.allocator_resident / g_pserver->cron_malloc_stats.allocator_active;
mh->allocator_rss_bytes =
server.cron_malloc_stats.allocator_resident - server.cron_malloc_stats.allocator_active;
g_pserver->cron_malloc_stats.allocator_resident - g_pserver->cron_malloc_stats.allocator_active;
mh->rss_extra =
(float)server.cron_malloc_stats.process_rss / server.cron_malloc_stats.allocator_resident;
(float)g_pserver->cron_malloc_stats.process_rss / g_pserver->cron_malloc_stats.allocator_resident;
mh->rss_extra_bytes =
server.cron_malloc_stats.process_rss - server.cron_malloc_stats.allocator_resident;
g_pserver->cron_malloc_stats.process_rss - g_pserver->cron_malloc_stats.allocator_resident;
mem_total += server.initial_memory_usage;
mem_total += g_pserver->initial_memory_usage;
mem = 0;
if (server.repl_backlog)
mem += zmalloc_size(server.repl_backlog);
if (g_pserver->repl_backlog)
mem += zmalloc_size(g_pserver->repl_backlog);
mh->repl_backlog = mem;
mem_total += mem;
mem = 0;
if (listLength(server.slaves)) {
if (listLength(g_pserver->slaves)) {
listIter li;
listNode *ln;
listRewind(server.slaves,&li);
listRewind(g_pserver->slaves,&li);
while((ln = listNext(&li))) {
client *c = (client*)listNodeValue(ln);
if (c->flags & CLIENT_CLOSE_ASAP)
@ -1012,11 +1012,11 @@ struct redisMemOverhead *getMemoryOverheadData(void) {
mem_total+=mem;
mem = 0;
if (listLength(server.clients)) {
if (listLength(g_pserver->clients)) {
listIter li;
listNode *ln;
listRewind(server.clients,&li);
listRewind(g_pserver->clients,&li);
while((ln = listNext(&li))) {
client *c = (client*)listNodeValue(ln);
if (c->flags & CLIENT_SLAVE && !(c->flags & CLIENT_MONITOR))
@ -1030,27 +1030,27 @@ struct redisMemOverhead *getMemoryOverheadData(void) {
mem_total+=mem;
mem = 0;
if (server.aof_state != AOF_OFF) {
mem += sdsalloc(server.aof_buf);
if (g_pserver->aof_state != AOF_OFF) {
mem += sdsalloc(g_pserver->aof_buf);
mem += aofRewriteBufferSize();
}
mh->aof_buffer = mem;
mem_total+=mem;
mem = server.lua_scripts_mem;
mem += dictSize(server.lua_scripts) * sizeof(dictEntry) +
dictSlots(server.lua_scripts) * sizeof(dictEntry*);
mem += dictSize(server.repl_scriptcache_dict) * sizeof(dictEntry) +
dictSlots(server.repl_scriptcache_dict) * sizeof(dictEntry*);
if (listLength(server.repl_scriptcache_fifo) > 0) {
mem += listLength(server.repl_scriptcache_fifo) * (sizeof(listNode) +
sdsZmallocSize((sds)listNodeValue(listFirst(server.repl_scriptcache_fifo))));
mem = g_pserver->lua_scripts_mem;
mem += dictSize(g_pserver->lua_scripts) * sizeof(dictEntry) +
dictSlots(g_pserver->lua_scripts) * sizeof(dictEntry*);
mem += dictSize(g_pserver->repl_scriptcache_dict) * sizeof(dictEntry) +
dictSlots(g_pserver->repl_scriptcache_dict) * sizeof(dictEntry*);
if (listLength(g_pserver->repl_scriptcache_fifo) > 0) {
mem += listLength(g_pserver->repl_scriptcache_fifo) * (sizeof(listNode) +
sdsZmallocSize((sds)listNodeValue(listFirst(g_pserver->repl_scriptcache_fifo))));
}
mh->lua_caches = mem;
mem_total+=mem;
for (j = 0; j < cserver.dbnum; j++) {
redisDb *db = server.db+j;
redisDb *db = g_pserver->db+j;
long long keyscount = dictSize(db->pdict);
if (keyscount==0) continue;
@ -1146,8 +1146,8 @@ sds getMemoryDoctorReport(void) {
}
/* Clients using more than 200k each on average? */
long numslaves = listLength(server.slaves);
long numclients = listLength(server.clients)-numslaves;
long numslaves = listLength(g_pserver->slaves);
long numclients = listLength(g_pserver->clients)-numslaves;
if ((numclients > 0) && mh->clients_normal / numclients > (1024*200)) {
big_client_buf = 1;
num_reports++;
@ -1160,7 +1160,7 @@ sds getMemoryDoctorReport(void) {
}
/* Too many scripts are cached? */
if (dictSize(server.lua_scripts) > 1000) {
if (dictSize(g_pserver->lua_scripts) > 1000) {
many_scripts = 1;
num_reports++;
}
@ -1210,14 +1210,14 @@ sds getMemoryDoctorReport(void) {
return s;
}
/* Set the object LRU/LFU depending on server.maxmemory_policy.
/* Set the object LRU/LFU depending on g_pserver->maxmemory_policy.
* The lfu_freq arg is only relevant if policy is MAXMEMORY_FLAG_LFU.
* The lru_idle and lru_clock args are only relevant if policy
* is MAXMEMORY_FLAG_LRU.
* Either or both of them may be <0, in which case nothing is set. */
void objectSetLRUOrLFU(robj *val, long long lfu_freq, long long lru_idle,
long long lru_clock) {
if (server.maxmemory_policy & MAXMEMORY_FLAG_LFU) {
if (g_pserver->maxmemory_policy & MAXMEMORY_FLAG_LFU) {
if (lfu_freq >= 0) {
serverAssert(lfu_freq <= 255);
val->lru = (LFUGetTimeInMinutes()<<8) | lfu_freq;
@ -1283,7 +1283,7 @@ NULL
} else if (!strcasecmp(szFromObj(c->argv[1]),"idletime") && c->argc == 3) {
if ((o = objectCommandLookupOrReply(c,c->argv[2],shared.null[c->resp]))
== NULL) return;
if (server.maxmemory_policy & MAXMEMORY_FLAG_LFU) {
if (g_pserver->maxmemory_policy & MAXMEMORY_FLAG_LFU) {
addReplyError(c,"An LFU maxmemory policy is selected, idle time not tracked. Please note that when switching between policies at runtime LRU and LFU data will take some time to adjust.");
return;
}
@ -1291,7 +1291,7 @@ NULL
} else if (!strcasecmp(szFromObj(c->argv[1]),"freq") && c->argc == 3) {
if ((o = objectCommandLookupOrReply(c,c->argv[2],shared.null[c->resp]))
== NULL) return;
if (!(server.maxmemory_policy & MAXMEMORY_FLAG_LFU)) {
if (!(g_pserver->maxmemory_policy & MAXMEMORY_FLAG_LFU)) {
addReplyError(c,"An LFU maxmemory policy is not selected, access frequency not tracked. Please note that when switching between policies at runtime LRU and LFU data will take some time to adjust.");
return;
}
@ -1315,7 +1315,7 @@ void memoryCommand(client *c) {
"DOCTOR - Return memory problems reports.",
"MALLOC-STATS -- Return internal statistics report from the memory allocator.",
"PURGE -- Attempt to purge dirty pages for reclamation by the allocator.",
"STATS -- Return information about the memory usage of the server.",
"STATS -- Return information about the memory usage of the g_pserver->",
"USAGE <key> [SAMPLES <count>] -- Return memory in bytes used by <key> and its value. Nested values are sampled up to <count> times (default: 5).",
NULL
};
@ -1409,13 +1409,13 @@ NULL
addReplyDouble(c,mh->peak_perc);
addReplyBulkCString(c,"allocator.allocated");
addReplyLongLong(c,server.cron_malloc_stats.allocator_allocated);
addReplyLongLong(c,g_pserver->cron_malloc_stats.allocator_allocated);
addReplyBulkCString(c,"allocator.active");
addReplyLongLong(c,server.cron_malloc_stats.allocator_active);
addReplyLongLong(c,g_pserver->cron_malloc_stats.allocator_active);
addReplyBulkCString(c,"allocator.resident");
addReplyLongLong(c,server.cron_malloc_stats.allocator_resident);
addReplyLongLong(c,g_pserver->cron_malloc_stats.allocator_resident);
addReplyBulkCString(c,"allocator-fragmentation.ratio");
addReplyDouble(c,mh->allocator_frag);

View File

@ -152,10 +152,10 @@ int pubsubSubscribeChannel(client *c, robj *channel) {
retval = 1;
incrRefCount(channel);
/* Add the client to the channel -> list of clients hash table */
de = dictFind(server.pubsub_channels,channel);
de = dictFind(g_pserver->pubsub_channels,channel);
if (de == NULL) {
clients = listCreate();
dictAdd(server.pubsub_channels,channel,clients);
dictAdd(g_pserver->pubsub_channels,channel,clients);
incrRefCount(channel);
} else {
clients = (list*)dictGetVal(de);
@ -181,7 +181,7 @@ int pubsubUnsubscribeChannel(client *c, robj *channel, int notify) {
if (dictDelete(c->pubsub_channels,channel) == DICT_OK) {
retval = 1;
/* Remove the client from the channel -> clients list hash table */
de = dictFind(server.pubsub_channels,channel);
de = dictFind(g_pserver->pubsub_channels,channel);
serverAssertWithInfo(c,NULL,de != NULL);
clients = (list*)dictGetVal(de);
ln = listSearchKey(clients,c);
@ -191,7 +191,7 @@ int pubsubUnsubscribeChannel(client *c, robj *channel, int notify) {
/* Free the list and associated hash entry if this was
* the last client, so that it is not possible to abuse
* Redis PUBSUB by creating millions of channels that leak memory. */
dictDelete(server.pubsub_channels,channel);
dictDelete(g_pserver->pubsub_channels,channel);
}
}
/* Notify the client */
@ -212,7 +212,7 @@ int pubsubSubscribePattern(client *c, robj *pattern) {
pat = (pubsubPattern*)zmalloc(sizeof(*pat), MALLOC_LOCAL);
pat->pattern = getDecodedObject(pattern);
pat->pclient = c;
listAddNodeTail(server.pubsub_patterns,pat);
listAddNodeTail(g_pserver->pubsub_patterns,pat);
}
/* Notify the client */
addReplyPubsubPatSubscribed(c,pattern);
@ -232,8 +232,8 @@ int pubsubUnsubscribePattern(client *c, robj *pattern, int notify) {
listDelNode(c->pubsub_patterns,ln);
pat.pclient = c;
pat.pattern = pattern;
ln = listSearchKey(server.pubsub_patterns,&pat);
listDelNode(server.pubsub_patterns,ln);
ln = listSearchKey(g_pserver->pubsub_patterns,&pat);
listDelNode(g_pserver->pubsub_patterns,ln);
}
/* Notify the client */
if (notify) addReplyPubsubPatUnsubscribed(c,pattern);
@ -284,7 +284,7 @@ int pubsubPublishMessage(robj *channel, robj *message) {
listIter li;
/* Send to clients listening for that channel */
de = dictFind(server.pubsub_channels,channel);
de = dictFind(g_pserver->pubsub_channels,channel);
if (de) {
list *list = reinterpret_cast<::list*>(dictGetVal(de));
listNode *ln;
@ -300,8 +300,8 @@ int pubsubPublishMessage(robj *channel, robj *message) {
}
}
/* Send to clients listening to matching channels */
if (listLength(server.pubsub_patterns)) {
listRewind(server.pubsub_patterns,&li);
if (listLength(g_pserver->pubsub_patterns)) {
listRewind(g_pserver->pubsub_patterns,&li);
channel = getDecodedObject(channel);
while ((ln = listNext(&li)) != NULL) {
pubsubPattern *pat = (pubsubPattern*)ln->value;
@ -371,7 +371,7 @@ void punsubscribeCommand(client *c) {
void publishCommand(client *c) {
int receivers = pubsubPublishMessage(c->argv[1],c->argv[2]);
if (server.cluster_enabled)
if (g_pserver->cluster_enabled)
clusterPropagatePublish(c->argv[1],c->argv[2]);
else
forceCommandPropagation(c,PROPAGATE_REPL);
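From a client's perspective the two delivery paths above look like this (channel names illustrative):
SUBSCRIBE news.tech         # exact match: pubsub_channels dict
PSUBSCRIBE news.*           # glob match:  pubsub_patterns list
PUBLISH news.tech "hello"   # reply is the total receiver count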
@ -393,7 +393,7 @@ NULL
{
/* PUBSUB CHANNELS [<pattern>] */
sds pat = (c->argc == 2) ? NULL : szFromObj(c->argv[2]);
dictIterator *di = dictGetIterator(server.pubsub_channels);
dictIterator *di = dictGetIterator(g_pserver->pubsub_channels);
dictEntry *de;
long mblen = 0;
void *replylen;
@ -418,14 +418,14 @@ NULL
addReplyArrayLen(c,(c->argc-2)*2);
for (j = 2; j < c->argc; j++) {
list *l = (list*)dictFetchValue(server.pubsub_channels,c->argv[j]);
list *l = (list*)dictFetchValue(g_pserver->pubsub_channels,c->argv[j]);
addReplyBulk(c,c->argv[j]);
addReplyLongLong(c,l ? listLength(l) : 0);
}
} else if (!strcasecmp(szFromObj(c->argv[1]),"numpat") && c->argc == 2) {
/* PUBSUB NUMPAT */
addReplyLongLong(c,listLength(server.pubsub_patterns));
addReplyLongLong(c,listLength(g_pserver->pubsub_patterns));
} else {
addReplySubcommandSyntaxError(c);
}

View File

@ -62,7 +62,7 @@ void rdbCheckThenExit(int linenum, const char *reason, ...) {
if (!rdbCheckMode) {
serverLog(LL_WARNING, "%s", msg);
const char * argv[2] = {"",server.rdb_filename};
const char * argv[2] = {"",g_pserver->rdb_filename};
redis_check_rdb_main(2,argv,NULL);
} else {
rdbCheckError("%s",msg);
@ -418,7 +418,7 @@ ssize_t rdbSaveRawString(rio *rdb, const unsigned char *s, size_t len) {
/* Try LZF compression - under 20 bytes it's unable to compress even
* aaaaaaaaaaaaaaaaaa so skip it */
if (server.rdb_compression && len > 20) {
if (g_pserver->rdb_compression && len > 20) {
n = rdbSaveLzfStringObject(rdb,(const unsigned char*)s,len);
if (n == -1) return -1;
if (n > 0) return n;
@ -1032,8 +1032,8 @@ size_t rdbSavedObjectLen(robj *o) {
* On success, if the key was actually saved, 1 is returned; otherwise 0
* is returned (the key was already expired). */
int rdbSaveKeyValuePair(rio *rdb, robj *key, robj *val, long long expiretime) {
int savelru = server.maxmemory_policy & MAXMEMORY_FLAG_LRU;
int savelfu = server.maxmemory_policy & MAXMEMORY_FLAG_LFU;
int savelru = g_pserver->maxmemory_policy & MAXMEMORY_FLAG_LRU;
int savelfu = g_pserver->maxmemory_policy & MAXMEMORY_FLAG_LFU;
/* Save the expire time */
if (expiretime != -1) {
@ -1087,9 +1087,9 @@ int rdbSaveInfoAuxFields(rio *rdb, int flags, rdbSaveInfo *rsi) {
if (rsi) {
if (rdbSaveAuxFieldStrInt(rdb,"repl-stream-db",rsi->repl_stream_db)
== -1) return -1;
if (rdbSaveAuxFieldStrStr(rdb,"repl-id",server.replid)
if (rdbSaveAuxFieldStrStr(rdb,"repl-id",g_pserver->replid)
== -1) return -1;
if (rdbSaveAuxFieldStrInt(rdb,"repl-offset",server.master_repl_offset)
if (rdbSaveAuxFieldStrInt(rdb,"repl-offset",g_pserver->master_repl_offset)
== -1) return -1;
}
if (rdbSaveAuxFieldStrInt(rdb,"aof-preamble",aof_preamble) == -1) return -1;
@ -1112,14 +1112,14 @@ int rdbSaveRio(rio *rdb, int *error, int flags, rdbSaveInfo *rsi) {
uint64_t cksum;
size_t processed = 0;
if (server.rdb_checksum)
if (g_pserver->rdb_checksum)
rdb->update_cksum = rioGenericUpdateChecksum;
snprintf(magic,sizeof(magic),"REDIS%04d",RDB_VERSION);
if (rdbWriteRaw(rdb,magic,9) == -1) goto werr;
if (rdbSaveInfoAuxFields(rdb,flags,rsi) == -1) goto werr;
for (j = 0; j < cserver.dbnum; j++) {
redisDb *db = server.db+j;
redisDb *db = g_pserver->db+j;
dict *d = db->pdict;
if (dictSize(d) == 0) continue;
di = dictGetSafeIterator(d);
@ -1167,8 +1167,8 @@ int rdbSaveRio(rio *rdb, int *error, int flags, rdbSaveInfo *rsi) {
* the script cache as well: on successful PSYNC after a restart, we need
* to be able to process any EVALSHA inside the replication backlog the
* master will send us. */
if (rsi && dictSize(server.lua_scripts)) {
di = dictGetIterator(server.lua_scripts);
if (rsi && dictSize(g_pserver->lua_scripts)) {
di = dictGetIterator(g_pserver->lua_scripts);
while((de = dictNext(di)) != NULL) {
robj *body = (robj*)dictGetVal(de);
if (rdbSaveAuxField(rdb,"lua",3,szFromObj(body),sdslen(szFromObj(body))) == -1)
@ -1227,7 +1227,7 @@ int rdbSaveFd(int fd, rdbSaveInfo *rsi)
rioInitWithFile(&rdb,fd);
if (server.rdb_save_incremental_fsync)
if (g_pserver->rdb_save_incremental_fsync)
rioSetAutoSync(&rdb,REDIS_AUTOSYNC_BYTES);
if (rdbSaveRio(&rdb,&error,RDB_SAVE_NONE,rsi) == C_ERR) {
@ -1240,11 +1240,11 @@ int rdbSaveFd(int fd, rdbSaveInfo *rsi)
int rdbSave(rdbSaveInfo *rsi)
{
int err = C_OK;
if (server.rdb_filename != NULL)
err = rdbSaveFile(server.rdb_filename, rsi);
if (g_pserver->rdb_filename != NULL)
err = rdbSaveFile(g_pserver->rdb_filename, rsi);
if (err == C_OK && server.rdb_s3bucketpath != NULL)
err = rdbSaveS3(server.rdb_s3bucketpath, rsi);
if (err == C_OK && g_pserver->rdb_s3bucketpath != NULL)
err = rdbSaveS3(g_pserver->rdb_s3bucketpath, rsi);
return err;
}
@ -1292,9 +1292,9 @@ int rdbSaveFile(char *filename, rdbSaveInfo *rsi) {
}
serverLog(LL_NOTICE,"DB saved on disk");
server.dirty = 0;
server.lastsave = time(NULL);
server.lastbgsave_status = C_OK;
g_pserver->dirty = 0;
g_pserver->lastsave = time(NULL);
g_pserver->lastbgsave_status = C_OK;
return C_OK;
werr:
@ -1308,10 +1308,10 @@ int rdbSaveBackground(rdbSaveInfo *rsi) {
pid_t childpid;
long long start;
if (server.aof_child_pid != -1 || server.rdb_child_pid != -1) return C_ERR;
if (g_pserver->aof_child_pid != -1 || g_pserver->rdb_child_pid != -1) return C_ERR;
server.dirty_before_bgsave = server.dirty;
server.lastbgsave_try = time(NULL);
g_pserver->dirty_before_bgsave = g_pserver->dirty;
g_pserver->lastbgsave_try = time(NULL);
openChildInfoPipe();
start = ustime();
@ -1331,26 +1331,26 @@ int rdbSaveBackground(rdbSaveInfo *rsi) {
private_dirty/(1024*1024));
}
server.child_info_data.cow_size = private_dirty;
g_pserver->child_info_data.cow_size = private_dirty;
sendChildInfo(CHILD_INFO_TYPE_RDB);
}
exitFromChild((retval == C_OK) ? 0 : 1);
} else {
/* Parent */
server.stat_fork_time = ustime()-start;
server.stat_fork_rate = (double) zmalloc_used_memory() * 1000000 / server.stat_fork_time / (1024*1024*1024); /* GB per second. */
latencyAddSampleIfNeeded("fork",server.stat_fork_time/1000);
g_pserver->stat_fork_time = ustime()-start;
g_pserver->stat_fork_rate = (double) zmalloc_used_memory() * 1000000 / g_pserver->stat_fork_time / (1024*1024*1024); /* GB per second. */
latencyAddSampleIfNeeded("fork",g_pserver->stat_fork_time/1000);
if (childpid == -1) {
closeChildInfoPipe();
server.lastbgsave_status = C_ERR;
g_pserver->lastbgsave_status = C_ERR;
serverLog(LL_WARNING,"Can't save in background: fork: %s",
strerror(errno));
return C_ERR;
}
serverLog(LL_NOTICE,"Background saving started by pid %d",childpid);
server.rdb_save_time_start = time(NULL);
server.rdb_child_pid = childpid;
server.rdb_child_type = RDB_CHILD_TYPE_DISK;
g_pserver->rdb_save_time_start = time(NULL);
g_pserver->rdb_child_pid = childpid;
g_pserver->rdb_child_type = RDB_CHILD_TYPE_DISK;
updateDictResizePolicy();
return C_OK;
}
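/* Worked example for the stat_fork_rate formula above (illustrative numbers):
 * with 4 GiB allocated and a 20 ms fork (stat_fork_time = 20000 us),
 * rate = 4*2^30 * 1000000 / 20000 / 2^30 = 200, i.e. roughly 200 GB of
 * address space forked per second. The latency sample is the fork time
 * converted to milliseconds (stat_fork_time/1000). */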
@ -1419,8 +1419,8 @@ robj *rdbLoadObject(int rdbtype, rio *rdb, robj *key, uint64_t mvcc_tstamp) {
if ((len = rdbLoadLen(rdb,NULL)) == RDB_LENERR) return NULL;
o = createQuicklistObject();
quicklistSetOptions((quicklist*)ptrFromObj(o), server.list_max_ziplist_size,
server.list_compress_depth);
quicklistSetOptions((quicklist*)ptrFromObj(o), g_pserver->list_max_ziplist_size,
g_pserver->list_compress_depth);
/* Load every single element of the list */
while(len--) {
@ -1436,7 +1436,7 @@ robj *rdbLoadObject(int rdbtype, rio *rdb, robj *key, uint64_t mvcc_tstamp) {
if ((len = rdbLoadLen(rdb,NULL)) == RDB_LENERR) return NULL;
/* Use a regular set when there are too many entries. */
if (len > server.set_max_intset_entries) {
if (len > g_pserver->set_max_intset_entries) {
o = createSetObject();
/* It's faster to expand the dict to the right size asap in order
* to avoid rehashing */
@ -1508,8 +1508,8 @@ robj *rdbLoadObject(int rdbtype, rio *rdb, robj *key, uint64_t mvcc_tstamp) {
}
/* Convert *after* loading, since sorted sets are not stored ordered. */
if (zsetLength(o) <= server.zset_max_ziplist_entries &&
maxelelen <= server.zset_max_ziplist_value)
if (zsetLength(o) <= g_pserver->zset_max_ziplist_entries &&
maxelelen <= g_pserver->zset_max_ziplist_value)
zsetConvert(o,OBJ_ENCODING_ZIPLIST);
} else if (rdbtype == RDB_TYPE_HASH) {
uint64_t len;
@ -1522,7 +1522,7 @@ robj *rdbLoadObject(int rdbtype, rio *rdb, robj *key, uint64_t mvcc_tstamp) {
o = createHashObject();
/* Too many entries? Use a hash table. */
if (len > server.hash_max_ziplist_entries)
if (len > g_pserver->hash_max_ziplist_entries)
hashTypeConvert(o, OBJ_ENCODING_HT);
/* Load every field and value into the ziplist */
@ -1541,8 +1541,8 @@ robj *rdbLoadObject(int rdbtype, rio *rdb, robj *key, uint64_t mvcc_tstamp) {
sdslen(value), ZIPLIST_TAIL);
/* Convert to hash table if size threshold is exceeded */
if (sdslen(field) > server.hash_max_ziplist_value ||
sdslen(value) > server.hash_max_ziplist_value)
if (sdslen(field) > g_pserver->hash_max_ziplist_value ||
sdslen(value) > g_pserver->hash_max_ziplist_value)
{
sdsfree(field);
sdsfree(value);
@ -1577,8 +1577,8 @@ robj *rdbLoadObject(int rdbtype, rio *rdb, robj *key, uint64_t mvcc_tstamp) {
} else if (rdbtype == RDB_TYPE_LIST_QUICKLIST) {
if ((len = rdbLoadLen(rdb,NULL)) == RDB_LENERR) return NULL;
o = createQuicklistObject();
quicklistSetOptions((quicklist*)ptrFromObj(o), server.list_max_ziplist_size,
server.list_compress_depth);
quicklistSetOptions((quicklist*)ptrFromObj(o), g_pserver->list_max_ziplist_size,
g_pserver->list_compress_depth);
while (len--) {
unsigned char *zl = (unsigned char*)
@ -1626,8 +1626,8 @@ robj *rdbLoadObject(int rdbtype, rio *rdb, robj *key, uint64_t mvcc_tstamp) {
o->type = OBJ_HASH;
o->encoding = OBJ_ENCODING_ZIPLIST;
if (hashTypeLength(o) > server.hash_max_ziplist_entries ||
maxlen > server.hash_max_ziplist_value)
if (hashTypeLength(o) > g_pserver->hash_max_ziplist_entries ||
maxlen > g_pserver->hash_max_ziplist_value)
{
hashTypeConvert(o, OBJ_ENCODING_HT);
}
@ -1641,19 +1641,19 @@ robj *rdbLoadObject(int rdbtype, rio *rdb, robj *key, uint64_t mvcc_tstamp) {
case RDB_TYPE_SET_INTSET:
o->type = OBJ_SET;
o->encoding = OBJ_ENCODING_INTSET;
if (intsetLen((intset*)ptrFromObj(o)) > server.set_max_intset_entries)
if (intsetLen((intset*)ptrFromObj(o)) > g_pserver->set_max_intset_entries)
setTypeConvert(o,OBJ_ENCODING_HT);
break;
case RDB_TYPE_ZSET_ZIPLIST:
o->type = OBJ_ZSET;
o->encoding = OBJ_ENCODING_ZIPLIST;
if (zsetLength(o) > server.zset_max_ziplist_entries)
if (zsetLength(o) > g_pserver->zset_max_ziplist_entries)
zsetConvert(o,OBJ_ENCODING_SKIPLIST);
break;
case RDB_TYPE_HASH_ZIPLIST:
o->type = OBJ_HASH;
o->encoding = OBJ_ENCODING_ZIPLIST;
if (hashTypeLength(o) > server.hash_max_ziplist_entries)
if (hashTypeLength(o) > g_pserver->hash_max_ziplist_entries)
hashTypeConvert(o, OBJ_ENCODING_HT);
break;
default:
@ -1831,35 +1831,35 @@ void startLoading(FILE *fp) {
struct stat sb;
/* Load the DB */
server.loading = 1;
server.loading_start_time = time(NULL);
server.loading_loaded_bytes = 0;
g_pserver->loading = 1;
g_pserver->loading_start_time = time(NULL);
g_pserver->loading_loaded_bytes = 0;
if (fstat(fileno(fp), &sb) == -1) {
server.loading_total_bytes = 0;
g_pserver->loading_total_bytes = 0;
} else {
server.loading_total_bytes = sb.st_size;
g_pserver->loading_total_bytes = sb.st_size;
}
}
/* Refresh the loading progress info */
void loadingProgress(off_t pos) {
server.loading_loaded_bytes = pos;
if (server.stat_peak_memory < zmalloc_used_memory())
server.stat_peak_memory = zmalloc_used_memory();
g_pserver->loading_loaded_bytes = pos;
if (g_pserver->stat_peak_memory < zmalloc_used_memory())
g_pserver->stat_peak_memory = zmalloc_used_memory();
}
/* Loading finished */
void stopLoading(void) {
server.loading = 0;
g_pserver->loading = 0;
}
/* Track loading progress in order to serve clients from time to time
   and if needed calculate the RDB checksum */
void rdbLoadProgressCallback(rio *r, const void *buf, size_t len) {
if (server.rdb_checksum)
if (g_pserver->rdb_checksum)
rioGenericUpdateChecksum(r, buf, len);
if (server.loading_process_events_interval_bytes &&
(r->processed_bytes + len)/server.loading_process_events_interval_bytes > r->processed_bytes/server.loading_process_events_interval_bytes)
if (g_pserver->loading_process_events_interval_bytes &&
(r->processed_bytes + len)/g_pserver->loading_process_events_interval_bytes > r->processed_bytes/g_pserver->loading_process_events_interval_bytes)
{
/* The DB can take some non-trivial amount of time to load. Update
* our cached time since it is used to create and update the last
@ -1867,7 +1867,7 @@ void rdbLoadProgressCallback(rio *r, const void *buf, size_t len) {
updateCachedTime();
listIter li;
listNode *ln;
listRewind(server.masters, &li);
listRewind(g_pserver->masters, &li);
while ((ln = listNext(&li)))
{
struct redisMaster *mi = (struct redisMaster*)listNodeValue(ln);
@ -1875,7 +1875,7 @@ void rdbLoadProgressCallback(rio *r, const void *buf, size_t len) {
replicationSendNewlineToMaster(mi);
}
loadingProgress(r->processed_bytes);
processEventsWhileBlocked(serverTL - server.rgthreadvar);
processEventsWhileBlocked(serverTL - g_pserver->rgthreadvar);
}
}
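/* The integer-division test above fires exactly when a read crosses a
 * multiple of the interval. Illustrative numbers: with the interval at
 * 2 MiB, processed_bytes = 3.9 MiB and len = 0.3 MiB give quotients
 * 2 vs 1, so events are processed; a read that stays inside the same
 * 2 MiB window leaves both quotients equal and skips the callback. */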
@ -1884,7 +1884,7 @@ void rdbLoadProgressCallback(rio *r, const void *buf, size_t len) {
int rdbLoadRio(rio *rdb, rdbSaveInfo *rsi, int loading_aof) {
uint64_t dbid;
int type, rdbver;
redisDb *db = server.db+0;
redisDb *db = g_pserver->db+0;
char buf[1024];
/* Key-specific attributes, set by opcodes before the key type. */
long long lru_idle = -1, lfu_freq = -1, expiretime = -1, now = mstime();
@ -1892,7 +1892,7 @@ int rdbLoadRio(rio *rdb, rdbSaveInfo *rsi, int loading_aof) {
uint64_t mvcc_tstamp = OBJ_MVCC_INVALID;
rdb->update_cksum = rdbLoadProgressCallback;
rdb->max_processing_chunk = server.loading_process_events_interval_bytes;
rdb->max_processing_chunk = g_pserver->loading_process_events_interval_bytes;
if (rioRead(rdb,buf,9) == 0) goto eoferr;
buf[9] = '\0';
if (memcmp(buf,"REDIS",5) != 0) {
@ -1954,7 +1954,7 @@ int rdbLoadRio(rio *rdb, rdbSaveInfo *rsi, int loading_aof) {
"databases. Exiting\n", cserver.dbnum);
exit(1);
}
db = server.db+dbid;
db = g_pserver->db+dbid;
continue; /* Read next opcode. */
} else if (type == RDB_OPCODE_RESIZEDB) {
/* RESIZEDB: Hint about the size of the keys in the currently
@ -1995,7 +1995,7 @@ int rdbLoadRio(rio *rdb, rdbSaveInfo *rsi, int loading_aof) {
if (rsi) rsi->repl_offset = strtoll(szFromObj(auxval),NULL,10);
} else if (!strcasecmp(szFromObj(auxkey),"lua")) {
/* Load the script back in memory. */
if (luaCreateFunction(NULL,server.lua,auxval) == NULL) {
if (luaCreateFunction(NULL,g_pserver->lua,auxval) == NULL) {
rdbExitReportCorruptRDB(
"Can't load Lua script from RDB file! "
"BODY: %s", ptrFromObj(auxval));
@ -2069,7 +2069,7 @@ int rdbLoadRio(rio *rdb, rdbSaveInfo *rsi, int loading_aof) {
* received from the master. In the latter case, the master is
* responsible for key expiry. If we would expire keys here, the
* snapshot taken by the master may not be reflected on the slave. */
if (listLength(server.masters) == 0 && !loading_aof && expiretime != -1 && expiretime < now) {
if (listLength(g_pserver->masters) == 0 && !loading_aof && expiretime != -1 && expiretime < now) {
decrRefCount(key);
decrRefCount(val);
} else {
@ -2106,7 +2106,7 @@ int rdbLoadRio(rio *rdb, rdbSaveInfo *rsi, int loading_aof) {
uint64_t cksum, expected = rdb->cksum;
if (rioRead(rdb,&cksum,8) == 0) goto eoferr;
if (server.rdb_checksum) {
if (g_pserver->rdb_checksum) {
memrev64ifbe(&cksum);
if (cksum == 0) {
serverLog(LL_WARNING,"RDB file was saved with checksum disabled: no check performed.");
@ -2128,11 +2128,11 @@ int rdbLoadFile(char *filename, rdbSaveInfo *rsi);
int rdbLoad(rdbSaveInfo *rsi)
{
int err = C_ERR;
if (server.rdb_filename != NULL)
err = rdbLoadFile(server.rdb_filename, rsi);
if (g_pserver->rdb_filename != NULL)
err = rdbLoadFile(g_pserver->rdb_filename, rsi);
if ((err == C_ERR) && server.rdb_s3bucketpath != NULL)
err = rdbLoadS3(server.rdb_s3bucketpath, rsi);
if ((err == C_ERR) && g_pserver->rdb_s3bucketpath != NULL)
err = rdbLoadS3(g_pserver->rdb_s3bucketpath, rsi);
return err;
}
@ -2164,30 +2164,30 @@ void backgroundSaveDoneHandlerDisk(int exitcode, int bysignal) {
if (!bysignal && exitcode == 0) {
serverLog(LL_NOTICE,
"Background saving terminated with success");
server.dirty = server.dirty - server.dirty_before_bgsave;
server.lastsave = time(NULL);
server.lastbgsave_status = C_OK;
g_pserver->dirty = g_pserver->dirty - g_pserver->dirty_before_bgsave;
g_pserver->lastsave = time(NULL);
g_pserver->lastbgsave_status = C_OK;
} else if (!bysignal && exitcode != 0) {
serverLog(LL_WARNING, "Background saving error");
server.lastbgsave_status = C_ERR;
g_pserver->lastbgsave_status = C_ERR;
} else {
mstime_t latency;
serverLog(LL_WARNING,
"Background saving terminated by signal %d", bysignal);
latencyStartMonitor(latency);
rdbRemoveTempFile(server.rdb_child_pid);
rdbRemoveTempFile(g_pserver->rdb_child_pid);
latencyEndMonitor(latency);
latencyAddSampleIfNeeded("rdb-unlink-temp-file",latency);
/* SIGUSR1 is whitelisted, so we have a way to kill a child without
* triggering an error condition. */
if (bysignal != SIGUSR1)
server.lastbgsave_status = C_ERR;
g_pserver->lastbgsave_status = C_ERR;
}
server.rdb_child_pid = -1;
server.rdb_child_type = RDB_CHILD_TYPE_NONE;
server.rdb_save_time_last = time(NULL)-server.rdb_save_time_start;
server.rdb_save_time_start = -1;
g_pserver->rdb_child_pid = -1;
g_pserver->rdb_child_type = RDB_CHILD_TYPE_NONE;
g_pserver->rdb_save_time_last = time(NULL)-g_pserver->rdb_save_time_start;
g_pserver->rdb_save_time_start = -1;
/* Possibly there are slaves waiting for a BGSAVE in order to be served
* (the first stage of SYNC is a bulk transfer of dump.rdb) */
updateSlavesWaitingBgsave((!bysignal && exitcode == 0) ? C_OK : C_ERR, RDB_CHILD_TYPE_DISK);
@ -2209,9 +2209,9 @@ void backgroundSaveDoneHandlerSocket(int exitcode, int bysignal) {
serverLog(LL_WARNING,
"Background transfer terminated by signal %d", bysignal);
}
server.rdb_child_pid = -1;
server.rdb_child_type = RDB_CHILD_TYPE_NONE;
server.rdb_save_time_start = -1;
g_pserver->rdb_child_pid = -1;
g_pserver->rdb_child_type = RDB_CHILD_TYPE_NONE;
g_pserver->rdb_save_time_start = -1;
/* If the child returns an OK exit code, read the set of slave client
* IDs and the associated status code. We'll terminate all the slaves
@ -2225,7 +2225,7 @@ void backgroundSaveDoneHandlerSocket(int exitcode, int bysignal) {
if (!bysignal && exitcode == 0) {
int readlen = sizeof(uint64_t);
if (read(server.rdb_pipe_read_result_from_child, ok_slaves, readlen) ==
if (read(g_pserver->rdb_pipe_read_result_from_child, ok_slaves, readlen) ==
readlen)
{
readlen = ok_slaves[0]*sizeof(uint64_t)*2;
@ -2234,7 +2234,7 @@ void backgroundSaveDoneHandlerSocket(int exitcode, int bysignal) {
* uint64_t element in the array. */
ok_slaves = (uint64_t*)zrealloc(ok_slaves,sizeof(uint64_t)+readlen, MALLOC_LOCAL);
if (readlen &&
read(server.rdb_pipe_read_result_from_child, ok_slaves+1,
read(g_pserver->rdb_pipe_read_result_from_child, ok_slaves+1,
readlen) != readlen)
{
ok_slaves[0] = 0;
@ -2242,15 +2242,15 @@ void backgroundSaveDoneHandlerSocket(int exitcode, int bysignal) {
}
}
close(server.rdb_pipe_read_result_from_child);
close(server.rdb_pipe_write_result_to_parent);
close(g_pserver->rdb_pipe_read_result_from_child);
close(g_pserver->rdb_pipe_write_result_to_parent);
/* We can continue the replication process with all the slaves that
* correctly received the full payload. Others are terminated. */
listNode *ln;
listIter li;
listRewind(server.slaves,&li);
listRewind(g_pserver->slaves,&li);
while((ln = listNext(&li))) {
client *slave = (client*)ln->value;
@ -2291,7 +2291,7 @@ void backgroundSaveDoneHandlerSocket(int exitcode, int bysignal) {
/* When a background RDB saving/transfer terminates, call the right handler. */
void backgroundSaveDoneHandler(int exitcode, int bysignal) {
switch(server.rdb_child_type) {
switch(g_pserver->rdb_child_type) {
case RDB_CHILD_TYPE_DISK:
backgroundSaveDoneHandlerDisk(exitcode,bysignal);
break;
@ -2308,8 +2308,8 @@ void backgroundSaveDoneHandler(int exitcode, int bysignal) {
* the child did not exit for an error, but because we wanted), and performs
* the cleanup needed. */
void killRDBChild(void) {
kill(server.rdb_child_pid,SIGUSR1);
rdbRemoveTempFile(server.rdb_child_pid);
kill(g_pserver->rdb_child_pid,SIGUSR1);
rdbRemoveTempFile(g_pserver->rdb_child_pid);
closeChildInfoPipe();
updateDictResizePolicy();
}
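/* Sketch of the signal contract assumed above: SIGUSR1 is the cooperative
 * "parent asked me to stop" signal, so the done handlers leave
 * lastbgsave_status untouched for it, while any other fatal signal is
 * recorded as a failed save:
 *
 *   kill(g_pserver->rdb_child_pid, SIGUSR1);   // intentional, no C_ERR
 *   // child killed by SIGSEGV/SIGKILL/...     // recorded as C_ERR
 */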
@ -2327,25 +2327,25 @@ int rdbSaveToSlavesSockets(rdbSaveInfo *rsi) {
long long start;
int pipefds[2];
if (server.aof_child_pid != -1 || server.rdb_child_pid != -1) return C_ERR;
if (g_pserver->aof_child_pid != -1 || g_pserver->rdb_child_pid != -1) return C_ERR;
/* Before forking, create a pipe that will be used in order to
* send back to the parent the IDs of the slaves that successfully
* received all the writes. */
if (pipe(pipefds) == -1) return C_ERR;
server.rdb_pipe_read_result_from_child = pipefds[0];
server.rdb_pipe_write_result_to_parent = pipefds[1];
g_pserver->rdb_pipe_read_result_from_child = pipefds[0];
g_pserver->rdb_pipe_write_result_to_parent = pipefds[1];
/* Collect the file descriptors of the slaves we want to transfer
* the RDB to, which are in WAIT_BGSAVE_START state. */
fds = (int*)zmalloc(sizeof(int)*listLength(server.slaves), MALLOC_LOCAL);
fds = (int*)zmalloc(sizeof(int)*listLength(g_pserver->slaves), MALLOC_LOCAL);
/* We also allocate an array of corresponding client IDs. This will
* be useful for the child process in order to build the report
* (sent via unix pipe) that will be sent to the parent. */
clientids = (uint64_t*)zmalloc(sizeof(uint64_t)*listLength(server.slaves), MALLOC_LOCAL);
clientids = (uint64_t*)zmalloc(sizeof(uint64_t)*listLength(g_pserver->slaves), MALLOC_LOCAL);
numfds = 0;
listRewind(server.slaves,&li);
listRewind(g_pserver->slaves,&li);
while((ln = listNext(&li))) {
client *slave = (client*)ln->value;
@ -2357,7 +2357,7 @@ int rdbSaveToSlavesSockets(rdbSaveInfo *rsi) {
* We'll restore it when the children returns (since duped socket
* will share the O_NONBLOCK attribute with the parent). */
anetBlock(NULL,slave->fd);
anetSendTimeout(NULL,slave->fd,server.repl_timeout*1000);
anetSendTimeout(NULL,slave->fd,g_pserver->repl_timeout*1000);
}
}
@ -2388,7 +2388,7 @@ int rdbSaveToSlavesSockets(rdbSaveInfo *rsi) {
private_dirty/(1024*1024));
}
server.child_info_data.cow_size = private_dirty;
g_pserver->child_info_data.cow_size = private_dirty;
sendChildInfo(CHILD_INFO_TYPE_RDB);
/* If we are returning OK, at least one slave was served
@ -2423,7 +2423,7 @@ int rdbSaveToSlavesSockets(rdbSaveInfo *rsi) {
* process with all the children that were waiting. */
msglen = sizeof(uint64_t)*(1+2*numfds);
if (*len == 0 ||
write(server.rdb_pipe_write_result_to_parent,msg,msglen)
write(g_pserver->rdb_pipe_write_result_to_parent,msg,msglen)
!= msglen)
{
retval = C_ERR;
@ -2442,7 +2442,7 @@ int rdbSaveToSlavesSockets(rdbSaveInfo *rsi) {
/* Undo the state change. The caller will perform cleanup on
* all the slaves in BGSAVE_START state, but an early call to
* replicationSetupSlaveForFullResync() turned it into BGSAVE_END */
listRewind(server.slaves,&li);
listRewind(g_pserver->slaves,&li);
while((ln = listNext(&li))) {
client *slave = (client*)ln->value;
int j;
@ -2458,15 +2458,15 @@ int rdbSaveToSlavesSockets(rdbSaveInfo *rsi) {
close(pipefds[1]);
closeChildInfoPipe();
} else {
server.stat_fork_time = ustime()-start;
server.stat_fork_rate = (double) zmalloc_used_memory() * 1000000 / server.stat_fork_time / (1024*1024*1024); /* GB per second. */
latencyAddSampleIfNeeded("fork",server.stat_fork_time/1000);
g_pserver->stat_fork_time = ustime()-start;
g_pserver->stat_fork_rate = (double) zmalloc_used_memory() * 1000000 / g_pserver->stat_fork_time / (1024*1024*1024); /* GB per second. */
latencyAddSampleIfNeeded("fork",g_pserver->stat_fork_time/1000);
serverLog(LL_NOTICE,"Background RDB transfer started by pid %d",
childpid);
server.rdb_save_time_start = time(NULL);
server.rdb_child_pid = childpid;
server.rdb_child_type = RDB_CHILD_TYPE_SOCKET;
g_pserver->rdb_save_time_start = time(NULL);
g_pserver->rdb_child_pid = childpid;
g_pserver->rdb_child_type = RDB_CHILD_TYPE_SOCKET;
updateDictResizePolicy();
}
zfree(clientids);
@ -2477,7 +2477,7 @@ int rdbSaveToSlavesSockets(rdbSaveInfo *rsi) {
}
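/* A sketch of the child->parent report wire format, reconstructed from the
 * read()/write() sizes above (the source uses a raw uint64_t array, not a
 * struct): one count followed by (client id, error code) pairs.
 *
 *   uint64_t msg[1 + 2*numfds];
 *   msg[0]       = numfds;        // slaves served
 *   msg[1 + 2*i] = clientids[i];  // slave client ID
 *   msg[2 + 2*i] = error_code;    // 0 on success
 *
 * This matches the parent first reading sizeof(uint64_t), then another
 * ok_slaves[0]*sizeof(uint64_t)*2 bytes. */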
void saveCommand(client *c) {
if (server.rdb_child_pid != -1) {
if (g_pserver->rdb_child_pid != -1) {
addReplyError(c,"Background save already in progress");
return;
}
@ -2508,11 +2508,11 @@ void bgsaveCommand(client *c) {
rdbSaveInfo rsi, *rsiptr;
rsiptr = rdbPopulateSaveInfo(&rsi);
if (server.rdb_child_pid != -1) {
if (g_pserver->rdb_child_pid != -1) {
addReplyError(c,"Background save already in progress");
} else if (server.aof_child_pid != -1) {
} else if (g_pserver->aof_child_pid != -1) {
if (schedule) {
server.rdb_bgsave_scheduled = 1;
g_pserver->rdb_bgsave_scheduled = 1;
addReplyStatus(c,"Background saving scheduled");
} else {
addReplyError(c,
@ -2547,22 +2547,22 @@ rdbSaveInfo *rdbPopulateSaveInfo(rdbSaveInfo *rsi) {
* connects to us, the NULL repl_backlog will trigger a full
* synchronization, at the same time we will use a new replid and clear
* replid2. */
if (!listLength(server.masters) && server.repl_backlog) {
/* Note that when server.slaveseldb is -1, it means that this master
if (!listLength(g_pserver->masters) && g_pserver->repl_backlog) {
/* Note that when g_pserver->slaveseldb is -1, it means that this master
* didn't apply any write commands after a full synchronization.
* So we can let repl_stream_db be 0, this allows a restarted slave
* to reload replication ID/offset, it's safe because the next write
* command must generate a SELECT statement. */
rsi->repl_stream_db = server.slaveseldb == -1 ? 0 : server.slaveseldb;
rsi->repl_stream_db = g_pserver->slaveseldb == -1 ? 0 : g_pserver->slaveseldb;
return rsi;
}
if (listLength(server.masters) > 1)
if (listLength(g_pserver->masters) > 1)
{
// BUGBUG, warn user about this incomplete implementation
serverLog(LL_WARNING, "Warning: Only backing up first master's information in RDB");
}
struct redisMaster *miFirst = (redisMaster*)(listLength(server.masters) ? listNodeValue(listFirst(server.masters)) : NULL);
struct redisMaster *miFirst = (redisMaster*)(listLength(g_pserver->masters) ? listNodeValue(listFirst(g_pserver->masters)) : NULL);
/* If the instance is a slave we need a connected master
* in order to fetch the currently selected DB. */


@ -297,7 +297,7 @@ int redis_check_rdb(const char *rdbfilename, FILE *fp) {
expiretime = -1;
}
/* Verify the checksum if RDB version is >= 5 */
if (rdbver >= 5 && server.rdb_checksum) {
if (rdbver >= 5 && g_pserver->rdb_checksum) {
uint64_t cksum, expected = rdb.cksum;
rdbstate.doing = RDB_CHECK_DOING_CHECK_SUM;
@ -349,7 +349,7 @@ int redis_check_rdb_main(int argc, const char **argv, FILE *fp) {
* an already initialized Redis instance, check if we really need to. */
if (shared.integers[0] == NULL)
createSharedObjects();
server.loading_process_events_interval_bytes = 0;
g_pserver->loading_process_events_interval_bytes = 0;
rdbCheckMode = 1;
rdbCheckInfo("Checking RDB file %s", argv[1]);
rdbCheckSetupSignals();

File diff suppressed because it is too large


@ -191,7 +191,7 @@ char *redisProtocolToLuaType_MultiBulk(lua_State *lua, char *reply, int atype) {
int j = 0;
string2ll(reply+1,p-reply-1,&mbulklen);
if (server.lua_caller->resp == 2 || atype == '*') {
if (g_pserver->lua_caller->resp == 2 || atype == '*') {
p += 2;
if (mbulklen == -1) {
lua_pushboolean(lua,0);
@ -203,7 +203,7 @@ char *redisProtocolToLuaType_MultiBulk(lua_State *lua, char *reply, int atype) {
p = redisProtocolToLuaType(lua,p);
lua_settable(lua,-3);
}
} else if (server.lua_caller->resp == 3) {
} else if (g_pserver->lua_caller->resp == 3) {
/* Here we handle only Set and Map replies in RESP3 mode, since arrays
* follow the above RESP2 code path. */
p += 2;
@ -371,14 +371,14 @@ int luaRedisGenericCommand(lua_State *lua, int raise_error) {
int acl_retval = 0;
int call_flags = CMD_CALL_SLOWLOG | CMD_CALL_STATS;
struct redisCommand *cmd;
client *c = server.lua_client;
client *c = g_pserver->lua_client;
sds reply;
// Ensure our client is on the right thread
serverAssert(!(c->flags & CLIENT_PENDING_WRITE));
serverAssert(!(c->flags & CLIENT_UNBLOCKED));
serverAssert(GlobalLocksAcquired());
c->iel = serverTL - server.rgthreadvar;
c->iel = serverTL - g_pserver->rgthreadvar;
/* Cached across calls. */
static robj **argv = NULL;
@ -388,7 +388,7 @@ int luaRedisGenericCommand(lua_State *lua, int raise_error) {
static int inuse = 0; /* Recursive calls detection. */
/* Reflect MULTI state */
if (server.lua_multi_emitted || (server.lua_caller->flags & CLIENT_MULTI)) {
if (g_pserver->lua_multi_emitted || (g_pserver->lua_caller->flags & CLIENT_MULTI)) {
c->flags |= CLIENT_MULTI;
} else {
c->flags &= ~CLIENT_MULTI;
@ -472,7 +472,7 @@ int luaRedisGenericCommand(lua_State *lua, int raise_error) {
/* Setup our fake client for command execution */
c->argv = argv;
c->argc = argc;
c->puser = server.lua_caller->puser;
c->puser = g_pserver->lua_caller->puser;
/* Process module hooks */
moduleCallCommandFilters(c);
@ -533,13 +533,13 @@ int luaRedisGenericCommand(lua_State *lua, int raise_error) {
* of this script. */
if (cmd->flags & CMD_WRITE) {
int deny_write_type = writeCommandsDeniedByDiskError();
if (server.lua_random_dirty && !server.lua_replicate_commands) {
if (g_pserver->lua_random_dirty && !g_pserver->lua_replicate_commands) {
luaPushError(lua,
"Write commands not allowed after non deterministic commands. Call redis.replicate_commands() at the start of your script in order to switch to single commands replication mode.");
goto cleanup;
} else if (listLength(server.masters) && server.repl_slave_ro &&
!server.loading &&
!(server.lua_caller->flags & CLIENT_MASTER))
} else if (listLength(g_pserver->masters) && g_pserver->repl_slave_ro &&
!g_pserver->loading &&
!(g_pserver->lua_caller->flags & CLIENT_MASTER))
{
luaPushError(lua, (char*)ptrFromObj(shared.roslaveerr));
goto cleanup;
@ -549,7 +549,7 @@ int luaRedisGenericCommand(lua_State *lua, int raise_error) {
} else {
sds aof_write_err = sdscatfmt(sdsempty(),
"-MISCONF Errors writing to the AOF file: %s\r\n",
strerror(server.aof_last_write_errno));
strerror(g_pserver->aof_last_write_errno));
luaPushError(lua, aof_write_err);
sdsfree(aof_write_err);
}
@ -561,10 +561,10 @@ int luaRedisGenericCommand(lua_State *lua, int raise_error) {
* could enlarge the memory usage are not allowed, but only if this is the
* first write in the context of this script, otherwise we can't stop
* in the middle. */
if (server.maxmemory && /* Maxmemory is actually enabled. */
!server.loading && /* Don't care about mem if loading. */
!listLength(server.masters) && /* Slave must execute the script. */
server.lua_write_dirty == 0 && /* Script had no side effects so far. */
if (g_pserver->maxmemory && /* Maxmemory is actually enabled. */
!g_pserver->loading && /* Don't care about mem if loading. */
!listLength(g_pserver->masters) && /* Slave must execute the script. */
g_pserver->lua_write_dirty == 0 && /* Script had no side effects so far. */
(cmd->flags & CMD_DENYOOM))
{
if (getMaxmemoryState(NULL,NULL,NULL,NULL) != C_OK) {
@ -573,20 +573,20 @@ int luaRedisGenericCommand(lua_State *lua, int raise_error) {
}
}
if (cmd->flags & CMD_RANDOM) server.lua_random_dirty = 1;
if (cmd->flags & CMD_WRITE) server.lua_write_dirty = 1;
if (cmd->flags & CMD_RANDOM) g_pserver->lua_random_dirty = 1;
if (cmd->flags & CMD_WRITE) g_pserver->lua_write_dirty = 1;
/* If this is a Redis Cluster node, we need to make sure Lua is not
* trying to access non-local keys, with the exception of commands
* received from our master or when loading the AOF back in memory. */
if (server.cluster_enabled && !server.loading &&
!(server.lua_caller->flags & CLIENT_MASTER))
if (g_pserver->cluster_enabled && !g_pserver->loading &&
!(g_pserver->lua_caller->flags & CLIENT_MASTER))
{
/* Duplicate relevant flags in the lua client. */
c->flags &= ~(CLIENT_READONLY|CLIENT_ASKING);
c->flags |= server.lua_caller->flags & (CLIENT_READONLY|CLIENT_ASKING);
c->flags |= g_pserver->lua_caller->flags & (CLIENT_READONLY|CLIENT_ASKING);
if (getNodeByQuery(c,c->cmd,c->argv,c->argc,NULL,NULL) !=
server.cluster->myself)
g_pserver->cluster->myself)
{
luaPushError(lua,
"Lua script attempted to access a non local key in a "
@ -598,22 +598,22 @@ int luaRedisGenericCommand(lua_State *lua, int raise_error) {
/* If we are using single commands replication, we need to wrap what
* we propagate into a MULTI/EXEC block, so that it will be atomic like
* a Lua script in the context of AOF and slaves. */
if (server.lua_replicate_commands &&
!server.lua_multi_emitted &&
!(server.lua_caller->flags & CLIENT_MULTI) &&
server.lua_write_dirty &&
server.lua_repl != PROPAGATE_NONE)
if (g_pserver->lua_replicate_commands &&
!g_pserver->lua_multi_emitted &&
!(g_pserver->lua_caller->flags & CLIENT_MULTI) &&
g_pserver->lua_write_dirty &&
g_pserver->lua_repl != PROPAGATE_NONE)
{
execCommandPropagateMulti(server.lua_caller);
server.lua_multi_emitted = 1;
execCommandPropagateMulti(g_pserver->lua_caller);
g_pserver->lua_multi_emitted = 1;
}
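/* Effect sketch, assuming effects replication is active and the script
 * performs two hypothetical writes: the propagated stream becomes
 *
 *   MULTI
 *   SET k1 v1
 *   SET k2 v2
 *   EXEC
 *
 * with the EXEC emitted after the script returns (see evalGenericCommand),
 * keeping the effects as atomic as whole-script replication. */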
/* Run the command */
if (server.lua_replicate_commands) {
if (g_pserver->lua_replicate_commands) {
/* Set flags according to redis.set_repl() settings. */
if (server.lua_repl & PROPAGATE_AOF)
if (g_pserver->lua_repl & PROPAGATE_AOF)
call_flags |= CMD_CALL_PROPAGATE_AOF;
if (server.lua_repl & PROPAGATE_REPL)
if (g_pserver->lua_repl & PROPAGATE_REPL)
call_flags |= CMD_CALL_PROPAGATE_REPL;
}
call(c,call_flags);
@ -648,7 +648,7 @@ int luaRedisGenericCommand(lua_State *lua, int raise_error) {
/* Sort the output array if needed, assuming it is a non-null multi bulk
* reply as expected. */
if ((cmd->flags & CMD_SORT_FOR_SCRIPT) &&
(server.lua_replicate_commands == 0) &&
(g_pserver->lua_replicate_commands == 0) &&
(reply[0] == '*' && reply[1] != '-')) {
luaSortArray(lua);
}
@ -764,10 +764,10 @@ int luaRedisStatusReplyCommand(lua_State *lua) {
* already started to write, returns false and stick to whole scripts
* replication, which is our default. */
int luaRedisReplicateCommandsCommand(lua_State *lua) {
if (server.lua_write_dirty) {
if (g_pserver->lua_write_dirty) {
lua_pushboolean(lua,0);
} else {
server.lua_replicate_commands = 1;
g_pserver->lua_replicate_commands = 1;
/* When we switch to single commands replication, we can provide
* different math.random() sequences at every call, which is what
* the user normally expects. */
@ -817,7 +817,7 @@ int luaRedisSetReplCommand(lua_State *lua) {
int argc = lua_gettop(lua);
int flags;
if (server.lua_replicate_commands == 0) {
if (g_pserver->lua_replicate_commands == 0) {
lua_pushstring(lua, "You can set the replication behavior only after turning on single commands replication with redis.replicate_commands().");
return lua_error(lua);
} else if (argc != 1) {
@ -830,7 +830,7 @@ int luaRedisSetReplCommand(lua_State *lua) {
lua_pushstring(lua, "Invalid replication flags. Use REPL_AOF, REPL_REPLICA, REPL_ALL or REPL_NONE.");
return lua_error(lua);
}
server.lua_repl = flags;
g_pserver->lua_repl = flags;
return 0;
}
@ -966,9 +966,9 @@ void scriptingInit(int setup) {
lua_State *lua = lua_open();
if (setup) {
server.lua_client = NULL;
server.lua_caller = NULL;
server.lua_timedout = 0;
g_pserver->lua_client = NULL;
g_pserver->lua_caller = NULL;
g_pserver->lua_timedout = 0;
ldbInit();
}
@ -978,8 +978,8 @@ void scriptingInit(int setup) {
/* Initialize a dictionary we use to map SHAs to scripts.
* This is useful for replication, as we need to replicate EVALSHA
* as EVAL, so we need to remember the associated script. */
server.lua_scripts = dictCreate(&shaScriptObjectDictType,NULL);
server.lua_scripts_mem = 0;
g_pserver->lua_scripts = dictCreate(&shaScriptObjectDictType,NULL);
g_pserver->lua_scripts_mem = 0;
/* Register the redis commands table and fields */
lua_newtable(lua);
@ -1121,9 +1121,9 @@ void scriptingInit(int setup) {
* inside the Lua interpreter.
* Note: there is no need to create it again when this function is called
* by scriptingReset(). */
if (server.lua_client == NULL) {
server.lua_client = createClient(-1, IDX_EVENT_LOOP_MAIN);
server.lua_client->flags |= CLIENT_LUA;
if (g_pserver->lua_client == NULL) {
g_pserver->lua_client = createClient(-1, IDX_EVENT_LOOP_MAIN);
g_pserver->lua_client->flags |= CLIENT_LUA;
}
/* Lua beginners often don't use "local", this is likely to introduce
@ -1131,15 +1131,15 @@ void scriptingInit(int setup) {
* to global variables. */
scriptingEnableGlobalsProtection(lua);
server.lua = lua;
g_pserver->lua = lua;
}
/* Release resources related to Lua scripting.
* This function is used in order to reset the scripting environment. */
void scriptingRelease(void) {
dictRelease(server.lua_scripts);
server.lua_scripts_mem = 0;
lua_close(server.lua);
dictRelease(g_pserver->lua_scripts);
g_pserver->lua_scripts_mem = 0;
lua_close(g_pserver->lua);
}
void scriptingReset(void) {
@ -1233,7 +1233,7 @@ sds luaCreateFunction(client *c, lua_State *lua, robj *body) {
sha1hex(funcname+2,(char*)ptrFromObj(body),sdslen((sds)ptrFromObj(body)));
sds sha = sdsnewlen(funcname+2,40);
if ((de = dictFind(server.lua_scripts,sha)) != NULL) {
if ((de = dictFind(g_pserver->lua_scripts,sha)) != NULL) {
sdsfree(sha);
return (sds)dictGetKey(de);
}
@ -1271,33 +1271,33 @@ sds luaCreateFunction(client *c, lua_State *lua, robj *body) {
/* We also save a SHA1 -> Original script map in a dictionary
* so that we can replicate / write in the AOF all the
* EVALSHA commands as EVAL using the original script. */
int retval = dictAdd(server.lua_scripts,sha,body);
serverAssertWithInfo(c ? c : server.lua_client,NULL,retval == DICT_OK);
server.lua_scripts_mem += sdsZmallocSize(sha) + getStringObjectSdsUsedMemory(body);
int retval = dictAdd(g_pserver->lua_scripts,sha,body);
serverAssertWithInfo(c ? c : g_pserver->lua_client,NULL,retval == DICT_OK);
g_pserver->lua_scripts_mem += sdsZmallocSize(sha) + getStringObjectSdsUsedMemory(body);
incrRefCount(body);
return sha;
}
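/* A small usage sketch of the dedup path above: loading the same body twice
 * returns the sds key interned in lua_scripts by the first call, so the
 * results compare equal by pointer.
 *
 *   sds sha1 = luaCreateFunction(c, g_pserver->lua, body);
 *   sds sha2 = luaCreateFunction(c, g_pserver->lua, body);
 *   serverAssert(sha1 == sha2);   // second call hit dictFind() above
 */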
/* This is the Lua script "count" hook that we use to detect script timeouts. */
void luaMaskCountHook(lua_State *lua, lua_Debug *ar) {
long long elapsed = mstime() - server.lua_time_start;
long long elapsed = mstime() - g_pserver->lua_time_start;
UNUSED(ar);
UNUSED(lua);
/* Set the timeout condition if not already set and the maximum
* execution time was reached. */
if (elapsed >= server.lua_time_limit && server.lua_timedout == 0) {
if (elapsed >= g_pserver->lua_time_limit && g_pserver->lua_timedout == 0) {
serverLog(LL_WARNING,"Lua slow script detected: still in execution after %lld milliseconds. You can try killing the script using the SCRIPT KILL command.",elapsed);
server.lua_timedout = 1;
g_pserver->lua_timedout = 1;
/* Once the script times out we re-enter the event loop to permit others
* to call SCRIPT KILL or SHUTDOWN NOSAVE if needed. For this reason
* we need to mask the client executing the script from the event loop.
* If we don't do that the client may disconnect and could no longer be
* here when the EVAL command will return. */
protectClient(server.lua_caller);
protectClient(g_pserver->lua_caller);
}
if (server.lua_timedout) processEventsWhileBlocked(serverTL - server.rgthreadvar);
if (server.lua_kill) {
if (g_pserver->lua_timedout) processEventsWhileBlocked(serverTL - g_pserver->rgthreadvar);
if (g_pserver->lua_kill) {
serverLog(LL_WARNING,"Lua script killed by user with SCRIPT KILL.");
lua_pushstring(lua,"Script killed by user with SCRIPT KILL...");
lua_error(lua);
@ -1305,10 +1305,10 @@ void luaMaskCountHook(lua_State *lua, lua_Debug *ar) {
}
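/* Assumed timeline for the hook above in the SCRIPT KILL case:
 *   t0  EVAL starts; lua_time_start = mstime(), hook armed every 100k ops
 *   t1  elapsed >= lua_time_limit -> lua_timedout = 1, caller protected
 *   t2  processEventsWhileBlocked() serves SCRIPT KILL -> lua_kill = 1
 *   t3  the next hook invocation sees lua_kill and raises a Lua error,
 *       unwinding the script (only possible while it has not written). */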
void evalGenericCommand(client *c, int evalsha) {
lua_State *lua = server.lua;
lua_State *lua = g_pserver->lua;
char funcname[43];
long long numkeys;
long long initial_server_dirty = server.dirty;
long long initial_server_dirty = g_pserver->dirty;
int delhook = 0, err;
/* When we replicate whole scripts, we want the same PRNG sequence at
@ -1323,11 +1323,11 @@ void evalGenericCommand(client *c, int evalsha) {
*
* Thanks to this flag we'll raise an error every time a write command
* is called after a random command was used. */
server.lua_random_dirty = 0;
server.lua_write_dirty = 0;
server.lua_replicate_commands = server.lua_always_replicate_commands;
server.lua_multi_emitted = 0;
server.lua_repl = PROPAGATE_AOF|PROPAGATE_REPL;
g_pserver->lua_random_dirty = 0;
g_pserver->lua_write_dirty = 0;
g_pserver->lua_replicate_commands = g_pserver->lua_always_replicate_commands;
g_pserver->lua_multi_emitted = 0;
g_pserver->lua_repl = PROPAGATE_AOF|PROPAGATE_REPL;
/* Get the number of arguments that are keys */
if (getLongLongFromObjectOrReply(c,c->argv[2],&numkeys,NULL) != C_OK)
@ -1393,7 +1393,7 @@ void evalGenericCommand(client *c, int evalsha) {
luaSetGlobalArray(lua,"ARGV",c->argv+3+numkeys,c->argc-3-numkeys);
/* Select the right DB in the context of the Lua client */
selectDb(server.lua_client,c->db->id);
selectDb(g_pserver->lua_client,c->db->id);
/* Set a hook in order to be able to stop the script execution if it
* is running for too much time.
@ -1402,14 +1402,14 @@ void evalGenericCommand(client *c, int evalsha) {
*
* If we are debugging, we set instead a "line" hook so that the
* debugger is call-back at every line executed by the script. */
server.lua_caller = c;
server.lua_time_start = mstime();
server.lua_kill = 0;
if (server.lua_time_limit > 0 && ldb.active == 0) {
g_pserver->lua_caller = c;
g_pserver->lua_time_start = mstime();
g_pserver->lua_kill = 0;
if (g_pserver->lua_time_limit > 0 && ldb.active == 0) {
lua_sethook(lua,luaMaskCountHook,LUA_MASKCOUNT,100000);
delhook = 1;
} else if (ldb.active) {
lua_sethook(server.lua,luaLdbLineHook,LUA_MASKLINE|LUA_MASKCOUNT,100000);
lua_sethook(g_pserver->lua,luaLdbLineHook,LUA_MASKLINE|LUA_MASKCOUNT,100000);
delhook = 1;
}
@ -1420,14 +1420,14 @@ void evalGenericCommand(client *c, int evalsha) {
/* Perform some cleanup that we need to do both on error and success. */
if (delhook) lua_sethook(lua,NULL,0,0); /* Disable hook */
if (server.lua_timedout) {
server.lua_timedout = 0;
if (g_pserver->lua_timedout) {
g_pserver->lua_timedout = 0;
/* Restore the client that was protected when the script timeout
* was detected. */
unprotectClient(c);
listIter li;
listNode *ln;
listRewind(server.masters, &li);
listRewind(g_pserver->masters, &li);
while ((ln = listNext(&li)))
{
struct redisMaster *mi = (struct redisMaster*)listNodeValue(ln);
@ -1435,7 +1435,7 @@ void evalGenericCommand(client *c, int evalsha) {
queueClientForReprocessing(mi->master);
}
}
server.lua_caller = NULL;
g_pserver->lua_caller = NULL;
/* Call the Lua garbage collector from time to time to avoid a
* full cycle performed by Lua, which adds too much latency.
@ -1467,9 +1467,9 @@ void evalGenericCommand(client *c, int evalsha) {
/* If we are using single commands replication, emit EXEC if there
* was at least a write. */
if (server.lua_replicate_commands) {
if (g_pserver->lua_replicate_commands) {
preventCommandPropagation(c);
if (server.lua_multi_emitted) {
if (g_pserver->lua_multi_emitted) {
robj *propargv[1];
propargv[0] = createStringObject("EXEC",4);
alsoPropagate(cserver.execCommand,c->db->id,propargv,1,
@ -1488,12 +1488,12 @@ void evalGenericCommand(client *c, int evalsha) {
* For replication, every time a new slave attaches to the master, we need to
* flush our cache of scripts that can be replicated as EVALSHA, while
* for AOF we need to do so every time we rewrite the AOF file. */
if (evalsha && !server.lua_replicate_commands) {
if (evalsha && !g_pserver->lua_replicate_commands) {
if (!replicationScriptCacheExists((sds)ptrFromObj(c->argv[1]))) {
/* This script is not in our script cache, replicate it as
* EVAL, then add it into the script cache, as from now on
* slaves and AOF know about it. */
robj *script = (robj*)dictFetchValue(server.lua_scripts,ptrFromObj(c->argv[1]));
robj *script = (robj*)dictFetchValue(g_pserver->lua_scripts,ptrFromObj(c->argv[1]));
replicationScriptCacheAdd((sds)ptrFromObj(c->argv[1]));
serverAssertWithInfo(c,NULL,script != NULL);
@ -1502,7 +1502,7 @@ void evalGenericCommand(client *c, int evalsha) {
* just to replicate it as SCRIPT LOAD, otherwise we risk running
* an aborted script on slaves (that may then produce results there)
* or just running a CPU costly read-only script on the slaves. */
if (server.dirty == initial_server_dirty) {
if (g_pserver->dirty == initial_server_dirty) {
rewriteClientCommandVector(c,3,
resetRefCount(createStringObject("SCRIPT",6)),
resetRefCount(createStringObject("LOAD",4)),
@ -1556,31 +1556,31 @@ NULL
scriptingReset();
addReply(c,shared.ok);
replicationScriptCacheFlush();
server.dirty++; /* Propagating this command is a good idea. */
g_pserver->dirty++; /* Propagating this command is a good idea. */
} else if (c->argc >= 2 && !strcasecmp((const char*)ptrFromObj(c->argv[1]),"exists")) {
int j;
addReplyArrayLen(c, c->argc-2);
for (j = 2; j < c->argc; j++) {
if (dictFind(server.lua_scripts,ptrFromObj(c->argv[j])))
if (dictFind(g_pserver->lua_scripts,ptrFromObj(c->argv[j])))
addReply(c,shared.cone);
else
addReply(c,shared.czero);
}
} else if (c->argc == 3 && !strcasecmp((const char*)ptrFromObj(c->argv[1]),"load")) {
sds sha = luaCreateFunction(c,server.lua,c->argv[2]);
sds sha = luaCreateFunction(c,g_pserver->lua,c->argv[2]);
if (sha == NULL) return; /* The error was sent by luaCreateFunction(). */
addReplyBulkCBuffer(c,sha,40);
forceCommandPropagation(c,PROPAGATE_REPL|PROPAGATE_AOF);
} else if (c->argc == 2 && !strcasecmp((const char*)ptrFromObj(c->argv[1]),"kill")) {
if (server.lua_caller == NULL) {
if (g_pserver->lua_caller == NULL) {
addReplySds(c,sdsnew("-NOTBUSY No scripts in execution right now.\r\n"));
} else if (server.lua_caller->flags & CLIENT_MASTER) {
} else if (g_pserver->lua_caller->flags & CLIENT_MASTER) {
addReplySds(c,sdsnew("-UNKILLABLE The busy script was sent by a master instance in the context of replication and cannot be killed.\r\n"));
} else if (server.lua_write_dirty) {
} else if (g_pserver->lua_write_dirty) {
addReplySds(c,sdsnew("-UNKILLABLE Sorry the script already executed write commands against the dataset. You can either wait the script termination or kill the server in a hard way using the SHUTDOWN NOSAVE command.\r\n"));
} else {
server.lua_kill = 1;
g_pserver->lua_kill = 1;
addReply(c,shared.ok);
}
} else if (c->argc == 3 && !strcasecmp((const char*)ptrFromObj(c->argv[1]),"debug")) {
@ -2289,7 +2289,7 @@ void ldbEval(lua_State *lua, sds *argv, int argc) {
* implementation, with ldb.step enabled, so as a side effect the Redis command
* and its reply are logged. */
void ldbRedis(lua_State *lua, sds *argv, int argc) {
int j, saved_rc = server.lua_replicate_commands;
int j, saved_rc = g_pserver->lua_replicate_commands;
lua_getglobal(lua,"redis");
lua_pushstring(lua,"call");
@ -2297,10 +2297,10 @@ void ldbRedis(lua_State *lua, sds *argv, int argc) {
for (j = 1; j < argc; j++)
lua_pushlstring(lua,argv[j],sdslen(argv[j]));
ldb.step = 1; /* Force redis.call() to log. */
server.lua_replicate_commands = 1;
g_pserver->lua_replicate_commands = 1;
lua_pcall(lua,argc-1,1,0); /* Stack: redis, result */
ldb.step = 0; /* Disable logging. */
server.lua_replicate_commands = saved_rc;
g_pserver->lua_replicate_commands = saved_rc;
lua_pop(lua,2); /* Discard the result and clean the stack. */
}
@ -2474,9 +2474,9 @@ void luaLdbLineHook(lua_State *lua, lua_Debug *ar) {
/* Check if a timeout occurred. */
if (ar->event == LUA_HOOKCOUNT && ldb.step == 0 && bp == 0) {
mstime_t elapsed = mstime() - server.lua_time_start;
mstime_t timelimit = server.lua_time_limit ?
server.lua_time_limit : 5000;
mstime_t elapsed = mstime() - g_pserver->lua_time_start;
mstime_t timelimit = g_pserver->lua_time_limit ?
g_pserver->lua_time_limit : 5000;
if (elapsed >= timelimit) {
timeout = 1;
ldb.step = 1;
@ -2504,7 +2504,7 @@ void luaLdbLineHook(lua_State *lua, lua_Debug *ar) {
lua_pushstring(lua, "timeout during Lua debugging with client closing connection");
lua_error(lua);
}
server.lua_time_start = mstime();
g_pserver->lua_time_start = mstime();
}
}


@ -460,8 +460,8 @@ struct redisCommand sentinelcmds[] = {
/* This function overwrites a few normal Redis config default with Sentinel
* specific defaults. */
void initSentinelConfig(void) {
server.port = REDIS_SENTINEL_PORT;
server.protected_mode = 0; /* Sentinel must be exposed. */
g_pserver->port = REDIS_SENTINEL_PORT;
g_pserver->protected_mode = 0; /* Sentinel must be exposed. */
}
/* Perform the Sentinel mode initialization. */
@ -470,12 +470,12 @@ void initSentinel(void) {
/* Remove usual Redis commands from the command table, then just add
* the SENTINEL command. */
dictEmpty(server.commands,NULL);
dictEmpty(g_pserver->commands,NULL);
for (j = 0; j < sizeof(sentinelcmds)/sizeof(sentinelcmds[0]); j++) {
int retval;
struct redisCommand *cmd = sentinelcmds+j;
retval = dictAdd(server.commands, sdsnew(cmd->name), cmd);
retval = dictAdd(g_pserver->commands, sdsnew(cmd->name), cmd);
serverAssert(retval == DICT_OK);
/* Translate the command string flags description into an actual
@ -1926,12 +1926,12 @@ void rewriteConfigSentinelOption(struct rewriteConfigState *state) {
* On failure the function logs a warning on the Redis log. */
void sentinelFlushConfig(void) {
int fd = -1;
int saved_hz = server.hz;
int saved_hz = g_pserver->hz;
int rewrite_status;
server.hz = CONFIG_DEFAULT_HZ;
g_pserver->hz = CONFIG_DEFAULT_HZ;
rewrite_status = rewriteConfig(cserver.configfile);
server.hz = saved_hz;
g_pserver->hz = saved_hz;
if (rewrite_status == -1) goto werr;
if ((fd = open(cserver.configfile,O_RDONLY)) == -1) goto werr;
@ -2018,7 +2018,7 @@ void sentinelReconnectInstance(sentinelRedisInstance *ri) {
link->pending_commands = 0;
link->cc_conn_time = mstime();
link->cc->data = link;
redisAeAttach(server.rgthreadvar[IDX_EVENT_LOOP_MAIN].el,link->cc);
redisAeAttach(g_pserver->rgthreadvar[IDX_EVENT_LOOP_MAIN].el,link->cc);
redisAsyncSetConnectCallback(link->cc,
sentinelLinkEstablishedCallback);
redisAsyncSetDisconnectCallback(link->cc,
@ -2042,7 +2042,7 @@ void sentinelReconnectInstance(sentinelRedisInstance *ri) {
link->pc_conn_time = mstime();
link->pc->data = link;
redisAeAttach(server.rgthreadvar[IDX_EVENT_LOOP_MAIN].el,link->pc);
redisAeAttach(g_pserver->rgthreadvar[IDX_EVENT_LOOP_MAIN].el,link->pc);
redisAsyncSetConnectCallback(link->pc,
sentinelLinkEstablishedCallback);
redisAsyncSetDisconnectCallback(link->pc,
@ -2585,7 +2585,7 @@ int sentinelSendHello(sentinelRedisInstance *ri) {
announce_ip = ip;
}
announce_port = sentinel.announce_port ?
sentinel.announce_port : server.port;
sentinel.announce_port : g_pserver->port;
/* Format and send the Hello message. */
snprintf(payload,sizeof(payload),
@ -4521,6 +4521,6 @@ void sentinelTimer(void) {
* exactly continue to stay synchronized asking to be voted at the
* same time again and again (resulting in nobody likely winning the
* election because of split brain voting). */
server.hz = CONFIG_DEFAULT_HZ + rand() % CONFIG_DEFAULT_HZ;
g_pserver->hz = CONFIG_DEFAULT_HZ + rand() % CONFIG_DEFAULT_HZ;
}
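/* Worked example, assuming CONFIG_DEFAULT_HZ is 10 as in stock Redis:
 * hz is resampled into [10, 19] on every pass, so Sentinels that booted
 * in lockstep drift apart and stop requesting votes at the same instant. */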

File diff suppressed because it is too large


@ -262,7 +262,7 @@ public:
#define LIMIT_PENDING_QUERYBUF (4*1024*1024) /* 4mb */
/* When configuring the server eventloop, we setup it so that the total number
* of file descriptors we can handle are server.maxclients + RESERVED_FDS +
* of file descriptors we can handle are g_pserver->maxclients + RESERVED_FDS +
* a few more to stay safe. Since RESERVED_FDS defaults to 32, we add 96
* in order to make sure of not over provisioning more than 128 fds. */
#define CONFIG_FDSET_INCR (CONFIG_MIN_RESERVED_FDS+96)
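/* Worked example: with maxclients = 10000 and CONFIG_MIN_RESERVED_FDS = 32,
 * the event loop is sized for 10000 + 32 + 96 = 10128 descriptors, keeping
 * the overprovisioning within the intended 128-fd budget. */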
@ -329,7 +329,7 @@ public:
#define CLIENT_DIRTY_CAS (1<<5) /* Watched keys modified. EXEC will fail. */
#define CLIENT_CLOSE_AFTER_REPLY (1<<6) /* Close after writing entire reply. */
#define CLIENT_UNBLOCKED (1<<7) /* This client was unblocked and is stored in
server.unblocked_clients */
g_pserver->unblocked_clients */
#define CLIENT_LUA (1<<8) /* This is a non connected client used by Lua */
#define CLIENT_ASKING (1<<9) /* Client issued the ASKING command */
#define CLIENT_CLOSE_ASAP (1<<10)/* Close this client ASAP */
@ -379,7 +379,7 @@ public:
buffer configuration. Just the first
three: normal, slave, pubsub. */
/* Slave replication state. Used in server.repl_state for slaves to remember
/* Slave replication state. Used in g_pserver->repl_state for slaves to remember
* what to do next. */
#define REPL_STATE_NONE 0 /* No active replication */
#define REPL_STATE_CONNECT 1 /* Must connect to master */
@ -544,12 +544,12 @@ public:
#define NOTIFY_ALL (NOTIFY_GENERIC | NOTIFY_STRING | NOTIFY_LIST | NOTIFY_SET | NOTIFY_HASH | NOTIFY_ZSET | NOTIFY_EXPIRED | NOTIFY_EVICTED | NOTIFY_STREAM | NOTIFY_KEY_MISS) /* A flag */
/* Get the first bind addr or NULL */
#define NET_FIRST_BIND_ADDR (server.bindaddr_count ? server.bindaddr[0] : NULL)
#define NET_FIRST_BIND_ADDR (g_pserver->bindaddr_count ? g_pserver->bindaddr[0] : NULL)
/* Using the following macro you can run code inside serverCron() with the
* specified period, specified in milliseconds.
* The actual resolution depends on server.hz. */
#define run_with_period(_ms_) if ((_ms_ <= 1000/server.hz) || !(server.cronloops%((_ms_)/(1000/server.hz))))
* The actual resolution depends on g_pserver->hz. */
#define run_with_period(_ms_) if ((_ms_ <= 1000/g_pserver->hz) || !(g_pserver->cronloops%((_ms_)/(1000/g_pserver->hz))))
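/* Usage sketch inside serverCron() (hypothetical body): with hz = 10 a cron
 * tick is 100 ms, so this block runs on every 10th loop; periods at or
 * below one tick degrade to running on every tick.
 *
 *   run_with_period(1000) {
 *       // e.g. refresh cached metrics once per second
 *   }
 */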
/* We can print the stacktrace, so our assert is defined this way: */
#define serverAssertWithInfo(_c,_o,_e) ((_e)?(void)0 : (_serverAssertWithInfo(_c,_o,#_e,__FILE__,__LINE__),_exit(1)))
@ -827,17 +827,17 @@ typedef struct blockingState {
handled in module.c. */
} blockingState;
/* The following structure represents a node in the server.ready_keys list,
/* The following structure represents a node in the g_pserver->ready_keys list,
* where we accumulate all the keys that had clients blocked with a blocking
* operation such as B[LR]POP, but received new data in the context of the
* last executed command.
*
* After the execution of every command or script, we run this list to check
* if as a result we should serve data to clients blocked, unblocking them.
 * Note that server.ready_keys will not have duplicates, as there is a dictionary
 * Note that g_pserver->ready_keys will not have duplicates, as there is a dictionary
* also called ready_keys in every structure representing a Redis database,
* where we make sure to remember if a given key was already added in the
* server.ready_keys list. */
* g_pserver->ready_keys list. */
typedef struct readyList {
redisDb *db;
robj *key;
@ -1097,7 +1097,7 @@ struct redisMemOverhead {
* top-level master. */
typedef struct rdbSaveInfo {
/* Used saving and loading. */
int repl_stream_db; /* DB to select in server.master client. */
int repl_stream_db; /* DB to select in g_pserver->master client. */
/* Used only loading. */
int repl_id_is_set; /* True if repl_id field is set. */
@ -1172,7 +1172,7 @@ struct redisMaster {
time_t repl_down_since; /* Unix time at which link with master went down */
unsigned char master_uuid[UUID_BINARY_LEN]; /* Used during sync with master, this is our master's UUID */
/* After we've connected with our master use the UUID in server.master */
/* After we've connected with our master use the UUID in g_pserver->master */
};
// Const vars are not changed after worker threads are launched
@ -1254,7 +1254,7 @@ struct redisServer {
int port; /* TCP listening port */
int tcp_backlog; /* TCP listen() backlog */
char *bindaddr[CONFIG_BINDADDR_MAX]; /* Addresses we should bind to */
int bindaddr_count; /* Number of addresses in server.bindaddr[] */
int bindaddr_count; /* Number of addresses in g_pserver->bindaddr[] */
char *unixsocket; /* UNIX socket path */
mode_t unixsocketperm; /* UNIX socket permission */
int sofd; /* Unix socket file descriptor */
@ -1407,7 +1407,7 @@ struct redisServer {
time_t repl_backlog_time_limit; /* Time without slaves after the backlog
gets released. */
time_t repl_no_slaves_since; /* We have no slaves since that time.
Only valid if server.slaves len is 0. */
Only valid if g_pserver->slaves len is 0. */
int repl_min_slaves_to_write; /* Min number of slaves to write. */
int repl_min_slaves_max_lag; /* Max lag of <count> slaves to write. */
int repl_good_slaves_count; /* Number of slaves with lag <= max_lag. */
@ -2503,7 +2503,7 @@ inline int ielFromEventLoop(const aeEventLoop *eventLoop)
int iel = 0;
for (; iel < cserver.cthreads; ++iel)
{
if (server.rgthreadvar[iel].el == eventLoop)
if (g_pserver->rgthreadvar[iel].el == eventLoop)
break;
}
serverAssert(iel < cserver.cthreads);
@ -2512,7 +2512,7 @@ inline int ielFromEventLoop(const aeEventLoop *eventLoop)
inline int FCorrectThread(client *c)
{
return (serverTL != NULL && (server.rgthreadvar[c->iel].el == serverTL->el))
return (serverTL != NULL && (g_pserver->rgthreadvar[c->iel].el == serverTL->el))
|| (c->iel == IDX_EVENT_LOOP_MAIN && moduleGILAcquiredByModule())
|| (c->fd == -1);
}
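/* Hypothetical call-site sketch for the predicate above: code handed a
 * client owned by another event loop should detect the mismatch and
 * marshal the work to the owning thread instead of touching the client.
 *
 *   if (!FCorrectThread(c)) {
 *       // queue the operation to g_pserver->rgthreadvar[c->iel].el
 *   }
 */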


@ -87,7 +87,7 @@ slowlogEntry *slowlogCreateEntry(client *c, robj **argv, int argc, long long dur
}
se->time = time(NULL);
se->duration = duration;
se->id = server.slowlog_entry_id++;
se->id = g_pserver->slowlog_entry_id++;
se->peerid = sdsnew(getClientPeerId(c));
se->cname = c->name ? sdsnew(szFromObj(c->name)) : sdsempty();
return se;
@ -112,29 +112,29 @@ void slowlogFreeEntry(const void *septr) {
/* Initialize the slow log. This function should be called a single time
* at server startup. */
void slowlogInit(void) {
server.slowlog = listCreate();
server.slowlog_entry_id = 0;
listSetFreeMethod(server.slowlog,slowlogFreeEntry);
g_pserver->slowlog = listCreate();
g_pserver->slowlog_entry_id = 0;
listSetFreeMethod(g_pserver->slowlog,slowlogFreeEntry);
}
/* Push a new entry into the slow log.
* This function will make sure to trim the slow log according to the
* configured max length. */
void slowlogPushEntryIfNeeded(client *c, robj **argv, int argc, long long duration) {
if (server.slowlog_log_slower_than < 0) return; /* Slowlog disabled */
if (duration >= server.slowlog_log_slower_than)
listAddNodeHead(server.slowlog,
if (g_pserver->slowlog_log_slower_than < 0) return; /* Slowlog disabled */
if (duration >= g_pserver->slowlog_log_slower_than)
listAddNodeHead(g_pserver->slowlog,
slowlogCreateEntry(c,argv,argc,duration));
/* Remove old entries if needed. */
while (listLength(server.slowlog) > server.slowlog_max_len)
listDelNode(server.slowlog,listLast(server.slowlog));
while (listLength(g_pserver->slowlog) > g_pserver->slowlog_max_len)
listDelNode(g_pserver->slowlog,listLast(g_pserver->slowlog));
}
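/* Behavior sketch with illustrative settings: with slowlog-log-slower-than
 * at 10000 (10 ms) and slowlog-max-len at 2, three 15 ms commands leave
 * only the two newest entries, since each push adds at the head and the
 * while loop above evicts from the tail. A negative slowlog-log-slower-than
 * disables logging entirely. */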
/* Remove all the entries from the current slow log. */
void slowlogReset(void) {
while (listLength(server.slowlog) > 0)
listDelNode(server.slowlog,listLast(server.slowlog));
while (listLength(g_pserver->slowlog) > 0)
listDelNode(g_pserver->slowlog,listLast(g_pserver->slowlog));
}
/* The SLOWLOG command. Implements all the subcommands needed to handle the
@ -154,7 +154,7 @@ NULL
slowlogReset();
addReply(c,shared.ok);
} else if (c->argc == 2 && !strcasecmp(szFromObj(c->argv[1]),"len")) {
addReplyLongLong(c,listLength(server.slowlog));
addReplyLongLong(c,listLength(g_pserver->slowlog));
} else if ((c->argc == 2 || c->argc == 3) &&
!strcasecmp(szFromObj(c->argv[1]),"get"))
{
@ -168,7 +168,7 @@ NULL
getLongFromObjectOrReply(c,c->argv[2],&count,NULL) != C_OK)
return;
listRewind(server.slowlog,&li);
listRewind(g_pserver->slowlog,&li);
totentries = addReplyDeferredLen(c);
while(count-- && (ln = listNext(&li))) {
int j;


@ -143,7 +143,7 @@ int sortCompare(const void *s1, const void *s2) {
const redisSortObject *so1 = (redisSortObject*)s1, *so2 = (redisSortObject*)s2;
int cmp;
if (!server.sort_alpha) {
if (!g_pserver->sort_alpha) {
/* Numeric sorting. Here it's trivial as we precomputed scores */
if (so1->u.score > so2->u.score) {
cmp = 1;
@ -157,7 +157,7 @@ int sortCompare(const void *s1, const void *s2) {
}
} else {
/* Alphanumeric sorting */
if (server.sort_bypattern) {
if (g_pserver->sort_bypattern) {
if (!so1->u.cmpobj || !so2->u.cmpobj) {
/* At least one compare object is NULL */
if (so1->u.cmpobj == so2->u.cmpobj)
@ -168,7 +168,7 @@ int sortCompare(const void *s1, const void *s2) {
cmp = 1;
} else {
/* We have both the objects, compare them. */
if (server.sort_store) {
if (g_pserver->sort_store) {
cmp = compareStringObjects(so1->u.cmpobj,so2->u.cmpobj);
} else {
/* Here we can use strcoll() directly as we are sure that
@ -178,14 +178,14 @@ int sortCompare(const void *s1, const void *s2) {
}
} else {
/* Compare elements directly. */
if (server.sort_store) {
if (g_pserver->sort_store) {
cmp = compareStringObjects(so1->obj,so2->obj);
} else {
cmp = collateStringObjects(so1->obj,so2->obj);
}
}
}
return server.sort_desc ? -cmp : cmp;
return g_pserver->sort_desc ? -cmp : cmp;
}
/* The SORT command is the most complex command in Redis. Warning: this code
@ -239,7 +239,7 @@ void sortCommand(client *c) {
} else {
/* If BY is specified with a real pattern, we can't accept
* it in cluster mode. */
if (server.cluster_enabled) {
if (g_pserver->cluster_enabled) {
addReplyError(c,"BY option of SORT denied in Cluster mode.");
syntax_error++;
break;
@ -247,7 +247,7 @@ void sortCommand(client *c) {
}
j++;
} else if (!strcasecmp(szFromObj(c->argv[j]),"get") && leftargs >= 1) {
if (server.cluster_enabled) {
if (g_pserver->cluster_enabled) {
addReplyError(c,"GET option of SORT denied in Cluster mode.");
syntax_error++;
break;
@ -496,10 +496,10 @@ void sortCommand(client *c) {
}
}
server.sort_desc = desc;
server.sort_alpha = alpha;
server.sort_bypattern = sortby ? 1 : 0;
server.sort_store = storekey ? 1 : 0;
g_pserver->sort_desc = desc;
g_pserver->sort_alpha = alpha;
g_pserver->sort_bypattern = sortby ? 1 : 0;
g_pserver->sort_store = storekey ? 1 : 0;
if (sortby && (start != 0 || end != vectorlen-1))
pqsort(vector,vectorlen,sizeof(redisSortObject),sortCompare, start,end);
else
@ -574,11 +574,11 @@ void sortCommand(client *c) {
setKey(c->db,storekey,sobj);
notifyKeyspaceEvent(NOTIFY_LIST,"sortstore",storekey,
c->db->id);
server.dirty += outputlen;
g_pserver->dirty += outputlen;
} else if (dbDelete(c->db,storekey)) {
signalModifiedKey(c->db,storekey);
notifyKeyspaceEvent(NOTIFY_GENERIC,"del",storekey,c->db->id);
server.dirty++;
g_pserver->dirty++;
}
decrRefCount(sobj);
addReplyLongLong(c,outputlen);

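sortCompare() receives no context argument, which is why sortCommand() stashes desc/alpha/bypattern/store in the server struct (now reached through g_pserver) before calling pqsort(). A minimal sketch of the same comparator-through-global pattern with standard qsort(); the names here are illustrative, not the real fields:

#include <stdio.h>
#include <stdlib.h>

static int g_sort_desc;    /* set by the caller just before sorting */

static int cmp_long(const void *a, const void *b) {
    long x = *(const long *)a, y = *(const long *)b;
    int cmp = (x > y) - (x < y);
    return g_sort_desc ? -cmp : cmp;   /* same final flip as sortCompare() */
}

int main(void) {
    long v[] = {3, 1, 2};
    g_sort_desc = 1;
    qsort(v, 3, sizeof(long), cmp_long);
    printf("%ld %ld %ld\n", v[0], v[1], v[2]);   /* prints: 3 2 1 */
    return 0;
}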
View File

@ -44,7 +44,7 @@ void hashTypeTryConversion(robj *o, robj **argv, int start, int end) {
for (i = start; i <= end; i++) {
if (sdsEncodedObject(argv[i]) &&
sdslen(szFromObj(argv[i])) > server.hash_max_ziplist_value)
sdslen(szFromObj(argv[i])) > g_pserver->hash_max_ziplist_value)
{
hashTypeConvert(o, OBJ_ENCODING_HT);
break;
@ -234,7 +234,7 @@ int hashTypeSet(robj *o, sds field, sds value, int flags) {
o->m_ptr = zl;
/* Check if the ziplist needs to be converted to a hash table */
if (hashTypeLength(o) > server.hash_max_ziplist_entries)
if (hashTypeLength(o) > g_pserver->hash_max_ziplist_entries)
hashTypeConvert(o, OBJ_ENCODING_HT);
} else if (o->encoding == OBJ_ENCODING_HT) {
dictEntry *de = dictFind((dict*)ptrFromObj(o),field);
@ -523,7 +523,7 @@ void hsetnxCommand(client *c) {
addReply(c, shared.cone);
signalModifiedKey(c->db,c->argv[1]);
notifyKeyspaceEvent(NOTIFY_HASH,"hset",c->argv[1],c->db->id);
server.dirty++;
g_pserver->dirty++;
}
}
@ -553,7 +553,7 @@ void hsetCommand(client *c) {
}
signalModifiedKey(c->db,c->argv[1]);
notifyKeyspaceEvent(NOTIFY_HASH,"hset",c->argv[1],c->db->id);
server.dirty++;
g_pserver->dirty++;
}
void hincrbyCommand(client *c) {
@ -588,7 +588,7 @@ void hincrbyCommand(client *c) {
addReplyLongLong(c,value);
signalModifiedKey(c->db,c->argv[1]);
notifyKeyspaceEvent(NOTIFY_HASH,"hincrby",c->argv[1],c->db->id);
server.dirty++;
g_pserver->dirty++;
}
void hincrbyfloatCommand(client *c) {
@ -627,7 +627,7 @@ void hincrbyfloatCommand(client *c) {
addReplyBulkCBuffer(c,buf,len);
signalModifiedKey(c->db,c->argv[1]);
notifyKeyspaceEvent(NOTIFY_HASH,"hincrbyfloat",c->argv[1],c->db->id);
server.dirty++;
g_pserver->dirty++;
/* Always replicate HINCRBYFLOAT as an HSET command with the final value
* in order to make sure that differences in float precision or formatting
@ -726,7 +726,7 @@ void hdelCommand(client *c) {
if (keyremoved)
notifyKeyspaceEvent(NOTIFY_GENERIC,"del",c->argv[1],
c->db->id);
server.dirty += deleted;
g_pserver->dirty += deleted;
}
addReplyLongLong(c,deleted);
}

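Both hunks in this file apply the same two-threshold rule: a hash stays in the compact ziplist encoding only while every value is short and the entry count is small, and crossing either limit forces a one-way conversion to a real hash table. A minimal sketch of that decision, with illustrative names standing in for hash_max_ziplist_value/entries:

#include <stddef.h>

enum hash_enc { ENC_ZIPLIST, ENC_HT };

/* One-way check: once either budget is exceeded, use the hash table. */
static enum hash_enc pick_hash_encoding(size_t entries, size_t longest_value,
                                        size_t max_entries, size_t max_value) {
    if (entries > max_entries || longest_value > max_value)
        return ENC_HT;       /* too big for the compact representation */
    return ENC_ZIPLIST;      /* small enough to stay compact */
}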
View File

@ -181,8 +181,8 @@ void listTypeConvert(robj *subject, int enc) {
serverAssertWithInfo(NULL,subject,subject->encoding==OBJ_ENCODING_ZIPLIST);
if (enc == OBJ_ENCODING_QUICKLIST) {
size_t zlen = server.list_max_ziplist_size;
int depth = server.list_compress_depth;
size_t zlen = g_pserver->list_max_ziplist_size;
int depth = g_pserver->list_compress_depth;
subject->m_ptr = quicklistCreateFromZiplist(zlen, depth, (unsigned char*)ptrFromObj(subject));
subject->encoding = OBJ_ENCODING_QUICKLIST;
} else {
@ -206,8 +206,8 @@ void pushGenericCommand(client *c, int where) {
for (j = 2; j < c->argc; j++) {
if (!lobj) {
lobj = createQuicklistObject();
quicklistSetOptions((quicklist*)ptrFromObj(lobj), server.list_max_ziplist_size,
server.list_compress_depth);
quicklistSetOptions((quicklist*)ptrFromObj(lobj), g_pserver->list_max_ziplist_size,
g_pserver->list_compress_depth);
dbAdd(c->db,c->argv[1],lobj);
}
listTypePush(lobj,c->argv[j],where);
@ -220,7 +220,7 @@ void pushGenericCommand(client *c, int where) {
signalModifiedKey(c->db,c->argv[1]);
notifyKeyspaceEvent(NOTIFY_LIST,event,c->argv[1],c->db->id);
}
server.dirty += pushed;
g_pserver->dirty += pushed;
}
void lpushCommand(client *c) {
@ -250,7 +250,7 @@ void pushxGenericCommand(client *c, int where) {
signalModifiedKey(c->db,c->argv[1]);
notifyKeyspaceEvent(NOTIFY_LIST,event,c->argv[1],c->db->id);
}
server.dirty += pushed;
g_pserver->dirty += pushed;
}
void lpushxCommand(client *c) {
@ -295,7 +295,7 @@ void linsertCommand(client *c) {
signalModifiedKey(c->db,c->argv[1]);
notifyKeyspaceEvent(NOTIFY_LIST,"linsert",
c->argv[1],c->db->id);
server.dirty++;
g_pserver->dirty++;
} else {
/* Notify client of a failed insert */
addReplyLongLong(c,-1);
@ -357,7 +357,7 @@ void lsetCommand(client *c) {
addReply(c,shared.ok);
signalModifiedKey(c->db,c->argv[1]);
notifyKeyspaceEvent(NOTIFY_LIST,"lset",c->argv[1],c->db->id);
server.dirty++;
g_pserver->dirty++;
}
} else {
serverPanic("Unknown list encoding");
@ -383,7 +383,7 @@ void popGenericCommand(client *c, int where) {
dbDelete(c->db,c->argv[1]);
}
signalModifiedKey(c->db,c->argv[1]);
server.dirty++;
g_pserver->dirty++;
}
}
@ -483,7 +483,7 @@ void ltrimCommand(client *c) {
notifyKeyspaceEvent(NOTIFY_GENERIC,"del",c->argv[1],c->db->id);
}
signalModifiedKey(c->db,c->argv[1]);
server.dirty++;
g_pserver->dirty++;
addReply(c,shared.ok);
}
@ -511,7 +511,7 @@ void lremCommand(client *c) {
while (listTypeNext(li,&entry)) {
if (listTypeEqual(&entry,obj)) {
listTypeDelete(li, &entry);
server.dirty++;
g_pserver->dirty++;
removed++;
if (toremove && removed == toremove) break;
}
@ -551,8 +551,8 @@ static void rpoplpushHandlePush(client *c, robj *dstkey, robj *dstobj, robj *val
/* Create the list if the key does not exist */
if (!dstobj) {
dstobj = createQuicklistObject();
quicklistSetOptions((quicklist*)ptrFromObj(dstobj), server.list_max_ziplist_size,
server.list_compress_depth);
quicklistSetOptions((quicklist*)ptrFromObj(dstobj), g_pserver->list_max_ziplist_size,
g_pserver->list_compress_depth);
dbAdd(c->db,dstkey,dstobj);
}
signalModifiedKey(c->db,dstkey);
@ -595,7 +595,7 @@ void rpoplpushCommand(client *c) {
}
signalModifiedKey(c->db,touchedkey);
decrRefCount(touchedkey);
server.dirty++;
g_pserver->dirty++;
if (c->cmd->proc == brpoplpushCommand) {
rewriteClientCommandVector(c,3,shared.rpoplpush,c->argv[1],c->argv[2]);
}
@ -721,7 +721,7 @@ void blockingPopGenericCommand(client *c, int where) {
c->argv[j],c->db->id);
}
signalModifiedKey(c->db,c->argv[j]);
server.dirty++;
g_pserver->dirty++;
/* Replicate it as an [LR]POP instead of B[LR]POP. */
rewriteClientCommandVector(c,2,

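The list hunks read their quicklist options through g_pserver, which is the point of this commit: once configuration lives behind a pointer, a test can set that pointer to NULL, and any code path that still runs cleanly is proven not to depend on it. A minimal self-contained sketch of the technique, with illustrative names in place of the real server struct:

#include <assert.h>
#include <stddef.h>

struct cfg { int list_max_ziplist_size; int list_compress_depth; };

static struct cfg cfg_storage = { 128, 0 };
static struct cfg *g_pcfg = &cfg_storage;    /* analogue of g_pserver */

static int fill_option(void) {
    return g_pcfg->list_max_ziplist_size;    /* dereferences the global */
}

static int threadsafe_path(int x) {
    return x + 1;                            /* touches no global state */
}

int main(void) {
    struct cfg *saved = g_pcfg;
    g_pcfg = NULL;                        /* would crash fill_option()... */
    assert(threadsafe_path(1) == 2);      /* ...but this path survives */
    g_pcfg = saved;
    return fill_option() == 128 ? 0 : 1;
}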
View File

@ -66,7 +66,7 @@ int setTypeAdd(robj *subject, const char *value) {
if (success) {
/* Convert to a regular set when the intset contains
* too many entries. */
if (intsetLen((intset*)subject->m_ptr) > server.set_max_intset_entries)
if (intsetLen((intset*)subject->m_ptr) > g_pserver->set_max_intset_entries)
setTypeConvert(subject,OBJ_ENCODING_HT);
return 1;
}
@ -288,7 +288,7 @@ void saddCommand(client *c) {
signalModifiedKey(c->db,c->argv[1]);
notifyKeyspaceEvent(NOTIFY_SET,"sadd",c->argv[1],c->db->id);
}
server.dirty += added;
g_pserver->dirty += added;
addReplyLongLong(c,added);
}
@ -315,7 +315,7 @@ void sremCommand(client *c) {
if (keyremoved)
notifyKeyspaceEvent(NOTIFY_GENERIC,"del",c->argv[1],
c->db->id);
server.dirty += deleted;
g_pserver->dirty += deleted;
}
addReplyLongLong(c,deleted);
}
@ -365,11 +365,11 @@ void smoveCommand(client *c) {
signalModifiedKey(c->db,c->argv[1]);
signalModifiedKey(c->db,c->argv[2]);
server.dirty++;
g_pserver->dirty++;
/* An extra key has changed when ele was successfully added to dstset */
if (setTypeAdd(dstset,szFromObj(ele))) {
server.dirty++;
g_pserver->dirty++;
notifyKeyspaceEvent(NOTIFY_SET,"sadd",c->argv[2],c->db->id);
}
addReply(c,shared.cone);
@ -434,7 +434,7 @@ void spopWithCountCommand(client *c) {
/* Generate an SPOP keyspace notification */
notifyKeyspaceEvent(NOTIFY_SET,"spop",c->argv[1],c->db->id);
server.dirty += count;
g_pserver->dirty += count;
/* CASE 1:
* The number of requested elements is greater than or equal to
@ -450,7 +450,7 @@ void spopWithCountCommand(client *c) {
/* Propagate this command as a DEL operation */
rewriteClientCommandVector(c,2,shared.del,c->argv[1]);
signalModifiedKey(c->db,c->argv[1]);
server.dirty++;
g_pserver->dirty++;
return;
}
@ -552,7 +552,7 @@ void spopWithCountCommand(client *c) {
decrRefCount(propargv[0]);
preventCommandPropagation(c);
signalModifiedKey(c->db,c->argv[1]);
server.dirty++;
g_pserver->dirty++;
}
void spopCommand(client *c) {
@ -605,7 +605,7 @@ void spopCommand(client *c) {
/* Set has been modified */
signalModifiedKey(c->db,c->argv[1]);
server.dirty++;
g_pserver->dirty++;
}
/* handle the "SRANDMEMBER key <count>" variant. The normal version of the
@ -814,7 +814,7 @@ void sinterGenericCommand(client *c, robj **setkeys,
if (dstkey) {
if (dbDelete(c->db,dstkey)) {
signalModifiedKey(c->db,dstkey);
server.dirty++;
g_pserver->dirty++;
}
addReply(c,shared.czero);
} else {
@ -917,7 +917,7 @@ void sinterGenericCommand(client *c, robj **setkeys,
dstkey,c->db->id);
}
signalModifiedKey(c->db,dstkey);
server.dirty++;
g_pserver->dirty++;
} else {
setDeferredSetLen(c,replylen,cardinality);
}
@ -1072,7 +1072,7 @@ void sunionDiffGenericCommand(client *c, robj **setkeys, int setnum,
sdsfree(ele);
}
setTypeReleaseIterator(si);
server.lazyfree_lazy_server_del ? freeObjAsync(dstset) :
g_pserver->lazyfree_lazy_server_del ? freeObjAsync(dstset) :
decrRefCount(dstset);
} else {
/* If we have a target key where to store the resulting set
@ -1092,7 +1092,7 @@ void sunionDiffGenericCommand(client *c, robj **setkeys, int setnum,
dstkey,c->db->id);
}
signalModifiedKey(c->db,dstkey);
server.dirty++;
g_pserver->dirty++;
}
zfree(sets);
}

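One hunk here changes more than a name: the temporary dstset is released either inline or asynchronously depending on lazyfree_lazy_server_del. A minimal sketch of that choice; free_async() below merely stands in for handing the object to a background reclaim thread:

#include <stdlib.h>

static int g_lazyfree_enabled = 1;

static void free_now(void *p)   { free(p); }
static void free_async(void *p) { free(p); /* real code queues to a bg thread */ }

/* Same ternary shape as in sunionDiffGenericCommand() above. */
static void release_object(void *p) {
    g_lazyfree_enabled ? free_async(p) : free_now(p);
}

int main(void) {
    release_object(malloc(16));
    return 0;
}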
View File

@ -241,18 +241,18 @@ int streamAppendItem(stream *s, robj **argv, int64_t numfields, streamID *added_
* if we need to switch to the next one. 'lp' will be set to NULL if
* the current node is full. */
if (lp != NULL) {
if (server.stream_node_max_bytes &&
lp_bytes > server.stream_node_max_bytes)
if (g_pserver->stream_node_max_bytes &&
lp_bytes > g_pserver->stream_node_max_bytes)
{
lp = NULL;
} else if (server.stream_node_max_entries) {
} else if (g_pserver->stream_node_max_entries) {
int64_t count = lpGetInteger(lpFirst(lp));
if (count > server.stream_node_max_entries) lp = NULL;
if (count > g_pserver->stream_node_max_entries) lp = NULL;
}
}
int flags = STREAM_ITEM_FLAG_NONE;
if (lp == NULL || lp_bytes > server.stream_node_max_bytes) {
if (lp == NULL || lp_bytes > g_pserver->stream_node_max_bytes) {
master_id = id;
streamEncodeID(rax_key,&id);
/* Create the listpack having the master entry ID and fields. */
@ -1242,7 +1242,7 @@ void xaddCommand(client *c) {
signalModifiedKey(c->db,c->argv[1]);
notifyKeyspaceEvent(NOTIFY_STREAM,"xadd",c->argv[1],c->db->id);
server.dirty++;
g_pserver->dirty++;
if (maxlen >= 0) {
/* Notify xtrim event if needed. */
@ -1260,7 +1260,7 @@ void xaddCommand(client *c) {
/* We need to signal to blocked clients that there is new data on this
* stream. */
if (server.blocked_clients_by_type[BLOCKED_STREAM])
if (g_pserver->blocked_clients_by_type[BLOCKED_STREAM])
signalKeyAsReady(c->db, c->argv[1]);
}
@ -1534,7 +1534,7 @@ void xreadCommand(client *c) {
streamReplyWithRange(c,s,&start,NULL,count,0,
groups ? groups[i] : NULL,
consumer, flags, &spi);
if (groups) server.dirty++;
if (groups) g_pserver->dirty++;
}
}
@ -1798,7 +1798,7 @@ NULL
streamCG *cg = streamCreateCG(s,grpname,sdslen(grpname),&id);
if (cg) {
addReply(c,shared.ok);
server.dirty++;
g_pserver->dirty++;
notifyKeyspaceEvent(NOTIFY_STREAM,"xgroup-create",
c->argv[2],c->db->id);
} else {
@ -1814,14 +1814,14 @@ NULL
}
cg->last_id = id;
addReply(c,shared.ok);
server.dirty++;
g_pserver->dirty++;
notifyKeyspaceEvent(NOTIFY_STREAM,"xgroup-setid",c->argv[2],c->db->id);
} else if (!strcasecmp(opt,"DESTROY") && c->argc == 4) {
if (cg) {
raxRemove(s->cgroups,(unsigned char*)grpname,sdslen(grpname),NULL);
streamFreeCG(cg);
addReply(c,shared.cone);
server.dirty++;
g_pserver->dirty++;
notifyKeyspaceEvent(NOTIFY_STREAM,"xgroup-destroy",
c->argv[2],c->db->id);
} else {
@ -1832,7 +1832,7 @@ NULL
* that were yet associated with such a consumer. */
long long pending = streamDelConsumer(cg,szFromObj(c->argv[4]));
addReplyLongLong(c,pending);
server.dirty++;
g_pserver->dirty++;
notifyKeyspaceEvent(NOTIFY_STREAM,"xgroup-delconsumer",
c->argv[2],c->db->id);
} else if (!strcasecmp(opt,"HELP")) {
@ -1872,7 +1872,7 @@ void xsetidCommand(client *c) {
}
s->last_id = id;
addReply(c,shared.ok);
server.dirty++;
g_pserver->dirty++;
notifyKeyspaceEvent(NOTIFY_STREAM,"xsetid",c->argv[1],c->db->id);
}
@ -1915,7 +1915,7 @@ void xackCommand(client *c) {
raxRemove(nack->consumer->pel,buf,sizeof(buf),NULL);
streamFreeNACK(nack);
acknowledged++;
server.dirty++;
g_pserver->dirty++;
}
}
addReplyLongLong(c,acknowledged);
@ -2309,12 +2309,12 @@ void xclaimCommand(client *c) {
/* Propagate this change. */
streamPropagateXCLAIM(c,c->argv[1],group,c->argv[2],c->argv[j],nack);
propagate_last_id = 0; /* Will be propagated by XCLAIM itself. */
server.dirty++;
g_pserver->dirty++;
}
}
if (propagate_last_id) {
streamPropagateGroupID(c,c->argv[1],group,c->argv[2]);
server.dirty++;
g_pserver->dirty++;
}
setDeferredArrayLen(c,arraylenptr,arraylen);
preventCommandPropagation(c);
@ -2352,7 +2352,7 @@ void xdelCommand(client *c) {
if (deleted) {
signalModifiedKey(c->db,c->argv[1]);
notifyKeyspaceEvent(NOTIFY_STREAM,"xdel",c->argv[1],c->db->id);
server.dirty += deleted;
g_pserver->dirty += deleted;
}
addReplyLongLong(c,deleted);
}
@ -2429,7 +2429,7 @@ void xtrimCommand(client *c) {
if (deleted) {
signalModifiedKey(c->db,c->argv[1]);
notifyKeyspaceEvent(NOTIFY_STREAM,"xtrim",c->argv[1],c->db->id);
server.dirty += deleted;
g_pserver->dirty += deleted;
if (approx_maxlen) streamRewriteApproxMaxlen(c,s,maxlen_arg_idx);
}
addReplyLongLong(c,deleted);

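streamAppendItem() starts a fresh listpack node once the current one exceeds either a byte budget or an entry budget, where a limit of zero means unlimited. A minimal sketch of that rule, with illustrative parameters standing in for stream_node_max_bytes/entries:

#include <stdbool.h>
#include <stddef.h>

/* Returns true when a new node should be started; a zero limit disables
 * the corresponding check, matching the guards in the hunk above. */
static bool node_is_full(size_t node_bytes, long node_entries,
                         size_t max_bytes, long max_entries) {
    if (max_bytes && node_bytes > max_bytes) return true;
    if (max_entries && node_entries > max_entries) return true;
    return false;
}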
View File

@ -84,7 +84,7 @@ void setGenericCommand(client *c, int flags, robj *key, robj *val, robj *expire,
return;
}
setKey(c->db,key,val);
server.dirty++;
g_pserver->dirty++;
if (expire) setExpire(c,c->db,key,mstime()+milliseconds);
notifyKeyspaceEvent(NOTIFY_STRING,"set",key,c->db->id);
if (expire) notifyKeyspaceEvent(NOTIFY_GENERIC,
@ -178,7 +178,7 @@ void getsetCommand(client *c) {
c->argv[2] = tryObjectEncoding(c->argv[2]);
setKey(c->db,c->argv[1],c->argv[2]);
notifyKeyspaceEvent(NOTIFY_STRING,"set",c->argv[1],c->db->id);
server.dirty++;
g_pserver->dirty++;
}
void setrangeCommand(client *c) {
@ -236,7 +236,7 @@ void setrangeCommand(client *c) {
signalModifiedKey(c->db,c->argv[1]);
notifyKeyspaceEvent(NOTIFY_STRING,
"setrange",c->argv[1],c->db->id);
server.dirty++;
g_pserver->dirty++;
}
addReplyLongLong(c,sdslen((sds)ptrFromObj(o)));
}
@ -325,7 +325,7 @@ void msetGenericCommand(client *c, int nx) {
setKey(c->db,c->argv[j],c->argv[j+1]);
notifyKeyspaceEvent(NOTIFY_STRING,"set",c->argv[j],c->db->id);
}
server.dirty += (c->argc-1)/2;
g_pserver->dirty += (c->argc-1)/2;
addReply(c, nx ? shared.cone : shared.ok);
}
@ -369,7 +369,7 @@ void incrDecrCommand(client *c, long long incr) {
}
signalModifiedKey(c->db,c->argv[1]);
notifyKeyspaceEvent(NOTIFY_STRING,"incrby",c->argv[1],c->db->id);
server.dirty++;
g_pserver->dirty++;
addReply(c,shared.colon);
addReply(c,newObj);
addReply(c,shared.crlf);
@ -419,7 +419,7 @@ void incrbyfloatCommand(client *c) {
dbAdd(c->db,c->argv[1],newObj);
signalModifiedKey(c->db,c->argv[1]);
notifyKeyspaceEvent(NOTIFY_STRING,"incrbyfloat",c->argv[1],c->db->id);
server.dirty++;
g_pserver->dirty++;
addReplyBulk(c,newObj);
/* Always replicate INCRBYFLOAT as a SET command with the final value
@ -460,7 +460,7 @@ void appendCommand(client *c) {
}
signalModifiedKey(c->db,c->argv[1]);
notifyKeyspaceEvent(NOTIFY_STRING,"append",c->argv[1],c->db->id);
server.dirty++;
g_pserver->dirty++;
addReplyLongLong(c,totlen);
}

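The closing comment of this file ("Always replicate INCRBYFLOAT as a SET command with the final value") exists because replaying the increment would make each replica redo floating-point math, and tiny precision or formatting differences would fork the dataset. A minimal sketch of the safe alternative, formatting the final value once on the master and shipping that exact string:

#include <stdio.h>

int main(void) {
    double value = 10.5;     /* stored value on the master */
    double incr  = 0.1;      /* client-supplied increment  */
    char final_repr[64];

    value += incr;           /* master does the math exactly once */
    snprintf(final_repr, sizeof(final_repr), "%.17g", value);

    /* Replicating "SET key <final_repr>" keeps replicas byte-identical;
     * replaying "INCRBYFLOAT key 0.1" against their own copies might not. */
    printf("SET key %s\n", final_repr);
    return 0;
}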
View File

@ -1243,8 +1243,8 @@ void zsetConvertToZiplistIfNeeded(robj *zobj, size_t maxelelen) {
if (zobj->encoding == OBJ_ENCODING_ZIPLIST) return;
zset *set = (zset*)zobj->m_ptr;
if (set->zsl->length <= server.zset_max_ziplist_entries &&
maxelelen <= server.zset_max_ziplist_value)
if (set->zsl->length <= g_pserver->zset_max_ziplist_entries &&
maxelelen <= g_pserver->zset_max_ziplist_value)
zsetConvert(zobj,OBJ_ENCODING_ZIPLIST);
}
@ -1357,9 +1357,9 @@ int zsetAdd(robj *zobj, double score, sds ele, int *flags, double *newscore) {
/* Optimize: check if the element is too large or the list
* becomes too long *before* executing zzlInsert. */
zobj->m_ptr = zzlInsert((unsigned char*)zobj->m_ptr,ele,score);
if (zzlLength((unsigned char*)zobj->m_ptr) > server.zset_max_ziplist_entries)
if (zzlLength((unsigned char*)zobj->m_ptr) > g_pserver->zset_max_ziplist_entries)
zsetConvert(zobj,OBJ_ENCODING_SKIPLIST);
if (sdslen(ele) > server.zset_max_ziplist_value)
if (sdslen(ele) > g_pserver->zset_max_ziplist_value)
zsetConvert(zobj,OBJ_ENCODING_SKIPLIST);
if (newscore) *newscore = score;
*flags |= ZADD_ADDED;
@ -1600,8 +1600,8 @@ void zaddGenericCommand(client *c, int flags) {
zobj = lookupKeyWrite(c->db,key);
if (zobj == NULL) {
if (xx) goto reply_to_client; /* No key + XX option: nothing to do. */
if (server.zset_max_ziplist_entries == 0 ||
server.zset_max_ziplist_value < sdslen(szFromObj(c->argv[scoreidx+1])))
if (g_pserver->zset_max_ziplist_entries == 0 ||
g_pserver->zset_max_ziplist_value < sdslen(szFromObj(c->argv[scoreidx+1])))
{
zobj = createZsetObject();
} else {
@ -1631,7 +1631,7 @@ void zaddGenericCommand(client *c, int flags) {
if (!(retflags & ZADD_NOP)) processed++;
score = newscore;
}
server.dirty += (added+updated);
g_pserver->dirty += (added+updated);
reply_to_client:
if (incr) { /* ZINCRBY or INCR option. */
@ -1682,7 +1682,7 @@ void zremCommand(client *c) {
if (keyremoved)
notifyKeyspaceEvent(NOTIFY_GENERIC,"del",key,c->db->id);
signalModifiedKey(c->db,key);
server.dirty += deleted;
g_pserver->dirty += deleted;
}
addReplyLongLong(c,deleted);
}
@ -1784,7 +1784,7 @@ void zremrangeGenericCommand(client *c, int rangetype) {
if (keyremoved)
notifyKeyspaceEvent(NOTIFY_GENERIC,"del",key,c->db->id);
}
server.dirty += deleted;
g_pserver->dirty += deleted;
addReplyLongLong(c,deleted);
cleanup:
@ -2387,14 +2387,14 @@ void zunionInterGenericCommand(client *c, robj *dstkey, int op) {
notifyKeyspaceEvent(NOTIFY_ZSET,
(op == SET_OP_UNION) ? "zunionstore" : "zinterstore",
dstkey,c->db->id);
server.dirty++;
g_pserver->dirty++;
} else {
decrRefCount(dstobj);
addReply(c,shared.czero);
if (touched) {
signalModifiedKey(c->db,dstkey);
notifyKeyspaceEvent(NOTIFY_GENERIC,"del",dstkey,c->db->id);
server.dirty++;
g_pserver->dirty++;
}
}
zfree(src);
@ -3211,7 +3211,7 @@ void genericZpopCommand(client *c, robj **keyv, int keyc, int where, int emitkey
}
serverAssertWithInfo(c,zobj,zsetDel(zobj,ele));
server.dirty++;
g_pserver->dirty++;
if (arraylen == 0) { /* Do this only for the first iteration. */
const char *events[2] = {"zpopmin","zpopmax"};
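The zset hunks show both directions of the encoding switch: zsetAdd() upgrades to a skiplist as soon as either limit is exceeded, while zsetConvertToZiplistIfNeeded() only downgrades once both the length and the largest element fit again. A minimal sketch of the two predicates, with illustrative names for zset_max_ziplist_entries/value:

#include <stdbool.h>
#include <stddef.h>

/* Upgrade eagerly: one limit crossed is enough. */
static bool must_upgrade(size_t len, size_t elelen,
                         size_t max_entries, size_t max_value) {
    return len > max_entries || elelen > max_value;
}

/* Downgrade conservatively: everything must fit. */
static bool may_downgrade(size_t len, size_t maxelelen,
                          size_t max_entries, size_t max_value) {
    return len <= max_entries && maxelelen <= max_value;
}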