Multithreading works!

parent 357a9e92e2
commit 29c1105132
src/ae.cpp | 19

@@ -109,9 +109,8 @@ void aeProcessCmd(aeEventLoop *eventLoop, int fd, void *, int )
     auto cb = read(fd, &cmd, sizeof(aeCommand));
     if (cb != sizeof(cmd))
     {
-        if (errno == EAGAIN)
+        AE_ASSERT(errno == EAGAIN);
         break;
-        fprintf(stderr, "Failed to read pipe.\n");
     }
     switch (cmd.op)
     {
@@ -168,6 +167,8 @@ int aeCreateRemoteFileEvent(aeEventLoop *eventLoop, int fd, int mask,
     if (eventLoop == g_eventLoopThisThread)
         return aeCreateFileEvent(eventLoop, fd, mask, proc, clientData);

+    int ret = AE_OK;
+
     aeCommand cmd;
     cmd.op = AE_ASYNC_OP::CreateFileEvent;
     cmd.fd = fd;
@@ -182,14 +183,19 @@ int aeCreateRemoteFileEvent(aeEventLoop *eventLoop, int fd, int mask,
     if (fSynchronous)
         cmd.pctl->mutexcv.lock();
     auto size = write(eventLoop->fdCmdWrite, &cmd, sizeof(cmd));
-    AE_ASSERT(size == sizeof(cmd));
-    int ret = AE_OK;
+    if (size != sizeof(cmd))
+    {
+        AE_ASSERT(errno == EAGAIN);
+        ret = AE_ERR;
+    }

     if (fSynchronous)
     {
         cmd.pctl->cv.wait(ulock);
         ret = cmd.pctl->rval;
         delete cmd.pctl;
     }

     return ret;
 }

@@ -265,7 +271,7 @@ aeEventLoop *aeCreateEventLoop(int setsize) {
     if (pipe(rgfd) < 0)
         goto err;
     eventLoop->fdCmdRead = rgfd[0];
-    eventLoop->fdCmdWrite = rgfd[1];;
+    eventLoop->fdCmdWrite = rgfd[1];
     fcntl(eventLoop->fdCmdWrite, F_SETFL, O_NONBLOCK);
     fcntl(eventLoop->fdCmdRead, F_SETFL, O_NONBLOCK);
     eventLoop->cevents = 0;
@@ -389,6 +395,7 @@ extern "C" void aeDeleteFileEvent(aeEventLoop *eventLoop, int fd, int mask)
 }

 extern "C" int aeGetFileEvents(aeEventLoop *eventLoop, int fd) {
+    //AE_ASSERT(g_eventLoopThisThread == NULL || g_eventLoopThisThread == eventLoop);
     if (fd >= eventLoop->setsize) return 0;
     aeFileEvent *fe = &eventLoop->events[fd];

@@ -468,6 +475,7 @@ extern "C" int aeDeleteTimeEvent(aeEventLoop *eventLoop, long long id)
  */
 static aeTimeEvent *aeSearchNearestTimer(aeEventLoop *eventLoop)
 {
+    AE_ASSERT(g_eventLoopThisThread == NULL || g_eventLoopThisThread == eventLoop);
     aeTimeEvent *te = eventLoop->timeEventHead;
     aeTimeEvent *nearest = NULL;

@@ -629,6 +637,7 @@ extern "C" void ProcessEventCore(aeEventLoop *eventLoop, aeFileEvent *fe, int ma
  * The function returns the number of events processed. */
 int aeProcessEvents(aeEventLoop *eventLoop, int flags)
 {
+    AE_ASSERT(g_eventLoopThisThread == NULL || g_eventLoopThisThread == eventLoop);
     int processed = 0, numevents;

     /* Nothing to do? return ASAP */
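The ae.cpp hunks above revolve around a command pipe: a thread that does not own an event loop writes a small aeCommand struct into the loop's pipe and, when it asked for a synchronous call, waits on a condition variable until the owning thread has drained and executed the command inside aeProcessCmd. The following is a minimal, self-contained sketch of that hand-off pattern under stated assumptions; the LoopCommand struct, the fields it carries and the condition-variable handshake are illustrative stand-ins, not KeyDB's actual types.

// Sketch of the cross-thread "command pipe" hand-off (assumed names, not KeyDB's).
#include <unistd.h>
#include <fcntl.h>
#include <cerrno>
#include <cassert>
#include <condition_variable>
#include <mutex>
#include <thread>
#include <cstdio>

struct LoopCommand {
    int fd;                         // fd the remote thread wants watched
    std::mutex *pmutex;             // present when the caller wants to block
    std::condition_variable *pcv;   // signalled by the loop thread when done
};

int main() {
    int rgfd[2];
    if (pipe(rgfd) < 0) return 1;
    fcntl(rgfd[0], F_SETFL, O_NONBLOCK);
    fcntl(rgfd[1], F_SETFL, O_NONBLOCK);

    std::mutex mtx;
    std::condition_variable cv;
    bool done = false;

    // "Event loop" thread: drain whole commands, tolerate EAGAIN on an empty pipe.
    std::thread loop([&] {
        for (;;) {
            LoopCommand cmd;
            ssize_t cb = read(rgfd[0], &cmd, sizeof(cmd));
            if (cb != (ssize_t)sizeof(cmd)) {
                assert(cb == -1 && errno == EAGAIN);    // nothing queued yet
                std::this_thread::yield();
                continue;
            }
            printf("loop thread: create file event for fd %d\n", cmd.fd);
            std::lock_guard<std::mutex> lk(*cmd.pmutex);
            done = true;
            cmd.pcv->notify_one();                      // wake the synchronous caller
            break;
        }
    });

    // Remote thread (here: main): enqueue one command and wait for completion.
    LoopCommand cmd{42, &mtx, &cv};
    std::unique_lock<std::mutex> lk(mtx);
    ssize_t size = write(rgfd[1], &cmd, sizeof(cmd));
    assert(size == (ssize_t)sizeof(cmd));               // a full pipe would yield EAGAIN instead
    cv.wait(lk, [&] { return done; });

    loop.join();
    return 0;
}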
src/aof.c | 26

@@ -96,7 +96,7 @@ void aofChildWriteDiffData(aeEventLoop *el, int fd, void *privdata, int mask) {
     listNode *ln;
     aofrwblock *block;
     ssize_t nwritten;
-    serverAssert(el == server.rgthreadvar[IDX_EVENT_LOOP_MAIN].el);
+    serverAssert(aeThreadOwnsLock());

     UNUSED(el);
     UNUSED(fd);
@@ -123,15 +123,6 @@ void aofChildWriteDiffData(aeEventLoop *el, int fd, void *privdata, int mask) {
         }
     }

-static void QueueAofPipeWrite(void *arg)
-{
-    UNUSED(arg);
-    if (aeGetFileEvents(server.rgthreadvar[IDX_EVENT_LOOP_MAIN].el,server.aof_pipe_write_data_to_child) == 0) {
-        aeCreateFileEvent(server.rgthreadvar[IDX_EVENT_LOOP_MAIN].el, server.aof_pipe_write_data_to_child,
-            AE_WRITABLE, aofChildWriteDiffData, NULL);
-    }
-}
-
 /* Append data to the AOF rewrite buffer, allocating new blocks if needed. */
 void aofRewriteBufferAppend(unsigned char *s, unsigned long len) {
     listNode *ln = listLast(server.aof_rewrite_buf_blocks);
@@ -173,7 +164,10 @@ void aofRewriteBufferAppend(unsigned char *s, unsigned long len) {

     /* Install a file event to send data to the rewrite child if there is
      * not one already. */
-    aePostFunction(server.rgthreadvar[IDX_EVENT_LOOP_MAIN].el, QueueAofPipeWrite, NULL);
+    if (aeGetFileEvents(serverTL->el,server.aof_pipe_write_data_to_child) == 0) {
+        aeCreateFileEvent(serverTL->el, server.aof_pipe_write_data_to_child,
+            AE_WRITABLE, aofChildWriteDiffData, NULL);
+    }
 }

 /* Write the buffer (possibly composed of multiple blocks) into the specified
@@ -691,6 +685,7 @@ int loadAppendOnlyFile(char *filename) {
     long loops = 0;
     off_t valid_up_to = 0; /* Offset of latest well-formed command loaded. */
     off_t valid_before_multi = 0; /* Offset before MULTI command loaded. */
+    serverAssert(serverTL != NULL); // This happens early in boot, ensure serverTL was setup

     if (fp == NULL) {
         serverLog(LL_WARNING,"Fatal error: can't open the append log file for reading: %s",strerror(errno));
@@ -747,7 +742,7 @@ int loadAppendOnlyFile(char *filename) {
         /* Serve the clients from time to time */
         if (!(loops++ % 1000)) {
             loadingProgress(ftello(fp));
-            processEventsWhileBlocked(IDX_EVENT_LOOP_MAIN);
+            processEventsWhileBlocked(serverTL - server.rgthreadvar);
         }

         if (fgets(buf,sizeof(buf),fp) == NULL) {
@@ -1497,12 +1492,13 @@ int aofCreatePipes(void) {
     /* Parent -> children data is non blocking. */
     if (anetNonBlock(NULL,fds[0]) != ANET_OK) goto error;
     if (anetNonBlock(NULL,fds[1]) != ANET_OK) goto error;
-    if (aeCreateRemoteFileEvent(server.rgthreadvar[IDX_EVENT_LOOP_MAIN].el, fds[2], AE_READABLE, aofChildPipeReadable, NULL, TRUE) == AE_ERR) goto error;
+    if (aeCreateFileEvent(serverTL->el, fds[2], AE_READABLE, aofChildPipeReadable, NULL) == AE_ERR) goto error;

     server.aof_pipe_write_data_to_child = fds[1];
     server.aof_pipe_read_data_from_parent = fds[0];
     server.aof_pipe_write_ack_to_parent = fds[3];
     server.aof_pipe_read_ack_from_child = fds[2];
+    server.el_alf_pip_read_ack_from_child = serverTL->el;
     server.aof_pipe_write_ack_to_child = fds[5];
     server.aof_pipe_read_ack_from_parent = fds[4];
     server.aof_stop_sending_diff = 0;
@@ -1516,8 +1512,8 @@ error:
 }

 void aofClosePipes(void) {
-    aeDeleteFileEvent(server.rgthreadvar[IDX_EVENT_LOOP_MAIN].el,server.aof_pipe_read_ack_from_child,AE_READABLE);
-    aeDeleteFileEvent(server.rgthreadvar[IDX_EVENT_LOOP_MAIN].el,server.aof_pipe_write_data_to_child,AE_WRITABLE);
+    aeDeleteFileEventAsync(server.el_alf_pip_read_ack_from_child,server.aof_pipe_read_ack_from_child,AE_READABLE);
+    aeDeleteFileEventAsync(server.rgthreadvar[IDX_EVENT_LOOP_MAIN].el,server.aof_pipe_write_data_to_child,AE_WRITABLE);
     close(server.aof_pipe_write_data_to_child);
     close(server.aof_pipe_read_data_from_parent);
     close(server.aof_pipe_write_ack_to_parent);
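Several aof.c hunks swap the hard-coded IDX_EVENT_LOOP_MAIN for serverTL, which appears to be a thread-local pointer into the server.rgthreadvar array, so that serverTL - server.rgthreadvar recovers the calling thread's event-loop index. A tiny sketch of that pointer-difference idiom follows; the struct and the slot assignment are made-up stand-ins for the real per-thread state.

// Sketch of the serverTL / rgthreadvar indexing idiom (assumed layout).
#include <cstdio>

struct ThreadVars { void *el; };           // stand-in for the per-thread event loop slot

static ThreadVars rgthreadvar[4];          // one slot per event-loop thread
static thread_local ThreadVars *serverTL;  // set once per thread at startup

int main() {
    serverTL = &rgthreadvar[2];            // pretend this thread was assigned slot 2
    long iel = (long)(serverTL - rgthreadvar);  // pointer difference recovers the index
    printf("this thread drives event loop %ld\n", iel);
    return 0;
}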
@@ -100,6 +100,7 @@ int getTimeoutFromObjectOrReply(client *c, robj *object, mstime_t *timeout, int
  * flag is set client query buffer is not longer processed, but accumulated,
  * and will be processed when the client is unblocked. */
 void blockClient(client *c, int btype) {
+    serverAssert(aeThreadOwnsLock());
     c->flags |= CLIENT_BLOCKED;
     c->btype = btype;
     server.blocked_clients++;
@@ -122,6 +123,7 @@ void processUnblockedClients(int iel) {
         serverAssert(ln != NULL);
         c = ln->value;
         listDelNode(unblocked_clients,ln);
+        AssertCorrectThread(c);
         c->flags &= ~CLIENT_UNBLOCKED;

         /* Process remaining data in the input buffer, unless the client
@@ -165,6 +167,7 @@ void queueClientForReprocessing(client *c) {
 /* Unblock a client calling the right function depending on the kind
  * of operation the client is blocking for. */
 void unblockClient(client *c) {
+    serverAssert(aeThreadOwnsLock());
     if (c->btype == BLOCKED_LIST ||
         c->btype == BLOCKED_ZSET ||
         c->btype == BLOCKED_STREAM) {
@@ -210,6 +213,7 @@ void replyToBlockedClientTimedOut(client *c) {
  * The semantics is to send an -UNBLOCKED error to the client, disconnecting
  * it at the same time. */
 void disconnectAllBlockedClients(void) {
+    serverAssert(aeThreadOwnsLock());
     listNode *ln;
     listIter li;

@@ -5390,6 +5390,7 @@ socket_err:
  * the target instance. See the Redis Cluster specification for more
  * information. */
 void askingCommand(client *c) {
+    serverAssert(aeThreadOwnsLock());
     if (server.cluster_enabled == 0) {
         addReplyError(c,"This instance has cluster support disabled");
         return;
@@ -5402,6 +5403,7 @@ void askingCommand(client *c) {
  * In this mode slaves will not redirect clients as long as clients access
  * with read-only commands to keys that are served by the slave's master. */
 void readonlyCommand(client *c) {
+    serverAssert(aeThreadOwnsLock());
     if (server.cluster_enabled == 0) {
         addReplyError(c,"This instance has cluster support disabled");
         return;
@@ -5412,6 +5414,7 @@ void readonlyCommand(client *c) {

 /* The READWRITE command just clears the READONLY command state. */
 void readwriteCommand(client *c) {
+    serverAssert(aeThreadOwnsLock());
     c->flags &= ~CLIENT_READONLY;
     addReply(c,shared.ok);
 }
@@ -5455,6 +5458,7 @@ clusterNode *getNodeByQuery(client *c, struct redisCommand *cmd, robj **argv, in
     multiState *ms, _ms;
     multiCmd mc;
     int i, slot = 0, migrating_slot = 0, importing_slot = 0, missing_keys = 0;
+    serverAssert(aeThreadOwnsLock());

     /* Allow any key to be set if a module disabled cluster redirections. */
     if (server.cluster_module_flags & CLUSTER_MODULE_FLAG_NO_REDIRECTION)
@@ -5663,6 +5667,7 @@ void clusterRedirectClient(client *c, clusterNode *n, int hashslot, int error_co
  * longer handles, the client is sent a redirection error, and the function
  * returns 1. Otherwise 0 is returned and no operation is performed. */
 int clusterRedirectBlockedClientIfNeeded(client *c) {
+    serverAssert(aeThreadOwnsLock());
     if (c->flags & CLIENT_BLOCKED &&
         (c->btype == BLOCKED_LIST ||
          c->btype == BLOCKED_ZSET ||
@@ -961,6 +961,7 @@ void configSetCommand(client *c) {

         /* Try to check if the OS is capable of supporting so many FDs. */
         server.maxclients = ll;
+        serverAssert(FALSE);
         if (ll > orig_value) {
             adjustOpenFilesLimit();
             if (server.maxclients != ll) {
src/db.c | 2

@@ -99,6 +99,7 @@ robj *lookupKey(redisDb *db, robj *key, int flags) {
  * expiring our key via DELs in the replication link. */
 robj *lookupKeyReadWithFlags(redisDb *db, robj *key, int flags) {
     robj *val;
+    serverAssert(aeThreadOwnsLock());

     if (expireIfNeeded(db,key) == 1) {
         /* Key expired. If we are in the context of a master, expireIfNeeded()
@@ -1072,6 +1073,7 @@ int removeExpire(redisDb *db, robj *key) {
  * after which the key will no longer be considered valid. */
 void setExpire(client *c, redisDb *db, robj *key, long long when) {
     dictEntry *kde, *de;
+    serverAssert(aeThreadOwnsLock());

     /* Reuse the sds from the main dict in the expire dict */
     kde = dictFind(db->pdict,ptrFromObj(key));
@@ -3,6 +3,7 @@
 #include <sys/syscall.h>
 #include <sys/types.h>
 #include <sched.h>
+#include <atomic>

 static_assert(sizeof(pid_t) <= sizeof(fastlock::m_pidOwner), "fastlock::m_pidOwner not large enough");

@@ -21,6 +22,8 @@ extern "C" void fastlock_init(struct fastlock *lock)
 }

 extern "C" void fastlock_lock(struct fastlock *lock)
+{
+    if (!__sync_bool_compare_and_swap(&lock->m_lock, 0, 1))
     {
         if (lock->m_pidOwner == gettid())
         {
@@ -32,8 +35,11 @@ extern "C" void fastlock_lock(struct fastlock *lock)
         {
             sched_yield();
         }
+    }
+
     lock->m_depth = 1;
     lock->m_pidOwner = gettid();
+    __sync_synchronize();
 }

 extern "C" void fastlock_unlock(struct fastlock *lock)
@@ -42,7 +48,9 @@ extern "C" void fastlock_unlock(struct fastlock *lock)
     if (lock->m_depth == 0)
     {
         lock->m_pidOwner = -1;
-        __sync_bool_compare_and_swap(&lock->m_lock, 1, 0);
+        __sync_synchronize();
+        if (!__sync_bool_compare_and_swap(&lock->m_lock, 1, 0))
+            *((volatile int*)0) = -1;
     }
 }

@@ -55,5 +63,10 @@ extern "C" void fastlock_free(struct fastlock *lock)

 bool fastlock::fOwnLock()
 {
+    if (__sync_bool_compare_and_swap(&m_lock, 0, 1))
+    {
+        __sync_bool_compare_and_swap(&m_lock, 1, 0);
+        return false; // it was never locked
+    }
     return gettid() == m_pidOwner;
 }
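The fastlock.cpp hunks make the lock a spinning, recursive lock: acquisition is a compare-and-swap on m_lock, re-entry is detected by comparing m_pidOwner against gettid(), and unlock fences with __sync_synchronize() and deliberately faults if the release CAS ever fails. Below is a compact sketch of the same pattern written with std::atomic instead of the __sync builtins; the class name, field layout and the gettid wrapper are illustrative assumptions, not the real fastlock (Linux-only because of SYS_gettid).

// Sketch of a recursive spinlock in the style of the fastlock changes above.
#include <atomic>
#include <thread>
#include <cassert>
#include <unistd.h>
#include <sys/syscall.h>

static pid_t gettid_cached() { return (pid_t)syscall(SYS_gettid); }

struct SpinLockSketch {
    std::atomic<int> m_lock{0};
    std::atomic<pid_t> m_pidOwner{-1};
    int m_depth = 0;

    void lock() {
        int expected = 0;
        if (!m_lock.compare_exchange_strong(expected, 1)) {
            if (m_pidOwner.load() == gettid_cached()) { ++m_depth; return; } // recursive re-entry
            for (;;) {                                   // spin until we win the CAS
                expected = 0;
                if (m_lock.compare_exchange_weak(expected, 1)) break;
                std::this_thread::yield();               // mirrors sched_yield()
            }
        }
        m_depth = 1;
        m_pidOwner.store(gettid_cached());
    }

    void unlock() {
        if (--m_depth == 0) {
            m_pidOwner.store(-1);
            int expected = 1;
            bool ok = m_lock.compare_exchange_strong(expected, 0);
            assert(ok);                                  // the diff crashes on purpose when this fails
            (void)ok;
        }
    }

    bool owns() const { return m_pidOwner.load() == gettid_cached(); }
};

int main() {
    SpinLockSketch l;
    l.lock(); l.lock();          // recursive acquire
    assert(l.owns());
    l.unlock(); l.unlock();
    assert(!l.owns());
    return 0;
}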
@@ -484,6 +484,7 @@ void moduleFreeContext(RedisModuleCtx *ctx) {
  * details needed to correctly replicate commands. */
 void moduleHandlePropagationAfterCommandCallback(RedisModuleCtx *ctx) {
     client *c = ctx->client;
+    serverAssert(aeThreadOwnsLock());

     if (c->flags & CLIENT_LUA) return;

@@ -3623,6 +3624,7 @@ void RM_SetDisconnectCallback(RedisModuleBlockedClient *bc, RedisModuleDisconnec
 void moduleHandleBlockedClients(void) {
     listNode *ln;
     RedisModuleBlockedClient *bc;
+    serverAssert(aeThreadOwnsLock());

     pthread_mutex_lock(&moduleUnblockedClientsMutex);
     /* Here we unblock all the pending clients blocked in modules operations
@@ -72,6 +72,7 @@ void queueMultiCommand(client *c) {
 }

 void discardTransaction(client *c) {
+    serverAssert(aeThreadOwnsLock());
     freeClientMultiState(c);
     initClientMultiState(c);
     c->flags &= ~(CLIENT_MULTI|CLIENT_DIRTY_CAS|CLIENT_DIRTY_EXEC);
@@ -81,11 +82,13 @@ void discardTransaction(client *c) {
 /* Flag the transacation as DIRTY_EXEC so that EXEC will fail.
  * Should be called every time there is an error while queueing a command. */
 void flagTransaction(client *c) {
+    serverAssert(aeThreadOwnsLock());
     if (c->flags & CLIENT_MULTI)
         c->flags |= CLIENT_DIRTY_EXEC;
 }

 void multiCommand(client *c) {
+    serverAssert(aeThreadOwnsLock());
     if (c->flags & CLIENT_MULTI) {
         addReplyError(c,"MULTI calls can not be nested");
         return;
@@ -291,6 +294,7 @@ void unwatchAllKeys(client *c) {
 /* "Touch" a key, so that if this key is being WATCHed by some client the
  * next EXEC will fail. */
 void touchWatchedKey(redisDb *db, robj *key) {
+    serverAssert(aeThreadOwnsLock());
     list *clients;
     listIter li;
     listNode *ln;
@@ -316,6 +320,7 @@ void touchWatchedKey(redisDb *db, robj *key) {
 void touchWatchedKeysOnFlush(int dbid) {
     listIter li1, li2;
     listNode *ln;
+    serverAssert(aeThreadOwnsLock());

     /* For every client, check all the waited keys */
     listRewind(server.clients,&li1);
@@ -350,6 +355,7 @@ void watchCommand(client *c) {

 void unwatchCommand(client *c) {
     unwatchAllKeys(c);
+    serverAssert(aeThreadOwnsLock());
     c->flags &= (~CLIENT_DIRTY_CAS);
     addReply(c,shared.ok);
 }
@@ -50,13 +50,17 @@ public:
     }

     void arm()
+    {
+        if (!m_fArmed)
         {
             m_fArmed = true;
             aeAcquireLock();
         }
+    }

     void disarm()
     {
+        serverAssert(m_fArmed);
         m_fArmed = false;
         aeReleaseLock();
     }
@@ -118,6 +122,7 @@ void linkClient(client *c) {
 client *createClient(int fd, int iel) {
     client *c = (client*)zmalloc(sizeof(client), MALLOC_LOCAL);

+    c->iel = iel;
     /* passing -1 as fd it is possible to create a non connected client.
      * This is useful since all the commands needs to be executed
      * in the context of a client. When commands are executed in other
@@ -203,6 +208,7 @@ client *createClient(int fd, int iel) {
     listSetMatchMethod(c->pubsub_patterns,listMatchObjects);
     if (fd != -1) linkClient(c);
     initClientMultiState(c);
+    AssertCorrectThread(c);
     return c;
 }

@@ -217,6 +223,7 @@ void clientInstallWriteHandler(client *c) {
     /* Schedule the client to write the output buffers to the socket only
      * if not already done and, for slaves, if the slave can actually receive
      * writes at this stage. */
+    serverAssert(aeThreadOwnsLock());
     if (!(c->flags & CLIENT_PENDING_WRITE) &&
         (c->replstate == REPL_STATE_NONE ||
          (c->replstate == SLAVE_STATE_ONLINE && !c->repl_put_online_on_ack)))
@@ -234,6 +241,7 @@ void clientInstallWriteHandler(client *c) {
 }

 void clientInstallAsyncWriteHandler(client *c) {
+    serverAssert(aeThreadOwnsLock());
     if (!(c->flags & CLIENT_PENDING_ASYNCWRITE)) {
         c->flags |= CLIENT_PENDING_ASYNCWRITE;
         listAddNodeHead(serverTL->clients_pending_asyncwrite,c);
@@ -263,6 +271,7 @@ void clientInstallAsyncWriteHandler(client *c) {
  * data to the clients output buffers. If the function returns C_ERR no
  * data should be appended to the output buffers. */
 int prepareClientToWrite(client *c, bool fAsync) {
+    serverAssert(aeThreadOwnsLock());
     fAsync = fAsync && !FCorrectThread(c);  // Not async if we're on the right thread

     /* If it's the Lua client we always return ok without installing any
@@ -302,7 +311,7 @@ int _addReplyToBuffer(client *c, const char *s, size_t len, bool fAsync) {
         if ((c->buflenAsync - c->bufposAsync) < (int)len)
         {
             int minsize = len + c->bufposAsync;
-            c->buflenAsync = std::max(minsize, c->buflenAsync*2);
+            c->buflenAsync = std::max(minsize, c->buflenAsync*2 - c->buflenAsync);
             c->bufAsync = (char*)zrealloc(c->bufAsync, c->buflenAsync, MALLOC_LOCAL);
         }
         memcpy(c->bufAsync+c->bufposAsync,s,len);
@@ -595,7 +604,7 @@ void setDeferredAggregateLenAsync(client *c, void *node, long length, char prefi
     serverAssert(idxSplice <= c->bufposAsync);
     if (c->buflenAsync < (c->bufposAsync + lenstr_len))
     {
-        c->buflenAsync = std::max((int)(c->bufposAsync+lenstr_len), c->buflenAsync*2);
+        c->buflenAsync = std::max((int)(c->bufposAsync+lenstr_len), c->buflenAsync*2 - c->buflenAsync);
         c->bufAsync = (char*)zrealloc(c->bufAsync, c->buflenAsync, MALLOC_LOCAL);
     }

@@ -713,13 +722,21 @@ void addReplyLongLongWithPrefix(client *c, long long ll, char prefix) {
     addReplyLongLongWithPrefixCore(c, ll, prefix, false);
 }

-void addReplyLongLong(client *c, long long ll) {
+void addReplyLongLongCore(client *c, long long ll, bool fAsync) {
     if (ll == 0)
-        addReply(c,shared.czero);
+        addReplyCore(c,shared.czero, fAsync);
     else if (ll == 1)
-        addReply(c,shared.cone);
+        addReplyCore(c,shared.cone, fAsync);
     else
-        addReplyLongLongWithPrefix(c,ll,':');
+        addReplyLongLongWithPrefixCore(c,ll,':', fAsync);
+}
+
+void addReplyLongLong(client *c, long long ll) {
+    addReplyLongLongCore(c, ll, false);
+}
+
+void addReplyLongLongAsync(client *c, long long ll) {
+    addReplyLongLongCore(c, ll, true);
 }

 void addReplyAggregateLenCore(client *c, long length, int prefix, bool fAsync) {
@@ -1155,6 +1172,7 @@ void disconnectSlaves(void) {
 void unlinkClient(client *c) {
     listNode *ln;
     AssertCorrectThread(c);
+    serverAssert(aeThreadOwnsLock());

     /* If this is marked as current client unset it. */
     if (server.current_client == c) server.current_client = NULL;
@@ -1163,8 +1181,6 @@ void unlinkClient(client *c) {
      * If the client was already unlinked or if it's a "fake client" the
      * fd is already set to -1. */
     if (c->fd != -1) {
-        AssertCorrectThread(c);
-
         /* Remove from the list of active clients. */
         if (c->client_list_node) {
             uint64_t id = htonu64(c->id);
@@ -1191,7 +1207,6 @@ void unlinkClient(client *c) {
     /* When client was just unblocked because of a blocking operation,
      * remove it from the list of unblocked clients. */
     if (c->flags & CLIENT_UNBLOCKED) {
-        AssertCorrectThread(c);
         ln = listSearchKey(server.rgthreadvar[c->iel].unblocked_clients,c);
         serverAssert(ln != NULL);
         listDelNode(server.rgthreadvar[c->iel].unblocked_clients,ln);
@@ -1199,9 +1214,16 @@ void unlinkClient(client *c) {
     }

     if (c->flags & CLIENT_PENDING_ASYNCWRITE) {
-        ln = listSearchKey(server.rgthreadvar[c->iel].clients_pending_asyncwrite,c);
+        ln = NULL;
+        int iel = 0;
+        for (; iel < server.cthreads; ++iel)
+        {
+            ln = listSearchKey(server.rgthreadvar[iel].clients_pending_asyncwrite,c);
+            if (ln)
+                break;
+        }
         serverAssert(ln != NULL);
-        listDelNode(server.rgthreadvar[c->iel].clients_pending_asyncwrite,ln);
+        listDelNode(server.rgthreadvar[iel].clients_pending_asyncwrite,ln);
         c->flags &= ~CLIENT_PENDING_ASYNCWRITE;
     }
 }
@@ -1316,8 +1338,8 @@ void freeClient(client *c) {
  * should be valid for the continuation of the flow of the program. */
 void freeClientAsync(client *c) {
     if (c->flags & CLIENT_CLOSE_ASAP || c->flags & CLIENT_LUA) return;
-    c->flags |= CLIENT_CLOSE_ASAP;
     aeAcquireLock();
+    c->flags |= CLIENT_CLOSE_ASAP;
     listAddNodeTail(server.clients_to_close,c);
     aeReleaseLock();
 }
@@ -1368,6 +1390,11 @@ int writeToClient(int fd, client *c, int handler_installed) {
             o = (clientReplyBlock*)listNodeValue(listFirst(c->listbufferDoneAsync));
             if (o->used == 0) {
                 listDelNode(c->listbufferDoneAsync,listFirst(c->listbufferDoneAsync));
+                if (listLength(c->listbufferDoneAsync) == 0)
+                {
+                    fSendAsyncBuffer = 0;
+                    locker.disarm();
+                }
                 continue;
             }

@@ -1485,17 +1512,59 @@ int writeToClient(int fd, client *c, int handler_installed) {
 /* Write event handler. Just send data to the client. */
 void sendReplyToClient(aeEventLoop *el, int fd, void *privdata, int mask) {
     UNUSED(mask);
-    AeLocker locker;
     client *c = (client*)privdata;

     serverAssert(ielFromEventLoop(el) == c->iel);

-    if (c->flags | CLIENT_SLAVE)
-        locker.arm();
-
     writeToClient(fd,c,1);
 }

+void ProcessPendingAsyncWrites()
+{
+    serverAssert(aeThreadOwnsLock());
+
+    while(listLength(serverTL->clients_pending_asyncwrite)) {
+        client *c = (client*)listNodeValue(listFirst(serverTL->clients_pending_asyncwrite));
+        listDelNode(serverTL->clients_pending_asyncwrite, listFirst(serverTL->clients_pending_asyncwrite));
+
+        serverAssert(c->flags & CLIENT_PENDING_ASYNCWRITE);
+
+        // TODO: Append to end of reply block?
+
+        size_t size = c->bufposAsync;
+        clientReplyBlock *reply = (clientReplyBlock*)zmalloc(size + sizeof(clientReplyBlock), MALLOC_LOCAL);
+        /* take over the allocation's internal fragmentation */
+        reply->size = zmalloc_usable(reply) - sizeof(clientReplyBlock);
+        reply->used = c->bufposAsync;
+        memcpy(reply->buf(), c->bufAsync, c->bufposAsync);
+        listAddNodeTail(c->listbufferDoneAsync, reply);
+        c->bufposAsync = 0;
+        c->buflenAsync = 0;
+        zfree(c->bufAsync);
+        c->bufAsync = nullptr;
+        c->flags &= ~CLIENT_PENDING_ASYNCWRITE;
+
+        // Now install the write event handler
+        int ae_flags = AE_WRITABLE|AE_WRITE_THREADSAFE;
+        /* For the fsync=always policy, we want that a given FD is never
+         * served for reading and writing in the same event loop iteration,
+         * so that in the middle of receiving the query, and serving it
+         * to the client, we'll call beforeSleep() that will do the
+         * actual fsync of AOF to disk. AE_BARRIER ensures that. */
+        if (server.aof_state == AOF_ON &&
+            server.aof_fsync == AOF_FSYNC_ALWAYS)
+        {
+            ae_flags |= AE_BARRIER;
+        }
+
+        if (!((c->replstate == REPL_STATE_NONE ||
+            (c->replstate == SLAVE_STATE_ONLINE && !c->repl_put_online_on_ack))))
+            continue;
+
+        if (aeCreateRemoteFileEvent(server.rgthreadvar[c->iel].el, c->fd, ae_flags, sendReplyToClient, c, FALSE) == AE_ERR)
+            continue;   // We can retry later in the cron
+    }
+}
+
 /* This function is called just before entering the event loop, in the hope
  * we can just write the replies to the client output buffer without any
  * need to use a syscall in order to install the writable event handler,
@@ -1504,8 +1573,11 @@ int handleClientsWithPendingWrites(int iel) {
     listIter li;
     listNode *ln;

+    AeLocker locker(true);
+
     list *list = server.rgthreadvar[iel].clients_pending_write;
     int processed = listLength(list);
+    serverAssert(iel == (serverTL - server.rgthreadvar));

     listRewind(list,&li);
     while((ln = listNext(&li))) {
@@ -1524,7 +1596,7 @@ int handleClientsWithPendingWrites(int iel) {
         /* If after the synchronous writes above we still have data to
          * output to the client, we need to install the writable handler. */
         if (clientHasPendingReplies(c, TRUE)) {
-            int ae_flags = AE_WRITABLE;
+            int ae_flags = AE_WRITABLE|AE_WRITE_THREADSAFE;
             /* For the fsync=always policy, we want that a given FD is never
              * served for reading and writing in the same event loop iteration,
              * so that in the middle of receiving the query, and serving it
@@ -1540,6 +1612,9 @@ int handleClientsWithPendingWrites(int iel) {
                 freeClientAsync(c);
             }
         }
+
+    ProcessPendingAsyncWrites();
+
     return processed;
 }

@@ -1947,10 +2022,8 @@ void readQueryFromClient(aeEventLoop *el, int fd, void *privdata, int mask) {
     UNUSED(mask);
     serverAssert(mask & AE_READ_THREADSAFE);
     serverAssert(c->iel == ielFromEventLoop(el));
-    AeLocker lockerSlave;
-    if (c->flags & CLIENT_SLAVE) // slaves are not async capable
-        lockerSlave.arm();
+    AeLocker locker;
+    AssertCorrectThread(c);

     readlen = PROTO_IOBUF_LEN;
     /* If this is a multi bulk request, and we are processing a bulk reply
@@ -1974,22 +2047,19 @@ void readQueryFromClient(aeEventLoop *el, int fd, void *privdata, int mask) {
     c->querybuf = sdsMakeRoomFor(c->querybuf, readlen);

     nread = read(fd, c->querybuf+qblen, readlen);
+    locker.arm();

     if (nread == -1) {
         if (errno == EAGAIN) {
             return;
         } else {
             serverLog(LL_VERBOSE, "Reading from client: %s",strerror(errno));
-            aeAcquireLock();
             freeClient(c);
-            aeReleaseLock();
             return;
         }
     } else if (nread == 0) {
         serverLog(LL_VERBOSE, "Client closed connection");
-        aeAcquireLock();
         freeClient(c);
-        aeReleaseLock();
         return;
     } else if (c->flags & CLIENT_MASTER) {
         /* Append the query buffer to the pending (not applied) buffer
@@ -2010,9 +2080,7 @@ void readQueryFromClient(aeEventLoop *el, int fd, void *privdata, int mask) {
         serverLog(LL_WARNING,"Closing client that reached max query buffer length: %s (qbuf initial bytes: %s)", ci, bytes);
         sdsfree(ci);
         sdsfree(bytes);
-        aeAcquireLock();
         freeClient(c);
-        aeReleaseLock();
         return;
     }

@@ -2022,9 +2090,8 @@ void readQueryFromClient(aeEventLoop *el, int fd, void *privdata, int mask) {
      * was actually applied to the master state: this quantity, and its
      * corresponding part of the replication stream, will be propagated to
      * the sub-slaves and to the replication backlog. */
-    aeAcquireLock();
     processInputBufferAndReplicate(c);
-    aeReleaseLock();
+    ProcessPendingAsyncWrites();
 }

 void getClientsMaxBuffers(unsigned long *longest_output_list,
@@ -2649,6 +2716,9 @@ void flushSlavesOutputBuffers(void) {
         client *slave = (client*)listNodeValue(ln);
         int events;

+        if (!FCorrectThread(slave))
+            continue;   // we cannot synchronously flush other thread's clients
+
         /* Note that the following will not flush output buffers of slaves
          * in STATE_ONLINE but having put_online_on_ack set to true: in this
          * case the writable event is never installed, since the purpose
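Much of networking.cpp is now funneled through the AeLocker guard seen above, whose arm() became idempotent and whose disarm() asserts that the lock is actually held. The following is a minimal sketch of that guard pattern; AeLockerSketch and the stand-in aeAcquireLock/aeReleaseLock functions are assumptions made for the example, not the real global-lock implementation.

// Sketch of the arm()/disarm() guard in the style of the AeLocker changes above.
#include <cassert>
#include <cstdio>

static void aeAcquireLock() { puts("acquire global lock"); }
static void aeReleaseLock() { puts("release global lock"); }

class AeLockerSketch {
    bool m_fArmed = false;
public:
    explicit AeLockerSketch(bool fArm = false) { if (fArm) arm(); }
    void arm() {
        if (!m_fArmed) {          // arming twice is a no-op, not a double lock
            m_fArmed = true;
            aeAcquireLock();
        }
    }
    void disarm() {
        assert(m_fArmed);         // mirrors the unbalanced-release check added in the diff
        m_fArmed = false;
        aeReleaseLock();
    }
    ~AeLockerSketch() { if (m_fArmed) disarm(); }   // release on scope exit
};

int main() {
    AeLockerSketch locker;
    locker.arm();
    locker.arm();                 // safe: second arm does nothing
    locker.disarm();              // explicit early release, as writeToClient does
    return 0;
}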
@@ -394,7 +394,7 @@ robj *resetRefCount(robj *obj) {

 int checkType(client *c, robj *o, int type) {
     if (o->type != type) {
-        addReply(c,shared.wrongtypeerr);
+        addReplyAsync(c,shared.wrongtypeerr);
         return 1;
     }
     return 0;
@@ -325,6 +325,7 @@ int pubsubPublishMessage(robj *channel, robj *message) {

 void subscribeCommand(client *c) {
     int j;
+    serverAssert(aeThreadOwnsLock());

     for (j = 1; j < c->argc; j++)
         pubsubSubscribeChannel(c,c->argv[j]);
@@ -345,6 +346,7 @@ void unsubscribeCommand(client *c) {

 void psubscribeCommand(client *c) {
     int j;
+    serverAssert(aeThreadOwnsLock());

     for (j = 1; j < c->argc; j++)
         pubsubSubscribePattern(c,c->argv[j]);
@@ -1862,7 +1862,7 @@ void rdbLoadProgressCallback(rio *r, const void *buf, size_t len) {
         if (server.masterhost && server.repl_state == REPL_STATE_TRANSFER)
             replicationSendNewlineToMaster();
         loadingProgress(r->processed_bytes);
-        processEventsWhileBlocked(IDX_EVENT_LOOP_MAIN);
+        processEventsWhileBlocked(serverTL - server.rgthreadvar);
     }
 }

@ -124,6 +124,7 @@ void freeReplicationBacklog(void) {
|
|||||||
* server.master_repl_offset, because there is no case where we want to feed
|
* server.master_repl_offset, because there is no case where we want to feed
|
||||||
* the backlog without incrementing the offset. */
|
* the backlog without incrementing the offset. */
|
||||||
void feedReplicationBacklog(void *ptr, size_t len) {
|
void feedReplicationBacklog(void *ptr, size_t len) {
|
||||||
|
serverAssert(aeThreadOwnsLock());
|
||||||
unsigned char *p = (unsigned char*)ptr;
|
unsigned char *p = (unsigned char*)ptr;
|
||||||
|
|
||||||
server.master_repl_offset += len;
|
server.master_repl_offset += len;
|
||||||
@ -175,6 +176,7 @@ void replicationFeedSlaves(list *slaves, int dictid, robj **argv, int argc) {
|
|||||||
listIter li;
|
listIter li;
|
||||||
int j, len;
|
int j, len;
|
||||||
char llstr[LONG_STR_SIZE];
|
char llstr[LONG_STR_SIZE];
|
||||||
|
serverAssert(aeThreadOwnsLock());
|
||||||
|
|
||||||
/* If the instance is not a top level master, return ASAP: we'll just proxy
|
/* If the instance is not a top level master, return ASAP: we'll just proxy
|
||||||
* the stream of data we receive from our master instead, in order to
|
* the stream of data we receive from our master instead, in order to
|
||||||
@ -310,6 +312,7 @@ void replicationFeedMonitors(client *c, list *monitors, int dictid, robj **argv,
|
|||||||
sds cmdrepr = sdsnew("+");
|
sds cmdrepr = sdsnew("+");
|
||||||
robj *cmdobj;
|
robj *cmdobj;
|
||||||
struct timeval tv;
|
struct timeval tv;
|
||||||
|
serverAssert(aeThreadOwnsLock());
|
||||||
|
|
||||||
gettimeofday(&tv,NULL);
|
gettimeofday(&tv,NULL);
|
||||||
cmdrepr = sdscatprintf(cmdrepr,"%ld.%06ld ",(long)tv.tv_sec,(long)tv.tv_usec);
|
cmdrepr = sdscatprintf(cmdrepr,"%ld.%06ld ",(long)tv.tv_sec,(long)tv.tv_usec);
|
||||||
@ -717,6 +720,7 @@ void syncCommand(client *c) {
|
|||||||
slave = (client*)ln->value;
|
slave = (client*)ln->value;
|
||||||
if (slave->replstate == SLAVE_STATE_WAIT_BGSAVE_END) break;
|
if (slave->replstate == SLAVE_STATE_WAIT_BGSAVE_END) break;
|
||||||
}
|
}
|
||||||
|
|
||||||
/* To attach this slave, we check that it has at least all the
|
/* To attach this slave, we check that it has at least all the
|
||||||
* capabilities of the slave that triggered the current BGSAVE. */
|
* capabilities of the slave that triggered the current BGSAVE. */
|
||||||
if (ln && ((c->slave_capa & slave->slave_capa) == slave->slave_capa)) {
|
if (ln && ((c->slave_capa & slave->slave_capa) == slave->slave_capa)) {
|
||||||
@ -883,6 +887,7 @@ void sendBulkToSlave(aeEventLoop *el, int fd, void *privdata, int mask) {
|
|||||||
* replication process. Currently the preamble is just the bulk count of
|
* replication process. Currently the preamble is just the bulk count of
|
||||||
* the file in the form "$<length>\r\n". */
|
* the file in the form "$<length>\r\n". */
|
||||||
if (slave->replpreamble) {
|
if (slave->replpreamble) {
|
||||||
|
serverAssert(slave->replpreamble[0] == '$');
|
||||||
nwritten = write(fd,slave->replpreamble,sdslen(slave->replpreamble));
|
nwritten = write(fd,slave->replpreamble,sdslen(slave->replpreamble));
|
||||||
if (nwritten == -1) {
|
if (nwritten == -1) {
|
||||||
serverLog(LL_VERBOSE,"Write error sending RDB preamble to replica: %s",
|
serverLog(LL_VERBOSE,"Write error sending RDB preamble to replica: %s",
|
||||||
@ -942,13 +947,14 @@ void sendBulkToSlave(aeEventLoop *el, int fd, void *privdata, int mask) {
|
|||||||
* otherwise C_ERR is passed to the function.
|
* otherwise C_ERR is passed to the function.
|
||||||
* The 'type' argument is the type of the child that terminated
|
* The 'type' argument is the type of the child that terminated
|
||||||
* (if it had a disk or socket target). */
|
* (if it had a disk or socket target). */
|
||||||
void updateSlavesWaitingBgsaveThread(int bgsaveerr, int type, int iel);
|
|
||||||
void updateSlavesWaitingBgsave(int bgsaveerr, int type)
|
void updateSlavesWaitingBgsave(int bgsaveerr, int type)
|
||||||
{
|
{
|
||||||
listNode *ln;
|
listNode *ln;
|
||||||
listIter li;
|
listIter li;
|
||||||
int startbgsave = 0;
|
int startbgsave = 0;
|
||||||
int mincapa = -1;
|
int mincapa = -1;
|
||||||
|
serverAssert(aeThreadOwnsLock());
|
||||||
|
|
||||||
listRewind(server.slaves,&li);
|
listRewind(server.slaves,&li);
|
||||||
while((ln = listNext(&li))) {
|
while((ln = listNext(&li))) {
|
||||||
client *slave = (client*)ln->value;
|
client *slave = (client*)ln->value;
|
||||||
@ -957,34 +963,7 @@ void updateSlavesWaitingBgsave(int bgsaveerr, int type)
|
|||||||
startbgsave = 1;
|
startbgsave = 1;
|
||||||
mincapa = (mincapa == -1) ? slave->slave_capa :
|
mincapa = (mincapa == -1) ? slave->slave_capa :
|
||||||
(mincapa & slave->slave_capa);
|
(mincapa & slave->slave_capa);
|
||||||
}
|
} else if (slave->replstate == SLAVE_STATE_WAIT_BGSAVE_END) {
|
||||||
}
|
|
||||||
|
|
||||||
for (int iel = 0; iel < server.cthreads; ++iel)
|
|
||||||
{
|
|
||||||
if (iel == IDX_EVENT_LOOP_MAIN)
|
|
||||||
updateSlavesWaitingBgsaveThread(bgsaveerr, type, iel);
|
|
||||||
else
|
|
||||||
aePostFunction(server.rgthreadvar[iel].el, [=]{
|
|
||||||
updateSlavesWaitingBgsaveThread(bgsaveerr, type, iel);
|
|
||||||
});
|
|
||||||
}
|
|
||||||
|
|
||||||
if (startbgsave)
|
|
||||||
startBgsaveForReplication(mincapa);
|
|
||||||
}
|
|
||||||
|
|
||||||
void updateSlavesWaitingBgsaveThread(int bgsaveerr, int type, int iel) {
|
|
||||||
listNode *ln;
|
|
||||||
listIter li;
|
|
||||||
|
|
||||||
listRewind(server.slaves,&li);
|
|
||||||
while((ln = listNext(&li))) {
|
|
||||||
client *slave = (client*)ln->value;
|
|
||||||
if (slave->iel != iel)
|
|
||||||
continue;
|
|
||||||
|
|
||||||
if (slave->replstate == SLAVE_STATE_WAIT_BGSAVE_END) {
|
|
||||||
struct redis_stat buf;
|
struct redis_stat buf;
|
||||||
|
|
||||||
/* If this was an RDB on disk save, we have to prepare to send
|
/* If this was an RDB on disk save, we have to prepare to send
|
||||||
@ -1006,13 +985,19 @@ void updateSlavesWaitingBgsaveThread(int bgsaveerr, int type, int iel) {
|
|||||||
slave->repl_ack_time = server.unixtime; /* Timeout otherwise. */
|
slave->repl_ack_time = server.unixtime; /* Timeout otherwise. */
|
||||||
} else {
|
} else {
|
||||||
if (bgsaveerr != C_OK) {
|
if (bgsaveerr != C_OK) {
|
||||||
|
if (FCorrectThread(slave))
|
||||||
freeClient(slave);
|
freeClient(slave);
|
||||||
|
else
|
||||||
|
freeClientAsync(slave);
|
||||||
serverLog(LL_WARNING,"SYNC failed. BGSAVE child returned an error");
|
serverLog(LL_WARNING,"SYNC failed. BGSAVE child returned an error");
|
||||||
continue;
|
continue;
|
||||||
}
|
}
|
||||||
if ((slave->repldbfd = open(server.rdb_filename,O_RDONLY)) == -1 ||
|
if ((slave->repldbfd = open(server.rdb_filename,O_RDONLY)) == -1 ||
|
||||||
redis_fstat(slave->repldbfd,&buf) == -1) {
|
redis_fstat(slave->repldbfd,&buf) == -1) {
|
||||||
|
if (FCorrectThread(slave))
|
||||||
freeClient(slave);
|
freeClient(slave);
|
||||||
|
else
|
||||||
|
freeClientAsync(slave);
|
||||||
serverLog(LL_WARNING,"SYNC failed. Can't open/stat DB after BGSAVE: %s", strerror(errno));
|
serverLog(LL_WARNING,"SYNC failed. Can't open/stat DB after BGSAVE: %s", strerror(errno));
|
||||||
continue;
|
continue;
|
||||||
}
|
}
|
||||||
@ -1022,14 +1007,28 @@ void updateSlavesWaitingBgsaveThread(int bgsaveerr, int type, int iel) {
|
|||||||
slave->replpreamble = sdscatprintf(sdsempty(),"$%lld\r\n",
|
slave->replpreamble = sdscatprintf(sdsempty(),"$%lld\r\n",
|
||||||
(unsigned long long) slave->repldbsize);
|
(unsigned long long) slave->repldbsize);
|
||||||
|
|
||||||
|
if (FCorrectThread(slave))
|
||||||
|
{
|
||||||
aeDeleteFileEvent(server.rgthreadvar[slave->iel].el,slave->fd,AE_WRITABLE);
|
aeDeleteFileEvent(server.rgthreadvar[slave->iel].el,slave->fd,AE_WRITABLE);
|
||||||
if (aeCreateFileEvent(server.rgthreadvar[slave->iel].el, slave->fd, AE_WRITABLE, sendBulkToSlave, slave) == AE_ERR) {
|
if (aeCreateFileEvent(server.rgthreadvar[slave->iel].el, slave->fd, AE_WRITABLE, sendBulkToSlave, slave) == AE_ERR) {
|
||||||
freeClient(slave);
|
freeClient(slave);
|
||||||
continue;
|
}
|
||||||
|
}
|
||||||
|
else
|
||||||
|
{
|
||||||
|
aePostFunction(server.rgthreadvar[slave->iel].el, [slave]{
|
||||||
|
aeDeleteFileEvent(server.rgthreadvar[slave->iel].el,slave->fd,AE_WRITABLE);
|
||||||
|
if (aeCreateFileEvent(server.rgthreadvar[slave->iel].el, slave->fd, AE_WRITABLE, sendBulkToSlave, slave) == AE_ERR) {
|
||||||
|
freeClient(slave);
|
||||||
|
}
|
||||||
|
});
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if (startbgsave)
|
||||||
|
startBgsaveForReplication(mincapa);
|
||||||
}
|
}
|
||||||
|
|
||||||
/* Change the current instance replication ID with a new, random one.
|
/* Change the current instance replication ID with a new, random one.
|
||||||
@ -1107,7 +1106,7 @@ void replicationEmptyDbCallback(void *privdata) {
|
|||||||
* performed, this function materializes the master client we store
|
* performed, this function materializes the master client we store
|
||||||
* at server.master, starting from the specified file descriptor. */
|
* at server.master, starting from the specified file descriptor. */
|
||||||
void replicationCreateMasterClient(int fd, int dbid) {
|
void replicationCreateMasterClient(int fd, int dbid) {
|
||||||
server.master = createClient(fd, IDX_EVENT_LOOP_MAIN);
|
server.master = createClient(fd, serverTL - server.rgthreadvar);
|
||||||
server.master->flags |= CLIENT_MASTER;
|
server.master->flags |= CLIENT_MASTER;
|
||||||
server.master->authenticated = 1;
|
server.master->authenticated = 1;
|
||||||
server.master->reploff = server.master_initial_offset;
|
server.master->reploff = server.master_initial_offset;
|
||||||
@ -1144,6 +1143,8 @@ void readSyncBulkPayload(aeEventLoop *el, int fd, void *privdata, int mask) {
|
|||||||
UNUSED(privdata);
|
UNUSED(privdata);
|
||||||
UNUSED(mask);
|
UNUSED(mask);
|
||||||
|
|
||||||
|
serverAssert(aeThreadOwnsLock());
|
||||||
|
|
||||||
/* Static vars used to hold the EOF mark, and the last bytes received
|
/* Static vars used to hold the EOF mark, and the last bytes received
|
||||||
* form the server: when they match, we reached the end of the transfer. */
|
* form the server: when they match, we reached the end of the transfer. */
|
||||||
static char eofmark[CONFIG_RUN_ID_SIZE];
|
static char eofmark[CONFIG_RUN_ID_SIZE];
|
||||||
@@ -2042,7 +2043,6 @@ void replicationHandleMasterDisconnection(void) {
      * the slaves only if we'll have to do a full resync with our master. */
 }

-void replicaofCommandCore(client *c);
 void replicaofCommand(client *c) {
     // Changing the master needs to be done on the main thread.

@@ -2053,23 +2053,6 @@ void replicaofCommand(client *c) {
         return;
     }

-    if ((serverTL - server.rgthreadvar) == IDX_EVENT_LOOP_MAIN)
-    {
-        replicaofCommandCore(c);
-    }
-    else
-    {
-        aeReleaseLock();
-        aePostFunction(server.rgthreadvar[IDX_EVENT_LOOP_MAIN].el, [=]{
-            replicaofCommandCore(c);
-        }, true /*fSync*/);
-        aeAcquireLock();
-    }
-}
-
-void replicaofCommandCore(client *c) {
-
-
     /* The special host/port combination "NO" "ONE" turns the instance
      * into a master. Otherwise the new master address is set. */
     if (!strcasecmp((const char*)ptrFromObj(c->argv[1]),"no") &&
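Note: the block removed above marshalled REPLICAOF handling onto the main thread and blocked until it completed (aePostFunction with fSync = true, dropping the global lock around the wait). The general shape of such a synchronous post, sketched with a condition variable in plain C++; Worker and postSync are illustrative names, not KeyDB APIs.

#include <condition_variable>
#include <functional>
#include <mutex>
#include <queue>
#include <thread>

// A worker that executes posted tasks; postSync() returns only after the task ran.
class Worker {
    std::mutex mtx;
    std::condition_variable cv;
    std::queue<std::function<void()>> q;
    bool stop = false;
    std::thread th{[this]{ run(); }};

    void run() {
        std::unique_lock<std::mutex> lk(mtx);
        while (!stop) {
            cv.wait(lk, [&]{ return stop || !q.empty(); });
            while (!q.empty()) {
                auto task = std::move(q.front()); q.pop();
                lk.unlock(); task(); lk.lock();
            }
        }
    }
public:
    void postSync(std::function<void()> task) {
        std::mutex done_m; std::condition_variable done_cv; bool done = false;
        {
            std::lock_guard<std::mutex> g(mtx);
            q.push([&]{ task(); { std::lock_guard<std::mutex> d(done_m); done = true; } done_cv.notify_one(); });
        }
        cv.notify_one();
        std::unique_lock<std::mutex> d(done_m);
        done_cv.wait(d, [&]{ return done; });    // block the caller, like fSync = true
    }
    ~Worker() { { std::lock_guard<std::mutex> g(mtx); stop = true; } cv.notify_one(); th.join(); }
};

int main() {
    Worker w;
    int x = 0;
    w.postSync([&]{ x = 42; });    // guaranteed to have run before postSync returns
    return x == 42 ? 0 : 1;
}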
@@ -2164,7 +2147,8 @@ void roleCommand(client *c) {
 /* Send a REPLCONF ACK command to the master to inform it about the current
  * processed offset. If we are not connected with a master, the command has
  * no effects. */
-void replicationSendAck(void) {
+void replicationSendAck(void)
+{
     client *c = server.master;

     if (c != NULL) {
@@ -2200,6 +2184,7 @@ void replicationSendAck(void) {
 void replicationCacheMaster(client *c) {
     serverAssert(server.master != NULL && server.cached_master == NULL);
     serverLog(LL_NOTICE,"Caching the disconnected master state.");
+    AssertCorrectThread(c);

     /* Unlink the client from the server structures. */
     unlinkClient(c);
@@ -2288,9 +2273,13 @@ void replicationResurrectCachedMaster(int newfd) {
     server.repl_state = REPL_STATE_CONNECTED;
     server.repl_down_since = 0;

+    /* Normally changing the thread of a client is a BIG NONO,
+        but this client was unlinked so its OK here */
+    server.master->iel = serverTL - server.rgthreadvar;    // martial to this thread
+
     /* Re-add to the list of clients. */
     linkClient(server.master);
-    if (aeCreateFileEvent(server.rgthreadvar[IDX_EVENT_LOOP_MAIN].el, newfd, AE_READABLE|AE_READ_THREADSAFE,
+    if (aeCreateFileEvent(server.rgthreadvar[server.master->iel].el, newfd, AE_READABLE|AE_READ_THREADSAFE,
                           readQueryFromClient, server.master)) {
         serverLog(LL_WARNING,"Error resurrecting the cached master, impossible to add the readable handler: %s", strerror(errno));
         freeClientAsync(server.master); /* Close ASAP. */
@@ -2299,7 +2288,7 @@ void replicationResurrectCachedMaster(int newfd) {
     /* We may also need to install the write handler as well if there is
      * pending data in the write buffers. */
     if (clientHasPendingReplies(server.master, TRUE)) {
-        if (aeCreateFileEvent(server.rgthreadvar[IDX_EVENT_LOOP_MAIN].el, newfd, AE_WRITABLE,
+        if (aeCreateFileEvent(server.rgthreadvar[server.master->iel].el, newfd, AE_WRITABLE,
                               sendReplyToClient, server.master)) {
             serverLog(LL_WARNING,"Error resurrecting the cached master, impossible to add the writable handler: %s", strerror(errno));
             freeClientAsync(server.master); /* Close ASAP. */
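Note: resurrecting the cached master now re-binds it to whichever thread performs the resurrection: the client's iel is rewritten while the client is still unlinked, and the read/write handlers are then registered on that thread's event loop instead of unconditionally on the main one. A toy, single-threaded illustration of the unlink / rewrite / relink order the comment above relies on; Client and ThreadState are invented names.

#include <cassert>
#include <list>

struct Client { int iel = 0; };

struct ThreadState { std::list<Client*> clients; };

int main() {
    ThreadState threads[2];
    Client master;

    threads[0].clients.push_back(&master);            // owned by thread 0

    // Unlink first, then change ownership, then relink on the new thread.
    threads[master.iel].clients.remove(&master);      // unlink from the old owner
    master.iel = 1;                                   // safe only while unreachable
    threads[master.iel].clients.push_back(&master);   // relink on the new owner

    assert(threads[0].clients.empty());
    assert(threads[1].clients.size() == 1);
    return 0;
}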
@@ -2535,7 +2524,7 @@ void processClientsWaitingReplicas(void) {
                 last_numreplicas > c->bpop.numreplicas)
             {
                 unblockClient(c);
-                addReplyLongLong(c,last_numreplicas);
+                addReplyLongLongAsync(c,last_numreplicas);
             } else {
                 int numreplicas = replicationCountAcksByOffset(c->bpop.reploffset);

@@ -2543,7 +2532,7 @@ void processClientsWaitingReplicas(void) {
                     last_offset = c->bpop.reploffset;
                     last_numreplicas = numreplicas;
                     unblockClient(c);
-                    addReplyLongLong(c,numreplicas);
+                    addReplyLongLongAsync(c,numreplicas);
                 }
             }
         }
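Note: addReplyLongLong is swapped for addReplyLongLongAsync because processClientsWaitingReplicas may now touch clients owned by other threads; judging by the ProcessPendingAsyncWrites body removed further down, the Async variants stage the reply in a per-client buffer (bufAsync) that is later converted into a reply block and flushed by the owning thread. A rough sketch of that buffering idea with invented names; the real accounting lives in KeyDB's networking code.

#include <mutex>
#include <string>
#include <cstdio>

// Per-client pending output that any thread may append to under a lock;
// only the owning thread ever writes it to the socket.
struct PendingReply {
    std::mutex mtx;
    std::string buf;

    void appendLongLong(long long v) {              // analogue of an *Async reply helper
        std::lock_guard<std::mutex> g(mtx);
        buf += ":" + std::to_string(v) + "\r\n";    // RESP integer encoding
    }
    std::string takeForFlush() {                    // called by the owner thread
        std::lock_guard<std::mutex> g(mtx);
        std::string out;
        out.swap(buf);
        return out;
    }
};

int main() {
    PendingReply r;
    r.appendLongLong(3);                // e.g. number of acked replicas
    std::fputs(r.takeForFlush().c_str(), stdout);
    return 0;
}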
@@ -2680,7 +2669,7 @@ void replicationCron(void) {
             {
                 serverLog(LL_WARNING, "Disconnecting timedout replica: %s",
                     replicationGetSlaveName(slave));
-                freeClient(slave);
+                freeClientAsync(slave);
             }
         }
     }
@@ -370,6 +370,9 @@ int luaRedisGenericCommand(lua_State *lua, int raise_error) {
     sds reply;

     // Ensure our client is on the right thread
+    serverAssert(!(c->flags & CLIENT_PENDING_WRITE));
+    serverAssert(!(c->flags & CLIENT_UNBLOCKED));
+    serverAssert(aeThreadOwnsLock());
     c->iel = serverTL - server.rgthreadvar;

     /* Cached across calls. */
@@ -3976,6 +3976,7 @@ int sentinelSendSlaveOf(sentinelRedisInstance *ri, char *host, int port) {

 /* Setup the master state to start a failover. */
 void sentinelStartFailover(sentinelRedisInstance *master) {
+    serverAssert(aeThreadOwnsLock());
     serverAssert(master->flags & SRI_MASTER);

     master->failover_state = SENTINEL_FAILOVER_STATE_WAIT_START;
@@ -4168,6 +4169,7 @@ void sentinelFailoverWaitStart(sentinelRedisInstance *ri) {
 }

 void sentinelFailoverSelectSlave(sentinelRedisInstance *ri) {
+    serverAssert(aeThreadOwnsLock());
     sentinelRedisInstance *slave = sentinelSelectSlave(ri);

     /* We don't handle the timeout in this state as the function aborts
@@ -4292,6 +4294,7 @@ void sentinelFailoverReconfNextSlave(sentinelRedisInstance *master) {
     dictIterator *di;
     dictEntry *de;
     int in_progress = 0;
+    serverAssert(aeThreadOwnsLock());

     di = dictGetIterator(master->slaves);
     while((de = dictNext(di)) != NULL) {
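Note: each of these sentinel entry points (and several server.c paths below) now asserts aeThreadOwnsLock(), i.e. that the caller already holds the global event-loop lock. One common way to make such an assertion possible is to record the owning thread id when the lock is taken; a hedged sketch of that idea, where OwnedLock is a made-up helper and not the ae implementation.

#include <atomic>
#include <cassert>
#include <mutex>
#include <thread>

// A recursive lock that remembers its owner so callers can assert ownership.
class OwnedLock {
    std::recursive_mutex mtx;
    std::atomic<std::thread::id> owner{std::thread::id{}};
    int depth = 0;
public:
    void lock()   { mtx.lock(); owner = std::this_thread::get_id(); ++depth; }
    void unlock() { if (--depth == 0) owner = std::thread::id{}; mtx.unlock(); }
    bool ownsLock() const { return owner.load() == std::this_thread::get_id(); }
};

static OwnedLock g_lock;

void failoverStep() {
    assert(g_lock.ownsLock());   // same spirit as serverAssert(aeThreadOwnsLock())
}

int main() {
    g_lock.lock();
    failoverStep();
    g_lock.unlock();
    return 0;
}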
97
src/server.c
@@ -1526,6 +1526,7 @@ int clientsCronHandleTimeout(client *c, mstime_t now_ms) {
  *
  * The function always returns 0 as it never terminates the client. */
 int clientsCronResizeQueryBuffer(client *c) {
+    AssertCorrectThread(c);
     size_t querybuf_size = sdsAllocSize(c->querybuf);
     time_t idletime = server.unixtime - c->lastinteraction;

@@ -1620,16 +1621,6 @@ void getExpansiveClientsInfo(size_t *in_usage, size_t *out_usage) {
     *out_usage = o;
 }

-
-void AsyncClientCron(void *pv)
-{
-    client *c = (client*)pv;
-    mstime_t now = mstime();
-    if (clientsCronHandleTimeout(c,now)) return;
-    if (clientsCronResizeQueryBuffer(c)) return;
-    if (clientsCronTrackExpansiveClients(c)) return;
-}
-
 /* This function is called by serverCron() and is used in order to perform
  * operations on clients that are important to perform constantly. For instance
  * we use this function in order to disconnect clients after a timeout, including
@@ -1646,7 +1637,7 @@ void AsyncClientCron(void *pv)
  * of clients per second, turning this function into a source of latency.
  */
 #define CLIENTS_CRON_MIN_ITERATIONS 5
-void clientsCron(void) {
+void clientsCron(int iel) {
     /* Try to process at least numclients/server.hz of clients
      * per call. Since normally (if there are no big latency events) this
      * function is called server.hz times per second, in the average case we
@@ -1672,7 +1663,7 @@ void clientsCron(void) {
         listRotate(server.clients);
         head = listFirst(server.clients);
         c = listNodeValue(head);
-        if (c->iel == IDX_EVENT_LOOP_MAIN)
+        if (c->iel == iel)
         {
             /* The following functions do different service checks on the client.
              * The protocol is that they return non-zero if the client was
@@ -1681,11 +1672,6 @@ void clientsCron(void) {
             if (clientsCronResizeQueryBuffer(c)) continue;
             if (clientsCronTrackExpansiveClients(c)) continue;
         }
-        else if (IDX_EVENT_LOOP_MAIN > 1)
-        {
-            aePostFunction(server.rgthreadvar[c->iel].el, AsyncClientCron, c);
-        }
-
     }
 }

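Note: clientsCron now takes the event-loop index and only services clients that belong to the calling thread; the old path that posted AsyncClientCron to other threads is gone, because every thread now runs its own cron pass (see serverCronLite below). A condensed, self-contained model of that filtering loop; Client and cronPass are illustrative names.

#include <vector>
#include <cstdio>

struct Client { int iel; const char *name; };

// Each thread calls cronPass(iel) with its own index and skips clients it does not own.
void cronPass(const std::vector<Client> &clients, int iel) {
    for (const Client &c : clients) {
        if (c.iel != iel)
            continue;                       // another thread's cron will handle it
        std::printf("thread %d services %s\n", iel, c.name);
    }
}

int main() {
    std::vector<Client> clients = {{0, "a"}, {1, "b"}, {0, "c"}};
    cronPass(clients, 0);   // main event loop
    cronPass(clients, 1);   // worker event loop
    return 0;
}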
@@ -1787,6 +1773,8 @@ int serverCron(struct aeEventLoop *eventLoop, long long id, void *clientData) {
     UNUSED(id);
     UNUSED(clientData);

+    ProcessPendingAsyncWrites();    // This is really a bug, but for now catch any laggards that didn't clean up
+
     /* Software watchdog: deliver the SIGALRM that will reach the signal
      * handler if we don't return here fast enough. */
     if (server.watchdog_period) watchdogScheduleSignal(server.watchdog_period);
@@ -1898,7 +1886,7 @@ int serverCron(struct aeEventLoop *eventLoop, long long id, void *clientData) {
     }

     /* We need to do a few operations on clients asynchronously. */
-    clientsCron();
+    clientsCron(IDX_EVENT_LOOP_MAIN);

     /* Handle background operations on Redis databases. */
     databasesCron();
@@ -2055,7 +2043,13 @@ int serverCronLite(struct aeEventLoop *eventLoop, long long id, void *clientData

     int iel = ielFromEventLoop(eventLoop);
     serverAssert(iel != IDX_EVENT_LOOP_MAIN);

+    aeAcquireLock();
+    ProcessPendingAsyncWrites();    // A bug but leave for now, events should clean up after themselves
+    clientsCron(iel);
+
     freeClientsInAsyncFreeQueue(iel);
+    aeReleaseLock();
+
     return 1000/server.hz;
 }
@@ -2126,12 +2120,11 @@ void beforeSleepLite(struct aeEventLoop *eventLoop)
     int iel = ielFromEventLoop(eventLoop);

     /* Try to process pending commands for clients that were just unblocked. */
-    if (listLength(server.rgthreadvar[iel].unblocked_clients)) {
     aeAcquireLock();
+    if (listLength(server.rgthreadvar[iel].unblocked_clients)) {
         processUnblockedClients(iel);
-        aeReleaseLock();
     }
+    aeReleaseLock();

     /* Handle writes with pending output buffers. */
     handleClientsWithPendingWrites(iel);
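Note: beforeSleepLite previously checked the unblocked-client list before taking the lock and only released the lock on one path; the new order takes the lock first, checks under it, and releases unconditionally. Checking shared state before acquiring the lock is a classic time-of-check/time-of-use race; a compact sketch of the corrected shape, with an invented queue name.

#include <list>
#include <mutex>

static std::mutex g_lock;
static std::list<int> g_unblocked;   // stand-in for unblocked_clients

void beforeSleepStep() {
    // Racy form: if (!g_unblocked.empty()) { lock; ...; unlock; }  -- the list can
    // change between the check and the lock, and the unlock is skipped otherwise.

    // Corrected form, mirroring the hunk above: lock, check under the lock, always unlock.
    std::lock_guard<std::mutex> g(g_lock);
    if (!g_unblocked.empty()) {
        g_unblocked.clear();         // "process" the unblocked clients
    }
}                                    // lock released unconditionally here

int main() {
    g_unblocked.push_back(1);
    beforeSleepStep();
    return g_unblocked.empty() ? 0 : 1;
}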
@@ -2752,9 +2745,18 @@ void resetServerStats(void) {
 static void initNetworkingThread(int iel, int fReusePort)
 {
     /* Open the TCP listening socket for the user commands. */
+    if (fReusePort || (iel == IDX_EVENT_LOOP_MAIN))
+    {
         if (server.port != 0 &&
             listenToPort(server.port,server.rgthreadvar[iel].ipfd,&server.rgthreadvar[iel].ipfd_count, fReusePort) == C_ERR)
             exit(1);
+    }
+    else
+    {
+        // We use the main threads file descriptors
+        memcpy(server.rgthreadvar[iel].ipfd, server.rgthreadvar[IDX_EVENT_LOOP_MAIN].ipfd, sizeof(int)*CONFIG_BINDADDR_MAX);
+        server.rgthreadvar[iel].ipfd_count = server.rgthreadvar[IDX_EVENT_LOOP_MAIN].ipfd_count;
+    }

     /* Create an event handler for accepting new connections in TCP */
     for (int j = 0; j < server.rgthreadvar[iel].ipfd_count; j++) {
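Note: initNetworkingThread only opens its own listening socket when fReusePort is set (or on the main thread); otherwise the worker borrows the main thread's accept descriptors. With SO_REUSEPORT (Linux and recent BSDs) each thread can bind its own socket to the same port and the kernel load-balances incoming connections; a minimal POSIX sketch of opening such a listener, with error handling trimmed.

#include <netinet/in.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

// Open a listening socket that other threads/processes may also bind to the same port.
int open_reuseport_listener(unsigned short port) {
    int fd = socket(AF_INET, SOCK_STREAM, 0);
    if (fd < 0) return -1;

    int on = 1;
    setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &on, sizeof(on));
    setsockopt(fd, SOL_SOCKET, SO_REUSEPORT, &on, sizeof(on));   // per-thread listeners

    struct sockaddr_in sa;
    memset(&sa, 0, sizeof(sa));
    sa.sin_family = AF_INET;
    sa.sin_addr.s_addr = htonl(INADDR_ANY);
    sa.sin_port = htons(port);

    if (bind(fd, (struct sockaddr *)&sa, sizeof(sa)) < 0 || listen(fd, 511) < 0) {
        close(fd);
        return -1;
    }
    return fd;
}

int main(void) {
    int fd = open_reuseport_listener(7777);
    if (fd >= 0) close(fd);
    return 0;
}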
@@ -3181,44 +3183,6 @@ void preventCommandReplication(client *c) {
     c->flags |= CLIENT_PREVENT_REPL_PROP;
 }

-void ProcessPendingAsyncWrites()
-{
-    while(listLength(serverTL->clients_pending_asyncwrite)) {
-        client *c = (client*)listNodeValue(listFirst(serverTL->clients_pending_asyncwrite));
-        listDelNode(serverTL->clients_pending_asyncwrite, listFirst(serverTL->clients_pending_asyncwrite));
-
-        serverAssert(c->flags & CLIENT_PENDING_ASYNCWRITE);
-
-        // TODO: Append to end of reply block?
-
-        size_t size = c->bufposAsync;
-        clientReplyBlock *reply = (clientReplyBlock*)zmalloc(size + sizeof(clientReplyBlock), MALLOC_LOCAL);
-        /* take over the allocation's internal fragmentation */
-        reply->size = zmalloc_usable(reply) - sizeof(clientReplyBlock);
-        reply->used = c->bufposAsync;
-        memcpy(reply->buf, c->bufAsync, c->bufposAsync);
-        listAddNodeTail(c->listbufferDoneAsync, reply);
-        c->bufposAsync = 0;
-        c->flags &= ~CLIENT_PENDING_ASYNCWRITE;
-
-        // Now install the write event handler
-        int ae_flags = AE_WRITABLE;
-        /* For the fsync=always policy, we want that a given FD is never
-         * served for reading and writing in the same event loop iteration,
-         * so that in the middle of receiving the query, and serving it
-         * to the client, we'll call beforeSleep() that will do the
-         * actual fsync of AOF to disk. AE_BARRIER ensures that. */
-        if (server.aof_state == AOF_ON &&
-            server.aof_fsync == AOF_FSYNC_ALWAYS)
-        {
-            ae_flags |= AE_BARRIER;
-        }
-
-        if (aeCreateRemoteFileEvent(server.rgthreadvar[c->iel].el, c->fd, ae_flags, sendReplyToClient, c, FALSE) == AE_ERR)
-            freeClientAsync(c);
-    }
-}
-
 /* Call() is the core of Redis execution of a command.
  *
  * The following flags can be passed:
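Note: the ProcessPendingAsyncWrites body is deleted here while server.h still declares it and serverCron/serverCronLite still call it, so the definition presumably lives elsewhere after this commit. One detail in the removed body worth calling out: the reply block claims the allocator's real usable size (zmalloc_usable(reply) - sizeof(clientReplyBlock)) so rounding slack becomes usable buffer space instead of waste. The same idea with glibc's malloc_usable_size, which is a glibc/jemalloc-style extension rather than standard C++, and a flexible array member that GCC/Clang accept as an extension in C++.

#include <cstdlib>
#include <cstring>
#include <malloc.h>   // malloc_usable_size (glibc)

struct ReplyBlock {
    size_t size;      // capacity of buf[]
    size_t used;      // bytes currently filled
    char buf[];       // flexible array member (GNU/Clang extension in C++)
};

int main() {
    const char payload[] = "+OK\r\n";
    size_t want = sizeof(payload) - 1;

    ReplyBlock *r = (ReplyBlock *)std::malloc(sizeof(ReplyBlock) + want);
    if (!r) return 1;

    // Take over the allocation's internal fragmentation: the allocator may have
    // handed us more than we asked for, so record the real capacity.
    r->size = malloc_usable_size(r) - sizeof(ReplyBlock);
    r->used = want;
    std::memcpy(r->buf, payload, want);

    std::free(r);
    return 0;
}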
@@ -3260,6 +3224,7 @@ void call(client *c, int flags) {
     long long dirty, start, duration;
     int client_old_flags = c->flags;
     struct redisCommand *real_cmd = c->cmd;
+    serverAssert(aeThreadOwnsLock());

     /* Sent the command to clients in MONITOR mode, only if the commands are
      * not generated from reading an AOF. */
@@ -3389,6 +3354,7 @@ void call(client *c, int flags) {
  * other operations can be performed by the caller. Otherwise
  * if C_ERR is returned the client was destroyed (i.e. after QUIT). */
 int processCommand(client *c) {
+    serverAssert(aeThreadOwnsLock());
     /* The QUIT command is handled separately. Normal command procs will
      * go through checking for replication and QUIT will cause trouble
      * when FORCE_REPLICATION is enabled and would be implemented in
@@ -4517,6 +4483,7 @@ void infoCommand(client *c) {

 void monitorCommand(client *c) {
     /* ignore MONITOR if already slave or in monitor mode */
+    serverAssert(aeThreadOwnsLock());
     if (c->flags & CLIENT_SLAVE) return;

     c->flags |= (CLIENT_SLAVE|CLIENT_MONITOR);
@@ -4872,7 +4839,7 @@ void *workerThreadMain(void *parg)

     int isMainThread = (iel == IDX_EVENT_LOOP_MAIN);
     aeEventLoop *el = server.rgthreadvar[iel].el;
-    aeSetBeforeSleepProc(el, isMainThread ? beforeSleep : beforeSleepLite, isMainThread ? 0 : AE_SLEEP_THREADSAFE);
+    aeSetBeforeSleepProc(el, isMainThread ? beforeSleep : beforeSleepLite, /*isMainThread ? 0 : AE_SLEEP_THREADSAFE*/ 0);
     aeSetAfterSleepProc(el, isMainThread ? afterSleep : NULL, 0);
     aeMain(el);
     aeDeleteEventLoop(el);
@@ -4932,6 +4899,9 @@ int main(int argc, char **argv) {
     {
         initServerThread(server.rgthreadvar+iel, iel == IDX_EVENT_LOOP_MAIN);
     }
+    serverTL = &server.rgthreadvar[IDX_EVENT_LOOP_MAIN];
+    aeAcquireLock();    // We own the lock on boot
+
     ACLInit(); /* The ACL subsystem must be initialized ASAP because the
                   basic networking code and client creation depends on it. */
     moduleInitModulesSystem();
@@ -5046,9 +5016,8 @@ int main(int argc, char **argv) {

     initServer();

-    server.cthreads = 1;    //testing
-    initNetworking(1 /* fReusePort */);
-    serverTL = &server.rgthreadvar[IDX_EVENT_LOOP_MAIN];
+    server.cthreads = 4;    //testing
+    initNetworking(0 /* fReusePort */);

     if (background || server.pidfile) createPidFile();
     redisSetProcTitle(argv[0]);
@@ -5089,6 +5058,8 @@ int main(int argc, char **argv) {
         serverLog(LL_WARNING,"WARNING: You specified a maxmemory value that is less than 1MB (current value is %llu bytes). Are you sure this is what you really want?", server.maxmemory);
     }

+    aeReleaseLock();    //Finally we can dump the lock
+
     serverAssert(server.cthreads > 0 && server.cthreads <= MAX_EVENT_LOOPS);
     pthread_t rgthread[MAX_EVENT_LOOPS];
     for (int iel = 0; iel < server.cthreads; ++iel)
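Note: main() now owns the global lock for the whole of initialization (aeAcquireLock right after the per-thread state is set up, aeReleaseLock once configuration is done) and then spawns server.cthreads worker threads, each running its own event loop via workerThreadMain. A skeletal version of that startup sequence in portable C++; runEventLoop and g_globalLock are placeholders, not KeyDB functions.

#include <mutex>
#include <thread>
#include <vector>
#include <cstdio>

static std::mutex g_globalLock;     // stand-in for the ae global lock

void runEventLoop(int iel) {
    // Workers briefly take the lock, so they only really start once main() releases it.
    std::lock_guard<std::mutex> g(g_globalLock);
    std::printf("event loop %d running\n", iel);
}

int main() {
    const int cthreads = 4;         // mirrors server.cthreads = 4 in the hunk above

    g_globalLock.lock();            // "we own the lock on boot"
    std::puts("initializing under the global lock");
    g_globalLock.unlock();          // "finally we can dump the lock"

    std::vector<std::thread> threads;
    for (int iel = 0; iel < cthreads; ++iel)
        threads.emplace_back(runEventLoop, iel);
    for (auto &t : threads) t.join();
    return 0;
}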
@@ -1205,6 +1205,7 @@ struct redisServer {
     int aof_pipe_read_data_from_parent;
     int aof_pipe_write_ack_to_parent;
     int aof_pipe_read_ack_from_child;
+    aeEventLoop *el_alf_pip_read_ack_from_child;
     int aof_pipe_write_ack_to_child;
     int aof_pipe_read_ack_from_parent;
     int aof_stop_sending_diff;     /* If true stop sending accumulated diffs
@@ -1640,6 +1641,7 @@ void setDeferredArrayLenAsync(client *c, void *node, long length);
 void addReplySdsAsync(client *c, sds s);
 void addReplyBulkSdsAsync(client *c, sds s);
 void addReplyPushLenAsync(client *c, long length);
+void addReplyLongLongAsync(client *c, long long ll);

 void ProcessPendingAsyncWrites(void);
