diff --git a/src/aof.cpp b/src/aof.cpp index 637b2ce34..e18bce652 100644 --- a/src/aof.cpp +++ b/src/aof.cpp @@ -666,7 +666,7 @@ client *createFakeClient(void) { c->flags = 0; c->fPendingAsyncWrite = FALSE; c->btype = BLOCKED_NONE; - /* We set the fake client as a slave waiting for the synchronization + /* We set the fake client as a replica waiting for the synchronization * so that Redis will not try to send replies to this client. */ c->replstate = SLAVE_STATE_WAIT_BGSAVE_START; c->reply = listCreate(); diff --git a/src/blocked.cpp b/src/blocked.cpp index c09488b1f..19d7f2fb6 100644 --- a/src/blocked.cpp +++ b/src/blocked.cpp @@ -221,8 +221,8 @@ void replyToBlockedClientTimedOut(client *c) { /* Mass-unblock clients because something changed in the instance that makes * blocking no longer safe. For example clients blocked in list operations - * in an instance which turns from master to slave is unsafe, so this function - * is called when a master turns into a slave. + * in an instance which turns from master to replica is unsafe, so this function + * is called when a master turns into a replica. * * The semantics is to send an -UNBLOCKED error to the client, disconnecting * it at the same time. */ diff --git a/src/config.cpp b/src/config.cpp index 0519e44ff..2ac46da6d 100644 --- a/src/config.cpp +++ b/src/config.cpp @@ -96,7 +96,7 @@ configEnum aof_fsync_enum[] = { /* Output buffer limits presets. */ clientBufferLimitsConfig clientBufferLimitsDefaults[CLIENT_TYPE_OBUF_COUNT] = { {0, 0, 0}, /* normal */ - {1024*1024*256, 1024*1024*64, 60}, /* slave */ + {1024*1024*256, 1024*1024*64, 60}, /* replica */ {1024*1024*32, 1024*1024*8, 60} /* pubsub */ }; diff --git a/src/db.cpp b/src/db.cpp index 2a047870f..0d5042bab 100644 --- a/src/db.cpp +++ b/src/db.cpp @@ -115,7 +115,7 @@ static robj *lookupKey(redisDb *db, robj *key, int flags) { * LOOKUP_NOTOUCH: don't alter the last access time of the key. * * Note: this function also returns NULL if the key is logically expired - * but still existing, in case this is a slave, since this API is called only + * but still existing, in case this is a replica, since this API is called only * for read operations. Even if the key expiry is master-driven, we can * correctly report a key is expired on slaves even if the master is lagging * expiring our key via DELs in the replication link. */ @@ -133,7 +133,7 @@ robj_roptr lookupKeyReadWithFlags(redisDb *db, robj *key, int flags) { return NULL; } - /* However if we are in the context of a slave, expireIfNeeded() will + /* However if we are in the context of a replica, expireIfNeeded() will * not really try to expire the key, it only returns information * about the "logical" status of the key: key expiring is up to the * master in order to have a consistent view of master's data set. @@ -344,7 +344,7 @@ robj *dbRandomKey(redisDb *db) { if (allvolatile && listLength(g_pserver->masters) && --maxtries == 0) { /* If the DB is composed only of keys with an expire set, * it could happen that all the keys are already logically - * expired in the slave, so the function cannot stop because + * expired in the replica, so the function cannot stop because * expireIfNeeded() is false, nor it can stop because * dictGetRandomKey() returns NULL (there are keys to return). * To prevent the infinite loop we do some tries, but if there @@ -1368,7 +1368,7 @@ expireEntry *getExpire(redisDb *db, robj_roptr key) { * to all the slaves and the AOF file if enabled. 
* * This way the key expiry is centralized in one place, and since both - * AOF and the master->slave link guarantee operation ordering, everything + * AOF and the master->replica link guarantee operation ordering, everything * will be consistent even if we allow write operations against expiring * keys. */ void propagateExpire(redisDb *db, robj *key, int lazy) { @@ -1426,10 +1426,10 @@ int keyIsExpired(redisDb *db, robj *key) { * is via lookupKey*() family of functions. * * The behavior of the function depends on the replication role of the - * instance, because slave instances do not expire keys, they wait + * instance, because replica instances do not expire keys, they wait * for DELs from the master for consistency matters. However even * slaves will try to have a coherent return value for the function, - * so that read commands executed in the slave side will be able to + * so that read commands executed in the replica side will be able to * behave like if the key is expired even if still present (because the * master has yet to propagate the DEL). * @@ -1442,9 +1442,9 @@ int keyIsExpired(redisDb *db, robj *key) { int expireIfNeeded(redisDb *db, robj *key) { if (!keyIsExpired(db,key)) return 0; - /* If we are running in the context of a slave, instead of + /* If we are running in the context of a replica, instead of * evicting the expired key from the database, we return ASAP: - * the slave key expiration is controlled by the master that will + * the replica key expiration is controlled by the master that will * send us synthesized DEL operations for expired keys. * * Still we try to return the right information to the caller, diff --git a/src/evict.cpp b/src/evict.cpp index 8cf24dd5e..04fd7a7d6 100644 --- a/src/evict.cpp +++ b/src/evict.cpp @@ -374,8 +374,8 @@ size_t freeMemoryGetNotCountedMemory(void) { listRewind(g_pserver->slaves,&li); while((ln = listNext(&li))) { - client *slave = (client*)listNodeValue(ln); - overhead += getClientOutputBufferMemoryUsage(slave); + client *replica = (client*)listNodeValue(ln); + overhead += getClientOutputBufferMemoryUsage(replica); } } if (g_pserver->aof_state != AOF_OFF) { diff --git a/src/expire.cpp b/src/expire.cpp index 72968119a..0518abdca 100644 --- a/src/expire.cpp +++ b/src/expire.cpp @@ -307,17 +307,17 @@ void activeExpireCycle(int type) { * * Normally slaves do not process expires: they wait the masters to synthesize * DEL operations in order to retain consistency. However writable slaves are - * an exception: if a key is created in the slave and an expire is assigned + * an exception: if a key is created in the replica and an expire is assigned * to it, we need a way to expire such a key, since the master does not know * anything about such a key. * - * In order to do so, we track keys created in the slave side with an expire + * In order to do so, we track keys created in the replica side with an expire * set, and call the expireSlaveKeys() function from time to time in order to * reclaim the keys if they already expired. * * Note that the use case we are trying to cover here, is a popular one where * slaves are put in writable mode in order to compute slow operations in - * the slave side that are mostly useful to actually read data in a more + * the replica side that are mostly useful to actually read data in a more * processed way. Think at sets intersections in a tmp key, with an expire so * that it is also used as a cache to avoid intersecting every time. 
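The db.cpp and expire.cpp hunks above describe the replica-side expiry contract: a logically expired key is hidden from read commands, but the actual deletion is left to the DEL the master will eventually propagate. A minimal sketch of that decision, where `logical_db`, `is_replica` and the helper names are illustrative stand-ins rather than the actual KeyDB structures:

    #include <cstdint>
    #include <ctime>
    #include <string>
    #include <unordered_map>

    // Illustrative stand-in for one keyspace with per-key expiry (ms since epoch).
    struct logical_db {
        std::unordered_map<std::string, std::string> dict;
        std::unordered_map<std::string, int64_t> expires;
    };

    static int64_t now_ms() { return static_cast<int64_t>(time(nullptr)) * 1000; }

    static bool key_is_expired(const logical_db &db, const std::string &key) {
        auto it = db.expires.find(key);
        return it != db.expires.end() && it->second < now_ms();
    }

    // Mirrors the expireIfNeeded() contract described above: a master actually
    // removes the key (and the real server propagates DEL/UNLINK); a replica only
    // *reports* the key as expired and waits for the master's DEL to delete it.
    static bool expire_if_needed(logical_db &db, const std::string &key, bool is_replica) {
        if (!key_is_expired(db, key)) return false;  // key is still live
        if (is_replica) return true;                 // logically expired, keep the data
        db.dict.erase(key);                          // master: evict locally
        db.expires.erase(key);
        return true;
    }

    // Read path: on either role, read commands never see an expired key.
    static const std::string *lookup_key_read(logical_db &db, const std::string &key, bool is_replica) {
        if (expire_if_needed(db, key, is_replica)) return nullptr;
        auto it = db.dict.find(key);
        return it == db.dict.end() ? nullptr : &it->second;
    }

Because the master both deletes the key and emits the DEL through the same ordered channels (AOF and the replication link), the replica's "report expired but keep the data" behaviour stays consistent with the master's view, as the comment above points out.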
* @@ -326,7 +326,7 @@ void activeExpireCycle(int type) { *----------------------------------------------------------------------------*/ /* The dictionary where we remember key names and database ID of keys we may - * want to expire from the slave. Since this function is not often used we + * want to expire from the replica. Since this function is not often used we * don't even care to initialize the database at startup. We'll do it once * the feature is used the first time, that is, when rememberSlaveKeyWithExpire() * is called. @@ -389,7 +389,7 @@ void expireSlaveKeys(void) { } /* Set the new bitmap as value of the key, in the dictionary - * of keys with an expire set directly in the writable slave. Otherwise + * of keys with an expire set directly in the writable replica. Otherwise * if the bitmap is zero, we no longer need to keep track of it. */ if (new_dbids) dictSetUnsignedIntegerVal(de,new_dbids); @@ -406,7 +406,7 @@ void expireSlaveKeys(void) { } /* Track keys that received an EXPIRE or similar command in the context - * of a writable slave. */ + * of a writable replica. */ void rememberSlaveKeyWithExpire(redisDb *db, robj *key) { if (slaveKeysWithExpire == NULL) { static dictType dt = { @@ -448,7 +448,7 @@ size_t getSlaveKeyWithExpireCount(void) { * * Note: technically we should handle the case of a single DB being flushed * but it is not worth it since anyway race conditions using the same set - * of key names in a wriatable slave and in its master will lead to + * of key names in a wriatable replica and in its master will lead to * inconsistencies. This is just a best-effort thing we do. */ void flushSlaveKeysWithExpireList(void) { if (slaveKeysWithExpire) { @@ -486,7 +486,7 @@ void expireGenericCommand(client *c, long long basetime, int unit) { /* EXPIRE with negative TTL, or EXPIREAT with a timestamp into the past * should never be executed as a DEL when load the AOF or in the context - * of a slave instance. + * of a replica instance. * * Instead we take the other branch of the IF statement setting an expire * (possibly in the past) and wait for an explicit DEL from the master. */ diff --git a/src/module.cpp b/src/module.cpp index 1d00d2b1d..a13ba549c 100644 --- a/src/module.cpp +++ b/src/module.cpp @@ -1439,7 +1439,7 @@ int RM_GetSelectedDb(RedisModuleCtx *ctx) { * * * REDISMODULE_CTX_FLAGS_MASTER: The Redis instance is a master * - * * REDISMODULE_CTX_FLAGS_SLAVE: The Redis instance is a slave + * * REDISMODULE_CTX_FLAGS_SLAVE: The Redis instance is a replica * * * REDISMODULE_CTX_FLAGS_READONLY: The Redis instance is read-only * @@ -4289,7 +4289,7 @@ size_t RM_GetClusterSize(void) { * * The arguments ip, master_id, port and flags can be NULL in case we don't * need to populate back certain info. If an ip and master_id (only populated - * if the instance is a slave) are specified, they point to buffers holding + * if the instance is a replica) are specified, they point to buffers holding * at least REDISMODULE_NODE_ID_LEN bytes. The strings written back as ip * and master_id are not null terminated. 
* @@ -4300,7 +4300,7 @@ size_t RM_GetClusterSize(void) { * * REDISMODULE_NODE_SLAVE The node is a replica * * REDISMODULE_NODE_PFAIL We see the node as failing * * REDISMODULE_NODE_FAIL The cluster agrees the node is failing - * * REDISMODULE_NODE_NOFAILOVER The slave is configured to never failover + * * REDISMODULE_NODE_NOFAILOVER The replica is configured to never failover */ clusterNode *clusterLookupNode(const char *name); /* We need access to internals */ @@ -5181,7 +5181,7 @@ void moduleInitModulesSystem(void) { * The function aborts the server on errors, since to start with missing * modules is not considered sane: clients may rely on the existence of * given commands, loading AOF also may need some modules to exist, and - * if this instance is a slave, it must understand commands from master. */ + * if this instance is a replica, it must understand commands from master. */ void moduleLoadFromQueue(void) { listIter li; listNode *ln; diff --git a/src/multi.cpp b/src/multi.cpp index 3383f5a49..2018e08d9 100644 --- a/src/multi.cpp +++ b/src/multi.cpp @@ -142,7 +142,7 @@ void execCommand(client *c) { } /* If there are write commands inside the transaction, and this is a read - * only slave, we want to send an error. This happens when the transaction + * only replica, we want to send an error. This happens when the transaction * was initiated when the instance was a master or a writable replica and * then the configuration changed (for example instance was turned into * a replica). */ @@ -195,7 +195,7 @@ void execCommand(client *c) { int is_master = listLength(g_pserver->masters) == 0; g_pserver->dirty++; /* If inside the MULTI/EXEC block this instance was suddenly - * switched from master to slave (using the SLAVEOF command), the + * switched from master to replica (using the SLAVEOF command), the * initial MULTI was propagated into the replication backlog, but the * rest was not. We need to make sure to at least terminate the * backlog with the final EXEC. */ diff --git a/src/networking.cpp b/src/networking.cpp index 0588745b2..636e95c62 100644 --- a/src/networking.cpp +++ b/src/networking.cpp @@ -195,7 +195,7 @@ client *createClient(int fd, int iel) { * buffers can hold, then we'll really install the handler. */ void clientInstallWriteHandler(client *c) { /* Schedule the client to write the output buffers to the socket only - * if not already done and, for slaves, if the slave can actually receive + * if not already done and, for slaves, if the replica can actually receive * writes at this stage. */ if (!(c->flags & CLIENT_PENDING_WRITE) && (c->replstate == REPL_STATE_NONE || @@ -239,7 +239,7 @@ void clientInstallAsyncWriteHandler(client *c) { * * 1) The event handler should already be installed since the output buffer * already contains something. - * 2) The client is a slave but not yet online, so we want to just accumulate + * 2) The client is a replica but not yet online, so we want to just accumulate * writes in the buffer but not actually sending them yet. * * Typically gets called every time a reply is built, before adding more @@ -442,7 +442,7 @@ void addReplyErrorLengthCore(client *c, const char *s, size_t len, bool fAsync) addReplyProtoCore(c,s,len,fAsync); addReplyProtoCore(c,"\r\n",2,fAsync); - /* Sometimes it could be normal that a slave replies to a master with + /* Sometimes it could be normal that a replica replies to a master with * an error and this function gets called. 
Actually the error will never * be sent because addReply*() against master clients has no effect... * A notable example is: @@ -1214,7 +1214,7 @@ void unlinkClient(client *c) { /* In the case of diskless replication the fork is writing to the * sockets and just closing the fd isn't enough, if we don't also - * shutdown the socket the fork will continue to write to the slave + * shutdown the socket the fork will continue to write to the replica * and the salve will only find out that it was disconnected when * it will finish reading the rdb. */ if ((c->flags & CLIENT_SLAVE) && @@ -1299,7 +1299,7 @@ bool freeClient(client *c) { } } - /* Log link disconnection with slave */ + /* Log link disconnection with replica */ if ((c->flags & CLIENT_SLAVE) && !(c->flags & CLIENT_MONITOR)) { serverLog(LL_WARNING,"Connection with replica %s lost.", replicationGetSlaveName(c)); @@ -1333,8 +1333,8 @@ bool freeClient(client *c) { * places where active clients may be referenced. */ unlinkClient(c); - /* Master/slave cleanup Case 1: - * we lost the connection with a slave. */ + /* Master/replica cleanup Case 1: + * we lost the connection with a replica. */ if (c->flags & CLIENT_SLAVE) { if (c->replstate == SLAVE_STATE_SEND_BULK) { if (c->repldbfd != -1) close(c->repldbfd); @@ -1352,7 +1352,7 @@ bool freeClient(client *c) { refreshGoodSlavesCount(); } - /* Master/slave cleanup Case 2: + /* Master/replica cleanup Case 2: * we lost the connection with the master. */ if (c->flags & CLIENT_MASTER) replicationHandleMasterDisconnection(MasterInfoFromClient(c)); @@ -1494,7 +1494,7 @@ int writeToClient(int fd, client *c, int handler_installed) { * just deliver as much data as it is possible to deliver. * * Moreover, we also send as much as possible if the client is - * a slave (otherwise, on high-speed traffic, the replication + * a replica (otherwise, on high-speed traffic, the replication * buffer will grow indefinitely) */ if (totwritten > NET_MAX_WRITES_PER_EVENT && (g_pserver->maxmemory == 0 || @@ -1805,7 +1805,7 @@ int processInlineBuffer(client *c) { } /* Newline from slaves can be used to refresh the last ACK time. - * This is useful for a slave to ping back while loading a big + * This is useful for a replica to ping back while loading a big * RDB file. */ if (querylen == 0 && c->flags & CLIENT_SLAVE) c->repl_ack_time = g_pserver->unixtime; @@ -2038,8 +2038,8 @@ int processCommandAndResetClient(client *c, int flags) { } if (serverTL->current_client == NULL) deadclient = 1; serverTL->current_client = NULL; - /* freeMemoryIfNeeded may flush slave output buffers. This may - * result into a slave, that may be the active client, to be + /* freeMemoryIfNeeded may flush replica output buffers. This may + * result into a replica, that may be the active client, to be * freed. */ return deadclient ? C_ERR : C_OK; } @@ -2060,7 +2060,7 @@ void processInputBuffer(client *c, int callFlags) { if (c->flags & CLIENT_BLOCKED) break; /* Don't process input from the master while there is a busy script - * condition on the slave. We want just to accumulate the replication + * condition on the replica. We want just to accumulate the replication * stream (instead of replying -BUSY like we do with other clients) and * later resume the processing. 
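The writeToClient() hunk above caps how much is written per event so one client cannot starve the rest, with two exceptions: keep draining when memory is over the configured limit, and always keep draining replicas so the replication buffer cannot grow without bound. A sketch of that rule, with `used_memory`, `maxmemory` and `is_replica` standing in for zmalloc_used_memory(), the configured limit and the CLIENT_SLAVE flag:

    #include <cstddef>

    constexpr size_t NET_MAX_WRITES_PER_EVENT = 1024 * 64; // per-event write budget

    static bool should_stop_writing(size_t totwritten, size_t used_memory,
                                    size_t maxmemory, bool is_replica) {
        if (totwritten <= NET_MAX_WRITES_PER_EVENT) return false; // budget not exhausted yet
        if (is_replica) return false;                 // replicas: drain as much as possible
        if (maxmemory != 0 && used_memory >= maxmemory) return false; // over the limit: free buffers ASAP
        return true;                                  // normal client: yield to other clients
    }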
*/ if (g_pserver->lua_timedout && c->flags & CLIENT_MASTER) break; @@ -2910,10 +2910,10 @@ void flushSlavesOutputBuffers(void) { listRewind(g_pserver->slaves,&li); while((ln = listNext(&li))) { - client *slave = (client*)listNodeValue(ln); + client *replica = (client*)listNodeValue(ln); int events; - if (!FCorrectThread(slave)) + if (!FCorrectThread(replica)) continue; // we cannot synchronously flush other thread's clients /* Note that the following will not flush output buffers of slaves @@ -2922,12 +2922,12 @@ void flushSlavesOutputBuffers(void) { * of put_online_on_ack is to postpone the moment it is installed. * This is what we want since slaves in this state should not receive * writes before the first ACK. */ - events = aeGetFileEvents(g_pserver->rgthreadvar[slave->iel].el,slave->fd); + events = aeGetFileEvents(g_pserver->rgthreadvar[replica->iel].el,replica->fd); if (events & AE_WRITABLE && - slave->replstate == SLAVE_STATE_ONLINE && - clientHasPendingReplies(slave)) + replica->replstate == SLAVE_STATE_ONLINE && + clientHasPendingReplies(replica)) { - writeToClient(slave->fd,slave,0); + writeToClient(replica->fd,replica,0); } } } diff --git a/src/rdb.cpp b/src/rdb.cpp index ac0ad0b16..476b2e2e4 100644 --- a/src/rdb.cpp +++ b/src/rdb.cpp @@ -2119,7 +2119,7 @@ int rdbLoadRio(rio *rdb, rdbSaveInfo *rsi, int loading_aof) { * an RDB file from disk, either at startup, or when an RDB was * received from the master. In the latter case, the master is * responsible for key expiry. If we would expire keys here, the - * snapshot taken by the master may not be reflected on the slave. */ + * snapshot taken by the master may not be reflected on the replica. */ if (listLength(g_pserver->masters) == 0 && !loading_aof && expiretime != -1 && expiretime < now) { decrRefCount(key); key = nullptr; @@ -2273,7 +2273,7 @@ void backgroundSaveDoneHandlerSocket(int exitcode, int bysignal) { g_pserver->rdb_child_type = RDB_CHILD_TYPE_NONE; g_pserver->rdb_save_time_start = -1; - /* If the child returns an OK exit code, read the set of slave client + /* If the child returns an OK exit code, read the set of replica client * IDs and the associated status code. We'll terminate all the slaves * in error state. * @@ -2312,35 +2312,35 @@ void backgroundSaveDoneHandlerSocket(int exitcode, int bysignal) { listRewind(g_pserver->slaves,&li); while((ln = listNext(&li))) { - client *slave = (client*)ln->value; + client *replica = (client*)ln->value; - if (slave->replstate == SLAVE_STATE_WAIT_BGSAVE_END) { + if (replica->replstate == SLAVE_STATE_WAIT_BGSAVE_END) { uint64_t j; int errorcode = 0; - /* Search for the slave ID in the reply. In order for a slave to + /* Search for the replica ID in the reply. In order for a replica to * continue the replication process, we need to find it in the list, * and it must have an error code set to 0 (which means success). */ for (j = 0; j < ok_slaves[0]; j++) { - if (slave->id == ok_slaves[2*j+1]) { + if (replica->id == ok_slaves[2*j+1]) { errorcode = ok_slaves[2*j+2]; break; /* Found in slaves list. */ } } if (j == ok_slaves[0] || errorcode != 0) { serverLog(LL_WARNING, - "Closing slave %s: child->slave RDB transfer failed: %s", - replicationGetSlaveName(slave), + "Closing replica %s: child->replica RDB transfer failed: %s", + replicationGetSlaveName(replica), (errorcode == 0) ? 
"RDB transfer child aborted" : strerror(errorcode)); - freeClient(slave); + freeClient(replica); } else { serverLog(LL_WARNING, "Slave %s correctly received the streamed RDB file.", - replicationGetSlaveName(slave)); + replicationGetSlaveName(replica)); /* Restore the socket as non-blocking. */ - anetNonBlock(NULL,slave->fd); - anetSendTimeout(NULL,slave->fd,0); + anetNonBlock(NULL,replica->fd); + anetSendTimeout(NULL,replica->fd,0); } } } @@ -2407,17 +2407,17 @@ int rdbSaveToSlavesSockets(rdbSaveInfo *rsi) { listRewind(g_pserver->slaves,&li); while((ln = listNext(&li))) { - client *slave = (client*)ln->value; + client *replica = (client*)ln->value; - if (slave->replstate == SLAVE_STATE_WAIT_BGSAVE_START) { - clientids[numfds] = slave->id; - fds[numfds++] = slave->fd; - replicationSetupSlaveForFullResync(slave,getPsyncInitialOffset()); + if (replica->replstate == SLAVE_STATE_WAIT_BGSAVE_START) { + clientids[numfds] = replica->id; + fds[numfds++] = replica->fd; + replicationSetupSlaveForFullResync(replica,getPsyncInitialOffset()); /* Put the socket in blocking mode to simplify RDB transfer. * We'll restore it when the children returns (since duped socket * will share the O_NONBLOCK attribute with the parent). */ - anetBlock(NULL,slave->fd); - anetSendTimeout(NULL,slave->fd,g_pserver->repl_timeout*1000); + anetBlock(NULL,replica->fd); + anetSendTimeout(NULL,replica->fd,g_pserver->repl_timeout*1000); } } @@ -2451,19 +2451,19 @@ int rdbSaveToSlavesSockets(rdbSaveInfo *rsi) { g_pserver->child_info_data.cow_size = private_dirty; sendChildInfo(CHILD_INFO_TYPE_RDB); - /* If we are returning OK, at least one slave was served + /* If we are returning OK, at least one replica was served * with the RDB file as expected, so we need to send a report * to the parent via the pipe. The format of the message is: * - * ... + * ... * - * len, slave IDs, and slave errors, are all uint64_t integers, + * len, replica IDs, and replica errors, are all uint64_t integers, * so basically the reply is composed of 64 bits for the len field * plus 2 additional 64 bit integers for each entry, for a total * of 'len' entries. * - * The 'id' represents the slave's client ID, so that the master - * can match the report with a specific slave, and 'error' is + * The 'id' represents the replica's client ID, so that the master + * can match the report with a specific replica, and 'error' is * set to 0 if the replication process terminated with a success * or the error code if an error occurred. */ void *msg = zmalloc(sizeof(uint64_t)*(1+2*numfds), MALLOC_LOCAL); @@ -2504,12 +2504,12 @@ int rdbSaveToSlavesSockets(rdbSaveInfo *rsi) { * replicationSetupSlaveForFullResync() turned it into BGSAVE_END */ listRewind(g_pserver->slaves,&li); while((ln = listNext(&li))) { - client *slave = (client*)ln->value; + client *replica = (client*)ln->value; int j; for (j = 0; j < numfds; j++) { - if (slave->id == clientids[j]) { - slave->replstate = SLAVE_STATE_WAIT_BGSAVE_START; + if (replica->id == clientids[j]) { + replica->replstate = SLAVE_STATE_WAIT_BGSAVE_START; break; } } @@ -2603,14 +2603,14 @@ rdbSaveInfo *rdbPopulateSaveInfo(rdbSaveInfo *rsi) { /* If the instance is a master, we can populate the replication info * only when repl_backlog is not NULL. If the repl_backlog is NULL, * it means that the instance isn't in any replication chains. 
In this - * scenario the replication info is useless, because when a slave + * scenario the replication info is useless, because when a replica * connects to us, the NULL repl_backlog will trigger a full * synchronization, at the same time we will use a new replid and clear * replid2. */ if (g_pserver->fActiveReplica || (!listLength(g_pserver->masters) && g_pserver->repl_backlog)) { /* Note that when g_pserver->replicaseldb is -1, it means that this master * didn't apply any write commands after a full synchronization. - * So we can let repl_stream_db be 0, this allows a restarted slave + * So we can let repl_stream_db be 0, this allows a restarted replica * to reload replication ID/offset, it's safe because the next write * command must generate a SELECT statement. */ rsi->repl_stream_db = g_pserver->replicaseldb == -1 ? 0 : g_pserver->replicaseldb; @@ -2624,7 +2624,7 @@ rdbSaveInfo *rdbPopulateSaveInfo(rdbSaveInfo *rsi) { } struct redisMaster *miFirst = (redisMaster*)(listLength(g_pserver->masters) ? listNodeValue(listFirst(g_pserver->masters)) : NULL); - /* If the instance is a slave we need a connected master + /* If the instance is a replica we need a connected master * in order to fetch the currently selected DB. */ if (miFirst && miFirst->master) { rsi->repl_stream_db = miFirst->master->db->id; @@ -2632,7 +2632,7 @@ rdbSaveInfo *rdbPopulateSaveInfo(rdbSaveInfo *rsi) { } /* If we have a cached master we can use it in order to populate the - * replication selected DB info inside the RDB file: the slave can + * replication selected DB info inside the RDB file: the replica can * increment the master_repl_offset only from data arriving from the * master, so if we are disconnected the offset in the cached master * is valid. */ diff --git a/src/redis-benchmark.cpp b/src/redis-benchmark.cpp index 19aa7a892..17866bec1 100644 --- a/src/redis-benchmark.cpp +++ b/src/redis-benchmark.cpp @@ -152,7 +152,7 @@ typedef struct clusterNode { int port; sds name; int flags; - sds replicate; /* Master ID if node is a slave */ + sds replicate; /* Master ID if node is a replica */ int *slots; int slots_count; int current_slot_index; diff --git a/src/redis-cli-cpphelper.cpp b/src/redis-cli-cpphelper.cpp index 208c1656d..16afcaf61 100644 --- a/src/redis-cli-cpphelper.cpp +++ b/src/redis-cli-cpphelper.cpp @@ -124,8 +124,8 @@ extern "C" void freeClusterManager(void) { * * The score is calculated as follows: * - * SAME_AS_MASTER = 10000 * each slave in the same IP of its master. - * SAME_AS_SLAVE = 1 * each slave having the same IP as another slave + * SAME_AS_MASTER = 10000 * each replica in the same IP of its master. + * SAME_AS_SLAVE = 1 * each replica having the same IP as another replica of the same master. 
* FINAL_SCORE = SAME_AS_MASTER + SAME_AS_SLAVE * diff --git a/src/redis-cli.h b/src/redis-cli.h index 33910c2ce..ed466ecd5 100644 --- a/src/redis-cli.h +++ b/src/redis-cli.h @@ -187,7 +187,7 @@ typedef struct clusterManagerNode { time_t ping_recv; int flags; list *flags_str; /* Flags string representations */ - sds replicate; /* Master ID if node is a slave */ + sds replicate; /* Master ID if node is a replica */ int dirty; /* Node has changes that can be flushed */ uint8_t slots[CLUSTER_MANAGER_SLOTS]; int slots_count; diff --git a/src/redismodule.h b/src/redismodule.h index fae755b0a..656a5afbc 100644 --- a/src/redismodule.h +++ b/src/redismodule.h @@ -71,9 +71,9 @@ extern "C" { #define REDISMODULE_CTX_FLAGS_MULTI (1<<1) /* The instance is a master */ #define REDISMODULE_CTX_FLAGS_MASTER (1<<2) -/* The instance is a slave */ +/* The instance is a replica */ #define REDISMODULE_CTX_FLAGS_SLAVE (1<<3) -/* The instance is read-only (usually meaning it's a slave as well) */ +/* The instance is read-only (usually meaning it's a replica as well) */ #define REDISMODULE_CTX_FLAGS_READONLY (1<<4) /* The instance is running in cluster mode */ #define REDISMODULE_CTX_FLAGS_CLUSTER (1<<5) diff --git a/src/replication.cpp b/src/replication.cpp index 447de4188..eb8dec503 100644 --- a/src/replication.cpp +++ b/src/replication.cpp @@ -46,13 +46,13 @@ void replicationDiscardCachedMaster(redisMaster *mi); void replicationResurrectCachedMaster(redisMaster *mi, int newfd); void replicationSendAck(redisMaster *mi); -void putSlaveOnline(client *slave); +void putSlaveOnline(client *replica); int cancelReplicationHandshake(redisMaster *mi); /* --------------------------- Utility functions ---------------------------- */ -/* Return the pointer to a string representing the slave ip:listening_port - * pair. Mostly useful for logging, since we want to log a slave using its +/* Return the pointer to a string representing the replica ip:listening_port + * pair. Mostly useful for logging, since we want to log a replica using its * IP address and its listening port which is more clear for the user, for * example: "Closing connection with replica 10.1.2.3:6380". */ char *replicationGetSlaveName(client *c) { @@ -231,12 +231,12 @@ void feedReplicationBacklogWithObject(robj *o) { feedReplicationBacklog(p,len); } -void replicationFeedSlave(client *slave, int dictid, robj **argv, int argc, bool fSendRaw) +void replicationFeedSlave(client *replica, int dictid, robj **argv, int argc, bool fSendRaw) { char llstr[LONG_STR_SIZE]; - std::unique_locklock)> lock(slave->lock); + std::unique_locklock)> lock(replica->lock); - /* Send SELECT command to every slave if needed. */ + /* Send SELECT command to every replica if needed. */ if (g_pserver->replicaseldb != dictid) { robj *selectcmd; @@ -258,7 +258,7 @@ void replicationFeedSlave(client *slave, int dictid, robj **argv, int argc, bool if (g_pserver->repl_backlog && fSendRaw) feedReplicationBacklogWithObject(selectcmd); /* Send it to slaves */ - addReply(slave,selectcmd); + addReply(replica,selectcmd); if (dictid < 0 || dictid >= PROTO_SHARED_SELECT_CMDS) decrRefCount(selectcmd); @@ -270,18 +270,18 @@ void replicationFeedSlave(client *slave, int dictid, robj **argv, int argc, bool * or are already in sync with the master. */ /* Add the multi bulk length. */ - addReplyArrayLen(slave,argc); + addReplyArrayLen(replica,argc); /* Finally any additional argument that was not stored inside the * static buffer if any (from j to argc). 
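replicationFeedSlave() as shown above first re-emits a SELECT whenever the target database differs from the last one sent to the replicas, then appends the command itself as a RESP array. A compact model of that stream building, where `replica_seldb` plays the role of g_pserver->replicaseldb:

    #include <string>
    #include <vector>

    static std::string resp_array(const std::vector<std::string> &argv) {
        std::string out = "*" + std::to_string(argv.size()) + "\r\n";
        for (const auto &a : argv)
            out += "$" + std::to_string(a.size()) + "\r\n" + a + "\r\n";
        return out;
    }

    static std::string feed_replica(int &replica_seldb, int dictid,
                                    const std::vector<std::string> &argv) {
        std::string stream;
        if (replica_seldb != dictid) {                                // re-emit SELECT so the
            stream += resp_array({"SELECT", std::to_string(dictid)}); // diffs target the right DB
            replica_seldb = dictid;
        }
        stream += resp_array(argv);                                   // the write command itself
        return stream;
    }

For example, feed_replica(seldb, 0, {"SET", "key", "val"}) with seldb initially -1 produces a SELECT 0 array followed by the SET array, which matches the behaviour described above when replicaseldb is forced to -1 after a full resync.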
*/ for (int j = 0; j < argc; j++) - addReplyBulk(slave,argv[j]); + addReplyBulk(replica,argv[j]); } /* Propagate write commands to slaves, and populate the replication backlog * as well. This function is used if the instance is a master: we use * the commands received by our clients in order to create the replication - * stream. Instead if the instance is a slave and has sub-slaves attached, + * stream. Instead if the instance is a replica and has sub-slaves attached, * we use replicationFeedSlavesFromMaster() */ void replicationFeedSlaves(list *slaves, int dictid, robj **argv, int argc) { listNode *ln, *lnReply; @@ -293,7 +293,7 @@ void replicationFeedSlaves(list *slaves, int dictid, robj **argv, int argc) { /* If the instance is not a top level master, return ASAP: we'll just proxy * the stream of data we receive from our master instead, in order to - * propagate *identical* replication stream. In this way this slave can + * propagate *identical* replication stream. In this way this replica can * advertise the same replication ID as the master (since it shares the * master replication history and has the same backlog and offsets). */ if (!g_pserver->fActiveReplica && listLength(g_pserver->masters)) return; @@ -377,34 +377,34 @@ void replicationFeedSlaves(list *slaves, int dictid, robj **argv, int argc) { } } - /* Write the command to every slave. */ + /* Write the command to every replica. */ listRewind(slaves,&li); while((ln = listNext(&li))) { - client *slave = (client*)ln->value; + client *replica = (client*)ln->value; /* Don't feed slaves that are still waiting for BGSAVE to start */ - if (slave->replstate == SLAVE_STATE_WAIT_BGSAVE_START) continue; - std::unique_locklock)> lock(slave->lock); - if (serverTL->current_client && FSameHost(serverTL->current_client, slave)) + if (replica->replstate == SLAVE_STATE_WAIT_BGSAVE_START) continue; + std::unique_locklock)> lock(replica->lock); + if (serverTL->current_client && FSameHost(serverTL->current_client, replica)) { - slave->reploff_skipped += g_pserver->master_repl_offset - master_repl_offset_start; + replica->reploff_skipped += g_pserver->master_repl_offset - master_repl_offset_start; continue; } if (!fSendRaw) - addReplyProtoAsync(slave, proto, cchProto); + addReplyProtoAsync(replica, proto, cchProto); - addReplyProtoAsync(slave,fake->buf,fake->bufpos); + addReplyProtoAsync(replica,fake->buf,fake->bufpos); listRewind(fake->reply, &liReply); while ((lnReply = listNext(&liReply))) { clientReplyBlock* reply = (clientReplyBlock*)listNodeValue(lnReply); - addReplyProtoAsync(slave, reply->buf(), reply->used); + addReplyProtoAsync(replica, reply->buf(), reply->used); } if (!fSendRaw) { - addReplyAsync(slave,shared.crlf); - addReplyProtoAsync(slave, szDbNum, cchDbNum); + addReplyAsync(replica,shared.crlf); + addReplyProtoAsync(replica, szDbNum, cchDbNum); } } @@ -432,15 +432,15 @@ void replicationFeedSlavesFromMasterStream(list *slaves, char *buf, size_t bufle listRewind(slaves,&li); while((ln = listNext(&li))) { - client *slave = (client*)ln->value; - std::lock_guardlock)> ulock(slave->lock); - if (FMasterHost(slave)) + client *replica = (client*)ln->value; + std::lock_guardlock)> ulock(replica->lock); + if (FMasterHost(replica)) continue; // Active Active case, don't feed back /* Don't feed slaves that are still waiting for BGSAVE to start */ - if (slave->replstate == SLAVE_STATE_WAIT_BGSAVE_START) continue; + if (replica->replstate == SLAVE_STATE_WAIT_BGSAVE_START) continue; - addReplyProtoAsync(slave,buf,buflen); + 
addReplyProtoAsync(replica,buf,buflen); } if (listLength(slaves)) @@ -488,7 +488,7 @@ void replicationFeedMonitors(client *c, list *monitors, int dictid, robj **argv, decrRefCount(cmdobj); } -/* Feed the slave 'c' with the replication backlog starting from the +/* Feed the replica 'c' with the replication backlog starting from the * specified 'offset' up to the end of the backlog. */ long long addReplyReplicationBacklog(client *c, long long offset) { long long j, skip, len; @@ -523,7 +523,7 @@ long long addReplyReplicationBacklog(client *c, long long offset) { /* Discard the amount of data to seek to the specified 'offset'. */ j = (j + skip) % g_pserver->repl_backlog_size; - /* Feed slave with data. Since it is a circular buffer we have to + /* Feed replica with data. Since it is a circular buffer we have to * split the reply in two parts if we are cross-boundary. */ len = g_pserver->repl_backlog_histlen - skip; serverLog(LL_DEBUG, "[PSYNC] Reply total length: %lld", len); @@ -541,7 +541,7 @@ long long addReplyReplicationBacklog(client *c, long long offset) { } /* Return the offset to provide as reply to the PSYNC command received - * from the slave. The returned value is only valid immediately after + * from the replica. The returned value is only valid immediately after * the BGSAVE process started and before executing any other command * from clients. */ long long getPsyncInitialOffset(void) { @@ -549,39 +549,39 @@ long long getPsyncInitialOffset(void) { } /* Send a FULLRESYNC reply in the specific case of a full resynchronization, - * as a side effect setup the slave for a full sync in different ways: + * as a side effect setup the replica for a full sync in different ways: * - * 1) Remember, into the slave client structure, the replication offset + * 1) Remember, into the replica client structure, the replication offset * we sent here, so that if new slaves will later attach to the same * background RDB saving process (by duplicating this client output - * buffer), we can get the right offset from this slave. - * 2) Set the replication state of the slave to WAIT_BGSAVE_END so that + * buffer), we can get the right offset from this replica. + * 2) Set the replication state of the replica to WAIT_BGSAVE_END so that * we start accumulating differences from this point. * 3) Force the replication stream to re-emit a SELECT statement so - * the new slave incremental differences will start selecting the + * the new replica incremental differences will start selecting the * right database number. * * Normally this function should be called immediately after a successful * BGSAVE for replication was started, or when there is one already in - * progress that we attached our slave to. */ -int replicationSetupSlaveForFullResync(client *slave, long long offset) { + * progress that we attached our replica to. */ +int replicationSetupSlaveForFullResync(client *replica, long long offset) { char buf[128]; int buflen; - slave->psync_initial_offset = offset; - slave->replstate = SLAVE_STATE_WAIT_BGSAVE_END; + replica->psync_initial_offset = offset; + replica->replstate = SLAVE_STATE_WAIT_BGSAVE_END; /* We are going to accumulate the incremental changes for this - * slave as well. Set replicaseldb to -1 in order to force to re-emit + * replica as well. Set replicaseldb to -1 in order to force to re-emit * a SELECT statement in the replication stream. */ g_pserver->replicaseldb = -1; /* Don't send this reply to slaves that approached us with * the old SYNC command. 
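addReplyReplicationBacklog() above feeds a replica from the circular backlog, so a read that starts near the physical end of the buffer is split into two copies. A sketch of that seek-and-copy, assuming the caller has already verified that the requested offset lies inside the backlog window (as masterTryPartialResynchronization() does); `repl_backlog_view` is an illustrative reduction of the backlog fields:

    #include <algorithm>
    #include <cstddef>
    #include <string>

    struct repl_backlog_view {
        const char *buf;
        size_t size;     // allocated size of the circular buffer
        size_t idx;      // where the next byte would be written
        size_t histlen;  // how much history it actually holds (<= size)
        long long off;   // master offset of the first available byte
    };

    static std::string read_backlog_from(const repl_backlog_view &b, long long psync_offset) {
        long long skip = psync_offset - b.off;               // bytes the replica already has
        size_t j = (b.idx + (b.size - b.histlen)) % b.size;  // position of the oldest byte
        j = (j + static_cast<size_t>(skip)) % b.size;        // seek to the requested offset
        size_t len = b.histlen - static_cast<size_t>(skip);
        std::string out;
        while (len) {
            size_t thislen = std::min(b.size - j, len);      // up to the physical end of the buffer
            out.append(b.buf + j, thislen);
            len -= thislen;
            j = 0;                                           // second chunk, if any, wraps to the start
        }
        return out;
    }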
*/ - if (!(slave->flags & CLIENT_PRE_PSYNC)) { + if (!(replica->flags & CLIENT_PRE_PSYNC)) { buflen = snprintf(buf,sizeof(buf),"+FULLRESYNC %s %lld\r\n", g_pserver->replid,offset); - if (write(slave->fd,buf,buflen) != buflen) { - freeClientAsync(slave); + if (write(replica->fd,buf,buflen) != buflen) { + freeClientAsync(replica); return C_ERR; } } @@ -600,14 +600,14 @@ int masterTryPartialResynchronization(client *c) { char buf[128]; int buflen; - /* Parse the replication offset asked by the slave. Go to full sync + /* Parse the replication offset asked by the replica. Go to full sync * on parse error: this should never happen but we try to handle * it in a robust way compared to aborting. */ if (getLongLongFromObjectOrReply(c,c->argv[2],&psync_offset,NULL) != C_OK) goto need_full_resync; /* Is the replication ID of this master the same advertised by the wannabe - * slave via PSYNC? If the replication ID changed this master has a + * replica via PSYNC? If the replication ID changed this master has a * different replication history, and there is no way to continue. * * Note that there are two potentially valid replication IDs: the ID1 @@ -637,7 +637,7 @@ int masterTryPartialResynchronization(client *c) { goto need_full_resync; } - /* We still have the data our slave is asking for? */ + /* We still have the data our replica is asking for? */ if (!g_pserver->repl_backlog || psync_offset < g_pserver->repl_backlog_off || psync_offset > (g_pserver->repl_backlog_off + g_pserver->repl_backlog_histlen)) @@ -652,9 +652,9 @@ int masterTryPartialResynchronization(client *c) { } /* If we reached this point, we are able to perform a partial resync: - * 1) Set client state to make it a slave. + * 1) Set client state to make it a replica. * 2) Inform the client we can continue with +CONTINUE - * 3) Send the backlog data (from the offset to the end) to the slave. */ + * 3) Send the backlog data (from the offset to the end) to the replica. */ c->flags |= CLIENT_SLAVE; c->replstate = SLAVE_STATE_ONLINE; c->repl_ack_time = g_pserver->unixtime; @@ -681,7 +681,7 @@ int masterTryPartialResynchronization(client *c) { replicationGetSlaveName(c), psync_len, psync_offset); /* Note that we don't need to set the selected DB at g_pserver->replicaseldb - * to -1 to force the master to emit SELECT, since the slave already + * to -1 to force the master to emit SELECT, since the replica already * has this state from the previous connection with the master. */ refreshGoodSlavesCount(); @@ -700,7 +700,7 @@ need_full_resync: * the script cache is flushed before to start. * * The mincapa argument is the bitwise AND among all the slaves capabilities - * of the slaves waiting for this BGSAVE, so represents the slave capabilities + * of the slaves waiting for this BGSAVE, so represents the replica capabilities * all the slaves support. Can be tested via SLAVE_CAPA_* macros. * * Side effects, other than starting a BGSAVE: @@ -726,7 +726,7 @@ int startBgsaveForReplication(int mincapa) { rdbSaveInfo rsi, *rsiptr; rsiptr = rdbPopulateSaveInfo(&rsi); /* Only do rdbSave* when rsiptr is not NULL, - * otherwise slave will miss repl-stream-db. */ + * otherwise replica will miss repl-stream-db. 
*/ if (rsiptr) { if (socket_target) retval = rdbSaveToSlavesSockets(rsiptr); @@ -744,16 +744,16 @@ int startBgsaveForReplication(int mincapa) { serverLog(LL_WARNING,"BGSAVE for replication failed"); listRewind(g_pserver->slaves,&li); while((ln = listNext(&li))) { - client *slave = (client*)ln->value; - std::unique_locklock)> lock(slave->lock); + client *replica = (client*)ln->value; + std::unique_locklock)> lock(replica->lock); - if (slave->replstate == SLAVE_STATE_WAIT_BGSAVE_START) { - slave->replstate = REPL_STATE_NONE; - slave->flags &= ~CLIENT_SLAVE; + if (replica->replstate == SLAVE_STATE_WAIT_BGSAVE_START) { + replica->replstate = REPL_STATE_NONE; + replica->flags &= ~CLIENT_SLAVE; listDelNode(g_pserver->slaves,ln); - addReplyError(slave, + addReplyError(replica, "BGSAVE failed, replication can't continue"); - slave->flags |= CLIENT_CLOSE_AFTER_REPLY; + replica->flags |= CLIENT_CLOSE_AFTER_REPLY; } } return retval; @@ -764,17 +764,17 @@ int startBgsaveForReplication(int mincapa) { if (!socket_target) { listRewind(g_pserver->slaves,&li); while((ln = listNext(&li))) { - client *slave = (client*)ln->value; - std::unique_locklock)> lock(slave->lock); + client *replica = (client*)ln->value; + std::unique_locklock)> lock(replica->lock); - if (slave->replstate == SLAVE_STATE_WAIT_BGSAVE_START) { - replicationSetupSlaveForFullResync(slave, + if (replica->replstate == SLAVE_STATE_WAIT_BGSAVE_START) { + replicationSetupSlaveForFullResync(replica, getPsyncInitialOffset()); } } } - /* Flush the script cache, since we need that slave differences are + /* Flush the script cache, since we need that replica differences are * accumulated without requiring slaves to match our cached scripts. */ if (retval == C_OK) replicationScriptCacheFlush(); return retval; @@ -782,10 +782,10 @@ int startBgsaveForReplication(int mincapa) { /* SYNC and PSYNC command implemenation. */ void syncCommand(client *c) { - /* ignore SYNC if already slave or in monitor mode */ + /* ignore SYNC if already replica or in monitor mode */ if (c->flags & CLIENT_SLAVE) return; - /* Refuse SYNC requests if we are a slave but the link with our master + /* Refuse SYNC requests if we are a replica but the link with our master * is not ok... */ if (!g_pserver->fActiveReplica) { if (FAnyDisconnectedMasters()) { @@ -813,7 +813,7 @@ void syncCommand(client *c) { * * +FULLRESYNC * - * So the slave knows the new replid and offset to try a PSYNC later + * So the replica knows the new replid and offset to try a PSYNC later * if the connection with the master is lost. */ if (!strcasecmp((const char*)ptrFromObj(c->argv[0]),"psync")) { if (masterTryPartialResynchronization(c) == C_OK) { @@ -829,8 +829,8 @@ void syncCommand(client *c) { if (master_replid[0] != '?') g_pserver->stat_sync_partial_err++; } } else { - /* If a slave uses SYNC, we are dealing with an old implementation - * of the replication protocol (like keydb-cli --slave). Flag the client + /* If a replica uses SYNC, we are dealing with an old implementation + * of the replication protocol (like keydb-cli --replica). Flag the client * so that we don't expect to receive REPLCONF ACK feedbacks. */ c->flags |= CLIENT_PRE_PSYNC; } @@ -838,8 +838,8 @@ void syncCommand(client *c) { /* Full resynchronization. */ g_pserver->stat_sync_full++; - /* Setup the slave as one waiting for BGSAVE to start. The following code - * paths will change the state if we handle the slave differently. */ + /* Setup the replica as one waiting for BGSAVE to start. 
The following code + * paths will change the state if we handle the replica differently. */ c->replstate = SLAVE_STATE_WAIT_BGSAVE_START; if (g_pserver->repl_disable_tcp_nodelay) anetDisableTcpNoDelay(NULL, c->fd); /* Non critical if it fails. */ @@ -862,25 +862,25 @@ void syncCommand(client *c) { g_pserver->rdb_child_type == RDB_CHILD_TYPE_DISK) { /* Ok a background save is in progress. Let's check if it is a good - * one for replication, i.e. if there is another slave that is + * one for replication, i.e. if there is another replica that is * registering differences since the server forked to save. */ - client *slave; + client *replica; listNode *ln; listIter li; listRewind(g_pserver->slaves,&li); while((ln = listNext(&li))) { - slave = (client*)ln->value; - if (slave->replstate == SLAVE_STATE_WAIT_BGSAVE_END) break; + replica = (client*)ln->value; + if (replica->replstate == SLAVE_STATE_WAIT_BGSAVE_END) break; } - /* To attach this slave, we check that it has at least all the - * capabilities of the slave that triggered the current BGSAVE. */ - if (ln && ((c->slave_capa & slave->slave_capa) == slave->slave_capa)) { + /* To attach this replica, we check that it has at least all the + * capabilities of the replica that triggered the current BGSAVE. */ + if (ln && ((c->slave_capa & replica->slave_capa) == replica->slave_capa)) { /* Perfect, the server is already registering differences for - * another slave. Set the right state, and copy the buffer. */ - copyClientOutputBuffer(c,slave); - replicationSetupSlaveForFullResync(c,slave->psync_initial_offset); + * another replica. Set the right state, and copy the buffer. */ + copyClientOutputBuffer(c,replica); + replicationSetupSlaveForFullResync(c,replica->psync_initial_offset); serverLog(LL_NOTICE,"Waiting for end of BGSAVE for SYNC"); } else { /* No way, we need to wait for the next BGSAVE in order to @@ -906,7 +906,7 @@ void syncCommand(client *c) { if (g_pserver->repl_diskless_sync_delay) serverLog(LL_NOTICE,"Delay next BGSAVE for diskless SYNC"); } else { - /* Target is disk (or the slave is not capable of supporting + /* Target is disk (or the replica is not capable of supporting * diskless replication) and we don't have a BGSAVE in progress, * let's start one. */ if (g_pserver->aof_child_pid == -1) { @@ -948,7 +948,7 @@ LError: } /* REPLCONF