Fix cluster test failures from Redis 6 merge

Former-commit-id: fd702e43f19ac2b8097afef84a73c8e71172979b
John Sully 2020-02-03 20:00:09 -05:00
parent 2c899d7219
commit c2eb10c322
3 changed files with 8 additions and 26 deletions


@@ -2182,27 +2182,14 @@ void clusterWriteHandler(connection *conn) {
     clusterLink *link = (clusterLink*)connGetPrivateData(conn);
     ssize_t nwritten;
-    // We're about to release the lock, so the link's sndbuf needs to be owned fully by us
-    // allocate a new one in case anyone tries to write while we're waiting
-    sds sndbuf = link->sndbuf;
-    link->sndbuf = sdsempty();
-    aeReleaseLock();
     nwritten = connWrite(conn, link->sndbuf, sdslen(link->sndbuf));
-    aeAcquireLock();
     if (nwritten <= 0) {
         serverLog(LL_DEBUG,"I/O error writing to node link: %s",
             (nwritten == -1) ? connGetLastError(conn) : "short write");
-        sdsfree(sndbuf);
         handleLinkIOError(link);
         return;
     }
-    sdsrange(sndbuf,nwritten,-1);
-    // Restore our send buffer, ensuring any unsent data is first
-    sndbuf = sdscat(sndbuf, link->sndbuf);
-    sdsfree(link->sndbuf);
-    link->sndbuf = sndbuf;
+    sdsrange(link->sndbuf,nwritten,-1);
     if (sdslen(link->sndbuf) == 0)
         connSetWriteHandler(link->conn, NULL);
 }
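
For readability, the write handler after this change is reconstructed below from the context and surviving lines of the hunk above. This is a sketch, not a verbatim copy of the file, and it assumes the usual Redis/KeyDB cluster declarations (clusterLink, connWrite, handleLinkIOError, and the sds string helpers):

void clusterWriteHandler(connection *conn) {
    clusterLink *link = (clusterLink*)connGetPrivateData(conn);
    ssize_t nwritten;

    /* The aeReleaseLock()/aeAcquireLock() pair and the temporary sndbuf swap
     * are gone, so the handler now writes straight from link->sndbuf while
     * holding the global lock. */
    nwritten = connWrite(conn, link->sndbuf, sdslen(link->sndbuf));
    if (nwritten <= 0) {
        serverLog(LL_DEBUG,"I/O error writing to node link: %s",
            (nwritten == -1) ? connGetLastError(conn) : "short write");
        handleLinkIOError(link);
        return;
    }
    /* Drop the bytes that were written and uninstall the handler once the
     * buffer is fully drained. */
    sdsrange(link->sndbuf,nwritten,-1);
    if (sdslen(link->sndbuf) == 0)
        connSetWriteHandler(link->conn, NULL);
}
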
@@ -2324,7 +2311,11 @@ void clusterReadHandler(connection *conn) {
 void clusterSendMessage(clusterLink *link, unsigned char *msg, size_t msglen) {
     serverAssert(GlobalLocksAcquired());
     if (sdslen(link->sndbuf) == 0 && msglen != 0)
-        connSetWriteHandlerWithBarrier(link->conn, clusterWriteHandler, 1);
+    {
+        aePostFunction(g_pserver->rgthreadvar[IDX_EVENT_LOOP_MAIN].el, [link] {
+            connSetWriteHandlerWithBarrier(link->conn, clusterWriteHandler, 1);
+        });
+    }
     link->sndbuf = sdscatlen(link->sndbuf, msg, msglen);
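
With the added lines read in context, clusterSendMessage no longer registers the write handler inline; it posts the registration to the main event loop with aePostFunction. A sketch of the resulting function is below, assembled from the hunk above; the comment about thread ownership is an inference from the use of IDX_EVENT_LOOP_MAIN, not something the diff itself states:

void clusterSendMessage(clusterLink *link, unsigned char *msg, size_t msglen) {
    serverAssert(GlobalLocksAcquired());
    if (sdslen(link->sndbuf) == 0 && msglen != 0)
    {
        /* Defer installing the write handler to the main event loop, which
         * presumably services this link's connection, rather than touching
         * the connection from whichever thread queued the message. */
        aePostFunction(g_pserver->rgthreadvar[IDX_EVENT_LOOP_MAIN].el, [link] {
            connSetWriteHandlerWithBarrier(link->conn, clusterWriteHandler, 1);
        });
    }
    link->sndbuf = sdscatlen(link->sndbuf, msg, msglen);
    /* ... remainder of the function unchanged by this commit ... */
}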


@@ -15,13 +15,8 @@ To create a cluster, follow these steps:
 1. Edit create-cluster and change the start / end port, depending on the
    number of instances you want to create.
 2. Use "./create-cluster start" in order to run the instances.
-<<<<<<< HEAD
 3. Use "./create-cluster create" in order to execute keydb-cli --cluster create, so that
-   an actual Redis cluster will be created.
-=======
-3. Use "./create-cluster create" in order to execute redis-cli --cluster create, so that
-   an actual Redis cluster will be created. (If you're accessing your setup via a local container, ensure that the CLUSTER_HOST value is changed to your local IP)
->>>>>>> redis/6.0
+   an actual KeyDB cluster will be created. (If you're accessing your setup via a local container, ensure that the CLUSTER_HOST value is changed to your local IP)
 4. Now you are ready to play with the cluster. AOF files and logs for each instances are created in the current directory.
 In order to stop a cluster:


@@ -25,11 +25,7 @@ then
     while [ $((PORT < ENDPORT)) != "0" ]; do
         PORT=$((PORT+1))
         echo "Starting $PORT"
-<<<<<<< HEAD
-        ../../src/keydb-server --port $PORT --cluster-enabled yes --cluster-config-file nodes-${PORT}.conf --cluster-node-timeout $TIMEOUT --appendonly yes --appendfilename appendonly-${PORT}.aof --dbfilename dump-${PORT}.rdb --logfile ${PORT}.log --daemonize yes
-=======
-        ../../src/redis-server --port $PORT --protected-mode $PROTECTED_MODE --cluster-enabled yes --cluster-config-file nodes-${PORT}.conf --cluster-node-timeout $TIMEOUT --appendonly yes --appendfilename appendonly-${PORT}.aof --dbfilename dump-${PORT}.rdb --logfile ${PORT}.log --daemonize yes ${ADDITIONAL_OPTIONS}
->>>>>>> redis/6.0
+        ../../src/keydb-server --port $PORT --protected-mode $PROTECTED_MODE --cluster-enabled yes --cluster-config-file nodes-${PORT}.conf --cluster-node-timeout $TIMEOUT --appendonly yes --appendfilename appendonly-${PORT}.aof --dbfilename dump-${PORT}.rdb --logfile ${PORT}.log --daemonize yes ${ADDITIONAL_OPTIONS}
     done
     exit 0
 fi