From 8405c99017eb0a819e62559d55e2170fac49b24e Mon Sep 17 00:00:00 2001
From: John Sully
Date: Tue, 17 May 2022 03:48:59 +0000
Subject: [PATCH] Add fence barriers for the repl backlog (important for
 AARCH64 and other weak memory models)

---
 src/networking.cpp  | 2 ++
 src/replication.cpp | 3 +++
 2 files changed, 5 insertions(+)

diff --git a/src/networking.cpp b/src/networking.cpp
index e8e929a20..08eebedba 100644
--- a/src/networking.cpp
+++ b/src/networking.cpp
@@ -1810,6 +1810,8 @@ int writeToClient(client *c, int handler_installed) {
        is a replica, so only attempt to do so if that's the case. */
     if (c->flags & CLIENT_SLAVE && !(c->flags & CLIENT_MONITOR) && c->replstate == SLAVE_STATE_ONLINE) {
         std::unique_lock repl_backlog_lock (g_pserver->repl_backlog_lock);
+        // Ensure all writes to the repl backlog are visible
+        std::atomic_thread_fence(std::memory_order_acquire);
 
         while (clientHasPendingReplies(c)) {
             long long repl_end_idx = getReplIndexFromOffset(c->repl_end_off);
diff --git a/src/replication.cpp b/src/replication.cpp
index fd713e55b..a71c1560f 100644
--- a/src/replication.cpp
+++ b/src/replication.cpp
@@ -5599,6 +5599,9 @@ void flushReplBacklogToClients()
     serverAssert(g_pserver->master_repl_offset - g_pserver->repl_batch_offStart <= g_pserver->repl_backlog_size);
     serverAssert(g_pserver->repl_batch_idxStart != g_pserver->repl_backlog_idx);
 
+    // Repl backlog writes must become visible to all threads at this point
+    std::atomic_thread_fence(std::memory_order_release);
+
     listIter li;
     listNode *ln;
     listRewind(g_pserver->slaves, &li);
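
Note: for readers unfamiliar with fence pairing, below is a minimal standalone
sketch of the release/acquire idiom this patch relies on. It is not KeyDB code;
the names (backlog, idx, producer, consumer) are illustrative, and the sketch
publishes through an atomic index to stay self-contained, whereas the patch
pairs its fences around the existing repl_backlog_lock and offset bookkeeping.
The writer performs plain stores into the buffer, issues a release fence, then
publishes; the reader observes the publication, issues an acquire fence, and
may then safely read the buffer.

    #include <atomic>
    #include <cstdio>
    #include <thread>

    static char backlog[64];            // plain memory, like the repl backlog
    static std::atomic<size_t> idx{0};  // published write position

    void producer() {
        backlog[0] = 'k';               // ordinary (non-atomic) writes...
        backlog[1] = '\0';
        // ...ordered before the publication by the release fence
        std::atomic_thread_fence(std::memory_order_release);
        idx.store(2, std::memory_order_relaxed);  // publish the new index
    }

    void consumer() {
        while (idx.load(std::memory_order_relaxed) == 0) { }  // wait for publication
        // Pairs with the release fence: buffer writes are now visible
        std::atomic_thread_fence(std::memory_order_acquire);
        std::printf("%s\n", backlog);   // safe to read the plain buffer
    }

    int main() {
        std::thread t1(producer), t2(consumer);
        t1.join(); t2.join();
    }

This also illustrates why the subject line calls out weak memory models: on
x86's stronger (TSO) model these fences compile to little more than a compiler
barrier, but on AArch64 they emit real barrier instructions (dmb), without
which the reader could observe the publication before the buffer contents.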