Add memory fence barriers for the repl backlog — a release fence after writes in flushReplBacklogToClients, paired with an acquire fence before reads in writeToClient (important for AArch64 and other weak memory models)

This commit is contained in:
John Sully 2022-05-17 03:48:59 +00:00
parent 027ad50581
commit 8405c99017
2 changed files with 5 additions and 0 deletions

View File

@ -1810,6 +1810,8 @@ int writeToClient(client *c, int handler_installed) {
is a replica, so only attempt to do so if that's the case. */
if (c->flags & CLIENT_SLAVE && !(c->flags & CLIENT_MONITOR) && c->replstate == SLAVE_STATE_ONLINE) {
std::unique_lock<fastlock> repl_backlog_lock (g_pserver->repl_backlog_lock);
// Ensure all writes to the repl backlog are visible
std::atomic_thread_fence(std::memory_order_acquire);
while (clientHasPendingReplies(c)) {
long long repl_end_idx = getReplIndexFromOffset(c->repl_end_off);

View File

@ -5599,6 +5599,9 @@ void flushReplBacklogToClients()
serverAssert(g_pserver->master_repl_offset - g_pserver->repl_batch_offStart <= g_pserver->repl_backlog_size);
serverAssert(g_pserver->repl_batch_idxStart != g_pserver->repl_backlog_idx);
// Repl backlog writes must become visible to all threads at this point
std::atomic_thread_fence(std::memory_order_release);
listIter li;
listNode *ln;
listRewind(g_pserver->slaves, &li);