Add fence barriers for the repl backlog (important for AARCH64 and other weak memory models)
This commit (8405c99017) is contained in the repository; its parent commit is 027ad50581.
@@ -1810,6 +1810,8 @@ int writeToClient(client *c, int handler_installed) {
     is a replica, so only attempt to do so if that's the case. */
     if (c->flags & CLIENT_SLAVE && !(c->flags & CLIENT_MONITOR) && c->replstate == SLAVE_STATE_ONLINE) {
         std::unique_lock<fastlock> repl_backlog_lock (g_pserver->repl_backlog_lock);
+        // Ensure all writes to the repl backlog are visible
+        std::atomic_thread_fence(std::memory_order_acquire);

         while (clientHasPendingReplies(c)) {
             long long repl_end_idx = getReplIndexFromOffset(c->repl_end_off);
@@ -5599,6 +5599,9 @@ void flushReplBacklogToClients()
     serverAssert(g_pserver->master_repl_offset - g_pserver->repl_batch_offStart <= g_pserver->repl_backlog_size);
     serverAssert(g_pserver->repl_batch_idxStart != g_pserver->repl_backlog_idx);

+    // Repl backlog writes must become visible to all threads at this point
+    std::atomic_thread_fence(std::memory_order_release);
+
     listIter li;
     listNode *ln;
     listRewind(g_pserver->slaves, &li);
Loading…
x
Reference in New Issue
Block a user