bugfix: del keys in slot replicate to replicas, and trigger other invalidations (#11084)
Bugfix: when a slot is forcibly assigned to another master, the old master loses ownership of the slot and calls delKeysInSlot() to delete all keys in that slot. These deletions must be replicated to the replicas, otherwise the data on the master and its replicas diverges. Additionally, for each key deleted this way we now call:
* signalModifiedKey (to invalidate WATCH)
* moduleNotifyKeyspaceEvent (keyspace notification for modules)
* dirty++ (to signal that the persistence file may be outdated)
Co-authored-by: weimeng <weimeng@didiglobal.com>
Co-authored-by: Madelyn Olson <madelyneolson@gmail.com>
parent bc7fe41e58
commit 8945067544
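The moduleNotifyKeyspaceEvent() call added in this commit means modules subscribed to generic keyspace events now also observe these slot-cleanup deletes. Below is a minimal consumer-side sketch using the public Redis modules API; the module name "slotwatch" and the logging callback are illustrative, not part of this commit.

#include <string.h>
#include "redismodule.h"

/* Fires for every NOTIFY_GENERIC event; after this commit that includes
 * the "del" events emitted while a demoted master empties a slot. */
static int OnKeyspaceEvent(RedisModuleCtx *ctx, int type, const char *event,
                           RedisModuleString *key) {
    REDISMODULE_NOT_USED(type);
    if (strcmp(event, "del") == 0) {
        RedisModule_Log(ctx, "notice", "key %s deleted",
                        RedisModule_StringPtrLen(key, NULL));
    }
    return REDISMODULE_OK;
}

int RedisModule_OnLoad(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
    REDISMODULE_NOT_USED(argv);
    REDISMODULE_NOT_USED(argc);
    if (RedisModule_Init(ctx, "slotwatch", 1, REDISMODULE_APIVER_1) == REDISMODULE_ERR)
        return REDISMODULE_ERR;
    /* REDISMODULE_NOTIFY_GENERIC covers DEL-class events such as "del". */
    return RedisModule_SubscribeToKeyspaceEvents(ctx, REDISMODULE_NOTIFY_GENERIC,
                                                 OnKeyspaceEvent);
}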
@@ -7077,8 +7077,13 @@ unsigned int delKeysInSlot(unsigned int hashslot) {
         de = dictEntryNextInSlot(de);
         robj *key = createStringObject(sdskey, sdslen(sdskey));
         dbDelete(&server.db[0], key);
+        propagateDeletion(&server.db[0], key, server.lazyfree_lazy_server_del);
+        propagatePendingCommands();
+        signalModifiedKey(NULL, &server.db[0], key);
+        moduleNotifyKeyspaceEvent(NOTIFY_GENERIC, "del", key, server.db[0].id);
         decrRefCount(key);
         j++;
+        server.dirty++;
     }
     return j;
 }
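For readability, here is roughly how the full loop reads with the additions in place. This is a sketch assembled from the hunk's context lines; firstEntryInSlot() is a hypothetical stand-in for however the loop obtains the slot's first entry, which lies outside the hunk.

unsigned int delKeysInSlot(unsigned int hashslot) {
    unsigned int j = 0;
    dictEntry *de = firstEntryInSlot(hashslot); /* hypothetical accessor, not in the hunk */
    while (de != NULL) {
        sds sdskey = dictGetKey(de);
        de = dictEntryNextInSlot(de);   /* advance before the entry is freed */
        robj *key = createStringObject(sdskey, sdslen(sdskey));
        dbDelete(&server.db[0], key);
        /* Added: replicate the delete (DEL, or UNLINK when
         * lazyfree-lazy-server-del is set) and flush it out immediately. */
        propagateDeletion(&server.db[0], key, server.lazyfree_lazy_server_del);
        propagatePendingCommands();
        /* Added: invalidate WATCHed keys and notify modules of the deletion. */
        signalModifiedKey(NULL, &server.db[0], key);
        moduleNotifyKeyspaceEvent(NOTIFY_GENERIC, "del", key, server.db[0].id);
        decrRefCount(key);
        j++;
        server.dirty++;                 /* Added: persistence file is now outdated */
    }
    return j;
}

Note that propagatePendingCommands() is invoked per key: delKeysInSlot() runs outside a normal command context, so nothing else would flush the pending propagation buffer to the replication stream.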
@@ -100,6 +100,7 @@ set ::all_tests {
     unit/cluster/scripting
     unit/cluster/hostnames
     unit/cluster/multi-slot-operations
+    unit/cluster/slot-ownership
 }
 # Index to the next test to run in the ::all_tests list.
 set ::next_test 0
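Assuming the standard Redis test runner, registering the file in ::all_tests above means the new test can presumably also be run on its own with ./runtest --single unit/cluster/slot-ownership.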
tests/unit/cluster/slot-ownership.tcl (new file, 61 lines)
@@ -0,0 +1,61 @@
+start_cluster 2 2 {tags {external:skip cluster}} {
+
+    test "Verify that slot ownership transfer through gossip propagates deletes to replicas" {
+        assert {[s -2 role] eq {slave}}
+        wait_for_condition 1000 50 {
+            [s -2 master_link_status] eq {up}
+        } else {
+            fail "Instance #2 master link status is not up"
+        }
+
+        assert {[s -3 role] eq {slave}}
+        wait_for_condition 1000 50 {
+            [s -3 master_link_status] eq {up}
+        } else {
+            fail "Instance #3 master link status is not up"
+        }
+
+        # Set a single key that will be used to test deletion
+        set key "FOO"
+        R 0 SET $key TEST
+        set key_slot [R 0 cluster keyslot $key]
+        set slot_keys_num [R 0 cluster countkeysinslot $key_slot]
+        assert {$slot_keys_num > 0}
+
+        # Wait for replica to have the key
+        R 2 readonly
+        wait_for_condition 1000 50 {
+            [R 2 exists $key] eq "1"
+        } else {
+            fail "Test key was not replicated"
+        }
+
+        assert_equal [R 2 cluster countkeysinslot $key_slot] $slot_keys_num
+
+        # Assert other shards in the cluster don't have the key
+        assert_equal [R 1 cluster countkeysinslot $key_slot] "0"
+        assert_equal [R 3 cluster countkeysinslot $key_slot] "0"
+
+        set nodeid [R 1 cluster myid]
+
+        R 1 cluster bumpepoch
+        # Move $key_slot to node 1
+        assert_equal [R 1 cluster setslot $key_slot node $nodeid] "OK"
+
+        wait_for_cluster_propagation
+
+        # src master will delete keys in the slot
+        wait_for_condition 50 100 {
+            [R 0 cluster countkeysinslot $key_slot] eq 0
+        } else {
+            fail "master 'countkeysinslot $key_slot' did not eq 0"
+        }
+
+        # src replica will delete keys in the slot
+        wait_for_condition 50 100 {
+            [R 2 cluster countkeysinslot $key_slot] eq 0
+        } else {
+            fail "replica 'countkeysinslot $key_slot' did not eq 0"
+        }
+    }
+}