rename procedure redis_deferring_client to valkey_deferring_client (#270)
Updated procedure redis_deferring_client in test environent to valkey_deferring_client. Signed-off-by: Shivshankar-Reddy <shiva.sheri.github@gmail.com>
This commit is contained in:
parent
c0cef48e98
commit
da831c0d22
@ -143,7 +143,7 @@ void reqresReset(client *c, int free_buf) {
|
||||
* Ideally, we would just have this code inside reqresAppendRequest, which is called
|
||||
* from processCommand, but we cannot save the reply offset inside processCommand
|
||||
* because of the following pipe-lining scenario:
|
||||
* set rd [redis_deferring_client]
|
||||
* set rd [valkey_deferring_client]
|
||||
* set buf ""
|
||||
* append buf "SET key vale\r\n"
|
||||
* append buf "BLPOP mylist 0\r\n"
|
||||
|
@ -58,7 +58,7 @@ test "MULTI-EXEC with write operations is MOVED" {
|
||||
}
|
||||
|
||||
test "read-only blocking operations from replica" {
|
||||
set rd [redis_deferring_client redis 1]
|
||||
set rd [valkey_deferring_client redis 1]
|
||||
$rd readonly
|
||||
$rd read
|
||||
$rd XREAD BLOCK 0 STREAMS k 0
|
||||
|
@ -51,7 +51,7 @@ test "Main db not affected when fail to diskless load" {
|
||||
# backlog size is very small, and dumping rdb will cost several seconds.
|
||||
set num 10000
|
||||
set value [string repeat A 1024]
|
||||
set rd [redis_deferring_client redis $master_id]
|
||||
set rd [valkey_deferring_client redis $master_id]
|
||||
for {set j 0} {$j < $num} {incr j} {
|
||||
$rd set $j $value
|
||||
}
|
||||
|
@ -30,7 +30,7 @@ test "Migrate a slot, verify client receives sunsubscribe on primary serving the
|
||||
array set nodefrom [$cluster masternode_for_slot $slot]
|
||||
array set nodeto [$cluster masternode_notfor_slot $slot]
|
||||
|
||||
set subscribeclient [redis_deferring_client_by_addr $nodefrom(host) $nodefrom(port)]
|
||||
set subscribeclient [valkey_deferring_client_by_addr $nodefrom(host) $nodefrom(port)]
|
||||
|
||||
$subscribeclient deferred 1
|
||||
$subscribeclient ssubscribe $channelname
|
||||
@ -64,7 +64,7 @@ test "Client subscribes to multiple channels, migrate a slot, verify client rece
|
||||
array set nodefrom [$cluster masternode_for_slot $slot]
|
||||
array set nodeto [$cluster masternode_notfor_slot $slot]
|
||||
|
||||
set subscribeclient [redis_deferring_client_by_addr $nodefrom(host) $nodefrom(port)]
|
||||
set subscribeclient [valkey_deferring_client_by_addr $nodefrom(host) $nodefrom(port)]
|
||||
|
||||
$subscribeclient deferred 1
|
||||
$subscribeclient ssubscribe $channelname
|
||||
@ -113,7 +113,7 @@ test "Migrate a slot, verify client receives sunsubscribe on replica serving the
|
||||
set replica_addr [get_addr_replica_serving_slot $slot]
|
||||
set replicahost [lindex $replica_addr 0]
|
||||
set replicaport [lindex $replica_addr 1]
|
||||
set subscribeclient [redis_deferring_client_by_addr $replicahost $replicaport]
|
||||
set subscribeclient [valkey_deferring_client_by_addr $replicahost $replicaport]
|
||||
|
||||
$subscribeclient deferred 1
|
||||
$subscribeclient ssubscribe $channelname
|
||||
@ -148,7 +148,7 @@ test "Move a replica to another primary, verify client receives sunsubscribe on
|
||||
set replica_host [lindex $replica_addr 0]
|
||||
set replica_port [lindex $replica_addr 1]
|
||||
set replica_client [redis_client_by_addr $replica_host $replica_port]
|
||||
set subscribeclient [redis_deferring_client_by_addr $replica_host $replica_port]
|
||||
set subscribeclient [valkey_deferring_client_by_addr $replica_host $replica_port]
|
||||
|
||||
$subscribeclient deferred 1
|
||||
$subscribeclient ssubscribe $channelname
|
||||
@ -174,7 +174,7 @@ test "Delete a slot, verify sunsubscribe message" {
|
||||
|
||||
array set primary_client [$cluster masternode_for_slot $slot]
|
||||
|
||||
set subscribeclient [redis_deferring_client_by_addr $primary_client(host) $primary_client(port)]
|
||||
set subscribeclient [valkey_deferring_client_by_addr $primary_client(host) $primary_client(port)]
|
||||
$subscribeclient deferred 1
|
||||
$subscribeclient ssubscribe $channelname
|
||||
$subscribeclient read
|
||||
@ -195,7 +195,7 @@ test "Reset cluster, verify sunsubscribe message" {
|
||||
|
||||
array set primary_client [$cluster masternode_for_slot $slot]
|
||||
|
||||
set subscribeclient [redis_deferring_client_by_addr $primary_client(host) $primary_client(port)]
|
||||
set subscribeclient [valkey_deferring_client_by_addr $primary_client(host) $primary_client(port)]
|
||||
$subscribeclient deferred 1
|
||||
$subscribeclient ssubscribe $channelname
|
||||
$subscribeclient read
|
||||
|
@ -14,9 +14,9 @@ test "Pub/Sub shard basics" {
|
||||
array set notshardnode [$cluster masternode_notfor_slot $slot]
|
||||
|
||||
set publishclient [redis_client_by_addr $publishnode(host) $publishnode(port)]
|
||||
set subscribeclient [redis_deferring_client_by_addr $publishnode(host) $publishnode(port)]
|
||||
set subscribeclient2 [redis_deferring_client_by_addr $publishnode(host) $publishnode(port)]
|
||||
set anotherclient [redis_deferring_client_by_addr $notshardnode(host) $notshardnode(port)]
|
||||
set subscribeclient [valkey_deferring_client_by_addr $publishnode(host) $publishnode(port)]
|
||||
set subscribeclient2 [valkey_deferring_client_by_addr $publishnode(host) $publishnode(port)]
|
||||
set anotherclient [valkey_deferring_client_by_addr $notshardnode(host) $notshardnode(port)]
|
||||
|
||||
$subscribeclient ssubscribe channel.0
|
||||
$subscribeclient read
|
||||
@ -58,7 +58,7 @@ test "client can subscribe to multiple shard channels across different slots in
|
||||
|
||||
test "sunsubscribe without specifying any channel would unsubscribe all shard channels subscribed" {
|
||||
set publishclient [redis_client_by_addr $publishnode(host) $publishnode(port)]
|
||||
set subscribeclient [redis_deferring_client_by_addr $publishnode(host) $publishnode(port)]
|
||||
set subscribeclient [valkey_deferring_client_by_addr $publishnode(host) $publishnode(port)]
|
||||
|
||||
set sub_res [ssubscribe $subscribeclient [list "\{channel.0\}1" "\{channel.0\}2" "\{channel.0\}3"]]
|
||||
assert_equal [list 1 2 3] $sub_res
|
||||
@ -78,9 +78,9 @@ test "Verify Pub/Sub and Pub/Sub shard no overlap" {
|
||||
array set notshardnode [$cluster masternode_notfor_slot $slot]
|
||||
|
||||
set publishshardclient [redis_client_by_addr $publishnode(host) $publishnode(port)]
|
||||
set publishclient [redis_deferring_client_by_addr $publishnode(host) $publishnode(port)]
|
||||
set subscribeshardclient [redis_deferring_client_by_addr $publishnode(host) $publishnode(port)]
|
||||
set subscribeclient [redis_deferring_client_by_addr $publishnode(host) $publishnode(port)]
|
||||
set publishclient [valkey_deferring_client_by_addr $publishnode(host) $publishnode(port)]
|
||||
set subscribeshardclient [valkey_deferring_client_by_addr $publishnode(host) $publishnode(port)]
|
||||
set subscribeclient [valkey_deferring_client_by_addr $publishnode(host) $publishnode(port)]
|
||||
|
||||
$subscribeshardclient deferred 1
|
||||
$subscribeshardclient ssubscribe channel.0
|
||||
@ -109,9 +109,9 @@ test "Verify Pub/Sub and Pub/Sub shard no overlap" {
|
||||
}
|
||||
|
||||
test "PUBSUB channels/shardchannels" {
|
||||
set subscribeclient [redis_deferring_client_by_addr $publishnode(host) $publishnode(port)]
|
||||
set subscribeclient2 [redis_deferring_client_by_addr $publishnode(host) $publishnode(port)]
|
||||
set subscribeclient3 [redis_deferring_client_by_addr $publishnode(host) $publishnode(port)]
|
||||
set subscribeclient [valkey_deferring_client_by_addr $publishnode(host) $publishnode(port)]
|
||||
set subscribeclient2 [valkey_deferring_client_by_addr $publishnode(host) $publishnode(port)]
|
||||
set subscribeclient3 [valkey_deferring_client_by_addr $publishnode(host) $publishnode(port)]
|
||||
set publishclient [redis_client_by_addr $publishnode(host) $publishnode(port)]
|
||||
|
||||
ssubscribe $subscribeclient [list "\{channel.0\}1"]
|
||||
|
@ -717,14 +717,14 @@ proc restart_instance {type id} {
|
||||
}
|
||||
}
|
||||
|
||||
proc redis_deferring_client {type id} {
|
||||
proc valkey_deferring_client {type id} {
|
||||
set port [get_instance_attrib $type $id port]
|
||||
set host [get_instance_attrib $type $id host]
|
||||
set client [redis $host $port 1 $::tls]
|
||||
return $client
|
||||
}
|
||||
|
||||
proc redis_deferring_client_by_addr {host port} {
|
||||
proc valkey_deferring_client_by_addr {host port} {
|
||||
set client [redis $host $port 1 $::tls]
|
||||
return $client
|
||||
}
|
||||
|
@ -210,7 +210,7 @@ tags {"aof external:skip"} {
|
||||
|
||||
start_server {overrides {appendonly {yes} appendfsync always}} {
|
||||
test {AOF fsync always barrier issue} {
|
||||
set rd [redis_deferring_client]
|
||||
set rd [valkey_deferring_client]
|
||||
# Set a sleep when aof is flushed, so that we have a chance to look
|
||||
# at the aof size and detect if the response of an incr command
|
||||
# arrives before the data was written (and hopefully fsynced)
|
||||
@ -438,7 +438,7 @@ tags {"aof external:skip"} {
|
||||
append_to_aof [formatCommand select 9]
|
||||
append_to_aof [formatCommand eval {redis.call('set',KEYS[1],'y'); for i=1,1500000 do redis.call('ping') end return 'ok'} 1 x]
|
||||
}
|
||||
set rd [redis_deferring_client]
|
||||
set rd [valkey_deferring_client]
|
||||
$rd debug loadaof
|
||||
$rd flush
|
||||
wait_for_condition 100 10 {
|
||||
|
@ -14,7 +14,7 @@ if { ! [ catch {
|
||||
}
|
||||
|
||||
proc generate_collections {suffix elements} {
|
||||
set rd [redis_deferring_client]
|
||||
set rd [valkey_deferring_client]
|
||||
for {set j 0} {$j < $elements} {incr j} {
|
||||
# add both string values and integers
|
||||
if {$j % 2 == 0} {set val $j} else {set val "_$j"}
|
||||
|
@ -61,7 +61,7 @@ start_server {tags {"dismiss external:skip"}} {
|
||||
for {set i 0} {$i < 100} {incr i} {
|
||||
r lpush mylist $item
|
||||
}
|
||||
set rd [redis_deferring_client]
|
||||
set rd [valkey_deferring_client]
|
||||
$rd lrange mylist 0 -1
|
||||
$rd flush
|
||||
after 100
|
||||
@ -74,7 +74,7 @@ start_server {tags {"dismiss external:skip"}} {
|
||||
test {dismiss client query buffer} {
|
||||
# Big pending query buffer
|
||||
set bigstr [string repeat A 8192]
|
||||
set rd [redis_deferring_client]
|
||||
set rd [valkey_deferring_client]
|
||||
$rd write "*2\r\n\$8192\r\n"
|
||||
$rd write $bigstr\r\n
|
||||
$rd flush
|
||||
|
@ -256,7 +256,7 @@ start_server {overrides {save {}}} {
|
||||
# We pause the target long enough to send a write command
|
||||
# during the pause. This write will not be interrupted.
|
||||
pause_process [srv -1 pid]
|
||||
set rd [redis_deferring_client]
|
||||
set rd [valkey_deferring_client]
|
||||
$rd SET FOO BAR
|
||||
$node_0 failover to $node_1_host $node_1_port
|
||||
resume_process [srv -1 pid]
|
||||
|
@ -187,7 +187,7 @@ test {client freed during loading} {
|
||||
# connect and disconnect 5 clients
|
||||
set clients {}
|
||||
for {set j 0} {$j < 5} {incr j} {
|
||||
lappend clients [redis_deferring_client]
|
||||
lappend clients [valkey_deferring_client]
|
||||
}
|
||||
foreach rd $clients {
|
||||
$rd debug log bla
|
||||
@ -262,7 +262,7 @@ start_server {overrides {save ""}} {
|
||||
# populate the db with 10k keys of 512B each (since we want to measure the COW size by
|
||||
# changing some keys and read the reported COW size, we are using small key size to prevent from
|
||||
# the "dismiss mechanism" free memory and reduce the COW size)
|
||||
set rd [redis_deferring_client 0]
|
||||
set rd [valkey_deferring_client 0]
|
||||
set size 500 ;# aim for the 512 bin (sds overhead)
|
||||
set cmd_count 10000
|
||||
for {set k 0} {$k < $cmd_count} {incr k} {
|
||||
|
@ -294,7 +294,7 @@ test {Replica client-output-buffer size is limited to backlog_limit/16 when no r
|
||||
|
||||
# Before this fix (#11905), the test would trigger an assertion in 'o->used >= c->ref_block_pos'
|
||||
test {The update of replBufBlock's repl_offset is ok - Regression test for #11666} {
|
||||
set rd [redis_deferring_client]
|
||||
set rd [valkey_deferring_client]
|
||||
set replid [status $master master_replid]
|
||||
set offset [status $master repl_backlog_first_byte_offset]
|
||||
$rd psync $replid $offset
|
||||
|
@ -108,7 +108,7 @@ start_server {tags {"repl external:skip"}} {
|
||||
|
||||
test {BRPOPLPUSH replication, when blocking against empty list} {
|
||||
$A config resetstat
|
||||
set rd [redis_deferring_client]
|
||||
set rd [valkey_deferring_client]
|
||||
$rd brpoplpush a b 5
|
||||
r lpush a foo
|
||||
wait_for_condition 50 100 {
|
||||
@ -122,7 +122,7 @@ start_server {tags {"repl external:skip"}} {
|
||||
|
||||
test {BRPOPLPUSH replication, list exists} {
|
||||
$A config resetstat
|
||||
set rd [redis_deferring_client]
|
||||
set rd [valkey_deferring_client]
|
||||
r lpush c 1
|
||||
r lpush c 2
|
||||
r lpush c 3
|
||||
@ -137,7 +137,7 @@ start_server {tags {"repl external:skip"}} {
|
||||
foreach whereto {left right} {
|
||||
test "BLMOVE ($wherefrom, $whereto) replication, when blocking against empty list" {
|
||||
$A config resetstat
|
||||
set rd [redis_deferring_client]
|
||||
set rd [valkey_deferring_client]
|
||||
$rd blmove a b $wherefrom $whereto 5
|
||||
r lpush a foo
|
||||
wait_for_condition 50 100 {
|
||||
@ -151,7 +151,7 @@ start_server {tags {"repl external:skip"}} {
|
||||
|
||||
test "BLMOVE ($wherefrom, $whereto) replication, list exists" {
|
||||
$A config resetstat
|
||||
set rd [redis_deferring_client]
|
||||
set rd [valkey_deferring_client]
|
||||
r lpush c 1
|
||||
r lpush c 2
|
||||
r lpush c 3
|
||||
@ -165,7 +165,7 @@ start_server {tags {"repl external:skip"}} {
|
||||
}
|
||||
|
||||
test {BLPOP followed by role change, issue #2473} {
|
||||
set rd [redis_deferring_client]
|
||||
set rd [valkey_deferring_client]
|
||||
$rd blpop foo 0 ; # Block while B is a master
|
||||
|
||||
# Turn B into master of A
|
||||
@ -637,7 +637,7 @@ foreach testType {Successful Aborted} {
|
||||
}
|
||||
|
||||
test {Busy script during async loading} {
|
||||
set rd_replica [redis_deferring_client -1]
|
||||
set rd_replica [valkey_deferring_client -1]
|
||||
$replica config set lua-time-limit 10
|
||||
$rd_replica eval {while true do end} 0
|
||||
after 200
|
||||
@ -1146,7 +1146,7 @@ test {replicaof right after disconnection} {
|
||||
fail "Can't turn the instance into a replica"
|
||||
}
|
||||
|
||||
set rd [redis_deferring_client -1]
|
||||
set rd [valkey_deferring_client -1]
|
||||
$rd debug sleep 1
|
||||
after 100
|
||||
|
||||
@ -1344,7 +1344,7 @@ test {replica can handle EINTR if use diskless load} {
|
||||
|
||||
start_server {tags {"repl" "external:skip"}} {
|
||||
test "replica do not write the reply to the replication link - SYNC (_addReplyToBufferOrList)" {
|
||||
set rd [redis_deferring_client]
|
||||
set rd [valkey_deferring_client]
|
||||
set lines [count_log_lines 0]
|
||||
|
||||
$rd sync
|
||||
@ -1361,7 +1361,7 @@ start_server {tags {"repl" "external:skip"}} {
|
||||
}
|
||||
|
||||
test "replica do not write the reply to the replication link - SYNC (addReplyDeferredLen)" {
|
||||
set rd [redis_deferring_client]
|
||||
set rd [valkey_deferring_client]
|
||||
set lines [count_log_lines 0]
|
||||
|
||||
$rd sync
|
||||
@ -1378,7 +1378,7 @@ start_server {tags {"repl" "external:skip"}} {
|
||||
}
|
||||
|
||||
test "replica do not write the reply to the replication link - PSYNC (_addReplyToBufferOrList)" {
|
||||
set rd [redis_deferring_client]
|
||||
set rd [valkey_deferring_client]
|
||||
set lines [count_log_lines 0]
|
||||
|
||||
$rd psync replicationid -1
|
||||
@ -1398,7 +1398,7 @@ start_server {tags {"repl" "external:skip"}} {
|
||||
}
|
||||
|
||||
test "replica do not write the reply to the replication link - PSYNC (addReplyDeferredLen)" {
|
||||
set rd [redis_deferring_client]
|
||||
set rd [valkey_deferring_client]
|
||||
set lines [count_log_lines 0]
|
||||
|
||||
$rd psync replicationid -1
|
||||
|
@ -56,7 +56,7 @@ foreach how {sigterm shutdown} {
|
||||
exec kill -SIGTERM $master_pid
|
||||
}
|
||||
shutdown {
|
||||
set rd [redis_deferring_client -1]
|
||||
set rd [valkey_deferring_client -1]
|
||||
$rd shutdown
|
||||
}
|
||||
}
|
||||
@ -152,8 +152,8 @@ test "Shutting down master waits for replica then fails" {
|
||||
$master incr k
|
||||
|
||||
# Two clients call blocking SHUTDOWN in parallel.
|
||||
set rd1 [redis_deferring_client -1]
|
||||
set rd2 [redis_deferring_client -1]
|
||||
set rd1 [valkey_deferring_client -1]
|
||||
set rd2 [valkey_deferring_client -1]
|
||||
$rd1 shutdown
|
||||
$rd2 shutdown
|
||||
set info_clients [$master info clients]
|
||||
@ -205,8 +205,8 @@ test "Shutting down master waits for replica then aborted" {
|
||||
$master incr k
|
||||
|
||||
# Two clients call blocking SHUTDOWN in parallel.
|
||||
set rd1 [redis_deferring_client -1]
|
||||
set rd2 [redis_deferring_client -1]
|
||||
set rd1 [valkey_deferring_client -1]
|
||||
set rd2 [valkey_deferring_client -1]
|
||||
$rd1 shutdown
|
||||
$rd2 shutdown
|
||||
set info_clients [$master info clients]
|
||||
|
@ -256,7 +256,7 @@ proc reconnect {args} {
|
||||
lset ::servers end+$level $srv
|
||||
}
|
||||
|
||||
proc redis_deferring_client {args} {
|
||||
proc valkey_deferring_client {args} {
|
||||
set level 0
|
||||
if {[llength $args] > 0 && [string is integer [lindex $args 0]]} {
|
||||
set level [lindex $args 0]
|
||||
|
@ -108,7 +108,7 @@ start_server {tags {"acl external:skip"}} {
|
||||
} {*NOPERM*channel*}
|
||||
|
||||
test {By default, only default user is able to subscribe to any channel} {
|
||||
set rd [redis_deferring_client]
|
||||
set rd [valkey_deferring_client]
|
||||
$rd AUTH default pwd
|
||||
$rd read
|
||||
$rd SUBSCRIBE foo
|
||||
@ -124,7 +124,7 @@ start_server {tags {"acl external:skip"}} {
|
||||
} {*NOPERM*channel*}
|
||||
|
||||
test {By default, only default user is able to subscribe to any shard channel} {
|
||||
set rd [redis_deferring_client]
|
||||
set rd [valkey_deferring_client]
|
||||
$rd AUTH default pwd
|
||||
$rd read
|
||||
$rd SSUBSCRIBE foo
|
||||
@ -140,7 +140,7 @@ start_server {tags {"acl external:skip"}} {
|
||||
} {*NOPERM*channel*}
|
||||
|
||||
test {By default, only default user is able to subscribe to any pattern} {
|
||||
set rd [redis_deferring_client]
|
||||
set rd [valkey_deferring_client]
|
||||
$rd AUTH default pwd
|
||||
$rd read
|
||||
$rd PSUBSCRIBE bar*
|
||||
@ -209,7 +209,7 @@ start_server {tags {"acl external:skip"}} {
|
||||
}
|
||||
|
||||
test {It's possible to allow subscribing to a subset of channels} {
|
||||
set rd [redis_deferring_client]
|
||||
set rd [valkey_deferring_client]
|
||||
$rd AUTH psuser pspass
|
||||
$rd read
|
||||
$rd SUBSCRIBE foo:1
|
||||
@ -222,7 +222,7 @@ start_server {tags {"acl external:skip"}} {
|
||||
} {*NOPERM*channel*}
|
||||
|
||||
test {It's possible to allow subscribing to a subset of shard channels} {
|
||||
set rd [redis_deferring_client]
|
||||
set rd [valkey_deferring_client]
|
||||
$rd AUTH psuser pspass
|
||||
$rd read
|
||||
$rd SSUBSCRIBE foo:1
|
||||
@ -235,7 +235,7 @@ start_server {tags {"acl external:skip"}} {
|
||||
} {*NOPERM*channel*}
|
||||
|
||||
test {It's possible to allow subscribing to a subset of channel patterns} {
|
||||
set rd [redis_deferring_client]
|
||||
set rd [valkey_deferring_client]
|
||||
$rd AUTH psuser pspass
|
||||
$rd read
|
||||
$rd PSUBSCRIBE foo:1
|
||||
@ -248,7 +248,7 @@ start_server {tags {"acl external:skip"}} {
|
||||
} {*NOPERM*channel*}
|
||||
|
||||
test {Subscribers are killed when revoked of channel permission} {
|
||||
set rd [redis_deferring_client]
|
||||
set rd [valkey_deferring_client]
|
||||
r ACL setuser psuser resetchannels &foo:1
|
||||
$rd AUTH psuser pspass
|
||||
$rd read
|
||||
@ -262,7 +262,7 @@ start_server {tags {"acl external:skip"}} {
|
||||
} {0}
|
||||
|
||||
test {Subscribers are killed when revoked of channel permission} {
|
||||
set rd [redis_deferring_client]
|
||||
set rd [valkey_deferring_client]
|
||||
r ACL setuser psuser resetchannels &foo:1
|
||||
$rd AUTH psuser pspass
|
||||
$rd read
|
||||
@ -276,7 +276,7 @@ start_server {tags {"acl external:skip"}} {
|
||||
} {0}
|
||||
|
||||
test {Subscribers are killed when revoked of pattern permission} {
|
||||
set rd [redis_deferring_client]
|
||||
set rd [valkey_deferring_client]
|
||||
r ACL setuser psuser resetchannels &bar:*
|
||||
$rd AUTH psuser pspass
|
||||
$rd read
|
||||
@ -290,7 +290,7 @@ start_server {tags {"acl external:skip"}} {
|
||||
} {0}
|
||||
|
||||
test {Subscribers are killed when revoked of allchannels permission} {
|
||||
set rd [redis_deferring_client]
|
||||
set rd [valkey_deferring_client]
|
||||
r ACL setuser psuser allchannels
|
||||
$rd AUTH psuser pspass
|
||||
$rd read
|
||||
@ -304,7 +304,7 @@ start_server {tags {"acl external:skip"}} {
|
||||
} {0}
|
||||
|
||||
test {Subscribers are pardoned if literal permissions are retained and/or gaining allchannels} {
|
||||
set rd [redis_deferring_client]
|
||||
set rd [valkey_deferring_client]
|
||||
r ACL setuser psuser resetchannels &foo:1 &bar:* &orders
|
||||
$rd AUTH psuser pspass
|
||||
$rd read
|
||||
@ -326,7 +326,7 @@ start_server {tags {"acl external:skip"}} {
|
||||
test {blocked command gets rejected when reprocessed after permission change} {
|
||||
r auth default ""
|
||||
r config resetstat
|
||||
set rd [redis_deferring_client]
|
||||
set rd [valkey_deferring_client]
|
||||
r ACL setuser psuser reset on nopass +@all allkeys
|
||||
$rd AUTH psuser pspass
|
||||
$rd read
|
||||
@ -754,7 +754,7 @@ start_server {tags {"acl external:skip"}} {
|
||||
}
|
||||
|
||||
test {ACL LOG can distinguish the transaction context (2)} {
|
||||
set rd1 [redis_deferring_client]
|
||||
set rd1 [valkey_deferring_client]
|
||||
r ACL SETUSER antirez +incr
|
||||
|
||||
r AUTH antirez foo
|
||||
@ -830,7 +830,7 @@ start_server {tags {"acl external:skip"}} {
|
||||
|
||||
test {When default user is off, new connections are not authenticated} {
|
||||
r ACL setuser default off
|
||||
catch {set rd1 [redis_deferring_client]} e
|
||||
catch {set rd1 [valkey_deferring_client]} e
|
||||
r ACL setuser default on
|
||||
set e
|
||||
} {*NOAUTH*}
|
||||
@ -1024,8 +1024,8 @@ start_server [list overrides [list "dir" $server_path "acl-pubsub-default" "allc
|
||||
reconnect
|
||||
r ACL SETUSER doug on nopass resetchannels &test* +@all ~*
|
||||
|
||||
set rd1 [redis_deferring_client]
|
||||
set rd2 [redis_deferring_client]
|
||||
set rd1 [valkey_deferring_client]
|
||||
set rd2 [valkey_deferring_client]
|
||||
|
||||
$rd1 AUTH alice alice
|
||||
$rd1 read
|
||||
@ -1055,8 +1055,8 @@ start_server [list overrides [list "dir" $server_path "acl-pubsub-default" "allc
|
||||
reconnect
|
||||
r ACL SETUSER mortimer on >mortimer ~* &* +@all
|
||||
|
||||
set rd1 [redis_deferring_client]
|
||||
set rd2 [redis_deferring_client]
|
||||
set rd1 [valkey_deferring_client]
|
||||
set rd2 [valkey_deferring_client]
|
||||
|
||||
$rd1 AUTH alice alice
|
||||
$rd1 read
|
||||
|
@ -257,7 +257,7 @@ start_server {} {
|
||||
test "client evicted due to output buf" {
|
||||
r flushdb
|
||||
r setrange k 200000 v
|
||||
set rr [redis_deferring_client]
|
||||
set rr [valkey_deferring_client]
|
||||
$rr client setname test_client
|
||||
$rr flush
|
||||
assert {[$rr read] == "OK"}
|
||||
@ -325,10 +325,10 @@ start_server {} {
|
||||
r setrange k $obuf_size v
|
||||
set rr1 [redis_client]
|
||||
$rr1 client setname "qbuf-client"
|
||||
set rr2 [redis_deferring_client]
|
||||
set rr2 [valkey_deferring_client]
|
||||
$rr2 client setname "obuf-client1"
|
||||
assert_equal [$rr2 read] OK
|
||||
set rr3 [redis_deferring_client]
|
||||
set rr3 [valkey_deferring_client]
|
||||
$rr3 client setname "obuf-client2"
|
||||
assert_equal [$rr3 read] OK
|
||||
|
||||
|
@ -17,7 +17,7 @@ start_multiple_servers 3 [list overrides $base_conf] {
|
||||
set node2 [srv -1 client]
|
||||
set node3 [srv -2 client]
|
||||
set node3_pid [srv -2 pid]
|
||||
set node3_rd [redis_deferring_client -2]
|
||||
set node3_rd [valkey_deferring_client -2]
|
||||
|
||||
test {Create 3 node cluster} {
|
||||
exec src/valkey-cli --cluster-yes --cluster create \
|
||||
@ -79,7 +79,7 @@ start_multiple_servers 3 [list overrides $base_conf] {
|
||||
}
|
||||
}
|
||||
|
||||
set node1_rd [redis_deferring_client 0]
|
||||
set node1_rd [valkey_deferring_client 0]
|
||||
|
||||
test "use previous hostip in \"cluster-preferred-endpoint-type unknown-endpoint\" mode" {
|
||||
|
||||
|
@ -80,13 +80,13 @@ start_cluster 1 2 {tags {external:skip cluster}} {
|
||||
set channelname ch3
|
||||
|
||||
# subscribe on replica1
|
||||
set subscribeclient1 [redis_deferring_client -1]
|
||||
set subscribeclient1 [valkey_deferring_client -1]
|
||||
$subscribeclient1 deferred 1
|
||||
$subscribeclient1 SSUBSCRIBE $channelname
|
||||
$subscribeclient1 read
|
||||
|
||||
# subscribe on replica2
|
||||
set subscribeclient2 [redis_deferring_client -2]
|
||||
set subscribeclient2 [valkey_deferring_client -2]
|
||||
$subscribeclient2 deferred 1
|
||||
$subscribeclient2 SSUBSCRIBE $channelname
|
||||
$subscribeclient2 read
|
||||
|
@ -285,7 +285,7 @@ start_server {tags {"dump"}} {
|
||||
assert {[$first exists key] == 1}
|
||||
assert {[$second exists key] == 0}
|
||||
|
||||
set rd [redis_deferring_client]
|
||||
set rd [valkey_deferring_client]
|
||||
$rd debug sleep 1.0 ; # Make second server unable to reply.
|
||||
set e {}
|
||||
catch {r -1 migrate $second_host $second_port key 9 500} e
|
||||
|
@ -235,7 +235,7 @@ start_server {tags {"scripting"}} {
|
||||
} {x}
|
||||
|
||||
test {FUNCTION - test function kill} {
|
||||
set rd [redis_deferring_client]
|
||||
set rd [valkey_deferring_client]
|
||||
r config set busy-reply-threshold 10
|
||||
r function load REPLACE [get_function_code lua test test {local a = 1 while true do a = a + 1 end}]
|
||||
$rd fcall test 0
|
||||
@ -249,7 +249,7 @@ start_server {tags {"scripting"}} {
|
||||
}
|
||||
|
||||
test {FUNCTION - test script kill not working on function} {
|
||||
set rd [redis_deferring_client]
|
||||
set rd [valkey_deferring_client]
|
||||
r config set busy-reply-threshold 10
|
||||
r function load REPLACE [get_function_code lua test test {local a = 1 while true do a = a + 1 end}]
|
||||
$rd fcall test 0
|
||||
@ -264,7 +264,7 @@ start_server {tags {"scripting"}} {
|
||||
}
|
||||
|
||||
test {FUNCTION - test function kill not working on eval} {
|
||||
set rd [redis_deferring_client]
|
||||
set rd [valkey_deferring_client]
|
||||
r config set busy-reply-threshold 10
|
||||
$rd eval {local a = 1 while true do a = a + 1 end} 0
|
||||
after 200
|
||||
|
@ -60,7 +60,7 @@ start_server {tags {"info" "external:skip"}} {
|
||||
r config resetstat
|
||||
r CONFIG SET latency-tracking yes
|
||||
r CONFIG SET latency-tracking-info-percentiles "50.0 99.0 99.9"
|
||||
set rd [redis_deferring_client]
|
||||
set rd [valkey_deferring_client]
|
||||
r del list1{t}
|
||||
|
||||
$rd blpop list1{t} 0
|
||||
@ -259,7 +259,7 @@ start_server {tags {"info" "external:skip"}} {
|
||||
|
||||
test {errorstats: blocking commands} {
|
||||
r config resetstat
|
||||
set rd [redis_deferring_client]
|
||||
set rd [valkey_deferring_client]
|
||||
$rd client id
|
||||
set rd_id [$rd read]
|
||||
r del list1{t}
|
||||
@ -394,8 +394,8 @@ start_server {tags {"info" "external:skip"}} {
|
||||
test {clients: pubsub clients} {
|
||||
set info [r info clients]
|
||||
assert_equal [getInfoProperty $info pubsub_clients] {0}
|
||||
set rd1 [redis_deferring_client]
|
||||
set rd2 [redis_deferring_client]
|
||||
set rd1 [valkey_deferring_client]
|
||||
set rd2 [valkey_deferring_client]
|
||||
# basic count
|
||||
assert_equal {1} [ssubscribe $rd1 {chan1}]
|
||||
assert_equal {1} [subscribe $rd2 {chan2}]
|
||||
|
@ -44,9 +44,9 @@ start_server {tags {"introspection"}} {
|
||||
# 3 retries of increasing sleep_time, i.e. start with 2s, then go 4s, 8s.
|
||||
set sleep_time 2
|
||||
for {set i 0} {$i < 3} {incr i} {
|
||||
set rd1 [redis_deferring_client]
|
||||
set rd1 [valkey_deferring_client]
|
||||
r debug sleep $sleep_time
|
||||
set rd2 [redis_deferring_client]
|
||||
set rd2 [valkey_deferring_client]
|
||||
r acl setuser dummy on nopass +ping
|
||||
$rd1 auth dummy ""
|
||||
$rd1 read
|
||||
@ -80,16 +80,16 @@ start_server {tags {"introspection"}} {
|
||||
|
||||
test {CLIENT KILL SKIPME YES/NO will kill all clients} {
|
||||
# Kill all clients except `me`
|
||||
set rd1 [redis_deferring_client]
|
||||
set rd2 [redis_deferring_client]
|
||||
set rd1 [valkey_deferring_client]
|
||||
set rd2 [valkey_deferring_client]
|
||||
set connected_clients [s connected_clients]
|
||||
assert {$connected_clients >= 3}
|
||||
set res [r client kill skipme yes]
|
||||
assert {$res == $connected_clients - 1}
|
||||
|
||||
# Kill all clients, including `me`
|
||||
set rd3 [redis_deferring_client]
|
||||
set rd4 [redis_deferring_client]
|
||||
set rd3 [valkey_deferring_client]
|
||||
set rd4 [valkey_deferring_client]
|
||||
set connected_clients [s connected_clients]
|
||||
assert {$connected_clients == 3}
|
||||
set res [r client kill skipme no]
|
||||
@ -162,7 +162,7 @@ start_server {tags {"introspection"}} {
|
||||
} {} {needs:save}
|
||||
|
||||
test "CLIENT REPLY OFF/ON: disable all commands reply" {
|
||||
set rd [redis_deferring_client]
|
||||
set rd [valkey_deferring_client]
|
||||
|
||||
# These replies were silenced.
|
||||
$rd client reply off
|
||||
@ -178,7 +178,7 @@ start_server {tags {"introspection"}} {
|
||||
}
|
||||
|
||||
test "CLIENT REPLY SKIP: skip the next command reply" {
|
||||
set rd [redis_deferring_client]
|
||||
set rd [valkey_deferring_client]
|
||||
|
||||
# The first pong reply was silenced.
|
||||
$rd client reply skip
|
||||
@ -191,7 +191,7 @@ start_server {tags {"introspection"}} {
|
||||
}
|
||||
|
||||
test "CLIENT REPLY ON: unset SKIP flag" {
|
||||
set rd [redis_deferring_client]
|
||||
set rd [valkey_deferring_client]
|
||||
|
||||
$rd client reply skip
|
||||
$rd client reply on
|
||||
@ -204,7 +204,7 @@ start_server {tags {"introspection"}} {
|
||||
}
|
||||
|
||||
test {MONITOR can log executed commands} {
|
||||
set rd [redis_deferring_client]
|
||||
set rd [valkey_deferring_client]
|
||||
$rd monitor
|
||||
assert_match {*OK*} [$rd read]
|
||||
r set foo bar
|
||||
@ -215,7 +215,7 @@ start_server {tags {"introspection"}} {
|
||||
} {*"set" "foo"*"get" "foo"*}
|
||||
|
||||
test {MONITOR can log commands issued by the scripting engine} {
|
||||
set rd [redis_deferring_client]
|
||||
set rd [valkey_deferring_client]
|
||||
$rd monitor
|
||||
$rd read ;# Discard the OK
|
||||
r eval {redis.call('set',KEYS[1],ARGV[1])} 1 foo bar
|
||||
@ -228,7 +228,7 @@ start_server {tags {"introspection"}} {
|
||||
r function load replace {#!lua name=test
|
||||
redis.register_function('test', function() return redis.call('set', 'foo', 'bar') end)
|
||||
}
|
||||
set rd [redis_deferring_client]
|
||||
set rd [valkey_deferring_client]
|
||||
$rd monitor
|
||||
$rd read ;# Discard the OK
|
||||
r fcall test 0
|
||||
@ -238,7 +238,7 @@ start_server {tags {"introspection"}} {
|
||||
}
|
||||
|
||||
test {MONITOR supports redacting command arguments} {
|
||||
set rd [redis_deferring_client]
|
||||
set rd [valkey_deferring_client]
|
||||
$rd monitor
|
||||
$rd read ; # Discard the OK
|
||||
|
||||
@ -267,7 +267,7 @@ start_server {tags {"introspection"}} {
|
||||
} {0} {needs:repl}
|
||||
|
||||
test {MONITOR correctly handles multi-exec cases} {
|
||||
set rd [redis_deferring_client]
|
||||
set rd [valkey_deferring_client]
|
||||
$rd monitor
|
||||
$rd read ; # Discard the OK
|
||||
|
||||
@ -296,8 +296,8 @@ start_server {tags {"introspection"}} {
|
||||
# need to reconnect in order to reset the clients state
|
||||
reconnect
|
||||
|
||||
set rd [redis_deferring_client]
|
||||
set bc [redis_deferring_client]
|
||||
set rd [valkey_deferring_client]
|
||||
set bc [valkey_deferring_client]
|
||||
r del mylist
|
||||
|
||||
$rd monitor
|
||||
@ -363,7 +363,7 @@ start_server {tags {"introspection"}} {
|
||||
} {*name=someothername*}
|
||||
|
||||
test {After CLIENT SETNAME, connection can still be closed} {
|
||||
set rd [redis_deferring_client]
|
||||
set rd [valkey_deferring_client]
|
||||
$rd client setname foobar
|
||||
assert_equal [$rd read] "OK"
|
||||
assert_match {*foobar*} [r client list]
|
||||
|
@ -9,7 +9,7 @@ start_server {tags {"limits network external:skip"} overrides {maxclients 10}} {
|
||||
catch {
|
||||
while {$c < 50} {
|
||||
incr c
|
||||
set rd [redis_deferring_client]
|
||||
set rd [valkey_deferring_client]
|
||||
$rd ping
|
||||
$rd read
|
||||
after 100
|
||||
|
@ -56,7 +56,7 @@ start_server {tags {"maxmemory" "external:skip"}} {
|
||||
init_test $client_eviction
|
||||
|
||||
for {set j 0} {$j < 20} {incr j} {
|
||||
set rr [redis_deferring_client]
|
||||
set rr [valkey_deferring_client]
|
||||
lappend clients $rr
|
||||
}
|
||||
|
||||
@ -85,7 +85,7 @@ start_server {tags {"maxmemory" "external:skip"}} {
|
||||
init_test $client_eviction
|
||||
|
||||
for {set j 0} {$j < 30} {incr j} {
|
||||
set rr [redis_deferring_client]
|
||||
set rr [valkey_deferring_client]
|
||||
lappend clients $rr
|
||||
}
|
||||
|
||||
@ -349,12 +349,12 @@ proc test_slave_buffers {test_name cmd_count payload_len limit_memory pipeline}
|
||||
}
|
||||
|
||||
# put the slave to sleep
|
||||
set rd_slave [redis_deferring_client]
|
||||
set rd_slave [valkey_deferring_client]
|
||||
pause_process $slave_pid
|
||||
|
||||
# send some 10mb worth of commands that don't increase the memory usage
|
||||
if {$pipeline == 1} {
|
||||
set rd_master [redis_deferring_client -1]
|
||||
set rd_master [valkey_deferring_client -1]
|
||||
for {set k 0} {$k < $cmd_count} {incr k} {
|
||||
$rd_master setrange key:0 0 [string repeat A $payload_len]
|
||||
}
|
||||
@ -406,7 +406,7 @@ proc test_slave_buffers {test_name cmd_count payload_len limit_memory pipeline}
|
||||
|
||||
# test that slave buffer are counted correctly
|
||||
# we wanna use many small commands, and we don't wanna wait long
|
||||
# so we need to use a pipeline (redis_deferring_client)
|
||||
# so we need to use a pipeline (valkey_deferring_client)
|
||||
# that may cause query buffer to fill and induce eviction, so we disable it
|
||||
test_slave_buffers {slave buffer are counted correctly} 1000000 10 0 1
|
||||
|
||||
@ -450,7 +450,7 @@ start_server {tags {"maxmemory external:skip"}} {
|
||||
# 10 clients listening on tracking messages
|
||||
set clients {}
|
||||
for {set j 0} {$j < 10} {incr j} {
|
||||
lappend clients [redis_deferring_client]
|
||||
lappend clients [valkey_deferring_client]
|
||||
}
|
||||
foreach rd $clients {
|
||||
$rd HELLO 3
|
||||
|
@ -1,6 +1,6 @@
|
||||
proc test_memory_efficiency {range} {
|
||||
r flushall
|
||||
set rd [redis_deferring_client]
|
||||
set rd [valkey_deferring_client]
|
||||
set base_mem [s used_memory]
|
||||
set written 0
|
||||
for {set j 0} {$j < 10000} {incr j} {
|
||||
@ -193,7 +193,7 @@ run_solo {defrag} {
|
||||
|
||||
# Populate memory with interleaving script-key pattern of same size
|
||||
set dummy_script "--[string repeat x 400]\nreturn "
|
||||
set rd [redis_deferring_client]
|
||||
set rd [valkey_deferring_client]
|
||||
for {set j 0} {$j < $n} {incr j} {
|
||||
set val "$dummy_script[format "%06d" $j]"
|
||||
$rd script load $val
|
||||
@ -286,7 +286,7 @@ run_solo {defrag} {
|
||||
r xreadgroup GROUP mygroup Alice COUNT 1 STREAMS stream >
|
||||
|
||||
# create big keys with 10k items
|
||||
set rd [redis_deferring_client]
|
||||
set rd [valkey_deferring_client]
|
||||
for {set j 0} {$j < 10000} {incr j} {
|
||||
$rd hset bighash $j [concat "asdfasdfasdf" $j]
|
||||
$rd lpush biglist [concat "asdfasdfasdf" $j]
|
||||
@ -418,8 +418,8 @@ run_solo {defrag} {
|
||||
# Populate memory with interleaving pubsub-key pattern of same size
|
||||
set n 50000
|
||||
set dummy_channel "[string repeat x 400]"
|
||||
set rd [redis_deferring_client]
|
||||
set rd_pubsub [redis_deferring_client]
|
||||
set rd [valkey_deferring_client]
|
||||
set rd_pubsub [valkey_deferring_client]
|
||||
for {set j 0} {$j < $n} {incr j} {
|
||||
set channel_name "$dummy_channel[format "%06d" $j]"
|
||||
$rd_pubsub subscribe $channel_name
|
||||
@ -518,7 +518,7 @@ run_solo {defrag} {
|
||||
r config set list-max-ziplist-size 5 ;# list of 500k items will have 100k quicklist nodes
|
||||
|
||||
# create big keys with 10k items
|
||||
set rd [redis_deferring_client]
|
||||
set rd [valkey_deferring_client]
|
||||
|
||||
set expected_frag 1.7
|
||||
# add a mass of list nodes to two lists (allocations are interlaced)
|
||||
@ -637,7 +637,7 @@ run_solo {defrag} {
|
||||
}
|
||||
|
||||
# add a mass of keys with 600 bytes values, fill the bin of 640 bytes which has 32 regs per slab.
|
||||
set rd [redis_deferring_client]
|
||||
set rd [valkey_deferring_client]
|
||||
set keys 640000
|
||||
for {set j 0} {$j < $keys} {incr j} {
|
||||
$rd setrange $j 600 x
|
||||
|
@ -16,7 +16,7 @@ start_server {tags {"modules"}} {
|
||||
}
|
||||
|
||||
test "Blpop on threaded async RM_Call" {
|
||||
set rd [redis_deferring_client]
|
||||
set rd [valkey_deferring_client]
|
||||
|
||||
$rd do_rm_call_async_on_thread blpop l 0
|
||||
wait_for_blocked_clients_count 1
|
||||
@ -29,7 +29,7 @@ start_server {tags {"modules"}} {
|
||||
foreach cmd {do_rm_call_async do_rm_call_async_script_mode } {
|
||||
|
||||
test "Blpop on async RM_Call using $cmd" {
|
||||
set rd [redis_deferring_client]
|
||||
set rd [valkey_deferring_client]
|
||||
|
||||
$rd $cmd blpop l 0
|
||||
wait_for_blocked_clients_count 1
|
||||
@ -40,7 +40,7 @@ start_server {tags {"modules"}} {
|
||||
}
|
||||
|
||||
test "Brpop on async RM_Call using $cmd" {
|
||||
set rd [redis_deferring_client]
|
||||
set rd [valkey_deferring_client]
|
||||
|
||||
$rd $cmd brpop l 0
|
||||
wait_for_blocked_clients_count 1
|
||||
@ -51,7 +51,7 @@ start_server {tags {"modules"}} {
|
||||
}
|
||||
|
||||
test "Brpoplpush on async RM_Call using $cmd" {
|
||||
set rd [redis_deferring_client]
|
||||
set rd [valkey_deferring_client]
|
||||
|
||||
$rd $cmd brpoplpush l1 l2 0
|
||||
wait_for_blocked_clients_count 1
|
||||
@ -63,7 +63,7 @@ start_server {tags {"modules"}} {
|
||||
} {a}
|
||||
|
||||
test "Blmove on async RM_Call using $cmd" {
|
||||
set rd [redis_deferring_client]
|
||||
set rd [valkey_deferring_client]
|
||||
|
||||
$rd $cmd blmove l1 l2 LEFT LEFT 0
|
||||
wait_for_blocked_clients_count 1
|
||||
@ -75,7 +75,7 @@ start_server {tags {"modules"}} {
|
||||
} {a}
|
||||
|
||||
test "Bzpopmin on async RM_Call using $cmd" {
|
||||
set rd [redis_deferring_client]
|
||||
set rd [valkey_deferring_client]
|
||||
|
||||
$rd $cmd bzpopmin s 0
|
||||
wait_for_blocked_clients_count 1
|
||||
@ -86,7 +86,7 @@ start_server {tags {"modules"}} {
|
||||
}
|
||||
|
||||
test "Bzpopmax on async RM_Call using $cmd" {
|
||||
set rd [redis_deferring_client]
|
||||
set rd [valkey_deferring_client]
|
||||
|
||||
$rd $cmd bzpopmax s 0
|
||||
wait_for_blocked_clients_count 1
|
||||
@ -98,7 +98,7 @@ start_server {tags {"modules"}} {
|
||||
}
|
||||
|
||||
test {Nested async RM_Call} {
|
||||
set rd [redis_deferring_client]
|
||||
set rd [valkey_deferring_client]
|
||||
|
||||
$rd do_rm_call_async do_rm_call_async do_rm_call_async do_rm_call_async blpop l 0
|
||||
wait_for_blocked_clients_count 1
|
||||
@ -109,8 +109,8 @@ start_server {tags {"modules"}} {
|
||||
}
|
||||
|
||||
test {Test multiple async RM_Call waiting on the same event} {
|
||||
set rd1 [redis_deferring_client]
|
||||
set rd2 [redis_deferring_client]
|
||||
set rd1 [valkey_deferring_client]
|
||||
set rd2 [valkey_deferring_client]
|
||||
|
||||
$rd1 do_rm_call_async do_rm_call_async do_rm_call_async do_rm_call_async blpop l 0
|
||||
$rd2 do_rm_call_async do_rm_call_async do_rm_call_async do_rm_call_async blpop l 0
|
||||
@ -136,7 +136,7 @@ start_server {tags {"modules"}} {
|
||||
}
|
||||
|
||||
test {async RM_Call inside async RM_Call callback} {
|
||||
set rd [redis_deferring_client]
|
||||
set rd [valkey_deferring_client]
|
||||
$rd wait_and_do_rm_call blpop l 0
|
||||
wait_for_blocked_clients_count 1
|
||||
|
||||
@ -161,7 +161,7 @@ start_server {tags {"modules"}} {
|
||||
|
||||
test {Become replica while having async RM_Call running} {
|
||||
r flushall
|
||||
set rd [redis_deferring_client]
|
||||
set rd [valkey_deferring_client]
|
||||
$rd do_rm_call_async blpop l 0
|
||||
wait_for_blocked_clients_count 1
|
||||
|
||||
@ -182,7 +182,7 @@ start_server {tags {"modules"}} {
|
||||
|
||||
test {Pipeline with blocking RM_Call} {
|
||||
r flushall
|
||||
set rd [redis_deferring_client]
|
||||
set rd [valkey_deferring_client]
|
||||
set buf ""
|
||||
append buf "do_rm_call_async blpop l 0\r\n"
|
||||
append buf "ping\r\n"
|
||||
@ -202,7 +202,7 @@ start_server {tags {"modules"}} {
|
||||
|
||||
test {blocking RM_Call abort} {
|
||||
r flushall
|
||||
set rd [redis_deferring_client]
|
||||
set rd [valkey_deferring_client]
|
||||
|
||||
$rd client id
|
||||
set client_id [$rd read]
|
||||
@ -229,7 +229,7 @@ start_server {tags {"modules"}} {
|
||||
r flushall
|
||||
set repl [attach_to_replication_stream]
|
||||
|
||||
set rd [redis_deferring_client]
|
||||
set rd [valkey_deferring_client]
|
||||
|
||||
$rd do_rm_call_async blpop l 0
|
||||
wait_for_blocked_clients_count 1
|
||||
@ -251,7 +251,7 @@ start_server {tags {"modules"}} {
|
||||
r flushall
|
||||
set repl [attach_to_replication_stream]
|
||||
|
||||
set rd [redis_deferring_client]
|
||||
set rd [valkey_deferring_client]
|
||||
|
||||
$rd blpop_and_set_multiple_keys l x 1 y 2
|
||||
wait_for_blocked_clients_count 1
|
||||
@ -277,7 +277,7 @@ start_server {tags {"modules"}} {
|
||||
r flushall
|
||||
set repl [attach_to_replication_stream]
|
||||
|
||||
set rd [redis_deferring_client]
|
||||
set rd [valkey_deferring_client]
|
||||
|
||||
$rd do_rm_call_async_no_replicate blpop l 0
|
||||
wait_for_blocked_clients_count 1
|
||||
@ -307,7 +307,7 @@ start_server {tags {"modules"}} {
|
||||
r flushall
|
||||
set repl [attach_to_replication_stream]
|
||||
|
||||
set rd [redis_deferring_client]
|
||||
set rd [valkey_deferring_client]
|
||||
|
||||
$rd blpop_and_set_multiple_keys l string_foo 1 string_bar 2
|
||||
wait_for_blocked_clients_count 1
|
||||
@ -346,7 +346,7 @@ start_server {tags {"modules"}} {
|
||||
r DEBUG SET-ACTIVE-EXPIRE 0
|
||||
set repl [attach_to_replication_stream]
|
||||
|
||||
set rd [redis_deferring_client]
|
||||
set rd [valkey_deferring_client]
|
||||
|
||||
$rd blpop_and_set_multiple_keys l string_foo 1 string_bar 2
|
||||
wait_for_blocked_clients_count 1
|
||||
@ -421,7 +421,7 @@ start_server {tags {"modules"}} {
|
||||
r module load $testmodule3
|
||||
|
||||
test {Test unblock handler on module blocked on keys} {
|
||||
set rd [redis_deferring_client]
|
||||
set rd [valkey_deferring_client]
|
||||
|
||||
r fsl.push l 1
|
||||
$rd do_rm_call_async FSL.BPOPGT l 3 0
|
||||
|
@ -114,7 +114,7 @@ foreach call_type {nested normal} {
|
||||
set busy_time_limit 50
|
||||
set old_time_limit [lindex [r config get busy-reply-threshold] 1]
|
||||
r config set busy-reply-threshold $busy_time_limit
|
||||
set rd [redis_deferring_client]
|
||||
set rd [valkey_deferring_client]
|
||||
|
||||
# run command that blocks until released
|
||||
set start [clock clicks -milliseconds]
|
||||
@ -171,7 +171,7 @@ foreach call_type {nested normal} {
|
||||
# trigger slow operation
|
||||
r set_slow_bg_operation 1
|
||||
r hset hash foo bar
|
||||
set rd [redis_deferring_client]
|
||||
set rd [valkey_deferring_client]
|
||||
set start [clock clicks -milliseconds]
|
||||
$rd do_bg_rm_call hgetall hash
|
||||
|
||||
@ -284,7 +284,7 @@ foreach call_type {nested normal} {
|
||||
|
||||
test {block time is shorter than timer period} {
|
||||
# This command does not have the reply.
|
||||
set rd [redis_deferring_client]
|
||||
set rd [valkey_deferring_client]
|
||||
$rd unblock_by_timer 100 10
|
||||
# Wait for the client to unlock.
|
||||
after 120
|
||||
|
@ -96,7 +96,7 @@ start_server {tags {"modules"}} {
|
||||
}
|
||||
|
||||
test "client unblock works only for modules with timeout support" {
|
||||
set rd [redis_deferring_client]
|
||||
set rd [valkey_deferring_client]
|
||||
$rd client id
|
||||
set id [$rd read]
|
||||
|
||||
|
@ -4,8 +4,8 @@ start_server {tags {"modules"}} {
|
||||
r module load $testmodule
|
||||
|
||||
test "Module client blocked on keys: Circular BPOPPUSH" {
|
||||
set rd1 [redis_deferring_client]
|
||||
set rd2 [redis_deferring_client]
|
||||
set rd1 [valkey_deferring_client]
|
||||
set rd2 [valkey_deferring_client]
|
||||
|
||||
r del src dst
|
||||
|
||||
@ -23,7 +23,7 @@ start_server {tags {"modules"}} {
|
||||
}
|
||||
|
||||
test "Module client blocked on keys: Self-referential BPOPPUSH" {
|
||||
set rd1 [redis_deferring_client]
|
||||
set rd1 [valkey_deferring_client]
|
||||
|
||||
r del src
|
||||
|
||||
@ -35,7 +35,7 @@ start_server {tags {"modules"}} {
|
||||
}
|
||||
|
||||
test "Module client blocked on keys: BPOPPUSH unblocked by timer" {
|
||||
set rd1 [redis_deferring_client]
|
||||
set rd1 [valkey_deferring_client]
|
||||
|
||||
r del src dst
|
||||
|
||||
@ -68,14 +68,14 @@ start_server {tags {"modules"}} {
|
||||
|
||||
test {Module client blocked on keys (no metadata): Timeout} {
|
||||
r del k
|
||||
set rd [redis_deferring_client]
|
||||
set rd [valkey_deferring_client]
|
||||
$rd fsl.bpop k 1
|
||||
assert_equal {Request timedout} [$rd read]
|
||||
}
|
||||
|
||||
test {Module client blocked on keys (no metadata): Blocked} {
|
||||
r del k
|
||||
set rd [redis_deferring_client]
|
||||
set rd [valkey_deferring_client]
|
||||
$rd fsl.bpop k 0
|
||||
wait_for_blocked_clients_count 1
|
||||
r fsl.push k 34
|
||||
@ -90,7 +90,7 @@ start_server {tags {"modules"}} {
|
||||
|
||||
test {Module client blocked on keys (with metadata): Timeout} {
|
||||
r del k
|
||||
set rd [redis_deferring_client]
|
||||
set rd [valkey_deferring_client]
|
||||
$rd client id
|
||||
set cid [$rd read]
|
||||
r fsl.push k 33
|
||||
@ -101,7 +101,7 @@ start_server {tags {"modules"}} {
|
||||
|
||||
test {Module client blocked on keys (with metadata): Blocked, case 1} {
|
||||
r del k
|
||||
set rd [redis_deferring_client]
|
||||
set rd [valkey_deferring_client]
|
||||
$rd client id
|
||||
set cid [$rd read]
|
||||
r fsl.push k 33
|
||||
@ -115,7 +115,7 @@ start_server {tags {"modules"}} {
|
||||
test {Module client blocked on keys (with metadata): Blocked, case 2} {
|
||||
r del k
|
||||
r fsl.push k 32
|
||||
set rd [redis_deferring_client]
|
||||
set rd [valkey_deferring_client]
|
||||
$rd fsl.bpopgt k 35 0
|
||||
wait_for_blocked_clients_count 1
|
||||
r fsl.push k 33
|
||||
@ -128,7 +128,7 @@ start_server {tags {"modules"}} {
|
||||
test {Module client blocked on keys (with metadata): Blocked, DEL} {
|
||||
r del k
|
||||
r fsl.push k 32
|
||||
set rd [redis_deferring_client]
|
||||
set rd [valkey_deferring_client]
|
||||
$rd fsl.bpopgt k 35 0
|
||||
wait_for_blocked_clients_count 1
|
||||
r del k
|
||||
@ -138,7 +138,7 @@ start_server {tags {"modules"}} {
|
||||
test {Module client blocked on keys (with metadata): Blocked, FLUSHALL} {
|
||||
r del k
|
||||
r fsl.push k 32
|
||||
set rd [redis_deferring_client]
|
||||
set rd [valkey_deferring_client]
|
||||
$rd fsl.bpopgt k 35 0
|
||||
wait_for_blocked_clients_count 1
|
||||
r flushall
|
||||
@ -149,7 +149,7 @@ start_server {tags {"modules"}} {
|
||||
r select 9
|
||||
r del k
|
||||
r fsl.push k 32
|
||||
set rd [redis_deferring_client]
|
||||
set rd [valkey_deferring_client]
|
||||
$rd fsl.bpopgt k 35 0
|
||||
wait_for_blocked_clients_count 1
|
||||
r swapdb 0 9
|
||||
@ -164,7 +164,7 @@ start_server {tags {"modules"}} {
|
||||
r select 0
|
||||
r lpush k 38
|
||||
r select 9
|
||||
set rd [redis_deferring_client]
|
||||
set rd [valkey_deferring_client]
|
||||
$rd fsl.bpopgt k 35 0
|
||||
wait_for_blocked_clients_count 1
|
||||
r swapdb 0 9
|
||||
@ -180,7 +180,7 @@ start_server {tags {"modules"}} {
|
||||
r select 0
|
||||
r fsl.push k 34
|
||||
r select 9
|
||||
set rd [redis_deferring_client]
|
||||
set rd [valkey_deferring_client]
|
||||
$rd fsl.bpopgt k 35 0
|
||||
wait_for_blocked_clients_count 1
|
||||
r swapdb 0 9
|
||||
@ -198,7 +198,7 @@ start_server {tags {"modules"}} {
|
||||
r select 0
|
||||
r fsl.push k 38
|
||||
r select 9
|
||||
set rd [redis_deferring_client]
|
||||
set rd [valkey_deferring_client]
|
||||
$rd fsl.bpopgt k 35 0
|
||||
wait_for_blocked_clients_count 1
|
||||
r swapdb 0 9
|
||||
@ -209,7 +209,7 @@ start_server {tags {"modules"}} {
|
||||
test {Module client blocked on keys (with metadata): Blocked, CLIENT KILL} {
|
||||
r del k
|
||||
r fsl.push k 32
|
||||
set rd [redis_deferring_client]
|
||||
set rd [valkey_deferring_client]
|
||||
$rd client id
|
||||
set cid [$rd read]
|
||||
$rd fsl.bpopgt k 35 0
|
||||
@ -220,7 +220,7 @@ start_server {tags {"modules"}} {
|
||||
test {Module client blocked on keys (with metadata): Blocked, CLIENT UNBLOCK TIMEOUT} {
|
||||
r del k
|
||||
r fsl.push k 32
|
||||
set rd [redis_deferring_client]
|
||||
set rd [valkey_deferring_client]
|
||||
$rd client id
|
||||
set cid [$rd read]
|
||||
$rd fsl.bpopgt k 35 0
|
||||
@ -232,7 +232,7 @@ start_server {tags {"modules"}} {
|
||||
test {Module client blocked on keys (with metadata): Blocked, CLIENT UNBLOCK ERROR} {
|
||||
r del k
|
||||
r fsl.push k 32
|
||||
set rd [redis_deferring_client]
|
||||
set rd [valkey_deferring_client]
|
||||
$rd client id
|
||||
set cid [$rd read]
|
||||
$rd fsl.bpopgt k 35 0
|
||||
@ -243,7 +243,7 @@ start_server {tags {"modules"}} {
|
||||
|
||||
test {Module client blocked on keys, no timeout CB, CLIENT UNBLOCK TIMEOUT} {
|
||||
r del k
|
||||
set rd [redis_deferring_client]
|
||||
set rd [valkey_deferring_client]
|
||||
$rd client id
|
||||
set cid [$rd read]
|
||||
$rd fsl.bpop k 0 NO_TO_CB
|
||||
@ -254,7 +254,7 @@ start_server {tags {"modules"}} {
|
||||
|
||||
test {Module client blocked on keys, no timeout CB, CLIENT UNBLOCK ERROR} {
|
||||
r del k
|
||||
set rd [redis_deferring_client]
|
||||
set rd [valkey_deferring_client]
|
||||
$rd client id
|
||||
set cid [$rd read]
|
||||
$rd fsl.bpop k 0 NO_TO_CB
|
||||
@ -265,7 +265,7 @@ start_server {tags {"modules"}} {
|
||||
|
||||
test {Module client re-blocked on keys after woke up on wrong type} {
|
||||
r del k
|
||||
set rd [redis_deferring_client]
|
||||
set rd [valkey_deferring_client]
|
||||
$rd fsl.bpop k 0
|
||||
wait_for_blocked_clients_count 1
|
||||
r lpush k 12
|
||||
@ -279,7 +279,7 @@ start_server {tags {"modules"}} {
|
||||
|
||||
test {Module client blocked on keys woken up by LPUSH} {
|
||||
r del k
|
||||
set rd [redis_deferring_client]
|
||||
set rd [valkey_deferring_client]
|
||||
$rd blockonkeys.popall k
|
||||
wait_for_blocked_clients_count 1
|
||||
r lpush k 42 squirrel banana
|
||||
@ -289,7 +289,7 @@ start_server {tags {"modules"}} {
|
||||
|
||||
test {Module client unblocks BLPOP} {
|
||||
r del k
|
||||
set rd [redis_deferring_client]
|
||||
set rd [valkey_deferring_client]
|
||||
$rd blpop k 3
|
||||
wait_for_blocked_clients_count 1
|
||||
r blockonkeys.lpush k 42
|
||||
@ -301,7 +301,7 @@ start_server {tags {"modules"}} {
|
||||
r del k
|
||||
r lpush k aa
|
||||
# Module client blocks to pop 5 elements from list
|
||||
set rd [redis_deferring_client]
|
||||
set rd [valkey_deferring_client]
|
||||
$rd blockonkeys.blpopn k 5
|
||||
wait_for_blocked_clients_count 1
|
||||
# Check that RM_SignalKeyAsReady() can wake up BLPOPN
|
||||
@ -316,7 +316,7 @@ start_server {tags {"modules"}} {
|
||||
r del k
|
||||
r set somekey someval
|
||||
# Module client blocks to pop 5 elements from list
|
||||
set rd [redis_deferring_client]
|
||||
set rd [valkey_deferring_client]
|
||||
$rd blockonkeys.blpopn_or_unblock k 5 0
|
||||
wait_for_blocked_clients_count 1
|
||||
# will now cause the module to trigger pop but instead will unblock the client from the reply_callback
|
||||
@ -342,7 +342,7 @@ start_server {tags {"modules"}} {
|
||||
wait_for_sync $replica
|
||||
|
||||
test {WAIT command on module blocked client on keys} {
|
||||
set rd [redis_deferring_client -1]
|
||||
set rd [valkey_deferring_client -1]
|
||||
$rd set x y
|
||||
$rd read
|
||||
|
||||
|
@ -19,7 +19,7 @@ start_cluster 3 0 [list config_lines $modules] {
|
||||
|
||||
test "Run blocking command (blocked on key) on cluster node3" {
|
||||
# key9184688 is mapped to slot 10923 (first slot of node 3)
|
||||
set node3_rd [redis_deferring_client -2]
|
||||
set node3_rd [valkey_deferring_client -2]
|
||||
$node3_rd fsl.bpop key9184688 0
|
||||
$node3_rd flush
|
||||
wait_for_condition 50 100 {
|
||||
@ -30,7 +30,7 @@ start_cluster 3 0 [list config_lines $modules] {
|
||||
}
|
||||
|
||||
test "Run blocking command (no keys) on cluster node2" {
|
||||
set node2_rd [redis_deferring_client -1]
|
||||
set node2_rd [valkey_deferring_client -1]
|
||||
$node2_rd block.block 0
|
||||
$node2_rd flush
|
||||
|
||||
@ -83,7 +83,7 @@ start_cluster 3 0 [list config_lines $modules] {
|
||||
test "Sanity test push cmd after resharding" {
|
||||
assert_error {*MOVED*} {$node3 fsl.push key9184688 1}
|
||||
|
||||
set node1_rd [redis_deferring_client 0]
|
||||
set node1_rd [valkey_deferring_client 0]
|
||||
$node1_rd fsl.bpop key9184688 0
|
||||
$node1_rd flush
|
||||
|
||||
@ -106,7 +106,7 @@ start_cluster 3 0 [list config_lines $modules] {
|
||||
test "Run blocking command (blocked on key) again on cluster node1" {
|
||||
$node1 del key9184688
|
||||
# key9184688 is mapped to slot 10923 which has been moved to node1
|
||||
set node1_rd [redis_deferring_client 0]
|
||||
set node1_rd [valkey_deferring_client 0]
|
||||
$node1_rd fsl.bpop key9184688 0
|
||||
$node1_rd flush
|
||||
|
||||
@ -118,7 +118,7 @@ start_cluster 3 0 [list config_lines $modules] {
|
||||
}
|
||||
|
||||
test "Run blocking command (no keys) again on cluster node2" {
|
||||
set node2_rd [redis_deferring_client -1]
|
||||
set node2_rd [valkey_deferring_client -1]
|
||||
|
||||
$node2_rd block.block 0
|
||||
$node2_rd flush
|
||||
|
@ -127,7 +127,7 @@ test {Blocking Commands don't run through command filter when reprocessed} {
|
||||
|
||||
r lpush list2{t} a b c d e
|
||||
|
||||
set rd [redis_deferring_client]
|
||||
set rd [valkey_deferring_client]
|
||||
# we're asking to pop from the left, but the command filter swaps the two arguments,
|
||||
# if it didn't swap it, we would end up with e d c b a 5 (5 being the left most of the following lpush)
|
||||
# but since we swap the arguments, we end up with 1 e d c b a (1 being the right most of it).
|
||||
|
@ -64,7 +64,7 @@ start_server {tags {"modules"}} {
|
||||
r config set busy-reply-threshold 5000 ;# make sure we're using a high default
|
||||
# trigger slow loading
|
||||
r datatype.slow_loading 1
|
||||
set rd [redis_deferring_client]
|
||||
set rd [valkey_deferring_client]
|
||||
set start [clock clicks -milliseconds]
|
||||
$rd debug reload
|
||||
|
||||
|
@ -8,7 +8,7 @@ tags "modules" {
|
||||
|
||||
test {Test clients connection / disconnection hooks} {
|
||||
for {set j 0} {$j < 2} {incr j} {
|
||||
set rd1 [redis_deferring_client]
|
||||
set rd1 [valkey_deferring_client]
|
||||
$rd1 close
|
||||
}
|
||||
assert {[r hooks.event_count client-connected] > 1}
|
||||
@ -16,7 +16,7 @@ tags "modules" {
|
||||
}
|
||||
|
||||
test {Test module client change event for blocked client} {
|
||||
set rd [redis_deferring_client]
|
||||
set rd [valkey_deferring_client]
|
||||
# select db other than 0
|
||||
$rd select 1
|
||||
# block on key
|
||||
|
@ -76,7 +76,7 @@ tags "modules" {
|
||||
test "Keyspace notifications: module events test" {
|
||||
r config set notify-keyspace-events Kd
|
||||
r del x
|
||||
set rd1 [redis_deferring_client]
|
||||
set rd1 [valkey_deferring_client]
|
||||
assert_equal {1} [psubscribe $rd1 *]
|
||||
r keyspace.notify x
|
||||
assert_equal {pmessage * __keyspace@9__:x notify} [$rd1 read]
|
||||
|
@@ -247,8 +247,8 @@ start_server {tags {"modules"}} {
test {module auth during blocking module auth} {
r config resetstat
r acl setuser foo >pwd on ~* &* +@all
set rd [redis_deferring_client]
set rd_two [redis_deferring_client]
set rd [valkey_deferring_client]
set rd_two [valkey_deferring_client]

# Attempt blocking module auth. While this ongoing, attempt non blocking module auth from
# moduleone/moduletwo and start another blocking module auth from another deferring client.
@@ -289,7 +289,7 @@ start_server {tags {"modules"}} {
test {Disabling Redis User during blocking module auth} {
r config resetstat
r acl setuser foo >pwd on ~* &* +@all
set rd [redis_deferring_client]
set rd [valkey_deferring_client]

# Attempt blocking module auth and disable the user while module auth is in progress.
$rd AUTH foo pwd
@@ -306,7 +306,7 @@ start_server {tags {"modules"}} {
test {Killing a client in the middle of blocking module auth} {
r config resetstat
r acl setuser foo >pwd on ~* &* +@all
set rd [redis_deferring_client]
set rd [valkey_deferring_client]
$rd client id
set cid [$rd read]

@@ -337,7 +337,7 @@ start_server {tags {"modules"}} {
test {test RM_RegisterAuthCallback Module API during blocking module auth} {
r config resetstat
r acl setuser foo >defaultpwd on ~* &* +@all
set rd [redis_deferring_client]
set rd [valkey_deferring_client]

# Start the module auth attempt with the standard auth password for the user. This
# will result in all module auth cbs attempted and then standard auth will be tried.
@@ -365,7 +365,7 @@ start_server {tags {"modules"}} {
test {Module unload during blocking module auth} {
r config resetstat
r module load $miscmodule
set rd [redis_deferring_client]
set rd [valkey_deferring_client]
r acl setuser foo >pwd on ~* &* +@all

# Start a blocking module auth attempt.
@@ -4,8 +4,8 @@ start_server {tags {"modules"}} {
r module load $testmodule

test {PUBLISH and SPUBLISH via a module} {
set rd1 [redis_deferring_client]
set rd2 [redis_deferring_client]
set rd1 [valkey_deferring_client]
set rd2 [valkey_deferring_client]

assert_equal {1} [ssubscribe $rd1 {chan1}]
assert_equal {1} [subscribe $rd2 {chan1}]

@@ -37,7 +37,7 @@ start_server {tags {"modules"}} {
# cause a problem.
# e.g. the server won't try to process next message of the current client
# while it is in the command callback for that client .
set rd1 [redis_deferring_client]
set rd1 [valkey_deferring_client]
$rd1 test.rdbload blabla.rdb

wait_for_condition 50 100 {

@@ -30,7 +30,7 @@ start_server {tags {"modules"}} {
r del mystream

# Blocking XREAD on an empty key
set rd1 [redis_deferring_client]
set rd1 [valkey_deferring_client]
$rd1 XREAD BLOCK 3000 STREAMS mystream $
# wait until client is actually blocked
wait_for_condition 50 100 {
@@ -42,7 +42,7 @@ start_server {tags {"modules"}} {
assert_equal "{mystream {{$id {field 1 value a}}}}" [$rd1 read]

# Blocking XREAD on an existing stream
set rd2 [redis_deferring_client]
set rd2 [valkey_deferring_client]
$rd2 XREAD BLOCK 3000 STREAMS mystream $
# wait until client is actually blocked
wait_for_condition 50 100 {

@@ -5,7 +5,7 @@ start_server {tags {"modules"}} {

test "modules allocated memory can be reclaimed in the background" {
set orig_mem [s used_memory]
set rd [redis_deferring_client]
set rd [valkey_deferring_client]

# LAZYFREE_THRESHOLD is 64
for {set i 0} {$i < 10000} {incr i} {
@@ -68,7 +68,7 @@ start_server {tags {"multi"}} {
} {0 0}

test {EXEC fails if there are errors while queueing commands #2} {
set rd [redis_deferring_client]
set rd [valkey_deferring_client]
r del foo1{t} foo2{t}
r multi
r set foo1{t} bar1
@@ -523,7 +523,7 @@ start_server {tags {"multi"}} {
} {OK} {needs:repl cluster:skip}

test {DISCARD should not fail during OOM} {
set rd [redis_deferring_client]
set rd [valkey_deferring_client]
$rd config set maxmemory 1
assert {[$rd read] eq {OK}}
r multi
@@ -539,7 +539,7 @@ start_server {tags {"multi"}} {
test {MULTI and script timeout} {
# check that if MULTI arrives during timeout, it is either refused, or
# allowed to pass, and we don't end up executing half of the transaction
set rd1 [redis_deferring_client]
set rd1 [valkey_deferring_client]
set r2 [redis_client]
r config set lua-time-limit 10
r set xx 1
@@ -564,7 +564,7 @@ start_server {tags {"multi"}} {
test {EXEC and script timeout} {
# check that if EXEC arrives during timeout, we don't end up executing
# half of the transaction, and also that we exit the multi state
set rd1 [redis_deferring_client]
set rd1 [valkey_deferring_client]
set r2 [redis_client]
r config set lua-time-limit 10
r set xx 1
@@ -589,7 +589,7 @@ start_server {tags {"multi"}} {
test {MULTI-EXEC body and script timeout} {
# check that we don't run an incomplete transaction due to some commands
# arriving during busy script
set rd1 [redis_deferring_client]
set rd1 [valkey_deferring_client]
set r2 [redis_client]
r config set lua-time-limit 10
r set xx 1
@@ -614,7 +614,7 @@ start_server {tags {"multi"}} {
test {just EXEC and script timeout} {
# check that if EXEC arrives during timeout, we don't end up executing
# actual commands during busy script, and also that we exit the multi state
set rd1 [redis_deferring_client]
set rd1 [valkey_deferring_client]
set r2 [redis_client]
r config set lua-time-limit 10
r set xx 1
@@ -29,7 +29,7 @@ start_server {tags {"obuf-limits external:skip logreqres:skip"}} {

test {Client output buffer hard limit is enforced} {
r config set client-output-buffer-limit {pubsub 100000 0 0}
set rd1 [redis_deferring_client]
set rd1 [valkey_deferring_client]

$rd1 subscribe foo
set reply [$rd1 read]
@@ -58,7 +58,7 @@ start_server {tags {"obuf-limits external:skip logreqres:skip"}} {
test $test_name {
r config set client-output-buffer-limit "pubsub 0 100000 $soft_limit_time"
set soft_limit_time [expr $soft_limit_time*1000]
set rd1 [redis_deferring_client]
set rd1 [valkey_deferring_client]

$rd1 client setname test_client
set reply [$rd1 read]
@@ -124,7 +124,7 @@ start_server {tags {"obuf-limits external:skip logreqres:skip"}} {
}
set orig_mem [s used_memory]
# Set client name and get all items
set rd [redis_deferring_client]
set rd [valkey_deferring_client]
$rd client setname mybiglist
assert {[$rd read] eq "OK"}
$rd lrange mylist 0 -1
@@ -149,8 +149,8 @@ start_server {tags {"obuf-limits external:skip logreqres:skip"}} {
r config set client-output-buffer-limit {normal 100000 0 0}
set value [string repeat "x" 10000]
r set bigkey $value
set rd1 [redis_deferring_client]
set rd2 [redis_deferring_client]
set rd1 [valkey_deferring_client]
set rd2 [valkey_deferring_client]
$rd2 client setname multicommands
assert_equal "OK" [$rd2 read]

@@ -308,7 +308,7 @@ start_server {tags {"other"}} {
} {} {needs:reset}

test {RESET clears MONITOR state} {
set rd [redis_deferring_client]
set rd [valkey_deferring_client]
$rd monitor
assert_equal [$rd read] "OK"
@@ -1,7 +1,7 @@
start_server {tags {"pause network"}} {
test "Test read commands are not blocked by client pause" {
r client PAUSE 100000 WRITE
set rd [redis_deferring_client]
set rd [valkey_deferring_client]
$rd GET FOO
$rd PING
$rd INFO
@@ -24,7 +24,7 @@ start_server {tags {"pause network"}} {
# paused only WRITE. This is because the first 'PAUSE ALL' command is
# more restrictive than the second 'PAUSE WRITE' and pause-client feature
# preserve most restrictive configuration among multiple settings.
set rd [redis_deferring_client]
set rd [valkey_deferring_client]
$rd SET FOO BAR

set test_start_time [clock milliseconds]
@@ -40,7 +40,7 @@ start_server {tags {"pause network"}} {
r client PAUSE 60000 WRITE
r client PAUSE 10 WRITE
after 100
set rd [redis_deferring_client]
set rd [valkey_deferring_client]
$rd SET FOO BAR
wait_for_blocked_clients_count 1 100 10

@@ -52,7 +52,7 @@ start_server {tags {"pause network"}} {
test "Test write commands are paused by RO" {
r client PAUSE 60000 WRITE

set rd [redis_deferring_client]
set rd [valkey_deferring_client]
$rd SET FOO BAR
wait_for_blocked_clients_count 1 50 100

@@ -66,13 +66,13 @@ start_server {tags {"pause network"}} {
r client PAUSE 100000 WRITE

# Test that pfcount, which can replicate, is also blocked
set rd [redis_deferring_client]
set rd [valkey_deferring_client]
$rd PFCOUNT pause-hll
wait_for_blocked_clients_count 1 50 100

# Test that publish, which adds the message to the replication
# stream is blocked.
set rd2 [redis_deferring_client]
set rd2 [valkey_deferring_client]
$rd2 publish foo bar
wait_for_blocked_clients_count 2 50 100

@@ -97,7 +97,7 @@ start_server {tags {"pause network"}} {
}

test "Test write multi-execs are blocked by pause RO" {
set rd [redis_deferring_client]
set rd [valkey_deferring_client]
$rd MULTI
assert_equal [$rd read] "OK"
$rd SET FOO BAR
@@ -112,8 +112,8 @@ start_server {tags {"pause network"}} {

test "Test scripts are blocked by pause RO" {
r client PAUSE 60000 WRITE
set rd [redis_deferring_client]
set rd2 [redis_deferring_client]
set rd [valkey_deferring_client]
set rd2 [valkey_deferring_client]
$rd EVAL "return 1" 0

# test a script with a shebang and no flags for coverage
@@ -194,8 +194,8 @@ start_server {tags {"pause network"}} {
}

test "Test write scripts in multi-exec are blocked by pause RO" {
set rd [redis_deferring_client]
set rd2 [redis_deferring_client]
set rd [valkey_deferring_client]
set rd2 [valkey_deferring_client]

# one with a shebang
$rd MULTI
@@ -240,7 +240,7 @@ start_server {tags {"pause network"}} {

test "Test multiple clients can be queued up and unblocked" {
r client PAUSE 60000 WRITE
set clients [list [redis_deferring_client] [redis_deferring_client] [redis_deferring_client]]
set clients [list [valkey_deferring_client] [valkey_deferring_client] [valkey_deferring_client]]
foreach client $clients {
$client SET FOO BAR
}
@@ -294,7 +294,7 @@ start_server {tags {"pause network"}} {
r SET FOO2{t} BAR
r exec

set rd [redis_deferring_client]
set rd [valkey_deferring_client]
$rd SET FOO3{t} BAR

wait_for_blocked_clients_count 1 50 100

@@ -234,7 +234,7 @@ start_server {tags {"protocol network"}} {

start_server {tags {"regression"}} {
test "Regression for a crash with blocking ops and pipelining" {
set rd [redis_deferring_client]
set rd [valkey_deferring_client]
set fd [r channel]
set proto "*3\r\n\$5\r\nBLPOP\r\n\$6\r\nnolist\r\n\$1\r\n0\r\n"
puts -nonewline $fd $proto$proto
@@ -6,7 +6,7 @@ start_server {tags {"pubsub network"}} {
}

foreach resp {2 3} {
set rd1 [redis_deferring_client]
set rd1 [valkey_deferring_client]
if {[lsearch $::denytags "resp3"] >= 0} {
if {$resp == 3} {continue}
} elseif {$::force_resp3} {
@@ -42,7 +42,7 @@ start_server {tags {"pubsub network"}} {
}

test "PUBLISH/SUBSCRIBE basics" {
set rd1 [redis_deferring_client]
set rd1 [valkey_deferring_client]

# subscribe to two channels
assert_equal {1 2} [subscribe $rd1 {chan1 chan2}]
@@ -67,8 +67,8 @@ start_server {tags {"pubsub network"}} {
}

test "PUBLISH/SUBSCRIBE with two clients" {
set rd1 [redis_deferring_client]
set rd2 [redis_deferring_client]
set rd1 [valkey_deferring_client]
set rd2 [valkey_deferring_client]

assert_equal {1} [subscribe $rd1 {chan1}]
assert_equal {1} [subscribe $rd2 {chan1}]
@@ -82,7 +82,7 @@ start_server {tags {"pubsub network"}} {
}

test "PUBLISH/SUBSCRIBE after UNSUBSCRIBE without arguments" {
set rd1 [redis_deferring_client]
set rd1 [valkey_deferring_client]
assert_equal {1 2 3} [subscribe $rd1 {chan1 chan2 chan3}]
unsubscribe $rd1
assert_equal 0 [r publish chan1 hello]
@@ -94,7 +94,7 @@ start_server {tags {"pubsub network"}} {
}

test "SUBSCRIBE to one channel more than once" {
set rd1 [redis_deferring_client]
set rd1 [valkey_deferring_client]
assert_equal {1 1 1} [subscribe $rd1 {chan1 chan1 chan1}]
assert_equal 1 [r publish chan1 hello]
assert_equal {message chan1 hello} [$rd1 read]
@@ -104,7 +104,7 @@ start_server {tags {"pubsub network"}} {
}

test "UNSUBSCRIBE from non-subscribed channels" {
set rd1 [redis_deferring_client]
set rd1 [valkey_deferring_client]
assert_equal {0 0 0} [unsubscribe $rd1 {foo bar quux}]

# clean up clients
@@ -112,7 +112,7 @@ start_server {tags {"pubsub network"}} {
}

test "PUBLISH/PSUBSCRIBE basics" {
set rd1 [redis_deferring_client]
set rd1 [valkey_deferring_client]

# subscribe to two patterns
assert_equal {1 2} [psubscribe $rd1 {foo.* bar.*}]
@@ -140,8 +140,8 @@ start_server {tags {"pubsub network"}} {
}

test "PUBLISH/PSUBSCRIBE with two clients" {
set rd1 [redis_deferring_client]
set rd2 [redis_deferring_client]
set rd1 [valkey_deferring_client]
set rd2 [valkey_deferring_client]

assert_equal {1} [psubscribe $rd1 {chan.*}]
assert_equal {1} [psubscribe $rd2 {chan.*}]
@@ -155,7 +155,7 @@ start_server {tags {"pubsub network"}} {
}

test "PUBLISH/PSUBSCRIBE after PUNSUBSCRIBE without arguments" {
set rd1 [redis_deferring_client]
set rd1 [valkey_deferring_client]
assert_equal {1 2 3} [psubscribe $rd1 {chan1.* chan2.* chan3.*}]
punsubscribe $rd1
assert_equal 0 [r publish chan1.hi hello]
@@ -167,7 +167,7 @@ start_server {tags {"pubsub network"}} {
}

test "PubSub messages with CLIENT REPLY OFF" {
set rd [redis_deferring_client]
set rd [valkey_deferring_client]
$rd hello 3
$rd read ;# Discard the hello reply

@@ -191,7 +191,7 @@ start_server {tags {"pubsub network"}} {
} {0} {resp3}

test "PUNSUBSCRIBE from non-subscribed channels" {
set rd1 [redis_deferring_client]
set rd1 [valkey_deferring_client]
assert_equal {0 0 0} [punsubscribe $rd1 {foo.* bar.* quux.*}]

# clean up clients
@@ -203,8 +203,8 @@ start_server {tags {"pubsub network"}} {
} {abc 0 def 0}

test "NUMPATs returns the number of unique patterns" {
set rd1 [redis_deferring_client]
set rd2 [redis_deferring_client]
set rd1 [valkey_deferring_client]
set rd2 [valkey_deferring_client]

# Three unique patterns and one that overlaps
psubscribe $rd1 "foo*"
@@ -223,7 +223,7 @@ start_server {tags {"pubsub network"}} {
}

test "Mix SUBSCRIBE and PSUBSCRIBE" {
set rd1 [redis_deferring_client]
set rd1 [valkey_deferring_client]
assert_equal {1} [subscribe $rd1 {foo.bar}]
assert_equal {2} [psubscribe $rd1 {foo.*}]
@@ -249,7 +249,7 @@ start_server {tags {"pubsub network"}} {

test "Keyspace notifications: we receive keyspace notifications" {
r config set notify-keyspace-events KA
set rd1 [redis_deferring_client]
set rd1 [valkey_deferring_client]
$rd1 CLIENT REPLY OFF ;# Make sure it works even if replies are silenced
assert_equal {1} [psubscribe $rd1 *]
r set foo bar
@@ -259,7 +259,7 @@ start_server {tags {"pubsub network"}} {

test "Keyspace notifications: we receive keyevent notifications" {
r config set notify-keyspace-events EA
set rd1 [redis_deferring_client]
set rd1 [valkey_deferring_client]
$rd1 CLIENT REPLY SKIP ;# Make sure it works even if replies are silenced
assert_equal {1} [psubscribe $rd1 *]
r set foo bar
@@ -269,7 +269,7 @@ start_server {tags {"pubsub network"}} {

test "Keyspace notifications: we can receive both kind of events" {
r config set notify-keyspace-events KEA
set rd1 [redis_deferring_client]
set rd1 [valkey_deferring_client]
$rd1 CLIENT REPLY ON ;# Just coverage
assert_equal {OK} [$rd1 read]
assert_equal {1} [psubscribe $rd1 *]
@@ -282,7 +282,7 @@ start_server {tags {"pubsub network"}} {
test "Keyspace notifications: we are able to mask events" {
r config set notify-keyspace-events KEl
r del mylist
set rd1 [redis_deferring_client]
set rd1 [valkey_deferring_client]
assert_equal {1} [psubscribe $rd1 *]
r set foo bar
r lpush mylist a
@@ -294,7 +294,7 @@ start_server {tags {"pubsub network"}} {

test "Keyspace notifications: general events test" {
r config set notify-keyspace-events KEg
set rd1 [redis_deferring_client]
set rd1 [valkey_deferring_client]
assert_equal {1} [psubscribe $rd1 *]
r set foo bar
r expire foo 1
@@ -309,7 +309,7 @@ start_server {tags {"pubsub network"}} {
test "Keyspace notifications: list events test" {
r config set notify-keyspace-events KEl
r del mylist
set rd1 [redis_deferring_client]
set rd1 [valkey_deferring_client]
assert_equal {1} [psubscribe $rd1 *]
r lpush mylist a
r rpush mylist a
@@ -326,7 +326,7 @@ start_server {tags {"pubsub network"}} {
test "Keyspace notifications: set events test" {
r config set notify-keyspace-events Ks
r del myset
set rd1 [redis_deferring_client]
set rd1 [valkey_deferring_client]
assert_equal {1} [psubscribe $rd1 *]
r sadd myset a b c d
r srem myset x
@@ -341,7 +341,7 @@ start_server {tags {"pubsub network"}} {
test "Keyspace notifications: zset events test" {
r config set notify-keyspace-events Kz
r del myzset
set rd1 [redis_deferring_client]
set rd1 [valkey_deferring_client]
assert_equal {1} [psubscribe $rd1 *]
r zadd myzset 1 a 2 b
r zrem myzset x
@@ -356,7 +356,7 @@ start_server {tags {"pubsub network"}} {
test "Keyspace notifications: hash events test" {
r config set notify-keyspace-events Kh
r del myhash
set rd1 [redis_deferring_client]
set rd1 [valkey_deferring_client]
assert_equal {1} [psubscribe $rd1 *]
r hmset myhash yes 1 no 0
r hincrby myhash yes 10
@@ -368,7 +368,7 @@ start_server {tags {"pubsub network"}} {
test "Keyspace notifications: stream events test" {
r config set notify-keyspace-events Kt
r del mystream
set rd1 [redis_deferring_client]
set rd1 [valkey_deferring_client]
assert_equal {1} [psubscribe $rd1 *]
r xgroup create mystream mygroup $ mkstream
r xgroup createconsumer mystream mygroup Bob
@@ -392,7 +392,7 @@ start_server {tags {"pubsub network"}} {
test "Keyspace notifications: expired events (triggered expire)" {
r config set notify-keyspace-events Ex
r del foo
set rd1 [redis_deferring_client]
set rd1 [valkey_deferring_client]
assert_equal {1} [psubscribe $rd1 *]
r psetex foo 100 1
wait_for_condition 50 100 {
@@ -407,7 +407,7 @@ start_server {tags {"pubsub network"}} {
test "Keyspace notifications: expired events (background expire)" {
r config set notify-keyspace-events Ex
r del foo
set rd1 [redis_deferring_client]
set rd1 [valkey_deferring_client]
assert_equal {1} [psubscribe $rd1 *]
r psetex foo 100 1
assert_equal "pmessage * __keyevent@${db}__:expired foo" [$rd1 read]
@@ -418,7 +418,7 @@ start_server {tags {"pubsub network"}} {
r config set notify-keyspace-events Ee
r config set maxmemory-policy allkeys-lru
r flushdb
set rd1 [redis_deferring_client]
set rd1 [valkey_deferring_client]
assert_equal {1} [psubscribe $rd1 *]
r set foo bar
r config set maxmemory 1
@@ -441,7 +441,7 @@ start_server {tags {"pubsub network"}} {

test "Keyspace notifications: new key test" {
r config set notify-keyspace-events En
set rd1 [redis_deferring_client]
set rd1 [valkey_deferring_client]
assert_equal {1} [psubscribe $rd1 *]
r set foo bar
# second set of foo should not cause a 'new' event
@@ -1,6 +1,6 @@
start_server {tags {"pubsubshard external:skip"}} {
test "SPUBLISH/SSUBSCRIBE basics" {
set rd1 [redis_deferring_client]
set rd1 [valkey_deferring_client]

# subscribe to two channels
assert_equal {1} [ssubscribe $rd1 {chan1}]
@@ -26,8 +26,8 @@ start_server {tags {"pubsubshard external:skip"}} {
}

test "SPUBLISH/SSUBSCRIBE with two clients" {
set rd1 [redis_deferring_client]
set rd2 [redis_deferring_client]
set rd1 [valkey_deferring_client]
set rd2 [valkey_deferring_client]

assert_equal {1} [ssubscribe $rd1 {chan1}]
assert_equal {1} [ssubscribe $rd2 {chan1}]
@@ -41,7 +41,7 @@ start_server {tags {"pubsubshard external:skip"}} {
}

test "SPUBLISH/SSUBSCRIBE after UNSUBSCRIBE without arguments" {
set rd1 [redis_deferring_client]
set rd1 [valkey_deferring_client]
assert_equal {1} [ssubscribe $rd1 {chan1}]
assert_equal {2} [ssubscribe $rd1 {chan2}]
assert_equal {3} [ssubscribe $rd1 {chan3}]
@@ -55,7 +55,7 @@ start_server {tags {"pubsubshard external:skip"}} {
}

test "SSUBSCRIBE to one channel more than once" {
set rd1 [redis_deferring_client]
set rd1 [valkey_deferring_client]
assert_equal {1 1 1} [ssubscribe $rd1 {chan1 chan1 chan1}]
assert_equal 1 [r SPUBLISH chan1 hello]
assert_equal {smessage chan1 hello} [$rd1 read]
@@ -65,7 +65,7 @@ start_server {tags {"pubsubshard external:skip"}} {
}

test "SUNSUBSCRIBE from non-subscribed channels" {
set rd1 [redis_deferring_client]
set rd1 [valkey_deferring_client]
assert_equal {0} [sunsubscribe $rd1 {foo}]
assert_equal {0} [sunsubscribe $rd1 {bar}]
assert_equal {0} [sunsubscribe $rd1 {quux}]
@@ -79,8 +79,8 @@ start_server {tags {"pubsubshard external:skip"}} {
} {abc 0 def 0}

test "SPUBLISH/SSUBSCRIBE with two clients" {
set rd1 [redis_deferring_client]
set rd2 [redis_deferring_client]
set rd1 [valkey_deferring_client]
set rd2 [valkey_deferring_client]

assert_equal {1} [ssubscribe $rd1 {chan1}]
assert_equal {1} [ssubscribe $rd2 {chan1}]
@@ -94,8 +94,8 @@ start_server {tags {"pubsubshard external:skip"}} {
}

test "SPUBLISH/SSUBSCRIBE with PUBLISH/SUBSCRIBE" {
set rd1 [redis_deferring_client]
set rd2 [redis_deferring_client]
set rd1 [valkey_deferring_client]
set rd2 [valkey_deferring_client]

assert_equal {1} [ssubscribe $rd1 {chan1}]
assert_equal {1} [subscribe $rd2 {chan1}]
@@ -111,7 +111,7 @@ start_server {tags {"pubsubshard external:skip"}} {
}

test "PubSubShard with CLIENT REPLY OFF" {
set rd [redis_deferring_client]
set rd [valkey_deferring_client]
$rd hello 3
$rd read ;# Discard the hello reply

@@ -151,8 +151,8 @@ start_server {tags {"pubsubshard external:skip"}} {
}

test {publish message to master and receive on replica} {
set rd0 [redis_deferring_client node_0_host node_0_port]
set rd1 [redis_deferring_client node_1_host node_1_port]
set rd0 [valkey_deferring_client node_0_host node_0_port]
set rd1 [valkey_deferring_client node_1_host node_1_port]

assert_equal {1} [ssubscribe $rd1 {chan1}]
$rd0 SPUBLISH chan1 hello
@@ -770,7 +770,7 @@ start_server {tags {"scripting"}} {
r script flush ;# reset Lua VM
r set x 0
# Use a non blocking client to speedup the loop.
set rd [redis_deferring_client]
set rd [valkey_deferring_client]
for {set j 0} {$j < 10000} {incr j} {
run_script_on_connection $rd {return redis.call("incr",KEYS[1])} 1 x
}
@@ -1138,7 +1138,7 @@ start_server {tags {"scripting"}} {
# instance at all.
start_server {tags {"scripting"}} {
test {Timedout read-only scripts can be killed by SCRIPT KILL} {
set rd [redis_deferring_client]
set rd [valkey_deferring_client]
r config set lua-time-limit 10
run_script_on_connection $rd {while true do end} 0
after 200
@@ -1151,7 +1151,7 @@ start_server {tags {"scripting"}} {
}

test {Timedout read-only scripts can be killed by SCRIPT KILL even when use pcall} {
set rd [redis_deferring_client]
set rd [valkey_deferring_client]
r config set lua-time-limit 10
run_script_on_connection $rd {local f = function() while 1 do redis.call('ping') end end while 1 do pcall(f) end} 0

@@ -1179,7 +1179,7 @@ start_server {tags {"scripting"}} {
}

test {Timedout script does not cause a false dead client} {
set rd [redis_deferring_client]
set rd [valkey_deferring_client]
r config set lua-time-limit 10

# senging (in a pipeline):
@@ -1240,8 +1240,8 @@ start_server {tags {"scripting"}} {
r config set appendonly yes

# create clients, and set one to block waiting for key 'x'
set rd [redis_deferring_client]
set rd2 [redis_deferring_client]
set rd [valkey_deferring_client]
set rd2 [valkey_deferring_client]
set r3 [redis_client]
$rd2 blpop x 0
wait_for_blocked_clients_count 1
@@ -1280,7 +1280,7 @@ start_server {tags {"scripting"}} {
} {OK} {external:skip needs:debug}

test {Timedout scripts that modified data can't be killed by SCRIPT KILL} {
set rd [redis_deferring_client]
set rd [valkey_deferring_client]
r config set lua-time-limit 10
run_script_on_connection $rd {redis.call('set',KEYS[1],'y'); while true do end} 1 x
after 200
@@ -1300,7 +1300,7 @@ start_server {tags {"scripting"}} {
assert_match {BUSY*} $e
catch {r shutdown nosave}
# Make sure the server was killed
catch {set rd [redis_deferring_client]} e
catch {set rd [valkey_deferring_client]} e
assert_match {*connection refused*} $e
} {} {external:skip}
}
@@ -1348,7 +1348,7 @@ start_server {tags {"scripting"}} {
} ;# is_eval

test "Replication of script multiple pushes to list with BLPOP" {
set rd [redis_deferring_client]
set rd [valkey_deferring_client]
$rd brpop a 0
run_script {
redis.call("lpush",KEYS[1],"1");
@@ -2125,7 +2125,7 @@ start_server {tags {"scripting"}} {

# run a slow script that does one write, then waits for INFO to indicate
# that the replica dropped, and then runs another write
set rd [redis_deferring_client -1]
set rd [valkey_deferring_client -1]
$rd eval {
redis.call('set','x',"script value")
while true do

@@ -22,7 +22,7 @@ start_server {tags {"shutdown external:skip"}} {

catch {r shutdown nosave}
# Make sure the server was killed
catch {set rd [redis_deferring_client]} e
catch {set rd [valkey_deferring_client]} e
assert_match {*connection refused*} $e

# Temp rdb file must be deleted

@@ -143,7 +143,7 @@ start_server {tags {"slowlog"} overrides {slowlog-log-slower-than 1000000}} {
assert_equal {INCRBYFLOAT A 1.0} [lindex [lindex [r slowlog get] 0] 3]

# blocked BLPOP is replicated as LPOP
set rd [redis_deferring_client]
set rd [valkey_deferring_client]
$rd blpop l 0
wait_for_blocked_clients_count 1 50 100
r multi
@@ -231,7 +231,7 @@ start_server {tags {"slowlog"} overrides {slowlog-log-slower-than 1000000}} {
# Cleanup first
r del mylist
# create a test client
set rd [redis_deferring_client]
set rd [valkey_deferring_client]

# config the slowlog and reset
r config set slowlog-log-slower-than 0
@@ -2,7 +2,7 @@
start_server {tags {"tracking network logreqres:skip"}} {
# Create a deferred client we'll use to redirect invalidation
# messages to.
set rd_redirection [redis_deferring_client]
set rd_redirection [valkey_deferring_client]
$rd_redirection client id
set redir_id [$rd_redirection read]
$rd_redirection subscribe __redis__:invalidate
@@ -10,7 +10,7 @@ start_server {tags {"tracking network logreqres:skip"}} {

# Create another client that's not used as a redirection client
# We should always keep this client's buffer clean
set rd [redis_deferring_client]
set rd [valkey_deferring_client]

# Client to be used for SET and GET commands
# We don't read this client's buffer
@@ -24,8 +24,8 @@ start_server {tags {"tracking network logreqres:skip"}} {
r CLIENT TRACKING off
$rd QUIT
$rd_redirection QUIT
set rd [redis_deferring_client]
set rd_redirection [redis_deferring_client]
set rd [valkey_deferring_client]
set rd_redirection [valkey_deferring_client]
$rd_redirection client id
set redir_id [$rd_redirection read]
$rd_redirection subscribe __redis__:invalidate
@@ -269,7 +269,7 @@ start_server {tags {"tracking network logreqres:skip"}} {
assert_equal PONG [r read]

# Reinstantiating after QUIT
set rd_redirection [redis_deferring_client]
set rd_redirection [valkey_deferring_client]
$rd_redirection CLIENT ID
set redir_id [$rd_redirection read]
$rd_redirection SUBSCRIBE __redis__:invalidate
@@ -814,7 +814,7 @@ start_server {tags {"tracking network logreqres:skip"}} {
test {RESP3 based basic redirect invalidation with client reply off} {
clean_all

set rd_redir [redis_deferring_client]
set rd_redir [valkey_deferring_client]
$rd_redir hello 3
$rd_redir read

@@ -880,7 +880,7 @@ start_server {tags {"tracking network logreqres:skip"}} {
# run the full tracking unit in that mode
start_server {tags {"tracking network"}} {
test {Coverage: Basic CLIENT CACHING} {
set rd_redirection [redis_deferring_client]
set rd_redirection [valkey_deferring_client]
$rd_redirection client id
set redir_id [$rd_redirection read]
assert_equal {OK} [r CLIENT TRACKING on OPTIN REDIRECT $redir_id]
@@ -645,7 +645,7 @@ foreach {type large} [array get largevalue] {
foreach {type large} [array get largevalue] {
foreach {pop} {BLPOP BLMPOP_LEFT} {
test "$pop: single existing list - $type" {
set rd [redis_deferring_client]
set rd [valkey_deferring_client]
create_$type blist "a b $large c d"

bpop_command $rd $pop blist 1
@@ -671,7 +671,7 @@ foreach {type large} [array get largevalue] {
}

test "$pop: multiple existing lists - $type" {
set rd [redis_deferring_client]
set rd [valkey_deferring_client]
create_$type blist1{t} "a $large c"
create_$type blist2{t} "d $large f"

@@ -700,7 +700,7 @@ foreach {type large} [array get largevalue] {
}

test "$pop: second list has an entry - $type" {
set rd [redis_deferring_client]
set rd [valkey_deferring_client]
r del blist1{t}
create_$type blist2{t} "d $large f"

@@ -722,7 +722,7 @@ foreach {type large} [array get largevalue] {
r del target{t}
r rpush target{t} bar

set rd [redis_deferring_client]
set rd [valkey_deferring_client]
create_$type blist{t} "a b $large c d"

$rd brpoplpush blist{t} target{t} 1
@@ -739,7 +739,7 @@ foreach {type large} [array get largevalue] {
r del target{t}
r rpush target{t} bar

set rd [redis_deferring_client]
set rd [valkey_deferring_client]
create_$type blist{t} "a b $large c d"

$rd blmove blist{t} target{t} $wherefrom $whereto 1
@@ -766,7 +766,7 @@ foreach {type large} [array get largevalue] {

foreach {pop} {BLPOP BLMPOP_LEFT} {
test "$pop, LPUSH + DEL should not awake blocked client" {
set rd [redis_deferring_client]
set rd [valkey_deferring_client]
r del list

bpop_command $rd $pop list 0
@@ -783,7 +783,7 @@ foreach {pop} {BLPOP BLMPOP_LEFT} {
}

test "$pop, LPUSH + DEL + SET should not awake blocked client" {
set rd [redis_deferring_client]
set rd [valkey_deferring_client]
r del list

bpop_command $rd $pop list 0
@@ -802,7 +802,7 @@ foreach {pop} {BLPOP BLMPOP_LEFT} {
}

test "BLPOP with same key multiple times should work (issue #801)" {
set rd [redis_deferring_client]
set rd [valkey_deferring_client]
r del list1{t} list2{t}

# Data arriving after the BLPOP.
@@ -827,7 +827,7 @@ foreach {pop} {BLPOP BLMPOP_LEFT} {

foreach {pop} {BLPOP BLMPOP_LEFT} {
test "MULTI/EXEC is isolated from the point of view of $pop" {
set rd [redis_deferring_client]
set rd [valkey_deferring_client]
r del list

bpop_command $rd $pop list 0
@@ -843,7 +843,7 @@ foreach {pop} {BLPOP BLMPOP_LEFT} {
}

test "$pop with variadic LPUSH" {
set rd [redis_deferring_client]
set rd [valkey_deferring_client]
r del blist
bpop_command $rd $pop blist 0
wait_for_blocked_client
@@ -855,7 +855,7 @@ foreach {pop} {BLPOP BLMPOP_LEFT} {
}

test "BRPOPLPUSH with zero timeout should block indefinitely" {
set rd [redis_deferring_client]
set rd [valkey_deferring_client]
r del blist{t} target{t}
r rpush target{t} bar
$rd brpoplpush blist{t} target{t} 0
@@ -869,7 +869,7 @@ foreach {pop} {BLPOP BLMPOP_LEFT} {
foreach wherefrom {left right} {
foreach whereto {left right} {
test "BLMOVE $wherefrom $whereto with zero timeout should block indefinitely" {
set rd [redis_deferring_client]
set rd [valkey_deferring_client]
r del blist{t} target{t}
r rpush target{t} bar
$rd blmove blist{t} target{t} $wherefrom $whereto 0
@@ -889,8 +889,8 @@ foreach {pop} {BLPOP BLMPOP_LEFT} {
foreach wherefrom {left right} {
foreach whereto {left right} {
test "BLMOVE ($wherefrom, $whereto) with a client BLPOPing the target list" {
set rd [redis_deferring_client]
set rd2 [redis_deferring_client]
set rd [valkey_deferring_client]
set rd2 [valkey_deferring_client]
r del blist{t} target{t}
$rd2 blpop target{t} 0
wait_for_blocked_clients_count 1
@@ -907,7 +907,7 @@ foreach {pop} {BLPOP BLMPOP_LEFT} {
}

test "BRPOPLPUSH with wrong source type" {
set rd [redis_deferring_client]
set rd [valkey_deferring_client]
r del blist{t} target{t}
r set blist{t} nolist
$rd brpoplpush blist{t} target{t} 1
@@ -916,7 +916,7 @@ foreach {pop} {BLPOP BLMPOP_LEFT} {
}

test "BRPOPLPUSH with wrong destination type" {
set rd [redis_deferring_client]
set rd [valkey_deferring_client]
r del blist{t} target{t}
r set target{t} nolist
r lpush blist{t} foo
@@ -924,7 +924,7 @@ foreach {pop} {BLPOP BLMPOP_LEFT} {
assert_error "WRONGTYPE*" {$rd read}
$rd close

set rd [redis_deferring_client]
set rd [valkey_deferring_client]
r del blist{t} target{t}
r set target{t} nolist
$rd brpoplpush blist{t} target{t} 0
@@ -936,7 +936,7 @@ foreach {pop} {BLPOP BLMPOP_LEFT} {
}

test "BRPOPLPUSH maintains order of elements after failure" {
set rd [redis_deferring_client]
set rd [valkey_deferring_client]
r del blist{t} target{t}
r set target{t} nolist
$rd brpoplpush blist{t} target{t} 0
@@ -948,8 +948,8 @@ foreach {pop} {BLPOP BLMPOP_LEFT} {
} {a b c}

test "BRPOPLPUSH with multiple blocked clients" {
set rd1 [redis_deferring_client]
set rd2 [redis_deferring_client]
set rd1 [valkey_deferring_client]
set rd2 [valkey_deferring_client]
r del blist{t} target1{t} target2{t}
r set target1{t} nolist
$rd1 brpoplpush blist{t} target1{t} 0
@@ -966,10 +966,10 @@ foreach {pop} {BLPOP BLMPOP_LEFT} {
}

test "BLMPOP with multiple blocked clients" {
set rd1 [redis_deferring_client]
set rd2 [redis_deferring_client]
set rd3 [redis_deferring_client]
set rd4 [redis_deferring_client]
set rd1 [valkey_deferring_client]
set rd2 [valkey_deferring_client]
set rd3 [valkey_deferring_client]
set rd4 [valkey_deferring_client]
r del blist{t} blist2{t}

$rd1 blmpop 0 2 blist{t} blist2{t} left count 1
@@ -999,8 +999,8 @@ foreach {pop} {BLPOP BLMPOP_LEFT} {
}

test "Linked LMOVEs" {
set rd1 [redis_deferring_client]
set rd2 [redis_deferring_client]
set rd1 [valkey_deferring_client]
set rd2 [valkey_deferring_client]

r del list1{t} list2{t} list3{t}

@@ -1019,8 +1019,8 @@ foreach {pop} {BLPOP BLMPOP_LEFT} {
}

test "Circular BRPOPLPUSH" {
set rd1 [redis_deferring_client]
set rd2 [redis_deferring_client]
set rd1 [valkey_deferring_client]
set rd2 [valkey_deferring_client]

r del list1{t} list2{t}

@@ -1038,7 +1038,7 @@ foreach {pop} {BLPOP BLMPOP_LEFT} {
}

test "Self-referential BRPOPLPUSH" {
set rd [redis_deferring_client]
set rd [valkey_deferring_client]

r del blist{t}

@@ -1066,8 +1066,8 @@ foreach {pop} {BLPOP BLMPOP_LEFT} {
} {foo bar {} {} {bar foo}}

test "PUSH resulting from BRPOPLPUSH affect WATCH" {
set blocked_client [redis_deferring_client]
set watching_client [redis_deferring_client]
set blocked_client [valkey_deferring_client]
set watching_client [valkey_deferring_client]
r del srclist{t} dstlist{t} somekey{t}
r set somekey{t} somevalue
$blocked_client brpoplpush srclist{t} dstlist{t} 0
@@ -1087,8 +1087,8 @@ foreach {pop} {BLPOP BLMPOP_LEFT} {
} {}

test "BRPOPLPUSH does not affect WATCH while still blocked" {
set blocked_client [redis_deferring_client]
set watching_client [redis_deferring_client]
set blocked_client [valkey_deferring_client]
set watching_client [valkey_deferring_client]
r del srclist{t} dstlist{t} somekey{t}
r set somekey{t} somevalue
$blocked_client brpoplpush srclist{t} dstlist{t} 0
@@ -1109,7 +1109,7 @@ foreach {pop} {BLPOP BLMPOP_LEFT} {
} {somevalue}

test {BRPOPLPUSH timeout} {
set rd [redis_deferring_client]
set rd [valkey_deferring_client]

$rd brpoplpush foo_list{t} bar_list{t} 1
wait_for_blocked_clients_count 1
@@ -1124,7 +1124,7 @@ foreach {pop} {BLPOP BLMPOP_LEFT} {
r select 1
r rpush k hello
r select 9
set rd [redis_deferring_client]
set rd [valkey_deferring_client]
$rd brpop k 5
wait_for_blocked_clients_count 1
r swapdb 1 9
@@ -1138,7 +1138,7 @@ foreach {pop} {BLPOP BLMPOP_LEFT} {
r select 1
r rpush k hello
r pexpire k 100
set rd [redis_deferring_client]
set rd [valkey_deferring_client]
$rd deferred 0
$rd select 9
set id [$rd client id]
@@ -1184,7 +1184,7 @@ foreach {pop} {BLPOP BLMPOP_LEFT} {
r flushall
r debug set-active-expire 0

set rd [redis_deferring_client]
set rd [valkey_deferring_client]
$rd client id
set id [$rd read]
$rd brpop k 0
@@ -1224,7 +1224,7 @@ foreach {pop} {BLPOP BLMPOP_LEFT} {
test {BLPOP unblock but the key is expired and then block again - reprocessing command} {
r flushall
r debug set-active-expire 0
set rd [redis_deferring_client]
set rd [valkey_deferring_client]

set start [clock milliseconds]
$rd blpop mylist 1
@@ -1251,7 +1251,7 @@ foreach {pop} {BLPOP BLMPOP_LEFT} {

foreach {pop} {BLPOP BLMPOP_LEFT} {
test "$pop when new key is moved into place" {
set rd [redis_deferring_client]
set rd [valkey_deferring_client]
r del foo{t}

bpop_command $rd $pop foo{t} 0
@@ -1264,7 +1264,7 @@ foreach {pop} {BLPOP BLMPOP_LEFT} {
} {foo{t} hij}

test "$pop when result key is created by SORT..STORE" {
set rd [redis_deferring_client]
set rd [valkey_deferring_client]

# zero out list from previous test without explicit delete
r lpop foo{t}
@@ -1291,7 +1291,7 @@ foreach {pop} {BLPOP BLMPOP_LEFT} {

foreach {pop} {BLPOP BRPOP BLMPOP_LEFT BLMPOP_RIGHT} {
test "$pop: with single empty list argument" {
set rd [redis_deferring_client]
set rd [valkey_deferring_client]
r del blist1
bpop_command $rd $pop blist1 1
wait_for_blocked_client
@@ -1302,14 +1302,14 @@ foreach {pop} {BLPOP BLMPOP_LEFT} {
}

test "$pop: with negative timeout" {
set rd [redis_deferring_client]
set rd [valkey_deferring_client]
bpop_command $rd $pop blist1 -1
assert_error "ERR *is negative*" {$rd read}
$rd close
}

test "$pop: with non-integer timeout" {
set rd [redis_deferring_client]
set rd [valkey_deferring_client]
r del blist1
bpop_command $rd $pop blist1 0.1
r rpush blist1 foo
@@ -1321,7 +1321,7 @@ foreach {pop} {BLPOP BLMPOP_LEFT} {
test "$pop: with zero timeout should block indefinitely" {
# To test this, use a timeout of 0 and wait a second.
# The blocking pop should still be waiting for a push.
set rd [redis_deferring_client]
set rd [valkey_deferring_client]
bpop_command $rd $pop blist1 0
wait_for_blocked_client
r rpush blist1 foo
@@ -1332,7 +1332,7 @@ foreach {pop} {BLPOP BLMPOP_LEFT} {
test "$pop: with 0.001 timeout should not block indefinitely" {
# Use a timeout of 0.001 and wait for the number of blocked clients to equal 0.
# Validate the empty read from the deferring client.
set rd [redis_deferring_client]
set rd [valkey_deferring_client]
bpop_command $rd $pop blist1 0.001
wait_for_blocked_clients_count 0
assert_equal {} [$rd read]
@@ -1340,7 +1340,7 @@ foreach {pop} {BLPOP BLMPOP_LEFT} {
}

test "$pop: second argument is not a list" {
set rd [redis_deferring_client]
set rd [valkey_deferring_client]
r del blist1{t} blist2{t}
r set blist2{t} nolist{t}
bpop_command_two_key $rd $pop blist1{t} blist2{t} 1
@@ -1349,7 +1349,7 @@ foreach {pop} {BLPOP BLMPOP_LEFT} {
}

test "$pop: timeout" {
set rd [redis_deferring_client]
set rd [valkey_deferring_client]
r del blist1{t} blist2{t}
bpop_command_two_key $rd $pop blist1{t} blist2{t} 1
wait_for_blocked_client
@@ -1358,7 +1358,7 @@ foreach {pop} {BLPOP BLMPOP_LEFT} {
}

test "$pop: arguments are empty" {
set rd [redis_deferring_client]
set rd [valkey_deferring_client]
r del blist1{t} blist2{t}

bpop_command_two_key $rd $pop blist1{t} blist2{t} 1
@@ -1393,7 +1393,7 @@ foreach {pop} {BLPOP BLMPOP_LEFT} {
}

test {BLMPOP propagate as pop with count command to replica} {
set rd [redis_deferring_client]
set rd [valkey_deferring_client]
set repl [attach_to_replication_stream]

# BLMPOP without being blocked.
@@ -2009,8 +2009,8 @@ foreach {type large} [array get largevalue] {
}

test "Regression for bug 593 - chaining BRPOPLPUSH with other blocking cmds" {
set rd1 [redis_deferring_client]
set rd2 [redis_deferring_client]
set rd1 [valkey_deferring_client]
set rd2 [valkey_deferring_client]

$rd1 brpoplpush a{t} b{t} 0
$rd1 brpoplpush a{t} b{t} 0
@@ -2025,7 +2025,7 @@ foreach {type large} [array get largevalue] {

test "BLPOP/BLMOVE should increase dirty" {
r del lst{t} lst1{t}
set rd [redis_deferring_client]
set rd [valkey_deferring_client]

set dirty [s rdb_changes_since_last_save]
$rd blpop lst{t} 0
@@ -2049,7 +2049,7 @@ foreach {type large} [array get largevalue] {
foreach {pop} {BLPOP BLMPOP_RIGHT} {
test "client unblock tests" {
r del l
set rd [redis_deferring_client]
set rd [valkey_deferring_client]
$rd client id
set id [$rd read]

@@ -2261,8 +2261,8 @@ foreach {pop} {BLPOP BLMPOP_RIGHT} {
} {12 0 9223372036854775808 2147483647 32767 127}

test "Unblock fairness is kept while pipelining" {
set rd1 [redis_deferring_client]
set rd2 [redis_deferring_client]
set rd1 [valkey_deferring_client]
set rd2 [valkey_deferring_client]

# delete the list in case already exists
r del mylist
@@ -2296,9 +2296,9 @@ foreach {pop} {BLPOP BLMPOP_RIGHT} {
}

test "Unblock fairness is kept during nested unblock" {
set rd1 [redis_deferring_client]
set rd2 [redis_deferring_client]
set rd3 [redis_deferring_client]
set rd1 [valkey_deferring_client]
set rd2 [valkey_deferring_client]
set rd3 [valkey_deferring_client]

# delete the list in case already exists
r del l1{t} l2{t} l3{t}
@@ -2334,7 +2334,7 @@ foreach {pop} {BLPOP BLMPOP_RIGHT} {
r del mylist

# create a test client
set rd [redis_deferring_client]
set rd [valkey_deferring_client]

# reset the server stats
r config resetstat
@@ -2357,7 +2357,7 @@ foreach {pop} {BLPOP BLMPOP_RIGHT} {
r del mylist

# create a test client
set rd [redis_deferring_client]
set rd [valkey_deferring_client]
$rd client id
set id [$rd read]

@@ -2380,9 +2380,9 @@ foreach {pop} {BLPOP BLMPOP_RIGHT} {
r del src{t} dst{t} key1{t} key2{t} key3{t}
set repl [attach_to_replication_stream]

set rd1 [redis_deferring_client]
set rd2 [redis_deferring_client]
set rd3 [redis_deferring_client]
set rd1 [valkey_deferring_client]
set rd2 [valkey_deferring_client]
set rd3 [valkey_deferring_client]

$rd1 blmove src{t} dst{t} left right 0
wait_for_blocked_clients_count 1
@@ -221,7 +221,7 @@ start_server {
assert {[lindex $res 0 1 0] == {666-0 {f v}}}
r XADD mystream 667 f2 v2
r XDEL mystream 667
set rd [redis_deferring_client]
set rd [valkey_deferring_client]
$rd XREADGROUP GROUP mygroup Alice BLOCK 10 STREAMS mystream ">"
wait_for_blocked_clients_count 0
assert {[$rd read] == {}} ;# before the fix, client didn't even block, but was served synchronously with {mystream {}}
@@ -232,7 +232,7 @@ start_server {
r DEL mystream
r XADD mystream 666 f v
r XGROUP CREATE mystream mygroup $
set rd [redis_deferring_client]
set rd [valkey_deferring_client]
$rd XREADGROUP GROUP mygroup Alice BLOCK 0 STREAMS mystream ">"
wait_for_blocked_clients_count 1
r DEL mystream
@@ -244,7 +244,7 @@ start_server {
r DEL mystream
r XADD mystream 666 f v
r XGROUP CREATE mystream mygroup $
set rd [redis_deferring_client]
set rd [valkey_deferring_client]
$rd XREADGROUP GROUP mygroup Alice BLOCK 0 STREAMS mystream ">"
wait_for_blocked_clients_count 1
r SET mystream val1
@@ -256,7 +256,7 @@ start_server {
r DEL mystream
r XADD mystream 666 f v
r XGROUP CREATE mystream mygroup $
set rd [redis_deferring_client]
set rd [valkey_deferring_client]
$rd XREADGROUP GROUP mygroup Alice BLOCK 0 STREAMS mystream ">"
wait_for_blocked_clients_count 1
r MULTI
@@ -271,7 +271,7 @@ start_server {
r DEL mystream
r XADD mystream 666 f v
r XGROUP CREATE mystream mygroup $
set rd [redis_deferring_client]
set rd [valkey_deferring_client]
$rd XREADGROUP GROUP mygroup Alice BLOCK 0 STREAMS mystream ">"
wait_for_blocked_clients_count 1
r FLUSHALL
@@ -286,7 +286,7 @@ start_server {
r DEL mystream
r XADD mystream 666 f v
r XGROUP CREATE mystream mygroup $
set rd [redis_deferring_client]
set rd [valkey_deferring_client]
$rd SELECT 9
$rd read
$rd XREADGROUP GROUP mygroup Alice BLOCK 0 STREAMS mystream ">"
@@ -304,7 +304,7 @@ start_server {
r DEL mystream
r XADD mystream 666 f v
r XGROUP CREATE mystream mygroup $
set rd [redis_deferring_client]
set rd [valkey_deferring_client]
$rd SELECT 9
$rd read
$rd XREADGROUP GROUP mygroup Alice BLOCK 0 STREAMS mystream ">"
@@ -325,7 +325,7 @@ start_server {
test {Blocking XREAD: key deleted} {
r DEL mystream
r XADD mystream 666 f v
set rd [redis_deferring_client]
set rd [valkey_deferring_client]
$rd XREAD BLOCK 0 STREAMS mystream "$"
wait_for_blocked_clients_count 1
r DEL mystream
@@ -339,7 +339,7 @@ start_server {
test {Blocking XREAD: key type changed with SET} {
r DEL mystream
r XADD mystream 666 f v
set rd [redis_deferring_client]
set rd [valkey_deferring_client]
$rd XREAD BLOCK 0 STREAMS mystream "$"
wait_for_blocked_clients_count 1
r SET mystream val1
@@ -352,7 +352,7 @@ start_server {
}

test {Blocking XREADGROUP for stream that ran dry (issue #5299)} {
set rd [redis_deferring_client]
set rd [valkey_deferring_client]

# Add a entry then delete it, now stream's last_id is 666.
r DEL mystream
@@ -378,7 +378,7 @@ start_server {
}

test "Blocking XREADGROUP will ignore BLOCK if ID is not >" {
set rd [redis_deferring_client]
set rd [valkey_deferring_client]

# Add a entry then delete it, now stream's last_id is 666.
r DEL mystream
@@ -427,8 +427,8 @@ start_server {
}

test {Blocking XREADGROUP for stream key that has clients blocked on list} {
set rd [redis_deferring_client]
set rd2 [redis_deferring_client]
set rd [valkey_deferring_client]
set rd2 [valkey_deferring_client]

# First delete the stream
r DEL mystream
@@ -479,9 +479,9 @@ start_server {
r DEL mystream
r XGROUP CREATE mystream mygroup $ MKSTREAM

set rd1 [redis_deferring_client]
set rd2 [redis_deferring_client]
set rd3 [redis_deferring_client]
set rd1 [valkey_deferring_client]
set rd2 [valkey_deferring_client]
set rd3 [valkey_deferring_client]

$rd1 xreadgroup GROUP mygroup myuser COUNT 10 BLOCK 10000 STREAMS mystream >
$rd2 xreadgroup GROUP mygroup myuser COUNT 10 BLOCK 10000 STREAMS mystream >
@@ -502,8 +502,8 @@ start_server {
r DEL mystream
r XGROUP CREATE mystream mygroup $ MKSTREAM

set rd1 [redis_deferring_client]
set rd2 [redis_deferring_client]
set rd1 [valkey_deferring_client]
set rd2 [valkey_deferring_client]

$rd1 xreadgroup GROUP mygroup myuser BLOCK 0 STREAMS mystream >
wait_for_blocked_clients_count 1
@@ -530,7 +530,7 @@ start_server {
r config resetstat
r del mystream
r XGROUP CREATE mystream mygroup $ MKSTREAM
set rd [redis_deferring_client]
set rd [valkey_deferring_client]
$rd XREADGROUP GROUP mygroup Alice BLOCK 0 STREAMS mystream ">"
wait_for_blocked_clients_count 1
r XGROUP DESTROY mystream mygroup
@@ -546,7 +546,7 @@ start_server {
test {RENAME can unblock XREADGROUP with data} {
r del mystream{t}
r XGROUP CREATE mystream{t} mygroup $ MKSTREAM
set rd [redis_deferring_client]
set rd [valkey_deferring_client]
$rd XREADGROUP GROUP mygroup Alice BLOCK 0 STREAMS mystream{t} ">"
wait_for_blocked_clients_count 1
r XGROUP CREATE mystream2{t} mygroup $ MKSTREAM
@@ -559,7 +559,7 @@ start_server {
test {RENAME can unblock XREADGROUP with -NOGROUP} {
r del mystream{t}
r XGROUP CREATE mystream{t} mygroup $ MKSTREAM
set rd [redis_deferring_client]
set rd [valkey_deferring_client]
$rd XREADGROUP GROUP mygroup Alice BLOCK 0 STREAMS mystream{t} ">"
wait_for_blocked_clients_count 1
r XADD mystream2{t} 100 f1 v1
@@ -1015,7 +1015,7 @@ start_server {
r XGROUP CREATE mystream mygroup $ MKSTREAM
r XADD mystream * f1 v1
r XREADGROUP GROUP mygroup Alice NOACK STREAMS mystream ">"
set rd [redis_deferring_client]
set rd [valkey_deferring_client]
$rd XREADGROUP GROUP mygroup Bob BLOCK 0 NOACK STREAMS mystream ">"
wait_for_blocked_clients_count 1
r XADD mystream * f2 v2
@@ -1036,7 +1036,7 @@ start_server {
r XGROUP CREATE mystream mygroup $ MKSTREAM
r XADD mystream * f v
r XREADGROUP GROUP mygroup Alice NOACK STREAMS mystream ">"
set rd [redis_deferring_client]
set rd [valkey_deferring_client]
$rd XREADGROUP GROUP mygroup Bob BLOCK 0 NOACK STREAMS mystream ">"
wait_for_blocked_clients_count 1
r XGROUP CREATECONSUMER mystream mygroup Charlie
@ -337,7 +337,7 @@ start_server {

test {Blocking XREAD waiting new data} {
r XADD s2{t} * old abcd1234
set rd [redis_deferring_client]
set rd [valkey_deferring_client]
$rd XREAD BLOCK 20000 STREAMS s1{t} s2{t} s3{t} $ $ $
wait_for_blocked_client
r XADD s2{t} * new abcd1234

@ -348,7 +348,7 @@ start_server {
}

test {Blocking XREAD waiting old data} {
set rd [redis_deferring_client]
set rd [valkey_deferring_client]
$rd XREAD BLOCK 20000 STREAMS s1{t} s2{t} s3{t} $ 0-0 $
r XADD s2{t} * foo abcd1234
set res [$rd read]

@ -362,7 +362,7 @@ start_server {
r XADD s1 666 f v
r XADD s1 667 f2 v2
r XDEL s1 667
set rd [redis_deferring_client]
set rd [valkey_deferring_client]
$rd XREAD BLOCK 10 STREAMS s1 666
after 20
assert {[$rd read] == {}} ;# before the fix, client didn't even block, but was served synchronously with {s1 {}}

@ -370,7 +370,7 @@ start_server {
}

test "Blocking XREAD for stream that ran dry (issue #5299)" {
set rd [redis_deferring_client]
set rd [valkey_deferring_client]

# Add a entry then delete it, now stream's last_id is 666.
r DEL mystream

@ -444,7 +444,7 @@ start_server {
r DEL lestream

# read last entry from stream, blocking
set rd [redis_deferring_client]
set rd [valkey_deferring_client]
$rd XREAD BLOCK 20000 STREAMS lestream +
wait_for_blocked_client

@ -511,7 +511,7 @@ start_server {
}

test "XREAD: XADD + DEL should not awake client" {
set rd [redis_deferring_client]
set rd [valkey_deferring_client]
r del s1
$rd XREAD BLOCK 20000 STREAMS s1 $
wait_for_blocked_clients_count 1

@ -527,7 +527,7 @@ start_server {
}

test "XREAD: XADD + DEL + LPUSH should not awake client" {
set rd [redis_deferring_client]
set rd [valkey_deferring_client]
r del s1
$rd XREAD BLOCK 20000 STREAMS s1 $
wait_for_blocked_clients_count 1

@ -546,7 +546,7 @@ start_server {

test {XREAD with same stream name multiple times should work} {
r XADD s2 * old abcd1234
set rd [redis_deferring_client]
set rd [valkey_deferring_client]
$rd XREAD BLOCK 20000 STREAMS s2 s2 s2 $ $ $
wait_for_blocked_clients_count 1
r XADD s2 * new abcd1234

@ -558,7 +558,7 @@ start_server {

test {XREAD + multiple XADD inside transaction} {
r XADD s2 * old abcd1234
set rd [redis_deferring_client]
set rd [valkey_deferring_client]
$rd XREAD BLOCK 20000 STREAMS s2 s2 s2 $ $ $
wait_for_blocked_clients_count 1
r MULTI

@ -682,7 +682,7 @@ start_server {

test {XREAD streamID edge (blocking)} {
r del x
set rd [redis_deferring_client]
set rd [valkey_deferring_client]
$rd XREAD BLOCK 0 STREAMS x 1-18446744073709551615
wait_for_blocked_clients_count 1
r XADD x 1-1 f v
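The plain XREAD hunks exercise the same deferred, blocking read path. A minimal sketch of the "waiting new data" flow, under the same assumptions about the suite helpers and with hypothetical key names, purely for illustration:

# Hypothetical sketch: a deferred XREAD blocks on $ and is woken by XADD.
test {Blocking XREAD woken by a new entry (sketch)} {
    r del s1
    set rd [valkey_deferring_client]
    $rd XREAD BLOCK 20000 STREAMS s1 $
    wait_for_blocked_client
    r XADD s1 * field value
    set res [$rd read]
    assert_equal s1 [lindex $res 0 0]
    $rd close
}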
@ -1166,7 +1166,7 @@ start_server {tags {"zset"}} {

foreach {popmin popmax} {BZPOPMIN BZPOPMAX BZMPOP_MIN BZMPOP_MAX} {
test "$popmin/$popmax with a single existing sorted set - $encoding" {
set rd [redis_deferring_client]
set rd [valkey_deferring_client]
create_zset zset {0 a 1 b 2 c 3 d}

verify_bzpop_response $rd $popmin zset 5 0 {zset a 0} {zset {{a 0}}}

@ -1178,7 +1178,7 @@ start_server {tags {"zset"}} {
}

test "$popmin/$popmax with multiple existing sorted sets - $encoding" {
set rd [redis_deferring_client]
set rd [valkey_deferring_client]
create_zset z1{t} {0 a 1 b 2 c}
create_zset z2{t} {3 d 4 e 5 f}

@ -1195,7 +1195,7 @@ start_server {tags {"zset"}} {
}

test "$popmin/$popmax second sorted set has members - $encoding" {
set rd [redis_deferring_client]
set rd [valkey_deferring_client]
r del z1{t}
create_zset z2{t} {3 d 4 e 5 f}

@ -1228,7 +1228,7 @@ start_server {tags {"zset"}} {
foreach {popmin popmax} {BZPOPMIN BZPOPMAX BZMPOP_MIN BZMPOP_MAX} {
test "$popmin/$popmax - $encoding RESP3" {
r hello 3
set rd [redis_deferring_client]
set rd [valkey_deferring_client]
create_zset zset {0 a 1 b 2 c 3 d}

verify_bzpop_response $rd $popmin zset 5 0 {zset a 0} {zset {{a 0}}}

@ -1334,7 +1334,7 @@ start_server {tags {"zset"}} {
} {} {needs:repl}

foreach resp {3 2} {
set rd [redis_deferring_client]
set rd [valkey_deferring_client]

if {[lsearch $::denytags "resp3"] >= 0} {
if {$resp == 3} {continue}

@ -1952,7 +1952,7 @@ start_server {tags {"zset"}} {

foreach {pop} {BZPOPMIN BZMPOP_MIN} {
test "$pop, ZADD + DEL should not awake blocked client" {
set rd [redis_deferring_client]
set rd [valkey_deferring_client]
r del zset

bzpop_command $rd $pop zset 0

@ -1970,7 +1970,7 @@ start_server {tags {"zset"}} {
}

test "$pop, ZADD + DEL + SET should not awake blocked client" {
set rd [redis_deferring_client]
set rd [valkey_deferring_client]
r del zset

bzpop_command $rd $pop zset 0

@ -1992,7 +1992,7 @@ start_server {tags {"zset"}} {
test {BZPOPMIN unblock but the key is expired and then block again - reprocessing command} {
r flushall
r debug set-active-expire 0
set rd [redis_deferring_client]
set rd [valkey_deferring_client]

set start [clock milliseconds]
$rd bzpopmin zset{t} 1

@ -2018,7 +2018,7 @@ start_server {tags {"zset"}} {
} {0} {needs:debug}

test "BZPOPMIN with same key multiple times should work" {
set rd [redis_deferring_client]
set rd [valkey_deferring_client]
r del z1{t} z2{t}

# Data arriving after the BZPOPMIN.

@ -2043,7 +2043,7 @@ start_server {tags {"zset"}} {

foreach {pop} {BZPOPMIN BZMPOP_MIN} {
test "MULTI/EXEC is isolated from the point of view of $pop" {
set rd [redis_deferring_client]
set rd [valkey_deferring_client]
r del zset

bzpop_command $rd $pop zset 0

@ -2060,7 +2060,7 @@ start_server {tags {"zset"}} {
}

test "$pop with variadic ZADD" {
set rd [redis_deferring_client]
set rd [valkey_deferring_client]
r del zset
if {$::valgrind} {after 100}
bzpop_command $rd $pop zset 0

@ -2074,7 +2074,7 @@ start_server {tags {"zset"}} {
}

test "$pop with zero timeout should block indefinitely" {
set rd [redis_deferring_client]
set rd [valkey_deferring_client]
r del zset
bzpop_command $rd $pop zset 0
wait_for_blocked_client

@ -2132,10 +2132,10 @@ start_server {tags {"zset"}} {
}

test "BZMPOP with multiple blocked clients" {
set rd1 [redis_deferring_client]
set rd2 [redis_deferring_client]
set rd3 [redis_deferring_client]
set rd4 [redis_deferring_client]
set rd1 [valkey_deferring_client]
set rd2 [valkey_deferring_client]
set rd3 [valkey_deferring_client]
set rd4 [valkey_deferring_client]
r del myzset{t} myzset2{t}

$rd1 bzmpop 0 2 myzset{t} myzset2{t} min count 1

@ -2167,7 +2167,7 @@ start_server {tags {"zset"}} {
}

test "BZMPOP propagate as pop with count command to replica" {
set rd [redis_deferring_client]
set rd [valkey_deferring_client]
set repl [attach_to_replication_stream]

# BZMPOP without being blocked.

@ -2213,8 +2213,8 @@ start_server {tags {"zset"}} {
} {} {needs:repl}

test "BZMPOP should not blocks on non key arguments - #10762" {
set rd1 [redis_deferring_client]
set rd2 [redis_deferring_client]
set rd1 [valkey_deferring_client]
set rd2 [valkey_deferring_client]
r del myzset myzset2 myzset3

$rd1 bzmpop 0 1 myzset min count 10
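The sorted-set hunks rename the helper inside the BZPOPMIN/BZPOPMAX/BZMPOP blocking tests. A minimal sketch of that pattern, again assuming the suite helpers behave as in the hunks above and using hypothetical key names; it is not part of the commit:

# Hypothetical sketch: a deferred BZPOPMIN blocks until ZADD supplies a member.
test {BZPOPMIN woken by ZADD (sketch)} {
    r del zset
    set rd [valkey_deferring_client]
    $rd bzpopmin zset 0
    wait_for_blocked_client
    r zadd zset 1 a
    assert_equal {zset a 1} [$rd read]  ;# key, member, score
    $rd close
}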
@ -70,8 +70,8 @@ start_server {} {
}

test {WAIT replica multiple clients unblock - reuse last result} {
set rd [redis_deferring_client -1]
set rd2 [redis_deferring_client -1]
set rd [valkey_deferring_client -1]
set rd2 [valkey_deferring_client -1]

pause_process $slave_pid

@ -125,7 +125,7 @@ tags {"wait aof network external:skip"} {

test {WAITAOF local wait and then stop aof} {
r config set appendfsync no
set rd [redis_deferring_client]
set rd [valkey_deferring_client]
$rd incr foo
$rd read
$rd waitaof 1 0 0

@ -187,7 +187,7 @@ tags {"wait aof network external:skip"} {
$replica config set appendfsync no

test {WAITAOF on demoted master gets unblocked with an error} {
set rd [redis_deferring_client]
set rd [valkey_deferring_client]
$rd incr foo
$rd read
$rd waitaof 0 1 0

@ -268,8 +268,8 @@ tags {"wait aof network external:skip"} {
}

test {WAITAOF replica multiple clients unblock - reuse last result} {
set rd [redis_deferring_client -1]
set rd2 [redis_deferring_client -1]
set rd [valkey_deferring_client -1]
set rd2 [valkey_deferring_client -1]

pause_process $replica_pid

@ -311,7 +311,7 @@ tags {"wait aof network external:skip"} {
}

test {WAITAOF master without backlog, wait is released when the replica finishes full-sync} {
set rd [redis_deferring_client -1]
set rd [valkey_deferring_client -1]
$rd incr foo
$rd read
$rd waitaof 0 1 0

@ -401,8 +401,8 @@ tags {"wait aof network external:skip"} {
}

# add some writes and block a client on each master
set rd [redis_deferring_client -3]
set rd2 [redis_deferring_client -1]
set rd [valkey_deferring_client -3]
set rd2 [valkey_deferring_client -1]
$rd set boo 11
$rd2 set boo 22
$rd read

@ -454,8 +454,8 @@ start_server {} {
}

test {WAIT and WAITAOF replica multiple clients unblock - reuse last result} {
set rd [redis_deferring_client]
set rd2 [redis_deferring_client]
set rd [valkey_deferring_client]
set rd2 [valkey_deferring_client]

$master config set appendonly yes
$replica1 config set appendonly yes
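The WAIT/WAITAOF hunks use the same deferred-client idiom to park a blocking command and collect its reply later. A minimal sketch of that idiom, assuming AOF is enabled on the server and that the reply is the usual {numlocal numreplicas} pair; illustrative only and not part of this commit:

# Hypothetical sketch: WAITAOF on a deferring client returns {numlocal numreplicas}
# once the local AOF has been fsynced (may take up to the fsync interval).
test {WAITAOF local fsync acknowledged (sketch)} {
    r config set appendonly yes
    set rd [valkey_deferring_client]
    $rd incr foo
    $rd read
    $rd waitaof 1 0 0
    assert_equal {1 0} [$rd read]
    $rd close
}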