futriix/tests/unit/aofrw.tcl
sundb 02fd76b97c
Replace all usage of ziplist with listpack for t_hash (#8887)
Part one of implementing #8702 (taking hashes first before other types)

## Description of the feature
1. Change ziplist-encoded hash objects to listpack encoding.
2. Convert existing ziplists at RDB load time, an O(n) operation (see the sketch below).
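
As a rough illustration only (not code from this PR), a Tcl sketch in the style of the tests below could verify that a small hash reports the new encoding and keeps it across an RDB round trip; the tag and key name are made up for the example, and exercising the actual ziplist-to-listpack upgrade path would additionally require an RDB file written by an older server:

```tcl
start_server {tags {"hash-enc-sketch external:skip"}} {
    test "Small hash is listpack encoded and survives an RDB reload" {
        r del myhash
        for {set j 0} {$j < 5} {incr j} {
            r hset myhash field:$j value:$j
        }
        assert_equal [r object encoding myhash] listpack

        # Save to RDB and load it back; the encoding should still be listpack.
        r debug reload
        assert_equal [r object encoding myhash] listpack
    }
}
```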

## RDB format changes
1. Add the RDB_TYPE_HASH_LISTPACK RDB type.
2. Bump RDB_VERSION to 10.

## Interface changes
1. New `hash-max-listpack-entries` config is an alias for `hash-max-ziplist-entries` (same for `hash-max-listpack-value`).
2. OBJECT ENCODING will return `listpack` instead of `ziplist` (see the sketch below).
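
As a minimal sketch (hypothetical test code, not part of this file), both points could be asserted with the same helpers the tests below already use; the tag and key name are invented for the example:

```tcl
start_server {tags {"hash-enc-sketch external:skip"}} {
    test "hash-max-listpack-entries aliases hash-max-ziplist-entries" {
        # Setting the new config name should be readable through the old alias.
        r config set hash-max-listpack-entries 128
        assert_equal 128 [lindex [r config get hash-max-ziplist-entries] 1]

        # A small hash now reports listpack rather than ziplist.
        r del myhash
        r hset myhash field value
        assert_equal [r object encoding myhash] listpack
    }
}
```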

## Listpack improvements
1. Support direct insert and replace of integer elements (rather than converting back and forth from strings).
2. Add more listpack capabilities to match the ziplist ones (like `lpFind`, `lpRandomPairs` and such).
3. Optimize element length fetching, avoiding repeated calculations.
4. Use inline functions to avoid function call overhead.

## Tests
1. Add a new test for the RDB load-time conversion.
2. Add listpack unit tests (based on the ones in ziplist.c).
3. Add a few "corrupt payload: fuzzer findings" tests, and slightly modify existing ones.

Co-authored-by: Oran Agra <oran@redislabs.com>
2021-08-10 09:18:49 +03:00

start_server {tags {"aofrw external:skip"}} {
    # Enable the AOF
    r config set appendonly yes
    r config set auto-aof-rewrite-percentage 0 ; # Disable auto-rewrite.
    waitForBgrewriteaof r

    foreach rdbpre {yes no} {
        r config set aof-use-rdb-preamble $rdbpre
        test "AOF rewrite during write load: RDB preamble=$rdbpre" {
            # Start a write load for 10 seconds
            set master [srv 0 client]
            set master_host [srv 0 host]
            set master_port [srv 0 port]
            set load_handle0 [start_write_load $master_host $master_port 10]
            set load_handle1 [start_write_load $master_host $master_port 10]
            set load_handle2 [start_write_load $master_host $master_port 10]
            set load_handle3 [start_write_load $master_host $master_port 10]
            set load_handle4 [start_write_load $master_host $master_port 10]

            # Make sure the instance is really receiving data
            wait_for_condition 50 100 {
                [r dbsize] > 0
            } else {
                fail "No write load detected."
            }

            # After 3 seconds, start a rewrite while the write load is still
            # active.
            after 3000
            r bgrewriteaof
            waitForBgrewriteaof r

            # Let it run a bit more so that we'll append some data to the new
            # AOF.
            after 1000

            # Stop the processes generating the load if they are still active
            stop_write_load $load_handle0
            stop_write_load $load_handle1
            stop_write_load $load_handle2
            stop_write_load $load_handle3
            stop_write_load $load_handle4

            # Make sure no more commands are processed before taking the debug digest
            wait_load_handlers_disconnected

            # Get the data set digest
            set d1 [r debug digest]

            # Load the AOF
            r debug loadaof
            set d2 [r debug digest]

            # Make sure they are the same
            assert {$d1 eq $d2}
        }
    }
}

start_server {tags {"aofrw external:skip"} overrides {aof-use-rdb-preamble no}} {
    test {Turning off AOF kills the background writing child if any} {
        r config set appendonly yes
        waitForBgrewriteaof r
        r multi
        r bgrewriteaof
        r config set appendonly no
        r exec
        wait_for_condition 50 100 {
            [string match {*Killing*AOF*child*} [exec tail -5 < [srv 0 stdout]]]
        } else {
            fail "Can't find 'Killing AOF child' in recent logs"
        }
    }

    foreach d {string int} {
        foreach e {quicklist} {
            test "AOF rewrite of list with $e encoding, $d data" {
                r flushall
                set len 1000
                for {set j 0} {$j < $len} {incr j} {
                    if {$d eq {string}} {
                        set data [randstring 0 16 alpha]
                    } else {
                        set data [randomInt 4000000000]
                    }
                    r lpush key $data
                }
                assert_equal [r object encoding key] $e
                set d1 [r debug digest]
                r bgrewriteaof
                waitForBgrewriteaof r
                r debug loadaof
                set d2 [r debug digest]
                if {$d1 ne $d2} {
                    error "assertion:$d1 is not equal to $d2"
                }
            }
        }
    }

    foreach d {string int} {
        foreach e {intset hashtable} {
            test "AOF rewrite of set with $e encoding, $d data" {
                r flushall
                if {$e eq {intset}} {set len 10} else {set len 1000}
                for {set j 0} {$j < $len} {incr j} {
                    if {$d eq {string}} {
                        set data [randstring 0 16 alpha]
                    } else {
                        set data [randomInt 4000000000]
                    }
                    r sadd key $data
                }
                if {$d ne {string}} {
                    assert_equal [r object encoding key] $e
                }
                set d1 [r debug digest]
                r bgrewriteaof
                waitForBgrewriteaof r
                r debug loadaof
                set d2 [r debug digest]
                if {$d1 ne $d2} {
                    error "assertion:$d1 is not equal to $d2"
                }
            }
        }
    }

    foreach d {string int} {
        foreach e {listpack hashtable} {
            test "AOF rewrite of hash with $e encoding, $d data" {
                r flushall
                if {$e eq {listpack}} {set len 10} else {set len 1000}
                for {set j 0} {$j < $len} {incr j} {
                    if {$d eq {string}} {
                        set data [randstring 0 16 alpha]
                    } else {
                        set data [randomInt 4000000000]
                    }
                    r hset key $data $data
                }
                assert_equal [r object encoding key] $e
                set d1 [r debug digest]
                r bgrewriteaof
                waitForBgrewriteaof r
                r debug loadaof
                set d2 [r debug digest]
                if {$d1 ne $d2} {
                    error "assertion:$d1 is not equal to $d2"
                }
            }
        }
    }

    foreach d {string int} {
        foreach e {ziplist skiplist} {
            test "AOF rewrite of zset with $e encoding, $d data" {
                r flushall
                if {$e eq {ziplist}} {set len 10} else {set len 1000}
                for {set j 0} {$j < $len} {incr j} {
                    if {$d eq {string}} {
                        set data [randstring 0 16 alpha]
                    } else {
                        set data [randomInt 4000000000]
                    }
                    r zadd key [expr rand()] $data
                }
                assert_equal [r object encoding key] $e
                set d1 [r debug digest]
                r bgrewriteaof
                waitForBgrewriteaof r
                r debug loadaof
                set d2 [r debug digest]
                if {$d1 ne $d2} {
                    error "assertion:$d1 is not equal to $d2"
                }
            }
        }
    }

    test {BGREWRITEAOF is delayed if BGSAVE is in progress} {
        r multi
        r bgsave
        r bgrewriteaof
        r info persistence
        set res [r exec]
        assert_match {*scheduled*} [lindex $res 1]
        assert_match {*aof_rewrite_scheduled:1*} [lindex $res 2]
        while {[string match {*aof_rewrite_scheduled:1*} [r info persistence]]} {
            after 100
        }
    }

    test {BGREWRITEAOF is refused if already in progress} {
        catch {
            r multi
            r bgrewriteaof
            r bgrewriteaof
            r exec
        } e
        assert_match {*ERR*already*} $e
        while {[string match {*aof_rewrite_scheduled:1*} [r info persistence]]} {
            after 100
        }
    }
}