Add a --no-latency tests flag. (#7939)
Useful for running tests on systems that may be much slower than usual.
parent 6cf23d6610
commit 843a13e88f
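
Note: the flag works by introducing a global ::no_latency toggle in the test helper (0 by default, set to 1 when --no-latency is given), which individual tests consult before making timing-sensitive assertions. A minimal sketch of the guard pattern that the hunks below apply (the 30ms budget is the one the defrag tests below use):

    # Only enforce the latency budget when --no-latency was not given.
    if {!$::no_latency} {
        assert {$max_latency <= 30}
    }

The daily CI workflow opts in explicitly (./runtest --accurate --verbose --no-latency), presumably because those runners can be much slower than a typical development machine.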
.github/workflows/daily.yml
@@ -200,7 +200,7 @@ jobs:
       run: make
     - name: test
       run: |
-        ./runtest --accurate --verbose
+        ./runtest --accurate --verbose --no-latency
     - name: module api test
       run: ./runtest-moduleapi --verbose
     - name: sentinel tests
@@ -655,11 +655,11 @@ start_server {tags {"repl"}} {
             puts "master utime: $master_utime"
             puts "master stime: $master_stime"
         }
-        if {$all_drop == "all" || $all_drop == "slow"} {
+        if {!$::no_latency && ($all_drop == "all" || $all_drop == "slow")} {
             assert {$master_utime < 70}
             assert {$master_stime < 70}
         }
-        if {$all_drop == "none" || $all_drop == "fast"} {
+        if {!$::no_latency && ($all_drop == "none" || $all_drop == "fast")} {
            assert {$master_utime < 15}
            assert {$master_stime < 15}
        }
@@ -88,6 +88,7 @@ set ::quiet 0
 set ::denytags {}
 set ::skiptests {}
 set ::skipunits {}
+set ::no_latency 0
 set ::allowtags {}
 set ::only_tests {}
 set ::single_tests {}
@@ -540,6 +541,7 @@ proc print_help_screen {} {
         "--skipfile <file> Name of a file containing test names that should be skipped (one per line)."
         "--skiptest <name> Name of a file containing test names that should be skipped (one per line)."
         "--dont-clean Don't delete redis log files after the run."
+        "--no-latency Skip latency measurements and validation by some tests."
         "--stop Blocks once the first test fails."
         "--loop Execute the specified set of tests forever."
         "--wait-server Wait after server is started (so that you can attach a debugger)."
@@ -641,6 +643,8 @@ for {set j 0} {$j < [llength $argv]} {incr j} {
         set ::durable 1
     } elseif {$opt eq {--dont-clean}} {
         set ::dont_clean 1
+    } elseif {$opt eq {--no-latency}} {
+        set ::no_latency 1
     } elseif {$opt eq {--wait-server}} {
         set ::wait_server 1
     } elseif {$opt eq {--stop}} {
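
Taken together, the three helper hunks above add the default (set ::no_latency 0), the help-screen entry, and the command-line parsing. A new test could opt in to the same mechanism by consulting the global before any timing assertion; a hypothetical sketch (the test name, the PING round trip, and the 100 ms budget are made up for illustration and are not part of this commit):

    # Hypothetical example: measure a round trip and only enforce the budget
    # when latency checks are enabled (i.e. --no-latency was not passed).
    test {PING stays under an illustrative latency budget} {
        set start [clock milliseconds]
        r ping
        set elapsed [expr {[clock milliseconds] - $start}]
        if {!$::no_latency} {
            assert {$elapsed < 100}
        }
    }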
@@ -17,7 +17,9 @@ start_server {tags {"latency-monitor"}} {
         set max 450
         foreach event [r latency history command] {
             lassign $event time latency
+            if {!$::no_latency} {
             assert {$latency >= $min && $latency <= $max}
+            }
             incr min 100
             incr max 100
             set last_time $time ; # Used in the next test
@@ -28,8 +30,10 @@ start_server {tags {"latency-monitor"}} {
         foreach event [r latency latest] {
             lassign $event eventname time latency max
             assert {$eventname eq "command"}
+            if {!$::no_latency} {
             assert {$max >= 450 & $max <= 650}
             assert {$time == $last_time}
+            }
             break
         }
     }
@@ -105,8 +105,10 @@ start_server {tags {"defrag"} overrides {appendonly yes auto-aof-rewrite-percent
             assert {$frag < 1.1}
             # due to high fragmentation, 100hz, and active-defrag-cycle-max set to 75,
             # we expect max latency to be not much higher than 7.5ms but due to rare slowness threshold is set higher
+            if {!$::no_latency} {
             assert {$max_latency <= 30}
             }
+        }
         # verify the data isn't corrupted or changed
         set newdigest [r debug digest]
         assert {$digest eq $newdigest}
@@ -148,9 +150,11 @@ start_server {tags {"defrag"} overrides {appendonly yes auto-aof-rewrite-percent
             assert {$frag < 1.4}
             # since the AOF contains simple (fast) SET commands (and the cron during loading runs every 1000 commands),
             # it'll still not block the loading for long periods of time.
+            if {!$::no_latency} {
             assert {$max_latency <= 30}
             }
         }
+        }
         r config set appendonly no
         r config set key-load-delay 0

@@ -273,8 +277,10 @@ start_server {tags {"defrag"} overrides {appendonly yes auto-aof-rewrite-percent
             assert {$frag < 1.1}
             # due to high fragmentation, 100hz, and active-defrag-cycle-max set to 75,
             # we expect max latency to be not much higher than 7.5ms but due to rare slowness threshold is set higher
+            if {!$::no_latency} {
             assert {$max_latency <= 30}
             }
+        }
         # verify the data isn't corrupted or changed
         set newdigest [r debug digest]
         assert {$digest eq $newdigest}
@@ -368,7 +374,9 @@ start_server {tags {"defrag"} overrides {appendonly yes auto-aof-rewrite-percent
             assert {$frag < 1.1}
             # due to high fragmentation, 100hz, and active-defrag-cycle-max set to 75,
             # we expect max latency to be not much higher than 7.5ms but due to rare slowness threshold is set higher
+            if {!$::no_latency} {
             assert {$max_latency <= 30}
+            }

             # in extreme cases of stagnation, we see over 20m misses before the tests aborts with "defrag didn't stop",
             # in normal cases we only see 100k misses out of 500k elements