Merge branch 'async_commands' into 'keydbpro'

async commands

See merge request external-collab/keydb-pro-6!10

Former-commit-id: 669987028af582a8561b7959c1b33b84f67c6146
This commit is contained in:
Malavan Sotheeswaran 2021-11-30 17:28:17 +00:00
commit d575cc1e0c
63 changed files with 3334 additions and 1088 deletions

View File

@ -1,21 +0,0 @@
---
name: Question
about: Ask the Redis developers
title: '[QUESTION]'
labels: ''
assignees: ''
---
Please keep in mind that this issue tracker should be used for reporting bugs or proposing improvements to the Redis server.
Generally, questions about using Redis should be directed to the [community](https://redis.io/community):
* [the mailing list](https://groups.google.com/forum/#!forum/redis-db)
* [the `redis` tag at StackOverflow](http://stackoverflow.com/questions/tagged/redis)
* [/r/redis subreddit](http://www.reddit.com/r/redis)
* [the irc channel #redis](http://webchat.freenode.net/?channels=redis) on freenode
It is also possible that your question was already asked here, so please do a quick issues search before submitting. Lastly, if your question is about one of Redis' [clients](https://redis.io/clients), you may need to contact your client's developers for help.
That said, please feel free to replace all this with your question :)

3
.gitignore vendored
View File

@ -29,6 +29,7 @@ redis-check-rdb
keydb-check-rdb
redis-check-dump
keydb-check-dump
keydb-diagnostic-tool
redis-cli
redis-sentinel
redis-server
@ -57,4 +58,4 @@ Makefile.dep
.ccls
.ccls-cache/*
compile_commands.json
redis.code-workspace
keydb.code-workspace

View File

@ -1,90 +1,107 @@
build:
.standard-pipeline:
rules:
- if: '$COVERAGE'
when: never
- if: '$ENDURANCE'
when: never
- when: always
- if: '$CI_PIPELINE_SOURCE == "push"'
build:
extends: .standard-pipeline
tags:
- docker
stage: build
script:
- git submodule init && git submodule update
- git submodule update --init
- make distclean
- make -j
artifacts:
paths:
- src/
make-test:
rules:
- if: '$COVERAGE'
when: never
- if: '$ENDURANCE'
when: never
- when: always
runtest:
extends: .standard-pipeline
dependencies:
- build
tags:
- docker
stage: test
script:
- git submodule init && git submodule update
- make distclean
- make -j
- make test -j
- ./runtest --config server-threads 3
runtest-cluster:
extends: .standard-pipeline
dependencies:
- build
tags:
- docker
stage: test
script:
- ./runtest-cluster
runtest-moduleapi:
extends: .standard-pipeline
dependencies:
- build
tags:
- docker
stage: test
script:
- ./runtest-moduleapi
runtest-sentinel:
extends: .standard-pipeline
dependencies:
- build
tags:
- docker
stage: test
script:
- ./runtest-sentinel
node-redis-test:
extends: .standard-pipeline
dependencies:
- build
rules:
- if: '$COVERAGE'
when: never
- if: '$ENDURANCE'
when: never
- when: always
- when: never
tags:
- docker
- ipv6
stage: test
script:
- git submodule init && git submodule update
- make distclean
- make -j
- make install
- cp -pf src/keydb-server /usr/local/bin
- cp -pf src/keydb-cli /usr/local/bin
- git clone https://gitlab-ci-token:${CI_JOB_TOKEN}@gitlab.eqalpha.com/keydb-dev/node-redis.git
- cd node-redis
- npm install
- npm run test
jedis-test:
rules:
- if: '$COVERAGE'
when: never
- if: '$ENDURANCE'
when: never
- when: always
extends: .standard-pipeline
dependencies:
- build
tags:
- docker
- ipv4
stage: test
script:
- git submodule init && git submodule update
- make distclean
- make -j
- make install
- cp -pf src/keydb-server /usr/local/bin
- cp -pf src/keydb-cli /usr/local/bin
- git clone https://gitlab-ci-token:${CI_JOB_TOKEN}@gitlab.eqalpha.com/keydb-dev/jedis.git
- cd jedis
- make test
redis-rs-test:
rules:
- if: '$COVERAGE'
when: never
- if: '$ENDURANCE'
when: never
- when: always
extends: .standard-pipeline
dependencies:
- build
tags:
- docker
stage: test
script:
- git submodule init && git submodule update
- make distclean
- make -j
- make install
- cp -pf src/keydb-server /usr/local/bin
- cp -pf src/keydb-cli /usr/local/bin
- git clone https://gitlab-ci-token:${CI_JOB_TOKEN}@gitlab.eqalpha.com/keydb-dev/redis-rs.git
- cd redis-rs
- make test
@ -96,10 +113,10 @@ endurance-test:
- docker
stage: test
script:
- git submodule init && git submodule update
- git submodule update --init
- make distclean
- make -j
- ./runtest --loop --stop
- ./runtest --config server-threads 3 --loop --stop
coverage-test:
rules:
@ -108,7 +125,7 @@ coverage-test:
- docker
stage: test
script:
- git submodule init && git submodule update
- git submodule update --init
- make distclean
- make gcov -j
- make install

View File

@ -183,7 +183,7 @@ To compile against jemalloc on Mac OS X systems, use:
Monotonic clock
---------------
By default, Redis will build using the POSIX clock_gettime function as the
By default, KeyDB will build using the POSIX clock_gettime function as the
monotonic clock source. On most modern systems, the internal processor clock
can be used to improve performance. Cautions can be found here:
http://oliveryang.net/2015/09/pitfalls-of-TSC-usage/

4
TLS.md
View File

@ -28,8 +28,8 @@ To manually run a Redis server with TLS mode (assuming `gen-test-certs.sh` was
invoked so sample certificates/keys are available):
./src/keydb-server --tls-port 6379 --port 0 \
--tls-cert-file ./tests/tls/keydb.crt \
--tls-key-file ./tests/tls/keydb.key \
--tls-cert-file ./tests/tls/client.crt \
--tls-key-file ./tests/tls/client.key \
--tls-ca-cert-file ./tests/tls/ca.crt
To connect to this Redis server with `keydb-cli`:

2
deps/rocksdb vendored

@ -1 +1 @@
Subproject commit e3169e3ea8762d2f34880742106858a23c8dc8b7
Subproject commit c3034fce329017036c807e01261729bfc11a5d62

View File

@ -32,8 +32,17 @@
# If instead you are interested in using includes to override configuration
# options, it is better to use include as the last line.
#
# Included paths may contain wildcards. All files matching the wildcards will
# be included in alphabetical order.
# Note that if an include path contains wildcards but no files match it when
# the server is started, the include statement will be ignored and no error will
# be emitted. It is safe, therefore, to include wildcard files from empty
# directories.
#
# include /path/to/local.conf
# include /path/to/other.conf
# include /path/to/fragments/*.conf
#
################################## MODULES #####################################
@ -49,23 +58,32 @@
# for connections from all available network interfaces on the host machine.
# It is possible to listen to just one or multiple selected interfaces using
# the "bind" configuration directive, followed by one or more IP addresses.
# Each address can be prefixed by "-", which means that redis will not fail to
# start if the address is not available. Being not available only refers to
# addresses that do not correspond to any network interface. Addresses that
# are already in use will always fail, and unsupported protocols will always be
# silently skipped.
#
# Examples:
#
# bind 192.168.1.100 10.0.0.1
# bind 127.0.0.1 ::1
# bind 192.168.1.100 10.0.0.1 # listens on two specific IPv4 addresses
# bind 127.0.0.1 ::1 # listens on loopback IPv4 and IPv6
# bind * -::* # like the default, all available interfaces
#
# ~~~ WARNING ~~~ If the computer running KeyDB is directly exposed to the
# internet, binding to all the interfaces is dangerous and will expose the
# instance to everybody on the internet. So by default we uncomment the
# following bind directive, that will force KeyDB to listen only on the
# IPv4 loopback interface address (this means KeyDB will only be able to
# IPv4 and IPv6 (if available) loopback interface addresses (this means KeyDB will only be able to
# accept client connections from the same host that it is running on).
#
# IF YOU ARE SURE YOU WANT YOUR INSTANCE TO LISTEN TO ALL THE INTERFACES
# JUST COMMENT OUT THE FOLLOWING LINE.
#
# You will also need to set a password unless you explicitly disable protected
# mode.
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
bind 127.0.0.1
bind 127.0.0.1 -::1
# Protected mode is a layer of security protection, in order to avoid that
# KeyDB instances left open on the internet are accessed and exploited.
@ -125,7 +143,7 @@ timeout 0
# On other kernels the period depends on the kernel configuration.
#
# A reasonable value for this option is 300 seconds, which is the new
# KeyDB default starting with Redis 3.2.1.
# KeyDB default starting with KeyDB 3.2.1.
tcp-keepalive 300
################################# TLS/SSL #####################################
@ -141,15 +159,37 @@ tcp-keepalive 300
# server to connected clients, masters or cluster peers. These files should be
# PEM formatted.
#
# tls-cert-file redis.crt
# tls-key-file redis.key
# tls-cert-file keydb.crt
# tls-key-file keydb.key
#
# If the key file is encrypted using a passphrase, it can be included here
# as well.
#
# tls-key-file-pass secret
# Normally KeyDB uses the same certificate for both server functions (accepting
# connections) and client functions (replicating from a master, establishing
# cluster bus connections, etc.).
#
# Sometimes certificates are issued with attributes that designate them as
# client-only or server-only certificates. In that case it may be desired to use
# different certificates for incoming (server) and outgoing (client)
# connections. To do that, use the following directives:
#
# tls-client-cert-file client.crt
# tls-client-key-file client.key
#
# If the key file is encrypted using a passphrase, it can be included here
# as well.
#
# tls-client-key-file-pass secret
# Configure a DH parameters file to enable Diffie-Hellman (DH) key exchange:
#
# tls-dh-params-file redis.dh
# tls-dh-params-file keydb.dh
# Configure a CA certificate(s) bundle or directory to authenticate TLS/SSL
# clients and peers. Redis requires an explicit configuration of at least one
# clients and peers. KeyDB requires an explicit configuration of at least one
# of these, and will not implicitly use the system wide configuration.
#
# tls-ca-cert-file ca.crt
@ -172,7 +212,7 @@ tcp-keepalive 300
#
# tls-replication yes
# By default, the Redis Cluster bus uses a plain TCP connection. To enable
# By default, the KeyDB Cluster bus uses a plain TCP connection. To enable
# TLS for the bus protocol, use the following directive:
#
# tls-cluster yes
@ -269,6 +309,16 @@ logfile ""
# Specify the syslog facility. Must be USER or between LOCAL0-LOCAL7.
# syslog-facility local0
# To disable the built in crash log, which will possibly produce cleaner core
# dumps when they are needed, uncomment the following:
#
# crash-log-enabled no
# To disable the fast memory check that's run as part of the crash log, which
# will possibly let keydb terminate sooner, uncomment the following:
#
# crash-memcheck-enabled no
# Set the number of databases. The default database is DB 0, you can select
# a different one on a per-connection basis using SELECT <dbid> where
# dbid is a number between 0 and 'databases'-1
@ -282,9 +332,31 @@ databases 16
# ASCII art logo in startup logs by setting the following option to yes.
always-show-logo yes
# By default, KeyDB modifies the process title (as seen in 'top' and 'ps') to
# provide some runtime information. It is possible to disable this and leave
# the process name as executed by setting the following to no.
set-proc-title yes
# Retrieving "message of today" using CURL requests.
#enable-motd yes
# When changing the process title, KeyDB uses the following template to construct
# the modified title.
#
# Template variables are specified in curly brackets. The following variables are
# supported:
#
# {title} Name of process as executed if parent, or type of child process.
# {listen-addr} Bind address or '*' followed by TCP or TLS port listening on, or
# Unix socket if only that's available.
# {server-mode} Special mode, i.e. "[sentinel]" or "[cluster]".
# {port} TCP port listening on, or 0.
# {tls-port} TLS port listening on, or 0.
# {unixsocket} Unix domain socket listening on, or "".
# {config-file} Name of configuration file used.
#
proc-title-template "{title} {listen-addr} {server-mode}"
################################ SNAPSHOTTING ################################
#
# Save the DB on disk:
@ -299,8 +371,6 @@ always-show-logo yes
# after 300 sec (5 min) if at least 10 keys changed
# after 60 sec if at least 10000 keys changed
#
# Note: you can disable saving completely by commenting out all "save" lines.
#
# It is also possible to remove all the previously configured save
# points by adding a save directive with a single empty string argument
# like in the following example:
@ -341,6 +411,21 @@ rdbcompression yes
# tell the loading code to skip the check.
rdbchecksum yes
# Enables or disables full sanitation checks for ziplist and listpack etc when
# loading an RDB or RESTORE payload. This reduces the chances of an assertion or
# crash later on while processing commands.
# Options:
# no - Never perform full sanitation
# yes - Always perform full sanitation
# clients - Perform full sanitation only for user connections.
# Excludes: RDB files, RESTORE commands received from the master
# connection, and client connections which have the
# skip-sanitize-payload ACL flag.
# The default should be 'clients' but since it currently affects cluster
# resharding via MIGRATE, it is temporarily set to 'no' by default.
#
# sanitize-dump-payload no
# The filename where to dump the DB
dbfilename dump.rdb
@ -397,7 +482,7 @@ dir ./
#
# masterauth <master-password>
#
# However this is not enough if you are using KeyDB ACLs (for Redis version
# However this is not enough if you are using KeyDB ACLs (for KeyDB version
# 6 or greater), and the default user is not capable of running the PSYNC
# command and/or other commands needed for replication (gathered in the
# @replication group). In this case it's better to configure a special user to
@ -443,7 +528,7 @@ replica-serve-stale-data yes
# may also cause problems if clients are writing to it because of a
# misconfiguration.
#
# Since Redis 2.6 by default replicas are read-only.
# Since KeyDB 2.6 by default replicas are read-only.
#
# Note: read only replicas are not designed to be exposed to untrusted clients
# on the internet. It's just a protection layer against misuse of the instance.
@ -595,6 +680,18 @@ repl-disable-tcp-nodelay no
# By default the priority is 100.
replica-priority 100
# -----------------------------------------------------------------------------
# By default, KeyDB Sentinel includes all replicas in its reports. A replica
# can be excluded from KeyDB Sentinel's announcements. An unannounced replica
# will be ignored by the 'sentinel replicas <master>' command and won't be
# exposed to KeyDB Sentinel's clients.
#
# This option does not change the behavior of replica-priority. Even with
# replica-announced set to 'no', the replica can be promoted to master. To
# prevent this behavior, set replica-priority to 0.
#
# replica-announced yes
# It is possible for a master to stop accepting writes if there are less than
# N replicas connected, having a lag less or equal than M seconds.
#
@ -714,6 +811,8 @@ replica-priority 100
# off Disable the user: it's no longer possible to authenticate
# with this user, however the already authenticated connections
# will still work.
# skip-sanitize-payload RESTORE dump-payload sanitation is skipped.
# sanitize-payload RESTORE dump-payload is sanitized (default).
# +<command> Allow the execution of that command
# -<command> Disallow the execution of that command
# +@<category> Allow the execution of all the commands in such category
@ -736,6 +835,11 @@ replica-priority 100
# It is possible to specify multiple patterns.
# allkeys Alias for ~*
# resetkeys Flush the list of allowed keys patterns.
# &<pattern> Add a glob-style pattern of Pub/Sub channels that can be
# accessed by the user. It is possible to specify multiple channel
# patterns.
# allchannels Alias for &*
# resetchannels Flush the list of allowed channel patterns.
# ><password> Add this password to the list of valid password for the user.
# For example >mypass will add "mypass" to the list.
# This directive clears the "nopass" flag (see later).
@ -775,6 +879,40 @@ replica-priority 100
#
# Basically ACL rules are processed left-to-right.
#
# The following is a list of command categories and their meanings:
# * keyspace - Writing or reading from keys, databases, or their metadata
# in a type agnostic way. Includes DEL, RESTORE, DUMP, RENAME, EXISTS, DBSIZE,
# KEYS, EXPIRE, TTL, FLUSHALL, etc. Commands that may modify the keyspace,
# key or metadata will also have `write` category. Commands that only read
# the keyspace, key or metadata will have the `read` category.
# * read - Reading from keys (values or metadata). Note that commands that don't
# interact with keys, will not have either `read` or `write`.
# * write - Writing to keys (values or metadata)
# * admin - Administrative commands. Normal applications will never need to use
# these. Includes REPLICAOF, CONFIG, DEBUG, SAVE, MONITOR, ACL, SHUTDOWN, etc.
# * dangerous - Potentially dangerous (each should be considered with care for
# various reasons). This includes FLUSHALL, MIGRATE, RESTORE, SORT, KEYS,
# CLIENT, DEBUG, INFO, CONFIG, SAVE, REPLICAOF, etc.
# * connection - Commands affecting the connection or other connections.
# This includes AUTH, SELECT, COMMAND, CLIENT, ECHO, PING, etc.
# * blocking - Potentially blocking the connection until released by another
# command.
# * fast - Fast O(1) commands. May loop on the number of arguments, but not the
# number of elements in the key.
# * slow - All commands that are not Fast.
# * pubsub - PUBLISH / SUBSCRIBE related
# * transaction - WATCH / MULTI / EXEC related commands.
# * scripting - Scripting related.
# * set - Data type: sets related.
# * sortedset - Data type: zsets related.
# * list - Data type: lists related.
# * hash - Data type: hashes related.
# * string - Data type: strings related.
# * bitmap - Data type: bitmaps related.
# * hyperloglog - Data type: hyperloglog related.
# * geo - Data type: geo related.
# * stream - Data type: streams related.
#
# For more information about ACL configuration please refer to
# the Redis web site at https://redis.io/topics/acl
@ -798,14 +936,38 @@ acllog-max-len 128
#
# aclfile /etc/keydb/users.acl
# IMPORTANT NOTE: starting with Redis 6 "requirepass" is just a compatibility
# IMPORTANT NOTE: starting with KeyDB 6 "requirepass" is just a compatibility
# layer on top of the new ACL system. The option effect will be just setting
# the password for the default user. Clients will still authenticate using
# AUTH <password> as usually, or more explicitly with AUTH default <password>
# if they follow the new protocol: both will work.
#
# The requirepass is not compatible with aclfile option and the ACL LOAD
# command, these will cause requirepass to be ignored.
#
# requirepass foobared
# New users are initialized with restrictive permissions by default, via the
# equivalent of this ACL rule 'off resetkeys -@all'. Starting with KeyDB 6.2, it
# is possible to manage access to Pub/Sub channels with ACL rules as well. The
# default Pub/Sub channels permission for new users is controlled by the
# acl-pubsub-default configuration directive, which accepts one of these values:
#
# allchannels: grants access to all Pub/Sub channels
# resetchannels: revokes access to all Pub/Sub channels
#
# To ensure backward compatibility while upgrading KeyDB 6.0, acl-pubsub-default
# defaults to the 'allchannels' permission.
#
# Future compatibility note: it is very likely that in a future version of KeyDB
# the directive's default of 'allchannels' will be changed to 'resetchannels' in
# order to provide better out-of-the-box Pub/Sub security. Therefore, it is
# recommended that you explicitly define Pub/Sub permissions for all users
# rather than rely on implicit default values. Once you've set explicit
# Pub/Sub for all existing users, you should uncomment the following line.
#
# acl-pubsub-default resetchannels
# Command renaming (DEPRECATED).
#
# ------------------------------------------------------------------------
@ -842,7 +1004,7 @@ acllog-max-len 128
# Once the limit is reached KeyDB will close all the new connections sending
# an error 'max number of clients reached'.
#
# IMPORTANT: When Redis Cluster is used, the max number of connections is also
# IMPORTANT: When KeyDB Cluster is used, the max number of connections is also
# shared with the cluster bus: every node in the cluster will use two
# connections, one incoming and another outgoing. It is important to size the
# limit accordingly in case of very large clusters.
@ -918,7 +1080,15 @@ acllog-max-len 128
#
# maxmemory-samples 5
# Starting from Redis 5, by default a replica will ignore its maxmemory setting
# Eviction processing is designed to function well with the default setting.
# If there is an unusually large amount of write traffic, this value may need to
# be increased. Decreasing this value may reduce latency at the risk of
# eviction processing effectiveness
# 0 = minimum latency, 10 = default, 100 = process without regard to latency
#
# maxmemory-eviction-tenacity 10
# Starting from KeyDB 5, by default a replica will ignore its maxmemory setting
# (unless it is promoted to master after a failover or manually). It means
# that the eviction of keys will be just handled by the master, sending the
# DEL commands to the replica as keys evict in the master side.
@ -1011,6 +1181,13 @@ replica-lazy-flush no
lazyfree-lazy-user-del no
# FLUSHDB, FLUSHALL, and SCRIPT FLUSH support both asynchronous and synchronous
# deletion, which can be controlled by passing the [SYNC|ASYNC] flags into the
# commands. When neither flag is passed, this directive will be used to determine
# if the data should be deleted asynchronously.
lazyfree-lazy-user-flush no
############################ KERNEL OOM CONTROL ##############################
# On Linux, it is possible to hint the kernel OOM killer on what processes
@ -1042,6 +1219,19 @@ oom-score-adj no
# oom-score-adj-values to positive values will always succeed.
oom-score-adj-values 0 200 800
#################### KERNEL transparent hugepage CONTROL ######################
# Usually the kernel Transparent Huge Pages control is set to "madvise" or
# "never" by default (/sys/kernel/mm/transparent_hugepage/enabled), in which
# case this config has no effect. On systems in which it is set to "always",
# KeyDB will attempt to disable it specifically for the KeyDB process in order
# to avoid latency problems specifically with fork(2) and CoW.
# If for some reason you prefer to keep it enabled, you can set this config to
# "no" and the kernel global to "always".
disable-thp yes
############################## APPEND ONLY MODE ###############################
# By default KeyDB asynchronously dumps the dataset on disk. This mode is
@ -1269,12 +1459,21 @@ lua-time-limit 5000
# master in your cluster.
#
# Default is 1 (replicas migrate only if their masters remain with at least
# one replica). To disable migration just set it to a very large value.
# one replica). To disable migration just set it to a very large value or
# set cluster-allow-replica-migration to 'no'.
# A value of 0 can be set but is useful only for debugging and dangerous
# in production.
#
# cluster-migration-barrier 1
# Turning off this option allows to use less automatic cluster configuration.
# It both disables migration to orphaned masters and migration from masters
# that became empty.
#
# Default is 'yes' (allow automatic migrations).
#
# cluster-allow-replica-migration yes
# By default KeyDB Cluster nodes stop accepting queries if they detect there
# is at least a hash slot uncovered (no available node is serving it).
# This way if the cluster is partially down (for example a range of hash slots
@ -1325,17 +1524,23 @@ lua-time-limit 5000
#
# In order to make KeyDB Cluster working in such environments, a static
# configuration where each node knows its public address is needed. The
# following two options are used for this scope, and are:
# following four options are used for this scope, and are:
#
# * cluster-announce-ip
# * cluster-announce-port
# * cluster-announce-tls-port
# * cluster-announce-bus-port
#
# Each instructs the node about its address, client port, and cluster message
# Each instructs the node about its address, client ports (for connections
# without and with TLS), and cluster message
# bus port. The information is then published in the header of the bus packets
# so that other nodes will be able to correctly map the address of the node
# publishing the information.
#
# If cluster-tls is set to yes and cluster-announce-tls-port is omitted or set
# to zero, then cluster-announce-port refers to the TLS port. Note also that
# cluster-announce-tls-port has no effect if cluster-tls is set to no.
#
# If the above options are not used, the normal KeyDB Cluster auto-detection
# will be used instead.
#
@ -1347,7 +1552,8 @@ lua-time-limit 5000
# Example:
#
# cluster-announce-ip 10.1.1.5
# cluster-announce-port 6379
# cluster-announce-tls-port 6379
# cluster-announce-port 0
# cluster-announce-bus-port 6380
################################## SLOW LOG ###################################
@ -1421,8 +1627,9 @@ latency-monitor-threshold 0
# x Expired events (events generated every time a key expires)
# e Evicted events (events generated when a key is evicted for maxmemory)
# t Stream commands
# d Module key type events
# m Key-miss events (Note: It is not included in the 'A' class)
# A Alias for g$lshzxet, so that the "AKE" string means all the events
# A Alias for g$lshzxetd, so that the "AKE" string means all the events
# (Except key-miss events which are excluded from 'A' due to their
# unique nature).
#

File diff suppressed because it is too large Load Diff

View File

@ -20,12 +20,12 @@
# The port that this sentinel instance will run on
port 26379
# By default Redis Sentinel does not run as a daemon. Use 'yes' if you need it.
# Note that Redis will write a pid file in /var/run/keydb-sentinel.pid when
# By default KeyDB Sentinel does not run as a daemon. Use 'yes' if you need it.
# Note that KeyDB will write a pid file in /var/run/keydb-sentinel.pid when
# daemonized.
daemonize yes
# When running daemonized, Redis Sentinel writes a pid file in
# When running daemonized, KeyDB Sentinel writes a pid file in
# /var/run/keydb-sentinel.pid by default. You can specify a custom pid file
# location here.
pidfile /var/run/sentinel/keydb-sentinel.pid
@ -59,7 +59,7 @@ logfile /var/log/keydb/keydb-sentinel.log
# dir <working-directory>
# Every long running process should have a well-defined working directory.
# For Redis Sentinel to chdir to /tmp at startup is the simplest thing
# For KeyDB Sentinel to chdir to /tmp at startup is the simplest thing
# for the process to don't interfere with administrative tasks such as
# unmounting filesystems.
dir /var/lib/keydb
@ -86,22 +86,34 @@ sentinel monitor mymaster 127.0.0.1 6379 2
# sentinel auth-pass <master-name> <password>
#
# Set the password to use to authenticate with the master and replicas.
# Useful if there is a password set in the Redis instances to monitor.
# Useful if there is a password set in the KeyDB instances to monitor.
#
# Note that the master password is also used for replicas, so it is not
# possible to set a different password in masters and replicas instances
# if you want to be able to monitor these instances with Sentinel.
#
# However you can have Redis instances without the authentication enabled
# mixed with Redis instances requiring the authentication (as long as the
# However you can have KeyDB instances without the authentication enabled
# mixed with KeyDB instances requiring the authentication (as long as the
# password set is the same for all the instances requiring the password) as
# the AUTH command will have no effect in Redis instances with authentication
# the AUTH command will have no effect in KeyDB instances with authentication
# switched off.
#
# Example:
#
# sentinel auth-pass mymaster MySUPER--secret-0123passw0rd
# sentinel auth-user <master-name> <username>
#
# This is useful in order to authenticate to instances having ACL capabilities,
# that is, running KeyDB 6.0 or greater. When just auth-pass is provided the
# Sentinel instance will authenticate to KeyDB using the old "AUTH <pass>"
# method. When also an username is provided, it will use "AUTH <user> <pass>".
# In the KeyDB servers side, the ACL to provide just minimal access to
# Sentinel instances, should be configured along the following lines:
#
# user sentinel-user >somepassword +client +subscribe +publish \
# +ping +info +multi +slaveof +config +client +exec on
# sentinel down-after-milliseconds <master-name> <milliseconds>
#
# Number of milliseconds the master (or any attached replica or sentinel) should
@ -112,6 +124,73 @@ sentinel monitor mymaster 127.0.0.1 6379 2
# Default is 30 seconds.
sentinel down-after-milliseconds mymaster 30000
# IMPORTANT NOTE: starting with KeyDB 6.2 ACL capability is supported for
# Sentinel mode, please refer to the Redis website https://redis.io/topics/acl
# for more details.
# Sentinel's ACL users are defined in the following format:
#
# user <username> ... acl rules ...
#
# For example:
#
# user worker +@admin +@connection ~* on >ffa9203c493aa99
#
# For more information about ACL configuration please refer to the Redis
# website at https://redis.io/topics/acl and KeyDB server configuration
# template keydb.conf.
# ACL LOG
#
# The ACL Log tracks failed commands and authentication events associated
# with ACLs. The ACL Log is useful to troubleshoot failed commands blocked
# by ACLs. The ACL Log is stored in memory. You can reclaim memory with
# ACL LOG RESET. Define the maximum entry length of the ACL Log below.
acllog-max-len 128
# Using an external ACL file
#
# Instead of configuring users here in this file, it is possible to use
# a stand-alone file just listing users. The two methods cannot be mixed:
# if you configure users here and at the same time you activate the external
# ACL file, the server will refuse to start.
#
# The format of the external ACL user file is exactly the same as the
# format that is used inside keydb.conf to describe users.
#
# aclfile /etc/keydb/sentinel-users.acl
# requirepass <password>
#
# You can configure Sentinel itself to require a password, however when doing
# so Sentinel will try to authenticate with the same password to all the
# other Sentinels. So you need to configure all your Sentinels in a given
# group with the same "requirepass" password. Check the following documentation
# for more info: https://redis.io/topics/sentinel
#
# IMPORTANT NOTE: starting with KeyDB 6.2 "requirepass" is a compatibility
# layer on top of the ACL system. The option effect will be just setting
# the password for the default user. Clients will still authenticate using
# AUTH <password> as usually, or more explicitly with AUTH default <password>
# if they follow the new protocol: both will work.
#
# New config files are advised to use separate authentication control for
# incoming connections (via ACL), and for outgoing connections (via
# sentinel-user and sentinel-pass)
#
# The requirepass is not compatible with aclfile option and the ACL LOAD
# command, these will cause requirepass to be ignored.
# sentinel sentinel-user <username>
#
# You can configure Sentinel to authenticate with other Sentinels with specific
# user name.
# sentinel sentinel-pass <password>
#
# The password for Sentinel to authenticate with other Sentinels. If sentinel-user
# is not configured, Sentinel will use 'default' user with sentinel-pass to authenticate.
# sentinel parallel-syncs <master-name> <numreplicas>
#
# How many replicas we can reconfigure to point to the new replica simultaneously
@ -172,7 +251,7 @@ sentinel failover-timeout mymaster 180000
# generated in the WARNING level (for instance -sdown, -odown, and so forth).
# This script should notify the system administrator via email, SMS, or any
# other messaging system, that there is something wrong with the monitored
# Redis systems.
# KeyDB systems.
#
# The script is called with just two arguments: the first is the event type
# and the second the event description.
@ -182,7 +261,7 @@ sentinel failover-timeout mymaster 180000
#
# Example:
#
# sentinel notification-script mymaster /var/redis/notify.sh
# sentinel notification-script mymaster /var/keydb/notify.sh
# CLIENTS RECONFIGURATION SCRIPT
#
@ -207,7 +286,7 @@ sentinel failover-timeout mymaster 180000
#
# Example:
#
# sentinel client-reconfig-script mymaster /var/redis/reconfig.sh
# sentinel client-reconfig-script mymaster /var/keydb/reconfig.sh
# SECURITY
#
@ -218,11 +297,11 @@ sentinel failover-timeout mymaster 180000
sentinel deny-scripts-reconfig yes
# REDIS COMMANDS RENAMING
# KEYDB COMMANDS RENAMING
#
# Sometimes the Redis server has certain commands, that are needed for Sentinel
# Sometimes the KeyDB server has certain commands, that are needed for Sentinel
# to work correctly, renamed to unguessable strings. This is often the case
# of CONFIG and SLAVEOF in the context of providers that provide Redis as
# of CONFIG and SLAVEOF in the context of providers that provide KeyDB as
# a service, and don't want the customers to reconfigure the instances outside
# of the administration console.
#
@ -239,6 +318,24 @@ sentinel deny-scripts-reconfig yes
# SENTINEL SET can also be used in order to perform this configuration at runtime.
#
# In order to set a command back to its original name (undo the renaming), it
# is possible to just rename a command to itsef:
# is possible to just rename a command to itself:
#
# SENTINEL rename-command mymaster CONFIG CONFIG
# HOSTNAMES SUPPORT
#
# Normally Sentinel uses only IP addresses and requires SENTINEL MONITOR
# to specify an IP address. Also, it requires the KeyDB replica-announce-ip
# keyword to specify only IP addresses.
#
# You may enable hostnames support by enabling resolve-hostnames. Note
# that you must make sure your DNS is configured properly and that DNS
# resolution does not introduce very long delays.
#
SENTINEL resolve-hostnames no
# When resolve-hostnames is enabled, Sentinel still uses IP addresses
# when exposing instances to users, configuration files, etc. If you want
# to retain the hostnames when announced, enable announce-hostnames below.
#
SENTINEL announce-hostnames no

View File

@ -5,3 +5,4 @@ src/keydb-check-rdb /usr/bin
src/keydb-cli /usr/bin
src/keydb-server /usr/bin
src/keydb-sentinel /usr/bin
src/keydb-diagnostic-tool /usr/bin

View File

@ -0,0 +1,53 @@
#compdef keydb-cli
local -a options
options=(
'-h[Server hostname (default: 127.0.0.1).]: :_hosts'
'-p[Server port (default: 6379).]'
'-s[Server socket (overrides hostname and port).]'
'-a[Password to use when connecting to the server. You can also use the REDISCLI_AUTH environment variable to pass this password more safely (if both are used, this argument takes precedence).]'
'--user[Used to send ACL style "AUTH username pass". Needs -a.]'
'--pass[Alias of -a for consistency with the new --user option.]'
'--askpass[Force user to input password with mask from STDIN. If this argument is used, "-a" and REDISCLI_AUTH environment variable will be ignored.]'
'-u[Server URI.]'
'-r[Execute specified command N times.]'
'-i[When -r is used, waits <interval> seconds per command. It is possible to specify sub-second times like -i 0.1.]'
'-n[Database number.]'
'-3[Start session in RESP3 protocol mode.]'
'-x[Read last argument from STDIN.]'
'-d[Delimiter between response bulks for raw formatting (default: \n).]'
'-D[D <delimiter> Delimiter between responses for raw formatting (default: \n).]'
'-c[Enable cluster mode (follow -ASK and -MOVED redirections).]'
'-e[Return exit error code when command execution fails.]'
'--raw[Use raw formatting for replies (default when STDOUT is not a tty).]'
'--no-raw[Force formatted output even when STDOUT is not a tty.]'
'--quoted-input[Force input to be handled as quoted strings.]'
'--csv[Output in CSV format.]'
'--show-pushes[Whether to print RESP3 PUSH messages. Enabled by default when STDOUT is a tty but can be overriden with --show-pushes no.]'
'--stat[Print rolling stats about server: mem, clients, ...]'
'--latency[Enter a special mode continuously sampling latency. If you use this mode in an interactive session it runs forever displaying real-time stats. Otherwise if --raw or --csv is specified, or if you redirect the output to a non TTY, it samples the latency for 1 second (you can use -i to change the interval), then produces a single output and exits.]'
'--latency-history[Like --latency but tracking latency changes over time. Default time interval is 15 sec. Change it using -i.]'
'--latency-dist[Shows latency as a spectrum, requires xterm 256 colors. Default time interval is 1 sec. Change it using -i.]'
'--lru-test[Simulate a cache workload with an 80-20 distribution.]'
'--replica[Simulate a replica showing commands received from the master.]'
'--rdb[Transfer an RDB dump from remote server to local file.]'
'--pipe[Transfer raw KeyDB protocol from stdin to server.]'
'--pipe-timeout[In --pipe mode, abort with error if after sending all data. no reply is received within <n> seconds. Default timeout: 30. Use 0 to wait forever.]'
'--bigkeys[Sample KeyDB keys looking for keys with many elements (complexity).]'
'--memkeys[Sample KeyDB keys looking for keys consuming a lot of memory.]'
'--memkeys-samples[Sample KeyDB keys looking for keys consuming a lot of memory. And define number of key elements to sample]'
'--hotkeys[Sample KeyDB keys looking for hot keys. only works when maxmemory-policy is *lfu.]'
'--scan[List all keys using the SCAN command.]'
'--pattern[Keys pattern when using the --scan, --bigkeys or --hotkeys options (default: *).]'
'--quoted-pattern[Same as --pattern, but the specified string can be quoted, in order to pass an otherwise non binary-safe string.]'
'--intrinsic-latency[Run a test to measure intrinsic system latency. The test will run for the specified amount of seconds.]'
'--eval[Send an EVAL command using the Lua script at <file>.]'
'--ldb[Used with --eval enable the Redis Lua debugger.]'
'--ldb-sync-mode[Like --ldb but uses the synchronous Lua debugger, in this mode the server is blocked and script changes are not rolled back from the server memory.]'
'--cluster[<command> args... opts... Cluster Manager command and arguments (see below).]'
'--verbose[Verbose mode.]'
'--no-auth-warning[Dont show warning message when using password on command line interface.]'
'--help[Output this help and exit.]'
'--version[Output version and exit.]'
)
_arguments -s $options

View File

@ -5,3 +5,4 @@ src/keydb-check-rdb /usr/bin
src/keydb-cli /usr/bin
src/keydb-server /usr/bin
src/keydb-sentinel /usr/bin
src/keydb-diagnostic-tool /usr/bin

View File

@ -25,7 +25,13 @@ mkdir -p $DIR/keydb_build/keydb_rpm/var/log/keydb
# move binaries to bin
rm $DIR/keydb_build/keydb_rpm/usr/bin/*
cp $DIR/../../src/keydb-* $DIR/keydb_build/keydb_rpm/usr/bin/
cp $DIR/../../src/keydb-server $DIR/keydb_build/keydb_rpm/usr/bin/
cp $DIR/../../src/keydb-sentinel $DIR/keydb_build/keydb_rpm/usr/bin/
cp $DIR/../../src/keydb-cli $DIR/keydb_build/keydb_rpm/usr/bin/
cp $DIR/../../src/keydb-benchmark $DIR/keydb_build/keydb_rpm/usr/bin/
cp $DIR/../../src/keydb-check-aof $DIR/keydb_build/keydb_rpm/usr/bin/
cp $DIR/../../src/keydb-check-rdb $DIR/keydb_build/keydb_rpm/usr/bin/
cp $DIR/../../src/keydb-diagnostic-tool $DIR/keydb_build/keydb_rpm/usr/bin/
# update spec file with build info
sed -i '2d' $DIR/keydb_build/keydb.spec

File diff suppressed because it is too large Load Diff

View File

@ -20,20 +20,20 @@
# The port that this sentinel instance will run on
port 26379
# By default Redis Sentinel does not run as a daemon. Use 'yes' if you need it.
# Note that Redis will write a pid file in /var/run/redis-sentinel.pid when
# By default KeyDB Sentinel does not run as a daemon. Use 'yes' if you need it.
# Note that KeyDB will write a pid file in /var/run/keydb-sentinel.pid when
# daemonized.
daemonize no
# When running daemonized, Redis Sentinel writes a pid file in
# /var/run/redis-sentinel.pid by default. You can specify a custom pid file
# When running daemonized, KeyDB Sentinel writes a pid file in
# /var/run/keydb-sentinel.pid by default. You can specify a custom pid file
# location here.
pidfile /var/run/redis-sentinel.pid
pidfile /var/run/sentinel/keydb-sentinel.pid
# Specify the log file name. Also the empty string can be used to force
# Sentinel to log on the standard output. Note that if you use standard
# output for logging but daemonize, logs will be sent to /dev/null
logfile /var/log/redis/sentinel.log
logfile /var/log/keydb/keydb-sentinel.log
# sentinel announce-ip <ip>
# sentinel announce-port <port>
@ -59,12 +59,12 @@ logfile /var/log/redis/sentinel.log
# dir <working-directory>
# Every long running process should have a well-defined working directory.
# For Redis Sentinel to chdir to /tmp at startup is the simplest thing
# For KeyDB Sentinel to chdir to /tmp at startup is the simplest thing
# for the process not to interfere with administrative tasks such as
# unmounting filesystems.
dir /tmp
# sentinel monitor <master-name> <ip> <redis-port> <quorum>
# sentinel monitor <master-name> <ip> <keydb-port> <quorum>
#
# Tells Sentinel to monitor this master, and to consider it in O_DOWN
# (Objectively Down) state only if at least <quorum> sentinels agree.
@ -86,22 +86,34 @@ sentinel monitor mymaster 127.0.0.1 6379 2
# sentinel auth-pass <master-name> <password>
#
# Set the password to use to authenticate with the master and replicas.
# Useful if there is a password set in the Redis instances to monitor.
# Useful if there is a password set in the KeyDB instances to monitor.
#
# Note that the master password is also used for replicas, so it is not
# possible to set a different password in masters and replicas instances
# if you want to be able to monitor these instances with Sentinel.
#
# However you can have Redis instances without the authentication enabled
# mixed with Redis instances requiring the authentication (as long as the
# However you can have KeyDB instances without the authentication enabled
# mixed with KeyDB instances requiring the authentication (as long as the
# password set is the same for all the instances requiring the password) as
# the AUTH command will have no effect in Redis instances with authentication
# the AUTH command will have no effect in KeyDB instances with authentication
# switched off.
#
# Example:
#
# sentinel auth-pass mymaster MySUPER--secret-0123passw0rd
# sentinel auth-user <master-name> <username>
#
# This is useful in order to authenticate to instances having ACL capabilities,
# that is, running KeyDB 6.0 or greater. When just auth-pass is provided the
# Sentinel instance will authenticate to KeyDB using the old "AUTH <pass>"
# method. When a username is also provided, it will use "AUTH <user> <pass>".
# In the KeyDB servers side, the ACL to provide just minimal access to
# Sentinel instances, should be configured along the following lines:
#
# user sentinel-user >somepassword +client +subscribe +publish \
# +ping +info +multi +slaveof +config +client +exec on
# sentinel down-after-milliseconds <master-name> <milliseconds>
#
# Number of milliseconds the master (or any attached replica or sentinel) should
@ -112,6 +124,73 @@ sentinel monitor mymaster 127.0.0.1 6379 2
# Default is 30 seconds.
sentinel down-after-milliseconds mymaster 30000
# IMPORTANT NOTE: starting with KeyDB 6.2 ACL capability is supported for
# Sentinel mode, please refer to the Redis website https://redis.io/topics/acl
# for more details.
# Sentinel's ACL users are defined in the following format:
#
# user <username> ... acl rules ...
#
# For example:
#
# user worker +@admin +@connection ~* on >ffa9203c493aa99
#
# For more information about ACL configuration please refer to the Redis
# website at https://redis.io/topics/acl and KeyDB server configuration
# template keydb.conf.
# ACL LOG
#
# The ACL Log tracks failed commands and authentication events associated
# with ACLs. The ACL Log is useful to troubleshoot failed commands blocked
# by ACLs. The ACL Log is stored in memory. You can reclaim memory with
# ACL LOG RESET. Define the maximum entry length of the ACL Log below.
acllog-max-len 128
# Using an external ACL file
#
# Instead of configuring users here in this file, it is possible to use
# a stand-alone file just listing users. The two methods cannot be mixed:
# if you configure users here and at the same time you activate the external
# ACL file, the server will refuse to start.
#
# The format of the external ACL user file is exactly the same as the
# format that is used inside keydb.conf to describe users.
#
# aclfile /etc/keydb/sentinel-users.acl
# requirepass <password>
#
# You can configure Sentinel itself to require a password, however when doing
# so Sentinel will try to authenticate with the same password to all the
# other Sentinels. So you need to configure all your Sentinels in a given
# group with the same "requirepass" password. Check the following documentation
# for more info: https://redis.io/topics/sentinel
#
# IMPORTANT NOTE: starting with KeyDB 6.2 "requirepass" is a compatibility
# layer on top of the ACL system. The option effect will be just setting
# the password for the default user. Clients will still authenticate using
# AUTH <password> as usually, or more explicitly with AUTH default <password>
# if they follow the new protocol: both will work.
#
# New config files are advised to use separate authentication control for
# incoming connections (via ACL), and for outgoing connections (via
# sentinel-user and sentinel-pass)
#
# The requirepass is not compatible with the aclfile option and the ACL LOAD
# command, these will cause requirepass to be ignored.
# sentinel sentinel-user <username>
#
# You can configure Sentinel to authenticate with other Sentinels with specific
# user name.
# sentinel sentinel-pass <password>
#
# The password for Sentinel to authenticate with other Sentinels. If sentinel-user
# is not configured, Sentinel will use 'default' user with sentinel-pass to authenticate.
# sentinel parallel-syncs <master-name> <numreplicas>
#
# How many replicas we can reconfigure to point to the new replica simultaneously
@ -172,7 +251,7 @@ sentinel failover-timeout mymaster 180000
# generated in the WARNING level (for instance -sdown, -odown, and so forth).
# This script should notify the system administrator via email, SMS, or any
# other messaging system, that there is something wrong with the monitored
# Redis systems.
# KeyDB systems.
#
# The script is called with just two arguments: the first is the event type
# and the second the event description.
@ -182,7 +261,7 @@ sentinel failover-timeout mymaster 180000
#
# Example:
#
# sentinel notification-script mymaster /var/redis/notify.sh
# sentinel notification-script mymaster /var/keydb/notify.sh
# CLIENTS RECONFIGURATION SCRIPT
#
@ -207,7 +286,7 @@ sentinel failover-timeout mymaster 180000
#
# Example:
#
# sentinel client-reconfig-script mymaster /var/redis/reconfig.sh
# sentinel client-reconfig-script mymaster /var/keydb/reconfig.sh
# SECURITY
#
@ -218,11 +297,11 @@ sentinel failover-timeout mymaster 180000
sentinel deny-scripts-reconfig yes
# REDIS COMMANDS RENAMING
# KEYDB COMMANDS RENAMING
#
# Sometimes the Redis server has certain commands, that are needed for Sentinel
# Sometimes the KeyDB server has certain commands, that are needed for Sentinel
# to work correctly, renamed to unguessable strings. This is often the case
# of CONFIG and SLAVEOF in the context of providers that provide Redis as
# of CONFIG and SLAVEOF in the context of providers that provide KeyDB as
# a service, and don't want the customers to reconfigure the instances outside
# of the administration console.
#
@ -239,6 +318,24 @@ sentinel deny-scripts-reconfig yes
# SENTINEL SET can also be used in order to perform this configuration at runtime.
#
# In order to set a command back to its original name (undo the renaming), it
# is possible to just rename a command to itsef:
# is possible to just rename a command to itself:
#
# SENTINEL rename-command mymaster CONFIG CONFIG
# HOSTNAMES SUPPORT
#
# Normally Sentinel uses only IP addresses and requires SENTINEL MONITOR
# to specify an IP address. Also, it requires the KeyDB replica-announce-ip
# keyword to specify only IP addresses.
#
# You may enable hostnames support by enabling resolve-hostnames. Note
# that you must make sure your DNS is configured properly and that DNS
# resolution does not introduce very long delays.
#
SENTINEL resolve-hostnames no
# When resolve-hostnames is enabled, Sentinel still uses IP addresses
# when exposing instances to users, configuration files, etc. If you want
# to retain the hostnames when announced, enable announce-hostnames below.
#
SENTINEL announce-hostnames no

View File

@ -10,7 +10,7 @@ done
if [ -z $TCLSH ]
then
echo "You need tcl 8.5 or newer in order to run the Redis test"
echo "You need tcl 8.5 or newer in order to run the KeyDB test"
exit 1
fi
$TCLSH tests/test_helper.tcl "${@}"

View File

@ -8,7 +8,7 @@ done
if [ -z $TCLSH ]
then
echo "You need tcl 8.5 or newer in order to run the Redis Cluster test"
echo "You need tcl 8.5 or newer in order to run the KeyDB Cluster test"
exit 1
fi
$TCLSH tests/cluster/run.tcl $*

View File

@ -9,7 +9,7 @@ done
if [ -z $TCLSH ]
then
echo "You need tcl 8.5 or newer in order to run the Redis ModuleApi test"
echo "You need tcl 8.5 or newer in order to run the KeyDB ModuleApi test"
exit 1
fi

View File

@ -8,7 +8,7 @@ done
if [ -z $TCLSH ]
then
echo "You need tcl 8.5 or newer in order to run the Redis Sentinel test"
echo "You need tcl 8.5 or newer in order to run the KeyDB Sentinel test"
exit 1
fi
$TCLSH tests/sentinel/run.tcl $*

View File

@ -20,12 +20,12 @@
# The port that this sentinel instance will run on
port 26379
# By default Redis Sentinel does not run as a daemon. Use 'yes' if you need it.
# Note that Redis will write a pid file in /var/run/keydb-sentinel.pid when
# By default KeyDB Sentinel does not run as a daemon. Use 'yes' if you need it.
# Note that KeyDB will write a pid file in /var/run/keydb-sentinel.pid when
# daemonized.
daemonize no
# When running daemonized, Redis Sentinel writes a pid file in
# When running daemonized, KeyDB Sentinel writes a pid file in
# /var/run/keydb-sentinel.pid by default. You can specify a custom pid file
# location here.
pidfile /var/run/keydb-sentinel.pid
@ -59,7 +59,7 @@ logfile ""
# dir <working-directory>
# Every long running process should have a well-defined working directory.
# For Redis Sentinel to chdir to /tmp at startup is the simplest thing
# For KeyDB Sentinel to chdir to /tmp at startup is the simplest thing
# for the process not to interfere with administrative tasks such as
# unmounting filesystems.
dir /tmp
@ -86,16 +86,16 @@ sentinel monitor mymaster 127.0.0.1 6379 2
# sentinel auth-pass <master-name> <password>
#
# Set the password to use to authenticate with the master and replicas.
# Useful if there is a password set in the Redis instances to monitor.
# Useful if there is a password set in the KeyDB instances to monitor.
#
# Note that the master password is also used for replicas, so it is not
# possible to set a different password in masters and replicas instances
# if you want to be able to monitor these instances with Sentinel.
#
# However you can have Redis instances without the authentication enabled
# mixed with Redis instances requiring the authentication (as long as the
# However you can have KeyDB instances without the authentication enabled
# mixed with KeyDB instances requiring the authentication (as long as the
# password set is the same for all the instances requiring the password) as
# the AUTH command will have no effect in Redis instances with authentication
# the AUTH command will have no effect in KeyDB instances with authentication
# switched off.
#
# Example:
@ -105,10 +105,10 @@ sentinel monitor mymaster 127.0.0.1 6379 2
# sentinel auth-user <master-name> <username>
#
# This is useful in order to authenticate to instances having ACL capabilities,
# that is, running Redis 6.0 or greater. When just auth-pass is provided the
# Sentinel instance will authenticate to Redis using the old "AUTH <pass>"
# that is, running KeyDB 6.0 or greater. When just auth-pass is provided the
# Sentinel instance will authenticate to KeyDB using the old "AUTH <pass>"
# method. When a username is also provided, it will use "AUTH <user> <pass>".
# In the Redis servers side, the ACL to provide just minimal access to
# In the KeyDB servers side, the ACL to provide just minimal access to
# Sentinel instances, should be configured along the following lines:
#
# user sentinel-user >somepassword +client +subscribe +publish \
@ -125,7 +125,7 @@ sentinel monitor mymaster 127.0.0.1 6379 2
sentinel down-after-milliseconds mymaster 30000
# IMPORTANT NOTE: starting with KeyDB 6.2 ACL capability is supported for
# Sentinel mode, please refer to the KeyDB website https://redis.io/topics/acl
# Sentinel mode, please refer to the Redis website https://redis.io/topics/acl
# for more details.
# Sentinel's ACL users are defined in the following format:
@ -137,8 +137,8 @@ sentinel down-after-milliseconds mymaster 30000
# user worker +@admin +@connection ~* on >ffa9203c493aa99
#
# For more information about ACL configuration please refer to the Redis
# website at https://redis.io/topics/acl and redis server configuration
# template redis.conf.
# website at https://redis.io/topics/acl and KeyDB server configuration
# template keydb.conf.
# ACL LOG
#
@ -156,9 +156,9 @@ acllog-max-len 128
# ACL file, the server will refuse to start.
#
# The format of the external ACL user file is exactly the same as the
# format that is used inside redis.conf to describe users.
# format that is used inside keydb.conf to describe users.
#
# aclfile /etc/redis/sentinel-users.acl
# aclfile /etc/keydb/sentinel-users.acl
# requirepass <password>
#
@ -168,7 +168,7 @@ acllog-max-len 128
# group with the same "requirepass" password. Check the following documentation
# for more info: https://redis.io/topics/sentinel
#
# IMPORTANT NOTE: starting with Redis 6.2 "requirepass" is a compatibility
# IMPORTANT NOTE: starting with KeyDB 6.2 "requirepass" is a compatibility
# layer on top of the ACL system. The option effect will be just setting
# the password for the default user. Clients will still authenticate using
# AUTH <password> as usually, or more explicitly with AUTH default <password>
@ -251,7 +251,7 @@ sentinel failover-timeout mymaster 180000
# generated in the WARNING level (for instance -sdown, -odown, and so forth).
# This script should notify the system administrator via email, SMS, or any
# other messaging system, that there is something wrong with the monitored
# Redis systems.
# KeyDB systems.
#
# The script is called with just two arguments: the first is the event type
# and the second the event description.
@ -261,7 +261,7 @@ sentinel failover-timeout mymaster 180000
#
# Example:
#
# sentinel notification-script mymaster /var/redis/notify.sh
# sentinel notification-script mymaster /var/keydb/notify.sh
# CLIENTS RECONFIGURATION SCRIPT
#
@ -286,7 +286,7 @@ sentinel failover-timeout mymaster 180000
#
# Example:
#
# sentinel client-reconfig-script mymaster /var/redis/reconfig.sh
# sentinel client-reconfig-script mymaster /var/keydb/reconfig.sh
# SECURITY
#
@ -297,11 +297,11 @@ sentinel failover-timeout mymaster 180000
sentinel deny-scripts-reconfig yes
# REDIS COMMANDS RENAMING
# KEYDB COMMANDS RENAMING
#
# Sometimes the Redis server has certain commands, that are needed for Sentinel
# Sometimes the KeyDB server has certain commands, that are needed for Sentinel
# to work correctly, renamed to unguessable strings. This is often the case
# of CONFIG and SLAVEOF in the context of providers that provide Redis as
# of CONFIG and SLAVEOF in the context of providers that provide KeyDB as
# a service, and don't want the customers to reconfigure the instances outside
# of the administration console.
#
@ -325,7 +325,7 @@ sentinel deny-scripts-reconfig yes
# HOSTNAMES SUPPORT
#
# Normally Sentinel uses only IP addresses and requires SENTINEL MONITOR
# to specify an IP address. Also, it requires the Redis replica-announce-ip
# to specify an IP address. Also, it requires the KeyDB replica-announce-ip
# keyword to specify only IP addresses.
#
# You may enable hostnames support by enabling resolve-hostnames. Note

View File

@ -3,11 +3,11 @@
# This file is released under the BSD license, see the COPYING file
#
# The Makefile composes the final FINAL_CFLAGS and FINAL_LDFLAGS using
# what is needed for Redis plus the standard CFLAGS and LDFLAGS passed.
# what is needed for KeyDB plus the standard CFLAGS and LDFLAGS passed.
# However when building the dependencies (Jemalloc, Lua, Hiredis, ...)
# CFLAGS and LDFLAGS are propagated to the dependencies, so to pass
# flags only to be used when compiling / linking Redis itself REDIS_CFLAGS
# and REDIS_LDFLAGS are used instead (this is the case of 'make gcov').
# flags only to be used when compiling / linking KeyDB itself KEYDB_CFLAGS
# and KEYDB_LDFLAGS are used instead (this is the case of 'make gcov').
#
# Dependencies are stored in the Makefile.dep file. To rebuild this file
# Just use 'make dep', but this is only needed by developers.
@ -29,7 +29,7 @@ ifneq (,$(findstring FreeBSD,$(uname_S)))
STD+=-Wno-c11-extensions
endif
endif
WARN=-Wall -W -Wno-missing-field-initializers
WARN=-Wall -W -Wno-missing-field-initializers -Wno-address-of-packed-member -Wno-atomic-alignment
OPT=$(OPTIMIZATION)
# Detect if the compiler supports C11 _Atomic
@ -89,7 +89,7 @@ ifeq ($(COMPILER_NAME),clang)
LDFLAGS+= -latomic
endif
# To get ARM stack traces if Redis crashes we need a special C flag.
# To get ARM stack traces if KeyDB crashes we need a special C flag.
ifneq (,$(filter aarch64 armv,$(uname_M)))
CFLAGS+=-funwind-tables
CXXFLAGS+=-funwind-tables
@ -131,9 +131,9 @@ endif
# Override default settings if possible
-include .make-settings
FINAL_CFLAGS=$(STD) $(WARN) $(OPT) $(DEBUG) $(CFLAGS) $(REDIS_CFLAGS)
FINAL_CXXFLAGS=$(CXX_STD) $(WARN) $(OPT) $(DEBUG) $(CXXFLAGS) $(REDIS_CFLAGS)
FINAL_LDFLAGS=$(LDFLAGS) $(REDIS_LDFLAGS) $(DEBUG)
FINAL_CFLAGS=$(STD) $(WARN) $(OPT) $(DEBUG) $(CFLAGS) $(KEYDB_CFLAGS) $(REDIS_CFLAGS)
FINAL_CXXFLAGS=$(CXX_STD) $(WARN) $(OPT) $(DEBUG) $(CXXFLAGS) $(KEYDB_CFLAGS) $(REDIS_CFLAGS)
FINAL_LDFLAGS=$(LDFLAGS) $(KEYDB_LDFLAGS) $(DEBUG)
FINAL_LIBS+=-lm -lz -latomic -L$(LICENSE_LIB_DIR) -lkey -lcrypto -lbz2 -lzstd -llz4 -lsnappy
DEBUG=-g -ggdb
@ -142,7 +142,7 @@ ifneq ($(uname_S),Darwin)
endif
# Linux ARM32 needs -latomic at linking time
ifneq (,$(findstring armv,$(uname_M)))
FINAL_LIBS+=-latomic
FINAL_LIBS+=-latomic
endif
@ -192,7 +192,7 @@ ifeq ($(uname_S),OpenBSD)
FINAL_CXXFLAGS+= -DUSE_BACKTRACE -I/usr/local/include
FINAL_LDFLAGS+= -L/usr/local/lib
FINAL_LIBS+= -lexecinfo
endif
endif
else
ifeq ($(uname_S),NetBSD)
@ -202,7 +202,7 @@ ifeq ($(uname_S),NetBSD)
FINAL_CFLAGS+= -DUSE_BACKTRACE -I/usr/pkg/include
FINAL_LDFLAGS+= -L/usr/pkg/lib
FINAL_LIBS+= -lexecinfo
endif
endif
else
ifeq ($(uname_S),FreeBSD)
# FreeBSD
@ -273,6 +273,7 @@ endif
ifeq ($(BUILD_WITH_SYSTEMD),yes)
FINAL_LIBS+=$(LIBSYSTEMD_LIBS)
FINAL_CFLAGS+= -DHAVE_LIBSYSTEMD
FINAL_CXXFLAGS+= -DHAVE_LIBSYSTEMD
endif
ifeq ($(MALLOC),tcmalloc)
@ -331,6 +332,14 @@ else
endef
endif
# Alpine OS doesn't have support for the execinfo backtrace library we use for debug, so we provide an alternate implementation using libwunwind.
OS := $(shell cat /etc/os-release | grep ID= | head -n 1 | cut -d'=' -f2)
ifeq ($(OS),alpine)
FINAL_CXXFLAGS+=-DUNW_LOCAL_ONLY
FINAL_LIBS += -lunwind
endif
REDIS_CC=$(QUIET_CC)$(CC) $(FINAL_CFLAGS)
REDIS_CXX=$(QUIET_CC)$(CXX) $(FINAL_CXXFLAGS)
KEYDB_AS=$(QUIET_CC) as --64 -g
@ -361,8 +370,10 @@ REDIS_BENCHMARK_NAME=keydb-benchmark$(PROG_SUFFIX)
REDIS_BENCHMARK_OBJ=ae.o anet.o redis-benchmark.o adlist.o dict.o zmalloc.o release.o crcspeed.o crc64.o siphash.o redis-benchmark.o storage-lite.o fastlock.o new.o monotonic.o cli_common.o mt19937-64.o $(ASM_OBJ)
REDIS_CHECK_RDB_NAME=keydb-check-rdb$(PROG_SUFFIX)
REDIS_CHECK_AOF_NAME=keydb-check-aof$(PROG_SUFFIX)
KEYDB_DIAGNOSTIC_NAME=keydb-diagnostic-tool$(PROG_SUFFIX)
KEYDB_DIAGNOSTIC_OBJ=ae.o anet.o keydb-diagnostic-tool.o adlist.o dict.o zmalloc.o release.o crcspeed.o crc64.o siphash.o keydb-diagnostic-tool.o storage-lite.o fastlock.o new.o monotonic.o cli_common.o mt19937-64.o $(ASM_OBJ)
all: $(REDIS_SERVER_NAME) $(REDIS_SENTINEL_NAME) $(REDIS_CLI_NAME) $(REDIS_BENCHMARK_NAME) $(REDIS_CHECK_RDB_NAME) $(REDIS_CHECK_AOF_NAME)
all: $(REDIS_SERVER_NAME) $(REDIS_SENTINEL_NAME) $(REDIS_CLI_NAME) $(REDIS_BENCHMARK_NAME) $(REDIS_CHECK_RDB_NAME) $(REDIS_CHECK_AOF_NAME) $(KEYDB_DIAGNOSTIC_NAME)
@echo ""
@echo "Hint: It's a good idea to run 'make test' ;)"
@echo ""
@ -386,9 +397,9 @@ persist-settings: distclean
echo CFLAGS=$(CFLAGS) >> .make-settings
echo CXXFLAGS=$(CXXFLAGS) >> .make-settings
echo LDFLAGS=$(LDFLAGS) >> .make-settings
echo REDIS_CFLAGS=$(REDIS_CFLAGS) >> .make-settings
echo REDIS_CXXFLAGS=$(REDIS_CXXFLAGS) >> .make-settings
echo REDIS_LDFLAGS=$(REDIS_LDFLAGS) >> .make-settings
echo KEYDB_CFLAGS=$(KEYDB_CFLAGS) >> .make-settings
echo KEYDB_CXXFLAGS=$(KEYDB_CXXFLAGS) >> .make-settings
echo KEYDB_LDFLAGS=$(KEYDB_LDFLAGS) >> .make-settings
echo PREV_FINAL_CFLAGS=$(FINAL_CFLAGS) >> .make-settings
echo PREV_FINAL_CXXFLAGS=$(FINAL_CXXFLAGS) >> .make-settings
echo PREV_FINAL_LDFLAGS=$(FINAL_LDFLAGS) >> .make-settings
@ -434,6 +445,10 @@ $(REDIS_CLI_NAME): $(REDIS_CLI_OBJ)
$(REDIS_BENCHMARK_NAME): $(REDIS_BENCHMARK_OBJ)
$(REDIS_LD) -o $@ $^ ../deps/hiredis/libhiredis.a ../deps/hdr_histogram/hdr_histogram.o $(FINAL_LIBS)
# keydb-diagnostic-tool
$(KEYDB_DIAGNOSTIC_NAME): $(KEYDB_DIAGNOSTIC_OBJ)
$(REDIS_LD) -o $@ $^ ../deps/hiredis/libhiredis.a $(FINAL_LIBS)
DEP = $(REDIS_SERVER_OBJ:%.o=%.d) $(KEYDB_SERVER_OBJ:%.o=%.d) $(REDIS_CLI_OBJ:%.o=%.d) $(REDIS_BENCHMARK_OBJ:%.o=%.d)
-include $(DEP)
@ -456,7 +471,7 @@ motd_server.o: motd.cpp .make-prerequisites
$(KEYDB_AS) $< -o $@
clean:
rm -rf $(REDIS_SERVER_NAME) $(REDIS_SENTINEL_NAME) $(REDIS_CLI_NAME) $(REDIS_BENCHMARK_NAME) $(REDIS_CHECK_RDB_NAME) $(REDIS_CHECK_AOF_NAME) *.o *.gcda *.gcno *.gcov KeyDB.info lcov-html Makefile.dep
rm -rf $(REDIS_SERVER_NAME) $(REDIS_SENTINEL_NAME) $(REDIS_CLI_NAME) $(REDIS_BENCHMARK_NAME) $(REDIS_CHECK_RDB_NAME) $(REDIS_CHECK_AOF_NAME) $(KEYDB_DIAGNOSTIC_NAME) *.o *.gcda *.gcno *.gcov KeyDB.info lcov-html Makefile.dep
rm -rf storage/*.o
rm -rf keydb-server
rm -f $(DEP)
@ -498,7 +513,7 @@ bench: $(REDIS_BENCHMARK_NAME)
$(MAKE) CXXFLAGS="-m32" CFLAGS="-m32" LDFLAGS="-m32"
gcov:
$(MAKE) REDIS_CXXFLAGS="-fprofile-arcs -ftest-coverage -DCOVERAGE_TEST" REDIS_CFLAGS="-fprofile-arcs -ftest-coverage -DCOVERAGE_TEST" REDIS_LDFLAGS="-fprofile-arcs -ftest-coverage"
$(MAKE) KEYDB_CXXFLAGS="-fprofile-arcs -ftest-coverage -DCOVERAGE_TEST" KEYDB_CFLAGS="-fprofile-arcs -ftest-coverage -DCOVERAGE_TEST" KEYDB_LDFLAGS="-fprofile-arcs -ftest-coverage"
noopt:
$(MAKE) OPTIMIZATION="-O0"
@ -507,7 +522,7 @@ valgrind:
$(MAKE) OPTIMIZATION="-O0" USEASM="false" MALLOC="libc" CFLAGS="-DSANITIZE" CXXFLAGS="-DSANITIZE"
helgrind:
$(MAKE) OPTIMIZATION="-O0" MALLOC="libc" CFLAGS="-D__ATOMIC_VAR_FORCE_SYNC_MACROS" REDIS_CFLAGS="-I/usr/local/include" REDIS_LDFLAGS="-L/usr/local/lib"
$(MAKE) OPTIMIZATION="-O0" MALLOC="libc" CFLAGS="-D__ATOMIC_VAR_FORCE_SYNC_MACROS" KEYDB_CFLAGS="-I/usr/local/include" KEYDB_LDFLAGS="-L/usr/local/lib"
src/help.h:
@../utils/generate-command-help.rb > help.h
@ -522,4 +537,4 @@ install: all
@ln -sf $(REDIS_SERVER_NAME) $(INSTALL_BIN)/$(REDIS_SENTINEL_NAME)
uninstall:
rm -f $(INSTALL_BIN)/{$(REDIS_SERVER_NAME),$(REDIS_BENCHMARK_NAME),$(REDIS_CLI_NAME),$(REDIS_CHECK_RDB_NAME),$(REDIS_CHECK_AOF_NAME),$(REDIS_SENTINEL_NAME)}
rm -f $(INSTALL_BIN)/{$(REDIS_SERVER_NAME),$(REDIS_BENCHMARK_NAME),$(REDIS_CLI_NAME),$(REDIS_CHECK_RDB_NAME),$(REDIS_CHECK_AOF_NAME),$(REDIS_SENTINEL_NAME),$(KEYDB_DIAGNOSTIC_NAME)}

View File

@ -175,12 +175,11 @@ void queueClientForReprocessing(client *c) {
/* The client may already be into the unblocked list because of a previous
* blocking operation, don't add back it into the list multiple times. */
serverAssert(GlobalLocksAcquired());
fastlock_lock(&c->lock);
std::unique_lock<fastlock> ul(c->lock);
if (!(c->flags & CLIENT_UNBLOCKED)) {
c->flags |= CLIENT_UNBLOCKED;
listAddNodeTail(g_pserver->rgthreadvar[c->iel].unblocked_clients,c);
}
fastlock_unlock(&c->lock);
}
/* Unblock a client calling the right function depending on the kind
@ -796,4 +795,4 @@ void signalKeyAsReady(redisDb *db, sds key, int type) {
redisObjectStack o;
initStaticStringObject(o, key);
signalKeyAsReady(db, &o, type);
}
}

View File

@ -561,7 +561,7 @@ void clusterInit(void) {
serverAssert(serverTL == &g_pserver->rgthreadvar[IDX_EVENT_LOOP_MAIN]);
if (createSocketAcceptHandler(&g_pserver->cfd, clusterAcceptHandler) != C_OK) {
serverPanic("Unrecoverable error creating Redis Cluster socket accept handler.");
serverPanic("Unrecoverable error creating KeyDB Cluster socket accept handler.");
}
/* The slots -> keys map is a radix tree. Initialize it here. */
@ -5172,11 +5172,12 @@ void dumpCommand(client *c) {
/* KEYDB.MVCCRESTORE key mvcc expire serialized-value */
void mvccrestoreCommand(client *c) {
long long mvcc, expire;
long long expire;
uint64_t mvcc;
robj *key = c->argv[1], *obj = nullptr;
int type;
if (getLongLongFromObjectOrReply(c, c->argv[2], &mvcc, "Invalid MVCC Tstamp") != C_OK)
if (getUnsignedLongLongFromObjectOrReply(c, c->argv[2], &mvcc, "Invalid MVCC Tstamp") != C_OK)
return;
if (getLongLongFromObjectOrReply(c, c->argv[3], &expire, "Invalid expire") != C_OK)
@ -5866,7 +5867,7 @@ clusterNode *getNodeByQuery(client *c, struct redisCommand *cmd, robj **argv, in
multiState *ms, _ms;
multiCmd mc;
int i, slot = 0, migrating_slot = 0, importing_slot = 0, missing_keys = 0;
serverAssert(GlobalLocksAcquired());
serverAssert((c->cmd->flags & CMD_ASYNC_OK) || GlobalLocksAcquired());
/* Allow any key to be set if a module disabled cluster redirections. */
if (g_pserver->cluster_module_flags & CLUSTER_MODULE_FLAG_NO_REDIRECTION)

View File

@ -2736,6 +2736,7 @@ standardConfig configs[] = {
createBoolConfig("disable-thp", NULL, MODIFIABLE_CONFIG, g_pserver->disable_thp, 1, NULL, NULL),
createBoolConfig("cluster-allow-replica-migration", NULL, MODIFIABLE_CONFIG, g_pserver->cluster_allow_replica_migration, 1, NULL, NULL),
createBoolConfig("replica-announced", NULL, MODIFIABLE_CONFIG, g_pserver->replica_announced, 1, NULL, NULL),
createBoolConfig("enable-async-commands", NULL, MODIFIABLE_CONFIG, g_pserver->enable_async_commands, 1, NULL, NULL),
/* String Configs */
createStringConfig("aclfile", NULL, IMMUTABLE_CONFIG, ALLOW_EMPTY_STRING, g_pserver->acl_filename, "", NULL, NULL),

View File

@ -456,6 +456,9 @@ void connSetThreadAffinity(connection *conn, int cpu) {
{
serverLog(LL_WARNING, "Failed to set socket affinity");
}
#else
(void)conn;
(void)cpu;
#endif
}

View File

@ -159,7 +159,6 @@ static robj_roptr lookupKeyConst(redisDb *db, robj *key, int flags) {
* expiring our key via DELs in the replication link. */
robj_roptr lookupKeyReadWithFlags(redisDb *db, robj *key, int flags) {
robj_roptr val;
serverAssert(GlobalLocksAcquired());
if (expireIfNeeded(db,key) == 1) {
/* If we are in the context of a master, expireIfNeeded() returns 1
@ -204,8 +203,41 @@ keymiss:
/* Like lookupKeyReadWithFlags(), but does not use any flag, which is the
 * common case.
 *
 * This overload asserts that the caller already holds the global locks;
 * the 3-argument overload below is the lock-aware/async-capable variant. */
robj_roptr lookupKeyRead(redisDb *db, robj *key) {
    serverAssert(GlobalLocksAcquired());
    return lookupKeyReadWithFlags(db,key,LOOKUP_NONE);
}
/* Read-only key lookup that also works from an async command, i.e. from a
 * thread that does NOT hold the global lock.
 *
 * - If this thread owns the AE lock, fall through to the normal locked
 *   lookup.
 * - Otherwise serve the read from a per-thread, per-db snapshot
 *   (serverTL->rgdbSnapshot[idb]) that is at least as new as the client's
 *   mvccCheckpoint, creating one under the lock when needed.
 *
 * NOTE(review): when an existing snapshot is older than mvccCheckpoint it is
 * ended and set to nullptr, but a replacement is NOT created on this call —
 * the code then falls into the locked-lookup fallback and disables async
 * commands for this thread. Confirm this asymmetry (end-stale vs. create) is
 * intentional. */
robj_roptr lookupKeyRead(redisDb *db, robj *key, uint64_t mvccCheckpoint) {
    robj_roptr o;
    if (aeThreadOwnsLock()) {
        /* Fast path: we already hold the global lock, normal lookup. */
        return lookupKeyReadWithFlags(db,key,LOOKUP_NONE);
    } else {
        // This is an async command
        if (keyIsExpired(db,key))
            return nullptr;
        int idb = db->id;
        /* Snapshot missing or older than the client's checkpoint: take the
         * global lock to refresh it. */
        if (serverTL->rgdbSnapshot[idb] == nullptr || serverTL->rgdbSnapshot[idb]->mvccCheckpoint() < mvccCheckpoint) {
            AeLocker locker;
            locker.arm(serverTL->current_client);
            if (serverTL->rgdbSnapshot[idb] != nullptr) {
                /* Stale snapshot: release it. */
                db->endSnapshot(serverTL->rgdbSnapshot[idb]);
                serverTL->rgdbSnapshot[idb] = nullptr;
            } else {
                serverTL->rgdbSnapshot[idb] = db->createSnapshot(mvccCheckpoint, true);
            }
            if (serverTL->rgdbSnapshot[idb] == nullptr) {
                // We still need to service the read
                /* No snapshot available: do a locked lookup (we hold the
                 * lock via 'locker' here) and stop attempting async reads
                 * on this thread. */
                o = lookupKeyReadWithFlags(db,key,LOOKUP_NONE);
                serverTL->disable_async_commands = true; // don't try this again
            }
        }
        if (serverTL->rgdbSnapshot[idb] != nullptr) {
            /* Lock-free read against the thread-local snapshot. */
            o = serverTL->rgdbSnapshot[idb]->find_cached_threadsafe(szFromObj(key)).val();
        }
    }
    return o;
}
/* Lookup a key for write operations, and as a side effect, if needed, expires
* the key if its TTL is reached.
@ -231,7 +263,7 @@ static void SentReplyOnKeyMiss(client *c, robj *reply){
}
}
robj_roptr lookupKeyReadOrReply(client *c, robj *key, robj *reply) {
robj_roptr o = lookupKeyRead(c->db, key);
robj_roptr o = lookupKeyRead(c->db, key, c->mvccCheckpoint);
if (!o) SentReplyOnKeyMiss(c, reply);
return o;
}
@ -1188,17 +1220,10 @@ void scanGenericCommand(client *c, robj_roptr o, unsigned long cursor) {
if (o == nullptr && count >= 100)
{
// Do an async version
const redisDbPersistentDataSnapshot *snapshot = nullptr;
if (!(c->flags & (CLIENT_MULTI | CLIENT_BLOCKED)))
snapshot = c->db->createSnapshot(c->mvccCheckpoint, false /* fOptional */);
if (snapshot != nullptr)
{
aeEventLoop *el = serverTL->el;
blockClient(c, BLOCKED_ASYNC);
redisDb *db = c->db;
sds patCopy = pat ? sdsdup(pat) : nullptr;
sds typeCopy = type ? sdsdup(type) : nullptr;
g_pserver->asyncworkqueue->AddWorkFunction([c, snapshot, cursor, count, keys, el, db, patCopy, typeCopy, use_pattern]{
if (c->asyncCommand(
[c, keys, pat, type, cursor, count, use_pattern] (const redisDbPersistentDataSnapshot *snapshot, const std::vector<robj_sharedptr> &) {
sds patCopy = pat ? sdsdup(pat) : nullptr;
sds typeCopy = type ? sdsdup(type) : nullptr;
auto cursorResult = snapshot->scan_threadsafe(cursor, count, typeCopy, keys);
if (use_pattern) {
listNode *ln = listFirst(keys);
@ -1219,30 +1244,17 @@ void scanGenericCommand(client *c, robj_roptr o, unsigned long cursor) {
sdsfree(patCopy);
if (typeCopy != nullptr)
sdsfree(typeCopy);
aePostFunction(el, [c, snapshot, keys, db, cursorResult, use_pattern]{
aeReleaseLock(); // we need to lock with coordination of the client
std::unique_lock<decltype(c->lock)> lock(c->lock);
AeLocker locker;
locker.arm(c);
unblockClient(c);
mstime_t timeScanFilter;
latencyStartMonitor(timeScanFilter);
scanFilterAndReply(c, keys, nullptr, nullptr, false, nullptr, cursorResult);
latencyEndMonitor(timeScanFilter);
latencyAddSampleIfNeeded("scan-async-filter", timeScanFilter);
locker.disarm();
lock.unlock();
db->endSnapshotAsync(snapshot);
listSetFreeMethod(keys,decrRefCountVoid);
listRelease(keys);
aeAcquireLock();
});
});
mstime_t timeScanFilter;
latencyStartMonitor(timeScanFilter);
scanFilterAndReply(c, keys, nullptr, nullptr, false, nullptr, cursorResult);
latencyEndMonitor(timeScanFilter);
latencyAddSampleIfNeeded("scan-async-filter", timeScanFilter);
},
[keys] (const redisDbPersistentDataSnapshot *) {
listSetFreeMethod(keys,decrRefCountVoid);
listRelease(keys);
}
)) {
return;
}
}
@ -1674,9 +1686,8 @@ void copyCommand(client *c) {
}
dbAdd(dst,newkey,newobj);
if (expire != nullptr) {
if (expire != nullptr) setExpire(c, dst, newkey, expire->duplicate());
}
if (expire != nullptr)
setExpire(c, dst, newkey, expire->duplicate());
/* OK! key copied */
signalModifiedKey(c,dst,c->argv[2]);
@ -2827,8 +2838,10 @@ LNotFound:
serverAssert(m_setexpire->find(sdsKey) != m_setexpire->end());
}
serverAssert(o->FExpires() == (m_setexpire->find(sdsKey) != m_setexpire->end()));
g_pserver->stat_storage_provider_read_hits++;
} else {
sdsfree(sdsNewKey);
g_pserver->stat_storage_provider_read_misses++;
}
*pde = dictFind(m_pdict, sdsKey);
@ -3233,6 +3246,8 @@ bool redisDbPersistentData::prefetchKeysAsync(client *c, parsed_command &command
if (command.argc >= 2) {
const char *cmd = szFromObj(command.argv[0]);
if (!strcasecmp(cmd, "set") || !strcasecmp(cmd, "get")) {
if (c->db->m_spdbSnapshotHOLDER != nullptr)
return false; // this is dangerous enough without a snapshot around
auto h = dictSdsHash(szFromObj(command.argv[1]));
for (int iht = 0; iht < 2; ++iht) {
auto hT = h & c->db->m_pdict->ht[iht].sizemask;

View File

@ -51,6 +51,12 @@ typedef ucontext_t sigcontext_t;
#include <cxxabi.h>
#endif /* HAVE_BACKTRACE */
//UNW_LOCAL_ONLY being set means we use libunwind for backtraces instead of execinfo
#ifdef UNW_LOCAL_ONLY
#include <libunwind.h>
#include <cxxabi.h>
#endif
#ifdef __CYGWIN__
#ifndef SA_ONSTACK
#define SA_ONSTACK 0x08000000
@ -944,7 +950,7 @@ void _serverAssert(const char *estr, const char *file, int line) {
serverLog(LL_WARNING,"==> %s:%d '%s' is not true",file,line,estr);
if (g_pserver->crashlog_enabled) {
#ifdef HAVE_BACKTRACE
#if defined HAVE_BACKTRACE || defined UNW_LOCAL_ONLY
logStackTrace(NULL, 1);
#endif
printCrashReport();
@ -1035,14 +1041,13 @@ void _serverPanic(const char *file, int line, const char *msg, ...) {
vsnprintf(fmtmsg,sizeof(fmtmsg),msg,ap);
va_end(ap);
g_fInCrash = true;
bugReportStart();
serverLog(LL_WARNING,"------------------------------------------------");
serverLog(LL_WARNING,"!!! Software Failure. Press left mouse button to continue");
serverLog(LL_WARNING,"Guru Meditation: %s #%s:%d",fmtmsg,file,line);
if (g_pserver->crashlog_enabled) {
#ifdef HAVE_BACKTRACE
#if defined HAVE_BACKTRACE || defined UNW_LOCAL_ONLY
logStackTrace(NULL, 1);
#endif
printCrashReport();
@ -1597,6 +1602,65 @@ void safe_write(int fd, const void *pv, ssize_t cb)
} while (offset < cb);
}
#ifdef UNW_LOCAL_ONLY
/* Logs the stack trace using the libunwind call.
 * The eip argument is unused as libunwind only gets local context.
 * The uplevel argument indicates how many of the calling functions to skip.
 *
 * Frames are written to the direct log file descriptor one line at a time
 * with dprintf(), and symbol names are demangled via __cxa_demangle when
 * possible. */
void logStackTrace(void * eip, int uplevel) {
    (void)eip;//UNUSED
    const char *msg;
    int fd = openDirectLogFiledes();

    if (fd == -1) return; /* If we can't log there is nothing we can do. */

    msg = "\n------ STACK TRACE ------\n";
    if (write(fd,msg,strlen(msg)) == -1) {/* Avoid warning. */};

    unw_cursor_t cursor;
    unw_context_t context;

    unw_getcontext(&context);
    unw_init_local(&cursor, &context);

    /* Write symbols to log file */
    msg = "\nBacktrace:\n";
    if (write(fd,msg,strlen(msg)) == -1) {/* Avoid warning. */};

    /* Skip the innermost 'uplevel' frames (this function and the crash
     * handling path that called it). */
    for (int i = 0; i < uplevel; i++) {
        unw_step(&cursor);
    }

    while ( unw_step(&cursor) ) {
        unw_word_t ip, sp, off;

        unw_get_reg(&cursor, UNW_REG_IP, &ip);
        unw_get_reg(&cursor, UNW_REG_SP, &sp);

        char symbol[256] = {"<unknown>"};
        char *name = symbol;

        /* Try to resolve and demangle the frame's symbol; fall back to the
         * raw (possibly mangled) name when demangling fails. */
        if ( !unw_get_proc_name(&cursor, symbol, sizeof(symbol), &off) ) {
            int status;
            if ( (name = abi::__cxa_demangle(symbol, NULL, NULL, &status)) == 0 )
                name = symbol;
        }

        dprintf(fd, "%s(+0x%" PRIxPTR ") [0x%016" PRIxPTR "] sp=0x%016" PRIxPTR "\n",
                name,
                static_cast<uintptr_t>(off),
                static_cast<uintptr_t>(ip),
                static_cast<uintptr_t>(sp));

        if ( name != symbol )
            free(name); /* __cxa_demangle malloc()s the returned buffer. */
    }
    /* NOTE(review): 'fd' obtained from openDirectLogFiledes() is not closed
     * here — confirm whether a matching close (as in the backtrace()-based
     * variant of this function) is required. */
}
#endif /* UNW_LOCAL_ONLY */
#ifdef HAVE_BACKTRACE
void backtrace_symbols_demangle_fd(void **trace, size_t csym, int fd)
{
char **syms = backtrace_symbols(trace, csym);
@ -1640,8 +1704,6 @@ void backtrace_symbols_demangle_fd(void **trace, size_t csym, int fd)
free(syms);
}
#ifdef HAVE_BACKTRACE
/* Logs the stack trace using the backtrace() call. This function is designed
* to be called from signal handlers safely.
* The eip argument is optional (can take NULL).
@ -1930,6 +1992,9 @@ void sigsegvHandler(int sig, siginfo_t *info, void *secret) {
logRegisters(uc);
#endif
#ifdef UNW_LOCAL_ONLY
logStackTrace(NULL, 1);
#endif
printCrashReport();
@ -2024,6 +2089,8 @@ void watchdogSignalHandler(int sig, siginfo_t *info, void *secret) {
serverLogFromHandler(LL_WARNING,"\n--- WATCHDOG TIMER EXPIRED ---");
#ifdef HAVE_BACKTRACE
logStackTrace(getMcontextEip(uc), 1);
#elif defined UNW_LOCAL_ONLY
logStackTrace(NULL, 1);
#else
serverLogFromHandler(LL_WARNING,"Sorry: no support for backtrace().");
#endif

View File

@ -721,7 +721,6 @@ static dictEntry *dictGenericDelete(dict *d, const void *key, int nofree) {
}
if (!dictIsRehashing(d)) break;
}
_dictExpandIfNeeded(d);
return NULL; /* not found */
}

View File

@ -826,8 +826,8 @@ void expireEntryFat::expireSubKey(const char *szSubkey, long long when)
fFound = true;
}
if (fFound) {
m_vecexpireEntries.erase(itr);
dictDelete(m_dictIndex, szSubkey);
m_vecexpireEntries.erase(itr);
break;
}
++itr;

View File

@ -0,0 +1,972 @@
/* KeyDB diagnostic utility.
*
* Copyright (c) 2009-2021, Salvatore Sanfilippo <antirez at gmail dot com>
* Copyright (c) 2021, EQ Alpha Technology Ltd. <john at eqalpha dot com>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of Redis nor the names of its contributors may be used
* to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include "fmacros.h"
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <unistd.h>
#include <errno.h>
#include <time.h>
#include <sys/resource.h>
#include <sys/time.h>
#include <signal.h>
#include <assert.h>
#include <math.h>
#include <pthread.h>
#include <deque>
extern "C" {
#include <sds.h> /* Use hiredis sds. */
#include <sdscompat.h>
#include "hiredis.h"
}
#include "ae.h"
#include "adlist.h"
#include "dict.h"
#include "zmalloc.h"
#include "storage.h"
#include "atomicvar.h"
#include "crc16_slottable.h"
#define UNUSED(V) ((void) V)
#define RANDPTR_INITIAL_SIZE 8
#define MAX_LATENCY_PRECISION 3
#define MAX_THREADS 500
#define CLUSTER_SLOTS 16384
#define CLIENT_GET_EVENTLOOP(c) \
(c->thread_id >= 0 ? config.threads[c->thread_id]->el : config.el)
struct benchmarkThread;
struct clusterNode;
struct redisConfig;
int g_fTestMode = false;
static struct config {
aeEventLoop *el;
const char *hostip;
int hostport;
const char *hostsocket;
int numclients;
int liveclients;
int period_ms;
int requests;
int requests_issued;
int requests_finished;
int keysize;
int datasize;
int randomkeys;
int randomkeys_keyspacelen;
int keepalive;
int pipeline;
int showerrors;
long long start;
long long totlatency;
long long *latency;
const char *title;
list *clients;
int quiet;
int csv;
int loop;
int idlemode;
int dbnum;
sds dbnumstr;
char *tests;
char *auth;
const char *user;
int precision;
int max_threads;
struct benchmarkThread **threads;
int cluster_mode;
int cluster_node_count;
struct clusterNode **cluster_nodes;
struct redisConfig *redis_config;
int is_fetching_slots;
int is_updating_slots;
int slots_last_update;
int enable_tracking;
/* Thread mutexes to be used as fallbacks by atomicvar.h */
pthread_mutex_t requests_issued_mutex;
pthread_mutex_t requests_finished_mutex;
pthread_mutex_t liveclients_mutex;
pthread_mutex_t is_fetching_slots_mutex;
pthread_mutex_t is_updating_slots_mutex;
pthread_mutex_t updating_slots_mutex;
pthread_mutex_t slots_last_update_mutex;
} config;
typedef struct _client {
redisContext *context;
sds obuf;
char **randptr; /* Pointers to :rand: strings inside the command buf */
size_t randlen; /* Number of pointers in client->randptr */
size_t randfree; /* Number of unused pointers in client->randptr */
char **stagptr; /* Pointers to slot hashtags (cluster mode only) */
size_t staglen; /* Number of pointers in client->stagptr */
size_t stagfree; /* Number of unused pointers in client->stagptr */
size_t written; /* Bytes of 'obuf' already written */
long long start; /* Start time of a request */
long long latency; /* Request latency */
int pending; /* Number of pending requests (replies to consume) */
int prefix_pending; /* If non-zero, number of pending prefix commands. Commands
such as auth and select are prefixed to the pipeline of
benchmark commands and discarded after the first send. */
int prefixlen; /* Size in bytes of the pending prefix commands */
int thread_id;
struct clusterNode *cluster_node;
int slots_last_update;
redisReply *lastReply;
} *client;
/* Threads. */
typedef struct benchmarkThread {
int index;
pthread_t thread;
aeEventLoop *el;
} benchmarkThread;
/* Cluster. */
typedef struct clusterNode {
char *ip;
int port;
sds name;
int flags;
sds replicate; /* Master ID if node is a replica */
int *slots;
int slots_count;
int current_slot_index;
int *updated_slots; /* Used by updateClusterSlotsConfiguration */
int updated_slots_count; /* Used by updateClusterSlotsConfiguration */
int replicas_count;
sds *migrating; /* An array of sds where even strings are slots and odd
* strings are the destination node IDs. */
sds *importing; /* An array of sds where even strings are slots and odd
* strings are the source node IDs. */
int migrating_count; /* Length of the migrating array (migrating slots*2) */
int importing_count; /* Length of the importing array (importing slots*2) */
struct redisConfig *redis_config;
} clusterNode;
typedef struct redisConfig {
sds save;
sds appendonly;
} redisConfig;
int g_fInCrash = false;
/* Prototypes */
static void writeHandler(aeEventLoop *el, int fd, void *privdata, int mask);
static benchmarkThread *createBenchmarkThread(int index);
static void freeBenchmarkThread(benchmarkThread *thread);
static void freeBenchmarkThreads();
static redisContext *getRedisContext(const char *ip, int port,
const char *hostsocket);
/* Implementation */
/* Return the current wall-clock time in microseconds since the Unix epoch.
 *
 * Fix: the seconds value is widened to long long BEFORE the multiply. The
 * original code cast to (long), which is 32 bits on 32-bit platforms, so
 * tv_sec * 1000000 overflowed and produced garbage timestamps there. */
static long long ustime(void) {
    struct timeval tv;
    long long ust;

    gettimeofday(&tv, NULL);
    ust = ((long long)tv.tv_sec)*1000000;
    ust += tv.tv_usec;
    return ust;
}
/* _serverAssert is needed by dict.
 *
 * Minimal assertion handler for the bundled dict implementation: print the
 * failed expression and its location to stderr, then crash the process with
 * a deliberate invalid write so a debugger or core dump captures the state.
 *
 * Fix: both messages previously lacked a trailing '\n', so the two lines
 * (and any subsequent output) ran together on one line. */
extern "C" void _serverAssert(const char *estr, const char *file, int line) {
    fprintf(stderr, "=== ASSERTION FAILED ===\n");
    fprintf(stderr, "==> %s:%d '%s' is not true\n",file,line,estr);
    *((char*)-1) = 'x'; /* Intentional SIGSEGV. */
}
/* asyncFreeDictTable is needed by dict: the dict implementation hands
 * retired hash-table bucket arrays to this hook. This standalone tool has
 * no background-free machinery, so release the array immediately. */
extern "C" void asyncFreeDictTable(struct dictEntry **de) {
    zfree(de);
}
/* Open a blocking hiredis connection to the target server and, when
 * config.auth is set, authenticate it with AUTH [user] password.
 *
 * ip/port are used when hostsocket is NULL; otherwise a unix-domain socket
 * connection is made. Returns a ready redisContext*, or NULL on connection
 * or AUTH failure; on the error path both the reply and the context are
 * released (presumably freeReplyObject/redisFree tolerate NULL here —
 * confirm against the bundled hiredis version). */
static redisContext *getRedisContext(const char *ip, int port,
                                     const char *hostsocket)
{
    redisContext *ctx = NULL;
    redisReply *reply = NULL;
    if (hostsocket == NULL)
        ctx = redisConnect(ip, port);
    else
        ctx = redisConnectUnix(hostsocket);
    if (ctx == NULL || ctx->err) {
        fprintf(stderr,"Could not connect to Redis at ");
        const char *err = (ctx != NULL ? ctx->errstr : "");
        if (hostsocket == NULL)
            fprintf(stderr,"%s:%d: %s\n",ip,port,err);
        else
            fprintf(stderr,"%s: %s\n",hostsocket,err);
        goto cleanup;
    }
    /* No credentials configured: the bare connection is all we need. */
    if (config.auth == NULL)
        return ctx;
    if (config.user == NULL)
        reply = (redisReply*)redisCommand(ctx,"AUTH %s", config.auth);
    else
        reply = (redisReply*)redisCommand(ctx,"AUTH %s %s", config.user, config.auth);
    if (reply != NULL) {
        if (reply->type == REDIS_REPLY_ERROR) {
            if (hostsocket == NULL)
                fprintf(stderr, "Node %s:%d replied with error:\n%s\n", ip, port, reply->str);
            else
                fprintf(stderr, "Node %s replied with error:\n%s\n", hostsocket, reply->str);
            goto cleanup;
        }
        freeReplyObject(reply);
        return ctx;
    }
    /* redisCommand returned NULL: protocol/IO failure while authenticating. */
    fprintf(stderr, "ERROR: failed to fetch reply from ");
    if (hostsocket == NULL)
        fprintf(stderr, "%s:%d\n", ip, port);
    else
        fprintf(stderr, "%s\n", hostsocket);
cleanup:
    freeReplyObject(reply);
    redisFree(ctx);
    return NULL;
}
/* Dispose of a benchmark client: unregister its AE events, stop the owning
 * event loop when the global request budget has been met (threaded mode),
 * release its connection/buffers, and unlink it from config.clients.
 * The list/counter update is guarded by liveclients_mutex when threads are
 * in use. */
static void freeClient(client c) {
    aeEventLoop *el = CLIENT_GET_EVENTLOOP(c);
    listNode *ln;
    aeDeleteFileEvent(el,c->context->fd,AE_WRITABLE);
    aeDeleteFileEvent(el,c->context->fd,AE_READABLE);
    if (c->thread_id >= 0) {
        /* Threaded client: if all requests have finished, stop this
         * thread's event loop so the worker can exit. */
        int requests_finished = 0;
        atomicGet(config.requests_finished, requests_finished);
        if (requests_finished >= config.requests) {
            aeStop(el);
        }
    }
    redisFree(c->context);
    sdsfree(c->obuf);
    zfree(c->randptr);
    zfree(c->stagptr);
    zfree(c);
    if (config.max_threads) pthread_mutex_lock(&(config.liveclients_mutex));
    config.liveclients--;
    ln = listSearchKey(config.clients,c);
    assert(ln != NULL);
    listDelNode(config.clients,ln);
    if (config.max_threads) pthread_mutex_unlock(&(config.liveclients_mutex));
}
static void freeAllClients(void) {
listNode *ln = config.clients->head, *next;
while(ln) {
next = ln->next;
freeClient((client)ln->value);
ln = next;
}
}
/* Re-arm a client for its next pipelined batch: drop any pending read and
 * write events, re-register the write handler, and reset the output-buffer
 * offset plus the number of replies expected for a full pipeline. */
static void resetClient(client c) {
    aeEventLoop *loop = CLIENT_GET_EVENTLOOP(c);
    int fd = c->context->fd;
    aeDeleteFileEvent(loop, fd, AE_WRITABLE);
    aeDeleteFileEvent(loop, fd, AE_READABLE);
    aeCreateFileEvent(loop, fd, AE_WRITABLE, writeHandler, c);
    c->written = 0;
    c->pending = config.pipeline;
}
/* Overwrite every "__rand_int__" placeholder in the client's output buffer
 * with a zero-padded 12-digit key drawn from the configured keyspace. */
static void randomizeClientKey(client c) {
    for (size_t i = 0; i < c->randlen; i++) {
        size_t r = 0;
        if (config.randomkeys_keyspacelen != 0)
            r = random() % config.randomkeys_keyspacelen;
        /* Emit digits least-significant first, walking backwards from the
         * final character of the 12-byte placeholder. */
        char *digit = c->randptr[i] + 11;
        for (size_t pos = 0; pos < 12; pos++, digit--) {
            *digit = '0' + r % 10;
            r /= 10;
        }
    }
}
/* AE readable-event callback: read whatever the server sent, then parse
 * replies until either every pending reply of this pipeline batch has been
 * consumed (the client is then reset for the next batch) or the read buffer
 * is exhausted. Records request latency on the first read event and counts
 * finished requests. Exits the process on any protocol/IO error. */
static void readHandler(aeEventLoop *el, int fd, void *privdata, int mask) {
    client c = (client)privdata;
    void *reply = NULL;
    UNUSED(el);
    UNUSED(fd);
    UNUSED(mask);
    /* Calculate latency only for the first read event. This means that the
     * server already sent the reply and we need to parse it. Parsing overhead
     * is not part of the latency, so calculate it only once, here. */
    if (c->latency < 0) c->latency = ustime()-(c->start);
    if (redisBufferRead(c->context) != REDIS_OK) {
        fprintf(stderr,"Error: %s\n",c->context->errstr);
        exit(1);
    } else {
        while(c->pending) {
            if (redisGetReply(c->context,&reply) != REDIS_OK) {
                fprintf(stderr,"Error: %s\n",c->context->errstr);
                exit(1);
            }
            if (reply != NULL) {
                if (reply == (void*)REDIS_REPLY_ERROR) {
                    fprintf(stderr,"Unexpected error reply, exiting...\n");
                    exit(1);
                }
                redisReply *r = (redisReply*)reply;
                int is_err = (r->type == REDIS_REPLY_ERROR);
                if (is_err && config.showerrors) {
                    /* TODO: static lasterr_time not thread-safe */
                    static time_t lasterr_time = 0;
                    time_t now = time(NULL);
                    /* Rate-limit error reporting to one message per second. */
                    if (lasterr_time != now) {
                        lasterr_time = now;
                        if (c->cluster_node) {
                            printf("Error from server %s:%d: %s\n",
                                   c->cluster_node->ip,
                                   c->cluster_node->port,
                                   r->str);
                        } else printf("Error from server: %s\n", r->str);
                    }
                }
                freeReplyObject(reply);
                /* This is an OK for prefix commands such as auth and select.*/
                if (c->prefix_pending > 0) {
                    c->prefix_pending--;
                    c->pending--;
                    /* Discard prefix commands on first response.*/
                    if (c->prefixlen > 0) {
                        size_t j;
                        sdsrange(c->obuf, c->prefixlen, -1);
                        /* We also need to fix the pointers to the strings
                         * we need to randomize. */
                        for (j = 0; j < c->randlen; j++)
                            c->randptr[j] -= c->prefixlen;
                        c->prefixlen = 0;
                    }
                    continue;
                }
                int requests_finished = 0;
                atomicGetIncr(config.requests_finished, requests_finished, 1);
                /* Record the latency sample only while it fits inside the
                 * pre-sized latency array. */
                if (requests_finished < config.requests)
                    config.latency[requests_finished] = c->latency;
                c->pending--;
                if (c->pending == 0) {
                    resetClient(c);
                    break;
                }
            } else {
                /* No complete reply buffered yet; wait for the next read. */
                break;
            }
        }
    }
}
/* AE writable-event callback: on the first write of a request it randomizes
 * keys and stamps the start time, then pushes the output buffer to the
 * socket. Once the whole buffer is flushed, the event is flipped from
 * writable to readable so the replies can be collected by readHandler(). */
static void writeHandler(aeEventLoop *el, int fd, void *privdata, int mask) {
    client c = (client)privdata;
    UNUSED(el);
    UNUSED(fd);
    UNUSED(mask);
    /* Initialize request when nothing was written. */
    if (c->written == 0) {
        /* Really initialize: randomize keys and set start time. */
        if (config.randomkeys) randomizeClientKey(c);
        atomicGet(config.slots_last_update, c->slots_last_update);
        c->start = ustime();
        c->latency = -1;
    }
    if (sdslen(c->obuf) > c->written) {
        void *ptr = c->obuf+c->written;
        ssize_t nwritten = write(c->context->fd,ptr,sdslen(c->obuf)-c->written);
        if (nwritten == -1) {
            /* EPIPE just means the peer went away; other errors are worth
             * reporting before the client is torn down. */
            if (errno != EPIPE)
                fprintf(stderr, "Writing to socket: %s\n", strerror(errno));
            freeClient(c);
            return;
        }
        c->written += nwritten;
        if (sdslen(c->obuf) == c->written) {
            /* Whole request flushed: switch to reading the replies. */
            aeDeleteFileEvent(el,c->context->fd,AE_WRITABLE);
            aeCreateFileEvent(el,c->context->fd,AE_READABLE,readHandler,c);
        }
    }
}
/* Create a benchmark client, configured to send the command passed as 'cmd' of
 * 'len' bytes.
 *
 * The command is copied N times in the client output buffer (that is reused
 * again and again to send the request to the server) accordingly to the configured
 * pipeline size.
 *
 * Also an initial SELECT command is prepended in order to make sure the right
 * database is selected, if needed. The initial SELECT will be discarded as soon
 * as the first reply is received.
 *
 * To create a client from scratch, the 'from' pointer is set to NULL. If instead
 * we want to create a client using another client as reference, the 'from' pointer
 * points to the client to use as reference. In such a case the following
 * information is take from the 'from' client:
 *
 * 1) The command line to use.
 * 2) The offsets of the __rand_int__ elements inside the command line, used
 * for arguments randomization.
 *
 * Even when cloning another client, prefix commands are applied if needed.*/
static client createClient(const char *cmd, size_t len, client from, int thread_id) {
    int j;
    int is_cluster_client = (config.cluster_mode && thread_id >= 0);
    client c = (client)zmalloc(sizeof(struct _client), MALLOC_LOCAL);

    const char *ip = NULL;
    int port = 0;
    c->cluster_node = NULL;
    if (config.hostsocket == NULL || is_cluster_client) {
        if (!is_cluster_client) {
            ip = config.hostip;
            port = config.hostport;
        } else {
            /* Cluster mode: spread clients across nodes, either by live
             * client count (fewer threads than nodes) or by thread id. */
            int node_idx = 0;
            if (config.max_threads < config.cluster_node_count)
                node_idx = config.liveclients % config.cluster_node_count;
            else
                node_idx = thread_id % config.cluster_node_count;
            clusterNode *node = config.cluster_nodes[node_idx];
            assert(node != NULL);
            ip = (const char *) node->ip;
            port = node->port;
            c->cluster_node = node;
        }
        c->context = redisConnectNonBlock(ip,port);
    } else {
        c->context = redisConnectUnixNonBlock(config.hostsocket);
    }
    if (c->context->err) {
        fprintf(stderr,"Could not connect to Redis at ");
        if (config.hostsocket == NULL || is_cluster_client)
            fprintf(stderr,"%s:%d: %s\n",ip,port,c->context->errstr);
        else
            fprintf(stderr,"%s: %s\n",config.hostsocket,c->context->errstr);
        exit(1);
    }
    c->thread_id = thread_id;
    /* Suppress hiredis cleanup of unused buffers for max speed. */
    c->context->reader->maxbuf = 0;

    /* Build the request buffer:
     * Queue N requests accordingly to the pipeline size, or simply clone
     * the example client buffer. */
    c->obuf = sdsempty();
    /* Prefix the request buffer with AUTH and/or SELECT commands, if applicable.
     * These commands are discarded after the first response, so if the client is
     * reused the commands will not be used again. */
    c->prefix_pending = 0;
    if (config.auth) {
        char *buf = NULL;
        int len; /* NOTE: shadows the 'len' function parameter. */
        if (config.user == NULL)
            len = redisFormatCommand(&buf, "AUTH %s", config.auth);
        else
            len = redisFormatCommand(&buf, "AUTH %s %s",
                config.user, config.auth);
        c->obuf = sdscatlen(c->obuf, buf, len);
        free(buf);
        c->prefix_pending++;
    }

    if (config.enable_tracking) {
        char *buf = NULL;
        int len = redisFormatCommand(&buf, "CLIENT TRACKING on");
        c->obuf = sdscatlen(c->obuf, buf, len);
        free(buf);
        c->prefix_pending++;
    }

    /* If a DB number different than zero is selected, prefix our request
     * buffer with the SELECT command, that will be discarded the first
     * time the replies are received, so if the client is reused the
     * SELECT command will not be used again. */
    if (config.dbnum != 0 && !is_cluster_client) {
        c->obuf = sdscatprintf(c->obuf,"*2\r\n$6\r\nSELECT\r\n$%d\r\n%s\r\n",
            (int)sdslen(config.dbnumstr),config.dbnumstr);
        c->prefix_pending++;
    }
    c->prefixlen = sdslen(c->obuf);
    /* Append the request itself. */
    if (from) {
        c->obuf = sdscatlen(c->obuf,
            from->obuf+from->prefixlen,
            sdslen(from->obuf)-from->prefixlen);
    } else {
        for (j = 0; j < config.pipeline; j++)
            c->obuf = sdscatlen(c->obuf,cmd,len);
    }

    c->written = 0;
    c->pending = config.pipeline+c->prefix_pending;
    c->randptr = NULL;
    c->randlen = 0;
    c->stagptr = NULL;
    c->staglen = 0;

    /* Find substrings in the output buffer that need to be randomized. */
    if (config.randomkeys) {
        if (from) {
            /* Cloning: translate the reference client's placeholder offsets
             * into this client's buffer. */
            c->randlen = from->randlen;
            c->randfree = 0;
            c->randptr = (char**)zmalloc(sizeof(char*)*c->randlen, MALLOC_LOCAL);
            /* copy the offsets. */
            for (j = 0; j < (int)c->randlen; j++) {
                c->randptr[j] = c->obuf + (from->randptr[j]-from->obuf);
                /* Adjust for the different select prefix length. */
                c->randptr[j] += c->prefixlen - from->prefixlen;
            }
        } else {
            /* Fresh client: scan the buffer for placeholders, growing the
             * pointer array geometrically as needed. */
            char *p = c->obuf;

            c->randlen = 0;
            c->randfree = RANDPTR_INITIAL_SIZE;
            c->randptr = (char**)zmalloc(sizeof(char*)*c->randfree, MALLOC_LOCAL);
            while ((p = strstr(p,"__rand_int__")) != NULL) {
                if (c->randfree == 0) {
                    c->randptr = (char**)zrealloc(c->randptr,sizeof(char*)*c->randlen*2, MALLOC_LOCAL);
                    c->randfree += c->randlen;
                }
                c->randptr[c->randlen++] = p;
                c->randfree--;
                p += 12; /* 12 is strlen("__rand_int__"). */
            }
        }
    }
    /* If cluster mode is enabled, set slot hashtags pointers. */
    if (config.cluster_mode) {
        if (from) {
            c->staglen = from->staglen;
            c->stagfree = 0;
            c->stagptr = (char**)zmalloc(sizeof(char*)*c->staglen, MALLOC_LOCAL);
            /* copy the offsets. */
            for (j = 0; j < (int)c->staglen; j++) {
                c->stagptr[j] = c->obuf + (from->stagptr[j]-from->obuf);
                /* Adjust for the different select prefix length. */
                c->stagptr[j] += c->prefixlen - from->prefixlen;
            }
        } else {
            char *p = c->obuf;

            c->staglen = 0;
            c->stagfree = RANDPTR_INITIAL_SIZE;
            c->stagptr = (char**)zmalloc(sizeof(char*)*c->stagfree, MALLOC_LOCAL);
            while ((p = strstr(p,"{tag}")) != NULL) {
                if (c->stagfree == 0) {
                    c->stagptr = (char**)zrealloc(c->stagptr,
                        sizeof(char*) * c->staglen*2, MALLOC_LOCAL);
                    c->stagfree += c->staglen;
                }
                c->stagptr[c->staglen++] = p;
                c->stagfree--;
                p += 5; /* 5 is strlen("{tag}"). */
            }
        }
    }
    /* Register on the owning event loop: the global loop for the main
     * thread (thread_id < 0), or the worker thread's loop otherwise. */
    aeEventLoop *el = NULL;
    if (thread_id < 0) el = config.el;
    else {
        benchmarkThread *thread = config.threads[thread_id];
        el = thread->el;
    }
    if (config.idlemode == 0)
        aeCreateFileEvent(el,c->context->fd,AE_WRITABLE,writeHandler,c);
    listAddNodeTail(config.clients,c);
    atomicIncr(config.liveclients, 1);
    atomicGet(config.slots_last_update, c->slots_last_update);
    return c;
}
/* (Re)build the global worker-thread table: release any previous table,
 * then allocate one and populate each slot with a freshly created
 * benchmarkThread (each carrying its own event loop). */
static void initBenchmarkThreads() {
    if (config.threads != NULL)
        freeBenchmarkThreads();
    config.threads = (benchmarkThread**)zmalloc(
        config.max_threads * sizeof(benchmarkThread*), MALLOC_LOCAL);
    for (int slot = 0; slot < config.max_threads; slot++)
        config.threads[slot] = createBenchmarkThread(slot);
}
/* Thread functions. */

/* Allocate one benchmarkThread and equip it with a private event loop.
 * Returns NULL only if allocation fails. */
static benchmarkThread *createBenchmarkThread(int index) {
    benchmarkThread *t = (benchmarkThread*)zmalloc(sizeof(*t), MALLOC_LOCAL);
    if (t == NULL)
        return NULL;
    t->index = index;
    t->el = aeCreateEventLoop(1024*10);
    return t;
}
/* Destroy a single worker thread's event loop (if any) and free the
 * thread descriptor itself. */
static void freeBenchmarkThread(benchmarkThread *thread) {
    if (thread->el != NULL)
        aeDeleteEventLoop(thread->el);
    zfree(thread);
}
static void freeBenchmarkThreads() {
int i = 0;
for (; i < config.max_threads; i++) {
benchmarkThread *thread = config.threads[i];
if (thread) freeBenchmarkThread(thread);
}
zfree(config.threads);
config.threads = NULL;
}
static void *execBenchmarkThread(void *ptr) {
benchmarkThread *thread = (benchmarkThread *) ptr;
aeMain(thread->el);
return NULL;
}
/* Populate the global benchmark config with its default values. Called once
 * at startup, before parseOptions() overrides selected fields. */
void initConfigDefaults() {
    /* Workload shape. */
    config.numclients = 50;
    config.requests = 100000;
    config.keepalive = 1;
    config.datasize = 3;
    config.pipeline = 1;
    config.period_ms = 5000;
    config.randomkeys = 0;
    config.randomkeys_keyspacelen = 0;
    config.loop = 0;
    config.idlemode = 0;
    config.tests = NULL;

    /* Output / reporting. */
    config.showerrors = 0;
    config.quiet = 0;
    config.csv = 0;
    config.precision = 1;
    config.latency = NULL;

    /* Connection target. */
    config.hostip = "127.0.0.1";
    config.hostport = 6379;
    config.hostsocket = NULL;
    config.dbnum = 0;
    config.auth = NULL;
    config.enable_tracking = 0;

    /* Runtime state. */
    config.liveclients = 0;
    config.el = aeCreateEventLoop(1024*10);
    config.clients = listCreate();
    config.max_threads = MAX_THREADS;
    config.threads = NULL;

    /* Cluster-mode state. */
    config.cluster_mode = 0;
    config.cluster_node_count = 0;
    config.cluster_nodes = NULL;
    config.redis_config = NULL;
    config.is_fetching_slots = 0;
    config.is_updating_slots = 0;
    config.slots_last_update = 0;
}
/* Parse the command-line options understood by this diagnostic tool.
 *
 * Returns the number of consumed arguments (the index of the first
 * unrecognized non-option argument), so the caller can treat the remainder
 * of argv as a command. On an invalid option, or an option missing its
 * value, prints the usage text and exits with status 1; on --help, exits
 * with status 0. */
int parseOptions(int argc, const char **argv) {
    int i;
    int lastarg;             /* true when argv[i] is the final arg, so no value can follow */
    int exit_status = 1;     /* becomes 0 only for --help */

    for (i = 1; i < argc; i++) {
        lastarg = (i == (argc-1));

        if (!strcmp(argv[i],"-c") || !strcmp(argv[i],"--clients")) {
            /* Number of concurrent client connections per thread. */
            if (lastarg) goto invalid;
            config.numclients = atoi(argv[++i]);
        } else if (!strcmp(argv[i],"--time")) {
            /* Delay between spawning successive benchmark threads (ms). */
            if (lastarg) goto invalid;
            config.period_ms = atoi(argv[++i]);
            if (config.period_ms <= 0) {
                /* atoi() yields 0 for non-numeric input, so this also
                 * catches garbage values, not just explicit negatives. */
                printf("Warning: Invalid value for thread time. Defaulting to 5000ms.\n");
                config.period_ms = 5000;
            }
        } else if (!strcmp(argv[i],"-h") || !strcmp(argv[i],"--host")) {
            if (lastarg) goto invalid;
            config.hostip = strdup(argv[++i]);
        } else if (!strcmp(argv[i],"-p") || !strcmp(argv[i],"--port")) {
            if (lastarg) goto invalid;
            config.hostport = atoi(argv[++i]);
        } else if (!strcmp(argv[i],"-s")) {
            /* Connect through a Unix domain socket instead of TCP. */
            if (lastarg) goto invalid;
            config.hostsocket = strdup(argv[++i]);
        } else if (!strcmp(argv[i],"--password") ) {
            if (lastarg) goto invalid;
            config.auth = strdup(argv[++i]);
        } else if (!strcmp(argv[i],"--user")) {
            /* NOTE: stored without strdup, unlike --password — assumes argv
             * outlives the config (true for process lifetime). */
            if (lastarg) goto invalid;
            config.user = argv[++i];
        } else if (!strcmp(argv[i],"--dbnum")) {
            if (lastarg) goto invalid;
            config.dbnum = atoi(argv[++i]);
            /* Cache the string form for building SELECT commands later. */
            config.dbnumstr = sdsfromlonglong(config.dbnum);
        } else if (!strcmp(argv[i],"-t") || !strcmp(argv[i],"--threads")) {
            /* Clamp the thread count into (0, MAX_THREADS]. */
            if (lastarg) goto invalid;
            config.max_threads = atoi(argv[++i]);
            if (config.max_threads > MAX_THREADS) {
                printf("Warning: Too many threads, limiting threads to %d.\n", MAX_THREADS);
                config.max_threads = MAX_THREADS;
            } else if (config.max_threads <= 0) {
                printf("Warning: Invalid value for max threads. Defaulting to %d.\n", MAX_THREADS);
                config.max_threads = MAX_THREADS;
            }
        } else if (!strcmp(argv[i],"--help")) {
            exit_status = 0;
            goto usage;
        } else {
            /* Assume the user meant to provide an option when the arg starts
             * with a dash. We're done otherwise and should use the remainder
             * as the command and arguments for running the benchmark. */
            if (argv[i][0] == '-') goto invalid;
            return i;
        }
    }
    return i;

invalid:
    printf("Invalid option \"%s\" or option argument missing\n\n",argv[i]);

usage:
    /* NOTE(review): the synopsis advertises -n <requests> and -k <boolean>,
     * which this parser does not accept — confirm against the option list. */
    printf(
"Usage: keydb-benchmark [-h <host>] [-p <port>] [-c <clients>] [-n <requests>] [-k <boolean>]\n\n"
" -h, --host <hostname>      Server hostname (default 127.0.0.1)\n"
" -p, --port <port>          Server port (default 6379)\n"
" -c <clients>               Number of parallel connections (default 50)\n"
" -t, --threads <threads>    Maximum number of threads to start before ending\n"
" --time <time>              Time between spinning up new client threads, in milliseconds\n"
" --dbnum <db>               Select the specified DB number (default 0)\n"
" --user <username>          Used to send ACL style 'AUTH username pass'. Needs -a.\n"
" --password <password>      Password for Redis Auth\n\n"
    );
    exit(exit_status);
}
/* Scan an INFO-style payload (a series of "name:value\n" lines) for `key`
 * and parse its value as a double into `val`.
 *
 * info - raw INFO text; no longer modified (the previous version wrote a
 *        NUL over the caller's buffer, and dereferenced a NULL pointer
 *        when the matched line had no trailing newline).
 * key  - property name; matched as the first substring occurrence.
 * val  - out parameter, set only on success.
 *
 * Returns 0 on success, 1 if the key is not present. strtod() stops at the
 * terminating '\n' (or any other non-numeric byte) by itself, so no
 * truncation of the buffer is needed. */
int extractPropertyFromInfo(const char *info, const char *key, double &val) {
    const char *line = strstr(info, key);
    if (line == nullptr) return 1;
    line += strlen(key) + 1; // Skip past key name and the following ':'
    val = strtod(line, nullptr);
    return 0;
}
/* Unsigned-integer overload of extractPropertyFromInfo(): find `key` in an
 * INFO-style payload and parse its value with atoi().
 *
 * Returns 0 on success, 1 if the key is not found. Like the double
 * overload, this no longer NUL-terminates the matched line in place —
 * atoi() stops at the first non-digit on its own — which also fixes a NULL
 * dereference when the matched line had no trailing '\n'. */
int extractPropertyFromInfo(const char *info, const char *key, unsigned int &val) {
    const char *line = strstr(info, key);
    if (line == nullptr) return 1;
    line += strlen(key) + 1; // Skip past key name and the following ':'
    val = atoi(line);
    return 0;
}
/* Sample this process's cumulative CPU consumption via getrusage() and
 * return user + system time in seconds. The raw rusage sample is also
 * stored into *self_ru for the caller. */
double getSelfCpuTime(struct rusage *self_ru) {
    getrusage(RUSAGE_SELF, self_ru);
    /* Convert each timeval (seconds + microseconds) to fractional seconds. */
    double utime = self_ru->ru_utime.tv_sec + (self_ru->ru_utime.tv_usec / (double)1000000);
    double stime = self_ru->ru_stime.tv_sec + (self_ru->ru_stime.tv_usec / (double)1000000);
    return utime + stime;
}
/* Query the server's cumulative CPU usage via "INFO CPU" and return
 * used_cpu_user + used_cpu_sys (seconds), or -1 on any error: connection
 * failure (NULL reply), unexpected reply type, or a missing INFO field.
 *
 * The reply object is released on every path, including the two
 * field-extraction error paths that previously leaked it; a NULL reply
 * (hiredis' signal for a connection-level error) is also handled now
 * instead of being dereferenced. */
double getServerCpuTime(redisContext *ctx) {
    redisReply *reply = (redisReply*)redisCommand(ctx, "INFO CPU");
    if (reply == NULL || reply->type != REDIS_REPLY_STRING) {
        if (reply) freeReplyObject(reply);
        printf("Error executing INFO command. Exiting.\n");
        return -1;
    }
    double used_cpu_user, used_cpu_sys;
    if (extractPropertyFromInfo(reply->str, "used_cpu_user", used_cpu_user)) {
        printf("Error reading user CPU usage from INFO command. Exiting.\n");
        freeReplyObject(reply);
        return -1;
    }
    if (extractPropertyFromInfo(reply->str, "used_cpu_sys", used_cpu_sys)) {
        printf("Error reading system CPU usage from INFO command. Exiting.\n");
        freeReplyObject(reply);
        return -1;
    }
    freeReplyObject(reply);
    return used_cpu_user + used_cpu_sys;
}
/* Arithmetic mean of the values in *q.
 *
 * Returns 0.0 for an empty deque — the previous version computed 0/0,
 * yielding NaN, which would silently poison every comparison made with
 * the result downstream. */
double getMean(std::deque<double> *q) {
    if (q->empty()) return 0.0;
    double sum = 0;
    for (double x : *q) sum += x;
    return sum / q->size();
}
/* A server is considered saturated once its average per-thread CPU usage
 * reaches 96% (cpuPercent is the aggregate percentage across all threads). */
bool isAtFullLoad(double cpuPercent, unsigned int threads) {
    double perThread = cpuPercent / threads;
    return perThread >= 96;
}
/* Diagnostic driver: repeatedly spin up benchmark threads (each with
 * config.numclients SET-issuing clients), sampling our own and the server's
 * CPU usage between rounds, until the server saturates, the load gain
 * stagnates, or the configured thread limit is reached. */
int main(int argc, const char **argv) {
    int i;

    storage_init(NULL, 0);

    srandom(time(NULL));
    /* Ignore hangups and broken pipes so a dropped connection or terminal
     * doesn't kill the run mid-measurement. */
    signal(SIGHUP, SIG_IGN);
    signal(SIGPIPE, SIG_IGN);

    initConfigDefaults();
    i = parseOptions(argc,argv);
    /* Skip past the consumed options; the remainder (if any) is left in argv. */
    argc -= i;
    argv += i;

    config.latency = (long long*)zmalloc(sizeof(long long)*config.requests, MALLOC_LOCAL);

    if (config.max_threads > 0) {
        /* Initialize all cross-thread mutexes; OR the results together so a
         * single check catches any failure. */
        int err = 0;
        err |= pthread_mutex_init(&(config.requests_issued_mutex), NULL);
        err |= pthread_mutex_init(&(config.requests_finished_mutex), NULL);
        err |= pthread_mutex_init(&(config.liveclients_mutex), NULL);
        err |= pthread_mutex_init(&(config.is_fetching_slots_mutex), NULL);
        err |= pthread_mutex_init(&(config.is_updating_slots_mutex), NULL);
        err |= pthread_mutex_init(&(config.updating_slots_mutex), NULL);
        err |= pthread_mutex_init(&(config.slots_last_update_mutex), NULL);
        if (err != 0)
        {
            perror("Failed to initialize mutex");
            exit(EXIT_FAILURE);
        }
    }

    /* Fixed payload for the SET workload; `command` is rebuilt per client. */
    const char *set_value = "abcdefghijklmnopqrstuvwxyz";
    int self_threads = 0;
    char command[63];

    initBenchmarkThreads();

    /* Dedicated control connection used only for the INFO sampling below. */
    redisContext *ctx = getRedisContext(config.hostip, config.hostport, config.hostsocket);
    /* NOTE(review): ctx is not checked for NULL before use — a failed
     * connection would crash in redisCommand(); confirm getRedisContext's
     * contract. */
    double server_cpu_time, last_server_cpu_time = getServerCpuTime(ctx);
    struct rusage self_ru;
    double self_cpu_time, last_self_cpu_time = getSelfCpuTime(&self_ru);
    double server_cpu_load, last_server_cpu_load = 0, self_cpu_load, server_cpu_gain;
    /* Sliding window (max 5 samples) of per-round server CPU load gains. */
    std::deque<double> load_gain_history = {};
    double current_gain_avg, peak_gain_avg = 0;

    /* Discover how many threads the server runs, for per-thread load math. */
    redisReply *reply = (redisReply*)redisCommand(ctx, "INFO CPU");
    if (reply->type != REDIS_REPLY_STRING) {
        freeReplyObject(reply);
        printf("Error executing INFO command. Exiting.\r\n");
        return 1;
    }
    unsigned int server_threads;
    if (extractPropertyFromInfo(reply->str, "server_threads", server_threads)) {
        /* NOTE(review): reply is leaked on this early return. */
        printf("Error reading server threads from INFO command. Exiting.\r\n");
        return 1;
    }
    freeReplyObject(reply);

    printf("Server has %d threads.\nStarting...\n", server_threads);
    fflush(stdout);

    while (self_threads < config.max_threads) {
        /* Create this round's clients, each issuing SET on a distinct key. */
        for (int i = 0; i < config.numclients; i++) {
            sprintf(command, "SET %d %s\r\n", self_threads * config.numclients + i, set_value);
            createClient(command, strlen(command), NULL,self_threads);
        }

        /* Launch the event loop for this round's thread. */
        benchmarkThread *t = config.threads[self_threads];
        if (pthread_create(&(t->thread), NULL, execBenchmarkThread, t)){
            fprintf(stderr, "FATAL: Failed to start thread %d. Exiting.\n", self_threads);
            exit(1);
        }
        self_threads++;

        /* Let the new load settle before sampling. */
        usleep(config.period_ms * 1000);

        /* Convert CPU-seconds deltas over the sample period into percents
         * (x100 for percent, /1000 because period_ms is in milliseconds). */
        server_cpu_time = getServerCpuTime(ctx);
        self_cpu_time = getSelfCpuTime(&self_ru);
        server_cpu_load = (server_cpu_time - last_server_cpu_time) * 100000 / config.period_ms;
        self_cpu_load = (self_cpu_time - last_self_cpu_time) * 100000 / config.period_ms;
        /* getServerCpuTime() returns -1 on error; stop sampling. */
        if (server_cpu_time < 0) {
            break;
        }

        printf("%d threads, %d total clients. CPU Usage Self: %.1f%% (%.1f%% per thread), Server: %.1f%% (%.1f%% per thread)\r",
            self_threads,
            self_threads * config.numclients,
            self_cpu_load,
            self_cpu_load / self_threads,
            server_cpu_load,
            server_cpu_load / server_threads);
        fflush(stdout);

        /* Track the moving average of load gain to detect stagnation. */
        server_cpu_gain = server_cpu_load - last_server_cpu_load;
        load_gain_history.push_back(server_cpu_gain);
        if (load_gain_history.size() > 5) {
            load_gain_history.pop_front();
        }
        current_gain_avg = getMean(&load_gain_history);
        if (current_gain_avg > peak_gain_avg) {
            peak_gain_avg = current_gain_avg;
        }

        last_server_cpu_time = server_cpu_time;
        last_self_cpu_time = self_cpu_time;
        last_server_cpu_load = server_cpu_load;

        /* Stop condition 1: server CPU saturated. */
        if (isAtFullLoad(server_cpu_load, server_threads)) {
            printf("\nServer is at full CPU load. If higher performance is expected, check server configuration.\n");
            break;
        }

        /* Stop condition 2: adding clients no longer raises server load. */
        if (current_gain_avg <= 0.05 * peak_gain_avg) {
            printf("\nServer CPU load appears to have stagnated with increasing clients.\n"
                "Server does not appear to be at full load. Check network for throughput.\n");
            break;
        }

        /* Soft cap: warn past 2000 clients (note: does not break the loop). */
        if (self_threads * config.numclients > 2000) {
            printf("\nClient limit of 2000 reached. Server is not at full load and appears to be increasing.\n"
                "2000 clients should be more than enough to reach a bottleneck. Check all configuration.\n");
        }
    }
    printf("Done.\n");

    freeAllClients();
    freeBenchmarkThreads();
    return 0;
}

View File

@ -426,7 +426,7 @@ sds createLatencyReport(void) {
}
if (advise_slowlog_inspect) {
report = sdscat(report,"- Check your Slow Log to understand what are the commands you are running which are too slow to execute. Please check https://redis.io/commands/slowlog for more information.\n");
report = sdscat(report,"- Check your Slow Log to understand what are the commands you are running which are too slow to execute. Please check https://docs.keydb.dev/docs/commands#slowlog for more information.\n");
}
/* Intrinsic latency. */

View File

@ -612,6 +612,19 @@ int moduleDelKeyIfEmpty(RedisModuleKey *key) {
}
}
/* This function is used to set the thread local variables (serverTL) for
* arbitrary module threads. All incoming module threads share the same set of
* thread local variables (modulethreadvar).
*
* This is needed as some KeyDB functions use thread local variables to do things,
* and we don't want to share the thread local variables of existing server threads */
void moduleSetThreadVariablesIfNeeded(void) {
if (serverTL == nullptr) {
serverTL = &g_pserver->modulethreadvar;
g_fModuleThread = true;
}
}
/* --------------------------------------------------------------------------
* Service API exported to modules
*
@ -826,6 +839,7 @@ int64_t commandFlagsFromString(char *s) {
else if (!strcasecmp(t,"may-replicate")) flags |= CMD_MAY_REPLICATE;
else if (!strcasecmp(t,"getkeys-api")) flags |= CMD_MODULE_GETKEYS;
else if (!strcasecmp(t,"no-cluster")) flags |= CMD_MODULE_NO_CLUSTER;
else if (!strcasecmp(t,"async")) flags |= CMD_ASYNC_OK;
else break;
}
sdsfreesplitres(tokens,count);
@ -2265,6 +2279,7 @@ int RM_GetContextFlags(RedisModuleCtx *ctx) {
* periodically in timer callbacks or other periodic callbacks.
*/
int RM_AvoidReplicaTraffic() {
moduleSetThreadVariablesIfNeeded();
return checkClientPauseTimeoutAndReturnIfPaused();
}
@ -2341,8 +2356,11 @@ void *RM_OpenKey(RedisModuleCtx *ctx, robj *keyname, int mode) {
/* Destroy a RedisModuleKey struct (freeing is the responsibility of the caller). */
static void moduleCloseKey(RedisModuleKey *key) {
int signal = SHOULD_SIGNAL_MODIFIED_KEYS(key->ctx);
moduleAcquireGIL(false);
if ((key->mode & REDISMODULE_WRITE) && signal)
signalModifiedKey(key->ctx->client,key->db,key->key);
/* TODO: if (key->iter) RM_KeyIteratorStop(kp); */
moduleReleaseGIL(false);
if (key->iter) zfree(key->iter);
RM_ZsetRangeStop(key);
if (key && key->value && key->value->type == OBJ_STREAM &&
@ -5596,10 +5614,7 @@ int moduleClientIsBlockedOnKeys(client *c) {
* RedisModule_BlockClientOnKeys() is accessible from the timeout
* callback via RM_GetBlockedClientPrivateData). */
int RM_UnblockClient(RedisModuleBlockedClient *bc, void *privdata) {
if (serverTL == nullptr) {
serverTL = &g_pserver->modulethreadvar;
g_fModuleThread = true;
}
moduleSetThreadVariablesIfNeeded();
if (bc->blocked_on_keys) {
/* In theory the user should always pass the timeout handler as an
* argument, but better to be safe than sorry. */
@ -5899,10 +5914,7 @@ void RM_FreeThreadSafeContext(RedisModuleCtx *ctx) {
* a blocked client connected to the thread safe context. */
void RM_ThreadSafeContextLock(RedisModuleCtx *ctx) {
UNUSED(ctx);
if (serverTL == nullptr) {
serverTL = &g_pserver->modulethreadvar;
g_fModuleThread = true;
}
moduleSetThreadVariablesIfNeeded();
moduleAcquireGIL(FALSE /*fServerThread*/, true /*fExclusive*/);
}

View File

@ -199,7 +199,7 @@ client *createClient(connection *conn, int iel) {
c->paused_list_node = NULL;
c->client_tracking_redirection = 0;
c->casyncOpsPending = 0;
c->mvccCheckpoint = 0;
c->mvccCheckpoint = getMvccTstamp();
c->master_error = 0;
memset(c->uuid, 0, UUID_BINARY_LEN);
@ -529,8 +529,8 @@ void addReplyErrorLength(client *c, const char *s, size_t len) {
/* Do some actions after an error reply was sent (Log if needed, updates stats, etc.) */
void afterErrorReply(client *c, const char *s, size_t len, int severity = ERR_CRITICAL) {
/* Increment the global error counter */
g_pserver->stat_total_error_replies++;
/* Increment the thread error counter */
serverTL->stat_total_error_replies++;
/* Increment the error stats
* If the string already starts with "-..." then the error prefix
* is provided by the caller ( we limit the search to 32 chars). Otherwise we use "-ERR". */
@ -2450,7 +2450,7 @@ void commandProcessed(client *c, int flags) {
* sub-replicas and to the replication backlog. */
if (c->flags & CLIENT_MASTER) {
AeLocker ae;
ae.arm(c);
ae.arm(c);
long long applied = c->reploff - prev_offset;
if (applied) {
if (!g_pserver->fActiveReplica && (flags & CMD_CALL_PROPAGATE))
@ -2474,7 +2474,7 @@ int processCommandAndResetClient(client *c, int flags) {
int deadclient = 0;
client *old_client = serverTL->current_client;
serverTL->current_client = c;
serverAssert(GlobalLocksAcquired());
serverAssert((flags & CMD_CALL_ASYNC) || GlobalLocksAcquired());
if (processCommand(c, flags) == C_OK) {
commandProcessed(c, flags);
@ -2582,6 +2582,17 @@ void parseClientCommandBuffer(client *c) {
}
}
bool FAsyncCommand(parsed_command &cmd)
{
if (serverTL->in_eval || serverTL->in_exec)
return false;
auto parsedcmd = lookupCommand(szFromObj(cmd.argv[0]));
if (parsedcmd == nullptr)
return false;
static const long long expectedFlags = CMD_ASYNC_OK | CMD_READONLY;
return (parsedcmd->flags & expectedFlags) == expectedFlags;
}
/* This function is called every time, in the client structure 'c', there is
* more query buffer to process, because we read more data from the socket
* or because a client was blocked and later reactivated, so there could be
@ -2600,6 +2611,9 @@ void processInputBuffer(client *c, bool fParse, int callFlags) {
if (!FClientReady(c)) break;
if ((callFlags & CMD_CALL_ASYNC) && !FAsyncCommand(cmd))
break;
zfree(c->argv);
c->argc = cmd.argc;
c->argv = cmd.argv;
@ -2709,7 +2723,19 @@ void readQueryFromClient(connection *conn) {
if (cserver.cthreads > 1 || g_pserver->m_pstorageFactory) {
parseClientCommandBuffer(c);
serverTL->vecclientsProcess.push_back(c);
if (g_pserver->enable_async_commands && !serverTL->disable_async_commands && listLength(g_pserver->monitors) == 0 && (aeLockContention() || serverTL->rgdbSnapshot[c->db->id] || g_fTestMode)) {
// Frequent writers aren't good candidates for this optimization, they cause us to renew the snapshot too often
// so we exclude them unless the snapshot we need already exists
bool fSnapshotExists = c->db->mvccLastSnapshot >= c->mvccCheckpoint;
bool fWriteTooRecent = (((getMvccTstamp() - c->mvccCheckpoint) >> MVCC_MS_SHIFT) < redisDbPersistentDataSnapshot::msStaleThreshold/2);
// The check below avoids running async commands if this is a frequent writer unless a snapshot is already there to service it
if (!fWriteTooRecent || fSnapshotExists) {
processInputBuffer(c, false, CMD_CALL_SLOWLOG | CMD_CALL_STATS | CMD_CALL_ASYNC);
}
}
if (!c->vecqueuedcmd.empty())
serverTL->vecclientsProcess.push_back(c);
} else {
// If we're single threaded its actually better to just process the command here while the query is hot in the cache
// multithreaded lock contention dominates and batching is better

View File

@ -102,10 +102,11 @@ robj *createEmbeddedStringObject(const char *ptr, size_t len) {
allocsize = sizeof(void*);
size_t mvccExtraBytes = g_pserver->fActiveReplica ? sizeof(redisObjectExtended) : 0;
char *oB = (char*)zcalloc(sizeof(robj)+allocsize-sizeof(redisObject::m_ptr)+mvccExtraBytes, MALLOC_SHARED);
char *oB = (char*)zmalloc(sizeof(robj)+allocsize-sizeof(redisObject::m_ptr)+mvccExtraBytes, MALLOC_SHARED);
robj *o = reinterpret_cast<robj*>(oB + mvccExtraBytes);
struct sdshdr8 *sh = (sdshdr8*)(&o->m_ptr);
new (o) redisObject;
o->type = OBJ_STRING;
o->encoding = OBJ_ENCODING_EMBSTR;
o->setrefcount(1);
@ -763,6 +764,20 @@ int getLongLongFromObjectOrReply(client *c, robj *o, long long *target, const ch
return C_OK;
}
int getUnsignedLongLongFromObjectOrReply(client *c, robj *o, uint64_t *target, const char *msg) {
uint64_t value;
if (getUnsignedLongLongFromObject(o, &value) != C_OK) {
if (msg != NULL) {
addReplyError(c,(char*)msg);
} else {
addReplyError(c,"value is not an integer or out of range");
}
return C_ERR;
}
*target = value;
return C_OK;
}
int getLongFromObjectOrReply(client *c, robj *o, long *target, const char *msg) {
long long value;
@ -1586,23 +1601,22 @@ robj *deserializeStoredStringObject(const char *data, size_t cb)
newObject = createObject(OBJ_STRING, nullptr);
newObject->encoding = oT->encoding;
newObject->m_ptr = oT->m_ptr;
return newObject;
break;
case OBJ_ENCODING_EMBSTR:
newObject = createEmbeddedStringObject(szFromObj(oT), sdslen(szFromObj(oT)));
return newObject;
break;
case OBJ_ENCODING_RAW:
newObject = createObject(OBJ_STRING, sdsnewlen(SDS_NOINIT,cb-sizeof(robj)-sizeof(uint64_t)));
newObject->lru = oT->lru;
memcpy(newObject->m_ptr, data+sizeof(robj)+sizeof(mvcc), cb-sizeof(robj)-sizeof(mvcc));
return newObject;
break;
default:
serverPanic("Unknown string object encoding from storage");
}
setMvccTstamp(newObject, mvcc);
newObject->setrefcount(1);
return newObject;
}

View File

@ -1983,6 +1983,54 @@ int main(int argc, const char **argv) {
sdsfree(key_placeholder);
}
if (test_is_selected("mget")) {
const char *cmd_argv[1002];
cmd_argv[0] = "MGET";
sds key_placeholder = sdscatprintf(sdsnew(""),"key%s:__rand_int__",tag);
for (int keys = 1; keys < 1002; keys += 100) {
for (i = 1; i < keys + 1; i++) {
cmd_argv[i] = key_placeholder;
}
len = redisFormatCommandArgv(&cmd,keys+1,cmd_argv,NULL);
std::string title = "MGET (" + std::to_string(keys) + " keys)";
benchmark(title.data(),cmd,len);
free(cmd);
}
sdsfree(key_placeholder);
}
if (test_is_selected("hmset")) {
const char *cmd_argv[22];
cmd_argv[0] = "HMSET";
cmd_argv[1] = "testhash";
sds key_placeholder = sdscatprintf(sdsnew(""),"key%s:__rand_int__",tag);
for (i = 2; i < 22; i += 2) {
cmd_argv[i] = key_placeholder;
cmd_argv[i+1] = data;
}
len = redisFormatCommandArgv(&cmd,22,cmd_argv,NULL);
benchmark("MSET (10 keys)",cmd,len);
free(cmd);
sdsfree(key_placeholder);
}
if (test_is_selected("hmget")) {
const char *cmd_argv[1003];
cmd_argv[0] = "HMGET";
cmd_argv[1] = "testhash";
sds key_placeholder = sdscatprintf(sdsnew(""),"key%s:__rand_int__",tag);
for (int keys = 1; keys < 1002; keys += 100) {
for (i = 2; i < keys + 2; i++) {
cmd_argv[i] = key_placeholder;
}
len = redisFormatCommandArgv(&cmd,keys+2,cmd_argv,NULL);
std::string title = "HMGET (" + std::to_string(keys) + " keys)";
benchmark(title.data(),cmd,len);
free(cmd);
}
sdsfree(key_placeholder);
}
if (!config.csv) printf("\n");
} while(config.loop);
zfree(data);

View File

@ -3399,18 +3399,19 @@ void syncWithMaster(connection *conn) {
goto error;
}
retry_connect:
/* Send a PING to check the master is able to reply without errors. */
if (mi->repl_state == REPL_STATE_CONNECTING) {
if (mi->repl_state == REPL_STATE_CONNECTING || mi->repl_state == REPL_STATE_RETRY_NOREPLPING) {
serverLog(LL_NOTICE,"Non blocking connect for SYNC fired the event.");
/* Delete the writable event so that the readable event remains
* registered and we can wait for the PONG reply. */
connSetReadHandler(conn, syncWithMaster);
connSetWriteHandler(conn, NULL);
mi->repl_state = REPL_STATE_RECEIVE_PING_REPLY;
/* Send the PING, don't check for errors at all, we have the timeout
* that will take care about this. */
err = sendCommand(conn,"PING",NULL);
err = sendCommand(conn,mi->repl_state == REPL_STATE_RETRY_NOREPLPING ? "PING" : "REPLPING",NULL);
if (err) goto write_error;
mi->repl_state = REPL_STATE_RECEIVE_PING_REPLY;
return;
}
@ -3423,7 +3424,13 @@ void syncWithMaster(connection *conn) {
* Note that older versions of Redis replied with "operation not
* permitted" instead of using a proper error code, so we test
* both. */
if (err[0] != '+' &&
if (strncmp(err,"-ERR unknown command",20) == 0) {
serverLog(LL_NOTICE,"Master does not support REPLPING, sending PING instead...");
mi->repl_state = REPL_STATE_RETRY_NOREPLPING;
sdsfree(err);
err = NULL;
goto retry_connect;
} else if (err[0] != '+' &&
strncmp(err,"-NOAUTH",7) != 0 &&
strncmp(err,"-NOPERM",7) != 0 &&
strncmp(err,"-ERR operation not permitted",28) != 0)
@ -5530,7 +5537,7 @@ void replicationNotifyLoadedKey(redisDb *db, robj_roptr key, robj_roptr val, lon
redisObjectStack objTtl;
initStaticStringObject(objTtl, sdscatprintf(sdsempty(), "%lld", expire));
redisObjectStack objMvcc;
initStaticStringObject(objMvcc, sdscatprintf(sdsempty(), "%lu", mvccFromObj(val)));
initStaticStringObject(objMvcc, sdscatprintf(sdsempty(), "%" PRIu64, mvccFromObj(val)));
redisObject *argv[5] = {shared.mvccrestore, key.unsafe_robjcast(), &objMvcc, &objTtl, &objPayload};
replicationFeedSlaves(g_pserver->slaves, db->id, argv, 5);

View File

@ -57,7 +57,6 @@
#include <limits.h>
#include <float.h>
#include <math.h>
#include <sys/resource.h>
#include <sys/utsname.h>
#include <locale.h>
#include <sys/socket.h>
@ -69,7 +68,6 @@
#include "keycheck.h"
#include "motd.h"
#include "t_nhash.h"
#include <sys/resource.h>
#ifdef __linux__
#include <sys/prctl.h>
#include <sys/mman.h>
@ -223,7 +221,7 @@ struct redisCommand redisCommandTable[] = {
0,NULL,0,0,0,0,0,0},
{"get",getCommand,2,
"read-only fast @string",
"read-only fast async @string",
0,NULL,1,1,1,0,0,0},
{"getex",getexCommand,-2,
@ -317,7 +315,7 @@ struct redisCommand redisCommandTable[] = {
0,NULL,1,1,1,0,0,0},
{"mget",mgetCommand,-2,
"read-only fast @string",
"read-only fast async @string",
0,NULL,1,-1,1,0,0,0},
{"rpush",rpushCommand,-3,
@ -609,7 +607,7 @@ struct redisCommand redisCommandTable[] = {
0,NULL,1,1,1,0,0,0},
{"hmget",hmgetCommand,-3,
"read-only fast @hash",
"read-only fast async @hash",
0,NULL,1,1,1,0,0,0},
{"hincrby",hincrbyCommand,4,
@ -633,15 +631,15 @@ struct redisCommand redisCommandTable[] = {
0,NULL,1,1,1,0,0,0},
{"hkeys",hkeysCommand,2,
"read-only to-sort @hash",
"read-only to-sort async @hash",
0,NULL,1,1,1,0,0,0},
{"hvals",hvalsCommand,2,
"read-only to-sort @hash",
"read-only to-sort async @hash",
0,NULL,1,1,1,0,0,0},
{"hgetall",hgetallCommand,2,
"read-only random @hash",
"read-only random async @hash",
0,NULL,1,1,1,0,0,0},
{"hexists",hexistsCommand,3,
@ -653,7 +651,7 @@ struct redisCommand redisCommandTable[] = {
0,NULL,1,1,1,0,0,0},
{"hscan",hscanCommand,-3,
"read-only random @hash",
"read-only random async @hash",
0,NULL,1,1,1,0,0,0},
{"incrby",incrbyCommand,3,
@ -761,6 +759,10 @@ struct redisCommand redisCommandTable[] = {
"ok-stale ok-loading fast @connection @replication",
0,NULL,0,0,0,0,0,0},
{"replping",pingCommand,-1,
"ok-stale fast @connection @replication",
0,NULL,0,0,0,0,0,0},
{"echo",echoCommand,2,
"fast @connection",
0,NULL,0,0,0,0,0,0},
@ -2068,6 +2070,7 @@ int hash_spin_worker() {
* rehashing. */
void databasesCron(bool fMainThread) {
serverAssert(GlobalLocksAcquired());
if (fMainThread) {
/* Expire keys by random sampling. Not required for slaves
* as master will synthesize DELs for us. */
@ -2410,7 +2413,9 @@ int serverCron(struct aeEventLoop *eventLoop, long long id, void *clientData) {
stat_net_input_bytes = g_pserver->stat_net_input_bytes.load(std::memory_order_relaxed);
stat_net_output_bytes = g_pserver->stat_net_output_bytes.load(std::memory_order_relaxed);
trackInstantaneousMetric(STATS_METRIC_COMMAND,g_pserver->stat_numcommands);
long long stat_numcommands;
__atomic_load(&g_pserver->stat_numcommands, &stat_numcommands, __ATOMIC_RELAXED);
trackInstantaneousMetric(STATS_METRIC_COMMAND,stat_numcommands);
trackInstantaneousMetric(STATS_METRIC_NET_INPUT,
stat_net_input_bytes);
trackInstantaneousMetric(STATS_METRIC_NET_OUTPUT,
@ -2779,6 +2784,14 @@ void beforeSleep(struct aeEventLoop *eventLoop) {
locker.arm();
/* end any snapshots created by fast async commands */
for (int idb = 0; idb < cserver.dbnum; ++idb) {
if (serverTL->rgdbSnapshot[idb] != nullptr) {
g_pserver->db[idb]->endSnapshot(serverTL->rgdbSnapshot[idb]);
serverTL->rgdbSnapshot[idb] = nullptr;
}
}
size_t zmalloc_used = zmalloc_used_memory();
if (zmalloc_used > g_pserver->stat_peak_memory)
g_pserver->stat_peak_memory = zmalloc_used;
@ -2990,6 +3003,8 @@ void afterSleep(struct aeEventLoop *eventLoop) {
serverTL->gcEpoch = g_pserver->garbageCollector.startEpoch();
for (int idb = 0; idb < cserver.dbnum; ++idb)
g_pserver->db[idb]->trackChanges(false);
serverTL->disable_async_commands = false;
}
}
@ -3124,6 +3139,7 @@ void createSharedObjects(void) {
shared.lastid = makeObjectShared("LASTID",6);
shared.default_username = makeObjectShared("default",7);
shared.ping = makeObjectShared("ping",4);
shared.replping = makeObjectShared("replping", 8);
shared.setid = makeObjectShared("SETID",5);
shared.keepttl = makeObjectShared("KEEPTTL",7);
shared.load = makeObjectShared("LOAD",4);
@ -3694,7 +3710,8 @@ void resetServerStats(void) {
g_pserver->stat_net_input_bytes = 0;
g_pserver->stat_net_output_bytes = 0;
g_pserver->stat_unexpected_error_replies = 0;
g_pserver->stat_total_error_replies = 0;
for (int iel = 0; iel < cserver.cthreads; ++iel)
g_pserver->rgthreadvar[iel].stat_total_error_replies = 0;
g_pserver->stat_dump_payload_sanitizations = 0;
g_pserver->aof_delayed_fsync = 0;
}
@ -3841,6 +3858,9 @@ void initServer(void) {
g_pserver->rgthreadvar[i].rgdbSnapshot = (const redisDbPersistentDataSnapshot**)zcalloc(sizeof(redisDbPersistentDataSnapshot*)*cserver.dbnum, MALLOC_LOCAL);
serverAssert(g_pserver->rgthreadvar[i].rgdbSnapshot != nullptr);
}
g_pserver->modulethreadvar.rgdbSnapshot = (const redisDbPersistentDataSnapshot**)zcalloc(sizeof(redisDbPersistentDataSnapshot*)*cserver.dbnum, MALLOC_LOCAL);
serverAssert(g_pserver->modulethreadvar.rgdbSnapshot != nullptr);
serverAssert(g_pserver->rgthreadvar[0].rgdbSnapshot != nullptr);
/* Fixup Master Client Database */
@ -4089,6 +4109,8 @@ int populateCommandTableParseFlags(struct redisCommand *c, const char *strflags)
c->flags |= CMD_NO_AUTH;
} else if (!strcasecmp(flag,"may-replicate")) {
c->flags |= CMD_MAY_REPLICATE;
} else if (!strcasecmp(flag,"async")) {
c->flags |= CMD_ASYNC_OK;
} else {
/* Parse ACL categories here if the flag name starts with @. */
uint64_t catflag;
@ -4388,8 +4410,7 @@ void call(client *c, int flags) {
monotime call_timer;
int client_old_flags = c->flags;
struct redisCommand *real_cmd = c->cmd;
serverAssert(GlobalLocksAcquired());
static long long prev_err_count;
serverAssert(((flags & CMD_CALL_ASYNC) && (c->cmd->flags & CMD_READONLY)) || GlobalLocksAcquired());
serverTL->fixed_time_expire++;
@ -4412,12 +4433,15 @@ void call(client *c, int flags) {
/* Initialization: clear the flags that must be set by the command on
* demand, and initialize the array for additional commands propagation. */
c->flags &= ~(CLIENT_FORCE_AOF|CLIENT_FORCE_REPL|CLIENT_PREVENT_PROP);
redisOpArray prev_also_propagate = g_pserver->also_propagate;
redisOpArrayInit(&g_pserver->also_propagate);
redisOpArray prev_also_propagate;
if (!(flags & CMD_CALL_ASYNC)) {
prev_also_propagate = g_pserver->also_propagate;
redisOpArrayInit(&g_pserver->also_propagate);
}
/* Call the command. */
dirty = g_pserver->dirty;
prev_err_count = g_pserver->stat_total_error_replies;
serverTL->prev_err_count = serverTL->stat_total_error_replies;
incrementMvccTstamp();
elapsedStart(&call_timer);
try {
@ -4432,7 +4456,10 @@ void call(client *c, int flags) {
serverTL->commandsExecuted++;
const long duration = elapsedUs(call_timer);
c->duration = duration;
dirty = g_pserver->dirty-dirty;
if (flags & CMD_CALL_ASYNC)
dirty = 0; // dirty is bogus in this case as there's no synchronization
else
dirty = g_pserver->dirty-dirty;
if (dirty < 0) dirty = 0;
if (dirty)
@ -4442,7 +4469,7 @@ void call(client *c, int flags) {
* We leverage a static variable (prev_err_count) to retain
* the counter across nested function calls and avoid logging
* the same error twice. */
if ((g_pserver->stat_total_error_replies - prev_err_count) > 0) {
if ((serverTL->stat_total_error_replies - serverTL->prev_err_count) > 0) {
real_cmd->failed_calls++;
}
@ -4483,8 +4510,13 @@ void call(client *c, int flags) {
/* Log the command into the Slow log if needed.
* If the client is blocked we will handle slowlog when it is unblocked. */
if ((flags & CMD_CALL_SLOWLOG) && !(c->flags & CLIENT_BLOCKED))
slowlogPushCurrentCommand(c, real_cmd, duration);
if ((flags & CMD_CALL_SLOWLOG) && !(c->flags & CLIENT_BLOCKED)) {
if (duration >= g_pserver->slowlog_log_slower_than) {
AeLocker locker;
locker.arm(c);
slowlogPushCurrentCommand(c, real_cmd, duration);
}
}
/* Clear the original argv.
* If the client is blocked we will handle slowlog when it is unblocked. */
@ -4493,8 +4525,8 @@ void call(client *c, int flags) {
/* populate the per-command statistics that we show in INFO commandstats. */
if (flags & CMD_CALL_STATS) {
real_cmd->microseconds += duration;
real_cmd->calls++;
__atomic_fetch_add(&real_cmd->microseconds, duration, __ATOMIC_RELAXED);
__atomic_fetch_add(&real_cmd->calls, 1, __ATOMIC_RELAXED);
}
/* Propagate the command into the AOF and replication link */
@ -4538,48 +4570,50 @@ void call(client *c, int flags) {
c->flags |= client_old_flags &
(CLIENT_FORCE_AOF|CLIENT_FORCE_REPL|CLIENT_PREVENT_PROP);
/* Handle the alsoPropagate() API to handle commands that want to propagate
* multiple separated commands. Note that alsoPropagate() is not affected
* by CLIENT_PREVENT_PROP flag. */
if (g_pserver->also_propagate.numops) {
int j;
redisOp *rop;
if (!(flags & CMD_CALL_ASYNC)) {
/* Handle the alsoPropagate() API to handle commands that want to propagate
* multiple separated commands. Note that alsoPropagate() is not affected
* by CLIENT_PREVENT_PROP flag. */
if (g_pserver->also_propagate.numops) {
int j;
redisOp *rop;
if (flags & CMD_CALL_PROPAGATE) {
bool multi_emitted = false;
/* Wrap the commands in g_pserver->also_propagate array,
* but don't wrap it if we are already in MULTI context,
* in case the nested MULTI/EXEC.
*
* And if the array contains only one command, no need to
* wrap it, since the single command is atomic. */
if (g_pserver->also_propagate.numops > 1 &&
!(c->cmd->flags & CMD_MODULE) &&
!(c->flags & CLIENT_MULTI) &&
!(flags & CMD_CALL_NOWRAP))
{
execCommandPropagateMulti(c->db->id);
multi_emitted = true;
}
for (j = 0; j < g_pserver->also_propagate.numops; j++) {
rop = &g_pserver->also_propagate.ops[j];
int target = rop->target;
/* Whatever the command wish is, we honor the call() flags. */
if (!(flags&CMD_CALL_PROPAGATE_AOF)) target &= ~PROPAGATE_AOF;
if (!(flags&CMD_CALL_PROPAGATE_REPL)) target &= ~PROPAGATE_REPL;
if (target)
propagate(rop->cmd,rop->dbid,rop->argv,rop->argc,target);
}
if (flags & CMD_CALL_PROPAGATE) {
bool multi_emitted = false;
/* Wrap the commands in g_pserver->also_propagate array,
* but don't wrap it if we are already in MULTI context,
* in case the nested MULTI/EXEC.
*
* And if the array contains only one command, no need to
* wrap it, since the single command is atomic. */
if (g_pserver->also_propagate.numops > 1 &&
!(c->cmd->flags & CMD_MODULE) &&
!(c->flags & CLIENT_MULTI) &&
!(flags & CMD_CALL_NOWRAP))
{
execCommandPropagateMulti(c->db->id);
multi_emitted = true;
}
for (j = 0; j < g_pserver->also_propagate.numops; j++) {
rop = &g_pserver->also_propagate.ops[j];
int target = rop->target;
/* Whatever the command wish is, we honor the call() flags. */
if (!(flags&CMD_CALL_PROPAGATE_AOF)) target &= ~PROPAGATE_AOF;
if (!(flags&CMD_CALL_PROPAGATE_REPL)) target &= ~PROPAGATE_REPL;
if (target)
propagate(rop->cmd,rop->dbid,rop->argv,rop->argc,target);
}
if (multi_emitted) {
execCommandPropagateExec(c->db->id);
if (multi_emitted) {
execCommandPropagateExec(c->db->id);
}
}
redisOpArrayFree(&g_pserver->also_propagate);
}
redisOpArrayFree(&g_pserver->also_propagate);
g_pserver->also_propagate = prev_also_propagate;
}
g_pserver->also_propagate = prev_also_propagate;
/* Client pause takes effect after a transaction has finished. This needs
* to be located after everything is propagated. */
@ -4599,15 +4633,17 @@ void call(client *c, int flags) {
}
}
g_pserver->stat_numcommands++;
__atomic_fetch_add(&g_pserver->stat_numcommands, 1, __ATOMIC_RELAXED);
serverTL->fixed_time_expire--;
prev_err_count = g_pserver->stat_total_error_replies;
serverTL->prev_err_count = serverTL->stat_total_error_replies;
/* Record peak memory after each command and before the eviction that runs
* before the next command. */
size_t zmalloc_used = zmalloc_used_memory();
if (zmalloc_used > g_pserver->stat_peak_memory)
g_pserver->stat_peak_memory = zmalloc_used;
if (!(flags & CMD_CALL_ASYNC)) {
/* Record peak memory after each command and before the eviction that runs
* before the next command. */
size_t zmalloc_used = zmalloc_used_memory();
if (zmalloc_used > g_pserver->stat_peak_memory)
g_pserver->stat_peak_memory = zmalloc_used;
}
}
/* Used when a command that is ready for execution needs to be rejected, due to
@ -4663,7 +4699,7 @@ static int cmdHasMovableKeys(struct redisCommand *cmd) {
* if C_ERR is returned the client was destroyed (i.e. after QUIT). */
int processCommand(client *c, int callFlags) {
AssertCorrectThread(c);
serverAssert(GlobalLocksAcquired());
serverAssert((callFlags & CMD_CALL_ASYNC) || GlobalLocksAcquired());
if (!g_pserver->lua_timedout) {
/* Both EXEC and EVAL call call() directly so there should be
* no way in_exec or in_eval or propagate_in_transaction is 1.
@ -4797,7 +4833,7 @@ int processCommand(client *c, int callFlags) {
* the event loop since there is a busy Lua script running in timeout
* condition, to avoid mixing the propagation of scripts with the
* propagation of DELs due to eviction. */
if (g_pserver->maxmemory && !g_pserver->lua_timedout) {
if (g_pserver->maxmemory && !g_pserver->lua_timedout && !(callFlags & CMD_CALL_ASYNC)) {
int out_of_memory = (performEvictions(false /*fPreSnapshot*/) == EVICT_FAIL);
/* freeMemoryIfNeeded may flush replica output buffers. This may result
* into a replica, that may be the active client, to be freed. */
@ -4996,6 +5032,46 @@ bool client::postFunction(std::function<void(client *)> fn, bool fLock) {
}, fLock) == AE_OK;
}
/* Capture a client's full argument vector (argv[0..argc-1]) as shared
 * pointers, so the arguments stay alive while an async command runs on a
 * worker thread after the original request processing has moved on. */
std::vector<robj_sharedptr> clientArgs(client *c) {
    std::vector<robj_sharedptr> vecargs;
    vecargs.reserve(c->argc);   // one allocation; size is known up front
    for (int iarg = 0; iarg < c->argc; ++iarg)
        vecargs.emplace_back(c->argv[iarg]);
    return vecargs;
}
/* Run a command asynchronously against a point-in-time snapshot of this
 * client's database.
 *
 * mainFn  - executed back on the client's event loop with the snapshot and a
 *           shared-pointer copy of the client's argv; runs with the client
 *           lock held and the AeLocker armed.
 * postFn  - optional; executed after mainFn with the client lock released and
 *           before the snapshot is scheduled for async teardown.
 *
 * Returns false (and runs nothing) when no snapshot could be obtained --
 * including when the client is in MULTI or is blocked, where a snapshot is
 * deliberately not even attempted. Returns true once the work is queued. */
bool client::asyncCommand(std::function<void(const redisDbPersistentDataSnapshot *, const std::vector<robj_sharedptr> &)> &&mainFn,
                          std::function<void(const redisDbPersistentDataSnapshot *)> &&postFn)
{
    /* Must be called from the thread that owns this client. */
    serverAssert(FCorrectThread(this));
    const redisDbPersistentDataSnapshot *snapshot = nullptr;
    /* Don't snapshot for MULTI or blocked clients; they fall back to the
     * synchronous path via the false return below. */
    if (!(this->flags & (CLIENT_MULTI | CLIENT_BLOCKED)))
        snapshot = this->db->createSnapshot(this->mvccCheckpoint, false /* fOptional */);
    if (snapshot == nullptr) {
        return false;
    }
    /* Remember this thread's event loop so the worker can post back to it. */
    aeEventLoop *el = serverTL->el;
    blockClient(this, BLOCKED_ASYNC);
    g_pserver->asyncworkqueue->AddWorkFunction([el, this, mainFn, postFn, snapshot] {
        /* NOTE(review): argv is copied here on the worker thread -- assumes
         * argv cannot change while the client is BLOCKED_ASYNC; confirm. */
        std::vector<robj_sharedptr> args = clientArgs(this);
        /* Hop back to the client's own event loop to deliver the result. */
        aePostFunction(el, [this, mainFn, postFn, snapshot, args] {
            /* Drop the global lock taken by aePostFunction; re-acquire the
             * narrower client lock + AeLocker pair instead (lock ordering). */
            aeReleaseLock();
            std::unique_lock<decltype(this->lock)> lock(this->lock);
            AeLocker locker;
            locker.arm(this);
            unblockClient(this);
            mainFn(snapshot, args);
            locker.disarm();
            lock.unlock();
            /* postFn runs without the client lock held. */
            if (postFn)
                postFn(snapshot);
            /* Snapshot teardown is deferred to a background path. */
            this->db->endSnapshotAsync(snapshot);
            /* Restore the global lock state aePostFunction's caller expects. */
            aeAcquireLock();
        });
    });
    return true;
}
/* ====================== Error lookup and execution ===================== */
void incrementErrorCount(const char *fullerr, size_t namelen) {
@ -5056,11 +5132,7 @@ int prepareForShutdown(int flags) {
overwrite the synchronous saving did by SHUTDOWN. */
if (g_pserver->FRdbSaveInProgress()) {
serverLog(LL_WARNING,"There is a child saving an .rdb. Killing it!");
/* Note that, in killRDBChild, we call rdbRemoveTempFile that will
* do close fd(in order to unlink file actully) in background thread.
* The temp rdb file fd may won't be closed when redis exits quickly,
* but OS will close this fd when process exits. */
killRDBChild(true);
killRDBChild();
/* Note that, in killRDBChild normally has backgroundSaveDoneHandler
* doing it's cleanup, but in this case this code will not be reached,
* so we need to call rdbRemoveTempFile which will close fd(in order
@ -5806,6 +5878,10 @@ sds genRedisInfoString(const char *section) {
stat_net_input_bytes = g_pserver->stat_net_input_bytes.load(std::memory_order_relaxed);
stat_net_output_bytes = g_pserver->stat_net_output_bytes.load(std::memory_order_relaxed);
long long stat_total_error_replies = 0;
for (int iel = 0; iel < cserver.cthreads; ++iel)
stat_total_error_replies += g_pserver->rgthreadvar[iel].stat_total_error_replies;
if (sections++) info = sdscat(info,"\r\n");
info = sdscatprintf(info,
"# Stats\r\n"
@ -5846,7 +5922,9 @@ sds genRedisInfoString(const char *section) {
"total_reads_processed:%lld\r\n"
"total_writes_processed:%lld\r\n"
"instantaneous_lock_contention:%d\r\n"
"avg_lock_contention:%f\r\n",
"avg_lock_contention:%f\r\n"
"storage_provider_read_hits:%lld\r\n"
"storage_provider_read_misses:%lld\r\n",
g_pserver->stat_numconnections,
g_pserver->stat_numcommands,
getInstantaneousMetric(STATS_METRIC_COMMAND),
@ -5879,12 +5957,14 @@ sds genRedisInfoString(const char *section) {
(unsigned long long) trackingGetTotalItems(),
(unsigned long long) trackingGetTotalPrefixes(),
g_pserver->stat_unexpected_error_replies,
g_pserver->stat_total_error_replies,
stat_total_error_replies,
g_pserver->stat_dump_payload_sanitizations,
stat_total_reads_processed,
stat_total_writes_processed,
aeLockContention(),
avgLockContention);
avgLockContention,
g_pserver->stat_storage_provider_read_hits,
g_pserver->stat_storage_provider_read_misses);
}
/* Replication */
@ -7402,7 +7482,7 @@ int main(int argc, char **argv) {
serverLog(LL_WARNING, "Failed to test the kernel for a bug that could lead to data corruption during background save. "
"Your system could be affected, please report this error.");
if (!checkIgnoreWarning("ARM64-COW-BUG")) {
serverLog(LL_WARNING,"Redis will now exit to prevent data corruption. "
serverLog(LL_WARNING,"KeyDB will now exit to prevent data corruption. "
"Note that it is possible to suppress this warning by setting the following config: ignore-warnings ARM64-COW-BUG");
exit(1);
}

View File

@ -476,6 +476,7 @@ extern int configOOMScoreAdjValuesDefaults[CONFIG_OOM_COUNT];
#define CMD_CATEGORY_SCRIPTING (1ULL<<38)
#define CMD_CATEGORY_REPLICATION (1ULL<<39)
#define CMD_SKIP_PROPOGATE (1ULL<<40) /* "noprop" flag */
#define CMD_ASYNC_OK (1ULL<<41) /* This command is safe without a lock */
/* AOF states */
#define AOF_OFF 0 /* AOF is off */
@ -574,6 +575,7 @@ typedef enum {
REPL_STATE_NONE = 0, /* No active replication */
REPL_STATE_CONNECT, /* Must connect to master */
REPL_STATE_CONNECTING, /* Connecting to master */
REPL_STATE_RETRY_NOREPLPING, /* Master does not support REPLPING, retry with PING */
/* --- Handshake states, must be ordered --- */
REPL_STATE_RECEIVE_PING_REPLY, /* Wait for PING reply */
REPL_STATE_SEND_HANDSHAKE, /* Send handshake sequance to master */
@ -726,6 +728,7 @@ typedef enum {
#define CMD_CALL_FULL (CMD_CALL_SLOWLOG | CMD_CALL_STATS | CMD_CALL_PROPAGATE | CMD_CALL_NOWRAP)
#define CMD_CALL_NOWRAP (1<<4) /* Don't wrap also propagate array into
MULTI/EXEC: the caller will handle it. */
#define CMD_CALL_ASYNC (1<<5)
/* Command propagation flags, see propagate() function */
#define PROPAGATE_NONE 0
@ -956,6 +959,7 @@ struct redisObjectExtended {
};
typedef struct redisObject {
friend redisObject *createEmbeddedStringObject(const char *ptr, size_t len);
protected:
redisObject() {}
@ -1281,6 +1285,8 @@ public:
// These need to be fixed
using redisDbPersistentData::size;
using redisDbPersistentData::expireSize;
static const uint64_t msStaleThreshold = 500;
};
/* Redis database representation. There are multiple databases identified
@ -1351,7 +1357,6 @@ struct redisDb : public redisDbPersistentDataSnapshot
using redisDbPersistentData::commitChanges;
using redisDbPersistentData::setexpireUnsafe;
using redisDbPersistentData::setexpire;
using redisDbPersistentData::createSnapshot;
using redisDbPersistentData::endSnapshot;
using redisDbPersistentData::restoreSnapshot;
using redisDbPersistentData::removeAllCachedValues;
@ -1367,6 +1372,13 @@ struct redisDb : public redisDbPersistentDataSnapshot
using redisDbPersistentData::bulkStorageInsert;
public:
const redisDbPersistentDataSnapshot *createSnapshot(uint64_t mvccCheckpoint, bool fOptional) {
auto psnapshot = redisDbPersistentData::createSnapshot(mvccCheckpoint, fOptional);
if (psnapshot != nullptr)
mvccLastSnapshot = psnapshot->mvccCheckpoint();
return psnapshot;
}
expireset::setiter expireitr;
dict *blocking_keys; /* Keys with clients waiting for data (BLPOP)*/
dict *ready_keys; /* Blocked keys that received a PUSH */
@ -1375,6 +1387,7 @@ public:
long long last_expire_set; /* when the last expire was set */
double avg_ttl; /* Average TTL, just for stats */
list *defrag_later; /* List of key names to attempt to defrag one by one, gradually. */
uint64_t mvccLastSnapshot = 0;
};
/* Declare database backup that include redis main DBs and slots to keys map.
@ -1692,6 +1705,8 @@ struct client {
// post a function from a non-client thread to run on its client thread
bool postFunction(std::function<void(client *)> fn, bool fLock = true);
size_t argv_len_sum() const;
bool asyncCommand(std::function<void(const redisDbPersistentDataSnapshot *, const std::vector<robj_sharedptr> &)> &&mainFn,
std::function<void(const redisDbPersistentDataSnapshot *)> &&postFn = nullptr);
};
struct saveparam {
@ -1730,7 +1745,7 @@ struct sharedObjectsStruct {
*emptyscan, *multi, *exec, *left, *right, *hset, *srem, *xgroup, *xclaim,
*script, *replconf, *eval, *persist, *set, *pexpireat, *pexpire,
*time, *pxat, *px, *retrycount, *force, *justid,
*lastid, *ping, *setid, *keepttl, *load, *createconsumer,
*lastid, *ping, *replping, *setid, *keepttl, *load, *createconsumer,
*getack, *special_asterick, *special_equals, *default_username,
*hdel, *zrem, *mvccrestore, *pexpirememberat,
*select[PROTO_SHARED_SELECT_CMDS],
@ -2046,9 +2061,12 @@ struct redisServerThreadVars {
long unsigned commandsExecuted = 0;
GarbageCollectorCollection::Epoch gcEpoch;
const redisDbPersistentDataSnapshot **rgdbSnapshot = nullptr;
long long stat_total_error_replies; /* Total number of issued error replies ( command + rejected errors ) */
long long prev_err_count; /* per thread marker of exisiting errors during a call */
bool fRetrySetAofEvent = false;
bool modulesEnabledThisAeLoop = false; /* In this loop of aeMain, were modules enabled before
the thread went to sleep? */
bool disable_async_commands = false; /* this is only valid for one cycle of the AE loop and is reset in afterSleep */
std::vector<client*> vecclientsProcess;
dictAsyncRehashCtl *rehashCtl = nullptr;
@ -2279,10 +2297,11 @@ struct redisServer {
double stat_module_progress; /* Module save progress. */
uint64_t stat_clients_type_memory[CLIENT_TYPE_COUNT];/* Mem usage by type */
long long stat_unexpected_error_replies; /* Number of unexpected (aof-loading, replica to master, etc.) error replies */
long long stat_total_error_replies; /* Total number of issued error replies ( command + rejected errors ) */
long long stat_dump_payload_sanitizations; /* Number deep dump payloads integrity validations. */
std::atomic<long long> stat_total_reads_processed; /* Total number of read events processed */
std::atomic<long long> stat_total_writes_processed; /* Total number of write events processed */
long long stat_storage_provider_read_hits;
long long stat_storage_provider_read_misses;
/* The following two are used to track instantaneous metrics, like
* number of operations per second, network traffic. */
struct {
@ -2603,6 +2622,8 @@ struct redisServer {
int target_replica_port; /* Failover target port */
int failover_state; /* Failover state */
int enable_async_commands;
long long repl_batch_offStart = -1;
long long repl_batch_idxStart = -1;
@ -2997,6 +3018,7 @@ robj *createZsetZiplistObject(void);
robj *createStreamObject(void);
robj *createModuleObject(moduleType *mt, void *value);
int getLongFromObjectOrReply(client *c, robj *o, long *target, const char *msg);
int getUnsignedLongLongFromObjectOrReply(client *c, robj *o, uint64_t *target, const char *msg);
int getPositiveLongFromObjectOrReply(client *c, robj *o, long *target, const char *msg);
int getRangeLongFromObjectOrReply(client *c, robj *o, long min, long max, long *target, const char *msg);
int checkType(client *c, robj_roptr o, int type);
@ -3348,6 +3370,7 @@ void propagateSubkeyExpire(redisDb *db, int type, robj *key, robj *subkey);
int expireIfNeeded(redisDb *db, robj *key);
void setExpire(client *c, redisDb *db, robj *key, robj *subkey, long long when);
void setExpire(client *c, redisDb *db, robj *key, expireEntry &&entry);
robj_roptr lookupKeyRead(redisDb *db, robj *key, uint64_t mvccCheckpoint);
robj_roptr lookupKeyRead(redisDb *db, robj *key);
int checkAlreadyExpired(long long when);
robj *lookupKeyWrite(redisDb *db, robj *key);

View File

@ -654,9 +654,7 @@ int redisDbPersistentDataSnapshot::snapshot_depth() const
bool redisDbPersistentDataSnapshot::FStale() const
{
// 0.5 seconds considered stale;
static const uint64_t msStale = 500;
return ((getMvccTstamp() - m_mvccCheckpoint) >> MVCC_MS_SHIFT) >= msStale;
return ((getMvccTstamp() - m_mvccCheckpoint) >> MVCC_MS_SHIFT) >= redisDbPersistentDataSnapshot::msStaleThreshold;
}
void dictGCAsyncFree(dictAsyncRehashCtl *async) {

View File

@ -812,7 +812,7 @@ void hmgetCommand(client *c) {
/* Don't abort when the key cannot be found. Non-existing keys are empty
* hashes, where HMGET should respond with a series of null bulks. */
o = lookupKeyRead(c->db, c->argv[1]);
o = lookupKeyRead(c->db, c->argv[1], c->mvccCheckpoint);
if (checkType(c,o,OBJ_HASH)) return;
addReplyArrayLen(c, c->argc-2);

View File

@ -512,7 +512,7 @@ void spopWithCountCommand(client *c) {
const char *sdsele;
robj *objele;
int encoding;
int64_t llele;
int64_t llele = 0;
unsigned long remaining = size-count; /* Elements left after SPOP. */
/* If we are here, the number of requested elements is less than the
@ -664,7 +664,7 @@ void srandmemberWithCountCommand(client *c) {
int uniq = 1;
robj_roptr set;
const char *ele;
int64_t llele;
int64_t llele = 0;
int encoding;
dict *d;
@ -813,7 +813,7 @@ void srandmemberWithCountCommand(client *c) {
void srandmemberCommand(client *c) {
robj_roptr set;
const char *ele;
int64_t llele;
int64_t llele = 0;
int encoding;
if (c->argc == 3) {

View File

@ -813,7 +813,7 @@ int64_t streamTrim(stream *s, streamAddTrimArgs *args) {
}
deleted += deleted_from_lp;
/* Now we the entries/deleted counters. */
/* Now we update the entries/deleted counters. */
p = lpFirst(lp);
lp = lpReplaceInteger(lp,&p,entries-deleted_from_lp);
p = lpNext(lp,p); /* Skip deleted field. */
@ -842,7 +842,7 @@ int64_t streamTrim(stream *s, streamAddTrimArgs *args) {
/* Trims a stream by length. Returns the number of deleted items. */
int64_t streamTrimByLength(stream *s, long long maxlen, int approx) {
streamAddTrimArgs args = {0};
streamAddTrimArgs args = {{0}};
args.trim_strategy = TRIM_STRATEGY_MAXLEN;
args.approx_trim = approx;
args.limit = approx ? 100 * g_pserver->stream_node_max_entries : 0;
@ -852,7 +852,7 @@ int64_t streamTrimByLength(stream *s, long long maxlen, int approx) {
/* Trims a stream by minimum ID. Returns the number of deleted items. */
int64_t streamTrimByID(stream *s, streamID minid, int approx) {
streamAddTrimArgs args = {0};
streamAddTrimArgs args = {{0}};
args.trim_strategy = TRIM_STRATEGY_MINID;
args.approx_trim = approx;
args.limit = approx ? 100 * g_pserver->stream_node_max_entries : 0;

View File

@ -29,6 +29,7 @@
#include "server.h"
#include <cmath> /* isnan(), isinf() */
#include "aelocker.h"
/* Forward declarations */
int getGenericCommand(client *c);
@ -524,19 +525,13 @@ void getrangeCommand(client *c) {
}
void mgetCommand(client *c) {
int j;
addReplyArrayLen(c,c->argc-1);
for (j = 1; j < c->argc; j++) {
robj_roptr o = lookupKeyRead(c->db,c->argv[j]);
if (o == nullptr) {
for (int i = 1; i < c->argc; i++) {
robj_roptr o = lookupKeyRead(c->db,c->argv[i],c->mvccCheckpoint);
if (o == nullptr || o->type != OBJ_STRING) {
addReplyNull(c);
} else {
if (o->type != OBJ_STRING) {
addReplyNull(c);
} else {
addReplyBulk(c,o);
}
addReplyBulk(c,o);
}
}
}

View File

@ -1,9 +1,9 @@
# Redis configuration for testing.
# KeyDB configuration for testing.
always-show-logo yes
notify-keyspace-events KEA
daemonize no
pidfile /var/run/redis.pid
pidfile /var/run/keydb.pid
port 6379
timeout 0
bind 127.0.0.1

View File

@ -1,5 +1,5 @@
# Minimal configuration for testing.
always-show-logo yes
daemonize no
pidfile /var/run/redis.pid
pidfile /var/run/keydb.pid
loglevel verbose

View File

@ -1,4 +1,4 @@
source tests/support/redis.tcl
source tests/support/keydb.tcl
source tests/support/util.tcl
set ::tlsdir "tests/tls"

View File

@ -1,4 +1,4 @@
source tests/support/redis.tcl
source tests/support/keydb.tcl
source tests/support/util.tcl
set ::tlsdir "tests/tls"

View File

@ -1,4 +1,4 @@
source tests/support/redis.tcl
source tests/support/keydb.tcl
set ::tlsdir "tests/tls"

View File

@ -10,7 +10,7 @@
package require Tcl 8.5
set tcl_precision 17
source ../support/redis.tcl
source ../support/keydb.tcl
source ../support/util.tcl
source ../support/server.tcl
source ../support/test.tcl
@ -36,7 +36,7 @@ set ::run_matching {} ; # If non empty, only tests matching pattern are run.
if {[catch {cd tmp}]} {
puts "tmp directory not found."
puts "Please run this test from the Redis source root."
puts "Please run this test from the KeyDB source root."
exit 1
}
@ -92,7 +92,7 @@ proc spawn_instance {type base_port count {conf {}} {base_conf_file ""}} {
puts $cfg [format "tls-key-file %s/../../tls/server.key" [pwd]]
puts $cfg [format "tls-client-cert-file %s/../../tls/client.crt" [pwd]]
puts $cfg [format "tls-client-key-file %s/../../tls/client.key" [pwd]]
puts $cfg [format "tls-dh-params-file %s/../../tls/redis.dh" [pwd]]
puts $cfg [format "tls-dh-params-file %s/../../tls/keydb.dh" [pwd]]
puts $cfg [format "tls-ca-cert-file %s/../../tls/ca.crt" [pwd]]
puts $cfg "loglevel debug"
} else {
@ -303,7 +303,7 @@ proc pause_on_error {} {
set count 10
if {[lindex $argv 1] ne {}} {set count [lindex $argv 1]}
foreach_redis_id id {
puts "=== REDIS $id ===="
puts "=== KeyDB $id ===="
puts [exec tail -$count redis_$id/log.txt]
puts "---------------------\n"
}
@ -317,7 +317,7 @@ proc pause_on_error {} {
}
} elseif {$cmd eq {ls}} {
foreach_redis_id id {
puts -nonewline "Redis $id"
puts -nonewline "KeyDB $id"
set errcode [catch {
set str {}
append str "@[RI $id tcp_port]: "
@ -348,13 +348,13 @@ proc pause_on_error {} {
}
}
} elseif {$cmd eq {help}} {
puts "ls List Sentinel and Redis instances."
puts "ls List Sentinel and KeyDB instances."
puts "show-sentinel-logs \[N\] Show latest N lines of logs."
puts "show-keydb-logs \[N\] Show latest N lines of logs."
puts "S <id> cmd ... arg Call command in Sentinel <id>."
puts "R <id> cmd ... arg Call command in Redis <id>."
puts "R <id> cmd ... arg Call command in KeyDB <id>."
puts "SI <id> <field> Show Sentinel <id> INFO <field>."
puts "RI <id> <field> Show Redis <id> INFO <field>."
puts "RI <id> <field> Show KeyDB <id> INFO <field>."
puts "continue Resume test."
} else {
set errcode [catch {eval $line} retval]

View File

@ -1,12 +1,10 @@
set system_name [string tolower [exec uname -s]]
# ldd --version returns 1 under musl for unknown reasons. If this check stops working, that may be why
set is_musl [catch {exec ldd --version}]
set system_supported 0
# We only support darwin or Linux with glibc
if {$system_name eq {darwin}} {
set system_supported 1
} elseif {$system_name eq {linux} && $is_musl eq 0} {
} elseif {$system_name eq {linux}} {
# Avoid the test on libmusl, which does not support backtrace
set ldd [exec ldd src/keydb-server]
if {![string match {*libc.musl*} $ldd]} {

View File

@ -4,7 +4,7 @@ proc show_cluster_status {} {
# The following is the regexp we use to match the log line
# time info. Logs are in the following form:
#
# 11296:M 25 May 2020 17:37:14.652 # Server initialized
# 11296:11296:M 25 May 2020 17:37:14.652 # Server initialized
set log_regexp {^[0-9]+:^[0-9]+:[A-Z] [0-9]+ [A-z]+ [0-9]+ ([0-9:.]+) .*}
set repl_regexp {(master|repl|sync|backlog|meaningful|offset)}

View File

@ -356,7 +356,7 @@ proc start_server {options {code undefined}} {
dict set config "tls-key-file" [format "%s/tests/tls/server.key" [pwd]]
dict set config "tls-client-cert-file" [format "%s/tests/tls/client.crt" [pwd]]
dict set config "tls-client-key-file" [format "%s/tests/tls/client.key" [pwd]]
dict set config "tls-dh-params-file" [format "%s/tests/tls/redis.dh" [pwd]]
dict set config "tls-dh-params-file" [format "%s/tests/tls/keydb.dh" [pwd]]
dict set config "tls-ca-cert-file" [format "%s/tests/tls/ca.crt" [pwd]]
dict set config "loglevel" "debug"
}

View File

@ -5,7 +5,7 @@
package require Tcl 8.5
set tcl_precision 17
source tests/support/redis.tcl
source tests/support/keydb.tcl
source tests/support/server.tcl
source tests/support/tmpfile.tcl
source tests/support/test.tcl
@ -58,8 +58,8 @@ set ::all_tests {
integration/psync2-reg
integration/psync2-pingoff
integration/failover
integration/redis-cli
integration/redis-benchmark
integration/keydb-cli
integration/keydb-benchmark
integration/replication-fast
unit/pubsub
unit/slowlog

View File

@ -179,12 +179,31 @@ start_server {tags {"expire"}} {
# one second.
after 1000
set size2 [r dbsize]
set async [lindex [split [r config get enable-async-commands] " "] 1]
r config set enable-async-commands no
r mget key1 key2 key3
r config set enable-async-commands $async
set size3 [r dbsize]
r debug set-active-expire 1
list $size1 $size2 $size3
} {3 3 0}
test {Return nil when async get expired key} {
    r flushdb
    # Disable active expiry so only the lazy (on-access) path can reap the key.
    r debug set-active-expire 0
    r psetex key 500 a
    # The key has a 500ms TTL; after one second it is certainly logically
    # expired, though with active expiry off it still sits in the keyspace.
    after 1000
    # Force the async command path on (restoring the old setting afterwards)
    # so GET exercises expired-key handling through the snapshot/async code.
    set async [lindex [split [r config get enable-async-commands] " "] 1]
    r config set enable-async-commands yes
    set out [r get key]
    r config set enable-async-commands $async
    r debug set-active-expire 1
    # An expired key must read as nil even on the async path.
    assert_equal $out {}
}
test {EXPIRE should not resurrect keys (issue #1026)} {
r debug set-active-expire 0
r set foo bar

View File

@ -12,6 +12,16 @@ start_server [list tags {flash} overrides [list storage-provider {flash ./rocks.
assert_equal {0} [r dbsize] "Key count is accurate after non-existant delete"
}
test { DEL of flushed key works } {
    r flushall
    r set testkey foo
    assert_equal {1} [r dbsize] "Only one key after first insert"
    # FLUSHALL CACHE presumably evicts in-memory values while keeping them in
    # the flash storage provider -- the key must still be readable afterwards.
    r flushall cache
    assert_equal {foo} [r get testkey] "Value still there after flushing cache"
    # DEL must remove the key from storage too, not just the (empty) cache.
    r del testkey
    assert_equal {0} [r dbsize] "No keys after delete"
}
test { SET of existing but flushed key works } {
r flushall
r set testkey foo
@ -99,6 +109,16 @@ start_server [list tags {flash} overrides [list storage-provider {flash ./rocks.
assert_equal $expectedDigest [r debug digest]
}
test { DELETE of flushed set member persists after another flush } {
    r flushall
    r sadd set1 val1 val2 val3
    assert_equal {3} [r scard set1]
    # Evict the set's value to the storage provider, then mutate it while it
    # is not cached; the SREM must be written through, not lost.
    r flushall cache
    r srem set1 val1
    r flushall cache
    # After a second cache flush the removal must still be visible.
    assert_equal {2} [r scard set1]
}
r flushall
# If a weak storage memory model is set, wait for any pending snapshot writes to finish
after 500

View File

@ -62,57 +62,55 @@ start_server {overrides {save ""} tags {"other"}} {
} {*index is out of range*}
tags {consistency} {
if {true} {
if {$::accurate} {set numops 10000} else {set numops 1000}
test {Check consistency of different data types after a reload} {
r flushdb
createComplexDataset r $numops
set dump [csvdump r]
set sha1 [r debug digest]
r debug reload
set sha1_after [r debug digest]
if {$sha1 eq $sha1_after} {
set _ 1
} else {
set newdump [csvdump r]
puts "Consistency test failed!"
puts "You can inspect the two dumps in /tmp/repldump*.txt"
if {$::accurate} {set numops 10000} else {set numops 1000}
test {Check consistency of different data types after a reload} {
r flushdb
createComplexDataset r $numops
set dump [csvdump r]
set sha1 [r debug digest]
r debug reload
set sha1_after [r debug digest]
if {$sha1 eq $sha1_after} {
set _ 1
} else {
set newdump [csvdump r]
puts "Consistency test failed!"
puts "You can inspect the two dumps in /tmp/repldump*.txt"
set fd [open /tmp/repldump1.txt w]
puts $fd $dump
close $fd
set fd [open /tmp/repldump2.txt w]
puts $fd $newdump
close $fd
set fd [open /tmp/repldump1.txt w]
puts $fd $dump
close $fd
set fd [open /tmp/repldump2.txt w]
puts $fd $newdump
close $fd
set _ 0
}
} {1}
set _ 0
}
} {1}
test {Same dataset digest if saving/reloading as AOF?} {
r config set aof-use-rdb-preamble no
r bgrewriteaof
waitForBgrewriteaof r
r debug loadaof
set sha1_after [r debug digest]
if {$sha1 eq $sha1_after} {
set _ 1
} else {
set newdump [csvdump r]
puts "Consistency test failed!"
puts "You can inspect the two dumps in /tmp/aofdump*.txt"
test {Same dataset digest if saving/reloading as AOF?} {
r config set aof-use-rdb-preamble no
r bgrewriteaof
waitForBgrewriteaof r
r debug loadaof
set sha1_after [r debug digest]
if {$sha1 eq $sha1_after} {
set _ 1
} else {
set newdump [csvdump r]
puts "Consistency test failed!"
puts "You can inspect the two dumps in /tmp/aofdump*.txt"
set fd [open /tmp/aofdump1.txt w]
puts $fd $dump
close $fd
set fd [open /tmp/aofdump2.txt w]
puts $fd $newdump
close $fd
set fd [open /tmp/aofdump1.txt w]
puts $fd $dump
close $fd
set fd [open /tmp/aofdump2.txt w]
puts $fd $newdump
close $fd
set _ 0
}
} {1}
}
set _ 0
}
} {1}
}
test {EXPIRES after a reload (snapshot + append only file rewrite)} {

View File

@ -100,8 +100,8 @@ start_server {tags {"tls"}} {
set master_port [srv 0 port]
# Use a non-restricted client/server cert for the replica
set redis_crt [format "%s/tests/tls/redis.crt" [pwd]]
set redis_key [format "%s/tests/tls/redis.key" [pwd]]
set redis_crt [format "%s/tests/tls/keydb.crt" [pwd]]
set redis_key [format "%s/tests/tls/keydb.key" [pwd]]
start_server [list overrides [list tls-cert-file $redis_crt tls-key-file $redis_key] \
omit [list tls-client-cert-file tls-client-key-file]] {

40
utils/compare_config.sh Normal file
View File

@ -0,0 +1,40 @@
#! /bin/bash
# compare_config.sh -- compare two KeyDB configuration files.
#
# Prints all active (non-comment, non-blank) parameters of each file
# side by side in sorted order, then a summary of the differing lines.

if [[ "$1" == "--help" ]] || [[ "$1" == "-h" ]] || [[ "$#" -ne 2 ]] ; then
    echo "This script is used to compare different KeyDB configuration files."
    echo ""
    echo "   Usage: compare_config.sh [keydb1.conf] [keydb2.conf]"
    echo ""
    echo "Output: a side by side sorted list of all active parameters, followed by a summary of the differences."
    exit 0
fi

# Fail early with a clear message instead of producing empty comparisons.
for f in "$1" "$2"; do
    if [[ ! -r "$f" ]]; then
        echo "error: cannot read config file '$f'" >&2
        exit 1
    fi
done

conf_1=$(mktemp)
conf_2=$(mktemp)
# Remove the temp files on every exit path, including interrupts.
trap 'rm -f "$conf_1" "$conf_2"' EXIT

echo "----------------------------------------------------"
echo "--- display all active parameters in config files---"
echo "----------------------------------------------------"
echo ""

# Keep only active parameter lines (drop comments and blanks) and sort them
# so matching parameters line up in the merged view.
echo "--- $1 ---" > "$conf_1"
echo "" >> "$conf_1"
grep -ve "^#" -ve "^$" "$1" | sort >> "$conf_1"

echo "--- $2 ---" > "$conf_2"
echo "" >> "$conf_2"
grep -ve "^#" -ve "^$" "$2" | sort >> "$conf_2"

# Side-by-side merged listing of both prepared files.
pr -T --merge "$conf_1" "$conf_2"

echo ""
echo ""
echo "--------------------------------------------"
echo "--- display config file differences only ---"
echo "--------------------------------------------"
echo ""

# sdiff exits non-zero when the files differ; that is expected output here,
# and the script still exits 0 below as before.
sdiff --suppress-common-lines "$conf_1" "$conf_2"

exit 0

View File

@ -3,10 +3,10 @@
# Generate some test certificates which are used by the regression test suite:
#
# tests/tls/ca.{crt,key} Self signed CA certificate.
# tests/tls/redis.{crt,key} A certificate with no key usage/policy restrictions.
# tests/tls/keydb.{crt,key} A certificate with no key usage/policy restrictions.
# tests/tls/client.{crt,key} A certificate restricted for SSL client usage.
# tests/tls/server.{crt,key}     A certificate restricted for SSL server usage.
# tests/tls/redis.dh DH Params file.
# tests/tls/keydb.dh DH Params file.
generate_cert() {
local name=$1
@ -19,7 +19,7 @@ generate_cert() {
[ -f $keyfile ] || openssl genrsa -out $keyfile 2048
openssl req \
-new -sha256 \
-subj "/O=Redis Test/CN=$cn" \
-subj "/O=KeyDB Test/CN=$cn" \
-key $keyfile | \
openssl x509 \
-req -sha256 \
@ -38,7 +38,7 @@ openssl req \
-x509 -new -nodes -sha256 \
-key tests/tls/ca.key \
-days 3650 \
-subj '/O=Redis Test/CN=Certificate Authority' \
-subj '/O=KeyDB Test/CN=Certificate Authority' \
-out tests/tls/ca.crt
cat > tests/tls/openssl.cnf <<_END_
@ -53,6 +53,6 @@ _END_
generate_cert server "Server-only" "-extfile tests/tls/openssl.cnf -extensions server_cert"
generate_cert client "Client-only" "-extfile tests/tls/openssl.cnf -extensions client_cert"
generate_cert redis "Generic-cert"
generate_cert keydb "Generic-cert"
[ -f tests/tls/redis.dh ] || openssl dhparam -out tests/tls/redis.dh 2048
[ -f tests/tls/keydb.dh ] || openssl dhparam -out tests/tls/keydb.dh 2048

View File

@ -2,7 +2,7 @@
# Copyright (C) 2011 Salvatore Sanfilippo
# Released under the BSD license like Redis itself
source ../tests/support/redis.tcl
source ../tests/support/keydb.tcl
set ::port 12123
set ::tests {PING,SET,GET,INCR,LPUSH,LPOP,SADD,SPOP,LRANGE_100,LRANGE_600,MSET}
set ::datasize 16