Merge branch 'unstable' into RELEASE_5

Former-commit-id: b643c3820485886d2a1911af7bc1cd8419e21f99
This commit is contained in:
John Sully 2020-01-06 12:07:11 -05:00
commit 0bc26f88bb
22 changed files with 483 additions and 178 deletions

3
.gitignore vendored
View File

@ -1,6 +1,7 @@
.*.swp
core
*.o
*.d
*.log
dump.rdb
redis-benchmark
@ -27,7 +28,7 @@ release.h
src/transfer.sh
src/configs
redis.ds
src/redis.conf
src/keydb.conf
src/nodes.conf
deps/lua/src/lua
deps/lua/src/luac

View File

@ -9,7 +9,7 @@ What is KeyDB?
KeyDB is a high performance fork of Redis with a focus on multithreading, memory efficiency, and high throughput. In addition to multithreading, KeyDB also has features only available in Redis Enterprise such as [Active Replication](https://github.com/JohnSully/KeyDB/wiki/Active-Replication), [FLASH storage](https://github.com/JohnSully/KeyDB/wiki/FLASH-Storage) support, and some not available at all such as direct backup to AWS S3.
KeyDB maintains full compatibility with the Redis protocol, modules, and scripts. This includes the atomicity gurantees for scripts and transactions. Because KeyDB keeps in sync with Redis development KeyDB is a superset of Redis functionality, making KeyDB a drop in replacement for existing Redis deployments.
KeyDB maintains full compatibility with the Redis protocol, modules, and scripts. This includes the atomicity guarantees for scripts and transactions. Because KeyDB keeps in sync with Redis development KeyDB is a superset of Redis functionality, making KeyDB a drop in replacement for existing Redis deployments.
On the same hardware KeyDB can perform twice as many queries per second as Redis, with 60% lower latency. Active-Replication simplifies hot-spare failover allowing you to easily distribute writes over replicas and use simple TCP based load balancing/failover. KeyDB's higher performance allows you to do more on less hardware which reduces operation costs and complexity.

View File

@ -1,9 +1,9 @@
# Redis configuration file example.
# KeyDB configuration file example.
#
# Note that in order to read the configuration file, Redis must be
# Note that in order to read the configuration file, KeyDB must be
# started with the file path as first argument:
#
# ./keydb-server /path/to/redis.conf
# ./keydb-server /path/to/keydb.conf
# Note on units: when memory size is needed, it is possible to specify
# it in the usual form of 1k 5GB 4M and so forth:
@ -20,12 +20,12 @@
################################## INCLUDES ###################################
# Include one or more other config files here. This is useful if you
# have a standard template that goes to all Redis servers but also need
# have a standard template that goes to all KeyDB servers but also need
# to customize a few per-server settings. Include files can include
# other files, so use this wisely.
#
# Notice option "include" won't be rewritten by command "CONFIG REWRITE"
# from admin or Redis Sentinel. Since Redis always uses the last processed
# from admin or KeyDB Sentinel. Since KeyDB always uses the last processed
# line as value of a configuration directive, you'd better put includes
# at the beginning of this file to avoid overwriting config change at runtime.
#
@ -45,7 +45,7 @@
################################## NETWORK #####################################
# By default, if no "bind" configuration directive is specified, Redis listens
# By default, if no "bind" configuration directive is specified, KeyDB listens
# for connections from all the network interfaces available on the server.
# It is possible to listen to just one or multiple selected interfaces using
# the "bind" configuration directive, followed by one or more IP addresses.
@ -55,11 +55,11 @@
# bind 192.168.1.100 10.0.0.1
# bind 127.0.0.1 ::1
#
# ~~~ WARNING ~~~ If the computer running Redis is directly exposed to the
# ~~~ WARNING ~~~ If the computer running KeyDB is directly exposed to the
# internet, binding to all the interfaces is dangerous and will expose the
# instance to everybody on the internet. So by default we uncomment the
# following bind directive, that will force Redis to listen only into
# the IPv4 loopback interface address (this means Redis will be able to
# following bind directive, that will force KeyDB to listen only into
# the IPv4 loopback interface address (this means KeyDB will be able to
# accept connections only from clients running into the same computer it
# is running).
#
@ -69,7 +69,7 @@
bind 127.0.0.1
# Protected mode is a layer of security protection, in order to avoid that
# Redis instances left open on the internet are accessed and exploited.
# KeyDB instances left open on the internet are accessed and exploited.
#
# When protected mode is on and if:
#
@ -82,13 +82,13 @@ bind 127.0.0.1
# sockets.
#
# By default protected mode is enabled. You should disable it only if
# you are sure you want clients from other hosts to connect to Redis
# you are sure you want clients from other hosts to connect to KeyDB
# even if no authentication is configured, nor a specific set of interfaces
# are explicitly listed using the "bind" directive.
protected-mode yes
# Accept connections on the specified port, default is 6379 (IANA #815344).
# If port 0 is specified Redis will not listen on a TCP socket.
# If port 0 is specified KeyDB will not listen on a TCP socket.
port 6379
# TCP listen() backlog.
@ -103,10 +103,10 @@ tcp-backlog 511
# Unix socket.
#
# Specify the path for the Unix socket that will be used to listen for
# incoming connections. There is no default, so Redis will not listen
# incoming connections. There is no default, so KeyDB will not listen
# on a unix socket when not specified.
#
# unixsocket /tmp/redis.sock
# unixsocket /tmp/keydb.sock
# unixsocketperm 700
# Close the connection after a client is idle for N seconds (0 to disable)
@ -126,19 +126,19 @@ timeout 0
# On other kernels the period depends on the kernel configuration.
#
# A reasonable value for this option is 300 seconds, which is the new
# Redis default starting with Redis 3.2.1.
# KeyDB default starting with Redis 3.2.1.
tcp-keepalive 300
################################# GENERAL #####################################
# By default Redis does not run as a daemon. Use 'yes' if you need it.
# Note that Redis will write a pid file in /var/run/redis.pid when daemonized.
# By default KeyDB does not run as a daemon. Use 'yes' if you need it.
# Note that KeyDB will write a pid file in /var/run/keydb.pid when daemonized.
daemonize no
# If you run Redis from upstart or systemd, Redis can interact with your
# If you run KeyDB from upstart or systemd, KeyDB can interact with your
# supervision tree. Options:
# supervised no - no supervision interaction
# supervised upstart - signal upstart by putting Redis into SIGSTOP mode
# supervised upstart - signal upstart by putting KeyDB into SIGSTOP mode
# supervised systemd - signal systemd by writing READY=1 to $NOTIFY_SOCKET
# supervised auto - detect upstart or systemd method based on
# UPSTART_JOB or NOTIFY_SOCKET environment variables
@ -146,16 +146,16 @@ daemonize no
# They do not enable continuous liveness pings back to your supervisor.
supervised no
# If a pid file is specified, Redis writes it where specified at startup
# If a pid file is specified, KeyDB writes it where specified at startup
# and removes it at exit.
#
# When the server runs non daemonized, no pid file is created if none is
# specified in the configuration. When the server is daemonized, the pid file
# is used even if not specified, defaulting to "/var/run/redis.pid".
# is used even if not specified, defaulting to "/var/run/keydb.pid".
#
# Creating a pid file is best effort: if Redis is not able to create it
# Creating a pid file is best effort: if KeyDB is not able to create it
# nothing bad happens, the server will start and run normally.
pidfile /var/run/redis_6379.pid
pidfile /var/run/keydb_6379.pid
# Specify the server verbosity level.
# This can be one of:
@ -166,7 +166,7 @@ pidfile /var/run/redis_6379.pid
loglevel notice
# Specify the log file name. Also the empty string can be used to force
# Redis to log on the standard output. Note that if you use standard
# KeyDB to log on the standard output. Note that if you use standard
# output for logging but daemonize, logs will be sent to /dev/null
logfile ""
@ -175,7 +175,7 @@ logfile ""
# syslog-enabled no
# Specify the syslog identity.
# syslog-ident redis
# syslog-ident keydb
# Specify the syslog facility. Must be USER or between LOCAL0-LOCAL7.
# syslog-facility local0
@ -185,7 +185,7 @@ logfile ""
# dbid is a number between 0 and 'databases'-1
databases 16
# By default Redis shows an ASCII art logo only when started to log to the
# By default KeyDB shows an ASCII art logo only when started to log to the
# standard output and if the standard output is a TTY. Basically this means
# that normally a logo is displayed only in interactive sessions.
#
@ -219,17 +219,17 @@ save 900 1
save 300 10
save 60 10000
# By default Redis will stop accepting writes if RDB snapshots are enabled
# By default KeyDB will stop accepting writes if RDB snapshots are enabled
# (at least one save point) and the latest background save failed.
# This will make the user aware (in a hard way) that data is not persisting
# on disk properly, otherwise chances are that no one will notice and some
# disaster will happen.
#
# If the background saving process will start working again Redis will
# If the background saving process will start working again KeyDB will
# automatically allow writes again.
#
# However if you have setup your proper monitoring of the Redis server
# and persistence, you may want to disable this feature so that Redis will
# However if you have setup your proper monitoring of the KeyDB server
# and persistence, you may want to disable this feature so that KeyDB will
# continue to work as usual even if there are problems with disk,
# permissions, and so forth.
stop-writes-on-bgsave-error yes
@ -264,18 +264,18 @@ dir ./
################################# REPLICATION #################################
# Master-Replica replication. Use replicaof to make a Redis instance a copy of
# another Redis server. A few things to understand ASAP about Redis replication.
# Master-Replica replication. Use replicaof to make a KeyDB instance a copy of
# another KeyDB server. A few things to understand ASAP about KeyDB replication.
#
# +------------------+ +---------------+
# | Master | ---> | Replica |
# | (receive writes) | | (exact copy) |
# +------------------+ +---------------+
#
# 1) Redis replication is asynchronous, but you can configure a master to
# 1) KeyDB replication is asynchronous, but you can configure a master to
# stop accepting writes if it appears to be not connected with at least
# a given number of replicas.
# 2) Redis replicas are able to perform a partial resynchronization with the
# 2) KeyDB replicas are able to perform a partial resynchronization with the
# master if the replication link is lost for a relatively small amount of
# time. You may want to configure the replication backlog size (see the next
# sections of this file) with a sensible value depending on your needs.
@ -292,7 +292,7 @@ dir ./
#
# masterauth <master-password>
#
# However this is not enough if you are using Redis ACLs (for Redis version
# However this is not enough if you are using KeyDB ACLs (for Redis version
# 6 or greater), and the default user is not capable of running the PSYNC
# command and/or other commands needed for replication. In this case it's
# better to configure a special user to use with replication, and specify the
@ -345,10 +345,10 @@ replica-read-only yes
# synchronization". An RDB file is transmitted from the master to the replicas.
# The transmission can happen in two different ways:
#
# 1) Disk-backed: The Redis master creates a new process that writes the RDB
# 1) Disk-backed: The KeyDB master creates a new process that writes the RDB
# file on disk. Later the file is transferred by the parent
# process to the replicas incrementally.
# 2) Diskless: The Redis master creates a new process that directly writes the
# 2) Diskless: The KeyDB master creates a new process that directly writes the
# RDB file to replica sockets, without touching the disk at all.
#
# With disk-backed replication, while the RDB file is generated, more replicas
@ -397,7 +397,7 @@ repl-diskless-sync-delay 5
# Disable TCP_NODELAY on the replica socket after SYNC?
#
# If you select "yes" Redis will use a smaller number of TCP packets and
# If you select "yes" KeyDB will use a smaller number of TCP packets and
# less bandwidth to send data to replicas. But this can add a delay for
# the data to appear on the replica side, up to 40 milliseconds with
# Linux kernels using a default configuration.
@ -436,8 +436,8 @@ repl-disable-tcp-nodelay no
#
# repl-backlog-ttl 3600
# The replica priority is an integer number published by Redis in the INFO output.
# It is used by Redis Sentinel in order to select a replica to promote into a
# The replica priority is an integer number published by KeyDB in the INFO output.
# It is used by KeyDB Sentinel in order to select a replica to promote into a
# master if the master is no longer working correctly.
#
# A replica with a low priority number is considered better for promotion, so
@ -446,7 +446,7 @@ repl-disable-tcp-nodelay no
#
# However a special priority of 0 marks the replica as not able to perform the
# role of master, so a replica with priority of 0 will never be selected by
# Redis Sentinel for promotion.
# KeyDB Sentinel for promotion.
#
# By default the priority is 100.
replica-priority 100
@ -473,10 +473,10 @@ replica-priority 100
# By default min-replicas-to-write is set to 0 (feature disabled) and
# min-replicas-max-lag is set to 10.
# A Redis master is able to list the address and port of the attached
# A KeyDB master is able to list the address and port of the attached
# replicas in different ways. For example the "INFO replication" section
# offers this information, which is used, among other tools, by
# Redis Sentinel in order to discover replica instances.
# KeyDB Sentinel in order to discover replica instances.
# Another place where this info is available is in the output of the
# "ROLE" command of a master.
#
@ -504,7 +504,7 @@ replica-priority 100
################################## SECURITY ###################################
# Warning: since Redis is pretty fast an outside user can try up to
# Warning: since KeyDB is pretty fast an outside user can try up to
# 1 million passwords per second against a modern box. This means that you
# should use very strong passwords, otherwise they will be very easy to break.
# Note that because the password is really a shared secret between the client
@ -512,7 +512,7 @@ replica-priority 100
# can be easily a long string from /dev/urandom or whatever, so by using a
# long and unguessable password no brute force attack will be possible.
# Redis ACL users are defined in the following format:
# KeyDB ACL users are defined in the following format:
#
# user <username> ... acl rules ...
#
@ -539,7 +539,7 @@ replica-priority 100
# +@<category> Allow the execution of all the commands in such category
# with valid categories are like @admin, @set, @sortedset, ...
# and so forth, see the full list in the server.c file where
# the Redis command table is described and defined.
# the KeyDB command table is described and defined.
# The special category @all means all the commands, but currently
# present in the server, and that will be loaded in the future
# via modules.
@ -606,9 +606,9 @@ replica-priority 100
# ACL file, the server will refuse to start.
#
# The format of the external ACL user file is exactly the same as the
# format that is used inside redis.conf to describe users.
# format that is used inside keydb.conf to describe users.
#
# aclfile /etc/redis/users.acl
# aclfile /etc/keydb/users.acl
# IMPORTANT NOTE: starting with Redis 6 "requirepass" is just a compatibility
# layer on top of the new ACL system. The option effect will be just setting
@ -646,12 +646,12 @@ replica-priority 100
################################### CLIENTS ####################################
# Set the max number of connected clients at the same time. By default
# this limit is set to 10000 clients, however if the Redis server is not
# this limit is set to 10000 clients, however if the KeyDB server is not
# able to configure the process file limit to allow for the specified limit
# the max number of allowed clients is set to the current file limit
# minus 32 (as Redis reserves a few file descriptors for internal uses).
# minus 32 (as KeyDB reserves a few file descriptors for internal uses).
#
# Once the limit is reached Redis will close all the new connections sending
# Once the limit is reached KeyDB will close all the new connections sending
# an error 'max number of clients reached'.
#
# maxclients 10000
@ -659,15 +659,15 @@ replica-priority 100
############################## MEMORY MANAGEMENT ################################
# Set a memory usage limit to the specified amount of bytes.
# When the memory limit is reached Redis will try to remove keys
# When the memory limit is reached KeyDB will try to remove keys
# according to the eviction policy selected (see maxmemory-policy).
#
# If Redis can't remove keys according to the policy, or if the policy is
# set to 'noeviction', Redis will start to reply with errors to commands
# If KeyDB can't remove keys according to the policy, or if the policy is
# set to 'noeviction', KeyDB will start to reply with errors to commands
# that would use more memory, like SET, LPUSH, and so on, and will continue
# to reply to read-only commands like GET.
#
# This option is usually useful when using Redis as an LRU or LFU cache, or to
# This option is usually useful when using KeyDB as an LRU or LFU cache, or to
# set a hard memory limit for an instance (using the 'noeviction' policy).
#
# WARNING: If you have replicas attached to an instance with maxmemory on,
@ -683,7 +683,7 @@ replica-priority 100
#
# maxmemory <bytes>
# MAXMEMORY POLICY: how Redis will select what to remove when maxmemory
# MAXMEMORY POLICY: how KeyDB will select what to remove when maxmemory
# is reached. You can select among five behaviors:
#
# volatile-lru -> Evict using approximated LRU among the keys with an expire set.
@ -701,7 +701,7 @@ replica-priority 100
# Both LRU, LFU and volatile-ttl are implemented using approximated
# randomized algorithms.
#
# Note: with any of the above policies, Redis will return an error on write
# Note: with any of the above policies, KeyDB will return an error on write
# operations, when there are no suitable keys for eviction.
#
# At the date of writing these commands are: set setnx setex append
@ -716,7 +716,7 @@ replica-priority 100
# LRU, LFU and minimal TTL algorithms are not precise algorithms but approximated
# algorithms (in order to save memory), so you can tune it for speed or
# accuracy. For default Redis will check five keys and pick the one that was
# accuracy. For default KeyDB will check five keys and pick the one that was
# used less recently, you can change the sample size using the following
# configuration directive.
#
@ -747,16 +747,16 @@ replica-priority 100
############################# LAZY FREEING ####################################
# Redis has two primitives to delete keys. One is called DEL and is a blocking
# KeyDB has two primitives to delete keys. One is called DEL and is a blocking
# deletion of the object. It means that the server stops processing new commands
# in order to reclaim all the memory associated with an object in a synchronous
# way. If the key deleted is associated with a small object, the time needed
# in order to execute the DEL command is very small and comparable to most other
# O(1) or O(log_N) commands in Redis. However if the key is associated with an
# O(1) or O(log_N) commands in KeyDB. However if the key is associated with an
# aggregated value containing millions of elements, the server can block for
# a long time (even seconds) in order to complete the operation.
#
# For the above reasons Redis also offers non blocking deletion primitives
# For the above reasons KeyDB also offers non blocking deletion primitives
# such as UNLINK (non blocking DEL) and the ASYNC option of FLUSHALL and
# FLUSHDB commands, in order to reclaim memory in background. Those commands
# are executed in constant time. Another thread will incrementally free the
@ -764,9 +764,9 @@ replica-priority 100
#
# DEL, UNLINK and ASYNC option of FLUSHALL and FLUSHDB are user-controlled.
# It's up to the design of the application to understand when it is a good
# idea to use one or the other. However the Redis server sometimes has to
# idea to use one or the other. However the KeyDB server sometimes has to
# delete keys or flush the whole database as a side effect of other operations.
# Specifically Redis deletes objects independently of a user call in the
# Specifically KeyDB deletes objects independently of a user call in the
# following scenarios:
#
# 1) On eviction, because of the maxmemory and maxmemory policy configurations,
@ -796,20 +796,20 @@ replica-lazy-flush no
############################## APPEND ONLY MODE ###############################
# By default Redis asynchronously dumps the dataset on disk. This mode is
# good enough in many applications, but an issue with the Redis process or
# By default KeyDB asynchronously dumps the dataset on disk. This mode is
# good enough in many applications, but an issue with the KeyDB process or
# a power outage may result into a few minutes of writes lost (depending on
# the configured save points).
#
# The Append Only File is an alternative persistence mode that provides
# much better durability. For instance using the default data fsync policy
# (see later in the config file) Redis can lose just one second of writes in a
# (see later in the config file) KeyDB can lose just one second of writes in a
# dramatic event like a server power outage, or a single write if something
# wrong with the Redis process itself happens, but the operating system is
# wrong with the KeyDB process itself happens, but the operating system is
# still running correctly.
#
# AOF and RDB persistence can be enabled at the same time without problems.
# If the AOF is enabled on startup Redis will load the AOF, that is the file
# If the AOF is enabled on startup KeyDB will load the AOF, that is the file
# with the better durability guarantees.
#
# Please check http://redis.io/topics/persistence for more information.
@ -824,7 +824,7 @@ appendfilename "appendonly.aof"
# instead of waiting for more data in the output buffer. Some OS will really flush
# data on disk, some other OS will just try to do it ASAP.
#
# Redis supports three different modes:
# KeyDB supports three different modes:
#
# no: don't fsync, just let the OS flush the data when it wants. Faster.
# always: fsync after every write to the append only log. Slow, Safest.
@ -850,7 +850,7 @@ appendfsync everysec
# When the AOF fsync policy is set to always or everysec, and a background
# saving process (a background save or AOF log background rewriting) is
# performing a lot of I/O against the disk, in some Linux configurations
# Redis may block too long on the fsync() call. Note that there is no fix for
# KeyDB may block too long on the fsync() call. Note that there is no fix for
# this currently, as even performing fsync in a different thread will block
# our synchronous write(2) call.
#
@ -858,7 +858,7 @@ appendfsync everysec
# that will prevent fsync() from being called in the main process while a
# BGSAVE or BGREWRITEAOF is in progress.
#
# This means that while another child is saving, the durability of Redis is
# This means that while another child is saving, the durability of KeyDB is
# the same as "appendfsync none". In practical terms, this means that it is
# possible to lose up to 30 seconds of log in the worst scenario (with the
# default Linux settings).
@ -869,10 +869,10 @@ appendfsync everysec
no-appendfsync-on-rewrite no
# Automatic rewrite of the append only file.
# Redis is able to automatically rewrite the log file implicitly calling
# KeyDB is able to automatically rewrite the log file implicitly calling
# BGREWRITEAOF when the AOF log size grows by the specified percentage.
#
# This is how it works: Redis remembers the size of the AOF file after the
# This is how it works: KeyDB remembers the size of the AOF file after the
# latest rewrite (if no rewrite has happened since the restart, the size of
# the AOF at startup is used).
#
@ -888,19 +888,19 @@ no-appendfsync-on-rewrite no
auto-aof-rewrite-percentage 100
auto-aof-rewrite-min-size 64mb
# An AOF file may be found to be truncated at the end during the Redis
# An AOF file may be found to be truncated at the end during the KeyDB
# startup process, when the AOF data gets loaded back into memory.
# This may happen when the system where Redis is running
# This may happen when the system where KeyDB is running
# crashes, especially when an ext4 filesystem is mounted without the
# data=ordered option (however this can't happen when Redis itself
# data=ordered option (however this can't happen when KeyDB itself
# crashes or aborts but the operating system still works correctly).
#
# Redis can either exit with an error when this happens, or load as much
# KeyDB can either exit with an error when this happens, or load as much
# data as possible (the default now) and start if the AOF file is found
# to be truncated at the end. The following option controls this behavior.
#
# If aof-load-truncated is set to yes, a truncated AOF file is loaded and
# the Redis server starts emitting a log to inform the user of the event.
# the KeyDB server starts emitting a log to inform the user of the event.
# Otherwise if the option is set to no, the server aborts with an error
# and refuses to start. When the option is set to no, the user requires
# to fix the AOF file using the "keydb-check-aof" utility before to restart
@ -908,17 +908,17 @@ auto-aof-rewrite-min-size 64mb
#
# Note that if the AOF file will be found to be corrupted in the middle
# the server will still exit with an error. This option only applies when
# Redis will try to read more data from the AOF file but not enough bytes
# KeyDB will try to read more data from the AOF file but not enough bytes
# will be found.
aof-load-truncated yes
# When rewriting the AOF file, Redis is able to use an RDB preamble in the
# When rewriting the AOF file, KeyDB is able to use an RDB preamble in the
# AOF file for faster rewrites and recoveries. When this option is turned
# on the rewritten AOF file is composed of two different stanzas:
#
# [RDB file][AOF tail]
#
# When loading Redis recognizes that the AOF file starts with the "REDIS"
# When loading KeyDB recognizes that the AOF file starts with the "REDIS"
# string and loads the prefixed RDB file, and continues loading the AOF
# tail.
aof-use-rdb-preamble yes
@ -927,7 +927,7 @@ aof-use-rdb-preamble yes
# Max execution time of a Lua script in milliseconds.
#
# If the maximum execution time is reached Redis will log that a script is
# If the maximum execution time is reached KeyDB will log that a script is
# still in execution after the maximum allowed time and will start to
# reply to queries with an error.
#
@ -941,17 +941,17 @@ aof-use-rdb-preamble yes
# Set it to 0 or a negative value for unlimited execution without warnings.
lua-time-limit 5000
################################ REDIS CLUSTER ###############################
################################ KEYDB CLUSTER ###############################
# Normal Redis instances can't be part of a Redis Cluster; only nodes that are
# started as cluster nodes can. In order to start a Redis instance as a
# Normal KeyDB instances can't be part of a KeyDB Cluster; only nodes that are
# started as cluster nodes can. In order to start a KeyDB instance as a
# cluster node enable the cluster support uncommenting the following:
#
# cluster-enabled yes
# Every cluster node has a cluster configuration file. This file is not
# intended to be edited by hand. It is created and updated by Redis nodes.
# Every Redis Cluster node requires a different cluster configuration file.
# intended to be edited by hand. It is created and updated by KeyDB nodes.
# Every KeyDB Cluster node requires a different cluster configuration file.
# Make sure that instances running in the same system do not have
# overlapping cluster configuration file names.
#
@ -1027,7 +1027,7 @@ lua-time-limit 5000
#
# cluster-migration-barrier 1
# By default Redis Cluster nodes stop accepting queries if they detect there
# By default KeyDB Cluster nodes stop accepting queries if they detect there
# is at least an hash slot uncovered (no available node is serving it).
# This way if the cluster is partially down (for example a range of hash slots
# are no longer covered) all the cluster becomes, eventually, unavailable.
@ -1055,11 +1055,11 @@ lua-time-limit 5000
########################## CLUSTER DOCKER/NAT support ########################
# In certain deployments, Redis Cluster nodes address discovery fails, because
# In certain deployments, KeyDB Cluster nodes address discovery fails, because
# addresses are NAT-ted or because ports are forwarded (the typical case is
# Docker and other containers).
#
# In order to make Redis Cluster working in such environments, a static
# In order to make KeyDB Cluster working in such environments, a static
# configuration where each node knows its public address is needed. The
# following two options are used for this scope, and are:
#
@ -1072,7 +1072,7 @@ lua-time-limit 5000
# so that other nodes will be able to correctly map the address of the node
# publishing the information.
#
# If the above options are not used, the normal Redis Cluster auto-detection
# If the above options are not used, the normal KeyDB Cluster auto-detection
# will be used instead.
#
# Note that when remapped, the bus port may not be at the fixed offset of
@ -1088,14 +1088,14 @@ lua-time-limit 5000
################################## SLOW LOG ###################################
# The Redis Slow Log is a system to log queries that exceeded a specified
# The KeyDB Slow Log is a system to log queries that exceeded a specified
# execution time. The execution time does not include the I/O operations
# like talking with the client, sending the reply and so forth,
# but just the time needed to actually execute the command (this is the only
# stage of command execution where the thread is blocked and can not serve
# other requests in the meantime).
#
# You can configure the slow log with two parameters: one tells Redis
# You can configure the slow log with two parameters: one tells KeyDB
# what is the execution time, in microseconds, to exceed in order for the
# command to get logged, and the other parameter is the length of the
# slow log. When a new command is logged the oldest one is removed from the
@ -1112,9 +1112,9 @@ slowlog-max-len 128
################################ LATENCY MONITOR ##############################
# The Redis latency monitoring subsystem samples different operations
# The KeyDB latency monitoring subsystem samples different operations
# at runtime in order to collect data related to possible sources of
# latency of a Redis instance.
# latency of a KeyDB instance.
#
# Via the LATENCY command this information is available to the user that can
# print graphs and obtain reports.
@ -1133,7 +1133,7 @@ latency-monitor-threshold 0
############################# EVENT NOTIFICATION ##############################
# Redis can notify Pub/Sub clients about events happening in the key space.
# KeyDB can notify Pub/Sub clients about events happening in the key space.
# This feature is documented at http://redis.io/topics/notifications
#
# For instance if keyspace events notification is enabled, and a client
@ -1143,7 +1143,7 @@ latency-monitor-threshold 0
# PUBLISH __keyspace@0__:foo del
# PUBLISH __keyevent@0__:del foo
#
# It is possible to select the events that Redis will notify among a set
# It is possible to select the events that KeyDB will notify among a set
# of classes. Every class is identified by a single character:
#
# K Keyspace events, published with __keyspace@<db>__ prefix.
@ -1179,12 +1179,12 @@ notify-keyspace-events ""
############################### GOPHER SERVER #################################
# Redis contains an implementation of the Gopher protocol, as specified in
# KeyDB contains an implementation of the Gopher protocol, as specified in
# the RFC 1436 (https://www.ietf.org/rfc/rfc1436.txt).
#
# The Gopher protocol was very popular in the late '90s. It is an alternative
# to the web, and the implementation both server and client side is so simple
# that the Redis server has just 100 lines of code in order to implement this
# that the KeyDB server has just 100 lines of code in order to implement this
# support.
#
# What do you do with Gopher nowadays? Well Gopher never *really* died, and
@ -1194,18 +1194,18 @@ notify-keyspace-events ""
# controlled, and it's cool to create an alternative space for people that
# want a bit of fresh air.
#
# Anyway for the 10nth birthday of the Redis, we gave it the Gopher protocol
# Anyway for the 10th birthday of KeyDB, we gave it the Gopher protocol
# as a gift.
#
# --- HOW IT WORKS? ---
#
# The Redis Gopher support uses the inline protocol of Redis, and specifically
# The KeyDB Gopher support uses the inline protocol of KeyDB, and specifically
# two kind of inline requests that were anyway illegal: an empty request
# or any request that starts with "/" (there are no Redis commands starting
# or any request that starts with "/" (there are no KeyDB commands starting
# with such a slash). Normal RESP2/RESP3 requests are completely out of the
# path of the Gopher protocol implementation and are served as usually as well.
#
# If you open a connection to Redis when Gopher is enabled and send it
# If you open a connection to KeyDB when Gopher is enabled and send it
# a string like "/foo", if there is a key named "/foo" it is served via the
# Gopher protocol.
#
@ -1216,7 +1216,7 @@ notify-keyspace-events ""
#
# --- SECURITY WARNING ---
#
# If you plan to put Redis on the internet in a publicly accessible address
# If you plan to put KeyDB on the internet in a publicly accessible address
# to serve Gopher pages MAKE SURE TO SET A PASSWORD to the instance.
# Once a password is set:
#
@ -1310,8 +1310,8 @@ stream-node-max-bytes 4096
stream-node-max-entries 100
# Active rehashing uses 1 millisecond every 100 milliseconds of CPU time in
# order to help rehashing the main Redis hash table (the one mapping top-level
# keys to values). The hash table implementation Redis uses (see dict.c)
# order to help rehashing the main KeyDB hash table (the one mapping top-level
# keys to values). The hash table implementation KeyDB uses (see dict.c)
# performs a lazy rehashing: the more operation you run into a hash table
# that is rehashing, the more rehashing "steps" are performed, so if the
# server is idle the rehashing is never complete and some more memory is used
@ -1322,7 +1322,7 @@ stream-node-max-entries 100
#
# If unsure:
# use "activerehashing no" if you have hard latency requirements and it is
# not a good thing in your environment that Redis can reply from time to time
# not a good thing in your environment that KeyDB can reply from time to time
# to queries with 2 milliseconds delay.
#
# use "activerehashing yes" if you don't have such hard requirements but
@ -1374,21 +1374,21 @@ client-output-buffer-limit pubsub 32mb 8mb 60
#
# client-query-buffer-limit 1gb
# In the Redis protocol, bulk requests, that are, elements representing single
# In the KeyDB protocol, bulk requests, that are, elements representing single
# strings, are normally limited to 512 mb. However you can change this limit
# here.
#
# proto-max-bulk-len 512mb
# Redis calls an internal function to perform many background tasks, like
# KeyDB calls an internal function to perform many background tasks, like
# closing connections of clients in timeout, purging expired keys that are
# never requested, and so forth.
#
# Not all tasks are performed with the same frequency, but Redis checks for
# Not all tasks are performed with the same frequency, but KeyDB checks for
# tasks to perform according to the specified "hz" value.
#
# By default "hz" is set to 10. Raising the value will use more CPU when
# Redis is idle, but at the same time will make Redis more responsive when
# KeyDB is idle, but at the same time will make KeyDB more responsive when
# there are many keys expiring at the same time, and timeouts may be
# handled with more precision.
#
@ -1402,7 +1402,7 @@ hz 10
# avoid too many clients are processed for each background task invocation
# in order to avoid latency spikes.
#
# Since the default HZ value by default is conservatively set to 10, Redis
# Since the default HZ value by default is conservatively set to 10, KeyDB
# offers, and enables by default, the ability to use an adaptive HZ value
# which will temporary raise when there are many connected clients.
#
@ -1419,22 +1419,22 @@ dynamic-hz yes
# big latency spikes.
aof-rewrite-incremental-fsync yes
# When redis saves RDB file, if the following option is enabled
# When KeyDB saves RDB file, if the following option is enabled
# the file will be fsync-ed every 32 MB of data generated. This is useful
# in order to commit the file to the disk more incrementally and avoid
# big latency spikes.
rdb-save-incremental-fsync yes
# Redis LFU eviction (see maxmemory setting) can be tuned. However it is a good
# KeyDB LFU eviction (see maxmemory setting) can be tuned. However it is a good
# idea to start with the default settings and only change them after investigating
# how to improve the performances and how the keys LFU change over time, which
# is possible to inspect via the OBJECT FREQ command.
#
# There are two tunable parameters in the Redis LFU implementation: the
# There are two tunable parameters in the KeyDB LFU implementation: the
# counter logarithm factor and the counter decay time. It is important to
# understand what the two parameters mean before changing them.
#
# The LFU counter is just 8 bits per key, it's maximum value is 255, so Redis
# The LFU counter is just 8 bits per key, it's maximum value is 255, so KeyDB
# uses a probabilistic increment with logarithmic behavior. Given the value
# of the old counter, when a key is accessed, the counter is incremented in
# this way:
@ -1486,7 +1486,7 @@ rdb-save-incremental-fsync yes
# What is active defragmentation?
# -------------------------------
#
# Active (online) defragmentation allows a Redis server to compact the
# Active (online) defragmentation allows a KeyDB server to compact the
# spaces left between small allocations and deallocations of data in memory,
# thus allowing to reclaim back memory.
#
@ -1498,7 +1498,7 @@ rdb-save-incremental-fsync yes
# in an "hot" way, while the server is running.
#
# Basically when the fragmentation is over a certain level (see the
# configuration options below) Redis will start to create new copies of the
# configuration options below) KeyDB will start to create new copies of the
# values in contiguous memory regions by exploiting certain specific Jemalloc
# features (in order to understand if an allocation is causing fragmentation
# and to allocate it in a better place), and at the same time, will release the
@ -1507,8 +1507,8 @@ rdb-save-incremental-fsync yes
#
# Important things to understand:
#
# 1. This feature is disabled by default, and only works if you compiled Redis
# to use the copy of Jemalloc we ship with the source code of Redis.
# 1. This feature is disabled by default, and only works if you compiled KeyDB
# to use the copy of Jemalloc we ship with the source code of KeyDB.
# This is the default with Linux builds.
#
# 2. You never need to enable this feature if you don't have fragmentation
@ -1561,3 +1561,10 @@ server-threads 2
# replicas will still sync in the normal way and incorrect ordering when
# bringing up replicas can result in data loss (the first master will win).
# active-replica yes
# Enable Pro? KeyDB pro provides support for pro only features
# note: you may omit the license key to demo pro features for a limited time
# enable-pro [License Key]
# Enable FLASH support? (Pro Only)
# storage-provider flash /path/to/flash/db

View File

@ -74,9 +74,11 @@ endif
# To get ARM stack traces if Redis crashes we need a special C flag.
ifneq (,$(filter aarch64 armv,$(uname_M)))
CFLAGS+=-funwind-tables
CXXFLAGS+=-funwind-tables
else
ifneq (,$(findstring armv,$(uname_M)))
CFLAGS+=-funwind-tables
CXXFLAGS+=-funwind-tables
endif
endif
@ -101,7 +103,7 @@ endif
-include .make-settings
FINAL_CFLAGS=$(STD) $(WARN) $(OPT) $(DEBUG) $(CFLAGS) $(REDIS_CFLAGS)
FINAL_CXXFLAGS=$(CXX_STD) $(WARN) $(OPT) $(DEBUG) $(CFLAGS) $(CXXFLAGS) $(REDIS_CFLAGS)
FINAL_CXXFLAGS=$(CXX_STD) $(WARN) $(OPT) $(DEBUG) $(CXXFLAGS) $(REDIS_CFLAGS)
FINAL_LDFLAGS=$(LDFLAGS) $(REDIS_LDFLAGS) $(DEBUG)
FINAL_LIBS=-lm
DEBUG=-g -ggdb
@ -110,13 +112,15 @@ ifeq ($(uname_S),SunOS)
# SunOS
ifneq ($(@@),32bit)
CFLAGS+= -m64
CXXFLAGS+= -m64
LDFLAGS+= -m64
endif
DEBUG=-g
DEBUG_FLAGS=-g
export CFLAGS LDFLAGS DEBUG DEBUG_FLAGS
export CFLAGS CXXFLAGS LDFLAGS DEBUG DEBUG_FLAGS
INSTALL=cp -pf
FINAL_CFLAGS+= -D__EXTENSIONS__ -D_XPG6
FINAL_CXXFLAGS+= -D__EXTENSIONS__ -D_XPG6
FINAL_LIBS+= -ldl -lnsl -lsocket -lresolv -lpthread -lrt
else
ifeq ($(uname_S),Darwin)
@ -133,6 +137,7 @@ ifeq ($(uname_S),OpenBSD)
FINAL_LIBS+= -lpthread
ifeq ($(USE_BACKTRACE),yes)
FINAL_CFLAGS+= -DUSE_BACKTRACE -I/usr/local/include
FINAL_CXXFLAGS+= -DUSE_BACKTRACE -I/usr/local/include
FINAL_LDFLAGS+= -L/usr/local/lib
FINAL_LIBS+= -lexecinfo
endif
@ -150,6 +155,7 @@ else
FINAL_LDFLAGS+= -rdynamic
FINAL_LIBS+=-ldl -pthread -lrt -luuid
FINAL_CFLAGS += -DMOTD
FINAL_CXXFLAGS += -DMOTD
endif
endif
endif
@ -187,7 +193,7 @@ ifeq ($(MALLOC),memkind)
endif
REDIS_CC=$(QUIET_CC)$(CC) $(FINAL_CFLAGS)
REDIS_CXX=$(QUIET_CC)$(CC) $(FINAL_CXXFLAGS)
REDIS_CXX=$(QUIET_CC)$(CXX) $(FINAL_CXXFLAGS)
KEYDB_AS=$(QUIET_CC) as --64 -g
REDIS_LD=$(QUIET_LINK)$(CXX) $(FINAL_LDFLAGS)
REDIS_INSTALL=$(QUIET_INSTALL)$(INSTALL)
@ -235,10 +241,13 @@ persist-settings: distclean
echo OPT=$(OPT) >> .make-settings
echo MALLOC=$(MALLOC) >> .make-settings
echo CFLAGS=$(CFLAGS) >> .make-settings
echo CXXFLAGS=$(CXXFLAGS) >> .make-settings
echo LDFLAGS=$(LDFLAGS) >> .make-settings
echo REDIS_CFLAGS=$(REDIS_CFLAGS) >> .make-settings
echo REDIS_CXXFLAGS=$(REDIS_CXXFLAGS) >> .make-settings
echo REDIS_LDFLAGS=$(REDIS_LDFLAGS) >> .make-settings
echo PREV_FINAL_CFLAGS=$(FINAL_CFLAGS) >> .make-settings
echo PREV_FINAL_CXXFLAGS=$(FINAL_CXXFLAGS) >> .make-settings
echo PREV_FINAL_LDFLAGS=$(FINAL_LDFLAGS) >> .make-settings
-(cd modules && $(MAKE))
-(cd ../deps && $(MAKE) $(DEPENDENCY_TARGETS))
@ -249,6 +258,8 @@ persist-settings: distclean
# Clean everything, persist settings and build dependencies if anything changed
ifneq ($(strip $(PREV_FINAL_CFLAGS)), $(strip $(FINAL_CFLAGS)))
.make-prerequisites: persist-settings
else ifneq ($(strip $(PREV_FINAL_CXXFLAGS)), $(strip $(FINAL_CXXFLAGS)))
.make-prerequisites: persist-settings
else ifneq ($(strip $(PREV_FINAL_LDFLAGS)), $(strip $(FINAL_LDFLAGS)))
.make-prerequisites: persist-settings
else
@ -283,20 +294,25 @@ $(REDIS_BENCHMARK_NAME): $(REDIS_BENCHMARK_OBJ)
dict-benchmark: dict.cpp zmalloc.cpp sds.c siphash.c
$(REDIS_CC) $(FINAL_CFLAGS) $^ -D DICT_BENCHMARK_MAIN -o $@ $(FINAL_LIBS)
DEP = $(REDIS_SERVER_OBJ:%.o=%.d) $(REDIS_CLI_OBJ:%.o=%.d) $(REDIS_BENCHMARK_OBJ:%.o=%.d)
-include $(DEP)
# Because the jemalloc.h header is generated as a part of the jemalloc build,
# building it should complete before building any other object. Instead of
# depending on a single artifact, build all dependencies first.
%.o: %.c .make-prerequisites
$(REDIS_CC) -c $<
$(REDIS_CC) -MMD -c $<
%.o: %.cpp .make-prerequisites
$(REDIS_CXX) -c $<
$(REDIS_CXX) -MMD -c $<
%.o: %.asm .make-prerequisites
$(KEYDB_AS) $< -o $@
clean:
rm -rf $(REDIS_SERVER_NAME) $(REDIS_SENTINEL_NAME) $(REDIS_CLI_NAME) $(REDIS_BENCHMARK_NAME) $(REDIS_CHECK_RDB_NAME) $(REDIS_CHECK_AOF_NAME) *.o *.gcda *.gcno *.gcov redis.info lcov-html Makefile.dep dict-benchmark
rm -f $(DEP)
.PHONY: clean
@ -334,7 +350,7 @@ bench: $(REDIS_BENCHMARK_NAME)
@echo ""
@echo "WARNING: if it fails under Linux you probably need to install libc6-dev-i386"
@echo ""
$(MAKE) CFLAGS="-m32" LDFLAGS="-m32"
$(MAKE) CXXFLAGS="-m32" CFLAGS="-m32" LDFLAGS="-m32"
gcov:
$(MAKE) REDIS_CFLAGS="-fprofile-arcs -ftest-coverage -DCOVERAGE_TEST" REDIS_LDFLAGS="-fprofile-arcs -ftest-coverage"

View File

@ -1123,7 +1123,7 @@ int ACLLoadConfiguredUsers(void) {
/* This function loads the ACL from the specified filename: every line
* is validated and should be either empty or in the format used to specify
* users in the redis.conf configuration or in the ACL file, that is:
* users in the keydb.conf configuration or in the ACL file, that is:
*
* user <username> ... rules ...
*
@ -1347,17 +1347,17 @@ cleanup:
/* This function is called once the server is already running, modules are
* loaded, and we are ready to start, in order to load the ACLs either from
* the pending list of users defined in redis.conf, or from the ACL file.
* the pending list of users defined in keydb.conf, or from the ACL file.
* The function will just exit with an error if the user is trying to mix
* both the loading methods. */
void ACLLoadUsersAtStartup(void) {
if (g_pserver->acl_filename[0] != '\0' && listLength(UsersToLoad) != 0) {
serverLog(LL_WARNING,
"Configuring Redis with users defined in redis.conf and at "
"Configuring KeyDB with users defined in keydb.conf and at "
"the same setting an ACL file path is invalid. This setup "
"is very likely to lead to configuration errors and security "
"holes, please define either an ACL file or declare users "
"directly in your redis.conf, but not both.");
"directly in your keydb.conf, but not both.");
exit(1);
}

View File

@ -84,7 +84,7 @@ fastlock g_lock("AE (global)");
#endif
thread_local aeEventLoop *g_eventLoopThisThread = NULL;
#define AE_ASSERT(x) if (!(x)) do { fprintf(stderr, "AE_ASSERT FAILURE %s: %d\n", __FILE__, __LINE__); *((volatile int*)0) = 1; } while(0)
#define AE_ASSERT(x) if (!(x)) do { fprintf(stderr, "AE_ASSERT FAILURE %s: %d\n", __FILE__, __LINE__); *((volatile int*)1) = 1; } while(0)
/* Include the best multiplexing layer supported by this system.
* The following should be ordered by performances, descending. */
@ -237,11 +237,11 @@ int aeCreateRemoteFileEvent(aeEventLoop *eventLoop, int fd, int mask,
cmd.clientData = clientData;
cmd.pctl = nullptr;
if (fSynchronous)
{
cmd.pctl = new (MALLOC_LOCAL) aeCommandControl();
std::unique_lock<std::mutex> ulock(cmd.pctl->mutexcv, std::defer_lock);
if (fSynchronous)
cmd.pctl->mutexcv.lock();
}
auto size = safe_write(eventLoop->fdCmdWrite, &cmd, sizeof(cmd));
if (size != sizeof(cmd))
{
@ -252,6 +252,7 @@ int aeCreateRemoteFileEvent(aeEventLoop *eventLoop, int fd, int mask,
if (fSynchronous)
{
std::unique_lock<std::mutex> ulock(cmd.pctl->mutexcv, std::defer_lock);
cmd.pctl->cv.wait(ulock);
ret = cmd.pctl->rval;
delete cmd.pctl;
@ -289,15 +290,17 @@ int aePostFunction(aeEventLoop *eventLoop, std::function<void()> fn, bool fSynch
cmd.pfn = new (MALLOC_LOCAL) std::function<void()>(fn);
cmd.pctl = nullptr;
if (fSynchronous)
{
cmd.pctl = new (MALLOC_LOCAL) aeCommandControl();
std::unique_lock<std::mutex> ulock(cmd.pctl->mutexcv, std::defer_lock);
if (fSynchronous)
cmd.pctl->mutexcv.lock();
}
auto size = write(eventLoop->fdCmdWrite, &cmd, sizeof(cmd));
AE_ASSERT(size == sizeof(cmd));
int ret = AE_OK;
if (fSynchronous)
{
std::unique_lock<std::mutex> ulock(cmd.pctl->mutexcv, std::defer_lock);
cmd.pctl->cv.wait(ulock);
ret = cmd.pctl->rval;
delete cmd.pctl;

View File

@ -165,7 +165,10 @@ void aofRewriteBufferAppend(unsigned char *s, unsigned long len) {
/* Install a file event to send data to the rewrite child if there is
* not one already. */
aeCreateRemoteFileEvent(g_pserver->rgthreadvar[IDX_EVENT_LOOP_MAIN].el, g_pserver->aof_pipe_write_data_to_child, AE_WRITABLE, aofChildWriteDiffData, NULL, FALSE);
aePostFunction(g_pserver->rgthreadvar[IDX_EVENT_LOOP_MAIN].el, []{
if (g_pserver->aof_pipe_write_data_to_child >= 0)
aeCreateFileEvent(g_pserver->rgthreadvar[IDX_EVENT_LOOP_MAIN].el, g_pserver->aof_pipe_write_data_to_child, AE_WRITABLE, aofChildWriteDiffData, NULL);
});
}
/* Write the buffer (possibly composed of multiple blocks) into the specified
@ -1566,6 +1569,7 @@ void aofClosePipes(void) {
aeDeleteFileEventAsync(serverTL->el,fdAofWritePipe,AE_WRITABLE);
close(fdAofWritePipe);
});
g_pserver->aof_pipe_write_data_to_child = -1;
close(g_pserver->aof_pipe_read_data_from_parent);
close(g_pserver->aof_pipe_write_ack_to_parent);

View File

@ -802,6 +802,8 @@ void loadServerConfigFromString(char *config) {
g_fTestMode = yesnotoi(argv[1]);
} else if (!strcasecmp(argv[0],"rdbfuzz-mode")) {
// NOP, handled in main
} else if (!strcasecmp(argv[0],"enable-pro")) {
cserver.fUsePro = true;
} else {
err = "Bad directive or wrong number of arguments"; goto loaderr;
}
@ -1727,7 +1729,7 @@ void rewriteConfigRewriteLine(struct rewriteConfigState *state, const char *opti
}
/* Write the long long 'bytes' value as a string in a way that is parsable
* inside redis.conf. If possible uses the GB, MB, KB notation. */
* inside keydb.conf. If possible uses the GB, MB, KB notation. */
int rewriteConfigFormatMemory(char *buf, size_t len, long long bytes) {
int gb = 1024*1024*1024;
int mb = 1024*1024;
@ -1890,7 +1892,7 @@ void rewriteConfigDirOption(struct rewriteConfigState *state) {
void rewriteConfigSlaveofOption(struct rewriteConfigState *state, const char *option) {
/* If this is a master, we want all the slaveof config options
* in the file to be removed. Note that if this is a cluster instance
* we don't want a slaveof directive inside redis.conf. */
* we don't want a slaveof directive inside keydb.conf. */
if (g_pserver->cluster_enabled || listLength(g_pserver->masters) == 0) {
rewriteConfigMarkAsProcessed(state,option);
return;

View File

@ -48,7 +48,7 @@ extern "C" int je_get_defrag_hint(void* ptr, int *bin_util, int *run_util);
/* forward declarations*/
void defragDictBucketCallback(void *privdata, dictEntry **bucketref);
dictEntry* replaceSateliteDictKeyPtrAndOrDefragDictEntry(dict *d, sds oldkey, sds newkey, uint64_t hash, long *defragged);
void replaceSateliteOSetKeyPtr(expireset &set, sds oldkey, sds newkey);
bool replaceSateliteOSetKeyPtr(expireset &set, sds oldkey, sds newkey);
/* Defrag helper for generic allocations.
*
@ -407,7 +407,7 @@ dictEntry* replaceSateliteDictKeyPtrAndOrDefragDictEntry(dict *d, sds oldkey, sd
return NULL;
}
void replaceSateliteOSetKeyPtr(expireset &set, sds oldkey, sds newkey) {
bool replaceSateliteOSetKeyPtr(expireset &set, sds oldkey, sds newkey) {
auto itr = set.find(oldkey);
if (itr != set.end())
{
@ -415,7 +415,10 @@ void replaceSateliteOSetKeyPtr(expireset &set, sds oldkey, sds newkey) {
eNew.setKeyUnsafe(newkey);
set.erase(itr);
set.insert(eNew);
serverAssert(set.find(newkey) != set.end());
return true;
}
return false;
}
long activeDefragQuickListNodes(quicklist *ql) {
@ -777,16 +780,22 @@ long defragKey(redisDb *db, dictEntry *de) {
long defragged = 0;
sds newsds;
ob = (robj*)dictGetVal(de);
/* Try to defrag the key name. */
newsds = activeDefragSds(keysds);
if (newsds)
{
defragged++, de->key = newsds;
if (!db->setexpire->empty()) {
replaceSateliteOSetKeyPtr(*db->setexpire, keysds, newsds);
if (!db->setexpire->empty()) {
bool fReplaced = replaceSateliteOSetKeyPtr(*db->setexpire, keysds, newsds);
serverAssert(fReplaced == ob->FExpires());
} else {
serverAssert(!ob->FExpires());
}
}
/* Try to defrag robj and / or string value. */
ob = (robj*)dictGetVal(de);
if ((newob = activeDefragStringOb(ob, &defragged))) {
de->v.val = newob;
ob = newob;
@ -839,6 +848,7 @@ long defragKey(redisDb *db, dictEntry *de) {
} else {
serverPanic("Unknown object type");
}
return defragged;
}

View File

@ -100,9 +100,28 @@ void activeExpireCycleExpire(redisDb *db, expireEntry &e, long long now) {
}
}
break;
case OBJ_LIST:
case OBJ_ZSET:
case OBJ_HASH:
if (hashTypeDelete(val,(sds)pfat->nextExpireEntry().spsubkey.get())) {
deleted++;
if (hashTypeLength(val) == 0) {
activeExpireCycleExpireFullKey(db, e.key());
return;
}
}
break;
case OBJ_ZSET:
if (zsetDel(val,(sds)pfat->nextExpireEntry().spsubkey.get())) {
deleted++;
if (zsetLength(val) == 0) {
activeExpireCycleExpireFullKey(db, e.key());
return;
}
}
break;
case OBJ_LIST:
default:
serverAssert(false);
}
@ -161,6 +180,7 @@ void expireMemberCore(client *c, robj *key, robj *subkey, long long basetime, lo
return;
}
double dblT;
switch (val->type)
{
case OBJ_SET:
@ -170,6 +190,20 @@ void expireMemberCore(client *c, robj *key, robj *subkey, long long basetime, lo
}
break;
case OBJ_HASH:
if (!hashTypeExists(val, szFromObj(subkey))) {
addReply(c,shared.czero);
return;
}
break;
case OBJ_ZSET:
if (zsetScore(val, szFromObj(subkey), &dblT) == C_ERR) {
addReply(c,shared.czero);
return;
}
break;
default:
addReplyError(c, "object type is unsupported");
return;

View File

@ -279,7 +279,7 @@ extern "C" void fastlock_lock(struct fastlock *lock)
int tid = gettid();
unsigned myticket = __atomic_fetch_add(&lock->m_ticket.m_avail, 1, __ATOMIC_RELEASE);
unsigned mask = (1U << (myticket % 32));
int cloops = 0;
unsigned cloops = 0;
ticket ticketT;
for (;;)
@ -289,9 +289,11 @@ extern "C" void fastlock_lock(struct fastlock *lock)
break;
#if defined(__i386__) || defined(__amd64__)
__asm__ ("pause");
__asm__ __volatile__ ("pause");
#elif defined(__arm__)
__asm__ __volatile__ ("yield");
#endif
if ((++cloops % 1024*1024) == 0)
if ((++cloops % 0x100000) == 0)
{
fastlock_sleep(lock, tid, ticketT.u, mask);
}

View File

@ -140,6 +140,7 @@ fastlock_unlock:
mov ecx, [rdi+64] # get current active (this one)
inc ecx # bump it to the next thread
mov [rdi+64], cx # give up our ticket (note: lock is not required here because the spinlock itself guards this variable)
mfence # sync other threads
# At this point the lock is removed, however we must wake up any pending futexs
mov r9d, 1 # eax is the bitmask for 2 threads
rol r9d, cl # place the mask in the right spot for the next 2 threads

View File

@ -4043,7 +4043,7 @@ int moduleGILAcquiredByModule(void) {
* used to send anything to the client, and has the db number where the event
* occurred as its selected db number.
*
* Notice that it is not necessary to enable notifications in redis.conf for
* Notice that it is not necessary to enable notifications in keydb.conf for
* module notifications to work.
*
* Warning: the notification callbacks are performed in a synchronous manner,

View File

@ -1919,7 +1919,7 @@ int rdbLoadRio(rio *rdb, rdbSaveInfo *rsi, int loading_aof) {
redisDb *db = g_pserver->db+0;
char buf[1024];
/* Key-specific attributes, set by opcodes before the key type. */
long long lru_idle = -1, lfu_freq = -1, expiretime = -1, now = mstime();
long long lru_idle = -1, lfu_freq = -1, expiretime = -1, now;
long long lru_clock = 0;
uint64_t mvcc_tstamp = OBJ_MVCC_INVALID;
robj *subexpireKey = nullptr;

View File

@ -6594,6 +6594,8 @@ static char *fetchMOTDFromCache()
static void setMOTDCache(const char *sz)
{
FILE *pf = fopen(szMotdCachePath(), "wb");
if (pf == NULL)
return;
size_t celem = fwrite(sz, strlen(sz), 1, pf);
(void)celem; // best effort
fclose(pf);

View File

@ -2298,7 +2298,8 @@ int connectWithMaster(redisMaster *mi) {
fd = anetTcpNonBlockBestEffortBindConnect(NULL,
mi->masterhost,mi->masterport,NET_FIRST_BIND_ADDR);
if (fd == -1) {
serverLog(LL_WARNING,"Unable to connect to MASTER: %s",
int sev = g_pserver->enable_multimaster ? LL_NOTICE : LL_WARNING; // with multimaster it's not unheard of to intentionally have downed masters
serverLog(sev,"Unable to connect to MASTER: %s",
strerror(errno));
return C_ERR;
}
@ -3056,12 +3057,12 @@ void replicationCron(void) {
if (mi->masterhost && mi->repl_state == REPL_STATE_TRANSFER &&
(time(NULL)-mi->repl_transfer_lastio) > g_pserver->repl_timeout)
{
serverLog(LL_WARNING,"Timeout receiving bulk data from MASTER... If the problem persists try to set the 'repl-timeout' parameter in redis.conf to a larger value.");
serverLog(LL_WARNING,"Timeout receiving bulk data from MASTER... If the problem persists try to set the 'repl-timeout' parameter in keydb.conf to a larger value.");
cancelReplicationHandshake(mi);
}
/* Timed out master when we are an already connected replica? */
if (mi->masterhost && mi->repl_state == REPL_STATE_CONNECTED &&
if (mi->masterhost && mi->master && mi->repl_state == REPL_STATE_CONNECTED &&
(time(NULL)-mi->master->lastinteraction) > g_pserver->repl_timeout)
{
serverLog(LL_WARNING,"MASTER timeout: no data nor PING received...");
@ -3516,4 +3517,4 @@ static void propagateMasterStaleKeys()
}
decrRefCount(rgobj[0]);
}
}

View File

@ -53,11 +53,18 @@ static inline int sdsHdrSize(char type) {
return sizeof(struct sdshdr32);
case SDS_TYPE_64:
return sizeof(struct sdshdr64);
case SDS_TYPE_REFCOUNTED:
return sizeof(struct sdshdrrefcount);
}
return 0;
}
static inline char sdsReqType(size_t string_size) {
static inline char sdsReqType(ssize_t string_size) {
if (string_size < 0){
string_size = -string_size;
if (string_size < 1<<16)
return SDS_TYPE_REFCOUNTED;
}
if (string_size < 1<<5)
return SDS_TYPE_5;
if (string_size < 1<<8)
@ -86,10 +93,12 @@ static inline char sdsReqType(size_t string_size) {
* You can print the string with printf() as there is an implicit \0 at the
* end of the string. However the string is binary safe and can contain
* \0 characters in the middle, as the length is stored in the sds header. */
sds sdsnewlen(const void *init, size_t initlen) {
sds sdsnewlen(const void *init, ssize_t initlen) {
void *sh;
sds s;
char type = sdsReqType(initlen);
if (initlen < 0)
initlen = -initlen;
/* Empty strings are usually created in order to append. Use type 8
* since type 5 is not good at this. */
if (type == SDS_TYPE_5 && initlen == 0) type = SDS_TYPE_8;
@ -137,6 +146,13 @@ sds sdsnewlen(const void *init, size_t initlen) {
*fp = type;
break;
}
case SDS_TYPE_REFCOUNTED: {
SDS_HDR_VAR_REFCOUNTED(s);
sh->len = initlen;
sh->refcount = 1;
*fp = type;
break;
}
}
if (initlen && init)
memcpy(s, init, initlen);
@ -161,9 +177,25 @@ sds sdsdup(const char *s) {
return sdsnewlen(s, sdslen(s));
}
sds sdsdupshared(const char *s) {
unsigned char flags = s[-1];
if ((flags & SDS_TYPE_MASK) != SDS_TYPE_REFCOUNTED)
return sdsnewlen(s, -sdslen(s));
SDS_HDR_VAR_REFCOUNTED(s);
__atomic_fetch_add(&sh->refcount, 1, __ATOMIC_RELAXED);
return (sds)s;
}
/* Free an sds string. No operation is performed if 's' is NULL. */
void sdsfree(const char *s) {
if (s == NULL) return;
unsigned char flags = s[-1];
if ((flags & SDS_TYPE_MASK) == SDS_TYPE_REFCOUNTED)
{
SDS_HDR_VAR_REFCOUNTED(s);
if (__atomic_fetch_sub(&sh->refcount, 1, __ATOMIC_RELAXED) > 1)
return;
}
s_free((char*)s-sdsHdrSize(s[-1]));
}
@ -368,6 +400,11 @@ void sdsIncrLen(sds s, ssize_t incr) {
len = (sh->len += incr);
break;
}
case SDS_TYPE_REFCOUNTED: {
SDS_HDR_VAR_REFCOUNTED(s);
len = (sh->len += incr);
break;
}
default: len = 0; /* Just to avoid compilation warnings. */
}
s[len] = '\0';
@ -787,7 +824,7 @@ void sdstoupper(sds s) {
* If two strings share exactly the same prefix, but one of the two has
* additional characters, the longer string is considered to be greater than
* the smaller one. */
int sdscmp(const sds s1, const sds s2) {
int sdscmp(const char *s1, const char *s2) {
size_t l1, l2, minlen;
int cmp;

138
src/sds.h
View File

@ -91,15 +91,27 @@ struct __attribute__ ((__packed__)) sdshdr64 {
#endif
};
struct __attribute__ ((__packed__)) sdshdrrefcount {
uint64_t len; /* used */
uint16_t refcount;
unsigned char flags; /* 3 lsb of type, 5 unused bits */
#ifndef __cplusplus
char buf[];
#endif
};
#define SDS_TYPE_5 0
#define SDS_TYPE_8 1
#define SDS_TYPE_16 2
#define SDS_TYPE_32 3
#define SDS_TYPE_64 4
#define SDS_TYPE_REFCOUNTED 5
#define SDS_TYPE_MASK 7
#define SDS_TYPE_BITS 3
#define SDS_HDR_VAR(T,s) struct sdshdr##T *sh = (struct sdshdr##T *)(((void*)((s)-(sizeof(struct sdshdr##T)))));
#define SDS_HDR_VAR_REFCOUNTED(s) struct sdshdrrefcount *sh = (struct sdshdrrefcount *)(((void*)((s)-(sizeof(struct sdshdrrefcount)))));
#define SDS_HDR(T,s) ((struct sdshdr##T *)((s)-(sizeof(struct sdshdr##T))))
#define SDS_HDR_REFCOUNTED(s) ((struct sdshdrrefcount *)((s)-(sizeof(struct sdshdrrefcount))))
#define SDS_TYPE_5_LEN(f) ((f)>>SDS_TYPE_BITS)
static inline size_t sdslen(const char *s) {
@ -121,6 +133,8 @@ static inline size_t sdslen(const char *s) {
return SDS_HDR(32,s)->len;
case SDS_TYPE_64:
return SDS_HDR(64,s)->len;
case SDS_TYPE_REFCOUNTED:
return SDS_HDR_REFCOUNTED(s)->len;
}
}
return 0;
@ -148,6 +162,9 @@ static inline size_t sdsavail(const char * s) {
SDS_HDR_VAR(64,s);
return sh->alloc - sh->len;
}
case SDS_TYPE_REFCOUNTED: {
return 0; // immutable
}
}
return 0;
}
@ -173,6 +190,9 @@ static inline void sdssetlen(sds s, size_t newlen) {
case SDS_TYPE_64:
SDS_HDR(64,s)->len = newlen;
break;
case SDS_TYPE_REFCOUNTED:
SDS_HDR_REFCOUNTED(s)->len = newlen;
break;
}
}
@ -198,6 +218,9 @@ static inline void sdsinclen(sds s, size_t inc) {
case SDS_TYPE_64:
SDS_HDR(64,s)->len += inc;
break;
case SDS_TYPE_REFCOUNTED:
SDS_HDR_REFCOUNTED(s)->len += inc;
break;
}
}
@ -215,6 +238,8 @@ static inline size_t sdsalloc(const sds s) {
return SDS_HDR(32,s)->alloc;
case SDS_TYPE_64:
return SDS_HDR(64,s)->alloc;
case SDS_TYPE_REFCOUNTED:
return SDS_HDR_REFCOUNTED(s)->len;
}
return 0;
}
@ -237,13 +262,22 @@ static inline void sdssetalloc(sds s, size_t newlen) {
case SDS_TYPE_64:
SDS_HDR(64,s)->alloc = newlen;
break;
case SDS_TYPE_REFCOUNTED:
break;
}
}
sds sdsnewlen(const void *init, size_t initlen);
static inline int sdsisshared(const char *s)
{
unsigned char flags = s[-1];
return ((flags & SDS_TYPE_MASK) == SDS_TYPE_REFCOUNTED);
}
sds sdsnewlen(const void *init, ssize_t initlen);
sds sdsnew(const char *init);
sds sdsempty(void);
sds sdsdup(const char *s);
sds sdsdupshared(const char *s);
void sdsfree(const char *s);
sds sdsgrowzero(sds s, size_t len);
sds sdscatlen(sds s, const void *t, size_t len);
@ -265,7 +299,7 @@ sds sdstrim(sds s, const char *cset);
void sdsrange(sds s, ssize_t start, ssize_t end);
void sdsupdatelen(sds s);
void sdsclear(sds s);
int sdscmp(const sds s1, const sds s2);
int sdscmp(const char *s1, const char *s2);
sds *sdssplitlen(const char *s, ssize_t len, const char *sep, int seplen, int *count);
void sdsfreesplitres(sds *tokens, int count);
void sdstolower(sds s);
@ -298,6 +332,106 @@ int sdsTest(int argc, char *argv[]);
#ifdef __cplusplus
}
// Non-owning view over an sds (or plain C) string. It never allocates or
// frees; ownership is handled by the derived sdsstring/sdsimmutablestring.
class sdsview
{
protected:
    sds m_str = nullptr;
    // Default ctor is protected: a bare view must always wrap a real string.
    sdsview() = default;

public:
    sdsview(sds str) : m_str(str) {}
    sdsview(const char *str) : m_str((sds)str) {}

    // Ordering/equality delegate to sdscmp (binary-safe sds comparison).
    bool operator<(const sdsview &other) const  { return sdscmp(m_str, other.m_str) < 0; }
    bool operator==(const sdsview &other) const { return sdscmp(m_str, other.m_str) == 0; }
    bool operator==(const char *other) const    { return sdscmp(m_str, other) == 0; }

    // Unchecked element access; caller guarantees idx < size().
    char operator[](size_t idx) const { return m_str[idx]; }
    size_t size() const { return sdslen(m_str); }

    const char *get() const { return m_str; }
    explicit operator const char*() const { return m_str; }
};
class sdsstring : public sdsview
{
public:
sdsstring() = default;
explicit sdsstring(sds str)
: sdsview(str)
{}
sdsstring(const sdsstring &other)
: sdsview(sdsdup(other.m_str))
{}
sdsstring(sdsstring &&other)
: sdsview(other.m_str)
{
other.m_str = nullptr;
}
~sdsstring()
{
sdsfree(m_str);
}
};
/* Owning wrapper for an immutable sds string: copies go through
 * sdsdupshared(), which can share the underlying refcounted buffer
 * instead of duplicating it.
 *
 * Fix: operator= had no self-assignment guard, so `s = s;` would
 * sdsfree(m_str) and then read the freed buffer via
 * sdsdupshared(other.m_str) — a use-after-free. */
class sdsimmutablestring : public sdsstring
{
public:
    sdsimmutablestring() = default;

    /* Takes ownership of str; released with sdsfree() by ~sdsstring(). */
    explicit sdsimmutablestring(sds str)
        : sdsstring(str)
    {}

    /* NOTE(review): this also takes ownership and will later sdsfree()
     * the pointer, so str must be a heap-allocated sds — never pass a
     * string literal here. */
    explicit sdsimmutablestring(const char *str)
        : sdsstring((sds)str)
    {}

    sdsimmutablestring(const sdsimmutablestring &other)
        : sdsstring(sdsdupshared(other.m_str))
    {}

    sdsimmutablestring(sdsimmutablestring &&other)
        : sdsstring(other.m_str)
    {
        other.m_str = nullptr;
    }

    auto &operator=(const sdsimmutablestring &other)
    {
        /* Guard against self-assignment: without it we free m_str and
         * then duplicate the already-freed buffer. */
        if (this != &other) {
            sdsfree(m_str);
            m_str = sdsdupshared(other.m_str);
        }
        return *this;
    }
};
#endif
#endif

View File

@ -2487,7 +2487,7 @@ void initServerConfig(void) {
/* Command table -- we initialize it here as it is part of the
* initial configuration, since command names may be changed via
* redis.conf using the rename-command directive. */
* keydb.conf using the rename-command directive. */
g_pserver->commands = dictCreate(&commandTableDictType,NULL);
g_pserver->orig_commands = dictCreate(&commandTableDictType,NULL);
populateCommandTable();
@ -2523,7 +2523,7 @@ void initServerConfig(void) {
/* By default we want scripts to be always replicated by effects
* (single commands executed by the script), and not by sending the
* script to the replica / AOF. This is the new way starting from
* Redis 5. However it is possible to revert it via redis.conf. */
* Redis 5. However it is possible to revert it via keydb.conf. */
g_pserver->lua_always_replicate_commands = 1;
/* Multithreading */
@ -3139,7 +3139,7 @@ void populateCommandTable(void) {
c->id = ACLGetCommandID(c->name); /* Assign the ID used for ACL. */
retval1 = dictAdd(g_pserver->commands, sdsnew(c->name), c);
/* Populate an additional dictionary that will be unaffected
* by rename-command statements in redis.conf. */
* by rename-command statements in keydb.conf. */
retval2 = dictAdd(g_pserver->orig_commands, sdsnew(c->name), c);
serverAssert(retval1 == DICT_OK && retval2 == DICT_OK);
}
@ -3214,7 +3214,7 @@ struct redisCommand *lookupCommandByCString(const char *s) {
/* Lookup the command in the current table, if not found also check in
* the original table containing the original command names unaffected by
* redis.conf rename-command statement.
* keydb.conf rename-command statement.
*
* This is used by functions rewriting the argument vector such as
* rewriteClientCommandVector() in order to set client->cmd pointer
@ -4695,7 +4695,7 @@ void version(void) {
}
void usage(void) {
fprintf(stderr,"Usage: ./keydb-server [/path/to/redis.conf] [options]\n");
fprintf(stderr,"Usage: ./keydb-server [/path/to/keydb.conf] [options]\n");
fprintf(stderr," ./keydb-server - (read config from stdin)\n");
fprintf(stderr," ./keydb-server -v or --version\n");
fprintf(stderr," ./keydb-server -h or --help\n");
@ -4705,7 +4705,7 @@ void usage(void) {
fprintf(stderr," ./keydb-server /etc/redis/6379.conf\n");
fprintf(stderr," ./keydb-server --port 7777\n");
fprintf(stderr," ./keydb-server --port 7777 --replicaof 127.0.0.1 8888\n");
fprintf(stderr," ./keydb-server /etc/myredis.conf --loglevel verbose\n\n");
fprintf(stderr," ./keydb-server /etc/mykeydb.conf --loglevel verbose\n\n");
fprintf(stderr,"Sentinel mode:\n");
fprintf(stderr," ./keydb-server /etc/sentinel.conf --sentinel\n");
exit(1);
@ -4722,7 +4722,7 @@ void redisAsciiArt(void) {
/* Show the ASCII logo if: log file is stdout AND stdout is a
* tty AND syslog logging is disabled. Also show logo if the user
* forced us to do so via redis.conf. */
* forced us to do so via keydb.conf. */
int show_logo = ((!g_pserver->syslog_enabled &&
g_pserver->logfile[0] == '\0' &&
isatty(fileno(stdout))) ||
@ -5032,6 +5032,21 @@ void *workerThreadMain(void *parg)
return NULL;
}
/* Sanity-check the loaded configuration before the server starts.
 * Recoverable issues are clamped with a logged warning; fatal
 * misconfigurations log an error and terminate the process. */
static void validateConfiguration()
{
    /* Clamp server-threads to the machine's core count. */
    unsigned cores = std::thread::hardware_concurrency();
    if (cserver.cthreads > (int)cores) {
        serverLog(LL_WARNING, "WARNING: server-threads is greater than this machine's core count. Truncating to %u threads", cores);
        cserver.cthreads = std::max((int)cores, 1); // never drop below one thread, even on weird sign overflows
    }

    /* Multi-master only works on top of active replication. */
    if (g_pserver->enable_multimaster && !g_pserver->fActiveReplica) {
        serverLog(LL_WARNING, "ERROR: Multi Master requires active replication to be enabled.");
        serverLog(LL_WARNING, "\tKeyDB will now exit. Please update your configuration file.");
        exit(EXIT_FAILURE);
    }
}
int main(int argc, char **argv) {
struct timeval tv;
int j;
@ -5183,6 +5198,12 @@ int main(int argc, char **argv) {
sdsfree(options);
}
if (cserver.fUsePro) {
execv("keydb-pro-server", argv);
perror("Failed launch the pro binary");
exit(EXIT_FAILURE);
}
serverLog(LL_WARNING, "oO0OoO0OoO0Oo KeyDB is starting oO0OoO0OoO0Oo");
serverLog(LL_WARNING,
"KeyDB version=%s, bits=%d, commit=%s, modified=%d, pid=%d, just started",
@ -5198,11 +5219,7 @@ int main(int argc, char **argv) {
serverLog(LL_WARNING, "Configuration loaded");
}
if (cserver.cthreads > (int)std::thread::hardware_concurrency()) {
serverLog(LL_WARNING, "WARNING: server-threads is greater than this machine's core count. Truncating to %u threads", std::thread::hardware_concurrency());
cserver.cthreads = (int)std::thread::hardware_concurrency();
cserver.cthreads = std::max(cserver.cthreads, 1); // in case of any weird sign overflows
}
validateConfiguration();
cserver.supervised = redisIsSupervised(cserver.supervised_mode);
int background = cserver.daemonize && !cserver.supervised;

View File

@ -264,7 +264,7 @@ public:
#define CONFIG_DEFAULT_REPL_BACKLOG_TIME_LIMIT (60*60) /* 1 hour */
#define CONFIG_REPL_BACKLOG_MIN_SIZE (1024*16) /* 16k */
#define CONFIG_BGSAVE_RETRY_DELAY 5 /* Wait a few secs before trying again. */
#define CONFIG_DEFAULT_PID_FILE "/var/run/redis.pid"
#define CONFIG_DEFAULT_PID_FILE "/var/run/keydb.pid"
#define CONFIG_DEFAULT_SYSLOG_IDENT "redis"
#define CONFIG_DEFAULT_CLUSTER_CONFIG_FILE "nodes.conf"
#define CONFIG_DEFAULT_CLUSTER_ANNOUNCE_IP NULL /* Auto detect. */
@ -1578,7 +1578,7 @@ struct redisServerConst {
/* Configuration */
char *default_masteruser; /* AUTH with this user and masterauth with master */
char *default_masterauth; /* AUTH with this password with master */
int verbosity; /* Loglevel in redis.conf */
int verbosity; /* Loglevel in keydb.conf */
int maxidletime; /* Client timeout in seconds */
int tcpkeepalive; /* Set SO_KEEPALIVE if non-zero. */
int active_defrag_enabled;
@ -1599,6 +1599,7 @@ struct redisServerConst {
size_t system_memory_size; /* Total memory in system as reported by OS */
unsigned char uuid[UUID_BINARY_LEN]; /* This server's UUID - populated on boot */
bool fUsePro = false;
};
struct redisServer {
@ -1829,7 +1830,7 @@ struct redisServer {
int sort_alpha;
int sort_bypattern;
int sort_store;
/* Zip structure config, see redis.conf for more information */
/* Zip structure config, see keydb.conf for more information */
size_t hash_max_ziplist_entries;
size_t hash_max_ziplist_value;
size_t set_max_intset_entries;

View File

@ -219,4 +219,37 @@ start_server {tags {"expire"}} {
set ttl [r ttl foo]
assert {$ttl <= 98 && $ttl > 90}
}
# EXPIREMEMBER on a set member: after the 1-second TTL elapses (we wait
# 1.5s) only the expired member is removed; the other two remain.
test { EXPIREMEMBER works (set) } {
r flushall
r sadd testkey foo bar baz
r expiremember testkey foo 1
after 1500
assert_equal {2} [r scard testkey]
}
# EXPIREMEMBER on a hash field: expiring the only field removes the key
# itself, so EXISTS reports 0 (the expected result of this test block).
test { EXPIREMEMBER works (hash) } {
r flushall
r hset testkey foo bar
r expiremember testkey foo 1
after 1500
r exists testkey
} {0}
# EXPIREMEMBER on a sorted-set member: of the two members added, only the
# expired one is removed after its 1-second TTL (1.5s wait).
test { EXPIREMEMBER works (zset) } {
r flushall
r zadd testkey 1 foo
r zadd testkey 2 bar
assert_equal {2} [r zcard testkey]
r expiremember testkey foo 1
after 1500
assert_equal {1} [r zcard testkey]
}
# TTL with a member argument (KeyDB extension) reports the remaining TTL
# of the sub-key set via EXPIREMEMBER; it must be positive right away.
test { TTL for subkey expires works } {
r flushall
r sadd testkey foo bar baz
r expiremember testkey foo 10000
assert [expr [r ttl testkey foo] > 0]
}
}

View File

@ -156,7 +156,7 @@ mkdir -p "$REDIS_DATA_DIR" || die "Could not create redis data directory"
#render the templates
TMP_FILE="/tmp/${REDIS_PORT}.conf"
DEFAULT_CONFIG="${SCRIPTPATH}/../redis.conf"
DEFAULT_CONFIG="${SCRIPTPATH}/../keydb.conf"
INIT_TPL_FILE="${SCRIPTPATH}/redis_init_script.tpl"
INIT_SCRIPT_DEST="/etc/init.d/redis_${REDIS_PORT}"
PIDFILE="/var/run/redis_${REDIS_PORT}.pid"