Merge keydb community

Former-commit-id: b9dcd0997c3dfd3cd2a5908ba540bc4d389104a0
This commit is contained in:
John Sully 2019-12-27 17:44:48 -05:00
commit 0fd9a4c67a
11 changed files with 200 additions and 150 deletions

2
.gitignore vendored
View File

@ -27,7 +27,7 @@ release.h
src/transfer.sh
src/configs
redis.ds
src/redis.conf
src/keydb.conf
src/nodes.conf
deps/lua/src/lua
deps/lua/src/luac

View File

@ -1,9 +1,9 @@
# Redis configuration file example.
# KeyDB configuration file example.
#
# Note that in order to read the configuration file, Redis must be
# Note that in order to read the configuration file, KeyDB must be
# started with the file path as first argument:
#
# ./keydb-server /path/to/redis.conf
# ./keydb-server /path/to/keydb.conf
# Note on units: when memory size is needed, it is possible to specify
# it in the usual form of 1k 5GB 4M and so forth:
@ -20,12 +20,12 @@
################################## INCLUDES ###################################
# Include one or more other config files here. This is useful if you
# have a standard template that goes to all Redis servers but also need
# have a standard template that goes to all KeyDB servers but also need
# to customize a few per-server settings. Include files can include
# other files, so use this wisely.
#
# Notice option "include" won't be rewritten by command "CONFIG REWRITE"
# from admin or Redis Sentinel. Since Redis always uses the last processed
# from admin or KeyDB Sentinel. Since KeyDB always uses the last processed
# line as value of a configuration directive, you'd better put includes
# at the beginning of this file to avoid overwriting config change at runtime.
#
@ -45,7 +45,7 @@
################################## NETWORK #####################################
# By default, if no "bind" configuration directive is specified, Redis listens
# By default, if no "bind" configuration directive is specified, KeyDB listens
# for connections from all the network interfaces available on the server.
# It is possible to listen to just one or multiple selected interfaces using
# the "bind" configuration directive, followed by one or more IP addresses.
@ -55,11 +55,11 @@
# bind 192.168.1.100 10.0.0.1
# bind 127.0.0.1 ::1
#
# ~~~ WARNING ~~~ If the computer running Redis is directly exposed to the
# ~~~ WARNING ~~~ If the computer running KeyDB is directly exposed to the
# internet, binding to all the interfaces is dangerous and will expose the
# instance to everybody on the internet. So by default we uncomment the
# following bind directive, that will force Redis to listen only into
# the IPv4 loopback interface address (this means Redis will be able to
# following bind directive, that will force KeyDB to listen only into
# the IPv4 loopback interface address (this means KeyDB will be able to
# accept connections only from clients running into the same computer it
# is running).
#
@ -69,7 +69,7 @@
bind 127.0.0.1
# Protected mode is a layer of security protection, in order to avoid that
# Redis instances left open on the internet are accessed and exploited.
# KeyDB instances left open on the internet are accessed and exploited.
#
# When protected mode is on and if:
#
@ -82,13 +82,13 @@ bind 127.0.0.1
# sockets.
#
# By default protected mode is enabled. You should disable it only if
# you are sure you want clients from other hosts to connect to Redis
# you are sure you want clients from other hosts to connect to KeyDB
# even if no authentication is configured, nor a specific set of interfaces
# are explicitly listed using the "bind" directive.
protected-mode yes
# Accept connections on the specified port, default is 6379 (IANA #815344).
# If port 0 is specified Redis will not listen on a TCP socket.
# If port 0 is specified KeyDB will not listen on a TCP socket.
port 6379
# TCP listen() backlog.
@ -103,10 +103,10 @@ tcp-backlog 511
# Unix socket.
#
# Specify the path for the Unix socket that will be used to listen for
# incoming connections. There is no default, so Redis will not listen
# incoming connections. There is no default, so KeyDB will not listen
# on a unix socket when not specified.
#
# unixsocket /tmp/redis.sock
# unixsocket /tmp/keydb.sock
# unixsocketperm 700
# Close the connection after a client is idle for N seconds (0 to disable)
@ -126,19 +126,19 @@ timeout 0
# On other kernels the period depends on the kernel configuration.
#
# A reasonable value for this option is 300 seconds, which is the new
# Redis default starting with Redis 3.2.1.
# KeyDB default starting with Redis 3.2.1.
tcp-keepalive 300
################################# GENERAL #####################################
# By default Redis does not run as a daemon. Use 'yes' if you need it.
# Note that Redis will write a pid file in /var/run/redis.pid when daemonized.
# By default KeyDB does not run as a daemon. Use 'yes' if you need it.
# Note that KeyDB will write a pid file in /var/run/keydb.pid when daemonized.
daemonize no
# If you run Redis from upstart or systemd, Redis can interact with your
# If you run KeyDB from upstart or systemd, KeyDB can interact with your
# supervision tree. Options:
# supervised no - no supervision interaction
# supervised upstart - signal upstart by putting Redis into SIGSTOP mode
# supervised upstart - signal upstart by putting KeyDB into SIGSTOP mode
# supervised systemd - signal systemd by writing READY=1 to $NOTIFY_SOCKET
# supervised auto - detect upstart or systemd method based on
# UPSTART_JOB or NOTIFY_SOCKET environment variables
@ -146,16 +146,16 @@ daemonize no
# They do not enable continuous liveness pings back to your supervisor.
supervised no
# If a pid file is specified, Redis writes it where specified at startup
# If a pid file is specified, KeyDB writes it where specified at startup
# and removes it at exit.
#
# When the server runs non daemonized, no pid file is created if none is
# specified in the configuration. When the server is daemonized, the pid file
# is used even if not specified, defaulting to "/var/run/redis.pid".
# is used even if not specified, defaulting to "/var/run/keydb.pid".
#
# Creating a pid file is best effort: if Redis is not able to create it
# Creating a pid file is best effort: if KeyDB is not able to create it
# nothing bad happens, the server will start and run normally.
pidfile /var/run/redis_6379.pid
pidfile /var/run/keydb_6379.pid
# Specify the server verbosity level.
# This can be one of:
@ -166,7 +166,7 @@ pidfile /var/run/redis_6379.pid
loglevel notice
# Specify the log file name. Also the empty string can be used to force
# Redis to log on the standard output. Note that if you use standard
# KeyDB to log on the standard output. Note that if you use standard
# output for logging but daemonize, logs will be sent to /dev/null
logfile ""
@ -175,7 +175,7 @@ logfile ""
# syslog-enabled no
# Specify the syslog identity.
# syslog-ident redis
# syslog-ident keydb
# Specify the syslog facility. Must be USER or between LOCAL0-LOCAL7.
# syslog-facility local0
@ -185,7 +185,7 @@ logfile ""
# dbid is a number between 0 and 'databases'-1
databases 16
# By default Redis shows an ASCII art logo only when started to log to the
# By default KeyDB shows an ASCII art logo only when started to log to the
# standard output and if the standard output is a TTY. Basically this means
# that normally a logo is displayed only in interactive sessions.
#
@ -219,17 +219,17 @@ save 900 1
save 300 10
save 60 10000
# By default Redis will stop accepting writes if RDB snapshots are enabled
# By default KeyDB will stop accepting writes if RDB snapshots are enabled
# (at least one save point) and the latest background save failed.
# This will make the user aware (in a hard way) that data is not persisting
# on disk properly, otherwise chances are that no one will notice and some
# disaster will happen.
#
# If the background saving process will start working again Redis will
# If the background saving process will start working again KeyDB will
# automatically allow writes again.
#
# However if you have setup your proper monitoring of the Redis server
# and persistence, you may want to disable this feature so that Redis will
# However if you have setup your proper monitoring of the KeyDB server
# and persistence, you may want to disable this feature so that KeyDB will
# continue to work as usual even if there are problems with disk,
# permissions, and so forth.
stop-writes-on-bgsave-error yes
@ -264,18 +264,18 @@ dir ./
################################# REPLICATION #################################
# Master-Replica replication. Use replicaof to make a Redis instance a copy of
# another Redis server. A few things to understand ASAP about Redis replication.
# Master-Replica replication. Use replicaof to make a KeyDB instance a copy of
# another KeyDB server. A few things to understand ASAP about KeyDB replication.
#
# +------------------+ +---------------+
# | Master | ---> | Replica |
# | (receive writes) | | (exact copy) |
# +------------------+ +---------------+
#
# 1) Redis replication is asynchronous, but you can configure a master to
# 1) KeyDB replication is asynchronous, but you can configure a master to
# stop accepting writes if it appears to be not connected with at least
# a given number of replicas.
# 2) Redis replicas are able to perform a partial resynchronization with the
# 2) KeyDB replicas are able to perform a partial resynchronization with the
# master if the replication link is lost for a relatively small amount of
# time. You may want to configure the replication backlog size (see the next
# sections of this file) with a sensible value depending on your needs.
@ -292,7 +292,7 @@ dir ./
#
# masterauth <master-password>
#
# However this is not enough if you are using Redis ACLs (for Redis version
# However this is not enough if you are using KeyDB ACLs (for Redis version
# 6 or greater), and the default user is not capable of running the PSYNC
# command and/or other commands needed for replication. In this case it's
# better to configure a special user to use with replication, and specify the
@ -345,10 +345,10 @@ replica-read-only yes
# synchronization". An RDB file is transmitted from the master to the replicas.
# The transmission can happen in two different ways:
#
# 1) Disk-backed: The Redis master creates a new process that writes the RDB
# 1) Disk-backed: The KeyDB master creates a new process that writes the RDB
# file on disk. Later the file is transferred by the parent
# process to the replicas incrementally.
# 2) Diskless: The Redis master creates a new process that directly writes the
# 2) Diskless: The KeyDB master creates a new process that directly writes the
# RDB file to replica sockets, without touching the disk at all.
#
# With disk-backed replication, while the RDB file is generated, more replicas
@ -397,7 +397,7 @@ repl-diskless-sync-delay 5
# Disable TCP_NODELAY on the replica socket after SYNC?
#
# If you select "yes" Redis will use a smaller number of TCP packets and
# If you select "yes" KeyDB will use a smaller number of TCP packets and
# less bandwidth to send data to replicas. But this can add a delay for
# the data to appear on the replica side, up to 40 milliseconds with
# Linux kernels using a default configuration.
@ -436,8 +436,8 @@ repl-disable-tcp-nodelay no
#
# repl-backlog-ttl 3600
# The replica priority is an integer number published by Redis in the INFO output.
# It is used by Redis Sentinel in order to select a replica to promote into a
# The replica priority is an integer number published by KeyDB in the INFO output.
# It is used by KeyDB Sentinel in order to select a replica to promote into a
# master if the master is no longer working correctly.
#
# A replica with a low priority number is considered better for promotion, so
@ -446,7 +446,7 @@ repl-disable-tcp-nodelay no
#
# However a special priority of 0 marks the replica as not able to perform the
# role of master, so a replica with priority of 0 will never be selected by
# Redis Sentinel for promotion.
# KeyDB Sentinel for promotion.
#
# By default the priority is 100.
replica-priority 100
@ -473,10 +473,10 @@ replica-priority 100
# By default min-replicas-to-write is set to 0 (feature disabled) and
# min-replicas-max-lag is set to 10.
# A Redis master is able to list the address and port of the attached
# A KeyDB master is able to list the address and port of the attached
# replicas in different ways. For example the "INFO replication" section
# offers this information, which is used, among other tools, by
# Redis Sentinel in order to discover replica instances.
# KeyDB Sentinel in order to discover replica instances.
# Another place where this info is available is in the output of the
# "ROLE" command of a master.
#
@ -504,7 +504,7 @@ replica-priority 100
################################## SECURITY ###################################
# Warning: since Redis is pretty fast an outside user can try up to
# Warning: since KeyDB is pretty fast an outside user can try up to
# 1 million passwords per second against a modern box. This means that you
# should use very strong passwords, otherwise they will be very easy to break.
# Note that because the password is really a shared secret between the client
@ -512,7 +512,7 @@ replica-priority 100
# can be easily a long string from /dev/urandom or whatever, so by using a
# long and unguessable password no brute force attack will be possible.
# Redis ACL users are defined in the following format:
# KeyDB ACL users are defined in the following format:
#
# user <username> ... acl rules ...
#
@ -539,7 +539,7 @@ replica-priority 100
# +@<category> Allow the execution of all the commands in such category
# with valid categories are like @admin, @set, @sortedset, ...
# and so forth, see the full list in the server.c file where
# the Redis command table is described and defined.
# the KeyDB command table is described and defined.
# The special category @all means all the commands, but currently
# present in the server, and that will be loaded in the future
# via modules.
@ -606,9 +606,9 @@ replica-priority 100
# ACL file, the server will refuse to start.
#
# The format of the external ACL user file is exactly the same as the
# format that is used inside redis.conf to describe users.
# format that is used inside keydb.conf to describe users.
#
# aclfile /etc/redis/users.acl
# aclfile /etc/keydb/users.acl
# IMPORTANT NOTE: starting with Redis 6 "requirepass" is just a compatibility
# layer on top of the new ACL system. The option effect will be just setting
@ -646,12 +646,12 @@ replica-priority 100
################################### CLIENTS ####################################
# Set the max number of connected clients at the same time. By default
# this limit is set to 10000 clients, however if the Redis server is not
# this limit is set to 10000 clients, however if the KeyDB server is not
# able to configure the process file limit to allow for the specified limit
# the max number of allowed clients is set to the current file limit
# minus 32 (as Redis reserves a few file descriptors for internal uses).
# minus 32 (as KeyDB reserves a few file descriptors for internal uses).
#
# Once the limit is reached Redis will close all the new connections sending
# Once the limit is reached KeyDB will close all the new connections sending
# an error 'max number of clients reached'.
#
# maxclients 10000
@ -659,15 +659,15 @@ replica-priority 100
############################## MEMORY MANAGEMENT ################################
# Set a memory usage limit to the specified amount of bytes.
# When the memory limit is reached Redis will try to remove keys
# When the memory limit is reached KeyDB will try to remove keys
# according to the eviction policy selected (see maxmemory-policy).
#
# If Redis can't remove keys according to the policy, or if the policy is
# set to 'noeviction', Redis will start to reply with errors to commands
# If KeyDB can't remove keys according to the policy, or if the policy is
# set to 'noeviction', KeyDB will start to reply with errors to commands
# that would use more memory, like SET, LPUSH, and so on, and will continue
# to reply to read-only commands like GET.
#
# This option is usually useful when using Redis as an LRU or LFU cache, or to
# This option is usually useful when using KeyDB as an LRU or LFU cache, or to
# set a hard memory limit for an instance (using the 'noeviction' policy).
#
# WARNING: If you have replicas attached to an instance with maxmemory on,
@ -683,7 +683,7 @@ replica-priority 100
#
# maxmemory <bytes>
# MAXMEMORY POLICY: how Redis will select what to remove when maxmemory
# MAXMEMORY POLICY: how KeyDB will select what to remove when maxmemory
# is reached. You can select among five behaviors:
#
# volatile-lru -> Evict using approximated LRU among the keys with an expire set.
@ -701,7 +701,7 @@ replica-priority 100
# Both LRU, LFU and volatile-ttl are implemented using approximated
# randomized algorithms.
#
# Note: with any of the above policies, Redis will return an error on write
# Note: with any of the above policies, KeyDB will return an error on write
# operations, when there are no suitable keys for eviction.
#
# At the date of writing these commands are: set setnx setex append
@ -716,7 +716,7 @@ replica-priority 100
# LRU, LFU and minimal TTL algorithms are not precise algorithms but approximated
# algorithms (in order to save memory), so you can tune it for speed or
# accuracy. For default Redis will check five keys and pick the one that was
# accuracy. By default KeyDB will check five keys and pick the one that was
# used less recently, you can change the sample size using the following
# configuration directive.
#
@ -747,16 +747,16 @@ replica-priority 100
############################# LAZY FREEING ####################################
# Redis has two primitives to delete keys. One is called DEL and is a blocking
# KeyDB has two primitives to delete keys. One is called DEL and is a blocking
# deletion of the object. It means that the server stops processing new commands
# in order to reclaim all the memory associated with an object in a synchronous
# way. If the key deleted is associated with a small object, the time needed
# in order to execute the DEL command is very small and comparable to most other
# O(1) or O(log_N) commands in Redis. However if the key is associated with an
# O(1) or O(log_N) commands in KeyDB. However if the key is associated with an
# aggregated value containing millions of elements, the server can block for
# a long time (even seconds) in order to complete the operation.
#
# For the above reasons Redis also offers non blocking deletion primitives
# For the above reasons KeyDB also offers non blocking deletion primitives
# such as UNLINK (non blocking DEL) and the ASYNC option of FLUSHALL and
# FLUSHDB commands, in order to reclaim memory in background. Those commands
# are executed in constant time. Another thread will incrementally free the
@ -764,9 +764,9 @@ replica-priority 100
#
# DEL, UNLINK and ASYNC option of FLUSHALL and FLUSHDB are user-controlled.
# It's up to the design of the application to understand when it is a good
# idea to use one or the other. However the Redis server sometimes has to
# idea to use one or the other. However the KeyDB server sometimes has to
# delete keys or flush the whole database as a side effect of other operations.
# Specifically Redis deletes objects independently of a user call in the
# Specifically KeyDB deletes objects independently of a user call in the
# following scenarios:
#
# 1) On eviction, because of the maxmemory and maxmemory policy configurations,
@ -796,20 +796,20 @@ replica-lazy-flush no
############################## APPEND ONLY MODE ###############################
# By default Redis asynchronously dumps the dataset on disk. This mode is
# good enough in many applications, but an issue with the Redis process or
# By default KeyDB asynchronously dumps the dataset on disk. This mode is
# good enough in many applications, but an issue with the KeyDB process or
# a power outage may result into a few minutes of writes lost (depending on
# the configured save points).
#
# The Append Only File is an alternative persistence mode that provides
# much better durability. For instance using the default data fsync policy
# (see later in the config file) Redis can lose just one second of writes in a
# (see later in the config file) KeyDB can lose just one second of writes in a
# dramatic event like a server power outage, or a single write if something
# wrong with the Redis process itself happens, but the operating system is
# wrong with the KeyDB process itself happens, but the operating system is
# still running correctly.
#
# AOF and RDB persistence can be enabled at the same time without problems.
# If the AOF is enabled on startup Redis will load the AOF, that is the file
# If the AOF is enabled on startup KeyDB will load the AOF, that is the file
# with the better durability guarantees.
#
# Please check http://redis.io/topics/persistence for more information.
@ -824,7 +824,7 @@ appendfilename "appendonly.aof"
# instead of waiting for more data in the output buffer. Some OS will really flush
# data on disk, some other OS will just try to do it ASAP.
#
# Redis supports three different modes:
# KeyDB supports three different modes:
#
# no: don't fsync, just let the OS flush the data when it wants. Faster.
# always: fsync after every write to the append only log. Slow, Safest.
@ -850,7 +850,7 @@ appendfsync everysec
# When the AOF fsync policy is set to always or everysec, and a background
# saving process (a background save or AOF log background rewriting) is
# performing a lot of I/O against the disk, in some Linux configurations
# Redis may block too long on the fsync() call. Note that there is no fix for
# KeyDB may block too long on the fsync() call. Note that there is no fix for
# this currently, as even performing fsync in a different thread will block
# our synchronous write(2) call.
#
@ -858,7 +858,7 @@ appendfsync everysec
# that will prevent fsync() from being called in the main process while a
# BGSAVE or BGREWRITEAOF is in progress.
#
# This means that while another child is saving, the durability of Redis is
# This means that while another child is saving, the durability of KeyDB is
# the same as "appendfsync none". In practical terms, this means that it is
# possible to lose up to 30 seconds of log in the worst scenario (with the
# default Linux settings).
@ -869,10 +869,10 @@ appendfsync everysec
no-appendfsync-on-rewrite no
# Automatic rewrite of the append only file.
# Redis is able to automatically rewrite the log file implicitly calling
# KeyDB is able to automatically rewrite the log file implicitly calling
# BGREWRITEAOF when the AOF log size grows by the specified percentage.
#
# This is how it works: Redis remembers the size of the AOF file after the
# This is how it works: KeyDB remembers the size of the AOF file after the
# latest rewrite (if no rewrite has happened since the restart, the size of
# the AOF at startup is used).
#
@ -888,19 +888,19 @@ no-appendfsync-on-rewrite no
auto-aof-rewrite-percentage 100
auto-aof-rewrite-min-size 64mb
# An AOF file may be found to be truncated at the end during the Redis
# An AOF file may be found to be truncated at the end during the KeyDB
# startup process, when the AOF data gets loaded back into memory.
# This may happen when the system where Redis is running
# This may happen when the system where KeyDB is running
# crashes, especially when an ext4 filesystem is mounted without the
# data=ordered option (however this can't happen when Redis itself
# data=ordered option (however this can't happen when KeyDB itself
# crashes or aborts but the operating system still works correctly).
#
# Redis can either exit with an error when this happens, or load as much
# KeyDB can either exit with an error when this happens, or load as much
# data as possible (the default now) and start if the AOF file is found
# to be truncated at the end. The following option controls this behavior.
#
# If aof-load-truncated is set to yes, a truncated AOF file is loaded and
# the Redis server starts emitting a log to inform the user of the event.
# the KeyDB server starts emitting a log to inform the user of the event.
# Otherwise if the option is set to no, the server aborts with an error
# and refuses to start. When the option is set to no, the user requires
# to fix the AOF file using the "keydb-check-aof" utility before to restart
@ -908,17 +908,17 @@ auto-aof-rewrite-min-size 64mb
#
# Note that if the AOF file will be found to be corrupted in the middle
# the server will still exit with an error. This option only applies when
# Redis will try to read more data from the AOF file but not enough bytes
# KeyDB will try to read more data from the AOF file but not enough bytes
# will be found.
aof-load-truncated yes
# When rewriting the AOF file, Redis is able to use an RDB preamble in the
# When rewriting the AOF file, KeyDB is able to use an RDB preamble in the
# AOF file for faster rewrites and recoveries. When this option is turned
# on the rewritten AOF file is composed of two different stanzas:
#
# [RDB file][AOF tail]
#
# When loading Redis recognizes that the AOF file starts with the "REDIS"
# When loading KeyDB recognizes that the AOF file starts with the "REDIS"
# string and loads the prefixed RDB file, and continues loading the AOF
# tail.
aof-use-rdb-preamble yes
@ -927,7 +927,7 @@ aof-use-rdb-preamble yes
# Max execution time of a Lua script in milliseconds.
#
# If the maximum execution time is reached Redis will log that a script is
# If the maximum execution time is reached KeyDB will log that a script is
# still in execution after the maximum allowed time and will start to
# reply to queries with an error.
#
@ -941,17 +941,17 @@ aof-use-rdb-preamble yes
# Set it to 0 or a negative value for unlimited execution without warnings.
lua-time-limit 5000
################################ REDIS CLUSTER ###############################
################################ KEYDB CLUSTER ###############################
# Normal Redis instances can't be part of a Redis Cluster; only nodes that are
# started as cluster nodes can. In order to start a Redis instance as a
# Normal KeyDB instances can't be part of a KeyDB Cluster; only nodes that are
# started as cluster nodes can. In order to start a KeyDB instance as a
# cluster node enable the cluster support uncommenting the following:
#
# cluster-enabled yes
# Every cluster node has a cluster configuration file. This file is not
# intended to be edited by hand. It is created and updated by Redis nodes.
# Every Redis Cluster node requires a different cluster configuration file.
# intended to be edited by hand. It is created and updated by KeyDB nodes.
# Every KeyDB Cluster node requires a different cluster configuration file.
# Make sure that instances running in the same system do not have
# overlapping cluster configuration file names.
#
@ -1027,7 +1027,7 @@ lua-time-limit 5000
#
# cluster-migration-barrier 1
# By default Redis Cluster nodes stop accepting queries if they detect there
# By default KeyDB Cluster nodes stop accepting queries if they detect there
# is at least an hash slot uncovered (no available node is serving it).
# This way if the cluster is partially down (for example a range of hash slots
# are no longer covered) all the cluster becomes, eventually, unavailable.
@ -1055,11 +1055,11 @@ lua-time-limit 5000
########################## CLUSTER DOCKER/NAT support ########################
# In certain deployments, Redis Cluster nodes address discovery fails, because
# In certain deployments, KeyDB Cluster nodes address discovery fails, because
# addresses are NAT-ted or because ports are forwarded (the typical case is
# Docker and other containers).
#
# In order to make Redis Cluster working in such environments, a static
# In order to make KeyDB Cluster working in such environments, a static
# configuration where each node knows its public address is needed. The
# following two options are used for this scope, and are:
#
@ -1072,7 +1072,7 @@ lua-time-limit 5000
# so that other nodes will be able to correctly map the address of the node
# publishing the information.
#
# If the above options are not used, the normal Redis Cluster auto-detection
# If the above options are not used, the normal KeyDB Cluster auto-detection
# will be used instead.
#
# Note that when remapped, the bus port may not be at the fixed offset of
@ -1088,14 +1088,14 @@ lua-time-limit 5000
################################## SLOW LOG ###################################
# The Redis Slow Log is a system to log queries that exceeded a specified
# The KeyDB Slow Log is a system to log queries that exceeded a specified
# execution time. The execution time does not include the I/O operations
# like talking with the client, sending the reply and so forth,
# but just the time needed to actually execute the command (this is the only
# stage of command execution where the thread is blocked and can not serve
# other requests in the meantime).
#
# You can configure the slow log with two parameters: one tells Redis
# You can configure the slow log with two parameters: one tells KeyDB
# what is the execution time, in microseconds, to exceed in order for the
# command to get logged, and the other parameter is the length of the
# slow log. When a new command is logged the oldest one is removed from the
@ -1112,9 +1112,9 @@ slowlog-max-len 128
################################ LATENCY MONITOR ##############################
# The Redis latency monitoring subsystem samples different operations
# The KeyDB latency monitoring subsystem samples different operations
# at runtime in order to collect data related to possible sources of
# latency of a Redis instance.
# latency of a KeyDB instance.
#
# Via the LATENCY command this information is available to the user that can
# print graphs and obtain reports.
@ -1133,7 +1133,7 @@ latency-monitor-threshold 0
############################# EVENT NOTIFICATION ##############################
# Redis can notify Pub/Sub clients about events happening in the key space.
# KeyDB can notify Pub/Sub clients about events happening in the key space.
# This feature is documented at http://redis.io/topics/notifications
#
# For instance if keyspace events notification is enabled, and a client
@ -1143,7 +1143,7 @@ latency-monitor-threshold 0
# PUBLISH __keyspace@0__:foo del
# PUBLISH __keyevent@0__:del foo
#
# It is possible to select the events that Redis will notify among a set
# It is possible to select the events that KeyDB will notify among a set
# of classes. Every class is identified by a single character:
#
# K Keyspace events, published with __keyspace@<db>__ prefix.
@ -1179,12 +1179,12 @@ notify-keyspace-events ""
############################### GOPHER SERVER #################################
# Redis contains an implementation of the Gopher protocol, as specified in
# KeyDB contains an implementation of the Gopher protocol, as specified in
# the RFC 1436 (https://www.ietf.org/rfc/rfc1436.txt).
#
# The Gopher protocol was very popular in the late '90s. It is an alternative
# to the web, and the implementation both server and client side is so simple
# that the Redis server has just 100 lines of code in order to implement this
# that the KeyDB server has just 100 lines of code in order to implement this
# support.
#
# What do you do with Gopher nowadays? Well Gopher never *really* died, and
@ -1194,18 +1194,18 @@ notify-keyspace-events ""
# controlled, and it's cool to create an alternative space for people that
# want a bit of fresh air.
#
# Anyway for the 10nth birthday of the Redis, we gave it the Gopher protocol
# Anyway for the 10th birthday of KeyDB, we gave it the Gopher protocol
# as a gift.
#
# --- HOW IT WORKS? ---
#
# The Redis Gopher support uses the inline protocol of Redis, and specifically
# The KeyDB Gopher support uses the inline protocol of KeyDB, and specifically
# two kind of inline requests that were anyway illegal: an empty request
# or any request that starts with "/" (there are no Redis commands starting
# or any request that starts with "/" (there are no KeyDB commands starting
# with such a slash). Normal RESP2/RESP3 requests are completely out of the
# path of the Gopher protocol implementation and are served as usual as well.
#
# If you open a connection to Redis when Gopher is enabled and send it
# If you open a connection to KeyDB when Gopher is enabled and send it
# a string like "/foo", if there is a key named "/foo" it is served via the
# Gopher protocol.
#
@ -1216,7 +1216,7 @@ notify-keyspace-events ""
#
# --- SECURITY WARNING ---
#
# If you plan to put Redis on the internet in a publicly accessible address
# If you plan to put KeyDB on the internet in a publicly accessible address
# to serve Gopher pages MAKE SURE TO SET A PASSWORD to the instance.
# Once a password is set:
#
@ -1310,8 +1310,8 @@ stream-node-max-bytes 4096
stream-node-max-entries 100
# Active rehashing uses 1 millisecond every 100 milliseconds of CPU time in
# order to help rehashing the main Redis hash table (the one mapping top-level
# keys to values). The hash table implementation Redis uses (see dict.c)
# order to help rehashing the main KeyDB hash table (the one mapping top-level
# keys to values). The hash table implementation KeyDB uses (see dict.c)
# performs a lazy rehashing: the more operations you run into a hash table
# that is rehashing, the more rehashing "steps" are performed, so if the
# server is idle the rehashing is never complete and some more memory is used
@ -1322,7 +1322,7 @@ stream-node-max-entries 100
#
# If unsure:
# use "activerehashing no" if you have hard latency requirements and it is
# not a good thing in your environment that Redis can reply from time to time
# not a good thing in your environment that KeyDB can reply from time to time
# to queries with 2 milliseconds delay.
#
# use "activerehashing yes" if you don't have such hard requirements but
@ -1374,21 +1374,21 @@ client-output-buffer-limit pubsub 32mb 8mb 60
#
# client-query-buffer-limit 1gb
# In the Redis protocol, bulk requests, that are, elements representing single
# In the KeyDB protocol, bulk requests, that are, elements representing single
# strings, are normally limited to 512 mb. However you can change this limit
# here.
#
# proto-max-bulk-len 512mb
# Redis calls an internal function to perform many background tasks, like
# KeyDB calls an internal function to perform many background tasks, like
# closing connections of clients in timeout, purging expired keys that are
# never requested, and so forth.
#
# Not all tasks are performed with the same frequency, but Redis checks for
# Not all tasks are performed with the same frequency, but KeyDB checks for
# tasks to perform according to the specified "hz" value.
#
# By default "hz" is set to 10. Raising the value will use more CPU when
# Redis is idle, but at the same time will make Redis more responsive when
# KeyDB is idle, but at the same time will make KeyDB more responsive when
# there are many keys expiring at the same time, and timeouts may be
# handled with more precision.
#
@ -1402,7 +1402,7 @@ hz 10
# avoid too many clients are processed for each background task invocation
# in order to avoid latency spikes.
#
# Since the default HZ value by default is conservatively set to 10, Redis
# Since the default HZ value by default is conservatively set to 10, KeyDB
# offers, and enables by default, the ability to use an adaptive HZ value
# which will temporarily rise when there are many connected clients.
#
@ -1419,22 +1419,22 @@ dynamic-hz yes
# big latency spikes.
aof-rewrite-incremental-fsync yes
# When redis saves RDB file, if the following option is enabled
# When KeyDB saves RDB file, if the following option is enabled
# the file will be fsync-ed every 32 MB of data generated. This is useful
# in order to commit the file to the disk more incrementally and avoid
# big latency spikes.
rdb-save-incremental-fsync yes
# Redis LFU eviction (see maxmemory setting) can be tuned. However it is a good
# KeyDB LFU eviction (see maxmemory setting) can be tuned. However it is a good
# idea to start with the default settings and only change them after investigating
# how to improve the performances and how the keys LFU change over time, which
# is possible to inspect via the OBJECT FREQ command.
#
# There are two tunable parameters in the Redis LFU implementation: the
# There are two tunable parameters in the KeyDB LFU implementation: the
# counter logarithm factor and the counter decay time. It is important to
# understand what the two parameters mean before changing them.
#
# The LFU counter is just 8 bits per key, it's maximum value is 255, so Redis
# The LFU counter is just 8 bits per key, its maximum value is 255, so KeyDB
# uses a probabilistic increment with logarithmic behavior. Given the value
# of the old counter, when a key is accessed, the counter is incremented in
# this way:
@ -1486,7 +1486,7 @@ rdb-save-incremental-fsync yes
# What is active defragmentation?
# -------------------------------
#
# Active (online) defragmentation allows a Redis server to compact the
# Active (online) defragmentation allows a KeyDB server to compact the
# spaces left between small allocations and deallocations of data in memory,
# thus allowing to reclaim back memory.
#
@ -1498,7 +1498,7 @@ rdb-save-incremental-fsync yes
# in a "hot" way, while the server is running.
#
# Basically when the fragmentation is over a certain level (see the
# configuration options below) Redis will start to create new copies of the
# configuration options below) KeyDB will start to create new copies of the
# values in contiguous memory regions by exploiting certain specific Jemalloc
# features (in order to understand if an allocation is causing fragmentation
# and to allocate it in a better place), and at the same time, will release the
@ -1507,8 +1507,8 @@ rdb-save-incremental-fsync yes
#
# Important things to understand:
#
# 1. This feature is disabled by default, and only works if you compiled Redis
# to use the copy of Jemalloc we ship with the source code of Redis.
# 1. This feature is disabled by default, and only works if you compiled KeyDB
# to use the copy of Jemalloc we ship with the source code of KeyDB.
# This is the default with Linux builds.
#
# 2. You never need to enable this feature if you don't have fragmentation

View File

@ -295,14 +295,18 @@ $(REDIS_BENCHMARK_NAME): $(REDIS_BENCHMARK_OBJ)
dict-benchmark: dict.cpp zmalloc.cpp sds.c siphash.c
$(REDIS_CC) $(FINAL_CFLAGS) $^ -D DICT_BENCHMARK_MAIN -o $@ $(FINAL_LIBS)
DEP = $(REDIS_SERVER_OBJ:%.o=%.d) $(REDIS_CLI_OBJ:%.o=%.d) $(REDIS_BENCHMARK_OBJ:%.o=%.d)
-include $(DEP)
# Because the jemalloc.h header is generated as a part of the jemalloc build,
# building it should complete before building any other object. Instead of
# depending on a single artifact, build all dependencies first.
%.o: %.c .make-prerequisites
$(REDIS_CC) -c $<
$(REDIS_CC) -MMD -o $@ -c $<
%.o: %.cpp .make-prerequisites
$(REDIS_CXX) -o $@ -c $<
$(REDIS_CXX) -MMD -o $@ -c $<
%.o: %.asm .make-prerequisites
$(KEYDB_AS) $< -o $@
@ -310,6 +314,7 @@ dict-benchmark: dict.cpp zmalloc.cpp sds.c siphash.c
clean:
rm -rf $(REDIS_SERVER_NAME) $(REDIS_SENTINEL_NAME) $(REDIS_CLI_NAME) $(REDIS_BENCHMARK_NAME) $(REDIS_CHECK_RDB_NAME) $(REDIS_CHECK_AOF_NAME) *.o *.gcda *.gcno *.gcov redis.info lcov-html Makefile.dep dict-benchmark
rm -rf storage/*.o
rm -f $(DEP)
.PHONY: clean

View File

@ -1123,7 +1123,7 @@ int ACLLoadConfiguredUsers(void) {
/* This function loads the ACL from the specified filename: every line
* is validated and should be either empty or in the format used to specify
* users in the redis.conf configuration or in the ACL file, that is:
* users in the keydb.conf configuration or in the ACL file, that is:
*
* user <username> ... rules ...
*
@ -1347,17 +1347,17 @@ cleanup:
/* This function is called once the server is already running, modules are
* loaded, and we are ready to start, in order to load the ACLs either from
* the pending list of users defined in redis.conf, or from the ACL file.
* the pending list of users defined in keydb.conf, or from the ACL file.
* The function will just exit with an error if the user is trying to mix
* both the loading methods. */
void ACLLoadUsersAtStartup(void) {
if (g_pserver->acl_filename[0] != '\0' && listLength(UsersToLoad) != 0) {
serverLog(LL_WARNING,
"Configuring Redis with users defined in redis.conf and at "
"Configuring KeyDB with users defined in keydb.conf and at "
"the same setting an ACL file path is invalid. This setup "
"is very likely to lead to configuration errors and security "
"holes, please define either an ACL file or declare users "
"directly in your redis.conf, but not both.");
"directly in your keydb.conf, but not both.");
exit(1);
}

View File

@ -1780,7 +1780,7 @@ void rewriteConfigRewriteLine(struct rewriteConfigState *state, const char *opti
}
/* Write the long long 'bytes' value as a string in a way that is parsable
* inside redis.conf. If possible uses the GB, MB, KB notation. */
* inside keydb.conf. If possible uses the GB, MB, KB notation. */
int rewriteConfigFormatMemory(char *buf, size_t len, long long bytes) {
int gb = 1024*1024*1024;
int mb = 1024*1024;
@ -1943,7 +1943,7 @@ void rewriteConfigDirOption(struct rewriteConfigState *state) {
void rewriteConfigSlaveofOption(struct rewriteConfigState *state, const char *option) {
/* If this is a master, we want all the slaveof config options
* in the file to be removed. Note that if this is a cluster instance
* we don't want a slaveof directive inside redis.conf. */
* we don't want a slaveof directive inside keydb.conf. */
if (g_pserver->cluster_enabled || listLength(g_pserver->masters) == 0) {
rewriteConfigMarkAsProcessed(state,option);
return;

View File

@ -99,9 +99,28 @@ void activeExpireCycleExpire(redisDb *db, expireEntry &e, long long now) {
}
}
break;
case OBJ_LIST:
case OBJ_ZSET:
case OBJ_HASH:
if (hashTypeDelete(val,(sds)pfat->nextExpireEntry().spsubkey.get())) {
deleted++;
if (hashTypeLength(val) == 0) {
activeExpireCycleExpireFullKey(db, e.key());
return;
}
}
break;
case OBJ_ZSET:
if (zsetDel(val,(sds)pfat->nextExpireEntry().spsubkey.get())) {
deleted++;
if (zsetLength(val) == 0) {
activeExpireCycleExpireFullKey(db, e.key());
return;
}
}
break;
case OBJ_LIST:
default:
serverAssert(false);
}
@ -160,6 +179,7 @@ void expireMemberCore(client *c, robj *key, robj *subkey, long long basetime, lo
return;
}
double dblT;
switch (val->type)
{
case OBJ_SET:
@ -169,6 +189,20 @@ void expireMemberCore(client *c, robj *key, robj *subkey, long long basetime, lo
}
break;
case OBJ_HASH:
if (!hashTypeExists(val, szFromObj(subkey))) {
addReply(c,shared.czero);
return;
}
break;
case OBJ_ZSET:
if (zsetScore(val, szFromObj(subkey), &dblT) == C_ERR) {
addReply(c,shared.czero);
return;
}
break;
default:
addReplyError(c, "object type is unsupported");
return;

View File

@ -4043,7 +4043,7 @@ int moduleGILAcquiredByModule(void) {
* used to send anything to the client, and has the db number where the event
* occurred as its selected db number.
*
* Notice that it is not necessary to enable notifications in redis.conf for
* Notice that it is not necessary to enable notifications in keydb.conf for
* module notifications to work.
*
* Warning: the notification callbacks are performed in a synchronous manner,

View File

@ -3055,7 +3055,7 @@ void replicationCron(void) {
if (mi->masterhost && mi->repl_state == REPL_STATE_TRANSFER &&
(time(NULL)-mi->repl_transfer_lastio) > g_pserver->repl_timeout)
{
serverLog(LL_WARNING,"Timeout receiving bulk data from MASTER... If the problem persists try to set the 'repl-timeout' parameter in redis.conf to a larger value.");
serverLog(LL_WARNING,"Timeout receiving bulk data from MASTER... If the problem persists try to set the 'repl-timeout' parameter in keydb.conf to a larger value.");
cancelReplicationHandshake(mi);
}
@ -3515,4 +3515,4 @@ static void propagateMasterStaleKeys()
}
decrRefCount(rgobj[0]);
}
}

View File

@ -2589,7 +2589,7 @@ void initServerConfig(void) {
/* Command table -- we initiialize it here as it is part of the
* initial configuration, since command names may be changed via
* redis.conf using the rename-command directive. */
* keydb.conf using the rename-command directive. */
g_pserver->commands = dictCreate(&commandTableDictType,NULL);
g_pserver->orig_commands = dictCreate(&commandTableDictType,NULL);
populateCommandTable();
@ -2625,7 +2625,7 @@ void initServerConfig(void) {
/* By default we want scripts to be always replicated by effects
* (single commands executed by the script), and not by sending the
* script to the replica / AOF. This is the new way starting from
* Redis 5. However it is possible to revert it via redis.conf. */
* Redis 5. However it is possible to revert it via keydb.conf. */
g_pserver->lua_always_replicate_commands = 1;
/* Multithreading */
@ -3239,7 +3239,7 @@ void populateCommandTable(void) {
c->id = ACLGetCommandID(c->name); /* Assign the ID used for ACL. */
retval1 = dictAdd(g_pserver->commands, sdsnew(c->name), c);
/* Populate an additional dictionary that will be unaffected
* by rename-command statements in redis.conf. */
* by rename-command statements in keydb.conf. */
retval2 = dictAdd(g_pserver->orig_commands, sdsnew(c->name), c);
serverAssert(retval1 == DICT_OK && retval2 == DICT_OK);
}
@ -3314,7 +3314,7 @@ struct redisCommand *lookupCommandByCString(const char *s) {
/* Lookup the command in the current table, if not found also check in
* the original table containing the original command names unaffected by
* redis.conf rename-command statement.
* keydb.conf rename-command statement.
*
* This is used by functions rewriting the argument vector such as
* rewriteClientCommandVector() in order to set client->cmd pointer
@ -4808,7 +4808,7 @@ void version(void) {
}
void usage(void) {
fprintf(stderr,"Usage: ./keydb-server [/path/to/redis.conf] [options]\n");
fprintf(stderr,"Usage: ./keydb-server [/path/to/keydb.conf] [options]\n");
fprintf(stderr," ./keydb-server - (read config from stdin)\n");
fprintf(stderr," ./keydb-server -v or --version\n");
fprintf(stderr," ./keydb-server -h or --help\n");
@ -4818,7 +4818,7 @@ void usage(void) {
fprintf(stderr," ./keydb-server /etc/redis/6379.conf\n");
fprintf(stderr," ./keydb-server --port 7777\n");
fprintf(stderr," ./keydb-server --port 7777 --replicaof 127.0.0.1 8888\n");
fprintf(stderr," ./keydb-server /etc/myredis.conf --loglevel verbose\n\n");
fprintf(stderr," ./keydb-server /etc/mykeydb.conf --loglevel verbose\n\n");
fprintf(stderr,"Sentinel mode:\n");
fprintf(stderr," ./keydb-server /etc/sentinel.conf --sentinel\n");
exit(1);
@ -4835,7 +4835,7 @@ void redisAsciiArt(void) {
/* Show the ASCII logo if: log file is stdout AND stdout is a
* tty AND syslog logging is disabled. Also show logo if the user
* forced us to do so via redis.conf. */
* forced us to do so via keydb.conf. */
int show_logo = ((!g_pserver->syslog_enabled &&
g_pserver->logfile[0] == '\0' &&
isatty(fileno(stdout))) ||
@ -5171,6 +5171,21 @@ void *workerThreadMain(void *parg)
return NULL;
}
/* Sanity-check the loaded configuration before the server starts.
 * Called from main() after the config file and command-line options are
 * parsed (replaces the inline checks that used to live in main()).
 *
 * Current checks:
 * 1. server-threads: if it exceeds this machine's hardware concurrency,
 *    log a warning and clamp it to the core count (never below 1).
 *    NOTE(review): std::thread::hardware_concurrency() may return 0 on
 *    platforms where it is unknown; the std::max(...) below also guards
 *    that case, forcing at least one worker thread.
 * 2. multi-master: requires active replication; if multi-master is enabled
 *    without active-replica mode the server logs an error and exits
 *    (misconfiguration is fatal rather than silently corrected). */
static void validateConfiguration()
{
if (cserver.cthreads > (int)std::thread::hardware_concurrency()) {
serverLog(LL_WARNING, "WARNING: server-threads is greater than this machine's core count. Truncating to %u threads", std::thread::hardware_concurrency());
cserver.cthreads = (int)std::thread::hardware_concurrency();
cserver.cthreads = std::max(cserver.cthreads, 1); // in case of any weird sign overflows
}
if (g_pserver->enable_multimaster && !g_pserver->fActiveReplica) {
serverLog(LL_WARNING, "ERROR: Multi Master requires active replication to be enabled.");
serverLog(LL_WARNING, "\tKeyDB will now exit. Please update your configuration file.");
exit(EXIT_FAILURE);
}
}
int main(int argc, char **argv) {
struct timeval tv;
int j;
@ -5337,11 +5352,7 @@ int main(int argc, char **argv) {
serverLog(LL_WARNING, "Configuration loaded");
}
if (cserver.cthreads > (int)std::thread::hardware_concurrency()) {
serverLog(LL_WARNING, "WARNING: server-threads is greater than this machine's core count. Truncating to %u threads", std::thread::hardware_concurrency());
cserver.cthreads = (int)std::thread::hardware_concurrency();
cserver.cthreads = std::max(cserver.cthreads, 1); // in case of any weird sign overflows
}
validateConfiguration();
cserver.supervised = redisIsSupervised(cserver.supervised_mode);
int background = cserver.daemonize && !cserver.supervised;

View File

@ -325,7 +325,7 @@ public:
#define CONFIG_DEFAULT_REPL_BACKLOG_TIME_LIMIT (60*60) /* 1 hour */
#define CONFIG_REPL_BACKLOG_MIN_SIZE (1024*16) /* 16k */
#define CONFIG_BGSAVE_RETRY_DELAY 5 /* Wait a few secs before trying again. */
#define CONFIG_DEFAULT_PID_FILE "/var/run/redis.pid"
#define CONFIG_DEFAULT_PID_FILE "/var/run/keydb.pid"
#define CONFIG_DEFAULT_SYSLOG_IDENT "redis"
#define CONFIG_DEFAULT_CLUSTER_CONFIG_FILE "nodes.conf"
#define CONFIG_DEFAULT_CLUSTER_ANNOUNCE_IP NULL /* Auto detect. */
@ -1937,7 +1937,7 @@ struct redisServerConst {
/* Configuration */
char *default_masteruser; /* AUTH with this user and masterauth with master */
char *default_masterauth; /* AUTH with this password with master */
int verbosity; /* Loglevel in redis.conf */
int verbosity; /* Loglevel in keydb.conf */
int maxidletime; /* Client timeout in seconds */
int tcpkeepalive; /* Set SO_KEEPALIVE if non-zero. */
int active_defrag_enabled;
@ -2198,7 +2198,7 @@ struct redisServer {
int sort_alpha;
int sort_bypattern;
int sort_store;
/* Zip structure config, see redis.conf for more information */
/* Zip structure config, see keydb.conf for more information */
size_t hash_max_ziplist_entries;
size_t hash_max_ziplist_value;
size_t set_max_intset_entries;

View File

@ -156,7 +156,7 @@ mkdir -p "$REDIS_DATA_DIR" || die "Could not create redis data directory"
#render the templates
TMP_FILE="/tmp/${REDIS_PORT}.conf"
DEFAULT_CONFIG="${SCRIPTPATH}/../redis.conf"
DEFAULT_CONFIG="${SCRIPTPATH}/../keydb.conf"
INIT_TPL_FILE="${SCRIPTPATH}/redis_init_script.tpl"
INIT_SCRIPT_DEST="/etc/init.d/redis_${REDIS_PORT}"
PIDFILE="/var/run/redis_${REDIS_PORT}.pid"