Merge branch 'unstable' into keydbpro
Former-commit-id: 4fd95b064648ee3d02097a1c2dabf306978b44f9
This commit is contained in: commit 702bfaee9b
TLS.md (26 changed lines)
@@ -13,13 +13,13 @@ Run `make BUILD_TLS=yes`.
 ### Tests
 
-To run Redis test suite with TLS, you'll need TLS support for TCL (i.e.
+To run KeyDB test suite with TLS, you'll need TLS support for TCL (i.e.
 `tcl-tls` package on Debian/Ubuntu).
 
 1. Run `./utils/gen-test-certs.sh` to generate a root CA and a server
    certificate.
 
-2. Run `./runtest --tls` or `./runtest-cluster --tls` to run Redis and Redis
+2. Run `./runtest --tls` or `./runtest-cluster --tls` to run KeyDB and KeyDB
    Cluster tests in TLS mode.
 
 ### Running manually

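Condensed into a single terminal session, the test-suite steps above look like this (a minimal sketch assuming a Debian/Ubuntu host, run from the repository root):

    sudo apt-get install tcl-tls    # TLS support for TCL
    ./utils/gen-test-certs.sh       # root CA + server certificate
    ./runtest --tls                 # KeyDB tests in TLS mode
    ./runtest-cluster --tls         # KeyDB Cluster tests in TLS mode
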
@@ -27,23 +27,23 @@ To run Redis test suite with TLS, you'll need TLS support for TCL (i.e.
 To manually run a Redis server with TLS mode (assuming `gen-test-certs.sh` was
 invoked so sample certificates/keys are available):
 
-    ./src/redis-server --tls-port 6379 --port 0 \
-        --tls-cert-file ./tests/tls/redis.crt \
-        --tls-key-file ./tests/tls/redis.key \
+    ./src/keydb-server --tls-port 6379 --port 0 \
+        --tls-cert-file ./tests/tls/keydb.crt \
+        --tls-key-file ./tests/tls/keydb.key \
         --tls-ca-cert-file ./tests/tls/ca.crt
 
-To connect to this Redis server with `redis-cli`:
+To connect to this Redis server with `keydb-cli`:
 
-    ./src/redis-cli --tls \
-        --cert ./tests/tls/redis.crt \
-        --key ./tests/tls/redis.key \
+    ./src/keydb-cli --tls \
+        --cert ./tests/tls/keydb.crt \
+        --key ./tests/tls/keydb.key \
         --cacert ./tests/tls/ca.crt
 
 This will disable TCP and enable TLS on port 6379. It's also possible to have
 both TCP and TLS available, but you'll need to assign different ports.
 
 To make a Replica connect to the master using TLS, use `--tls-replication yes`,
-and to make Redis Cluster use TLS across nodes use `--tls-cluster yes`.
+and to make KeyDB Cluster use TLS across nodes use `--tls-cluster yes`.
 
 Connections
 -----------

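Two variants of the invocation above, sketched with arbitrary port choices rather than values taken from this document: serving TCP and TLS side by side just means giving `--port` and `--tls-port` different values, and a replica syncing over TLS adds `--tls-replication yes`:

    # TCP on 6379 and TLS on 6380 at the same time
    ./src/keydb-server --port 6379 --tls-port 6380 \
        --tls-cert-file ./tests/tls/keydb.crt \
        --tls-key-file ./tests/tls/keydb.key \
        --tls-ca-cert-file ./tests/tls/ca.crt

    # Replica that reaches its master over TLS
    ./src/keydb-server --tls-port 6381 --port 0 \
        --tls-cert-file ./tests/tls/keydb.crt \
        --tls-key-file ./tests/tls/keydb.key \
        --tls-ca-cert-file ./tests/tls/ca.crt \
        --tls-replication yes --replicaof 127.0.0.1 6380
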
@@ -56,18 +56,18 @@ Note that unlike Redis, KeyDB fully supports multithreading of TLS connections.
 To-Do List
 ----------
 
-- [ ] redis-benchmark support. The current implementation is a mix of using
+- [ ] keydb-benchmark support. The current implementation is a mix of using
 hiredis for parsing and basic networking (establishing connections), but
 directly manipulating sockets for most actions. This will need to be cleaned
 up for proper TLS support. The best approach is probably to migrate to hiredis
 async mode.
-- [ ] redis-cli `--slave` and `--rdb` support.
+- [ ] keydb-cli `--slave` and `--rdb` support.
 
 Multi-port
 ----------
 
 Consider the implications of allowing TLS to be configured on a separate port,
-making Redis listening on multiple ports:
+making KeyDB listening on multiple ports:
 
 1. Startup banner port notification
 2. Proctitle

@@ -31,7 +31,6 @@ $TCLSH tests/test_helper.tcl \
     --single unit/moduleapi/blockedclient \
     --single unit/moduleapi/moduleloadsave \
     --single unit/moduleapi/getkeys \
-    --single unit/moduleapi/timers \
     --single unit/moduleapi/test_lazyfree \
     --single unit/moduleapi/defrag \
     --single unit/moduleapi/hash \

@@ -829,3 +829,11 @@ int aeLockContention()
     avail += 0x10000;
     return avail - active;
 }
+
+void aeClosePipesForForkChild(aeEventLoop *el)
+{
+    close(el->fdCmdRead);
+    el->fdCmdRead = -1;
+    close(el->fdCmdWrite);
+    el->fdCmdWrite = -1;
+}

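For context (inferred from the sentinelRunPendingScripts and closeChildUnusedResourceAfterFork hunks later in this commit, not stated here): a forked child is expected to call this helper once per inherited event loop, covering every worker thread plus the module thread, so the child does not keep the parent's command-pipe descriptors open:

    /* Child-side call pattern, as used later in this commit. */
    for (int iel = 0; iel < cserver.cthreads; ++iel) {
        aeClosePipesForForkChild(g_pserver->rgthreadvar[iel].el);
    }
    aeClosePipesForForkChild(g_pserver->modulethreadvar.el);
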
src/ae.h (1 changed line)
@@ -161,6 +161,7 @@ int aeGetSetSize(aeEventLoop *eventLoop);
 aeEventLoop *aeGetCurrentEventLoop();
 int aeResizeSetSize(aeEventLoop *eventLoop, int setsize);
 void aeSetDontWait(aeEventLoop *eventLoop, int noWait);
+void aeClosePipesForForkChild(aeEventLoop *eventLoop);
 
 void setAeLockSetThreadSpinWorker(spin_worker worker);
 void aeAcquireLock();

@@ -688,6 +688,7 @@ void freeClusterLink(clusterLink *link) {
 }
 
 static void clusterConnAcceptHandler(connection *conn) {
+    serverAssert(ielFromEventLoop(serverTL->el) == IDX_EVENT_LOOP_MAIN);
     clusterLink *link;
 
     if (connGetState(conn) != CONN_STATE_CONNECTED) {
@@ -1791,6 +1792,8 @@ void clusterUpdateSlotsConfigWith(clusterNode *sender, uint64_t senderConfigEpoch
  * processing lead to some inconsistency error (for instance a PONG
  * received from the wrong sender ID). */
 int clusterProcessPacket(clusterLink *link) {
+    serverAssert(ielFromEventLoop(serverTL->el) == IDX_EVENT_LOOP_MAIN);
+
     clusterMsg *hdr = (clusterMsg*) link->rcvbuf;
     uint32_t totlen = ntohl(hdr->totlen);
     uint16_t type = ntohs(hdr->type);
@@ -3546,6 +3549,7 @@ void clusterHandleManualFailover(void) {
 
 /* This is executed 10 times every second */
 void clusterCron(void) {
+    serverAssert(ielFromEventLoop(serverTL->el) == IDX_EVENT_LOOP_MAIN);
     dictIterator *di;
     dictEntry *de;
     int update_state = 0;

@@ -3081,7 +3081,11 @@ static void backgroundSaveDoneHandlerSocket(int exitcode, bool fCancelled) {
     }
     if (g_pserver->rdb_child_exit_pipe!=-1)
         close(g_pserver->rdb_child_exit_pipe);
-    close(g_pserver->rdb_pipe_read);
+    auto pipeT = g_pserver->rdb_pipe_read;
+    aePostFunction(g_pserver->rgthreadvar[IDX_EVENT_LOOP_MAIN].el, [pipeT]{
+        aeDeleteFileEvent(serverTL->el, pipeT, AE_READABLE);
+        close(pipeT);
+    });
     g_pserver->rdb_child_exit_pipe = -1;
     g_pserver->rdb_pipe_read = -1;
     zfree(g_pserver->rdb_pipe_conns);

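A hedged reading of this hunk: the read end of the RDB pipe has an AE_READABLE handler registered on the main event loop, so deregistering and closing it is now posted to that loop with `aePostFunction` instead of being done inline from whichever thread runs the done-handler. The general shape of the pattern, with `fd` standing in for a hypothetical descriptor owned by the main loop:

    /* Sketch of the pattern only, not KeyDB source: defer fd teardown to
     * the thread that owns the event loop the handler is registered on. */
    int fd = somePipeReadEnd;   /* hypothetical descriptor */
    aePostFunction(g_pserver->rgthreadvar[IDX_EVENT_LOOP_MAIN].el, [fd]{
        aeDeleteFileEvent(serverTL->el, fd, AE_READABLE);
        close(fd);
    });
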
@@ -1985,6 +1985,7 @@ int main(int argc, const char **argv) {
 
         if (!config.csv) printf("\n");
     } while(config.loop);
+    zfree(data);
 
     if (config.redis_config != NULL) freeRedisConfig(config.redis_config);
 

@@ -2020,6 +2020,10 @@ void readSyncBulkPayload(connection *conn) {
     // Should we update our database, or create from scratch?
     int fUpdate = g_pserver->fActiveReplica || g_pserver->enable_multimaster;
     redisMaster *mi = (redisMaster*)connGetPrivateData(conn);
+    if (mi == nullptr) {
+        // We're about to be free'd so bail out
+        return;
+    }
 
     serverAssert(GlobalLocksAcquired());
     serverAssert(mi->master == nullptr);

@@ -878,6 +878,10 @@ void sentinelRunPendingScripts(void) {
     } else if (pid == 0) {
         /* Child */
         tlsCleanup();
+        for (int iel = 0; iel < cserver.cthreads; ++iel) {
+            aeClosePipesForForkChild(g_pserver->rgthreadvar[iel].el);
+        }
+        aeClosePipesForForkChild(g_pserver->modulethreadvar.el);
         execve(sj->argv[0],sj->argv,environ);
         /* If we are here an error occurred. */
         _exit(2); /* Don't retry execution. */

@@ -6623,6 +6623,11 @@ void closeChildUnusedResourceAfterFork() {
     if (g_pserver->cluster_enabled && g_pserver->cluster_config_file_lock_fd != -1)
         close(g_pserver->cluster_config_file_lock_fd); /* don't care if this fails */
 
+    for (int iel = 0; iel < cserver.cthreads; ++iel) {
+        aeClosePipesForForkChild(g_pserver->rgthreadvar[iel].el);
+    }
+    aeClosePipesForForkChild(g_pserver->modulethreadvar.el);
+
     /* Clear cserver.pidfile, this is the parent pidfile which should not
      * be touched (or deleted) by the child (on exit / crash) */
     zfree(cserver.pidfile);

@@ -36,7 +36,7 @@ test "Set allow-replica-migration no" {
 set master0_id [dict get [get_myself 0] id]
 test "Resharding all the master #0 slots away from it" {
     set output [exec \
-        ../../../src/redis-cli --cluster rebalance \
+        ../../../src/keydb-cli --cluster rebalance \
         127.0.0.1:[get_instance_attrib redis 0 port] \
         {*}[rediscli_tls_config "../../../tests"] \
         --cluster-weight ${master0_id}=0 >@ stdout ]
@@ -44,7 +44,7 @@ test "Resharding all the master #0 slots away from it" {
 
 test "Wait cluster to be stable" {
     wait_for_condition 1000 50 {
-        [catch {exec ../../../src/redis-cli --cluster \
+        [catch {exec ../../../src/keydb-cli --cluster \
         check 127.0.0.1:[get_instance_attrib redis 0 port] \
         {*}[rediscli_tls_config "../../../tests"] \
         }] == 0

@@ -8,18 +8,18 @@ proc config_set_all_nodes {keyword value} {
 
 proc fix_cluster {addr} {
     set code [catch {
-        exec ../../../src/redis-cli {*}[rediscli_tls_config "../../../tests"] --cluster fix $addr << yes
+        exec ../../../src/keydb-cli {*}[rediscli_tls_config "../../../tests"] --cluster fix $addr << yes
     } result]
     if {$code != 0} {
-        puts "redis-cli --cluster fix returns non-zero exit code, output below:\n$result"
+        puts "keydb-cli --cluster fix returns non-zero exit code, output below:\n$result"
     }
-    # Note: redis-cli --cluster fix may return a non-zero exit code if nodes don't agree,
+    # Note: keydb-cli --cluster fix may return a non-zero exit code if nodes don't agree,
     # but we can ignore that and rely on the check below.
     assert_cluster_state ok
     wait_for_condition 100 100 {
-        [catch {exec ../../../src/redis-cli {*}[rediscli_tls_config "../../../tests"] --cluster check $addr} result] == 0
+        [catch {exec ../../../src/keydb-cli {*}[rediscli_tls_config "../../../tests"] --cluster check $addr} result] == 0
     } else {
-        puts "redis-cli --cluster check returns non-zero exit code, output below:\n$result"
+        puts "keydb-cli --cluster check returns non-zero exit code, output below:\n$result"
         fail "Cluster could not settle with configuration"
     }
 }

@@ -1,35 +0,0 @@
-set testmodule [file normalize tests/modules/timers.so]
-set timercount 4000
-
-
-tags "modules" {
-    test {Ascending module timers can load in correctly} {
-        start_server [list overrides [list loadmodule "$testmodule ascending $timercount"]] {
-            wait_for_condition [expr round($timercount/10)] 20 {
-                [r timer.elapsed] == $timercount
-            } else {
-                fail "Server failed to load in timers with ascending periods"
-            }
-        }
-    }
-
-    test {Descending module timers can load in correctly} {
-        start_server [list overrides [list loadmodule "$testmodule descending $timercount"]] {
-            wait_for_condition [expr round($timercount/10)] 20 {
-                [r timer.elapsed] == $timercount
-            } else {
-                fail "Server failed to load in timers with descending periods"
-            }
-        }
-    }
-
-    test {Module timers with the same period can load in correctly} {
-        start_server [list overrides [list loadmodule "$testmodule same $timercount"]] {
-            wait_for_condition [expr round($timercount/10)] 20 {
-                [r timer.elapsed] == $timercount
-            } else {
-                fail "Server failed to load in timers with the same period"
-            }
-        }
-    }
-}