Make active client balancing a configurable option
This commit is contained in:
parent
c910471914
commit
95c0146ec9
@ -2070,3 +2070,12 @@ server-threads 2
|
|||||||
#
|
#
|
||||||
# By default KeyDB sets this to 2.
|
# By default KeyDB sets this to 2.
|
||||||
replica-weighting-factor 2
|
replica-weighting-factor 2
|
||||||
|
|
||||||
|
# Should KeyDB make active attempts at balancing clients across threads? This can impact
|
||||||
|
# performance when accepting new clients. By default this is enabled. If disabled there is still
|
||||||
|
# a best effort from the kernel to distribute across threads with SO_REUSEPORT but it will not
|
||||||
|
# be as fair.
|
||||||
|
#
|
||||||
|
# By default this is enabled.
|
||||||
|
#
|
||||||
|
active-client-balancing yes
|
@ -2779,6 +2779,7 @@ standardConfig configs[] = {
|
|||||||
createBoolConfig("replica-announced", NULL, MODIFIABLE_CONFIG, g_pserver->replica_announced, 1, NULL, NULL),
|
createBoolConfig("replica-announced", NULL, MODIFIABLE_CONFIG, g_pserver->replica_announced, 1, NULL, NULL),
|
||||||
createBoolConfig("enable-async-commands", NULL, MODIFIABLE_CONFIG, g_pserver->enable_async_commands, 1, NULL, NULL),
|
createBoolConfig("enable-async-commands", NULL, MODIFIABLE_CONFIG, g_pserver->enable_async_commands, 1, NULL, NULL),
|
||||||
createBoolConfig("multithread-load-enabled", NULL, MODIFIABLE_CONFIG, g_pserver->multithread_load_enabled, 0, NULL, NULL),
|
createBoolConfig("multithread-load-enabled", NULL, MODIFIABLE_CONFIG, g_pserver->multithread_load_enabled, 0, NULL, NULL),
|
||||||
|
createBoolConfig("active-client-balancing", NULL, MODIFIABLE_CONFIG, g_pserver->active_client_balancing, 1, NULL, NULL),
|
||||||
|
|
||||||
/* String Configs */
|
/* String Configs */
|
||||||
createStringConfig("aclfile", NULL, IMMUTABLE_CONFIG, ALLOW_EMPTY_STRING, g_pserver->acl_filename, "", NULL, NULL),
|
createStringConfig("aclfile", NULL, IMMUTABLE_CONFIG, ALLOW_EMPTY_STRING, g_pserver->acl_filename, "", NULL, NULL),
|
||||||
|
@ -1319,7 +1319,7 @@ void acceptOnThread(connection *conn, int flags, char *cip)
|
|||||||
int ielCur = ielFromEventLoop(serverTL->el);
|
int ielCur = ielFromEventLoop(serverTL->el);
|
||||||
bool fBootLoad = (g_pserver->loading == LOADING_BOOT);
|
bool fBootLoad = (g_pserver->loading == LOADING_BOOT);
|
||||||
|
|
||||||
int ielTarget = 0;
|
int ielTarget = ielCur;
|
||||||
if (fBootLoad)
|
if (fBootLoad)
|
||||||
{
|
{
|
||||||
ielTarget = IDX_EVENT_LOOP_MAIN; // During load only the main thread is active
|
ielTarget = IDX_EVENT_LOOP_MAIN; // During load only the main thread is active
|
||||||
@ -1330,7 +1330,7 @@ void acceptOnThread(connection *conn, int flags, char *cip)
|
|||||||
while (cserver.cthreads > 1 && ielTarget == IDX_EVENT_LOOP_MAIN)
|
while (cserver.cthreads > 1 && ielTarget == IDX_EVENT_LOOP_MAIN)
|
||||||
ielTarget = rand() % cserver.cthreads;
|
ielTarget = rand() % cserver.cthreads;
|
||||||
}
|
}
|
||||||
else
|
else if (g_pserver->active_client_balancing)
|
||||||
{
|
{
|
||||||
// Cluster connections are more transient, so it's not worth the cost to balance
|
// Cluster connections are more transient, so it's not worth the cost to balance
|
||||||
// we can trust that SO_REUSEPORT is doing its job of distributing connections
|
// we can trust that SO_REUSEPORT is doing its job of distributing connections
|
||||||
|
@ -2635,6 +2635,7 @@ struct redisServer {
|
|||||||
|
|
||||||
int enable_async_commands;
|
int enable_async_commands;
|
||||||
int multithread_load_enabled = 0;
|
int multithread_load_enabled = 0;
|
||||||
|
int active_client_balancing = 1;
|
||||||
|
|
||||||
long long repl_batch_offStart = -1;
|
long long repl_batch_offStart = -1;
|
||||||
long long repl_batch_idxStart = -1;
|
long long repl_batch_idxStart = -1;
|
||||||
|
Loading…
x
Reference in New Issue
Block a user