don't overload the IO job queue if there are already too many entries
parent e70f346651
commit 1c6a04f6b8
@@ -239,6 +239,8 @@ robj *dsGet(redisDb *db, robj *key, time_t *expire) {
    return val;

readerr:
    redisLog(REDIS_WARNING,"Read error reading %s. Corrupted key?",
        buf);
    redisPanic("Unrecoverable error reading from disk store");
    return NULL; /* unreached */
}
@@ -362,6 +362,8 @@ void *IOThreadEntryPoint(void *arg) {
            pthread_cond_wait(&server.io_condvar,&server.io_mutex);
            continue;
        }
        redisLog(REDIS_DEBUG,"%ld IO jobs to process",
            listLength(server.io_newjobs));
        ln = listFirst(server.io_newjobs);
        j = ln->value;
        listDelNode(server.io_newjobs,ln);
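The hunk above shows the consumer side of the IO queue: the worker thread waits on server.io_condvar while server.io_newjobs is empty, then pops the first job while still holding server.io_mutex. Below is a minimal, standalone sketch of that condition-variable worker pattern; it is not the Redis code, and all names (queue, worker, done, io_mutex, io_condvar) are illustrative.

/* Sketch only: a worker thread that sleeps on a condition variable while
 * the job queue is empty and pops one job at a time under the lock. */
#include <pthread.h>
#include <stdio.h>

#define QUEUE_CAP 128

static int queue[QUEUE_CAP];
static int queue_len = 0;
static int done = 0;                 /* set once no more jobs will arrive */
static pthread_mutex_t io_mutex = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t io_condvar = PTHREAD_COND_INITIALIZER;

static void *worker(void *arg) {
    (void)arg;
    pthread_mutex_lock(&io_mutex);
    while (1) {
        if (queue_len == 0) {
            if (done) break;
            /* Nothing to do: release the lock and sleep until signaled. */
            pthread_cond_wait(&io_condvar, &io_mutex);
            continue;
        }
        /* Pop a job while still holding the lock. */
        int job = queue[--queue_len];
        pthread_mutex_unlock(&io_mutex);
        printf("processing job %d\n", job);   /* do the work unlocked */
        pthread_mutex_lock(&io_mutex);
    }
    pthread_mutex_unlock(&io_mutex);
    return NULL;
}

int main(void) {
    pthread_t tid;
    pthread_create(&tid, NULL, worker, NULL);

    pthread_mutex_lock(&io_mutex);
    for (int i = 0; i < 5; i++) queue[queue_len++] = i;  /* enqueue jobs */
    done = 1;                                            /* no more producers */
    pthread_cond_signal(&io_condvar);
    pthread_mutex_unlock(&io_mutex);

    pthread_join(tid, NULL);
    return 0;
}

Checking done under the lock before waiting is what avoids a lost wakeup if the producer finishes before the worker ever sleeps.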
@@ -530,11 +532,22 @@ void cacheScheduleForFlush(redisDb *db, robj *key) {
void cacheCron(void) {
    time_t now = time(NULL);
    listNode *ln;
    int jobs, topush = 0;

    /* Sync stuff on disk, but only if we have less than 100 IO jobs */
    lockThreadedIO();
    jobs = listLength(server.io_newjobs);
    unlockThreadedIO();

    topush = 100-jobs;
    if (topush < 0) topush = 0;

    /* Sync stuff on disk */
    while((ln = listFirst(server.cache_flush_queue)) != NULL) {
        dirtykey *dk = ln->value;

        if (!topush) break;
        topush--;

        if ((now - dk->ctime) >= server.cache_flush_delay) {
            struct dictEntry *de;
            robj *val;