don't overload the IO job queue if there are already too many entries

antirez 2010-12-31 16:10:09 +01:00
parent e70f346651
commit 1c6a04f6b8
2 changed files with 16 additions and 1 deletion
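In short, the change makes cacheCron() sample how many jobs are already queued in server.io_newjobs and push at most enough new flush jobs to stay under 100 pending, so the main thread cannot flood the background IO threads faster than they drain the queue. Below is a minimal standalone sketch of that throttling pattern, assuming hypothetical names (MAX_PENDING_JOBS, pending_io_jobs, dirty_keys) rather than the actual Redis structures:

#include <stdio.h>

#define MAX_PENDING_JOBS 100

/* Hypothetical stand-ins for the shared IO job queue length and the
 * number of dirty keys waiting in the cache flush queue. */
static int pending_io_jobs = 97;
static int dirty_keys = 10;

int main(void) {
    /* Remaining capacity in the IO queue for this cron cycle; never negative. */
    int topush = MAX_PENDING_JOBS - pending_io_jobs;
    if (topush < 0) topush = 0;

    /* Drain the flush queue, but stop once the per-cycle budget is used up. */
    while (dirty_keys > 0) {
        if (topush == 0) break;   /* queue full enough: retry on the next cycle */
        topush--;
        dirty_keys--;
        pending_io_jobs++;
        printf("queued flush job, %d IO jobs now pending\n", pending_io_jobs);
    }
    printf("%d dirty keys left for the next cycle\n", dirty_keys);
    return 0;
}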


@@ -239,6 +239,8 @@ robj *dsGet(redisDb *db, robj *key, time_t *expire) {
     return val;
 readerr:
+    redisLog(REDIS_WARNING,"Read error reading reading %s. Corrupted key?",
+        buf);
     redisPanic("Unrecoverable error reading from disk store");
     return NULL; /* unreached */
 }
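Both remaining hunks touch server.io_newjobs, the job list shared between the main thread and the background IO threads, which is why the cacheCron() hunk further below samples its length inside a lockThreadedIO()/unlockThreadedIO() pair. A minimal standalone sketch of taking such a snapshot of shared state under a pthread mutex, assuming hypothetical names (io_mutex, io_pending) rather than the Redis helpers:

#include <pthread.h>
#include <stdio.h>

/* Hypothetical shared state: a job counter the worker threads update. */
static pthread_mutex_t io_mutex = PTHREAD_MUTEX_INITIALIZER;
static int io_pending = 0;

/* Copy the shared counter while holding the mutex, analogous to the
 * lockThreadedIO()/unlockThreadedIO() bracketing in the diff below. */
static int sample_pending_jobs(void) {
    int jobs;
    pthread_mutex_lock(&io_mutex);
    jobs = io_pending;
    pthread_mutex_unlock(&io_mutex);
    return jobs;
}

int main(void) {
    printf("pending IO jobs: %d\n", sample_pending_jobs());
    return 0;
}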


@@ -362,6 +362,8 @@ void *IOThreadEntryPoint(void *arg) {
             pthread_cond_wait(&server.io_condvar,&server.io_mutex);
             continue;
         }
+        redisLog(REDIS_DEBUG,"%ld IO jobs to process",
+            listLength(server.io_newjobs));
         ln = listFirst(server.io_newjobs);
         j = ln->value;
         listDelNode(server.io_newjobs,ln);
@@ -530,11 +532,22 @@ void cacheScheduleForFlush(redisDb *db, robj *key) {
 void cacheCron(void) {
     time_t now = time(NULL);
     listNode *ln;
+    int jobs, topush = 0;
+
+    /* Sync stuff on disk, but only if we have less than 100 IO jobs */
+    lockThreadedIO();
+    jobs = listLength(server.io_newjobs);
+    unlockThreadedIO();
+
+    topush = 100-jobs;
+    if (topush < 0) topush = 0;
 
-    /* Sync stuff on disk */
     while((ln = listFirst(server.cache_flush_queue)) != NULL) {
         dirtykey *dk = ln->value;
 
+        if (!topush) break;
+        topush--;
+
         if ((now - dk->ctime) >= server.cache_flush_delay) {
             struct dictEntry *de;
             robj *val;