strictly honour memory limit
parent 779fa2af7a
commit 418d5eaf50

@@ -190,7 +190,7 @@ int dsSet(redisDb *db, robj *key, robj *val) {
     len = dsKeyToPath(db,buf,key);
     memcpy(buf2,buf,len);
-    snprintf(buf2+len,sizeof(buf2)-len,"%ld.%ld",(long)time(NULL),(long)val);
+    snprintf(buf2+len,sizeof(buf2)-len,"_%ld_%ld",(long)time(NULL),(long)val);
     fp = fopen(buf2,"w");
     if ((retval = rdbSaveKeyValuePair(fp,db,key,val,time(NULL))) == -1)
         return REDIS_ERR;
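
For context, the only change in this hunk is the temp-file suffix, from "%ld.%ld" to "_%ld_%ld". A minimal sketch of the resulting naming scheme (make_temp_path and its buffers are illustrative helpers, not part of the diff):

    #include <stdio.h>
    #include <time.h>

    /* Sketch: the value is written under "<keypath>_<unixtime>_<ptr>",
     * a per-write unique name built from wall-clock time plus the value
     * pointer, before dsSet moves it into its final place. */
    static void make_temp_path(char *dst, size_t dstlen,
                               const char *keypath, const void *val) {
        size_t len = (size_t)snprintf(dst, dstlen, "%s", keypath);
        snprintf(dst + len, dstlen - len, "_%ld_%ld",
                 (long)time(NULL), (long)val);
    }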
|
@@ -107,6 +107,7 @@
 */
 
 void spawnIOThread(void);
+int cacheScheduleIOPushJobs(int onlyloads);
 
 /* =================== Virtual Memory - Blocking Side ====================== */
 
@@ -210,10 +211,17 @@ int cacheFreeOneEntry(void) {
         }
     }
     if (best == NULL) {
-        /* FIXME: If there are objects that are in the write queue
-         * so we can't delete them we should block here, at the cost of
-         * slowness as the object cache memory limit is considered
-         * a hard limit. */
+        /* Was not able to free a single object... we should check if our
+         * IO queues have stuff in queue, and try to consume the queue
+         * otherwise we'll use an infinite amount of memory if changes to
+         * the dataset are faster than I/O */
+        if (listLength(server.cache_io_queue) > 0) {
+            cacheScheduleIOPushJobs(0);
+            waitEmptyIOJobsQueue();
+            processAllPendingIOJobs();
+            return REDIS_OK;
+        }
+        /* Nothing to free at all... */
         return REDIS_ERR;
     }
     key = dictGetEntryKey(best);
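
This hunk is the heart of the commit: when not even one cache entry can be evicted, the function now pushes the pending write queue to the IO threads and blocks until it drains, instead of silently giving up. A condensed sketch of the caller's loop this enables (cacheCron-style; the exact condition and the server.cache_max_memory field are assumptions about the surrounding code, not part of this diff):

    /* Sketch: keep reclaiming until under the limit. Since
     * cacheFreeOneEntry() now blocks on pending IO before failing, the
     * loop exits above the limit only when nothing at all can be freed,
     * which is what makes the memory limit strict. */
    while (zmalloc_used_memory() > server.cache_max_memory) {
        if (cacheFreeOneEntry() == REDIS_ERR) break; /* nothing left */
    }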
@@ -597,8 +605,6 @@ void dsCreateIOJob(int type, redisDb *db, robj *key, robj *val) {
 #define REDIS_IO_LOADINPROG 4
 #define REDIS_IO_SAVEINPROG 8
 
-void cacheScheduleIOPushJobs(int onlyloads);
-
 void cacheScheduleIOAddFlag(redisDb *db, robj *key, long flag) {
     struct dictEntry *de = dictFind(db->io_queued,key);
 
@@ -675,10 +681,10 @@ void cacheScheduleIO(redisDb *db, robj *key, int type) {
  * (not protected by lookupKey() that will block on keys in IO_SAVEINPROG
  * state. */
 #define MAX_IO_JOBS_QUEUE 100
-void cacheScheduleIOPushJobs(int onlyloads) {
+int cacheScheduleIOPushJobs(int onlyloads) {
     time_t now = time(NULL);
     listNode *ln;
-    int jobs, topush = 0;
+    int jobs, topush = 0, pushed = 0;
 
     /* Sync stuff on disk, but only if we have less
      * than MAX_IO_JOBS_QUEUE IO jobs. */
@@ -749,10 +755,12 @@ void cacheScheduleIOPushJobs(int onlyloads) {
             listDelNode(server.cache_io_queue,ln);
             decrRefCount(op->key);
             zfree(op);
+            pushed++;
         } else {
             break; /* too early */
         }
     }
+    return pushed;
 }
 
 void cacheCron(void) {
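
Taken together with the previous hunk, cacheScheduleIOPushJobs() now returns how many queued operations it actually turned into IO jobs. A hedged usage sketch (this caller is illustrative, not code from the commit):

    /* Sketch: distinguish "nothing was ready to push" from "work was
     * submitted", and only block on the IO threads in the latter case. */
    if (cacheScheduleIOPushJobs(0) > 0) {
        waitEmptyIOJobsQueue();      /* jobs submitted: wait for them */
        processAllPendingIOJobs();   /* apply completed jobs */
    }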
|