MEMORY OVERHEAD refactored into a generic API.

This commit is contained in:
antirez 2016-09-15 09:37:55 +02:00
parent 09a50d34a2
commit be5439bde3

View File

@ -853,41 +853,49 @@ void objectCommand(client *c) {
}
}
/* The memory command will eventually be a complete interface for the
* memory introspection capabilities of Redis.
*
* Usage: MEMORY usage <key> */
void memoryCommand(client *c) {
robj *o;
/* This structure is returned by the getMemoryOverheadData() function in
 * order to return memory overhead information. The caller owns the returned
 * structure and must release it with freeMemoryOverheadData(). */
struct memoh {
    size_t total_allocated;   /* Total bytes reported by zmalloc_used_memory(). */
    size_t startup_allocated; /* server.initial_memory_usage: bytes already
                               * allocated when the server finished startup. */
    size_t repl_backlog;      /* zmalloc_size() of the replication backlog,
                               * or 0 if no backlog is allocated. */
    size_t clients_slaves;    /* Query/output buffers + client struct of
                               * slave clients. */
    size_t clients_normal;    /* Same, for non-slave clients. */
    size_t aof_buffer;        /* AOF buffer plus AOF rewrite buffer size. */
    size_t overhead_total;    /* Sum of all the overhead fields above plus the
                               * per-db hash table overhead below. */
    size_t dataset;           /* total_allocated - overhead_total: memory
                               * attributed to the dataset itself. */
    size_t num_dbs;           /* Number of entries in the 'db' array below. */
    struct {
        size_t dbid;                /* Database ID this entry refers to. */
        size_t overhead_ht_main;    /* Main keyspace dict hash table overhead
                                     * (entries, slots, robj headers). */
        size_t overhead_ht_expires; /* Expires dict hash table overhead. */
    } *db;                    /* Per-database overhead entries, grown with
                               * zrealloc(); only non-empty DBs are listed. */
};
if (!strcasecmp(c->argv[1]->ptr,"usage") && c->argc == 3) {
if ((o = objectCommandLookupOrReply(c,c->argv[2],shared.nullbulk))
== NULL) return;
size_t usage = objectComputeSize(o,OBJ_COMPUTE_SIZE_DEF_SAMPLES);
usage += sdsAllocSize(c->argv[1]->ptr);
usage += sizeof(dictEntry);
addReplyLongLong(c,usage);
} else if (!strcasecmp(c->argv[1]->ptr,"overhead") && c->argc == 2) {
/* Release data obtained with getMemoryOverheadData(): the heap allocated
 * per-database array is freed first, then the structure itself. */
void freeMemoryOverheadData(struct memoh *mh) {
    zfree(mh->db); /* Allocated with zrealloc(); NULL if no DB had keys. */
    zfree(mh);
}
/* Return a struct memoh filled with memory overhead information used
* for the MEMORY OVERHEAD and INFO command. The returned structure
* pointer should be freed calling freeMemoryOverheadData(). */
struct memoh *getMemoryOverheadData(void) {
int j;
size_t mem_total = 0;
size_t mem = 0;
size_t zmalloc_used = zmalloc_used_memory();
struct memoh *mh = zcalloc(sizeof(*mh));
int toplevel_keys = 8;
void *tlk = addDeferredMultiBulkLength(c);
addReplyBulkCString(c,"total.allocated");
addReplyLongLong(c,zmalloc_used);
addReplyBulkCString(c,"startup.allocated");
addReplyLongLong(c,server.initial_memory_usage);
mh->total_allocated = zmalloc_used;
mh->startup_allocated = server.initial_memory_usage;
mem_total += server.initial_memory_usage;
mem = 0;
if (server.repl_backlog)
mem += zmalloc_size(server.repl_backlog);
addReplyBulkCString(c,"replication.backlog");
addReplyLongLong(c,mem);
mh->repl_backlog = mem;
mem_total += mem;
mem = 0;
@ -903,8 +911,7 @@ void memoryCommand(client *c) {
mem += sizeof(client);
}
}
addReplyBulkCString(c,"clients.slaves");
addReplyLongLong(c,mem);
mh->clients_slaves = mem;
mem_total+=mem;
mem = 0;
@ -922,8 +929,7 @@ void memoryCommand(client *c) {
mem += sizeof(client);
}
}
addReplyBulkCString(c,"clients.normal");
addReplyLongLong(c,mem);
mh->clients_normal = mem;
mem_total+=mem;
mem = 0;
@ -931,8 +937,7 @@ void memoryCommand(client *c) {
mem += sdslen(server.aof_buf);
mem += aofRewriteBufferSize();
}
addReplyBulkCString(c,"aof.buffer");
addReplyLongLong(c,mem);
mh->aof_buffer = mem;
mem_total+=mem;
for (j = 0; j < server.dbnum; j++) {
@ -940,33 +945,85 @@ void memoryCommand(client *c) {
long long keyscount = dictSize(db->dict);
if (keyscount==0) continue;
char dbname[32];
toplevel_keys++;
snprintf(dbname,sizeof(dbname),"db.%d",j);
addReplyBulkCString(c,dbname);
addReplyMultiBulkLen(c,4);
mh->db = zrealloc(mh->db,sizeof(mh->db[0])*(mh->num_dbs+1));
mh->db[mh->num_dbs].dbid = j;
mem = dictSize(db->dict) * sizeof(dictEntry) +
dictSlots(db->dict) * sizeof(dictEntry*) +
dictSize(db->dict) * sizeof(robj);
addReplyBulkCString(c,"overhead.hashtable.main");
addReplyLongLong(c,mem);
mh->db[mh->num_dbs].overhead_ht_main = mem;
mem_total+=mem;
mem = dictSize(db->expires) * sizeof(dictEntry) +
dictSlots(db->expires) * sizeof(dictEntry*);
addReplyBulkCString(c,"overhead.hashtable.expires");
addReplyLongLong(c,mem);
mh->db[mh->num_dbs].overhead_ht_expires = mem;
mem_total+=mem;
mh->num_dbs++;
}
mh->overhead_total = mem_total;
mh->dataset = zmalloc_used - mem_total;
return mh;
}
/* The memory command will eventually be a complete interface for the
* memory introspection capabilities of Redis.
*
* Usage: MEMORY usage <key> */
void memoryCommand(client *c) {
robj *o;
if (!strcasecmp(c->argv[1]->ptr,"usage") && c->argc == 3) {
if ((o = objectCommandLookupOrReply(c,c->argv[2],shared.nullbulk))
== NULL) return;
size_t usage = objectComputeSize(o,OBJ_COMPUTE_SIZE_DEF_SAMPLES);
usage += sdsAllocSize(c->argv[1]->ptr);
usage += sizeof(dictEntry);
addReplyLongLong(c,usage);
} else if (!strcasecmp(c->argv[1]->ptr,"overhead") && c->argc == 2) {
struct memoh *mh = getMemoryOverheadData();
addReplyMultiBulkLen(c,(8+mh->num_dbs)*2);
addReplyBulkCString(c,"total.allocated");
addReplyLongLong(c,mh->total_allocated);
addReplyBulkCString(c,"startup.allocated");
addReplyLongLong(c,mh->startup_allocated);
addReplyBulkCString(c,"replication.backlog");
addReplyLongLong(c,mh->repl_backlog);
addReplyBulkCString(c,"clients.slaves");
addReplyLongLong(c,mh->clients_slaves);
addReplyBulkCString(c,"clients.normal");
addReplyLongLong(c,mh->clients_normal);
addReplyBulkCString(c,"aof.buffer");
addReplyLongLong(c,mh->aof_buffer);
for (size_t j = 0; j < mh->num_dbs; j++) {
char dbname[32];
snprintf(dbname,sizeof(dbname),"db.%zd",mh->db[j].dbid);
addReplyBulkCString(c,dbname);
addReplyMultiBulkLen(c,4);
addReplyBulkCString(c,"overhead.hashtable.main");
addReplyLongLong(c,mh->db[j].overhead_ht_main);
addReplyBulkCString(c,"overhead.hashtable.expires");
addReplyLongLong(c,mh->db[j].overhead_ht_expires);
}
addReplyBulkCString(c,"overhead.total");
addReplyLongLong(c,mem_total);
addReplyLongLong(c,mh->overhead_total);
addReplyBulkCString(c,"dataset");
addReplyLongLong(c,zmalloc_used - mem_total);
addReplyLongLong(c,mh->dataset);
setDeferredMultiBulkLength(c,tlk,toplevel_keys*2);
freeMemoryOverheadData(mh);
} else {
addReplyError(c,"Syntax error. Try MEMORY [usage <key>] | [overhead]");
}