/*
 * Copyright (c) 2009-2012, Salvatore Sanfilippo <antirez at gmail dot com>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 *   * Redistributions of source code must retain the above copyright notice,
 *     this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 *   * Neither the name of Redis nor the names of its contributors may be used
 *     to endorse or promote products derived from this software without
 *     specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include "server.h"
#include "intset.h"  /* Compact integer set structure */

/*-----------------------------------------------------------------------------
 * Set Commands
 *----------------------------------------------------------------------------*/

void sunionDiffGenericCommand(client *c, robj **setkeys, int setnum,
                              robj *dstkey, int op);

/* Factory method to return a set that *can* hold "value". When the object has
 * an integer-encodable value, an intset will be returned. Otherwise a listpack
 * or a regular hash table.
 *
 * The size hint indicates approximately how many items will be added, which is
 * used to determine the initial representation. */
robj *setTypeCreate(sds value, size_t size_hint) {
    if (isSdsRepresentableAsLongLong(value,NULL) == C_OK && size_hint <= server.set_max_intset_entries)
        return createIntsetObject();
    if (size_hint <= server.set_max_listpack_entries)
        return createSetListpackObject();

    /* We may oversize the set by using the hint if the hint is not accurate,
     * but we will assume this is acceptable to maximize performance. */
    robj *o = createSetObject();
    dictExpand(o->ptr, size_hint);
    return o;
}
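
/* Illustrative usage sketch (an editorial addition with a hypothetical helper
 * name, kept out of the build via #if 0): how the factory's inputs map to
 * encodings, assuming the default limits (set-max-intset-entries 512,
 * set-max-listpack-entries 128). Cleanup is elided for brevity. */
#if 0
static void setTypeCreateExample(void) {
    robj *a = setTypeCreate(sdsnew("12345"), 3);      /* numeric value, small hint -> intset */
    robj *b = setTypeCreate(sdsnew("hello"), 3);      /* non-numeric, small hint -> listpack */
    robj *c = setTypeCreate(sdsnew("hello"), 100000); /* hint above both limits -> hash table */
}
#endif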

/* Check if the existing set should be converted to another encoding based on
 * the size hint. */
void setTypeMaybeConvert(robj *set, size_t size_hint) {
    if ((set->encoding == OBJ_ENCODING_LISTPACK && size_hint > server.set_max_listpack_entries)
        || (set->encoding == OBJ_ENCODING_INTSET && size_hint > server.set_max_intset_entries))
    {
        setTypeConvertAndExpand(set, OBJ_ENCODING_HT, size_hint, 1);
    }
}

/* Return the maximum number of entries to store in an intset. */
static size_t intsetMaxEntries(void) {
    size_t max_entries = server.set_max_intset_entries;
    /* limit to 1G entries due to intset internals. */
    if (max_entries >= 1<<30) max_entries = 1<<30;
    return max_entries;
}

/* Converts intset to HT if it contains too many entries. */
static void maybeConvertIntset(robj *subject) {
    serverAssert(subject->encoding == OBJ_ENCODING_INTSET);
    if (intsetLen(subject->ptr) > intsetMaxEntries())
        setTypeConvert(subject,OBJ_ENCODING_HT);
}

/* When you know all set elements are integers, call this to convert the set to
 * an intset. No conversion happens if the set contains too many entries for an
 * intset. */
static void maybeConvertToIntset(robj *set) {
    if (set->encoding == OBJ_ENCODING_INTSET) return; /* already intset */
    if (setTypeSize(set) > intsetMaxEntries()) return; /* can't use intset */
    intset *is = intsetNew();
    char *str;
    size_t len;
    int64_t llval;
    setTypeIterator *si = setTypeInitIterator(set);
    while (setTypeNext(si, &str, &len, &llval) != -1) {
        if (str) {
            /* If the element is returned as a string, we may be able to
             * convert it to an integer. This happens for OBJ_ENCODING_HT. */
            serverAssert(string2ll(str, len, (long long *)&llval));
        }
        uint8_t success = 0;
        is = intsetAdd(is, llval, &success);
        serverAssert(success);
    }
    setTypeReleaseIterator(si);
    freeSetObject(set); /* frees the internals but not robj itself */
    set->ptr = is;
    set->encoding = OBJ_ENCODING_INTSET;
}

/* Add the specified sds value into a set.
 *
 * If the value was already a member of the set, nothing is done and 0 is
 * returned, otherwise the new element is added and 1 is returned. */
int setTypeAdd(robj *subject, sds value) {
    return setTypeAddAux(subject, value, sdslen(value), 0, 1);
}

/* Add member. This function is optimized for the different encodings. The
 * value can be provided as an sds string (indicated by passing str_is_sds =
 * 1), as a string and length (str_is_sds = 0), or as an integer, in which case
 * str is set to NULL and llval is provided instead.
 *
 * Returns 1 if the value was added and 0 if it was already a member. */
int setTypeAddAux(robj *set, char *str, size_t len, int64_t llval, int str_is_sds) {
    char tmpbuf[LONG_STR_SIZE];
    if (!str) {
        if (set->encoding == OBJ_ENCODING_INTSET) {
            uint8_t success = 0;
            set->ptr = intsetAdd(set->ptr, llval, &success);
            if (success) maybeConvertIntset(set);
            return success;
        }
        /* Convert int to string. */
        len = ll2string(tmpbuf, sizeof tmpbuf, llval);
        str = tmpbuf;
        str_is_sds = 0;
    }

    serverAssert(str);
    if (set->encoding == OBJ_ENCODING_HT) {
        /* Avoid duping the string if it is an sds string. */
        sds sdsval = str_is_sds ? (sds)str : sdsnewlen(str, len);
        dict *ht = set->ptr;
        void *position = dictFindPositionForInsert(ht, sdsval, NULL);
        if (position) {
            /* Key doesn't already exist in the set. Add it but dup the key. */
            if (sdsval == str) sdsval = sdsdup(sdsval);
            dictInsertAtPosition(ht, sdsval, position);
        } else if (sdsval != str) {
            /* String is already a member. Free our temporary sds copy. */
            sdsfree(sdsval);
        }
        return (position != NULL);
    } else if (set->encoding == OBJ_ENCODING_LISTPACK) {
        unsigned char *lp = set->ptr;
        unsigned char *p = lpFirst(lp);
        if (p != NULL)
            p = lpFind(lp, p, (unsigned char*)str, len, 0);
        if (p == NULL) {
            /* Not found. */
            if (lpLength(lp) < server.set_max_listpack_entries &&
                len <= server.set_max_listpack_value &&
                lpSafeToAdd(lp, len))
            {
                if (str == tmpbuf) {
                    /* This came in as an integer so we can avoid parsing it
                     * again. TODO: Create and use lpFindInteger; don't go via
                     * string. */
                    lp = lpAppendInteger(lp, llval);
                } else {
                    lp = lpAppend(lp, (unsigned char*)str, len);
                }
                set->ptr = lp;
            } else {
                /* Size limit is reached. Convert to hashtable and add. */
                setTypeConvertAndExpand(set, OBJ_ENCODING_HT, lpLength(lp) + 1, 1);
                serverAssert(dictAdd(set->ptr,sdsnewlen(str,len),NULL) == DICT_OK);
            }
            return 1;
        }
    } else if (set->encoding == OBJ_ENCODING_INTSET) {
        long long value;
        if (string2ll(str, len, &value)) {
            uint8_t success = 0;
            set->ptr = intsetAdd(set->ptr,value,&success);
            if (success) {
                maybeConvertIntset(set);
                return 1;
            }
        } else {
            /* Check if listpack encoding is safe not to cross any threshold. */
            size_t maxelelen = 0, totsize = 0;
            unsigned long n = intsetLen(set->ptr);
            if (n != 0) {
                size_t elelen1 = sdigits10(intsetMax(set->ptr));
                size_t elelen2 = sdigits10(intsetMin(set->ptr));
                maxelelen = max(elelen1, elelen2);
                size_t s1 = lpEstimateBytesRepeatedInteger(intsetMax(set->ptr), n);
                size_t s2 = lpEstimateBytesRepeatedInteger(intsetMin(set->ptr), n);
                totsize = max(s1, s2);
            }
            if (intsetLen((const intset*)set->ptr) < server.set_max_listpack_entries &&
                len <= server.set_max_listpack_value &&
                maxelelen <= server.set_max_listpack_value &&
                lpSafeToAdd(NULL, totsize + len))
            {
                /* In the "safe to add" check above we assumed all elements in
                 * the intset are of size maxelelen. This is an upper bound. */
                setTypeConvertAndExpand(set, OBJ_ENCODING_LISTPACK,
                                        intsetLen(set->ptr) + 1, 1);
                unsigned char *lp = set->ptr;
                lp = lpAppend(lp, (unsigned char *)str, len);
                lp = lpShrinkToFit(lp);
                set->ptr = lp;
                return 1;
            } else {
                setTypeConvertAndExpand(set, OBJ_ENCODING_HT,
                                        intsetLen(set->ptr) + 1, 1);
                /* The set *was* an intset and this value is not integer
                 * encodable, so dictAdd should always work. */
                serverAssert(dictAdd(set->ptr,sdsnewlen(str,len),NULL) == DICT_OK);
                return 1;
            }
        }
    } else {
        serverPanic("Unknown set encoding");
    }
    return 0;
}
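
/* Illustrative usage sketch (an editorial addition with a hypothetical helper
 * name, kept out of the build via #if 0): the three calling conventions
 * accepted by the *Aux functions. 's' is assumed to be an existing set
 * object. */
#if 0
static void setTypeAddAuxExample(robj *s) {
    sds val = sdsnew("member");
    setTypeAddAux(s, val, sdslen(val), 0, 1);   /* as an sds string */
    setTypeAddAux(s, (char*)"member", 6, 0, 0); /* as a plain buffer + length */
    setTypeAddAux(s, NULL, 0, 42, 0);           /* as an integer (str == NULL) */
    sdsfree(val);                               /* the set keeps its own copy */
}
#endif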

/* Deletes a value provided as an sds string from the set. Returns 1 if the
 * value was deleted and 0 if it was not a member of the set. */
int setTypeRemove(robj *setobj, sds value) {
    return setTypeRemoveAux(setobj, value, sdslen(value), 0, 1);
}

/* Remove a member. This function is optimized for the different encodings. The
 * value can be provided as an sds string (indicated by passing str_is_sds =
 * 1), as a string and length (str_is_sds = 0), or as an integer, in which case
 * str is set to NULL and llval is provided instead.
 *
 * Returns 1 if the value was deleted and 0 if it was not a member of the set. */
int setTypeRemoveAux(robj *setobj, char *str, size_t len, int64_t llval, int str_is_sds) {
    char tmpbuf[LONG_STR_SIZE];
    if (!str) {
        if (setobj->encoding == OBJ_ENCODING_INTSET) {
            int success;
            setobj->ptr = intsetRemove(setobj->ptr,llval,&success);
            return success;
        }
        len = ll2string(tmpbuf, sizeof tmpbuf, llval);
        str = tmpbuf;
        str_is_sds = 0;
    }

    if (setobj->encoding == OBJ_ENCODING_HT) {
        sds sdsval = str_is_sds ? (sds)str : sdsnewlen(str, len);
        int deleted = (dictDelete(setobj->ptr, sdsval) == DICT_OK);
        if (deleted && htNeedsResize(setobj->ptr)) dictResize(setobj->ptr);
        if (sdsval != str) sdsfree(sdsval); /* free temp copy */
        return deleted;
    } else if (setobj->encoding == OBJ_ENCODING_LISTPACK) {
        unsigned char *lp = setobj->ptr;
        unsigned char *p = lpFirst(lp);
        if (p == NULL) return 0;
        p = lpFind(lp, p, (unsigned char*)str, len, 0);
        if (p != NULL) {
            lp = lpDelete(lp, p, NULL);
            setobj->ptr = lp;
            return 1;
        }
    } else if (setobj->encoding == OBJ_ENCODING_INTSET) {
        long long llval;
        if (string2ll(str, len, &llval)) {
            int success;
            setobj->ptr = intsetRemove(setobj->ptr,llval,&success);
            if (success) return 1;
        }
    } else {
        serverPanic("Unknown set encoding");
    }
    return 0;
}

/* Check if an sds string is a member of the set. Returns 1 if the value is a
 * member of the set and 0 if it isn't. */
int setTypeIsMember(robj *subject, sds value) {
    return setTypeIsMemberAux(subject, value, sdslen(value), 0, 1);
}

/* Membership checking optimized for the different encodings. The value can be
 * provided as an sds string (indicated by passing str_is_sds = 1), as a string
 * and length (str_is_sds = 0), or as an integer, in which case str is set to
 * NULL and llval is provided instead.
 *
 * Returns 1 if the value is a member of the set and 0 if it isn't. */
int setTypeIsMemberAux(robj *set, char *str, size_t len, int64_t llval, int str_is_sds) {
    char tmpbuf[LONG_STR_SIZE];
    if (!str) {
        if (set->encoding == OBJ_ENCODING_INTSET)
            return intsetFind(set->ptr, llval);
        len = ll2string(tmpbuf, sizeof tmpbuf, llval);
        str = tmpbuf;
        str_is_sds = 0;
    }

    if (set->encoding == OBJ_ENCODING_LISTPACK) {
        unsigned char *lp = set->ptr;
        unsigned char *p = lpFirst(lp);
        return p && lpFind(lp, p, (unsigned char*)str, len, 0);
    } else if (set->encoding == OBJ_ENCODING_INTSET) {
        long long llval;
        return string2ll(str, len, &llval) && intsetFind(set->ptr, llval);
    } else if (set->encoding == OBJ_ENCODING_HT && str_is_sds) {
        return dictFind(set->ptr, (sds)str) != NULL;
    } else if (set->encoding == OBJ_ENCODING_HT) {
        sds sdsval = sdsnewlen(str, len);
        int result = dictFind(set->ptr, sdsval) != NULL;
        sdsfree(sdsval);
        return result;
    } else {
        serverPanic("Unknown set encoding");
    }
}
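
/* Illustrative usage sketch (an editorial addition with a hypothetical helper
 * name, kept out of the build via #if 0): membership checks via the
 * plain-buffer and integer conventions, so the caller does not need to build
 * an sds value first. */
#if 0
static void setTypeIsMemberAuxExample(robj *s) {
    int has_str = setTypeIsMemberAux(s, (char*)"foo", 3, 0, 0); /* buffer + length */
    int has_int = setTypeIsMemberAux(s, NULL, 0, 42, 0);        /* integer 42 */
    UNUSED(has_str);
    UNUSED(has_int);
}
#endif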

setTypeIterator *setTypeInitIterator(robj *subject) {
    setTypeIterator *si = zmalloc(sizeof(setTypeIterator));
    si->subject = subject;
    si->encoding = subject->encoding;
    if (si->encoding == OBJ_ENCODING_HT) {
        si->di = dictGetIterator(subject->ptr);
    } else if (si->encoding == OBJ_ENCODING_INTSET) {
        si->ii = 0;
    } else if (si->encoding == OBJ_ENCODING_LISTPACK) {
        si->lpi = NULL;
    } else {
        serverPanic("Unknown set encoding");
    }
    return si;
}

void setTypeReleaseIterator(setTypeIterator *si) {
    if (si->encoding == OBJ_ENCODING_HT)
        dictReleaseIterator(si->di);
    zfree(si);
}

/* Move to the next entry in the set. Returns the object at the current
 * position, as a string or as an integer.
 *
 * Since set elements can internally be stored as SDS strings, char buffers or
 * simple arrays of integers, setTypeNext returns the encoding of the
 * set object you are iterating, and will populate the appropriate pointers
 * (str and len) or (llele) depending on whether the value is stored as a string
 * or as an integer internally.
 *
 * If OBJ_ENCODING_HT is returned, then str points to an sds string and can be
 * used as such. If OBJ_ENCODING_INTSET, then llele is populated and str is
 * set to NULL. If OBJ_ENCODING_LISTPACK is returned, the value can be
 * either a string or an integer. If *str is not NULL, then str and len are
 * populated with the string content and length. Otherwise, llele is populated
 * with an integer value.
 *
 * Note that the str, len and llele pointers should all be passed and cannot
 * be NULL, since the function will defensively populate the unused fields
 * with values which are easy to trap if misused.
 *
 * When there are no more elements -1 is returned. */
int setTypeNext(setTypeIterator *si, char **str, size_t *len, int64_t *llele) {
    if (si->encoding == OBJ_ENCODING_HT) {
        dictEntry *de = dictNext(si->di);
        if (de == NULL) return -1;
        *str = dictGetKey(de);
        *len = sdslen(*str);
        *llele = -123456789; /* Not needed. Defensive. */
    } else if (si->encoding == OBJ_ENCODING_INTSET) {
        if (!intsetGet(si->subject->ptr,si->ii++,llele))
            return -1;
        *str = NULL;
    } else if (si->encoding == OBJ_ENCODING_LISTPACK) {
        unsigned char *lp = si->subject->ptr;
        unsigned char *lpi = si->lpi;
        if (lpi == NULL) {
            lpi = lpFirst(lp);
        } else {
            lpi = lpNext(lp, lpi);
        }
        if (lpi == NULL) return -1;
        si->lpi = lpi;
        unsigned int l;
        *str = (char *)lpGetValue(lpi, &l, (long long *)llele);
        *len = (size_t)l;
    } else {
        serverPanic("Wrong set encoding in setTypeNext");
    }
    return si->encoding;
}
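
/* Illustrative usage sketch (an editorial addition with a hypothetical helper
 * name, kept out of the build via #if 0): the standard iteration idiom. Each
 * element arrives either as a string (str != NULL) or as an integer in llele,
 * depending on the encoding. */
#if 0
static void setTypeNextExample(robj *s) {
    char *str;
    size_t len;
    int64_t llele;
    setTypeIterator *si = setTypeInitIterator(s);
    while (setTypeNext(si, &str, &len, &llele) != -1) {
        if (str)
            serverLog(LL_NOTICE, "string element: %.*s", (int)len, str);
        else
            serverLog(LL_NOTICE, "integer element: %lld", (long long)llele);
    }
    setTypeReleaseIterator(si);
}
#endif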

/* The not copy-on-write friendly, but easy to use, version of setTypeNext()
 * is setTypeNextObject(), which returns new SDS strings. If you don't retain
 * a pointer to the returned object you should call sdsfree() against it.
 *
 * This function is the way to go for write operations where COW is not
 * an issue. */
sds setTypeNextObject(setTypeIterator *si) {
    int64_t intele;
    char *str;
    size_t len;

    if (setTypeNext(si, &str, &len, &intele) == -1) return NULL;
    if (str != NULL) return sdsnewlen(str, len);
    return sdsfromlonglong(intele);
}

/* Return a random element from a non-empty set.
 * The returned element can be an int64_t value if the set is encoded
 * as an "intset" blob of integers, or a string.
 *
 * The caller provides three pointers to be populated with the right
 * object. The return value of the function is the object->encoding
 * field of the object and can be used by the caller to check if the
 * int64_t pointer or the str and len pointers were populated, as for
 * setTypeNext. If OBJ_ENCODING_HT is returned, str is pointed to a
 * string which is actually an sds string and it can be used as such.
 *
 * Note that the str, len and llele pointers should all be passed and cannot
 * be NULL. If str is set to NULL, the value is an integer stored in llele. */
int setTypeRandomElement(robj *setobj, char **str, size_t *len, int64_t *llele) {
    if (setobj->encoding == OBJ_ENCODING_HT) {
        dictEntry *de = dictGetFairRandomKey(setobj->ptr);
        *str = dictGetKey(de);
        *len = sdslen(*str);
        *llele = -123456789; /* Not needed. Defensive. */
    } else if (setobj->encoding == OBJ_ENCODING_INTSET) {
        *llele = intsetRandom(setobj->ptr);
        *str = NULL; /* Not needed. Defensive. */
    } else if (setobj->encoding == OBJ_ENCODING_LISTPACK) {
        unsigned char *lp = setobj->ptr;
        int r = rand() % lpLength(lp);
        unsigned char *p = lpSeek(lp, r);
        unsigned int l;
        *str = (char *)lpGetValue(p, &l, (long long *)llele);
        *len = (size_t)l;
    } else {
        serverPanic("Unknown set encoding");
    }
    return setobj->encoding;
}

/* Pops a random element and returns it as an object. */
robj *setTypePopRandom(robj *set) {
    robj *obj;
    if (set->encoding == OBJ_ENCODING_LISTPACK) {
        /* Find a random element and delete it without re-seeking the
         * listpack. */
        unsigned int i = 0;
        unsigned char *p = lpNextRandom(set->ptr, lpFirst(set->ptr), &i, 1, 0);
        unsigned int len = 0; /* initialize to silence warning */
        long long llele = 0; /* initialize to silence warning */
        char *str = (char *)lpGetValue(p, &len, &llele);
        if (str)
            obj = createStringObject(str, len);
        else
            obj = createStringObjectFromLongLong(llele);
        set->ptr = lpDelete(set->ptr, p, NULL);
    } else {
        char *str;
        size_t len = 0;
        int64_t llele = 0;
        int encoding = setTypeRandomElement(set, &str, &len, &llele);
        if (str)
            obj = createStringObject(str, len);
        else
            obj = createStringObjectFromLongLong(llele);
        setTypeRemoveAux(set, str, len, llele, encoding == OBJ_ENCODING_HT);
    }
    return obj;
}
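
/* Illustrative usage sketch (an editorial addition with a hypothetical helper
 * name, kept out of the build via #if 0): popping one element the way a
 * command implementation might. The returned object has refcount 1 and must
 * be released by the caller. */
#if 0
static void setTypePopRandomExample(client *c, robj *set) {
    robj *ele = setTypePopRandom(set);
    addReplyBulk(c, ele);
    decrRefCount(ele);
}
#endif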

unsigned long setTypeSize(const robj *subject) {
    if (subject->encoding == OBJ_ENCODING_HT) {
        return dictSize((const dict*)subject->ptr);
    } else if (subject->encoding == OBJ_ENCODING_INTSET) {
        return intsetLen((const intset*)subject->ptr);
    } else if (subject->encoding == OBJ_ENCODING_LISTPACK) {
        return lpLength((unsigned char *)subject->ptr);
    } else {
        serverPanic("Unknown set encoding");
    }
}

/* Convert the set to the specified encoding. The resulting dict (when
 * converting to a hash table) is presized to hold the number of elements in
 * the original set. */
void setTypeConvert(robj *setobj, int enc) {
    setTypeConvertAndExpand(setobj, enc, setTypeSize(setobj), 1);
}

/* Converts a set to the specified encoding, pre-sizing it for 'cap' elements.
 * The 'panic' argument controls whether to panic on OOM (panic=1) or return
 * C_ERR on OOM (panic=0). If panic=1 is given, this function always returns
 * C_OK. */
int setTypeConvertAndExpand(robj *setobj, int enc, unsigned long cap, int panic) {
    setTypeIterator *si;
    serverAssertWithInfo(NULL,setobj,setobj->type == OBJ_SET &&
                             setobj->encoding != enc);

    if (enc == OBJ_ENCODING_HT) {
        dict *d = dictCreate(&setDictType);
        sds element;

        /* Presize the dict to avoid rehashing */
        if (panic) {
            dictExpand(d, cap);
        } else if (dictTryExpand(d, cap) != DICT_OK) {
            dictRelease(d);
            return C_ERR;
        }

        /* To add the elements we extract integers and create redis objects */
        si = setTypeInitIterator(setobj);
        while ((element = setTypeNextObject(si)) != NULL) {
            serverAssert(dictAdd(d,element,NULL) == DICT_OK);
        }
        setTypeReleaseIterator(si);

        freeSetObject(setobj); /* frees the internals but not setobj itself */
        setobj->encoding = OBJ_ENCODING_HT;
        setobj->ptr = d;
    } else if (enc == OBJ_ENCODING_LISTPACK) {
        /* Preallocate the minimum two bytes per element (enc/value + backlen) */
        size_t estcap = cap * 2;
        if (setobj->encoding == OBJ_ENCODING_INTSET && setTypeSize(setobj) > 0) {
            /* If we're converting from intset, we have a better estimate. */
            size_t s1 = lpEstimateBytesRepeatedInteger(intsetMin(setobj->ptr), cap);
            size_t s2 = lpEstimateBytesRepeatedInteger(intsetMax(setobj->ptr), cap);
            estcap = max(s1, s2);
        }
        unsigned char *lp = lpNew(estcap);
        char *str;
        size_t len;
        int64_t llele;
        si = setTypeInitIterator(setobj);
        while (setTypeNext(si, &str, &len, &llele) != -1) {
            if (str != NULL)
                lp = lpAppend(lp, (unsigned char *)str, len);
            else
                lp = lpAppendInteger(lp, llele);
        }
        setTypeReleaseIterator(si);

        freeSetObject(setobj); /* frees the internals but not setobj itself */
        setobj->encoding = OBJ_ENCODING_LISTPACK;
        setobj->ptr = lp;
    } else {
        serverPanic("Unsupported set conversion");
    }
    return C_OK;
}
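
/* Illustrative usage sketch (an editorial addition with a hypothetical helper
 * name, kept out of the build via #if 0): the non-panicking conversion path.
 * Assumes 's' is not already HT-encoded. On allocation failure the set is
 * left untouched in its old encoding, so the caller can degrade gracefully. */
#if 0
static int setTypeConvertSafelyExample(robj *s, unsigned long cap) {
    if (setTypeConvertAndExpand(s, OBJ_ENCODING_HT, cap, 0) != C_OK) {
        /* dictTryExpand failed; 's' still has its previous encoding. */
        return C_ERR;
    }
    return C_OK;
}
#endif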

/* This is a helper function for the COPY command.
 * Duplicate a set object, with the guarantee that the returned object
 * has the same encoding as the original one.
 *
 * The resulting object always has refcount set to 1 */
robj *setTypeDup(robj *o) {
    robj *set;
    setTypeIterator *si;

    serverAssert(o->type == OBJ_SET);

    /* Create a new set object that has the same encoding as the original one */
    if (o->encoding == OBJ_ENCODING_INTSET) {
        intset *is = o->ptr;
        size_t size = intsetBlobLen(is);
        intset *newis = zmalloc(size);
        memcpy(newis,is,size);
        set = createObject(OBJ_SET, newis);
        set->encoding = OBJ_ENCODING_INTSET;
    } else if (o->encoding == OBJ_ENCODING_LISTPACK) {
        unsigned char *lp = o->ptr;
        size_t sz = lpBytes(lp);
        unsigned char *new_lp = zmalloc(sz);
        memcpy(new_lp, lp, sz);
        set = createObject(OBJ_SET, new_lp);
        set->encoding = OBJ_ENCODING_LISTPACK;
    } else if (o->encoding == OBJ_ENCODING_HT) {
        set = createSetObject();
        dict *d = o->ptr;
        dictExpand(set->ptr, dictSize(d));
        si = setTypeInitIterator(o);
        char *str;
        size_t len;
        int64_t intobj;
        while (setTypeNext(si, &str, &len, &intobj) != -1) {
            setTypeAdd(set, (sds)str);
        }
        setTypeReleaseIterator(si);
    } else {
        serverPanic("Unknown set encoding");
    }
    return set;
}

void saddCommand(client *c) {
    robj *set;
    int j, added = 0;

    set = lookupKeyWrite(c->db,c->argv[1]);
    if (checkType(c,set,OBJ_SET)) return;

    if (set == NULL) {
        set = setTypeCreate(c->argv[2]->ptr, c->argc - 2);
        dbAdd(c->db,c->argv[1],set);
    } else {
        setTypeMaybeConvert(set, c->argc - 2);
    }

    for (j = 2; j < c->argc; j++) {
        if (setTypeAdd(set,c->argv[j]->ptr)) added++;
    }
    if (added) {
        signalModifiedKey(c,c->db,c->argv[1]);
        notifyKeyspaceEvent(NOTIFY_SET,"sadd",c->argv[1],c->db->id);
    }
    server.dirty += added;
    addReplyLongLong(c,added);
}
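
/* Illustrative session (assumed behavior, per the code above): the reply
 * counts only elements that were actually added, and "sadd" fires once per
 * command, not per element:
 *
 *   127.0.0.1:6379> SADD s a b    -> (integer) 2
 *   127.0.0.1:6379> SADD s a c    -> (integer) 1   ("a" was already present)
 */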

void sremCommand(client *c) {
    robj *set;
    int j, deleted = 0, keyremoved = 0;

    if ((set = lookupKeyWriteOrReply(c,c->argv[1],shared.czero)) == NULL ||
        checkType(c,set,OBJ_SET)) return;

    for (j = 2; j < c->argc; j++) {
        if (setTypeRemove(set,c->argv[j]->ptr)) {
            deleted++;
            if (setTypeSize(set) == 0) {
                dbDelete(c->db,c->argv[1]);
                keyremoved = 1;
                break;
            }
        }
    }
    if (deleted) {
        signalModifiedKey(c,c->db,c->argv[1]);
        notifyKeyspaceEvent(NOTIFY_SET,"srem",c->argv[1],c->db->id);
        if (keyremoved)
            notifyKeyspaceEvent(NOTIFY_GENERIC,"del",c->argv[1],
                                c->db->id);
        server.dirty += deleted;
    }
    addReplyLongLong(c,deleted);
}
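
/* Illustrative session (assumed behavior, per the code above): removing the
 * last member deletes the key itself, emitting both "srem" and "del" events:
 *
 *   127.0.0.1:6379> SADD s a b     -> (integer) 2
 *   127.0.0.1:6379> SREM s a b x   -> (integer) 2   (key "s" no longer exists)
 */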

void smoveCommand(client *c) {
    robj *srcset, *dstset, *ele;
    srcset = lookupKeyWrite(c->db,c->argv[1]);
    dstset = lookupKeyWrite(c->db,c->argv[2]);
    ele = c->argv[3];

    /* If the source key does not exist return 0 */
    if (srcset == NULL) {
        addReply(c,shared.czero);
        return;
    }

    /* If the source key has the wrong type, or the destination key
     * is set and has the wrong type, return with an error. */
    if (checkType(c,srcset,OBJ_SET) ||
        checkType(c,dstset,OBJ_SET)) return;

    /* If srcset and dstset are equal, SMOVE is a no-op */
    if (srcset == dstset) {
        addReply(c,setTypeIsMember(srcset,ele->ptr) ?
            shared.cone : shared.czero);
        return;
    }

    /* If the element cannot be removed from the src set, return 0. */
    if (!setTypeRemove(srcset,ele->ptr)) {
        addReply(c,shared.czero);
        return;
    }
    notifyKeyspaceEvent(NOTIFY_SET,"srem",c->argv[1],c->db->id);

    /* Remove the src set from the database when empty */
    if (setTypeSize(srcset) == 0) {
        dbDelete(c->db,c->argv[1]);
        notifyKeyspaceEvent(NOTIFY_GENERIC,"del",c->argv[1],c->db->id);
    }

    /* Create the destination set when it doesn't exist */
    if (!dstset) {
        dstset = setTypeCreate(ele->ptr, 1);
        dbAdd(c->db,c->argv[2],dstset);
    }

    signalModifiedKey(c,c->db,c->argv[1]);
    server.dirty++;

    /* An extra key has changed when ele was successfully added to dstset */
    if (setTypeAdd(dstset,ele->ptr)) {
        server.dirty++;
        signalModifiedKey(c,c->db,c->argv[2]);
        notifyKeyspaceEvent(NOTIFY_SET,"sadd",c->argv[2],c->db->id);
    }
    addReply(c,shared.cone);
}
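
/* Illustrative session (assumed behavior, per the code above): SMOVE replies
 * 1 when the element was moved (or already a member, when src == dst), and 0
 * when the element is missing from the source set:
 *
 *   127.0.0.1:6379> SADD src a       -> (integer) 1
 *   127.0.0.1:6379> SMOVE src dst a  -> (integer) 1   (src deleted, dst created)
 *   127.0.0.1:6379> SMOVE src dst a  -> (integer) 0   (src no longer exists)
 */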

void sismemberCommand(client *c) {
    robj *set;

    if ((set = lookupKeyReadOrReply(c,c->argv[1],shared.czero)) == NULL ||
        checkType(c,set,OBJ_SET)) return;

    if (setTypeIsMember(set,c->argv[2]->ptr))
        addReply(c,shared.cone);
    else
        addReply(c,shared.czero);
}

void smismemberCommand(client *c) {
    robj *set;
    int j;

    /* Don't abort when the key cannot be found. Non-existing keys are empty
     * sets, where SMISMEMBER should respond with a series of zeros. */
    set = lookupKeyRead(c->db,c->argv[1]);
    if (set && checkType(c,set,OBJ_SET)) return;

    addReplyArrayLen(c,c->argc - 2);

    for (j = 2; j < c->argc; j++) {
        if (set && setTypeIsMember(set,c->argv[j]->ptr))
            addReply(c,shared.cone);
        else
            addReply(c,shared.czero);
    }
}
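
/* Illustrative session (assumed behavior, per the code above): a missing key
 * is treated as an empty set, so every position replies 0:
 *
 *   127.0.0.1:6379> SADD s a               -> (integer) 1
 *   127.0.0.1:6379> SMISMEMBER s a b       -> 1) (integer) 1  2) (integer) 0
 *   127.0.0.1:6379> SMISMEMBER nosuchkey a -> 1) (integer) 0
 */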

void scardCommand(client *c) {
    robj *o;

    if ((o = lookupKeyReadOrReply(c,c->argv[1],shared.czero)) == NULL ||
        checkType(c,o,OBJ_SET)) return;

    addReplyLongLong(c,setTypeSize(o));
}

/* Handle the "SPOP key <count>" variant. The normal version of the
 * command is handled by the spopCommand() function itself. */

/* How many times bigger should the set be, compared to the remaining size,
 * for us to use the "create new set" strategy? Read later in the
 * implementation for more info. */
#define SPOP_MOVE_STRATEGY_MUL 5
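
/* Worked example (illustrative): popping 30 elements from a 100-element set
 * leaves remaining = 70, and 70*5 = 350 > 30, so CASE 2 below (pop random
 * elements one by one) is used; popping 90 leaves remaining = 10, and
 * 10*5 = 50 < 90, so CASE 3 (rebuild the set from the elements that stay)
 * is used instead. */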

void spopWithCountCommand(client *c) {
    long l;
    unsigned long count, size;
    robj *set;

    /* Get the count argument */
    if (getPositiveLongFromObjectOrReply(c,c->argv[2],&l,NULL) != C_OK) return;
    count = (unsigned long) l;

    /* Make sure a key with the input name exists, and that its type is
     * indeed a set. Otherwise, return nil */
    if ((set = lookupKeyWriteOrReply(c,c->argv[1],shared.emptyset[c->resp]))
        == NULL || checkType(c,set,OBJ_SET)) return;

    /* If count is zero, serve an empty set ASAP to avoid special
     * cases later. */
    if (count == 0) {
        addReply(c,shared.emptyset[c->resp]);
        return;
    }

    size = setTypeSize(set);

    /* Generate an SPOP keyspace notification */
    notifyKeyspaceEvent(NOTIFY_SET,"spop",c->argv[1],c->db->id);
    server.dirty += (count >= size) ? size : count;

    /* CASE 1:
     * The number of requested elements is greater than or equal to
     * the number of elements inside the set: simply return the whole set. */
    if (count >= size) {
        /* We just return the entire set */
        sunionDiffGenericCommand(c,c->argv+1,1,NULL,SET_OP_UNION);

        /* Delete the set as it is now empty */
        dbDelete(c->db,c->argv[1]);
        notifyKeyspaceEvent(NOTIFY_GENERIC,"del",c->argv[1],c->db->id);

        /* todo: Move the spop notification to be executed after the command logic. */

        /* Propagate this command as a DEL or UNLINK operation */
        robj *aux = server.lazyfree_lazy_server_del ? shared.unlink : shared.del;
        rewriteClientCommandVector(c, 2, aux, c->argv[1]);
        signalModifiedKey(c,c->db,c->argv[1]);
        return;
    }

    /* Cases 2 and 3 require replicating SPOP as a set of SREM commands.
     * Prepare our replication argument vector. Also send the array length,
     * which is common to both code paths. */
    unsigned long batchsize = count > 1024 ? 1024 : count;
    robj **propargv = zmalloc(sizeof(robj *) * (2 + batchsize));
    propargv[0] = shared.srem;
    propargv[1] = c->argv[1];
    unsigned long propindex = 2;
    addReplySetLen(c,count);

    /* Common iteration vars. */
    char *str;
    size_t len;
    int64_t llele;
    unsigned long remaining = size-count; /* Elements left after SPOP. */

    /* If we are here, the number of requested elements is less than the
     * number of elements inside the set. Also we are sure that count < size.
     * Use two different strategies.
     *
     * CASE 2: The number of elements to return is small compared to the
     * set size. We can just extract random elements and return them to
     * the client. */
    if (remaining*SPOP_MOVE_STRATEGY_MUL > count &&
        set->encoding == OBJ_ENCODING_LISTPACK)
    {
        /* Specialized case for listpack. Traverse it only once. */
        unsigned char *lp = set->ptr;
        unsigned char *p = lpFirst(lp);
        unsigned int index = 0;
        unsigned char **ps = zmalloc(sizeof(char *) * count);
        for (unsigned long i = 0; i < count; i++) {
            p = lpNextRandom(lp, p, &index, count - i, 0);
            unsigned int len;
            str = (char *)lpGetValue(p, &len, (long long *)&llele);

            if (str) {
                addReplyBulkCBuffer(c, str, len);
                propargv[propindex++] = createStringObject(str, len);
            } else {
                addReplyBulkLongLong(c, llele);
                propargv[propindex++] = createStringObjectFromLongLong(llele);
            }
            /* Replicate/AOF this command as an SREM operation */
            if (propindex == 2 + batchsize) {
                alsoPropagate(c->db->id, propargv, propindex, PROPAGATE_AOF | PROPAGATE_REPL);
                for (unsigned long j = 2; j < propindex; j++) {
                    decrRefCount(propargv[j]);
                }
                propindex = 2;
            }

            /* Store pointer for later deletion and move to next. */
            ps[i] = p;
            p = lpNext(lp, p);
            index++;
        }
        lp = lpBatchDelete(lp, ps, count);
        zfree(ps);
        set->ptr = lp;
    } else if (remaining*SPOP_MOVE_STRATEGY_MUL > count) {
        for (unsigned long i = 0; i < count; i++) {
            propargv[propindex] = setTypePopRandom(set);
            addReplyBulk(c, propargv[propindex]);
            propindex++;
            /* Replicate/AOF this command as an SREM operation */
            if (propindex == 2 + batchsize) {
                alsoPropagate(c->db->id, propargv, propindex, PROPAGATE_AOF | PROPAGATE_REPL);
                for (unsigned long j = 2; j < propindex; j++) {
                    decrRefCount(propargv[j]);
                }
                propindex = 2;
            }
        }
    } else {
        /* CASE 3: The number of elements to return is very big, approaching
         * the size of the set itself. After some time, extracting random
         * elements from such a set becomes computationally expensive, so we
         * use a different strategy: we extract random elements that we don't
         * want to return (the elements that will remain part of the set),
         * creating a new set as we do this (that will be stored as the original
         * set). Then we return the elements left in the original set and
         * release it. */
        robj *newset = NULL;

        /* Create a new set with just the remaining elements. */
        if (set->encoding == OBJ_ENCODING_LISTPACK) {
            /* Specialized case for listpack. Traverse it only once. */
            newset = createSetListpackObject();
            unsigned char *lp = set->ptr;
            unsigned char *p = lpFirst(lp);
            unsigned int index = 0;
            unsigned char **ps = zmalloc(sizeof(char *) * remaining);
            for (unsigned long i = 0; i < remaining; i++) {
                p = lpNextRandom(lp, p, &index, remaining - i, 0);
                unsigned int len;
                str = (char *)lpGetValue(p, &len, (long long *)&llele);
                setTypeAddAux(newset, str, len, llele, 0);
                ps[i] = p;
                p = lpNext(lp, p);
                index++;
            }
            lp = lpBatchDelete(lp, ps, remaining);
            zfree(ps);
            set->ptr = lp;
        } else {
            while(remaining--) {
                int encoding = setTypeRandomElement(set, &str, &len, &llele);
                if (!newset) {
                    newset = str ? createSetListpackObject() : createIntsetObject();
                }
                setTypeAddAux(newset, str, len, llele, encoding == OBJ_ENCODING_HT);
                setTypeRemoveAux(set, str, len, llele, encoding == OBJ_ENCODING_HT);
            }
        }

        /* Transfer the old set to the client. */
        setTypeIterator *si;
        si = setTypeInitIterator(set);
        while (setTypeNext(si, &str, &len, &llele) != -1) {
            if (str == NULL) {
                addReplyBulkLongLong(c,llele);
                propargv[propindex++] = createStringObjectFromLongLong(llele);
            } else {
                addReplyBulkCBuffer(c, str, len);
                propargv[propindex++] = createStringObject(str, len);
            }
            /* Replicate/AOF this command as an SREM operation */
            if (propindex == 2 + batchsize) {
                alsoPropagate(c->db->id, propargv, propindex, PROPAGATE_AOF | PROPAGATE_REPL);
                for (unsigned long i = 2; i < propindex; i++) {
                    decrRefCount(propargv[i]);
                }
                propindex = 2;
            }
        }
        setTypeReleaseIterator(si);

        /* Assign the new set as the key value. */
        dbReplaceValue(c->db,c->argv[1],newset);
    }

    /* Replicate/AOF the remaining elements as an SREM operation */
    if (propindex != 2) {
        alsoPropagate(c->db->id, propargv, propindex, PROPAGATE_AOF | PROPAGATE_REPL);
        for (unsigned long i = 2; i < propindex; i++) {
            decrRefCount(propargv[i]);
        }
        propindex = 2;
    }
    zfree(propargv);

    /* Don't propagate the command itself even if we incremented the
     * dirty counter. We don't want to propagate an SPOP command since
     * we propagated the command as a set of SREM operations using
     * the alsoPropagate() API. */
    preventCommandPropagation(c);
    signalModifiedKey(c,c->db,c->argv[1]);
}
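
/* Illustrative propagation (assumed behavior, per the code above): a call
 * like SPOP s 3 on {a,b,c,d,e} may reply with a, c, e and reach replicas and
 * the AOF as "SREM s a c e"; when more than 1024 elements are popped, the
 * SREMs are emitted in batches of at most 1024 members each. */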

void spopCommand(client *c) {
    robj *set, *ele;

    if (c->argc == 3) {
        spopWithCountCommand(c);
        return;
    } else if (c->argc > 3) {
        addReplyErrorObject(c,shared.syntaxerr);
        return;
    }

    /* Make sure a key with the input name exists, and that its type is
     * indeed a set */
    if ((set = lookupKeyWriteOrReply(c,c->argv[1],shared.null[c->resp]))
        == NULL || checkType(c,set,OBJ_SET)) return;

    /* Pop a random element from the set */
    ele = setTypePopRandom(set);

    notifyKeyspaceEvent(NOTIFY_SET,"spop",c->argv[1],c->db->id);

    /* Replicate/AOF this command as an SREM operation */
    rewriteClientCommandVector(c,3,shared.srem,c->argv[1],ele);

    /* Add the element to the reply */
    addReplyBulk(c, ele);
    decrRefCount(ele);

    /* Delete the set if it's empty */
    if (setTypeSize(set) == 0) {
        dbDelete(c->db,c->argv[1]);
        notifyKeyspaceEvent(NOTIFY_GENERIC,"del",c->argv[1],c->db->id);
    }

    /* Set has been modified */
    signalModifiedKey(c,c->db,c->argv[1]);
    server.dirty++;
}
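
/* Illustrative propagation (assumed behavior, per the code above): the
 * count-less variant is rewritten before propagation, so SPOP s replying
 * with "a" reaches replicas and the AOF as "SREM s a". */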

/* Handle the "SRANDMEMBER key <count>" variant. The normal version of the
 * command is handled by the srandmemberCommand() function itself. */

/* How many times bigger should the set be, compared to the requested size,
 * for us not to use the "remove elements" strategy? Read later in the
 * implementation for more info. */
#define SRANDMEMBER_SUB_STRATEGY_MUL 3

/* If the client is trying to ask for a very large number of random elements,
 * queuing may consume an unlimited amount of memory, so we want to limit
 * the number of random elements picked per iteration. */
#define SRANDFIELD_RANDOM_SAMPLE_LIMIT 1000
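
/* Worked example (illustrative, for an intset- or hashtable-encoded set,
 * since listpack sets are handled by CASE 2.5 below): with 100 elements,
 * SRANDMEMBER key 40 gives 40*3 = 120 > 100, so CASE 3 copies the whole set
 * and removes 60 random elements; SRANDMEMBER key 20 gives 20*3 = 60 <= 100,
 * so CASE 4 keeps sampling until 20 unique elements have been collected. */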

void srandmemberWithCountCommand(client *c) {
    long l;
    unsigned long count, size;
    int uniq = 1;
    robj *set;
    char *str;
    size_t len;
    int64_t llele;

    dict *d;

    if (getRangeLongFromObjectOrReply(c,c->argv[2],-LONG_MAX,LONG_MAX,&l,NULL) != C_OK) return;
    if (l >= 0) {
        count = (unsigned long) l;
    } else {
        /* A negative count means: return the same elements multiple times
         * (i.e. don't remove the extracted element after every extraction). */
        count = -l;
        uniq = 0;
    }

    if ((set = lookupKeyReadOrReply(c,c->argv[1],shared.emptyarray))
        == NULL || checkType(c,set,OBJ_SET)) return;
    size = setTypeSize(set);

    /* If count is zero, serve it ASAP to avoid special cases later. */
    if (count == 0) {
        addReply(c,shared.emptyarray);
        return;
    }

    /* CASE 1: The count was negative, so the extraction method is just:
     * "return N random elements" sampling the whole set every time.
     * This case is trivial and can be served without auxiliary data
     * structures. This case is the only one that also needs to return the
     * elements in random order. */
    if (!uniq || count == 1) {
        addReplyArrayLen(c,count);

        if (set->encoding == OBJ_ENCODING_LISTPACK && count > 1) {
            /* Specialized case for listpack, traversing it only once. */
            unsigned long limit, sample_count;
            limit = count > SRANDFIELD_RANDOM_SAMPLE_LIMIT ? SRANDFIELD_RANDOM_SAMPLE_LIMIT : count;
            listpackEntry *entries = zmalloc(limit * sizeof(listpackEntry));
            while (count) {
                sample_count = count > limit ? limit : count;
                count -= sample_count;
                lpRandomEntries(set->ptr, sample_count, entries);
                for (unsigned long i = 0; i < sample_count; i++) {
                    if (entries[i].sval)
                        addReplyBulkCBuffer(c, entries[i].sval, entries[i].slen);
                    else
                        addReplyBulkLongLong(c, entries[i].lval);
                }
                if (c->flags & CLIENT_CLOSE_ASAP)
                    break;
            }
            zfree(entries);
            return;
        }

        while(count--) {
            setTypeRandomElement(set, &str, &len, &llele);
            if (str == NULL) {
                addReplyBulkLongLong(c,llele);
            } else {
                addReplyBulkCBuffer(c, str, len);
            }
            if (c->flags & CLIENT_CLOSE_ASAP)
                break;
        }
        return;
    }

    /* CASE 2:
     * The number of requested elements is greater than or equal to the number
     * of elements inside the set: simply return the whole set. */
    if (count >= size) {
        setTypeIterator *si;
        addReplyArrayLen(c,size);
        si = setTypeInitIterator(set);
        while (setTypeNext(si, &str, &len, &llele) != -1) {
            if (str == NULL) {
                addReplyBulkLongLong(c,llele);
            } else {
                addReplyBulkCBuffer(c, str, len);
            }
            size--;
        }
        setTypeReleaseIterator(si);
        serverAssert(size==0);
        return;
    }

    /* CASE 2.5 listpack only. Sampling unique elements, in non-random order.
     * Listpack encoded sets are meant to be relatively small, so
     * SRANDMEMBER_SUB_STRATEGY_MUL isn't necessary and we'd rather not make
     * copies of the entries. Instead, we emit them directly to the output
     * buffer.
     *
     * And it is inefficient to repeatedly pick one random element from a
     * listpack in CASE 4. So we use this instead. */
    if (set->encoding == OBJ_ENCODING_LISTPACK) {
        unsigned char *lp = set->ptr;
        unsigned char *p = lpFirst(lp);
        unsigned int i = 0;
        addReplyArrayLen(c, count);
        while (count) {
            p = lpNextRandom(lp, p, &i, count--, 0);
            unsigned int len;
            str = (char *)lpGetValue(p, &len, (long long *)&llele);
            if (str == NULL) {
                addReplyBulkLongLong(c, llele);
            } else {
                addReplyBulkCBuffer(c, str, len);
            }
            p = lpNext(lp, p);
            i++;
        }
        return;
    }

    /* For CASE 3 and CASE 4 we need an auxiliary dictionary. */
    d = dictCreate(&sdsReplyDictType);

    /* CASE 3:
     * The number of elements inside the set is not greater than
     * SRANDMEMBER_SUB_STRATEGY_MUL times the number of requested elements.
     * In this case we create a set from scratch with all the elements, and
     * subtract random elements to reach the requested number of elements.
     *
     * This is done because if the number of requested elements is just
     * a bit less than the number of elements in the set, the natural approach
     * used in CASE 4 is highly inefficient. */
    if (count*SRANDMEMBER_SUB_STRATEGY_MUL > size) {
        setTypeIterator *si;

        /* Add all the elements into the temporary dictionary. */
        si = setTypeInitIterator(set);
        dictExpand(d, size);
        while (setTypeNext(si, &str, &len, &llele) != -1) {
            int retval = DICT_ERR;

            if (str == NULL) {
                retval = dictAdd(d,sdsfromlonglong(llele),NULL);
            } else {
                retval = dictAdd(d, sdsnewlen(str, len), NULL);
            }
            serverAssert(retval == DICT_OK);
        }
        setTypeReleaseIterator(si);
        serverAssert(dictSize(d) == size);

        /* Remove random elements to reach the right count. */
        while (size > count) {
            dictEntry *de;
            de = dictGetFairRandomKey(d);
            dictUnlink(d,dictGetKey(de));
            sdsfree(dictGetKey(de));
            dictFreeUnlinkedEntry(d,de);
            size--;
        }
    }

    /* CASE 4: We have a big set compared to the requested number of elements.
     * In this case we can simply get random elements from the set and add
     * them to the temporary dictionary, trying to eventually get enough
     * unique elements to reach the specified count. */
    else {
        unsigned long added = 0;
        sds sdsele;

        dictExpand(d, count);
        while (added < count) {
            setTypeRandomElement(set, &str, &len, &llele);
            if (str == NULL) {
                sdsele = sdsfromlonglong(llele);
            } else {
                sdsele = sdsnewlen(str, len);
            }
            /* Try to add the object to the dictionary. If it already exists
             * free it, otherwise increment the number of objects we have
             * in the result dictionary. */
            if (dictAdd(d,sdsele,NULL) == DICT_OK)
                added++;
            else
                sdsfree(sdsele);
        }
    }

    /* CASE 3 & 4: send the result to the user. */
    {
        dictIterator *di;
        dictEntry *de;

        addReplyArrayLen(c,count);
        di = dictGetIterator(d);
        while((de = dictNext(di)) != NULL)
            addReplyBulkSds(c,dictGetKey(de));
        dictReleaseIterator(di);
        dictRelease(d);
    }
}
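
/* Illustrative session (assumed behavior, per the code above): a negative
 * count allows repetitions and is not capped by the set size:
 *
 *   127.0.0.1:6379> SADD s a b          -> (integer) 2
 *   127.0.0.1:6379> SRANDMEMBER s -5    -> five replies drawn from {a,b},
 *                                          possibly with duplicates
 */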

/* SRANDMEMBER <key> [<count>] */
void srandmemberCommand(client *c) {
    robj *set;
    char *str;
    size_t len;
    int64_t llele;

    if (c->argc == 3) {
        srandmemberWithCountCommand(c);
        return;
    } else if (c->argc > 3) {
        addReplyErrorObject(c,shared.syntaxerr);
        return;
    }

    /* Handle variant without <count> argument. Reply with simple bulk string */
    if ((set = lookupKeyReadOrReply(c,c->argv[1],shared.null[c->resp]))
        == NULL || checkType(c,set,OBJ_SET)) return;

    setTypeRandomElement(set, &str, &len, &llele);
    if (str == NULL) {
        addReplyBulkLongLong(c,llele);
    } else {
        addReplyBulkCBuffer(c, str, len);
    }
}
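
/* Illustrative session (assumed behavior, per the code above): without a
 * count argument the reply is a single bulk string instead of an array:
 *
 *   127.0.0.1:6379> SRANDMEMBER s      -> "a"
 *   127.0.0.1:6379> SRANDMEMBER s 1    -> 1) "a"
 */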

int qsortCompareSetsByCardinality(const void *s1, const void *s2) {
    if (setTypeSize(*(robj**)s1) > setTypeSize(*(robj**)s2)) return 1;
    if (setTypeSize(*(robj**)s1) < setTypeSize(*(robj**)s2)) return -1;
    return 0;
}
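
/* Note (assumed from the ordering above): sorting with this comparator
 * arranges the sets from the smallest to the largest cardinality, so an
 * intersection can iterate the smallest set and probe the larger ones. */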
|
|
|
|
|
SDIFF is now able to select between two algorithms for speed.
SDIFF used an algorithm that was O(N) where N is the total number
of elements of all the sets involved in the operation.
The algorithm worked like that:
ALGORITHM 1:
1) For the first set, add all the members to an auxiliary set.
2) For all the other sets, remove all the members of the set from the
auxiliary set.
So it is an O(N) algorithm where N is the total number of elements in
all the sets involved in the diff operation.
Cristobal Viedma suggested to modify the algorithm to the following:
ALGORITHM 2:
1) Iterate all the elements of the first set.
2) For every element, check if the element also exists in all the other
remaining sets.
3) Add the element to the auxiliary set only if it does not exist in any
of the other sets.
The complexity of this algorithm on the worst case is O(N*M) where N is
the size of the first set and M the total number of sets involved in the
operation.
However when there are elements in common, with this algorithm we stop
the computation for a given element as long as we find a duplicated
element into another set.
I (antirez) added an additional step to algorithm 2 to make it faster,
that is to sort the set to subtract from the biggest to the
smallest, so that it is more likely to find a duplicate in a larger sets
that are checked before the smaller ones.
WHAT IS BETTER?
None of course, for instance if the first set is much larger than the
other sets the second algorithm does a lot more work compared to the
first algorithm.
Similarly if the first set is much smaller than the other sets, the
original algorithm will less work.
So this commit makes Redis able to guess the number of operations
required by each algorithm, and select the best at runtime according
to the input received.
However, since the second algorithm has better constant times and can do
less work if there are duplicated elements, an advantage is given to the
second algorithm.
2012-11-30 15:41:26 +01:00
|
|
|
/* This is used by SDIFF and in this case we can receive NULL that should
|
|
|
|
* be handled as empty sets. */
|
|
|
|
int qsortCompareSetsByRevCardinality(const void *s1, const void *s2) {
|
|
|
|
robj *o1 = *(robj**)s1, *o2 = *(robj**)s2;
|
2017-12-05 17:42:19 +08:00
|
|
|
unsigned long first = o1 ? setTypeSize(o1) : 0;
|
|
|
|
unsigned long second = o2 ? setTypeSize(o2) : 0;
|

    if (first < second) return 1;
    if (first > second) return -1;
    return 0;
}
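
/* The comparator above orders whole sets by decreasing cardinality. A
 * standalone sketch of the same idea on plain arrays (compile separately;
 * the toyset struct and the sizes are illustrative assumptions, not Redis
 * types): after qsort the biggest set comes first, which is what lets DIFF
 * algorithm 1 below hit duplicates as early as possible. */
#if 0
#include <stdio.h>
#include <stdlib.h>

struct toyset { const char *name; unsigned long card; };

static int byRevCard(const void *a, const void *b) {
    unsigned long ca = ((const struct toyset *)a)->card;
    unsigned long cb = ((const struct toyset *)b)->card;
    if (ca < cb) return 1;   /* bigger sorts first */
    if (ca > cb) return -1;
    return 0;
}

int main(void) {
    struct toyset sets[] = {{"s1", 3}, {"s2", 1000}, {"s3", 42}};
    qsort(sets, 3, sizeof(sets[0]), byRevCard);
    for (int i = 0; i < 3; i++)
        printf("%s (%lu)\n", sets[i].name, sets[i].card); /* s2, s3, s1 */
    return 0;
}
#endif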

/* SINTER / SMEMBERS / SINTERSTORE / SINTERCARD
 *
 * 'cardinality_only' is used by SINTERCARD: only return the cardinality
 * with minimum processing and memory overheads.
 *
 * 'limit' is used by SINTERCARD: stop searching after reaching the limit.
 * Passing a 0 means unlimited.
 */
void sinterGenericCommand(client *c, robj **setkeys,
                          unsigned long setnum, robj *dstkey,
                          int cardinality_only, unsigned long limit) {
    robj **sets = zmalloc(sizeof(robj*)*setnum);
    setTypeIterator *si;
    robj *dstset = NULL;
    char *str;
    size_t len;
    int64_t intobj;
    void *replylen = NULL;
    unsigned long j, cardinality = 0;
    int encoding, empty = 0;

    for (j = 0; j < setnum; j++) {
        robj *setobj = lookupKeyRead(c->db, setkeys[j]);
        if (!setobj) {
            /* A NULL is considered an empty set */
            empty += 1;
            sets[j] = NULL;
            continue;
        }
        if (checkType(c,setobj,OBJ_SET)) {
            zfree(sets);
            return;
        }
        sets[j] = setobj;
    }

    /* Set intersection with an empty set always results in an empty set.
     * Return ASAP if there is an empty set. */
    if (empty > 0) {
        zfree(sets);
        if (dstkey) {
            if (dbDelete(c->db,dstkey)) {
                signalModifiedKey(c,c->db,dstkey);
                notifyKeyspaceEvent(NOTIFY_GENERIC,"del",dstkey,c->db->id);
                server.dirty++;
            }
            addReply(c,shared.czero);
        } else if (cardinality_only) {
            addReplyLongLong(c,cardinality);
        } else {
            addReply(c,shared.emptyset[c->resp]);
        }
        return;
    }

    /* Sort sets from the smallest to largest; this improves our
     * algorithm's performance. */
    qsort(sets,setnum,sizeof(robj*),qsortCompareSetsByCardinality);

    /* The first thing we should output is the total number of elements,
     * since this is a multi-bulk write. At this stage we don't know the
     * intersection set size, so we use a trick: append an empty object
     * to the output list and save the pointer, to later modify it with
     * the right length. */
    if (dstkey) {
        /* If we have a target key where to store the resulting set
         * create this key with an empty set inside. */
        if (sets[0]->encoding == OBJ_ENCODING_INTSET) {
            /* The first set is an intset, so the result is an intset too. The
             * elements are inserted in ascending order which is efficient in an
             * intset. */
            dstset = createIntsetObject();
        } else if (sets[0]->encoding == OBJ_ENCODING_LISTPACK) {
            /* To avoid many reallocs, we estimate that the result is a listpack
             * of approximately the same size as the first set. Then we shrink
             * it or possibly convert it to intset in the end. */
            unsigned char *lp = lpNew(lpBytes(sets[0]->ptr));
            dstset = createObject(OBJ_SET, lp);
            dstset->encoding = OBJ_ENCODING_LISTPACK;
        } else {
            /* We start off with a listpack, since it's more efficient to append
             * to than an intset. Later we can convert it to intset or a
             * hashtable. */
            dstset = createSetListpackObject();
        }
    } else if (!cardinality_only) {
        replylen = addReplyDeferredLen(c);
    }

    /* Iterate all the elements of the first (smallest) set, and test
     * the element against all the other sets: if at least one set does
     * not include the element, it is discarded. */
    int only_integers = 1;
    si = setTypeInitIterator(sets[0]);
    while((encoding = setTypeNext(si, &str, &len, &intobj)) != -1) {
        for (j = 1; j < setnum; j++) {
            if (sets[j] == sets[0]) continue;
            if (!setTypeIsMemberAux(sets[j], str, len, intobj,
                                    encoding == OBJ_ENCODING_HT))
                break;
        }

        /* Only take action when all sets contain the member */
        if (j == setnum) {
            if (cardinality_only) {
                cardinality++;

                /* We stop the searching after reaching the limit. */
                if (limit && cardinality >= limit)
                    break;
            } else if (!dstkey) {
                if (str != NULL)
                    addReplyBulkCBuffer(c, str, len);
                else
                    addReplyBulkLongLong(c,intobj);
                cardinality++;
            } else {
                if (str && only_integers) {
                    /* It may be an integer although we got it as a string. */
                    if (encoding == OBJ_ENCODING_HT &&
                        string2ll(str, len, (long long *)&intobj))
                    {
                        if (dstset->encoding == OBJ_ENCODING_LISTPACK ||
                            dstset->encoding == OBJ_ENCODING_INTSET)
                        {
                            /* Adding it as an integer is more efficient. */
                            str = NULL;
                        }
                    } else {
                        /* It's not an integer */
                        only_integers = 0;
                    }
                }
                setTypeAddAux(dstset, str, len, intobj, encoding == OBJ_ENCODING_HT);
            }
        }
    }
    setTypeReleaseIterator(si);

    if (cardinality_only) {
        addReplyLongLong(c,cardinality);
    } else if (dstkey) {
        /* Store the resulting set into the target, if the intersection
         * is not an empty set. */
        if (setTypeSize(dstset) > 0) {
            if (only_integers) maybeConvertToIntset(dstset);
            if (dstset->encoding == OBJ_ENCODING_LISTPACK) {
                /* We allocated too much memory when we created it to avoid
                 * frequent reallocs. Therefore, we shrink it now. */
                dstset->ptr = lpShrinkToFit(dstset->ptr);
            }
            setKey(c,c->db,dstkey,dstset,0);
            addReplyLongLong(c,setTypeSize(dstset));
            notifyKeyspaceEvent(NOTIFY_SET,"sinterstore",
                dstkey,c->db->id);
            server.dirty++;
        } else {
            addReply(c,shared.czero);
            if (dbDelete(c->db,dstkey)) {
                server.dirty++;
                signalModifiedKey(c,c->db,dstkey);
                notifyKeyspaceEvent(NOTIFY_GENERIC,"del",dstkey,c->db->id);
            }
        }
        decrRefCount(dstset);
    } else {
        setDeferredSetLen(c,replylen,cardinality);
    }
    zfree(sets);
}
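
/* A standalone sketch of the strategy used above (compile separately; the
 * sorted int arrays and names are illustrative assumptions standing in for
 * Redis sets): sort by ascending size, iterate the smallest set, and keep
 * an element only if every other set contains it. A nonzero limit stops
 * the scan early, which is the SINTERCARD fast path. */
#if 0
#include <stdio.h>
#include <stdlib.h>

static int cmpInt(const void *a, const void *b) {
    int x = *(const int *)a, y = *(const int *)b;
    return (x > y) - (x < y);
}

static unsigned long toyInterCard(int **sets, size_t *lens, size_t nsets,
                                  unsigned long limit) {
    /* Assumes sets[0] is the smallest and every array is sorted. */
    unsigned long card = 0;
    for (size_t i = 0; i < lens[0]; i++) {
        size_t j;
        for (j = 1; j < nsets; j++)
            if (!bsearch(&sets[0][i], sets[j], lens[j], sizeof(int), cmpInt))
                break; /* one miss discards the element */
        if (j == nsets && ++card == limit) break; /* early exit on LIMIT */
    }
    return card;
}

int main(void) {
    int a[] = {1, 3, 5}, b[] = {1, 2, 3, 4, 5, 6}, c[] = {0, 1, 3, 5, 9};
    int *sets[] = {a, b, c};
    size_t lens[] = {3, 6, 5};
    printf("%lu\n", toyInterCard(sets, lens, 3, 0)); /* 3: {1,3,5} */
    printf("%lu\n", toyInterCard(sets, lens, 3, 2)); /* 2: stopped early */
    return 0;
}
#endif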

/* SINTER key [key ...] */
void sinterCommand(client *c) {
    sinterGenericCommand(c, c->argv+1, c->argc-1, NULL, 0, 0);
}

/* SINTERCARD numkeys key [key ...] [LIMIT limit] */
void sinterCardCommand(client *c) {
    long j;
    long numkeys = 0; /* Number of keys. */
    long limit = 0;   /* 0 means no limit. */

    if (getRangeLongFromObjectOrReply(c, c->argv[1], 1, LONG_MAX,
                                      &numkeys, "numkeys should be greater than 0") != C_OK)
        return;
    if (numkeys > (c->argc - 2)) {
        addReplyError(c, "Number of keys can't be greater than number of args");
        return;
    }

    for (j = 2 + numkeys; j < c->argc; j++) {
        char *opt = c->argv[j]->ptr;
        int moreargs = (c->argc - 1) - j;

        if (!strcasecmp(opt, "LIMIT") && moreargs) {
            j++;
            if (getPositiveLongFromObjectOrReply(c, c->argv[j], &limit,
                                                 "LIMIT can't be negative") != C_OK)
                return;
        } else {
            addReplyErrorObject(c, shared.syntaxerr);
            return;
        }
    }

    sinterGenericCommand(c, c->argv+2, numkeys, NULL, 1, limit);
}
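
/* A minimal client-side sketch of the grammar parsed above, SINTERCARD
 * numkeys key [key ...] [LIMIT limit], using hiredis (compile separately
 * with -lhiredis; the key names, values and server address are illustrative
 * assumptions). */
#if 0
#include <stdio.h>
#include <hiredis/hiredis.h>

int main(void) {
    redisContext *ctx = redisConnect("127.0.0.1", 6379);
    if (!ctx || ctx->err) return 1;

    redisReply *r = redisCommand(ctx, "SADD s1 a b c d");
    freeReplyObject(r);
    r = redisCommand(ctx, "SADD s2 b c d e");
    freeReplyObject(r);

    /* numkeys=2, then the keys, then the optional LIMIT pair: the server
     * stops counting as soon as 2 common members are found. */
    r = redisCommand(ctx, "SINTERCARD 2 s1 s2 LIMIT 2");
    if (r && r->type == REDIS_REPLY_INTEGER)
        printf("bounded cardinality: %lld\n", r->integer); /* 2, not 3 */
    freeReplyObject(r);
    redisFree(ctx);
    return 0;
}
#endif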

/* SINTERSTORE destination key [key ...] */
void sinterstoreCommand(client *c) {
    sinterGenericCommand(c, c->argv+2, c->argc-2, c->argv[1], 0, 0);
}

void sunionDiffGenericCommand(client *c, robj **setkeys, int setnum,
                              robj *dstkey, int op) {
    robj **sets = zmalloc(sizeof(robj*)*setnum);
    setTypeIterator *si;
    robj *dstset = NULL;
    char *str;
    size_t len;
    int64_t llval;
    int encoding;
    int j, cardinality = 0;
    int diff_algo = 1;
    int sameset = 0;

    for (j = 0; j < setnum; j++) {
        robj *setobj = lookupKeyRead(c->db, setkeys[j]);
        if (!setobj) {
            sets[j] = NULL;
            continue;
        }
        if (checkType(c,setobj,OBJ_SET)) {
            zfree(sets);
            return;
        }
        sets[j] = setobj;
        if (j > 0 && sets[0] == sets[j]) {
            sameset = 1;
        }
    }

    /* Select what DIFF algorithm to use.
     *
     * Algorithm 1 is O(N*M) where N is the size of the first set
     * and M the total number of sets.
     *
     * Algorithm 2 is O(N) where N is the total number of elements in all
     * the sets.
     *
     * We compute what is the best bet with the current input here. */
    if (op == SET_OP_DIFF && sets[0] && !sameset) {
        long long algo_one_work = 0, algo_two_work = 0;

        for (j = 0; j < setnum; j++) {
            if (sets[j] == NULL) continue;

            algo_one_work += setTypeSize(sets[0]);
            algo_two_work += setTypeSize(sets[j]);
        }

        /* Algorithm 1 has better constant times and performs fewer
         * operations if there are elements in common. Give it some
         * advantage. */
        algo_one_work /= 2;
        diff_algo = (algo_one_work <= algo_two_work) ? 1 : 2;

        if (diff_algo == 1 && setnum > 1) {
            /* With algorithm 1 it is better to order the sets to subtract
             * by decreasing size, so that we are more likely to find
             * duplicated elements ASAP. */
            qsort(sets+1,setnum-1,sizeof(robj*),
                  qsortCompareSetsByRevCardinality);
        }
    }
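
    /* For example (sizes are illustrative assumptions, not taken from this
     * file): with |sets[0]| = 100 and two other sets of 5000 and 7000
     * elements, algo_one_work = 100*3 = 300, halved to 150, while
     * algo_two_work = 100+5000+7000 = 12100, so algorithm 1 is selected.
     * With a first set of 1000000 elements against two sets of 100, the
     * estimates are 1500000 vs 1000200 and algorithm 2 wins instead. */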

    /* We need a temp set object to store our union/diff. If the dstkey
     * is not NULL (that is, we are inside an SUNIONSTORE/SDIFFSTORE
     * operation) then this set object will be the resulting object to set
     * into the target key. */
    dstset = createIntsetObject();

    if (op == SET_OP_UNION) {
        /* Union is trivial, just add every element of every set to the
         * temporary set. */
        for (j = 0; j < setnum; j++) {
            if (!sets[j]) continue; /* non existing keys are like empty sets */

            si = setTypeInitIterator(sets[j]);
            while ((encoding = setTypeNext(si, &str, &len, &llval)) != -1) {
                cardinality += setTypeAddAux(dstset, str, len, llval, encoding == OBJ_ENCODING_HT);
            }
            setTypeReleaseIterator(si);
        }
    } else if (op == SET_OP_DIFF && sameset) {
        /* At least one of the sets is the same one (same key) as the first
         * one, so the result must be empty. */
    } else if (op == SET_OP_DIFF && sets[0] && diff_algo == 1) {
        /* DIFF Algorithm 1:
         *
         * We perform the diff by iterating all the elements of the first
         * set, and only adding an element to the target set if it does not
         * exist in any of the other sets.
         *
         * This way we perform at max N*M operations, where N is the size of
         * the first set, and M the number of sets. */
        si = setTypeInitIterator(sets[0]);
        while ((encoding = setTypeNext(si, &str, &len, &llval)) != -1) {
            for (j = 1; j < setnum; j++) {
                if (!sets[j]) continue; /* no key is an empty set. */
                if (sets[j] == sets[0]) break; /* same set! */
                if (setTypeIsMemberAux(sets[j], str, len, llval,
                                       encoding == OBJ_ENCODING_HT))
                    break;
            }
            if (j == setnum) {
                /* There is no other set with this element. Add it. */
                cardinality += setTypeAddAux(dstset, str, len, llval, encoding == OBJ_ENCODING_HT);
            }
        }
        setTypeReleaseIterator(si);
    } else if (op == SET_OP_DIFF && sets[0] && diff_algo == 2) {
        /* DIFF Algorithm 2:
         *
         * Add all the elements of the first set to the auxiliary set.
         * Then remove all the elements of all the next sets from it.
         *
         * This is O(N) where N is the sum of all the elements in every
         * set. */
        for (j = 0; j < setnum; j++) {
            if (!sets[j]) continue; /* non existing keys are like empty sets */

            si = setTypeInitIterator(sets[j]);
            while((encoding = setTypeNext(si, &str, &len, &llval)) != -1) {
                if (j == 0) {
                    cardinality += setTypeAddAux(dstset, str, len, llval,
                                                 encoding == OBJ_ENCODING_HT);
                } else {
                    cardinality -= setTypeRemoveAux(dstset, str, len, llval,
                                                    encoding == OBJ_ENCODING_HT);
                }
            }
            setTypeReleaseIterator(si);

            /* Exit if result set is empty as any additional removal
             * of elements will have no effect. */
            if (cardinality == 0) break;
        }
    }

    /* Output the content of the resulting set, if not in STORE mode */
    if (!dstkey) {
        addReplySetLen(c,cardinality);
        si = setTypeInitIterator(dstset);
        while (setTypeNext(si, &str, &len, &llval) != -1) {
            if (str)
                addReplyBulkCBuffer(c, str, len);
            else
                addReplyBulkLongLong(c, llval);
        }
        setTypeReleaseIterator(si);
        server.lazyfree_lazy_server_del ? freeObjAsync(NULL, dstset, -1) :
                                          decrRefCount(dstset);
    } else {
        /* If we have a target key where to store the resulting set
         * create this key with the result set inside */
        if (setTypeSize(dstset) > 0) {
            setKey(c,c->db,dstkey,dstset,0);
            addReplyLongLong(c,setTypeSize(dstset));
            notifyKeyspaceEvent(NOTIFY_SET,
                op == SET_OP_UNION ? "sunionstore" : "sdiffstore",
                dstkey,c->db->id);
            server.dirty++;
        } else {
            addReply(c,shared.czero);
            if (dbDelete(c->db,dstkey)) {
                server.dirty++;
                signalModifiedKey(c,c->db,dstkey);
                notifyKeyspaceEvent(NOTIFY_GENERIC,"del",dstkey,c->db->id);
            }
        }
        decrRefCount(dstset);
    }
    zfree(sets);
}
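
/* A standalone sketch of DIFF algorithm 2 above (compile separately; the
 * boolean array over a tiny universe and the values are illustrative
 * assumptions standing in for the temporary set): load the first set, then
 * delete every member of the remaining sets, stopping once nothing is left
 * to subtract from. */
#if 0
#include <stdio.h>

#define UNIVERSE 16

int main(void) {
    int a[] = {1, 2, 3, 4}, b[] = {2, 9}, c[] = {4, 11};
    int *sets[] = {a, b, c};
    size_t lens[] = {4, 2, 2};
    int aux[UNIVERSE] = {0};
    unsigned long cardinality = 0;

    for (size_t j = 0; j < 3; j++) {
        for (size_t i = 0; i < lens[j]; i++) {
            int v = sets[j][i];
            if (j == 0) { cardinality += !aux[v]; aux[v] = 1; } /* add */
            else        { cardinality -= aux[v];  aux[v] = 0; } /* remove */
        }
        if (cardinality == 0) break; /* further removals are no-ops */
    }
    for (int v = 0; v < UNIVERSE; v++)
        if (aux[v]) printf("%d ", v);    /* 1 3 */
    printf("(card=%lu)\n", cardinality); /* 2 */
    return 0;
}
#endif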

/* SUNION key [key ...] */
void sunionCommand(client *c) {
    sunionDiffGenericCommand(c,c->argv+1,c->argc-1,NULL,SET_OP_UNION);
}

/* SUNIONSTORE destination key [key ...] */
void sunionstoreCommand(client *c) {
    sunionDiffGenericCommand(c,c->argv+2,c->argc-2,c->argv[1],SET_OP_UNION);
}

/* SDIFF key [key ...] */
void sdiffCommand(client *c) {
    sunionDiffGenericCommand(c,c->argv+1,c->argc-1,NULL,SET_OP_DIFF);
}

/* SDIFFSTORE destination key [key ...] */
void sdiffstoreCommand(client *c) {
    sunionDiffGenericCommand(c,c->argv+2,c->argc-2,c->argv[1],SET_OP_DIFF);
}

void sscanCommand(client *c) {
    robj *set;
    unsigned long cursor;

    if (parseScanCursorOrReply(c,c->argv[2],&cursor) == C_ERR) return;
    if ((set = lookupKeyReadOrReply(c,c->argv[1],shared.emptyscan)) == NULL ||
        checkType(c,set,OBJ_SET)) return;
    scanGenericCommand(c,set,cursor);
}
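
/* A minimal client-side sketch of driving the cursor that sscanCommand
 * parses above: keep calling SSCAN with the returned cursor until it comes
 * back as "0". Uses hiredis (compile separately with -lhiredis; the key
 * name and server address are illustrative assumptions). */
#if 0
#include <stdio.h>
#include <string.h>
#include <hiredis/hiredis.h>

int main(void) {
    redisContext *ctx = redisConnect("127.0.0.1", 6379);
    if (!ctx || ctx->err) return 1;

    char cursor[32] = "0";
    do {
        redisReply *r = redisCommand(ctx, "SSCAN myset %s COUNT 100", cursor);
        if (!r) break;
        if (r->type != REDIS_REPLY_ARRAY || r->elements != 2) {
            freeReplyObject(r);
            break;
        }
        /* element[0] is the next cursor, element[1] the batch of members. */
        snprintf(cursor, sizeof(cursor), "%s", r->element[0]->str);
        for (size_t i = 0; i < r->element[1]->elements; i++)
            printf("%s\n", r->element[1]->element[i]->str);
        freeReplyObject(r);
    } while (strcmp(cursor, "0") != 0);

    redisFree(ctx);
    return 0;
}
#endif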