From f264678ccf0dc77c7f577214c9922866a241cccf Mon Sep 17 00:00:00 2001 From: Oran Agra Date: Thu, 10 Sep 2020 13:43:38 +0300 Subject: [PATCH 001/114] Squash merging 125 typo/grammar/comment/doc PRs (#7773) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit List of squashed commits or PRs =============================== commit 66801ea Author: hwware Date: Mon Jan 13 00:54:31 2020 -0500 typo fix in acl.c commit 46f55db Author: Itamar Haber Date: Sun Sep 6 18:24:11 2020 +0300 Updates a couple of comments Specifically: * RM_AutoMemory completed instead of pointing to docs * Updated link to custom type doc commit 61a2aa0 Author: xindoo Date: Tue Sep 1 19:24:59 2020 +0800 Correct errors in code comments commit a5871d1 Author: yz1509 Date: Tue Sep 1 18:36:06 2020 +0800 fix typos in module.c commit 41eede7 Author: bookug Date: Sat Aug 15 01:11:33 2020 +0800 docs: fix typos in comments commit c303c84 Author: lazy-snail Date: Fri Aug 7 11:15:44 2020 +0800 fix spelling in redis.conf commit 1eb76bf Author: zhujian Date: Thu Aug 6 15:22:10 2020 +0800 add a missing 'n' in comment commit 1530ec2 Author: Daniel Dai <764122422@qq.com> Date: Mon Jul 27 00:46:35 2020 -0400 fix spelling in tracking.c commit e517b31 Author: Hunter-Chen Date: Fri Jul 17 22:33:32 2020 +0800 Update redis.conf Co-authored-by: Itamar Haber commit c300eff Author: Hunter-Chen Date: Fri Jul 17 22:33:23 2020 +0800 Update redis.conf Co-authored-by: Itamar Haber commit 4c058a8 Author: 陈浩鹏 Date: Thu Jun 25 19:00:56 2020 +0800 Grammar fix and clarification commit 5fcaa81 Author: bodong.ybd Date: Fri Jun 19 10:09:00 2020 +0800 Fix typos commit 4caca9a Author: Pruthvi P Date: Fri May 22 00:33:22 2020 +0530 Fix typo eviciton => eviction commit b2a25f6 Author: Brad Dunbar Date: Sun May 17 12:39:59 2020 -0400 Fix a typo. 
commit 12842ae Author: hwware Date: Sun May 3 17:16:59 2020 -0400 fix spelling in redis conf commit ddba07c Author: Chris Lamb Date: Sat May 2 23:25:34 2020 +0100 Correct a "conflicts" spelling error. commit 8fc7bf2 Author: Nao YONASHIRO Date: Thu Apr 30 10:25:27 2020 +0900 docs: fix EXPIRE_FAST_CYCLE_DURATION to ACTIVE_EXPIRE_CYCLE_FAST_DURATION commit 9b2b67a Author: Brad Dunbar Date: Fri Apr 24 11:46:22 2020 -0400 Fix a typo. commit 0746f10 Author: devilinrust <63737265+devilinrust@users.noreply.github.com> Date: Thu Apr 16 00:17:53 2020 +0200 Fix typos in server.c commit 92b588d Author: benjessop12 <56115861+benjessop12@users.noreply.github.com> Date: Mon Apr 13 13:43:55 2020 +0100 Fix spelling mistake in lazyfree.c commit 1da37aa Merge: 2d4ba28 c90b2a4 Author: hwware Date: Thu Mar 5 22:41:31 2020 -0500 Merge remote-tracking branch 'upstream/unstable' into expiretypofix commit 2d4ba28 Author: hwware Date: Mon Mar 2 00:09:40 2020 -0500 fix typo in expire.c commit 1a746f7 Author: SennoYuki Date: Thu Feb 27 16:54:32 2020 +0800 fix typo commit 8599b1a Author: dongheejeong Date: Sun Feb 16 20:31:43 2020 +0000 Fix typo in server.c commit f38d4e8 Author: hwware Date: Sun Feb 2 22:58:38 2020 -0500 fix typo in evict.c commit fe143fc Author: Leo Murillo Date: Sun Feb 2 01:57:22 2020 -0600 Fix a few typos in redis.conf commit 1ab4d21 Author: viraja1 Date: Fri Dec 27 17:15:58 2019 +0530 Fix typo in Latency API docstring commit ca1f70e Author: gosth Date: Wed Dec 18 15:18:02 2019 +0800 fix typo in sort.c commit a57c06b Author: ZYunH Date: Mon Dec 16 22:28:46 2019 +0800 fix-zset-typo commit b8c92b5 Author: git-hulk Date: Mon Dec 16 15:51:42 2019 +0800 FIX: typo in cluster.c, onformation->information commit 9dd981c Author: wujm2007 Date: Mon Dec 16 09:37:52 2019 +0800 Fix typo commit e132d7a Author: Sebastien Williams-Wynn Date: Fri Nov 15 00:14:07 2019 +0000 Minor typo change commit 47f44d5 Author: happynote3966 <01ssrmikururudevice01@gmail.com> Date: Mon Nov 11 22:08:48 
2019 +0900 fix comment typo in redis-cli.c commit b8bdb0d Author: fulei Date: Wed Oct 16 18:00:17 2019 +0800 Fix a spelling mistake of comments in defragDictBucketCallback commit 0def46a Author: fulei Date: Wed Oct 16 13:09:27 2019 +0800 fix some spelling mistakes of comments in defrag.c commit f3596fd Author: Phil Rajchgot Date: Sun Oct 13 02:02:32 2019 -0400 Typo and grammar fixes Redis and its documentation are great -- just wanted to submit a few corrections in the spirit of Hacktoberfest. Thanks for all your work on this project. I use it all the time and it works beautifully. commit 2b928cd Author: KangZhiDong Date: Sun Sep 1 07:03:11 2019 +0800 fix typos commit 33aea14 Author: Axlgrep Date: Tue Aug 27 11:02:18 2019 +0800 Fixed eviction spelling issues commit e282a80 Author: Simen Flatby Date: Tue Aug 20 15:25:51 2019 +0200 Update comments to reflect prop name In the comments the prop is referenced as replica-validity-factor, but it is really named cluster-replica-validity-factor. 
commit 74d1f9a Author: Jim Green Date: Tue Aug 20 20:00:31 2019 +0800 fix comment error, the code is ok commit eea1407 Author: Liao Tonglang Date: Fri May 31 10:16:18 2019 +0800 typo fix fix cna't to can't commit 0da553c Author: KAWACHI Takashi Date: Wed Jul 17 00:38:16 2019 +0900 Fix typo commit 7fc8fb6 Author: Michael Prokop Date: Tue May 28 17:58:42 2019 +0200 Typo fixes s/familar/familiar/ s/compatiblity/compatibility/ s/ ot / to / s/itsef/itself/ commit 5f46c9d Author: zhumoing <34539422+zhumoing@users.noreply.github.com> Date: Tue May 21 21:16:50 2019 +0800 typo-fixes typo-fixes commit 321dfe1 Author: wxisme <850885154@qq.com> Date: Sat Mar 16 15:10:55 2019 +0800 typo fix commit b4fb131 Merge: 267e0e6 4842305 Author: Nikitas Bastas Date: Fri Feb 8 22:55:45 2019 +0200 Merge branch 'unstable' of antirez/redis into unstable commit 267e0e6 Author: Nikitas Bastas Date: Wed Jan 30 21:26:04 2019 +0200 Minor typo fix commit 30544e7 Author: inshal96 <39904558+inshal96@users.noreply.github.com> Date: Fri Jan 4 16:54:50 2019 +0500 remove an extra 'a' in the comments commit 337969d Author: BrotherGao Date: Sat Dec 29 12:37:29 2018 +0800 fix typo in redis.conf commit 9f4b121 Merge: 423a030 19d0ece Author: BrotherGao Date: Sat Dec 29 11:41:12 2018 +0800 Merge branch 'unstable' of antirez/redis into unstable commit 423a030 Merge: 42b02b7 0423081 Author: 杨东衡 Date: Tue Dec 4 23:56:11 2018 +0800 Merge branch 'unstable' of antirez/redis into unstable commit 42b02b7 Merge: 8c7dcff efa96f0 Author: Dongheng Yang Date: Sun Oct 28 15:54:23 2018 +0800 Merge pull request #1 from antirez/unstable update local data commit 714b589 Author: Christian Date: Fri Dec 28 01:17:26 2018 +0100 fix typo "resulution" commit e23259d Author: garenchan <1412950785@qq.com> Date: Wed Dec 26 09:58:35 2018 +0800 fix typo: segfauls -> segfault commit a9359f8 Author: xjp Date: Tue Dec 18 17:31:44 2018 +0800 Fixed REDISMODULE_H spell bug commit a12c3e4 Author: jdiaz Date: Sat Dec 15 23:39:52 2018 -0600 Fixes 
hyperloglog hash function comment block description commit 770eb11 Author: 林上耀 <1210tom@163.com> Date: Sun Nov 25 17:16:10 2018 +0800 fix typo commit fd97fbb Author: Chris Lamb Date: Fri Nov 23 17:14:01 2018 +0100 Correct "unsupported" typo. commit a85522d Author: Jungnam Lee Date: Thu Nov 8 23:01:29 2018 +0900 fix typo in test comments commit ade8007 Author: Arun Kumar Date: Tue Oct 23 16:56:35 2018 +0530 Fixed grammatical typo Fixed typo for word 'dictionary' commit 869ee39 Author: Hamid Alaei Date: Sun Aug 12 16:40:02 2018 +0430 fix documentations: (ThreadSafeContextStart/Stop -> ThreadSafeContextLock/Unlock), minor typo commit f89d158 Author: Mayank Jain Date: Tue Jul 31 23:01:21 2018 +0530 Updated README.md with some spelling corrections. Made correction in spelling of some misspelled words. commit 892198e Author: dsomeshwar Date: Sat Jul 21 23:23:04 2018 +0530 typo fix commit 8a4d780 Author: Itamar Haber Date: Mon Apr 30 02:06:52 2018 +0300 Fixes some typos commit e3acef6 Author: Noah Rosamilia Date: Sat Mar 3 23:41:21 2018 -0500 Fix typo in /deps/README.md commit 04442fb Author: WuYunlong Date: Sat Mar 3 10:32:42 2018 +0800 Fix typo in readSyncBulkPayload() comment. commit 9f36880 Author: WuYunlong Date: Sat Mar 3 10:20:37 2018 +0800 replication.c comment: run_id -> replid. commit f866b4a Author: Francesco 'makevoid' Canessa Date: Thu Feb 22 22:01:56 2018 +0000 fix comment typo in server.c commit 0ebc69b Author: 줍 Date: Mon Feb 12 16:38:48 2018 +0900 Fix typo in redis.conf Fix `five behaviors` to `eight behaviors` in [this sentence ](antirez/redis@unstable/redis.conf#L564) commit b50a620 Author: martinbroadhurst Date: Thu Dec 28 12:07:30 2017 +0000 Fix typo in valgrind.sup commit 7d8f349 Author: Peter Boughton Date: Mon Nov 27 19:52:19 2017 +0000 Update CONTRIBUTING; refer doc updates to redis-doc repo. 
commit 02dec7e Author: Klauswk Date: Tue Oct 24 16:18:38 2017 -0200 Fix typo in comment commit e1efbc8 Author: chenshi Date: Tue Oct 3 18:26:30 2017 +0800 Correct two spelling errors of comments commit 93327d8 Author: spacewander Date: Wed Sep 13 16:47:24 2017 +0800 Update the comment for OBJ_ENCODING_EMBSTR_SIZE_LIMIT's value The value of OBJ_ENCODING_EMBSTR_SIZE_LIMIT is 44 now instead of 39. commit 63d361f Author: spacewander Date: Tue Sep 12 15:06:42 2017 +0800 Fix related doc in ziplist.c According to the definition of ZIP_BIG_PREVLEN and other related code, the guard of single byte should be 254 instead of 255. commit ebe228d Author: hanael80 Date: Tue Aug 15 09:09:40 2017 +0900 Fix typo commit 6b696e6 Author: Matt Robenolt Date: Mon Aug 14 14:50:47 2017 -0700 Fix typo in LATENCY DOCTOR output commit a2ec6ae Author: caosiyang Date: Tue Aug 15 14:15:16 2017 +0800 Fix a typo: form => from commit 3ab7699 Author: caosiyang Date: Thu Aug 10 18:40:33 2017 +0800 Fix a typo: replicationFeedSlavesFromMaster() => replicationFeedSlavesFromMasterStream() commit 72d43ef Author: caosiyang Date: Tue Aug 8 15:57:25 2017 +0800 fix a typo: servewr => server commit 707c958 Author: Bo Cai Date: Wed Jul 26 21:49:42 2017 +0800 redis-cli.c typo: conut -> count. Signed-off-by: Bo Cai commit b9385b2 Author: JackDrogon Date: Fri Jun 30 14:22:31 2017 +0800 Fix some spell problems commit 20d9230 Author: akosel Date: Sun Jun 4 19:35:13 2017 -0500 Fix typo commit b167bfc Author: Krzysiek Witkowicz Date: Mon May 22 21:32:27 2017 +0100 Fix #4008 small typo in comment commit 2b78ac8 Author: Jake Clarkson Date: Wed Apr 26 15:49:50 2017 +0100 Correct typo in tests/unit/hyperloglog.tcl commit b0f1cdb Author: Qi Luo Date: Wed Apr 19 14:25:18 2017 -0700 Fix typo commit a90b0f9 Author: charsyam Date: Thu Mar 16 18:19:53 2017 +0900 fix typos fix typos fix typos commit 8430a79 Author: Richard Hart Date: Mon Mar 13 22:17:41 2017 -0400 Fixed log message typo in listenToPort. 
commit 481a1c2 Author: Vinod Kumar Date: Sun Jan 15 23:04:51 2017 +0530 src/db.c: Correct "save" -> "safe" typo commit 586b4d3 Author: wangshaonan Date: Wed Dec 21 20:28:27 2016 +0800 Fix typo they->the in helloworld.c commit c1c4b5e Author: Jenner Date: Mon Dec 19 16:39:46 2016 +0800 typo error commit 1ee1a3f Author: tielei <43289893@qq.com> Date: Mon Jul 18 13:52:25 2016 +0800 fix some comments commit 11a41fb Author: Otto Kekäläinen Date: Sun Jul 3 10:23:55 2016 +0100 Fix spelling in documentation and comments commit 5fb5d82 Author: francischan Date: Tue Jun 28 00:19:33 2016 +0800 Fix outdated comments about redis.c file. It should now refer to server.c file. commit 6b254bc Author: lmatt-bit Date: Thu Apr 21 21:45:58 2016 +0800 Refine the comment of dictRehashMilliseconds func SLAVECONF->REPLCONF in comment - by andyli029 commit ee9869f Author: clark.kang Date: Tue Mar 22 11:09:51 2016 +0900 fix typos commit f7b3b11 Author: Harisankar H Date: Wed Mar 9 11:49:42 2016 +0530 Typo correction: "faield" --> "failed" Typo correction: "faield" --> "failed" commit 3fd40fc Author: Itamar Haber Date: Thu Feb 25 10:31:51 2016 +0200 Fixes a typo in comments commit 621c160 Author: Prayag Verma Date: Mon Feb 1 12:36:20 2016 +0530 Fix typo in Readme.md Spelling mistakes - `eviciton` > `eviction` `familar` > `familiar` commit d7d07d6 Author: WonCheol Lee Date: Wed Dec 30 15:11:34 2015 +0900 Typo fixed commit a4dade7 Author: Felix Bünemann Date: Mon Dec 28 11:02:55 2015 +0100 [ci skip] Improve supervised upstart config docs This mentions that "expect stop" is required for supervised upstart to work correctly. See http://upstart.ubuntu.com/cookbook/#expect-stop for an explanation. commit d9caba9 Author: daurnimator Date: Mon Dec 21 18:30:03 2015 +1100 README: Remove trailing whitespace commit 72d42e5 Author: daurnimator Date: Mon Dec 21 18:29:32 2015 +1100 README: Fix typo. th => the commit dd6e957 Author: daurnimator Date: Mon Dec 21 18:29:20 2015 +1100 README: Fix typo. 
familar => familiar commit 3a12b23 Author: daurnimator Date: Mon Dec 21 18:28:54 2015 +1100 README: Fix typo. eviciton => eviction commit 2d1d03b Author: daurnimator Date: Mon Dec 21 18:21:45 2015 +1100 README: Fix typo. sever => server commit 3973b06 Author: Itamar Haber Date: Sat Dec 19 17:01:20 2015 +0200 Typo fix commit 4f2e460 Author: Steve Gao Date: Fri Dec 4 10:22:05 2015 +0800 Update README - fix typos commit b21667c Author: binyan Date: Wed Dec 2 22:48:37 2015 +0800 delete redundancy color judge in sdscatcolor commit 88894c7 Author: binyan Date: Wed Dec 2 22:14:42 2015 +0800 the example output shoule be HelloWorld commit 2763470 Author: binyan Date: Wed Dec 2 17:41:39 2015 +0800 modify error word keyevente Signed-off-by: binyan commit 0847b3d Author: Bruno Martins Date: Wed Nov 4 11:37:01 2015 +0000 typo commit bbb9e9e Author: dawedawe Date: Fri Mar 27 00:46:41 2015 +0100 typo: zimap -> zipmap commit 5ed297e Author: Axel Advento Date: Tue Mar 3 15:58:29 2015 +0800 Fix 'salve' typos to 'slave' commit edec9d6 Author: LudwikJaniuk Date: Wed Jun 12 14:12:47 2019 +0200 Update README.md Co-Authored-By: Qix commit 692a7af Author: LudwikJaniuk Date: Tue May 28 14:32:04 2019 +0200 grammar commit d962b0a Author: Nick Frost Date: Wed Jul 20 15:17:12 2016 -0700 Minor grammar fix commit 24fff01aaccaf5956973ada8c50ceb1462e211c6 (typos) Author: Chad Miller Date: Tue Sep 8 13:46:11 2020 -0400 Fix faulty comment about operation of unlink() commit 3cd5c1f3326c52aa552ada7ec797c6bb16452355 Author: Kevin Date: Wed Nov 20 00:13:50 2019 +0800 Fix typo in server.c. 
From a83af59 Mon Sep 17 00:00:00 2001 From: wuwo Date: Fri, 17 Mar 2017 20:37:45 +0800 Subject: [PATCH] falure to failure From c961896 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E5=B7=A6=E6=87=B6?= Date: Sat, 27 May 2017 15:33:04 +0800 Subject: [PATCH] fix typo From e600ef2 Mon Sep 17 00:00:00 2001 From: "rui.zou" Date: Sat, 30 Sep 2017 12:38:15 +0800 Subject: [PATCH] fix a typo From c7d07fa Mon Sep 17 00:00:00 2001 From: Alexandre Perrin Date: Thu, 16 Aug 2018 10:35:31 +0200 Subject: [PATCH] deps README.md typo From b25cb67 Mon Sep 17 00:00:00 2001 From: Guy Korland Date: Wed, 26 Sep 2018 10:55:37 +0300 Subject: [PATCH 1/2] fix typos in header From ad28ca6 Mon Sep 17 00:00:00 2001 From: Guy Korland Date: Wed, 26 Sep 2018 11:02:36 +0300 Subject: [PATCH 2/2] fix typos commit 34924cdedd8552466fc22c1168d49236cb7ee915 Author: Adrian Lynch Date: Sat Apr 4 21:59:15 2015 +0100 Typos fixed commit fd2a1e7 Author: Jan Date: Sat Oct 27 19:13:01 2018 +0200 Fix typos Fix typos commit e14e47c1a234b53b0e103c5f6a1c61481cbcbb02 Author: Andy Lester Date: Fri Aug 2 22:30:07 2019 -0500 Fix multiple misspellings of "following" commit 79b948ce2dac6b453fe80995abbcaac04c213d5a Author: Andy Lester Date: Fri Aug 2 22:24:28 2019 -0500 Fix misspelling of create-cluster commit 1fffde52666dc99ab35efbd31071a4c008cb5a71 Author: Andy Lester Date: Wed Jul 31 17:57:56 2019 -0500 Fix typos commit 204c9ba9651e9e05fd73936b452b9a30be456cfe Author: Xiaobo Zhu Date: Tue Aug 13 22:19:25 2019 +0800 fix typos Squashed commit of the following: commit 1d9aaf8 Author: danmedani Date: Sun Aug 2 11:40:26 2015 -0700 README typo fix. 
Squashed commit of the following: commit 32bfa7c Author: Erik Dubbelboer Date: Mon Jul 6 21:15:08 2015 +0200 Fixed grammer Squashed commit of the following: commit b24f69c Author: Sisir Koppaka Date: Mon Mar 2 22:38:45 2015 -0500 utils/hashtable/rehashing.c: Fix typos Squashed commit of the following: commit 4e04082 Author: Erik Dubbelboer Date: Mon Mar 23 08:22:21 2015 +0000 Small config file documentation improvements Squashed commit of the following: commit acb8773 Author: ctd1500 Date: Fri May 8 01:52:48 2015 -0700 Typo and grammar fixes in readme commit 2eb75b6 Author: ctd1500 Date: Fri May 8 01:36:18 2015 -0700 fixed redis.conf comment Squashed commit of the following: commit a8249a2 Author: Masahiko Sawada Date: Fri Dec 11 11:39:52 2015 +0530 Revise correction of typos. Squashed commit of the following: commit 3c02028 Author: zhaojun11 Date: Wed Jan 17 19:05:28 2018 +0800 Fix typos include two code typos in cluster.c and latency.c Squashed commit of the following: commit 9dba47c Author: q191201771 <191201771@qq.com> Date: Sat Jan 4 11:31:04 2020 +0800 fix function listCreate comment in adlist.c Update src/server.c commit 2c7c2cb536e78dd211b1ac6f7bda00f0f54faaeb Author: charpty Date: Tue May 1 23:16:59 2018 +0800 server.c typo: modules system dictionary type comment Signed-off-by: charpty commit a8395323fb63cb59cb3591cb0f0c8edb7c29a680 Author: Itamar Haber Date: Sun May 6 00:25:18 2018 +0300 Updates test_helper.tcl's help with undocumented options Specifically: * Host * Port * Client commit bde6f9ced15755cd6407b4af7d601b030f36d60b Author: wxisme <850885154@qq.com> Date: Wed Aug 8 15:19:19 2018 +0800 fix comments in deps files commit 3172474ba991532ab799ee1873439f3402412331 Author: wxisme <850885154@qq.com> Date: Wed Aug 8 14:33:49 2018 +0800 fix some comments commit 01b6f2b6858b5cf2ce4ad5092d2c746e755f53f0 Author: Thor Juhasz Date: Sun Nov 18 14:37:41 2018 +0100 Minor fixes to comments Found some parts a little unclear on a first read, which prompted me to 
have a better look at the file and fix some minor things I noticed. Fixing minor typos and grammar. There are no changes to configuration options. These changes are only meant to help the user better understand the explanations to the various configuration options (cherry picked from commit 285ef446b05e09013556e7a490677494a9b4bb3e) --- CONTRIBUTING | 4 + README.md | 66 +++++++------- deps/README.md | 8 +- deps/linenoise/linenoise.c | 4 +- redis.conf | 124 +++++++++++++------------- sentinel.conf | 2 +- src/acl.c | 2 +- src/adlist.c | 9 +- src/ae.c | 4 +- src/ae_evport.c | 2 +- src/aof.c | 4 +- src/atomicvar.h | 2 +- src/bitops.c | 6 +- src/blocked.c | 4 +- src/cluster.c | 66 +++++++------- src/cluster.h | 8 +- src/config.c | 6 +- src/config.h | 2 +- src/connection.h | 4 +- src/db.c | 8 +- src/debug.c | 6 +- src/defrag.c | 30 +++---- src/dict.c | 8 +- src/endianconv.c | 2 +- src/evict.c | 8 +- src/expire.c | 8 +- src/geo.c | 6 +- src/geohash_helper.c | 4 +- src/hyperloglog.c | 16 ++-- src/latency.c | 12 +-- src/lazyfree.c | 6 +- src/listpack.c | 2 +- src/lolwut.c | 4 +- src/lolwut5.c | 2 +- src/lzfP.h | 2 +- src/module.c | 93 ++++++++++--------- src/modules/hellodict.c | 2 +- src/modules/helloworld.c | 4 +- src/multi.c | 2 +- src/networking.c | 12 +-- src/notify.c | 2 +- src/object.c | 4 +- src/quicklist.h | 4 +- src/rax.c | 16 ++-- src/rax.h | 4 +- src/rdb.c | 4 +- src/redis-check-rdb.c | 2 +- src/redis-cli.c | 12 +-- src/redismodule.h | 2 +- src/replication.c | 40 ++++----- src/scripting.c | 26 +++--- src/sds.c | 6 +- src/sentinel.c | 26 +++--- src/server.c | 35 ++++---- src/server.h | 12 +-- src/siphash.c | 6 +- src/slowlog.c | 2 +- src/sort.c | 2 +- src/sparkline.c | 2 +- src/stream.h | 2 +- src/t_hash.c | 2 +- src/t_list.c | 6 +- src/t_set.c | 6 +- src/t_stream.c | 6 +- src/t_string.c | 2 +- src/t_zset.c | 2 +- src/tracking.c | 2 +- src/valgrind.sup | 6 +- src/ziplist.c | 16 ++-- src/zipmap.c | 2 +- tests/cluster/tests/04-resharding.tcl | 6 +- 
tests/instances.tcl | 2 +- tests/integration/replication-4.tcl | 2 +- tests/support/test.tcl | 2 +- tests/test_helper.tcl | 7 +- tests/unit/expire.tcl | 2 +- tests/unit/hyperloglog.tcl | 2 +- tests/unit/scripting.tcl | 2 +- utils/create-cluster/README | 2 +- utils/hashtable/README | 2 +- 80 files changed, 436 insertions(+), 416 deletions(-) diff --git a/CONTRIBUTING b/CONTRIBUTING index 000edbeaf..82064afa3 100644 --- a/CONTRIBUTING +++ b/CONTRIBUTING @@ -20,6 +20,10 @@ each source file that you contribute. http://stackoverflow.com/questions/tagged/redis + Issues and pull requests for documentation belong on the redis-doc repo: + + https://github.com/redis/redis-doc + # How to provide a patch for a new feature 1. If it is a major feature or a semantical change, please don't start coding diff --git a/README.md b/README.md index a90b95cc1..80c2c9178 100644 --- a/README.md +++ b/README.md @@ -3,22 +3,22 @@ This README is just a fast *quick start* document. You can find more detailed do What is Redis? -------------- -Redis is often referred as a *data structures* server. What this means is that Redis provides access to mutable data structures via a set of commands, which are sent using a *server-client* model with TCP sockets and a simple protocol. So different processes can query and modify the same data structures in a shared way. +Redis is often referred to as a *data structures* server. What this means is that Redis provides access to mutable data structures via a set of commands, which are sent using a *server-client* model with TCP sockets and a simple protocol. So different processes can query and modify the same data structures in a shared way. Data structures implemented into Redis have a few special properties: -* Redis cares to store them on disk, even if they are always served and modified into the server memory. This means that Redis is fast, but that is also non-volatile. 
-* Implementation of data structures stress on memory efficiency, so data structures inside Redis will likely use less memory compared to the same data structure modeled using an high level programming language. -* Redis offers a number of features that are natural to find in a database, like replication, tunable levels of durability, cluster, high availability. +* Redis cares to store them on disk, even if they are always served and modified into the server memory. This means that Redis is fast, but that it is also non-volatile. +* The implementation of data structures emphasizes memory efficiency, so data structures inside Redis will likely use less memory compared to the same data structure modelled using a high-level programming language. +* Redis offers a number of features that are natural to find in a database, like replication, tunable levels of durability, clustering, and high availability. -Another good example is to think of Redis as a more complex version of memcached, where the operations are not just SETs and GETs, but operations to work with complex data types like Lists, Sets, ordered data structures, and so forth. +Another good example is to think of Redis as a more complex version of memcached, where the operations are not just SETs and GETs, but operations that work with complex data types like Lists, Sets, ordered data structures, and so forth. If you want to know more, this is a list of selected starting points: * Introduction to Redis data types. http://redis.io/topics/data-types-intro * Try Redis directly inside your browser. http://try.redis.io * The full list of Redis commands. http://redis.io/commands -* There is much more inside the Redis official documentation. http://redis.io/documentation +* There is much more inside the official Redis documentation. http://redis.io/documentation Building Redis -------------- @@ -29,7 +29,7 @@ and 64 bit systems. 
It may compile on Solaris derived systems (for instance SmartOS) but our support for this platform is *best effort* and Redis is not guaranteed to -work as well as in Linux, OSX, and \*BSD there. +work as well as in Linux, OSX, and \*BSD. It is as simple as: @@ -63,7 +63,7 @@ installed): Fixing build problems with dependencies or cached build options --------- -Redis has some dependencies which are included into the `deps` directory. +Redis has some dependencies which are included in the `deps` directory. `make` does not automatically rebuild dependencies even if something in the source code of dependencies changes. @@ -90,7 +90,7 @@ with a 64 bit target, or the other way around, you need to perform a In case of build errors when trying to build a 32 bit binary of Redis, try the following steps: -* Install the packages libc6-dev-i386 (also try g++-multilib). +* Install the package libc6-dev-i386 (also try g++-multilib). * Try using the following command line instead of `make 32bit`: `make CFLAGS="-m32 -march=native" LDFLAGS="-m32"` @@ -114,15 +114,15 @@ To compile against jemalloc on Mac OS X systems, use: Verbose build ------------- -Redis will build with a user friendly colorized output by default. -If you want to see a more verbose output use the following: +Redis will build with a user-friendly colorized output by default. +If you want to see a more verbose output, use the following: % make V=1 Running Redis ------------- -To run Redis with the default configuration just type: +To run Redis with the default configuration, just type: % cd src % ./redis-server @@ -173,7 +173,7 @@ You can find the list of all the available commands at http://redis.io/commands. Installing Redis ----------------- -In order to install Redis binaries into /usr/local/bin just use: +In order to install Redis binaries into /usr/local/bin, just use: % make install @@ -182,8 +182,8 @@ different destination. 
Make install will just install binaries in your system, but will not configure init scripts and configuration files in the appropriate place. This is not -needed if you want just to play a bit with Redis, but if you are installing -it the proper way for a production system, we have a script doing this +needed if you just want to play a bit with Redis, but if you are installing +it the proper way for a production system, we have a script that does this for Ubuntu and Debian systems: % cd utils @@ -201,7 +201,7 @@ You'll be able to stop and start Redis using the script named Code contributions ----------------- -Note: by contributing code to the Redis project in any form, including sending +Note: By contributing code to the Redis project in any form, including sending a pull request via Github, a code fragment or patch via private email or public discussion groups, you agree to release your code under the terms of the BSD license that you can find in the [COPYING][1] file included in the Redis @@ -251,7 +251,7 @@ of complexity incrementally. Note: lately Redis was refactored quite a bit. Function names and file names have been changed, so you may find that this documentation reflects the -`unstable` branch more closely. For instance in Redis 3.0 the `server.c` +`unstable` branch more closely. For instance, in Redis 3.0 the `server.c` and `server.h` files were named `redis.c` and `redis.h`. However the overall structure is the same. Keep in mind that all the new developments and pull requests should be performed against the `unstable` branch. @@ -296,7 +296,7 @@ The client structure defines a *connected client*: * The `fd` field is the client socket file descriptor. * `argc` and `argv` are populated with the command the client is executing, so that functions implementing a given Redis command can read the arguments. 
* `querybuf` accumulates the requests from the client, which are parsed by the Redis server according to the Redis protocol and executed by calling the implementations of the commands the client is executing. -* `reply` and `buf` are dynamic and static buffers that accumulate the replies the server sends to the client. These buffers are incrementally written to the socket as soon as the file descriptor is writable. +* `reply` and `buf` are dynamic and static buffers that accumulate the replies the server sends to the client. These buffers are incrementally written to the socket as soon as the file descriptor is writeable. As you can see in the client structure above, arguments in a command are described as `robj` structures. The following is the full `robj` @@ -329,13 +329,13 @@ This is the entry point of the Redis server, where the `main()` function is defined. The following are the most important steps in order to startup the Redis server. -* `initServerConfig()` setups the default values of the `server` structure. +* `initServerConfig()` sets up the default values of the `server` structure. * `initServer()` allocates the data structures needed to operate, setup the listening socket, and so forth. * `aeMain()` starts the event loop which listens for new connections. There are two special functions called periodically by the event loop: -1. `serverCron()` is called periodically (according to `server.hz` frequency), and performs tasks that must be performed from time to time, like checking for timedout clients. +1. `serverCron()` is called periodically (according to `server.hz` frequency), and performs tasks that must be performed from time to time, like checking for timed out clients. 2. `beforeSleep()` is called every time the event loop fired, Redis served a few requests, and is returning back into the event loop. 
Inside server.c you can find code that handles other vital things of the Redis server: @@ -352,16 +352,16 @@ This file defines all the I/O functions with clients, masters and replicas (which in Redis are just special clients): * `createClient()` allocates and initializes a new client. -* the `addReply*()` family of functions are used by commands implementations in order to append data to the client structure, that will be transmitted to the client as a reply for a given command executed. +* the `addReply*()` family of functions are used by command implementations in order to append data to the client structure, that will be transmitted to the client as a reply for a given command executed. * `writeToClient()` transmits the data pending in the output buffers to the client and is called by the *writable event handler* `sendReplyToClient()`. -* `readQueryFromClient()` is the *readable event handler* and accumulates data from read from the client into the query buffer. +* `readQueryFromClient()` is the *readable event handler* and accumulates data read from the client into the query buffer. * `processInputBuffer()` is the entry point in order to parse the client query buffer according to the Redis protocol. Once commands are ready to be processed, it calls `processCommand()` which is defined inside `server.c` in order to actually execute the command. * `freeClient()` deallocates, disconnects and removes a client. aof.c and rdb.c --- -As you can guess from the names these files implement the RDB and AOF +As you can guess from the names, these files implement the RDB and AOF persistence for Redis. Redis uses a persistence model based on the `fork()` system call in order to create a thread with the same (shared) memory content of the main Redis thread. 
This secondary thread dumps the content @@ -373,13 +373,13 @@ The implementation inside `aof.c` has additional functions in order to implement an API that allows commands to append new commands into the AOF file as clients execute them. -The `call()` function defined inside `server.c` is responsible to call +The `call()` function defined inside `server.c` is responsible for calling the functions that in turn will write the commands into the AOF. db.c --- -Certain Redis commands operate on specific data types, others are general. +Certain Redis commands operate on specific data types; others are general. Examples of generic commands are `DEL` and `EXPIRE`. They operate on keys and not on their values specifically. All those generic commands are defined inside `db.c`. @@ -387,7 +387,7 @@ defined inside `db.c`. Moreover `db.c` implements an API in order to perform certain operations on the Redis dataset without directly accessing the internal data structures. -The most important functions inside `db.c` which are used in many commands +The most important functions inside `db.c` which are used in many command implementations are the following: * `lookupKeyRead()` and `lookupKeyWrite()` are used in order to get a pointer to the value associated to a given key, or `NULL` if the key does not exist. @@ -405,7 +405,7 @@ The `robj` structure defining Redis objects was already described. Inside a basic level, like functions to allocate new objects, handle the reference counting and so forth. Notable functions inside this file: -* `incrRefcount()` and `decrRefCount()` are used in order to increment or decrement an object reference count. When it drops to 0 the object is finally freed. +* `incrRefCount()` and `decrRefCount()` are used in order to increment or decrement an object reference count. When it drops to 0 the object is finally freed. * `createObject()` allocates a new object. 
There are also specialized functions to allocate string objects having a specific content, like `createStringObjectFromLongLong()` and similar functions. This file also implements the `OBJECT` command. @@ -429,12 +429,12 @@ replicas, or to continue the replication after a disconnection. Other C files --- -* `t_hash.c`, `t_list.c`, `t_set.c`, `t_string.c`, `t_zset.c` and `t_stream.c` contains the implementation of the Redis data types. They implement both an API to access a given data type, and the client commands implementations for these data types. +* `t_hash.c`, `t_list.c`, `t_set.c`, `t_string.c`, `t_zset.c` and `t_stream.c` contains the implementation of the Redis data types. They implement both an API to access a given data type, and the client command implementations for these data types. * `ae.c` implements the Redis event loop, it's a self contained library which is simple to read and understand. * `sds.c` is the Redis string library, check http://github.com/antirez/sds for more information. * `anet.c` is a library to use POSIX networking in a simpler way compared to the raw interface exposed by the kernel. * `dict.c` is an implementation of a non-blocking hash table which rehashes incrementally. -* `scripting.c` implements Lua scripting. It is completely self contained from the rest of the Redis implementation and is simple enough to understand if you are familar with the Lua API. +* `scripting.c` implements Lua scripting. It is completely self-contained and isolated from the rest of the Redis implementation and is simple enough to understand if you are familiar with the Lua API. * `cluster.c` implements the Redis Cluster. Probably a good read only after being very familiar with the rest of the Redis code base. If you want to read `cluster.c` make sure to read the [Redis Cluster specification][3]. [3]: http://redis.io/topics/cluster-spec @@ -460,12 +460,12 @@ top comment inside `server.c`. 
After the command operates in some way, it returns a reply to the client, usually using `addReply()` or a similar function defined inside `networking.c`. -There are tons of commands implementations inside the Redis source code -that can serve as examples of actual commands implementations. To write -a few toy commands can be a good exercise to familiarize with the code base. +There are tons of command implementations inside the Redis source code +that can serve as examples of actual command implementations. Writing +a few toy commands can be a good exercise to get familiar with the code base. There are also many other files not described here, but it is useless to -cover everything. We want to just help you with the first steps. +cover everything. We just want to help you with the first steps. Eventually you'll find your way inside the Redis code base :-) Enjoy! diff --git a/deps/README.md b/deps/README.md index f923c06ad..02c99052f 100644 --- a/deps/README.md +++ b/deps/README.md @@ -21,7 +21,7 @@ just following tose steps: 1. Remove the jemalloc directory. 2. Substitute it with the new jemalloc source tree. -3. Edit the Makefile localted in the same directory as the README you are +3. Edit the Makefile located in the same directory as the README you are reading, and change the --with-version in the Jemalloc configure script options with the version you are using. This is required because otherwise Jemalloc configuration script is broken and will not work nested in another @@ -33,7 +33,7 @@ If you want to upgrade Jemalloc while also providing support for active defragmentation, in addition to the above steps you need to perform the following additional steps: -5. In Jemalloc three, file `include/jemalloc/jemalloc_macros.h.in`, make sure +5. In Jemalloc tree, file `include/jemalloc/jemalloc_macros.h.in`, make sure to add `#define JEMALLOC_FRAG_HINT`. 6. Implement the function `je_get_defrag_hint()` inside `src/jemalloc.c`. 
You can see how it is implemented in the current Jemalloc source tree shipped @@ -49,7 +49,7 @@ Hiredis uses the SDS string library, that must be the same version used inside R 1. Check with diff if hiredis API changed and what impact it could have in Redis. 2. Make sure that the SDS library inside Hiredis and inside Redis are compatible. 3. After the upgrade, run the Redis Sentinel test. -4. Check manually that redis-cli and redis-benchmark behave as expecteed, since we have no tests for CLI utilities currently. +4. Check manually that redis-cli and redis-benchmark behave as expected, since we have no tests for CLI utilities currently. Linenoise --- @@ -77,6 +77,6 @@ and our version: 1. Makefile is modified to allow a different compiler than GCC. 2. We have the implementation source code, and directly link to the following external libraries: `lua_cjson.o`, `lua_struct.o`, `lua_cmsgpack.o` and `lua_bit.o`. -3. There is a security fix in `ldo.c`, line 498: The check for `LUA_SIGNATURE[0]` is removed in order toa void direct bytecode execution. +3. There is a security fix in `ldo.c`, line 498: The check for `LUA_SIGNATURE[0]` is removed in order to avoid direct bytecode execution. diff --git a/deps/linenoise/linenoise.c b/deps/linenoise/linenoise.c index cfe51e768..ccf5c5548 100644 --- a/deps/linenoise/linenoise.c +++ b/deps/linenoise/linenoise.c @@ -625,7 +625,7 @@ static void refreshMultiLine(struct linenoiseState *l) { rpos2 = (plen+l->pos+l->cols)/l->cols; /* current cursor relative row. */ lndebug("rpos2 %d", rpos2); - /* Go up till we reach the expected positon. */ + /* Go up till we reach the expected position. */ if (rows-rpos2 > 0) { lndebug("go-up %d", rows-rpos2); snprintf(seq,64,"\x1b[%dA", rows-rpos2); @@ -767,7 +767,7 @@ void linenoiseEditBackspace(struct linenoiseState *l) { } } -/* Delete the previosu word, maintaining the cursor at the start of the +/* Delete the previous word, maintaining the cursor at the start of the * current word. 
*/ void linenoiseEditDeletePrevWord(struct linenoiseState *l) { size_t old_pos = l->pos; diff --git a/redis.conf b/redis.conf index f2e7f1964..38499b276 100644 --- a/redis.conf +++ b/redis.conf @@ -24,7 +24,7 @@ # to customize a few per-server settings. Include files can include # other files, so use this wisely. # -# Notice option "include" won't be rewritten by command "CONFIG REWRITE" +# Note that option "include" won't be rewritten by command "CONFIG REWRITE" # from admin or Redis Sentinel. Since Redis always uses the last processed # line as value of a configuration directive, you'd better put includes # at the beginning of this file to avoid overwriting config change at runtime. @@ -46,7 +46,7 @@ ################################## NETWORK ##################################### # By default, if no "bind" configuration directive is specified, Redis listens -# for connections from all the network interfaces available on the server. +# for connections from all available network interfaces on the host machine. # It is possible to listen to just one or multiple selected interfaces using # the "bind" configuration directive, followed by one or more IP addresses. # @@ -58,13 +58,12 @@ # ~~~ WARNING ~~~ If the computer running Redis is directly exposed to the # internet, binding to all the interfaces is dangerous and will expose the # instance to everybody on the internet. So by default we uncomment the -# following bind directive, that will force Redis to listen only into -# the IPv4 loopback interface address (this means Redis will be able to -# accept connections only from clients running into the same computer it -# is running). +# following bind directive, that will force Redis to listen only on the +# IPv4 loopback interface address (this means Redis will only be able to +# accept client connections from the same host that it is running on). # # IF YOU ARE SURE YOU WANT YOUR INSTANCE TO LISTEN TO ALL THE INTERFACES -# JUST COMMENT THE FOLLOWING LINE. 
+# JUST COMMENT OUT THE FOLLOWING LINE. # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ bind 127.0.0.1 @@ -93,8 +92,8 @@ port 6379 # TCP listen() backlog. # -# In high requests-per-second environments you need an high backlog in order -# to avoid slow clients connections issues. Note that the Linux kernel +# In high requests-per-second environments you need a high backlog in order +# to avoid slow clients connection issues. Note that the Linux kernel # will silently truncate it to the value of /proc/sys/net/core/somaxconn so # make sure to raise both the value of somaxconn and tcp_max_syn_backlog # in order to get the desired effect. @@ -118,8 +117,8 @@ timeout 0 # of communication. This is useful for two reasons: # # 1) Detect dead peers. -# 2) Take the connection alive from the point of view of network -# equipment in the middle. +# 2) Force network equipment in the middle to consider the connection to be +# alive. # # On Linux, the specified value (in seconds) is the period used to send ACKs. # Note that to close the connection the double of the time is needed. @@ -228,11 +227,12 @@ daemonize no # supervision tree. Options: # supervised no - no supervision interaction # supervised upstart - signal upstart by putting Redis into SIGSTOP mode +# requires "expect stop" in your upstart job config # supervised systemd - signal systemd by writing READY=1 to $NOTIFY_SOCKET # supervised auto - detect upstart or systemd method based on # UPSTART_JOB or NOTIFY_SOCKET environment variables # Note: these supervision methods only signal "process is ready." -# They do not enable continuous liveness pings back to your supervisor. +# They do not enable continuous pings back to your supervisor. supervised no # If a pid file is specified, Redis writes it where specified at startup @@ -291,7 +291,7 @@ always-show-logo yes # Will save the DB if both the given number of seconds and the given # number of write operations against the DB occurred. 
# -# In the example below the behaviour will be to save: +# In the example below the behavior will be to save: # after 900 sec (15 min) if at least 1 key changed # after 300 sec (5 min) if at least 10 keys changed # after 60 sec if at least 10000 keys changed @@ -324,7 +324,7 @@ save 60 10000 stop-writes-on-bgsave-error yes # Compress string objects using LZF when dump .rdb databases? -# For default that's set to 'yes' as it's almost always a win. +# By default compression is enabled as it's almost always a win. # If you want to save some CPU in the saving child set it to 'no' but # the dataset will likely be bigger if you have compressible values or keys. rdbcompression yes @@ -412,11 +412,11 @@ dir ./ # still reply to client requests, possibly with out of date data, or the # data set may just be empty if this is the first synchronization. # -# 2) if replica-serve-stale-data is set to 'no' the replica will reply with -# an error "SYNC with master in progress" to all the kind of commands -# but to INFO, replicaOF, AUTH, PING, SHUTDOWN, REPLCONF, ROLE, CONFIG, -# SUBSCRIBE, UNSUBSCRIBE, PSUBSCRIBE, PUNSUBSCRIBE, PUBLISH, PUBSUB, -# COMMAND, POST, HOST: and LATENCY. +# 2) If replica-serve-stale-data is set to 'no' the replica will reply with +# an error "SYNC with master in progress" to all commands except: +# INFO, REPLICAOF, AUTH, PING, SHUTDOWN, REPLCONF, ROLE, CONFIG, SUBSCRIBE, +# UNSUBSCRIBE, PSUBSCRIBE, PUNSUBSCRIBE, PUBLISH, PUBSUB, COMMAND, POST, +# HOST and LATENCY. # replica-serve-stale-data yes @@ -487,7 +487,7 @@ repl-diskless-sync-delay 5 # # Replica can load the RDB it reads from the replication link directly from the # socket, or store the RDB to a file and read that file after it was completely -# recived from the master. +# received from the master. 
# # In many cases the disk is slower than the network, and storing and loading # the RDB file may increase replication time (and even increase the master's @@ -517,7 +517,8 @@ repl-diskless-load disabled # # It is important to make sure that this value is greater than the value # specified for repl-ping-replica-period otherwise a timeout will be detected -# every time there is low traffic between the master and the replica. +# every time there is low traffic between the master and the replica. The default +# value is 60 seconds. # # repl-timeout 60 @@ -542,21 +543,21 @@ repl-disable-tcp-nodelay no # partial resync is enough, just passing the portion of data the replica # missed while disconnected. # -# The bigger the replication backlog, the longer the time the replica can be -# disconnected and later be able to perform a partial resynchronization. +# The bigger the replication backlog, the longer the replica can endure the +# disconnect and later be able to perform a partial resynchronization. # -# The backlog is only allocated once there is at least a replica connected. +# The backlog is only allocated if there is at least one replica connected. # # repl-backlog-size 1mb -# After a master has no longer connected replicas for some time, the backlog -# will be freed. The following option configures the amount of seconds that -# need to elapse, starting from the time the last replica disconnected, for -# the backlog buffer to be freed. +# After a master has no connected replicas for some time, the backlog will be +# freed. The following option configures the amount of seconds that need to +# elapse, starting from the time the last replica disconnected, for the backlog +# buffer to be freed. # # Note that replicas never free the backlog for timeout, since they may be # promoted to masters later, and should be able to correctly "partially -# resynchronize" with the replicas: hence they should always accumulate backlog. 
+# resynchronize" with other replicas: hence they should always accumulate backlog. # # A value of 0 means to never release the backlog. # @@ -606,8 +607,8 @@ replica-priority 100 # Another place where this info is available is in the output of the # "ROLE" command of a master. # -# The listed IP and address normally reported by a replica is obtained -# in the following way: +# The listed IP address and port normally reported by a replica is +# obtained in the following way: # # IP: The address is auto detected by checking the peer address # of the socket used by the replica to connect with the master. @@ -617,7 +618,7 @@ replica-priority 100 # listen for connections. # # However when port forwarding or Network Address Translation (NAT) is -# used, the replica may be actually reachable via different IP and port +# used, the replica may actually be reachable via different IP and port # pairs. The following two options can be used by a replica in order to # report to its master a specific set of IP and port, so that both INFO # and ROLE will report those values. @@ -634,7 +635,7 @@ replica-priority 100 # This is implemented using an invalidation table that remembers, using # 16 millions of slots, what clients may have certain subsets of keys. In turn # this is used in order to send invalidation messages to clients. Please -# to understand more about the feature check this page: +# check this page to understand more about the feature: # # https://redis.io/topics/client-side-caching # @@ -666,7 +667,7 @@ replica-priority 100 ################################## SECURITY ################################### -# Warning: since Redis is pretty fast an outside user can try up to +# Warning: since Redis is pretty fast, an outside user can try up to # 1 million passwords per second against a modern box. This means that you # should use very strong passwords, otherwise they will be very easy to break. 
# Note that because the password is really a shared secret between the client @@ -690,7 +691,7 @@ replica-priority 100 # AUTH (or the HELLO command AUTH option) in order to be authenticated and # start to work. # -# The ACL rules that describe what an user can do are the following: +# The ACL rules that describe what a user can do are the following: # # on Enable the user: it is possible to authenticate as this user. # off Disable the user: it's no longer possible to authenticate @@ -718,7 +719,7 @@ replica-priority 100 # It is possible to specify multiple patterns. # allkeys Alias for ~* # resetkeys Flush the list of allowed keys patterns. -# > Add this passowrd to the list of valid password for the user. +# > Add this password to the list of valid password for the user. # For example >mypass will add "mypass" to the list. # This directive clears the "nopass" flag (see later). # < Remove this password from the list of valid passwords. @@ -772,7 +773,7 @@ acllog-max-len 128 # # Instead of configuring users here in this file, it is possible to use # a stand-alone file just listing users. The two methods cannot be mixed: -# if you configure users here and at the same time you activate the exteranl +# if you configure users here and at the same time you activate the external # ACL file, the server will refuse to start. # # The format of the external ACL user file is exactly the same as the @@ -780,7 +781,7 @@ acllog-max-len 128 # # aclfile /etc/redis/users.acl -# IMPORTANT NOTE: starting with Redis 6 "requirepass" is just a compatiblity +# IMPORTANT NOTE: starting with Redis 6 "requirepass" is just a compatibility # layer on top of the new ACL system. The option effect will be just setting # the password for the default user. 
Clients will still authenticate using # AUTH as usually, or more explicitly with AUTH default @@ -891,8 +892,8 @@ acllog-max-len 128 # LRU, LFU and minimal TTL algorithms are not precise algorithms but approximated # algorithms (in order to save memory), so you can tune it for speed or -# accuracy. For default Redis will check five keys and pick the one that was -# used less recently, you can change the sample size using the following +# accuracy. By default Redis will check five keys and pick the one that was +# used least recently, you can change the sample size using the following # configuration directive. # # The default of 5 produces good enough results. 10 Approximates very closely @@ -932,8 +933,8 @@ acllog-max-len 128 # it is possible to increase the expire "effort" that is normally set to # "1", to a greater value, up to the value "10". At its maximum value the # system will use more CPU, longer cycles (and technically may introduce -# more latency), and will tollerate less already expired keys still present -# in the system. It's a tradeoff betweeen memory, CPU and latecy. +# more latency), and will tolerate less already expired keys still present +# in the system. It's a tradeoff between memory, CPU and latency. # # active-expire-effort 1 @@ -1001,7 +1002,7 @@ lazyfree-lazy-user-del no # # Now it is also possible to handle Redis clients socket reads and writes # in different I/O threads. Since especially writing is so slow, normally -# Redis users use pipelining in order to speedup the Redis performances per +# Redis users use pipelining in order to speed up the Redis performances per # core, and spawn multiple instances in order to scale more. Using I/O # threads it is possible to easily speedup two times Redis without resorting # to pipelining nor sharding of the instance. @@ -1019,7 +1020,7 @@ lazyfree-lazy-user-del no # # io-threads 4 # -# Setting io-threads to 1 will just use the main thread as usually. 
+# Setting io-threads to 1 will just use the main thread as usual. # When I/O threads are enabled, we only use threads for writes, that is # to thread the write(2) syscall and transfer the client buffers to the # socket. However it is also possible to enable threading of reads and @@ -1036,7 +1037,7 @@ lazyfree-lazy-user-del no # # NOTE 2: If you want to test the Redis speedup using redis-benchmark, make # sure you also run the benchmark itself in threaded mode, using the -# --threads option to match the number of Redis theads, otherwise you'll not +# --threads option to match the number of Redis threads, otherwise you'll not # be able to notice the improvements. ############################ KERNEL OOM CONTROL ############################## @@ -1189,8 +1190,8 @@ aof-load-truncated yes # # [RDB file][AOF tail] # -# When loading Redis recognizes that the AOF file starts with the "REDIS" -# string and loads the prefixed RDB file, and continues loading the AOF +# When loading, Redis recognizes that the AOF file starts with the "REDIS" +# string and loads the prefixed RDB file, then continues loading the AOF # tail. aof-use-rdb-preamble yes @@ -1204,7 +1205,7 @@ aof-use-rdb-preamble yes # # When a long running script exceeds the maximum execution time only the # SCRIPT KILL and SHUTDOWN NOSAVE commands are available. The first can be -# used to stop a script that did not yet called write commands. The second +# used to stop a script that did not yet call any write commands. The second # is the only way to shut down the server in the case a write command was # already issued by the script but the user doesn't want to wait for the natural # termination of the script. @@ -1230,7 +1231,7 @@ lua-time-limit 5000 # Cluster node timeout is the amount of milliseconds a node must be unreachable # for it to be considered in failure state. -# Most other internal time limits are multiple of the node timeout. +# Most other internal time limits are a multiple of the node timeout. 
# # cluster-node-timeout 15000 @@ -1257,18 +1258,18 @@ lua-time-limit 5000 # the failover if, since the last interaction with the master, the time # elapsed is greater than: # -# (node-timeout * replica-validity-factor) + repl-ping-replica-period +# (node-timeout * cluster-replica-validity-factor) + repl-ping-replica-period # -# So for example if node-timeout is 30 seconds, and the replica-validity-factor +# So for example if node-timeout is 30 seconds, and the cluster-replica-validity-factor # is 10, and assuming a default repl-ping-replica-period of 10 seconds, the # replica will not try to failover if it was not able to talk with the master # for longer than 310 seconds. # -# A large replica-validity-factor may allow replicas with too old data to failover +# A large cluster-replica-validity-factor may allow replicas with too old data to failover # a master, while a too small value may prevent the cluster from being able to # elect a replica at all. # -# For maximum availability, it is possible to set the replica-validity-factor +# For maximum availability, it is possible to set the cluster-replica-validity-factor # to a value of 0, which means, that replicas will always try to failover the # master regardless of the last time they interacted with the master. # (However they'll always try to apply a delay proportional to their @@ -1299,7 +1300,7 @@ lua-time-limit 5000 # cluster-migration-barrier 1 # By default Redis Cluster nodes stop accepting queries if they detect there -# is at least an hash slot uncovered (no available node is serving it). +# is at least a hash slot uncovered (no available node is serving it). # This way if the cluster is partially down (for example a range of hash slots # are no longer covered) all the cluster becomes, eventually, unavailable. # It automatically returns available as soon as all the slots are covered again. 
@@ -1354,7 +1355,7 @@ lua-time-limit 5000 # * cluster-announce-port # * cluster-announce-bus-port # -# Each instruct the node about its address, client port, and cluster message +# Each instructs the node about its address, client port, and cluster message # bus port. The information is then published in the header of the bus packets # so that other nodes will be able to correctly map the address of the node # publishing the information. @@ -1365,7 +1366,7 @@ lua-time-limit 5000 # Note that when remapped, the bus port may not be at the fixed offset of # clients port + 10000, so you can specify any port and bus-port depending # on how they get remapped. If the bus-port is not set, a fixed offset of -# 10000 will be used as usually. +# 10000 will be used as usual. # # Example: # @@ -1494,7 +1495,7 @@ notify-keyspace-events "" # two kind of inline requests that were anyway illegal: an empty request # or any request that starts with "/" (there are no Redis commands starting # with such a slash). Normal RESP2/RESP3 requests are completely out of the -# path of the Gopher protocol implementation and are served as usually as well. +# path of the Gopher protocol implementation and are served as usual as well. # # If you open a connection to Redis when Gopher is enabled and send it # a string like "/foo", if there is a key named "/foo" it is served via the @@ -1666,7 +1667,7 @@ client-output-buffer-limit pubsub 32mb 8mb 60 # client-query-buffer-limit 1gb # In the Redis protocol, bulk requests, that are, elements representing single -# strings, are normally limited ot 512 mb. However you can change this limit +# strings, are normally limited to 512 mb. 
However you can change this limit # here, but must be 1mb or greater # # proto-max-bulk-len 512mb @@ -1695,7 +1696,7 @@ hz 10 # # Since the default HZ value by default is conservatively set to 10, Redis # offers, and enables by default, the ability to use an adaptive HZ value -# which will temporary raise when there are many connected clients. +# which will temporarily raise when there are many connected clients. # # When dynamic HZ is enabled, the actual configured HZ will be used # as a baseline, but multiples of the configured HZ value will be actually @@ -1762,7 +1763,7 @@ rdb-save-incremental-fsync yes # for the key counter to be divided by two (or decremented if it has a value # less <= 10). # -# The default value for the lfu-decay-time is 1. A Special value of 0 means to +# The default value for the lfu-decay-time is 1. A special value of 0 means to # decay the counter every time it happens to be scanned. # # lfu-log-factor 10 @@ -1782,7 +1783,7 @@ rdb-save-incremental-fsync yes # restart is needed in order to lower the fragmentation, or at least to flush # away all the data and create it again. However thanks to this feature # implemented by Oran Agra for Redis 4.0 this process can happen at runtime -# in an "hot" way, while the server is running. +# in a "hot" way, while the server is running. # # Basically when the fragmentation is over a certain level (see the # configuration options below) Redis will start to create new copies of the @@ -1859,3 +1860,4 @@ jemalloc-bg-thread yes # # Set bgsave child process to cpu affinity 1,10,11 # bgsave_cpulist 1,10-11 + diff --git a/sentinel.conf b/sentinel.conf index 4ca5e5f8f..b6ff05f25 100644 --- a/sentinel.conf +++ b/sentinel.conf @@ -259,6 +259,6 @@ sentinel deny-scripts-reconfig yes # SENTINEL SET can also be used in order to perform this configuration at runtime. 
# # In order to set a command back to its original name (undo the renaming), it -# is possible to just rename a command to itsef: +# is possible to just rename a command to itself: # # SENTINEL rename-command mymaster CONFIG CONFIG diff --git a/src/acl.c b/src/acl.c index e0432ba5c..74768aa27 100644 --- a/src/acl.c +++ b/src/acl.c @@ -289,7 +289,7 @@ void ACLFreeUserAndKillClients(user *u) { while ((ln = listNext(&li)) != NULL) { client *c = listNodeValue(ln); if (c->user == u) { - /* We'll free the conenction asynchronously, so + /* We'll free the connection asynchronously, so * in theory to set a different user is not needed. * However if there are bugs in Redis, soon or later * this may result in some security hole: it's much diff --git a/src/adlist.c b/src/adlist.c index 0fedc0729..bc06ffc8f 100644 --- a/src/adlist.c +++ b/src/adlist.c @@ -34,8 +34,9 @@ #include "zmalloc.h" /* Create a new list. The created list can be freed with - * AlFreeList(), but private value of every node need to be freed - * by the user before to call AlFreeList(). + * listRelease(), but private value of every node need to be freed + * by the user before to call listRelease(), or by setting a free method using + * listSetFreeMethod. * * On error, NULL is returned. Otherwise the pointer to the new list. */ list *listCreate(void) @@ -217,8 +218,8 @@ void listRewindTail(list *list, listIter *li) { * listDelNode(), but not to remove other elements. * * The function returns a pointer to the next element of the list, - * or NULL if there are no more elements, so the classical usage patter - * is: + * or NULL if there are no more elements, so the classical usage + * pattern is: * * iter = listGetIterator(list,); * while ((node = listNext(iter)) != NULL) { diff --git a/src/ae.c b/src/ae.c index 689a27d16..c51666562 100644 --- a/src/ae.c +++ b/src/ae.c @@ -457,7 +457,7 @@ int aeProcessEvents(aeEventLoop *eventLoop, int flags) int fired = 0; /* Number of events fired for current fd. 
*/ /* Normally we execute the readable event first, and the writable - * event laster. This is useful as sometimes we may be able + * event later. This is useful as sometimes we may be able * to serve the reply of a query immediately after processing the * query. * @@ -465,7 +465,7 @@ int aeProcessEvents(aeEventLoop *eventLoop, int flags) * asking us to do the reverse: never fire the writable event * after the readable. In such a case, we invert the calls. * This is useful when, for instance, we want to do things - * in the beforeSleep() hook, like fsynching a file to disk, + * in the beforeSleep() hook, like fsyncing a file to disk, * before replying to a client. */ int invert = fe->mask & AE_BARRIER; diff --git a/src/ae_evport.c b/src/ae_evport.c index 5c317becb..b79ed9bc7 100644 --- a/src/ae_evport.c +++ b/src/ae_evport.c @@ -232,7 +232,7 @@ static void aeApiDelEvent(aeEventLoop *eventLoop, int fd, int mask) { /* * ENOMEM is a potentially transient condition, but the kernel won't * generally return it unless things are really bad. EAGAIN indicates - * we've reached an resource limit, for which it doesn't make sense to + * we've reached a resource limit, for which it doesn't make sense to * retry (counter-intuitively). All other errors indicate a bug. In any * of these cases, the best we can do is to abort. */ diff --git a/src/aof.c b/src/aof.c index cbc0989d0..dc50e2228 100644 --- a/src/aof.c +++ b/src/aof.c @@ -544,7 +544,7 @@ sds catAppendOnlyGenericCommand(sds dst, int argc, robj **argv) { return dst; } -/* Create the sds representation of an PEXPIREAT command, using +/* Create the sds representation of a PEXPIREAT command, using * 'seconds' as time to live and 'cmd' to understand what command * we are translating into a PEXPIREAT. 
* @@ -1818,7 +1818,7 @@ void backgroundRewriteDoneHandler(int exitcode, int bysignal) { "Background AOF rewrite terminated with error"); } else { /* SIGUSR1 is whitelisted, so we have a way to kill a child without - * tirggering an error condition. */ + * triggering an error condition. */ if (bysignal != SIGUSR1) server.aof_lastbgrewrite_status = C_ERR; diff --git a/src/atomicvar.h b/src/atomicvar.h index 160056cd7..ecd26ad70 100644 --- a/src/atomicvar.h +++ b/src/atomicvar.h @@ -21,7 +21,7 @@ * * Never use return value from the macros, instead use the AtomicGetIncr() * if you need to get the current value and increment it atomically, like - * in the followign example: + * in the following example: * * long oldvalue; * atomicGetIncr(myvar,oldvalue,1); diff --git a/src/bitops.c b/src/bitops.c index 4b1a09aa4..eb3a9bb1f 100644 --- a/src/bitops.c +++ b/src/bitops.c @@ -36,7 +36,7 @@ /* Count number of bits set in the binary array pointed by 's' and long * 'count' bytes. The implementation of this function is required to - * work with a input string length up to 512 MB. */ + * work with an input string length up to 512 MB. */ size_t redisPopcount(void *s, long count) { size_t bits = 0; unsigned char *p = s; @@ -107,7 +107,7 @@ long redisBitpos(void *s, unsigned long count, int bit) { int found; /* Process whole words first, seeking for first word that is not - * all ones or all zeros respectively if we are lookig for zeros + * all ones or all zeros respectively if we are looking for zeros * or ones. This is much faster with large strings having contiguous * blocks of 1 or 0 bits compared to the vanilla bit per bit processing. * @@ -496,7 +496,7 @@ robj *lookupStringForBitCommand(client *c, size_t maxbit) { * in 'len'. The user is required to pass (likely stack allocated) buffer * 'llbuf' of at least LONG_STR_SIZE bytes. Such a buffer is used in the case * the object is integer encoded in order to provide the representation - * without usign heap allocation. 
+ * without using heap allocation. * * The function returns the pointer to the object array of bytes representing * the string it contains, that may be a pointer to 'llbuf' or to the diff --git a/src/blocked.c b/src/blocked.c index 92f1cee65..ea20f5923 100644 --- a/src/blocked.c +++ b/src/blocked.c @@ -53,7 +53,7 @@ * to 0, no timeout is processed). * It usually just needs to send a reply to the client. * - * When implementing a new type of blocking opeation, the implementation + * When implementing a new type of blocking operation, the implementation * should modify unblockClient() and replyToBlockedClientTimedOut() in order * to handle the btype-specific behavior of this two functions. * If the blocking operation waits for certain keys to change state, the @@ -118,7 +118,7 @@ void processUnblockedClients(void) { /* This function will schedule the client for reprocessing at a safe time. * - * This is useful when a client was blocked for some reason (blocking opeation, + * This is useful when a client was blocked for some reason (blocking operation, * CLIENT PAUSE, or whatever), because it may end with some accumulated query * buffer that needs to be processed ASAP: * diff --git a/src/cluster.c b/src/cluster.c index 8d8b61ab4..e8db4050d 100644 --- a/src/cluster.c +++ b/src/cluster.c @@ -377,7 +377,7 @@ void clusterSaveConfigOrDie(int do_fsync) { } } -/* Lock the cluster config using flock(), and leaks the file descritor used to +/* Lock the cluster config using flock(), and leaks the file descriptor used to * acquire the lock so that the file will be locked forever. * * This works because we always update nodes.conf with a new version @@ -544,13 +544,13 @@ void clusterInit(void) { /* Reset a node performing a soft or hard reset: * - * 1) All other nodes are forget. + * 1) All other nodes are forgotten. * 2) All the assigned / open slots are released. * 3) If the node is a slave, it turns into a master. - * 5) Only for hard reset: a new Node ID is generated. 
- * 6) Only for hard reset: currentEpoch and configEpoch are set to 0. - * 7) The new configuration is saved and the cluster state updated. - * 8) If the node was a slave, the whole data set is flushed away. */ + * 4) Only for hard reset: a new Node ID is generated. + * 5) Only for hard reset: currentEpoch and configEpoch are set to 0. + * 6) The new configuration is saved and the cluster state updated. + * 7) If the node was a slave, the whole data set is flushed away. */ void clusterReset(int hard) { dictIterator *di; dictEntry *de; @@ -646,7 +646,7 @@ static void clusterConnAcceptHandler(connection *conn) { /* Create a link object we use to handle the connection. * It gets passed to the readable handler when data is available. - * Initiallly the link->node pointer is set to NULL as we don't know + * Initially the link->node pointer is set to NULL as we don't know * which node is, but the right node is references once we know the * node identity. */ link = createClusterLink(NULL); @@ -1060,7 +1060,7 @@ uint64_t clusterGetMaxEpoch(void) { * 3) Persist the configuration on disk before sending packets with the * new configuration. * - * If the new config epoch is generated and assigend, C_OK is returned, + * If the new config epoch is generated and assigned, C_OK is returned, * otherwise C_ERR is returned (since the node has already the greatest * configuration around) and no operation is performed. * @@ -1133,7 +1133,7 @@ int clusterBumpConfigEpochWithoutConsensus(void) { * * In general we want a system that eventually always ends with different * masters having different configuration epochs whatever happened, since - * nothign is worse than a split-brain condition in a distributed system. + * nothing is worse than a split-brain condition in a distributed system. * * BEHAVIOR * @@ -1192,7 +1192,7 @@ void clusterHandleConfigEpochCollision(clusterNode *sender) { * entries from the black list. 
This is an O(N) operation but it is not a * problem since add / exists operations are called very infrequently and * the hash table is supposed to contain very little elements at max. - * However without the cleanup during long uptimes and with some automated + * However without the cleanup during long uptime and with some automated * node add/removal procedures, entries could accumulate. */ void clusterBlacklistCleanup(void) { dictIterator *di; @@ -1346,12 +1346,12 @@ int clusterHandshakeInProgress(char *ip, int port, int cport) { return de != NULL; } -/* Start an handshake with the specified address if there is not one +/* Start a handshake with the specified address if there is not one * already in progress. Returns non-zero if the handshake was actually * started. On error zero is returned and errno is set to one of the * following values: * - * EAGAIN - There is already an handshake in progress for this address. + * EAGAIN - There is already a handshake in progress for this address. * EINVAL - IP or port are not valid. */ int clusterStartHandshake(char *ip, int port, int cport) { clusterNode *n; @@ -1793,7 +1793,7 @@ int clusterProcessPacket(clusterLink *link) { if (sender) sender->data_received = now; if (sender && !nodeInHandshake(sender)) { - /* Update our curretEpoch if we see a newer epoch in the cluster. */ + /* Update our currentEpoch if we see a newer epoch in the cluster. */ senderCurrentEpoch = ntohu64(hdr->currentEpoch); senderConfigEpoch = ntohu64(hdr->configEpoch); if (senderCurrentEpoch > server.cluster->currentEpoch) @@ -2480,7 +2480,7 @@ void clusterSetGossipEntry(clusterMsg *hdr, int i, clusterNode *n) { } /* Send a PING or PONG packet to the specified node, making sure to add enough - * gossip informations. */ + * gossip information. 
*/ void clusterSendPing(clusterLink *link, int type) { unsigned char *buf; clusterMsg *hdr; @@ -2500,7 +2500,7 @@ void clusterSendPing(clusterLink *link, int type) { * node_timeout we exchange with each other node at least 4 packets * (we ping in the worst case in node_timeout/2 time, and we also * receive two pings from the host), we have a total of 8 packets - * in the node_timeout*2 falure reports validity time. So we have + * in the node_timeout*2 failure reports validity time. So we have * that, for a single PFAIL node, we can expect to receive the following * number of failure reports (in the specified window of time): * @@ -2527,7 +2527,7 @@ void clusterSendPing(clusterLink *link, int type) { * faster to propagate to go from PFAIL to FAIL state. */ int pfail_wanted = server.cluster->stats_pfail_nodes; - /* Compute the maxium totlen to allocate our buffer. We'll fix the totlen + /* Compute the maximum totlen to allocate our buffer. We'll fix the totlen * later according to the number of gossip sections we really were able * to put inside the packet. */ totlen = sizeof(clusterMsg)-sizeof(union clusterMsgData); @@ -2564,7 +2564,7 @@ void clusterSendPing(clusterLink *link, int type) { if (this->flags & (CLUSTER_NODE_HANDSHAKE|CLUSTER_NODE_NOADDR) || (this->link == NULL && this->numslots == 0)) { - freshnodes--; /* Tecnically not correct, but saves CPU. */ + freshnodes--; /* Technically not correct, but saves CPU. */ continue; } @@ -3149,7 +3149,7 @@ void clusterHandleSlaveFailover(void) { } } - /* If the previous failover attempt timedout and the retry time has + /* If the previous failover attempt timeout and the retry time has * elapsed, we can setup a new one. 
*/ if (auth_age > auth_retry_time) { server.cluster->failover_auth_time = mstime() + @@ -3255,7 +3255,7 @@ void clusterHandleSlaveFailover(void) { * * Slave migration is the process that allows a slave of a master that is * already covered by at least another slave, to "migrate" to a master that - * is orpaned, that is, left with no working slaves. + * is orphaned, that is, left with no working slaves. * ------------------------------------------------------------------------- */ /* This function is responsible to decide if this replica should be migrated @@ -3272,7 +3272,7 @@ void clusterHandleSlaveFailover(void) { * the nodes anyway, so we spend time into clusterHandleSlaveMigration() * if definitely needed. * - * The fuction is called with a pre-computed max_slaves, that is the max + * The function is called with a pre-computed max_slaves, that is the max * number of working (not in FAIL state) slaves for a single master. * * Additional conditions for migration are examined inside the function. @@ -3391,7 +3391,7 @@ void clusterHandleSlaveMigration(int max_slaves) { * data loss due to the asynchronous master-slave replication. * -------------------------------------------------------------------------- */ -/* Reset the manual failover state. This works for both masters and slavesa +/* Reset the manual failover state. This works for both masters and slaves * as all the state about manual failover is cleared. * * The function can be used both to initialize the manual failover state at @@ -3683,7 +3683,7 @@ void clusterCron(void) { replicationSetMaster(myself->slaveof->ip, myself->slaveof->port); } - /* Abourt a manual failover if the timeout is reached. */ + /* Abort a manual failover if the timeout is reached. */ manualFailoverCheckTimeout(); if (nodeIsSlave(myself)) { @@ -3788,12 +3788,12 @@ int clusterNodeSetSlotBit(clusterNode *n, int slot) { * target for replicas migration, if and only if at least one of * the other masters has slaves right now. 
* - * Normally masters are valid targerts of replica migration if: + * Normally masters are valid targets of replica migration if: * 1. The used to have slaves (but no longer have). * 2. They are slaves failing over a master that used to have slaves. * * However new masters with slots assigned are considered valid - * migration tagets if the rest of the cluster is not a slave-less. + * migration targets if the rest of the cluster is not a slave-less. * * See https://github.com/antirez/redis/issues/3043 for more info. */ if (n->numslots == 1 && clusterMastersHaveSlaves()) @@ -3977,7 +3977,7 @@ void clusterUpdateState(void) { * A) If no other node is in charge according to the current cluster * configuration, we add these slots to our node. * B) If according to our config other nodes are already in charge for - * this lots, we set the slots as IMPORTING from our point of view + * this slots, we set the slots as IMPORTING from our point of view * in order to justify we have those slots, and in order to make * redis-trib aware of the issue, so that it can try to fix it. * 2) If we find data in a DB different than DB0 we return C_ERR to @@ -4507,7 +4507,7 @@ NULL } /* If this slot is in migrating status but we have no keys * for it assigning the slot to another node will clear - * the migratig status. */ + * the migrating status. */ if (countKeysInSlot(slot) == 0 && server.cluster->migrating_slots_to[slot]) server.cluster->migrating_slots_to[slot] = NULL; @@ -4852,7 +4852,7 @@ NULL server.cluster->currentEpoch = epoch; /* No need to fsync the config here since in the unlucky event * of a failure to persist the config, the conflict resolution code - * will assign an unique config to this node. */ + * will assign a unique config to this node. 
*/ clusterDoBeforeSleep(CLUSTER_TODO_UPDATE_STATE| CLUSTER_TODO_SAVE_CONFIG); addReply(c,shared.ok); @@ -4900,7 +4900,7 @@ void createDumpPayload(rio *payload, robj *o, robj *key) { unsigned char buf[2]; uint64_t crc; - /* Serialize the object in a RDB-like format. It consist of an object type + /* Serialize the object in an RDB-like format. It consist of an object type * byte followed by the serialized object. This is understood by RESTORE. */ rioInitWithBuffer(payload,sdsempty()); serverAssert(rdbSaveObjectType(payload,o)); @@ -5567,7 +5567,7 @@ void readwriteCommand(client *c) { * resharding in progress). * * On success the function returns the node that is able to serve the request. - * If the node is not 'myself' a redirection must be perfomed. The kind of + * If the node is not 'myself' a redirection must be performed. The kind of * redirection is specified setting the integer passed by reference * 'error_code', which will be set to CLUSTER_REDIR_ASK or * CLUSTER_REDIR_MOVED. @@ -5694,7 +5694,7 @@ clusterNode *getNodeByQuery(client *c, struct redisCommand *cmd, robj **argv, in } } - /* Migarting / Improrting slot? Count keys we don't have. */ + /* Migrating / Importing slot? Count keys we don't have. */ if ((migrating_slot || importing_slot) && lookupKeyRead(&server.db[0],thiskey) == NULL) { @@ -5763,7 +5763,7 @@ clusterNode *getNodeByQuery(client *c, struct redisCommand *cmd, robj **argv, in } /* Handle the read-only client case reading from a slave: if this - * node is a slave and the request is about an hash slot our master + * node is a slave and the request is about a hash slot our master * is serving, we can reply without redirection. */ int is_readonly_command = (c->cmd->flags & CMD_READONLY) || (c->cmd->proc == execCommand && !(c->mstate.cmd_inv_flags & CMD_READONLY)); @@ -5777,7 +5777,7 @@ clusterNode *getNodeByQuery(client *c, struct redisCommand *cmd, robj **argv, in } /* Base case: just return the right node. 
However if this node is not - * myself, set error_code to MOVED since we need to issue a rediretion. */ + * myself, set error_code to MOVED since we need to issue a redirection. */ if (n != myself && error_code) *error_code = CLUSTER_REDIR_MOVED; return n; } @@ -5823,7 +5823,7 @@ void clusterRedirectClient(client *c, clusterNode *n, int hashslot, int error_co * 3) The client may remain blocked forever (or up to the max timeout time) * waiting for a key change that will never happen. * - * If the client is found to be blocked into an hash slot this node no + * If the client is found to be blocked into a hash slot this node no * longer handles, the client is sent a redirection error, and the function * returns 1. Otherwise 0 is returned and no operation is performed. */ int clusterRedirectBlockedClientIfNeeded(client *c) { diff --git a/src/cluster.h b/src/cluster.h index 596a4629a..48a111764 100644 --- a/src/cluster.h +++ b/src/cluster.h @@ -51,8 +51,8 @@ typedef struct clusterLink { #define CLUSTER_NODE_HANDSHAKE 32 /* We have still to exchange the first ping */ #define CLUSTER_NODE_NOADDR 64 /* We don't know the address of this node */ #define CLUSTER_NODE_MEET 128 /* Send a MEET message to this node */ -#define CLUSTER_NODE_MIGRATE_TO 256 /* Master elegible for replica migration. */ -#define CLUSTER_NODE_NOFAILOVER 512 /* Slave will not try to failver. */ +#define CLUSTER_NODE_MIGRATE_TO 256 /* Master eligible for replica migration. */ +#define CLUSTER_NODE_NOFAILOVER 512 /* Slave will not try to failover. */ #define CLUSTER_NODE_NULL_NAME "\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000" #define nodeIsMaster(n) ((n)->flags & CLUSTER_NODE_MASTER) @@ -164,10 +164,10 @@ typedef struct clusterState { clusterNode *mf_slave; /* Slave performing the manual failover. */ /* Manual failover state of slave. 
*/ long long mf_master_offset; /* Master offset the slave needs to start MF - or zero if stil not received. */ + or zero if still not received. */ int mf_can_start; /* If non-zero signal that the manual failover can start requesting masters vote. */ - /* The followign fields are used by masters to take state on elections. */ + /* The following fields are used by masters to take state on elections. */ uint64_t lastVoteEpoch; /* Epoch of the last vote granted. */ int todo_before_sleep; /* Things to do in clusterBeforeSleep(). */ /* Messages received and sent by type. */ diff --git a/src/config.c b/src/config.c index 2c69540db..63852ff4f 100644 --- a/src/config.c +++ b/src/config.c @@ -1279,7 +1279,7 @@ void rewriteConfigNumericalOption(struct rewriteConfigState *state, const char * rewriteConfigRewriteLine(state,option,line,force); } -/* Rewrite a octal option. */ +/* Rewrite an octal option. */ void rewriteConfigOctalOption(struct rewriteConfigState *state, char *option, int value, int defvalue) { int force = value != defvalue; sds line = sdscatprintf(sdsempty(),"%s %o",option,value); @@ -2097,7 +2097,7 @@ static int isValidAOFfilename(char *val, char **err) { static int updateHZ(long long val, long long prev, char **err) { UNUSED(prev); UNUSED(err); - /* Hz is more an hint from the user, so we accept values out of range + /* Hz is more a hint from the user, so we accept values out of range * but cap them to reasonable values. */ server.config_hz = val; if (server.config_hz < CONFIG_MIN_HZ) server.config_hz = CONFIG_MIN_HZ; @@ -2115,7 +2115,7 @@ static int updateJemallocBgThread(int val, int prev, char **err) { static int updateReplBacklogSize(long long val, long long prev, char **err) { /* resizeReplicationBacklog sets server.repl_backlog_size, and relies on - * being able to tell when the size changes, so restore prev becore calling it. */ + * being able to tell when the size changes, so restore prev before calling it. 
*/ UNUSED(err); server.repl_backlog_size = prev; resizeReplicationBacklog(val); diff --git a/src/config.h b/src/config.h index e807b9330..f9ec7e44a 100644 --- a/src/config.h +++ b/src/config.h @@ -166,7 +166,7 @@ void setproctitle(const char *fmt, ...); #endif /* BYTE_ORDER */ /* Sometimes after including an OS-specific header that defines the - * endianess we end with __BYTE_ORDER but not with BYTE_ORDER that is what + * endianness we end with __BYTE_ORDER but not with BYTE_ORDER that is what * the Redis code uses. In this case let's define everything without the * underscores. */ #ifndef BYTE_ORDER diff --git a/src/connection.h b/src/connection.h index 85585a3d0..e00d2ea17 100644 --- a/src/connection.h +++ b/src/connection.h @@ -106,7 +106,7 @@ static inline int connAccept(connection *conn, ConnectionCallbackFunc accept_han } /* Establish a connection. The connect_handler will be called when the connection - * is established, or if an error has occured. + * is established, or if an error has occurred. * * The connection handler will be responsible to set up any read/write handlers * as needed. @@ -168,7 +168,7 @@ static inline int connSetReadHandler(connection *conn, ConnectionCallbackFunc fu /* Set a write handler, and possibly enable a write barrier, this flag is * cleared when write handler is changed or removed. - * With barroer enabled, we never fire the event if the read handler already + * With barrier enabled, we never fire the event if the read handler already * fired in the same event loop iteration. Useful when you want to persist * things to disk before sending replies, and want to do that in a group fashion. 
*/ static inline int connSetWriteHandlerWithBarrier(connection *conn, ConnectionCallbackFunc func, int barrier) { diff --git a/src/db.c b/src/db.c index 19b2c48e4..9efda0907 100644 --- a/src/db.c +++ b/src/db.c @@ -116,7 +116,7 @@ robj *lookupKeyReadWithFlags(redisDb *db, robj *key, int flags) { * However, if the command caller is not the master, and as additional * safety measure, the command invoked is a read-only command, we can * safely return NULL here, and provide a more consistent behavior - * to clients accessign expired values in a read-only fashion, that + * to clients accessing expired values in a read-only fashion, that * will say the key as non existing. * * Notably this covers GETs when slaves are used to scale reads. */ @@ -374,7 +374,7 @@ robj *dbUnshareStringValue(redisDb *db, robj *key, robj *o) { * firing module events. * and the function to return ASAP. * - * On success the fuction returns the number of keys removed from the + * On success the function returns the number of keys removed from the * database(s). Otherwise -1 is returned in the specific case the * DB number is out of range, and errno is set to EINVAL. */ long long emptyDbGeneric(redisDb *dbarray, int dbnum, int flags, void(callback)(void*)) { @@ -866,7 +866,7 @@ void scanGenericCommand(client *c, robj *o, unsigned long cursor) { /* Filter element if it is an expired key. */ if (!filter && o == NULL && expireIfNeeded(c->db, kobj)) filter = 1; - /* Remove the element and its associted value if needed. */ + /* Remove the element and its associated value if needed. */ if (filter) { decrRefCount(kobj); listDelNode(keys, node); @@ -1367,7 +1367,7 @@ int *getKeysUsingCommandTable(struct redisCommand *cmd,robj **argv, int argc, in /* Return all the arguments that are keys in the command passed via argc / argv. * * The command returns the positions of all the key arguments inside the array, - * so the actual return value is an heap allocated array of integers. 
The + * so the actual return value is a heap allocated array of integers. The * length of the array is returned by reference into *numkeys. * * 'cmd' must be point to the corresponding entry into the redisCommand diff --git a/src/debug.c b/src/debug.c index 4831c4d74..921c681a5 100644 --- a/src/debug.c +++ b/src/debug.c @@ -387,7 +387,7 @@ void debugCommand(client *c) { "OOM -- Crash the server simulating an out-of-memory error.", "PANIC -- Crash the server simulating a panic.", "POPULATE [prefix] [size] -- Create string keys named key:. If a prefix is specified is used instead of the 'key' prefix.", -"RELOAD [MERGE] [NOFLUSH] [NOSAVE] -- Save the RDB on disk and reload it back in memory. By default it will save the RDB file and load it back. With the NOFLUSH option the current database is not removed before loading the new one, but conficts in keys will kill the server with an exception. When MERGE is used, conflicting keys will be loaded (the key in the loaded RDB file will win). When NOSAVE is used, the server will not save the current dataset in the RDB file before loading. Use DEBUG RELOAD NOSAVE when you want just to load the RDB file you placed in the Redis working directory in order to replace the current dataset in memory. Use DEBUG RELOAD NOSAVE NOFLUSH MERGE when you want to add what is in the current RDB file placed in the Redis current directory, with the current memory content. Use DEBUG RELOAD when you want to verify Redis is able to persist the current dataset in the RDB file, flush the memory content, and load it back.", +"RELOAD [MERGE] [NOFLUSH] [NOSAVE] -- Save the RDB on disk and reload it back in memory. By default it will save the RDB file and load it back. With the NOFLUSH option the current database is not removed before loading the new one, but conflicts in keys will kill the server with an exception. When MERGE is used, conflicting keys will be loaded (the key in the loaded RDB file will win). 
When NOSAVE is used, the server will not save the current dataset in the RDB file before loading. Use DEBUG RELOAD NOSAVE when you want just to load the RDB file you placed in the Redis working directory in order to replace the current dataset in memory. Use DEBUG RELOAD NOSAVE NOFLUSH MERGE when you want to add what is in the current RDB file placed in the Redis current directory, with the current memory content. Use DEBUG RELOAD when you want to verify Redis is able to persist the current dataset in the RDB file, flush the memory content, and load it back.", "RESTART -- Graceful restart: save config, db, restart.", "SDSLEN -- Show low level SDS string info representing key and value.", "SEGFAULT -- Crash the server with sigsegv.", @@ -456,7 +456,7 @@ NULL } } - /* The default beahvior is to save the RDB file before loading + /* The default behavior is to save the RDB file before loading * it back. */ if (save) { rdbSaveInfo rsi, *rsiptr; @@ -1449,7 +1449,7 @@ void logCurrentClient(void) { #define MEMTEST_MAX_REGIONS 128 -/* A non destructive memory test executed during segfauls. */ +/* A non destructive memory test executed during segfault. */ int memtest_test_linux_anonymous_maps(void) { FILE *fp; char line[1024]; diff --git a/src/defrag.c b/src/defrag.c index 07a16ca6c..04ade30ea 100644 --- a/src/defrag.c +++ b/src/defrag.c @@ -47,11 +47,11 @@ int je_get_defrag_hint(void* ptr); /* forward declarations*/ void defragDictBucketCallback(void *privdata, dictEntry **bucketref); -dictEntry* replaceSateliteDictKeyPtrAndOrDefragDictEntry(dict *d, sds oldkey, sds newkey, uint64_t hash, long *defragged); +dictEntry* replaceSatelliteDictKeyPtrAndOrDefragDictEntry(dict *d, sds oldkey, sds newkey, uint64_t hash, long *defragged); /* Defrag helper for generic allocations. * - * returns NULL in case the allocatoin wasn't moved. + * returns NULL in case the allocation wasn't moved. 
* when it returns a non-null value, the old pointer was already released * and should NOT be accessed. */ void* activeDefragAlloc(void *ptr) { @@ -74,7 +74,7 @@ void* activeDefragAlloc(void *ptr) { /*Defrag helper for sds strings * - * returns NULL in case the allocatoin wasn't moved. + * returns NULL in case the allocation wasn't moved. * when it returns a non-null value, the old pointer was already released * and should NOT be accessed. */ sds activeDefragSds(sds sdsptr) { @@ -90,7 +90,7 @@ sds activeDefragSds(sds sdsptr) { /* Defrag helper for robj and/or string objects * - * returns NULL in case the allocatoin wasn't moved. + * returns NULL in case the allocation wasn't moved. * when it returns a non-null value, the old pointer was already released * and should NOT be accessed. */ robj *activeDefragStringOb(robj* ob, long *defragged) { @@ -130,11 +130,11 @@ robj *activeDefragStringOb(robj* ob, long *defragged) { } /* Defrag helper for dictEntries to be used during dict iteration (called on - * each step). Teturns a stat of how many pointers were moved. */ + * each step). Returns a stat of how many pointers were moved. */ long dictIterDefragEntry(dictIterator *iter) { /* This function is a little bit dirty since it messes with the internals * of the dict and it's iterator, but the benefit is that it is very easy - * to use, and require no other chagnes in the dict. */ + * to use, and require no other changes in the dict. */ long defragged = 0; dictht *ht; /* Handle the next entry (if there is one), and update the pointer in the @@ -238,7 +238,7 @@ double *zslDefrag(zskiplist *zsl, double score, sds oldele, sds newele) { return NULL; } -/* Defrag helpler for sorted set. +/* Defrag helper for sorted set. 
* Defrag a single dict entry key name, and corresponding skiplist struct */ long activeDefragZsetEntry(zset *zs, dictEntry *de) { sds newsds; @@ -349,7 +349,7 @@ long activeDefragSdsListAndDict(list *l, dict *d, int dict_val_type) { if ((newsds = activeDefragSds(sdsele))) { /* When defragging an sds value, we need to update the dict key */ uint64_t hash = dictGetHash(d, newsds); - replaceSateliteDictKeyPtrAndOrDefragDictEntry(d, sdsele, newsds, hash, &defragged); + replaceSatelliteDictKeyPtrAndOrDefragDictEntry(d, sdsele, newsds, hash, &defragged); ln->value = newsds; defragged++; } @@ -385,7 +385,7 @@ long activeDefragSdsListAndDict(list *l, dict *d, int dict_val_type) { * moved. Return value is the the dictEntry if found, or NULL if not found. * NOTE: this is very ugly code, but it let's us avoid the complication of * doing a scan on another dict. */ -dictEntry* replaceSateliteDictKeyPtrAndOrDefragDictEntry(dict *d, sds oldkey, sds newkey, uint64_t hash, long *defragged) { +dictEntry* replaceSatelliteDictKeyPtrAndOrDefragDictEntry(dict *d, sds oldkey, sds newkey, uint64_t hash, long *defragged) { dictEntry **deref = dictFindEntryRefByPtrAndHash(d, oldkey, hash); if (deref) { dictEntry *de = *deref; @@ -433,7 +433,7 @@ long activeDefragQuickListNodes(quicklist *ql) { } /* when the value has lots of elements, we want to handle it later and not as - * oart of the main dictionary scan. this is needed in order to prevent latency + * part of the main dictionary scan. 
this is needed in order to prevent latency * spikes when handling large items */ void defragLater(redisDb *db, dictEntry *kde) { sds key = sdsdup(dictGetKey(kde)); @@ -814,7 +814,7 @@ long defragKey(redisDb *db, dictEntry *de) { * I can't search in db->expires for that key after i already released * the pointer it holds it won't be able to do the string compare */ uint64_t hash = dictGetHash(db->dict, de->key); - replaceSateliteDictKeyPtrAndOrDefragDictEntry(db->expires, keysds, newsds, hash, &defragged); + replaceSatelliteDictKeyPtrAndOrDefragDictEntry(db->expires, keysds, newsds, hash, &defragged); } /* Try to defrag robj and / or string value. */ @@ -885,7 +885,7 @@ void defragScanCallback(void *privdata, const dictEntry *de) { server.stat_active_defrag_scanned++; } -/* Defrag scan callback for each hash table bicket, +/* Defrag scan callback for each hash table bucket, * used in order to defrag the dictEntry allocations. */ void defragDictBucketCallback(void *privdata, dictEntry **bucketref) { UNUSED(privdata); /* NOTE: this function is also used by both activeDefragCycle and scanLaterHash, etc. don't use privdata */ @@ -919,7 +919,7 @@ float getAllocatorFragmentation(size_t *out_frag_bytes) { return frag_pct; } -/* We may need to defrag other globals, one small allcation can hold a full allocator run. +/* We may need to defrag other globals, one small allocation can hold a full allocator run. * so although small, it is still important to defrag these */ long defragOtherGlobals() { long defragged = 0; @@ -1090,7 +1090,7 @@ void activeDefragCycle(void) { if (hasActiveChildProcess()) return; /* Defragging memory while there's a fork will just do damage. */ - /* Once a second, check if we the fragmentation justfies starting a scan + /* Once a second, check if the fragmentation justfies starting a scan * or making it more aggressive. 
*/ run_with_period(1000) { computeDefragCycles(); @@ -1160,7 +1160,7 @@ void activeDefragCycle(void) { * (if we have a lot of pointers in one hash bucket or rehasing), * check if we reached the time limit. * But regardless, don't start a new db in this loop, this is because after - * the last db we call defragOtherGlobals, which must be done in once cycle */ + * the last db we call defragOtherGlobals, which must be done in one cycle */ if (!cursor || (++iterations > 16 || server.stat_active_defrag_hits - prev_defragged > 512 || server.stat_active_defrag_scanned - prev_scanned > 64)) { diff --git a/src/dict.c b/src/dict.c index 45aab66f9..6ebabbf8e 100644 --- a/src/dict.c +++ b/src/dict.c @@ -237,7 +237,9 @@ long long timeInMilliseconds(void) { return (((long long)tv.tv_sec)*1000)+(tv.tv_usec/1000); } -/* Rehash for an amount of time between ms milliseconds and ms+1 milliseconds */ +/* Rehash in ms+"delta" milliseconds. The value of "delta" is larger + * than 0, and is smaller than 1 in most cases. The exact upper bound + * depends on the running time of dictRehash(d,100).*/ int dictRehashMilliseconds(dict *d, int ms) { long long start = timeInMilliseconds(); int rehashes = 0; @@ -749,7 +751,7 @@ unsigned int dictGetSomeKeys(dict *d, dictEntry **des, unsigned int count) { * this function instead what we do is to consider a "linear" range of the table * that may be constituted of N buckets with chains of different lengths * appearing one after the other. Then we report a random element in the range. - * In this way we smooth away the problem of different chain lenghts. */ + * In this way we smooth away the problem of different chain lengths. 
*/ #define GETFAIR_NUM_ENTRIES 15 dictEntry *dictGetFairRandomKey(dict *d) { dictEntry *entries[GETFAIR_NUM_ENTRIES]; @@ -1119,7 +1121,7 @@ size_t _dictGetStatsHt(char *buf, size_t bufsize, dictht *ht, int tableid) { i, clvector[i], ((float)clvector[i]/ht->size)*100); } - /* Unlike snprintf(), teturn the number of characters actually written. */ + /* Unlike snprintf(), return the number of characters actually written. */ if (bufsize) buf[bufsize-1] = '\0'; return strlen(buf); } diff --git a/src/endianconv.c b/src/endianconv.c index f3b0b4730..918844e25 100644 --- a/src/endianconv.c +++ b/src/endianconv.c @@ -8,7 +8,7 @@ * to be backward compatible are still in big endian) because most of the * production environments are little endian, and we have a lot of conversions * in a few places because ziplists, intsets, zipmaps, need to be endian-neutral - * even in memory, since they are serialied on RDB files directly with a single + * even in memory, since they are serialized on RDB files directly with a single * write(2) without other additional steps. * * ---------------------------------------------------------------------------- diff --git a/src/evict.c b/src/evict.c index 0755acc0e..5d398c6c9 100644 --- a/src/evict.c +++ b/src/evict.c @@ -41,7 +41,7 @@ /* To improve the quality of the LRU approximation we take a set of keys * that are good candidate for eviction across freeMemoryIfNeeded() calls. * - * Entries inside the eviciton pool are taken ordered by idle time, putting + * Entries inside the eviction pool are taken ordered by idle time, putting * greater idle times to the right (ascending order). * * When an LFU policy is used instead, a reverse frequency indication is used @@ -242,7 +242,7 @@ void evictionPoolPopulate(int dbid, dict *sampledict, dict *keydict, struct evic /* Try to reuse the cached SDS string allocated in the pool entry, * because allocating and deallocating this object is costly * (according to the profiler, not my fantasy. 
Remember: - * premature optimizbla bla bla bla. */ + * premature optimization bla bla bla. */ int klen = sdslen(key); if (klen > EVPOOL_CACHED_SDS_SIZE) { pool[k].key = sdsdup(key); @@ -342,7 +342,7 @@ unsigned long LFUDecrAndReturn(robj *o) { } /* ---------------------------------------------------------------------------- - * The external API for eviction: freeMemroyIfNeeded() is called by the + * The external API for eviction: freeMemoryIfNeeded() is called by the * server when there is data to add in order to make space if needed. * --------------------------------------------------------------------------*/ @@ -441,7 +441,7 @@ int getMaxmemoryState(size_t *total, size_t *logical, size_t *tofree, float *lev * * The function returns C_OK if we are under the memory limit or if we * were over the limit, but the attempt to free memory was successful. - * Otehrwise if we are over the memory limit, but not enough memory + * Otherwise if we are over the memory limit, but not enough memory * was freed to return back under the limit, the function returns C_ERR. */ int freeMemoryIfNeeded(void) { int keys_freed = 0; diff --git a/src/expire.c b/src/expire.c index 1c4f71df3..85fd59fe2 100644 --- a/src/expire.c +++ b/src/expire.c @@ -97,7 +97,7 @@ int activeExpireCycleTryExpire(redisDb *db, dictEntry *de, long long now) { * conditions: * * If type is ACTIVE_EXPIRE_CYCLE_FAST the function will try to run a - * "fast" expire cycle that takes no longer than EXPIRE_FAST_CYCLE_DURATION + * "fast" expire cycle that takes no longer than ACTIVE_EXPIRE_CYCLE_FAST_DURATION * microseconds, and is not repeated again before the same amount of time. * The cycle will also refuse to run at all if the latest slow cycle did not * terminate because of a time limit condition. 
@@ -414,7 +414,7 @@ void expireSlaveKeys(void) { else dictDelete(slaveKeysWithExpire,keyname); - /* Stop conditions: found 3 keys we cna't expire in a row or + /* Stop conditions: found 3 keys we can't expire in a row or * time limit was reached. */ cycles++; if (noexpire > 3) break; @@ -466,7 +466,7 @@ size_t getSlaveKeyWithExpireCount(void) { * * Note: technically we should handle the case of a single DB being flushed * but it is not worth it since anyway race conditions using the same set - * of key names in a wriatable slave and in its master will lead to + * of key names in a writable slave and in its master will lead to * inconsistencies. This is just a best-effort thing we do. */ void flushSlaveKeysWithExpireList(void) { if (slaveKeysWithExpire) { @@ -490,7 +490,7 @@ int checkAlreadyExpired(long long when) { *----------------------------------------------------------------------------*/ /* This is the generic command implementation for EXPIRE, PEXPIRE, EXPIREAT - * and PEXPIREAT. Because the commad second argument may be relative or absolute + * and PEXPIREAT. Because the command second argument may be relative or absolute * the "basetime" argument is used to signal what the base time is (either 0 * for *AT variants of the command, or the current time for relative expires). * diff --git a/src/geo.c b/src/geo.c index 3e5d5f606..5c5054414 100644 --- a/src/geo.c +++ b/src/geo.c @@ -143,8 +143,8 @@ double extractUnitOrReply(client *c, robj *unit) { } /* Input Argument Helper. - * Extract the dinstance from the specified two arguments starting at 'argv' - * that shouldbe in the form: and return the dinstance in the + * Extract the distance from the specified two arguments starting at 'argv' + * that should be in the form: , and return the distance in the * specified unit on success. *conversions is populated with the coefficient * to use in order to convert meters to the unit. 
* @@ -788,7 +788,7 @@ void geoposCommand(client *c) { /* GEODIST key ele1 ele2 [unit] * - * Return the distance, in meters by default, otherwise accordig to "unit", + * Return the distance, in meters by default, otherwise according to "unit", * between points ele1 and ele2. If one or more elements are missing NULL * is returned. */ void geodistCommand(client *c) { diff --git a/src/geohash_helper.c b/src/geohash_helper.c index e23f17b4e..01fb2cb88 100644 --- a/src/geohash_helper.c +++ b/src/geohash_helper.c @@ -68,7 +68,7 @@ uint8_t geohashEstimateStepsByRadius(double range_meters, double lat) { } step -= 2; /* Make sure range is included in most of the base cases. */ - /* Wider range torwards the poles... Note: it is possible to do better + /* Wider range towards the poles... Note: it is possible to do better * than this approximation by computing the distance between meridians * at this latitude, but this does the trick for now. */ if (lat > 66 || lat < -66) { @@ -84,7 +84,7 @@ uint8_t geohashEstimateStepsByRadius(double range_meters, double lat) { /* Return the bounding box of the search area centered at latitude,longitude * having a radius of radius_meter. bounds[0] - bounds[2] is the minimum - * and maxium longitude, while bounds[1] - bounds[3] is the minimum and + * and maximum longitude, while bounds[1] - bounds[3] is the minimum and * maximum latitude. * * This function does not behave correctly with very large radius values, for diff --git a/src/hyperloglog.c b/src/hyperloglog.c index 721f492a1..d018e975e 100644 --- a/src/hyperloglog.c +++ b/src/hyperloglog.c @@ -36,9 +36,9 @@ /* The Redis HyperLogLog implementation is based on the following ideas: * - * * The use of a 64 bit hash function as proposed in [1], in order to don't - * limited to cardinalities up to 10^9, at the cost of just 1 additional - * bit per register. 
+ * * The use of a 64 bit hash function as proposed in [1], in order to estimate + * cardinalities larger than 10^9, at the cost of just 1 additional bit per + * register. * * The use of 16384 6-bit registers for a great level of accuracy, using * a total of 12k per key. * * The use of the Redis string data type. No new type is introduced. @@ -279,7 +279,7 @@ static char *invalid_hll_err = "-INVALIDOBJ Corrupted HLL object detected\r\n"; * So we right shift of 0 bits (no shift in practice) and * left shift the next byte of 8 bits, even if we don't use it, * but this has the effect of clearing the bits so the result - * will not be affacted after the OR. + * will not be affected after the OR. * * ------------------------------------------------------------------------- * @@ -297,7 +297,7 @@ static char *invalid_hll_err = "-INVALIDOBJ Corrupted HLL object detected\r\n"; * |11000000| <- Our byte at b0 * +--------+ * - * To create a AND-mask to clear the bits about this position, we just + * To create an AND-mask to clear the bits about this position, we just * initialize the mask with the value 63, left shift it of "fs" bits, * and finally invert the result. * @@ -766,7 +766,7 @@ int hllSparseSet(robj *o, long index, uint8_t count) { * by a ZERO opcode with len > 1, or by an XZERO opcode. * * In those cases the original opcode must be split into multiple - * opcodes. The worst case is an XZERO split in the middle resuling into + * opcodes. The worst case is an XZERO split in the middle resulting into * XZERO - VAL - XZERO, so the resulting sequence max length is * 5 bytes. * @@ -899,7 +899,7 @@ promote: /* Promote to dense representation. */ * the element belongs to is incremented if needed. * * This function is actually a wrapper for hllSparseSet(), it only performs - * the hashshing of the elmenet to obtain the index and zeros run length. */ + * the hashshing of the element to obtain the index and zeros run length. 
*/ int hllSparseAdd(robj *o, unsigned char *ele, size_t elesize) { long index; uint8_t count = hllPatLen(ele,elesize,&index); @@ -1014,7 +1014,7 @@ uint64_t hllCount(struct hllhdr *hdr, int *invalid) { double m = HLL_REGISTERS; double E; int j; - /* Note that reghisto size could be just HLL_Q+2, becuase HLL_Q+1 is + /* Note that reghisto size could be just HLL_Q+2, because HLL_Q+1 is * the maximum frequency of the "000...1" sequence the hash function is * able to return. However it is slow to check for sanity of the * input: instead we history array at a safe size: overflows will diff --git a/src/latency.c b/src/latency.c index b5ccc7cc6..6148543c8 100644 --- a/src/latency.c +++ b/src/latency.c @@ -85,7 +85,7 @@ int THPGetAnonHugePagesSize(void) { /* ---------------------------- Latency API --------------------------------- */ /* Latency monitor initialization. We just need to create the dictionary - * of time series, each time serie is created on demand in order to avoid + * of time series, each time series is created on demand in order to avoid * having a fixed list to maintain. */ void latencyMonitorInit(void) { server.latency_events = dictCreate(&latencyTimeSeriesDictType,NULL); @@ -154,7 +154,7 @@ int latencyResetEvent(char *event_to_reset) { /* Analyze the samples available for a given event and return a structure * populate with different metrics, average, MAD, min, max, and so forth. - * Check latency.h definition of struct latenctStat for more info. + * Check latency.h definition of struct latencyStats for more info. * If the specified event has no elements the structure is populate with * zero values. 
*/ void analyzeLatencyForEvent(char *event, struct latencyStats *ls) { @@ -343,7 +343,7 @@ sds createLatencyReport(void) { } if (!strcasecmp(event,"aof-fstat") || - !strcasecmp(event,"rdb-unlik-temp-file")) { + !strcasecmp(event,"rdb-unlink-temp-file")) { advise_disk_contention = 1; advise_local_disk = 1; advices += 2; @@ -396,7 +396,7 @@ sds createLatencyReport(void) { /* Better VM. */ report = sdscat(report,"\nI have a few advices for you:\n\n"); if (advise_better_vm) { - report = sdscat(report,"- If you are using a virtual machine, consider upgrading it with a faster one using an hypervisior that provides less latency during fork() calls. Xen is known to have poor fork() performance. Even in the context of the same VM provider, certain kinds of instances can execute fork faster than others.\n"); + report = sdscat(report,"- If you are using a virtual machine, consider upgrading it with a faster one using a hypervisior that provides less latency during fork() calls. Xen is known to have poor fork() performance. Even in the context of the same VM provider, certain kinds of instances can execute fork faster than others.\n"); } /* Slow log. */ @@ -416,7 +416,7 @@ sds createLatencyReport(void) { if (advise_scheduler) { report = sdscat(report,"- The system is slow to execute Redis code paths not containing system calls. This usually means the system does not provide Redis CPU time to run for long periods. You should try to:\n" " 1) Lower the system load.\n" - " 2) Use a computer / VM just for Redis if you are running other softawre in the same system.\n" + " 2) Use a computer / VM just for Redis if you are running other software in the same system.\n" " 3) Check if you have a \"noisy neighbour\" problem.\n" " 4) Check with 'redis-cli --intrinsic-latency 100' what is the intrinsic latency in your system.\n" " 5) Check if the problem is allocator-related by recompiling Redis with MALLOC=libc, if you are using Jemalloc. 
However this may create fragmentation problems.\n"); @@ -432,7 +432,7 @@ sds createLatencyReport(void) { } if (advise_data_writeback) { - report = sdscat(report,"- Mounting ext3/4 filesystems with data=writeback can provide a performance boost compared to data=ordered, however this mode of operation provides less guarantees, and sometimes it can happen that after a hard crash the AOF file will have an half-written command at the end and will require to be repaired before Redis restarts.\n"); + report = sdscat(report,"- Mounting ext3/4 filesystems with data=writeback can provide a performance boost compared to data=ordered, however this mode of operation provides less guarantees, and sometimes it can happen that after a hard crash the AOF file will have a half-written command at the end and will require to be repaired before Redis restarts.\n"); } if (advise_disk_contention) { diff --git a/src/lazyfree.c b/src/lazyfree.c index cbcc1c240..821dc50df 100644 --- a/src/lazyfree.c +++ b/src/lazyfree.c @@ -15,7 +15,7 @@ size_t lazyfreeGetPendingObjectsCount(void) { /* Return the amount of work needed in order to free an object. * The return value is not always the actual number of allocations the - * object is compoesd of, but a number proportional to it. + * object is composed of, but a number proportional to it. * * For strings the function always returns 1. * @@ -137,7 +137,7 @@ void emptyDbAsync(redisDb *db) { } /* Empty the slots-keys map of Redis CLuster by creating a new empty one - * and scheduiling the old for lazy freeing. */ + * and scheduling the old for lazy freeing. */ void slotToKeyFlushAsync(void) { rax *old = server.cluster->slots_to_keys; @@ -156,7 +156,7 @@ void lazyfreeFreeObjectFromBioThread(robj *o) { } /* Release a database from the lazyfree thread. 
The 'db' pointer is the - * database which was substitutied with a fresh one in the main thread + * database which was substituted with a fresh one in the main thread * when the database was logically deleted. 'sl' is a skiplist used by * Redis Cluster in order to take the hash slots -> keys mapping. This * may be NULL if Redis Cluster is disabled. */ diff --git a/src/listpack.c b/src/listpack.c index 9e77ab12d..075552ccb 100644 --- a/src/listpack.c +++ b/src/listpack.c @@ -405,7 +405,7 @@ unsigned char *lpNext(unsigned char *lp, unsigned char *p) { } /* If 'p' points to an element of the listpack, calling lpPrev() will return - * the pointer to the preivous element (the one on the left), or NULL if 'p' + * the pointer to the previous element (the one on the left), or NULL if 'p' * already pointed to the first element of the listpack. */ unsigned char *lpPrev(unsigned char *lp, unsigned char *p) { if (p-lp == LP_HDR_SIZE) return NULL; diff --git a/src/lolwut.c b/src/lolwut.c index 0e1552ba0..eebd5da6a 100644 --- a/src/lolwut.c +++ b/src/lolwut.c @@ -85,7 +85,7 @@ void lolwutCommand(client *c) { } /* ========================== LOLWUT Canvase =============================== - * Many LOWUT versions will likely print some computer art to the screen. + * Many LOLWUT versions will likely print some computer art to the screen. * This is the case with LOLWUT 5 and LOLWUT 6, so here there is a generic * canvas implementation that can be reused. */ @@ -106,7 +106,7 @@ void lwFreeCanvas(lwCanvas *canvas) { } /* Set a pixel to the specified color. Color is 0 or 1, where zero means no - * dot will be displyed, and 1 means dot will be displayed. + * dot will be displayed, and 1 means dot will be displayed. * Coordinates are arranged so that left-top corner is 0,0. You can write * out of the size of the canvas without issues. 
*/ void lwDrawPixel(lwCanvas *canvas, int x, int y, int color) { diff --git a/src/lolwut5.c b/src/lolwut5.c index 5a9348800..d64e0bb27 100644 --- a/src/lolwut5.c +++ b/src/lolwut5.c @@ -156,7 +156,7 @@ void lolwut5Command(client *c) { return; /* Limits. We want LOLWUT to be always reasonably fast and cheap to execute - * so we have maximum number of columns, rows, and output resulution. */ + * so we have maximum number of columns, rows, and output resolution. */ if (cols < 1) cols = 1; if (cols > 1000) cols = 1000; if (squares_per_row < 1) squares_per_row = 1; diff --git a/src/lzfP.h b/src/lzfP.h index 93c27b42d..78c858fad 100644 --- a/src/lzfP.h +++ b/src/lzfP.h @@ -127,7 +127,7 @@ /* * Whether to store pointers or offsets inside the hash table. On - * 64 bit architetcures, pointers take up twice as much space, + * 64 bit architectures, pointers take up twice as much space, * and might also be slower. Default is to autodetect. */ /*#define LZF_USER_OFFSETS autodetect */ diff --git a/src/module.c b/src/module.c index f293d6a6c..bd75c8f92 100644 --- a/src/module.c +++ b/src/module.c @@ -46,7 +46,7 @@ typedef struct RedisModuleInfoCtx { sds info; /* info string we collected so far */ int sections; /* number of sections we collected so far */ int in_section; /* indication if we're in an active section or not */ - int in_dict_field; /* indication that we're curreintly appending to a dict */ + int in_dict_field; /* indication that we're currently appending to a dict */ } RedisModuleInfoCtx; typedef void (*RedisModuleInfoFunc)(RedisModuleInfoCtx *ctx, int for_crash_report); @@ -906,10 +906,21 @@ int RM_SignalModifiedKey(RedisModuleCtx *ctx, RedisModuleString *keyname) { * Automatic memory management for modules * -------------------------------------------------------------------------- */ -/* Enable automatic memory management. See API.md for more information. +/* Enable automatic memory management. 
* * The function must be called as the first function of a command implementation - * that wants to use automatic memory. */ + * that wants to use automatic memory. + * + * When enabled, automatic memory management tracks and automatically frees + * keys, call replies and Redis string objects once the command returns. In most + * cases this eliminates the need of calling the following functions: + * + * 1) RedisModule_CloseKey() + * 2) RedisModule_FreeCallReply() + * 3) RedisModule_FreeString() + * + * These functions can still be used with automatic memory management enabled, + * to optimize loops that make numerous allocations for example. */ void RM_AutoMemory(RedisModuleCtx *ctx) { ctx->flags |= REDISMODULE_CTX_AUTO_MEMORY; } @@ -1045,7 +1056,7 @@ RedisModuleString *RM_CreateStringFromLongLong(RedisModuleCtx *ctx, long long ll } /* Like RedisModule_CreatString(), but creates a string starting from a double - * integer instead of taking a buffer and its length. + * instead of taking a buffer and its length. * * The returned string must be released with RedisModule_FreeString() or by * enabling automatic memory management. */ @@ -1922,7 +1933,7 @@ int RM_GetContextFlags(RedisModuleCtx *ctx) { flags |= REDISMODULE_CTX_FLAGS_LUA; if (ctx->client->flags & CLIENT_MULTI) flags |= REDISMODULE_CTX_FLAGS_MULTI; - /* Module command recieved from MASTER, is replicated. */ + /* Module command received from MASTER, is replicated. */ if (ctx->client->flags & CLIENT_MASTER) flags |= REDISMODULE_CTX_FLAGS_REPLICATED; } @@ -2921,7 +2932,7 @@ int RM_HashSet(RedisModuleKey *key, int flags, ...) { /* Get fields from an hash value. This function is called using a variable * number of arguments, alternating a field name (as a StringRedisModule * pointer) with a pointer to a StringRedisModule pointer, that is set to the - * value of the field if the field exist, or NULL if the field did not exist. + * value of the field if the field exists, or NULL if the field does not exist. 
* At the end of the field/value-ptr pairs, NULL must be specified as last * argument to signal the end of the arguments in the variadic function. * @@ -3040,7 +3051,7 @@ void moduleParseCallReply_SimpleString(RedisModuleCallReply *reply); void moduleParseCallReply_Array(RedisModuleCallReply *reply); /* Do nothing if REDISMODULE_REPLYFLAG_TOPARSE is false, otherwise - * use the protcol of the reply in reply->proto in order to fill the + * use the protocol of the reply in reply->proto in order to fill the * reply with parsed data according to the reply type. */ void moduleParseCallReply(RedisModuleCallReply *reply) { if (!(reply->flags & REDISMODULE_REPLYFLAG_TOPARSE)) return; @@ -3599,7 +3610,7 @@ void moduleTypeNameByID(char *name, uint64_t moduleid) { /* Register a new data type exported by the module. The parameters are the * following. Please for in depth documentation check the modules API - * documentation, especially the TYPES.md file. + * documentation, especially https://redis.io/topics/modules-native-types. * * * **name**: A 9 characters data type name that MUST be unique in the Redis * Modules ecosystem. Be creative... and there will be no collisions. Use @@ -3646,7 +3657,7 @@ void moduleTypeNameByID(char *name, uint64_t moduleid) { * * **aux_load**: A callback function pointer that loads out of keyspace data from RDB files. * Similar to aux_save, returns REDISMODULE_OK on success, and ERR otherwise. * - * The **digest* and **mem_usage** methods should currently be omitted since + * The **digest** and **mem_usage** methods should currently be omitted since * they are not yet implemented inside the Redis modules core. * * Note: the module name "AAAAAAAAA" is reserved and produces an error, it @@ -3656,7 +3667,7 @@ void moduleTypeNameByID(char *name, uint64_t moduleid) { * and if the module name or encver is invalid, NULL is returned. 
* Otherwise the new type is registered into Redis, and a reference of * type RedisModuleType is returned: the caller of the function should store - * this reference into a gobal variable to make future use of it in the + * this reference into a global variable to make future use of it in the * modules type API, since a single module may register multiple types. * Example code fragment: * @@ -3738,7 +3749,7 @@ moduleType *RM_ModuleTypeGetType(RedisModuleKey *key) { /* Assuming RedisModule_KeyType() returned REDISMODULE_KEYTYPE_MODULE on * the key, returns the module type low-level value stored at key, as - * it was set by the user via RedisModule_ModuleTypeSet(). + * it was set by the user via RedisModule_ModuleTypeSetValue(). * * If the key is NULL, is not associated with a module type, or is empty, * then NULL is returned instead. */ @@ -3795,7 +3806,7 @@ int moduleAllDatatypesHandleErrors() { /* Returns true if any previous IO API failed. * for Load* APIs the REDISMODULE_OPTIONS_HANDLE_IO_ERRORS flag must be set with - * RediModule_SetModuleOptions first. */ + * RedisModule_SetModuleOptions first. */ int RM_IsIOError(RedisModuleIO *io) { return io->error; } @@ -3928,7 +3939,7 @@ RedisModuleString *RM_LoadString(RedisModuleIO *io) { * * The size of the string is stored at '*lenptr' if not NULL. * The returned string is not automatically NULL terminated, it is loaded - * exactly as it was stored inisde the RDB file. */ + * exactly as it was stored inside the RDB file. */ char *RM_LoadStringBuffer(RedisModuleIO *io, size_t *lenptr) { return moduleLoadString(io,1,lenptr); } @@ -4517,14 +4528,14 @@ int moduleTryServeClientBlockedOnKey(client *c, robj *key) { * * The callbacks are called in the following contexts: * - * reply_callback: called after a successful RedisModule_UnblockClient() - * call in order to reply to the client and unblock it. 
+ * reply_callback: called after a successful RedisModule_UnblockClient() + * call in order to reply to the client and unblock it. * - * reply_timeout: called when the timeout is reached in order to send an - * error to the client. + * timeout_callback: called when the timeout is reached in order to send an + * error to the client. * - * free_privdata: called in order to free the private data that is passed - * by RedisModule_UnblockClient() call. + * free_privdata: called in order to free the private data that is passed + * by RedisModule_UnblockClient() call. * * Note: RedisModule_UnblockClient should be called for every blocked client, * even if client was killed, timed-out or disconnected. Failing to do so @@ -4547,13 +4558,13 @@ RedisModuleBlockedClient *RM_BlockClient(RedisModuleCtx *ctx, RedisModuleCmdFunc * once certain keys become "ready", that is, contain more data. * * Basically this is similar to what a typical Redis command usually does, - * like BLPOP or ZPOPMAX: the client blocks if it cannot be served ASAP, + * like BLPOP or BZPOPMAX: the client blocks if it cannot be served ASAP, * and later when the key receives new data (a list push for instance), the * client is unblocked and served. * * However in the case of this module API, when the client is unblocked? * - * 1. If you block ok a key of a type that has blocking operations associated, + * 1. If you block on a key of a type that has blocking operations associated, * like a list, a sorted set, a stream, and so forth, the client may be * unblocked once the relevant key is targeted by an operation that normally * unblocks the native blocking operations for that type. So if we block @@ -4948,7 +4959,7 @@ void moduleReleaseGIL(void) { /* Subscribe to keyspace notifications. This is a low-level version of the * keyspace-notifications API. A module can register callbacks to be notified - * when keyspce events occur. + * when keyspace events occur. 
* * Notification events are filtered by their type (string events, set events, * etc), and the subscriber callback receives only events that match a specific @@ -5659,7 +5670,7 @@ int RM_AuthenticateClientWithACLUser(RedisModuleCtx *ctx, const char *name, size /* Deauthenticate and close the client. The client resources will not be * be immediately freed, but will be cleaned up in a background job. This is * the recommended way to deauthenicate a client since most clients can't - * handle users becomming deauthenticated. Returns REDISMODULE_ERR when the + * handle users becoming deauthenticated. Returns REDISMODULE_ERR when the * client doesn't exist and REDISMODULE_OK when the operation was successful. * * The client ID is returned from the RM_AuthenticateClientWithUser and @@ -5779,14 +5790,14 @@ int RM_DictDel(RedisModuleDict *d, RedisModuleString *key, void *oldval) { return RM_DictDelC(d,key->ptr,sdslen(key->ptr),oldval); } -/* Return an interator, setup in order to start iterating from the specified +/* Return an iterator, setup in order to start iterating from the specified * key by applying the operator 'op', which is just a string specifying the * comparison operator to use in order to seek the first element. The - * operators avalable are: + * operators available are: * * "^" -- Seek the first (lexicographically smaller) key. * "$" -- Seek the last (lexicographically biffer) key. - * ">" -- Seek the first element greter than the specified key. + * ">" -- Seek the first element greater than the specified key. * ">=" -- Seek the first element greater or equal than the specified key. * "<" -- Seek the first element smaller than the specified key. * "<=" -- Seek the first element smaller or equal than the specified key. @@ -5913,7 +5924,7 @@ RedisModuleString *RM_DictPrev(RedisModuleCtx *ctx, RedisModuleDictIter *di, voi * in the loop, as we iterate elements, we can also check if we are still * on range. 
* - * The function returne REDISMODULE_ERR if the iterator reached the + * The function return REDISMODULE_ERR if the iterator reached the * end of elements condition as well. */ int RM_DictCompareC(RedisModuleDictIter *di, const char *op, void *key, size_t keylen) { if (raxEOF(&di->ri)) return REDISMODULE_ERR; @@ -6294,7 +6305,7 @@ int RM_ExportSharedAPI(RedisModuleCtx *ctx, const char *apiname, void *func) { * command that requires external APIs: if some API cannot be resolved, the * command should return an error. * - * Here is an exmaple: + * Here is an example: * * int ... myCommandImplementation() { * if (getExternalAPIs() == 0) { @@ -6680,7 +6691,7 @@ void RM_ScanCursorDestroy(RedisModuleScanCursor *cursor) { * RedisModule_ScanCursorDestroy(c); * * It is also possible to use this API from another thread while the lock - * is acquired durring the actuall call to RM_Scan: + * is acquired during the actuall call to RM_Scan: * * RedisModuleCursor *c = RedisModule_ScanCursorCreate(); * RedisModule_ThreadSafeContextLock(ctx); @@ -6694,7 +6705,7 @@ void RM_ScanCursorDestroy(RedisModuleScanCursor *cursor) { * The function will return 1 if there are more elements to scan and * 0 otherwise, possibly setting errno if the call failed. * - * It is also possible to restart and existing cursor using RM_CursorRestart. + * It is also possible to restart an existing cursor using RM_ScanCursorRestart. * * IMPORTANT: This API is very similar to the Redis SCAN command from the * point of view of the guarantees it provides. This means that the API @@ -6708,7 +6719,7 @@ void RM_ScanCursorDestroy(RedisModuleScanCursor *cursor) { * Moreover playing with the Redis keyspace while iterating may have the * effect of returning more duplicates. A safe pattern is to store the keys * names you want to modify elsewhere, and perform the actions on the keys - * later when the iteration is complete. Howerver this can cost a lot of + * later when the iteration is complete. 
However this can cost a lot of * memory, so it may make sense to just operate on the current key when * possible during the iteration, given that this is safe. */ int RM_Scan(RedisModuleCtx *ctx, RedisModuleScanCursor *cursor, RedisModuleScanCB fn, void *privdata) { @@ -6773,8 +6784,8 @@ static void moduleScanKeyCallback(void *privdata, const dictEntry *de) { * RedisModule_CloseKey(key); * RedisModule_ScanCursorDestroy(c); * - * It is also possible to use this API from another thread while the lock is acquired durring - * the actuall call to RM_Scan, and re-opening the key each time: + * It is also possible to use this API from another thread while the lock is acquired during + * the actuall call to RM_ScanKey, and re-opening the key each time: * RedisModuleCursor *c = RedisModule_ScanCursorCreate(); * RedisModule_ThreadSafeContextLock(ctx); * RedisModuleKey *key = RedisModule_OpenKey(...) @@ -6790,7 +6801,7 @@ static void moduleScanKeyCallback(void *privdata, const dictEntry *de) { * * The function will return 1 if there are more elements to scan and 0 otherwise, * possibly setting errno if the call failed. - * It is also possible to restart and existing cursor using RM_CursorRestart. + * It is also possible to restart an existing cursor using RM_ScanCursorRestart. * * NOTE: Certain operations are unsafe while iterating the object. For instance * while the API guarantees to return at least one time all the elements that @@ -6943,7 +6954,7 @@ int TerminateModuleForkChild(int child_pid, int wait) { } /* Can be used to kill the forked child process from the parent process. - * child_pid whould be the return value of RedisModule_Fork. */ + * child_pid would be the return value of RedisModule_Fork. */ int RM_KillForkChild(int child_pid) { /* Kill module child, wait for child exit. 
*/ if (TerminateModuleForkChild(child_pid,1) == C_OK) @@ -7081,7 +7092,7 @@ void ModuleForkDoneHandler(int exitcode, int bysignal) { * REDISMODULE_SUBEVENT_LOADING_FAILED * * Note that AOF loading may start with an RDB data in case of - * rdb-preamble, in which case you'll only recieve an AOF_START event. + * rdb-preamble, in which case you'll only receive an AOF_START event. * * * RedisModuleEvent_ClientChange @@ -7103,7 +7114,7 @@ void ModuleForkDoneHandler(int exitcode, int bysignal) { * This event is called when the instance (that can be both a * master or a replica) get a new online replica, or lose a * replica since it gets disconnected. - * The following sub events are availble: + * The following sub events are available: * * REDISMODULE_SUBEVENT_REPLICA_CHANGE_ONLINE * REDISMODULE_SUBEVENT_REPLICA_CHANGE_OFFLINE @@ -7141,7 +7152,7 @@ void ModuleForkDoneHandler(int exitcode, int bysignal) { * RedisModuleEvent_ModuleChange * * This event is called when a new module is loaded or one is unloaded. - * The following sub events are availble: + * The following sub events are available: * * REDISMODULE_SUBEVENT_MODULE_LOADED * REDISMODULE_SUBEVENT_MODULE_UNLOADED @@ -7168,7 +7179,7 @@ void ModuleForkDoneHandler(int exitcode, int bysignal) { * int32_t progress; // Approximate progress between 0 and 1024, * or -1 if unknown. * - * The function returns REDISMODULE_OK if the module was successfully subscrived + * The function returns REDISMODULE_OK if the module was successfully subscribed * for the specified event. If the API is called from a wrong context then * REDISMODULE_ERR is returned. 
*/ int RM_SubscribeToServerEvent(RedisModuleCtx *ctx, RedisModuleEvent event, RedisModuleEventCallback callback) { @@ -7364,7 +7375,7 @@ void moduleInitModulesSystem(void) { server.loadmodule_queue = listCreate(); modules = dictCreate(&modulesDictType,NULL); - /* Set up the keyspace notification susbscriber list and static client */ + /* Set up the keyspace notification subscriber list and static client */ moduleKeyspaceSubscribers = listCreate(); moduleFreeContextReusedClient = createClient(NULL); moduleFreeContextReusedClient->flags |= CLIENT_MODULE; @@ -7728,7 +7739,7 @@ size_t moduleCount(void) { return dictSize(modules); } -/* Set the key last access time for LRU based eviction. not relevent if the +/* Set the key last access time for LRU based eviction. not relevant if the * servers's maxmemory policy is LFU based. Value is idle time in milliseconds. * returns REDISMODULE_OK if the LRU was updated, REDISMODULE_ERR otherwise. */ int RM_SetLRU(RedisModuleKey *key, mstime_t lru_idle) { diff --git a/src/modules/hellodict.c b/src/modules/hellodict.c index 651615b03..1428a1381 100644 --- a/src/modules/hellodict.c +++ b/src/modules/hellodict.c @@ -125,7 +125,7 @@ int RedisModule_OnLoad(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) cmd_KEYRANGE,"readonly",1,1,0) == REDISMODULE_ERR) return REDISMODULE_ERR; - /* Create our global dictionray. Here we'll set our keys and values. */ + /* Create our global dictionary. Here we'll set our keys and values. 
*/ Keyspace = RedisModule_CreateDict(NULL); return REDISMODULE_OK; diff --git a/src/modules/helloworld.c b/src/modules/helloworld.c index 3b00dea77..043f5be32 100644 --- a/src/modules/helloworld.c +++ b/src/modules/helloworld.c @@ -91,7 +91,7 @@ int HelloPushCall_RedisCommand(RedisModuleCtx *ctx, RedisModuleString **argv, in } /* HELLO.PUSH.CALL2 - * This is exaxctly as HELLO.PUSH.CALL, but shows how we can reply to the + * This is exactly as HELLO.PUSH.CALL, but shows how we can reply to the * client using directly a reply object that Call() returned. */ int HelloPushCall2_RedisCommand(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) { @@ -345,7 +345,7 @@ int HelloToggleCase_RedisCommand(RedisModuleCtx *ctx, RedisModuleString **argv, /* HELLO.MORE.EXPIRE key milliseconds. * - * If they key has already an associated TTL, extends it by "milliseconds" + * If the key has already an associated TTL, extends it by "milliseconds" * milliseconds. Otherwise no operation is performed. */ int HelloMoreExpire_RedisCommand(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) { RedisModule_AutoMemory(ctx); /* Use automatic memory management. */ diff --git a/src/multi.c b/src/multi.c index a99c308be..3ce6d60ec 100644 --- a/src/multi.c +++ b/src/multi.c @@ -87,7 +87,7 @@ void discardTransaction(client *c) { unwatchAllKeys(c); } -/* Flag the transacation as DIRTY_EXEC so that EXEC will fail. +/* Flag the transaction as DIRTY_EXEC so that EXEC will fail. * Should be called every time there is an error while queueing a command. 
*/ void flagTransaction(client *c) { if (c->flags & CLIENT_MULTI) diff --git a/src/networking.c b/src/networking.c index 495be0ece..0d290e169 100644 --- a/src/networking.c +++ b/src/networking.c @@ -170,7 +170,7 @@ client *createClient(connection *conn) { return c; } -/* This funciton puts the client in the queue of clients that should write +/* This function puts the client in the queue of clients that should write * their output buffers to the socket. Note that it does not *yet* install * the write handler, to start clients are put in a queue of clients that need * to write, so we try to do that before returning in the event loop (see the @@ -268,7 +268,7 @@ void _addReplyProtoToList(client *c, const char *s, size_t len) { listNode *ln = listLast(c->reply); clientReplyBlock *tail = ln? listNodeValue(ln): NULL; - /* Note that 'tail' may be NULL even if we have a tail node, becuase when + /* Note that 'tail' may be NULL even if we have a tail node, because when * addReplyDeferredLen() is used, it sets a dummy node to NULL just * fo fill it later, when the size of the bulk length is set. */ @@ -1161,7 +1161,7 @@ void freeClient(client *c) { listDelNode(server.clients_to_close,ln); } - /* If it is our master that's beging disconnected we should make sure + /* If it is our master that's being disconnected we should make sure * to cache the state to try a partial resynchronization later. * * Note that before doing this we make sure that the client is not in @@ -1491,7 +1491,7 @@ void resetClient(client *c) { } } -/* This funciton is used when we want to re-enter the event loop but there +/* This function is used when we want to re-enter the event loop but there * is the risk that the client we are dealing with will be freed in some * way. 
This happens for instance in: * @@ -2050,7 +2050,7 @@ char *getClientPeerId(client *c) { return c->peerid; } -/* Concatenate a string representing the state of a client in an human +/* Concatenate a string representing the state of a client in a human * readable format, into the sds string 's'. */ sds catClientInfoString(sds s, client *client) { char flags[16], events[3], conninfo[CONN_INFO_LEN], *p; @@ -3050,7 +3050,7 @@ void stopThreadedIO(void) { * we need to handle in parallel, however the I/O threading is disabled * globally for reads as well if we have too little pending clients. * - * The function returns 0 if the I/O threading should be used becuase there + * The function returns 0 if the I/O threading should be used because there * are enough active threads, otherwise 1 is returned and the I/O threads * could be possibly stopped (if already active) as a side effect. */ int stopThreadedIOIfNeeded(void) { diff --git a/src/notify.c b/src/notify.c index bb1055724..5c7634bce 100644 --- a/src/notify.c +++ b/src/notify.c @@ -62,7 +62,7 @@ int keyspaceEventsStringToFlags(char *classes) { return flags; } -/* This function does exactly the revese of the function above: it gets +/* This function does exactly the reverse of the function above: it gets * as input an integer with the xored flags and returns a string representing * the selected classes. The string returned is an sds string that needs to * be released with sdsfree(). */ diff --git a/src/object.c b/src/object.c index 1bc400e85..f8775ea97 100644 --- a/src/object.c +++ b/src/object.c @@ -126,7 +126,7 @@ robj *createStringObject(const char *ptr, size_t len) { /* Create a string object from a long long value. When possible returns a * shared integer object, or at least an integer encoded one. 
* - * If valueobj is non zero, the function avoids returning a a shared + * If valueobj is non zero, the function avoids returning a shared * integer, because the object is going to be used as value in the Redis key * space (for instance when the INCR command is used), so we want LFU/LRU * values specific for each key. */ @@ -1223,7 +1223,7 @@ robj *objectCommandLookupOrReply(client *c, robj *key, robj *reply) { return o; } -/* Object command allows to inspect the internals of an Redis Object. +/* Object command allows to inspect the internals of a Redis Object. * Usage: OBJECT */ void objectCommand(client *c) { robj *o; diff --git a/src/quicklist.h b/src/quicklist.h index 8b553c119..fd9878af0 100644 --- a/src/quicklist.h +++ b/src/quicklist.h @@ -40,7 +40,7 @@ * count: 16 bits, max 65536 (max zl bytes is 65k, so max count actually < 32k). * encoding: 2 bits, RAW=1, LZF=2. * container: 2 bits, NONE=1, ZIPLIST=2. - * recompress: 1 bit, bool, true if node is temporarry decompressed for usage. + * recompress: 1 bit, bool, true if node is temporary decompressed for usage. * attempted_compress: 1 bit, boolean, used for verifying during testing. * extra: 10 bits, free for future use; pads out the remainder of 32 bits */ typedef struct quicklistNode { @@ -97,7 +97,7 @@ typedef struct quicklistBookmark { /* quicklist is a 40 byte struct (on 64-bit systems) describing a quicklist. * 'count' is the number of total entries. * 'len' is the number of quicklist nodes. - * 'compress' is: -1 if compression disabled, otherwise it's the number + * 'compress' is: 0 if compression disabled, otherwise it's the number * of quicklistNodes to leave uncompressed at ends of quicklist. * 'fill' is the user-requested (or default) fill factor. 
* 'bookmakrs are an optional feature that is used by realloc this struct, diff --git a/src/rax.c b/src/rax.c index c8a1fb6b4..5768071c0 100644 --- a/src/rax.c +++ b/src/rax.c @@ -628,7 +628,7 @@ int raxGenericInsert(rax *rax, unsigned char *s, size_t len, void *data, void ** * * 3b. IF $SPLITPOS != 0: * Trim the compressed node (reallocating it as well) in order to - * contain $splitpos characters. Change chilid pointer in order to link + * contain $splitpos characters. Change child pointer in order to link * to the split node. If new compressed node len is just 1, set * iscompr to 0 (layout is the same). Fix parent's reference. * @@ -1082,7 +1082,7 @@ int raxRemove(rax *rax, unsigned char *s, size_t len, void **old) { } } else if (h->size == 1) { /* If the node had just one child, after the removal of the key - * further compression with adjacent nodes is pontentially possible. */ + * further compression with adjacent nodes is potentially possible. */ trycompress = 1; } @@ -1329,7 +1329,7 @@ int raxIteratorNextStep(raxIterator *it, int noup) { if (!noup && children) { debugf("GO DEEPER\n"); /* Seek the lexicographically smaller key in this subtree, which - * is the first one found always going torwards the first child + * is the first one found always going towards the first child * of every successive node. */ if (!raxStackPush(&it->stack,it->node)) return 0; raxNode **cp = raxNodeFirstChildPtr(it->node); @@ -1348,7 +1348,7 @@ int raxIteratorNextStep(raxIterator *it, int noup) { return 1; } } else { - /* If we finished exporing the previous sub-tree, switch to the + /* If we finished exploring the previous sub-tree, switch to the * new one: go upper until a node is found where there are * children representing keys lexicographically greater than the * current key. 
*/ @@ -1510,7 +1510,7 @@ int raxIteratorPrevStep(raxIterator *it, int noup) { int raxSeek(raxIterator *it, const char *op, unsigned char *ele, size_t len) { int eq = 0, lt = 0, gt = 0, first = 0, last = 0; - it->stack.items = 0; /* Just resetting. Intialized by raxStart(). */ + it->stack.items = 0; /* Just resetting. Initialized by raxStart(). */ it->flags |= RAX_ITER_JUST_SEEKED; it->flags &= ~RAX_ITER_EOF; it->key_len = 0; @@ -1731,7 +1731,7 @@ int raxPrev(raxIterator *it) { * tree, expect a disappointing distribution. A random walk produces good * random elements if the tree is not sparse, however in the case of a radix * tree certain keys will be reported much more often than others. At least - * this function should be able to expore every possible element eventually. */ + * this function should be able to explore every possible element eventually. */ int raxRandomWalk(raxIterator *it, size_t steps) { if (it->rt->numele == 0) { it->flags |= RAX_ITER_EOF; @@ -1825,7 +1825,7 @@ uint64_t raxSize(rax *rax) { /* ----------------------------- Introspection ------------------------------ */ /* This function is mostly used for debugging and learning purposes. - * It shows an ASCII representation of a tree on standard output, outling + * It shows an ASCII representation of a tree on standard output, outline * all the nodes and the contained keys. * * The representation is as follow: @@ -1835,7 +1835,7 @@ uint64_t raxSize(rax *rax) { * [abc]=0x12345678 (node is a key, pointing to value 0x12345678) * [] (a normal empty node) * - * Children are represented in new idented lines, each children prefixed by + * Children are represented in new indented lines, each children prefixed by * the "`-(x)" string, where "x" is the edge byte. 
* * [abc] diff --git a/src/rax.h b/src/rax.h index f2521d14a..6b1fd4188 100644 --- a/src/rax.h +++ b/src/rax.h @@ -58,7 +58,7 @@ * successive nodes having a single child are "compressed" into the node * itself as a string of characters, each representing a next-level child, * and only the link to the node representing the last character node is - * provided inside the representation. So the above representation is turend + * provided inside the representation. So the above representation is turned * into: * * ["foo"] "" @@ -123,7 +123,7 @@ typedef struct raxNode { * nodes). * * If the node has an associated key (iskey=1) and is not NULL - * (isnull=0), then after the raxNode pointers poiting to the + * (isnull=0), then after the raxNode pointers pointing to the * children, an additional value pointer is present (as you can see * in the representation above as "value-ptr" field). */ diff --git a/src/rdb.c b/src/rdb.c index 54a169cd8..4bcf96038 100644 --- a/src/rdb.c +++ b/src/rdb.c @@ -2172,7 +2172,7 @@ int rdbLoadRio(rio *rdb, int rdbflags, rdbSaveInfo *rsi) { } else if (type == RDB_OPCODE_AUX) { /* AUX: generic string-string fields. Use to add state to RDB * which is backward compatible. Implementations of RDB loading - * are requierd to skip AUX fields they don't understand. + * are required to skip AUX fields they don't understand. * * An AUX field is composed of two strings: key and value. */ robj *auxkey, *auxval; @@ -2421,7 +2421,7 @@ void backgroundSaveDoneHandlerDisk(int exitcode, int bysignal) { latencyEndMonitor(latency); latencyAddSampleIfNeeded("rdb-unlink-temp-file",latency); /* SIGUSR1 is whitelisted, so we have a way to kill a child without - * tirggering an error condition. */ + * triggering an error condition. 
*/ if (bysignal != SIGUSR1) server.lastbgsave_status = C_ERR; } diff --git a/src/redis-check-rdb.c b/src/redis-check-rdb.c index 17ec656ce..592feaf42 100644 --- a/src/redis-check-rdb.c +++ b/src/redis-check-rdb.c @@ -331,7 +331,7 @@ err: return 1; } -/* RDB check main: called form redis.c when Redis is executed with the +/* RDB check main: called form server.c when Redis is executed with the * redis-check-rdb alias, on during RDB loading errors. * * The function works in two ways: can be called with argc/argv as a diff --git a/src/redis-cli.c b/src/redis-cli.c index ca949b8f0..2f4609661 100644 --- a/src/redis-cli.c +++ b/src/redis-cli.c @@ -309,7 +309,7 @@ static void cliRefreshPrompt(void) { /* Return the name of the dotfile for the specified 'dotfilename'. * Normally it just concatenates user $HOME to the file specified - * in 'dotfilename'. However if the environment varialbe 'envoverride' + * in 'dotfilename'. However if the environment variable 'envoverride' * is set, its value is taken as the path. * * The function returns NULL (if the file is /dev/null or cannot be @@ -1713,7 +1713,7 @@ static void usage(void) { " -a Password to use when connecting to the server.\n" " You can also use the " REDIS_CLI_AUTH_ENV " environment\n" " variable to pass this password more safely\n" -" (if both are used, this argument takes predecence).\n" +" (if both are used, this argument takes precedence).\n" " --user Used to send ACL style 'AUTH username pass'. Needs -a.\n" " --pass Alias of -a for consistency with the new --user option.\n" " --askpass Force user to input password with mask from STDIN.\n" @@ -2142,7 +2142,7 @@ static int evalMode(int argc, char **argv) { argv2[2] = sdscatprintf(sdsempty(),"%d",keys); /* Call it */ - int eval_ldb = config.eval_ldb; /* Save it, may be reverteed. */ + int eval_ldb = config.eval_ldb; /* Save it, may be reverted. 
*/ retval = issueCommand(argc+3-got_comma, argv2); if (eval_ldb) { if (!config.eval_ldb) { @@ -6658,13 +6658,13 @@ struct distsamples { * samples greater than the previous one, and is also the stop sentinel. * * "tot' is the total number of samples in the different buckets, so it - * is the SUM(samples[i].conut) for i to 0 up to the max sample. + * is the SUM(samples[i].count) for i to 0 up to the max sample. * * As a side effect the function sets all the buckets count to 0. */ void showLatencyDistSamples(struct distsamples *samples, long long tot) { int j; - /* We convert samples into a index inside the palette + /* We convert samples into an index inside the palette * proportional to the percentage a given bucket represents. * This way intensity of the different parts of the spectrum * don't change relative to the number of requests, which avoids to @@ -7971,7 +7971,7 @@ static void LRUTestMode(void) { * Intrisic latency mode. * * Measure max latency of a running process that does not result from - * syscalls. Basically this software should provide an hint about how much + * syscalls. Basically this software should provide a hint about how much * time the kernel leaves the process without a chance to run. *--------------------------------------------------------------------------- */ diff --git a/src/redismodule.h b/src/redismodule.h index 4bfc14cc7..4a0e5bf15 100644 --- a/src/redismodule.h +++ b/src/redismodule.h @@ -963,4 +963,4 @@ static int RedisModule_Init(RedisModuleCtx *ctx, const char *name, int ver, int #define RedisModuleString robj #endif /* REDISMODULE_CORE */ -#endif /* REDISMOUDLE_H */ +#endif /* REDISMODULE_H */ diff --git a/src/replication.c b/src/replication.c index 8f4ad2c92..6feb9ab6c 100644 --- a/src/replication.c +++ b/src/replication.c @@ -83,16 +83,16 @@ char *replicationGetSlaveName(client *c) { * the file deletion to the filesystem. This call removes the file in a * background thread instead. 
We actually just do close() in the thread, * by using the fact that if there is another instance of the same file open, - * the foreground unlink() will not really do anything, and deleting the - * file will only happen once the last reference is lost. */ + * the foreground unlink() will only remove the fs name, and deleting the + * file's storage space will only happen once the last reference is lost. */ int bg_unlink(const char *filename) { int fd = open(filename,O_RDONLY|O_NONBLOCK); if (fd == -1) { /* Can't open the file? Fall back to unlinking in the main thread. */ return unlink(filename); } else { - /* The following unlink() will not do anything since file - * is still open. */ + /* The following unlink() removes the name but doesn't free the + * file contents because a process still has it open. */ int retval = unlink(filename); if (retval == -1) { /* If we got an unlink error, we just return it, closing the @@ -204,7 +204,7 @@ void feedReplicationBacklogWithObject(robj *o) { * as well. This function is used if the instance is a master: we use * the commands received by our clients in order to create the replication * stream. Instead if the instance is a slave and has sub-slaves attached, - * we use replicationFeedSlavesFromMaster() */ + * we use replicationFeedSlavesFromMasterStream() */ void replicationFeedSlaves(list *slaves, int dictid, robj **argv, int argc) { listNode *ln; listIter li; @@ -535,7 +535,7 @@ int masterTryPartialResynchronization(client *c) { (strcasecmp(master_replid, server.replid2) || psync_offset > server.second_replid_offset)) { - /* Run id "?" is used by slaves that want to force a full resync. */ + /* Replid "?" is used by slaves that want to force a full resync. */ if (master_replid[0] != '?') { if (strcasecmp(master_replid, server.replid) && strcasecmp(master_replid, server.replid2)) @@ -707,7 +707,7 @@ int startBgsaveForReplication(int mincapa) { return retval; } -/* SYNC and PSYNC command implemenation. 
*/ +/* SYNC and PSYNC command implementation. */ void syncCommand(client *c) { /* ignore SYNC if already slave or in monitor mode */ if (c->flags & CLIENT_SLAVE) return; @@ -1377,7 +1377,7 @@ void replicationEmptyDbCallback(void *privdata) { replicationSendNewlineToMaster(); } -/* Once we have a link with the master and the synchroniziation was +/* Once we have a link with the master and the synchronization was * performed, this function materializes the master client we store * at server.master, starting from the specified file descriptor. */ void replicationCreateMasterClient(connection *conn, int dbid) { @@ -1454,7 +1454,7 @@ redisDb *disklessLoadMakeBackups(void) { * the 'restore' argument (the number of DBs to replace) is non-zero. * * When instead the loading succeeded we want just to free our old backups, - * in that case the funciton will do just that when 'restore' is 0. */ + * in that case the function will do just that when 'restore' is 0. */ void disklessLoadRestoreBackups(redisDb *backup, int restore, int empty_db_flags) { if (restore) { @@ -1488,7 +1488,7 @@ void readSyncBulkPayload(connection *conn) { off_t left; /* Static vars used to hold the EOF mark, and the last bytes received - * form the server: when they match, we reached the end of the transfer. */ + * from the server: when they match, we reached the end of the transfer. */ static char eofmark[CONFIG_RUN_ID_SIZE]; static char lastbytes[CONFIG_RUN_ID_SIZE]; static int usemark = 0; @@ -1805,7 +1805,7 @@ void readSyncBulkPayload(connection *conn) { REDISMODULE_SUBEVENT_MASTER_LINK_UP, NULL); - /* After a full resynchroniziation we use the replication ID and + /* After a full resynchronization we use the replication ID and * offset of the master. The secondary ID / offset are cleared since * we are starting a new history. */ memcpy(server.replid,server.master->replid,sizeof(server.replid)); @@ -1901,7 +1901,7 @@ char *sendSynchronousCommand(int flags, connection *conn, ...) 
{ /* Try a partial resynchronization with the master if we are about to reconnect. * If there is no cached master structure, at least try to issue a * "PSYNC ? -1" command in order to trigger a full resync using the PSYNC - * command in order to obtain the master run id and the master replication + * command in order to obtain the master replid and the master replication * global offset. * * This function is designed to be called from syncWithMaster(), so the @@ -1929,7 +1929,7 @@ char *sendSynchronousCommand(int flags, connection *conn, ...) { * * PSYNC_CONTINUE: If the PSYNC command succeeded and we can continue. * PSYNC_FULLRESYNC: If PSYNC is supported but a full resync is needed. - * In this case the master run_id and global replication + * In this case the master replid and global replication * offset is saved. * PSYNC_NOT_SUPPORTED: If the server does not understand PSYNC at all and * the caller should fall back to SYNC. @@ -1960,7 +1960,7 @@ int slaveTryPartialResynchronization(connection *conn, int read_reply) { /* Writing half */ if (!read_reply) { /* Initially set master_initial_offset to -1 to mark the current - * master run_id and offset as not valid. Later if we'll be able to do + * master replid and offset as not valid. Later if we'll be able to do * a FULL resync using the PSYNC command we'll set the offset at the * right value, so that this information will be propagated to the * client structure representing the master into server.master. */ @@ -2001,7 +2001,7 @@ int slaveTryPartialResynchronization(connection *conn, int read_reply) { if (!strncmp(reply,"+FULLRESYNC",11)) { char *replid = NULL, *offset = NULL; - /* FULL RESYNC, parse the reply in order to extract the run id + /* FULL RESYNC, parse the reply in order to extract the replid * and the replication offset. */ replid = strchr(reply,' '); if (replid) { @@ -2293,7 +2293,7 @@ void syncWithMaster(connection *conn) { /* Try a partial resynchonization. 
If we don't have a cached master * slaveTryPartialResynchronization() will at least try to use PSYNC - * to start a full resynchronization so that we get the master run id + * to start a full resynchronization so that we get the master replid * and the global offset, to try a partial resync at the next * reconnection attempt. */ if (server.repl_state == REPL_STATE_SEND_PSYNC) { @@ -2455,7 +2455,7 @@ void replicationAbortSyncTransfer(void) { * If there was a replication handshake in progress 1 is returned and * the replication state (server.repl_state) set to REPL_STATE_CONNECT. * - * Otherwise zero is returned and no operation is perforemd at all. */ + * Otherwise zero is returned and no operation is performed at all. */ int cancelReplicationHandshake(void) { if (server.repl_state == REPL_STATE_TRANSFER) { replicationAbortSyncTransfer(); @@ -2887,7 +2887,7 @@ void refreshGoodSlavesCount(void) { * * We don't care about taking a different cache for every different slave * since to fill the cache again is not very costly, the goal of this code - * is to avoid that the same big script is trasmitted a big number of times + * is to avoid that the same big script is transmitted a big number of times * per second wasting bandwidth and processor speed, but it is not a problem * if we need to rebuild the cache from scratch from time to time, every used * script will need to be transmitted a single time to reappear in the cache. @@ -2897,7 +2897,7 @@ void refreshGoodSlavesCount(void) { * 1) Every time a new slave connects, we flush the whole script cache. * 2) We only send as EVALSHA what was sent to the master as EVALSHA, without * trying to convert EVAL into EVALSHA specifically for slaves. - * 3) Every time we trasmit a script as EVAL to the slaves, we also add the + * 3) Every time we transmit a script as EVAL to the slaves, we also add the * corresponding SHA1 of the script into the cache as we are sure every * slave knows about the script starting from now. 
* 4) On SCRIPT FLUSH command, we replicate the command to all the slaves @@ -2988,7 +2988,7 @@ int replicationScriptCacheExists(sds sha1) { /* This just set a flag so that we broadcast a REPLCONF GETACK command * to all the slaves in the beforeSleep() function. Note that this way - * we "group" all the clients that want to wait for synchronouns replication + * we "group" all the clients that want to wait for synchronous replication * in a given event loop iteration, and send a single GETACK for them all. */ void replicationRequestAckFromSlaves(void) { server.get_ack_from_slaves = 1; diff --git a/src/scripting.c b/src/scripting.c index bccbcf637..e43472b3a 100644 --- a/src/scripting.c +++ b/src/scripting.c @@ -69,7 +69,7 @@ struct ldbState { list *children; /* All forked debugging sessions pids. */ int bp[LDB_BREAKPOINTS_MAX]; /* An array of breakpoints line numbers. */ int bpcount; /* Number of valid entries inside bp. */ - int step; /* Stop at next line ragardless of breakpoints. */ + int step; /* Stop at next line regardless of breakpoints. */ int luabp; /* Stop at next line because redis.breakpoint() was called. */ sds *src; /* Lua script source code split by line. */ int lines; /* Number of lines in 'src'. */ @@ -886,7 +886,7 @@ int luaRedisReplicateCommandsCommand(lua_State *lua) { /* redis.breakpoint() * - * Allows to stop execution during a debuggign session from within + * Allows to stop execution during a debugging session from within * the Lua code implementation, like if a breakpoint was set in the code * immediately after the function. 
*/ int luaRedisBreakpointCommand(lua_State *lua) { @@ -1499,7 +1499,7 @@ void evalGenericCommand(client *c, int evalsha) { /* Hash the code if this is an EVAL call */ sha1hex(funcname+2,c->argv[1]->ptr,sdslen(c->argv[1]->ptr)); } else { - /* We already have the SHA if it is a EVALSHA */ + /* We already have the SHA if it is an EVALSHA */ int j; char *sha = c->argv[1]->ptr; @@ -1628,7 +1628,7 @@ void evalGenericCommand(client *c, int evalsha) { * To do so we use a cache of SHA1s of scripts that we already propagated * as full EVAL, that's called the Replication Script Cache. * - * For repliation, everytime a new slave attaches to the master, we need to + * For replication, everytime a new slave attaches to the master, we need to * flush our cache of scripts that can be replicated as EVALSHA, while * for AOF we need to do so every time we rewrite the AOF file. */ if (evalsha && !server.lua_replicate_commands) { @@ -1801,7 +1801,7 @@ void ldbLog(sds entry) { } /* A version of ldbLog() which prevents producing logs greater than - * ldb.maxlen. The first time the limit is reached an hint is generated + * ldb.maxlen. The first time the limit is reached a hint is generated * to inform the user that reply trimming can be disabled using the * debugger "maxlen" command. */ void ldbLogWithMaxLen(sds entry) { @@ -1842,7 +1842,7 @@ void ldbSendLogs(void) { } /* Start a debugging session before calling EVAL implementation. - * The techique we use is to capture the client socket file descriptor, + * The technique we use is to capture the client socket file descriptor, * in order to perform direct I/O with it from within Lua hooks. This * way we don't have to re-enter Redis in order to handle I/O. 
* @@ -1925,7 +1925,7 @@ void ldbEndSession(client *c) { connNonBlock(ldb.conn); connSendTimeout(ldb.conn,0); - /* Close the client connectin after sending the final EVAL reply + /* Close the client connection after sending the final EVAL reply * in order to signal the end of the debugging session. */ c->flags |= CLIENT_CLOSE_AFTER_REPLY; @@ -2094,7 +2094,7 @@ void ldbLogSourceLine(int lnum) { /* Implement the "list" command of the Lua debugger. If around is 0 * the whole file is listed, otherwise only a small portion of the file * around the specified line is shown. When a line number is specified - * the amonut of context (lines before/after) is specified via the + * the amount of context (lines before/after) is specified via the * 'context' argument. */ void ldbList(int around, int context) { int j; @@ -2105,7 +2105,7 @@ void ldbList(int around, int context) { } } -/* Append an human readable representation of the Lua value at position 'idx' +/* Append a human readable representation of the Lua value at position 'idx' * on the stack of the 'lua' state, to the SDS string passed as argument. * The new SDS string with the represented value attached is returned. * Used in order to implement ldbLogStackValue(). @@ -2349,7 +2349,7 @@ char *ldbRedisProtocolToHuman_Double(sds *o, char *reply) { return p+2; } -/* Log a Redis reply as debugger output, in an human readable format. +/* Log a Redis reply as debugger output, in a human readable format. * If the resulting string is longer than 'len' plus a few more chars * used as prefix, it gets truncated. */ void ldbLogRedisReply(char *reply) { @@ -2533,7 +2533,7 @@ void ldbTrace(lua_State *lua) { } } -/* Impleemnts the debugger "maxlen" command. It just queries or sets the +/* Implements the debugger "maxlen" command. It just queries or sets the * ldb.maxlen variable. 
*/ void ldbMaxlen(sds *argv, int argc) { if (argc == 2) { @@ -2606,8 +2606,8 @@ ldbLog(sdsnew(" mode dataset changes will be retained.")); ldbLog(sdsnew("")); ldbLog(sdsnew("Debugger functions you can call from Lua scripts:")); ldbLog(sdsnew("redis.debug() Produce logs in the debugger console.")); -ldbLog(sdsnew("redis.breakpoint() Stop execution like if there was a breakpoing.")); -ldbLog(sdsnew(" in the next line of code.")); +ldbLog(sdsnew("redis.breakpoint() Stop execution like if there was a breakpoint in the")); +ldbLog(sdsnew(" next line of code.")); ldbSendLogs(); } else if (!strcasecmp(argv[0],"s") || !strcasecmp(argv[0],"step") || !strcasecmp(argv[0],"n") || !strcasecmp(argv[0],"next")) { diff --git a/src/sds.c b/src/sds.c index 118971621..a723a42c3 100644 --- a/src/sds.c +++ b/src/sds.c @@ -405,7 +405,7 @@ sds sdscatlen(sds s, const void *t, size_t len) { return s; } -/* Append the specified null termianted C string to the sds string 's'. +/* Append the specified null terminated C string to the sds string 's'. * * After the call, the passed sds string is no longer valid and all the * references must be substituted with the new pointer returned by the call. */ @@ -453,7 +453,7 @@ int sdsll2str(char *s, long long value) { size_t l; /* Generate the string representation, this method produces - * an reversed string. */ + * a reversed string. */ v = (value < 0) ? -value : value; p = s; do { @@ -484,7 +484,7 @@ int sdsull2str(char *s, unsigned long long v) { size_t l; /* Generate the string representation, this method produces - * an reversed string. */ + * a reversed string. */ p = s; do { *p++ = '0'+(v%10); diff --git a/src/sentinel.c b/src/sentinel.c index bdc339674..1bd82453f 100644 --- a/src/sentinel.c +++ b/src/sentinel.c @@ -131,13 +131,13 @@ typedef struct sentinelAddr { /* The link to a sentinelRedisInstance. 
When we have the same set of Sentinels * monitoring many masters, we have different instances representing the * same Sentinels, one per master, and we need to share the hiredis connections - * among them. Oherwise if 5 Sentinels are monitoring 100 masters we create + * among them. Otherwise if 5 Sentinels are monitoring 100 masters we create * 500 outgoing connections instead of 5. * * So this structure represents a reference counted link in terms of the two * hiredis connections for commands and Pub/Sub, and the fields needed for * failure detection, since the ping/pong time are now local to the link: if - * the link is available, the instance is avaialbe. This way we don't just + * the link is available, the instance is available. This way we don't just * have 5 connections instead of 500, we also send 5 pings instead of 500. * * Links are shared only for Sentinels: master and slave instances have @@ -986,7 +986,7 @@ instanceLink *createInstanceLink(void) { return link; } -/* Disconnect an hiredis connection in the context of an instance link. */ +/* Disconnect a hiredis connection in the context of an instance link. */ void instanceLinkCloseConnection(instanceLink *link, redisAsyncContext *c) { if (c == NULL) return; @@ -1125,7 +1125,7 @@ int sentinelUpdateSentinelAddressInAllMasters(sentinelRedisInstance *ri) { return reconfigured; } -/* This function is called when an hiredis connection reported an error. +/* This function is called when a hiredis connection reported an error. * We set it to NULL and mark the link as disconnected so that it will be * reconnected again. * @@ -2015,7 +2015,7 @@ void sentinelSendAuthIfNeeded(sentinelRedisInstance *ri, redisAsyncContext *c) { * The connection type is "cmd" or "pubsub" as specified by 'type'. * * This makes it possible to list all the sentinel instances connected - * to a Redis servewr with CLIENT LIST, grepping for a specific name format. 
*/ + * to a Redis server with CLIENT LIST, grepping for a specific name format. */ void sentinelSetClientName(sentinelRedisInstance *ri, redisAsyncContext *c, char *type) { char name[64]; @@ -2470,7 +2470,7 @@ void sentinelPublishReplyCallback(redisAsyncContext *c, void *reply, void *privd ri->last_pub_time = mstime(); } -/* Process an hello message received via Pub/Sub in master or slave instance, +/* Process a hello message received via Pub/Sub in master or slave instance, * or sent directly to this sentinel via the (fake) PUBLISH command of Sentinel. * * If the master name specified in the message is not known, the message is @@ -2607,7 +2607,7 @@ void sentinelReceiveHelloMessages(redisAsyncContext *c, void *reply, void *privd sentinelProcessHelloMessage(r->element[2]->str, r->element[2]->len); } -/* Send an "Hello" message via Pub/Sub to the specified 'ri' Redis +/* Send a "Hello" message via Pub/Sub to the specified 'ri' Redis * instance in order to broadcast the current configuration for this * master, and to advertise the existence of this Sentinel at the same time. * @@ -2661,7 +2661,7 @@ int sentinelSendHello(sentinelRedisInstance *ri) { } /* Reset last_pub_time in all the instances in the specified dictionary - * in order to force the delivery of an Hello update ASAP. */ + * in order to force the delivery of a Hello update ASAP. */ void sentinelForceHelloUpdateDictOfRedisInstances(dict *instances) { dictIterator *di; dictEntry *de; @@ -2675,13 +2675,13 @@ void sentinelForceHelloUpdateDictOfRedisInstances(dict *instances) { dictReleaseIterator(di); } -/* This function forces the delivery of an "Hello" message (see +/* This function forces the delivery of a "Hello" message (see * sentinelSendHello() top comment for further information) to all the Redis * and Sentinel instances related to the specified 'master'. 
* * It is technically not needed since we send an update to every instance * with a period of SENTINEL_PUBLISH_PERIOD milliseconds, however when a - * Sentinel upgrades a configuration it is a good idea to deliever an update + * Sentinel upgrades a configuration it is a good idea to deliver an update * to the other Sentinels ASAP. */ int sentinelForceHelloUpdateForMaster(sentinelRedisInstance *master) { if (!(master->flags & SRI_MASTER)) return C_ERR; @@ -3082,7 +3082,7 @@ void sentinelCommand(client *c) { * ip and port are the ip and port of the master we want to be * checked by Sentinel. Note that the command will not check by * name but just by master, in theory different Sentinels may monitor - * differnet masters with the same name. + * different masters with the same name. * * current-epoch is needed in order to understand if we are allowed * to vote for a failover leader or not. Each Sentinel can vote just @@ -3995,7 +3995,7 @@ int sentinelSendSlaveOf(sentinelRedisInstance *ri, char *host, int port) { * the following tasks: * 1) Reconfigure the instance according to the specified host/port params. * 2) Rewrite the configuration. - * 3) Disconnect all clients (but this one sending the commnad) in order + * 3) Disconnect all clients (but this one sending the command) in order * to trigger the ask-master-on-reconnection protocol for connected * clients. * @@ -4547,7 +4547,7 @@ void sentinelHandleDictOfRedisInstances(dict *instances) { * difference bigger than SENTINEL_TILT_TRIGGER milliseconds if one of the * following conditions happen: * - * 1) The Sentiel process for some time is blocked, for every kind of + * 1) The Sentinel process for some time is blocked, for every kind of * random reason: the load is huge, the computer was frozen for some time * in I/O or alike, the process was stopped by a signal. Everything. * 2) The system clock was altered significantly. 
diff --git a/src/server.c b/src/server.c index a7a36df13..cf9dc83ea 100644 --- a/src/server.c +++ b/src/server.c @@ -115,7 +115,7 @@ volatile unsigned long lru_clock; /* Server global current LRU time. */ * write: Write command (may modify the key space). * * read-only: All the non special commands just reading from keys without - * changing the content, or returning other informations like + * changing the content, or returning other information like * the TIME command. Special commands such administrative commands * or transaction related commands (multi, exec, discard, ...) * are not flagged as read-only commands, since they affect the @@ -1280,7 +1280,7 @@ dictType objectKeyHeapPointerValueDictType = { dictVanillaFree /* val destructor */ }; -/* Set dictionary type. Keys are SDS strings, values are ot used. */ +/* Set dictionary type. Keys are SDS strings, values are not used. */ dictType setDictType = { dictSdsHash, /* hash function */ NULL, /* key dup */ @@ -1385,9 +1385,8 @@ dictType clusterNodesBlackListDictType = { NULL /* val destructor */ }; -/* Cluster re-addition blacklist. This maps node IDs to the time - * we can re-add this node. The goal is to avoid readding a removed - * node for some time. */ +/* Modules system dictionary type. Keys are module name, + * values are pointer to RedisModule struct. */ dictType modulesDictType = { dictSdsCaseHash, /* hash function */ NULL, /* key dup */ @@ -1440,7 +1439,7 @@ void tryResizeHashTables(int dbid) { /* Our hash table implementation performs rehashing incrementally while * we write/read from the hash table. Still if the server is idle, the hash * table will use two tables for a long time. So we try to use 1 millisecond - * of CPU time at every call of this function to perform some rehahsing. + * of CPU time at every call of this function to perform some rehashing. * * The function returns 1 if some rehashing was performed, otherwise 0 * is returned. 
*/ @@ -1462,8 +1461,8 @@ int incrementallyRehash(int dbid) { * as we want to avoid resizing the hash tables when there is a child in order * to play well with copy-on-write (otherwise when a resize happens lots of * memory pages are copied). The goal of this function is to update the ability - * for dict.c to resize the hash tables accordingly to the fact we have o not - * running childs. */ + * for dict.c to resize the hash tables accordingly to the fact we have an + * active fork child running. */ void updateDictResizePolicy(void) { if (!hasActiveChildProcess()) dictEnableResize(); @@ -1613,7 +1612,7 @@ int clientsCronTrackClientsMemUsage(client *c) { mem += sdsAllocSize(c->querybuf); mem += sizeof(client); /* Now that we have the memory used by the client, remove the old - * value from the old categoty, and add it back. */ + * value from the old category, and add it back. */ server.stat_clients_type_memory[c->client_cron_last_memory_type] -= c->client_cron_last_memory_usage; server.stat_clients_type_memory[type] += mem; @@ -2028,7 +2027,7 @@ int serverCron(struct aeEventLoop *eventLoop, long long id, void *clientData) { /* AOF write errors: in this case we have a buffer to flush as well and * clear the AOF error in case of success to make the DB writable again, * however to try every second is enough in case of 'hz' is set to - * an higher frequency. */ + * a higher frequency. */ run_with_period(1000) { if (server.aof_last_write_status == C_ERR) flushAppendOnlyFile(0); @@ -2198,7 +2197,7 @@ void beforeSleep(struct aeEventLoop *eventLoop) { if (moduleCount()) moduleReleaseGIL(); } -/* This function is called immadiately after the event loop multiplexing +/* This function is called immediately after the event loop multiplexing * API returned, and the control is going to soon return to Redis by invoking * the different events callbacks. 
*/ void afterSleep(struct aeEventLoop *eventLoop) { @@ -2420,7 +2419,7 @@ void initServerConfig(void) { R_NegInf = -1.0/R_Zero; R_Nan = R_Zero/R_Zero; - /* Command table -- we initiialize it here as it is part of the + /* Command table -- we initialize it here as it is part of the * initial configuration, since command names may be changed via * redis.conf using the rename-command directive. */ server.commands = dictCreate(&commandTableDictType,NULL); @@ -3085,7 +3084,7 @@ int populateCommandTableParseFlags(struct redisCommand *c, char *strflags) { } /* Populates the Redis Command Table starting from the hard coded list - * we have on top of redis.c file. */ + * we have on top of server.c file. */ void populateCommandTable(void) { int j; int numcommands = sizeof(redisCommandTable)/sizeof(struct redisCommand); @@ -3219,12 +3218,12 @@ void propagate(struct redisCommand *cmd, int dbid, robj **argv, int argc, * * 'cmd' must be a pointer to the Redis command to replicate, dbid is the * database ID the command should be propagated into. - * Arguments of the command to propagte are passed as an array of redis + * Arguments of the command to propagate are passed as an array of redis * objects pointers of len 'argc', using the 'argv' vector. * * The function does not take a reference to the passed 'argv' vector, * so it is up to the caller to release the passed argv (but it is usually - * stack allocated). The function autoamtically increments ref count of + * stack allocated). The function automatically increments ref count of * passed objects, so the caller does not need to. 
*/ void alsoPropagate(struct redisCommand *cmd, int dbid, robj **argv, int argc, int target) @@ -3384,7 +3383,7 @@ void call(client *c, int flags) { if (c->flags & CLIENT_FORCE_AOF) propagate_flags |= PROPAGATE_AOF; /* However prevent AOF / replication propagation if the command - * implementations called preventCommandPropagation() or similar, + * implementation called preventCommandPropagation() or similar, * or if we don't have the call() flags to do so. */ if (c->flags & CLIENT_PREVENT_REPL_PROP || !(flags & CMD_CALL_PROPAGATE_REPL)) @@ -3632,7 +3631,7 @@ int processCommand(client *c) { } /* Save out_of_memory result at script start, otherwise if we check OOM - * untill first write within script, memory used by lua stack and + * until first write within script, memory used by lua stack and * arguments might interfere. */ if (c->cmd->proc == evalCommand || c->cmd->proc == evalShaCommand) { server.lua_oom = out_of_memory; @@ -3870,7 +3869,7 @@ int prepareForShutdown(int flags) { /*================================== Commands =============================== */ -/* Sometimes Redis cannot accept write commands because there is a perstence +/* Sometimes Redis cannot accept write commands because there is a persistence * error with the RDB or AOF file, and Redis is configured in order to stop * accepting writes in such situation. This function returns if such a * condition is active, and the type of the condition. diff --git a/src/server.h b/src/server.h index 980011938..6a130879e 100644 --- a/src/server.h +++ b/src/server.h @@ -161,7 +161,7 @@ extern int configOOMScoreAdjValuesDefaults[CONFIG_OOM_COUNT]; /* Hash table parameters */ #define HASHTABLE_MIN_FILL 10 /* Minimal hash table fill 10% */ -/* Command flags. Please check the command table defined in the redis.c file +/* Command flags. Please check the command table defined in the server.c file * for more information about the meaning of every flag. 
*/ #define CMD_WRITE (1ULL<<0) /* "write" flag */ #define CMD_READONLY (1ULL<<1) /* "read-only" flag */ @@ -827,7 +827,7 @@ typedef struct client { copying this slave output buffer should use. */ char replid[CONFIG_RUN_ID_SIZE+1]; /* Master replication ID (if master). */ - int slave_listening_port; /* As configured with: SLAVECONF listening-port */ + int slave_listening_port; /* As configured with: REPLCONF listening-port */ char slave_ip[NET_IP_STR_LEN]; /* Optionally given by REPLCONF ip-address */ int slave_capa; /* Slave capabilities: SLAVE_CAPA_* bitwise OR. */ multiState mstate; /* MULTI/EXEC state */ @@ -939,7 +939,7 @@ typedef struct redisOp { } redisOp; /* Defines an array of Redis operations. There is an API to add to this - * structure in a easy way. + * structure in an easy way. * * redisOpArrayInit(); * redisOpArrayAppend(); @@ -1349,7 +1349,7 @@ struct redisServer { unsigned int maxclients; /* Max number of simultaneous clients */ unsigned long long maxmemory; /* Max number of memory bytes to use */ int maxmemory_policy; /* Policy for key eviction */ - int maxmemory_samples; /* Pricision of random sampling */ + int maxmemory_samples; /* Precision of random sampling */ int lfu_log_factor; /* LFU logarithmic counter factor. */ int lfu_decay_time; /* LFU counter decay factor. */ long long proto_max_bulk_len; /* Protocol bulk length maximum size. */ @@ -1429,7 +1429,7 @@ struct redisServer { int lua_random_dirty; /* True if a random command was called during the execution of the current script. */ int lua_replicate_commands; /* True if we are doing single commands repl. */ - int lua_multi_emitted;/* True if we already proagated MULTI. */ + int lua_multi_emitted;/* True if we already propagated MULTI. */ int lua_repl; /* Script replication flags for redis.set_repl(). */ int lua_timedout; /* True if we reached the time limit for script execution. 
*/ @@ -1935,7 +1935,7 @@ void addACLLogEntry(client *c, int reason, int keypos, sds username); /* Flags only used by the ZADD command but not by zsetAdd() API: */ #define ZADD_CH (1<<16) /* Return num of elements added or updated. */ -/* Struct to hold a inclusive/exclusive range spec by score comparison. */ +/* Struct to hold an inclusive/exclusive range spec by score comparison. */ typedef struct { double min, max; int minex, maxex; /* are min or max exclusive? */ diff --git a/src/siphash.c b/src/siphash.c index 357741132..30c15c04e 100644 --- a/src/siphash.c +++ b/src/siphash.c @@ -22,7 +22,7 @@ 1. We use SipHash 1-2. This is not believed to be as strong as the suggested 2-4 variant, but AFAIK there are not trivial attacks against this reduced-rounds version, and it runs at the same speed - as Murmurhash2 that we used previously, why the 2-4 variant slowed + as Murmurhash2 that we used previously, while the 2-4 variant slowed down Redis by a 4% figure more or less. 2. Hard-code rounds in the hope the compiler can optimize it more in this raw from. Anyway we always want the standard 2-4 variant. @@ -36,7 +36,7 @@ perform a text transformation in some temporary buffer, which is costly. 5. Remove debugging code. 6. Modified the original test.c file to be a stand-alone function testing - the function in the new form (returing an uint64_t) using just the + the function in the new form (returning an uint64_t) using just the relevant test vector. */ #include @@ -46,7 +46,7 @@ #include /* Fast tolower() alike function that does not care about locale - * but just returns a-z insetad of A-Z. */ + * but just returns a-z instead of A-Z. 
*/ int siptlw(int c) { if (c >= 'A' && c <= 'Z') { return c+('a'-'A'); diff --git a/src/slowlog.c b/src/slowlog.c index 1d715e39b..408456b14 100644 --- a/src/slowlog.c +++ b/src/slowlog.c @@ -75,7 +75,7 @@ slowlogEntry *slowlogCreateEntry(client *c, robj **argv, int argc, long long dur } else if (argv[j]->refcount == OBJ_SHARED_REFCOUNT) { se->argv[j] = argv[j]; } else { - /* Here we need to dupliacate the string objects composing the + /* Here we need to duplicate the string objects composing the * argument vector of the command, because those may otherwise * end shared with string objects stored into keys. Having * shared objects between any part of Redis, and the data diff --git a/src/sort.c b/src/sort.c index f269a7731..aeef53e6a 100644 --- a/src/sort.c +++ b/src/sort.c @@ -115,7 +115,7 @@ robj *lookupKeyByPattern(redisDb *db, robj *pattern, robj *subst, int writeflag) if (fieldobj) { if (o->type != OBJ_HASH) goto noobj; - /* Retrieve value from hash by the field name. The returend object + /* Retrieve value from hash by the field name. The returned object * is a new object with refcount already incremented. */ o = hashTypeGetValueObject(o, fieldobj->ptr); } else { diff --git a/src/sparkline.c b/src/sparkline.c index 0a986883d..67482c774 100644 --- a/src/sparkline.c +++ b/src/sparkline.c @@ -92,7 +92,7 @@ void freeSparklineSequence(struct sequence *seq) { * ------------------------------------------------------------------------- */ /* Render part of a sequence, so that render_sequence() call call this function - * with differnent parts in order to create the full output without overflowing + * with different parts in order to create the full output without overflowing * the current terminal columns. 
*/ sds sparklineRenderRange(sds output, struct sequence *seq, int rows, int offset, int len, int flags) { int j; diff --git a/src/stream.h b/src/stream.h index 0d3bf63fc..e4c5ff78d 100644 --- a/src/stream.h +++ b/src/stream.h @@ -74,7 +74,7 @@ typedef struct streamConsumer { consumer not yet acknowledged. Keys are big endian message IDs, while values are the same streamNACK structure referenced - in the "pel" of the conumser group structure + in the "pel" of the consumer group structure itself, so the value is shared. */ } streamConsumer; diff --git a/src/t_hash.c b/src/t_hash.c index 240e11c91..8e79432a4 100644 --- a/src/t_hash.c +++ b/src/t_hash.c @@ -630,7 +630,7 @@ void hincrbyfloatCommand(client *c) { server.dirty++; /* Always replicate HINCRBYFLOAT as an HSET command with the final value - * in order to make sure that differences in float pricision or formatting + * in order to make sure that differences in float precision or formatting * will not create differences in replicas or after an AOF restart. */ robj *aux, *newobj; aux = createStringObject("HSET",4); diff --git a/src/t_list.c b/src/t_list.c index a751dde26..4f0bd7b81 100644 --- a/src/t_list.c +++ b/src/t_list.c @@ -723,7 +723,7 @@ void rpoplpushCommand(client *c) { * Blocking POP operations *----------------------------------------------------------------------------*/ -/* This is a helper function for handleClientsBlockedOnKeys(). It's work +/* This is a helper function for handleClientsBlockedOnKeys(). Its work * is to serve a specific client (receiver) that is blocked on 'key' * in the context of the specified 'db', doing the following: * @@ -809,7 +809,7 @@ void blockingPopGenericCommand(client *c, int where) { return; } else { if (listTypeLength(o) != 0) { - /* Non empty list, this is like a non normal [LR]POP. */ + /* Non empty list, this is like a normal [LR]POP. */ char *event = (where == LIST_HEAD) ? 
"lpop" : "rpop"; robj *value = listTypePop(o,where); serverAssert(value != NULL); @@ -845,7 +845,7 @@ void blockingPopGenericCommand(client *c, int where) { return; } - /* If the list is empty or the key does not exists we must block */ + /* If the keys do not exist we must block */ blockForKeys(c,BLOCKED_LIST,c->argv + 1,c->argc - 2,timeout,NULL,NULL); } diff --git a/src/t_set.c b/src/t_set.c index c2e73a6e6..837337ba7 100644 --- a/src/t_set.c +++ b/src/t_set.c @@ -193,7 +193,7 @@ sds setTypeNextObject(setTypeIterator *si) { } /* Return random element from a non empty set. - * The returned element can be a int64_t value if the set is encoded + * The returned element can be an int64_t value if the set is encoded * as an "intset" blob of integers, or an SDS string if the set * is a regular set. * @@ -442,7 +442,7 @@ void spopWithCountCommand(client *c) { dbDelete(c->db,c->argv[1]); notifyKeyspaceEvent(NOTIFY_GENERIC,"del",c->argv[1],c->db->id); - /* Propagate this command as an DEL operation */ + /* Propagate this command as a DEL operation */ rewriteClientCommandVector(c,2,shared.del,c->argv[1]); signalModifiedKey(c,c->db,c->argv[1]); server.dirty++; @@ -676,7 +676,7 @@ void srandmemberWithCountCommand(client *c) { * In this case we create a set from scratch with all the elements, and * subtract random elements to reach the requested number of elements. * - * This is done because if the number of requsted elements is just + * This is done because if the number of requested elements is just * a bit less than the number of elements in the set, the natural approach * used into CASE 3 is highly inefficient. */ if (count*SRANDMEMBER_SUB_STRATEGY_MUL > size) { diff --git a/src/t_stream.c b/src/t_stream.c index a54671938..357975079 100644 --- a/src/t_stream.c +++ b/src/t_stream.c @@ -1197,7 +1197,7 @@ void xaddCommand(client *c) { int id_given = 0; /* Was an ID different than "*" specified? */ long long maxlen = -1; /* If left to -1 no trimming is performed. 
*/ int approx_maxlen = 0; /* If 1 only delete whole radix tree nodes, so - the maxium length is not applied verbatim. */ + the maximum length is not applied verbatim. */ int maxlen_arg_idx = 0; /* Index of the count in MAXLEN, for rewriting. */ /* Parse options. */ @@ -1893,7 +1893,7 @@ NULL } } -/* XSETID +/* XSETID * * Set the internal "last ID" of a stream. */ void xsetidCommand(client *c) { @@ -1982,7 +1982,7 @@ void xackCommand(client *c) { * * If start and stop are omitted, the command just outputs information about * the amount of pending messages for the key/group pair, together with - * the minimum and maxium ID of pending messages. + * the minimum and maximum ID of pending messages. * * If start and stop are provided instead, the pending messages are returned * with informations about the current owner, number of deliveries and last diff --git a/src/t_string.c b/src/t_string.c index 4be758e65..4886f7e44 100644 --- a/src/t_string.c +++ b/src/t_string.c @@ -316,7 +316,7 @@ void msetGenericCommand(client *c, int nx) { } /* Handle the NX flag. The MSETNX semantic is to return zero and don't - * set anything if at least one key alerady exists. */ + * set anything if at least one key already exists. */ if (nx) { for (j = 1; j < c->argc; j += 2) { if (lookupKeyWrite(c->db,c->argv[j]) != NULL) { diff --git a/src/t_zset.c b/src/t_zset.c index 9c409cd96..cf2d7f972 100644 --- a/src/t_zset.c +++ b/src/t_zset.c @@ -245,7 +245,7 @@ int zslDelete(zskiplist *zsl, double score, sds ele, zskiplistNode **node) { return 0; /* not found */ } -/* Update the score of an elmenent inside the sorted set skiplist. +/* Update the score of an element inside the sorted set skiplist. * Note that the element must exist and must match 'score'. * This function does not update the score in the hash table side, the * caller should take care of it. 
diff --git a/src/tracking.c b/src/tracking.c index 2721de32a..3737f6859 100644 --- a/src/tracking.c +++ b/src/tracking.c @@ -134,7 +134,7 @@ void enableTracking(client *c, uint64_t redirect_to, uint64_t options, robj **pr CLIENT_TRACKING_NOLOOP); c->client_tracking_redirection = redirect_to; - /* This may be the first client we ever enable. Crete the tracking + /* This may be the first client we ever enable. Create the tracking * table if it does not exist. */ if (TrackingTable == NULL) { TrackingTable = raxNew(); diff --git a/src/valgrind.sup b/src/valgrind.sup index 3024d63bc..b05843d8c 100644 --- a/src/valgrind.sup +++ b/src/valgrind.sup @@ -1,17 +1,17 @@ { - + Memcheck:Cond fun:lzf_compress } { - + Memcheck:Value4 fun:lzf_compress } { - + Memcheck:Value8 fun:lzf_compress } diff --git a/src/ziplist.c b/src/ziplist.c index 13881c117..e27875f6e 100644 --- a/src/ziplist.c +++ b/src/ziplist.c @@ -99,7 +99,7 @@ * Integer encoded as 24 bit signed (3 bytes). * |11111110| - 2 bytes * Integer encoded as 8 bit signed (1 byte). - * |1111xxxx| - (with xxxx between 0000 and 1101) immediate 4 bit integer. + * |1111xxxx| - (with xxxx between 0001 and 1101) immediate 4 bit integer. * Unsigned integer from 0 to 12. The encoded value is actually from * 1 to 13 because 0000 and 1111 can not be used, so 1 should be * subtracted from the encoded 4 bit value to obtain the right value. @@ -191,10 +191,10 @@ #include "redisassert.h" #define ZIP_END 255 /* Special "end of ziplist" entry. */ -#define ZIP_BIG_PREVLEN 254 /* Max number of bytes of the previous entry, for - the "prevlen" field prefixing each entry, to be - represented with just a single byte. Otherwise - it is represented as FE AA BB CC DD, where +#define ZIP_BIG_PREVLEN 254 /* ZIP_BIG_PREVLEN - 1 is the max number of bytes of + the previous entry, for the "prevlen" field prefixing + each entry, to be represented with just a single byte. 
+ Otherwise it is represented as FE AA BB CC DD, where AA BB CC DD are a 4 bytes unsigned integer representing the previous entry len. */ @@ -317,7 +317,7 @@ unsigned int zipIntSize(unsigned char encoding) { return 0; } -/* Write the encoidng header of the entry in 'p'. If p is NULL it just returns +/* Write the encoding header of the entry in 'p'. If p is NULL it just returns * the amount of bytes required to encode such a length. Arguments: * * 'encoding' is the encoding we are using for the entry. It could be @@ -325,7 +325,7 @@ unsigned int zipIntSize(unsigned char encoding) { * for single-byte small immediate integers. * * 'rawlen' is only used for ZIP_STR_* encodings and is the length of the - * srting that this entry represents. + * string that this entry represents. * * The function returns the number of bytes used by the encoding/length * header stored in 'p'. */ @@ -914,7 +914,7 @@ unsigned char *ziplistMerge(unsigned char **first, unsigned char **second) { } else { /* !append == prepending to target */ /* Move target *contents* exactly size of (source - [END]), - * then copy source into vacataed space (source - [END]): + * then copy source into vacated space (source - [END]): * [SOURCE - END, TARGET - HEADER] */ memmove(target + source_bytes - ZIPLIST_END_SIZE, target + ZIPLIST_HEADER_SIZE, diff --git a/src/zipmap.c b/src/zipmap.c index 22bfa1a46..365c4aea4 100644 --- a/src/zipmap.c +++ b/src/zipmap.c @@ -133,7 +133,7 @@ static unsigned int zipmapEncodeLength(unsigned char *p, unsigned int len) { * zipmap. Returns NULL if the key is not found. * * If NULL is returned, and totlen is not NULL, it is set to the entire - * size of the zimap, so that the calling function will be able to + * size of the zipmap, so that the calling function will be able to * reallocate the original zipmap to make room for more entries. 
*/ static unsigned char *zipmapLookupRaw(unsigned char *zm, unsigned char *key, unsigned int klen, unsigned int *totlen) { unsigned char *p = zm+1, *k = NULL; diff --git a/tests/cluster/tests/04-resharding.tcl b/tests/cluster/tests/04-resharding.tcl index 33f861dc5..cee2ec5ba 100644 --- a/tests/cluster/tests/04-resharding.tcl +++ b/tests/cluster/tests/04-resharding.tcl @@ -1,7 +1,7 @@ # Failover stress test. # In this test a different node is killed in a loop for N # iterations. The test checks that certain properties -# are preseved across iterations. +# are preserved across iterations. source "../tests/includes/init-tests.tcl" source "../../../tests/support/cli.tcl" @@ -32,7 +32,7 @@ test "Enable AOF in all the instances" { } } -# Return nno-zero if the specified PID is about a process still in execution, +# Return non-zero if the specified PID is about a process still in execution, # otherwise 0 is returned. proc process_is_running {pid} { # PS should return with an error if PID is non existing, @@ -45,7 +45,7 @@ proc process_is_running {pid} { # # - N commands are sent to the cluster in the course of the test. # - Every command selects a random key from key:0 to key:MAX-1. -# - The operation RPUSH key is perforemd. +# - The operation RPUSH key is performed. # - Tcl remembers into an array all the values pushed to each list. # - After N/2 commands, the resharding process is started in background. # - The test continues while the resharding is in progress. diff --git a/tests/instances.tcl b/tests/instances.tcl index 2199cfcd4..275db45c3 100644 --- a/tests/instances.tcl +++ b/tests/instances.tcl @@ -322,7 +322,7 @@ proc pause_on_error {} { puts "S cmd ... arg Call command in Sentinel ." puts "R cmd ... arg Call command in Redis ." puts "SI Show Sentinel INFO ." - puts "RI Show Sentinel INFO ." + puts "RI Show Redis INFO ." puts "continue Resume test." 
} else { set errcode [catch {eval $line} retval] diff --git a/tests/integration/replication-4.tcl b/tests/integration/replication-4.tcl index 54891151b..8071c4f97 100644 --- a/tests/integration/replication-4.tcl +++ b/tests/integration/replication-4.tcl @@ -16,7 +16,7 @@ start_server {tags {"repl"}} { s 0 role } {slave} - test {Test replication with parallel clients writing in differnet DBs} { + test {Test replication with parallel clients writing in different DBs} { after 5000 stop_bg_complex_data $load_handle0 stop_bg_complex_data $load_handle1 diff --git a/tests/support/test.tcl b/tests/support/test.tcl index 55937b8f4..23015b3a7 100644 --- a/tests/support/test.tcl +++ b/tests/support/test.tcl @@ -108,7 +108,7 @@ proc test {name code {okpattern undefined} {options undefined}} { return } - # abort if test name in skiptests + # abort if only_tests was set but test name is not included if {[llength $::only_tests] > 0 && [lsearch $::only_tests $name] < 0} { incr ::num_skipped send_data_packet $::test_server_fd skip $name diff --git a/tests/test_helper.tcl b/tests/test_helper.tcl index fe2d484b8..7e1c5c88f 100644 --- a/tests/test_helper.tcl +++ b/tests/test_helper.tcl @@ -471,7 +471,7 @@ proc signal_idle_client fd { # The the_end function gets called when all the test units were already # executed, so the test finished. proc the_end {} { - # TODO: print the status, exit with the rigth exit code. + # TODO: print the status, exit with the right exit code. puts "\n The End\n" puts "Execution time of different units:" foreach {time name} $::clients_time_history { @@ -526,9 +526,10 @@ proc print_help_screen {} { "--stack-logging Enable OSX leaks/malloc stack logging." "--accurate Run slow randomized tests for more iterations." "--quiet Don't show individual tests." - "--single Just execute the specified unit (see next option). this option can be repeated." + "--single Just execute the specified unit (see next option). This option can be repeated." 
+ "--verbose Increases verbosity." "--list-tests List all the available test units." - "--only Just execute the specified test by test name. this option can be repeated." + "--only Just execute the specified test by test name. This option can be repeated." "--skip-till Skip all units until (and including) the specified one." "--skipunit Skip one unit." "--clients Number of test clients (default 16)." diff --git a/tests/unit/expire.tcl b/tests/unit/expire.tcl index 52d174d75..444525f36 100644 --- a/tests/unit/expire.tcl +++ b/tests/unit/expire.tcl @@ -72,7 +72,7 @@ start_server {tags {"expire"}} { list [r persist foo] [r persist nokeyatall] } {0 0} - test {EXPIRE pricision is now the millisecond} { + test {EXPIRE precision is now the millisecond} { # This test is very likely to do a false positive if the # server is under pressure, so if it does not work give it a few more # chances. diff --git a/tests/unit/hyperloglog.tcl b/tests/unit/hyperloglog.tcl index 712fcc641..db26a2e75 100644 --- a/tests/unit/hyperloglog.tcl +++ b/tests/unit/hyperloglog.tcl @@ -79,7 +79,7 @@ start_server {tags {"hll"}} { } } - test {Corrupted sparse HyperLogLogs are detected: Additionl at tail} { + test {Corrupted sparse HyperLogLogs are detected: Additional at tail} { r del hll r pfadd hll a b c r append hll "hello" diff --git a/tests/unit/scripting.tcl b/tests/unit/scripting.tcl index 8b364b287..3283edc66 100644 --- a/tests/unit/scripting.tcl +++ b/tests/unit/scripting.tcl @@ -533,7 +533,7 @@ start_server {tags {"scripting"}} { # Note: keep this test at the end of this server stanza because it # kills the server. test {SHUTDOWN NOSAVE can kill a timedout script anyway} { - # The server could be still unresponding to normal commands. + # The server should be still unresponding to normal commands. 
catch {r ping} e assert_match {BUSY*} $e catch {r shutdown nosave} diff --git a/utils/create-cluster/README b/utils/create-cluster/README index 37a3080db..bcd745977 100644 --- a/utils/create-cluster/README +++ b/utils/create-cluster/README @@ -1,4 +1,4 @@ -Create-custer is a small script used to easily start a big number of Redis +create-cluster is a small script used to easily start a big number of Redis instances configured to run in cluster mode. Its main goal is to allow manual testing in a condition which is not easy to replicate with the Redis cluster unit tests, for example when a lot of instances are needed in order to trigger diff --git a/utils/hashtable/README b/utils/hashtable/README index 87a76c9a5..87ffc2f08 100644 --- a/utils/hashtable/README +++ b/utils/hashtable/README @@ -5,7 +5,7 @@ rehashing.c Visually show buckets in the two hash tables between rehashings. Also stress test getRandomKeys() implementation, that may actually disappear from -Redis soon, however visualization some code is reusable in new bugs +Redis soon, However the visualization code is reusable in new bugs investigation. Compile with: From 410c101439d895b7354fad327e590154451da086 Mon Sep 17 00:00:00 2001 From: Wang Yuan Date: Thu, 10 Sep 2020 22:02:27 +0800 Subject: [PATCH 002/114] Remove dead global variable 'lru_clock' (#7782) (cherry picked from commit 95595d0636d4a993201ca9034fba6e91527b3337) --- src/server.c | 1 - 1 file changed, 1 deletion(-) diff --git a/src/server.c b/src/server.c index cf9dc83ea..06dcc5d9e 100644 --- a/src/server.c +++ b/src/server.c @@ -70,7 +70,6 @@ double R_Zero, R_PosInf, R_NegInf, R_Nan; /* Global vars */ struct redisServer server; /* Server global state */ -volatile unsigned long lru_clock; /* Server global current LRU time. */ /* Our command table. 
* From c769bc81b685253a91d5a2d38bbb8e2dfa38fd0c Mon Sep 17 00:00:00 2001 From: Oran Agra Date: Sun, 13 Sep 2020 13:50:23 +0300 Subject: [PATCH 003/114] fix broken PEXPIREAT test (#7791) This test was nearly always failing on MacOS github actions. This is because of bugs in the test that caused it to nearly always run all 3 attempts and just look at the last one as the pass/fail creteria. i.e. the test was nearly always running all 3 attempts and still sometimes succeed. this is because the break condition was different than the test completion condition. The reason the test succeeded is because the break condition tested the results of all 3 tests (PSETEX/PEXPIRE/PEXPIREAT), but the success check at the end was only testing the result of PSETEX. The reason the PEXPIREAT test nearly always failed is because it was getting the current time wrong: getting the current second and loosing the sub-section time, so the only chance for it to succeed is if it run right when a certain second started. Because i now get the time from redis, adding another round trip, i added another 100ms to the PEXPIRE test to make it less fragile, and also added many more attempts. Adding many more attempts before failure to account for slow platforms, github actions and valgrind (cherry picked from commit 1fd56bb75a9afa5469b3ecb70d394b2adaf9baac) --- tests/unit/expire.tcl | 16 ++++++++++------ 1 file changed, 10 insertions(+), 6 deletions(-) diff --git a/tests/unit/expire.tcl b/tests/unit/expire.tcl index 444525f36..8bcdc16b7 100644 --- a/tests/unit/expire.tcl +++ b/tests/unit/expire.tcl @@ -92,7 +92,7 @@ start_server {tags {"expire"}} { # This test is very likely to do a false positive if the # server is under pressure, so if it does not work give it a few more # chances. 
- for {set j 0} {$j < 3} {incr j} { + for {set j 0} {$j < 30} {incr j} { r del x y z r psetex x 100 somevalue after 80 @@ -108,18 +108,22 @@ start_server {tags {"expire"}} { set d [r get x] r set x somevalue - r pexpireat x [expr ([clock seconds]*1000)+100] - after 80 + set now [r time] + r pexpireat x [expr ([lindex $now 0]*1000)+([lindex $now 1]/1000)+200] + after 20 set e [r get x] - after 120 + after 220 set f [r get x] if {$a eq {somevalue} && $b eq {} && $c eq {somevalue} && $d eq {} && $e eq {somevalue} && $f eq {}} break } - list $a $b - } {somevalue {}} + if {$::verbose} { + puts "sub-second expire test attempts: $j" + } + list $a $b $c $d $e $f + } {somevalue {} somevalue {} somevalue {}} test {TTL returns time to live in seconds} { r del x From 5097434dee085aae16a70c55dd2ddefcefc9aa10 Mon Sep 17 00:00:00 2001 From: Oran Agra Date: Sun, 13 Sep 2020 13:51:21 +0300 Subject: [PATCH 004/114] Fix failing valgrind installation in github actions (#7792) These tests started failing every day on http 404 (not being able to install valgrind) (cherry picked from commit 9428c1a591472fc87775781e5955aa527c6f1ff0) --- .github/workflows/daily.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/daily.yml b/.github/workflows/daily.yml index 07cd55c87..087b9f2ef 100644 --- a/.github/workflows/daily.yml +++ b/.github/workflows/daily.yml @@ -109,6 +109,7 @@ jobs: run: make valgrind - name: test run: | + sudo apt-get update sudo apt-get install tcl8.5 valgrind -y ./runtest --valgrind --verbose --clients 1 - name: module api test From b4299b0629359e89a34f4c41c33016e37fb0fbf8 Mon Sep 17 00:00:00 2001 From: Mykhailo Pylyp Date: Sun, 13 Sep 2020 18:39:59 +0300 Subject: [PATCH 005/114] Recalculate hardcoded variables from $::instances_count in sentinel tests (#7561) Co-authored-by: MemuraiUser (cherry picked from commit c0a41896dad0396cb6e09ed0bbe72d90cdbf25e0) --- tests/sentinel/tests/06-ckquorum.tcl | 13 +++++++------ 
tests/sentinel/tests/07-down-conditions.tcl | 13 +++++++------ tests/sentinel/tests/includes/init-tests.tcl | 2 +- 3 files changed, 15 insertions(+), 13 deletions(-) diff --git a/tests/sentinel/tests/06-ckquorum.tcl b/tests/sentinel/tests/06-ckquorum.tcl index 31e5fa2f8..4ea4e55d8 100644 --- a/tests/sentinel/tests/06-ckquorum.tcl +++ b/tests/sentinel/tests/06-ckquorum.tcl @@ -20,15 +20,16 @@ test "CKQUORUM detects quorum cannot be reached" { test "CKQUORUM detects failover authorization cannot be reached" { set orig_quorum [expr {$num_sentinels/2+1}] S 0 SENTINEL SET mymaster quorum 1 - kill_instance sentinel 1 - kill_instance sentinel 2 - kill_instance sentinel 3 + for {set i 0} {$i < $orig_quorum} {incr i} { + kill_instance sentinel [expr {$i + 1}] + } + after 5000 catch {[S 0 SENTINEL CKQUORUM mymaster]} err assert_match "*NOQUORUM*" $err S 0 SENTINEL SET mymaster quorum $orig_quorum - restart_instance sentinel 1 - restart_instance sentinel 2 - restart_instance sentinel 3 + for {set i 0} {$i < $orig_quorum} {incr i} { + restart_instance sentinel [expr {$i + 1}] + } } diff --git a/tests/sentinel/tests/07-down-conditions.tcl b/tests/sentinel/tests/07-down-conditions.tcl index a12ea3151..0a696fa6b 100644 --- a/tests/sentinel/tests/07-down-conditions.tcl +++ b/tests/sentinel/tests/07-down-conditions.tcl @@ -3,9 +3,10 @@ source "../tests/includes/init-tests.tcl" source "../../../tests/support/cli.tcl" +set ::alive_sentinel [expr {$::instances_count/2+2}] proc ensure_master_up {} { wait_for_condition 1000 50 { - [dict get [S 4 sentinel master mymaster] flags] eq "master" + [dict get [S $::alive_sentinel sentinel master mymaster] flags] eq "master" } else { fail "Master flags are not just 'master'" } @@ -14,7 +15,7 @@ proc ensure_master_up {} { proc ensure_master_down {} { wait_for_condition 1000 50 { [string match *down* \ - [dict get [S 4 sentinel master mymaster] flags]] + [dict get [S $::alive_sentinel sentinel master mymaster] flags]] } else { fail "Master is not 
flagged SDOWN" } @@ -27,7 +28,7 @@ test "Crash the majority of Sentinels to prevent failovers for this unit" { } test "SDOWN is triggered by non-responding but not crashed instance" { - lassign [S 4 SENTINEL GET-MASTER-ADDR-BY-NAME mymaster] host port + lassign [S $::alive_sentinel SENTINEL GET-MASTER-ADDR-BY-NAME mymaster] host port ensure_master_up exec ../../../src/redis-cli -h $host -p $port {*}[rediscli_tls_config "../../../tests"] debug sleep 10 > /dev/null & ensure_master_down @@ -35,7 +36,7 @@ test "SDOWN is triggered by non-responding but not crashed instance" { } test "SDOWN is triggered by crashed instance" { - lassign [S 4 SENTINEL GET-MASTER-ADDR-BY-NAME mymaster] host port + lassign [S $::alive_sentinel SENTINEL GET-MASTER-ADDR-BY-NAME mymaster] host port ensure_master_up kill_instance redis 0 ensure_master_down @@ -72,8 +73,8 @@ test "SDOWN is triggered by misconfigured instance repling with errors" { # effect of the master going down if we send PONG instead of PING test "SDOWN is triggered if we rename PING to PONG" { ensure_master_up - S 4 SENTINEL SET mymaster rename-command PING PONG + S $::alive_sentinel SENTINEL SET mymaster rename-command PING PONG ensure_master_down - S 4 SENTINEL SET mymaster rename-command PING PING + S $::alive_sentinel SENTINEL SET mymaster rename-command PING PING ensure_master_up } diff --git a/tests/sentinel/tests/includes/init-tests.tcl b/tests/sentinel/tests/includes/init-tests.tcl index c8165dcfa..234f9c589 100644 --- a/tests/sentinel/tests/includes/init-tests.tcl +++ b/tests/sentinel/tests/includes/init-tests.tcl @@ -18,7 +18,7 @@ test "(init) Remove old master entry from sentinels" { } } -set redis_slaves 4 +set redis_slaves [expr $::instances_count - 1] test "(init) Create a master-slaves cluster of [expr $redis_slaves+1] instances" { create_redis_master_slave_cluster [expr {$redis_slaves+1}] } From 7e8f233e8b95abdc8ef0a17bdd55f64c5c40b303 Mon Sep 17 00:00:00 2001 From: WuYunlong Date: Tue, 15 Sep 2020 13:27:42 
+0800 Subject: [PATCH 006/114] Clarify help text of tcl scripts. (#7798) Before this commit, following command did not show --tls option: ./runtest-cluster --help ./runtest-sentinel --help (cherry picked from commit e4a1280a0e6c33d03ec6b622b8159b2b26f0f9c3) --- tests/instances.tcl | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/instances.tcl b/tests/instances.tcl index 275db45c3..5c4b665db 100644 --- a/tests/instances.tcl +++ b/tests/instances.tcl @@ -243,6 +243,7 @@ proc parse_options {} { puts "--pause-on-error Pause for manual inspection on error." puts "--fail Simulate a test failure." puts "--valgrind Run with valgrind." + puts "--tls Run tests in TLS mode." puts "--help Shows this help." exit 0 } else { From 385e3596b50ce3bb6991dbbac71da064030b88dc Mon Sep 17 00:00:00 2001 From: Wen Hui Date: Tue, 15 Sep 2020 01:58:21 -0400 Subject: [PATCH 007/114] correct OBJECT ENCODING response for stream type (#7797) This commit makes stream object returning "stream" as encoding type in OBJECT ENCODING subcommand and DEBUG OBJECT command. Till now, it would return "unknown" (cherry picked from commit 2a8803f534728a6fd1b7c29a2d7e195f6a928f50) --- src/object.c | 1 + 1 file changed, 1 insertion(+) diff --git a/src/object.c b/src/object.c index f8775ea97..6caa71bb9 100644 --- a/src/object.c +++ b/src/object.c @@ -739,6 +739,7 @@ char *strEncoding(int encoding) { case OBJ_ENCODING_INTSET: return "intset"; case OBJ_ENCODING_SKIPLIST: return "skiplist"; case OBJ_ENCODING_EMBSTR: return "embstr"; + case OBJ_ENCODING_STREAM: return "stream"; default: return "unknown"; } } From 4aef590a633fabf6a5bc1f5e03e2373c402ba390 Mon Sep 17 00:00:00 2001 From: WuYunlong Date: Tue, 15 Sep 2020 13:06:47 +0800 Subject: [PATCH 008/114] bio: fix doFastMemoryTest. If one thread got SIGSEGV, function sigsegvHandler() would be triggered, it would call bioKillThreads(). But call pthread_cancel() to cancel itself would make it block. 
Also note that if SIGSEGV is caught by bio thread, it should kill the main thread in order to give a positive report. (cherry picked from commit cf8a6e3c7a0448851f0c00ff1a726701a2be9f1a) --- src/bio.c | 3 ++- src/debug.c | 22 +++++++++++++++++++++- src/server.c | 2 +- src/server.h | 1 + 4 files changed, 25 insertions(+), 3 deletions(-) diff --git a/src/bio.c b/src/bio.c index 69c62fc6f..33465a166 100644 --- a/src/bio.c +++ b/src/bio.c @@ -268,10 +268,11 @@ void bioKillThreads(void) { int err, j; for (j = 0; j < BIO_NUM_OPS; j++) { + if (bio_threads[j] == pthread_self()) continue; if (bio_threads[j] && pthread_cancel(bio_threads[j]) == 0) { if ((err = pthread_join(bio_threads[j],NULL)) != 0) { serverLog(LL_WARNING, - "Bio thread for job type #%d can be joined: %s", + "Bio thread for job type #%d can not be joined: %s", j, strerror(err)); } else { serverLog(LL_WARNING, diff --git a/src/debug.c b/src/debug.c index 921c681a5..178893bae 100644 --- a/src/debug.c +++ b/src/debug.c @@ -1512,6 +1512,26 @@ int memtest_test_linux_anonymous_maps(void) { } #endif +static void killMainThread(void) { + int err; + if (pthread_self() != server.main_thread_id && pthread_cancel(server.main_thread_id) == 0) { + if ((err = pthread_join(server.main_thread_id,NULL)) != 0) { + serverLog(LL_WARNING, "main thread can not be joined: %s", strerror(err)); + } else { + serverLog(LL_WARNING, "main thread terminated"); + } + } +} + +/* Kill the running threads (other than current) in an unclean way. This function + * should be used only when it's critical to stop the threads for some reason. + * Currently Redis does this only on crash (for instance on SIGSEGV) in order + * to perform a fast memory check without other threads messing with memory. 
*/ +static void killThreads(void) { + killMainThread(); + bioKillThreads(); +} + /* Scans the (assumed) x86 code starting at addr, for a max of `len` * bytes, searching for E8 (callq) opcodes, and dumping the symbols * and the call offset if they appear to be valid. */ @@ -1589,7 +1609,7 @@ void sigsegvHandler(int sig, siginfo_t *info, void *secret) { #if defined(HAVE_PROC_MAPS) /* Test memory */ serverLogRaw(LL_WARNING|LL_RAW, "\n------ FAST MEMORY TEST ------\n"); - bioKillThreads(); + killThreads(); if (memtest_test_linux_anonymous_maps()) { serverLogRaw(LL_WARNING|LL_RAW, "!!! MEMORY ERROR DETECTED! Check your memory ASAP !!!\n"); diff --git a/src/server.c b/src/server.c index 06dcc5d9e..37e3c37df 100644 --- a/src/server.c +++ b/src/server.c @@ -2814,6 +2814,7 @@ void initServer(void) { server.aof_state = server.aof_enabled ? AOF_ON : AOF_OFF; server.hz = server.config_hz; server.pid = getpid(); + server.main_thread_id = pthread_self(); server.current_client = NULL; server.fixed_time_expire = 0; server.clients = listCreate(); @@ -5091,7 +5092,6 @@ int iAmMaster(void) { (server.cluster_enabled && nodeIsMaster(server.cluster->myself))); } - int main(int argc, char **argv) { struct timeval tv; int j; diff --git a/src/server.h b/src/server.h index 6a130879e..a1ce26cc2 100644 --- a/src/server.h +++ b/src/server.h @@ -1050,6 +1050,7 @@ struct clusterState; struct redisServer { /* General */ pid_t pid; /* Main process pid. */ + pthread_t main_thread_id; /* Main thread id */ char *configfile; /* Absolute config file path, or NULL */ char *executable; /* Absolute executable file path. */ char **exec_argv; /* Executable argv vector (copy). */ From 201b993840292b920c06658f7df4a5ef94d645f0 Mon Sep 17 00:00:00 2001 From: WuYunlong Date: Wed, 16 Sep 2020 09:58:24 +0800 Subject: [PATCH 009/114] bio: doFastMemoryTest should try to kill io threads as well. 
(cherry picked from commit e9b6077ac798e4d30c9401a3687ffe61568b6eae) --- src/debug.c | 1 + src/networking.c | 17 +++++++++++++++++ src/server.h | 1 + 3 files changed, 19 insertions(+) diff --git a/src/debug.c b/src/debug.c index 178893bae..ae62c0216 100644 --- a/src/debug.c +++ b/src/debug.c @@ -1530,6 +1530,7 @@ static void killMainThread(void) { static void killThreads(void) { killMainThread(); bioKillThreads(); + killIOThreads(); } /* Scans the (assumed) x86 code starting at addr, for a max of `len` diff --git a/src/networking.c b/src/networking.c index 0d290e169..b7d6d6211 100644 --- a/src/networking.c +++ b/src/networking.c @@ -3021,6 +3021,23 @@ void initThreadedIO(void) { } } +void killIOThreads(void) { + int err, j; + for (j = 0; j < server.io_threads_num; j++) { + if (io_threads[j] == pthread_self()) continue; + if (io_threads[j] && pthread_cancel(io_threads[j]) == 0) { + if ((err = pthread_join(io_threads[j],NULL)) != 0) { + serverLog(LL_WARNING, + "IO thread(tid:%lu) can not be joined: %s", + (unsigned long)io_threads[j], strerror(err)); + } else { + serverLog(LL_WARNING, + "IO thread(tid:%lu) terminated",(unsigned long)io_threads[j]); + } + } + } +} + void startThreadedIO(void) { if (tio_debug) { printf("S"); fflush(stdout); } if (tio_debug) printf("--- STARTING THREADED IO ---\n"); diff --git a/src/server.h b/src/server.h index a1ce26cc2..8cf29820d 100644 --- a/src/server.h +++ b/src/server.h @@ -2460,6 +2460,7 @@ int memtest_preserving_test(unsigned long *m, size_t bytes, int passes); void mixDigest(unsigned char *digest, void *ptr, size_t len); void xorDigest(unsigned char *digest, void *ptr, size_t len); int populateCommandTableParseFlags(struct redisCommand *c, char *strflags); +void killIOThreads(void); /* TLS stuff */ void tlsInit(void); From 1cbdafc9804352ae37cb9645cda582d7be0b3d84 Mon Sep 17 00:00:00 2001 From: Oran Agra Date: Wed, 16 Sep 2020 20:21:04 +0300 Subject: [PATCH 010/114] Add printf attribute and fix warnings and a minor bug (#7803) 
The fix in error handling of rdbGenericLoadStringObject is an actual bugfix (cherry picked from commit 622b57e9eea44e069ad973597bed40107cfbeff0) --- src/rdb.c | 11 +++++++---- src/server.h | 5 +++++ 2 files changed, 12 insertions(+), 4 deletions(-) diff --git a/src/rdb.c b/src/rdb.c index 4bcf96038..a9b262eac 100644 --- a/src/rdb.c +++ b/src/rdb.c @@ -52,6 +52,9 @@ extern int rdbCheckMode; void rdbCheckError(const char *fmt, ...); void rdbCheckSetError(const char *fmt, ...); +#ifdef __GNUC__ +void rdbReportError(int corruption_error, int linenum, char *reason, ...) __attribute__ ((format (printf, 3, 4))); +#endif void rdbReportError(int corruption_error, int linenum, char *reason, ...) { va_list ap; char msg[1024]; @@ -487,7 +490,7 @@ void *rdbGenericLoadStringObject(rio *rdb, int flags, size_t *lenptr) { int plain = flags & RDB_LOAD_PLAIN; int sds = flags & RDB_LOAD_SDS; int isencoded; - uint64_t len; + unsigned long long len; len = rdbLoadLen(rdb,&isencoded); if (isencoded) { @@ -499,8 +502,8 @@ void *rdbGenericLoadStringObject(rio *rdb, int flags, size_t *lenptr) { case RDB_ENC_LZF: return rdbLoadLzfStringObject(rdb,flags,lenptr); default: - rdbExitReportCorruptRDB("Unknown RDB string encoding type %d",len); - return NULL; /* Never reached. */ + rdbExitReportCorruptRDB("Unknown RDB string encoding type %llu",len); + return NULL; } } @@ -2200,7 +2203,7 @@ int rdbLoadRio(rio *rdb, int rdbflags, rdbSaveInfo *rsi) { if (luaCreateFunction(NULL,server.lua,auxval) == NULL) { rdbExitReportCorruptRDB( "Can't load Lua script from RDB file! 
" - "BODY: %s", auxval->ptr); + "BODY: %s", (char*)auxval->ptr); } } else if (!strcasecmp(auxkey->ptr,"redis-ver")) { serverLog(LL_NOTICE,"Loading RDB produced by version %s", diff --git a/src/server.h b/src/server.h index 8cf29820d..21d506d39 100644 --- a/src/server.h +++ b/src/server.h @@ -2446,7 +2446,12 @@ void *realloc(void *ptr, size_t size) __attribute__ ((deprecated)); /* Debugging stuff */ void _serverAssertWithInfo(const client *c, const robj *o, const char *estr, const char *file, int line); void _serverAssert(const char *estr, const char *file, int line); +#ifdef __GNUC__ +void _serverPanic(const char *file, int line, const char *msg, ...) + __attribute__ ((format (printf, 3, 4))); +#else void _serverPanic(const char *file, int line, const char *msg, ...); +#endif void bugReportStart(void); void serverLogObjectDebugInfo(const robj *o); void sigsegvHandler(int sig, siginfo_t *info, void *secret); From 048816bf27b09bf95c5bc6126bce373288e395f3 Mon Sep 17 00:00:00 2001 From: Wang Yuan Date: Thu, 17 Sep 2020 23:20:10 +0800 Subject: [PATCH 011/114] Remove tmp rdb file in background thread (#7762) We're already using bg_unlink in several places to delete the rdb file in the background, and avoid paying the cost of the deletion from our main thread. This commit uses bg_unlink to remove the temporary rdb file in the background too. However, in case we delete that rdb file just before exiting, we don't actually wait for the background thread or the main thread to delete it, and just let the OS clean up after us. i.e. we open the file, unlink it and exit with the fd still open. Furthermore, rdbRemoveTempFile can be called from a thread and was using snprintf which is not async-signal-safe, we now use ll2string instead. 
(cherry picked from commit 6638f6129553d0f19c60944e70fe619a4217658c) --- src/rdb.c | 29 +++++++++++++++++++----- src/rdb.h | 2 +- src/server.c | 6 ++++- src/server.h | 1 + tests/test_helper.tcl | 1 + tests/unit/shutdown.tcl | 49 +++++++++++++++++++++++++++++++++++++++++ 6 files changed, 81 insertions(+), 7 deletions(-) create mode 100644 tests/unit/shutdown.tcl diff --git a/src/rdb.c b/src/rdb.c index a9b262eac..7e0d33565 100644 --- a/src/rdb.c +++ b/src/rdb.c @@ -34,6 +34,7 @@ #include "stream.h" #include +#include #include #include #include @@ -1413,11 +1414,29 @@ int rdbSaveBackground(char *filename, rdbSaveInfo *rsi) { return C_OK; /* unreached */ } -void rdbRemoveTempFile(pid_t childpid) { +/* Note that we may call this function in signal handle 'sigShutdownHandler', + * so we need guarantee all functions we call are async-signal-safe. + * If we call this function from signal handle, we won't call bg_unlik that + * is not async-signal-safe. */ +void rdbRemoveTempFile(pid_t childpid, int from_signal) { char tmpfile[256]; + char pid[32]; - snprintf(tmpfile,sizeof(tmpfile),"temp-%d.rdb", (int) childpid); - unlink(tmpfile); + /* Generate temp rdb file name using aync-signal safe functions. */ + int pid_len = ll2string(pid, sizeof(pid), childpid); + strcpy(tmpfile, "temp-"); + strncpy(tmpfile+5, pid, pid_len); + strcpy(tmpfile+5+pid_len, ".rdb"); + + if (from_signal) { + /* bg_unlink is not async-signal-safe, but in this case we don't really + * need to close the fd, it'll be released when the process exists. 
*/ + int fd = open(tmpfile, O_RDONLY|O_NONBLOCK); + UNUSED(fd); + unlink(tmpfile); + } else { + bg_unlink(tmpfile); + } } /* This function is called by rdbLoadObject() when the code is in RDB-check @@ -2420,7 +2439,7 @@ void backgroundSaveDoneHandlerDisk(int exitcode, int bysignal) { serverLog(LL_WARNING, "Background saving terminated by signal %d", bysignal); latencyStartMonitor(latency); - rdbRemoveTempFile(server.rdb_child_pid); + rdbRemoveTempFile(server.rdb_child_pid, 0); latencyEndMonitor(latency); latencyAddSampleIfNeeded("rdb-unlink-temp-file",latency); /* SIGUSR1 is whitelisted, so we have a way to kill a child without @@ -2477,7 +2496,7 @@ void backgroundSaveDoneHandler(int exitcode, int bysignal) { * the cleanup needed. */ void killRDBChild(void) { kill(server.rdb_child_pid,SIGUSR1); - rdbRemoveTempFile(server.rdb_child_pid); + rdbRemoveTempFile(server.rdb_child_pid, 0); closeChildInfoPipe(); updateDictResizePolicy(); } diff --git a/src/rdb.h b/src/rdb.h index aae682dbc..885cf49c6 100644 --- a/src/rdb.h +++ b/src/rdb.h @@ -141,7 +141,7 @@ int rdbLoadObjectType(rio *rdb); int rdbLoad(char *filename, rdbSaveInfo *rsi, int rdbflags); int rdbSaveBackground(char *filename, rdbSaveInfo *rsi); int rdbSaveToSlavesSockets(rdbSaveInfo *rsi); -void rdbRemoveTempFile(pid_t childpid); +void rdbRemoveTempFile(pid_t childpid, int from_signal); int rdbSave(char *filename, rdbSaveInfo *rsi); ssize_t rdbSaveObject(rio *rdb, robj *o, robj *key); size_t rdbSavedObjectLen(robj *o, robj *key); diff --git a/src/server.c b/src/server.c index 37e3c37df..a88ffc6ee 100644 --- a/src/server.c +++ b/src/server.c @@ -3797,6 +3797,10 @@ int prepareForShutdown(int flags) { overwrite the synchronous saving did by SHUTDOWN. */ if (server.rdb_child_pid != -1) { serverLog(LL_WARNING,"There is a child saving an .rdb. Killing it!"); + /* Note that, in killRDBChild, we call rdbRemoveTempFile that will + * do close fd(in order to unlink file actully) in background thread. 
+ * The temp rdb file fd may won't be closed when redis exits quickly, + * but OS will close this fd when process exits. */ killRDBChild(); } @@ -4846,7 +4850,7 @@ static void sigShutdownHandler(int sig) { * on disk. */ if (server.shutdown_asap && sig == SIGINT) { serverLogFromHandler(LL_WARNING, "You insist... exiting now."); - rdbRemoveTempFile(getpid()); + rdbRemoveTempFile(getpid(), 1); exit(1); /* Exit with an error since this was not a clean shutdown. */ } else if (server.loading) { serverLogFromHandler(LL_WARNING, "Received shutdown signal during loading, exiting now."); diff --git a/src/server.h b/src/server.h index 21d506d39..4bbe59703 100644 --- a/src/server.h +++ b/src/server.h @@ -1865,6 +1865,7 @@ int writeCommandsDeniedByDiskError(void); /* RDB persistence */ #include "rdb.h" void killRDBChild(void); +int bg_unlink(const char *filename); /* AOF persistence */ void flushAppendOnlyFile(int force); diff --git a/tests/test_helper.tcl b/tests/test_helper.tcl index 7e1c5c88f..b60adb881 100644 --- a/tests/test_helper.tcl +++ b/tests/test_helper.tcl @@ -69,6 +69,7 @@ set ::all_tests { unit/tls unit/tracking unit/oom-score-adj + unit/shutdown } # Index to the next test to run in the ::all_tests list. 
set ::next_test 0 diff --git a/tests/unit/shutdown.tcl b/tests/unit/shutdown.tcl new file mode 100644 index 000000000..21ea8545d --- /dev/null +++ b/tests/unit/shutdown.tcl @@ -0,0 +1,49 @@ +start_server {tags {"shutdown"}} { + test {Temp rdb will be deleted if we use bg_unlink when shutdown} { + for {set i 0} {$i < 20} {incr i} { + r set $i $i + } + # It will cost 2s(20 * 100ms) to dump rdb + r config set rdb-key-save-delay 100000 + + # Child is dumping rdb + r bgsave + after 100 + set dir [lindex [r config get dir] 1] + set child_pid [get_child_pid 0] + set temp_rdb [file join [lindex [r config get dir] 1] temp-${child_pid}.rdb] + # Temp rdb must be existed + assert {[file exists $temp_rdb]} + + catch {r shutdown nosave} + # Make sure the server was killed + catch {set rd [redis_deferring_client]} e + assert_match {*connection refused*} $e + + # Temp rdb file must be deleted + assert {![file exists $temp_rdb]} + } +} + +start_server {tags {"shutdown"}} { + test {Temp rdb will be deleted in signal handle} { + for {set i 0} {$i < 20} {incr i} { + r set $i $i + } + # It will cost 2s(20 * 100ms) to dump rdb + r config set rdb-key-save-delay 100000 + + set pid [s process_id] + set temp_rdb [file join [lindex [r config get dir] 1] temp-${pid}.rdb] + + exec kill -SIGINT $pid + after 100 + # Temp rdb must be existed + assert {[file exists $temp_rdb]} + + # Temp rdb file must be deleted + exec kill -SIGINT $pid + after 100 + assert {![file exists $temp_rdb]} + } +} From 57e1dbff57c11d0c2144ae4688d2beb45fc9ebb5 Mon Sep 17 00:00:00 2001 From: David CARLIER Date: Sat, 19 Sep 2020 10:24:40 +0100 Subject: [PATCH 012/114] debug.c: NetBSD build warning fix. (#7810) The symbol base address is a const on this system. 
(cherry picked from commit c9edb477921d2fbf80c8ffef0882fbd0281675fa) --- src/debug.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/src/debug.c b/src/debug.c index ae62c0216..0f8760ef6 100644 --- a/src/debug.c +++ b/src/debug.c @@ -1638,13 +1638,14 @@ void sigsegvHandler(int sig, siginfo_t *info, void *secret) { /* Find the address of the next page, which is our "safety" * limit when dumping. Then try to dump just 128 bytes more * than EIP if there is room, or stop sooner. */ + void *base = (void *)info.dli_saddr; unsigned long next = ((unsigned long)eip + sz) & ~(sz-1); unsigned long end = (unsigned long)eip + 128; if (end > next) end = next; - len = end - (unsigned long)info.dli_saddr; + len = end - (unsigned long)base; serverLogHexDump(LL_WARNING, "dump of function", - info.dli_saddr ,len); - dumpX86Calls(info.dli_saddr,len); + base ,len); + dumpX86Calls(base,len); } } } From 064992af62b92c74735f599c50c0b9aba688c39a Mon Sep 17 00:00:00 2001 From: Daniel Dai <764122422@qq.com> Date: Sun, 20 Sep 2020 05:06:17 -0400 Subject: [PATCH 013/114] fix make warnings in debug.c MacOS (#7805) Co-authored-by: Oran Agra (cherry picked from commit 6d46a8e2163750f707f9d36889d5fdf514132a69) --- src/debug.c | 4 ++-- src/server.h | 1 + 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/src/debug.c b/src/debug.c index 0f8760ef6..e64ec1b78 100644 --- a/src/debug.c +++ b/src/debug.c @@ -1510,7 +1510,7 @@ int memtest_test_linux_anonymous_maps(void) { closeDirectLogFiledes(fd); return errors; } -#endif +#endif /* HAVE_PROC_MAPS */ static void killMainThread(void) { int err; @@ -1527,7 +1527,7 @@ static void killMainThread(void) { * should be used only when it's critical to stop the threads for some reason. * Currently Redis does this only on crash (for instance on SIGSEGV) in order * to perform a fast memory check without other threads messing with memory. 
*/ -static void killThreads(void) { +void killThreads(void) { killMainThread(); bioKillThreads(); killIOThreads(); diff --git a/src/server.h b/src/server.h index 4bbe59703..ba470c303 100644 --- a/src/server.h +++ b/src/server.h @@ -2467,6 +2467,7 @@ void mixDigest(unsigned char *digest, void *ptr, size_t len); void xorDigest(unsigned char *digest, void *ptr, size_t len); int populateCommandTableParseFlags(struct redisCommand *c, char *strflags); void killIOThreads(void); +void killThreads(void); /* TLS stuff */ void tlsInit(void); From d3f36e93a97879896ec923686b855ce3d976b075 Mon Sep 17 00:00:00 2001 From: Wen Hui Date: Sun, 20 Sep 2020 06:36:20 -0400 Subject: [PATCH 014/114] Add Swapdb Module Event (#7804) (cherry picked from commit 0db3223bc6090556c920912d9c92dd42878e316c) --- src/db.c | 2 ++ src/module.c | 16 ++++++++++++++++ src/redismodule.h | 16 ++++++++++++++++ tests/modules/hooks.c | 12 ++++++++++++ tests/unit/moduleapi/hooks.tcl | 6 ++++++ 5 files changed, 52 insertions(+) diff --git a/src/db.c b/src/db.c index 9efda0907..7ed746f9a 100644 --- a/src/db.c +++ b/src/db.c @@ -1163,6 +1163,8 @@ void swapdbCommand(client *c) { addReplyError(c,"DB index is out of range"); return; } else { + RedisModuleSwapDbInfo si = {REDISMODULE_SWAPDBINFO_VERSION,id1,id2}; + moduleFireServerEvent(REDISMODULE_EVENT_SWAPDB,0,&si); server.dirty++; addReply(c,shared.ok); } diff --git a/src/module.c b/src/module.c index bd75c8f92..5dd845de0 100644 --- a/src/module.c +++ b/src/module.c @@ -7179,6 +7179,20 @@ void ModuleForkDoneHandler(int exitcode, int bysignal) { * int32_t progress; // Approximate progress between 0 and 1024, * or -1 if unknown. * + * RedisModuleEvent_SwapDB + * + * This event is called when a swap db command has been successfully + * Executed. + * For this event call currently there is no subevents available. 
+ * + * The data pointer can be casted to a RedisModuleSwapDbInfo + * structure with the following fields: + * + * int32_t dbnum_first; // Swap Db first dbnum + * int32_t dbnum_second; // Swap Db second dbnum + * + * + * * The function returns REDISMODULE_OK if the module was successfully subscribed * for the specified event. If the API is called from a wrong context then * REDISMODULE_ERR is returned. */ @@ -7283,6 +7297,8 @@ void moduleFireServerEvent(uint64_t eid, int subid, void *data) { moduledata = data; } else if (eid == REDISMODULE_EVENT_CRON_LOOP) { moduledata = data; + } else if (eid == REDISMODULE_EVENT_SWAPDB) { + moduledata = data; } ModulesInHooks++; diff --git a/src/redismodule.h b/src/redismodule.h index 4a0e5bf15..56011fae0 100644 --- a/src/redismodule.h +++ b/src/redismodule.h @@ -190,6 +190,7 @@ typedef uint64_t RedisModuleTimerID; #define REDISMODULE_EVENT_CRON_LOOP 8 #define REDISMODULE_EVENT_MODULE_CHANGE 9 #define REDISMODULE_EVENT_LOADING_PROGRESS 10 +#define REDISMODULE_EVENT_SWAPDB 11 typedef struct RedisModuleEvent { uint64_t id; /* REDISMODULE_EVENT_... defines. */ @@ -243,6 +244,10 @@ static const RedisModuleEvent RedisModuleEvent_LoadingProgress = { REDISMODULE_EVENT_LOADING_PROGRESS, 1 + }, + RedisModuleEvent_SwapDB = { + REDISMODULE_EVENT_SWAPDB, + 1 }; /* Those are values that are used for the 'subevent' callback argument. */ @@ -374,6 +379,17 @@ typedef struct RedisModuleLoadingProgressInfo { #define RedisModuleLoadingProgress RedisModuleLoadingProgressV1 +#define REDISMODULE_SWAPDBINFO_VERSION 1 +typedef struct RedisModuleSwapDbInfo { + uint64_t version; /* Not used since this structure is never passed + from the module to the core right now. Here + for future compatibility. 
*/ + int32_t dbnum_first; /* Swap Db first dbnum */ + int32_t dbnum_second; /* Swap Db second dbnum */ +} RedisModuleSwapDbInfoV1; + +#define RedisModuleSwapDbInfo RedisModuleSwapDbInfoV1 + /* ------------------------- End of common defines ------------------------ */ #ifndef REDISMODULE_CORE diff --git a/tests/modules/hooks.c b/tests/modules/hooks.c index 665a20481..54f84aa23 100644 --- a/tests/modules/hooks.c +++ b/tests/modules/hooks.c @@ -253,6 +253,16 @@ void moduleChangeCallback(RedisModuleCtx *ctx, RedisModuleEvent e, uint64_t sub, LogStringEvent(ctx, keyname, ei->module_name); } +void swapDbCallback(RedisModuleCtx *ctx, RedisModuleEvent e, uint64_t sub, void *data) +{ + REDISMODULE_NOT_USED(e); + REDISMODULE_NOT_USED(sub); + + RedisModuleSwapDbInfo *ei = data; + LogNumericEvent(ctx, "swapdb-first", ei->dbnum_first); + LogNumericEvent(ctx, "swapdb-second", ei->dbnum_second); +} + /* This function must be present on each Redis module. It is used in order to * register the commands into the Redis server. 
*/ int RedisModule_OnLoad(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) { @@ -289,6 +299,8 @@ int RedisModule_OnLoad(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) RedisModuleEvent_CronLoop, cronLoopCallback); RedisModule_SubscribeToServerEvent(ctx, RedisModuleEvent_ModuleChange, moduleChangeCallback); + RedisModule_SubscribeToServerEvent(ctx, + RedisModuleEvent_SwapDB, swapDbCallback); event_log = RedisModule_CreateDict(ctx); diff --git a/tests/unit/moduleapi/hooks.tcl b/tests/unit/moduleapi/hooks.tcl index da0307ce6..c4af59bd2 100644 --- a/tests/unit/moduleapi/hooks.tcl +++ b/tests/unit/moduleapi/hooks.tcl @@ -147,6 +147,12 @@ tags "modules" { set replica_stdout [srv 0 stdout] } + test {Test swapdb hooks} { + r swapdb 0 10 + assert_equal [r hooks.event_last swapdb-first] 0 + assert_equal [r hooks.event_last swapdb-second] 10 + + } # look into the log file of the server that just exited test {Test shutdown hook} { From f216bf312ae70dd62b447448e9c003df3b940dcd Mon Sep 17 00:00:00 2001 From: Oran Agra Date: Sun, 20 Sep 2020 13:43:28 +0300 Subject: [PATCH 015/114] RM_GetContextFlags provides indication that we're in a fork child (#7783) (cherry picked from commit 0b476b591d8b92e88ec56675e747de23968eeae0) --- src/aof.c | 4 ++-- src/childinfo.c | 6 +++--- src/module.c | 5 +++-- src/rdb.c | 8 ++++---- src/redismodule.h | 2 ++ src/scripting.c | 2 +- src/server.c | 8 ++++++-- src/server.h | 11 +++++++---- 8 files changed, 28 insertions(+), 18 deletions(-) diff --git a/src/aof.c b/src/aof.c index dc50e2228..757c68807 100644 --- a/src/aof.c +++ b/src/aof.c @@ -1603,7 +1603,7 @@ int rewriteAppendOnlyFileBackground(void) { if (hasActiveChildProcess()) return C_ERR; if (aofCreatePipes() != C_OK) return C_ERR; openChildInfoPipe(); - if ((childpid = redisFork()) == 0) { + if ((childpid = redisFork(CHILD_TYPE_AOF)) == 0) { char tmpfile[256]; /* Child */ @@ -1611,7 +1611,7 @@ int rewriteAppendOnlyFileBackground(void) { 
redisSetCpuAffinity(server.aof_rewrite_cpulist); snprintf(tmpfile,256,"temp-rewriteaof-bg-%d.aof", (int) getpid()); if (rewriteAppendOnlyFile(tmpfile) == C_OK) { - sendChildCOWInfo(CHILD_INFO_TYPE_AOF, "AOF rewrite"); + sendChildCOWInfo(CHILD_TYPE_AOF, "AOF rewrite"); exitFromChild(0); } else { exitFromChild(1); diff --git a/src/childinfo.c b/src/childinfo.c index fa0600552..f95ae9647 100644 --- a/src/childinfo.c +++ b/src/childinfo.c @@ -76,11 +76,11 @@ void receiveChildInfo(void) { if (read(server.child_info_pipe[0],&server.child_info_data,wlen) == wlen && server.child_info_data.magic == CHILD_INFO_MAGIC) { - if (server.child_info_data.process_type == CHILD_INFO_TYPE_RDB) { + if (server.child_info_data.process_type == CHILD_TYPE_RDB) { server.stat_rdb_cow_bytes = server.child_info_data.cow_size; - } else if (server.child_info_data.process_type == CHILD_INFO_TYPE_AOF) { + } else if (server.child_info_data.process_type == CHILD_TYPE_AOF) { server.stat_aof_cow_bytes = server.child_info_data.cow_size; - } else if (server.child_info_data.process_type == CHILD_INFO_TYPE_MODULE) { + } else if (server.child_info_data.process_type == CHILD_TYPE_MODULE) { server.stat_module_cow_bytes = server.child_info_data.cow_size; } } diff --git a/src/module.c b/src/module.c index 5dd845de0..4cb4cfe20 100644 --- a/src/module.c +++ b/src/module.c @@ -1996,6 +1996,7 @@ int RM_GetContextFlags(RedisModuleCtx *ctx) { /* Presence of children processes. 
*/ if (hasActiveChildProcess()) flags |= REDISMODULE_CTX_FLAGS_ACTIVE_CHILD; + if (server.in_fork_child) flags |= REDISMODULE_CTX_FLAGS_IS_CHILD; return flags; } @@ -6904,7 +6905,7 @@ int RM_Fork(RedisModuleForkDoneHandler cb, void *user_data) { } openChildInfoPipe(); - if ((childpid = redisFork()) == 0) { + if ((childpid = redisFork(CHILD_TYPE_MODULE)) == 0) { /* Child */ redisSetProcTitle("redis-module-fork"); } else if (childpid == -1) { @@ -6924,7 +6925,7 @@ int RM_Fork(RedisModuleForkDoneHandler cb, void *user_data) { * retcode will be provided to the done handler executed on the parent process. */ int RM_ExitFromChild(int retcode) { - sendChildCOWInfo(CHILD_INFO_TYPE_MODULE, "Module fork"); + sendChildCOWInfo(CHILD_TYPE_MODULE, "Module fork"); exitFromChild(retcode); return REDISMODULE_OK; } diff --git a/src/rdb.c b/src/rdb.c index 7e0d33565..fe9397624 100644 --- a/src/rdb.c +++ b/src/rdb.c @@ -1385,7 +1385,7 @@ int rdbSaveBackground(char *filename, rdbSaveInfo *rsi) { server.lastbgsave_try = time(NULL); openChildInfoPipe(); - if ((childpid = redisFork()) == 0) { + if ((childpid = redisFork(CHILD_TYPE_RDB)) == 0) { int retval; /* Child */ @@ -1393,7 +1393,7 @@ int rdbSaveBackground(char *filename, rdbSaveInfo *rsi) { redisSetCpuAffinity(server.bgsave_cpulist); retval = rdbSave(filename,rsi); if (retval == C_OK) { - sendChildCOWInfo(CHILD_INFO_TYPE_RDB, "RDB"); + sendChildCOWInfo(CHILD_TYPE_RDB, "RDB"); } exitFromChild((retval == C_OK) ? 0 : 1); } else { @@ -2540,7 +2540,7 @@ int rdbSaveToSlavesSockets(rdbSaveInfo *rsi) { /* Create the child process. 
*/ openChildInfoPipe(); - if ((childpid = redisFork()) == 0) { + if ((childpid = redisFork(CHILD_TYPE_RDB)) == 0) { /* Child */ int retval; rio rdb; @@ -2555,7 +2555,7 @@ int rdbSaveToSlavesSockets(rdbSaveInfo *rsi) { retval = C_ERR; if (retval == C_OK) { - sendChildCOWInfo(CHILD_INFO_TYPE_RDB, "RDB"); + sendChildCOWInfo(CHILD_TYPE_RDB, "RDB"); } rioFreeFd(&rdb); diff --git a/src/redismodule.h b/src/redismodule.h index 56011fae0..c0eedc221 100644 --- a/src/redismodule.h +++ b/src/redismodule.h @@ -112,6 +112,8 @@ #define REDISMODULE_CTX_FLAGS_ACTIVE_CHILD (1<<18) /* The next EXEC will fail due to dirty CAS (touched keys). */ #define REDISMODULE_CTX_FLAGS_MULTI_DIRTY (1<<19) +/* Redis is currently running inside background child process. */ +#define REDISMODULE_CTX_FLAGS_IS_CHILD (1<<20) /* Keyspace changes notification classes. Every class is associated with a * character for configuration purposes. diff --git a/src/scripting.c b/src/scripting.c index e43472b3a..6beb6cdbf 100644 --- a/src/scripting.c +++ b/src/scripting.c @@ -1856,7 +1856,7 @@ void ldbSendLogs(void) { int ldbStartSession(client *c) { ldb.forked = (c->flags & CLIENT_LUA_DEBUG_SYNC) == 0; if (ldb.forked) { - pid_t cp = redisFork(); + pid_t cp = redisFork(CHILD_TYPE_LDB); if (cp == -1) { addReplyError(c,"Fork() failed: can't run EVAL in debugging mode."); return 0; diff --git a/src/server.c b/src/server.c index a88ffc6ee..ed416fb4c 100644 --- a/src/server.c +++ b/src/server.c @@ -2814,6 +2814,7 @@ void initServer(void) { server.aof_state = server.aof_enabled ? AOF_ON : AOF_OFF; server.hz = server.config_hz; server.pid = getpid(); + server.in_fork_child = CHILD_TYPE_NONE; server.main_thread_id = pthread_self(); server.current_client = NULL; server.fixed_time_expire = 0; @@ -4890,7 +4891,8 @@ void setupSignalHandlers(void) { * accepting writes because of a write error condition. 
*/ static void sigKillChildHandler(int sig) { UNUSED(sig); - serverLogFromHandler(LL_WARNING, "Received SIGUSR1 in child, exiting now."); + int level = server.in_fork_child == CHILD_TYPE_MODULE? LL_VERBOSE: LL_WARNING; + serverLogFromHandler(level, "Received SIGUSR1 in child, exiting now."); exitFromChild(SERVER_CHILD_NOERROR_RETVAL); } @@ -4916,11 +4918,13 @@ void closeClildUnusedResourceAfterFork() { close(server.cluster_config_file_lock_fd); /* don't care if this fails */ } -int redisFork() { +/* purpose is one of CHILD_TYPE_ types */ +int redisFork(int purpose) { int childpid; long long start = ustime(); if ((childpid = fork()) == 0) { /* Child */ + server.in_fork_child = purpose; setOOMScoreAdj(CONFIG_OOM_BGCHILD); setupChildSignalHandlers(); closeClildUnusedResourceAfterFork(); diff --git a/src/server.h b/src/server.h index ba470c303..66d373944 100644 --- a/src/server.h +++ b/src/server.h @@ -1043,9 +1043,11 @@ struct clusterState; #endif #define CHILD_INFO_MAGIC 0xC17DDA7A12345678LL -#define CHILD_INFO_TYPE_RDB 0 -#define CHILD_INFO_TYPE_AOF 1 -#define CHILD_INFO_TYPE_MODULE 3 +#define CHILD_TYPE_NONE 0 +#define CHILD_TYPE_RDB 1 +#define CHILD_TYPE_AOF 2 +#define CHILD_TYPE_LDB 3 +#define CHILD_TYPE_MODULE 4 struct redisServer { /* General */ @@ -1059,6 +1061,7 @@ struct redisServer { the actual 'hz' field value if dynamic-hz is enabled. */ int hz; /* serverCron() calls frequency in hertz */ + int in_fork_child; /* indication that this is a fork child */ redisDb *db; dict *commands; /* Command table */ dict *orig_commands; /* Command table before command renaming. 
*/ @@ -1889,7 +1892,7 @@ void sendChildInfo(int process_type); void receiveChildInfo(void); /* Fork helpers */ -int redisFork(); +int redisFork(int type); int hasActiveChildProcess(); void sendChildCOWInfo(int ptype, char *pname); From 4a3330c9412d98310c8312f46d56fa55623bbffb Mon Sep 17 00:00:00 2001 From: WuYunlong Date: Fri, 18 Sep 2020 16:08:52 +0800 Subject: [PATCH 016/114] Make main thread killable so that it can be canceled at any time. Refine comment of makeThreadKillable(). This commit can be backported to 5.0, only if we also backport cf8a6e3. Co-authored-by: Oran Agra (cherry picked from commit d2291627305d606a5d3b1e3b3bfa17ab10a3ef32) --- src/bio.c | 5 +---- src/server.c | 9 +++++++++ src/server.h | 1 + 3 files changed, 11 insertions(+), 4 deletions(-) diff --git a/src/bio.c b/src/bio.c index 33465a166..ff1108799 100644 --- a/src/bio.c +++ b/src/bio.c @@ -168,10 +168,7 @@ void *bioProcessBackgroundJobs(void *arg) { redisSetCpuAffinity(server.bio_cpulist); - /* Make the thread killable at any time, so that bioKillThreads() - * can work reliably. */ - pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, NULL); - pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, NULL); + makeThreadKillable(); pthread_mutex_lock(&bio_mutex[type]); /* Block SIGALRM so we are sure that only the main thread will diff --git a/src/server.c b/src/server.c index ed416fb4c..5face48bb 100644 --- a/src/server.c +++ b/src/server.c @@ -2798,12 +2798,21 @@ void resetServerStats(void) { server.aof_delayed_fsync = 0; } +/* Make the thread killable at any time, so that kill threads functions + * can work reliably (default cancelability type is PTHREAD_CANCEL_DEFERRED). + * Needed for pthread_cancel used by the fast memory test used by the crash report. 
*/ +void makeThreadKillable(void) { + pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, NULL); + pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, NULL); +} + void initServer(void) { int j; signal(SIGHUP, SIG_IGN); signal(SIGPIPE, SIG_IGN); setupSignalHandlers(); + makeThreadKillable(); if (server.syslog_enabled) { openlog(server.syslog_ident, LOG_PID | LOG_NDELAY | LOG_NOWAIT, diff --git a/src/server.h b/src/server.h index 66d373944..3317092a0 100644 --- a/src/server.h +++ b/src/server.h @@ -2471,6 +2471,7 @@ void xorDigest(unsigned char *digest, void *ptr, size_t len); int populateCommandTableParseFlags(struct redisCommand *c, char *strflags); void killIOThreads(void); void killThreads(void); +void makeThreadKillable(void); /* TLS stuff */ void tlsInit(void); From 5021ed7c458fa8d995050e2480835f2cfac1de10 Mon Sep 17 00:00:00 2001 From: WuYunlong Date: Fri, 18 Sep 2020 16:10:54 +0800 Subject: [PATCH 017/114] Make IO threads killable so that they can be canceled at any time. This commit can be cherry picked to 6.0 only if we also cherry pick e9b6077. (cherry picked from commit 6c6ab16e5a31c09a6ea09f1b4638c121e610566a) --- src/networking.c | 1 + 1 file changed, 1 insertion(+) diff --git a/src/networking.c b/src/networking.c index b7d6d6211..e738d1c92 100644 --- a/src/networking.c +++ b/src/networking.c @@ -2948,6 +2948,7 @@ void *IOThreadMain(void *myid) { snprintf(thdname, sizeof(thdname), "io_thd_%ld", id); redis_set_thread_title(thdname); redisSetCpuAffinity(server.server_cpulist); + makeThreadKillable(); while(1) { /* Wait for start */ From 8f4ad687afaf56de7324ec6ee7fff533c8da4062 Mon Sep 17 00:00:00 2001 From: Wen Hui Date: Tue, 22 Sep 2020 02:05:47 -0400 Subject: [PATCH 018/114] refactor rewriteStreamObject code for adding missing streamIteratorStop call (#7829) This commit adds streamIteratorStop call in rewriteStreamObject function in some of the return statement. Although currently this will not cause memory leak since stream id is only 16 bytes long. 
(cherry picked from commit 7934f163b4b6c1c0c0fc55710d3c7e49f56281f1) --- src/aof.c | 54 ++++++++++++++++++++++++++++++++++++------------------ 1 file changed, 36 insertions(+), 18 deletions(-) diff --git a/src/aof.c b/src/aof.c index 757c68807..2114a17e4 100644 --- a/src/aof.c +++ b/src/aof.c @@ -1201,16 +1201,24 @@ int rewriteStreamObject(rio *r, robj *key, robj *o) { * the ID, the second is an array of field-value pairs. */ /* Emit the XADD ...fields... command. */ - if (rioWriteBulkCount(r,'*',3+numfields*2) == 0) return 0; - if (rioWriteBulkString(r,"XADD",4) == 0) return 0; - if (rioWriteBulkObject(r,key) == 0) return 0; - if (rioWriteBulkStreamID(r,&id) == 0) return 0; + if (!rioWriteBulkCount(r,'*',3+numfields*2) || + !rioWriteBulkString(r,"XADD",4) || + !rioWriteBulkObject(r,key) || + !rioWriteBulkStreamID(r,&id)) + { + streamIteratorStop(&si); + return 0; + } while(numfields--) { unsigned char *field, *value; int64_t field_len, value_len; streamIteratorGetField(&si,&field,&value,&field_len,&value_len); - if (rioWriteBulkString(r,(char*)field,field_len) == 0) return 0; - if (rioWriteBulkString(r,(char*)value,value_len) == 0) return 0; + if (!rioWriteBulkString(r,(char*)field,field_len) || + !rioWriteBulkString(r,(char*)value,value_len)) + { + streamIteratorStop(&si); + return 0; + } } } } else { @@ -1218,22 +1226,30 @@ int rewriteStreamObject(rio *r, robj *key, robj *o) { * the key we are serializing is an empty string, which is possible * for the Stream type. 
*/ id.ms = 0; id.seq = 1; - if (rioWriteBulkCount(r,'*',7) == 0) return 0; - if (rioWriteBulkString(r,"XADD",4) == 0) return 0; - if (rioWriteBulkObject(r,key) == 0) return 0; - if (rioWriteBulkString(r,"MAXLEN",6) == 0) return 0; - if (rioWriteBulkString(r,"0",1) == 0) return 0; - if (rioWriteBulkStreamID(r,&id) == 0) return 0; - if (rioWriteBulkString(r,"x",1) == 0) return 0; - if (rioWriteBulkString(r,"y",1) == 0) return 0; + if (!rioWriteBulkCount(r,'*',7) || + !rioWriteBulkString(r,"XADD",4) || + !rioWriteBulkObject(r,key) || + !rioWriteBulkString(r,"MAXLEN",6) || + !rioWriteBulkString(r,"0",1) || + !rioWriteBulkStreamID(r,&id) || + !rioWriteBulkString(r,"x",1) || + !rioWriteBulkString(r,"y",1)) + { + streamIteratorStop(&si); + return 0; + } } /* Append XSETID after XADD, make sure lastid is correct, * in case of XDEL lastid. */ - if (rioWriteBulkCount(r,'*',3) == 0) return 0; - if (rioWriteBulkString(r,"XSETID",6) == 0) return 0; - if (rioWriteBulkObject(r,key) == 0) return 0; - if (rioWriteBulkStreamID(r,&s->last_id) == 0) return 0; + if (!rioWriteBulkCount(r,'*',3) || + !rioWriteBulkString(r,"XSETID",6) || + !rioWriteBulkObject(r,key) || + !rioWriteBulkStreamID(r,&s->last_id)) + { + streamIteratorStop(&si); + return 0; + } /* Create all the stream consumer groups. */ @@ -1252,6 +1268,7 @@ int rewriteStreamObject(rio *r, robj *key, robj *o) { !rioWriteBulkStreamID(r,&group->last_id)) { raxStop(&ri); + streamIteratorStop(&si); return 0; } @@ -1277,6 +1294,7 @@ int rewriteStreamObject(rio *r, robj *key, robj *o) { raxStop(&ri_pel); raxStop(&ri_cons); raxStop(&ri); + streamIteratorStop(&si); return 0; } } From 29f6e9fe9593a237ddf82e695db37dff90ecc6b8 Mon Sep 17 00:00:00 2001 From: Ariel Shtul Date: Tue, 22 Sep 2020 10:18:07 +0300 Subject: [PATCH 019/114] Fix redis-check-rdb support for modules aux data (#7826) redis-check-rdb was unable to parse rdb files containing module aux data. 
Co-authored-by: Oran Agra (cherry picked from commit b914d4fc4825cc20cebca43431af5029ee077d09) --- src/rdb.c | 2 ++ src/rdb.h | 1 + src/redis-check-rdb.c | 19 ++++++++++++++++++- 3 files changed, 21 insertions(+), 1 deletion(-) diff --git a/src/rdb.c b/src/rdb.c index fe9397624..5a6be6e38 100644 --- a/src/rdb.c +++ b/src/rdb.c @@ -1146,6 +1146,8 @@ ssize_t rdbSaveSingleModuleAux(rio *rdb, int when, moduleType *mt) { /* Save a module-specific aux value. */ RedisModuleIO io; int retval = rdbSaveType(rdb, RDB_OPCODE_MODULE_AUX); + if (retval == -1) return -1; + io.bytes += retval; /* Write the "module" identifier as prefix, so that we'll be able * to call the right module during loading. */ diff --git a/src/rdb.h b/src/rdb.h index 885cf49c6..f22fbecd1 100644 --- a/src/rdb.h +++ b/src/rdb.h @@ -149,6 +149,7 @@ robj *rdbLoadObject(int type, rio *rdb, sds key); void backgroundSaveDoneHandler(int exitcode, int bysignal); int rdbSaveKeyValuePair(rio *rdb, robj *key, robj *val, long long expiretime); ssize_t rdbSaveSingleModuleAux(rio *rdb, int when, moduleType *mt); +robj *rdbLoadCheckModuleValue(rio *rdb, char *modulename); robj *rdbLoadStringObject(rio *rdb); ssize_t rdbSaveStringObject(rio *rdb, robj *obj); ssize_t rdbSaveRawString(rio *rdb, unsigned char *s, size_t len); diff --git a/src/redis-check-rdb.c b/src/redis-check-rdb.c index 592feaf42..a9d110aa8 100644 --- a/src/redis-check-rdb.c +++ b/src/redis-check-rdb.c @@ -58,6 +58,7 @@ struct { #define RDB_CHECK_DOING_CHECK_SUM 5 #define RDB_CHECK_DOING_READ_LEN 6 #define RDB_CHECK_DOING_READ_AUX 7 +#define RDB_CHECK_DOING_READ_MODULE_AUX 8 char *rdb_check_doing_string[] = { "start", @@ -67,7 +68,8 @@ char *rdb_check_doing_string[] = { "read-object-value", "check-sum", "read-len", - "read-aux" + "read-aux", + "read-module-aux" }; char *rdb_type_string[] = { @@ -272,6 +274,21 @@ int redis_check_rdb(char *rdbfilename, FILE *fp) { decrRefCount(auxkey); decrRefCount(auxval); continue; /* Read type again. 
*/ + } else if (type == RDB_OPCODE_MODULE_AUX) { + /* AUX: Auxiliary data for modules. */ + uint64_t moduleid, when_opcode, when; + rdbstate.doing = RDB_CHECK_DOING_READ_MODULE_AUX; + if ((moduleid = rdbLoadLen(&rdb,NULL)) == RDB_LENERR) goto eoferr; + if ((when_opcode = rdbLoadLen(&rdb,NULL)) == RDB_LENERR) goto eoferr; + if ((when = rdbLoadLen(&rdb,NULL)) == RDB_LENERR) goto eoferr; + + char name[10]; + moduleTypeNameByID(name,moduleid); + rdbCheckInfo("MODULE AUX for: %s", name); + + robj *o = rdbLoadCheckModuleValue(&rdb,name); + decrRefCount(o); + continue; /* Read type again. */ } else { if (!rdbIsObjectType(type)) { rdbCheckError("Invalid object type: %d", type); From 24f258e39cf6573720704b2b37c905ded77f759e Mon Sep 17 00:00:00 2001 From: Yossi Gottlieb Date: Tue, 22 Sep 2020 11:38:52 +0300 Subject: [PATCH 020/114] Fix occasional hangs on replication reconnection. (#7830) This happens only on diskless replicas when attempting to reconnect after failing to load an RDB file. It is more likely to occur with larger datasets. After reconnection is initiated, replicationEmptyDbCallback() may get called and try to write to an unconnected socket. This triggered another issue where the connection is put into an error state and the connect handler never gets called. The problem is a regression introduced by commit cad93ed. 
(cherry picked from commit ecd86283ec292c1062f377f5707be57a8a77adb4) --- src/connection.c | 14 ++++++++++++-- src/replication.c | 3 ++- 2 files changed, 14 insertions(+), 3 deletions(-) diff --git a/src/connection.c b/src/connection.c index 23b44a314..415cbdf78 100644 --- a/src/connection.c +++ b/src/connection.c @@ -168,7 +168,12 @@ static int connSocketWrite(connection *conn, const void *data, size_t data_len) int ret = write(conn->fd, data, data_len); if (ret < 0 && errno != EAGAIN) { conn->last_errno = errno; - conn->state = CONN_STATE_ERROR; + + /* Don't overwrite the state of a connection that is not already + * connected, not to mess with handler callbacks. + */ + if (conn->state == CONN_STATE_CONNECTED) + conn->state = CONN_STATE_ERROR; } return ret; @@ -180,7 +185,12 @@ static int connSocketRead(connection *conn, void *buf, size_t buf_len) { conn->state = CONN_STATE_CLOSED; } else if (ret < 0 && errno != EAGAIN) { conn->last_errno = errno; - conn->state = CONN_STATE_ERROR; + + /* Don't overwrite the state of a connection that is not already + * connected, not to mess with handler callbacks. + */ + if (conn->state == CONN_STATE_CONNECTED) + conn->state = CONN_STATE_ERROR; } return ret; diff --git a/src/replication.c b/src/replication.c index 6feb9ab6c..047449c4b 100644 --- a/src/replication.c +++ b/src/replication.c @@ -1374,7 +1374,8 @@ void replicationSendNewlineToMaster(void) { * the new dataset received by the master. 
*/ void replicationEmptyDbCallback(void *privdata) { UNUSED(privdata); - replicationSendNewlineToMaster(); + if (server.repl_state == REPL_STATE_TRANSFER) + replicationSendNewlineToMaster(); } /* Once we have a link with the master and the synchronization was From 90e8da536e919a99a14790a057acb3853a668591 Mon Sep 17 00:00:00 2001 From: Oran Agra Date: Tue, 22 Sep 2020 12:11:19 +0300 Subject: [PATCH 021/114] RM_GetContextFlags - document missing flags (#7821) (cherry picked from commit 78c80b3f8c4d37884ee387ef44abdd83664ee448) --- src/module.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/src/module.c b/src/module.c index 4cb4cfe20..feaff0223 100644 --- a/src/module.c +++ b/src/module.c @@ -1922,6 +1922,12 @@ int RM_GetSelectedDb(RedisModuleCtx *ctx) { * * * REDISMODULE_CTX_FLAGS_ACTIVE_CHILD: There is currently some background * process active (RDB, AUX or module). + * + * * REDISMODULE_CTX_FLAGS_MULTI_DIRTY: The next EXEC will fail due to dirty + * CAS (touched keys). + * + * * REDISMODULE_CTX_FLAGS_IS_CHILD: Redis is currently running inside + * background child process. 
*/ int RM_GetContextFlags(RedisModuleCtx *ctx) { From f478a1b6fb146671712086eec9187fa84bb5380e Mon Sep 17 00:00:00 2001 From: yixiang Date: Tue, 22 Sep 2020 17:53:36 +0800 Subject: [PATCH 022/114] Fix connGetSocketError usage (#7811) (cherry picked from commit 4e70e49d2bdaa477d9436a394f8626a1cc6e94af) --- src/connection.c | 5 +++-- src/tls.c | 5 +++-- 2 files changed, 6 insertions(+), 4 deletions(-) diff --git a/src/connection.c b/src/connection.c index 415cbdf78..83fb84d6d 100644 --- a/src/connection.c +++ b/src/connection.c @@ -261,8 +261,9 @@ static void connSocketEventHandler(struct aeEventLoop *el, int fd, void *clientD if (conn->state == CONN_STATE_CONNECTING && (mask & AE_WRITABLE) && conn->conn_handler) { - if (connGetSocketError(conn)) { - conn->last_errno = errno; + int conn_error = connGetSocketError(conn); + if (conn_error) { + conn->last_errno = conn_error; conn->state = CONN_STATE_ERROR; } else { conn->state = CONN_STATE_CONNECTED; diff --git a/src/tls.c b/src/tls.c index 52887cd23..f55d25c78 100644 --- a/src/tls.c +++ b/src/tls.c @@ -464,8 +464,9 @@ static void tlsHandleEvent(tls_connection *conn, int mask) { switch (conn->c.state) { case CONN_STATE_CONNECTING: - if (connGetSocketError((connection *) conn)) { - conn->c.last_errno = errno; + int conn_error = connGetSocketError((connection *) conn); + if (conn_error) { + conn->c.last_errno = conn_error; conn->c.state = CONN_STATE_ERROR; } else { if (!(conn->flags & TLS_CONN_FLAG_FD_SET)) { From d3fc73612360e0c8818c3f4d43f4862c5767f579 Mon Sep 17 00:00:00 2001 From: WuYunlong Date: Wed, 23 Sep 2020 14:09:48 +0800 Subject: [PATCH 023/114] Fix redundancy use of semicolon in do-while macros in ziplist.c. (#7832) this is very dangerous bug, but it looks like it didn't cause any harm. 
(cherry picked from commit 00668f782f0d8e987fc2c049c34e100567c0a5c6) --- src/ziplist.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/ziplist.c b/src/ziplist.c index e27875f6e..8721ace39 100644 --- a/src/ziplist.c +++ b/src/ziplist.c @@ -390,7 +390,7 @@ unsigned int zipStoreEntryEncoding(unsigned char *p, unsigned char encoding, uns (lensize) = 1; \ (len) = zipIntSize(encoding); \ } \ -} while(0); +} while(0) /* Encode the length of the previous entry and write it to "p". This only * uses the larger encoding (required in __ziplistCascadeUpdate). */ @@ -426,7 +426,7 @@ unsigned int zipStorePrevEntryLength(unsigned char *p, unsigned int len) { } else { \ (prevlensize) = 5; \ } \ -} while(0); +} while(0) /* Return the length of the previous element, and the number of bytes that * are used in order to encode the previous element length. @@ -444,7 +444,7 @@ unsigned int zipStorePrevEntryLength(unsigned char *p, unsigned int len) { memcpy(&(prevlen), ((char*)(ptr)) + 1, 4); \ memrev32ifbe(&prevlen); \ } \ -} while(0); +} while(0) /* Given a pointer 'p' to the prevlen info that prefixes an entry, this * function returns the difference in number of bytes needed to encode From 06c8f03ba1dd8ed5b4cd84ef117cd8b3f7d87d22 Mon Sep 17 00:00:00 2001 From: David CARLIER Date: Wed, 23 Sep 2020 08:00:31 +0100 Subject: [PATCH 024/114] Further NetBSD update and build fixes. (#7831) mainly backtrace and register dump support. 
(cherry picked from commit 6bc28d99a3a24c31c44e134b12a502441266e8bc) --- src/Makefile | 12 ++++++++++- src/config.h | 4 ++-- src/debug.c | 59 ++++++++++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 72 insertions(+), 3 deletions(-) diff --git a/src/Makefile b/src/Makefile index 873797330..e59089811 100644 --- a/src/Makefile +++ b/src/Makefile @@ -121,12 +121,21 @@ ifeq ($(uname_S),OpenBSD) endif else +ifeq ($(uname_S),NetBSD) + # NetBSD + FINAL_LIBS+= -lpthread + ifeq ($(USE_BACKTRACE),yes) + FINAL_CFLAGS+= -DUSE_BACKTRACE -I/usr/pkg/include + FINAL_LDFLAGS+= -L/usr/pkg/lib + FINAL_LIBS+= -lexecinfo + endif +else ifeq ($(uname_S),FreeBSD) # FreeBSD FINAL_LIBS+= -lpthread -lexecinfo else ifeq ($(uname_S),DragonFly) - # FreeBSD + # DragonFly FINAL_LIBS+= -lpthread -lexecinfo else ifeq ($(uname_S),OpenBSD) @@ -148,6 +157,7 @@ endif endif endif endif +endif # Include paths to dependencies FINAL_CFLAGS+= -I../deps/hiredis -I../deps/linenoise -I../deps/lua/src diff --git a/src/config.h b/src/config.h index f9ec7e44a..d391508fa 100644 --- a/src/config.h +++ b/src/config.h @@ -64,7 +64,7 @@ /* Test for backtrace() */ #if defined(__APPLE__) || (defined(__linux__) && defined(__GLIBC__)) || \ - defined(__FreeBSD__) || (defined(__OpenBSD__) && defined(USE_BACKTRACE))\ + defined(__FreeBSD__) || ((defined(__OpenBSD__) || defined(__NetBSD__)) && defined(USE_BACKTRACE))\ || defined(__DragonFly__) #define HAVE_BACKTRACE 1 #endif @@ -236,7 +236,7 @@ void setproctitle(const char *fmt, ...); #define redis_set_thread_title(name) pthread_set_name_np(pthread_self(), name) #elif defined __NetBSD__ #include -#define redis_set_thread_title(name) pthread_setname_np(pthread_self(), name, NULL) +#define redis_set_thread_title(name) pthread_setname_np(pthread_self(), "%s", name) #else #if (defined __APPLE__ && defined(MAC_OS_X_VERSION_10_7)) int pthread_setname_np(const char *name); diff --git a/src/debug.c b/src/debug.c index e64ec1b78..1a41574e4 100644 --- a/src/debug.c +++ 
b/src/debug.c @@ -967,6 +967,12 @@ static void *getMcontextEip(ucontext_t *uc) { #elif defined(__x86_64__) return (void*) uc->sc_rip; #endif +#elif defined(__NetBSD__) + #if defined(__i386__) + return (void*) uc->uc_mcontext.__gregs[_REG_EIP]; + #elif defined(__x86_64__) + return (void*) uc->uc_mcontext.__gregs[_REG_RIP]; + #endif #elif defined(__DragonFly__) return (void*) uc->uc_mcontext.mc_rip; #else @@ -1324,6 +1330,59 @@ void logRegisters(ucontext_t *uc) { ); logStackContent((void**)uc->sc_esp); #endif +#elif defined(__NetBSD__) + #if defined(__x86_64__) + serverLog(LL_WARNING, + "\n" + "RAX:%016lx RBX:%016lx\nRCX:%016lx RDX:%016lx\n" + "RDI:%016lx RSI:%016lx\nRBP:%016lx RSP:%016lx\n" + "R8 :%016lx R9 :%016lx\nR10:%016lx R11:%016lx\n" + "R12:%016lx R13:%016lx\nR14:%016lx R15:%016lx\n" + "RIP:%016lx EFL:%016lx\nCSGSFS:%016lx", + (unsigned long) uc->uc_mcontext.__gregs[_REG_RAX], + (unsigned long) uc->uc_mcontext.__gregs[_REG_RBX], + (unsigned long) uc->uc_mcontext.__gregs[_REG_RCX], + (unsigned long) uc->uc_mcontext.__gregs[_REG_RDX], + (unsigned long) uc->uc_mcontext.__gregs[_REG_RDI], + (unsigned long) uc->uc_mcontext.__gregs[_REG_RSI], + (unsigned long) uc->uc_mcontext.__gregs[_REG_RBP], + (unsigned long) uc->uc_mcontext.__gregs[_REG_RSP], + (unsigned long) uc->uc_mcontext.__gregs[_REG_R8], + (unsigned long) uc->uc_mcontext.__gregs[_REG_R9], + (unsigned long) uc->uc_mcontext.__gregs[_REG_R10], + (unsigned long) uc->uc_mcontext.__gregs[_REG_R11], + (unsigned long) uc->uc_mcontext.__gregs[_REG_R12], + (unsigned long) uc->uc_mcontext.__gregs[_REG_R13], + (unsigned long) uc->uc_mcontext.__gregs[_REG_R14], + (unsigned long) uc->uc_mcontext.__gregs[_REG_R15], + (unsigned long) uc->uc_mcontext.__gregs[_REG_RIP], + (unsigned long) uc->uc_mcontext.__gregs[_REG_RFLAGS], + (unsigned long) uc->uc_mcontext.__gregs[_REG_CS] + ); + logStackContent((void**)uc->uc_mcontext.__gregs[_REG_RSP]); + #elif defined(__i386__) + serverLog(LL_WARNING, + "\n" + "EAX:%08lx EBX:%08lx 
ECX:%08lx EDX:%08lx\n" + "EDI:%08lx ESI:%08lx EBP:%08lx ESP:%08lx\n" + "SS :%08lx EFL:%08lx EIP:%08lx CS:%08lx\n" + "DS :%08lx ES :%08lx FS :%08lx GS:%08lx", + (unsigned long) uc->uc_mcontext.__gregs[_REG_EAX], + (unsigned long) uc->uc_mcontext.__gregs[_REG_EBX], + (unsigned long) uc->uc_mcontext.__gregs[_REG_EDX], + (unsigned long) uc->uc_mcontext.__gregs[_REG_EDI], + (unsigned long) uc->uc_mcontext.__gregs[_REG_ESI], + (unsigned long) uc->uc_mcontext.__gregs[_REG_EBP], + (unsigned long) uc->uc_mcontext.__gregs[_REG_ESP], + (unsigned long) uc->uc_mcontext.__gregs[_REG_SS], + (unsigned long) uc->uc_mcontext.__gregs[_REG_EFLAGS], + (unsigned long) uc->uc_mcontext.__gregs[_REG_EIP], + (unsigned long) uc->uc_mcontext.__gregs[_REG_CS], + (unsigned long) uc->uc_mcontext.__gregs[_REG_ES], + (unsigned long) uc->uc_mcontext.__gregs[_REG_FS], + (unsigned long) uc->uc_mcontext.__gregs[_REG_GS] + ); + #endif #elif defined(__DragonFly__) serverLog(LL_WARNING, "\n" From 7ab8961c6d588f1d34be7b9e2fa8413cd6117392 Mon Sep 17 00:00:00 2001 From: Oran Agra Date: Wed, 23 Sep 2020 11:30:24 +0300 Subject: [PATCH 025/114] fix recently broken TLS build error, and add coverage for CI (#7833) (cherry picked from commit 270fcb80bf8c5d8458d60d3a494f422d12e1dfaf) --- .github/workflows/ci.yml | 3 ++- src/tls.c | 4 ++-- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 4d6c1c14c..70aebfc87 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -9,7 +9,8 @@ jobs: steps: - uses: actions/checkout@v2 - name: make - run: make + # build with TLS just for compilatoin coverage + run: make BUILD_TLS=yes - name: test run: | sudo apt-get install tcl8.5 diff --git a/src/tls.c b/src/tls.c index f55d25c78..d3173fab0 100644 --- a/src/tls.c +++ b/src/tls.c @@ -454,7 +454,7 @@ void updateSSLEvent(tls_connection *conn) { } static void tlsHandleEvent(tls_connection *conn, int mask) { - int ret; + int ret, conn_error; 
TLSCONN_DEBUG("tlsEventHandler(): fd=%d, state=%d, mask=%d, r=%d, w=%d, flags=%d", fd, conn->c.state, mask, conn->c.read_handler != NULL, conn->c.write_handler != NULL, @@ -464,7 +464,7 @@ static void tlsHandleEvent(tls_connection *conn, int mask) { switch (conn->c.state) { case CONN_STATE_CONNECTING: - int conn_error = connGetSocketError((connection *) conn); + conn_error = connGetSocketError((connection *) conn); if (conn_error) { conn->c.last_errno = conn_error; conn->c.state = CONN_STATE_ERROR; From 66a13267c7b734526de3959511eb14f59132bd46 Mon Sep 17 00:00:00 2001 From: Guy Korland Date: Thu, 24 Sep 2020 12:45:30 +0300 Subject: [PATCH 026/114] Fix RedisModule_HashGet examples (#6697) (cherry picked from commit 04945e0e6d5aadd9fb5a7b47d947d759073af51a) --- src/module.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/module.c b/src/module.c index feaff0223..9c7d74c50 100644 --- a/src/module.c +++ b/src/module.c @@ -2952,22 +2952,22 @@ int RM_HashSet(RedisModuleKey *key, int flags, ...) { * As with RedisModule_HashSet() the behavior of the command can be specified * passing flags different than REDISMODULE_HASH_NONE: * - * REDISMODULE_HASH_CFIELD: field names as null terminated C strings. + * REDISMODULE_HASH_CFIELDS: field names as null terminated C strings. * * REDISMODULE_HASH_EXISTS: instead of setting the value of the field * expecting a RedisModuleString pointer to pointer, the function just * reports if the field exists or not and expects an integer pointer * as the second element of each pair. 
* - * Example of REDISMODULE_HASH_CFIELD: + * Example of REDISMODULE_HASH_CFIELDS: * * RedisModuleString *username, *hashedpass; - * RedisModule_HashGet(mykey,"username",&username,"hp",&hashedpass, NULL); + * RedisModule_HashGet(mykey,REDISMODULE_HASH_CFIELDS,"username",&username,"hp",&hashedpass, NULL); * * Example of REDISMODULE_HASH_EXISTS: * * int exists; - * RedisModule_HashGet(mykey,argv[1],&exists,NULL); + * RedisModule_HashGet(mykey,REDISMODULE_HASH_EXISTS,argv[1],&exists,NULL); * * The function returns REDISMODULE_OK on success and REDISMODULE_ERR if * the key is not an hash value. From 1c4a99c9ec61c68f31ccf121c44fd7108be61f68 Mon Sep 17 00:00:00 2001 From: Wang Yuan Date: Thu, 24 Sep 2020 21:01:41 +0800 Subject: [PATCH 027/114] Don't write replies if close the client ASAP (#7202) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Before this commit, we would have continued to add replies to the reply buffer even if client output buffer limit is reached, so the used memory would keep increasing over the configured limit. What's more, we shouldn’t write any reply to the client if it is set 'CLIENT_CLOSE_ASAP' flag because that doesn't conform to its definition and we will close all clients flagged with 'CLIENT_CLOSE_ASAP' in ‘beforeSleep’. Because of code execution order, before this, we may firstly write to part of the replies to the socket before disconnecting it, but in fact, we may can’t send the full replies to clients since OS socket buffer is limited. But this unexpected behavior makes some commands work well, for instance ACL DELUSER, if the client deletes the current user, we need to send reply to client and close the connection, but before, we close the client firstly and write the reply to reply buffer. secondly, we shouldn't do this despite the fact it works well in most cases. 
We add a flag 'CLIENT_CLOSE_AFTER_COMMAND' to mark clients, this flag means we will close the client after executing commands and send all entire replies, so that we can write replies to reply buffer during executing commands, send replies to clients, and close them later. We also fix some implicit problems. If client output buffer limit is enforced in 'multi/exec', all commands will be executed completely in redis and clients will not read any reply instead of partial replies. Even more, if the client executes 'ACL deluser' the using user in 'multi/exec', it will not read the replies after 'ACL deluser' just like before executing 'client kill' itself in 'multi/exec'. We added some tests for output buffer limit breach during multi-exec and using a pipeline of many small commands rather than one with big response. Co-authored-by: Oran Agra (cherry picked from commit 3085577c095a0f3b1261f6dbf016d7701aadab46) --- src/acl.c | 8 +++- src/module.c | 8 +++- src/networking.c | 14 ++++++ src/server.c | 7 +++ src/server.h | 2 + tests/unit/acl.tcl | 17 +++++++ tests/unit/obuf-limits.tcl | 90 ++++++++++++++++++++++++++++++++++++++ 7 files changed, 144 insertions(+), 2 deletions(-) diff --git a/src/acl.c b/src/acl.c index 74768aa27..5d484a742 100644 --- a/src/acl.c +++ b/src/acl.c @@ -297,7 +297,13 @@ void ACLFreeUserAndKillClients(user *u) { * it in non authenticated mode. */ c->user = DefaultUser; c->authenticated = 0; - freeClientAsync(c); + /* We will write replies to this client later, so we can't + * close it directly even if async. 
*/ + if (c == server.current_client) { + c->flags |= CLIENT_CLOSE_AFTER_COMMAND; + } else { + freeClientAsync(c); + } } } ACLFreeUser(u); diff --git a/src/module.c b/src/module.c index 9c7d74c50..0655272ff 100644 --- a/src/module.c +++ b/src/module.c @@ -5537,7 +5537,13 @@ void revokeClientAuthentication(client *c) { c->user = DefaultUser; c->authenticated = 0; - freeClientAsync(c); + /* We will write replies to this client later, so we can't close it + * directly even if async. */ + if (c == server.current_client) { + c->flags |= CLIENT_CLOSE_AFTER_COMMAND; + } else { + freeClientAsync(c); + } } /* Cleanup all clients that have been authenticated with this module. This diff --git a/src/networking.c b/src/networking.c index e738d1c92..9b744eb0c 100644 --- a/src/networking.c +++ b/src/networking.c @@ -223,6 +223,9 @@ int prepareClientToWrite(client *c) { * handler since there is no socket at all. */ if (c->flags & (CLIENT_LUA|CLIENT_MODULE)) return C_OK; + /* If CLIENT_CLOSE_ASAP flag is set, we need not write anything. */ + if (c->flags & CLIENT_CLOSE_ASAP) return C_ERR; + /* CLIENT REPLY OFF / SKIP handling: don't send replies. */ if (c->flags & (CLIENT_REPLY_OFF|CLIENT_REPLY_SKIP)) return C_ERR; @@ -1436,6 +1439,9 @@ int handleClientsWithPendingWrites(void) { * that may trigger write error or recreate handler. */ if (c->flags & CLIENT_PROTECTED) continue; + /* Don't write to clients that are going to be closed anyway. */ + if (c->flags & CLIENT_CLOSE_ASAP) continue; + /* Try to write buffers to the client socket. */ if (writeToClient(c,0) == C_ERR) continue; @@ -3108,6 +3114,14 @@ int handleClientsWithPendingWritesUsingThreads(void) { while((ln = listNext(&li))) { client *c = listNodeValue(ln); c->flags &= ~CLIENT_PENDING_WRITE; + + /* Remove clients from the list of pending writes since + * they are going to be closed ASAP. 
*/ + if (c->flags & CLIENT_CLOSE_ASAP) { + listDelNode(server.clients_pending_write, ln); + continue; + } + int target_id = item_id % server.io_threads_num; listAddNodeTail(io_threads_list[target_id],c); item_id++; diff --git a/src/server.c b/src/server.c index 5face48bb..1f20efe94 100644 --- a/src/server.c +++ b/src/server.c @@ -3345,6 +3345,13 @@ void call(client *c, int flags) { dirty = server.dirty-dirty; if (dirty < 0) dirty = 0; + /* After executing command, we will close the client after writing entire + * reply if it is set 'CLIENT_CLOSE_AFTER_COMMAND' flag. */ + if (c->flags & CLIENT_CLOSE_AFTER_COMMAND) { + c->flags &= ~CLIENT_CLOSE_AFTER_COMMAND; + c->flags |= CLIENT_CLOSE_AFTER_REPLY; + } + /* When EVAL is called loading the AOF we don't want commands called * from Lua to go into the slowlog or to populate statistics. */ if (server.loading && c->flags & CLIENT_LUA) diff --git a/src/server.h b/src/server.h index 3317092a0..48dcee631 100644 --- a/src/server.h +++ b/src/server.h @@ -264,6 +264,8 @@ extern int configOOMScoreAdjValuesDefaults[CONFIG_OOM_COUNT]; about writes performed by myself.*/ #define CLIENT_IN_TO_TABLE (1ULL<<38) /* This client is in the timeout table. */ #define CLIENT_PROTOCOL_ERROR (1ULL<<39) /* Protocol error chatting with it. */ +#define CLIENT_CLOSE_AFTER_COMMAND (1ULL<<40) /* Close after executing commands + * and writing entire reply. */ /* Client block type (btype field in client structure) * if CLIENT_BLOCKED flag is set. 
*/ diff --git a/tests/unit/acl.tcl b/tests/unit/acl.tcl index 381f2f95f..f015f75a0 100644 --- a/tests/unit/acl.tcl +++ b/tests/unit/acl.tcl @@ -260,6 +260,23 @@ start_server {tags {"acl"}} { catch {r ACL help xxx} e assert_match "*Unknown subcommand or wrong number of arguments*" $e } + + test {Delete a user that the client doesn't use} { + r ACL setuser not_used on >passwd + assert {[r ACL deluser not_used] == 1} + # The client is not closed + assert {[r ping] eq {PONG}} + } + + test {Delete a user that the client is using} { + r ACL setuser using on +acl >passwd + r AUTH using passwd + # The client will receive reply normally + assert {[r ACL deluser using] == 1} + # The client is closed + catch {[r ping]} e + assert_match "*I/O error*" $e + } } set server_path [tmpdir "server.acl"] diff --git a/tests/unit/obuf-limits.tcl b/tests/unit/obuf-limits.tcl index c45bf8e86..20ba32fd5 100644 --- a/tests/unit/obuf-limits.tcl +++ b/tests/unit/obuf-limits.tcl @@ -70,4 +70,94 @@ start_server {tags {"obuf-limits"}} { assert {$omem >= 100000 && $time_elapsed < 6} $rd1 close } + + test {No response for single command if client output buffer hard limit is enforced} { + r config set client-output-buffer-limit {normal 100000 0 0} + # Total size of all items must be more than 100k + set item [string repeat "x" 1000] + for {set i 0} {$i < 150} {incr i} { + r lpush mylist $item + } + set orig_mem [s used_memory] + # Set client name and get all items + set rd [redis_deferring_client] + $rd client setname mybiglist + assert {[$rd read] eq "OK"} + $rd lrange mylist 0 -1 + $rd flush + after 100 + + # Before we read reply, redis will close this client. 
+ set clients [r client list] + assert_no_match "*name=mybiglist*" $clients + set cur_mem [s used_memory] + # 10k just is a deviation threshold + assert {$cur_mem < 10000 + $orig_mem} + + # Read nothing + set fd [$rd channel] + assert_equal {} [read $fd] + } + + test {No response for multi commands in pipeline if client output buffer limit is enforced} { + r config set client-output-buffer-limit {normal 100000 0 0} + set value [string repeat "x" 10000] + r set bigkey $value + set rd1 [redis_deferring_client] + set rd2 [redis_deferring_client] + $rd2 client setname multicommands + assert_equal "OK" [$rd2 read] + # Let redis sleep 2s firstly + $rd1 debug sleep 2 + $rd1 flush + after 100 + + # Total size should be less than OS socket buffer, redis can + # execute all commands in this pipeline when it wakes up. + for {set i 0} {$i < 15} {incr i} { + $rd2 set $i $i + $rd2 get $i + $rd2 del $i + # One bigkey is 10k, total response size must be more than 100k + $rd2 get bigkey + } + $rd2 flush + after 100 + + # Reds must wake up if it can send reply + assert_equal "PONG" [r ping] + set clients [r client list] + assert_no_match "*name=multicommands*" $clients + set fd [$rd2 channel] + assert_equal {} [read $fd] + } + + test {Execute transactions completely even if client output buffer limit is enforced} { + r config set client-output-buffer-limit {normal 100000 0 0} + # Total size of all items must be more than 100k + set item [string repeat "x" 1000] + for {set i 0} {$i < 150} {incr i} { + r lpush mylist2 $item + } + + # Output buffer limit is enforced during executing transaction + r client setname transactionclient + r set k1 v1 + r multi + r set k2 v2 + r get k2 + r lrange mylist2 0 -1 + r set k3 v3 + r del k1 + catch {[r exec]} e + assert_match "*I/O error*" $e + reconnect + set clients [r client list] + assert_no_match "*name=transactionclient*" $clients + + # Transactions should be executed completely + assert_equal {} [r get k1] + assert_equal "v2" [r get k2] + 
assert_equal "v3" [r get k3] + } } From ac867bfb6a348dd792ef681fe7d5121cfe28cdcb Mon Sep 17 00:00:00 2001 From: Wen Hui Date: Thu, 24 Sep 2020 11:17:53 -0400 Subject: [PATCH 028/114] rdb.c: handle fclose error case differently to avoid double fclose (#7307) When fclose would fail, the previous implementation would have attempted to do fclose again this can in theory lead to segfault. other changes: check for non-zero return value as failure rather than a specific error code. this doesn't fix a real bug, just a minor cleanup. (cherry picked from commit c67656fa3541376590fe9a9b146ad5641cb861aa) --- src/rdb.c | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/src/rdb.c b/src/rdb.c index 5a6be6e38..fa3934027 100644 --- a/src/rdb.c +++ b/src/rdb.c @@ -1314,7 +1314,7 @@ werr: /* Write error. */ int rdbSave(char *filename, rdbSaveInfo *rsi) { char tmpfile[256]; char cwd[MAXPATHLEN]; /* Current working dir path for error messages. */ - FILE *fp; + FILE *fp = NULL; rio rdb; int error = 0; @@ -1343,10 +1343,11 @@ int rdbSave(char *filename, rdbSaveInfo *rsi) { } /* Make sure data will not remain on the OS's output buffers */ - if (fflush(fp) == EOF) goto werr; - if (fsync(fileno(fp)) == -1) goto werr; - if (fclose(fp) == EOF) goto werr; - + if (fflush(fp)) goto werr; + if (fsync(fileno(fp))) goto werr; + if (fclose(fp)) { fp = NULL; goto werr; } + fp = NULL; + /* Use RENAME to make sure the DB file is changed atomically only * if the generate DB file is ok. */ if (rename(tmpfile,filename) == -1) { @@ -1372,7 +1373,7 @@ int rdbSave(char *filename, rdbSaveInfo *rsi) { werr: serverLog(LL_WARNING,"Write error saving DB on disk: %s", strerror(errno)); - fclose(fp); + if (fp) fclose(fp); unlink(tmpfile); stopSaving(0); return C_ERR; From 39e1f2e2bd761d1226353369b92d542205cd845b Mon Sep 17 00:00:00 2001 From: WuYunlong Date: Fri, 25 Sep 2020 13:08:06 +0800 Subject: [PATCH 029/114] Add fsync to readSyncBulkPayload(). 
(#7839) We should sync temp DB file before renaming as rdb_fsync_range does not use flag `SYNC_FILE_RANGE_WAIT_AFTER`. Refer to `Linux Programmer's Manual`: SYNC_FILE_RANGE_WAIT_AFTER Wait upon write-out of all pages in the range after performing any write. (cherry picked from commit d119448881655a1529eb6d7d7e78af5f15132536) --- src/replication.c | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/src/replication.c b/src/replication.c index 047449c4b..acc0befbb 100644 --- a/src/replication.c +++ b/src/replication.c @@ -1752,6 +1752,17 @@ void readSyncBulkPayload(connection *conn) { killRDBChild(); } + /* Make sure the new file (also used for persistence) is fully synced + * (not covered by earlier calls to rdb_fsync_range). */ + if (fsync(server.repl_transfer_fd) == -1) { + serverLog(LL_WARNING, + "Failed trying to sync the temp DB to disk in " + "MASTER <-> REPLICA synchronization: %s", + strerror(errno)); + cancelReplicationHandshake(); + return; + } + /* Rename rdb like renaming rewrite aof asynchronously. */ int old_rdb_fd = open(server.rdb_filename,O_RDONLY|O_NONBLOCK); if (rename(server.repl_transfer_tmpfile,server.rdb_filename) == -1) { From 10be3d96d872770ab3d8df644290d765a59c47b9 Mon Sep 17 00:00:00 2001 From: Uri Shachar Date: Fri, 25 Sep 2020 12:55:45 +0300 Subject: [PATCH 030/114] Fix config rewrite file handling to make it really atomic (#7824) Make sure we handle short writes correctly, sync to disk after writing and use rename to make sure the replacement is actually atomic. In any case of failure old configuration will remain in place. Also, add some additional logging to make it easier to diagnose rewrite problems. 
(cherry picked from commit 8dbe91f0316f08d785bad1e8e28f1c13ddfbef2c) --- src/config.c | 82 +++++++++++++++++++++++++++------------------------- 1 file changed, 42 insertions(+), 40 deletions(-) diff --git a/src/config.c b/src/config.c index 63852ff4f..2902758fa 100644 --- a/src/config.c +++ b/src/config.c @@ -1543,60 +1543,62 @@ void rewriteConfigRemoveOrphaned(struct rewriteConfigState *state) { dictReleaseIterator(di); } -/* This function overwrites the old configuration file with the new content. - * - * 1) The old file length is obtained. - * 2) If the new content is smaller, padding is added. - * 3) A single write(2) call is used to replace the content of the file. - * 4) Later the file is truncated to the length of the new content. - * - * This way we are sure the file is left in a consistent state even if the - * process is stopped between any of the four operations. +/* This function replaces the old configuration file with the new content + * in an atomic manner. * * The function returns 0 on success, otherwise -1 is returned and errno - * set accordingly. */ + * is set accordingly. */ int rewriteConfigOverwriteFile(char *configfile, sds content) { - int retval = 0; - int fd = open(configfile,O_RDWR|O_CREAT,0644); - int content_size = sdslen(content), padding = 0; - struct stat sb; - sds content_padded; + int fd = -1; + int retval = -1; + char tmp_conffile[PATH_MAX]; + const char *tmp_suffix = ".XXXXXX"; + size_t offset = 0; + ssize_t written_bytes = 0; - /* 1) Open the old file (or create a new one if it does not - * exist), get the size. */ - if (fd == -1) return -1; /* errno set by open(). */ - if (fstat(fd,&sb) == -1) { - close(fd); - return -1; /* errno set by fstat(). 
*/ + int tmp_path_len = snprintf(tmp_conffile, sizeof(tmp_conffile), "%s%s", configfile, tmp_suffix); + if (tmp_path_len <= 0 || (unsigned int)tmp_path_len >= sizeof(tmp_conffile)) { + serverLog(LL_WARNING, "Config file full path is too long"); + errno = ENAMETOOLONG; + return retval; } - /* 2) Pad the content at least match the old file size. */ - content_padded = sdsdup(content); - if (content_size < sb.st_size) { - /* If the old file was bigger, pad the content with - * a newline plus as many "#" chars as required. */ - padding = sb.st_size - content_size; - content_padded = sdsgrowzero(content_padded,sb.st_size); - content_padded[content_size] = '\n'; - memset(content_padded+content_size+1,'#',padding-1); +#ifdef _GNU_SOURCE + fd = mkostemp(tmp_conffile, O_CLOEXEC); +#else + /* There's a theoretical chance here to leak the FD if a module thread forks & execv in the middle */ + fd = mkstemp(tmp_conffile); +#endif + + if (fd == -1) { + serverLog(LL_WARNING, "Could not create tmp config file (%s)", strerror(errno)); + return retval; } - /* 3) Write the new content using a single write(2). */ - if (write(fd,content_padded,strlen(content_padded)) == -1) { - retval = -1; - goto cleanup; + while (offset < sdslen(content)) { + written_bytes = write(fd, content + offset, sdslen(content) - offset); + if (written_bytes <= 0) { + if (errno == EINTR) continue; /* FD is blocking, no other retryable errors */ + serverLog(LL_WARNING, "Failed after writing (%ld) bytes to tmp config file (%s)", offset, strerror(errno)); + goto cleanup; + } + offset+=written_bytes; } - /* 4) Truncate the file to the right length if we used padding. */ - if (padding) { - if (ftruncate(fd,content_size) == -1) { - /* Non critical error... 
*/ - } + if (fsync(fd)) + serverLog(LL_WARNING, "Could not sync tmp config file to disk (%s)", strerror(errno)); + else if (fchmod(fd, 0644) == -1) + serverLog(LL_WARNING, "Could not chmod config file (%s)", strerror(errno)); + else if (rename(tmp_conffile, configfile) == -1) + serverLog(LL_WARNING, "Could not rename tmp config file (%s)", strerror(errno)); + else { + retval = 0; + serverLog(LL_DEBUG, "Rewritten config file (%s) successfully", configfile); } cleanup: - sdsfree(content_padded); close(fd); + if (retval) unlink(tmp_conffile); return retval; } From 9042852ea1e2e09a59180e7726955f0a1eac19db Mon Sep 17 00:00:00 2001 From: Wang Yuan Date: Fri, 25 Sep 2020 21:25:47 +0800 Subject: [PATCH 031/114] Set 'loading' and 'shutdown_asap' to volatile sig_atomic_t type (#7845) We may access and modify these two variables in signal handler function, to guarantee them async-signal-safe, so we should set them to volatile sig_atomic_t type. It doesn't look like this could have caused any real issue, and it seems that signals are handled in main thread on most platforms. But we want to follow C and POSIX standard in signal handler function. (cherry picked from commit 917043fa438d9bbe9a80fb838fcfd33a7e390952) --- src/server.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/server.h b/src/server.h index 48dcee631..b12e4587d 100644 --- a/src/server.h +++ b/src/server.h @@ -1069,7 +1069,7 @@ struct redisServer { dict *orig_commands; /* Command table before command renaming. 
*/ aeEventLoop *el; _Atomic unsigned int lruclock; /* Clock for LRU eviction */ - int shutdown_asap; /* SHUTDOWN needed ASAP */ + volatile sig_atomic_t shutdown_asap; /* SHUTDOWN needed ASAP */ int activerehashing; /* Incremental rehash in serverCron() */ int active_defrag_running; /* Active defragmentation running (holds current scan aggressiveness) */ char *pidfile; /* PID file path */ @@ -1126,7 +1126,7 @@ struct redisServer { long long events_processed_while_blocked; /* processEventsWhileBlocked() */ /* RDB / AOF loading information */ - int loading; /* We are loading data from disk if true */ + volatile sig_atomic_t loading; /* We are loading data from disk if true */ off_t loading_total_bytes; off_t loading_loaded_bytes; time_t loading_start_time; From a6f8745127915dfb8ba3ee9cfff6cc34bce07e82 Mon Sep 17 00:00:00 2001 From: Wang Yuan Date: Sun, 27 Sep 2020 17:35:16 +0800 Subject: [PATCH 032/114] Don't support Gopher if enable io threads to read queries (#7851) There's currently an issue with IO threads and gopher (issuing lookupKey from within the thread). simply fix is to just not support it for now. (cherry picked from commit 9bdef76f8e3bbfaacf0962ab1ceded1bafa80bda) --- redis.conf | 8 +++++--- src/networking.c | 5 +++-- 2 files changed, 8 insertions(+), 5 deletions(-) diff --git a/redis.conf b/redis.conf index 38499b276..ab0a30cbd 100644 --- a/redis.conf +++ b/redis.conf @@ -1519,8 +1519,11 @@ notify-keyspace-events "" # # So use the 'requirepass' option to protect your instance. # -# To enable Gopher support uncomment the following line and set -# the option from no (the default) to yes. +# Note that Gopher is not currently supported when 'io-threads-do-reads' +# is enabled. +# +# To enable Gopher support, uncomment the following line and set the option +# from no (the default) to yes. 
# # gopher-enabled no @@ -1860,4 +1863,3 @@ jemalloc-bg-thread yes # # Set bgsave child process to cpu affinity 1,10,11 # bgsave_cpulist 1,10-11 - diff --git a/src/networking.c b/src/networking.c index 9b744eb0c..445150ab3 100644 --- a/src/networking.c +++ b/src/networking.c @@ -1884,8 +1884,9 @@ void processInputBuffer(client *c) { if (c->reqtype == PROTO_REQ_INLINE) { if (processInlineBuffer(c) != C_OK) break; /* If the Gopher mode and we got zero or one argument, process - * the request in Gopher mode. */ - if (server.gopher_enabled && + * the request in Gopher mode. To avoid data race, Redis won't + * support Gopher if enable io threads to read queries. */ + if (server.gopher_enabled && !server.io_threads_do_reads && ((c->argc == 1 && ((char*)(c->argv[0]->ptr))[0] == '/') || c->argc == 0)) { From 5ed795f025ef6f4955df4012aafeed9df94811e4 Mon Sep 17 00:00:00 2001 From: caozb <1162650653@qq.com> Date: Sun, 27 Sep 2020 20:40:07 +0800 Subject: [PATCH 033/114] ignore slaveof no one in redis.conf (#7842) when slaveof config is "no one", reset any pre-existing config and resume. also solve a memory leak if slaveof appears twice. and fail loading if port number is out of range or not an integer. 
Co-authored-by: caozhengbin Co-authored-by: Oran Agra (cherry picked from commit 01694608cb4e39a6ec7970d24b21ab33b7347e31) --- src/config.c | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/src/config.c b/src/config.c index 2902758fa..d8915343e 100644 --- a/src/config.c +++ b/src/config.c @@ -464,8 +464,17 @@ void loadServerConfigFromString(char *config) { } else if ((!strcasecmp(argv[0],"slaveof") || !strcasecmp(argv[0],"replicaof")) && argc == 3) { slaveof_linenum = linenum; + sdsfree(server.masterhost); + if (!strcasecmp(argv[1], "no") && !strcasecmp(argv[2], "one")) { + server.masterhost = NULL; + continue; + } server.masterhost = sdsnew(argv[1]); - server.masterport = atoi(argv[2]); + char *ptr; + server.masterport = strtol(argv[2], &ptr, 10); + if (server.masterport < 0 || server.masterport > 65535 || *ptr != '\0') { + err = "Invalid master port"; goto loaderr; + } server.repl_state = REPL_STATE_CONNECT; } else if (!strcasecmp(argv[0],"requirepass") && argc == 2) { if (strlen(argv[1]) > CONFIG_AUTHPASS_MAX_LEN) { From 0fc601b3dd9d571a72551bf41826de6f73a62ec7 Mon Sep 17 00:00:00 2001 From: Oran Agra Date: Sun, 27 Sep 2020 17:13:33 +0300 Subject: [PATCH 034/114] Fix new obuf-limits tests to work with TLS (#7848) Also stabilize new shutdown tests on slow machines (valgrind) (cherry picked from commit d89ae2d7ab3f6d181689b2546f2784b574d9b80e) --- tests/unit/obuf-limits.tcl | 20 ++++++++++++++------ tests/unit/shutdown.tcl | 22 +++++++++++++++------- 2 files changed, 29 insertions(+), 13 deletions(-) diff --git a/tests/unit/obuf-limits.tcl b/tests/unit/obuf-limits.tcl index 20ba32fd5..456d3ac82 100644 --- a/tests/unit/obuf-limits.tcl +++ b/tests/unit/obuf-limits.tcl @@ -99,6 +99,8 @@ start_server {tags {"obuf-limits"}} { assert_equal {} [read $fd] } + # Note: This test assumes that what's written with one write, will be read by redis in one read. 
+ # this assumption is wrong, but seem to work empirically (for now) test {No response for multi commands in pipeline if client output buffer limit is enforced} { r config set client-output-buffer-limit {normal 100000 0 0} set value [string repeat "x" 10000] @@ -107,20 +109,26 @@ start_server {tags {"obuf-limits"}} { set rd2 [redis_deferring_client] $rd2 client setname multicommands assert_equal "OK" [$rd2 read] - # Let redis sleep 2s firstly - $rd1 debug sleep 2 + + # Let redis sleep 1s firstly + $rd1 debug sleep 1 $rd1 flush after 100 + # Create a pipeline of commands that will be processed in one socket read. + # It is important to use one write, in TLS mode independant writes seem + # to wait for response from the server. # Total size should be less than OS socket buffer, redis can # execute all commands in this pipeline when it wakes up. + set buf "" for {set i 0} {$i < 15} {incr i} { - $rd2 set $i $i - $rd2 get $i - $rd2 del $i + append buf "set $i $i\r\n" + append buf "get $i\r\n" + append buf "del $i\r\n" # One bigkey is 10k, total response size must be more than 100k - $rd2 get bigkey + append buf "get bigkey\r\n" } + $rd2 write $buf $rd2 flush after 100 diff --git a/tests/unit/shutdown.tcl b/tests/unit/shutdown.tcl index 21ea8545d..f48eadc50 100644 --- a/tests/unit/shutdown.tcl +++ b/tests/unit/shutdown.tcl @@ -30,20 +30,28 @@ start_server {tags {"shutdown"}} { for {set i 0} {$i < 20} {incr i} { r set $i $i } - # It will cost 2s(20 * 100ms) to dump rdb + # It will cost 2s (20 * 100ms) to dump rdb r config set rdb-key-save-delay 100000 set pid [s process_id] set temp_rdb [file join [lindex [r config get dir] 1] temp-${pid}.rdb] + # trigger a shutdown which will save an rdb exec kill -SIGINT $pid - after 100 - # Temp rdb must be existed - assert {[file exists $temp_rdb]} + # Wait for creation of temp rdb + wait_for_condition 50 10 { + [file exists $temp_rdb] + } else { + fail "Can't trigger rdb save on shutdown" + } - # Temp rdb file must be deleted + # 
Insist on immediate shutdown, temp rdb file must be deleted exec kill -SIGINT $pid - after 100 - assert {![file exists $temp_rdb]} + # wait for the rdb file to be deleted + wait_for_condition 50 10 { + ![file exists $temp_rdb] + } else { + fail "Can't trigger rdb save on shutdown" + } } } From 58a7774ca4acbfc1079cc2d68a972ce42cdffe50 Mon Sep 17 00:00:00 2001 From: David CARLIER Date: Tue, 29 Sep 2020 06:49:35 +0100 Subject: [PATCH 035/114] getting rss size implementation for netbsd (#7293) (cherry picked from commit 520c3b26c3fce1c86cf0c70961acd0515c8cb498) --- src/zmalloc.c | 20 ++++++++++++++++++++ 1 file changed, 20 insertions(+) diff --git a/src/zmalloc.c b/src/zmalloc.c index 639a5fe2b..2645432b6 100644 --- a/src/zmalloc.c +++ b/src/zmalloc.c @@ -314,6 +314,26 @@ size_t zmalloc_get_rss(void) { return 0L; } +#elif defined(__NetBSD__) +#include +#include +#include + +size_t zmalloc_get_rss(void) { + struct kinfo_proc2 info; + size_t infolen = sizeof(info); + int mib[6]; + mib[0] = CTL_KERN; + mib[1] = KERN_PROC; + mib[2] = KERN_PROC_PID; + mib[3] = getpid(); + mib[4] = sizeof(info); + mib[5] = 1; + if (sysctl(mib, 4, &info, &infolen, NULL, 0) == 0) + return (size_t)info.p_vm_rssize; + + return 0L; +} #else size_t zmalloc_get_rss(void) { /* If we can't get the RSS in an OS-specific way for this system just From c6d664656721f8950050a5ac41b6de0c9f2c71f5 Mon Sep 17 00:00:00 2001 From: Gavrie Philipson Date: Tue, 29 Sep 2020 13:10:08 +0300 Subject: [PATCH 036/114] Fix typo in module API docs (#7861) (cherry picked from commit ce5efb444b203536335ca6dd5d34cb57425b55be) --- src/module.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/module.c b/src/module.c index 0655272ff..bfe7d6886 100644 --- a/src/module.c +++ b/src/module.c @@ -2937,8 +2937,8 @@ int RM_HashSet(RedisModuleKey *key, int flags, ...) { } /* Get fields from an hash value. 
This function is called using a variable - * number of arguments, alternating a field name (as a StringRedisModule - * pointer) with a pointer to a StringRedisModule pointer, that is set to the + * number of arguments, alternating a field name (as a RedisModuleString + * pointer) with a pointer to a RedisModuleString pointer, that is set to the * value of the field if the field exists, or NULL if the field does not exist. * At the end of the field/value-ptr pairs, NULL must be specified as last * argument to signal the end of the arguments in the variadic function. From 4da82110634828ca999217e7c7bd3f0529d117c2 Mon Sep 17 00:00:00 2001 From: David CARLIER Date: Tue, 29 Sep 2020 13:52:13 +0100 Subject: [PATCH 037/114] Add support for Haiku OS (#7435) (cherry picked from commit d535a5061ccd561d0c132b2e97b56a3bd252fde9) --- src/Makefile | 7 +++++++ src/config.h | 4 ++++ src/memtest.c | 5 +++++ 3 files changed, 16 insertions(+) diff --git a/src/Makefile b/src/Makefile index e59089811..a91d457f4 100644 --- a/src/Makefile +++ b/src/Makefile @@ -145,6 +145,12 @@ else ifeq ($(uname_S),NetBSD) # NetBSD FINAL_LIBS+= -lpthread -lexecinfo +else +ifeq ($(uname_S),Haiku) + # Haiku + FINAL_CFLAGS+= -DBSD_SOURCE + FINAL_LDFLAGS+= -lbsd -lnetwork + FINAL_LIBS+= -lpthread else # All the other OSes (notably Linux) FINAL_LDFLAGS+= -rdynamic @@ -158,6 +164,7 @@ endif endif endif endif +endif # Include paths to dependencies FINAL_CFLAGS+= -I../deps/hiredis -I../deps/linenoise -I../deps/lua/src diff --git a/src/config.h b/src/config.h index d391508fa..320837b7e 100644 --- a/src/config.h +++ b/src/config.h @@ -124,6 +124,10 @@ #define USE_SETPROCTITLE #endif +#if defined(__HAIKU__) +#define ESOCKTNOSUPPORT 0 +#endif + #if ((defined __linux && defined(__GLIBC__)) || defined __APPLE__) #define USE_SETPROCTITLE #define INIT_SETPROCTITLE_REPLACEMENT diff --git a/src/memtest.c b/src/memtest.c index a455430f5..cb4d35e83 100644 --- a/src/memtest.c +++ b/src/memtest.c @@ -347,10 +347,15 @@ void 
memtest_alloc_and_test(size_t megabytes, int passes) { } void memtest(size_t megabytes, int passes) { +#if !defined(__HAIKU__) if (ioctl(1, TIOCGWINSZ, &ws) == -1) { ws.ws_col = 80; ws.ws_row = 20; } +#else + ws.ws_col = 80; + ws.ws_row = 20; +#endif memtest_alloc_and_test(megabytes,passes); printf("\nYour memory passed this test.\n"); printf("Please if you are still in doubt use the following two tools:\n"); From c7cae0df77f5d7cb3861ae418c9b726cc13c5ef0 Mon Sep 17 00:00:00 2001 From: Oran Agra Date: Tue, 29 Sep 2020 17:03:47 +0300 Subject: [PATCH 038/114] warning: comparison between signed and unsigned integer in 32bit build (#7838) (cherry picked from commit c11bda25fd2959523cb1e87af5b366cc451dbd04) --- src/listpack.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/listpack.c b/src/listpack.c index 075552ccb..7e2da9b74 100644 --- a/src/listpack.c +++ b/src/listpack.c @@ -768,10 +768,10 @@ unsigned char *lpSeek(unsigned char *lp, long index) { if (numele != LP_HDR_NUMELE_UNKNOWN) { if (index < 0) index = (long)numele+index; if (index < 0) return NULL; /* Index still < 0 means out of range. */ - if (index >= numele) return NULL; /* Out of range the other side. */ + if ((long)index >= numele) return NULL; /* Out of range the other side. */ /* We want to scan right-to-left if the element we are looking for * is past the half of the listpack. */ - if (index > numele/2) { + if ((long)index > numele/2) { forward = 0; /* Right to left scanning always expects a negative index. Convert * our index to negative form. */ From 51a6e1e61a7461c4b1181e3a38e189a833c5d546 Mon Sep 17 00:00:00 2001 From: Yossi Gottlieb Date: Tue, 29 Sep 2020 20:48:21 +0300 Subject: [PATCH 039/114] TLS: Do not require CA config if not used. (#7862) The tls-ca-cert or tls-ca-cert-dir configuration parameters are only used when Redis needs to authenticate peer certificates, in one of these scenarios: 1. Incoming clients or replicas, with `tls-auth-clients` enabled. 2. 
A replica authenticating the master's peer certificate. 3. Cluster nodes authenticating other nodes when establishing the bus protocol connection. (cherry picked from commit 3bd9d0cc85d4ef8f4cc2789e9ab27e5557471409) --- src/tls.c | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/src/tls.c b/src/tls.c index d3173fab0..0a9e07895 100644 --- a/src/tls.c +++ b/src/tls.c @@ -167,8 +167,9 @@ int tlsConfigure(redisTLSContextConfig *ctx_config) { goto error; } - if (!ctx_config->ca_cert_file && !ctx_config->ca_cert_dir) { - serverLog(LL_WARNING, "Either tls-ca-cert-file or tls-ca-cert-dir must be configured!"); + if (((server.tls_auth_clients != TLS_CLIENT_AUTH_NO) || server.tls_cluster || server.tls_replication) && + !ctx_config->ca_cert_file && !ctx_config->ca_cert_dir) { + serverLog(LL_WARNING, "Either tls-ca-cert-file or tls-ca-cert-dir must be specified when tls-cluster, tls-replication or tls-auth-clients are enabled!"); goto error; } @@ -235,7 +236,8 @@ int tlsConfigure(redisTLSContextConfig *ctx_config) { goto error; } - if (SSL_CTX_load_verify_locations(ctx, ctx_config->ca_cert_file, ctx_config->ca_cert_dir) <= 0) { + if ((ctx_config->ca_cert_file || ctx_config->ca_cert_dir) && + SSL_CTX_load_verify_locations(ctx, ctx_config->ca_cert_file, ctx_config->ca_cert_dir) <= 0) { ERR_error_string_n(ERR_get_error(), errbuf, sizeof(errbuf)); serverLog(LL_WARNING, "Failed to configure CA certificate(s) file/directory: %s", errbuf); goto error; From 0816b8fadd3cba0e49418452842f0d5c97a2040f Mon Sep 17 00:00:00 2001 From: Yossi Gottlieb Date: Tue, 18 Aug 2020 17:13:09 +0300 Subject: [PATCH 040/114] Module API: Fail ineffective auth calls. The client pointed to by the module context may in some cases be a fake client. RM_Authenticate*() calls in this case would be ineffective but appear to succeed, and this change fails them to make it easier to catch such cases. 
(cherry picked from commit 82866776d0c26f17043f9c1b0f0f5f48660e6848) --- src/module.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/src/module.c b/src/module.c index bfe7d6886..65d22b713 100644 --- a/src/module.c +++ b/src/module.c @@ -5638,6 +5638,11 @@ static int authenticateClientWithUser(RedisModuleCtx *ctx, user *user, RedisModu return REDISMODULE_ERR; } + /* Avoid settings which are meaningless and will be lost */ + if (!ctx->client || (ctx->client->flags & CLIENT_MODULE)) { + return REDISMODULE_ERR; + } + moduleNotifyUserChanged(ctx->client); ctx->client->user = user; From 38853fd48771ab95797d1201723e6f463839323d Mon Sep 17 00:00:00 2001 From: Yossi Gottlieb Date: Tue, 18 Aug 2020 17:16:08 +0300 Subject: [PATCH 041/114] Modules: expose real client on conn events. When REDISMODULE_EVENT_CLIENT_CHANGE events are delivered, modules may want to mutate the client state (e.g. perform authentication). This change links the module context with the real client rather than a fake client for these events. (cherry picked from commit 4aca4e5f392ad6030150a92a9ef82412072f9622) --- src/module.c | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/src/module.c b/src/module.c index 65d22b713..afb525dbe 100644 --- a/src/module.c +++ b/src/module.c @@ -7263,6 +7263,7 @@ void moduleFireServerEvent(uint64_t eid, int subid, void *data) { * cheap if there are no registered modules. */ if (listLength(RedisModule_EventListeners) == 0) return; + int real_client_used = 0; listIter li; listNode *ln; listRewind(RedisModule_EventListeners,&li); @@ -7272,7 +7273,15 @@ void moduleFireServerEvent(uint64_t eid, int subid, void *data) { RedisModuleCtx ctx = REDISMODULE_CTX_INIT; ctx.module = el->module; - if (ModulesInHooks == 0) { + if (eid == REDISMODULE_EVENT_CLIENT_CHANGE) { + /* In the case of client changes, we're pushing the real client + * so the event handler can mutate it if needed. 
For example, + * to change its authentication state in a way that does not + * depend on specific commands executed later. + */ + ctx.client = (client *) data; + real_client_used = 1; + } else if (ModulesInHooks == 0) { ctx.client = moduleFreeContextReusedClient; } else { ctx.client = createClient(NULL); @@ -7325,7 +7334,7 @@ void moduleFireServerEvent(uint64_t eid, int subid, void *data) { el->module->in_hook--; ModulesInHooks--; - if (ModulesInHooks != 0) freeClient(ctx.client); + if (ModulesInHooks != 0 && !real_client_used) freeClient(ctx.client); moduleFreeContext(&ctx); } } From e7bae8839249ec8d49136929e4644e5187da5336 Mon Sep 17 00:00:00 2001 From: nitaicaro <42576749+nitaicaro@users.noreply.github.com> Date: Wed, 30 Sep 2020 19:52:01 +0300 Subject: [PATCH 042/114] =?UTF-8?q?Fixed=20Tracking=20test=20=E2=80=9CThe?= =?UTF-8?q?=20other=20connection=20is=20able=20to=20get=20invalidations?= =?UTF-8?q?=E2=80=9D=20(#7871)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit PROBLEM: [$rd1 read] reads invalidation messages one by one, so it's never going to see the second invalidation message produced after INCR b, whether or not it exists. Adding another read will block incase no invalidation message is produced. FIX: We switch the order of "INCR a" and "INCR b" - now "INCR b" comes first. We still only read the first invalidation message produces. If an invalidation message is wrongly produces for b - then it will be produced before that of a, since "INCR b" comes before "INCR a". 
Co-authored-by: Nitai Caro (cherry picked from commit 94e9b0124e8582912c3771f9828842348490bc38) --- tests/unit/tracking.tcl | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/tests/unit/tracking.tcl b/tests/unit/tracking.tcl index 0332fa726..839b894ea 100644 --- a/tests/unit/tracking.tcl +++ b/tests/unit/tracking.tcl @@ -16,9 +16,10 @@ start_server {tags {"tracking"}} { test {The other connection is able to get invalidations} { r SET a 1 + r SET b 1 r GET a - r INCR a - r INCR b ; # This key should not be notified, since it wasn't fetched. + r INCR b ; # This key should not be notified, since it wasn't fetched. + r INCR a set keys [lindex [$rd1 read] 2] assert {[llength $keys] == 1} assert {[lindex $keys 0] eq {a}} From 57027f0c55193e561fff4d56b7c87691c9f16933 Mon Sep 17 00:00:00 2001 From: Rafi Einstein Date: Thu, 1 Oct 2020 10:56:23 +0300 Subject: [PATCH 043/114] Makefile: enable program suffixes via PROG_SUFFIX (#7868) (cherry picked from commit 2636b760fb062a763ae528800fd998d2913c7cb1) --- README.md | 4 ++++ src/Makefile | 12 ++++++------ 2 files changed, 10 insertions(+), 6 deletions(-) diff --git a/README.md b/README.md index 80c2c9178..b6cfbbc5c 100644 --- a/README.md +++ b/README.md @@ -45,6 +45,10 @@ as libsystemd-dev on Debian/Ubuntu or systemd-devel on CentOS) and run: % make USE_SYSTEMD=yes +To append a suffix to Redis program names, use: + + % make PROG_SUFFIX="-alt" + You can run a 32 bit Redis binary using: % make 32bit diff --git a/src/Makefile b/src/Makefile index a91d457f4..3a09ccd3f 100644 --- a/src/Makefile +++ b/src/Makefile @@ -243,15 +243,15 @@ QUIET_LINK = @printf ' %b %b\n' $(LINKCOLOR)LINK$(ENDCOLOR) $(BINCOLOR)$@$(EN QUIET_INSTALL = @printf ' %b %b\n' $(LINKCOLOR)INSTALL$(ENDCOLOR) $(BINCOLOR)$@$(ENDCOLOR) 1>&2; endif -REDIS_SERVER_NAME=redis-server -REDIS_SENTINEL_NAME=redis-sentinel +REDIS_SERVER_NAME=redis-server$(PROG_SUFFIX) +REDIS_SENTINEL_NAME=redis-sentinel$(PROG_SUFFIX) REDIS_SERVER_OBJ=adlist.o quicklist.o 
ae.o anet.o dict.o server.o sds.o zmalloc.o lzf_c.o lzf_d.o pqsort.o zipmap.o sha1.o ziplist.o release.o networking.o util.o object.o db.o replication.o rdb.o t_string.o t_list.o t_set.o t_zset.o t_hash.o config.o aof.o pubsub.o multi.o debug.o sort.o intset.o syncio.o cluster.o crc16.o endianconv.o slowlog.o scripting.o bio.o rio.o rand.o memtest.o crcspeed.o crc64.o bitops.o sentinel.o notify.o setproctitle.o blocked.o hyperloglog.o latency.o sparkline.o redis-check-rdb.o redis-check-aof.o geo.o lazyfree.o module.o evict.o expire.o geohash.o geohash_helper.o childinfo.o defrag.o siphash.o rax.o t_stream.o listpack.o localtime.o lolwut.o lolwut5.o lolwut6.o acl.o gopher.o tracking.o connection.o tls.o sha256.o timeout.o setcpuaffinity.o -REDIS_CLI_NAME=redis-cli +REDIS_CLI_NAME=redis-cli$(PROG_SUFFIX) REDIS_CLI_OBJ=anet.o adlist.o dict.o redis-cli.o zmalloc.o release.o ae.o crcspeed.o crc64.o siphash.o crc16.o -REDIS_BENCHMARK_NAME=redis-benchmark +REDIS_BENCHMARK_NAME=redis-benchmark$(PROG_SUFFIX) REDIS_BENCHMARK_OBJ=ae.o anet.o redis-benchmark.o adlist.o dict.o zmalloc.o siphash.o -REDIS_CHECK_RDB_NAME=redis-check-rdb -REDIS_CHECK_AOF_NAME=redis-check-aof +REDIS_CHECK_RDB_NAME=redis-check-rdb$(PROG_SUFFIX) +REDIS_CHECK_AOF_NAME=redis-check-aof$(PROG_SUFFIX) all: $(REDIS_SERVER_NAME) $(REDIS_SENTINEL_NAME) $(REDIS_CLI_NAME) $(REDIS_BENCHMARK_NAME) $(REDIS_CHECK_RDB_NAME) $(REDIS_CHECK_AOF_NAME) @echo "" From 51dd4677c9f8d2003c80ca71bf6dfda8ce2a8dfe Mon Sep 17 00:00:00 2001 From: Oran Agra Date: Thu, 1 Oct 2020 11:27:45 +0300 Subject: [PATCH 044/114] Fix crash in script timeout during AOF loading (#7870) (cherry picked from commit 8cff3e03520bb08cb7dfdbd11f98827a3cb1d3a5) --- src/networking.c | 12 ++++++++---- tests/unit/scripting.tcl | 39 +++++++++++++++++++++++++++++++++++++++ 2 files changed, 47 insertions(+), 4 deletions(-) diff --git a/src/networking.c b/src/networking.c index 445150ab3..e05c8e1af 100644 --- a/src/networking.c +++ b/src/networking.c @@ 
-1512,16 +1512,20 @@ void resetClient(client *c) { * path, it is not really released, but only marked for later release. */ void protectClient(client *c) { c->flags |= CLIENT_PROTECTED; - connSetReadHandler(c->conn,NULL); - connSetWriteHandler(c->conn,NULL); + if (c->conn) { + connSetReadHandler(c->conn,NULL); + connSetWriteHandler(c->conn,NULL); + } } /* This will undo the client protection done by protectClient() */ void unprotectClient(client *c) { if (c->flags & CLIENT_PROTECTED) { c->flags &= ~CLIENT_PROTECTED; - connSetReadHandler(c->conn,readQueryFromClient); - if (clientHasPendingReplies(c)) clientInstallWriteHandler(c); + if (c->conn) { + connSetReadHandler(c->conn,readQueryFromClient); + if (clientHasPendingReplies(c)) clientInstallWriteHandler(c); + } } } diff --git a/tests/unit/scripting.tcl b/tests/unit/scripting.tcl index 3283edc66..6bcba4c3f 100644 --- a/tests/unit/scripting.tcl +++ b/tests/unit/scripting.tcl @@ -430,6 +430,45 @@ start_server {tags {"scripting"}} { set res } {102} + test {EVAL timeout from AOF} { + # generate a long running script that is propagated to the AOF as script + # make sure that the script times out during loading + r config set appendonly no + r config set aof-use-rdb-preamble no + r config set lua-replicate-commands no + r flushall + r config set appendonly yes + wait_for_condition 50 100 { + [s aof_rewrite_in_progress] == 0 + } else { + fail "AOF rewrite can't complete after CONFIG SET appendonly yes." 
+ } + r config set lua-time-limit 1 + set rd [redis_deferring_client] + set start [clock clicks -milliseconds] + $rd eval {redis.call('set',KEYS[1],'y'); for i=1,1500000 do redis.call('ping') end return 'ok'} 1 x + $rd flush + after 100 + catch {r ping} err + assert_match {BUSY*} $err + $rd read + set elapsed [expr [clock clicks -milliseconds]-$start] + if {$::verbose} { puts "script took $elapsed milliseconds" } + set start [clock clicks -milliseconds] + $rd debug loadaof + $rd flush + after 100 + catch {r ping} err + assert_match {LOADING*} $err + $rd read + set elapsed [expr [clock clicks -milliseconds]-$start] + if {$::verbose} { puts "loading took $elapsed milliseconds" } + $rd close + r get x + } {y} + r config set aof-use-rdb-preamble yes + r config set lua-replicate-commands yes + test {We can call scripts rewriting client->argv from Lua} { r del myset r sadd myset a b c From 0d7a1d1d373a04f28ef71f165178debbffc39177 Mon Sep 17 00:00:00 2001 From: Oran Agra Date: Thu, 1 Oct 2020 11:30:22 +0300 Subject: [PATCH 045/114] Include internal sds fragmentation in MEMORY reporting (#7864) The MEMORY command is used for debugging memory usage, so it should include internal fragmentation, same as used_memory (cherry picked from commit 86483e795262c6e2efdffe92c1642a72ef0dd6a0) --- src/object.c | 12 ++++++------ src/server.c | 2 +- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/src/object.c b/src/object.c index 6caa71bb9..92eebb556 100644 --- a/src/object.c +++ b/src/object.c @@ -786,7 +786,7 @@ size_t objectComputeSize(robj *o, size_t sample_size) { if(o->encoding == OBJ_ENCODING_INT) { asize = sizeof(*o); } else if(o->encoding == OBJ_ENCODING_RAW) { - asize = sdsAllocSize(o->ptr)+sizeof(*o); + asize = sdsZmallocSize(o->ptr)+sizeof(*o); } else if(o->encoding == OBJ_ENCODING_EMBSTR) { asize = sdslen(o->ptr)+2+sizeof(*o); } else { @@ -814,7 +814,7 @@ size_t objectComputeSize(robj *o, size_t sample_size) { asize = sizeof(*o)+sizeof(dict)+(sizeof(struct 
dictEntry*)*dictSlots(d)); while((de = dictNext(di)) != NULL && samples < sample_size) { ele = dictGetKey(de); - elesize += sizeof(struct dictEntry) + sdsAllocSize(ele); + elesize += sizeof(struct dictEntry) + sdsZmallocSize(ele); samples++; } dictReleaseIterator(di); @@ -836,7 +836,7 @@ size_t objectComputeSize(robj *o, size_t sample_size) { (sizeof(struct dictEntry*)*dictSlots(d))+ zmalloc_size(zsl->header); while(znode != NULL && samples < sample_size) { - elesize += sdsAllocSize(znode->ele); + elesize += sdsZmallocSize(znode->ele); elesize += sizeof(struct dictEntry) + zmalloc_size(znode); samples++; znode = znode->level[0].forward; @@ -855,7 +855,7 @@ size_t objectComputeSize(robj *o, size_t sample_size) { while((de = dictNext(di)) != NULL && samples < sample_size) { ele = dictGetKey(de); ele2 = dictGetVal(de); - elesize += sdsAllocSize(ele) + sdsAllocSize(ele2); + elesize += sdsZmallocSize(ele) + sdsZmallocSize(ele2); elesize += sizeof(struct dictEntry); samples++; } @@ -995,7 +995,7 @@ struct redisMemOverhead *getMemoryOverheadData(void) { mem = 0; if (server.aof_state != AOF_OFF) { - mem += sdsalloc(server.aof_buf); + mem += sdsZmallocSize(server.aof_buf); mem += aofRewriteBufferSize(); } mh->aof_buffer = mem; @@ -1311,7 +1311,7 @@ NULL return; } size_t usage = objectComputeSize(dictGetVal(de),samples); - usage += sdsAllocSize(dictGetKey(de)); + usage += sdsZmallocSize(dictGetKey(de)); usage += sizeof(dictEntry); addReplyLongLong(c,usage); } else if (!strcasecmp(c->argv[1]->ptr,"stats") && c->argc == 2) { diff --git a/src/server.c b/src/server.c index 1f20efe94..cc9fe7d2f 100644 --- a/src/server.c +++ b/src/server.c @@ -1608,7 +1608,7 @@ int clientsCronTrackClientsMemUsage(client *c) { size_t mem = 0; int type = getClientType(c); mem += getClientOutputBufferMemoryUsage(c); - mem += sdsAllocSize(c->querybuf); + mem += sdsZmallocSize(c->querybuf); mem += sizeof(client); /* Now that we have the memory used by the client, remove the old * value from the old 
category, and add it back. */ From 7a8a268ac40a25db699757748e285a84b1fd9a04 Mon Sep 17 00:00:00 2001 From: DvirDukhan Date: Sun, 4 Oct 2020 17:18:17 +0300 Subject: [PATCH 046/114] redis-cli add control on raw format line delimiter (#7841) Adding -D option for redis-cli to control newline between command responses in raw mode. Also removing cleanup code before calling exit, just in order to avoid adding more adding more cleanup code (redis doesn't bother to release allocations before exit anyway) Co-authored-by: Oran Agra (cherry picked from commit f0f8e9c824b819c8aec996ec8c8851773a6f9432) --- src/redis-cli.c | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/src/redis-cli.c b/src/redis-cli.c index 2f4609661..73d4abff2 100644 --- a/src/redis-cli.c +++ b/src/redis-cli.c @@ -237,6 +237,7 @@ static struct config { char *user; int output; /* output mode, see OUTPUT_* defines */ sds mb_delim; + sds cmd_delim; char prompt[128]; char *eval; int eval_ldb; @@ -1251,7 +1252,7 @@ static int cliReadReply(int output_raw_strings) { } else { if (config.output == OUTPUT_RAW) { out = cliFormatReplyRaw(reply); - out = sdscat(out,"\n"); + out = sdscatsds(out, config.cmd_delim); } else if (config.output == OUTPUT_STANDARD) { out = cliFormatReplyTTY(reply,""); } else if (config.output == OUTPUT_CSV) { @@ -1533,6 +1534,9 @@ static int parseOptions(int argc, char **argv) { } else if (!strcmp(argv[i],"-d") && !lastarg) { sdsfree(config.mb_delim); config.mb_delim = sdsnew(argv[++i]); + } else if (!strcmp(argv[i],"-D") && !lastarg) { + sdsfree(config.cmd_delim); + config.cmd_delim = sdsnew(argv[++i]); } else if (!strcmp(argv[i],"--verbose")) { config.verbose = 1; } else if (!strcmp(argv[i],"--cluster") && !lastarg) { @@ -1726,7 +1730,8 @@ static void usage(void) { " -n Database number.\n" " -3 Start session in RESP3 protocol mode.\n" " -x Read last argument from STDIN.\n" -" -d Multi-bulk delimiter in for raw formatting (default: \\n).\n" +" -d Delimiter between 
response bulks for raw formatting (default: \\n).\n" +" -D Delimiter between responses for raw formatting (default: \\n).\n" " -c Enable cluster mode (follow -ASK and -MOVED redirections).\n" #ifdef USE_OPENSSL " --tls Establish a secure TLS connection.\n" @@ -5360,8 +5365,6 @@ static void clusterManagerMode(clusterManagerCommandProc *proc) { exit(0); cluster_manager_err: freeClusterManager(); - sdsfree(config.hostip); - sdsfree(config.mb_delim); exit(1); } @@ -8118,6 +8121,7 @@ int main(int argc, char **argv) { else config.output = OUTPUT_STANDARD; config.mb_delim = sdsnew("\n"); + config.cmd_delim = sdsnew("\n"); firstarg = parseOptions(argc,argv); argc -= firstarg; @@ -8141,8 +8145,6 @@ int main(int argc, char **argv) { if (CLUSTER_MANAGER_MODE()) { clusterManagerCommandProc *proc = validateClusterManagerCommand(); if (!proc) { - sdsfree(config.hostip); - sdsfree(config.mb_delim); exit(1); } clusterManagerMode(proc); From a5302a8c21ec0dfbe9894ec12e3fd19f0fce1c49 Mon Sep 17 00:00:00 2001 From: Oran Agra Date: Mon, 5 Oct 2020 11:15:36 +0300 Subject: [PATCH 047/114] memory reporting of clients argv (#7874) track and report memory used by clients argv. this is very usaful in case clients started sending a command and didn't complete it. in which case the first args of the command are already trimmed from the query buffer. in an effort to avoid cache misses and overheads while keeping track of these, i avoid calling sdsZmallocSize and instead use the sdslen / bulk-len which can at least give some insight into the problem. This memory is now added to the total clients memory usage, as well as the client list. 
(cherry picked from commit 7481e513f0507d01381a87046d8d1366c718f94e) --- src/aof.c | 2 ++ src/networking.c | 51 +++++++++++++++++++++++++++++++++--- src/server.c | 4 ++- src/server.h | 1 + tests/unit/introspection.tcl | 2 +- 5 files changed, 55 insertions(+), 5 deletions(-) diff --git a/src/aof.c b/src/aof.c index 2114a17e4..b8ba31c19 100644 --- a/src/aof.c +++ b/src/aof.c @@ -669,6 +669,7 @@ struct client *createAOFClient(void) { c->querybuf_peak = 0; c->argc = 0; c->argv = NULL; + c->argv_len_sum = 0; c->bufpos = 0; c->flags = 0; c->btype = BLOCKED_NONE; @@ -694,6 +695,7 @@ void freeFakeClientArgv(struct client *c) { for (j = 0; j < c->argc; j++) decrRefCount(c->argv[j]); zfree(c->argv); + c->argv_len_sum = 0; } void freeFakeClient(struct client *c) { diff --git a/src/networking.c b/src/networking.c index e05c8e1af..54de8ac54 100644 --- a/src/networking.c +++ b/src/networking.c @@ -48,7 +48,7 @@ size_t sdsZmallocSize(sds s) { } /* Return the amount of memory used by the sds string at object->ptr - * for a string object. */ + * for a string object. This includes internal fragmentation. */ size_t getStringObjectSdsUsedMemory(robj *o) { serverAssertWithInfo(NULL,o,o->type == OBJ_STRING); switch(o->encoding) { @@ -58,6 +58,17 @@ size_t getStringObjectSdsUsedMemory(robj *o) { } } +/* Return the length of a string object. + * This does NOT includes internal fragmentation or sds unused space. */ +size_t getStringObjectLen(robj *o) { + serverAssertWithInfo(NULL,o,o->type == OBJ_STRING); + switch(o->encoding) { + case OBJ_ENCODING_RAW: return sdslen(o->ptr); + case OBJ_ENCODING_EMBSTR: return sdslen(o->ptr); + default: return 0; /* Just integer encoding for now. */ + } +} + /* Client.reply list dup and free methods. 
*/ void *dupClientReplyValue(void *o) { clientReplyBlock *old = o; @@ -116,6 +127,7 @@ client *createClient(connection *conn) { c->reqtype = 0; c->argc = 0; c->argv = NULL; + c->argv_len_sum = 0; c->cmd = c->lastcmd = NULL; c->user = DefaultUser; c->multibulklen = 0; @@ -1051,6 +1063,7 @@ static void freeClientArgv(client *c) { decrRefCount(c->argv[j]); c->argc = 0; c->cmd = NULL; + c->argv_len_sum = 0; } /* Close all the slaves connections. This is useful in chained replication @@ -1249,6 +1262,7 @@ void freeClient(client *c) { * and finally release the client structure itself. */ if (c->name) decrRefCount(c->name); zfree(c->argv); + c->argv_len_sum = 0; freeClientMultiState(c); sdsfree(c->peerid); zfree(c); @@ -1595,12 +1609,14 @@ int processInlineBuffer(client *c) { if (argc) { if (c->argv) zfree(c->argv); c->argv = zmalloc(sizeof(robj*)*argc); + c->argv_len_sum = 0; } /* Create redis objects for all arguments. */ for (c->argc = 0, j = 0; j < argc; j++) { c->argv[c->argc] = createObject(OBJ_STRING,argv[j]); c->argc++; + c->argv_len_sum += sdslen(argv[j]); } zfree(argv); return C_OK; @@ -1692,6 +1708,7 @@ int processMultibulkBuffer(client *c) { /* Setup argv array on client structure */ if (c->argv) zfree(c->argv); c->argv = zmalloc(sizeof(robj*)*c->multibulklen); + c->argv_len_sum = 0; } serverAssertWithInfo(c,NULL,c->multibulklen > 0); @@ -1764,6 +1781,7 @@ int processMultibulkBuffer(client *c) { sdslen(c->querybuf) == (size_t)(c->bulklen+2)) { c->argv[c->argc++] = createObject(OBJ_STRING,c->querybuf); + c->argv_len_sum += c->bulklen; sdsIncrLen(c->querybuf,-2); /* remove CRLF */ /* Assume that if we saw a fat argument we'll see another one * likely... 
*/ @@ -1772,6 +1790,7 @@ int processMultibulkBuffer(client *c) { } else { c->argv[c->argc++] = createStringObject(c->querybuf+c->qb_pos,c->bulklen); + c->argv_len_sum += c->bulklen; c->qb_pos += c->bulklen+2; } c->bulklen = -1; @@ -2094,8 +2113,21 @@ sds catClientInfoString(sds s, client *client) { if (connHasWriteHandler(client->conn)) *p++ = 'w'; } *p = '\0'; + + /* Compute the total memory consumed by this client. */ + size_t obufmem = getClientOutputBufferMemoryUsage(client); + size_t total_mem = obufmem; + total_mem += zmalloc_size(client); /* includes client->buf */ + total_mem += sdsZmallocSize(client->querybuf); + /* For efficiency (less work keeping track of the argv memory), it doesn't include the used memory + * i.e. unused sds space and internal fragmentation, just the string length. but this is enough to + * spot problematic clients. */ + total_mem += client->argv_len_sum; + if (client->argv) + total_mem += zmalloc_size(client->argv); + return sdscatfmt(s, - "id=%U addr=%s %s name=%s age=%I idle=%I flags=%s db=%i sub=%i psub=%i multi=%i qbuf=%U qbuf-free=%U obl=%U oll=%U omem=%U events=%s cmd=%s user=%s", + "id=%U addr=%s %s name=%s age=%I idle=%I flags=%s db=%i sub=%i psub=%i multi=%i qbuf=%U qbuf-free=%U argv-mem=%U obl=%U oll=%U omem=%U tot-mem=%U events=%s cmd=%s user=%s", (unsigned long long) client->id, getClientPeerId(client), connGetInfo(client->conn, conninfo, sizeof(conninfo)), @@ -2109,9 +2141,11 @@ sds catClientInfoString(sds s, client *client) { (client->flags & CLIENT_MULTI) ? client->mstate.count : -1, (unsigned long long) sdslen(client->querybuf), (unsigned long long) sdsavail(client->querybuf), + (unsigned long long) client->argv_len_sum, (unsigned long long) client->bufpos, (unsigned long long) listLength(client->reply), - (unsigned long long) getClientOutputBufferMemoryUsage(client), + (unsigned long long) obufmem, /* should not include client->buf since we want to see 0 for static clients. 
*/ + (unsigned long long) total_mem, events, client->lastcmd ? client->lastcmd->name : "NULL", client->user ? client->user->name : "(superuser)"); @@ -2649,6 +2683,10 @@ void rewriteClientCommandVector(client *c, int argc, ...) { /* Replace argv and argc with our new versions. */ c->argv = argv; c->argc = argc; + c->argv_len_sum = 0; + for (j = 0; j < c->argc; j++) + if (c->argv[j]) + c->argv_len_sum += getStringObjectLen(c->argv[j]); c->cmd = lookupCommandOrOriginal(c->argv[0]->ptr); serverAssertWithInfo(c,NULL,c->cmd != NULL); va_end(ap); @@ -2656,10 +2694,15 @@ void rewriteClientCommandVector(client *c, int argc, ...) { /* Completely replace the client command vector with the provided one. */ void replaceClientCommandVector(client *c, int argc, robj **argv) { + int j; freeClientArgv(c); zfree(c->argv); c->argv = argv; c->argc = argc; + c->argv_len_sum = 0; + for (j = 0; j < c->argc; j++) + if (c->argv[j]) + c->argv_len_sum += getStringObjectLen(c->argv[j]); c->cmd = lookupCommandOrOriginal(c->argv[0]->ptr); serverAssertWithInfo(c,NULL,c->cmd != NULL); } @@ -2684,6 +2727,8 @@ void rewriteClientCommandArgument(client *c, int i, robj *newval) { c->argv[i] = NULL; } oldval = c->argv[i]; + if (oldval) c->argv_len_sum -= getStringObjectLen(oldval); + if (newval) c->argv_len_sum += getStringObjectLen(newval); c->argv[i] = newval; incrRefCount(newval); if (oldval) decrRefCount(oldval); diff --git a/src/server.c b/src/server.c index cc9fe7d2f..c9f92702f 100644 --- a/src/server.c +++ b/src/server.c @@ -1609,7 +1609,9 @@ int clientsCronTrackClientsMemUsage(client *c) { int type = getClientType(c); mem += getClientOutputBufferMemoryUsage(c); mem += sdsZmallocSize(c->querybuf); - mem += sizeof(client); + mem += zmalloc_size(c); + mem += c->argv_len_sum; + if (c->argv) mem += zmalloc_size(c->argv); /* Now that we have the memory used by the client, remove the old * value from the old category, and add it back. 
*/ server.stat_clients_type_memory[c->client_cron_last_memory_type] -= diff --git a/src/server.h b/src/server.h index b12e4587d..9f2c50af1 100644 --- a/src/server.h +++ b/src/server.h @@ -799,6 +799,7 @@ typedef struct client { size_t querybuf_peak; /* Recent (100ms or more) peak of querybuf size. */ int argc; /* Num of arguments of current command. */ robj **argv; /* Arguments of current command. */ + size_t argv_len_sum; /* Sum of lengths of objects in argv list. */ struct redisCommand *cmd, *lastcmd; /* Last command executed. */ user *user; /* User associated with this connection. If the user is set to NULL the connection can do diff --git a/tests/unit/introspection.tcl b/tests/unit/introspection.tcl index 37470c068..32215868c 100644 --- a/tests/unit/introspection.tcl +++ b/tests/unit/introspection.tcl @@ -1,7 +1,7 @@ start_server {tags {"introspection"}} { test {CLIENT LIST} { r client list - } {*addr=*:* fd=* age=* idle=* flags=N db=9 sub=0 psub=0 multi=-1 qbuf=26 qbuf-free=* obl=0 oll=0 omem=0 events=r cmd=client*} + } {*addr=*:* fd=* age=* idle=* flags=N db=9 sub=0 psub=0 multi=-1 qbuf=26 qbuf-free=* argv-mem=* obl=0 oll=0 omem=0 tot-mem=* events=r cmd=client*} test {MONITOR can log executed commands} { set rd [redis_deferring_client] From 6e03b388ce91f2bd89c7a100690af3c79635c184 Mon Sep 17 00:00:00 2001 From: Oran Agra Date: Tue, 6 Oct 2020 21:43:30 +0300 Subject: [PATCH 048/114] Allow blocked XREAD on a cluster replica (#7881) I suppose that it was overlooked, since till recently none of the blocked commands were readonly. other changes: - add test for the above. - add better support for additional (and deferring) clients for cluster tests - improve a test which left the client in MULTI state. 
(cherry picked from commit ba61700db24628451212c5875e0ca7e5d83ea743) --- src/cluster.c | 9 ++++++++ .../tests/16-transactions-on-replica.tcl | 21 +++++++++++++++++++ tests/instances.tcl | 13 ++++++++++++ 3 files changed, 43 insertions(+) diff --git a/src/cluster.c b/src/cluster.c index e8db4050d..43bea155b 100644 --- a/src/cluster.c +++ b/src/cluster.c @@ -5851,6 +5851,15 @@ int clusterRedirectBlockedClientIfNeeded(client *c) { int slot = keyHashSlot((char*)key->ptr, sdslen(key->ptr)); clusterNode *node = server.cluster->slots[slot]; + /* if the client is read-only and attempting to access key that our + * replica can handle, allow it. */ + if ((c->flags & CLIENT_READONLY) && + (c->lastcmd->flags & CMD_READONLY) && + nodeIsSlave(myself) && myself->slaveof == node) + { + node = myself; + } + /* We send an error and unblock the client if: * 1) The slot is unassigned, emitting a cluster down error. * 2) The slot is not handled by this node, nor being imported. */ diff --git a/tests/cluster/tests/16-transactions-on-replica.tcl b/tests/cluster/tests/16-transactions-on-replica.tcl index da9dff1ca..41083f421 100644 --- a/tests/cluster/tests/16-transactions-on-replica.tcl +++ b/tests/cluster/tests/16-transactions-on-replica.tcl @@ -45,4 +45,25 @@ test "MULTI-EXEC with write operations is MOVED" { $replica MULTI catch {$replica HSET h b 4} err assert {[string range $err 0 4] eq {MOVED}} + catch {$replica exec} err + assert {[string range $err 0 8] eq {EXECABORT}} +} + +test "read-only blocking operations from replica" { + set rd [redis_deferring_client redis 1] + $rd readonly + $rd read + $rd XREAD BLOCK 0 STREAMS k 0 + + wait_for_condition 1000 50 { + [RI 1 blocked_clients] eq {1} + } else { + fail "client wasn't blocked" + } + + $primary XADD k * foo bar + set res [$rd read] + set res [lindex [lindex [lindex [lindex $res 0] 1] 0] 1] + assert {$res eq {foo bar}} + $rd close } diff --git a/tests/instances.tcl b/tests/instances.tcl index 5c4b665db..d3b1b50cd 100644 --- 
a/tests/instances.tcl +++ b/tests/instances.tcl @@ -606,3 +606,16 @@ proc restart_instance {type id} { } } +proc redis_deferring_client {type id} { + set port [get_instance_attrib $type $id port] + set host [get_instance_attrib $type $id host] + set client [redis $host $port 1 $::tls] + return $client +} + +proc redis_client {type id} { + set port [get_instance_attrib $type $id port] + set host [get_instance_attrib $type $id host] + set client [redis $host $port 0 $::tls] + return $client +} From 9cf7292ef7ca2668a799a7e4b82a1381553aadc1 Mon Sep 17 00:00:00 2001 From: Oran Agra Date: Wed, 7 Oct 2020 20:28:57 +0300 Subject: [PATCH 049/114] Add some additional signal info to the crash log (#7891) - si_code can be very useful info some day. - a clear indication that redis was killed by an external user (cherry picked from commit 38c7c62d2270b4921219953aaabfcdc721154b88) --- src/debug.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/src/debug.c b/src/debug.c index 1a41574e4..7112ff535 100644 --- a/src/debug.c +++ b/src/debug.c @@ -1627,7 +1627,7 @@ void sigsegvHandler(int sig, siginfo_t *info, void *secret) { bugReportStart(); serverLog(LL_WARNING, - "Redis %s crashed by signal: %d", REDIS_VERSION, sig); + "Redis %s crashed by signal: %d, si_code: %d", REDIS_VERSION, sig, info->si_code); if (eip != NULL) { serverLog(LL_WARNING, "Crashed running the instruction at: %p", eip); @@ -1636,6 +1636,9 @@ void sigsegvHandler(int sig, siginfo_t *info, void *secret) { serverLog(LL_WARNING, "Accessing address: %p", (void*)info->si_addr); } + if (info->si_pid != -1) { + serverLog(LL_WARNING, "Killed by PID: %d, UID: %d", info->si_pid, info->si_uid); + } serverLog(LL_WARNING, "Failed assertion: %s (%s:%d)", server.assert_failed, server.assert_file, server.assert_line); From ec9b1cca59fdadb56ad4d541d2c9572207847974 Mon Sep 17 00:00:00 2001 From: Madelyn Olson <34459052+madolson@users.noreply.github.com> Date: Wed, 7 Oct 2020 22:09:09 -0700 Subject: [PATCH 
050/114] Fixed excessive categories being displayed from acls (#7889) (cherry picked from commit abe416c5f251b7b151440b38829a719c4846b8b8) --- src/acl.c | 17 +++++++++++++++-- tests/unit/acl.tcl | 14 ++++++++++++++ 2 files changed, 29 insertions(+), 2 deletions(-) diff --git a/src/acl.c b/src/acl.c index 5d484a742..e781c9211 100644 --- a/src/acl.c +++ b/src/acl.c @@ -478,17 +478,30 @@ sds ACLDescribeUserCommandRules(user *u) { /* Try to add or subtract each category one after the other. Often a * single category will not perfectly match the set of commands into * it, so at the end we do a final pass adding/removing the single commands - * needed to make the bitmap exactly match. */ + * needed to make the bitmap exactly match. A temp user is maintained to + * keep track of categories already applied. */ + user tu = {0}; + user *tempuser = &tu; + memcpy(tempuser->allowed_commands, + u->allowed_commands, + sizeof(u->allowed_commands)); + for (int j = 0; ACLCommandCategories[j].flag != 0; j++) { unsigned long on, off; - ACLCountCategoryBitsForUser(u,&on,&off,ACLCommandCategories[j].name); + ACLCountCategoryBitsForUser(tempuser,&on,&off,ACLCommandCategories[j].name); if ((additive && on > off) || (!additive && off > on)) { sds op = sdsnewlen(additive ? "+@" : "-@", 2); op = sdscat(op,ACLCommandCategories[j].name); ACLSetUser(fakeuser,op,-1); + + sds invop = sdsnewlen(additive ? "-@" : "+@", 2); + invop = sdscat(invop,ACLCommandCategories[j].name); + ACLSetUser(tempuser,invop,-1); + rules = sdscatsds(rules,op); rules = sdscatlen(rules," ",1); sdsfree(op); + sdsfree(invop); } } diff --git a/tests/unit/acl.tcl b/tests/unit/acl.tcl index f015f75a0..12f59e749 100644 --- a/tests/unit/acl.tcl +++ b/tests/unit/acl.tcl @@ -135,6 +135,20 @@ start_server {tags {"acl"}} { assert_match {*+acl*} $cmdstr } + # A regression test make sure that as long as there is a simple + # category defining the commands, that it will be used as is. 
+ test {ACL GETUSER provides reasonable results} { + # Test for future commands where allowed + r ACL setuser additive reset +@all -@write + set cmdstr [dict get [r ACL getuser additive] commands] + assert_match {+@all -@write} $cmdstr + + # Test for future commands are disallowed + r ACL setuser subtractive reset -@all +@read + set cmdstr [dict get [r ACL getuser subtractive] commands] + assert_match {-@all +@read} $cmdstr + } + test {ACL #5998 regression: memory leaks adding / removing subcommands} { r AUTH default "" r ACL setuser newuser reset -debug +debug|a +debug|b +debug|c From ad1ed7dcd0fcf299e9560b4d74194a885723f7c2 Mon Sep 17 00:00:00 2001 From: Yossi Gottlieb Date: Mon, 5 Oct 2020 17:03:17 +0300 Subject: [PATCH 051/114] Introduce getKeysResult for getKeysFromCommand. Avoid using a static buffer for short key index responses, and make it caller's responsibility to stack-allocate a result type. Responses that don't fit are still allocated on the heap. (cherry picked from commit bf5beab64a196214c3c741d9ef67d0446c6480c3) --- src/acl.c | 9 +-- src/cluster.c | 11 ++-- src/db.c | 171 ++++++++++++++++++++++++++----------------------- src/module.c | 33 +++++++--- src/server.c | 14 ++-- src/server.h | 40 ++++++++---- src/tracking.c | 13 ++-- 7 files changed, 170 insertions(+), 121 deletions(-) diff --git a/src/acl.c b/src/acl.c index e781c9211..e0fd3f728 100644 --- a/src/acl.c +++ b/src/acl.c @@ -1115,8 +1115,9 @@ int ACLCheckCommandPerm(client *c, int *keyidxptr) { if (!(c->user->flags & USER_FLAG_ALLKEYS) && (c->cmd->getkeys_proc || c->cmd->firstkey)) { - int numkeys; - int *keyidx = getKeysFromCommand(c->cmd,c->argv,c->argc,&numkeys); + getKeysResult result = GETKEYS_RESULT_INIT; + int numkeys = getKeysFromCommand(c->cmd,c->argv,c->argc,&result); + int *keyidx = result.keys; for (int j = 0; j < numkeys; j++) { listIter li; listNode *ln; @@ -1137,11 +1138,11 @@ int ACLCheckCommandPerm(client *c, int *keyidxptr) { } if (!match) { if (keyidxptr) *keyidxptr = 
keyidx[j]; - getKeysFreeResult(keyidx); + getKeysFreeResult(&result); return ACL_DENIED_KEY; } } - getKeysFreeResult(keyidx); + getKeysFreeResult(&result); } /* If we survived all the above checks, the user can execute the diff --git a/src/cluster.c b/src/cluster.c index 43bea155b..7d690e863 100644 --- a/src/cluster.c +++ b/src/cluster.c @@ -5640,7 +5640,10 @@ clusterNode *getNodeByQuery(client *c, struct redisCommand *cmd, robj **argv, in margc = ms->commands[i].argc; margv = ms->commands[i].argv; - keyindex = getKeysFromCommand(mcmd,margv,margc,&numkeys); + getKeysResult result = GETKEYS_RESULT_INIT; + numkeys = getKeysFromCommand(mcmd,margv,margc,&result); + keyindex = result.keys; + for (j = 0; j < numkeys; j++) { robj *thiskey = margv[keyindex[j]]; int thisslot = keyHashSlot((char*)thiskey->ptr, @@ -5658,7 +5661,7 @@ clusterNode *getNodeByQuery(client *c, struct redisCommand *cmd, robj **argv, in * not trapped earlier in processCommand(). Report the same * error to the client. */ if (n == NULL) { - getKeysFreeResult(keyindex); + getKeysFreeResult(&result); if (error_code) *error_code = CLUSTER_REDIR_DOWN_UNBOUND; return NULL; @@ -5682,7 +5685,7 @@ clusterNode *getNodeByQuery(client *c, struct redisCommand *cmd, robj **argv, in if (!equalStringObjects(firstkey,thiskey)) { if (slot != thisslot) { /* Error: multiple keys from different slots. */ - getKeysFreeResult(keyindex); + getKeysFreeResult(&result); if (error_code) *error_code = CLUSTER_REDIR_CROSS_SLOT; return NULL; @@ -5701,7 +5704,7 @@ clusterNode *getNodeByQuery(client *c, struct redisCommand *cmd, robj **argv, in missing_keys++; } } - getKeysFreeResult(keyindex); + getKeysFreeResult(&result); } /* No key at all in command? 
then we can serve the request diff --git a/src/db.c b/src/db.c index 7ed746f9a..eb87cebc5 100644 --- a/src/db.c +++ b/src/db.c @@ -1322,27 +1322,54 @@ int expireIfNeeded(redisDb *db, robj *key) { /* ----------------------------------------------------------------------------- * API to get key arguments from commands * ---------------------------------------------------------------------------*/ -#define MAX_KEYS_BUFFER 256 -static int getKeysTempBuffer[MAX_KEYS_BUFFER]; + +/* Prepare the getKeysResult struct to hold numkeys, either by using the + * pre-allocated keysbuf or by allocating a new array on the heap. + * + * This function must be called at least once before starting to populate + * the result, and can be called repeatedly to enlarge the result array. + */ +int *getKeysPrepareResult(getKeysResult *result, int numkeys) { + /* GETKEYS_RESULT_INIT initializes keys to NULL, point it to the pre-allocated stack + * buffer here. */ + if (!result->keys) { + serverAssert(!result->numkeys); + result->keys = result->keysbuf; + } + + /* Resize if necessary */ + if (numkeys > result->size) { + if (result->keys != result->keysbuf) { + /* We're not using a static buffer, just (re)alloc */ + result->keys = zrealloc(result->keys, numkeys * sizeof(int)); + } else { + /* We are using a static buffer, copy its contents */ + result->keys = zmalloc(numkeys * sizeof(int)); + if (result->numkeys) + memcpy(result->keys, result->keysbuf, result->numkeys * sizeof(int)); + } + result->size = numkeys; + } + + return result->keys; +} /* The base case is to use the keys position as given in the command table * (firstkey, lastkey, step). 
*/ -int *getKeysUsingCommandTable(struct redisCommand *cmd,robj **argv, int argc, int *numkeys) { +int getKeysUsingCommandTable(struct redisCommand *cmd,robj **argv, int argc, getKeysResult *result) { int j, i = 0, last, *keys; UNUSED(argv); if (cmd->firstkey == 0) { - *numkeys = 0; - return NULL; + result->numkeys = 0; + return 0; } last = cmd->lastkey; if (last < 0) last = argc+last; int count = ((last - cmd->firstkey)+1); - keys = getKeysTempBuffer; - if (count > MAX_KEYS_BUFFER) - keys = zmalloc(sizeof(int)*count); + keys = getKeysPrepareResult(result, count); for (j = cmd->firstkey; j <= last; j += cmd->keystep) { if (j >= argc) { @@ -1353,17 +1380,17 @@ int *getKeysUsingCommandTable(struct redisCommand *cmd,robj **argv, int argc, in * return no keys and expect the command implementation to report * an arity or syntax error. */ if (cmd->flags & CMD_MODULE || cmd->arity < 0) { - getKeysFreeResult(keys); - *numkeys = 0; - return NULL; + getKeysFreeResult(result); + result->numkeys = 0; + return 0; } else { serverPanic("Redis built-in command declared keys positions not matching the arity requirements."); } } keys[i++] = j; } - *numkeys = i; - return keys; + result->numkeys = i; + return i; } /* Return all the arguments that are keys in the command passed via argc / argv. @@ -1377,26 +1404,26 @@ int *getKeysUsingCommandTable(struct redisCommand *cmd,robj **argv, int argc, in * * This function uses the command table if a command-specific helper function * is not required, otherwise it calls the command-specific function. 
*/ -int *getKeysFromCommand(struct redisCommand *cmd, robj **argv, int argc, int *numkeys) { +int getKeysFromCommand(struct redisCommand *cmd, robj **argv, int argc, getKeysResult *result) { if (cmd->flags & CMD_MODULE_GETKEYS) { - return moduleGetCommandKeysViaAPI(cmd,argv,argc,numkeys); + return moduleGetCommandKeysViaAPI(cmd,argv,argc,result); } else if (!(cmd->flags & CMD_MODULE) && cmd->getkeys_proc) { - return cmd->getkeys_proc(cmd,argv,argc,numkeys); + return cmd->getkeys_proc(cmd,argv,argc,result); } else { - return getKeysUsingCommandTable(cmd,argv,argc,numkeys); + return getKeysUsingCommandTable(cmd,argv,argc,result); } } /* Free the result of getKeysFromCommand. */ -void getKeysFreeResult(int *result) { - if (result != getKeysTempBuffer) - zfree(result); +void getKeysFreeResult(getKeysResult *result) { + if (result && result->keys != result->keysbuf) + zfree(result->keys); } /* Helper function to extract keys from following commands: * ZUNIONSTORE ... * ZINTERSTORE ... */ -int *zunionInterGetKeys(struct redisCommand *cmd, robj **argv, int argc, int *numkeys) { +int zunionInterGetKeys(struct redisCommand *cmd, robj **argv, int argc, getKeysResult *result) { int i, num, *keys; UNUSED(cmd); @@ -1404,30 +1431,30 @@ int *zunionInterGetKeys(struct redisCommand *cmd, robj **argv, int argc, int *nu /* Sanity check. Don't return any key if the command is going to * reply with syntax error. 
*/ if (num < 1 || num > (argc-3)) { - *numkeys = 0; - return NULL; + result->numkeys = 0; + return 0; } /* Keys in z{union,inter}store come from two places: * argv[1] = storage key, * argv[3...n] = keys to intersect */ - keys = getKeysTempBuffer; - if (num+1>MAX_KEYS_BUFFER) - keys = zmalloc(sizeof(int)*(num+1)); + /* Total keys = {union,inter} keys + storage key */ + keys = getKeysPrepareResult(result, num+1); + result->numkeys = num+1; /* Add all key positions for argv[3...n] to keys[] */ for (i = 0; i < num; i++) keys[i] = 3+i; /* Finally add the argv[1] key position (the storage key target). */ keys[num] = 1; - *numkeys = num+1; /* Total keys = {union,inter} keys + storage key */ - return keys; + + return result->numkeys; } /* Helper function to extract keys from the following commands: * EVAL