Compare commits

...

470 Commits

Author SHA1 Message Date
5b4ca543a2 Update cluster-experimental
Some checks failed
Daily / test-sanitizer-address (gcc) (push) Has been cancelled
Daily / test-sanitizer-undefined (clang) (push) Has been cancelled
Daily / test-sanitizer-undefined (gcc) (push) Has been cancelled
Daily / test-sanitizer-force-defrag (push) Has been cancelled
Daily / test-almalinux8-jemalloc (push) Has been cancelled
Daily / test-almalinux9-jemalloc (push) Has been cancelled
Daily / test-fedoralatest-jemalloc (push) Has been cancelled
Daily / test-fedorarawhide-jemalloc (push) Has been cancelled
Daily / test-centosstream9-jemalloc (push) Has been cancelled
Daily / test-almalinux8-tls-module (push) Has been cancelled
Daily / test-almalinux9-tls-module (push) Has been cancelled
Daily / test-fedoralatest-tls-module (push) Has been cancelled
Daily / test-fedorarawhide-tls-module (push) Has been cancelled
Daily / test-centosstream9-tls-module (push) Has been cancelled
Daily / test-almalinux8-tls-module-no-tls (push) Has been cancelled
Daily / test-almalinux9-tls-module-no-tls (push) Has been cancelled
Daily / test-fedoralatest-tls-module-no-tls (push) Has been cancelled
Daily / test-fedorarawhide-tls-module-no-tls (push) Has been cancelled
Daily / test-centosstream9-tls-module-no-tls (push) Has been cancelled
Daily / test-macos-latest (push) Has been cancelled
Daily / test-macos-latest-sentinel (push) Has been cancelled
Daily / test-macos-latest-cluster (push) Has been cancelled
Daily / build-macos (macos-13) (push) Has been cancelled
Daily / build-macos (macos-14) (push) Has been cancelled
Daily / test-freebsd (push) Has been cancelled
Daily / test-alpine-jemalloc (push) Has been cancelled
Daily / test-alpine-libc-malloc (push) Has been cancelled
Daily / reply-schemas-validator (push) Has been cancelled
Daily / notify-about-job-results (push) Has been cancelled
CodeQL / Analyze (cpp) (push) Has been cancelled
2025-04-04 19:40:14 +00:00
2eff3d9874 Update cluster
Some checks are pending
CI / test-rdma (push) Waiting to run
CI / build-debian-old (push) Waiting to run
CI / build-macos-latest (push) Waiting to run
CI / build-32bit (push) Waiting to run
CI / build-libc-malloc (push) Waiting to run
CI / build-almalinux8-jemalloc (push) Waiting to run
CI / test-ubuntu-latest (push) Waiting to run
CI / test-ubuntu-latest-cmake (push) Waiting to run
CI / test-sanitizer-address (push) Waiting to run
CI / format-yaml (push) Waiting to run
Clang Format Check / clang-format-check (push) Waiting to run
Codecov / code-coverage (push) Waiting to run
External Server Tests / test-external-standalone (push) Waiting to run
External Server Tests / test-external-cluster (push) Waiting to run
External Server Tests / test-external-nodebug (push) Waiting to run
Spellcheck / Spellcheck (push) Waiting to run
2025-04-04 19:39:42 +00:00
90ac6479c3 Update cluster-experimental
Some checks are pending
CI / build-almalinux8-jemalloc (push) Waiting to run
CI / test-ubuntu-latest (push) Waiting to run
CI / test-ubuntu-latest-cmake (push) Waiting to run
CI / test-sanitizer-address (push) Waiting to run
CI / test-rdma (push) Waiting to run
CI / build-debian-old (push) Waiting to run
CI / build-macos-latest (push) Waiting to run
CI / build-32bit (push) Waiting to run
CI / build-libc-malloc (push) Waiting to run
CI / format-yaml (push) Waiting to run
Clang Format Check / clang-format-check (push) Waiting to run
Codecov / code-coverage (push) Waiting to run
External Server Tests / test-external-nodebug (push) Waiting to run
External Server Tests / test-external-standalone (push) Waiting to run
External Server Tests / test-external-cluster (push) Waiting to run
Spellcheck / Spellcheck (push) Waiting to run
2025-04-04 19:31:29 +00:00
1dbf196e5a Upload files to "/"
Some checks are pending
CI / test-rdma (push) Waiting to run
CI / build-debian-old (push) Waiting to run
CI / build-macos-latest (push) Waiting to run
CI / build-32bit (push) Waiting to run
CI / build-libc-malloc (push) Waiting to run
CI / build-almalinux8-jemalloc (push) Waiting to run
CI / test-ubuntu-latest (push) Waiting to run
CI / test-ubuntu-latest-cmake (push) Waiting to run
CI / test-sanitizer-address (push) Waiting to run
CI / format-yaml (push) Waiting to run
Clang Format Check / clang-format-check (push) Waiting to run
Codecov / code-coverage (push) Waiting to run
External Server Tests / test-external-standalone (push) Waiting to run
External Server Tests / test-external-cluster (push) Waiting to run
External Server Tests / test-external-nodebug (push) Waiting to run
Spellcheck / Spellcheck (push) Waiting to run
2025-04-04 19:27:28 +00:00
65ab81a1ac Update README.md
Some checks failed
CI / build-32bit (push) Has been cancelled
CI / build-libc-malloc (push) Has been cancelled
CI / build-almalinux8-jemalloc (push) Has been cancelled
Spellcheck / Spellcheck (push) Has been cancelled
CI / test-ubuntu-latest (push) Has been cancelled
CI / test-ubuntu-latest-cmake (push) Has been cancelled
CI / test-sanitizer-address (push) Has been cancelled
CI / test-rdma (push) Has been cancelled
CI / build-debian-old (push) Has been cancelled
CI / build-macos-latest (push) Has been cancelled
CI / format-yaml (push) Has been cancelled
Clang Format Check / clang-format-check (push) Has been cancelled
Codecov / code-coverage (push) Has been cancelled
External Server Tests / test-external-standalone (push) Has been cancelled
External Server Tests / test-external-cluster (push) Has been cancelled
External Server Tests / test-external-nodebug (push) Has been cancelled
2025-04-02 20:09:00 +00:00
cb7ebda652 Update README.md
Some checks are pending
CI / test-rdma (push) Waiting to run
CI / build-debian-old (push) Waiting to run
CI / build-macos-latest (push) Waiting to run
CI / build-32bit (push) Waiting to run
CI / build-libc-malloc (push) Waiting to run
CI / build-almalinux8-jemalloc (push) Waiting to run
CI / test-ubuntu-latest (push) Waiting to run
CI / test-ubuntu-latest-cmake (push) Waiting to run
CI / test-sanitizer-address (push) Waiting to run
CI / format-yaml (push) Waiting to run
Clang Format Check / clang-format-check (push) Waiting to run
Codecov / code-coverage (push) Waiting to run
External Server Tests / test-external-standalone (push) Waiting to run
External Server Tests / test-external-cluster (push) Waiting to run
External Server Tests / test-external-nodebug (push) Waiting to run
Spellcheck / Spellcheck (push) Waiting to run
2025-04-02 20:08:26 +00:00
3ec2a4bd68 Update README.md
Some checks are pending
CI / build-almalinux8-jemalloc (push) Waiting to run
CI / test-ubuntu-latest (push) Waiting to run
CI / test-ubuntu-latest-cmake (push) Waiting to run
CI / test-sanitizer-address (push) Waiting to run
CI / test-rdma (push) Waiting to run
CI / build-debian-old (push) Waiting to run
CI / build-macos-latest (push) Waiting to run
CI / build-32bit (push) Waiting to run
CI / build-libc-malloc (push) Waiting to run
CI / format-yaml (push) Waiting to run
Clang Format Check / clang-format-check (push) Waiting to run
Codecov / code-coverage (push) Waiting to run
External Server Tests / test-external-nodebug (push) Waiting to run
External Server Tests / test-external-standalone (push) Waiting to run
External Server Tests / test-external-cluster (push) Waiting to run
Spellcheck / Spellcheck (push) Waiting to run
2025-04-02 20:07:16 +00:00
a5c63f9904 Update README.md
Some checks are pending
CI / test-ubuntu-latest-cmake (push) Waiting to run
CI / test-sanitizer-address (push) Waiting to run
CI / test-rdma (push) Waiting to run
CI / build-debian-old (push) Waiting to run
CI / build-macos-latest (push) Waiting to run
CI / build-32bit (push) Waiting to run
CI / build-libc-malloc (push) Waiting to run
CI / build-almalinux8-jemalloc (push) Waiting to run
CI / test-ubuntu-latest (push) Waiting to run
CI / format-yaml (push) Waiting to run
Clang Format Check / clang-format-check (push) Waiting to run
Codecov / code-coverage (push) Waiting to run
External Server Tests / test-external-standalone (push) Waiting to run
External Server Tests / test-external-cluster (push) Waiting to run
External Server Tests / test-external-nodebug (push) Waiting to run
Spellcheck / Spellcheck (push) Waiting to run
2025-04-02 20:01:56 +00:00
c823b181ee Update README.md
Some checks are pending
CI / test-ubuntu-latest (push) Waiting to run
CI / test-ubuntu-latest-cmake (push) Waiting to run
CI / test-sanitizer-address (push) Waiting to run
CI / test-rdma (push) Waiting to run
CI / build-debian-old (push) Waiting to run
CI / build-macos-latest (push) Waiting to run
CI / build-32bit (push) Waiting to run
CI / build-libc-malloc (push) Waiting to run
CI / build-almalinux8-jemalloc (push) Waiting to run
CI / format-yaml (push) Waiting to run
Clang Format Check / clang-format-check (push) Waiting to run
Codecov / code-coverage (push) Waiting to run
External Server Tests / test-external-standalone (push) Waiting to run
External Server Tests / test-external-cluster (push) Waiting to run
External Server Tests / test-external-nodebug (push) Waiting to run
Spellcheck / Spellcheck (push) Waiting to run
2025-04-02 19:50:40 +00:00
0b5c1b4b69 Update README.md
Some checks are pending
CI / test-ubuntu-latest (push) Waiting to run
CI / test-ubuntu-latest-cmake (push) Waiting to run
CI / test-sanitizer-address (push) Waiting to run
CI / test-rdma (push) Waiting to run
CI / build-debian-old (push) Waiting to run
CI / build-macos-latest (push) Waiting to run
CI / build-32bit (push) Waiting to run
CI / build-libc-malloc (push) Waiting to run
CI / build-almalinux8-jemalloc (push) Waiting to run
CI / format-yaml (push) Waiting to run
Clang Format Check / clang-format-check (push) Waiting to run
Codecov / code-coverage (push) Waiting to run
External Server Tests / test-external-standalone (push) Waiting to run
External Server Tests / test-external-cluster (push) Waiting to run
External Server Tests / test-external-nodebug (push) Waiting to run
Spellcheck / Spellcheck (push) Waiting to run
2025-04-02 19:48:24 +00:00
28126899ff Update README.md
Some checks are pending
CI / test-ubuntu-latest (push) Waiting to run
CI / test-ubuntu-latest-cmake (push) Waiting to run
CI / test-sanitizer-address (push) Waiting to run
CI / test-rdma (push) Waiting to run
CI / build-debian-old (push) Waiting to run
CI / build-macos-latest (push) Waiting to run
CI / build-32bit (push) Waiting to run
CI / build-libc-malloc (push) Waiting to run
CI / build-almalinux8-jemalloc (push) Waiting to run
CI / format-yaml (push) Waiting to run
Clang Format Check / clang-format-check (push) Waiting to run
Codecov / code-coverage (push) Waiting to run
External Server Tests / test-external-standalone (push) Waiting to run
External Server Tests / test-external-cluster (push) Waiting to run
External Server Tests / test-external-nodebug (push) Waiting to run
Spellcheck / Spellcheck (push) Waiting to run
2025-04-02 19:47:44 +00:00
74494c71c5 Update README.md
Some checks are pending
CI / test-ubuntu-latest (push) Waiting to run
CI / test-ubuntu-latest-cmake (push) Waiting to run
CI / test-sanitizer-address (push) Waiting to run
CI / test-rdma (push) Waiting to run
CI / build-debian-old (push) Waiting to run
CI / build-macos-latest (push) Waiting to run
CI / build-32bit (push) Waiting to run
CI / build-libc-malloc (push) Waiting to run
CI / build-almalinux8-jemalloc (push) Waiting to run
CI / format-yaml (push) Waiting to run
Clang Format Check / clang-format-check (push) Waiting to run
Codecov / code-coverage (push) Waiting to run
External Server Tests / test-external-standalone (push) Waiting to run
External Server Tests / test-external-cluster (push) Waiting to run
External Server Tests / test-external-nodebug (push) Waiting to run
Spellcheck / Spellcheck (push) Waiting to run
2025-04-02 19:45:05 +00:00
2a8c840bd3 Update README.md
Some checks are pending
CI / test-ubuntu-latest (push) Waiting to run
CI / test-ubuntu-latest-cmake (push) Waiting to run
CI / test-sanitizer-address (push) Waiting to run
CI / test-rdma (push) Waiting to run
CI / build-debian-old (push) Waiting to run
CI / build-macos-latest (push) Waiting to run
CI / build-32bit (push) Waiting to run
CI / build-libc-malloc (push) Waiting to run
CI / build-almalinux8-jemalloc (push) Waiting to run
CI / format-yaml (push) Waiting to run
Clang Format Check / clang-format-check (push) Waiting to run
Codecov / code-coverage (push) Waiting to run
External Server Tests / test-external-standalone (push) Waiting to run
External Server Tests / test-external-cluster (push) Waiting to run
External Server Tests / test-external-nodebug (push) Waiting to run
Spellcheck / Spellcheck (push) Waiting to run
2025-04-02 19:37:25 +00:00
a2383d93fd Update README.md
Some checks are pending
CI / test-ubuntu-latest (push) Waiting to run
CI / test-ubuntu-latest-cmake (push) Waiting to run
CI / test-sanitizer-address (push) Waiting to run
CI / test-rdma (push) Waiting to run
CI / build-debian-old (push) Waiting to run
CI / build-macos-latest (push) Waiting to run
CI / build-32bit (push) Waiting to run
CI / build-libc-malloc (push) Waiting to run
CI / build-almalinux8-jemalloc (push) Waiting to run
CI / format-yaml (push) Waiting to run
Clang Format Check / clang-format-check (push) Waiting to run
Codecov / code-coverage (push) Waiting to run
External Server Tests / test-external-standalone (push) Waiting to run
External Server Tests / test-external-cluster (push) Waiting to run
External Server Tests / test-external-nodebug (push) Waiting to run
Spellcheck / Spellcheck (push) Waiting to run
2025-04-02 19:36:45 +00:00
020b96fe67 Update README.md
Some checks are pending
CI / test-ubuntu-latest (push) Waiting to run
CI / test-ubuntu-latest-cmake (push) Waiting to run
CI / test-sanitizer-address (push) Waiting to run
CI / test-rdma (push) Waiting to run
CI / build-debian-old (push) Waiting to run
CI / build-macos-latest (push) Waiting to run
CI / build-32bit (push) Waiting to run
CI / build-libc-malloc (push) Waiting to run
CI / build-almalinux8-jemalloc (push) Waiting to run
CI / format-yaml (push) Waiting to run
Clang Format Check / clang-format-check (push) Waiting to run
Codecov / code-coverage (push) Waiting to run
External Server Tests / test-external-standalone (push) Waiting to run
External Server Tests / test-external-cluster (push) Waiting to run
External Server Tests / test-external-nodebug (push) Waiting to run
Spellcheck / Spellcheck (push) Waiting to run
2025-04-02 17:23:57 +00:00
04386d7cf8 Update cluster
Some checks failed
CI / build-almalinux8-jemalloc (push) Has been cancelled
Clang Format Check / clang-format-check (push) Has been cancelled
Codecov / code-coverage (push) Has been cancelled
CI / test-ubuntu-latest (push) Has been cancelled
CI / test-ubuntu-latest-cmake (push) Has been cancelled
CI / test-sanitizer-address (push) Has been cancelled
CI / test-rdma (push) Has been cancelled
CI / build-debian-old (push) Has been cancelled
CI / build-macos-latest (push) Has been cancelled
CI / build-32bit (push) Has been cancelled
CI / build-libc-malloc (push) Has been cancelled
CI / format-yaml (push) Has been cancelled
Spellcheck / Spellcheck (push) Has been cancelled
External Server Tests / test-external-standalone (push) Has been cancelled
External Server Tests / test-external-cluster (push) Has been cancelled
External Server Tests / test-external-nodebug (push) Has been cancelled
CodeQL / Analyze (cpp) (push) Has been cancelled
2025-03-23 19:41:33 +00:00
40a6e15c30 Update futriix.conf
Some checks are pending
CI / test-sanitizer-address (push) Waiting to run
CI / test-rdma (push) Waiting to run
CI / build-debian-old (push) Waiting to run
CI / build-macos-latest (push) Waiting to run
CI / build-32bit (push) Waiting to run
CI / build-libc-malloc (push) Waiting to run
CI / build-almalinux8-jemalloc (push) Waiting to run
CI / test-ubuntu-latest (push) Waiting to run
CI / test-ubuntu-latest-cmake (push) Waiting to run
CI / format-yaml (push) Waiting to run
Clang Format Check / clang-format-check (push) Waiting to run
Codecov / code-coverage (push) Waiting to run
External Server Tests / test-external-standalone (push) Waiting to run
External Server Tests / test-external-cluster (push) Waiting to run
External Server Tests / test-external-nodebug (push) Waiting to run
Spellcheck / Spellcheck (push) Waiting to run
2025-03-23 15:40:11 +00:00
8afff26a55 Delete sentinel.conf
Some checks are pending
CI / test-sanitizer-address (push) Waiting to run
CI / test-ubuntu-latest (push) Waiting to run
CI / test-ubuntu-latest-cmake (push) Waiting to run
CI / test-rdma (push) Waiting to run
CI / build-debian-old (push) Waiting to run
CI / build-macos-latest (push) Waiting to run
CI / build-32bit (push) Waiting to run
CI / build-libc-malloc (push) Waiting to run
CI / build-almalinux8-jemalloc (push) Waiting to run
CI / format-yaml (push) Waiting to run
Clang Format Check / clang-format-check (push) Waiting to run
Codecov / code-coverage (push) Waiting to run
External Server Tests / test-external-standalone (push) Waiting to run
External Server Tests / test-external-cluster (push) Waiting to run
External Server Tests / test-external-nodebug (push) Waiting to run
Spellcheck / Spellcheck (push) Waiting to run
2025-03-23 15:34:20 +00:00
aec56296e2 Upload files to "/"
Some checks are pending
CI / test-sanitizer-address (push) Waiting to run
CI / test-ubuntu-latest (push) Waiting to run
CI / test-ubuntu-latest-cmake (push) Waiting to run
CI / test-rdma (push) Waiting to run
CI / build-debian-old (push) Waiting to run
CI / build-macos-latest (push) Waiting to run
Codecov / code-coverage (push) Waiting to run
External Server Tests / test-external-standalone (push) Waiting to run
External Server Tests / test-external-cluster (push) Waiting to run
External Server Tests / test-external-nodebug (push) Waiting to run
Spellcheck / Spellcheck (push) Waiting to run
CI / build-32bit (push) Waiting to run
CI / build-libc-malloc (push) Waiting to run
CI / build-almalinux8-jemalloc (push) Waiting to run
CI / format-yaml (push) Waiting to run
Clang Format Check / clang-format-check (push) Waiting to run
2025-03-23 15:31:44 +00:00
c07840b626 Delete cluster
Some checks are pending
CI / build-libc-malloc (push) Waiting to run
CI / build-almalinux8-jemalloc (push) Waiting to run
CI / test-rdma (push) Waiting to run
CI / build-debian-old (push) Waiting to run
CI / build-macos-latest (push) Waiting to run
CI / test-ubuntu-latest (push) Waiting to run
CI / test-ubuntu-latest-cmake (push) Waiting to run
CI / test-sanitizer-address (push) Waiting to run
CI / build-32bit (push) Waiting to run
CI / format-yaml (push) Waiting to run
Clang Format Check / clang-format-check (push) Waiting to run
Codecov / code-coverage (push) Waiting to run
External Server Tests / test-external-standalone (push) Waiting to run
External Server Tests / test-external-cluster (push) Waiting to run
External Server Tests / test-external-nodebug (push) Waiting to run
Spellcheck / Spellcheck (push) Waiting to run
2025-03-23 15:31:19 +00:00
f4aeea00bf Update README.md
Some checks are pending
CI / test-sanitizer-address (push) Waiting to run
CI / test-ubuntu-latest (push) Waiting to run
CI / test-ubuntu-latest-cmake (push) Waiting to run
CI / test-rdma (push) Waiting to run
CI / build-debian-old (push) Waiting to run
CI / build-macos-latest (push) Waiting to run
CI / build-32bit (push) Waiting to run
CI / build-libc-malloc (push) Waiting to run
CI / build-almalinux8-jemalloc (push) Waiting to run
CI / format-yaml (push) Waiting to run
Clang Format Check / clang-format-check (push) Waiting to run
Codecov / code-coverage (push) Waiting to run
External Server Tests / test-external-standalone (push) Waiting to run
External Server Tests / test-external-cluster (push) Waiting to run
External Server Tests / test-external-nodebug (push) Waiting to run
Spellcheck / Spellcheck (push) Waiting to run
2025-03-23 15:30:54 +00:00
0b2a5e53f4 Update README.md
Some checks are pending
CI / test-sanitizer-address (push) Waiting to run
CI / test-ubuntu-latest (push) Waiting to run
CI / test-ubuntu-latest-cmake (push) Waiting to run
CI / test-rdma (push) Waiting to run
CI / build-debian-old (push) Waiting to run
CI / build-macos-latest (push) Waiting to run
CI / build-32bit (push) Waiting to run
CI / build-libc-malloc (push) Waiting to run
CI / build-almalinux8-jemalloc (push) Waiting to run
CI / format-yaml (push) Waiting to run
Clang Format Check / clang-format-check (push) Waiting to run
Codecov / code-coverage (push) Waiting to run
External Server Tests / test-external-standalone (push) Waiting to run
External Server Tests / test-external-cluster (push) Waiting to run
External Server Tests / test-external-nodebug (push) Waiting to run
Spellcheck / Spellcheck (push) Waiting to run
2025-03-23 15:29:42 +00:00
7c17219702 Update README.md
Some checks are pending
CI / build-macos-latest (push) Waiting to run
CI / build-32bit (push) Waiting to run
CI / build-libc-malloc (push) Waiting to run
CI / build-almalinux8-jemalloc (push) Waiting to run
CI / test-sanitizer-address (push) Waiting to run
CI / test-rdma (push) Waiting to run
CI / test-ubuntu-latest (push) Waiting to run
CI / test-ubuntu-latest-cmake (push) Waiting to run
CI / build-debian-old (push) Waiting to run
CI / format-yaml (push) Waiting to run
Clang Format Check / clang-format-check (push) Waiting to run
Codecov / code-coverage (push) Waiting to run
External Server Tests / test-external-standalone (push) Waiting to run
External Server Tests / test-external-cluster (push) Waiting to run
External Server Tests / test-external-nodebug (push) Waiting to run
Spellcheck / Spellcheck (push) Waiting to run
2025-03-23 15:29:15 +00:00
ee50166b0c Update README.md
Some checks are pending
CI / test-ubuntu-latest (push) Waiting to run
CI / test-ubuntu-latest-cmake (push) Waiting to run
CI / test-sanitizer-address (push) Waiting to run
CI / test-rdma (push) Waiting to run
CI / build-debian-old (push) Waiting to run
CI / build-macos-latest (push) Waiting to run
CI / build-32bit (push) Waiting to run
CI / build-libc-malloc (push) Waiting to run
CI / build-almalinux8-jemalloc (push) Waiting to run
CI / format-yaml (push) Waiting to run
Clang Format Check / clang-format-check (push) Waiting to run
Codecov / code-coverage (push) Waiting to run
External Server Tests / test-external-standalone (push) Waiting to run
External Server Tests / test-external-cluster (push) Waiting to run
External Server Tests / test-external-nodebug (push) Waiting to run
Spellcheck / Spellcheck (push) Waiting to run
2025-03-23 15:26:26 +00:00
4414431a99 Update README.md
Some checks are pending
CI / build-almalinux8-jemalloc (push) Waiting to run
CI / test-ubuntu-latest (push) Waiting to run
CI / test-ubuntu-latest-cmake (push) Waiting to run
CI / test-sanitizer-address (push) Waiting to run
CI / test-rdma (push) Waiting to run
CI / build-debian-old (push) Waiting to run
CI / build-macos-latest (push) Waiting to run
CI / build-32bit (push) Waiting to run
CI / build-libc-malloc (push) Waiting to run
CI / format-yaml (push) Waiting to run
Clang Format Check / clang-format-check (push) Waiting to run
Codecov / code-coverage (push) Waiting to run
External Server Tests / test-external-standalone (push) Waiting to run
External Server Tests / test-external-cluster (push) Waiting to run
External Server Tests / test-external-nodebug (push) Waiting to run
Spellcheck / Spellcheck (push) Waiting to run
2025-03-23 15:24:19 +00:00
41464c0cad Delete src/modules/hellotype.c
Some checks failed
CI / test-ubuntu-latest (push) Has been cancelled
CI / test-ubuntu-latest-cmake (push) Has been cancelled
CI / test-sanitizer-address (push) Has been cancelled
CI / test-rdma (push) Has been cancelled
CI / build-debian-old (push) Has been cancelled
CI / build-macos-latest (push) Has been cancelled
CI / build-32bit (push) Has been cancelled
CI / build-libc-malloc (push) Has been cancelled
CI / build-almalinux8-jemalloc (push) Has been cancelled
CI / format-yaml (push) Has been cancelled
Clang Format Check / clang-format-check (push) Has been cancelled
Codecov / code-coverage (push) Has been cancelled
External Server Tests / test-external-standalone (push) Has been cancelled
External Server Tests / test-external-cluster (push) Has been cancelled
External Server Tests / test-external-nodebug (push) Has been cancelled
Spellcheck / Spellcheck (push) Has been cancelled
CodeQL / Analyze (cpp) (push) Has been cancelled
2025-02-15 15:56:13 +00:00
fe044a4d63 Delete src/modules/hellotimer.c
Some checks are pending
CI / test-ubuntu-latest (push) Waiting to run
CI / test-ubuntu-latest-cmake (push) Waiting to run
CI / test-sanitizer-address (push) Waiting to run
CI / test-rdma (push) Waiting to run
CI / build-debian-old (push) Waiting to run
CI / build-macos-latest (push) Waiting to run
CI / build-32bit (push) Waiting to run
CI / build-libc-malloc (push) Waiting to run
CI / build-almalinux8-jemalloc (push) Waiting to run
CI / format-yaml (push) Waiting to run
Clang Format Check / clang-format-check (push) Waiting to run
Codecov / code-coverage (push) Waiting to run
External Server Tests / test-external-nodebug (push) Waiting to run
External Server Tests / test-external-standalone (push) Waiting to run
External Server Tests / test-external-cluster (push) Waiting to run
Spellcheck / Spellcheck (push) Waiting to run
2025-02-15 15:56:04 +00:00
9bda1019e3 Delete src/modules/hellocluster.c
Some checks are pending
External Server Tests / test-external-standalone (push) Waiting to run
External Server Tests / test-external-cluster (push) Waiting to run
CI / build-almalinux8-jemalloc (push) Waiting to run
CI / test-ubuntu-latest (push) Waiting to run
CI / test-ubuntu-latest-cmake (push) Waiting to run
CI / test-sanitizer-address (push) Waiting to run
CI / test-rdma (push) Waiting to run
CI / build-debian-old (push) Waiting to run
CI / build-macos-latest (push) Waiting to run
CI / build-32bit (push) Waiting to run
CI / build-libc-malloc (push) Waiting to run
CI / format-yaml (push) Waiting to run
Clang Format Check / clang-format-check (push) Waiting to run
Codecov / code-coverage (push) Waiting to run
External Server Tests / test-external-nodebug (push) Waiting to run
Spellcheck / Spellcheck (push) Waiting to run
2025-02-15 15:55:55 +00:00
bd8d11b2d3 Delete src/modules/helloworld.c
Some checks are pending
CI / test-rdma (push) Waiting to run
CI / test-sanitizer-address (push) Waiting to run
CI / build-debian-old (push) Waiting to run
CI / build-macos-latest (push) Waiting to run
CI / build-32bit (push) Waiting to run
CI / build-libc-malloc (push) Waiting to run
CI / build-almalinux8-jemalloc (push) Waiting to run
CI / test-ubuntu-latest (push) Waiting to run
CI / test-ubuntu-latest-cmake (push) Waiting to run
CI / format-yaml (push) Waiting to run
Clang Format Check / clang-format-check (push) Waiting to run
Codecov / code-coverage (push) Waiting to run
External Server Tests / test-external-standalone (push) Waiting to run
External Server Tests / test-external-cluster (push) Waiting to run
External Server Tests / test-external-nodebug (push) Waiting to run
Spellcheck / Spellcheck (push) Waiting to run
2025-02-15 15:55:47 +00:00
8dc71285eb Delete src/modules/hellohook.c
Some checks are pending
CI / test-ubuntu-latest-cmake (push) Waiting to run
CI / test-rdma (push) Waiting to run
CI / build-debian-old (push) Waiting to run
CI / build-macos-latest (push) Waiting to run
CI / build-32bit (push) Waiting to run
CI / build-libc-malloc (push) Waiting to run
CI / build-almalinux8-jemalloc (push) Waiting to run
CI / format-yaml (push) Waiting to run
Clang Format Check / clang-format-check (push) Waiting to run
External Server Tests / test-external-cluster (push) Waiting to run
External Server Tests / test-external-nodebug (push) Waiting to run
Spellcheck / Spellcheck (push) Waiting to run
CI / test-sanitizer-address (push) Waiting to run
CI / test-ubuntu-latest (push) Waiting to run
Codecov / code-coverage (push) Waiting to run
External Server Tests / test-external-standalone (push) Waiting to run
2025-02-15 15:55:39 +00:00
9d6053c00f Delete src/modules/hellodict.c
Some checks are pending
CI / test-ubuntu-latest (push) Waiting to run
CI / test-ubuntu-latest-cmake (push) Waiting to run
CI / test-sanitizer-address (push) Waiting to run
CI / test-rdma (push) Waiting to run
CI / build-debian-old (push) Waiting to run
CI / build-macos-latest (push) Waiting to run
CI / build-32bit (push) Waiting to run
CI / build-libc-malloc (push) Waiting to run
CI / build-almalinux8-jemalloc (push) Waiting to run
CI / format-yaml (push) Waiting to run
Clang Format Check / clang-format-check (push) Waiting to run
Codecov / code-coverage (push) Waiting to run
External Server Tests / test-external-standalone (push) Waiting to run
External Server Tests / test-external-cluster (push) Waiting to run
External Server Tests / test-external-nodebug (push) Waiting to run
Spellcheck / Spellcheck (push) Waiting to run
2025-02-15 15:55:30 +00:00
9b9baca82c Delete src/modules/Makefile
Some checks are pending
CI / build-32bit (push) Waiting to run
CI / build-libc-malloc (push) Waiting to run
CI / build-almalinux8-jemalloc (push) Waiting to run
CI / format-yaml (push) Waiting to run
CI / test-ubuntu-latest (push) Waiting to run
CI / test-ubuntu-latest-cmake (push) Waiting to run
CI / test-sanitizer-address (push) Waiting to run
CI / test-rdma (push) Waiting to run
CI / build-debian-old (push) Waiting to run
CI / build-macos-latest (push) Waiting to run
Clang Format Check / clang-format-check (push) Waiting to run
Codecov / code-coverage (push) Waiting to run
External Server Tests / test-external-standalone (push) Waiting to run
External Server Tests / test-external-cluster (push) Waiting to run
External Server Tests / test-external-nodebug (push) Waiting to run
Spellcheck / Spellcheck (push) Waiting to run
2025-02-15 15:55:21 +00:00
315bc059be Delete src/modules/helloblock.c
Some checks are pending
CI / test-sanitizer-address (push) Waiting to run
CI / test-rdma (push) Waiting to run
CI / build-debian-old (push) Waiting to run
CI / build-macos-latest (push) Waiting to run
CI / build-32bit (push) Waiting to run
CI / build-libc-malloc (push) Waiting to run
CI / build-almalinux8-jemalloc (push) Waiting to run
CI / test-ubuntu-latest (push) Waiting to run
CI / test-ubuntu-latest-cmake (push) Waiting to run
CI / format-yaml (push) Waiting to run
Clang Format Check / clang-format-check (push) Waiting to run
Codecov / code-coverage (push) Waiting to run
External Server Tests / test-external-standalone (push) Waiting to run
External Server Tests / test-external-cluster (push) Waiting to run
External Server Tests / test-external-nodebug (push) Waiting to run
Spellcheck / Spellcheck (push) Waiting to run
2025-02-15 15:54:55 +00:00
4cf16b5249 Delete src/modules/helloacl.c
Some checks are pending
CI / build-32bit (push) Waiting to run
CI / build-libc-malloc (push) Waiting to run
CI / build-almalinux8-jemalloc (push) Waiting to run
CI / test-sanitizer-address (push) Waiting to run
CI / test-rdma (push) Waiting to run
CI / build-debian-old (push) Waiting to run
CI / build-macos-latest (push) Waiting to run
CI / test-ubuntu-latest (push) Waiting to run
CI / test-ubuntu-latest-cmake (push) Waiting to run
CI / format-yaml (push) Waiting to run
Clang Format Check / clang-format-check (push) Waiting to run
Codecov / code-coverage (push) Waiting to run
External Server Tests / test-external-standalone (push) Waiting to run
External Server Tests / test-external-cluster (push) Waiting to run
External Server Tests / test-external-nodebug (push) Waiting to run
Spellcheck / Spellcheck (push) Waiting to run
2025-02-15 15:54:40 +00:00
fe3c1c34b0 Delete valkey.conf
Some checks failed
CI / test-rdma (push) Has been cancelled
CI / build-debian-old (push) Has been cancelled
CI / build-macos-latest (push) Has been cancelled
CI / build-32bit (push) Has been cancelled
CI / build-libc-malloc (push) Has been cancelled
CI / build-almalinux8-jemalloc (push) Has been cancelled
CI / test-ubuntu-latest (push) Has been cancelled
CI / test-ubuntu-latest-cmake (push) Has been cancelled
CI / test-sanitizer-address (push) Has been cancelled
CI / format-yaml (push) Has been cancelled
Clang Format Check / clang-format-check (push) Has been cancelled
Codecov / code-coverage (push) Has been cancelled
External Server Tests / test-external-standalone (push) Has been cancelled
External Server Tests / test-external-cluster (push) Has been cancelled
External Server Tests / test-external-nodebug (push) Has been cancelled
Spellcheck / Spellcheck (push) Has been cancelled
2025-02-13 19:39:37 +00:00
5979980595 Upload files to "/"
Some checks are pending
CI / test-rdma (push) Waiting to run
CI / build-debian-old (push) Waiting to run
CI / build-macos-latest (push) Waiting to run
CI / build-32bit (push) Waiting to run
CI / build-libc-malloc (push) Waiting to run
CI / build-almalinux8-jemalloc (push) Waiting to run
CI / test-ubuntu-latest (push) Waiting to run
CI / test-ubuntu-latest-cmake (push) Waiting to run
CI / test-sanitizer-address (push) Waiting to run
CI / format-yaml (push) Waiting to run
Clang Format Check / clang-format-check (push) Waiting to run
Codecov / code-coverage (push) Waiting to run
External Server Tests / test-external-standalone (push) Waiting to run
External Server Tests / test-external-cluster (push) Waiting to run
External Server Tests / test-external-nodebug (push) Waiting to run
Spellcheck / Spellcheck (push) Waiting to run
2025-02-13 19:38:56 +00:00
7317466c05 Update src/rdb.c
Some checks failed
Codecov / code-coverage (push) Has been cancelled
CI / test-ubuntu-latest (push) Has been cancelled
CI / test-ubuntu-latest-cmake (push) Has been cancelled
CI / test-sanitizer-address (push) Has been cancelled
CI / test-rdma (push) Has been cancelled
CI / build-debian-old (push) Has been cancelled
CI / build-macos-latest (push) Has been cancelled
CI / build-32bit (push) Has been cancelled
CI / build-libc-malloc (push) Has been cancelled
CI / build-almalinux8-jemalloc (push) Has been cancelled
Clang Format Check / clang-format-check (push) Has been cancelled
CI / format-yaml (push) Has been cancelled
External Server Tests / test-external-standalone (push) Has been cancelled
External Server Tests / test-external-cluster (push) Has been cancelled
External Server Tests / test-external-nodebug (push) Has been cancelled
Spellcheck / Spellcheck (push) Has been cancelled
2025-02-10 20:30:08 +00:00
18e232f0e5 Upload files to "/"
Some checks failed
CI / test-ubuntu-latest (push) Has been cancelled
CI / test-ubuntu-latest-cmake (push) Has been cancelled
CI / test-sanitizer-address (push) Has been cancelled
CI / test-rdma (push) Has been cancelled
CI / build-debian-old (push) Has been cancelled
CI / build-macos-latest (push) Has been cancelled
CI / build-32bit (push) Has been cancelled
CI / build-libc-malloc (push) Has been cancelled
CI / build-almalinux8-jemalloc (push) Has been cancelled
CI / format-yaml (push) Has been cancelled
Clang Format Check / clang-format-check (push) Has been cancelled
Codecov / code-coverage (push) Has been cancelled
External Server Tests / test-external-standalone (push) Has been cancelled
External Server Tests / test-external-cluster (push) Has been cancelled
External Server Tests / test-external-nodebug (push) Has been cancelled
Spellcheck / Spellcheck (push) Has been cancelled
Coverity Scan / coverity (push) Has been cancelled
CodeQL / Analyze (cpp) (push) Has been cancelled
2025-02-03 20:02:39 +00:00
a5601ba85a Delete cluster
Some checks are pending
CI / test-ubuntu-latest (push) Waiting to run
CI / test-ubuntu-latest-cmake (push) Waiting to run
CI / test-sanitizer-address (push) Waiting to run
CI / test-rdma (push) Waiting to run
CI / build-debian-old (push) Waiting to run
CI / build-macos-latest (push) Waiting to run
CI / build-32bit (push) Waiting to run
CI / build-libc-malloc (push) Waiting to run
CI / build-almalinux8-jemalloc (push) Waiting to run
CI / format-yaml (push) Waiting to run
Clang Format Check / clang-format-check (push) Waiting to run
Codecov / code-coverage (push) Waiting to run
External Server Tests / test-external-standalone (push) Waiting to run
External Server Tests / test-external-cluster (push) Waiting to run
External Server Tests / test-external-nodebug (push) Waiting to run
Spellcheck / Spellcheck (push) Waiting to run
2025-02-03 20:01:27 +00:00
7eaad5782b Delete utils/create-cluster/create-cluster
Some checks are pending
CI / build-32bit (push) Waiting to run
CI / build-libc-malloc (push) Waiting to run
CI / build-almalinux8-jemalloc (push) Waiting to run
CI / format-yaml (push) Waiting to run
CI / test-ubuntu-latest (push) Waiting to run
CI / test-ubuntu-latest-cmake (push) Waiting to run
CI / test-sanitizer-address (push) Waiting to run
CI / test-rdma (push) Waiting to run
CI / build-debian-old (push) Waiting to run
CI / build-macos-latest (push) Waiting to run
Clang Format Check / clang-format-check (push) Waiting to run
Codecov / code-coverage (push) Waiting to run
External Server Tests / test-external-standalone (push) Waiting to run
External Server Tests / test-external-cluster (push) Waiting to run
External Server Tests / test-external-nodebug (push) Waiting to run
Spellcheck / Spellcheck (push) Waiting to run
2025-02-03 20:01:10 +00:00
5cba04d02c Delete utils/create-cluster/.gitignore
Some checks are pending
CI / test-ubuntu-latest (push) Waiting to run
CI / test-ubuntu-latest-cmake (push) Waiting to run
CI / test-sanitizer-address (push) Waiting to run
CI / test-rdma (push) Waiting to run
CI / build-debian-old (push) Waiting to run
CI / build-macos-latest (push) Waiting to run
CI / build-32bit (push) Waiting to run
CI / build-libc-malloc (push) Waiting to run
CI / build-almalinux8-jemalloc (push) Waiting to run
CI / format-yaml (push) Waiting to run
Clang Format Check / clang-format-check (push) Waiting to run
Codecov / code-coverage (push) Waiting to run
External Server Tests / test-external-standalone (push) Waiting to run
External Server Tests / test-external-cluster (push) Waiting to run
External Server Tests / test-external-nodebug (push) Waiting to run
Spellcheck / Spellcheck (push) Waiting to run
2025-02-03 20:01:01 +00:00
e9a66d0ae0 Delete utils/create-cluster/README
Some checks are pending
CI / test-ubuntu-latest (push) Waiting to run
CI / test-ubuntu-latest-cmake (push) Waiting to run
CI / test-sanitizer-address (push) Waiting to run
CI / test-rdma (push) Waiting to run
CI / build-debian-old (push) Waiting to run
CI / build-macos-latest (push) Waiting to run
CI / build-32bit (push) Waiting to run
CI / build-libc-malloc (push) Waiting to run
CI / build-almalinux8-jemalloc (push) Waiting to run
CI / format-yaml (push) Waiting to run
Clang Format Check / clang-format-check (push) Waiting to run
Codecov / code-coverage (push) Waiting to run
External Server Tests / test-external-standalone (push) Waiting to run
External Server Tests / test-external-cluster (push) Waiting to run
External Server Tests / test-external-nodebug (push) Waiting to run
Spellcheck / Spellcheck (push) Waiting to run
2025-02-03 20:00:51 +00:00
e6ad354b20 Update README.md
Some checks are pending
CI / build-32bit (push) Waiting to run
CI / build-libc-malloc (push) Waiting to run
CI / build-almalinux8-jemalloc (push) Waiting to run
CI / test-ubuntu-latest (push) Waiting to run
CI / test-ubuntu-latest-cmake (push) Waiting to run
CI / test-sanitizer-address (push) Waiting to run
CI / test-rdma (push) Waiting to run
CI / build-debian-old (push) Waiting to run
CI / build-macos-latest (push) Waiting to run
CI / format-yaml (push) Waiting to run
Clang Format Check / clang-format-check (push) Waiting to run
Codecov / code-coverage (push) Waiting to run
External Server Tests / test-external-standalone (push) Waiting to run
External Server Tests / test-external-cluster (push) Waiting to run
External Server Tests / test-external-nodebug (push) Waiting to run
Spellcheck / Spellcheck (push) Waiting to run
2025-02-03 18:47:46 +00:00
be5f5e18e0 Update README.md
Some checks are pending
CI / build-32bit (push) Waiting to run
CI / build-libc-malloc (push) Waiting to run
CI / build-almalinux8-jemalloc (push) Waiting to run
CI / test-ubuntu-latest (push) Waiting to run
CI / test-ubuntu-latest-cmake (push) Waiting to run
CI / test-sanitizer-address (push) Waiting to run
CI / test-rdma (push) Waiting to run
CI / build-debian-old (push) Waiting to run
CI / build-macos-latest (push) Waiting to run
CI / format-yaml (push) Waiting to run
Clang Format Check / clang-format-check (push) Waiting to run
Codecov / code-coverage (push) Waiting to run
External Server Tests / test-external-standalone (push) Waiting to run
External Server Tests / test-external-cluster (push) Waiting to run
External Server Tests / test-external-nodebug (push) Waiting to run
Spellcheck / Spellcheck (push) Waiting to run
2025-02-03 18:42:56 +00:00
0865036a8d Update COPYING
Some checks are pending
CI / test-ubuntu-latest (push) Waiting to run
CI / test-ubuntu-latest-cmake (push) Waiting to run
CI / test-sanitizer-address (push) Waiting to run
CI / test-rdma (push) Waiting to run
CI / build-debian-old (push) Waiting to run
CI / build-macos-latest (push) Waiting to run
CI / build-32bit (push) Waiting to run
CI / build-libc-malloc (push) Waiting to run
CI / build-almalinux8-jemalloc (push) Waiting to run
CI / format-yaml (push) Waiting to run
Clang Format Check / clang-format-check (push) Waiting to run
Codecov / code-coverage (push) Waiting to run
External Server Tests / test-external-standalone (push) Waiting to run
External Server Tests / test-external-cluster (push) Waiting to run
External Server Tests / test-external-nodebug (push) Waiting to run
Spellcheck / Spellcheck (push) Waiting to run
2025-02-03 18:41:48 +00:00
e6f6239b05 Delete 00-RELEASENOTES
Some checks are pending
CI / test-ubuntu-latest (push) Waiting to run
CI / test-ubuntu-latest-cmake (push) Waiting to run
CI / test-sanitizer-address (push) Waiting to run
CI / test-rdma (push) Waiting to run
CI / build-debian-old (push) Waiting to run
CI / build-macos-latest (push) Waiting to run
CI / build-32bit (push) Waiting to run
CI / build-libc-malloc (push) Waiting to run
CI / build-almalinux8-jemalloc (push) Waiting to run
CI / format-yaml (push) Waiting to run
Clang Format Check / clang-format-check (push) Waiting to run
Codecov / code-coverage (push) Waiting to run
External Server Tests / test-external-standalone (push) Waiting to run
External Server Tests / test-external-cluster (push) Waiting to run
External Server Tests / test-external-nodebug (push) Waiting to run
Spellcheck / Spellcheck (push) Waiting to run
2025-02-03 18:40:23 +00:00
3667aefcd7 Upload files to "src"
Some checks are pending
CI / test-sanitizer-address (push) Waiting to run
CI / test-rdma (push) Waiting to run
CI / build-debian-old (push) Waiting to run
CI / build-macos-latest (push) Waiting to run
CI / build-32bit (push) Waiting to run
CI / build-libc-malloc (push) Waiting to run
CI / build-almalinux8-jemalloc (push) Waiting to run
CI / test-ubuntu-latest (push) Waiting to run
CI / test-ubuntu-latest-cmake (push) Waiting to run
CI / format-yaml (push) Waiting to run
Clang Format Check / clang-format-check (push) Waiting to run
Codecov / code-coverage (push) Waiting to run
External Server Tests / test-external-standalone (push) Waiting to run
External Server Tests / test-external-cluster (push) Waiting to run
External Server Tests / test-external-nodebug (push) Waiting to run
Spellcheck / Spellcheck (push) Waiting to run
2025-02-03 18:39:44 +00:00
0f15725225 Delete src/server.c
Some checks are pending
CI / test-ubuntu-latest (push) Waiting to run
CI / test-ubuntu-latest-cmake (push) Waiting to run
CI / test-sanitizer-address (push) Waiting to run
CI / test-rdma (push) Waiting to run
CI / build-debian-old (push) Waiting to run
CI / build-macos-latest (push) Waiting to run
CI / build-32bit (push) Waiting to run
CI / build-libc-malloc (push) Waiting to run
CI / build-almalinux8-jemalloc (push) Waiting to run
CI / format-yaml (push) Waiting to run
Clang Format Check / clang-format-check (push) Waiting to run
Codecov / code-coverage (push) Waiting to run
External Server Tests / test-external-standalone (push) Waiting to run
External Server Tests / test-external-cluster (push) Waiting to run
External Server Tests / test-external-nodebug (push) Waiting to run
Spellcheck / Spellcheck (push) Waiting to run
2025-02-03 18:38:56 +00:00
feb27073ca Upload files to "src"
Some checks are pending
CI / build-macos-latest (push) Waiting to run
CI / test-rdma (push) Waiting to run
CI / test-ubuntu-latest (push) Waiting to run
CI / test-ubuntu-latest-cmake (push) Waiting to run
CI / test-sanitizer-address (push) Waiting to run
CI / build-debian-old (push) Waiting to run
CI / build-32bit (push) Waiting to run
CI / build-libc-malloc (push) Waiting to run
CI / build-almalinux8-jemalloc (push) Waiting to run
CI / format-yaml (push) Waiting to run
Clang Format Check / clang-format-check (push) Waiting to run
Codecov / code-coverage (push) Waiting to run
External Server Tests / test-external-standalone (push) Waiting to run
External Server Tests / test-external-cluster (push) Waiting to run
External Server Tests / test-external-nodebug (push) Waiting to run
Spellcheck / Spellcheck (push) Waiting to run
2025-02-03 18:38:34 +00:00
3a40802dfe Delete src/valkey-cli.c
Some checks are pending
CI / build-32bit (push) Waiting to run
CI / build-libc-malloc (push) Waiting to run
CI / build-almalinux8-jemalloc (push) Waiting to run
CI / test-ubuntu-latest (push) Waiting to run
CI / test-ubuntu-latest-cmake (push) Waiting to run
CI / test-sanitizer-address (push) Waiting to run
CI / test-rdma (push) Waiting to run
CI / build-debian-old (push) Waiting to run
CI / build-macos-latest (push) Waiting to run
CI / format-yaml (push) Waiting to run
Clang Format Check / clang-format-check (push) Waiting to run
Codecov / code-coverage (push) Waiting to run
External Server Tests / test-external-standalone (push) Waiting to run
External Server Tests / test-external-cluster (push) Waiting to run
External Server Tests / test-external-nodebug (push) Waiting to run
Spellcheck / Spellcheck (push) Waiting to run
2025-02-03 18:38:16 +00:00
28a627c170 Upload files to "src"
Some checks are pending
CI / build-32bit (push) Waiting to run
CI / build-libc-malloc (push) Waiting to run
CI / build-almalinux8-jemalloc (push) Waiting to run
CI / test-sanitizer-address (push) Waiting to run
CI / test-rdma (push) Waiting to run
CI / build-debian-old (push) Waiting to run
CI / build-macos-latest (push) Waiting to run
CI / test-ubuntu-latest (push) Waiting to run
CI / test-ubuntu-latest-cmake (push) Waiting to run
CI / format-yaml (push) Waiting to run
Clang Format Check / clang-format-check (push) Waiting to run
Codecov / code-coverage (push) Waiting to run
External Server Tests / test-external-standalone (push) Waiting to run
External Server Tests / test-external-cluster (push) Waiting to run
External Server Tests / test-external-nodebug (push) Waiting to run
Spellcheck / Spellcheck (push) Waiting to run
2025-02-03 17:34:11 +00:00
f9d25adab9 Upload files to "src"
Some checks are pending
CI / build-macos-latest (push) Waiting to run
CI / build-32bit (push) Waiting to run
CI / build-libc-malloc (push) Waiting to run
CI / build-almalinux8-jemalloc (push) Waiting to run
CI / test-ubuntu-latest (push) Waiting to run
CI / test-ubuntu-latest-cmake (push) Waiting to run
CI / test-sanitizer-address (push) Waiting to run
CI / test-rdma (push) Waiting to run
CI / build-debian-old (push) Waiting to run
CI / format-yaml (push) Waiting to run
Clang Format Check / clang-format-check (push) Waiting to run
Codecov / code-coverage (push) Waiting to run
External Server Tests / test-external-standalone (push) Waiting to run
External Server Tests / test-external-cluster (push) Waiting to run
External Server Tests / test-external-nodebug (push) Waiting to run
Spellcheck / Spellcheck (push) Waiting to run
2025-02-03 17:32:27 +00:00
410738342c Upload files to "src"
Some checks are pending
CI / test-ubuntu-latest (push) Waiting to run
CI / test-ubuntu-latest-cmake (push) Waiting to run
CI / test-sanitizer-address (push) Waiting to run
CI / test-rdma (push) Waiting to run
CI / build-debian-old (push) Waiting to run
CI / build-macos-latest (push) Waiting to run
CI / build-32bit (push) Waiting to run
CI / build-libc-malloc (push) Waiting to run
CI / build-almalinux8-jemalloc (push) Waiting to run
CI / format-yaml (push) Waiting to run
Clang Format Check / clang-format-check (push) Waiting to run
Codecov / code-coverage (push) Waiting to run
External Server Tests / test-external-standalone (push) Waiting to run
External Server Tests / test-external-cluster (push) Waiting to run
External Server Tests / test-external-nodebug (push) Waiting to run
Spellcheck / Spellcheck (push) Waiting to run
2025-02-03 17:32:06 +00:00
da64d5a94f Delete src/config.c
Some checks are pending
CI / test-sanitizer-address (push) Waiting to run
CI / test-ubuntu-latest (push) Waiting to run
CI / test-ubuntu-latest-cmake (push) Waiting to run
CI / test-rdma (push) Waiting to run
CI / build-debian-old (push) Waiting to run
CI / build-macos-latest (push) Waiting to run
CI / build-32bit (push) Waiting to run
CI / build-libc-malloc (push) Waiting to run
CI / build-almalinux8-jemalloc (push) Waiting to run
CI / format-yaml (push) Waiting to run
Clang Format Check / clang-format-check (push) Waiting to run
Codecov / code-coverage (push) Waiting to run
External Server Tests / test-external-standalone (push) Waiting to run
External Server Tests / test-external-cluster (push) Waiting to run
External Server Tests / test-external-nodebug (push) Waiting to run
Spellcheck / Spellcheck (push) Waiting to run
2025-02-03 16:55:31 +00:00
f8730df47f Delete src/asciilogo.h
Some checks are pending
CI / build-32bit (push) Waiting to run
CI / build-libc-malloc (push) Waiting to run
CI / build-almalinux8-jemalloc (push) Waiting to run
CI / test-ubuntu-latest (push) Waiting to run
CI / test-ubuntu-latest-cmake (push) Waiting to run
CI / test-sanitizer-address (push) Waiting to run
CI / test-rdma (push) Waiting to run
CI / build-debian-old (push) Waiting to run
CI / build-macos-latest (push) Waiting to run
CI / format-yaml (push) Waiting to run
Clang Format Check / clang-format-check (push) Waiting to run
Codecov / code-coverage (push) Waiting to run
External Server Tests / test-external-standalone (push) Waiting to run
External Server Tests / test-external-cluster (push) Waiting to run
External Server Tests / test-external-nodebug (push) Waiting to run
Spellcheck / Spellcheck (push) Waiting to run
2025-02-03 16:55:10 +00:00
2bd092bcb8 Delete src/valkey-cli.c
Some checks are pending
CI / build-32bit (push) Waiting to run
CI / build-libc-malloc (push) Waiting to run
CI / build-almalinux8-jemalloc (push) Waiting to run
CI / test-ubuntu-latest (push) Waiting to run
CI / test-ubuntu-latest-cmake (push) Waiting to run
CI / test-sanitizer-address (push) Waiting to run
CI / test-rdma (push) Waiting to run
CI / build-debian-old (push) Waiting to run
CI / build-macos-latest (push) Waiting to run
CI / format-yaml (push) Waiting to run
Clang Format Check / clang-format-check (push) Waiting to run
Codecov / code-coverage (push) Waiting to run
External Server Tests / test-external-standalone (push) Waiting to run
External Server Tests / test-external-cluster (push) Waiting to run
External Server Tests / test-external-nodebug (push) Waiting to run
Spellcheck / Spellcheck (push) Waiting to run
2025-02-03 16:54:47 +00:00
2ccd70a257 Delete src/server.c
Some checks are pending
CI / build-32bit (push) Waiting to run
CI / build-libc-malloc (push) Waiting to run
CI / build-almalinux8-jemalloc (push) Waiting to run
CI / test-ubuntu-latest (push) Waiting to run
CI / test-ubuntu-latest-cmake (push) Waiting to run
CI / test-sanitizer-address (push) Waiting to run
CI / test-rdma (push) Waiting to run
CI / build-debian-old (push) Waiting to run
CI / build-macos-latest (push) Waiting to run
CI / format-yaml (push) Waiting to run
Clang Format Check / clang-format-check (push) Waiting to run
Codecov / code-coverage (push) Waiting to run
External Server Tests / test-external-standalone (push) Waiting to run
External Server Tests / test-external-cluster (push) Waiting to run
External Server Tests / test-external-nodebug (push) Waiting to run
Spellcheck / Spellcheck (push) Waiting to run
2025-02-03 16:53:07 +00:00
a4f2c53f46 Update README.md
Some checks are pending
CI / test-ubuntu-latest (push) Waiting to run
CI / test-ubuntu-latest-cmake (push) Waiting to run
CI / test-sanitizer-address (push) Waiting to run
CI / test-rdma (push) Waiting to run
CI / build-debian-old (push) Waiting to run
CI / build-macos-latest (push) Waiting to run
CI / build-32bit (push) Waiting to run
CI / build-libc-malloc (push) Waiting to run
CI / build-almalinux8-jemalloc (push) Waiting to run
CI / format-yaml (push) Waiting to run
Clang Format Check / clang-format-check (push) Waiting to run
Codecov / code-coverage (push) Waiting to run
External Server Tests / test-external-standalone (push) Waiting to run
External Server Tests / test-external-cluster (push) Waiting to run
External Server Tests / test-external-nodebug (push) Waiting to run
Spellcheck / Spellcheck (push) Waiting to run
2025-02-02 22:06:37 +00:00
916e917d50 Upload files to "/"
Some checks are pending
CI / test-ubuntu-latest (push) Waiting to run
CI / test-ubuntu-latest-cmake (push) Waiting to run
CI / test-sanitizer-address (push) Waiting to run
CI / test-rdma (push) Waiting to run
CI / build-debian-old (push) Waiting to run
CI / build-macos-latest (push) Waiting to run
CI / build-32bit (push) Waiting to run
CI / build-libc-malloc (push) Waiting to run
CI / build-almalinux8-jemalloc (push) Waiting to run
CI / format-yaml (push) Waiting to run
Clang Format Check / clang-format-check (push) Waiting to run
Codecov / code-coverage (push) Waiting to run
External Server Tests / test-external-standalone (push) Waiting to run
External Server Tests / test-external-cluster (push) Waiting to run
External Server Tests / test-external-nodebug (push) Waiting to run
Spellcheck / Spellcheck (push) Waiting to run
2025-02-02 21:52:21 +00:00
9982efe26d Upload files to "/"
Some checks are pending
CI / build-32bit (push) Waiting to run
CI / build-libc-malloc (push) Waiting to run
CI / build-almalinux8-jemalloc (push) Waiting to run
CI / format-yaml (push) Waiting to run
CI / test-ubuntu-latest (push) Waiting to run
CI / test-ubuntu-latest-cmake (push) Waiting to run
CI / test-sanitizer-address (push) Waiting to run
CI / test-rdma (push) Waiting to run
CI / build-debian-old (push) Waiting to run
CI / build-macos-latest (push) Waiting to run
Clang Format Check / clang-format-check (push) Waiting to run
Codecov / code-coverage (push) Waiting to run
External Server Tests / test-external-standalone (push) Waiting to run
External Server Tests / test-external-cluster (push) Waiting to run
External Server Tests / test-external-nodebug (push) Waiting to run
Spellcheck / Spellcheck (push) Waiting to run
2025-02-02 21:51:12 +00:00
2cae0b1910 Update README.md
Some checks are pending
CI / build-32bit (push) Waiting to run
CI / build-libc-malloc (push) Waiting to run
CI / build-almalinux8-jemalloc (push) Waiting to run
CI / test-ubuntu-latest (push) Waiting to run
CI / test-ubuntu-latest-cmake (push) Waiting to run
CI / test-sanitizer-address (push) Waiting to run
CI / test-rdma (push) Waiting to run
CI / build-debian-old (push) Waiting to run
CI / build-macos-latest (push) Waiting to run
CI / format-yaml (push) Waiting to run
Clang Format Check / clang-format-check (push) Waiting to run
Codecov / code-coverage (push) Waiting to run
External Server Tests / test-external-standalone (push) Waiting to run
External Server Tests / test-external-cluster (push) Waiting to run
External Server Tests / test-external-nodebug (push) Waiting to run
Spellcheck / Spellcheck (push) Waiting to run
2025-02-02 21:50:41 +00:00
5664f394c0 Upload files to "src"
2025-02-02 21:47:42 +00:00
2ba8847b6d Delete src/Makefile
2025-02-02 21:46:38 +00:00
烈香
26c6f1af9b
Loop optimization: move maxlen check outside to reduce unnecessary checks (#1557)
A trivial PR: move the maxlen check outside the loop to reduce unnecessary checks.

---------

Signed-off-by: hengyouhai <hengyouhai@tuhu.cn>
Signed-off-by: 烈香 <hengyoush1@163.com>
Co-authored-by: hengyouhai <hengyouhai@tuhu.cn>
2025-02-01 05:10:32 -08:00
Harkrishn Patro
78bcc0a2cf
Update daily failure notification job list (#1648)
Two jobs were missing from the job list for failure notification

* test-ubuntu-tls-io-threads
* test-sanitizer-force-defrag

Signed-off-by: Harkrishn Patro <harkrisp@amazon.com>
2025-01-30 15:21:31 -08:00
Viktor Söderqvist
12ec3d5932
Increase timeout for cross-version-replication test (#1644)
Fixes #1641

Signed-off-by: Viktor Söderqvist <viktor.soderqvist@est.tech>
2025-01-29 13:29:35 -08:00
Madelyn Olson
d3aabd7f13
Hex encode the data in dump test (#1637)
Addresses the failure here:
https://github.com/valkey-io/valkey/actions/runs/13000845302/job/36259016156#step:5:7272.

This change does three things:
1. For some reason TCL 8.5 (which is used on macOS) is handling `\x03ba`
as `\xba`, according to
https://www.tcl-lang.org/man/tcl8.5/TclCmd/Tcl.htm#M27, so we encode
"bar" using hex escapes too.
2. Fix a spacing issue. 
3. Make it so that if the restore fails, it immediately errors.

---------

Signed-off-by: Madelyn Olson <madelyneolson@gmail.com>
Signed-off-by: Viktor Söderqvist <viktor.soderqvist@est.tech>
Co-authored-by: Viktor Söderqvist <viktor.soderqvist@est.tech>
2025-01-29 16:20:38 -05:00
xingbowang
ff8a528fd6
Fix a heap-use-after-free bug in cluster bus (#1643)
https://github.com/valkey-io/valkey/issues/1642

Avoid heap-use-after-free in cluster bus around node cleanup code.

freeClusterNode free the human_nodename.
https://github.com/valkey-io/valkey/blob/unstable/src/cluster_legacy.c#L1725
Then it calls freeClusterLink to free the links.
https://github.com/valkey-io/valkey/blob/unstable/src/cluster_legacy.c#L1730
freeClusterLink print human_nodename here, which just got freed by the
caller freeClusterNode.
https://github.com/valkey-io/valkey/blob/unstable/src/cluster_legacy.c#L1383
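A minimal sketch of an ordering that avoids the use-after-free, assuming the field and function names from the links above (the actual patch may differ):

```
void freeClusterNode(clusterNode *n) {
    /* Free the links first: freeClusterLink logs n->human_nodename,
     * so it must run while that sds is still valid. */
    if (n->link) freeClusterLink(n->link);
    if (n->inbound_link) freeClusterLink(n->inbound_link);
    sdsfree(n->human_nodename); /* release the name only afterwards */
    /* ... remaining cleanup elided ... */
    zfree(n);
}
```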

Signed-off-by: xingbowang <shawn.xingbo.wang@gmail.com>
2025-01-29 13:13:40 -08:00
Binbin
4b8f3ed9ac
Do command existence and arity checks when loading AOF to avoid crash (#1614)
Do command existence and arity checks when loading AOF to avoid crash

Currently, loading commands such as `cluster` or `cluster slots xxx`
from AOF will cause the server to crash.
1. `cluster` is a container command; executing its proc directly causes a
    crash because we do not check the subcommand and arity.
2. `cluster slots xxx` fails the arity check, replies with an error from
    the AOF client, and triggers a panic.

Of course, there are many other ways for a problematic AOF to cause the
panic, but it is still necessary to do some basic checks before executing.
In this way, in these basic cases, we can print useful error messages
instead of crashing directly.

Signed-off-by: Binbin <binloveplay1314@qq.com>
2025-01-30 01:06:13 +08:00
zhenwei pi
d72a97edf6
RDMA: Protect RDMA memory regions (#1602)
Use Linux syscall mmap/munmap to manage a RDMA memory region, then we
have a guard page protected VMA like (cat /proc/PID/maps):
 785018afe000-785018aff000 ---p 00000000 00:00 0  -> top guard page
 785018aff000-785018bff000 rw-p 00000000 00:00 0  -> RDMA memory region
 785018bff000-785018c00000 ---p 00000000 00:00 0  -> bottom guard page

Once any code accesses the memory unexpectedly, a segmentation fault occurs.
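A minimal sketch of how such a guarded region can be created with mmap/mprotect; the helper name is hypothetical and len is assumed page-aligned:

```
#include <sys/mman.h>
#include <unistd.h>

static void *rdmaMapGuardedRegion(size_t len) {
    size_t page = (size_t)sysconf(_SC_PAGESIZE);
    char *base = mmap(NULL, len + 2 * page, PROT_READ | PROT_WRITE,
                      MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (base == MAP_FAILED) return NULL;
    mprotect(base, page, PROT_NONE);              /* top guard page */
    mprotect(base + page + len, page, PROT_NONE); /* bottom guard page */
    return base + page; /* the usable RDMA memory region */
}
```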

Signed-off-by: zhenwei pi <zhenwei.pi@linux.dev>
Signed-off-by: zhenwei pi <pizhenwei@bytedance.com>
2025-01-28 12:22:16 -05:00
Wen Hui
ad60d6b7b3
Initialize one variable in struct to avoid risk (#1606)
In C, we should initialize every field in a struct; this PR fixes
one missed initialization.

---------

Signed-off-by: hwware <wen.hui.ware@gmail.com>
2025-01-28 11:37:41 -05:00
Madelyn Olson
f695c52acb
Fix timing issue in pause test (#1631) 2025-01-28 06:35:24 -08:00
ranshid
230efa4fbf
deflake tracking-redir-broken test (#1628)
This addresses 2 issues:

1. It is possible (somehow) that the inner server client (r) was not
using RESP3 when entering this test.
This makes sure it is.

2. In case the test failed, it might leave the redirection client closed.
There is a cross-test assumption that it should be open, so most of the
assert checks were moved to the end of the test.

example fail:
https://github.com/valkey-io/valkey/actions/runs/12979601179/job/36195523412

---------

Signed-off-by: Ran Shidlansik <ranshid@amazon.com>
2025-01-28 12:35:32 +02:00
Viktor Söderqvist
e9b8970e72
Relaxed RDB version check (#1604)
New config `rdb-version-check` with values:

* `strict`: Reject future RDB versions.
* `relaxed`: Try parsing future RDB versions and fail only when an
unknown RDB opcode or type is encountered.
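For example, the relaxed mode described above could be enabled with this config line:

```
rdb-version-check relaxed
```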

This can make it possible for Valkey 8.1 to try to read a dump from, for
example, Valkey 9.0 or later on a best-effort basis. The conditions for
when this is expected to work can be defined when the future Valkey
versions are released. Loading is expected to fail in the following
cases:

* If the data set contains any new key types or other data elements not
supported by the current version.
* If the RDB contains new representations or encodings of existing key
types or other data elements.

This change also prepares for the next RDB version bump. A range of RDB
versions (12-79) is reserved, since it's expected to be used by foreign
software RDB versions, so Valkey will not accept versions in this range
even with the `relaxed` version check. The DUMP/RESTORE format has no
magic string; only the RDB version number.

This change also prepares for the magic string to change from REDIS to
VALKEY next time we bump the RDB version.

Related to #1108.

---------

Signed-off-by: Viktor Söderqvist <viktor.soderqvist@est.tech>
Co-authored-by: Madelyn Olson <madelyneolson@gmail.com>
2025-01-27 18:44:24 +01:00
Viktor Söderqvist
7699a3a94a
Fix use-after-free in hashtableTwoPhasePopDelete (#1626)
A use-after-free has been detected by AddressSanitizer, such as in this
test run:

https://github.com/valkey-io/valkey/actions/runs/12981530413/job/36200075972?pr=1620#step:5:1339

`hashtableShrinkIfNeeded` may free one of the hash tables and invalidate
the variables used by the `fillBucketHole(ht, b, pos_in_bucket,
table_index)` call just after, causing a use-after-free. Filling the
bucket hole first and shrinking afterwards is assumed to solve the issue.
(Not reproduced locally.)
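A minimal sketch of the reordering, using the names from the text (not the exact patch):

```
/* Fill the hole while ht's tables are still valid... */
fillBucketHole(ht, b, pos_in_bucket, table_index);
/* ...then shrink, which may free and reallocate the tables. */
hashtableShrinkIfNeeded(ht);
```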

Signed-off-by: Viktor Söderqvist <viktor.soderqvist@est.tech>
2025-01-27 15:45:09 +01:00
Madelyn Olson
88a68303c0
Make sure to disable pause after fork for dual channel test (#1612)
Might close https://github.com/valkey-io/valkey/issues/1484.

I noticed that we don't disable pause after fork on the last test that
was getting executed, so it might get stuck in pause loops after the
test ends if it tries another psync for any reason.

---------

Signed-off-by: Madelyn Olson <madelyneolson@gmail.com>
2025-01-27 06:44:48 -08:00
Viktor Söderqvist
a18fcdb371
Deflake hashtable random fairness test (#1618)
Fixes intermittent failures in the hashtable random fairness unit test
when running with the `--accurate` flag.

https://github.com/valkey-io/valkey/actions/runs/12969591890/job/36173815884#step:10:105

The test case picks a random element out of 400, repeated 1M times, and
then checks that 60% of the elements are picked within 3 standard
deviations from the number of times they're expected to be picked. In
this test run (with `--accurate`), the expected number is 2500 and the
standard deviation is 50, which is only 2% of the expected value. This
makes the check too strict and makes the test flaky.

As an alternative, we allow 80% of the elements to be picked within 10%
of the expected number. With this alternative condition, we can also
raise the check for the non-edge case from 60% to 80% of the elements to
be within 3 standard deviations. (With fewer repetitions, 3 standard
deviations is greater than 10% of the expected value, so this new
condition only affects the `--accurate` test run.)

Additional change: Set a random seed to the hash function in the test
suite. Until now, we only seeded the random number generator.

Signed-off-by: Viktor Söderqvist <viktor.soderqvist@est.tech>
2025-01-27 10:13:46 +01:00
Viktor Söderqvist
66577573f2
Test coverage for COMMANDLOG HELP (#1617)
Fixes reply-schema-validator test job which needs coverage for all
commands.

Failing job:
https://github.com/valkey-io/valkey/actions/runs/12969591890/job/36173810824

Signed-off-by: Viktor Söderqvist <viktor.soderqvist@est.tech>
2025-01-27 04:38:54 +01:00
Harkrishn Patro
9071a5c8e6
Set GH actions job timeout to a day (#1540)
Signed-off-by: Harkrishn Patro <harkrisp@amazon.com>
2025-01-24 10:47:10 -08:00
zhaozhao.zz
3f21705a6c
Feature COMMANDLOG to record slow execution and large request/reply (#1294)
As discussed in PR #336.

We have different types of resources like CPU, memory, network, etc. The
`slowlog` can only record commands that eat lots of CPU during the
processing phase (it doesn't include network read/write time), but it
cannot record commands that consume lots of memory and network
bandwidth. For example:

1. Running a "SET key value(10 megabytes)" command would not be recorded
in the slowlog, since when processing it the SET command only inserts the
value's pointer into the db dict. But that command eats huge memory in
the query buffer and bandwidth from the network. In this case, just 1000
TPS can cause 10 GB/s of network flow.
2. Running a "GET key" command where the key's value length is 10
megabytes eats huge memory in the output buffer and bandwidth to the
network.

This PR introduces a new command `COMMANDLOG`, to log commands that
consume significant network bandwidth, including both input and output.
Users can retrieve the results using `COMMANDLOG get <count>
large-request` and `COMMANDLOG get <count> large-reply`, all subcommands
for `COMMANDLOG` are:

* `COMMANDLOG HELP`
* `COMMANDLOG GET <count> <slow|large-request|large-reply>`
* `COMMANDLOG LEN <slow|large-request|large-reply>`
* `COMMANDLOG RESET <slow|large-request|large-reply>`

And the slowlog is also incorporated into the commandlog.

For each of these three types, additional configs have been added for
control:

* `commandlog-request-larger-than` and
`commandlog-large-request-max-len` represent the threshold for large
requests (the unit is bytes) and the maximum number of commands that can
be recorded.
* `commandlog-reply-larger-than` and `commandlog-large-reply-max-len`
represent the threshold for large replies (the unit is bytes) and the
maximum number of commands that can be recorded.
* `commandlog-execution-slower-than` and
`commandlog-slow-execution-max-len` represent the threshold for slow
executions (the unit is microseconds) and the maximum number of commands
that can be recorded.
* Additionally, `slowlog-log-slower-than` and `slowlog-max-len` are now
aliases for the two slow-execution configs.
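Putting the new configs together, a hypothetical configuration (the threshold values here are illustrative only) might look like:

```
commandlog-execution-slower-than 10000   # microseconds
commandlog-slow-execution-max-len 128
commandlog-request-larger-than 16384     # bytes
commandlog-large-request-max-len 128
commandlog-reply-larger-than 16384       # bytes
commandlog-large-reply-max-len 128
```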

---------

Signed-off-by: zhaozhao.zz <zhaozhao.zz@alibaba-inc.com>
Co-authored-by: Madelyn Olson <madelyneolson@gmail.com>
Co-authored-by: Ping Xie <pingxie@outlook.com>
2025-01-24 11:41:40 +08:00
Nadav Gigi
f2510783f9
Accelerate hash table iterator with value prefetching (#1568)
This PR builds upon the [previous entry prefetching
optimization](https://github.com/valkey-io/valkey/pull/1501) to further
enhance performance by implementing value prefetching for hashtable
iterators.

## Implementation
Modified `hashtableInitIterator` to accept a new flags parameter,
allowing control over iterator behavior.
Implemented conditional value prefetching within `hashtableNext` based
on the new `HASHTABLE_ITER_PREFETCH_VALUES` flag.
When the flag is set, hashtableNext now calls `prefetchBucketValues` at
the start of each new bucket, preemptively loading the values of filled
entries into the CPU cache.
The actual prefetching of values is performed using type-specific
callback functions implemented in `server.c`:
- For `robj` the `hashtableObjectPrefetchValue` callback is used to
prefetch the value if it is not embedded.

This implementation is specifically focused on main database iterations
at this stage. Applying it to hashtables that hold other object types
should not be problematic, but its performance benefits for those cases
will need to be proven through testing and benchmarking.
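A hedged sketch of what such a type-specific callback could look like; the signature and the encoding check are assumptions, not the exact code:

```
static void hashtableObjectPrefetchValue(const void *entry) {
    const robj *o = entry;
    /* Embedded string values live inside the robj itself; only values
     * stored out-of-line benefit from an explicit prefetch. */
    if (o->encoding == OBJ_ENCODING_RAW) __builtin_prefetch(o->ptr);
}
```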

## Performance

### Setup:
- 64-core Graviton 3 Amazon EC2 instance.
- 50 million keys with different value sizes.
- Running the valkey server over a RAM file system.
- CRC checksum and compression off.

### Action
- save command.

### Results
The results regarding the duration of the “save” command were taken from
the “info all” command.
```
+--------------------+------------------+------------------+ 
| Prefetching        | Value size (byte)| Time (seconds)   | 
+--------------------+------------------+------------------+ 
| No                 | 100              | 20.112279        | 
| Yes                | 100              | 12.758519        | 
| No                 | 40               | 16.945366        | 
| Yes                | 40               | 10.902022        |
| No                 | 20               | 9.817000         | 
| Yes                | 20               | 9.626821         |
| No                 | 10               | 9.71510          | 
| Yes                | 10               | 9.510565         |
+--------------------+------------------+------------------+
```
The results largely align with our expectations, showing significant
improvements for larger values (100 bytes and 40 bytes) that are stored
outside the robj. For smaller values (20 bytes and 10 bytes) that are
embedded within the robj, we see almost no improvement, which is as
expected.

However, the small improvement observed even for these embedded values
is somewhat surprising. Given that we are not actively prefetching these
embedded values, this minor performance gain was not anticipated.

perf record on save command **without** value prefetching:
```
                --99.98%--rdbSaveDb
                          |          
                          |--91.38%--rdbSaveKeyValuePair
                          |          |          
                          |          |--42.72%--rdbSaveRawString
                          |          |          |          
                          |          |          |--26.69%--rdbWriteRaw
                          |          |          |          |          
                          |          |          |           --25.75%--rioFileWrite.lto_priv.0
                          |          |          |          
                          |          |           --15.41%--rdbSaveLen
                          |          |                     |          
                          |          |                     |--7.58%--rdbWriteRaw
                          |          |                     |          |          
                          |          |                     |           --7.08%--rioFileWrite.lto_priv.0
                          |          |                     |                     |          
                          |          |                     |                      --6.54%--_IO_fwrite
                          |          |                     |                                         
                          |          |                     |          
                          |          |                      --7.42%--rdbWriteRaw.constprop.1
                          |          |                                |          
                          |          |                                 --7.18%--rioFileWrite.lto_priv.0
                          |          |                                           |          
                          |          |                                            --6.73%--_IO_fwrite
                          |          |                                                            
                          |          |          
                          |          |--40.44%--rdbSaveStringObject
                          |          |          
                          |           --7.62%--rdbSaveObjectType
                          |                     |          
                          |                      --7.39%--rdbWriteRaw.constprop.1
                          |                                |          
                          |                                 --7.04%--rioFileWrite.lto_priv.0
                          |                                           |          
                          |                                            --6.59%--_IO_fwrite
                          |                                                               
                          |          
                           --7.33%--hashtableNext.constprop.1
                                     |          
                                      --6.28%--prefetchNextBucketEntries.lto_priv.0
```
perf record on save command **with** value prefetching:
```
               rdbSaveRio
               |          
                --99.93%--rdbSaveDb
                          |          
                          |--79.81%--rdbSaveKeyValuePair
                          |          |          
                          |          |--66.79%--rdbSaveRawString
                          |          |          |          
                          |          |          |--42.31%--rdbWriteRaw
                          |          |          |          |          
                          |          |          |           --40.74%--rioFileWrite.lto_priv.0
                          |          |          |          
                          |          |           --23.37%--rdbSaveLen
                          |          |                     |          
                          |          |                     |--11.78%--rdbWriteRaw
                          |          |                     |          |          
                          |          |                     |           --11.03%--rioFileWrite.lto_priv.0
                          |          |                     |                     |          
                          |          |                     |                      --10.30%--_IO_fwrite
                          |          |                     |                                |          
                          |          |                     |          
                          |          |                      --10.98%--rdbWriteRaw.constprop.1
                          |          |                                |          
                          |          |                                 --10.44%--rioFileWrite.lto_priv.0
                          |          |                                           |          
                          |          |                                            --9.74%--_IO_fwrite
                          |          |                                                      |          
                          |          |          
                          |          |--11.33%--rdbSaveObjectType
                          |          |          |          
                          |          |           --10.96%--rdbWriteRaw.constprop.1
                          |          |                     |          
                          |          |                      --10.51%--rioFileWrite.lto_priv.0
                          |          |                                |          
                          |          |                                 --9.75%--_IO_fwrite
                          |          |                                           |          
                          |          |          
                          |           --0.77%--rdbSaveStringObject
                          |          
                           --18.39%--hashtableNext
                                     |          
                                     |--10.04%--hashtableObjectPrefetchValue
                                     |
                                      --6.06%--prefetchNextBucketEntries        

```
Conclusions:

The prefetching strategy appears to be working as intended, shifting the
performance bottleneck from data access to I/O operations.
The significant reduction in rdbSaveStringObject time suggests that
string objects(which are the values) are being accessed more
efficiently.

Signed-off-by: NadavGigi <nadavgigi102@gmail.com>
2025-01-23 12:17:20 +01:00
Viktor Söderqvist
99ed308817
Add cross-version test framework (and a simple test) (#1371)
This includes a way to run two versions of the server from the TCL test
framework. It's a preparation to add more cross-version tests. The
runtest script accepts a new parameter

    ./runtest --other-server-path path/to/valkey-server

and a new tag "needs:other-server" for test cases and start_server.
Tests with this tag are automatically skipped if `--other-server-path`
is not provided.

This PR adds it in a CI job with Valkey 7.2.7 by downloading a binary
release.

Fixes #76

---------

Signed-off-by: Viktor Söderqvist <viktor.soderqvist@est.tech>
2025-01-23 11:26:54 +01:00
ranshid
7fc958da52
Fix "Protocol desync regression test" with TLS (#1593)
Remove socket nonblocking mode and simplify the validation.

fixes https://github.com/valkey-io/valkey/issues/1592

Signed-off-by: ranshid <ranshid@amazon.com>
2025-01-21 08:57:01 +02:00
ranshid
dd92d079dc
Fix Protocol desync regression test (#1590)
The desync regression test was created as a regression test for the
following bug:
if we embed a NULL terminator inside an inline/multi-bulk message, we
will not be able to use strchr to
identify the newline (\n) / carriage-return (\r) in the client query
buffer. This can cause (for example) a replica reading the primary
stream to keep filling its query buffer endlessly, consuming more and
more memory.

To handle the above risk, a check was added to verify that the
inline-bulk and multi-bulk sizes do not exceed 64K bytes in the
query buffer. A test was placed to verify this.
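For reference, the protection is in the spirit of the classic inline-parsing check (a sketch, assuming PROTO_INLINE_MAX_SIZE is the 64K limit):

```
char *newline = strchr(c->querybuf + c->qb_pos, '\n');
if (newline == NULL) {
    if (sdslen(c->querybuf) - c->qb_pos > PROTO_INLINE_MAX_SIZE) {
        addReplyError(c, "Protocol error: too big inline request");
        setProtocolError("too big inline request", c);
    }
    return C_ERR;
}
```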

This PR introduces the following fixes to the desync regression test:
1. Fix the sent payload to flush 1024-byte blocks of 'A's instead of
'payload', which was sent by mistake.
2. Make sure that the connection is correctly terminated by the server
on protocol error once the buffer exceeds 64K, and not before.
3. Add another test intrinsic which also verifies a nested bulk with
embedded null termination (not verified before).

fixes https://github.com/valkey-io/valkey/issues/1583


NOTE: Although it is possible to replace strchr with a "safer" utility
(e.g. memchr) that will not stop scanning at the first occurrence of
'\0', we still want to protect against excessive usage of the query
buffer and also preserve the current behavior(?). We will look into
improving this in a follow-up issue.

---------

Signed-off-by: Ran Shidlansik <ranshid@amazon.com>
Signed-off-by: ranshid <88133677+ranshid@users.noreply.github.com>
2025-01-20 20:28:45 +02:00
ranshid
3032ccd48a
Change the shared format for dual channel replication logs (#1586)
Change the format of the dual channel replication logs so that it does
not conflict with existing log formats like modules.

Fixes: https://github.com/valkey-io/valkey/issues/1509

Signed-off-by: Ran Shidlansik <ranshid@amazon.com>
2025-01-20 08:04:47 +02:00
Viktor Söderqvist
b2e4155f54
Lower latency-monitor-threshold in expire-cycle test case (#1584)
The test case checks for expire-cycle in LATENCY LATEST, but with the
new hash table, the expire cycle is too fast to be logged by the latency
monitor. Lower the latency monitor threshold to make it more likely to
be logged.

Fixes #1580

---------

Signed-off-by: Viktor Söderqvist <viktor.soderqvist@est.tech>
2025-01-19 19:23:00 +01:00
Pierre
2d0b8e3608
Update comments and log message in cluster_legacy.c (#1561)
Update comments and log message in `cluster_legacy.c`.

Follow-up from #1441.

Signed-off-by: Pierre Turin <pieturin@amazon.com>
Co-authored-by: Ping Xie <pingxie@outlook.com>
Co-authored-by: Binbin <binloveplay1314@qq.com>
2025-01-17 15:56:52 +08:00
Pierre
c9aea6d2d3
Fix memory leak in forgotten node ping ext code path (#1574)
When processing a cluster bus PING extension, there is a memory leak
when adding a new key to the `nodes_black_list` dict. We now make sure
to free the key `sds` if the dict did not take ownership of it.
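A minimal sketch of the ownership rule (variable names are illustrative): dictAdd takes ownership of the key only on success, so the caller frees it otherwise:

```
sds id = sdsnewlen(node_id, CLUSTER_NAMELEN);
if (dictAdd(server.cluster->nodes_black_list, id, NULL) != DICT_OK) {
    /* Key already present: the dict did not take ownership. */
    sdsfree(id);
}
```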

Signed-off-by: Pierre Turin <pieturin@amazon.com>
2025-01-16 15:38:15 -08:00
Harkrishn Patro
87cc3d7a71
Fix cluster info sent stats for message with light header (#1563)
This issue affected only two message types (CLUSTERMSG_TYPE_PUBLISH and CLUSTERMSG_TYPE_PUBLISHSHARD) because they used a light message header, which caused the CLUSTER INFO stats to miss sent/received message information for those types.

---------

Signed-off-by: Harkrishn Patro <harkrisp@amazon.com>
Signed-off-by: Harkrishn Patro <bunty.hari@gmail.com>
Co-authored-by: Binbin <binloveplay1314@qq.com>
2025-01-16 11:25:37 -08:00
Ricardo Dias
af71619c45
Extract the scripting engine code from the functions unit (#1312)
This commit creates a new compilation unit for the scripting engine code
by extracting the existing code from the functions unit.
We're doing this refactor to prepare the code for running the `EVAL`
command using different scripting engines.

This PR has a module API change: we changed the type of error messages
returned by the callback
`ValkeyModuleScriptingEngineCreateFunctionsLibraryFunc` to be a
`ValkeyModuleString` (aka `robj`);

This PR also fixes #1470.

---------

Signed-off-by: Ricardo Dias <ricardo.dias@percona.com>
2025-01-16 10:08:16 +01:00
Ray Cao
921ba19acb
Incr expired_keys if the unix-time is already expired for EXPIREAT and other commands (#1517)
Some commands that use unix-time, such as `EXPIREAT` and `SET EXAT`, should include the deleted keys in the `expired_keys` statistics if the specified time has already expired, and notifications should be sent in the same manner as for expired keys.

---------

Signed-off-by: Ray Cao <zisong.cw@alibaba-inc.com>
2025-01-16 16:40:34 +08:00
Binbin
cda9eee8c9
Allow clang-format to be triggered in push events (#1565)
Just like the spell-check workflow, we should allow triggering it on
push events, so that fork repos can notice formatting issues well
before submitting the PR.

Signed-off-by: Binbin <binloveplay1314@qq.com>
2025-01-16 10:23:03 +08:00
Sarthak Aggarwal
6a8f068e36
Adding Missing filters to CLIENT LIST and Dedup Parsing (#1401)
Adds filter options to CLIENT LIST:

    * USER <username>
      Return clients authenticated by <username>.
    * ADDR <ip:port>
      Return clients connected from the specified address.
    * LADDR <ip:port>
      Return clients connected to the specified local address.
    * SKIPME (YES|NO)
      Exclude the current client from the list (default: no).
    * MAXAGE <maxage>
      Only list connections older than the specified age.

Modifies the ID filter of CLIENT KILL to allow multiple IDs

    * ID <client-id> [<client-id>...]
      Kill connections by client ids.


This makes CLIENT LIST and CLIENT KILL accept the same options.

For backward compatibility, the default value for SKIPME is NO for
CLIENT LIST and YES for CLIENT KILL.

The MAXAGE comes from CLIENT KILL, where it *keeps* clients with the
given max age and kills the older ones. This logic becomes weird for
CLIENT LIST, but is kept for similarity with CLIENT KILL, for the use
case of first testing manually using CLIENT LIST, and then running
CLIENT KILL with the same filters.

The `ID client-id [client-id ...]` no longer needs to be the last
filter. The parsing logic determines if an argument is an ID or not
based on whether it can be parsed as an integer or not.
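For example, hypothetical invocations combining these filters (values illustrative):

```
CLIENT LIST USER default LADDR 127.0.0.1:6379 MAXAGE 3600
CLIENT KILL ID 7 42 SKIPME yes
```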

Partly addresses: #668

---------

Signed-off-by: Sarthak Aggarwal <sarthagg@amazon.com>
2025-01-15 20:44:13 +01:00
zhaozhao.zz
c5a1585547
add paused_actions for INFO Clients (#1519)
Add `paused_actions` and `paused_timeout_milliseconds` for INFO Clients
to inform users about if clients are paused.

---------

Signed-off-by: zhaozhao.zz <zhaozhao.zz@alibaba-inc.com>
2025-01-14 19:01:00 +08:00
Viktor Söderqvist
2a1a65b4c7
Introduce const_sds for const-content sds (#1553)
`sds` is a typedef of `char *`.

`const sds` means `char * const`, i.e. a const-pointer to non-const
content.

More often, you would want `const char *`, i.e. a pointer to
const-content. Until now, it's not possible to express that. This PR
adds `const_sds` which is a pointer to const-content sds.

To get a const-pointer to const-content sds, you can use `const
const_sds`.
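A short illustration of the distinction (a sketch; the typedefs follow the PR description):

```
typedef char *sds;
typedef const char *const_sds; /* added by this PR */

void example(sds a, const_sds b) {
    a[0] = 'x';   /* fine: content is mutable */
    /* b[0] = 'x';   does not compile: content is const */
}
```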

In this PR, some uses of `const sds` are replaced by `const_sds`. We can
use it more later.

Fixes #1542

---------

Signed-off-by: Viktor Söderqvist <viktor.soderqvist@est.tech>
2025-01-14 10:38:12 +01:00
Amit Nagler
6be1c77b1e
Fix valgrind test (#1555)
Introduced at https://github.com/valkey-io/valkey/pull/1165/files

Signed-off-by: naglera <anagler123@gmail.com>
2025-01-14 10:49:46 +02:00
secwall
fdc89c56b7
Escape unix socket group in unit tests (#1554)
In some cases unix groups could have whitespace and/or `\` in them.
One example is my workstation: it's a macOS machine in an Active
Directory domain, so my user has the group `LD\Domain Users`.
Running `make test` on the `unstable` and `8.0` branches fails with:

I'm not sure if we need to fix this in 8.0. But it seems that it should
be fixed in unstable.

Signed-off-by: secwall <secwall@yandex-team.ru>
2025-01-13 20:05:04 -08:00
Rain Valentine
d13aad45f4
Replace dict with new hashtable: hash datatype (#1502)
This PR replaces dict with the new hashtable data structure in the HASH
datatype. There is a new struct for hashtable items which contains a
pointer to value sds string and the embedded key sds string. These
values were previously stored in dictEntry. This structure is kept
opaque so we can easily add small value embedding or other optimizations
in the future.

closes #1095

---------

Signed-off-by: Rain Valentine <rsg000@gmail.com>
2025-01-13 11:17:16 +01:00
Viktor Söderqvist
dc9ca1b98d
Test coverage for ECHO for reply schema validation (#1549)
After #1545 disabled some tests for reply schema validation, we now have
another issue that ECHO is not covered.

```
WARNING! The following commands were not hit at all:
  echo
ERROR! at least one command was not hit by the tests
```

This patch adds a test case for ECHO in the unit/other test suite. I
haven't checked if there are more commands that aren't covered.

Signed-off-by: Viktor Söderqvist <viktor.soderqvist@est.tech>
2025-01-13 10:14:09 +08:00
Viktor Söderqvist
ad592f73d7
Skip CLI tests with reply schema validation (#1545)
The commands used in valkey-cli tests are not important for reply schema
validation. Skip them to avoid the problem of tests hanging. This has
failed lately in the daily job:

```
[TIMEOUT]: clients state report follows.
sock55fedcc19be0 => (IN PROGRESS) valkey-cli pubsub mode with single standard channel subscription
Killing still running Valkey server 33357
```

These test cases use a special `:get pubsub` command, which is an
internal valkey-cli command rather than a Valkey server command. This
command hangs when compiled with logreqres enabled. The easy solution
is to skip the tests in this setup.

The test cases were introduced in #1432.

Signed-off-by: Viktor Söderqvist <viktor.soderqvist@est.tech>
2025-01-12 08:02:39 +08:00
Binbin
11cb8ee27c
Add latency stats around cluster config file operations (#1534)
When the cluster changes, we need to persist the cluster configuration,
and these file IO operations may cause latency.

Signed-off-by: Binbin <binloveplay1314@qq.com>
2025-01-11 11:03:10 +08:00
Binbin
10357ceda5
Mark the node as FAIL when the node is marked as NOADDR and broadcast the FAIL (#1191)
Imagine we have a cluster, for example a three-shard cluster.
If shard 1 does a CLUSTER RESET HARD, it will change its node
name, and then other nodes will mark it as NOADDR since the node
name received in PONG has changed.

In the eyes of the other nodes, there is one working primary node
left, but with no address, and in this case the address reported
in MOVED will be invalid and will confuse the clients. At the
same time, the replica will not fail over since its primary
is not in the FAIL state. So the cluster looks OK to everyone.

This leaves a cluster that appears OK, but with no coverage for
shard 1; obviously we should do something like CLUSTER FORGET
to remove the node and fix the cluster before using it.

But the point here is that we can mark the NOADDR node as FAIL to
advance the cluster state. If a node is NOADDR, it does
not have a valid address, so we won't reconnect to it, we won't
send PING, and we won't gossip it; it seems reasonable to mark it
as FAIL.
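A minimal sketch of the rule, using the real cluster flag names; where this check lives in cron and the helper used to broadcast FAIL are assumptions:

```
if (node->flags & CLUSTER_NODE_NOADDR && !(node->flags & CLUSTER_NODE_FAIL)) {
    /* No valid address: we will never reconnect, ping or gossip this
     * node again, so mark it FAIL to advance the cluster state. */
    markNodeAsFailingIfNeeded(node);
}
```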

Signed-off-by: Binbin <binloveplay1314@qq.com>
2025-01-11 11:02:05 +08:00
Binbin
211b250aad
Do election in order based on failed primary rank to avoid voting conflicts (#1018)
When multiple primary nodes fail simultaneously, the cluster cannot
recover within the default effective time (data_age limit). The main
reason is that the vote happens without ranking among the replicas of
different failed primaries, which causes too many epoch conflicts.

Therefore, we introduce ranking based on the failed primary shard-id.
A new failed_primary_rank var was introduced; it holds the rank of this
instance in the list of all failed primaries. This var is used in
failover, and we send the failover election packets in order based
on the rank, which effectively avoids the voting conflicts.

If a single primary is down, the behavior is the same as before. If multiple
primaries are down, their replica election initiation time will be delayed
by 500ms according to the ranking.
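A one-line sketch of that delay, assuming the failover timing field from the existing election code:

```
/* Each failed primary's replicas start their election later by rank. */
server.cluster->failover_auth_time += server.cluster->failed_primary_rank * 500; /* ms */
```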

Signed-off-by: Binbin <binloveplay1314@qq.com>
2025-01-11 10:43:18 +08:00
Binbin
d6bdd9e7d7
Fix module LatencyAddSample still work when latency-monitor-threshold is 0 (#1541)
When latency-monitor-threshold is set to 0, the latency monitor is
disabled, but in VM_LatencyAddSample we wrote the condition incorrectly,
causing latency to be recorded even when the monitor was turned off.

This bug has existed since the very first day; see e3b1d6d, merged
in 2019.
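A sketch of the corrected condition: a threshold of 0 disables the monitor, so nothing may be recorded in that case:

```
if (server.latency_monitor_threshold != 0 &&
    latency >= (mstime_t)server.latency_monitor_threshold) {
    latencyAddSample(event, latency);
}
```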

Signed-off-by: Binbin <binloveplay1314@qq.com>
2025-01-11 10:32:58 +08:00
Binbin
e60990e579
Fix crash when freeing newly created node when nodeIp2String fail (#1535)
In #1441, we found an assert and decided to remove it and instead
just free the newly created node and close the link, since if we cannot
get the IP from the link it probably means the connection was closed.
```
=== VALKEY BUG REPORT START: Cut & paste starting from here ===
17847:M 19 Dec 2024 00:15:58.021 # === ASSERTION FAILED ===
17847:M 19 Dec 2024 00:15:58.021 # ==> cluster_legacy.c:3252 'nodeIp2String(node->ip, link, hdr->myip) == C_OK' is not true

------ STACK TRACE ------

17847 valkey-server *
src/valkey-server 127.0.0.1:27131 [cluster](clusterProcessPacket+0x1304) [0x4e5634]
src/valkey-server 127.0.0.1:27131 [cluster](clusterReadHandler+0x11e) [0x4e59de]
/__w/valkey/valkey/src/valkey-tls.so(+0x2f1e) [0x7f083983ff1e]
src/valkey-server 127.0.0.1:27131 [cluster](aeMain+0x8a) [0x41afea]
src/valkey-server 127.0.0.1:27131 [cluster](main+0x4d7) [0x40f547]
/lib64/libc.so.6(+0x40c8) [0x7f083985a0c8]
/lib64/libc.so.6(__libc_start_main+0x8b) [0x7f083985a18b]
src/valkey-server 127.0.0.1:27131 [cluster](_start+0x25) [0x410ef5]
```

But it also introduced another assert. The reason is that this new node
was never added to the cluster nodes dict.
```
17128:M 08 Jan 2025 10:51:44.061 # === ASSERTION FAILED ===
17128:M 08 Jan 2025 10:51:44.061 # ==> cluster_legacy.c:1693 'dictDelete(server.cluster->nodes, nodename) == DICT_OK' is not true

------ STACK TRACE ------

17128 valkey-server *
src/valkey-server 127.0.0.1:28627 [cluster][0x4ebdc4]
src/valkey-server 127.0.0.1:28627 [cluster][0x4e81d2]
src/valkey-server 127.0.0.1:28627 [cluster](clusterReadHandler+0x268)[0x4e8618]
/__w/valkey/valkey/src/valkey-tls.so(+0xb278)[0x7f109480b278]
src/valkey-server 127.0.0.1:28627 [cluster](aeMain+0x89)[0x592b09]
src/valkey-server 127.0.0.1:28627 [cluster](main+0x4b3)[0x453e23]
/lib64/libc.so.6(__libc_start_main+0xe5)[0x7f10958bf7e5]
src/valkey-server 127.0.0.1:28627 [cluster](_start+0x2e)[0x454a5e]
```

This closes #1527.

Signed-off-by: Binbin <binloveplay1314@qq.com>
2025-01-10 10:19:04 +08:00
Harkrishn Patro
c338de3d46
Update upload artifacts to v4 (#1539)
Fixes #1538

Signed-off-by: Harkrishn Patro <harkrisp@amazon.com>
2025-01-09 17:19:36 -08:00
Madelyn Olson
d99457c09c
Free the passed in lua context instead of the global (#1536)
The fix that Redis gave us for the CVE-2024-46981 was freeing lctx.lua,
and I didn't merge it correctly. We made some changes so that we are
able to async-free the lua context, so we need to free the passed-in
context. This was applied correctly on the two released versions (8.0
and 7.2) just not on unstable.

Signed-off-by: Madelyn Olson <madelyneolson@gmail.com>
2025-01-09 14:35:48 +08:00
Binbin
b207b421bc
Fix new cli subscribed mode test in cluster mode (#1533)
We need to add a hash tag in cluster mode.
Fixes #1531.

Signed-off-by: Binbin <binloveplay1314@qq.com>
2025-01-09 12:21:31 +08:00
Karthick Ariyaratnam
80c35402bc
Remove legacy SERVER_TEST compiler flag from cmake. (#1530)
This PR cleans up the `SERVER_TEST` compiler flag from the cmake compile
definitions, as it is no longer required by the new unit test framework; see #428.

Signed-off-by: Karthick Ariyaratnam <karthyuom@gmail.com>
2025-01-09 11:52:45 +08:00
Nadav Gigi
9f4815a224
Accelerate hash table iterator with prefetching (#1501)
This PR introduces improvements to the hashtable iterator, implementing
prefetching technique described in the blog post [Unlock One Million RPS
- Part 2](https://valkey.io/blog/unlock-one-million-rps-part2/) . The
changes lay the groundwork for further enhancements in use cases
involving iterators. Future PRs will build upon this foundation to
improve performance and functionality in various iterator-dependent
operations.

In the pursuit of maximizing iterator performance, I conducted a
comprehensive series of experiments. My tests encompassed a wide range
of approaches, including processing multiple bucket indices in parallel,
prefetching the next bucket upon completion of the current one, and
several other timing and quantity variations. Surprisingly, after
rigorous testing and performance analysis, the simplest implementation
presented in this PR consistently outperformed all other more complex
strategies.

## Implementation

Each time we start iterating over a bucket, we prefetch data for future
iterations:

- We prefetch the entries of the next bucket (if it exists).
- We prefetch the structure (but not the entries) of the bucket after
  the next.

This prefetching is done when we pick up a new bucket, increasing the
chance that the data will be in cache by the time we need it.
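A hedged sketch of the idea (helper and constant names are assumptions, not the exact implementation):

```
/* When picking up bucket i, warm the cache for what comes next. */
if (next_bucket != NULL) {
    for (int pos = 0; pos < ENTRIES_PER_BUCKET; pos++)
        if (isPositionFilled(next_bucket, pos))
            __builtin_prefetch(next_bucket->entries[pos]); /* entries of i+1 */
}
if (after_next_bucket != NULL)
    __builtin_prefetch(after_next_bucket); /* structure only of i+2 */
```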

## Performance

The data below was taken by running the KEYS command on a 64-core
Graviton 3 Amazon EC2 instance with 50 million keys of 100 bytes each.
The results regarding the duration of the “keys *” command were taken
from the “info all” command.

```
+--------------------+------------------+-----------------------------+
| prefetching        | Time (seconds)   | Keys Processed per Second   |
+--------------------+------------------+-----------------------------+
| No                 | 11.112279        | 4,499,529                   |
| Yes                | 3.141916         | 15,913,862                  |
+--------------------+------------------+-----------------------------+
Improvement:
Comparing the iterator without prefetching to the one with prefetching, 
we can see a speed improvement of 11.112279 / 3.141916 ≈ 3.54 times faster.
```


### Save command improvement

#### Setup:
- 64-core Graviton 3 Amazon EC2 instance.
- 50 million keys of 100 bytes each.
- Running the valkey server over a RAM file system.
- CRC checksum and compression off.

#### Results

```
+--------------------+------------------+-----------------------------+
| prefetching        | Time (seconds)   | Keys Processed per Second   |
+--------------------+------------------+-----------------------------+
| No                 | 28               | 1,785,700                   |
| Yes                | 19.6             | 2,550,000                   |
+--------------------+------------------+-----------------------------+
Improvement:
- Reduced SAVE time by 30% (8.4 seconds faster)
- Increased key processing rate by 42.8% (764,300 more keys/second)
```

Signed-off-by: NadavGigi <nadavgigi102@gmail.com>
2025-01-08 23:18:55 +01:00
Viktor Szépe
418f1d059f
Improve Typos configuration (#1456)
- remove old ignores
- fix a "new" typo 🎁

Signed-off-by: Viktor Szépe <viktor@szepe.net>
2025-01-08 22:39:45 +01:00
Nikhil Manglore
9e0204941d
valkey-cli auto-exit from subscribed mode (#1432)
Resolves an issue with valkey-cli not auto-exiting from subscribed mode
on reaching zero pub/sub subscriptions (previously filed on Redis):
https://github.com/redis/redis/issues/12592

---------

Signed-off-by: Nikhil Manglore <nmanglor@amazon.com>
2025-01-08 21:03:06 +01:00
Rueian
0a89571dcc
Skip logreqres on tests for the HELLO command (#1528)
Skip logreqres on tests for the HELLO command

Signed-off-by: Rueian <rueiancsie@gmail.com>
2025-01-08 10:05:20 -08:00
Rain Valentine
ab627d6721
Replace dict with new hashtable: sorted set datatype (#1427)
This PR replaces dict with hashtable in the ZSET datatype. Instead of
mapping key to score as dict did, the hashtable maps key to a node in
the skiplist, which contains the score. This takes advantage of
hashtable performance improvements and saves 15 bytes per set item - 24
bytes overhead before, 9 bytes after.

Closes #1096

---------

Signed-off-by: Rain Valentine <rsg000@gmail.com>
Signed-off-by: Viktor Söderqvist <viktor.soderqvist@est.tech>
Co-authored-by: Viktor Söderqvist <viktor.soderqvist@est.tech>
2025-01-08 18:34:02 +01:00
Lipeng Zhu
8af35a1712
Add build folder to gitignore. (#1488)
Default cmake build folder in vscode is `"cmake.buildDirectory": "${workspaceFolder}/build"`.

Signed-off-by: Lipeng Zhu <lipeng.zhu@intel.com>
2025-01-08 19:33:02 +08:00
uriyage
6c09eea2bc
client struct: lazy init components and optimize struct layout (#1405)
# Refactor client structure to use modular data components

## Current State
The client structure allocates memory for replication / pubsub /
multi-keys / module / blocked data for every client, despite these
features being used by only a small subset of clients. In addition, the
current field layout in the client struct is suboptimal, with poor
alignment and unnecessary padding between fields, leading to a larger
than necessary memory footprint of 896 bytes per client. Furthermore,
fields that are frequently accessed together during operations are
scattered throughout the struct, resulting in poor cache locality.

## This PR's Change

1.  Lazy Initialization (see the sketch after this list)
- **Components are only allocated when first used:**
  - PubSubData: Created on first SUBSCRIBE/PUBLISH operation
  - ReplicationData: Initialized only for replica connections
  - ModuleData: Allocated when module interaction begins
  - BlockingState: Created when first blocking command is issued
  - MultiState: Initialized on MULTI command

2. Memory Layout Optimization:
   - Grouped related fields for better locality
   - Moved rarely accessed fields (e.g., client->name) to struct end
   - Optimized field alignment to eliminate padding

3. Additional changes:
   - Moved watched_keys to be static allocated in the `mstate` struct
   - Relocated replication init logic to replication.c
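A minimal sketch of the lazy-initialization pattern from item 1, assuming hypothetical accessor and field names (`clientPubSubData`, `c->pubsub_data`):

```
static PubSubData *clientPubSubData(client *c) {
    if (c->pubsub_data == NULL) {
        /* First SUBSCRIBE/PUBLISH for this client: allocate on demand. */
        c->pubsub_data = zcalloc(sizeof(PubSubData));
    }
    return c->pubsub_data;
}
```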
  

### Key Benefits
- **Efficient Memory Usage:**
- 45% smaller base client structure - Basic clients now use 528 bytes
(down from 896).
- Better memory locality for related operations
- Performance improvement in high throughput scenarios. No performance
regressions in other cases.


### Performance Impact

Tested with 650 clients and 512-byte values.

#### Single Thread Performance
| Operation   | Dataset | New (ops/sec) | Old (ops/sec) | Change % |
|------------|---------|---------------|---------------|-----------|
| SET        | 1 key   | 261,799      | 258,261      | +1.37%    |
| SET        | 3M keys | 209,134      | ~209,000     | ~0%       |
| GET        | 1 key   | 281,564      | 277,965      | +1.29%    |
| GET        | 3M keys | 231,158      | 228,410      | +1.20%    |

#### 8 IO Threads Performance
| Operation   | Dataset | New (ops/sec) | Old (ops/sec) | Change % |
|------------|---------|---------------|---------------|-----------|
| SET        | 1 key   | 1,331,578    | 1,331,626    | -0.00%    |
| SET        | 3M keys | 1,254,441    | 1,152,645    | +8.83%    |
| GET        | 1 key   | 1,293,149    | 1,289,503    | +0.28%    |
| GET        | 3M keys | 1,152,898    | 1,101,791    | +4.64%    |

#### Pipeline Performance (3M keys)
| Operation | Pipeline Size | New (ops/sec) | Old (ops/sec) | Change % |
|-----------|--------------|---------------|---------------|-----------|
| SET       | 10          | 548,964      | 538,498      | +1.94%    |
| SET       | 20          | 606,148      | 594,872      | +1.89%    |
| SET       | 30          | 631,122      | 616,606      | +2.35%    |
| GET       | 10          | 628,482      | 624,166      | +0.69%    |
| GET       | 20          | 687,371      | 681,659      | +0.84%    |
| GET       | 30          | 725,855      | 721,102      | +0.66%    |

### Observations:
1. Single-threaded operations show consistent improvements (1-1.4%)
2. Multi-threaded performance shows significant gains for large
datasets:
   - SET with 3M keys: +8.83% improvement
   - GET with 3M keys: +4.64% improvement
3. Pipeline operations show consistent improvements:
   - SET operations: +1.89% to +2.35%
   - GET operations: +0.66% to +0.84%
4. No performance regressions observed in any test scenario


Related issue:https://github.com/valkey-io/valkey/issues/761

---------

Signed-off-by: Uri Yagelnik <uriy@amazon.com>
Signed-off-by: uriyage <78144248+uriyage@users.noreply.github.com>
Co-authored-by: Viktor Söderqvist <viktor.soderqvist@est.tech>
2025-01-08 10:28:54 +02:00
Rueian
dc4628d444
Add availability_zone to the HELLO command history (#1524)
This PR is a followup for #1487.

Signed-off-by: Rueian <rueiancsie@gmail.com>
Co-authored-by: Binbin <binloveplay1314@qq.com>
2025-01-08 10:04:58 +08:00
Madelyn Olson
d3acd90320
Actually run code coverage on ubuntu 22 (#1522)
This commit, https://github.com/valkey-io/valkey/pull/1504, moved the
wrong worker to ubuntu 22. We wanted to move codecov and not coverity.

Signed-off-by: Madelyn Olson <madelyneolson@gmail.com>
2025-01-07 15:43:46 -08:00
Rueian
3b52186b6a
Add availability_zone to the HELLO response (#1487)
It's inconvenient for client implementations to extract the
`availability_zone` information from the `INFO` response. The `INFO`
response contains a lot of information that a client implementation
typically doesn't need.

This PR adds the availability zone to the `HELLO` response. Clients
usually already use the `HELLO` command for protocol negotiation and
also get the server `version` and `role` from its response. To keep the
`HELLO` response small, the field is only added if availability zone is
configured.

---------

Signed-off-by: Rueian <rueiancsie@gmail.com>
2025-01-07 22:54:55 +01:00
Madelyn Olson
e1db553834
Add tests for acl selectors with no permissions or patterns (#1515)
Signed-off-by: Madelyn Olson <madelyneolson@gmail.com>
2025-01-06 15:46:55 -08:00
Madelyn Olson
4ffd3ebdeb
Fix LUA garbage collector (CVE-2024-46981) (#1513)
Reset the GC state before closing the lua VM to prevent user data from
being wrongly freed while it might still be used in destructor callbacks.

Created and published by Redis in their OSS branch.
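A sketch of the fix using the standard Lua C API, assuming the collector was previously stopped during script execution:

```
lua_gc(lua, LUA_GCRESTART, 0); /* reset GC state ... */
lua_close(lua);                /* ... before destructors run at close */
```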

Signed-off-by: Madelyn Olson <madelyneolson@gmail.com>
Co-authored-by: YaacovHazan <yaacov.hazan@redis.com>
2025-01-06 14:02:22 -08:00
Madelyn Olson
7977c55ac9
Fix Read/Write key pattern selector (CVE-2024-51741) (#1514)
The explanation on the original commit was wrong. Key-based access must
have a `~` in order to correctly configure which key prefixes to apply
the selector to. If this is missing, a server assert will be triggered
later.

Signed-off-by: Madelyn Olson <madelyneolson@gmail.com>
Co-authored-by: YaacovHazan <yaacov.hazan@redis.com>
2025-01-06 14:02:16 -08:00
Binbin
c0014ef15e
Check whether to switch to fail when setting the node to pfail in cron (#1061)
This may speed up the transition to the fail state a bit.
Previously we would only check when we received a pfail/fail
report from others in gossip. If our own node is the last vote,
we can switch to fail directly here without waiting for
the next gossip packet.

Signed-off-by: Binbin <binloveplay1314@qq.com>
2025-01-06 09:26:17 +08:00
Binbin
33b824137e
Explicitly check C_ERR condition to improve readability in clusterSaveConfig (#1505)
It's not obvious to see at first; modify it to check C_ERR explicitly.

Signed-off-by: Binbin <binloveplay1314@qq.com>
2025-01-04 10:47:32 +08:00
eifrah-aws
b3b4bdcda4
CMake: fail on warnings (#1503)
When building with `CMake` (especially the targets `valkey-cli`,
`valkey-server` and `valkey-benchmark`) it is possible to have a
successful build while having warnings.

This PR fixes this - which is aligned with how the `Makefile` is working
today:
- Enable `-Wall` + `-Werror` for valkey targets
- Fixed warning in valkey-cli:jsonStringOutput method

Signed-off-by: Eran Ifrah <eifrah@amazon.com>
2025-01-03 09:44:41 +08:00
Madelyn Olson
fe72c784b7
Move coverity back to ubuntu 22 until test failures are fixed (#1504)
The issues in #1453 seem to
have only shown up since we moved to ubuntu 24, as part of the rolling
`ubuntu-latest` migration from 22->24.

Closes #1453.

Signed-off-by: Madelyn Olson <madelyneolson@gmail.com>
2025-01-03 09:43:16 +08:00
gmbnomis
26a72fa89c
Use the correct command proc for the LOOKUP_NOTOUCH exception in lookupKey (#1499)
When looking up a key in no-touch mode, `LOOKUP_NOTOUCH` is set to avoid
updating the last access time in `lookupKey`. An exception must be made
for the `TOUCH` command which must always update the key.

When called from a script, `server.executing_client` will point to the
`TOUCH` command, while `server.current_client` will point to e.g. an
`EVAL` command. So, we must use the former to find out the currently
executing command if defined.
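A minimal sketch of that selection (not the exact patch):

```
client *c = server.executing_client ? server.executing_client
                                    : server.current_client;
if (c && c->cmd && c->cmd->proc == touchCommand)
    flags &= ~LOOKUP_NOTOUCH; /* TOUCH must always update access time */
```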

This fix addresses the issue where TOUCH wasn't updating key access
times when called from scripts like EVAL.

Fixes #1498

Signed-off-by: Simon Baatz <gmbnomis@gmail.com>
Co-authored-by: Binbin <binloveplay1314@qq.com>
2025-01-03 09:41:15 +08:00
Wen Hui
93b701d8d4
Update Redis legacy keyword and link in utils/whatisdoing.sh (#1495)
Signed-off-by: hwware <wen.hui.ware@gmail.com>
2025-01-03 09:37:55 +08:00
Ricardo Dias
8d764f27b3
Refactor: move all valkey modules related declarations to module.h (#1489)
In this commit we move all structures and functions declarations related
to Valkey modules from `server.h` to the recently added `module.h` file.

This re-organization makes it easier for new contributors to find the
valkey modules related code, as well as reducing the compilation times
when changes are made to the modules code.

---------

Signed-off-by: Ricardo Dias <ricardo.dias@percona.com>
2025-01-02 18:35:10 +01:00
Wen Hui
ede4adde7a
Remove releasetools folder (#1496)
The release tool in utils/releasetools/ no longer works in Valkey;
in this PR, we remove it.

Signed-off-by: hwware <wen.hui.ware@gmail.com>
2025-01-02 10:12:09 -05:00
uriyage
35abb68b79
Offload reading the replication stream to IO threads (#1449)
Support Primary client IO offload.

Related issue: https://github.com/valkey-io/valkey/issues/761

---------

Signed-off-by: Uri Yagelnik <uriy@amazon.com>
2025-01-02 10:42:39 +01:00
uriyage
ae70c5459b
replication: fix io-threads possible race by moving waitForClientIO (#1422)
### Fix race with pending writes in replica state transition

#### The Problem
In #60 (Dual channel replication) a new `connWrite` call was added
before the `waitForClientIO` check. This created a race condition where
the main thread may attempt to write to a client that could have pending
writes in IO threads.

#### The Fix
Moved the `waitForClientIO()` call earlier in `syncCommand`, before any
`connWrite` call. This ensures all pending IO operations are completed
before attempting to write to the client.

---------

Signed-off-by: Uri Yagelnik <uriy@amazon.com>
2025-01-02 10:01:55 +02:00
Amit Nagler
8aff235721
Fix unreliable dual channel Valgrind tests (#1500)
Used same approach as PR #1165 to solve random failures.

Resolves #1491

Signed-off-by: naglera <anagler123@gmail.com>
2025-01-02 10:00:29 +08:00
ranshid
0f273bb648
Align rejected unblocked commands to update the correct error statistic (#577)
Currently, in case a blocked command is unblocked externally (e.g. due
to the relevant slot being migrated or the CLIENT UNBLOCK command being
issued), the command statistics will always update the failed_calls
error statistic. This leads to misalignment with
90b9f08e5d
as well as some inconsistencies. For example, when a key is migrated
during cluster slot migration, clients blocked on XREADGROUP will be
unblocked and update the rejected_calls stat, while clients blocked on
BLPOP will get unblocked updating the failed_calls stat.

In this PR we add an explicit indication in updateStatsOnUnblock that
tells whether the command was rejected or failed.

---------

Signed-off-by: ranshid <ranshid@amazon.com>
Signed-off-by: Ran Shidlansik <ranshid@amazon.com>
2025-01-01 16:33:09 +02:00
zhenwei pi
a136ad9a50
Make global configs static (#1159)
Don't expose the static config symbols, and make configEnumGetValue a
static function.

Signed-off-by: zhenwei pi <pizhenwei@bytedance.com>
2024-12-30 15:58:06 -05:00
Pierre
e4179f1f3b
Only (re-)send MEET packet once every handshake timeout period (#1441)
Add `meet_sent` field in `clusterNode` indicating the last time we sent
a MEET packet. Use this field to only (re-)send a MEET packet once every
handshake timeout period when detecting a node without an inbound link.

When receiving multiple MEET packets on the same link while the node is
in handshake state, instead of dropping the packet, we now simply
prevent the creation of a new node. This way we still process the MEET
packet's gossip and reply with a PONG as for any other packet.

Improve some logging messages to include `human_nodename`. Add
`nodeExceedsHandshakeTimeout()` function.
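A hedged sketch of the resend rule, combining the names mentioned above (the timeout helper and cron placement are assumptions):

```
static int nodeExceedsHandshakeTimeout(clusterNode *node, mstime_t now) {
    return (now - node->meet_sent) > handshakeTimeout();
}

/* In cron: only (re-)send MEET once per handshake timeout period. */
if (node->inbound_link == NULL && nodeExceedsHandshakeTimeout(node, now))
    clusterSendPing(node->link, CLUSTERMSG_TYPE_MEET);
```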

This is a follow-up to this previous PR:
https://github.com/valkey-io/valkey/pull/1307
And a partial fix to the crash described in:
https://github.com/valkey-io/valkey/pull/1436

---------

Signed-off-by: Pierre Turin <pieturin@amazon.com>
2024-12-30 15:56:39 -05:00
Madelyn Olson
e470735d91
Immediately restart the defrag cycle if we still need to defrag (#1492) 2024-12-29 08:22:49 -08:00
gmbnomis
8b40341295
Fix JSON description of SET command (#1473)
In the `arguments` section, the `arguments` key is only used for
arguments of type `block` or `oneof`.

Consequently, the `arguments` given for `IFEQ` are ignored by the
server. However, they lead to strange results when rendering the
command's page for the web documentation.

Fix this by removing `arguments` for `IFEQ`.

Signed-off-by: Simon Baatz <gmbnomis@gmail.com>
2024-12-27 00:55:20 +01:00
uriyage
bb325bde35
Fix restore replica output bytes stat update (#1486)
This PR fixes the missing stat update for `total_net_repl_output_bytes`
that was removed during the refactoring in PR #758. The metric was not
being updated when writing to replica connections.

Changes:
- Restored the stat update in postWriteToClient for replica connections
- Added integration test to verify the metric is properly updated

Signed-off-by: Uri Yagelnik <uriy@amazon.com>
Co-authored-by: Binbin <binloveplay1314@qq.com>
2024-12-25 10:58:49 +08:00
Binbin
da92c1d6c8
Document all command flags near serverCommand (#1474)
These flags are not documented here.

Signed-off-by: Binbin <binloveplay1314@qq.com>
2024-12-25 10:57:42 +08:00
Amit Nagler
9f4503ca50
Add scoped RDB loading context and immediate abort flag (#1173)
This PR introduces a new mechanism for temporarily changing the
server's loading_rio context during RDB loading operations. The new
`RDB_SCOPED_LOADING_RIO` macro allows for a scoped change of the
`server.loading_rio` value, ensuring that it's automatically restored
to its original value when the scope ends.
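
One common way to build such a scoped swap in C is a `for`-loop wrapper; this is only an illustrative sketch of the pattern under generic names, not the PR's actual macro:

```c
/* Save a global pointer, install a new value for the duration of the
 * attached block, and restore the saved value when the block ends. */
typedef struct rio rio;
extern rio *loading_rio; /* stands in for server.loading_rio */

#define SCOPED_LOADING_RIO(new_rio)                          \
    for (rio *_saved = loading_rio,                          \
             *_once = (loading_rio = (new_rio), (rio *)1);   \
         _once != NULL;                                      \
         loading_rio = _saved, _once = NULL)
```

Used as `SCOPED_LOADING_RIO(&rdb) { ... }`, the body runs once and the saved pointer is restored on normal block exit; a real implementation also has to consider `break`/`return` paths out of the block.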

Introduces a dedicated flag to `rio` to signal immediate abort,
preventing potential use-after-free scenarios during replication
disconnection in dual-channel load. This ensures proper termination of
`rdbLoadRioWithLoadingCtx` when replication is cancelled due to
connection loss on the main connection.

Fixes https://github.com/valkey-io/valkey/issues/1152

---------

Signed-off-by: naglera <anagler123@gmail.com>
Signed-off-by: Madelyn Olson <madelyneolson@gmail.com>
Signed-off-by: Amit Nagler <58042354+naglera@users.noreply.github.com>
Co-authored-by: Madelyn Olson <madelyneolson@gmail.com>
Co-authored-by: ranshid <88133677+ranshid@users.noreply.github.com>
2024-12-24 08:14:32 +02:00
Amit Nagler
f1b7f3072c
Reduce dual channel testing time (#1477)
- By not waiting `repl-diskless-sync-delay` when we don't have to, we
can reduce dual channel tests' execution time by ~30%.
- This commit also drops one test which is not required for regular sync
(`Sync should continue if not all slaves dropped`).
- Skip dual channel test with master diskless disabled because it will
initiate the same synchronization process as the non-dual channel test,
making it redundant.


Before:
```
Execution time of different units:
  171 seconds - integration/dual-channel-replication
  305 seconds - integration/replication-psync

\o/ All tests passed without errors!
```
After:
```
Execution time of different units:
  120 seconds - integration/dual-channel-replication
  236 seconds - integration/replication-psync

\o/ All tests passed without errors!
```

Discussed in https://github.com/valkey-io/valkey/pull/1173

---------

Signed-off-by: naglera <anagler123@gmail.com>
2024-12-24 08:13:25 +02:00
Madelyn Olson
2ee06e7983
Remove readability refactor for failover auth to fix clang warning (#1481)
As part of #1463, I made a small refactor between the PR and the daily
test I submitted to try to improve readability by adding a function to
abstract the extraction of the message types. However, that change
apparently caused GCC to throw another warning, so reverting the
abstraction on just one line.

Signed-off-by: Madelyn Olson <madelyneolson@gmail.com>
2024-12-24 13:07:15 +08:00
Binbin
d00c856448
Fix switch case compilation error in the new helloscripting (#1472)
It was missing the curly braces needed for a variable declaration after a case label.
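
For reference, this is the usual shape of the fix in C (illustrative, not the exact helloscripting code): a declaration right after a `case` label needs braces to open a scope.

```c
enum valueType { VALUE_STRING, VALUE_OTHER };

static int handle(enum valueType type) {
    switch (type) {
    case VALUE_STRING: { /* braces open a scope... */
        int len = 42;    /* ...so this declaration compiles */
        return len;
    }
    default:
        return 0;
    }
}
```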

Signed-off-by: Binbin <binloveplay1314@qq.com>
2024-12-22 22:57:56 +01:00
Ricardo Dias
6adef8e2f9
Adds support for scripting engines as Valkey modules (#1277)
This PR extends the module API to support the addition of different
scripting engines to execute user defined functions.

The scripting engine can be implemented as a Valkey module, and can be
dynamically loaded with the `loadmodule` config directive, or with the
`MODULE LOAD` command.

This PR also adds an example of a dummy scripting engine module, to show
how to use the new module API. The dummy module is implemented in
`tests/modules/helloscripting.c`.

The current module API support only allows loading scripting engines to
run functions using the `FCALL` command.

The additions to the module API are the following:

```c
/* This struct represents a scripting engine function that results from the
 * compilation of a script by the engine implementation. */
struct ValkeyModuleScriptingEngineCompiledFunction

typedef ValkeyModuleScriptingEngineCompiledFunction **(*ValkeyModuleScriptingEngineCreateFunctionsLibraryFunc)(
    ValkeyModuleScriptingEngineCtx *engine_ctx,
    const char *code,
    size_t timeout,
    size_t *out_num_compiled_functions,
    char **err);

typedef void (*ValkeyModuleScriptingEngineCallFunctionFunc)(
    ValkeyModuleCtx *module_ctx,
    ValkeyModuleScriptingEngineCtx *engine_ctx,
    ValkeyModuleScriptingEngineFunctionCtx *func_ctx,
    void *compiled_function,
    ValkeyModuleString **keys,
    size_t nkeys,
    ValkeyModuleString **args,
    size_t nargs);

typedef size_t (*ValkeyModuleScriptingEngineGetUsedMemoryFunc)(
    ValkeyModuleScriptingEngineCtx *engine_ctx);

typedef size_t (*ValkeyModuleScriptingEngineGetFunctionMemoryOverheadFunc)(
    void *compiled_function);

typedef size_t (*ValkeyModuleScriptingEngineGetEngineMemoryOverheadFunc)(
    ValkeyModuleScriptingEngineCtx *engine_ctx);

typedef void (*ValkeyModuleScriptingEngineFreeFunctionFunc)(
    ValkeyModuleScriptingEngineCtx *engine_ctx,
    void *compiled_function);

/* This struct stores the callback functions implemented by the scripting
 * engine to provide the functionality for the `FUNCTION *` commands. */
typedef struct ValkeyModuleScriptingEngineMethodsV1 {
    uint64_t version; /* Version of this structure for ABI compat. */

    /* Library create function callback. When a new script is loaded, this
     * callback will be called with the script code, and returns a list of
     * ValkeyModuleScriptingEngineCompiledFunc objects. */
    ValkeyModuleScriptingEngineCreateFunctionsLibraryFunc create_functions_library;

    /* The callback function called when `FCALL` command is called on a function
     * registered in this engine. */
    ValkeyModuleScriptingEngineCallFunctionFunc call_function;

    /* Function callback to get current used memory by the engine. */
    ValkeyModuleScriptingEngineGetUsedMemoryFunc get_used_memory;

    /* Function callback to return memory overhead for a given function. */
    ValkeyModuleScriptingEngineGetFunctionMemoryOverheadFunc get_function_memory_overhead;

    /* Function callback to return memory overhead of the engine. */
    ValkeyModuleScriptingEngineGetEngineMemoryOverheadFunc get_engine_memory_overhead;

    /* Function callback to free the memory of a registered engine function. */
    ValkeyModuleScriptingEngineFreeFunctionFunc free_function;
} ValkeyModuleScriptingEngineMethodsV1;

/* Registers a new scripting engine in the server.
 *
 * - `engine_name`: the name of the scripting engine. This name will match
 *   against the engine name specified in the script header using a shebang.
 *
 * - `engine_ctx`: engine specific context pointer.
 *
 * - `engine_methods`: the struct with the scripting engine callback functions
 * pointers.
 */
int ValkeyModule_RegisterScriptingEngine(ValkeyModuleCtx *ctx,
                                         const char *engine_name,
                                         void *engine_ctx,
                                         ValkeyModuleScriptingEngineMethods engine_methods);

/* Removes the scripting engine from the server.
 *
 * `engine_name` is the name of the scripting engine.
 *
 */
int ValkeyModule_UnregisterScriptingEngine(ValkeyModuleCtx *ctx, const char *engine_name);
```
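
For illustration, a module's `OnLoad` might wire these pieces together roughly as below; the callback stubs, the engine name, and the assumption that `ValkeyModuleScriptingEngineMethods` aliases the V1 struct are all made up for this sketch:

```c
#include "valkeymodule.h"

/* Engine callback stubs (bodies omitted in this sketch). */
static ValkeyModuleScriptingEngineCompiledFunction **createLib(
    ValkeyModuleScriptingEngineCtx *engine_ctx, const char *code,
    size_t timeout, size_t *out_num, char **err);
static void callFunc(ValkeyModuleCtx *module_ctx,
                     ValkeyModuleScriptingEngineCtx *engine_ctx,
                     ValkeyModuleScriptingEngineFunctionCtx *func_ctx,
                     void *fn, ValkeyModuleString **keys, size_t nkeys,
                     ValkeyModuleString **args, size_t nargs);
static size_t usedMemory(ValkeyModuleScriptingEngineCtx *engine_ctx);
static size_t funcOverhead(void *fn);
static size_t engineOverhead(ValkeyModuleScriptingEngineCtx *engine_ctx);
static void freeFunc(ValkeyModuleScriptingEngineCtx *engine_ctx, void *fn);

int ValkeyModule_OnLoad(ValkeyModuleCtx *ctx, ValkeyModuleString **argv, int argc) {
    VALKEYMODULE_NOT_USED(argv);
    VALKEYMODULE_NOT_USED(argc);
    if (ValkeyModule_Init(ctx, "sketchengine", 1, VALKEYMODULE_APIVER_1) ==
        VALKEYMODULE_ERR)
        return VALKEYMODULE_ERR;

    ValkeyModuleScriptingEngineMethodsV1 methods = {
        .version = 1,
        .create_functions_library = createLib,
        .call_function = callFunc,
        .get_used_memory = usedMemory,
        .get_function_memory_overhead = funcOverhead,
        .get_engine_memory_overhead = engineOverhead,
        .free_function = freeFunc,
    };
    return ValkeyModule_RegisterScriptingEngine(ctx, "sketchengine",
                                                NULL, /* engine_ctx */
                                                methods);
}
```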

---------

Signed-off-by: Ricardo Dias <ricardo.dias@percona.com>
2024-12-21 23:09:35 +01:00
Madelyn Olson
1c97317518
Resolve bounds checks on cluster_legacy.c (#1463)
We are getting a number of errors like:
```
array subscript ‘clusterMsg[0]’ is partly outside array bounds of ‘unsigned char[2272]’
```

Which is basically GCC telling us that we have an object which is longer
than the underlying storage of the allocation. We actually do this a
lot, but GCC is generally not aware of how big the underlying allocation
is, so it doesn't throw this error. We are specifically getting this
error because the msgBlock can be of variable length depending on the
type of message, but GCC assumes it's the longest one possible. The
solution I went with here was to make the message type optional, so that
it wasn't included in the size. I think this also makes some sense, since
it's really just a helper for us to easily cast the object around.

I considered disabling this error, but it is generally pretty useful
since it can catch real issues. Another solution would be to
over-allocate to the largest possible object, which could hurt
performance as we initialize it to zero.
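
For context, the warning class is easy to reproduce with a toy snippet; `shortMsg`/`longMsg` are made-up stand-ins for the cluster message variants:

```c
struct shortMsg { int type; };
struct longMsg  { int type; unsigned char payload[2048]; };

unsigned char buf[sizeof(struct shortMsg)];

/* Casting a short buffer to the longest layout is what trips GCC's
 * array-bounds analysis: the object is "partly outside array bounds"
 * even if only the common prefix is ever dereferenced. */
struct longMsg *m = (struct longMsg *)buf;
```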

Results:
https://github.com/madolson/valkey/actions/runs/12423414811/job/34686899884

This is a slightly cleaned up version of
https://github.com/valkey-io/valkey/pull/1439. I thought I had another
strategy but alas, it didn't work out.

Signed-off-by: Madelyn Olson <madelyneolson@gmail.com>
2024-12-20 12:10:48 -08:00
Madelyn Olson
b56f4f70d2
Update info.tcl test to revert client output limits sooner (#1462)
We set the client output buffer limits to 10 bytes and then execute
`info stats`, which produces more than 10 bytes of output and can
cause that command to throw an error.

I'm not sure why it wasn't consistently erroring before, might have been
some change related to the ubuntu upgrade though.

Issues related to ubuntu-tls are hopefully resolved now.

Signed-off-by: Madelyn Olson <madelyneolson@gmail.com>
2024-12-20 10:16:46 +08:00
Madelyn Olson
ffef236dbb
Fix storing the wrong PID in active servers (#1464)
In #1459, I missed that the data was also used to keep track of the PID
files so if the testing framework crashed it would no longer be able to
cleanup the extra servers. So now we properly extract the PID and store
it so we can clean up PIDs.

Signed-off-by: Madelyn Olson <madelyneolson@gmail.com>
2024-12-20 10:14:56 +08:00
Binbin
ca0b0c662a
Clear outdated failure reports more accurately (#1184)
There are two changes here:

1. The one in clusterNodeCleanupFailureReports: only a primary with slots can
file a failure report, so if the primary became a replica its failure report
should be cleared. This may lead to inaccurate node fail judgment in some network
partition cases, I guess; it will also affect the CLUSTER COUNT-FAILURE-REPORTS
command.

2. The one in clusterProcessGossipSection: it is not that important, but it can
print a "node is back online" log that helps us troubleshoot the problem, although
it may conflict with 1 at some points.

Signed-off-by: Binbin <binloveplay1314@qq.com>
2024-12-20 10:14:01 +08:00
Roshan Khatri
e48317eb34
Workflow changes to fix old release binaries (#1461)
- Moves `build-config.json` to workflow dir to build old versions with
new configs.
- Enables contributors to test the release workflow on a private repo by adding
`github.event_name == 'workflow_dispatch' ||`

---------

Signed-off-by: Roshan Khatri <rvkhatri@amazon.com>
2024-12-19 21:32:40 +01:00
Jungwoo Song
e9a1fe0b32
Support for reading from replicas in valkey-benchmark (#1392)
**Background**
When conducting performance tests using `valkey-benchmark`, reading from
replicas was not supported. Consequently, even in cluster mode, all
reads were directed to the primary nodes. This limitation made it
challenging to obtain accurate metrics during workload stress testing
for performance measurement or before a version upgrade.

Related issue : https://github.com/valkey-io/valkey/issues/900

**Changes**
1. Replaced the use of `CLUSTER NODES` with `CLUSTER SLOTS` when
fetching cluster configuration. This allows for easier identification of
replica slots.
2. Support for reading from replicas by executing the client in
`READONLY` mode.
3. Support reading from replicas even during slot migrations.
4. Introduced the CLI option `--rfr` to enable reading from replicas
only or from all cluster nodes. A warning was added to indicate that
write requests might not be handled correctly when using this option.

---------

Signed-off-by: bluayer <ijacsong98@gmail.com>
Signed-off-by: bluayer <bluayer@gmail.com>
Signed-off-by: Jungwoo Song <37579681+bluayer@users.noreply.github.com>
Co-authored-by: ranshid <88133677+ranshid@users.noreply.github.com>
2024-12-19 18:32:31 +02:00
Binbin
97029953a0
Minor log fixes when failover auth denied due to slot epoch (#1341)
The old reqEpoch mainly refers to requestCurrentEpoch, see:
```
    if (requestCurrentEpoch < server.cluster->currentEpoch) {
        serverLog(LL_WARNING, "Failover auth denied to %.40s (%s): reqEpoch (%llu) < curEpoch(%llu)", node->name,
                  node->human_nodename, (unsigned long long)requestCurrentEpoch,
                  (unsigned long long)server.cluster->currentEpoch);
        return;
    }
```

And here we refer to requestConfigEpoch, which is a bit misleading,
so change it to reqConfigEpoch to make it clear.

Signed-off-by: Binbin <binloveplay1314@qq.com>
2024-12-19 16:12:34 +08:00
Madelyn Olson
079f4edf2d
Add a hint about the current file for TCL debugging (#1459)
There are some tests that fail and give no useful information since they are
outside of a test context. Now we will at least get the file we are located in.

We can sort of reverse engineer where we are in the test by seeing which
tests have finished in a file.

```
[TIMEOUT]: clients state report follows.
sock6 => (SPAWNED SERVER) pid:30375 - tests/unit/info.tcl
Killing still running Valkey server 30375 - tests/unit/info.tcl
```

Signed-off-by: Madelyn Olson <madelyneolson@gmail.com>
2024-12-19 14:18:02 +08:00
Madelyn Olson
60197b30e2
Attempt to read secondary error from info test (#1452)
The test attempts to write 1MB of data in order to trigger a disconnect.
Normally, the data is fully flushed and we get the error on the read
(I/O error). However, it's possible we might fail the write, which
leaves the client in an inconsistent state. On the next command, we
finally process the I/O error on the FD. So, the simple fix is to
consume any secondary errors.

---------

Signed-off-by: Madelyn Olson <madelyneolson@gmail.com>
2024-12-18 09:17:11 -08:00
uriyage
8060c86d20
Offload TLS negotiation to I/O threads (#1338)
## TLS Negotiation Offloading to I/O Threads

### Overview
This PR introduces the ability to offload TLS handshake negotiations to
I/O threads, significantly improving performance under high TLS
connection loads.

### Key Changes
- Added infrastructure to offload TLS negotiations to I/O threads
- Refactored SSL event handling to allow I/O threads to modify connection flags.
- Introduced new connection flag to identify client connections

### Performance Impact
Testing with 650 clients with SET commands and 160 new TLS connections
per second in the background:

#### Throughput Impact of new TLS connections
- **With Offloading**: Minimal impact (1050K → 990K ops/sec)
- **Without Offloading**: Significant drop (1050K → 670K ops/sec)

#### New Connection Rate
- **With Offloading**: 
  - 1,757 conn/sec
- **Without Offloading**: 
  - 477 conn/sec

### Implementation Details
1. **Main Thread**:
   - Initiates negotiation-offload jobs to I/O threads
- Adds connections to pending-read clients list (using existing read
offload mechanism)
   - Post-negotiation handling:
     - Creates read/write events if needed for incomplete negotiations
     - Calls accept handler for completed negotiations

2. **I/O Thread**:
   - Performs TLS negotiation
   - Updates connection flags based on negotiation result

Related issue: https://github.com/valkey-io/valkey/issues/761

---------

Signed-off-by: Uri Yagelnik <uriy@amazon.com>
Signed-off-by: ranshid <88133677+ranshid@users.noreply.github.com>
Co-authored-by: ranshid <88133677+ranshid@users.noreply.github.com>
Co-authored-by: Madelyn Olson <madelyneolson@gmail.com>
2024-12-18 09:03:30 +02:00
Madelyn Olson
e203ca35b7
Fix undefined behavior defined by ASAN (#1451)
ASan now supports making sure you are passing in the correct pointer
type, which seems useful, but we can't support it since we pass in an
incorrect pointer in several places. This is most commonly done with
generic free functions, where we simply cast it to the correct type.

It's not a lot of code to clean up, so it seems appropriate to cleanup
instead of disabling the check.

---------

Signed-off-by: Madelyn Olson <madelyneolson@gmail.com>
Co-authored-by: Viktor Söderqvist <viktor.soderqvist@est.tech>
2024-12-17 17:48:53 -08:00
Viktor Szépe
b66698b887
Discover and fix new typos (#1446)
Upgrade `typos` and fix corresponding typos

---------

Signed-off-by: Viktor Szépe <viktor@szepe.net>
2024-12-17 17:45:43 -08:00
ranshid
ba25b586d5
Introduce FORCE_DEFRAG compilation option to allow activedefrag to run when the allocator is not jemalloc (#1303)
Introduce a compile-time option to force activedefrag to run even when
jemalloc is not used as the allocator.
This is in order to be able to run tests with defrag enabled
while using memory instrumentation tools.

fixes: https://github.com/valkey-io/valkey/issues/1241

---------

Signed-off-by: ranshid <ranshid@amazon.com>
Signed-off-by: Ran Shidlansik <ranshid@amazon.com>
Signed-off-by: Madelyn Olson <madelyneolson@gmail.com>
Signed-off-by: ranshid <88133677+ranshid@users.noreply.github.com>
Co-authored-by: Madelyn Olson <madelyneolson@gmail.com>
2024-12-17 19:07:55 +02:00
xbasel
7892bf808b
Fix test_reclaimFilePageCache to avoid tmpfs (#1379)
Avoid tmpfs as fadvise(FADV_DONTNEED) has no effect on memory-backed
filesystems.
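
For reference, the POSIX call involved behaves roughly as below; on tmpfs the advised pages are the file's backing store, so the kernel has nothing to drop:

```c
#include <fcntl.h>

/* Ask the kernel to drop cached pages for fd. Effective on disk-backed
 * filesystems; a no-op on tmpfs, where the page cache IS the file's
 * storage. Returns 0 on success, an error number otherwise. */
int dropPageCache(int fd) {
    return posix_fadvise(fd, 0, 0, POSIX_FADV_DONTNEED);
}
```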

Fixes https://github.com/valkey-io/valkey/issues/897

---------

Signed-off-by: Ran Shidlansik <ranshid@amazon.com>
Signed-off-by: ranshid <88133677+ranshid@users.noreply.github.com>
Co-authored-by: ranshid <88133677+ranshid@users.noreply.github.com>
Co-authored-by: Ran Shidlansik <ranshid@amazon.com>
2024-12-17 18:04:27 +02:00
Roshan Khatri
980a801159
Fix the secret for the test bucket. (#1447)
We have set the secret as `AWS_S3_TEST_BUCKET` for the test bucket and I
missed it in the initial review.

Signed-off-by: Roshan Khatri <rvkhatri@amazon.com>
2024-12-16 13:01:34 -08:00
Binbin
e024b4bd27
Drop the MEET packet if the link node is in handshake state (#1436)
After #1307 got merged, we noticed an assert happening in setClusterNodeToInboundClusterLink:
```
=== ASSERTION FAILED ===
==> '!link->node' is not true
```

In #778, we call setClusterNodeToInboundClusterLink to attach the node to the link
during MEET processing, so if we receive another MEET packet in a short time while
the node is still in handshake state, we will hit this assert and crash the server.

If the link is bound to a node and the node is in the handshake state, and we receive
a MEET packet, it may be that the sender sent multiple MEET packets, so here we drop
the MEET to avoid the assert in setClusterNodeToInboundClusterLink. The assert happens
when the other node sends a MEET packet because it detects that there is no inbound
link: this node creates a new node in HANDSHAKE state (with a random node name) and
responds with a PONG. The other node receives the PONG and removes the CLUSTER_NODE_MEET
flag. This node is supposed to open an outbound connection to the other node in the next
cron cycle, but before this happens, the other node re-sends a MEET on the same link
because it still detects no inbound connection.

Note that in getNodeFromLinkAndMsg, the node in the handshake state has a random name
and is not truly "known", so we don't know the sender. Dropping the MEET packet prevents
us from creating a random node, avoids incorrect link binding, and avoids a duplicate
MEET packet eliminating the handshake state.

Signed-off-by: Binbin <binloveplay1314@qq.com>
2024-12-16 13:43:48 +08:00
Binbin
ad24220681
Automatic failover vote is not limited by two times the node timeout (#1356)
This is a follow-up of #1305; we now decided to apply the same change
to automatic failover as well, that is, move forward with removing
it for both automatic and manual failovers.

Quote from Ping during the review:
Note that we already debounce transient primary failures with node
timeout, ensuring failover is only triggered after sustained outages.
Election timing is naturally staggered by replica spacing, making the
likelihood of simultaneous elections from replicas of the same shard
very low. The one-vote-per-epoch rule further throttles retries and
ensures orderly elections. On top of that, quorum-based primary failure
confirmation, cluster-state convergence, and slot ownership validation
are all built into the process.

Quote from Madelyn during the review:
It's against the specific primary. It's to prevent double failovers.
If a primary just took over we don't want someone else to try to
take over and give the new primary some amount of time to take over.
I have not seen this issue though, it might have been over optimizing?
The double failure mode, where a node fails and then another node fails
within the nodetimeout also doesn't seem that common either though.

So the conclusion is that we all agreed to remove it completely,
it will make the code a lot simpler. And if there is other specific
edge cases we are missing, we will fix it in other way.

See discussion #1305 for more information.

Signed-off-by: Binbin <binloveplay1314@qq.com>
2024-12-15 12:09:53 +08:00
Rain Valentine
88942c8e61
Replace dict with new hashtable for sets datatype (#1176)
The new `hashtable` provides faster lookups and uses less memory than
`dict`.

A TCL test case "SRANDMEMBER with a dict containing long chain" is
deleted because it's covered by a hashtable unit test
"test_random_entry_with_long_chain", which is already present.

This change also moves some logic from dismissMemory (object.c) to
zmadvise_dontneed (zmalloc.c), so the hashtable implementation which
needs the dismiss functionality doesn't need to depend on object.c and
server.h.

This PR follows #1186.

---------

Signed-off-by: Rain Valentine <rsg000@gmail.com>
Signed-off-by: Viktor Söderqvist <viktor.soderqvist@est.tech>
Co-authored-by: Viktor Söderqvist <viktor.soderqvist@est.tech>
2024-12-14 20:53:48 +01:00
Madelyn Olson
0e96bb311e
Synchronously delete data during defrag tests (#1443)
The creation of fragmentation is delayed when we use lazy-free. You can
induce some of the active-defrag tests to fail by artificially adding a
delay in the lazyfree process, similar to the issues seen in #1433 and
issues like
https://github.com/valkey-io/valkey/actions/runs/12267010712/job/34226304803#step:7:6538.
The solution is to always do sync free during tests.

Might close https://github.com/valkey-io/valkey/issues/1433.

Signed-off-by: Madelyn Olson <madelyneolson@gmail.com>
2024-12-14 19:14:01 +01:00
Madelyn Olson
3cd176dc39
Avoid importing memory aligned malloc (#1442)
We deprecate the usage of classic malloc and free, but under certain
circumstances they might get imported from intrinsics. The original
thought was that we should just override malloc and free to use zmalloc
and zfree, but I think we should continue to deprecate them to avoid
accidental imports of allocations.

Closes https://github.com/valkey-io/valkey/issues/1434.

---------

Signed-off-by: Madelyn Olson <madelyneolson@gmail.com>
2024-12-14 19:13:04 +01:00
Binbin
7d72fada2c
Fix wrong file name in build-release-packages.yml (#1437)
Introduced in #1363, the file name does not match.

Signed-off-by: Binbin <binloveplay1314@qq.com>
2024-12-13 14:26:20 -08:00
Binbin
d588bb4406
Skip build-release-packages CI job in forks (#1438)
The CI job was introduced in #1363, we should skip it in forks.

Signed-off-by: Binbin <binloveplay1314@qq.com>
2024-12-13 16:32:54 -05:00
Thalia Archibald
b60097ba07
Check length before reading in stringmatchlen (#1431)
Fixes four cases where `stringmatchlen` could overrun the pattern if it
is not terminated with NUL.

These commits are cherry-picked from my
[fork](https://github.com/thaliaarchi/antirez-stringmatch) which
extracts `stringmatch` as a library and compares it to other projects by
antirez which use the same matcher.

Signed-off-by: Thalia Archibald <thalia@archibald.dev>
2024-12-13 11:05:19 +01:00
Jim Brunner
32f2c73cb5
defrag: eliminate persistent kvstore pointer and edge case fixes (#1430)
This update addresses several issues in defrag:
1. In the defrag redesign
(https://github.com/valkey-io/valkey/pull/1242), a bug was introduced
where `server.cronloops` was no longer being incremented in the
`whileBlockedCron()`. This resulted in some memory statistics not being
updated while blocked.
2. In the test case for AOF loading, we were seeing errors due to defrag
latencies. However, running the math, the latencies are justified given
the extremely high CPU target of the testcase. Adjusted the expected
latency check to allow longer latencies for this case where defrag is
undergoing starvation while AOF loading is in progress.
3. A "stage" is passed a "target". For the main dictionary and expires,
we were passing in a `kvstore*`. However, on flushall or swapdb, the
pointer may change. It's safer and more stable to use an index for the
DB (a DBID). Then if the pointer changes, we can detect the change, and
simply abort the stage. (If there's still fragmentation to deal with,
we'll pick it up again on the next cycle.)
4. We always start a new stage on a new defrag cycle. This gives the new
stage time to run, and prevents latency issues for certain stages which
don't operate incrementally. However, often several stages will require
almost no work, and this will leave a chunk of our CPU allotment unused.
This is mainly an issue in starvation situations (like AOF loading or
LUA script) - where defrag is running infrequently, with a large
duty-cycle. This change allows a new stage to be initiated if we still
have a standard duty-cycle remaining. (This can happen during starvation
situations where the planned duty cycle is larger than the standard
cycle. Most likely this isn't a concern for real scenarios, but it was
observed in testing.)
5. Minor comment correction in `server.h`

Signed-off-by: Jim Brunner <brunnerj@amazon.com>
2024-12-12 14:55:57 -08:00
Roshan Khatri
3a1043a4f0
Fix Valkey binary build workflow, version support changes. (#1429)
This change makes the binary build on the target ubuntu version.

This PR also deprecates Ubuntu 18, and Valkey will now support:

- X86:
  - Ubuntu 20
  - Ubuntu 22
  - Ubuntu 24
- ARM:
  - Ubuntu 20
  - Ubuntu 22
   
Removed ARM ubuntu 24 as the action we are using for ARM builds does not
support Ubuntu 24.

---------

Signed-off-by: Roshan Khatri <rvkhatri@amazon.com>
2024-12-12 14:46:35 -08:00
Vu Diep
ab69a8a55d
Use configure-aws-credentials workflow instead of passing secret_access_key (#1363)
## Summary
This PR fixes #1346 where we can get rid of the long term credentials by
using OpenID Connect. OpenID Connect (OIDC) allows your GitHub Actions
workflows to access resources in Amazon Web Services (AWS), without
needing to store the AWS credentials as long-lived GitHub secrets.

---------

Signed-off-by: vudiep411 <vdiep@amazon.com>
2024-12-12 14:42:52 -08:00
ranshid
2d92404522
Avoid defragging scripts during EVAL command execution (#1414)
This can happen when scripts are running for a long period of time and the server attempts to defrag them in whileBlockedCron.

Signed-off-by: Ran Shidlansik <ranshid@amazon.com>
2024-12-12 13:52:58 -08:00
Pierre
5f7fe9ef21
Send MEET packet to node if there is no inbound link to fix inconsistency when handshake timed out (#1307)
In some cases, when meeting a new node, if the handshake times out, we
can end up with an inconsistent view of the cluster where the new node
knows about all the nodes in the cluster, but the cluster does not know
about this new node (or vice versa).
To detect this inconsistency, we now check if a node has an outbound
link but no inbound link, which probably means this node does
not know us. In that case we (re-)send a MEET packet to this node to do
a new handshake with it.
If we receive a MEET packet from a known node, we disconnect the
outbound link to force a reconnect and sending of a PING packet so that
the other node recognizes the link as belonging to us. This prevents
cases where a node could send MEET packets in a loop because it thinks
the other node does not have an inbound link.

This fixes the bug described in #1251.

---------

Signed-off-by: Pierre Turin <pieturin@amazon.com>
2024-12-11 17:26:06 -08:00
Jim Brunner
0c8ad5cd34
defrag: allow defrag to start during AOF loading (#1420)
Addresses https://github.com/valkey-io/valkey/issues/1393

Changes:
* During AOF loading or long running script, this allows defrag to be
initiated.
* The AOF defrag test was corrected to eliminate the wait period and
rely on non-timer invocations.
* Logic for "overage" time in defrag was changed. It previously
accumulated underage, leading to large latencies in extreme tests with
very high CPU percentages: after several simple stages were completed
during infrequent blocked processing, a large cycle time would be
experienced.

Signed-off-by: Jim Brunner <brunnerj@amazon.com>
2024-12-11 19:47:06 +02:00
Binbin
1acf7f71c0
Fix memory leak in the new hashtable unittest (#1421)
There is a leak in here: hashtableTwoPhasePopDelete won't call the entry
destructor, so as with hashtablePop we need to call it ourselves.

Signed-off-by: Binbin <binloveplay1314@qq.com>
2024-12-11 06:40:18 +01:00
Viktor Söderqvist
3eb8314be6 Replace dict with hashtable for keys, expires and pubsub channels
Instead of a dictEntry with pointers to key and value, the hashtable
has a pointer directly to the value (robj) which can hold an embedded
key and acts as a key-value in the hashtable. This minimizes the number
of pointers to follow and thus the number of memory accesses to lookup
a key-value pair.

        Keys         robj
      hashtable
      +-------+   +-----------------------+
      | 0     |   | type, encoding, LRU   |
      | 1 ------->| refcount, expire      |
      | 2     |   | ptr                   |
      | ...   |   | optional embedded key |
      +-------+   | optional embedded val |
                  +-----------------------+

The expire timestamp (TTL) is also stored in the robj, if any. The expire
hash table points to the same robj.

Overview of changes:

* Replace dict with hashtable in kvstore (kvstore.c)
* Add functions for embedding key and expire in robj (object.c)
  * When there's unused space, reserve an expire field to avoid reallocating
    it later if an expire is added.
  * Always reserve space for expire for large key names to avoid realloc
    if it's set later.
* Update db functions (db.c)
  * dbAdd, setKey and setExpire reallocate the object when embedding a key
  * setKey does not increment the reference counter, since it would require
    duplicating the object. This responsibility is moved to the caller.
* Remove logic for shared integer objects as values in the database. The keys
  are now embedded in the objects, so all objects in the database need to be
  unique. Thus, we can't use shared objects as values. Also delete test cases
  for shared integers.
* Adjust various commands to the changes mentioned above.
* Adjust defrag code
  * Improvement: Don't access the expires table before defrag has actually
    reallocated the object.
* Adjust test cases that were using hard-coded sizes for dict when realloc
  would happen, and some other adjustments in test cases.
* Adjust memory prefetch for new hash table implementation in IO-threading,
  using new `hashtableIncrementalFind` API
* Adjust offloading of free() to IO threads: Object free to be done in main
  thread while keeping obj->ptr offloading in IO-thread since the DB object is
  now allocated by the main-thread and not by the IO-thread as it used to be.
* Let expireIfNeeded take an optional value, to avoid looking up the expires
  table when possible.

---------

Signed-off-by: Uri Yagelnik <uriy@amazon.com>
Signed-off-by: uriyage <78144248+uriyage@users.noreply.github.com>
Signed-off-by: Viktor Söderqvist <viktor.soderqvist@est.tech>
Co-authored-by: Uri Yagelnik <uriy@amazon.com>
2024-12-10 21:30:56 +01:00
Rain Valentine
4efff42f04 Replace dict with hashtable in command tables (#1065)
This changes the type of command tables from dict to hashtable. Command
table lookup takes ~3% of overall CPU time in benchmarks, so it is a
good candidate for optimization.

My initial SET benchmark comparison suggests that hashtable is about 4.5
times faster than dict and this replacement reduced overall CPU time by
2.79% 🥳

---------

Signed-off-by: Rain Valentine <rainval@amazon.com>
Signed-off-by: Rain Valentine <rsg000@gmail.com>
Signed-off-by: Viktor Söderqvist <viktor.soderqvist@est.tech>
Co-authored-by: Rain Valentine <rainval@amazon.com>
2024-12-10 21:30:56 +01:00
Viktor Söderqvist
c8ee5c2c46 Hashtable implementation including unit tests
A cache-line aware hash table with a user-defined key-value entry type,
supporting incremental rehashing, scan, iterator, random sampling,
incremental lookup and more...

Signed-off-by: Viktor Söderqvist <viktor.soderqvist@est.tech>
2024-12-10 21:30:56 +01:00
Viktor Söderqvist
b4c2a1804a
Fix flaky init_test proc in maxmemory test suite (#1419)
The following error has been seen, but not reliably reproduced:

```
*** [err]: eviction due to output buffers of pubsub, client eviction: true in tests/unit/maxmemory.tcl
Expected '42' to be equal to '50' (context: type proc line 17 cmd {assert_equal [r dbsize] 50} proc ::init_test level 2)
```

The reason is probably that FLUSHDB is asynchronous and when we start
populating new keys, they are evicted because the background flush is
too slow. Changing this to FLUSHDB SYNC prevents this.

Signed-off-by: Viktor Söderqvist <viktor.soderqvist@est.tech>
2024-12-10 20:52:06 +02:00
Binbin
7e564887b9
Set HIDDEN_CONFIG flag on events-per-io-thread (#1408)
events-per-io-thread is a config used for testing purposes that allows us
to force the main thread to always offload work to the IO threads; see
adjustIOThreadsByEventLoad for more details.

Signed-off-by: Binbin <binloveplay1314@qq.com>
2024-12-11 00:37:18 +08:00
Viktor Söderqvist
2dfe25b408
Fix race in test "CLUSTER SLOT-STATS cpu-usec for blocking commands, unblocked on timeout" (#1416)
This fix changes the timeout for BLPOP in this test case from 1 second
to 0.5 seconds.

In the test case quoted below, the procedure
`wait_for_blocked_clients_count` waits for one second by default. If
BLPOP has 1 second timeout and the first
`wait_for_blocked_clients_count` finishes very fast, then the second
`wait_for_blocked_clients_count` can time out before the BLPOP has been
unblocked.

```TCL
    test "CLUSTER SLOT-STATS cpu-usec for blocking commands, unblocked on timeout." {
        # Blocking command with 1 second timeout.
        set rd [valkey_deferring_client]
        $rd BLPOP $key 1

        # Confirm that the client is blocked, then unblocked after 1 second timeout.
        wait_for_blocked_clients_count 1
        wait_for_blocked_clients_count 0
```

As seen in the definition of `wait_for_blocked_clients_count`, the total
time to wait is 1 second by default.

```TCL
proc wait_for_blocked_clients_count {count {maxtries 100} {delay 10} {idx 0}} {
    wait_for_condition $maxtries $delay  {
        [s $idx blocked_clients] == $count
    } else {
        fail "Timeout waiting for blocked clients"
    }
}
```

Fixes #1121

Signed-off-by: Viktor Söderqvist <viktor.soderqvist@est.tech>
2024-12-10 14:46:21 +01:00
Yanqi Lv
f951a1ca73
Add new flag in CLIENT LIST for import-source client (#1398)
- Add new flag "I" in `CLIENT LIST` for import-source client
- Add `DEBUG_CONFIG` for import-mode
- Allow import-source status to be turned off when import-mode is off

Fixes #1350 and
https://github.com/valkey-io/valkey/pull/1185#discussion_r1851049362.

---------

Signed-off-by: lvyanqi.lyq <lvyanqi.lyq@alibaba-inc.com>
Signed-off-by: Yanqi Lv <lvyanqi.lyq@alibaba-inc.com>
Co-authored-by: Viktor Söderqvist <viktor.soderqvist@est.tech>
Co-authored-by: Binbin <binloveplay1314@qq.com>
2024-12-10 13:35:07 +01:00
Sarthak Aggarwal
9cfe1b3d81
Set Command with IFEQ Support (#1324)
This PR allows Valkey users to perform conditional updates where the
SET command is completed only if the given comparison-value matches the
key’s current value.

Syntax:

```
SET key value IFEQ comparison-value
```

Behavior:

If the values match, the SET completes as expected. If they do not
match, the command returns a (nil), except if the GET argument is also
given (see below).

Behavior with Additional Flags:

1. ```SET key value IFEQ comparison-value GET``` returns the existing
value, regardless of whether it matches comparison-value or not. The
conditional set operation is performed if the given comparison value
matches the existing value. To check if the SET succeeded, the caller
needs to check if the returned string matches the comparison-value.
2. ```SET key value IFEQ comparison-value XX``` is a syntax error.
3.  ```SET key value IFEQ comparison-value NX``` is a syntax error.
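
An illustrative `valkey-cli` session (key and values made up) matching the behavior described above:

```
127.0.0.1:6379> SET color blue
OK
127.0.0.1:6379> SET color green IFEQ blue
OK
127.0.0.1:6379> SET color red IFEQ yellow
(nil)
127.0.0.1:6379> SET color red IFEQ yellow GET
"green"
```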

Closes: #1215

---------

Signed-off-by: Sarthak Aggarwal <sarthagg@amazon.com>
2024-12-10 12:54:49 +01:00
Madelyn Olson
4f61034934
Update governance and maintainers file for Valkey committers (#1390)
We added two more committers, but according to our governance document
that makes them TSC members. As we discussed, for now we want to keep
the balance of corporate interests, so we are updating the governance to
explicitly list TSC members as distinct from folks with just write
permissions.

Also adds the new folks with commit permissions.

---------

Signed-off-by: Madelyn Olson <madelyneolson@gmail.com>
2024-12-09 12:28:17 -08:00
Binbin
1ba85d002a
Use binary representation in assert crash log, cleanup in crash log (#1410)
Change assert crash log to also use binary representation like 5bdd72bea77d4bb237441c9a671e80edcdc998ad.
And do not print the password in assert crash log like 56eef6fb5ab7a755485c19f358761954ca459472.

In addition, for 5bdd72bea77d4bb237441c9a671e80edcdc998ad, we used to print '"argv"':
the code wrapped the value in '' while sdscatrepr already adds "", so the
extra '' is now removed here.

Extracted the getArgvReprString method and cleaned up the code a bit.

Examples:
```
debug assert "\x00abc"

before:
client->argv[0] = "debug" (refcount: 1)
client->argv[1] = "assert" (refcount: 1)
client->argv[2] = "" (refcount: 1)

after:
client->argv[0] = "debug" (refcount: 1)
client->argv[1] = "assert" (refcount: 1)
client->argv[2] = "\x00abc" (refcount: 1)

debug panic "\x00abc"

before:
argc: '3'
argv[0]: '"debug"'
argv[1]: '"panic"'
argv[2]: '"\x00abc"'

after:
argc: 3
argv[0]: "debug"
argv[1]: "panic"
argv[2]: "\x00abc"
```

Signed-off-by: Binbin <binloveplay1314@qq.com>
2024-12-10 00:37:04 +08:00
ranshid
5be4ce6d27
Optimize ZRANK to avoid path comparisons (#1389)
ZRANK is a widely used command for workloads using sorted-sets. For
example, in leaderboards it enables querying the specific rank of a player.
The way ZRANK is currently implemented is:

1. locate the element in the SortedSet hashtable.
2. take the score of the element and use it in order to locate the
element in the SkipList (when listpack encoding is not used)
3. during the SkipList scan for the element we keep the path and use it
in order to sum the span in each path node in order to calculate the
element rank

One problem with this approach is that it involves multiple compare
operations in order to locate the element. Specifically, string
comparison can be expensive since it requires accessing multiple memory
locations for the items the element string is compared against.
Perf analysis showed this can take up to 20% of the rank scan time. (TBD
- provide the perf results for example)

We can improve the rank search by taking advantage of the fact that the
element node in the skiplist is pointed to by the hashtable value!
Our Skiplist implementation is using FatKeys, where each added node is
assigned a randomly chosen height. Say we keep a height record for every
skiplist element. In order to get an element rank we simply:

1. locate the element in the SortedSet hashtable.
2. we go directly to the node in the skiplist.
3. we jump to the full height of the node and take the span value.
4. we continue going forward and always jump to the highest point in
each node we get to, making sure to sum all the spans.
5. we take off the summed spans from the SkipList length and we now have
the specific node rank. :)
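
In schematic C (helper names such as `nodeHeight()`, `spanAt()` and `forwardAt()` are assumptions; per the memory-efficiency note below, the real code derives the height from the level-0 span):

```c
typedef struct zskiplistNode zskiplistNode;

int nodeHeight(zskiplistNode *x);                 /* recorded node height */
unsigned long spanAt(zskiplistNode *x, int lvl);  /* span at a given level */
zskiplistNode *forwardAt(zskiplistNode *x, int lvl);

/* Walk forward from the element's own node, always at the node's top
 * level, summing spans; the 1-based rank is the list length minus the
 * distance walked to the tail. */
unsigned long rankByForwardWalk(unsigned long zsl_length, zskiplistNode *x) {
    unsigned long dist_to_tail = 0;
    while (x) {
        int top = nodeHeight(x) - 1;
        dist_to_tail += spanAt(x, top);
        x = forwardAt(x, top);
    }
    return zsl_length - dist_to_tail;
}
```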

In order to test this method I created several benchmarks. All
benchmarks used the same seeds and the lists contained 1M elements.
Since a very important factor is the number of scores compared to the
number of elements (a small ratio means more string compares during
searches), each benchmark test used a different number of scores (1, 10K,
100K, 1M).
Some results:

**TPS**

Scores range | non-optimized | optimized | gain
-- | -- | -- | --
1 | 416042 | 605363 | 45.51%
10K | 359776 | 459200 | 27.63%
100K | 380387 | 459157 | 20.71%
1M | 416059 | 450853 | 8.36%

**Latency**

Scores range | non-optimized | optimized | gain
-- | -- | -- | --
1 | 1.191000 | 0.831000 | -30.23%
10K | 1.383000 | 1.095000 | -20.82%
100K | 1.311000 | 1.087000 | -17.09%
1M | 1.191000 | 1.119000 | -6.05%

### Memory efficiency

Adding another field to each skiplist node can cause degradation in
memory efficiency for large sortedsets. We use the fact that the level 0
recorded span of ALL nodes can either be 1 or zero (for the last node).
So we use wrappers in order to get a node span and override the span for
level 0 to hold the node height.

---------

Signed-off-by: Ran Shidlansik <ranshid@amazon.com>
2024-12-09 15:48:46 +01:00
Binbin
924729eb16
Fix the election being reset wrongly before failover epoch was obtained (#1339)
After #1009, we will reset the election when we receive
a claim with an equal or higher epoch, since a node can win
an election in the past.

But we need to consider the time before the node actually
obtains the failover_auth_epoch. The failover_auth_epoch
default is 0, so before the node actually gets the failover
epoch, we might wrongly reset the election.

This is probably harmless, but will produce misleading log
output and may delay election by a cron cycle or beforesleep.
Now we will only reset the election when a node actually
obtains the failover epoch.

Signed-off-by: Binbin <binloveplay1314@qq.com>
2024-12-09 16:19:02 +08:00
Roman Gershman
b09db3ef78
Fix typo in streams seen-time / active-time test (#1409)
This variable name is wrong; it causes the wrong variable to be asserted.

Signed-off-by: Roman Gershman <romange@gmail.com>
2024-12-09 16:01:43 +08:00
Guillaume Koenig
e8078b7315
Allow MEMORY MALLOC-STATS and MEMORY PURGE during loading phase (#1317)
- Enable investigation of memory issues during loading
- Previously, all memory commands were rejected with LOADING error
(except memory help)
- `MEMORY MALLOC-STATS` and `MEMORY PURGE` are now allowed
as they don't depend on the dataset
- `MEMORY STATS` and `MEMORY USAGE KEY` remain disallowed

Fixes #1299

Signed-off-by: Guillaume Koenig <knggk@amazon.com>
Signed-off-by: Binbin <binloveplay1314@qq.com>
Co-authored-by: Binbin <binloveplay1314@qq.com>
2024-12-08 20:30:07 +08:00
Binbin
176fafcaf7
Add a note to conf about the dangers of modifying dir at runtime (#887)
We've had security issues in the past with it, which is why
we marked it as PROTECTED. But, modifying during runtime
is also a dangerous action. For example, when child processes
are running, persistent temp files and log files may have
unexpected effects.

A scenario for modifying dir at runtime is to recover from a disk
failure, such as using disk-based replication to migrate a node, or
writing nodes.conf to save the cluster configuration.

We decided to leave it as is and add a note in the conf
about the dangers of modifying dir at runtime.

Signed-off-by: Binbin <binloveplay1314@qq.com>
2024-12-08 20:28:14 +08:00
Viktor Söderqvist
f20d629dbe
Fix sanitizer builds with clang (#1402)
By including <stdatomic.h> after the other includes in the unit test, we
can avoid redefining a macro which led to a build failure.

Fixes #1394

---------

Signed-off-by: Viktor Söderqvist <viktor.soderqvist@est.tech>
2024-12-07 10:26:31 +01:00
Viktor Söderqvist
a2fe6af457
Fix Module Update Args test when other modules are loaded (#1403)
Fixes #1400

Signed-off-by: Viktor Söderqvist <viktor.soderqvist@est.tech>
2024-12-07 10:25:40 +01:00
Caiyi Wu
6df376d68a
Fix coredump when use hellodict example module (#1395)
In the ValkeyModule_OnLoad method of the file hellodict.c, the parameter
keystep of ValkeyModule_CreateCommand should be 1. Otherwise, executing
the command will coredump.

    MODULE LOAD /home/tiger/valkey/src/modules/hellodict.so
    COMMAND GETKEYS HELLODICT.SET key value

Signed-off-by: Codebells <1347103071@qq.com>
2024-12-05 20:01:38 +01:00
风去幽墨
6b3e1228cd
RDMA: Fix dead loop when transfer large data (20KB) (#1386)
Determine the status of the Client when attempting to read data. If
state=CLIENT_COMPLETED_IO, no read attempt is made and I/O operations on
the Client are rescheduled by the main thread.

> And 20474 Byte = PROTO_IOBUF_LEN(16KB) + SDS_HDR_VAR(16, s)(4090 Byte)

Fixes #1385

---------

Signed-off-by: fengquyoumo <1455117463@qq.com>
2024-12-05 18:26:56 +01:00
Wen Hui
71560a2a4a
Add API UpdateRuntimeArgs for updating the module arguments during runtime (#1041)
Before Redis OSS 7, if we load a module with some arguments during
runtime and run the command "config rewrite", the module information
will not be saved into the config file.

Since Redis OSS 7 and Valkey 7.2, if we load a module with some
arguments during runtime, the module information (path, arguments
number, and arguments value) can be saved into the config file after the
config rewrite command is called. Thus, the module will be loaded
automatically the next time the server starts up.

Following is one example:

```
bind 172.25.0.58
port 7000
protected-mode no
enable-module-command yes

# Generated by CONFIG REWRITE
latency-tracking-info-percentiles 50 99 99.9
dir "/home/ubuntu/valkey"
save 3600 1 300 100 60 10000
user default on nopass sanitize-payload ~* &* +@ALL
loadmodule tests/modules/datatype.so 10 20
```

However, there is one problem.
If developers write a module and update the running arguments in
some way, the updated arguments cannot be saved into the config file
even if "config rewrite" is called.
The reason comes from the following function,
rewriteConfigLoadmoduleOption (src/config.c):

```c
void rewriteConfigLoadmoduleOption(struct rewriteConfigState *state) {
    ..........
    struct ValkeyModule *module = dictGetVal(de);
    line = sdsnew("loadmodule ");
    line = sdscatsds(line, module->loadmod->path);
    for (int i = 0; i < module->loadmod->argc; i++) {
        line = sdscatlen(line, " ", 1);
        line = sdscatsds(line, module->loadmod->argv[i]->ptr);
    }
    rewriteConfigRewriteLine(state, "loadmodule", line, 1);
    .......
}
```

The function only saves the initial arguments information
(module->loadmod) into the config file.

After core members discuss, ref
https://github.com/valkey-io/valkey/issues/1177


We decided to add the following API to implement this feature:

Original proposal:

```c
int VM_UpdateRunTimeArgs(ValkeyModuleCtx *ctx, int index, char *value);
```

Updated proposal:

```c
ValkeyModuleString **VM_GetRuntimeArgs(ValkeyModuleCtx *ctx);
int VM_UpdateRuntimeArgs(ValkeyModuleCtx *ctx, int argc, ValkeyModuleString **values);
```

Why we do not recommend the following way:

```
MODULE UNLOAD
Update module args in the conf file
MODULE LOAD
```

I think there are the following disadvantages:

1. Some modules cannot be unloaded, such as the example module
datatype.so (tests/modules/datatype.so).
2. MODULE UNLOAD + MODULE LOAD is not an atomic operation.
3. Sometimes, if we just run the module unload, the clients' business
could be interrupted.

---------

Signed-off-by: hwware <wen.hui.ware@gmail.com>
2024-12-05 11:58:24 -05:00
Madelyn Olson
a401e3789d
Update code of conduct maintainers email address (#1391)
Updating code of conduct maintainer's email address

Signed-off-by: Madelyn Olson <madelyneolson@gmail.com>
2024-12-04 10:33:14 -08:00
zhenwei pi
105509cdad
Run RDMA builtin in CI workflow (#1380)
Since 4695d118dd (#1209), RDMA is supported as a builtin.
The module connection type may be removed in the future, so run builtin
RDMA support in the CI workflow.

The RDMA module is compiled only in CI; keep it as a build check only
until the module connection type becomes obsolete.

Signed-off-by: zhenwei pi <pizhenwei@bytedance.com>
2024-12-03 23:09:56 +01:00
Jim Brunner
349bc7547b
defrag: use monotime in module interface (#1388)
The recent PR (https://github.com/valkey-io/valkey/pull/1242) converted
Active Defrag to use `monotime`. In that change, a conversion was
performed to continue to use `ustime()` as part of the module interface.
Since this time is only used internally, and never actually exposed to
the module, we can convert this to use `monotime` directly.

Signed-off-by: Jim Brunner <brunnerj@amazon.com>
2024-12-03 11:19:53 -08:00
uriyage
9f8b174c2e
Optimize IO thread offload for modified argv (#1360)
### Improve expired commands performance with IO threads

#### Background
In our IO threads architecture, IO threads allocate a client's argv, and
later, when we free it after processCommand, we offload the free to the
IO threads.
With jemalloc, it's crucial that the same thread that allocates memory
also frees it.

For some commands we modify the client's argv in the main thread during
command processing (for example in `SET EX` command we rewrite the
command to use absolute time for replication propagation).

#### Current issues
1. When commands are rewritten (e.g., expire commands), we store the
original argv
   in `c->original_argv`. However, we're currently:
   - Freeing new argv (allocated by main thread) in IO threads
   - Freeing original argv (allocated by IO threads) in main thread
2. Currently, `c->original_argv` points to new array with old 
objects, while `c->argv` has old array with new objects, making memory
free management complicated.

#### Changes
1. Refactored argv modification handling code to ensure consistency -
both array and objects are now either all new or all old
2. Moved original_argv cleanup to happen in resetClient after argv
cleanup
3. Modified IO threads code to properly handle original argv cleanup
when argv are modified.

#### Performance Impact
Benchmark with `SET EX` commands (650 clients, 512 byte value, 8 IO
threads):
- New implementation: **729,548 ops/sec**
- Old implementation: **633,243 ops/sec**
Representing a **~15%** performance improvement due to more efficient
memory handling.

---------

Signed-off-by: Uri Yagelnik <uriy@amazon.com>
Signed-off-by: ranshid <88133677+ranshid@users.noreply.github.com>
Co-authored-by: ranshid <88133677+ranshid@users.noreply.github.com>
2024-12-03 19:20:31 +02:00
Jim Brunner
397201c48f
Refactor of ActiveDefrag to reduce latencies (#1242)
Refer to:  https://github.com/valkey-io/valkey/issues/1141

This update refactors the defrag code to:
* Make the overall code more readable and maintainable
* Reduce latencies incurred during defrag processing

With this update, the defrag cycle time is reduced to 500us, with more
frequent cycles. This results in much more predictable latencies, with a
dramatic reduction in tail latencies.

(See https://github.com/valkey-io/valkey/issues/1141 for more complete
details.)

This update is focused mostly on the high-level processing, and does NOT
address lower level functions which aren't currently timebound (e.g.
`activeDefragSdsDict()`, and `moduleDefragGlobals()`). These are out of
scope for this update and left for a future update.

I fixed `kvstoreDictLUTDefrag` because it was using up to 7ms on a CME
single shard. See original github issue for performance details.

---------

Signed-off-by: Jim Brunner <brunnerj@amazon.com>
Signed-off-by: Madelyn Olson <madelyneolson@gmail.com>
Co-authored-by: Madelyn Olson <madelyneolson@gmail.com>
2024-12-03 08:42:29 -08:00
Nugine
3df609ef06
Optimize PFCOUNT, PFMERGE command by SIMD acceleration (#1293)
This PR optimizes the performance of HyperLogLog commands (PFCOUNT,
PFMERGE) by adding AVX2 fast paths.

Two AVX2 functions are added for conversion between raw representation
and dense representation. They are 15~30 times faster than the scalar
implementation. Note that sparse representation is not accelerated.

AVX2 fast paths are enabled when the CPU supports AVX2 (checked at
runtime) and the hyperloglog configuration is default (HLL_REGISTERS ==
16384 && HLL_BITS == 6).

`PFDEBUG SIMD (ON|OFF)` subcommand is added for unit tests. A new TCL
unit test checks that the results produced by non-AVX2 and AVX2
implementations are exactly equal.

When merging 3 dense hll structures, the benchmark shows a 12x speedup
compared to the scalar version.

```
pfcount key1 key2 key3
pfmerge keyall key1 key2 key3
```

```
======================================================================================================
Type             Ops/sec    Avg. Latency     p50 Latency     p99 Latency   p99.9 Latency       KB/sec 
------------------------------------------------------------------------------------------------------
PFCOUNT-scalar    5665.56        35.29839        32.25500        63.99900        67.58300       608.60
PFCOUNT-avx2     72377.83         2.75834         2.67100         5.34300         6.81500      7774.96
------------------------------------------------------------------------------------------------------
PFMERGE-scalar    9851.29        20.28806        20.09500        36.86300        39.16700       615.71
PFMERGE-avx2    125621.89         1.59126         1.55100         3.11900         4.70300     15702.74
------------------------------------------------------------------------------------------------------

scalar: valkey:unstable  2df56d87c0ebe802f38e8922bb2ea1e4ca9cfa76
avx2:   Nugine:hll-simd  8f9adc34021080d96e60bd0abe06b043f3ed0275

CPU:    13th Gen Intel® Core™ i9-13900H × 20
Memory: 32.0 GiB
OS:     Ubuntu 22.04.5 LTS
```

Experiment repo: https://github.com/Nugine/redis-hyperloglog
Benchmark script:
https://github.com/Nugine/redis-hyperloglog/blob/main/scripts/memtier.sh
Algorithm:
https://github.com/Nugine/redis-hyperloglog/blob/main/cpp/bench.cpp

---------

Signed-off-by: Xuyang Wang <xuyangwang@link.cuhk.edu.cn>
2024-12-02 19:40:38 +01:00
Binbin
fbbfe5d3d3
Print logs when the cluster state changes to fail or the fail reason changes (#1188)
This log allows us to easily distinguish between full coverage and
minority partition when the cluster fails. Sometimes it is not easy
to see the minority partition in healthy shards (both primary and
replicas).

And we decided not to add a cluster_fail_reason field to cluster info,
given that there are only two reasons and both are well-known; if
we end up adding more down the road, we can add it in the future.

Signed-off-by: Binbin <binloveplay1314@qq.com>
2024-12-02 15:55:24 +08:00
Vadym Khoptynets
90475af594
Free strings during BGSAVE/BGAOFRW to reduce copy-on-write (#905)
**Motivation**

Copy-on-write (COW) amplification refers to the issue where writing to a
small object leads to the entire page being cloned, resulting in
inefficient memory usage. This issue arises during the BGSAVE process,
which can be particularly problematic on instances with limited memory.
If the BGSAVE process could release unneeded memory, it could reduce
memory consumption. To address this, the BGSAVE process calls the
`madvise` function to signal the operating system to reclaim the buffer.
However, this approach does not work for buffers smaller than a page
(usually 4KiB). Even after multiple such calls, where a full page may be
free, the operating system will not reclaim it.
To solve this issue, we can call `zfree` directly. This allows the
allocator (jemalloc) to handle the bookkeeping and release pages when
buffers are no longer needed. This approach reduces copy-on-write
events.

**Benchmarks**
To understand how usage of `zfree` affects BGSAVE and the memory
consumption, I ran 45 benchmarks that compare my clone with the vanilla
version. The benchmark has the following steps:
1. Start a new Valkey process
2. Fill the DB with data sequentially
3. Run a warmup to randomize the memory layout
4. Introduce fragmentation by deleting part of the keys
5. In parallel:
    1. Trigger BGSAVE
    2. Start 80/20 get/set load

I varied the following parameters to understand their influence:

1. Number of keys: 3M, 6M, and 12M.
2. Data size. While keys themselves are of fixed length (~30 bytes), the
value size is 120, 250, 500, 1000, and 2000 bytes.
3. Fragmentation. I delete 5%, 10%, and 15% of the original key range.

I'm attaching a graph of BGSAVE process memory consumption. Instead of
all benchmarks, I show the most representative runs IMO.

<img width="1570" alt="3m-fixed"
src="https://github.com/user-attachments/assets/3dbbc528-01c1-4821-a3c2-6be455e7f78a">


For 2000 bytes values peak memory usage is ~53% compared to vanilla. The
peak happens at 57% BGSAVE progress.
For 500 bytes values the peak is ~80% compared to vanilla. And happens
at ~80% progress.
For 120 bytes the difference is under 5%, and the patched version could
even use more memory.



![500b-fixed](https://github.com/user-attachments/assets/b09451d3-4bce-4f33-b3db-2b5df2178ed2)


For 12M keys, the peak is ~85% of the vanilla’s. It happens at the ~70% mark.
For 6M keys, the peak is ~87% of the vanilla’s. It happens at the ~77% mark.
For 3M keys, the peak is ~87% of the vanilla’s. It happens at the ~80% mark.

**Changes**

The PR contains 2 changes:
1. Static buffer for RDB compression.
RDB compression leads to COW events even without any write load if we
use `zfree`. This happens because the compression function allocates a
new buffer for each object; together with freeing objects with `zfree`,
it leads to reuse of memory shared with the main process.
To deal with this problem, we use a pre-allocated constant 8K buffer for
compression. If the object size is too big for this buffer, then we fall
back to the ad hoc allocation behavior (see the sketch after this list).

2. Freeing string objects instead of dismissing them.
A call to `zfree` is more expensive than a direct call to `madvise`, but
with #453 strings use the fast path – `zfree_with_size`. As a possible
next step we can optimize `zfree` for other data types as well.
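
A minimal sketch of the static-buffer pattern described in change 1 (hypothetical names, not the actual PR code):

```c
#include <stdlib.h>

#define COMPRESS_SCRATCH_SIZE (8 * 1024)

/* One pre-allocated scratch buffer: compressing into it touches no pages
 * shared with the parent process, so it triggers no copy-on-write. */
static char compress_scratch[COMPRESS_SCRATCH_SIZE];

/* Return a buffer of at least `needed` bytes; fall back to an ad hoc
 * heap allocation (freed by the caller) when the object is too large. */
char *get_compress_buffer(size_t needed, int *heap_allocated) {
    if (needed <= COMPRESS_SCRATCH_SIZE) {
        *heap_allocated = 0;
        return compress_scratch;
    }
    *heap_allocated = 1;
    return malloc(needed);
}
```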

---------

Signed-off-by: Vadym Khoptynets <vadymkh@amazon.com>
Signed-off-by: ranshid <88133677+ranshid@users.noreply.github.com>
Co-authored-by: ranshid <88133677+ranshid@users.noreply.github.com>
Co-authored-by: Viktor Söderqvist <viktor.soderqvist@est.tech>
2024-12-01 17:12:27 +02:00
Amit Nagler
7043ef0bbb
Split dual-channel COB overrun tests to separate servers (#1374)
1. The test isn't waiting long enough for the output buffer to overrun.
This problem is happening because an error from the previous test is
bleeding into the current test's logs. The simplest fix would be to
split these tests.
2. Increased replication timeout to ensure sync fails due to output
buffer overrun before a timeout occurs.

Fixes #1367

Signed-off-by: naglera <anagler123@gmail.com>
2024-12-01 21:33:43 +08:00
Binbin
9c48f56790
Reset repl_down_since to zero only on state change (#1149)
We should reset repl_down_since only on state change. In the current
code, if the rdb channel in the dual channel is normal (that is, the rdb
is loaded normally) but the psync channel is abnormal, we would still set
repl_down_since to 0 here. If the primary goes down at this time, the
replica may misbehave when calculating data_age in cluster failover,
since repl_state != REPL_STATE_CONNECTED; this leaves the replica unable
to initiate an election due to the old data_age.

In dualChannelSyncHandleRdbLoadCompletion, if the psync channel
is not established, the function will return. We will set repl_state
to REPL_STATE_CONNECTED and set repl_down_since to 0 in
dualChannelSyncSuccess, that is, in establishPrimaryConnection.

See also 677d10b2a8ff7f13033ccfe56ffcd246dbe70fb6 for more details.

Signed-off-by: Binbin <binloveplay1314@qq.com>
2024-12-01 21:33:21 +08:00
Stav Ben-Tov
c8ceb2ee25
Use zfree_with_size for client buffer (#1376)
Replace occurrences of 'zfree' with 'zfree_with_size' to improve
performance. The 'zfree_with_size' function avoids calling 'zmalloc_size'
to retrieve the buffer size and instead uses the previously computed
size. This results in faster memory deallocation and reduced overhead.
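
A minimal sketch of the mechanism (glibc's `malloc_usable_size` standing in for `zmalloc_size`; not the actual Valkey functions):

```c
#include <malloc.h> /* malloc_usable_size (glibc) */
#include <stdlib.h>

static size_t used_memory = 0;

/* Plain free path: must ask the allocator for the size first, to keep
 * the used-memory accounting right. */
void zfree_sketch(void *ptr) {
    used_memory -= malloc_usable_size(ptr); /* extra metadata lookup */
    free(ptr);
}

/* Sized free path: the caller already tracked the size, so the
 * metadata lookup is skipped entirely. */
void zfree_with_size_sketch(void *ptr, size_t size) {
    used_memory -= size;
    free(ptr);
}
```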

Signed-off-by: stav bentov <stavbt@amazon.com>
Co-authored-by: stav bentov <stavbt@amazon.com>
2024-12-01 12:24:18 +01:00
zhenwei pi
4695d118dd
RDMA builtin support (#1209)
There are several patches in this PR:

* Abstract the set/rewrite logic for the `bind` config option: `bind` is a
special config; `socket` and `tls` share the same one, while RDMA uses a
similar style but a different option. Add a small abstraction to make it
flexible for both `socket` and `RDMA` (and even QUIC in the future).
* Introduce closeListener for connection types: closing a socket with a
simple syscall would be fine, but RDMA has more complex logic, so
introduce a connection-type-specific close listener method.
* RDMA: Use valkey.conf style instead of module parameters: use
`--rdma-bind` and `--rdma-port` style instead of module parameters. The
module style config `rdma.bind` and `rdma.port` are removed.
* RDMA: Support builtin: support `make BUILD_RDMA=yes`. module style is
still kept for now.

Signed-off-by: zhenwei pi <pizhenwei@bytedance.com>
2024-11-29 11:13:34 +01:00
zvi-code
fd58f8d058
Disable lazy free in defrag test to fix 32bit daily failure (#1370)
Signed-off-by: Zvi Schneider <zvi.schneider22@gmail.com>
Co-authored-by: Zvi Schneider <zvi.schneider22@gmail.com>
2024-11-28 16:27:00 +01:00
Binbin
a939cb88ee
Handle keyIsExpiredWithDictIndex to make it check for import mode (#1368)
In #1326 we made KEYS able to visit expired keys in import-source state
by updating keyIsExpired to check for import mode. But after #1205,
we now use keyIsExpiredWithDictIndex to optimize away the redundant
dict_index, and keyIsExpiredWithDictIndex did not handle this logic.

In this commit, we update keyIsExpiredWithDictIndex to check for import
mode as well, so that KEYS can visit expired keys.

Signed-off-by: Binbin <binloveplay1314@qq.com>
2024-11-28 14:10:48 +08:00
Binbin
db7b7396ff
Make KEYS able to visit expired keys in import-source state (#1326)
After #1185, a client in import-source state can visit expired keys in
both read and write commands. This commit updates the keyIsExpired
function to handle the import-source state as well, so KEYS can visit
expired keys.

This is not particularly important, but it keeps the behavior consistent
with the definition. We also did some cleanup around the test and
verified that the client can indeed visit the expired key.

Signed-off-by: Binbin <binloveplay1314@qq.com>
2024-11-28 00:16:55 +08:00
Binbin
5d08149e72
Use fake client flag to replace not conn check (#1198)
The fake client flag was introduced in #1063; we want it to replace
all !conn fake-client checks.

Signed-off-by: Binbin <binloveplay1314@qq.com>
2024-11-27 18:02:07 +08:00
ranshid
66ae8b7135
change the container image to ubuntu:plucky (#1359)
Our fortify workflow runs on an ubuntu lunar container that has been EOL
since [January 25,
2024](https://lists.ubuntu.com/archives/ubuntu-announce/2024-January/000298.html).
This causes the workflow to fail during update actions like:
```
apt-get update && apt-get install -y make gcc-13
  update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-13 100
  make all-with-unit-tests CC=gcc OPT=-O3 SERVER_CFLAGS='-Werror -U_FORTIFY_SOURCE -D_FORTIFY_SOURCE=3'
  shell: sh -e {0}
Ign:1 http://security.ubuntu.com/ubuntu lunar-security InRelease
Err:2 http://security.ubuntu.com/ubuntu lunar-security Release
  404  Not Found [IP: 91.189.91.82 80]
Ign:3 http://archive.ubuntu.com/ubuntu lunar InRelease
Ign:4 http://archive.ubuntu.com/ubuntu lunar-updates InRelease
Ign:5 http://archive.ubuntu.com/ubuntu lunar-backports InRelease
Err:6 http://archive.ubuntu.com/ubuntu lunar Release
  404  Not Found [IP: 185.125.190.81 80]
Err:7 http://archive.ubuntu.com/ubuntu lunar-updates Release
  404  Not Found [IP: 185.125.190.81 80]
Err:8 http://archive.ubuntu.com/ubuntu lunar-backports Release
  404  Not Found [IP: 185.125.190.81 80]
Reading package lists...
E: The repository 'http://security.ubuntu.com/ubuntu lunar-security Release' does not have a Release file.
E: The repository 'http://archive.ubuntu.com/ubuntu lunar Release' does not have a Release file.
E: The repository 'http://archive.ubuntu.com/ubuntu lunar-updates Release' does not have a Release file.
E: The repository 'http://archive.ubuntu.com/ubuntu lunar-backports Release' does not have a Release file.
update-alternatives: error: alternative path /usr/bin/gcc-13 doesn't exist
Error: Process completed with exit code 2.
```

example:
https://github.com/valkey-io/valkey/actions/runs/12021130026/job/33547460209

This pr uses the latest stable ubuntu image release
[plucky](https://hub.docker.com/layers/library/ubuntu/plucky/images/sha256-dc4565c7636f006c26d54c988faae576465e825ea349fef6fd3af6bf5100e8b6?context=explore)

Signed-off-by: Ran Shidlansik <ranshid@amazon.com>
2024-11-27 07:34:02 +02:00
Amit Nagler
9305b49145
Add tag for dual-channel logs (#999)
This PR introduces a consistent tagging system for dual-channel logs.
The goal is to improve log readability and filterability, making it
easier for operators to manage and analyze log entries.

Resolves https://github.com/valkey-io/valkey/issues/986

---------

Signed-off-by: naglera <anagler123@gmail.com>
2024-11-26 16:51:52 +02:00
Binbin
469d41fb37
Avoid double close on repl_transfer_fd (#1349)
The code was OK before 2de544cfcc6d1aa7cf6d0c75a6116f7fc27b6fd6,
but now we set server.repl_transfer_fd right after dfd is
initiated, so we had a double-close error since dfd and
server.repl_transfer_fd are the same fd.

Also move the declaration of dfd/maxtries into a smaller scope to avoid
confusion, since they are only used in this code.

Signed-off-by: Binbin <binloveplay1314@qq.com>
2024-11-26 00:00:47 +08:00
Binbin
2d48a39c27
Save open's errno when opening temp rdb fails to prevent it from being modified (#1347)
Apparently on Mac, sleep will modify errno to ETIMEDOUT, and then it
prints the misleading message: Operation timed out.
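
A minimal sketch of the fix pattern (hypothetical surrounding code): capture errno immediately after the failing call, before anything can clobber it.

```c
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int open_and_report(const char *path) {
    int fd = open(path, O_RDONLY);
    if (fd == -1) {
        int saved_errno = errno; /* save before sleep() can clobber it */
        sleep(1);                /* on macOS this may set errno to ETIMEDOUT */
        fprintf(stderr, "open failed: %s\n", strerror(saved_errno));
    }
    return fd;
}
```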

Signed-off-by: Binbin <binloveplay1314@qq.com>
2024-11-25 23:56:51 +08:00
Ray Cao
cf1a1e0931
Optimize sdscatrepr by batch processing printable characters (#1342)
Optimize sdscatrepr by reducing realloc calls; furthermore, we reduce
memcpy calls by batch-processing runs of consecutive printable characters.
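
A minimal sketch of the batching idea in plain C (not the actual sds code): emit each maximal run of printable characters in one write instead of appending byte by byte.

```c
#include <ctype.h>
#include <stdio.h>

void repr_batched(const char *p, size_t len, FILE *out) {
    size_t i = 0;
    while (i < len) {
        size_t start = i;
        /* Scan a whole run of plain printable characters... */
        while (i < len && isprint((unsigned char)p[i]) &&
               p[i] != '"' && p[i] != '\\') i++;
        /* ...and copy it in one batch instead of one byte at a time. */
        if (i > start) fwrite(p + start, 1, i - start, out);
        /* Escape the byte that ended the run, then continue. */
        if (i < len) fprintf(out, "\\x%02x", (unsigned char)p[i++]);
    }
}
```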

Signed-off-by: Ray Cao <zisong.cw@alibaba.com>
Co-authored-by: Ray Cao <zisong.cw@alibaba.com>
2024-11-25 07:16:46 -08:00
Parth
c4920bca4a
Integrating fast_float to optionally replace strtod (#1260)
Fast_float is a C++ header-only library to parse doubles using SIMD
instructions. The purpose is to speed up sorted sets and other commands
that use doubles. A single-file copy of fast_float is included in this
repo. This introduces an optional dependency on a C++ compiler.

The use of fast_float is enabled at compile time using the make variable
`USE_FAST_FLOAT=yes`. It is disabled by default.

Fixes #1069.

---------

Signed-off-by: Parth Patel <661497+parthpatel@users.noreply.github.com>
Signed-off-by: Parth <661497+parthpatel@users.noreply.github.com>
Signed-off-by: Madelyn Olson <madelyneolson@gmail.com>
Signed-off-by: Viktor Söderqvist <viktor.soderqvist@est.tech>
Co-authored-by: Roshan Swain <swainroshan001@gmail.com>
Co-authored-by: Madelyn Olson <madelyneolson@gmail.com>
Co-authored-by: Viktor Söderqvist <viktor.soderqvist@est.tech>
2024-11-25 10:01:43 +01:00
Binbin
653d5f7fe3
Support empty callback on function and free temp function in async way (#1334)
We have replicationEmptyDbCallback, a callback used by emptyData
while flushing away old data. Previously we did not add this callback
logic for functions; since in case of abuse there may be a lot of
functions, and also to keep the code consistent, we add the same
callback logic for functions.

Changes around this commit:
1. Extend emptyData / functionsLibCtxClear to support passing callback
   when flushing functions.
2. Added disklessLoad function create and discard helper functions (just
   like disklessLoadInitTempDb and disklessLoadDiscardTempDb); we will
   always flush the temp functions in an async way to avoid any blocking.
3. Cleanup around discardTempDb, remove the callback pointer since in
   async way we don't need the callback.
4. Remove functionsLibCtxClear call in readSyncBulkPayload, because we
   called emptyData in the previous lines, which also empty functions.

We do this callback in replication because, during the flush, the
replica may block for a while if the flush is done in the sync way; to
avoid the primary detecting the replica as timing out, the replica uses
this callback to notify the primary (we also do this callback when
loading an RDB). In the async way, we empty the data in the bio thread
and there is no slow operation, so the callback is ignored.

Signed-off-by: Binbin <binloveplay1314@qq.com>
2024-11-25 09:59:37 +08:00
eifrah-aws
33f42d7fb5
CMake fixes + README update (#1276) 2024-11-22 12:17:53 -08:00
Binbin
9851006d6d
Add short client info log to CLUSTER MEET / FORGET / RESET commands (#1249)
These commands are all administrator commands; if they are used
incorrectly, serious consequences may occur. Print the full client
info by using catClientInfoString; the info is useful when we want
to identify the source of a request.

Since the original client info is very large and might complicate the
output, we added a catClientInfoShortString function that only prints
some basic fields that are useful to identify the client. These
fields are:
- id
- addr
- laddr
- connection info
- name
- user
- lib-name
- lib-ver

We also used it to replace the original client info where it serves the
same purpose. Some logging changed from full client info to short
client info:
- CLUSTER FAILOVER
- FAILOVER / PSYNC
- REPLICAOF NO ONE
- SHUTDOWN

Signed-off-by: Binbin <binloveplay1314@qq.com>
2024-11-23 00:23:38 +08:00
Binbin
b9d224097a
Broadcast a PONG to all nodes in cluster when role changed (#1295)
When a node's role changes, we should broadcast the change to notify other
nodes. For example, with one primary and one replica: after a failover,
the replica becomes the new primary and the primary becomes a new replica.

And then we trigger a second cluster failover for the new replica, the
new replica will send a MFSTART to its primary, ie, the new primary.

But the new primary may reject the MFSTART due to this logic:
```
    } else if (type == CLUSTERMSG_TYPE_MFSTART) {
        if (!sender || sender->replicaof != myself) return 1;
```

In the new primary's view, the sender is still a primary and
sender->replicaof is NULL, so we return, and the manual failover times out.

Another possibility is that other primaries refuse to vote after receiving
the FAILOVER_AUTH_REQUEST, since in their view the sender is still a
primary, so they refuse to vote, and the manual failover times out.
```
void clusterSendFailoverAuthIfNeeded(clusterNode *node, clusterMsg *request) {
    ...
        if (clusterNodeIsPrimary(node)) {
            serverLog(LL_WARNING, "Failover auth denied to...
```

The reason is that, currently, we only update the node->replicaof information
when we receive a PING/PONG from the sender. For details, see clusterProcessPacket.
Therefore, in some scenarios, such as clusters with many nodes and a large
cluster-ping-interval (that is, cluster-node-timeout), the role change of the node
will be very delayed.

Also added a DEBUG DISABLE-CLUSTER-RANDOM-PING command to disable the
cluster ping that is normally sent to a random node every second (see
clusterCron).

Signed-off-by: Binbin <binloveplay1314@qq.com>
2024-11-23 00:22:04 +08:00
Binbin
979f4c1ceb
Add cmake-build-debug and cmake-build-release to gitignore (#1340)
Signed-off-by: Binbin <binloveplay1314@qq.com>
2024-11-22 16:49:16 +08:00
Alan Scherger
377ed22c97
[feat] add Ubuntu 24.04 Noble package support (#971)
add Ubuntu 24.04 Noble package support

Signed-off-by: Alan Scherger <alan.scherger@gmail.com>
2024-11-21 19:26:30 -08:00
Yury-Fridlyand
109d2dadc0
Add slack link for users (#1273)
Add slack link for users

---------

Signed-off-by: Yury-Fridlyand <yury.fridlyand@improving.com>
Co-authored-by: Madelyn Olson <madelyneolson@gmail.com>
2024-11-21 19:19:10 -08:00
Nadav Levanoni
18d1eb5a85
Remove redundant dict_index calculations (#1205)
We need to start making use of the new `WithDictIndex` APIs which allow
us to reuse the dict_index calculation (avoid over-calling `getKeySlot`
for no good reason).

In this PR I optimized `lookupKey` so the dict_index computed by
`getKeySlot` is reused in two additional places. It also optimizes the
keys command to avoid unnecessary computation of the slot id.

---------

Signed-off-by: Nadav Levanoni <nadavl@amazon.com>
Co-authored-by: Nadav Levanoni <nadavl@amazon.com>
2024-11-21 19:14:28 -08:00
Sinkevich Artem
43b5026162
Fix argument types of formatting functions (#1253)
`cluster_legacy.c`: `slot_info_pairs` has `uint16_t` values, but they
were cast to `unsigned long` and `%i` was used.

`valkey-cli.c`: `node->replicas_count` is `int`, not `unsigned long`.

Signed-off-by: ArtSin <artsin666@gmail.com>
2024-11-21 18:58:15 -08:00
Binbin
50aae13b0a
Skip reclaim file page cache test in valgrind (#1327)
The test is incompatible with valgrind. Added a new `--valgrind`
argument to test suite, which will cause that test to be skipped.

We skipped it in the past, see 5b61b0dc6d2579ee484fa6cf29bfac59513f84ab

Signed-off-by: Binbin <binloveplay1314@qq.com>
2024-11-22 10:29:24 +08:00
Binbin
c4be326c32
Make manual failover reset the on-going election to promote failover (#1274)
If a manual failover times out, for example because the election does
not get enough votes, then since we have an auth_timeout and an
auth_retry_time, a new manual failover will not be able to proceed on
the replica side.

For example, if we initiate a new manual failover after an election
timed out, we pause the primary, but on the replica side, due to
retry_time, the replica does not trigger a new election and the manual
failover eventually times out.

In this case, if we initiate manual failover again and there is an
ongoing election, we will reset it so that the replica can initiate
a new election at the manual failover's request.

Signed-off-by: Binbin <binloveplay1314@qq.com>
2024-11-22 10:28:59 +08:00
zvi-code
b56eed2479
Remove valkey specific changes in jemalloc source code (#1266)
### Summary of the change

This is a base PR for refactoring defrag. It moves the defrag logic to
rely on the jemalloc [native
api](https://github.com/jemalloc/jemalloc/pull/1463#issuecomment-479706489)
instead of relying on custom code changes made by valkey in the jemalloc
([je_defrag_hint](9f8185f5c8/deps/jemalloc/include/jemalloc/internal/jemalloc_internal_inlines_c.h (L382)))
library. This enables valkey to use the latest vanilla jemalloc without
the need to maintain code changes across jemalloc versions.

This change requires some modifications because the new api provides
only the information, not a yes/no defrag decision; that logic needs to
be implemented in valkey code. Additionally, the api does not provide,
within a single call, all the information needed to make a decision;
that information is available through an additional api call. To reduce
the calls to jemalloc, in this PR the required information is collected
during `computeDefragCycles` and not for every single ptr, this way
avoiding the additional api call.
Followup work will utilize the new options that are now open and will
further improve the defrag decision and process.

### Added files: 

`allocator_defrag.c` / `allocator_defrag.h` - This files implement the
allocator specific knowledge for making defrag decision. The knowledge
about slabs and allocation logic and so on, all goes into this file.
This improves the separation between jemalloc specific code and other
possible implementation.


### Moved functions: 

[`zmalloc_no_tcache`, `zfree_no_tcache`
](4593dc2f05/src/zmalloc.c (L215))
- these embed very jemalloc-specific logic assumptions and are very
specific to how we defrag with jemalloc. This also fits the vision that,
from a performance perspective, we should consider using tcache; we
only need to make sure we don't recycle entries without going through
the arena (for example, we can use a private tcache, one for free and
one for alloc).
`frag_smallbins_bytes` - the logic and implementation moved to the new
file.

### Existing API:

* [once a second + when completed full cycle]
[`computeDefragCycles`](4593dc2f05/src/defrag.c (L916))
* `zmalloc_get_allocator_info` : gets from jemalloc _allocated, active,
resident, retained, muzzy_, `frag_smallbins_bytes`
*
[`frag_smallbins_bytes`](4593dc2f05/src/zmalloc.c (L690))
: for each bin; gets from jemalloc bin_info, `curr_regs`, `cur_slabs`
* [during defrag, for each pointer]
* `je_defrag_hint` takes a memory pointer and returns {0,1}.
[Internally it
uses](4593dc2f05/deps/jemalloc/include/jemalloc/internal/jemalloc_internal_inlines_c.h (L368))
these information points:
        * #`nonfull_slabs`
        * #`total_slabs`
        * #free regs in the ptr slab

## Jemalloc API (via ctl interface)


[BATCH][`experimental_utilization_batch_query_ctl`](4593dc2f05/deps/jemalloc/src/ctl.c (L4114))
: gets an array of pointers, returns for each pointer 3 values,

* number of free regions in the extent
* number of regions in the extent
* size of the extent in terms of bytes


[EXTENDED][`experimental_utilization_query_ctl`](4593dc2f05/deps/jemalloc/src/ctl.c (L3989))
:

* memory address of the extent a potential reallocation would go into
* number of free regions in the extent
* number of regions in the extent
* size of the extent in terms of bytes
* [stats-enabled]total number of free regions in the bin the extent
belongs to
* [stats-enabled]total number of regions in the bin the extent belongs
to

### `experimental_utilization_batch_query_ctl` vs valkey
`je_defrag_hint`?
[good]
- We can query pointers in a batch, reducing the overall overhead
- The per-ptr decision algorithm is not within the jemalloc api; jemalloc
only provides information, so valkey can tune/configure/optimize easily

[bad]
- In the batch API we only know the utilization of the slab (of that
memory ptr); we don’t get the data about #`nonfull_slabs` and total
allocated regs.


## New functions:
1. `defrag_jemalloc_init`: Reduces the cost of calls to je_ctl by using
the [MIB interface](https://jemalloc.net/jemalloc.3.html) to get faster
calls (see the sketch after this list). See this quote from the jemalloc
documentation:

    The mallctlnametomib() function provides a way to avoid repeated name
    lookups for applications that repeatedly query the same portion of the
    namespace, by translating a name to a “Management Information Base”
    (MIB) that can be passed repeatedly to mallctlbymib().

2. `jemalloc_sz2binind_lgq*`: this api supports a reverse map between a
bin size and its info without a lookup. This mapping depends on the
number of size classes we have, which are derived from
[`lg_quantum`](4593dc2f05/deps/Makefile (L115))
3. `defrag_jemalloc_get_frag_smallbins`: this function replaces
`frag_smallbins_bytes`; the logic moved to the new file allocator_defrag.
`defrag_jemalloc_should_defrag_multi` → `handle_results` unpacks the
results.
4. `should_defrag`: implements the same logic as the existing
implementation
[inside](9f8185f5c8/deps/jemalloc/include/jemalloc/internal/jemalloc_internal_inlines_c.h (L382))
je_defrag_hint.
5. `defrag_jemalloc_should_defrag_multi`: implements the hint for an
array of pointers, utilizing the new batch api. Currently only 1 pointer
is passed.
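
A minimal standalone sketch of the MIB pattern from item 1, using jemalloc's public `mallctl*` API (assuming a jemalloc build without a function prefix; not the PR's code):

```c
#include <jemalloc/jemalloc.h>
#include <stdint.h>
#include <stdio.h>

int main(void) {
    /* Resolve the ctl name to a MIB once... */
    size_t mib[8];
    size_t miblen = sizeof(mib) / sizeof(mib[0]);
    if (mallctlnametomib("stats.allocated", mib, &miblen) != 0) return 1;

    /* ...then query it repeatedly by MIB, skipping the string lookup.
     * Bumping "epoch" refreshes jemalloc's cached statistics. */
    for (int i = 0; i < 3; i++) {
        uint64_t epoch = 1;
        mallctl("epoch", NULL, NULL, &epoch, sizeof(epoch));

        size_t allocated, sz = sizeof(allocated);
        if (mallctlbymib(mib, miblen, &allocated, &sz, NULL, 0) == 0)
            printf("allocated: %zu bytes\n", allocated);
    }
    return 0;
}
```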


### Logical differences:

In order to get the information about #`nonfull_slabs` and #`regs`, we
use the query cycle to collect the information per size class. In order
to find the index of the bin information given a bin size, in O(1), we
use `jemalloc_sz2binind_lgq*`.


## Testing
This is the first draft. I did some initial testing that basically
creates fragmentation by reducing max memory and then waits for defrag
to reach the desired level. The test only serves as a sanity check that
defrag eventually succeeds; no data is provided here regarding
efficiency and performance.

### Test: 
1. disable `activedefrag`
2. run valkey benchmark on overlapping address ranges with different
block sizes
3. wait until `used_memory` reaches 10GB
4. set `maxmemory` to 5GB and `maxmemory-policy` to `allkeys-lru`
5. stop load
6. wait for `mem_fragmentation_ratio` to reach 2
7. enable `activedefrag` - start test timer
8. wait until reach `mem_fragmentation_ratio` = 1.1

#### Results*:
(With this PR) test result: ` 56 sec`
(Without this PR) test result: `67 sec`

*Both runs perform the same "work": the number of buffers moved to reach
the fragmentation target.

Next benchmarking is to compare to:
- DONE // existing `je_get_defrag_hint` 
- compare with naive defrag all: `int defrag_hint() {return 1;}`

---------

Signed-off-by: Zvi Schneider <ezvisch@amazon.com>
Signed-off-by: Zvi Schneider <zvi.schneider22@gmail.com>
Signed-off-by: zvi-code <54795925+zvi-code@users.noreply.github.com>
Co-authored-by: Zvi Schneider <ezvisch@amazon.com>
Co-authored-by: Zvi Schneider <zvi.schneider22@gmail.com>
Co-authored-by: Madelyn Olson <madelyneolson@gmail.com>
2024-11-21 16:29:21 -08:00
xbasel
b486a41500
Preserve original fd blocking state in TLS I/O operations (#1298)
This change prevents unintended side effects on connection state and
improves consistency with non-TLS sync operations.

For example, when invoking `connTLSSyncRead` with a blocking file
descriptor, the mode is switched to non-blocking upon `connTLSSyncRead`
exit. If the code assumes the file descriptor remains blocking and calls
the normal `read` expecting it to block, it may result in a short read.
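
A minimal sketch of the fix pattern on a plain fd (an assumed simplification of the TLS code): save the fd's blocking state and restore it on exit.

```c
#include <fcntl.h>
#include <sys/types.h>
#include <unistd.h>

/* Toggle O_NONBLOCK for the duration of the sync operation, then put
 * the fd back exactly as the caller handed it to us. */
ssize_t sync_read_preserving_mode(int fd, void *buf, size_t len) {
    int flags = fcntl(fd, F_GETFL);
    if (flags == -1) return -1;
    if (!(flags & O_NONBLOCK)) fcntl(fd, F_SETFL, flags | O_NONBLOCK);

    ssize_t n = read(fd, buf, len); /* stand-in for the TLS sync read */

    if (!(flags & O_NONBLOCK)) fcntl(fd, F_SETFL, flags); /* restore */
    return n;
}
```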

This caused a crash in dual-channel, which was fixed in this PR by
relocating `connBlock()`:
https://github.com/valkey-io/valkey/pull/837

Signed-off-by: xbasel <103044017+xbasel@users.noreply.github.com>
2024-11-21 18:22:16 +02:00
Binbin
6038eda010
Make FUNCTION RESTORE FLUSH flush async based on lazyfree-lazy-user-flush (#1254)
FUNCTION RESTORE has a FLUSH option that deletes all the existing
libraries before restoring the payload. If for some reason there are
a lot of libraries, we would block for a while here.

Signed-off-by: Binbin <binloveplay1314@qq.com>
2024-11-21 21:02:05 +08:00
Binbin
f553ccbda6
Use goto to cleanup error handling in readSyncBulkPayload (#1332)
The goto error label does the same thing as the error return path; use
goto to reduce the duplicated references.
```
error:
    cancelReplicationHandshake(1);
    return;
```

This also makes the log printing more continuous on error: we print
the error log first, and then print the reconnecting log last (in
cancelReplicationHandshake).

Signed-off-by: Binbin <binloveplay1314@qq.com>
2024-11-21 20:01:30 +08:00
Yanqi Lv
4986310945
Import-mode: Avoid expiration and eviction during data syncing (#1185)
New config: `import-mode (yes|no)`

New command: `CLIENT IMPORT-SOURCE (ON|OFF)`

The config, when set to `yes`, disables eviction and deletion of expired
keys, except for commands coming from a client which has marked itself
as an import-source, the data source when importing data from another
node, using the CLIENT IMPORT-SOURCE command.

When we sync data from the source Valkey to the destination Valkey using
some sync tools like
[redis-shake](https://github.com/tair-opensource/RedisShake), the
destination Valkey can perform expiration and eviction, which may cause
data corruption. This problem has been discussed in
https://github.com/redis/redis/discussions/9760#discussioncomment-1681041
and Redis already have a solution. But in Valkey we haven't fixed it by
now.

E.g. we call `set key 1 ex 1` on the source server and transfer this
command to the destination server. Then we call `incr key` on the source
server before the key expires, so we have a key on the source server
with a value of 2. But when the command arrives at the destination
server, the key may have expired and been deleted. So we end up with a
key on the destination server with a value of 1, which is inconsistent
with the source server.

In standalone mode, we can use a writable replica to simplify the sync
process. However, in cluster mode, we still need a sync tool to help us
transfer the source data to the destination. The sync tool usually works
as a normal client, and the destination works as a primary that keeps
performing expiration and eviction.

In this PR, we add a new mode named 'import-mode'. In this mode, the
server stops expiration and eviction just like a replica. Notice that
this mode exists only in the sync state, to avoid data inconsistency
caused by expiration and eviction. Import mode only takes effect on the
primary. Sync tools can mark their clients as an import source with
`CLIENT IMPORT-SOURCE`, which works like a client from the primary and
can visit expired keys in `lookupkey`.

**Notice: during the migration, other clients, apart from the import
source, should not access the data imported by import source.**

---------

Signed-off-by: lvyanqi.lyq <lvyanqi.lyq@alibaba-inc.com>
Signed-off-by: Yanqi Lv <lvyanqi.lyq@alibaba-inc.com>
Co-authored-by: Madelyn Olson <madelyneolson@gmail.com>
2024-11-19 21:53:19 +01:00
Binbin
ee386c92ff
Manual failover vote is not limited by two times the node timeout (#1305)
This limit should not restrict manual failover, otherwise in some
scenarios, manual failover will time out.

For example, if some FAILOVER_AUTH_REQUESTs or some FAILOVER_AUTH_ACKs
are lost during a manual failover, it cannot vote in the second manual
failover. Or in a mixed scenario of plain failover and manual failover,
it cannot vote for the subsequent manual failover.

The problem with manual failover retries is that a manual failover
pauses clients for 5s on the primary side, so retrying every time a
manual failover times out is a bad move.

---------

Signed-off-by: Binbin <binloveplay1314@qq.com>
Co-authored-by: Viktor Söderqvist <viktor.soderqvist@est.tech>
2024-11-19 11:17:20 -05:00
Binbin
132798b57d
Receipt of REPLCONF VERSION reply should be triggered by event (#1320)
This adds the missing return when repl_state changes to RECEIVE_VERSION_REPLY;
this way we won’t be blocked if the primary doesn’t reply with REPLCONF
VERSION.

In practice I guess this is not likely to block in this context, since
small responses are likely to be received in one packet, so this
is just a cleanup (consistent with the previous state machine
processing).

Also update the state machine diagram to mention the VERSION reply.

Signed-off-by: Binbin <binloveplay1314@qq.com>
2024-11-19 23:42:50 +08:00
Seungmin Lee
3d0c834203
Fix LRU crash when getting too many random lua scripts (#1310)
### Problem
Valkey stores scripts in a dictionary (lua_scripts) keyed by their SHA1
hashes, but it needs a way to know which scripts are least recently
used. It uses an LRU list (lua_scripts_lru_list) to keep track of
scripts in usage order. When the list reaches a maximum length, Valkey
evicts the oldest scripts to free memory in both the list and
dictionary. The problem here is that an sds in the LRU list can point to
memory that active defrag has already freed or moved, memory the sds in
the dictionary used to point to. This results in an assertion error at
[this
line](https://github.com/valkey-io/valkey/blob/unstable/src/eval.c#L519).

### Solution
If we duplicate the sds when adding it to the LRU list, we can create an
independent copy of the script identifier (sha). This duplication
ensures that the sha string in the LRU list remains stable and
unaffected by any defragmentation that could alter or free the original
sds. In addition, dictUnlink doesn't require an exact pointer
match([ref](https://github.com/valkey-io/valkey/blob/unstable/src/eval.c#L71-L78)),
so with this change we can still unlink the right dictEntry using the
copy of the sds.
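
A minimal sketch of the fix (hypothetical structures, plain C strings standing in for sds): the LRU list owns an independent copy of the sha, so defrag moving the dictionary's key cannot invalidate it.

```c
#include <stdlib.h>
#include <string.h>

typedef struct lru_node {
    char *sha;             /* owned copy, not the dict's pointer */
    struct lru_node *next;
} lru_node;

lru_node *lru_push(lru_node *head, const char *sha_in_dict) {
    lru_node *n = malloc(sizeof(*n));
    n->sha = strdup(sha_in_dict); /* independent copy, like sdsdup() */
    n->next = head;
    return n;
}
```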

### Reproduce
To reproduce it with tcl test:
1. Disable je_get_defrag_hint in defrag.c to trigger defrag often
2. Execute test script
```
start_server {tags {"auth external:skip"}} {

    test {Regression for script LRU crash} {
        r config set activedefrag yes
        r config set active-defrag-ignore-bytes 1
        r config set active-defrag-threshold-lower 0
        r config set active-defrag-threshold-upper 1
        r config set active-defrag-cycle-min 99
        r config set active-defrag-cycle-max 99

        for {set i 0} {$i < 100000} {incr i} {
            r eval "return $i" 0
        }
        after 5000;
    }
}
```


### Crash info
Crash report:
```
=== REDIS BUG REPORT START: Cut & paste starting from here ===
14044:M 12 Nov 2024 14:51:27.054 # === ASSERTION FAILED ===
14044:M 12 Nov 2024 14:51:27.054 # ==> eval.c:556 'de' is not true

------ STACK TRACE ------

Backtrace:
/usr/bin/redis-server 127.0.0.1:6379 [cluster](luaDeleteFunction+0x148)[0x723708]
/usr/bin/redis-server 127.0.0.1:6379 [cluster](luaCreateFunction+0x26c)[0x724450]
/usr/bin/redis-server 127.0.0.1:6379 [cluster](evalCommand+0x2bc)[0x7254dc]
/usr/bin/redis-server 127.0.0.1:6379 [cluster](call+0x574)[0x5b8d14]
/usr/bin/redis-server 127.0.0.1:6379 [cluster](processCommand+0xc84)[0x5b9b10]
/usr/bin/redis-server 127.0.0.1:6379 [cluster](processCommandAndResetClient+0x11c)[0x6db63c]
/usr/bin/redis-server 127.0.0.1:6379 [cluster](processInputBuffer+0x1b0)[0x6dffd4]
/usr/bin/redis-server 127.0.0.1:6379 [cluster][0x6bd968]
/usr/bin/redis-server 127.0.0.1:6379 [cluster][0x659634]
/usr/bin/redis-server 127.0.0.1:6379 [cluster](amzTLSEventHandler+0x194)[0x6588d8]
/usr/bin/redis-server 127.0.0.1:6379 [cluster][0x750c88]
/usr/bin/redis-server 127.0.0.1:6379 [cluster](aeProcessEvents+0x228)[0x757fa8]
/usr/bin/redis-server 127.0.0.1:6379 [cluster](redisMain+0x478)[0x7786b8]
/lib64/libc.so.6(__libc_start_main+0xe4)[0xffffa7763da4]
/usr/bin/redis-server 127.0.0.1:6379 [cluster][0x5ad3b0]
```
Defrag info:
```
mem_fragmentation_ratio:1.18
mem_fragmentation_bytes:47229992
active_defrag_hits:20561
active_defrag_misses:5878518
active_defrag_key_hits:77
active_defrag_key_misses:212
total_active_defrag_time:29009
```

### Test:
Run the test script to push 100,000 scripts and ensure the LRU list
keeps a maximum length of 500 without any crash.
```
27489:M 14 Nov 2024 20:56:41.583 * LRU List length: 500
27489:M 14 Nov 2024 20:56:41.583 * LRU List length: 500
27489:M 14 Nov 2024 20:56:41.584 * LRU List length: 500
27489:M 14 Nov 2024 20:56:41.584 * LRU List length: 500
27489:M 14 Nov 2024 20:56:41.584 * LRU List length: 500
27489:M 14 Nov 2024 20:56:41.584 * LRU List length: 500
27489:M 14 Nov 2024 20:56:41.584 * LRU List length: 500
27489:M 14 Nov 2024 20:56:41.584 * LRU List length: 500
27489:M 14 Nov 2024 20:56:41.584 * LRU List length: 500
27489:M 14 Nov 2024 20:56:41.584 * LRU List length: 500
27489:M 14 Nov 2024 20:56:41.584 * LRU List length: 500
27489:M 14 Nov 2024 20:56:41.584 * LRU List length: 500
27489:M 14 Nov 2024 20:56:41.584 * LRU List length: 500
[ok]: Regression for script LRU crash (6811 ms)
[1/1 done]: unit/test (7 seconds)
```

---------

Signed-off-by: Seungmin Lee <sungming@amazon.com>
Signed-off-by: Seungmin Lee <155032684+sungming2@users.noreply.github.com>
Co-authored-by: Seungmin Lee <sungming@amazon.com>
Co-authored-by: Binbin <binloveplay1314@qq.com>
2024-11-18 18:06:35 -08:00
Seungmin Lee
f9d0b87622
Upgrade macos-12 to macos-13 in workflows (#1318)
### Problem
GitHub Actions is starting the deprecation process for macOS 12.
Deprecation will begin on 10/7/24 and the image will be fully
unsupported by 12/3/24.
For more details, see
https://github.com/actions/runner-images/issues/10721

Signed-off-by: Seungmin Lee <sungming@amazon.com>
Co-authored-by: Seungmin Lee <sungming@amazon.com>
2024-11-18 18:00:30 -08:00
Amit Nagler
c5012cc630
Optimize RDB load performance and fix cluster mode resizing on replica side (#1199)
This PR addresses two issues:

1. Performance Degradation Fix - Resolves a significant performance
issue during RDB load on replica nodes.
- The problem was causing replicas to rehash multiple times during the
load process. Local testing demonstrated up to 50% degradation in BGSAVE
time.
- The problem occurs when the replica tries to expand pre-created slot
dictionaries. This operation fails quietly, resulting in undetected
performance issues.
- This fix aims to optimize the RDB load process and restore expected
performance levels.

2. Bug fix when reading `RDB_OPCODE_RESIZEDB` in Valkey 8.0 cluster
mode-
- Use the shard's master slots count when processing this opcode, as
`clusterNodeCoversSlot` is not initialized for the currently syncing
replica.
- Previously, this problem went unnoticed because `RDB_OPCODE_RESIZEDB`
had no practical impact (due to 1).

These improvements will enhance overall system performance and ensure
smoother upgrades to Valkey 8.0 in the future.

Testing:
- Conducted local tests to verify the performance improvement during RDB
load.
- Verified that ignoring `RDB_OPCODE_RESIZEDB` does not negatively
impact functionality in the current version.

Signed-off-by: naglera <anagler123@gmail.com>
Co-authored-by: Binbin <binloveplay1314@qq.com>
2024-11-18 19:09:35 +08:00
Binbin
d07674fc01
Fix sds unittest tests to check for zmalloc_usable_size (#1314)
s_malloc_size == zmalloc_size. Currently sdsAllocSize does not
add PREFIX_SIZE when no malloc_size is available, and this causes
test_typesAndAllocSize to fail in the new unittest; what we want to
check is actually zmalloc_usable_size.

Signed-off-by: Binbin <binloveplay1314@qq.com>
2024-11-18 14:55:26 +08:00
uriyage
94113fde7f
Improvements for TLS with I/O threads (#1271)
Main thread profiling revealed significant overhead in TLS operations,
even with read/write offloaded to I/O threads:

Perf results:

**10.82%** 8.82% `valkey-server libssl.so.3 [.] SSL_pending` # Called by
main thread after I/O completion

**10.16%** 5.06% `valkey-server libcrypto.so.3 [.] ERR_clear_error` #
Called for every event regardless of thread handling

This commit further optimizes TLS operations by moving more work from
the main thread to I/O threads:

Improve TLS offloading to I/O threads with two main changes:

1. Move `ERR_clear_error()` calls closer to SSL operations
   - Currently, error queue is cleared for every TLS event
   - Now only clear before actual SSL function calls
   - This prevents unnecessary clearing in main thread when operations
     are handled by I/O threads

2. Optimize `SSL_pending()` checks
   - Add `TLS_CONN_FLAG_HAS_PENDING` flag to track pending data
   - Move pending check to follow read operations immediately
   - I/O thread sets flag when pending data exists
   - Main thread uses flag to update pending list
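
A minimal sketch of both changes against OpenSSL's public API (simplified; not the Valkey connection code):

```c
#include <openssl/err.h>
#include <openssl/ssl.h>

/* Change 1: clear the per-thread error queue only right before the SSL
 * call, instead of once per event, so the main thread skips the work
 * when the operation is handled by an I/O thread.
 * Change 2: check SSL_pending() immediately after the read and report
 * it via a flag, instead of re-querying later from the main thread. */
int tls_read_sketch(SSL *ssl, void *buf, int len, int *has_pending) {
    ERR_clear_error();
    int n = SSL_read(ssl, buf, len);
    *has_pending = (n > 0 && SSL_pending(ssl) > 0);
    return n;
}
```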

Performance improvements:
Testing setup based on
https://valkey.io/blog/unlock-one-million-rps-part2/

Before:
- SET: 896,047 ops/sec
- GET: 875,794 ops/sec

After:
- SET: 985,784 ops/sec (+10% improvement)
- GET: 1,066,171 ops/sec (+22% improvement)

Signed-off-by: Uri Yagelnik <uriy@amazon.com>
2024-11-17 21:52:35 -08:00
Binbin
aa2dd3ecb8
Stabilize replica migration test to make sure cluster config is consistent (#1311)
CI report this failure:
```
[exception]: Executing test client: MOVED 1 127.0.0.1:22128.
MOVED 1 127.0.0.1:22128
    while executing
"wait_for_condition 1000 50 {
            [R 3 get key_991803] == 1024 && [R 3 get key_977613] == 10240 &&
            [R 4 get key_991803] == 1024 && ..."
```

This may be because, even though the cluster state becomes OK,
the cluster still has an inconsistent configuration for a short period
of time. We make sure to wait for the config to become consistent.

Signed-off-by: Binbin <binloveplay1314@qq.com>
2024-11-16 18:58:25 +08:00
Binbin
86f33ea2b0
Unprotect rdb channel when bgsave child fails in dual channel replication (#1297)
If bgsaveerr indicates an error, there is no need to protect the rdb
channel. The impact of this is that when bgsave fails, we would protect
the rdb channel for 60s. It may hold a reference to the repl
buf block, making it impossible to recycle until we free the
client due to COB or free the client after 60s.

We kept the RDB channel open as long as the replica hadn't established
a main connection, even if the snapshot process failed. There is no
value in keeping the RDB client in this case.

Signed-off-by: Binbin <binloveplay1314@qq.com>
2024-11-15 16:48:13 +08:00
Binbin
92181b6797
Fix primary crash when processing dirty slots during shutdown wait / failover wait / client pause (#1131)
We have an assert in propagateNow. If the primary node receives a
CLUSTER UPDATE with, for example, dirty slots while waiting on SIGTERM,
during a manual failover pause, or during a client pause, the
delKeysInSlot call will trigger this assert and crash the primary.

In this case, we added a new server_del_keys_in_slot state, just like
client_pause_in_transaction, to track the state and avoid the assert
in propagateNow; the dirty slots will be deleted in the end without
affecting data consistency.

Signed-off-by: Binbin <binloveplay1314@qq.com>
Co-authored-by: Viktor Söderqvist <viktor.soderqvist@est.tech>
2024-11-15 16:47:15 +08:00
Binbin
4e2493e5c9
Kill diskless fork child asap when the last replica drop (#1227)
We originally checked the replica connection, to decide whether to kill
the diskless child, only when rdbPipeReadHandler was triggered. Actually
we can check it when the replica disconnects, so that we don't
have to wait for rdbPipeReadHandler to be triggered and can kill
the diskless child as soon as possible.

In this way, when the child or rdbPipeReadHandler is stuck for some
reason, we can kill the child faster and release the fork resources.

Signed-off-by: Binbin <binloveplay1314@qq.com>
2024-11-15 16:34:32 +08:00
Binbin
d3f3b9cc3a
Fix daily valgrind build with unit tests (#1309)
This was introduced in #515.

Signed-off-by: Binbin <binloveplay1314@qq.com>
2024-11-15 14:27:28 +08:00
bentotten
b9994030e9
Log clusterbus handshake timeout failures (#1247)
This adds a log when a handshake fails due to a timeout. This can help
troubleshoot cluster asymmetry issues caused by failed MEETs.

---------

Signed-off-by: Ben Totten <btotten@amazon.com>
Signed-off-by: bentotten <59932872+bentotten@users.noreply.github.com>
Co-authored-by: Ben Totten <btotten@amazon.com>
Co-authored-by: Madelyn Olson <madelyneolson@gmail.com>
2024-11-14 20:48:48 -08:00
Qu Chen
32f7541fe3
Simplify dictType callbacks and move some macros from dict.h to dict.c (#1281)
Remove the dict pointer argument to the `dictType` callbacks `keyDup`,
`keyCompare`, `keyDestructor` and `valDestructor`. This argument was
unused in all of the callback implementations.

The macros `dictFreeKey()` and `dictFreeVal()` are made internal to dict
and moved from dict.h to dict.c. They're also changed from macros to
static inline functions.

Signed-off-by: Qu Chen <quchen@amazon.com>
2024-11-14 09:45:47 +01:00
Parth
863d312803
Fix link-time optimization to work correctly for unit tests (i.e. -flto flag) (#1290) (#1296)
* We compile various C files into objects and package them into a library
(.a file) using ar, to feed into unit tests. With new GCC versions, the
objects inside such a library don't participate in the LTO process without
additional flags.
* Here is a direct quote from gcc documentation explaining this issue:
"If you are not using a linker with plugin support and/or do not enable
the linker plugin, then the objects inside libfoo.a are extracted and
linked as usual, but they do not participate in the LTO optimization
process. In order to make a static library suitable for both LTO
optimization and usual linkage, compile its object files with
-flto -ffat-lto-objects."
* Read full documentation about -flto at
https://gcc.gnu.org/onlinedocs/gcc/Optimize-Options.html
* Without this additional flag, I get following errors while executing
"make test-unit". With this change, those errors go away.

```
ARCHIVE libvalkey.a
ar: threads_mngr.o: plugin needed to handle lto object
...
..
.
/tmp/ccDYbMXL.ltrans0.ltrans.o: In function `dictClear':
/local/workplace/elasticache/valkey/src/unit/../dict.c:776: undefined
reference to `valkey_free'
/local/workplace/elasticache/valkey/src/unit/../dict.c:770: undefined
reference to `valkey_free'
/tmp/ccDYbMXL.ltrans0.ltrans.o: In function `dictGetVal':
```

Fixes #1290

---------

Signed-off-by: Parth Patel <661497+parthpatel@users.noreply.github.com>
2024-11-13 21:50:55 -08:00
skyfirelee
4a9864206f
Migrate quicklist unit test to new framework (#515)
Migrate the quicklist unit test to the new unit test framework, and
clean up remaining references to SERVER_TEST; parent ticket #428.

Closes #428.

Signed-off-by: artikell <739609084@qq.com>
Signed-off-by: Binbin <binloveplay1314@qq.com>
Co-authored-by: Binbin <binloveplay1314@qq.com>
2024-11-14 10:37:44 +08:00
Binbin
6fba747c39
Fix log printing always shows the role as child under daemonize (#1301)
In #1282, we init server.pid earlier to keep the log message role
consistent, but we forgot to consider daemonize. In daemonize
mode, we would always print the child role.

We need to reset server.pid after daemonize(), otherwise the
log printing role will always be the child. It also causes an
incorrect server.pid value, affecting the concatenation of
some pid-based names.

Signed-off-by: Binbin <binloveplay1314@qq.com>
2024-11-14 10:26:23 +08:00
Binbin
2df56d87c0
Fix empty primary may have dirty slots data due to bad migration (#1285)
If we become an empty primary for some reason, we still need to
check whether we need to delete dirty slots, because we may have dirty
slots data left over from a bad migration, such as the target node
forcibly executing CLUSTER SETSLOT NODE to take over the slot without
performing key migration.

Signed-off-by: Binbin <binloveplay1314@qq.com>
2024-11-11 22:13:47 +08:00
Binbin
a2d22c63c0
Fix replica not able to initiate election in time when epoch fails (#1009)
If multiple primary nodes go down at the same time, their replica nodes will
initiate elections at the same time. There is a certain probability that
the replicas will initiate their elections in the same epoch.

Obviously, in our current election mechanism, only one replica node can
eventually get enough votes; the other replica nodes will fail to win
due to the insufficient majority, and then their elections will time out
and wait for the retry, which results in a long failure time.

If another node has won the election in the failover epoch, we can assume
that our election has failed and we can retry as soon as possible.
Signed-off-by: Binbin <binloveplay1314@qq.com>
2024-11-11 22:12:49 +08:00
Binbin
167e8ab8de
Trigger the election immediately when doing a manual failover (#1081)
Currently when a manual failover is triggered, we set
CLUSTER_TODO_HANDLE_FAILOVER to start the election as soon as
possible in the next beforeSleep. But in fact there is no reason to
delay the election in a manual failover; waiting for the next
beforeSleep to kick in delays the election by some milliseconds.

We can trigger the election immediately in this case in the
same function call, without waiting for beforeSleep, which
saves us some milliseconds.

Signed-off-by: Binbin <binloveplay1314@qq.com>
2024-11-11 21:43:46 +08:00
Binbin
4aacffa32d
Stabilize dual replication test to avoid getting LOADING error (#1288)
When doing `$replica replicaof no one`, we may get a LOADING
error. This is because during the test execution, the replica
may reconnect very quickly, a full sync is initiated, and
the replica enters the LOADING state.

In this commit, we make sure the primary is paused after the
fork, so the replica won't enter the LOADING state. With
this fix, this test is more natural and predictable.

Signed-off-by: Binbin <binloveplay1314@qq.com>
2024-11-11 21:42:34 +08:00
Qu Chen
9300a7ebc8
Set fields to NULL after free in freeClient() (#1279)
Null out several references after freeing the object in `freeClient()`.

This is just to make the code more safe, to protect against
use-after-free for future changes.
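
A minimal sketch of the pattern (hypothetical fields): null out each reference right after freeing it so an accidental later use fails loudly instead of silently reading freed memory.

```c
#include <stdlib.h>

typedef struct client_sketch {
    char *querybuf;
    char *replybuf;
} client_sketch;

void free_client_buffers(client_sketch *c) {
    free(c->querybuf);
    c->querybuf = NULL; /* a stale read now crashes instead of misbehaving */
    free(c->replybuf);
    c->replybuf = NULL;
}
```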

Signed-off-by: Qu Chen <quchen@amazon.com>
2024-11-11 10:39:48 +01:00
zixuan zhao
0b5b2c7484
Log as primary role (M) instead of child process (C) during startup (#1282)
Init server.pid earlier to keep log message role consistent.

Closes #1206.

Before:
```text
24881:C 21 Oct 2024 21:10:57.165 * oO0OoO0OoO0Oo Valkey is starting oO0OoO0OoO0Oo
24881:C 21 Oct 2024 21:10:57.165 * Valkey version=255.255.255, bits=64, commit=814e0f55, modified=1, pid=24881, just started
24881:C 21 Oct 2024 21:10:57.165 * Configuration loaded
24881:M 21 Oct 2024 21:10:57.167 * Increased maximum number of open files to 10032 (it was originally set to 1024).
```
After:
```text
68560:M 08 Nov 2024 16:10:12.257 * oO0OoO0OoO0Oo Valkey is starting oO0OoO0OoO0Oo
68560:M 08 Nov 2024 16:10:12.257 * Valkey version=255.255.255, bits=64, commit=45d596e1, modified=1, pid=68560, just started
68560:M 08 Nov 2024 16:10:12.257 * Configuration loaded
68560:M 08 Nov 2024 16:10:12.258 * monotonic clock: POSIX clock_gettime
```

Signed-off-by: azuredream <zhaozixuan67@gmail.com>
2024-11-11 10:33:26 +01:00
zhenwei pi
45d596e121
RDMA: Use conn ref counter to prevent double close (#1250)
RDMA: Use connection reference counter style

The connection's reference counter is used to protect against re-entry
of the close method. Use this style instead of the unsafe one.

Signed-off-by: zhenwei pi <pizhenwei@bytedance.com>
2024-11-08 09:33:01 +01:00
Jacob Murphy
e972d56460
Make sure to copy null terminator byte in dual channel code (#1272)
As @madolson pointed out, these do have proper null terminators. This
cleans them up to follow the rest of the code which copies the last byte
explicitly, which should help reduce cognitive load and make it more
resilient should code refactors occur (e.g. non-static allocation of
memory, changes to other functions).
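
A minimal sketch of the convention: copy the payload and write the terminator explicitly, so correctness doesn't depend on the source buffer's last byte.

```c
#include <string.h>

void copy_with_terminator(char *dst, const char *src, size_t len) {
    memcpy(dst, src, len); /* payload only */
    dst[len] = '\0';       /* terminator written explicitly */
}
```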

---------

Signed-off-by: Jacob Murphy <jkmurphy@google.com>
2024-11-07 18:25:43 -08:00
eifrah-aws
07b3e7ae7a
Add CMake build system for valkey (#1196)
With this commit, users are able to build valkey using `CMake`.

## Example usage:

Build `valkey-server` in Release mode with TLS enabled and using
`jemalloc` as the allocator:

```bash
mkdir build-release
cd $_
cmake .. -DCMAKE_BUILD_TYPE=Release \
         -DCMAKE_INSTALL_PREFIX=/tmp/valkey-install \
         -DBUILD_MALLOC=jemalloc -DBUILD_TLS=1
make -j$(nproc) install

# start valkey
/tmp/valkey-install/bin/valkey-server
```

Build `valkey-unit-tests`:

```bash
mkdir build-release-ut
cd $_
cmake .. -DCMAKE_BUILD_TYPE=Release \
         -DBUILD_MALLOC=jemalloc -DBUILD_UNIT_TESTS=1
make -j$(nproc)

# Run the tests
./bin/valkey-unit-tests 
```

Current features supported by this PR:

- Building against different allocators: (`jemalloc`, `tcmalloc`,
`tcmalloc_minimal` and `libc`), e.g. to enable `jemalloc` pass
`-DBUILD_MALLOC=jemalloc` to `cmake`
- OpenSSL builds (to enable TLS, pass `-DBUILD_TLS=1` to `cmake`)
- Sanitizier: pass `-DBUILD_SANITIZER=<address|thread|undefined>` to
`cmake`
- Install target + redis symbolic links
- Build `valkey-unit-tests` executable
- Standard CMake variables are supported. e.g. to install `valkey` under
`/home/you/root` pass `-DCMAKE_INSTALL_PREFIX=/home/you/root`

Why using `CMake`? To list *some* of the advantages of using `CMake`:

- Superior IDE integration: cmake generates the file
`compile_commands.json`, which is required by `clangd` to get
compiler-accurate code completion (in other words: your VSCode will
thank you)
- Out-of-source build tree: with the current build system, object
files are created all over the place, polluting the build source tree;
the best practice is to build the project in a separate folder
- Multiple build types co-existing: with the current build system, it is
often hard to have multiple build configurations. With cmake you can do
it easily.
- It is the de facto standard for C/C++ projects these days

More build examples: 

ASAN build:

```bash
mkdir build-asan
cd $_
cmake .. -DBUILD_SANITIZER=address -DBUILD_MALLOC=libc
make -j$(nproc)
```

ASAN with jemalloc:

```bash
mkdir build-asan-jemalloc
cd $_
cmake .. -DBUILD_SANITIZER=address -DBUILD_MALLOC=jemalloc 
make -j$(nproc)
```

As seen by the previous examples, any combination is allowed and
co-exist on the same source tree.

## Valkey installation

With this new `CMake`, it is possible to install the binary by running
`make install` or creating a package `make package` (currently supported
on Debian like distros)

### Example 1: build & install using `make install`:

```bash
mkdir build-release
cd $_
cmake .. -DCMAKE_INSTALL_PREFIX=$HOME/valkey-install -DCMAKE_BUILD_TYPE=Release
make -j$(nproc) install
# valkey is now installed under $HOME/valkey-install
```

### Example 2: create a `.deb` installer:

```bash
mkdir build-release
cd $_
cmake .. -DCMAKE_BUILD_TYPE=Release
make -j$(nproc) package
# ... CPack deb generation output
sudo gdebi -n ./valkey_8.1.0_amd64.deb
# valkey is now installed under /opt/valkey
```

### Example 3: create installer for non Debian systems (e.g. FreeBSD or
macOS):

```bash
mkdir build-release
cd $_
cmake .. -DCMAKE_BUILD_TYPE=Release
make -j$(nproc) package
mkdir -p /opt/valkey && ./valkey-8.1.0-Darwin.sh --prefix=/opt/valkey  --exclude-subdir
# valkey-server is now installed under /opt/valkey

```

Signed-off-by: Eran Ifrah <eifrah@amazon.com>
2024-11-07 18:01:37 -08:00
Wen Hui
3672f9b2c3
Revert "Decline unsubscribe related command in non-subscribed mode" (#1265)
The goal of this PR is to revert the changes of PR
https://github.com/valkey-io/valkey/pull/759

Recently, we got some reports that in Valkey 8.0 the PR
https://github.com/valkey-io/valkey/pull/759 (Decline unsubscribe
related command in non-subscribed mode) causes a breaking change
(https://github.com/valkey-io/valkey/issues/1228).

Although in my opinion, calling the commands "unsubscribeCommand",
"sunsubscribeCommand", and "punsubscribeCommand" in request-response
mode makes no sense. This is why I created PR
https://github.com/valkey-io/valkey/pull/759

But a breaking change is never good. @valkey-io/core-team what do you
think about reverting this PR's code changes?

Signed-off-by: hwware <wen.hui.ware@gmail.com>
2024-11-07 20:05:16 -05:00
Binbin
1c18c80844
Fix incorrect cache_memory reset in functionsLibCtxClear (#1255)
functionsLibCtxClear should clear the provided lib_ctx parameter,
not the static variable curr_functions_lib_ctx, as the current behavior
contradicts the function's intended purpose.

The impact, I guess, is minor: in some unhappy paths (diskless load
fails, function restore fails?), we mess up the functions_caches
field, which is used in the used_memory_functions / used_memory_scripts
fields in INFO.

Signed-off-by: Binbin <binloveplay1314@qq.com>
2024-11-07 13:44:21 +08:00
Binbin
22bc49c4a6
Try to stabilize the failover call in the slot migration test (#1078)
CI reports that the replica returns this error when performing CLUSTER
FAILOVER:
```
-ERR Master is down or failed, please use CLUSTER FAILOVER FORCE
```

This may be because the primary's state is fail, or the cluster
connection is disconnected, during the primary pause. In this PR, we
added some waits in wait_for_role: if the role is replica, we wait for
the replication link and the cluster link to be ok.

Signed-off-by: Binbin <binloveplay1314@qq.com>
2024-11-07 13:42:20 +08:00
Binbin
a0b1cbad83
Change errno from EEXIST to EALREADY in serverFork if child process exists (#1258)
We set this to EEXIST in 568c2e039bac388003068cd8debb2f93619dd462;
it prints "File exists", which is not quite accurate.
Change it to EALREADY, which prints "Operation already in progress".

Signed-off-by: Binbin <binloveplay1314@qq.com>
2024-11-07 12:13:00 +08:00
Binbin
12c5af03b8
Remove empty DB check branch in KEYS command (#1259)
We don't think we really care about optimizing for the empty DB case,
which should be uncommon. Adding branches hurts branch prediction.

Signed-off-by: Binbin <binloveplay1314@qq.com>
2024-11-06 10:32:00 +08:00
Amit Nagler
48ebe21ad1
fix: clean up refactoring leftovers (#1264)
This commit addresses issues that were likely introduced during a rebase
related to:
b0f23df165

Change dual channel replication state in main handler only

Signed-off-by: naglera <anagler123@gmail.com>
2024-11-05 04:57:34 -08:00
Madelyn Olson
3c32ee1bda
Add a filter option to drop all cluster packets (#1252)
A minor debugging change that helped in the investigation of
https://github.com/valkey-io/valkey/issues/1251. Basically there are
some edge cases where we want to fully isolate a node from receiving
packets, but can't suspend the process because we need it to continue
sending outbound traffic. So, added a filter for that.

Signed-off-by: Madelyn Olson <madelyneolson@gmail.com>
2024-11-04 12:36:20 -08:00
Binbin
a102852d5e
Fix timing issue in cluster-shards tests (#1243)
The cluster-node-timeout is 3000 in our tests; the timing tests weren't
reliably succeeding, so extending the wait_for made them much more
reliable.

Signed-off-by: Binbin <binloveplay1314@qq.com>
2024-11-02 19:51:14 +08:00
Jim Brunner
0d7b2344b2
correct type internal to kvstore (minor) (#1246)
All of the internal variables related to number of dicts in the kvstore
are type `int`. Not sure why these 2 items were declared as `long long`.

Signed-off-by: Jim Brunner <brunnerj@amazon.com>
2024-11-01 15:16:18 -07:00
zhenwei pi
e985ead7f9
RDMA: Prevent IO for child process (#1244)
RDMA MR (memory region) is not forkable, the VMA (virtual memory area)
of a MR gets empty in a child process. Prevent IO for child process to
avoid server crash.

In the check for whether read and write are allowed on an RDMA
connection, a check for whether we're in a child process is added. If
we are, the function returns an error, which causes the RDMA client to
be disconnected.
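
A sketch of the guard (flag name hypothetical):

```c
/* RDMA MRs are not forkable: their VMAs are empty in a child process,
 * so any IO would touch invalid memory. Deny reads/writes in a child;
 * the caller then disconnects the RDMA client. */
if (in_fork_child) return C_ERR;
```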

Suggested-by: Viktor Söderqvist <viktor.soderqvist@est.tech>
Signed-off-by: zhenwei pi <pizhenwei@bytedance.com>
2024-11-01 13:28:09 +01:00
Madelyn Olson
1c222f77ce
Improve performance of sdssplitargs (#1230)
The current implementation of `sdssplitargs` does repeated `sdscatlen`
to build the parsed arguments, which isn't very efficient because it
does a lot of extra reallocations and moves through the sds code a lot.
It also typically results in memory overhead, because `sdscatlen`
over-allocates, which is usually not needed since args are usually not
modified after being created.

The new implementation of sdssplitargs does two passes: the first
parses the argument to figure out its final length, and the second
actually copies the string. It's generally about 2x faster for larger
strings (~100 bytes), and about 20% faster for small strings (~10
bytes). The second pass costs little extra, since the input is still
in the CPU cache after the first pass.
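
A minimal two-pass sketch of the idea (quoting and escape handling
omitted, and `p`/`end` are assumed to delimit the input; the real
sdssplitargs handles much more):

```c
/* Pass 1: measure the final argument length. */
size_t arglen = 0;
while (p + arglen < end && !isspace((unsigned char)p[arglen])) arglen++;

/* Pass 2: one exact-size allocation, then a single copy. No repeated
 * sdscatlen() growth and no over-allocated tail left on the string. */
sds arg = sdsnewlen(SDS_NOINIT, arglen);
memcpy(arg, p, arglen);
```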

There are a couple of sanity tests (none existed before), as well as
some fuzzing, which was used to find some bugs and also to do the
benchmarking. The original benchmarking code can be seen
6576aeb86a.

```
test_sdssplitargs_benchmark - unit/test_sds.c:530] Using random seed: 1729883235
[test_sdssplitargs_benchmark - unit/test_sds.c:577] Improvement: 56.44%, new:13039us, old:29930us
[test_sdssplitargs_benchmark - unit/test_sds.c:577] Improvement: 56.58%, new:12057us, old:27771us
[test_sdssplitargs_benchmark - unit/test_sds.c:577] Improvement: 59.18%, new:9048us, old:22165us
[test_sdssplitargs_benchmark - unit/test_sds.c:577] Improvement: 54.61%, new:12381us, old:27278us
[test_sdssplitargs_benchmark - unit/test_sds.c:577] Improvement: 51.17%, new:16012us, old:32793us
[test_sdssplitargs_benchmark - unit/test_sds.c:577] Improvement: 49.18%, new:16041us, old:31563us
[test_sdssplitargs_benchmark - unit/test_sds.c:577] Improvement: 58.40%, new:12450us, old:29930us
[test_sdssplitargs_benchmark - unit/test_sds.c:577] Improvement: 56.49%, new:13066us, old:30031us
[test_sdssplitargs_benchmark - unit/test_sds.c:577] Improvement: 58.75%, new:12744us, old:30894us
[test_sdssplitargs_benchmark - unit/test_sds.c:577] Improvement: 52.44%, new:16885us, old:35504us
[test_sdssplitargs_benchmark - unit/test_sds.c:577] Improvement: 62.57%, new:8107us, old:21659us
[test_sdssplitargs_benchmark - unit/test_sds.c:577] Improvement: 62.12%, new:8320us, old:21966us
[test_sdssplitargs_benchmark - unit/test_sds.c:577] Improvement: 45.23%, new:13960us, old:25487us
[test_sdssplitargs_benchmark - unit/test_sds.c:577] Improvement: 57.95%, new:9188us, old:21849us
```

---------

Signed-off-by: Madelyn Olson <madelyneolson@gmail.com>
2024-10-31 11:37:53 -07:00
Masahiro Ide
91cbf77442
Eliminate snprintf usage at setDeferredAggregateLen (#1234)
to align with how we encode the length at `_addReplyLongLongWithPrefix`
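
For reference, a sketch of the direct encoding (buffer size
illustrative; `ll2string` is the existing util helper):

```c
char buf[34];
buf[0] = prefix; /* aggregate type byte, e.g. '*' or '%' */
size_t len = 1 + ll2string(buf + 1, sizeof(buf) - 1, (long long)length);
buf[len++] = '\r';
buf[len++] = '\n';
```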

Signed-off-by: Masahiro Ide <masahiro.ide@lycorp.co.jp>
Co-authored-by: Masahiro Ide <masahiro.ide@lycorp.co.jp>
2024-10-31 11:30:05 -07:00
zhenwei pi
ab98f375db
RDMA: Delete keepalive timer on closing (#1237)
Typically, an RDMA connection gets closed by the client side: the
server side handles the disconnected CM event and deletes the keepalive
timer correctly. However, the server side may close a connection
voluntarily, for example when the maximum number of connections is
exceeded. Handle this case to avoid invalid memory access.

Signed-off-by: zhenwei pi <pizhenwei@bytedance.com>
2024-10-30 11:12:42 +01:00
Binbin
789a73b0d0
Minor fix to debug logging in replicationFeedStreamFromPrimaryStream (#1235)
We should only print logs when hide-user-data-from-log is off.

Signed-off-by: Binbin <binloveplay1314@qq.com>
2024-10-30 10:25:50 +08:00
Shivshankar
13f5f665f2
Update the argument of clusterNodeGetReplica declaration (#1239)
The clusterNodeGetReplica arguments were missed during the slave-to-replica
terminology migration, so the argument named slave is updated to replica.

Signed-off-by: Shivshankar-Reddy <shiva.sheri.github@gmail.com>
2024-10-30 00:19:56 +01:00
Madelyn Olson
5a4c0640ce
Mark main and serverAssert as weak symbols to be overridden (#1232)
At some point unit tests stopped building on macOS because of duplicate
symbols. I had originally solved this problem by using a flag that
overrides symbols, but the much better solution is to mark the duplicate
symbols as weak so they can be overridden during linking. (Symbols are
strong by default; strong symbols override weak symbols.)
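
A sketch of the mechanism (prototype illustrative, not the exact
serverAssert signature):

```c
/* Weak default definition: a unit test binary can define the same
 * symbol strongly and the linker will pick that one instead. */
__attribute__((weak)) void serverAssert(const char *estr, const char *file, int line) {
    fprintf(stderr, "=== ASSERTION FAILED === %s:%d '%s'\n", file, line, estr);
    abort();
}
```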

I also added a macOS unit build to the CI, so that this doesn't
silently break in the future again.

---------

Signed-off-by: Madelyn Olson <madelyneolson@gmail.com>
Co-authored-by: Viktor Söderqvist <viktor.soderqvist@est.tech>
2024-10-29 14:26:17 -07:00
zixuan zhao
8ee7a58025
Document log format configs in valkey.conf (#1233)
Add config options for log format and timestamp format introduced by
#1022
Related to #1225

This change adds two new configs into valkey.conf:
log-format
log-timestamp-format
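
For example, in valkey.conf (values taken from the formats introduced
by #1022):

```
log-format logfmt
log-timestamp-format iso8601
```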

---------

Signed-off-by: azuredream <zhaozixuan67@gmail.com>
2024-10-29 11:13:30 +01:00
Lipeng Zhu
c21f1dc084
Increase the IO_THREADS_MAX_NUM. (#1220)
### Description

This patch tries to increase the max number of io-threads from 16 (128)
to 256, for the reasons below:

1. Core counts have increased a lot in modern server processors; for
example, the [Sierra
Forest](https://en.wikipedia.org/wiki/Sierra_Forest) processors are
targeted at up to **288** cores.
Due to the io-threads limits (16 and 128), benchmarks like
https://openbenchmarking.org/test/pts/valkey cannot even run on a
high-core-count server.

2. For some workloads the bottleneck is the main thread, but for
others, such as big key/value workloads that cause heavy IO, the
bottleneck can be the io-threads, for example the benchmark
`memtier_benchmark -s
127.0.0.1 -p 9001 "--data-size" "20000" --ratio 1:0 --key-pattern P:P
--key-minimum=1 --key-maximum 1000000 --test-time 180 -c 50 -t 16
--hide-histogram`. The QPS still scales beyond 16 io-threads.

![image](https://github.com/user-attachments/assets/e980f805-a162-44be-b03e-ab37a9c489cf)
**Fig 1. QPS Scale factor with io-threads number grows.**

Signed-off-by: Lipeng Zhu <lipeng.zhu@intel.com>
Co-authored-by: Wangyang Guo <wangyang.guo@intel.com>
2024-10-27 22:43:23 -07:00
Binbin
5d2ff853a3
Fix minor repldbfd leak in updateReplicasWaitingBgsave if fstat fails (#1226)
In the old code, if fstat fails, replica->repldbfd still holds the
fd while we free the client. And in freeClient, we check and
close it only if repl_state == REPLICA_STATE_SEND_BULK. So if fstat
fails, we leak the fd.

We could also extend freeClient to handle REPLICA_STATE_WAIT_BGSAVE_END
as well, but fixing it here seems to be a friendlier (and safer) way.

Signed-off-by: Binbin <binloveplay1314@qq.com>
2024-10-27 15:23:00 +08:00
Shivshankar
4be09e434a
Fix typo in valkey.conf file's shutdown section (#1224)
Found typo "exists" ==> "exits" in valkey.conf in shutdown section.

Signed-off-by: Shivshankar-Reddy <shiva.sheri.github@gmail.com>
2024-10-25 14:03:59 +02:00
Lipeng Zhu
9c60fcdae2
Do security attack check only when command not found to reduce the critical path (#1212)
When we explored the cycle distribution for the main thread with
io-threads enabled, we found this security attack check takes
significant time: **~3%** of main-thread cycles were spent on the
command security check.

This patch avoids doing it in the hot path entirely: we do it only
after we have looked up the command and it wasn't found, just before
we call commandCheckExistence.

---------

Signed-off-by: Lipeng Zhu <lipeng.zhu@intel.com>
Co-authored-by: Wangyang Guo <wangyang.guo@intel.com>
2024-10-25 11:13:28 +02:00
zixuan zhao
55bbbe09a3
Configurable log and timestamp formats (logfmt, ISO8601) (#1022)
Add ability to configure log output format and timestamp format in the
logs.

This change adds two new configs:

* `log-format`: Either legacy or logfmt (See https://brandur.org/logfmt)
* `log-timestamp-format`: legacy, iso8601 or milliseconds (since the
epoch).

Related to #1006.

Example:

```
$ ./valkey-server  /home/zhaoz12/git/valkey/valkey/valkey.conf
pid=109463 role=RDB/AOF timestamp="2024-09-10T20:37:25.738-04:00" level=warning message="WARNING Memory overcommit must be enabled! Without it, a background save or replication may fail under low memory condition. Being disabled, it can also cause failures without low memory condition, see https://github.com/jemalloc/jemalloc/issues/1328. To fix this issue add 'vm.overcommit_memory = 1' to /etc/sysctl.conf and then reboot or run the command 'sysctl vm.overcommit_memory=1' for this to take effect."
pid=109463 role=RDB/AOF timestamp="2024-09-10T20:37:25.738-04:00" level=notice message="oO0OoO0OoO0Oo Valkey is starting oO0OoO0OoO0Oo"
pid=109463 role=RDB/AOF timestamp="2024-09-10T20:37:25.738-04:00" level=notice message="Valkey version=255.255.255, bits=64, commit=affbea5d, modified=1, pid=109463, just started"
pid=109463 role=RDB/AOF timestamp="2024-09-10T20:37:25.738-04:00" level=notice message="Configuration loaded"
pid=109463 role=master timestamp="2024-09-10T20:37:25.738-04:00" level=notice message="monotonic clock: POSIX clock_gettime"
pid=109463 role=master timestamp="2024-09-10T20:37:25.739-04:00" level=warning message="Failed to write PID file: Permission denied"
```

---------

Signed-off-by: azuredream <zhaozixuan67@gmail.com>
2024-10-25 00:36:32 +02:00
Binbin
2956367731
Maintain return value of rdbSaveDb after writing slot-info aux (#1222)
All the other write sites in this function maintain the return value.
Although the caller of rdbSaveDb does not rely on it, it is maintained
here as well, to be consistent with the other places.

Signed-off-by: Binbin <binloveplay1314@qq.com>
2024-10-24 09:53:05 -04:00
Binbin
a21fe718f4
Limit CLUSTER_CANT_FAILOVER_DATA_AGE log to 10 times period (#1189)
If a replica steps into the data_age-too-old stage, it cannot
trigger the failover, currently cannot be automatically
recovered, and we will print a log every
CLUSTER_CANT_FAILOVER_RELOG_PERIOD,
which is every second. If the primary has not recovered or there is
no manual failover, this log will flood the log file.

In this case, limit its frequency to 10x the period, which is
10 seconds in our code. In this data_age-too-old stage,
the repeated logs can also indicate the progress of the failover.

See also #780 for more details about it.

Signed-off-by: Binbin <binloveplay1314@qq.com>
Co-authored-by: Ping Xie <pingxie@outlook.com>
2024-10-24 16:38:47 +08:00
muelstefamzn
c419524c05
Trim free space from inline command argument strings to avoid excess memory usage (#1213)
The command argument strings created while parsing inline commands (see
`processInlineBuffer()`) can contain free capacity. Since some commands,
such as `SET`, store these strings in the database, that free capacity
increases the memory usage. In the worst case, it could double the
memory usage.

This only occurs if the inline command format is used. The argument
strings are built by appending character by character in
`sdssplitargs()`. Regular RESP commands are not affected.

This change trims the strings within `processInlineBuffer()`.

### Why `trimStringObjectIfNeeded()` within `object.c` is not solving
this?

When the command argument string is packed into an object,
`trimStringObjectIfNeeded()` is called.

This only trims the string if it is larger than
`PROTO_MBULK_BIG_ARG` (32kB), as only strings larger than this would
ever need trimming if the command is sent using the bulk string format.

We could modify this condition, but that would potentially have a
performance impact on commands using the bulk format. Since those make
up the vast majority of executed commands, limiting this change to
inline commands seems prudent.
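
A sketch of the trimming idea, assuming the classic one-argument
`sdsRemoveFreeSpace()` signature and an sds argument vector:

```c
/* After parsing an inline command, trim each argument's spare capacity
 * so a command like SET doesn't store an over-allocated string. */
for (int j = 0; j < argc; j++)
    argv[j] = sdsRemoveFreeSpace(argv[j]);
```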

### Experiment Results

* 1 million `SET [key] [value]` commands
* Random keys (16 bytes)
* 600 bytes values

Memory usage without this change:

```
used_memory:1089327888
used_memory_human:1.01G
used_memory_rss:1131696128
used_memory_rss_human:1.05G
used_memory_peak:1089348264
used_memory_peak_human:1.01G
used_memory_peak_perc:100.00%
used_memory_overhead:49302800
used_memory_startup:911808
used_memory_dataset:1040025088
used_memory_dataset_perc:95.55%
```

Memory usage with this change:
```
used_memory:705327888
used_memory_human:672.65M
used_memory_rss:718802944
used_memory_rss_human:685.50M
used_memory_peak:705348256
used_memory_peak_human:672.67M
used_memory_peak_perc:100.00%
used_memory_overhead:49302800
used_memory_startup:911808
used_memory_dataset:656025088
used_memory_dataset_perc:93.13%
```

If the same experiment is repeated using the normal RESP array of bulk
strings format (`*3\r\n$3\r\nSET\r\n...`), then the memory usage is
672MB with and without this change.

If a replica is attached, its memory usage is 672MB with and without
this change, since the replication link never uses inline commands.

Signed-off-by: Stefan Mueller <muelstef@amazon.com>
2024-10-23 16:56:32 -07:00
danish-mehmood
c176de4251
Clarify the wording from dually to the more common doubly (#1214)
Clarify documentation in ziplist.c.

Signed-off-by: danish-mehmood <rdm355190@gmail.com>
2024-10-23 14:30:42 -07:00
Binbin
b803f7aeff
Clean up getSlotOrReply callers: it returns -1, not C_ERR (#1211)
Minor cleanup, since getSlotOrReply returns -1 on error, not C_ERR.

Signed-off-by: Binbin <binloveplay1314@qq.com>
2024-10-23 17:11:42 +08:00
Binbin
5d70ccd70e
Make replica CLUSTER RESET flush async based on lazyfree-lazy-user-flush (#1190)
Currently, if the replica has a lot of data, CLUSTER RESET
will block for a while and show up in the slowlog. There seems
to be no harm in making it async, which makes monitoring easier
for external components.

Signed-off-by: Binbin <binloveplay1314@qq.com>
Co-authored-by: Ping Xie <pingxie@outlook.com>
2024-10-23 10:22:25 +08:00
Shivshankar
285064b114
fix typo (#1202)
Signed-off-by: Shivshankar-Reddy <shiva.sheri.github@gmail.com>
2024-10-21 22:54:40 -04:00
Shivshankar
771918e4bf
Updating commands.def by running generate-command-code.py (#1203)
Part of PR https://github.com/valkey-io/valkey/pull/1200, since a field
was changed. It looks like commands.def was not regenerated for those
changes, which is causing a CI failure on unstable.

Signed-off-by: Shivshankar-Reddy <shiva.sheri.github@gmail.com>
2024-10-21 13:48:29 -07:00
Viktor Söderqvist
5885dc56bd
Fix BGSAVE CANCEL since and history fields (#1200)
Fixes wrong "since" and "history" introduced in #757.

---------

Signed-off-by: Viktor Söderqvist <viktor.soderqvist@est.tech>
2024-10-21 16:04:47 +02:00
ranshid
29b83f1ac8
Introduce bgsave cancel (#757)
In some cases the bgsave child process can run for a long time,
exhausting system resources. Although it is possible to kill the bgsave
child process from the system shell, sometimes OS-level access is not
possible.

This PR adds a new subcommand to the BGSAVE command.
When a user issues `BGSAVE CANCEL`, it does one of two things:

1. In case a bgsave child process is currently running, the child
   process is immediately killed, thus terminating any
   save/replication full sync process.
2. In case a bgsave child process is SCHEDULED to run, the scheduled
   execution is cancelled.

---------

Signed-off-by: ranshid <ranshid@amazon.com>
Signed-off-by: ranshid <88133677+ranshid@users.noreply.github.com>
Signed-off-by: Ran Shidlansik <ranshid@amazon.com>
Signed-off-by: Binbin <binloveplay1314@qq.com>
Co-authored-by: Binbin <binloveplay1314@qq.com>
Co-authored-by: Viktor Söderqvist <viktor.soderqvist@est.tech>
2024-10-21 11:56:44 +02:00
zhenwei pi
71f8c34eed
RDMA: Fix listener priv opaque pointer (#1194)
struct connListener.priv should be used for connection-type-specific
data; static local listener data should not use it.

A RDMA config structure is going to be introduced in the next step:

```
typedef struct serverRdmaContextConfig {
    char *bindaddr;
    int bindaddr_count;
    int port;
    int rx_size;
    int comp_vector;
    ...
} serverRdmaContextConfig;
```

Then a builtin RDMA will be supported.

Signed-off-by: zhenwei pi <pizhenwei@bytedance.com>
2024-10-21 10:11:27 +02:00
Binbin
2743b7e04b
Fix SORT GET to ignore special pattern # in cluster slot check (#1182)
The special pattern '#' is used to get the element itself;
it should not actually participate in the slot check.

Previously, passing `GET #` caused '#' to participate
in the slot check, making the command fail with a
`pattern may be in different slots` error.

Signed-off-by: Binbin <binloveplay1314@qq.com>
2024-10-19 14:56:10 +08:00
zhenwei pi
64cfdf61eb
Introduce connection context for Unix socket (#1160)
Hide 'unixsocketgroup' and 'unixsocketperm' in a Unix-socket-specific
data structure. A single opaque pointer 'void *priv' is enough for a
listener. Once any new config is added, we don't need 'void *priv2',
'void *priv3' and so on.

Signed-off-by: zhenwei pi <pizhenwei@bytedance.com>
2024-10-18 17:48:18 -07:00
Lipeng Zhu
a62d1f177b
Fix false sharing issue between main thread and io-threads when access used_memory_thread. (#1179)
When profiling some workloads with `io-threads` enabled, we found the
false sharing issue is heavy.

This patch splits the elements accessed by the main thread and the
io-threads into different cache lines by padding the elements at the
head of the `used_memory_thread_padded` array.

This design helps mitigate the false sharing between the main
thread and the io-threads, because the main thread has been the
bottleneck with io-threads enabled. We didn't put each element on an
individual cache line because we don't want to add extra cache-line
fetches (3 vs 16 cache lines) when calling functions like
`zmalloc_used_memory()`.
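
A minimal sketch of the trick (sizes and layout illustrative):

```c
#define CACHE_LINE_SIZE 64

/* The main-thread counter gets a cache line to itself; the io-thread
 * counters stay packed so that summing them all touches only a few
 * cache lines. */
typedef struct used_memory_thread_padded {
    long long main_thread_used_memory;
    char padding[CACHE_LINE_SIZE - sizeof(long long)];
    long long io_threads_used_memory[16];
} used_memory_thread_padded;
```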

---------

Signed-off-by: Lipeng Zhu <lipeng.zhu@intel.com>
Signed-off-by: Lipeng Zhu <zhu.lipeng@outlook.com>
Signed-off-by: Viktor Söderqvist <viktor.soderqvist@est.tech>
Co-authored-by: Wangyang Guo <wangyang.guo@intel.com>
Co-authored-by: Viktor Söderqvist <viktor.soderqvist@est.tech>
2024-10-17 12:37:10 +02:00
Binbin
701ab72429
Remove the restriction that cli --cluster create requires at least 3 primary nodes (#1075)
There is no limitation in Valkey preventing a cluster with 1 or 2
primaries, only that it cannot do automatic failover. Remove this
restriction and add an `are you sure` prompt for the user.

This allows us to create a test cluster with the cli or with
create-cluster.

Signed-off-by: Binbin <binloveplay1314@qq.com>
2024-10-17 13:33:44 +08:00
Nadav Levanoni
136d0fd212
Add 'WithDictIndex' expiry API and update RANDOMKEY command (#1155)
https://github.com/valkey-io/valkey/issues/1145

First part of a two-step effort to add `WithSlot` API for expiry. This
PR is to fix a crash that occurs when a RANDOMKEY uses a different slot
than the cached slot of a client during a multi-exec.

The next part will be to utilize the new API as an optimization to
prevent duplicate work when calculating the slot for a key.

---------

Signed-off-by: Nadav Levanoni <nadavl@amazon.com>
Signed-off-by: Madelyn Olson <madelyneolson@gmail.com>
Co-authored-by: Nadav Levanoni <nadavl@amazon.com>
Co-authored-by: Madelyn Olson <madelyneolson@gmail.com>
2024-10-16 17:40:11 -07:00
zarkash-aws
06cfe2c254
Improved hashing algorithm in luaS_newlstr (#1168)
**Overview**

This PR introduces the use of
[MurmurHash3](https://en.wikipedia.org/wiki/MurmurHash) as the hashing
function for Lua's luaS_newlstr function, replacing the previous simple
hash function. The change aims to improve performance, particularly for
large strings.

**Changes**

Implemented MurmurHash3 algorithm in lstring.c
Updated luaS_newlstr to use MurmurHash3 for string hashing

**Performance Testing:**
Test Setup:

1. Ran a valkey server
2. Loaded 1000 keys with large values (100KB each) to the server using a
Lua script
```
local numKeys = 1000

for i = 1, numKeys do
    local key = "large_key_" .. i
    local largeValue = string.rep("x", 1024*100)
    redis.call("SET", key, largeValue)
end
```
3. Used a Lua script to randomly select and retrieve keys
```
local randomKey = redis.call("RANDOMKEY")
local result = redis.call("GET", randomKey)
```
4. Benchmarked using valkey-benchmark:
`./valkey-benchmark -n 100000 evalsha
c157a37967e69569339a39a953c046fc2ecb4258 0`

Results:

A | Unstable | This PR | Change
-- | -- | -- | --
Throughput | 6,835.74 requests per second | 17,061.94 requests per second | **+150% increase**
Avg Latency | 7.218 ms | 2.838 ms | **-61% decrease**
Min Latency | 3.144 ms | 1.320 ms | **-58% decrease**
P50 Latency | 8.463 ms | 3.167 ms | **-63% decrease**
P95 Latency | 8.863 ms | 3.527 ms | **-60% decrease**
P99 Latency | 9.063 ms | 3.663 ms | **-60% decrease**
Max Latency | 63.871 ms | 55.327 ms | **-13% decrease**

Summary:
* Throughput: Improved by 150%.
* Latency: Significant reductions in average, minimum, and percentile
latencies (P50, P95, P99), leading to much faster response times.
* Max Latency: Slightly decreased by 13%, indicating fewer outlier
delays after the fix.

---------

Signed-off-by: Shai Zarka <zarkash@amazon.com>
Signed-off-by: zarkash-aws <zarkash@amazon.com>
Signed-off-by: Madelyn Olson <madelyneolson@gmail.com>
Co-authored-by: Madelyn Olson <madelyneolson@gmail.com>
Co-authored-by: Viktor Söderqvist <viktor.soderqvist@est.tech>
2024-10-15 15:18:58 -07:00
Shivshankar
b927fb09d4
Remove 'posting in the mailing list' in CONTRIBUTING.md (#1174)
Remove reference to "the mailing list". We don't have a mailing list.
2024-10-15 23:03:27 +02:00
Amit Nagler
b0f23df165
Refactor return and goto statements (#945)
Consolidate the cleanup of local variables to a single point within the
method, ensuring proper resource management and preventing memory leaks
or double-free issues.

Previously discussed here:
- https://github.com/valkey-io/valkey/pull/60#discussion_r1667872633
- https://github.com/valkey-io/valkey/pull/60#discussion_r1668045666

---------

Signed-off-by: naglera <anagler123@gmail.com>
Signed-off-by: Amit Nagler <58042354+naglera@users.noreply.github.com>
Co-authored-by: Ping Xie <pingxie@outlook.com>
2024-10-15 09:26:42 -07:00
Binbin
247a8f23c5
Fix FUNCTION KILL error message being displayed as SCRIPT KILL (#1171)
The client that was killed by FUNCTION KILL received a reply of
SCRIPT KILL and the server log also showed SCRIPT KILL.

Signed-off-by: Binbin <binloveplay1314@qq.com>
2024-10-15 23:32:42 +08:00
Binbin
dc05a327f9
Take hz into account in activerehashing to avoid CPU spikes (#977)
Currently in conf we describe activerehashing as: Active rehashing
uses 1 millisecond every 100 milliseconds of CPU time. This is the
case for hz = 10.

If we change hz, the description in conf will be inaccurate. Users
may notice that the server spends some CPU (used in activerehashing)
at high hz but don't know why, since our cron calls are fixed to 1ms.

This PR takes hz into account and fixes the CPU usage at 1% (this may
not be accurate in some cases, because we do 100-step rehashing in
dictRehashMicroseconds, but it can avoid CPU spikes in this case).

This PR also improves the description of the activerehashing
configuration item to explain this change.
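
The scaling, sketched (arithmetic only; the variable name is
illustrative):

```c
/* 1% CPU: server.hz cron calls per second, 1,000,000 us per second,
 * so each cron call may spend (1000000 / server.hz) / 100 us rehashing.
 * hz = 10  -> 1000 us (the old fixed 1 ms); hz = 100 -> 100 us. */
uint64_t rehash_us = 1000000 / server.hz / 100;
```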

Signed-off-by: Binbin <binloveplay1314@qq.com>
Co-authored-by: Viktor Söderqvist <viktor.soderqvist@est.tech>
2024-10-15 23:32:22 +08:00
Romain Geissler @ Amadeus
e30ae762a8
Rename z{malloc,calloc,realloc,free} into valkey_{malloc,calloc,realloc,free} (#1169)
The zcalloc symbol is a symbol name already used by zlib, which is
defining other names using the "z" prefix specific to zlib. In practice,
linking valkey with a static openssl, which itself might depend on a
static libz will result in link time error rejecting multiple symbol
definitions.

Fixes: #1157

Signed-off-by: Romain Geissler <romain.geissler@amadeus.com>
2024-10-15 13:05:22 +02:00
Binbin
416defdc0e
Minor cleanups in acl-v2 tests (#1166)
1. Make sure to assert the ERR prefix.
2. Match "Syntax error*" in case of the message change.

Signed-off-by: Binbin <binloveplay1314@qq.com>
2024-10-15 10:30:03 +08:00
Binbin
87b5e13465
Use listLast to replace listIndex -1 (#1163)
Minor cleanup: listLast does the same thing, is widely used,
and is easier to understand (less code).

Signed-off-by: Binbin <binloveplay1314@qq.com>
2024-10-15 10:29:52 +08:00
Binbin
9c20c84251
Set fail-fast to false in daily CI (#1162)
Currently in our daily CI, if a job fails, it cancels the other jobs
in the same matrix. We want to avoid this so that all jobs in a matrix
can run to completion.

Docs: jobs.<job_id>.strategy.fail-fast applies to the entire matrix.
If jobs.<job_id>.strategy.fail-fast is set to true or its expression
evaluates to true, GitHub will cancel all in-progress and queued jobs
in the matrix if any job in the matrix fails. This property defaults
to true.
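
For reference, the relevant workflow setting (standard GitHub Actions
syntax):

```
strategy:
  fail-fast: false  # let the other matrix jobs keep running on failure
```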

Signed-off-by: Binbin <binloveplay1314@qq.com>
2024-10-15 10:29:34 +08:00
ranshid
36d438ba27
Deflake test sync should continue if not all slaves dropped dual-channel-replication (#1164)
Sometimes when dual-channel is turned off, the tested replica might
disconnect on COB overrun. Disable the replica COB limit in order to
prevent such cases.

Fixes: #1153

Signed-off-by: Ran Shidlansik <ranshid@amazon.com>
Signed-off-by: Binbin <binloveplay1314@qq.com>
Co-authored-by: Binbin <binloveplay1314@qq.com>
2024-10-14 15:31:59 +08:00
ranshid
597aa037cc
Deflake test Primary COB growth with inactive replica (#1165)
In case of a valgrind run, the replica might get disconnected from the
primary because repl-timeout is reached. The fix is to configure a
larger timeout for valgrind tests.

**Partially** fixes: #1152

Signed-off-by: Ran Shidlansik <ranshid@amazon.com>
2024-10-14 15:30:29 +08:00
Binbin
1a5c80fe90
Minor comments cleanup around replication.c (#1154)
Typo, comment cleanups.

Signed-off-by: Binbin <binloveplay1314@qq.com>
2024-10-14 12:37:19 +08:00
Binbin
e50f31ef3a
Fix aof race in shutdown nosave timedout script test (#1156)
CI reported this failure:
```
*** [err]: SHUTDOWN NOSAVE can kill a timedout script anyway in tests/unit/scripting.tcl
Expected 'BUSY Valkey is busy running a script. *' to match '*connection refused*' (context: type eval line 8 cmd {assert_match {*connection refused*} $e} proc ::test)
```

We can see the logs the shutdown got rejected because there is an AOFRW
pending:
```
Writing initial AOF, can't exit.
Errors trying to shut down the server. Check the logs for more information.
```

The reason is that the previous test enabled AOF.

Signed-off-by: Binbin <binloveplay1314@qq.com>
2024-10-13 22:06:28 +08:00
Masahiro Ide
262d970a50
Move prepareClientToWrite out of loop for HGETALL command (#1119)
Similar to #860 but this is for HGETALL families (HGETALL/HKEYS/HVALS).
This patch moves `prepareClientToWrite` out of the loop to reduce the
function overhead.

Signed-off-by: Masahiro Ide <imasahiro9@gmail.com>
Co-authored-by: Madelyn Olson <madelyneolson@gmail.com>
2024-10-11 21:28:42 -07:00
Shivshankar
ef971a34eb
Correct the note details for deprecated config 'io-threads-do-reads' (#1150)
Remove explicit reference to removal and just indicate to avoid using it.

Signed-off-by: Shivshankar-Reddy <shiva.sheri.github@gmail.com>
2024-10-11 21:21:09 -07:00
Binbin
014219879d
Fix typo last_procssed -> last_processed (#1142)
Minor typo.

Signed-off-by: Binbin <binloveplay1314@qq.com>
2024-10-11 00:09:22 +08:00
Shivshankar
079f18ad97
Add io-threads-do-reads config to deprecated config table to have no effect. (#1138)
this fixes: https://github.com/valkey-io/valkey/issues/1116

_Issue details from #1116 by @zuiderkwast_ 

> This config is undocumented since #758. The default was changed to
"yes" and it is quite useless to set it to "no". Yet, it can happen that
some user has an old config file where it is explicitly set to "no". The
result will be bad performance, since I/O threads will not do all the
I/O.
> 
> It's indeed confusing.
> 
> 1. Either remove the whole option from the code. And thus no need for
documentation. _OR:_
> 2. Introduce the option back in the configuration, just as a comment
is fine. And showing the default value "yes": `# io-threads-do-reads
yes` with additional text.
> 
> _Originally posted by @melroy89 in [#1019 (reply in
thread)](https://github.com/orgs/valkey-io/discussions/1019#discussioncomment-10824778)_

---------

Signed-off-by: Shivshankar-Reddy <shiva.sheri.github@gmail.com>
2024-10-10 17:46:09 +02:00
Roshan Khatri
9b8a06137c
Fix empty response for ACL CAT category subcommand for module defined categories (#1140)
The module commands which were added to acl categories were getting
skipped when `ACL CAT category` command was executed.

This PR fixes the bug.
Before:
```
127.0.0.1:6379> ACL CAT foocategory
(empty array)
```
After:
```
127.0.0.1:6379> ACL CAT foocategory
aclcheck.module.command.test.add.new.aclcategories
```

---------

Signed-off-by: Roshan Khatri <rvkhatri@amazon.com>
Co-authored-by: Harkrishn Patro <bunty.hari@gmail.com>
2024-10-09 21:20:47 -07:00
kronwerk
cd8de095c4
Add flush-before-load option for repl-diskless-load (#909)
A new option for diskless replication on the replica side.

After a network failure, the replica may need to perform a full sync.
The other option for diskless full sync is `swapdb`, but it temporarily
uses twice as much memory. In situations where this is not acceptable,
and where losing data is acceptable, `flush-before-load` can be
useful. If the full sync fails, though, the old data is lost. Therefore,
the new option is marked as "dangerous".
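
Assuming the option is a new value of the existing repl-diskless-load
config (an assumption based on the title), usage would look like:

```
repl-diskless-load flush-before-load
```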

---------

Signed-off-by: kronwerk <ca11e5e22g@gmail.com>
Signed-off-by: kronwerk <kronwerk@users.noreply.github.com>
Co-authored-by: kronwerk <ca11e5e22g@gmail.com>
2024-10-09 13:11:53 +02:00
Binbin
1892f8a731
Add server log when module load fails with busy name (#1084)
Currently, when module loading fails due to a busy name, we
don't have a clean way to assist troubleshooting.

Case 1: when loading the same module multiple times, we cannot
determine the cause of the failure without referring to
the module list or the earliest module load log. The log
may not exist, and sometimes it is difficult for people
to connect the failure to the module list.

Case 2: when multiple modules use the same module name,
we cannot quickly identify the busy name without referring
to the module list and the earliest module load log.
When different people write modules with the same module name,
they don't easily make the connection.

So in this PR, during module onload, we try to
print a busy-name log if this happens. Currently we check
ctx.module: if it is NULL, it means the Init call
failed, and Init currently only fails on a busy name.

It's kind of ugly. It would have been nice if we could have had a
better way for onload to signal why the load failed.

Signed-off-by: Binbin <binloveplay1314@qq.com>
2024-10-09 16:10:29 +08:00
chx9
cba8eaf4c9
fix typo (#1136)
Signed-off-by: chx9 <cheng.huan@icloud.com>
2024-10-08 08:07:51 -07:00
Madelyn Olson
e617bf2ddc
Removing incorrect comment about a warning (#1132)
There is a lot of bad legacy usage of `default:` with enums, which is an
anti-pattern. If you omit the default, the compiler will tell you if a
new enum value was added and that it is missing from a switch statement.

Someone mentioned on another PR they used `default:` because of this
warning, so just removing it, but might create an issue to do a wider
cleanup.
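
A small illustration of the warning behavior:

```c
typedef enum { STATE_OK, STATE_FAIL, STATE_RETRY } state_t;

void handle(state_t s) {
    switch (s) {
    case STATE_OK: break;
    case STATE_FAIL: break;
    /* No default: if a new enum member is added, -Wswitch flags this
     * switch as not handling it. A default: would silence that. */
    case STATE_RETRY: break;
    }
}
```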

Signed-off-by: Madelyn Olson <madelyneolson@gmail.com>
2024-10-07 11:56:15 -07:00
Masahiro Ide
b5eb793079
Eliminate hashTypeIterator memory allocation by assigning it on stack (#1105)
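A sketch of the change's shape (the init-in-place and reset names are
hypothetical; the heap variant matches the pre-existing API):

```c
/* Before: heap-allocated iterator, one malloc/free per call. */
hashTypeIterator *hi = hashTypeInitIterator(o);
/* ... iterate ... */
hashTypeReleaseIterator(hi);

/* After: the iterator lives on the stack. */
hashTypeIterator iter;
hashTypeInitIterator(o, &iter); /* hypothetical in-place init */
/* ... iterate ... */
hashTypeResetIterator(&iter);   /* hypothetical cleanup, no free */
```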
Signed-off-by: Masahiro Ide <masahiro.ide@lycorp.co.jp>
Signed-off-by: Masahiro Ide <imasahiro9@gmail.com>
Co-authored-by: Viktor Söderqvist <viktor.soderqvist@est.tech>
Co-authored-by: Masahiro Ide <masahiro.ide@lycorp.co.jp>
2024-10-06 21:34:45 +02:00
otheng
a1cc7c263a
Reuse obey_client variable in processCommand() function (#1101)
I’ve prepared a minor fix for `processCommand()` function. 

In `processCommand()`, the `obey_client` variable is created, but some
conditional statements call the `mustObeyClient()` function instead of
reusing `obey_client`.

I’ve modified these statements to reuse `obey_client`.

Since I’m relatively new to Redis, please let me know if there are any
reasons why the conditional statements need to call `mustObeyClient()`
again.

Thank you for taking the time to review my PR.

Signed-off-by: otheng03 <07c00h@gmail.com>
2024-10-06 10:40:58 -07:00
Viktor Söderqvist
00c97979d9
Make ./runtest --dump-logs dump logs on crash (#1117)
Until now, this flag only dumped logs on a failed assert in test case.
It is useful that this flag dumps logs on a crash as well.

Signed-off-by: Viktor Söderqvist <viktor.soderqvist@est.tech>
2024-10-06 10:40:36 -07:00
Shivshankar
0c49053214
Adding the "-j" option in ci make commands to parallelize CI builds (#1128)
fixes: https://github.com/valkey-io/valkey/issues/1123 

As per GitHub documentation, below is core information on runners.

**Linux:**
public repositories: 4 cores 
private repositories: 2 cores

**macOS:**
It is 3 or 4 cores, depending on the repository type and the processor.

**Reference details for more information:** Discussion in
https://github.com/valkey-io/valkey/issues/1123

- Public repo:
https://docs.github.com/en/actions/using-github-hosted-runners/using-github-hosted-runners/about-github-hosted-runners#standard-github-hosted-runners-for-public-repositories

- Private repo:
https://docs.github.com/en/actions/using-github-hosted-runners/using-github-hosted-runners/about-github-hosted-runners#standard-github-hosted-runners-for--private-repositories

Suggested-by: zhenwei pi <pizhenwei@bytedance.com>
Signed-off-by: Shivshankar-Reddy <shiva.sheri.github@gmail.com>
2024-10-05 21:12:07 -07:00
zhenwei pi
b96f8813b7
Add tags into .gitignore (#1125)
ctags is widely used on Linux platforms; add tags to .gitignore.

Signed-off-by: zhenwei pi <pizhenwei@bytedance.com>
2024-10-05 10:03:57 +02:00
zhenwei pi
23ae21244e
RDMA: use protected mode for test (#1124)
Since a7cbca40661 ("RDMA: Support .is_local method (#1089)"),
valkey-server started to support auto-detect local connection, then we
can use protected mode for local RDMA device for test.

Signed-off-by: zhenwei pi <pizhenwei@bytedance.com>
2024-10-04 23:22:48 +02:00
Shivshankar
c8aaceed46
Correct the typo in valkey.conf file (#1118)
Correct the typo in valkey.conf file

Signed-off-by: Shivshankar-Reddy <shiva.sheri.github@gmail.com>
2024-10-04 13:30:59 -07:00
Parth
d8cd3527bf
Removing Redis from internal lua function names and comments (#1102)
Improved documentation and readability of lua code as well as removed references to Redis.

---------

Signed-off-by: Parth Patel <661497+parthpatel@users.noreply.github.com>
2024-10-04 12:58:42 -07:00
Shivshankar
1c22680fa7
Include second solo test execution in total test count (#1071)
This change counts both solo test executions to give an accurate total number of tests being run.

---------

Signed-off-by: Shivshankar-Reddy <shiva.sheri.github@gmail.com>
2024-10-04 10:19:44 -07:00
Ricardo Dias
6a8540cefe
Fix some uninitialized fields in client struct (#1126)
This commit adds initialization code for the fields
`io_last_reply_block` and `io_last_bufpos` of the `client` struct.

While in the current code flow these fields are only accessed after
being written in `trySendWriteToIOThreads`, I discovered that they
were not being initialized while making some changes to the code flow
of IO threads.

I believe it's good practice to initialize all fields of a struct upon
creation, as it avoids future bugs that are usually hard to debug.

Signed-off-by: Ricardo Dias <ricardo.dias@percona.com>
2024-10-04 09:17:49 -07:00
Viktor Söderqvist
dcac3e1499
Fix undefined-sanitizer warning in rax test (#1122)
Fix the warning introduced in #688:

```
unit/test_rax.c:168:15: runtime error: left shift of 36625 by 16 places cannot be represented in type 'int'
SUMMARY: UndefinedBehaviorSanitizer: undefined-behavior unit/test_rax.c:168:15 in 
Fuzz test in mode 1 [7504]: 
```
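
A typical fix for this class of warning (illustrative, not necessarily
the exact patch):

```c
/* Undefined: value << 16 overflows a signed int for large values. */
int bad = value << 16;
/* Defined: do the shift in an unsigned type, convert back explicitly. */
int good = (int)((unsigned int)value << 16);
```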

Signed-off-by: Viktor Söderqvist <viktor.soderqvist@est.tech>
2024-10-03 17:34:03 +02:00
Madelyn Olson
150c197bdd
Apply CVE patches for CVE-2024-31449, CVE-2024-31227, CVE-2024-31228 (#1115)
Applying the CVEs against mainline.

(CVE-2024-31449) Lua library commands may lead to stack overflow and
potential RCE.
(CVE-2024-31227) Potential Denial-of-service due to malformed ACL
selectors.
(CVE-2024-31228) Potential Denial-of-service due to unbounded pattern
matching.

Signed-off-by: Madelyn Olson <madelyneolson@gmail.com>
2024-10-02 19:22:09 -04:00
Melroy van den Berg
b77440a9b9
Build binary releases with systemd support (#1107)
- Add systemd support to the build artifact tarballs, so people can use
them under systemd-compatible distros. As discussed here:
https://github.com/orgs/valkey-io/discussions/1103#discussioncomment-10815549.
This adds `libsystemd-dev` to the installed packages and
`USE_SYSTEMD=yes` to the build.
- Cleanup & bring the arm & x86 workflow files in-sync. It was a bit of
a mess ;) (removing `jq wget awscli` from the 'Tarball' step)

Signed-off-by: Melroy van den Berg <melroy@melroy.org>
2024-10-02 19:48:54 +02:00
Melroy van den Berg
43c80a2860
Avoid .c, .d and .o files from being copied to the binary tar.gz releases (#1106)
As discussed here:
https://github.com/orgs/valkey-io/discussions/1103#discussioncomment-10814006

`cp` can't be used anymore; `rsync` is more powerful and allows
excluding files.

Alternatively:

1. Remove the .c, .d and .o files, which isn't ideal either.
2. Improve the build. Eg. by building inside a `build` directory instead
of in the src folder.

Ps. I know these workflows aren't triggered in this PR, only via the
"Build Release Packages" workflow action:
https://github.com/valkey-io/valkey/actions/workflows/build-release-packages.yml..
So I can't fully test in this PR, but it should work ^^

Ps. ps. I did test `rsync -av --exclude='*.c' --exclude='*.d'
--exclude='*.o' src/valkey-*` command in isolation and that works as
expected!

---------

Signed-off-by: Melroy van den Berg <melroy@melroy.org>
2024-10-02 19:43:34 +02:00
Guillaume Koenig
f85d8bfde9
Rax size tracking (#688)
Introduce a `size_t` field into the rax struct to track allocation size.
Update the allocation size on rax inserts and deletes.
Return the allocation size when `raxAllocSize` is called.
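
A sketch of the struct change (the new field name is hypothetical; the
existing fields are as in rax.h):

```c
typedef struct rax {
    raxNode *head;
    uint64_t numele;   /* number of elements */
    uint64_t numnodes; /* number of nodes */
    size_t alloc_size; /* new: updated on every insert/delete so
                        * raxAllocSize() is O(1) instead of sampled */
} rax;
```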

This size tracking is now used in MEMORY USAGE and MEMORY STATS in place
of the previous method based on sampling.

The module API allows to create sorted dictionaries, which are backed by
rax. Users now also get precise memory allocation for them (through
`ValkeyModule_MallocSizeDict`).

Fixes #677.

For the release notes:

* MEMORY USAGE and MEMORY STATS are now exact for streams, rather than
based on sampling.

---------

Signed-off-by: Guillaume Koenig <knggk@amazon.com>
Signed-off-by: Guillaume Koenig <106696198+knggk@users.noreply.github.com>
Co-authored-by: Joey <yzhaon@amazon.com>
Co-authored-by: Viktor Söderqvist <viktor.soderqvist@est.tech>
2024-10-02 19:28:55 +02:00
Binbin
9827eef4d0
Avoid timing issue in diskless-load-swapdb test (#1077)
Since we paused the primary node earlier, the replica may enter
the cluster-down state due to primary pfail. Set allow-read here
to prevent subsequent read errors.

Signed-off-by: Binbin <binloveplay1314@qq.com>
2024-10-01 13:14:30 -07:00
Wen Hui
613e4e028f
Update keyspace notifications link to valkey.io in code comment (#1100)
As the title describes.


![image](https://github.com/user-attachments/assets/655324e6-b042-4c2f-b558-b912a7d2c10c)

Signed-off-by: hwware <wen.hui.ware@gmail.com>
2024-10-01 04:30:35 -04:00
Masahiro Ide
ac569c09f8
Create empty lua tables with specified initial capacity as much as possible (#1092)
Currently, we create Lua tables without initial capacity even when the
capacity is known. As a result, we need to resize the Lua tables
repeatedly when converting a RESP-serialized object to a Lua object,
which consumes a bit of extra CPU when we need to transfer
RESP-serialized data to the Lua world.

This patch removes this extra resizing to reduce (re-)allocation
overhead.
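
The core of the idea, using the standard Lua C API:

```c
/* Instead of lua_newtable(L) (zero capacity, grown repeatedly)... */
lua_newtable(L);
/* ...preallocate when the RESP reply tells us the element count:
 * narr = expected array entries, nrec = expected hash entries. */
lua_createtable(L, narr, nrec);
```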

| name | unstable bb57dfe6303 (rps) | this patch(rps) | improvements |
| --------------- | -------- | --------- | -------------- |
| evalsha - hgetall h1 | 60565.68 | 64487.01 |  6.47% |
| evalsha - hgetall h10 | 47023.41 | 50602.17 | 7.61% |
| evalsha - hgetall h25 | 33572.82 | 37345.48 | 11.23% |
| evalsha - hgetall h50 | 24206.63 | 25276.14 | 4.42% |
| evalsha - hgetall h100 | 15068.87 | 15656.8 | 3.90% |
| evalsha - hgetall h300 | 5948.56 | 6094.74 | 2.46% |

Signed-off-by: Masahiro Ide <masahiro.ide@lycorp.co.jp>
Co-authored-by: Masahiro Ide <masahiro.ide@lycorp.co.jp>
2024-09-30 20:59:22 -07:00
Viktor Söderqvist
69eddb4874
Speed up AOF rewrite test case (#1093)
These two test cases run in a loop:

* AOF rewrite during write load: RDB preamble=yes
* AOF rewrite during write load: RDB preamble=no

Both of the test cases build up a lot of data (3-4 million keys when I
run locally) so we should empty the data before the second test case.
Otherwise, the second test cases adds keys on top of the keys added in
the first test case, resulting in the double number of keys and takes
more time.

Before this commit:

    [ok]: AOF rewrite during write load: RDB preamble=yes (18225 ms)
    [ok]: AOF rewrite during write load: RDB preamble=no (37249 ms)

After:

    [ok]: AOF rewrite during write load: RDB preamble=yes (18777 ms)
    [ok]: AOF rewrite during write load: RDB preamble=no (19940 ms)

Signed-off-by: Viktor Söderqvist <viktor.soderqvist@est.tech>
2024-09-30 19:55:23 +02:00
ranshid
c873287d16
avoid double close on replica main channel (#1097)
fixes #1088

Signed-off-by: Ran Shidlansik <ranshid@amazon.com>
2024-09-30 07:41:05 -07:00
zhenwei pi
a7cbca4066
RDMA: Support .is_local method (#1089)
There is no Ethernet-style virtual device (like lo 127.0.0.1) for RDMA;
however, a connection with the same local address and peer address is
considered local.

Signed-off-by: zhenwei pi <pizhenwei@bytedance.com>
2024-09-30 11:54:05 +02:00
chx9
bb57dfe630
Fix typo in test_helper.tcl (#1080)
Fix typo in test_helper.tcl: even driven => event driven

Signed-off-by: chx9 <cheng.huan@icloud.com>
2024-09-28 11:48:35 +08:00
Shivshankar
a37dee4b3a
Change return value of aeTimeProc callback function to long long. (#1057)
moduleTimerHandler is an aeTimeProc handler, and the event loop gets
created with it. However, I found that the function's return type is
int while it actually returns a "long long" value (i.e., next_period),
and the return value is assigned to an int variable in
processTimeEvents (where time events are processed); this might cause
an overflow of the timer values. So the return type of the function is
changed to long long, and other callback functions' return types are
updated to be consistent.
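
The shape of the change (parameter list as in the existing ae.h
callback typedef):

```c
/* Before: returning next_period (a long long) through an int could
 * truncate or overflow large timer periods. */
typedef int aeTimeProc(struct aeEventLoop *eventLoop, long long id, void *clientData);

/* After: */
typedef long long aeTimeProc(struct aeEventLoop *eventLoop, long long id, void *clientData);
```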

I found this when I was checking the functions reported in the
https://github.com/valkey-io/valkey/issues/1054 issue stacktrace. (FYI,
this just updates the return type to be consistent; it will not
fix the issue reported.)

Signed-off-by: Shivshankar-Reddy <shiva.sheri.github@gmail.com>
2024-09-27 12:20:47 -07:00
Binbin
bf8183d065
Add --cluster option to runtest to run only cluster tests (#1052)
Currently cluster tests in unit/cluster are run as part of
./runtest. Sometimes we change the cluster code and only
want to run cluster tests. This PR adds a --cluster option
to runtest so that we can run only cluster tests.

Signed-off-by: Binbin <binloveplay1314@qq.com>
2024-09-26 10:31:57 +08:00
zhenwei pi
983bb5110d
Fix RDMA build dependence (#1074)
RDMA module has dependence on '$(SERVER_NAME)' rather than the old style
'$(REDIS_SERVER_NAME)'.

Signed-off-by: zhenwei pi <pizhenwei@bytedance.com>
2024-09-25 11:30:45 +02:00
Viktor Söderqvist
99865b197c
Fix bug for CLUSTER SLOTS from EVAL over TLS (#1072)
For fake clients like the ones used for Lua and modules, we don't
determine TLS in the right way, causing CLUSTER SLOTS from EVAL over TLS
to fail a debug-assert.

This error was introduced when the caching of CLUSTER SLOTS was
introduced, i.e. in 8.0.0.

Signed-off-by: Viktor Söderqvist <viktor.soderqvist@est.tech>
2024-09-25 03:55:53 -04:00
Mikhail Koviazin
6b3a90e40e
Added new reformat commit to .git-blame-ignore-revs (#1073)
Signed-off-by: Mikhail Koviazin <mikhail.koviazin@aiven.io>
Signed-off-by: Binbin <binloveplay1314@qq.com>
Co-authored-by: Binbin <binloveplay1314@qq.com>
2024-09-25 15:34:36 +08:00
Binbin
80fcbd3fec
Fix module / script call CLUSTER SLOTS / SHARDS fake client check crash (#1063)
The reason is that VM_Call uses a fake client without a connection,
so we also need to check whether c->conn is NULL.

This also affects scripts: if these commands are called from a script,
the server will crash. Injecting the commands into the AOF will also
cause a startup failure.
Fixes #1054.

Signed-off-by: Binbin <binloveplay1314@qq.com>
2024-09-25 14:50:48 +08:00
Binbin
6e0216471d
Trigger the election as soon as possible when doing a forced manual failover (#1067)
In the CLUSTER FAILOVER FORCE case, we set mf_can_start to
1 and wait for a cron to trigger the election. We can also set the
CLUSTER_TODO_HANDLE_MANUALFAILOVER flag so that we
can start the election as soon as possible instead of waiting for
the cron, so that we won't have a 100ms delay (clusterCron).

Signed-off-by: Binbin <binloveplay1314@qq.com>
2024-09-25 12:08:48 +08:00
Mikhail Koviazin
af811748e7
clang-format: set ColumnLimit to 0 and reformat (#1045)
This commit hopefully improves the formatting of the codebase by setting
ColumnLimit to 0 and hence stopping clang-format from trying to put as
much stuff in one line as possible.

This change enabled us to remove most of `clang-format off` directives
and fixed a bunch of lines that looked like this:

```c
#define KEY \
    VALUE /* comment */
```

Additionally, one pair of `clang-format off` / `clang-format on` had
`clang-format off` as the second comment and hence didn't enable the
formatting for the rest of the file. This commit addresses this issue as
well.

Please tell me if anything in the changes seem off. If everything is
fine, I will add this commit to `.git-blame-ignore-revs` later.

---------

Signed-off-by: Mikhail Koviazin <mikhail.koviazin@aiven.io>
2024-09-25 01:22:54 +02:00
Binbin
6ce75cdea8
Fix replica online timing issue in failover test (#1044)
CI reported this failure:
```
[exception]: Executing test client: ERR FAILOVER target replica is not online..
ERR FAILOVER target replica is not online.
    while executing
"$node_0 failover to $node_1_host $node_1_port"
```

We can see that somehow the replica is not online in time,
causing this failure. A verify_replica_online check was added to make
sure the replica is online for the test.

Signed-off-by: Binbin <binloveplay1314@qq.com>
2024-09-23 17:35:02 +08:00
Ricardo Dias
c15eee3407
Changes tcmalloc.h header location (#1039)
This commit changes the `tcmalloc.h` header location from the deprecated
location `google/` to `gperftools/`.

**Why we're doing this now?**

The location `google/tcmalloc.h` has been deprecated for more than 10
years in favor of `gperftools/tcmalloc.h`, and the deprecated location
will be removed in the next release of gperftools.

Fixes #1033

Signed-off-by: Ricardo Dias <ricardo.dias@percona.com>
2024-09-23 10:23:48 +02:00
Binbin
56fba564b6
Print an empty primary log when primary lost its last slot (#1064)
The one in CLUSTER SETSLOT helps us keep track of state better,
and of course it also makes the test case happy.

The one in the gossip process fixes a problem where a replica
could print a log saying it is an empty primary.
Signed-off-by: Binbin <binloveplay1314@qq.com>
Co-authored-by: Ping Xie <pingxie@outlook.com>
2024-09-23 13:14:09 +08:00
Binbin
d07c29791a
Use _Thread_local to solve threads.h build issue (#1053)
Apparently this fails to compile on some macOS versions,
and the internet claims _Thread_local is portable.
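
For reference, the portable C11 spelling next to the macro that may be
missing:

```c
/* <threads.h> defines thread_local as a macro for _Thread_local, but
 * that header is missing on some macOS toolchains. The C11 keyword
 * itself works without the header. */
static _Thread_local int my_counter;
```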

Fixes #1051.

Signed-off-by: Binbin <binloveplay1314@qq.com>
2024-09-22 20:20:55 +08:00
Shivshankar
56c90b78e3
Fix a typo in the valkey.conf (#1048)
Signed-off-by: Shivshankar-Reddy <shiva.sheri.github@gmail.com>
2024-09-21 21:22:39 +08:00
Binbin
ea7a7995ed
Fix default value of primary-reboot-down-after-period in sentinel.conf (#1040)
Since the monitor value here is mymaster, we need to make sure the
primary name is the same; otherwise the default configuration cannot
start Sentinel.
```
sentinel monitor mymaster 127.0.0.1 6379 2
```

The following error occurs when the default configuration is started:
```
*** FATAL CONFIG FILE ERROR (Version 255.255.255) ***
Reading the configuration file, at line 358
>>> 'SENTINEL primary-reboot-down-after-period myprimary 0'
No such master with specified name.
```

Introduced in #647.

Signed-off-by: Binbin <binloveplay1314@qq.com>
2024-09-21 21:09:13 +08:00
Binbin
d9c41e9ef9
Fix timing issue in the new tot-net-out replica test (#1060)
Apparently there is a timing issue when using wait_for_ofs_sync:
```
[exception]: Executing test client: can't read "out_before": no such variable.
can't read "out_before": no such variable
```

The reason is that if the connection between the primary
and the replica is not established yet, the master_repl_offset
of the primary and replica in wait_for_ofs_sync is 0, and
the check fails, resulting in no replica client in the
client list below.

In this case, we need to make sure the replica is online
before proceeding.

Signed-off-by: Binbin <binloveplay1314@qq.com>
2024-09-20 14:25:05 +08:00
Binbin
7fab15795f
Add log about old primary after myself failover (#1058)
Sometimes it is hard to see the old primary during a
multi-primary failover; adding this log can help
us find the old primary node.

Signed-off-by: Binbin <binloveplay1314@qq.com>
Co-authored-by: Ping Xie <pingxie@outlook.com>
2024-09-20 14:15:19 +08:00
Shivshankar
56fd97733b
Move printver test to info-command file (#1056)
This fixes: #219

Signed-off-by: Shivshankar-Reddy <shiva.sheri.github@gmail.com>
2024-09-20 10:18:19 +08:00
ranshid
4593dc2f05
Fix memory allocation for server databases (#1046)
Fix a bug in the way we allocate memory for the server databases
Introduced in #156.

Signed-off-by: Ran Shidlansik <ranshid@amazon.com>
2024-09-18 19:35:35 +08:00
Shivshankar
ba71c7e56e
Copy 'errno' and use copied value in the if check of retry in cluster migrate commands socket_err block. (#1042)
errno is a global variable shared with system calls, so there is
a chance it may be overwritten during IO free or socket close in the
migrate command code. It is better to copy it before the free or
close and use the copied value for the retry check in the socket_err
block. So a new variable was added to take the copy, and the copy is
used for the check.
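
The pattern, sketched (names illustrative):

```c
/* Save errno immediately after the failed call; the close()/free()
 * paths below may run system calls that overwrite it. */
int saved_errno = errno;
close(fd);
if (saved_errno == EAGAIN || saved_errno == EWOULDBLOCK) {
    /* the retry decision uses the saved copy, not the clobbered errno */
}
```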

Signed-off-by: Shivshankar-Reddy <shiva.sheri.github@gmail.com>
2024-09-18 10:34:11 +08:00
Josef Šimánek
ff69b4be1d
Fix casing in README.md (#1043)
TO -> To

Signed-off-by: Josef Šimánek <josef.simanek@gmail.com>
2024-09-18 10:32:40 +08:00
Binbin
f89ff3137d
Add --moduleapi option to better use runtest-moduleapi (#1007)
This allows us to avoid the error in #1002 and enables us to actually
use `./runtest-moduleapi --single xxx`.

Signed-off-by: Binbin <binloveplay1314@qq.com>
2024-09-17 19:50:38 +08:00
Shivshankar
9f8185f5c8
Update valkey-benchmark log output to reference 'server' instead of 'Redis' (#1029)
Replaced "Could not connect to Redis" with "Could not connect to server" in the log
output for connection errors in `getRedisContext` and `createClient`.

Signed-off-by: Shivshankar-Reddy <shiva.sheri.github@gmail.com>
2024-09-13 21:43:20 -07:00
Binbin
17390383b5
Replica flush the old data after RDB file is ok in disk-based replication (#926)
Call emptyData right before rdbLoad, so an error in the middle does
not drop the replication stream and leave an empty database.
The real change is in the disk-based part; the rest is just code
movement.

Signed-off-by: Binbin <binloveplay1314@qq.com>
2024-09-14 11:49:49 +08:00
Ping Xie
09def3cf03
Improve code readability in dict.c (#943)
This pull request improves code readability, as a follow up of #749.

- Internal Naming Conventions: Removed the use of underscores (_) for
internal static structures/functions.

- Descriptive Function Names: Updated function names to be more
descriptive, making their purpose clearer. For instance, `_dictExpand`
is renamed to `dictExpandIfAutoResizeAllowed`.

---------

Signed-off-by: Ping Xie <pingxie@google.com>
2024-09-13 17:21:20 -07:00
Binbin
dcc7678fc4
Fix replica unable trigger migration when it received CLUSTER SETSLOT in advance (#981)
Fix timing issue in evaluating `cluster-allow-replica-migration` for replicas

There is a timing bug where the primary and replica have different 
`cluster-allow-replica-migration` settings. In issue #970, we found that if 
the replica receives `CLUSTER SETSLOT` before the gossip update, it remains 
in the original shard. This happens because we only process the 
`cluster-allow-replica-migration` flag for primaries during `CLUSTER SETSLOT`.

This commit fixes the issue by also evaluating this flag for replicas in the 
`CLUSTER SETSLOT` path, ensuring correct replica migration behavior.

Closes #970
---------

Signed-off-by: Binbin <binloveplay1314@qq.com>
Co-authored-by: Ping Xie <pingxie@outlook.com>
2024-09-13 15:32:20 -07:00
Wen Hui
d090fbefde
Add the missing help output for new command: client capa redirect (#1025)
Update client help output message for new command: client capa redirect

---------

Signed-off-by: hwware <wen.hui.ware@gmail.com>
Signed-off-by: Binbin <binloveplay1314@qq.com>
Co-authored-by: Binbin <binloveplay1314@qq.com>
Co-authored-by: Madelyn Olson <madelyneolson@gmail.com>
2024-09-13 09:22:21 -07:00
Ping Xie
3cc619f637
Disable flaky empty shard slot migration tests (#1027)
Will continue my investigation offline

Signed-off-by: Ping Xie <pingxie@google.com>
2024-09-13 00:02:39 -07:00
Binbin
f7c5b40183
Avoid false positive in election tests (#984)
The node may not be able to initiate an election in time due to
problems with cluster communication. If an election is initiated,
make sure its offset is 0.

Closes #967.

Signed-off-by: Binbin <binloveplay1314@qq.com>
2024-09-13 14:53:39 +08:00
Binbin
38457b7320
Trigger a save of the cluster configuration file before shutting down (#822)
The cluster configuration file is the metadata "database" for the
cluster. It is best to trigger a save when shutting down the server,
to avoid inconsistent content that is not refreshed.

We save nodes.conf whenever something that affects it has changed. But
we are saving nodes.conf in clusterBeforeSleep, and some events may
save it without an fsync, so there is a time gap.

Having shutdown do its own save seems good to me; it doesn't need to
care about the others.

At the same time, a comment is added where nodes.conf is unlocked to
explain why we actively unlock on shutdown.

Signed-off-by: Binbin <binloveplay1314@qq.com>
2024-09-12 15:43:12 +08:00
Ping Xie
76a59788e6
Re-enable empty-shard slot migration tests (#1024)
Related to #734 and #858

Signed-off-by: Ping Xie <pingxie@google.com>
2024-09-11 23:19:32 -07:00
xu0o0
3513f22027
Make clang-format insert a newline at end of file if missing (#1023)
clang generates a warning if there is no newline at the end of a
source file.

Update .clang-format to handle the missing newline at eof.
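
Assuming the current clang-format option name, the .clang-format
addition would be:

```
InsertNewlineAtEOF: true
```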

Signed-off-by: haoqixu <hq.xu0o0@gmail.com>
2024-09-11 22:33:07 -07:00
uriyage
8cca11ac54
Fix wrong count for replica's tot-net-out (#1013)
Fix duplicate calculation of replica's `net_output_bytes`

- Remove redundant calculation leftover from previous refactor
- Add test to prevent regression

Signed-off-by: Uri Yagelnik <uriy@amazon.com>
Signed-off-by: Binbin <binloveplay1314@qq.com>
Co-authored-by: Binbin <binloveplay1314@qq.com>
2024-09-12 10:36:40 +08:00
Madelyn Olson
fa348e2e59
Optimize the per slot dictionary by checking for cluster mode earlier (#995)
While doing some profiling, I noticed that getKeySlot() was a fairly
large part (~0.7%) of samples when running perf with a high pipeline
depth against a standalone server. I think this is because we do a very
late check for server.cluster_mode: we first call getKeySlot() and then
call calculateKeySlot(). (calculateKeySlot was surprisingly not
automatically inlined; we were doing a jump into it and then immediately
returning zero.) We then also do useless work in the form of caching
zero in client->slot, which will further mess with cache lines.

So, this PR tries to accomplish a few things.
1) The usage of the `slot` name made a lot more sense before the
introduction of the kvstore. Now with kvstore, we call this the database
index, so all the references to slot in standalone are no longer really
accurate.
2) Pull the cluster mode check all the way out of getKeySlot(), which
should hopefully be a bit more performant (see the sketch after this
list).
3) Remove calculateKeySlot() as independent from getKeySlot().
calculateKeySlot used to have 3 call sites outside of db.c, which
warranted its own function. It's now only called in two places,
pubsub.c and networking.c.
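
For illustration, a minimal sketch of the idea behind point 2 (a hypothetical wrapper, not the actual patch): branch on cluster mode once, at the call-site layer, so standalone traffic never enters slot calculation at all.

```c
/* Sketch only: key_slot stands in for the real slot-hashing routine. */
static inline int get_kvstore_index(int cluster_enabled, const char *key,
                                    int (*key_slot)(const char *key)) {
    if (!cluster_enabled) return 0; /* standalone: single database index */
    return key_slot(key);           /* cluster mode: do the slot math */
}
```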

I ran some profiling, and saw about ~0.3% improvement, but don't really
trust it because you'll see a much higher (~2%) variance in test runs
just by how the branch predictions will get changed with a new memory
layout. Running perf again showed no samples in getKeySlot() and a
reduction in samples in lookupKey(), so maybe this will help a little
bit.

---------

Signed-off-by: Madelyn Olson <madelyneolson@gmail.com>
2024-09-11 09:53:42 -07:00
Madelyn Olson
2b207ee1b3
Improve stability of hostnames test (#1016)
Maybe partially resolves https://github.com/valkey-io/valkey/issues/952.

The hostnames test relies on an assumption that node zero and node six
don't communicate with each other, to test a bunch of behavior in the
handshake state. This was previously done by dropping all MEET packets;
however, it seems like there was some case where node zero was sending a
single pong message to node 6, which was partially initializing the
state.

I couldn't track down why this happened, but I adjusted the test to
simply pause node zero which also correctly emulates the state we want
to be in since we're just testing state on node 6, and removes the
chance of errant messages. The test was failing about 5% of the time
locally, and I wasn't able to reproduce a failure with this new
configuration.

---------

Signed-off-by: Madelyn Olson <madelyneolson@gmail.com>
2024-09-11 09:52:34 -07:00
Mikhail Koviazin
c77e8f223c
Added .git-blame-ignore-revs (#1010)
This file enables developers to ignore certain revisions in
git-blame. This is quite handy considering there was a commit that
reformatted a large amount of code in valkey.

As a downside, one has to do a manual step for each clone of valkey to
enable this feature. The instructions are available in the file itself.

---------

Signed-off-by: Mikhail Koviazin <mikhail.koviazin@aiven.io>
2024-09-10 22:50:35 -07:00
Binbin
4033c99ef5
Fix module RdbLoad wrongly disable the AOF (#1001)
In RdbLoad, we disable AOF before emptyData and rdbLoad to prevent copy-on-write issues. After rdbLoad completes, AOF should be re-enabled, but the code incorrectly checks server.aof_state, which has been reset to AOF_OFF in stopAppendOnly. This leads to AOF not being re-enabled after being disabled.
---------

Signed-off-by: Binbin <binloveplay1314@qq.com>
2024-09-10 21:00:08 -07:00
Amit Nagler
1b24168450
Dual Channel Replication - Verify Replica Local Buffer Limit Configuration (#989)
Prior to comparing the replica buffer against the configured limit, we
need to ensure that the limit configuration is enabled. If the limit is
set to zero, it indicates that there is no limit, and we should skip the
buffer limit check.

---------

Signed-off-by: naglera <anagler123@gmail.com>
2024-09-10 17:26:28 -07:00
Lipeng Zhu
58fe9c0138
Use hashtable as the default type of temp set object during sunion/sdiff (#996)
This patch sets the temp set object to the hashtable type by default,
and does a simple prediction of the temp set object's encoding when
initializing `dstset`, to reduce unnecessary conversions.

## Issue Description

According to the existing code logic, for operations like `sunion` and
`sdiff` the temp set object could be `intset`, `listpack` or
`hashtable`. For `listpack`, efficiency is low for operations like
`find` and `compare`, which need to traverse all elements.
While exploring the hotspots, we found that `lpFind` and `memcmp` were
the bottleneck when running workloads like the ones below:

- [memtier_benchmark-2keys-set-10-100-elements-sunion.yml](https://github.com/redis/redis-benchmarks-specification/blob/main/redis_benchmarks_specification/test-suites/memtier_benchmark-2keys-set-10-100-elements-sunion.yml)
- [memtier_benchmark-2keys-set-10-100-elements-sdiff.yml](https://github.com/redis/redis-benchmarks-specification/blob/main/redis_benchmarks_specification/test-suites/memtier_benchmark-2keys-set-10-100-elements-sdiff.yml)


![image](https://github.com/user-attachments/assets/71dfc70b-2ad5-4832-a338-712deefca20e)

## Optimization 

This patch sets the temp set object to the hashtable type by default,
and does a simple prediction of the temp set object's encoding when
initializing `dstset`, to reduce unnecessary conversions.

### Test Environment

- OPERATING SYSTEM: Ubuntu 22.04.4 LTS
- Kernel: 5.15.0-116-generic
- PROCESSOR: Intel Xeon Platinum 8380
- Server and Client in same socket.

#### Server Configuration
```
taskset -c 0-3 ~/valkey/src/valkey-server /tmp/valkey.conf

port 9001
bind * -::*
daemonize no
protected-mode no
save ""
```

#### Performance Boost 

| Test Name | Perf Boost |
|-|-|
| [memtier_benchmark-2keys-set-10-100-elements-sunion.yml](https://github.com/redis/redis-benchmarks-specification/blob/main/redis_benchmarks_specification/test-suites/memtier_benchmark-2keys-set-10-100-elements-sunion.yml) | 41% |
| [memtier_benchmark-2keys-set-10-100-elements-sdiff.yml](https://github.com/redis/redis-benchmarks-specification/blob/main/redis_benchmarks_specification/test-suites/memtier_benchmark-2keys-set-10-100-elements-sdiff.yml) | 27% |


### More Tests
With the above test set, which has 110 elements total across the 2
given sets, we also did some benchmarking by adjusting the total number
of elements in all given sets. We can still observe the performance
boost.


![image](https://github.com/user-attachments/assets/b2ab420c-43e5-45de-9715-7d943df229cb)

---------

Signed-off-by: Lipeng Zhu <lipeng.zhu@intel.com>
Co-authored-by: Wangyang Guo <wangyang.guo@intel.com>
2024-09-10 22:09:18 +02:00
uriyage
9f0c80187e
Fix crash in async IO threads with TLS (#1011)
Fix for https://github.com/valkey-io/valkey/issues/997

Root Cause Analysis:
1. Two different jobs (READ and WRITE) may be sent to the same IO
thread.
2. When processing the read job in `processIOThreadsReadDone`, the IO
thread may find that the write job has also been completed.
3. In this case, the IO thread calls `processClientIOWriteDone` to first
process the completed write job and free the COBs
affbea5dc1/src/networking.c (L4666)
4. If there are pending writes (resulting from pipeline commands), a new
async IO write job is sent before processing the completed read job
affbea5dc1/src/networking.c (L2417)
When sending the write job, the `TLS_CONN_FLAG_POSTPONE_UPDATE_STATE`
flag is set to prevent the IO thread from updating the event loop, which
is not thread-safe.
5. Upon resuming the read job processing, the flag is cleared,
affbea5dc1/src/networking.c (L4685)
causing the IO thread to update the event loop.

Fix:
Prevent sending async write job for pending writes when a read job is
about to be processed.

Testing:
The issue could not be reproduced due to its rare occurrence, which
requires multiple specific conditions to align simultaneously.

Signed-off-by: Uri Yagelnik <uriy@amazon.com>
2024-09-10 11:20:10 -07:00
bentotten
affbea5dc1
For MEETs, save the extensions support flag immediately during MEET processing (#778)
For backwards compatibility reasons, a node will wait until it receives
a cluster message with the extensions flag before sending its own
extensions. This leads to a delay in shard ID propagation that can
corrupt nodes.conf with inaccurate shard IDs if a node is restarted
before this can stabilize.

This fixes much of that delay by immediately triggering the
extensions-supported flag during the MEET processing and attaching the
node to the link, allowing the PONG reply to contain OSS extensions.

Partially fixes #774

---------

Signed-off-by: Ben Totten <btotten@amazon.com>
Co-authored-by: Ben Totten <btotten@amazon.com>
2024-09-09 20:46:02 -07:00
Binbin
50c1fe59f7
Add missing moduleapi getchannels test and fix tests (#1002)
Signed-off-by: Binbin <binloveplay1314@qq.com>
2024-09-10 10:13:54 +08:00
zhaozhao.zz
f504cf233b
add assertion for kvstore's dictType (#1004)
Signed-off-by: zhaozhao.zz <zhaozhao.zz@alibaba-inc.com>
2024-09-09 12:13:18 -07:00
xu0o0
20d583f774
Migrate dict.c unit tests to new framework (#946)
This PR migrates the tests related to dict into the new test framework as
part of #428.

Signed-off-by: haoqixu <hq.xu0o0@gmail.com>
Signed-off-by: Binbin <binloveplay1314@qq.com>
Co-authored-by: Binbin <binloveplay1314@qq.com>
2024-09-09 13:03:15 +08:00
xu0o0
14016d2df7
Migrate listpack.c unit tests to new framework (#949)
This PR migrates the tests related to listpack into the new test framework
as part of #428.

Signed-off-by: haoqixu <hq.xu0o0@gmail.com>
Signed-off-by: Binbin <binloveplay1314@qq.com>
Co-authored-by: Binbin <binloveplay1314@qq.com>
2024-09-09 13:01:25 +08:00
Binbin
c642cf0134
Add client info to SHUTDOWN / CLUSTER FAILOVER logs (#875)
Print the full client info by using catClientInfoString; the
info is useful when we want to identify the source of a request.

Signed-off-by: Binbin <binloveplay1314@qq.com>
2024-09-08 16:26:56 +08:00
Binbin
6478526597
Fix aof base suffix when modifying aof-use-rdb-preamble during rewrite (#886)
If we modify aof-use-rdb-preamble in the middle of a rewrite,
we may get a wrong aof base suffix. This is because the suffix
is concatenated by the main process afterwards, and it may be
different from what it was at the beginning.

We cache this value when we start the rewrite.

Signed-off-by: Binbin <binloveplay1314@qq.com>
2024-09-07 23:27:59 +08:00
Binbin
9b51949abe
Fix missing replication link re-connection when primary's IP/port is updated in clusterProcessGossipSection (#965)
`clusterProcessGossipSection` currently doesn't trigger a check and call `replicationSetPrimary` when `myself`'s primary node’s IP/port is updated. This fix ensures that after every node address update, `replicationSetPrimary` is called if the updated node is `myself`'s primary. This prevents missed updates and ensures that replicas reconnect properly to maintain their replication link with the primary.
2024-09-05 22:19:50 -07:00
Binbin
9033734b6b
Add newline to argv in crash report when doing redact (#993)
Minor cleanup, introduced in #877.

Signed-off-by: Binbin <binloveplay1314@qq.com>
2024-09-05 11:13:29 +08:00
Kyle Kim (kimkyle@)
2d1eca577e
Add SLOT-STATS under CLUSTER HELP string. (#988)
Add help wording for cluster SLOT-STATS.

Signed-off-by: Kyle Kim <kimkyle@amazon.com>
2024-09-03 12:59:06 -07:00
Viktor Söderqvist
ea58fbf40d
Rewrite lazyfree docs in valkey.conf to reflect that lazy is now default (#983)
Before this doc update, the comments in valkey.conf said that DEL is a
blocking command, and even referred to other synchronous freeing as "in a
blocking way, like if DEL was called". This has now become confusing and
incorrect, since DEL is now non-blocking by default.

The comments also mentioned too much about the "old default" and only
later explain that the "new default" is non-blocking.

This doc update focuses on the current default and expresses it like
"Starting from Valkey 8.0, lazy freeing is enabled by default", rather
than using words like old and new.

This is a follow-up to #913.

---------

Signed-off-by: Viktor Söderqvist <viktor.soderqvist@est.tech>
2024-09-03 10:47:23 +02:00
NAM UK KIM
f143ffd2a5
Fix typo in valkey-cli.c (#979)
Change from replicsa to replicas in valkey-cli.c

Signed-off-by: NAM UK KIM <namuk2004@naver.com>
2024-09-03 14:58:09 +08:00
Ping Xie
981f977abf
Improve type safety and refactor dict entry handling (#749)
This pull request introduces several changes to improve the type safety
of Valkey's dictionary implementation:

- Getter/Setter Macros: Implemented macros `DICT_SET_VALUE` and
`DICT_GET_VALUE` to centralize type casting within these macros. This
change emulates the behavior of C++ templates in C, limiting type
casting to specific low-level operations and preventing it from being
spread across the codebase (see the sketch after this list).

- Reduced Assert Overhead: Removed unnecessary asserts from critical hot
paths in the dictionary implementation.

- Consistent Naming: Standardized the naming of dictionary entry types.
For example, all dictionary entry types start their names with
`dictEntry`.
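
As a hedged sketch of what such centralizing macros can look like (the macro names come from the commit; the bodies and the example entry type are illustrative, not the actual implementation):

```c
/* The casts live in exactly one place instead of at every call site. */
#define DICT_SET_VALUE(entry, field, val) \
    do { (entry)->field = (void *)(val); } while (0)
#define DICT_GET_VALUE(type, entry, field) ((type)((entry)->field))

struct exampleEntry { void *v; }; /* illustrative entry type */

static inline void setValue(struct exampleEntry *de, long *p) {
    DICT_SET_VALUE(de, v, p);
}
static inline long *getValue(struct exampleEntry *de) {
    return DICT_GET_VALUE(long *, de, v);
}
```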


Fix #737

---------

Signed-off-by: Ping Xie <pingxie@google.com>
Signed-off-by: Ping Xie <pingxie@outlook.com>
Co-authored-by: Madelyn Olson <madelyneolson@gmail.com>
2024-09-02 18:28:15 -07:00
Madelyn Olson
3e14516d86
Initialize all the fields for the test kvstore (#982)
Follow up to https://github.com/valkey-io/valkey/pull/966, which didn't
update the kvstore tests. I'm not actually entirely clear why it fixes
it, but the consistency prevents the crash very reliably so will merge
it now and maybe see if Zhao has a better explanation.

---------

Signed-off-by: Madelyn Olson <madelyneolson@gmail.com>
2024-09-02 11:01:59 -07:00
Amit Nagler
5fdb47c2e2
Add configuration hide-user-data-from-log to hide user data from server logs (#877)
Implement data masking for user data in server logs and diagnostic output. This change prevents potential exposure of confidential information, such as PII, and enhances privacy protection. It masks all command arguments, client names, and client usernames.

Added a new hide-user-data-from-log configuration item, default yes.

---------

Signed-off-by: Amit Nagler <anagler123@gmail.com>
2024-09-02 09:50:36 -07:00
Binbin
5693fe4664
Fix set expire test due to the new lazyfree configs changes (#980)
The test failed because of these two PRs: #865 and #913.

Signed-off-by: Binbin <binloveplay1314@qq.com>
2024-09-02 22:43:09 +08:00
zhaozhao.zz
32116d09bb
Use metadata to handle the reference relationship between kvstore and dict (#966)
Feature `one-dict-per-slot` refactors the database, and part of it
involved splitting the rehashing list from the global level back to the
database level, or more specifically, the kvstore level. This change is
fine, and it also simplifies the process of swapping databases, which is
good. And it should not have a major impact on the efficiency of
incremental rehashing.

To implement the kvstore-level rehashing list, each `dict` under the
`kvstore` needs to know which `kvstore` it belongs to. However, kvstore did
not insert the reference relationship into the `dict` itself, instead,
it placed it in the `dictType`. In my view, this is a somewhat odd way.
Theoretically, `dictType` is just a collection of function handles, a
kind of virtual type that can be referenced globally, not an entity. But
now the `dictType` is instantiated, with each `kvstore` owning an actual
`dictType`, which in turn holds a reverse reference to the `kvstore`'s
resource pointer. This design is somewhat uncomfortable for me.

I think the `dictType` should not be instantiated. The references
between actual resources (`kvstore` and `dict`) should occur between
specific objects, rather than force materializing the `dictType`, which
is supposed to be virtual.

---------

Signed-off-by: zhaozhao.zz <zhaozhao.zz@alibaba-inc.com>
2024-09-02 22:35:24 +08:00
Binbin
70624ea63d
Change all the lazyfree configurations to yes by default (#913)
## Set replica-lazy-flush and lazyfree-lazy-user-flush to yes by
default.
There are many problems with running flush synchronously. Even in
single-CPU environments, the thread manager should balance between
freeing memory and serving incoming requests.

## Set lazy eviction, expire, server-del, user-del to yes by default
We now have a del and a lazyfree del, and we have these configuration
items to control them: lazyfree-lazy-eviction, lazyfree-lazy-expire,
lazyfree-lazy-server-del, lazyfree-lazy-user-del. In most cases lazyfree
is better since it reduces the risk of blocking the main thread, and
because we have lazyfreeGetFreeEffort, objects with a high effort
(currently 64) will use lazyfree.

Part of #653.

---------

Signed-off-by: Binbin <binloveplay1314@qq.com>
2024-09-02 07:07:17 -07:00
Madelyn Olson
089048d364
Fix zipmap test null pointer (#975)
The previous test does a strncmp on a NULL, which is not valid. It
should be using a zero-length string instead. Addresses
https://github.com/valkey-io/valkey/actions/runs/10649272046/job/29519233939.

Signed-off-by: Madelyn Olson <madelyneolson@gmail.com>
2024-09-01 12:05:37 +02:00
Binbin
e3af1a30e4
Fast path in SET if the expiration time is expired (#865)
If the expiration time passed to SET is already expired, for example
because it expired due to a machine time change (e.g. DST) or because a
wrong expiration time was passed in, we don't need to set the key and
wait for the active expire scan to delete it.

Compared with the previous behavior:
1. If the key does not exist, previously we would set the key and wait
for the active expire to delete it, so it was a set + del from the
perspective of propagation. Now we do not set the key and just return,
so it is a NOP.

2. If the key exists, previously we would set the key and wait
for the active expire to delete it, so it was a set + del from the
perspective of propagation. Now we delete it and return, so it is a
del.

A new deleteExpiredKeyFromOverwriteAndPropagate function is added
to reduce the duplicated code.
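
A toy model of the resulting decision table (all names hypothetical, not the server code):

```c
#include <stdbool.h>
#include <stdio.h>

typedef enum { ACT_SET, ACT_NOP, ACT_DEL } action;

/* If the absolute expiration is already in the past, never store. */
static action set_with_expire(bool key_exists, long long expire_ms,
                              long long now_ms) {
    if (expire_ms <= now_ms) return key_exists ? ACT_DEL : ACT_NOP;
    return ACT_SET;
}

int main(void) {
    printf("%d\n", set_with_expire(false, 100, 200)); /* past TTL, no key: NOP */
    printf("%d\n", set_with_expire(true, 100, 200));  /* past TTL, key: DEL */
    printf("%d\n", set_with_expire(true, 300, 200));  /* future TTL: SET */
    return 0;
}
```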

Signed-off-by: Binbin <binloveplay1314@qq.com>
Co-authored-by: Madelyn Olson <madelyneolson@gmail.com>
2024-08-31 22:39:07 +08:00
Viktor Söderqvist
5d458c6292
Delete unused parts of zipmap (#973)
Deletes zipmapSet, zipmapGet, etc. Only keep iterator and validate
integrity, what we use when loading an old RDB file.

Adjust unit tests to not use zipmapSet, etc.

Solves a build failure where when compiling with fortify source.

---------

Signed-off-by: Viktor Söderqvist <viktor.soderqvist@est.tech>
2024-08-31 15:42:44 +02:00
Binbin
fea49bce2c
Fix timing issue in replica migration test (#968)
The reason is that server 3 still has server 7 as its replica
due to a short wait; the wait was not enough, and we should wait for
the server to lose its replica.
```
*** [err]: valkey-cli make source node ignores NOREPLICAS error when doing the last CLUSTER SETSLOT
Expected '{127.0.0.1 21497 267}' to be equal to '' (context: type eval line 34 cmd {assert_equal [lindex [R 3 role] 2] {}} proc ::test)
```

Signed-off-by: Binbin <binloveplay1314@qq.com>
2024-08-30 19:58:46 +08:00
zhaozhao.zz
743f5ac2ae
standalone -REDIRECT handles special case of MULTI context (#895)
In standalone mode, when a `-REDIRECT` error occurs, special handling is
required if the client is in the `MULTI` context.

We have adopted the same handling method as the cluster mode:

1. If a command in the transaction encounters a `REDIRECT` at the time
of queuing, the execution of `EXEC` will return an `EXECABORT` error (we
expect the client to redirect and discard the transaction upon receiving
a `REDIRECT`). That is:

    ```
    MULTI    ==>  +OK
    SET x y  ==>  -REDIRECT
    EXEC     ==>  -EXECABORT
    ```
2. If all commands are successfully queued (i.e., `QUEUED` results are
received) but a redirect is detected during `EXEC` execution (such as a
primary-replica switch), a `REDIRECT` is returned to instruct the client
to perform a redirect. That is:

    ```
    MULTI    ==>  +OK
    SET x y  ==>  +QUEUED
    failover
    EXEC     ==>  -REDIRECT
    ```

---------

Signed-off-by: zhaozhao.zz <zhaozhao.zz@alibaba-inc.com>
2024-08-30 10:17:53 +08:00
Shivshankar
2b76c8fbe2
Migrate zipmap unit test to new framework (#474)
Migrate the zipmap unit test to the new unit test framework, parent
ticket #428.

---------

Signed-off-by: Shivshankar-Reddy <shiva.sheri.github@gmail.com>
Signed-off-by: hwware <wen.hui.ware@gmail.com>
Co-authored-by: hwware <wen.hui.ware@gmail.com>
2024-08-29 11:17:53 -04:00
Binbin
ecbfb6a7ec
Fix reconfiguring sub-replica causing data loss when myself change shard_id (#944)
When reconfiguring a sub-replica, there may be a case where the
sub-replica uses the old offset, wins the election, and causes data loss
if the old primary went down.

In this case, the sender is myself's primary; when executing
updateShardId, not only is the sender's shard_id updated, but also the
shard_id of myself, causing the subsequent areInSameShard check, that
is, the full_sync_required check, to fail.

As part of the recent fix of #885, the sub-replica needs to decide whether
a full sync is required or not when switching shards. This shard membership
check is supposed to be done against sub-replica's current shard_id, which
however was lost in this code path. This then leads to sub-replica joining
the other shard with a completely different and incorrect replication history.

This is the only place where replicaof state can be updated on this path
so the most natural fix would be to pull the chain replication reduction
logic into this code block and before the updateShardId call.

This one follow #885 and closes #942.

Signed-off-by: Binbin <binloveplay1314@qq.com>
Co-authored-by: Ping Xie <pingxie@outlook.com>
2024-08-29 22:39:53 +08:00
zhaozhao.zz
4a9b4f667c
free client's multi state when it becomes dirty (#961)
Release the client's MULTI state when the transaction becomes dirty to
save memory.

---------

Signed-off-by: zhaozhao.zz <zhaozhao.zz@alibaba-inc.com>
2024-08-29 19:20:53 +08:00
Ping Xie
ad0ede302c
Exclude '.' and ':' from isValidAuxChar's banned charset (#963)
Fix a bug in isValidAuxChar where valid characters '.' and ':' were
incorrectly included in the banned charset. This issue affected the
validation of auxiliary fields in the nodes.conf file used by Valkey in
cluster mode, particularly when handling IPv4 and IPv6 addresses. The
code now correctly allows '.' and ':' as valid characters, ensuring
proper handling of these fields. Comments were added to clarify the use
of the banned charset.
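
To illustrate the shape of the validation (the banned set below is invented for the example; only the point that '.' and ':' must be absent from it reflects the fix):

```c
#include <string.h>

/* Illustrative banned set only: '.' and ':' are NOT in it, so the
 * characters used by IPv4/IPv6 addresses pass validation. */
static int is_valid_aux_char(char c) {
    return strchr("!#$%^&*()[]{}|;\"'<>,", c) == NULL;
}
```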
 
Related to #736

---------

Signed-off-by: Ping Xie <pingxie@google.com>
2024-08-28 23:35:31 -07:00
Binbin
75b824052d
Revert make KEYS to be an exact match if there is no pattern (#964)
In #792, the time complexity became ambiguous, fluctuating between
O(1) and O(n), which is a significant difference. And we agree that
uncertainty can potentially bring disaster to the business; the right
thing to do is to persuade users to use EXISTS instead of KEYS in this
case, to do the right thing the right way, rather than accommodating
this incorrect usage.

This reverts commit d66a06e8183818c035bb78706f46fd62645db07e.
This reverts #792.

Signed-off-by: Binbin <binloveplay1314@qq.com>
2024-08-29 10:58:19 +08:00
Viktor Söderqvist
25dd943087
Delete TLS.md and update README.md about tests (#960)
Most of the content of TLS.md has already been copied to README.md in
#927.

The description of how to run tests with TLS is moved to
tests/README.md.

Descriptions of the additional scripts runtest-cluster, runtest-sentinel
and runtest-module are added in tests/README.md.

Links to tests/README.md and src/unit/README.md are added in the
top-level README.md along with a brief overview of the `make test-*`
commands.

Signed-off-by: Viktor Söderqvist <viktor.soderqvist@est.tech>
2024-08-28 21:17:04 +02:00
Viktor Söderqvist
927c2a8cd1
Delete files MANIFESTO, BUGS and INSTALL (#958)
The MANIFESTO is not Valkey's manifesto and it doesn't even mention open
source. Let's write another one later, or some other document about our
project principles.

The other two files are one-line files with no relevant info. They're
polluting the file listing at root level. It's the first thing you see
when you start exploring the repo for the first time.

Signed-off-by: Viktor Söderqvist <viktor.soderqvist@est.tech>
2024-08-28 20:04:23 +02:00
I-Hsin Cheng
6172907094
Migrate the contents of TLS.md into README.md (#927)
Migrate the contents of TLS.md into TLS sections covering building,
running, and detailed support. The TODO list in TLS.md is almost done,
except that the benchmark support implementation is still not the best
approach; it should migrate to hiredis async mode.

Closes #888

---------

Signed-off-by: I Hsin Cheng <richard120310@gmail.com>
Signed-off-by: Viktor Söderqvist <viktor.soderqvist@est.tech>
Co-authored-by: Viktor Söderqvist <viktor.soderqvist@est.tech>
2024-08-28 12:43:29 +02:00
Ping Xie
2b71a78241
Add comment explaining log file reopening for rotation support (#956) 2024-08-27 21:00:17 -07:00
mwish
744b13e302
Using intrinsics to optimize counting HyperLogLog trailing bits (#846)
Godbolt link: https://godbolt.org/z/3YPvxsr5s

__builtin_ctz would generate shorter code than hand-written loop.
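
A minimal, self-contained comparison of the two approaches (illustrative, not the Valkey patch; `ctz_loop` is a stand-in for the hand-written loop):

```c
#include <stdint.h>
#include <stdio.h>

static int ctz_loop(uint64_t x) {
    int n = 0;
    while ((x & 1) == 0) { /* assumes x != 0 */
        n++;
        x >>= 1;
    }
    return n;
}

int main(void) {
    uint64_t hash = 0x50; /* binary 1010000 -> 4 trailing zero bits */
    printf("loop: %d, intrinsic: %d\n", ctz_loop(hash),
           __builtin_ctzll(hash));
    return 0;
}
```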

---------

Signed-off-by: mwish <maplewish117@gmail.com>
Signed-off-by: Binbin <binloveplay1314@qq.com>
Signed-off-by: Madelyn Olson <madelyneolson@gmail.com>
Co-authored-by: Binbin <binloveplay1314@qq.com>
Co-authored-by: Madelyn Olson <madelyneolson@gmail.com>
2024-08-27 20:44:32 -07:00
Binbin
4fe8320711
Add pause path coverage to replica migration tests (#937)
In #885, we only add a shutdown path, there is another path
is that the server might got hang by slowlog. This PR added
the pause path coverage to cover it.

Signed-off-by: Binbin <binloveplay1314@qq.com>
2024-08-28 11:08:27 +08:00
Lipeng Zhu
076bf6605f
Move prepareClientToWrite out of loop for lrange command to reduce the redundant call. (#860)
## Description 
When I explored the cycle distribution for the `lrange` test
(`valkey-benchmark -p 9001 -t lrange -d 100 -r 1000000 -n 1000000 -c 50
--threads 4`), I found that `prepareClientToWrite` and
`clientHasPendingReplies` could be reduced to a single call outside the
loop instead of being called within it; ideally we can gain 3%
performance. The corresponding `LRANGE_100`, `LRANGE_300`, `LRANGE_500`,
`LRANGE_600` tests show a ~2% - 3% performance boost, and the benchmark
tests prove it helps.

This patch moves `prepareClientToWrite` and its child
`clientHasPendingReplies` out of the loop to reduce the function call
overhead.
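
A generic, self-contained illustration of the hoist (toy stand-ins, not the networking.c code):

```c
#include <stddef.h>
#include <stdio.h>

static int prepare_once(void) { return 0; }        /* stand-in check */
static void emit(size_t i) { printf("%zu\n", i); } /* stand-in reply */

/* Before the change, the check ran on every iteration; its result
 * cannot change inside the loop, so it runs once up front instead. */
static void reply_range(size_t n) {
    if (prepare_once() != 0) return;
    for (size_t i = 0; i < n; i++) emit(i);
}

int main(void) { reply_range(3); return 0; }
```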

---------

Signed-off-by: Lipeng Zhu <lipeng.zhu@intel.com>
2024-08-27 19:11:09 -07:00
Binbin
6a84e06b05
Wait for the role change and fix the timing issue in the new test (#947)
The test might run fast enough that there is no change in the role yet,
causing the test to fail. Adding a wait to avoid the timing issue:
```
*** [err]: valkey-cli make source node ignores NOREPLICAS error when doing the last CLUSTER SETSLOT
Expected '{127.0.0.1 23154 267}' to be equal to '' (context: type eval line 24 cmd {assert_equal [lindex [R 3 role] 2] {}} proc ::test)
```

Signed-off-by: Binbin <binloveplay1314@qq.com>
2024-08-28 09:51:10 +08:00
Vadym Khoptynets
4f29ad4583
Use sdsAllocSize instead of sdsZmallocSize (#923)
sdsAllocSize returns the correct size without consulting the
allocator, which is much faster than consulting the allocator.
The only exception is SDS_TYPE_5, for which it has to
consult the allocator.
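
Conceptually (a toy header, far simpler than the real sds headers), the allocation size can be derived from fields the string already carries:

```c
#include <stddef.h>
#include <stdint.h>

/* Toy header-carrying string: len/alloc are recorded at (re)allocation
 * time, so the total size needs no allocator query. */
typedef struct toyString {
    uint32_t len;   /* used bytes */
    uint32_t alloc; /* usable buffer bytes, excluding the terminator */
    char buf[];
} toyString;

static size_t toyAllocSize(const toyString *s) {
    return sizeof(*s) + s->alloc + 1; /* header + buffer + '\0' */
}
```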

This PR also sets the alloc field correctly for embedded string
objects. It assumes that no allocator would allocate a buffer larger
than `259 + sizeof(robj)` for an embedded string. We use embedded
strings for strings up to 44 bytes. If this assumption is wrong, the
whole function would require a rewrite. In the general case, an sds type
adjustment might be needed. Such logic should go in sds.c.

---------

Signed-off-by: Vadym Khoptynets <vadymkh@amazon.com>
2024-08-27 14:43:01 -07:00
Amit Nagler
1ff2a3b6ae
Remove dual-channel-replication Feature Flag's Protection (#908)
Currently, the `dual-channel-replication` feature flag is immutable if
`enable-protected-configs` is enabled, which is the default behavior.
This PR proposes to make the `dual-channel-replication` flag mutable,
allowing it to be changed dynamically without restarting the cluster.

**Motivation:**
The ability to change the `dual-channel-replication` flag dynamically is
essential for testing and validating the feature on real clusters
running in production environments. By making the flag mutable, we can
enable or disable the feature without disrupting the cluster's
operations, facilitating easier testing and experimentation.
Additionally, this change would provide more flexibility for users to
enable or disable the feature based on their specific requirements or
operational needs without requiring a cluster restart.

---------

Signed-off-by: naglera <anagler123@gmail.com>
2024-08-27 10:18:48 -07:00
Viktor Söderqvist
54c0f743dd
Connection minor fixes (#953)
1. Remove redundant connIncrRefs/connDecrRefs

    In socket.c, the reference counter is incremented before calling
callHandler, but the same reference counter is also incremented inside
callHandler before calling the actual callback.

        static inline int callHandler(connection *conn, ConnectionCallbackFunc handler) {
            connIncrRefs(conn);
            if (handler) handler(conn);
            connDecrRefs(conn);
            ...
        }

    This commit removes the redundant incr/decr calls in socket.c

2. Correct return value of connRead for TLS when peer closed

    According to comments in connection.h, connRead returns 0 when the peer
has closed the connection. This patch corrects the return value for TLS
connections. (Without this patch, it returns -1 which means error.)

    There is an observable difference in what is logged in the verbose
level: "Client closed connection" vs "Reading from client: (null)".

---------

Signed-off-by: Viktor Söderqvist <viktor.soderqvist@est.tech>
2024-08-27 16:11:33 +02:00
uriyage
04d76d8b02
Improve multithreaded performance with memory prefetching (#861)
This PR utilizes the IO threads to execute commands in batches, allowing
us to prefetch the dictionary data in advance.

After making the IO threads asynchronous and offloading more work to
them in the first 2 PRs, the `lookupKey` function becomes the main
bottleneck, taking about 50% of the main-thread time (tested with the
SET command). This is because the Valkey dictionary is a straightforward
but inefficient chained hash implementation. While traversing the hash
linked lists, every access to either a dictEntry structure, pointer to
key, or a value object requires, with high probability, an expensive
external memory access.

### Memory Access Amortization

Memory Access Amortization (MAA) is a technique designed to optimize the
performance of dynamic data structures by reducing the impact of memory
access latency. It is applicable when multiple operations need to be
executed concurrently. The principle behind it is that for certain
dynamic data structures, executing operations in a batch is more
efficient than executing each one separately.

Rather than executing operations sequentially, this approach interleaves
the execution of all operations. This is done in such a way that
whenever a memory access is required during an operation, the program
prefetches the necessary memory and transitions to another operation.
This ensures that when one operation is blocked awaiting memory access,
other memory accesses are executed in parallel, thereby reducing the
average access latency.

We applied this method in the development of `dictPrefetch`, which takes
as parameters a vector of keys and dictionaries. It ensures that all
memory addresses required to execute dictionary operations for these
keys are loaded into the L1-L3 caches when executing commands.
Essentially, `dictPrefetch` is an interleaved execution of dictFind for
all the keys.


**Implementation details**

When the main thread iterates over the `clients-pending-io-read`, for
clients with ready-to-execute commands (i.e., clients for which the IO
thread has parsed the commands), a batch of up to 16 commands is
created. Initially, the command's argv, which were allocated by the IO
thread, is prefetched to the main thread's L1 cache. Subsequently, all
the dict entries and values required for the commands are prefetched
from the dictionary before the command execution. Only then will the
commands be executed.
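
A minimal sketch of the interleaving idea using GCC/Clang's `__builtin_prefetch` (a generic chained-hash node, not Valkey's dictPrefetch):

```c
#include <stddef.h>

typedef struct node { struct node *next; void *key; void *value; } node;

/* Phase 1: issue prefetches for every bucket head in the batch so the
 * loads overlap. Phase 2: by the time we dereference, the lines are
 * likely cached already, and we immediately prefetch the next hop. */
static void prefetch_batch(node *heads[], size_t n) {
    for (size_t i = 0; i < n; i++)
        if (heads[i]) __builtin_prefetch(heads[i]);
    for (size_t i = 0; i < n; i++)
        if (heads[i] && heads[i]->next) __builtin_prefetch(heads[i]->next);
}
```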

---------

Signed-off-by: Uri Yagelnik <uriy@amazon.com>
2024-08-26 21:10:44 -07:00
Binbin
694246cfab
Drop the outdated script replication example comments (#951)
This example was for script replication, which was
completely removed in 7.0, so it is outdated now.

Signed-off-by: Binbin <binloveplay1314@qq.com>
2024-08-27 12:04:47 +08:00
Binbin
d66a06e818
Make KEYS to be an exact match if there is no pattern (#792)
Although KEYS is a dangerous command and we recommend people
avoid using it, some people who are not familiar with it
still use it, and some even use KEYS with no pattern at all.

When KEYS is used with no pattern, we can convert it to an
exact match to avoid iterating over all data.
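
A small sketch of the check this optimization relied on (hypothetical helper; the metacharacters shown are the usual glob ones):

```c
#include <stdbool.h>
#include <string.h>

/* A pattern with no glob metacharacters can only match itself, so the
 * command can be answered with one exact lookup instead of a full scan. */
static bool pattern_is_literal(const char *pattern) {
    return strpbrk(pattern, "*?[\\") == NULL;
}
```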

Signed-off-by: Binbin <binloveplay1314@qq.com>
2024-08-27 12:04:27 +08:00
xu0o0
73698fa028
Fix invalid escape sequence in utils, minor cleanup in python script (#948)
According to the Python document[1], any invalid escape sequences in
string literals now generate a DeprecationWarning (SyntaxWarning as of
3.12) and in the future this will become a SyntaxError.

This change uses Python's raw string notation for regular expression
patterns to avoid it.

[1]: https://docs.python.org/3.10/library/re.html

Signed-off-by: haoqixu <hq.xu0o0@gmail.com>
2024-08-26 22:53:35 +08:00
Binbin
9f4b1adbea
Add explicit assert to ensure thread_shared_qb won't expand (#938)
Although this won't happen now, adding this statement explicitly.

Signed-off-by: Binbin <binloveplay1314@qq.com>
2024-08-25 12:03:34 +08:00
Binbin
c7d1daea05
Add epoch information to failover auth denied logs (#816)
When we deny a failover vote, sometimes due to network issues or
some blocking operations, the arrival time of the FAILOVER_AUTH_REQUEST
packet is very uncertain. Since there is no epoch
information in these logs, it is hard to correlate the log
with other logs.

Signed-off-by: Binbin <binloveplay1314@qq.com>
2024-08-24 18:03:24 +08:00
NAM UK KIM
0053429a02
Update "Total" message and used_memory_human log information in serverCron() function (#594)
At the VERBOSE/DEBUG log level, which is output once every 5 seconds,
this adds the "Total" message for all clients and shows memory
usage (used_memory) together with used_memory_human.
Also, it seems clearer to show the "total" number of keys and the number
of volatile keys among all keys.

---------

Signed-off-by: NAM UK KIM <namuk2004@naver.com>
2024-08-23 18:02:18 -07:00
Ayush Sharma
b48596a914
Add support for setting the group on a unix domain socket (#901)
Add a new optional, immutable string config called `unixsocketgroup`.
Change the group of the unix socket to `unixsocketgroup` after `bind()`
if specified.
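
A hedged sketch of the mechanism (POSIX calls; the helper name is hypothetical, not the Valkey code):

```c
#include <grp.h>
#include <sys/types.h>
#include <unistd.h>

/* After bind() creates the socket file, resolve the configured group
 * name and hand ownership of the path to it. */
static int set_unix_socket_group(const char *path, const char *group) {
    struct group *grp = getgrnam(group);
    if (grp == NULL) return -1;                 /* unknown group */
    return chown(path, (uid_t)-1, grp->gr_gid); /* -1 keeps the owner */
}
```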

Adds tests to validate the behavior.

Fixes #873.

Signed-off-by: Ayush Sharma <mrayushs933@gmail.com>
2024-08-23 11:52:08 -07:00
Madelyn Olson
829aa7fe3c
Remove accurate from extra test tag (#935)
Today if we attach the "run-extra-tests" tag, it adds at least 20
minutes because the dump-fuzzer test runs with full accuracy. This
fuzzer is useful, but probably only really needed for the daily, so
removing it from the PRs. We still run the fuzzers, just not for as
long.

Signed-off-by: Madelyn Olson <madelyneolson@gmail.com>
2024-08-23 11:05:41 -07:00
Binbin
8045994972
valkey-cli make source node ignores NOREPLICAS when doing the last CLUSTER SETSLOT (#928)
This fixes #899. In that issue, the primary is cluster-allow-replica-migration no
and its replica is cluster-allow-replica-migration yes.

And during the slot migration:
1. The primary calls blockClientForReplicaAck, waiting for its replica.
2. Its replica reconfigures itself as a replica of another shard due to
replica migration and disconnects from the old primary.
3. The old primary never got the chance to receive the ack, so it got a
timeout and got a NOREPLICAS error.

In this case, the replicas might automatically migrate to another
primary, resulting in the client being unblocked with the NOREPLICAS
error. Since the configuration will eventually propagate itself, we can
safely ignore this error on the source node.

Signed-off-by: Binbin <binloveplay1314@qq.com>
2024-08-23 16:22:30 +08:00
Binbin
5d97f5133c
Fix CLUSTER SETSLOT block and unblock error when all replicas are down (#879)
In CLUSTER SETSLOT propagation logic, if the replicas are down, the
client will get block during command processing and then unblock
with `NOREPLICAS Not enough good replicas to write`.

The reason is that all replicas are down (or some are down), but
myself->num_replicas includes all replicas, so the client will
get blocked and always hit the timeout.

We should only wait for those online replicas, otherwise the waiting
propagation will always timeout since there are not enough replicas.
The admin can easily check if there are replicas that have been down for
an extended period of time. If they decide to move forward anyway, we
should not block it. If a replica failed right before the replication and
was not included in it, it would also be unlikely to win the election.

Signed-off-by: Binbin <binloveplay1314@qq.com>
Co-authored-by: Ping Xie <pingxie@google.com>
2024-08-23 16:21:53 +08:00
Yunxiao Du
0a11c4a140
Delete redundant declaration clusterNodeCoversSlot and countKeysInSlot (#930)
Delete the redundant declarations; clusterNodeCoversSlot and countKeysInSlot
have already been declared in cluster.h.

Signed-off-by: Yunxiao Du <me@jackdu.cn>
2024-08-23 12:17:27 +08:00
Madelyn Olson
b12668af7a
Revert repl backlog size back to 1mb for dual channel tests (#934)
There is a test that assumes that the backlog will get overrun, but
because of the recent changes to the default it no longer fails. It
seems like it is a bit flaky now though, so resetting the value in the
test back to 1mb. (This relates to the CoB of 1100k, so it should
consistently work with a 1mb limit.)

Signed-off-by: Madelyn Olson <madelyneolson@gmail.com>
2024-08-22 15:35:28 -07:00
Wen Hui
959dd3485b
Decline unsubscribe related command in non-subscribed mode (#759)
Previously, when clients ran the unsubscribe, sunsubscribe and
punsubscribe commands in non-subscribed mode, they returned 0.
Indeed this is a bug; we should not allow clients to run these kinds of
commands there.

Thus, this PR fixes this bug, but it is a breaking change for existing
clients.

---------

Signed-off-by: hwware <wen.hui.ware@gmail.com>
2024-08-22 11:21:33 -04:00
Binbin
8d9b8c9d3d
Make runtest-cluster support --io-threads option (#933)
In #764, we added a --io-threads mode to the tests, but forgot
to handle runtest-cluster; they are different frameworks.

Currently runtest-cluster does not support tags, and we
don't have a plan to support them. And since the cluster tests
currently do not have any io-threads tests, this PR just aligns the
--io-threads option with #764.

Signed-off-by: Binbin <binloveplay1314@qq.com>
2024-08-22 11:21:06 -04:00
Binbin
08aaeea4b7
Avoid to re-establish replication if node is already myself primary in CLUSTER REPLICATE (#884)
If n is already myself's primary, there is no need to re-establish the
replication connection.

In the past we allowed a replica node to reconnect with its primary via
this CLUSTER REPLICATE command, and it would use psync. But since #885, we
will assume that a full sync is needed in this case, so if we don't do
this, the replica will always use a full sync.

Signed-off-by: Binbin <binloveplay1314@qq.com>
Co-authored-by: Ping Xie <pingxie@google.com>
2024-08-22 11:00:18 +08:00
uriyage
39f8bcb91b
Skip tracking clients OOM test when I/O threads are enabled (#764)
Fix feedback loop in key eviction with tracking clients when using I/O
threads.

Current issue:
Evicting keys while tracking clients or keyspace notifications exist
creates a feedback loop when using I/O threads:

While evicting keys we send tracking async writes to I/O threads,
preventing immediate release of tracking clients' COB memory
consumption.

Before the I/O thread finishes its write, we recheck used_memory, which
now includes the tracking clients' COB and thus continue to evict more
keys.

**Fix:**
We will skip the test for now while IO threads are active. We may
consider avoiding sending writes in `processPendingWrites` to I/O
threads for tracking clients when we are out of memory.

---------

Signed-off-by: Uri Yagelnik <uriy@amazon.com>
Signed-off-by: Madelyn Olson <madelyneolson@gmail.com>
Co-authored-by: Madelyn Olson <madelyneolson@gmail.com>
2024-08-21 17:02:57 -07:00
Harkrishn Patro
002d052eef
Update README.md file to reference valkey.io (#931)
Update README.md since the project is no longer under construction, and
can reference the main website.

---------

Signed-off-by: Harkrishn Patro <harkrisp@amazon.com>
2024-08-21 14:19:50 -07:00
Binbin
a1ac459ef1
Set repl-backlog-size from 1mb to 10mb by default (#911)
The default repl-backlog-size of 1mb is too small in most cases; network
transmission and bandwidth performance have improved rapidly over the
past ten-plus years.

The bigger the replication backlog, the longer the replica can endure
the disconnect and later be able to perform a partial resynchronization.

Part of #653.

---------

Signed-off-by: Binbin <binloveplay1314@qq.com>
2024-08-21 11:59:02 -04:00
Wen Hui
b8dd4fbbf7
Fix Error in Daily CI -- reply-schemas-validator (#922)
Just add one more test for the command "sentinel IS-PRIMARY-DOWN-BY-ADDR"
to make the reply-schemas-validator run successfully.

Note: test result here
https://github.com/hwware/valkey/actions/runs/10457516111

Signed-off-by: hwware <wen.hui.ware@gmail.com>
2024-08-21 09:36:02 -04:00
zhenwei pi
2673320b66
RDMA: Support user keepalive command (#916)
If the client side crashes for any reason or exits normally, the kernel
will try to disconnect RDMA QPs. The kernel on the server side then
receives CM packets, and valkey-server handles the CM disconnected event
and closes the connection.

However, there is a lack of a keepalive mechanism in the RDMA transport
layer. Once the kernel of the client side crashes, the server side will
not be notified. To avoid this issue, the valkey server sends a
keepalive command periodically to detect any dead QPs.

An example of mlx-cx5:

```
 # RDMA: CQ handle error status: transport retry counter exceeded[0xc], opcode : 0x0
 # RDMA: CQ handle error status: transport retry counter exceeded[0xc], opcode : 0x0
 # RDMA: CQ handle error status: Work Request Flushed Error[0x5], opcode : 0x0
 # RDMA: CQ handle error status: Work Request Flushed Error[0x5], opcode : 0x0
 # RDMA: CQ handle error status: Work Request Flushed Error[0x5], opcode : 0x0
 # RDMA: CQ handle error status: Work Request Flushed Error[0x5], opcode : 0x0
```

Signed-off-by: zhenwei pi <pizhenwei@bytedance.com>
2024-08-21 10:38:34 +02:00
Binbin
e1b3629186 Fix data loss when a replica does a failover with an old historical repl offset (#885)
Our current replica can initiate a failover without restriction when
it detects that the primary node is offline. This is generally not a
problem. However, consider the following scenarios:

1. In slot migration, a primary loses its last slot and then becomes
a replica. When it is fully synchronized with the new primary, the new
primary goes down.

2. In the CLUSTER REPLICATE command, a replica becomes a replica of another
primary. When it is fully synchronized with the new primary, the new
primary goes down.

In the above scenario, case 1 may cause the empty primary to be elected
as the new primary, resulting in primary data loss. Case 2 may cause the
non-empty replica to be elected as the new primary, resulting in data
loss and confusion.

The reason is that we have cached primary logic, which is used for psync.
In the above scenario, when clusterSetPrimary is called, myself will cache
server.primary in server.cached_primary for psync. In replicationGetReplicaOffset,
we get server.cached_primary->reploff for the offset, gossip it and rank
it, which causes the replica to use the old historical offset to
initiate a failover; it gets a good rank, initiates the election first,
and then is elected as the new primary.

The main problem here is that when the replica has not completed full
sync, it may get the historical offset in replicationGetReplicaOffset.

The fix is to clear cached_primary in these places where full sync is
obviously needed, and let the replica use offset == 0 to participate
in the election. In this way, this unhealthy replica has a worse rank
and is not easy to be elected.

Of course, it is possible that it will be elected with offset == 0.
In the future, we may need to prohibit the replica with offset == 0
from having the right to initiate elections.

Another point worth mentioning, in above cases:
1. In the ROLE command, the replica status will be handshake, and the
offset will be -1.
2. Before this PR, in the CLUSTER SHARD command, the replica status will
be online, and the offset will be the old cached value (which is wrong).
3. After this PR, in the CLUSTER SHARD, the replica status will be loading,
and the offset will be 0.

Signed-off-by: Binbin <binloveplay1314@qq.com>
2024-08-21 13:11:21 +08:00
Binbin
829243e76b
Correct RDB_EOF_MARK_SIZE usage where EOF mark is relevant (#925)
In these places we should use RDB_EOF_MARK_SIZE, but we mixed
it with CONFIG_RUN_ID_SIZE. This is not an issue since they are
all 40, just a cleanup.

Signed-off-by: Binbin <binloveplay1314@qq.com>
2024-08-21 00:00:29 +08:00
ranshid
e2ab7ffd89
Make use of a single listNode pointer for blocking utility lists (#919)
Saves some memory (one pointer) in the client struct.

Since a client cannot be blocked multiple times, we can assume
it will be held in only one extra utility list, so it is ok to maintain
a union of these listNode references. 
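
A hedged sketch of the layout change (field names illustrative):

```c
typedef struct listNode listNode; /* opaque here */

/* A blocked client sits in at most one of these utility lists at a
 * time, so the back-references can share one pointer's worth of
 * storage. */
struct blockRefs {
    union {
        listNode *client_waiting_acks_node; /* WAIT / WAITAOF */
        listNode *postponed_node;           /* other utility list */
    } u;
};
```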

Signed-off-by: Ran Shidlansik <ranshid@amazon.com>
Signed-off-by: ranshid <88133677+ranshid@users.noreply.github.com>
Signed-off-by: Binbin <binloveplay1314@qq.com>
Co-authored-by: Viktor Söderqvist <viktor.soderqvist@est.tech>
Co-authored-by: Binbin <binloveplay1314@qq.com>
2024-08-20 18:54:53 +08:00
gmbnomis
7795152fff
Fix valgrind timing issue failure in replica-redirect test (#917)
Wait for the replica to become online before starting the actual test.

Signed-off-by: Simon Baatz <gmbnomis@gmail.com>
2024-08-18 21:20:53 +08:00
Binbin
70b9285802
Optimize linear search of WAIT and WAITAOF when unblocking the client (#787)
Currently, if the client enters a blocked state, it will be
added to the server.clients_waiting_acks list. When the client
is unblocked, that is, when unblockClient is called, we will
need to linearly traverse server.clients_waiting_acks to delete
the client, and this search is O(N).

When WAIT (or WAITAOF) is used extensively in some cases, this
O(N) search may be time-consuming. We can remember the list node,
store it in the blockingState struct, and thereby avoid the
linear search in unblockClientWaitingReplicas.
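
A generic sketch of the pattern (a simplified doubly linked list, not Valkey's adlist API):

```c
#include <stdlib.h>

typedef struct dlNode { struct dlNode *prev, *next; void *value; } dlNode;
typedef struct dlList { dlNode *head, *tail; } dlList;

/* Insertion returns the node so the caller can stash it, e.g. in the
 * blocking state... */
static dlNode *dlAddTail(dlList *l, void *v) {
    dlNode *n = calloc(1, sizeof(*n));
    if (!n) return NULL;
    n->value = v; n->prev = l->tail;
    if (l->tail) l->tail->next = n; else l->head = n;
    l->tail = n;
    return n;
}

/* ...making removal O(1): no traversal needed to find the node. */
static void dlRemove(dlList *l, dlNode *n) {
    if (n->prev) n->prev->next = n->next; else l->head = n->next;
    if (n->next) n->next->prev = n->prev; else l->tail = n->prev;
    free(n);
}
```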

Signed-off-by: Binbin <binloveplay1314@qq.com>
2024-08-18 21:20:35 +08:00
Wen Hui
33c7ca41be
Add 4 commands for sentinel and update most test cases and json files (#789)
Add 4 new commands for Sentinel (reference
https://github.com/valkey-io/valkey/issues/36)

Sentinel GET-PRIMARY-ADDR-BY-NAME
Sentinel PRIMARY
Sentinel PRIMARIES
Sentinel IS-PRIMARY-DOWN-BY-ADDR

and deprecate 4 old commands:

Sentinel GET-MASTER-ADDR-BY-NAME
Sentinel MASTER
Sentinel MASTERS
Sentinel IS-MASTER-DOWN-BY-ADDR

and all sentinel tests pass here
https://github.com/hwware/valkey/actions/runs/9962102363/job/27525124583

Note: 

1. runtest-sentinel passes all test cases
2. I finished a sentinel rolling upgrade test: 1 primary, 2 replicas, 3
sentinels.
   there are 4 steps in this test scenario: 
step 1: all 3 sentinel nodes run old sentinel, shutdown primary, and
then new primary can be voted successfully.
step 2: replace sentinel 1 with new sentinel bin file, and then shutdown
primary, and then another new primary can be voted successfully
step 3: replace sentinel 2 with new sentinel bin file, and then shutdown
primary, and then another new primary can be voted successfully
step 4: replace sentinel 3 with new sentinel bin file, and then shutdown
primary, and then another new primary can be voted successfully
   
We can see that even with mixed-version sentinels running, the whole
system still works.

---------

Signed-off-by: hwware <wen.hui.ware@gmail.com>
2024-08-16 09:46:36 -04:00
DarrenJiang13
adf53c212b
Add lfu support for DEBUG OBJECT command, added lfu_freq and lfu_access_time_minutes fields (#479)
For the `debug object` command, we use `val->lru` but ignore the `lfu`
mode. So in `lfu` mode, `debug object` would return meaningless `lru`
descriptions.

Added two new fields lfu_freq and lfu_access_time_minutes.

Signed-off-by: jiangyujie.jyj <yjjiang1996@163.com>
Co-authored-by: Binbin <binloveplay1314@qq.com>
2024-08-16 17:49:46 +08:00
Binbin
fc9f291033
Make a light weight version of DEBUG OBJECT, add FAST option (#881)
Adding FAST option to DEBUG OBJECT command.

The light version only shows the lightweight information,
which is mostly O(1). The pre-existing version shows more
stats, such as serializedlength, which is sometimes time-consuming.

This should allow looking into debug stats (the key expired
but not deleted), even on huge object, on which we're afraid
to run the command for fear of causing a server freeze.

Somehow like 3ca451c46fed894bf49e7561fa0282d2583f1c06.

Signed-off-by: Binbin <binloveplay1314@qq.com>
2024-08-16 10:18:36 +08:00
Binbin
76ad8f7a76
Skip IPv6 tests when TCLSH version is < 8.6 (#910)
In #786, we skipped it in the daily CI, but not for the others.
When running ./runtest on MacOS, we will get this failure:
```
couldn't open socket: host is unreachable (nodename nor servname provided, or not known)
```

The reason is that TCL 8.5 doesn't support ipv6, so we skip tests
tagged with ipv6. This also reverts #786.

Signed-off-by: Binbin <binloveplay1314@qq.com>
2024-08-15 15:11:38 +08:00
secwall
103365fe0e
Log unexpected $ENDOFF responses in dual channel replication (#839)
I tried to test dual channel replication but forgot to add +sync
for my replication user. As a result, the replica entered a silent
cycle like this:
```
* Connecting to PRIMARY 127.0.0.1:6379
* PRIMARY <-> REPLICA sync started
* Non blocking connect for SYNC fired the event.
* Primary replied to PING, replication can continue...
* Trying a partial resynchronization (request ...)
* PSYNC is not possible, initialize RDB channel.
* Aborting dual channel sync
```

And the primary got an endless cycle like this:
```
* Replica 127.0.0.1:6380 asks for synchronization
* Partial resynchronization not accepted: Replication ID mismatch (Replica asked for '...', my replication IDs are '...' and '...')
* Replica 127.0.0.1:6380 is capable of dual channel synchronization, and partial sync isn't possible. Full sync will continue with dedicated RDB channel.
```

There was no way to tell, at the notice log level, that the replication
user was missing the +sync ACL. With this one-line change we get a
warning message in our replica log.

---------

Signed-off-by: secwall <secwall@yandex-team.ru>
2024-08-14 22:00:57 -07:00
Pieter Cailliau
4d284daefd
Copyright update to reflect IP transfer from salvatore to Redis (#740)
Update references of copyright being assigned to Salvatore when it was
transferred to Redis Ltd. as per
https://github.com/valkey-io/valkey/issues/544.

---------

Signed-off-by: Pieter Cailliau <pieter@redis.com>
2024-08-14 09:20:36 -07:00
Salvatore Mesoraca
68b2270947
Prevent later accesses to unallocated memory (#907)
A pointer to dtype is stored in the dict forever.
dtype is stack-allocated, while the created dict is global.
The dict (and the pointer to dtype in it) will live past the lifetime of
dtype.
clusterManagerLinkDictType is a global object that has the same values
as dtype.

Signed-off-by: Salvatore Mesoraca <salvatore.mesoraca@aiven.io>
2024-08-14 09:03:27 -07:00
zhaozhao.zz
131857e80a
To avoid bouncing -REDIRECT during FAILOVER (#871)
Fix #821

During the `FAILOVER` process, when conditions are met (such as when the
force time is reached or the primary and replica offsets are
consistent), the primary actively becomes the replica and transitions to
the `FAILOVER_IN_PROGRESS` state. After the primary becomes the replica,
and after handshaking and other operations, it will eventually send the
`PSYNC FAILOVER` command to the replica, after which the replica will
become the primary. This means that the upgrade of the replica to the
primary is an asynchronous operation, which implies that during the
`FAILOVER_IN_PROGRESS` state, there may be a period of time where both
nodes are replicas. In this scenario, if a `-REDIRECT` is returned, the
request will be redirected to the replica and then redirected back,
causing back and forth redirection. To avoid this situation, during the
`FAILOVER_IN_PROGRESS state`, we temporarily suspend the clients that
need to be redirected until the replica truly becomes the primary, and
then resume the execution.

---------

Signed-off-by: zhaozhao.zz <zhaozhao.zz@alibaba-inc.com>
2024-08-14 14:04:29 +08:00
Binbin
370bdb3e46
Change server.daylight_active to an atomic variable (#876)
We are updating this variable in the main thread, while the
child threads can be printing logs at the same time. This
generates a warning under SANITIZER=thread:
```
WARNING: ThreadSanitizer: data race (pid=74208)
  Read of size 4 at 0x000102875c10 by thread T3:
    #0 serverLogRaw <null>:52173615 (valkey-server:x86_64+0x10003c556)
    #1 _serverLog <null>:52173615 (valkey-server:x86_64+0x10003ca89)
    #2 bioProcessBackgroundJobs <null>:52173615 (valkey-server:x86_64+0x1001402c9)

  Previous write of size 4 at 0x000102875c10 by main thread (mutexes: write M0):
    #0 afterSleep <null>:52173615 (valkey-server:x86_64+0x10004989b)
    #1 aeProcessEvents <null>:52173615 (valkey-server:x86_64+0x100031e52)
    #2 main <null>:52173615 (valkey-server:x86_64+0x100064a3c)
    #3 start <null>:52173615 (dyld:x86_64+0xfffffffffff5c365)
    #4 start <null>:52173615 (dyld:x86_64+0xfffffffffff5c365)
```

The refresh of daylight_active is not real-time; we update
it in afterSleep, so we don't need strong synchronization, and
memory_order_relaxed is used. Note also that we are doing
load/store operations only on daylight_active, which is an
aligned 32-bit integer, so using memory_order_relaxed will
not provide more consistency than what we have today.

So this is just a cleanup to clear the warning.
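
A minimal sketch of the pattern (illustrative, not the exact patch):

```c
#include <stdatomic.h>

static _Atomic int daylight_active;

/* Main thread: refreshed in afterSleep. */
void set_daylight_active(int active) {
    atomic_store_explicit(&daylight_active, active, memory_order_relaxed);
}

/* Any logging thread: a slightly stale value is acceptable, so relaxed
 * ordering is enough; readers need no ordering with other data. */
int get_daylight_active(void) {
    return atomic_load_explicit(&daylight_active, memory_order_relaxed);
}
```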

Signed-off-by: Binbin <binloveplay1314@qq.com>
2024-08-14 13:08:20 +08:00
Amit Nagler
6cb86fff51
Fix dual-channel replication test under valgrind (#904)
Test dual-channel-replication primary gets cob overrun during replica
rdb load` fails during the Valgrind run. This is due to the load
handlers disconnecting before the tests complete, resulting in a low
primary COB. Increasing the handlers' timeout should resolve this issue.

Failure:
https://github.com/valkey-io/valkey/actions/runs/10361286333/job/28681321393

The server logs reveal that the load handler clients were disconnected
before the test started.

Also, the two previous tests took about 20 seconds, which is the handler
timeout.

---------

Signed-off-by: naglera <anagler123@gmail.com>
Signed-off-by: Madelyn Olson <madelyneolson@gmail.com>
Co-authored-by: Madelyn Olson <madelyneolson@gmail.com>
2024-08-13 10:40:19 -07:00
Binbin
f622e375a0
Better messages when valkey-cli cluster --fix meet value check failed (#867)
clusterManagerCompareKeysValues was introduced in
143bfa1e6e65cf8be1eaad0b8169e2d95ca62f9a; it calls
DEBUG DIGEST-VALUE to check whether the values on the
source node and the target node are consistent.

However, the DEBUG DIGEST-VALUE command is not supported
in older versions, such as version 4, and the node will return
an unknown subcommand error. Or the DEBUG command can be disabled
in version 7, and the node will return DEBUG not allowed.

In these cases, we need to output a friendly message to
allow users to proceed to the next step, instead of just
outputting `Value check failed!`.

Unknown subcommand example:
```
*** Target key exists
*** Checking key values on both nodes...
Node 127.0.0.1:30001 replied with error:
ERR unknown subcommand or wrong number of arguments for 'DIGEST-VALUE'. Try DEBUG HELP.
Node 127.0.0.1:30003 replied with error:
ERR unknown subcommand or wrong number of arguments for 'DIGEST-VALUE'. Try DEBUG HELP.
*** Value check failed!
DEBUG DIGEST-VALUE command is not supported.
You can relaunch the command with --cluster-replace option to force key overriding.
```

DEBUG not allowed example:
```
*** Target key exists
*** Checking key values on both nodes...
Node 127.0.0.1:30001 replied with error:
ERR DEBUG command not allowed. If the enable-debug-command option is ...
Node 127.0.0.1:30003 replied with error:
ERR DEBUG command not allowed. If the enable-debug-command option is ...
*** Value check failed!
DEBUG command is not allowed.
You can turn on the enable-debug-command option.
Or you can relaunch the command with --cluster-replace option to force key overriding.
```
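
A minimal sketch of the kind of reply inspection this implies (a hypothetical helper, not the actual clusterManager code):

```
#include <stdio.h>
#include <string.h>

/* Map a DEBUG DIGEST-VALUE error reply to a friendly hint. */
static void printValueCheckHint(const char *err) {
    printf("*** Value check failed!\n");
    if (strstr(err, "unknown subcommand")) {
        printf("DEBUG DIGEST-VALUE command is not supported.\n");
    } else if (strstr(err, "DEBUG command not allowed")) {
        printf("DEBUG command is not allowed.\n");
        printf("You can turn on the enable-debug-command option.\n");
    }
    printf("You can relaunch the command with --cluster-replace "
           "option to force key overriding.\n");
}
```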

Signed-off-by: Binbin <binloveplay1314@qq.com>
2024-08-13 19:25:14 +08:00
Rayacoo
76f809bc19
Optimize ZUNION[STORE] command by removing unnecessary accumulator dict (#829)
In the previous implementation of the `ZUNION` and `ZUNIONSTORE` commands, we
first create a temporary dict called `accumulator`. After adding all
member-score mappings to `accumulator`, we still need to convert
`accumulator` back to the final dict `dstzset->dict`. However, we can
use `dstzset->dict` directly and avoid the additional copy operation.

This PR removes the `accumulator` dict and directly uses `dstzset->dict`
to store the member-score mappings.
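
As a toy illustration of the copy elimination (a self-contained stand-in for the real dict, not the actual t_zset.c code), members are accumulated straight into the destination table instead of a temporary one:

```
#include <stdio.h>
#include <string.h>

#define SLOTS 64

typedef struct { const char *member; double score; int used; } entry;

/* Insert-or-accumulate directly into dst, as the PR does with dstzset->dict. */
static void addOrIncr(entry *dst, const char *member, double score) {
    size_t h = 0;
    for (const char *p = member; *p; p++) h = h * 31 + (unsigned char)*p;
    for (size_t i = 0; i < SLOTS; i++) {
        entry *e = &dst[(h + i) % SLOTS];
        if (!e->used) { e->member = member; e->score = score; e->used = 1; return; }
        if (strcmp(e->member, member) == 0) { e->score += score; return; }
    }
}

int main(void) {
    entry dst[SLOTS] = {0};
    const char *set1[] = {"a", "b"}, *set2[] = {"b", "c"};
    for (int i = 0; i < 2; i++) addOrIncr(dst, set1[i], 1.0);
    for (int i = 0; i < 2; i++) addOrIncr(dst, set2[i], 2.0);
    for (int i = 0; i < SLOTS; i++)
        if (dst[i].used) printf("%s -> %g\n", dst[i].member, dst[i].score);
    return 0;
}
```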

- **Test**
First, I added 300 unique elements to two sorted sets called
'zunion_test1' and 'zunion_test2'. Then, I tested `zunion` and
`zunionstore` on these two sorted sets. The test results shown below
indicate that the performance of both zunion and zunionstore improved
by about 31%.

### ZUNION
#### unstable
```
./valkey-benchmark -P 10 -n 100000 zunion 2 zunion_test1 zunion_test2

Summary:
  throughput summary: 2713.41 requests per second
  latency summary (msec):
          avg       min       p50       p95       p99       max
      146.252     3.464   153.343   182.015   184.959   192.895
```
#### This PR
```
./valkey-benchmark -P 10 -n 100000 zunion 2 zunion_test1 zunion_test2

Summary:
  throughput summary: 3543.84 requests per second
  latency summary (msec):
          avg       min       p50       p95       p99       max
      108.259     2.984   114.239   141.695   145.151   160.255
```
### ZUNIONSTORE
#### unstable
```
./valkey-benchmark -P 10 -n 100000 zunionstore out 2 zunion_test1 zunion_test2

Summary:
  throughput summary: 3168.07 requests per second
  latency summary (msec):
          avg       min       p50       p95       p99       max
      157.511     3.368   183.167   189.311   193.535   231.679
```
#### This PR
```
./valkey-benchmark -P 10 -n 100000 zunionstore out 2 zunion_test1 zunion_test2

Summary:
  throughput summary: 4144.73 requests per second
  latency summary (msec):
          avg       min       p50       p95       p99       max
      120.374     2.648   141.823   149.119   153.855   183.167
```

---------

Signed-off-by: RayCao <zisong.cw@alibaba-inc.com>
Signed-off-by: zisong.cw <zisong.cw@alibaba-inc.com>
2024-08-13 16:50:57 +08:00
Eran Liberty
6dfb8203cc
Add debug-context config (#874)
A configuration option with zero impact on server operation, but which is
printed out on server crash and can be accessed by gdb for debugging. It
can be used by the user/operator to store any free-form string. This
string will persist as long as the server is running and will be
accessible in several ways, for example printed in crash reports:
```
------ CONFIG DEBUG OUTPUT ------
lazyfree-lazy-eviction no
...
io-threads-do-reads yes
debug-context "test2"
proto-max-bulk-len 512mb
```
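
A minimal sketch of the idea (illustrative names, not the actual config.c code): keep a free-form string for the life of the process and dump it on the crash path:

```
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Free-form string kept for the lifetime of the process. */
static char *debug_context = NULL;

static void setDebugContext(const char *s) {
    free(debug_context);
    debug_context = strdup(s);
}

/* Called from the crash report path. */
static void printDebugContext(void) {
    printf("debug-context \"%s\"\n", debug_context ? debug_context : "");
}

int main(void) {
    setDebugContext("test2");
    printDebugContext();
    return 0;
}
```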

---------

Signed-off-by: Eran Liberty <eranl@amazon.com>
Co-authored-by: Eran Liberty <eranl@amazon.com>
2024-08-12 16:33:23 -07:00
naglera
27fce29500
Fix dual-channel-replication related issues (#837)
- Fix a TLS bug where the connection was shut down by the primary's main
process while the child process was still writing, causing the main process
to block.
- TLS connection fix: file descriptors are set to blocking mode in the
main thread, followed by a blocking write. This change sets the file
descriptors to non-blocking if TLS is used (see `connTLSSyncWrite()`)
(@xbasel); a minimal sketch follows below.
- Improve the reliability of dual-channel tests. Modify the pause
mechanism to verify process status directly, rather than relying on logs.
- Ensure that `server.repl_offset` and `server.replid` are updated
correctly when dual-channel synchronization completes successfully.
Previously this was not guaranteed, which led to failures in replication
tests that validate replication IDs or compare replication offsets.
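
A minimal sketch of the non-blocking restore mentioned above (assuming POSIX `fcntl`; illustrative, not the actual `connTLSSyncWrite()` code):

```
#include <fcntl.h>

/* Put fd back into non-blocking mode after a blocking sync write. */
static int setNonBlocking(int fd) {
    int flags = fcntl(fd, F_GETFL);
    if (flags == -1) return -1;
    return fcntl(fd, F_SETFL, flags | O_NONBLOCK);
}
```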

---------

Signed-off-by: naglera <anagler123@gmail.com>
Signed-off-by: naglera <58042354+naglera@users.noreply.github.com>
Signed-off-by: xbasel <103044017+xbasel@users.noreply.github.com>
Signed-off-by: Madelyn Olson <madelyneolson@gmail.com>
Signed-off-by: Binbin <binloveplay1314@qq.com>
Co-authored-by: ranshid <88133677+ranshid@users.noreply.github.com>
Co-authored-by: xbasel <103044017+xbasel@users.noreply.github.com>
Co-authored-by: Madelyn Olson <madelyneolson@gmail.com>
Co-authored-by: Binbin <binloveplay1314@qq.com>
2024-08-12 13:03:12 -07:00
naglera
1c198a95ac
Add debug assert on duplicate freeClientAsync (#896)
When debug assert mode is enabled, verify that we don't insert the same
client twice into server.clients_to_close.
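
A minimal sketch of the check (illustrative list type, not the actual server code):

```
#include <assert.h>
#include <stddef.h>

struct client; /* opaque here */

typedef struct closeNode {
    struct client *c;
    struct closeNode *next;
} closeNode;

static closeNode *clients_to_close = NULL;

static void assertNotQueuedForClose(struct client *c) {
    (void)c; /* unused when assertions are compiled out */
#ifndef NDEBUG
    for (closeNode *n = clients_to_close; n != NULL; n = n->next)
        assert(n->c != c && "client inserted twice into clients_to_close");
#endif
}
```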

Signed-off-by: naglera <anagler123@gmail.com>
2024-08-12 12:44:53 -07:00
Binbin
929283fc6f
Dual channel replication should not update lastbgsave_status when transfer error (#811)
Currently lastbgsave_status is used in bgsave or disk-based replication,
and the target is the disk. In #60, we updated it on transfer errors;
I think that is mainly used in tests, so we can use a log message instead.

That change sets lastbgsave_status to err in this case, but it is strange
that it does not set ok or err in the preceding if and the following else.
Also note this will affect stop-writes-on-bgsave-error.

Signed-off-by: Binbin <binloveplay1314@qq.com>
2024-08-12 11:25:55 -07:00
Binbin
5166d489da
Correctly record client information to the slowlog when running script (#805)
Currently when we are running a script, we pass a fake client.
So if the command executed in the script is recorded in the slowlog,
the client's ip:port and name information will be empty.

before:
```
127.0.0.1:6379> client setname myclient
OK
127.0.0.1:6379> config set slowlog-log-slower-than 0
OK
127.0.0.1:6379> eval "redis.call('ping')" 0
(nil)
127.0.0.1:6379> slowlog get 2
1) 1) (integer) 2
   2) (integer) 1721314289
   3) (integer) 96
   4) 1) "eval"
      2) "redis.call('ping')"
      3) "0"
   5) "127.0.0.1:61106"
   6) "myclient"
2) 1) (integer) 1
   2) (integer) 1721314289
   3) (integer) 4
   4) 1) "ping"
   5) ""
   6) ""
```

after:
```
127.0.0.1:6379> client setname myclient
OK
127.0.0.1:6379> config set slowlog-log-slower-than 0
OK
127.0.0.1:6379> eval "redis.call('ping')" 0
(nil)
127.0.0.1:6379> slowlog get 2
1) 1) (integer) 2
   2) (integer) 1721314371
   3) (integer) 53
   4) 1) "eval"
      2) "redis.call('ping')"
      3) "0"
   5) "127.0.0.1:51983"
   6) "myclient"
2) 1) (integer) 1
   2) (integer) 1721314371
   3) (integer) 1
   4) 1) "ping"
   5) "127.0.0.1:51983"
   6) "myclient"
```
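
A minimal sketch of the fallback this implies (hypothetical fields, not the actual script/slowlog code):

```
#include <stddef.h>

/* Decide whose address/name go into the slowlog entry: for a script's
 * fake client, fall back to the real caller. */
typedef struct client {
    int is_fake;                /* executing inside a script */
    struct client *real_client; /* the caller that issued EVAL */
} client;

static client *slowlogClient(client *c) {
    if (c->is_fake && c->real_client != NULL) return c->real_client;
    return c;
}
```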

Signed-off-by: Binbin <binloveplay1314@qq.com>
2024-08-10 23:46:56 +08:00
Harkrishn Patro
7424620ca0
Check if the server is currently running the feature before cron run (#838)
I think we should first check whether the server currently has cluster
mode enabled, or has modules loaded, before the throttled cron run
(`run_with_period`) condition.
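
A minimal sketch of the reordering (a stand-in for the real run_with_period macro, with illustrative names):

```
/* Throttle modeled on a run_with_period-style check: do the work only
 * every `ms` milliseconds of cron time, and only if the feature is on. */
static long long cronloops = 0;
static const int hz = 10; /* cron iterations per second */

static int run_with_period(int ms) {
    return ms <= 1000 / hz || cronloops % (ms / (1000 / hz)) == 0;
}

static void cronSketch(int cluster_enabled) {
    /* Cheap feature check first, so the throttle isn't even evaluated
     * when the feature is unused. */
    if (cluster_enabled && run_with_period(100)) {
        /* clusterCron-style work */
    }
    cronloops++;
}
```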

Signed-off-by: Harkrishn Patro <harkrisp@amazon.com>
2024-08-08 13:28:45 -07:00
Harkrishn Patro
109cc21267
Assert network bytes out for replication slot stat computation is only allowed on primary (#847)
Added an assertion to avoid incorrect usage of the
network-bytes-out-for-replication code flow in slot stats computation.
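
A minimal sketch of such a guard (illustrative names, not the actual slot-stats code):

```
#include <assert.h>

/* Slot-stats accounting for replication bytes out is only meaningful on
 * a primary; assert rather than silently computing nonsense. */
static void addReplicationBytesOutSlotStat(int is_primary, long long bytes) {
    assert(is_primary && "replication network-bytes-out slot stats "
                         "are only allowed on a primary");
    (void)bytes; /* ... accumulate into the current slot's counter ... */
}
```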

Signed-off-by: Harkrishn Patro <harkrisp@amazon.com>
2024-08-07 16:14:16 -07:00
Binbin
380f700816
Improve cluster cant failover log conditions (#780)
This PR adjusts the logging conditions of clusterLogCantFailover
in these two ways.

1. For the same cant_failover_reason, we print the log once per
CLUSTER_CANT_FAILOVER_RELOG_PERIOD, but its value is 10s, which
is a bit long; shorten it to 1s so we can better track its state.
We get to see the system making progress by watching the messages.
Using 1s also covers pretty much all cases, as I don't see a reason
for using a <1s node timeout, in test or prod.

2. We previously did not print logs before nolog_fail_time, whose value
is cluster-node-timeout+5000. This may cause us to lose some logs;
for example, if cluster-node-timeout is small, auth_timeout will
be 2000 and auth_retry_time will be 4000. In this case, we would
lose all the reasons during the election if the failover times out.
So remove the nolog_fail_time logic; since we still have the
CLUSTER_CANT_FAILOVER_RELOG_PERIOD logic, we won't print too many
logs.
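
A minimal sketch of the throttle that remains after this change (illustrative names):

```
#include <time.h>

#define CANT_FAILOVER_RELOG_PERIOD 1 /* seconds, shortened from 10 */

static int last_reason = -1;
static time_t last_log_time = 0;

/* Log a given can't-failover reason at most once per relog period; there
 * is no nolog_fail_time gate, so early election reasons are not lost. */
static int shouldLogCantFailover(int reason, time_t now) {
    if (reason == last_reason &&
        now - last_log_time < CANT_FAILOVER_RELOG_PERIOD) return 0;
    last_reason = reason;
    last_log_time = now;
    return 1;
}
```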

Signed-off-by: Binbin <binloveplay1314@qq.com>
2024-08-06 21:14:18 +08:00
Yury-Fridlyand
bfdab65791
Fix CI concurrency (#849)
A few CI improvements which will reduce CI queue occupation and eliminate
stale runs.

1. Kill CI jobs on PRs once the PR branch gets a new push. This will prevent
the situation that happened today: a huge job triggered twice in less than an
hour and occupied the entire **org** runners queue (for all repositories) for
the rest of the day (see pic). This completely blocked the valkey-glide
team.
2. Distribute nightly cron jobs over time to prevent them from running
together. Keep in mind, cron's TZ is UTC, so midnight tasks hit
developers located in other timezones.

This must be backported to all release branches (`valkey-x.y` and `x.y`)

![image](https://github.com/user-attachments/assets/923d8237-3cb7-42f5-80c8-5322b3f5187d)

---------

Signed-off-by: Yury-Fridlyand <yury.fridlyand@improving.com>
2024-08-05 22:05:29 -07:00
Harkrishn Patro
0fc43edc6c
Update sentinel conf access string to allow hello channel access (#854)
This example of a minimal user account in your Valkey server
for Sentinel is incorrect. If you add this ACL as-is to your
valkey users.acl, valkey will add `resetchannels -@all` before
the `+client`, which prevents sentinel from publishing messages
to the `__sentinel__:hello` pubsub channel used for sentinel discovery.

Fix #744.
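
For illustration, an ACL line that grants channel access explicitly (an assumption about the shape of the fix, not necessarily the exact conf change; user name and password are placeholders):

```
user sentinel-user on >sentinel-password allchannels +publish +subscribe +ping +info +client|setname
```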

Signed-off-by: Harkrishn Patro <harkrisp@amazon.com>
2024-08-03 23:32:53 +08:00
Wen Hui
facd123ce6
Update redis.conf to valkey.conf in log message (#855)
Update redis.conf to valkey.conf

Signed-off-by: hwware <wen.hui.ware@gmail.com>
2024-08-03 23:30:55 +08:00
Binbin
054ffd140f
Fix outdated comment of migrate in valkey-cli --cluster (#864)
After 503fd229e4181e932ba74b3ca8a222712d80ebca the comment is outdated.

Signed-off-by: Binbin <binloveplay1314@qq.com>
2024-08-03 14:13:37 +08:00
Madelyn Olson
b728e4170f
Disable empty shard slot migration test until test is de-flaked (#859)
We have a number of test failures in the empty shard migration tests which
seem to be related to race conditions in the failover, but they could be
more pervasive. For now, disable the tests to prevent so many
false-negative test failures.

Signed-off-by: Madelyn Olson <madelyneolson@gmail.com>
2024-07-31 16:52:20 -07:00
449 changed files with 34965 additions and 16193 deletions

76
.cmake-format.yaml Normal file
View File

@ -0,0 +1,76 @@
format:
_help_line_width:
- How wide to allow formatted cmake files
line_width: 120
_help_tab_size:
- How many spaces to tab for indent
tab_size: 4
_help_use_tabchars:
- If true, lines are indented using tab characters (utf-8
- 0x09) instead of <tab_size> space characters (utf-8 0x20).
- In cases where the layout would require a fractional tab
- character, the behavior of the fractional indentation is
- governed by <fractional_tab_policy>
use_tabchars: false
_help_separate_ctrl_name_with_space:
- If true, separate flow control names from their parentheses
- with a space
separate_ctrl_name_with_space: true
_help_min_prefix_chars:
- If the statement spelling length (including space and
- parenthesis) is smaller than this amount, then force reject
- nested layouts.
min_prefix_chars: 4
_help_max_prefix_chars:
- If the statement spelling length (including space and
- parenthesis) is larger than the tab width by more than this
- amount, then force reject un-nested layouts.
max_prefix_chars: 10
_help_max_lines_hwrap:
- If a candidate layout is wrapped horizontally but it exceeds
- this many lines, then reject the layout.
max_lines_hwrap: 2
_help_line_ending:
- What style line endings to use in the output.
line_ending: unix
_help_command_case:
- Format command names consistently as 'lower' or 'upper' case
command_case: lower
_help_keyword_case:
- Format keywords consistently as 'lower' or 'upper' case
keyword_case: unchanged
_help_always_wrap:
- A list of command names which should always be wrapped
always_wrap: []
_help_enable_sort:
- If true, the argument lists which are known to be sortable
- will be sorted lexicographicall
enable_sort: true
_help_autosort:
- If true, the parsers may infer whether or not an argument
- list is sortable (without annotation).
autosort: false
_help_require_valid_layout:
- By default, if cmake-format cannot successfully fit
- everything into the desired linewidth it will apply the
- last, most aggressive attempt that it made. If this flag is
- True, however, cmake-format will print error, exit with non-
- zero status code, and write-out nothing
require_valid_layout: false
_help_layout_passes:
- A dictionary mapping layout nodes to a list of wrap
- decisions. See the documentation for more information.
layout_passes: {}
encode:
_help_emit_byteorder_mark:
- If true, emit the unicode byte-order mark (BOM) at the start
- of the file
emit_byteorder_mark: false
_help_input_encoding:
- Specify the encoding of the input file. Defaults to utf-8
input_encoding: utf-8
_help_output_encoding:
- Specify the encoding of the output file. Defaults to utf-8.
- Note that cmake only claims to support utf-8 so be careful
- when using anything else
output_encoding: utf-8

View File

@ -2,16 +2,16 @@
[files] [files]
extend-exclude = [ extend-exclude = [
".git/",
"deps/", "deps/",
# crc16_slottable is primarily pre-generated random strings. # crc16_slottable is primarily pre-generated random strings.
"src/crc16_slottable.h", "src/crc16_slottable.h",
] ]
ignore-hidden = false
[default.extend-words] [default.extend-words]
advices = "advices"
exat = "exat" exat = "exat"
optin = "optin" optin = "optin"
ro = "ro"
smove = "smove" smove = "smove"
[type.c] [type.c]
@ -20,6 +20,7 @@ extend-ignore-re = [
"D4C4DAA4", # sha1.c "D4C4DAA4", # sha1.c
"Georg Nees", "Georg Nees",
"\\[l\\]ist", # eval.c "\\[l\\]ist", # eval.c
'"LKE"', # test_rax.c
] ]
[type.tcl] [type.tcl]
@ -27,26 +28,23 @@ extend-ignore-re = [
"DUMPed", "DUMPed",
] ]
[type.sv.extend-identifiers]
# sv = .h
module_gil_acquring = "module_gil_acquring"
[type.c.extend-identifiers] [type.c.extend-identifiers]
ang = "ang" advices = "advices"
clen = "clen" clen = "clen"
fle = "fle" fle = "fle"
module_gil_acquring = "module_gil_acquring"
nd = "nd" nd = "nd"
ot = "ot" ot = "ot"
[type.tcl.extend-identifiers] [type.tcl.extend-identifiers]
fo = "fo"
oll = "oll" oll = "oll"
stressers = "stressers" stressers = "stressers"
[type.sv.extend-words] [type.sv.extend-identifiers]
# sv = .h # sv = .h
fo = "fo" fo = "fo"
[type.sv.extend-words]
# sv = .h
seeked = "seeked" seeked = "seeked"
[type.c.extend-words] [type.c.extend-words]
@ -57,7 +55,6 @@ limite = "limite"
pn = "pn" pn = "pn"
seeked = "seeked" seeked = "seeked"
tre = "tre" tre = "tre"
ws = "ws"
[type.systemd.extend-words] [type.systemd.extend-words]
# systemd = .conf # systemd = .conf
@ -65,5 +62,4 @@ ake = "ake"
[type.tcl.extend-words] [type.tcl.extend-words]
fo = "fo" fo = "fo"
lst = "lst"
tre = "tre" tre = "tre"

17
.git-blame-ignore-revs Normal file
View File

@ -0,0 +1,17 @@
# This is a file that can be used by git-blame to ignore some revisions.
# (git 2.23+, released in August 2019)
#
# Can be configured as follow:
#
# $ git config blame.ignoreRevsFile .git-blame-ignore-revs
#
# For more information you can look at git-blame(1) man page.
# Applied clang-format (#323)
c41dd77a3e93e02be3c4bc75d8c76b7b4169a4ce
# Removed terms `master` and `slave` from the source code (#591)
54c97479356ecf41b4b63733494a1be2ab919e17
# Set ColumnLimit to 0 and reformat (#1045)
af811748e7819a5ac31a6df4b21622aa58c64ae4

View File

@ -9,6 +9,9 @@ contact_links:
- name: Chat with us on Matrix? - name: Chat with us on Matrix?
url: https://matrix.to/#/#valkey:matrix.org url: https://matrix.to/#/#valkey:matrix.org
about: We are on Matrix too! about: We are on Matrix too!
- name: Chat with us on Slack?
url: https://join.slack.com/t/valkey-oss-developer/shared_invite/zt-2nxs51chx-EB9hu9Qdch3GMfRcztTSkQ
about: We are on Slack too!
- name: Documentation issue? - name: Documentation issue?
url: https://github.com/valkey-io/valkey-doc/issues url: https://github.com/valkey-io/valkey-doc/issues
about: Report it on the valkey-doc repo. about: Report it on the valkey-doc repo.

View File

@ -24,11 +24,11 @@ runs:
- name: Get targets - name: Get targets
run: | run: |
x86_arch=$(jq -c '[.linux_targets[] | select(.arch=="x86_64")]' utils/releasetools/build-config.json) x86_arch=$(jq -c '[.linux_targets[] | select(.arch=="x86_64")]' .github/actions/generate-package-build-matrix/build-config.json)
x86_matrix=$(echo "{ \"distro\" : $x86_arch }" | jq -c .) x86_matrix=$(echo "{ \"distro\" : $x86_arch }" | jq -c .)
echo "X86_MATRIX=$x86_matrix" >> $GITHUB_ENV echo "X86_MATRIX=$x86_matrix" >> $GITHUB_ENV
arm_arch=$(jq -c '[.linux_targets[] | select(.arch=="arm64")]' utils/releasetools/build-config.json) arm_arch=$(jq -c '[.linux_targets[] | select(.arch=="arm64")]' .github/actions/generate-package-build-matrix/build-config.json)
arm_matrix=$(echo "{ \"distro\" : $arm_arch }" | jq -c .) arm_matrix=$(echo "{ \"distro\" : $arm_arch }" | jq -c .)
echo "ARM_MATRIX=$arm_matrix" >> $GITHUB_ENV echo "ARM_MATRIX=$arm_matrix" >> $GITHUB_ENV
shell: bash shell: bash

View File

@ -1,28 +1,35 @@
{ {
"linux_targets": [ "linux_targets": [
{ {
"arch": "x86_64", "arch": "x86_64",
"target": "ubuntu18.04", "target": "ubuntu-20.04",
"type": "deb",
"platform": "bionic"
},
{
"arch": "x86_64",
"target": "ubuntu20.04",
"type": "deb", "type": "deb",
"platform": "focal" "platform": "focal"
}, },
{ {
"arch": "arm64", "arch": "x86_64",
"target": "ubuntu18.04", "target": "ubuntu-22.04",
"type": "deb", "type": "deb",
"platform": "bionic" "platform": "jammy"
},
{
"arch": "x86_64",
"target": "ubuntu-24.04",
"type": "deb",
"platform": "noble"
}, },
{ {
"arch": "arm64", "arch": "arm64",
"target": "ubuntu20.04", "target": "ubuntu20.04",
"type": "deb", "type": "deb",
"platform": "focal" "platform": "focal"
},
{
"arch": "arm64",
"target": "ubuntu22.04",
"type": "deb",
"platform": "jammy"
} }
] ]
} }

View File

@ -3,7 +3,12 @@ name: Build Release Packages
on: on:
release: release:
types: [published] types: [published]
push:
paths:
- '.github/workflows/build-release-packages.yml'
- '.github/workflows/call-build-linux-arm-packages.yml'
- '.github/workflows/call-build-linux-x86-packages.yml'
- '.github/actions/generate-package-build-matrix/build-config.json'
workflow_dispatch: workflow_dispatch:
inputs: inputs:
version: version:
@ -11,17 +16,19 @@ on:
required: true required: true
permissions: permissions:
id-token: write
contents: read contents: read
jobs: jobs:
# This job provides the version metadata from the tag for the other jobs to use. # This job provides the version metadata from the tag for the other jobs to use.
release-build-get-meta: release-build-get-meta:
name: Get metadata to build name: Get metadata to build
if: github.event_name == 'workflow_dispatch' || github.repository == 'valkey-io/valkey'
runs-on: ubuntu-latest runs-on: ubuntu-latest
outputs: outputs:
version: ${{ steps.get_version.outputs.VERSION }} version: ${{ steps.get_version.outputs.VERSION }}
is_test: ${{ steps.check-if-testing.outputs.IS_TEST }}
steps: steps:
- run: | - run: |
echo "Version: ${{ inputs.version || github.ref_name }}" echo "Version: ${{ inputs.version || github.ref_name }}"
shell: bash shell: bash
@ -32,8 +39,13 @@ jobs:
- name: Get the version - name: Get the version
id: get_version id: get_version
run: | run: |
VERSION="${INPUT_VERSION}" if [[ "${{ github.event_name }}" == "push" ]]; then
VERSION=${{ github.ref_name }}
else
VERSION="${INPUT_VERSION}"
fi
if [ -z "${VERSION}" ]; then if [ -z "${VERSION}" ]; then
echo "Error: No version specified"
exit 1 exit 1
fi fi
echo "VERSION=$VERSION" >> $GITHUB_OUTPUT echo "VERSION=$VERSION" >> $GITHUB_OUTPUT
@ -43,8 +55,21 @@ jobs:
# only ever be a tag # only ever be a tag
INPUT_VERSION: ${{ inputs.version || github.ref_name }} INPUT_VERSION: ${{ inputs.version || github.ref_name }}
- name: Check if we are testing
id: check-if-testing
run: |
if [[ "${{ github.event_name }}" == "push" ]]; then
echo "This is a test workflow -> We will upload to the Test S3 Bucket"
echo "IS_TEST=true" >> $GITHUB_OUTPUT
else
echo "This is a Release workflow -> We will upload to the Release S3 Bucket"
echo "IS_TEST=false" >> $GITHUB_OUTPUT
fi
shell: bash
generate-build-matrix: generate-build-matrix:
name: Generating build matrix name: Generating build matrix
if: github.event_name == 'workflow_dispatch' || github.repository == 'valkey-io/valkey'
runs-on: ubuntu-latest runs-on: ubuntu-latest
outputs: outputs:
x86_64-build-matrix: ${{ steps.set-matrix.outputs.x86_64-build-matrix }} x86_64-build-matrix: ${{ steps.set-matrix.outputs.x86_64-build-matrix }}
@ -56,7 +81,7 @@ jobs:
- uses: ./.github/actions/generate-package-build-matrix - uses: ./.github/actions/generate-package-build-matrix
id: set-matrix id: set-matrix
with: with:
ref: ${{ inputs.version || github.ref_name }} ref: ${{ needs.release-build-get-meta.outputs.version }}
release-build-linux-x86-packages: release-build-linux-x86-packages:
needs: needs:
@ -67,11 +92,10 @@ jobs:
version: ${{ needs.release-build-get-meta.outputs.version }} version: ${{ needs.release-build-get-meta.outputs.version }}
ref: ${{ inputs.version || github.ref_name }} ref: ${{ inputs.version || github.ref_name }}
build_matrix: ${{ needs.generate-build-matrix.outputs.x86_64-build-matrix }} build_matrix: ${{ needs.generate-build-matrix.outputs.x86_64-build-matrix }}
region: us-west-2
secrets: secrets:
token: ${{ secrets.GITHUB_TOKEN }} bucket_name: ${{ needs.release-build-get-meta.outputs.is_test == 'true' && secrets.AWS_S3_TEST_BUCKET || secrets.AWS_S3_BUCKET }}
bucket: ${{ secrets.AWS_S3_BUCKET }} role_to_assume: ${{ secrets.AWS_ROLE_TO_ASSUME }}
access_key_id: ${{ secrets.AWS_S3_ACCESS_KEY_ID }}
secret_access_key: ${{ secrets.AWS_S3_ACCESS_KEY }}
release-build-linux-arm-packages: release-build-linux-arm-packages:
needs: needs:
@ -82,8 +106,7 @@ jobs:
version: ${{ needs.release-build-get-meta.outputs.version }} version: ${{ needs.release-build-get-meta.outputs.version }}
ref: ${{ inputs.version || github.ref_name }} ref: ${{ inputs.version || github.ref_name }}
build_matrix: ${{ needs.generate-build-matrix.outputs.arm64-build-matrix }} build_matrix: ${{ needs.generate-build-matrix.outputs.arm64-build-matrix }}
region: us-west-2
secrets: secrets:
token: ${{ secrets.GITHUB_TOKEN }} bucket_name: ${{ needs.release-build-get-meta.outputs.is_test == 'true' && secrets.AWS_S3_TEST_BUCKET || secrets.AWS_S3_BUCKET }}
bucket: ${{ secrets.AWS_S3_BUCKET }} role_to_assume: ${{ secrets.AWS_ROLE_TO_ASSUME }}
access_key_id: ${{ secrets.AWS_S3_ACCESS_KEY_ID }}
secret_access_key: ${{ secrets.AWS_S3_ACCESS_KEY }}

View File

@ -15,28 +15,27 @@ on:
description: The build targets to produce as a JSON matrix. description: The build targets to produce as a JSON matrix.
type: string type: string
required: true required: true
secrets: region:
token: description: The AWS region to push packages into.
description: The Github token or similar to authenticate with. type: string
required: true
secrets:
bucket_name:
description: The S3 bucket to push packages into.
required: true
role_to_assume:
description: The role to assume for the S3 bucket.
required: true required: true
bucket:
description: The name of the S3 bucket to push packages into.
required: false
access_key_id:
description: The S3 access key id for the bucket.
required: false
secret_access_key:
description: The S3 secret access key for the bucket.
required: false
permissions: permissions:
id-token: write
contents: read contents: read
jobs: jobs:
build-valkey: build-valkey:
# Capture source tarball and generate checksum for it # Capture source tarball and generate checksum for it
name: Build package ${{ matrix.distro.target }} ${{ matrix.distro.arch }} name: Build package ${{ matrix.distro.target }} ${{ matrix.distro.arch }}
runs-on: 'ubuntu-latest' runs-on: "ubuntu-latest"
strategy: strategy:
fail-fast: false fail-fast: false
matrix: ${{ fromJSON(inputs.build_matrix) }} matrix: ${{ fromJSON(inputs.build_matrix) }}
@ -46,34 +45,30 @@ jobs:
with: with:
ref: ${{ inputs.version }} ref: ${{ inputs.version }}
- name: Configure AWS credentials
uses: aws-actions/configure-aws-credentials@v4
with:
aws-region: ${{ inputs.region }}
role-to-assume: ${{ secrets.role_to_assume }}
- name: Make Valkey - name: Make Valkey
uses: uraimo/run-on-arch-action@v2 uses: uraimo/run-on-arch-action@v2
with: with:
arch: aarch64 arch: aarch64
distro: ${{matrix.distro.target}} distro: ${{matrix.distro.target}}
install: apt-get update && apt-get install -y build-essential libssl-dev install: apt-get update && apt-get install -y build-essential libssl-dev libsystemd-dev
run: make -C src all BUILD_TLS=yes run: make -C src all BUILD_TLS=yes USE_SYSTEMD=yes
- name: Create Tarball and SHA256sums - name: Create Tarball and SHA256sums
run: | run: |
TAR_FILE_NAME=valkey-${{inputs.version}}-${{matrix.distro.platform}}-${{ matrix.distro.arch}} TAR_FILE_NAME=valkey-${{inputs.version}}-${{matrix.distro.platform}}-${{ matrix.distro.arch}}
mkdir -p $TAR_FILE_NAME/bin $TAR_FILE_NAME/share mkdir -p "$TAR_FILE_NAME/bin" "$TAR_FILE_NAME/share"
cp -rfv src/valkey-* $TAR_FILE_NAME/bin rsync -av --exclude='*.c' --exclude='*.d' --exclude='*.o' src/valkey-* "$TAR_FILE_NAME/bin/"
cp -v /home/runner/work/valkey/valkey/COPYING $TAR_FILE_NAME/share/LICENSE cp -v /home/runner/work/valkey/valkey/COPYING "$TAR_FILE_NAME/share/LICENSE"
tar -czvf $TAR_FILE_NAME.tar.gz $TAR_FILE_NAME tar -czvf $TAR_FILE_NAME.tar.gz $TAR_FILE_NAME
sha256sum $TAR_FILE_NAME.tar.gz > $TAR_FILE_NAME.tar.gz.sha256 sha256sum $TAR_FILE_NAME.tar.gz > $TAR_FILE_NAME.tar.gz.sha256
mkdir -p packages-files mkdir -p packages-files
cp -rfv $TAR_FILE_NAME.tar* packages-files/ cp -rfv $TAR_FILE_NAME.tar* packages-files/
- name: Install AWS cli.
run: |
sudo apt-get install -y awscli
- name: Configure AWS credentials
run: |
aws configure set region us-west-2
aws configure set aws_access_key_id ${{ secrets.access_key_id }}
aws configure set aws_secret_access_key ${{ secrets.secret_access_key }}
- name: Sync to S3 - name: Sync to S3
run: aws s3 sync packages-files s3://${{secrets.bucket}}/releases/ run: aws s3 sync packages-files s3://${{ secrets.bucket_name }}/releases/

View File

@ -15,28 +15,27 @@ on:
description: The build targets to produce as a JSON matrix. description: The build targets to produce as a JSON matrix.
type: string type: string
required: true required: true
secrets: region:
token: description: The AWS region to upload the packages to.
description: The Github token or similar to authenticate with. type: string
required: true
secrets:
bucket_name:
description: The name of the S3 bucket to upload the packages to.
required: true
role_to_assume:
description: The role to assume for the S3 bucket.
required: true required: true
bucket:
description: The name of the S3 bucket to push packages into.
required: false
access_key_id:
description: The S3 access key id for the bucket.
required: false
secret_access_key:
description: The S3 secret access key for the bucket.
required: false
permissions: permissions:
id-token: write
contents: read contents: read
jobs: jobs:
build-valkey: build-valkey:
# Capture source tarball and generate checksum for it # Capture source tarball and generate checksum for it
name: Build package ${{ matrix.distro.target }} ${{ matrix.distro.arch }} name: Build package ${{ matrix.distro.target }} ${{ matrix.distro.arch }}
runs-on: 'ubuntu-latest' runs-on: ${{matrix.distro.target}}
strategy: strategy:
fail-fast: false fail-fast: false
matrix: ${{ fromJSON(inputs.build_matrix) }} matrix: ${{ fromJSON(inputs.build_matrix) }}
@ -46,28 +45,28 @@ jobs:
with: with:
ref: ${{ inputs.version }} ref: ${{ inputs.version }}
- name: Configure AWS credentials
uses: aws-actions/configure-aws-credentials@v4
with:
aws-region: ${{ inputs.region }}
role-to-assume: ${{ secrets.role_to_assume }}
- name: Install dependencies - name: Install dependencies
run: sudo apt-get update && sudo apt-get install -y build-essential libssl-dev jq wget awscli run: sudo apt-get update && sudo apt-get install -y build-essential libssl-dev libsystemd-dev
- name: Make Valkey - name: Make Valkey
run: make -C src all BUILD_TLS=yes run: make -C src all BUILD_TLS=yes USE_SYSTEMD=yes
- name: Create Tarball and SHA256sums - name: Create Tarball and SHA256sums
run: | run: |
TAR_FILE_NAME=valkey-${{inputs.version}}-${{matrix.distro.platform}}-${{ matrix.distro.arch}} TAR_FILE_NAME=valkey-${{inputs.version}}-${{matrix.distro.platform}}-${{ matrix.distro.arch}}
mkdir -p $TAR_FILE_NAME/bin $TAR_FILE_NAME/share mkdir -p "$TAR_FILE_NAME/bin" "$TAR_FILE_NAME/share"
cp -rfv src/valkey-* $TAR_FILE_NAME/bin rsync -av --exclude='*.c' --exclude='*.d' --exclude='*.o' src/valkey-* "$TAR_FILE_NAME/bin/"
cp -v /home/runner/work/valkey/valkey/COPYING $TAR_FILE_NAME/share/LICENSE cp -v /home/runner/work/valkey/valkey/COPYING "$TAR_FILE_NAME/share/LICENSE"
tar -czvf $TAR_FILE_NAME.tar.gz $TAR_FILE_NAME tar -czvf $TAR_FILE_NAME.tar.gz $TAR_FILE_NAME
sha256sum $TAR_FILE_NAME.tar.gz > $TAR_FILE_NAME.tar.gz.sha256 sha256sum $TAR_FILE_NAME.tar.gz > $TAR_FILE_NAME.tar.gz.sha256
mkdir -p packages-files mkdir -p packages-files
cp -rfv $TAR_FILE_NAME.tar* packages-files/ cp -rfv $TAR_FILE_NAME.tar* packages-files/
- name: Configure AWS credentials
run: |
aws configure set region us-west-2
aws configure set aws_access_key_id ${{ secrets.access_key_id }}
aws configure set aws_secret_access_key ${{ secrets.secret_access_key }}
- name: Sync to S3 - name: Sync to S3
run: aws s3 sync packages-files s3://${{secrets.bucket}}/releases/ run: aws s3 sync packages-files s3://${{ secrets.bucket_name }}/releases/

View File

@ -2,6 +2,10 @@ name: CI
on: [push, pull_request] on: [push, pull_request]
concurrency:
group: ci-${{ github.head_ref || github.ref }}
cancel-in-progress: true
permissions: permissions:
contents: read contents: read
@ -13,11 +17,16 @@ jobs:
- name: make - name: make
# Fail build if there are warnings # Fail build if there are warnings
# build with TLS just for compilation coverage # build with TLS just for compilation coverage
run: make all-with-unit-tests SERVER_CFLAGS='-Werror' BUILD_TLS=yes run: make -j4 all-with-unit-tests SERVER_CFLAGS='-Werror' BUILD_TLS=yes USE_FAST_FLOAT=yes
- name: install old server for compatibility testing
run: |
cd tests/tmp
wget https://download.valkey.io/releases/valkey-7.2.7-noble-x86_64.tar.gz
tar -xvf valkey-7.2.7-noble-x86_64.tar.gz
- name: test - name: test
run: | run: |
sudo apt-get install tcl8.6 tclx sudo apt-get install tcl8.6 tclx
./runtest --verbose --tags -slow --dump-logs ./runtest --verbose --tags -slow --dump-logs --other-server-path tests/tmp/valkey-7.2.7-noble-x86_64/bin/valkey-server
- name: module api test - name: module api test
run: CFLAGS='-Werror' ./runtest-moduleapi --verbose --dump-logs run: CFLAGS='-Werror' ./runtest-moduleapi --verbose --dump-logs
- name: validate commands.def up to date - name: validate commands.def up to date
@ -30,13 +39,38 @@ jobs:
run: | run: |
./src/valkey-unit-tests ./src/valkey-unit-tests
test-ubuntu-latest-cmake:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1
- name: cmake and make
run: |
sudo apt-get install -y cmake libssl-dev
mkdir -p build-release
cd build-release
cmake -DCMAKE_BUILD_TYPE=Release .. -DBUILD_TLS=yes -DBUILD_UNIT_TESTS=yes
make -j$(nproc)
- name: test
run: |
sudo apt-get install -y tcl8.6 tclx
ln -sf $(pwd)/build-release/bin/valkey-server $(pwd)/src/valkey-server
ln -sf $(pwd)/build-release/bin/valkey-cli $(pwd)/src/valkey-cli
ln -sf $(pwd)/build-release/bin/valkey-benchmark $(pwd)/src/valkey-benchmark
ln -sf $(pwd)/build-release/bin/valkey-server $(pwd)/src/valkey-check-aof
ln -sf $(pwd)/build-release/bin/valkey-server $(pwd)/src/valkey-check-rdb
ln -sf $(pwd)/build-release/bin/valkey-server $(pwd)/src/valkey-sentinel
./runtest --verbose --tags -slow --dump-logs
- name: unit tests
run: |
./build-release/bin/valkey-unit-tests
test-sanitizer-address: test-sanitizer-address:
runs-on: ubuntu-latest runs-on: ubuntu-latest
steps: steps:
- uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1
- name: make - name: make
# build with TLS module just for compilation coverage # build with TLS module just for compilation coverage
run: make SANITIZER=address SERVER_CFLAGS='-Werror' BUILD_TLS=module run: make -j4 SANITIZER=address SERVER_CFLAGS='-Werror' BUILD_TLS=module
- name: testprep - name: testprep
run: sudo apt-get install tcl8.6 tclx -y run: sudo apt-get install tcl8.6 tclx -y
- name: test - name: test
@ -48,10 +82,14 @@ jobs:
runs-on: ubuntu-latest runs-on: ubuntu-latest
steps: steps:
- uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1
- name: make - name: prepare-development-libraries
run: sudo apt-get install librdmacm-dev libibverbs-dev
- name: make-rdma-module
run: make -j4 BUILD_RDMA=module
- name: make-rdma-builtin
run: | run: |
sudo apt-get install librdmacm-dev libibverbs-dev make distclean
make BUILD_RDMA=module make -j4 BUILD_RDMA=yes
- name: clone-rxe-kmod - name: clone-rxe-kmod
run: | run: |
mkdir -p tests/rdma/rxe mkdir -p tests/rdma/rxe
@ -72,30 +110,37 @@ jobs:
- name: make - name: make
run: | run: |
apt-get update && apt-get install -y build-essential apt-get update && apt-get install -y build-essential
make SERVER_CFLAGS='-Werror' make -j4 SERVER_CFLAGS='-Werror'
build-macos-latest: build-macos-latest:
runs-on: macos-latest runs-on: macos-latest
steps: steps:
- uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1
- name: make - name: make
run: make SERVER_CFLAGS='-Werror' # Build with additional upcoming features
run: make -j3 all-with-unit-tests SERVER_CFLAGS='-Werror' USE_FAST_FLOAT=yes
build-32bit: build-32bit:
runs-on: ubuntu-latest runs-on: ubuntu-latest
steps: steps:
- uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1
- name: make - name: make
# Fast float requires C++ 32-bit libraries to compile on 64-bit ubuntu
# machine i.e. "-cross" suffixed version. Cross-compiling c++ to 32-bit
# also requires multilib support for g++ compiler i.e. "-multilib"
# suffixed version of g++. g++-multilib generally includes libstdc++.
# *cross version as well, but it is also added explicitly just in case.
run: | run: |
sudo apt-get update && sudo apt-get install libc6-dev-i386 sudo apt-get update
make SERVER_CFLAGS='-Werror' 32bit sudo apt-get install libc6-dev-i386 libstdc++-11-dev-i386-cross gcc-multilib g++-multilib
make -j4 SERVER_CFLAGS='-Werror' 32bit USE_FAST_FLOAT=yes
build-libc-malloc: build-libc-malloc:
runs-on: ubuntu-latest runs-on: ubuntu-latest
steps: steps:
- uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1
- name: make - name: make
run: make SERVER_CFLAGS='-Werror' MALLOC=libc run: make -j4 SERVER_CFLAGS='-Werror' MALLOC=libc USE_FAST_FLOAT=yes
build-almalinux8-jemalloc: build-almalinux8-jemalloc:
runs-on: ubuntu-latest runs-on: ubuntu-latest
@ -105,8 +150,8 @@ jobs:
- name: make - name: make
run: | run: |
dnf -y install epel-release gcc make procps-ng which dnf -y install epel-release gcc gcc-c++ make procps-ng which
make -j SERVER_CFLAGS='-Werror' make -j4 SERVER_CFLAGS='-Werror' USE_FAST_FLOAT=yes
format-yaml: format-yaml:
runs-on: ubuntu-latest runs-on: ubuntu-latest

View File

@ -1,10 +1,15 @@
name: Clang Format Check name: Clang Format Check
on: on:
push:
pull_request: pull_request:
paths: paths:
- 'src/**' - 'src/**'
concurrency:
group: clang-${{ github.head_ref || github.ref }}
cancel-in-progress: true
jobs: jobs:
clang-format-check: clang-format-check:
runs-on: ubuntu-latest runs-on: ubuntu-latest

View File

@ -4,9 +4,13 @@ name: "Codecov"
# where each PR needs to be compared against the coverage of the head commit # where each PR needs to be compared against the coverage of the head commit
on: [push, pull_request] on: [push, pull_request]
concurrency:
group: codecov-${{ github.head_ref || github.ref }}
cancel-in-progress: true
jobs: jobs:
code-coverage: code-coverage:
runs-on: ubuntu-latest runs-on: ubuntu-22.04
steps: steps:
- name: Checkout repository - name: Checkout repository

View File

@ -4,7 +4,11 @@ on:
pull_request: pull_request:
schedule: schedule:
# run weekly new vulnerability was added to the database # run weekly new vulnerability was added to the database
- cron: '0 0 * * 0' - cron: '0 3 * * 0'
concurrency:
group: codeql-${{ github.head_ref || github.ref }}
cancel-in-progress: true
permissions: permissions:
contents: read contents: read

View File

@ -3,11 +3,17 @@ name: Coverity Scan
on: on:
schedule: schedule:
# Run once daily, since below 500k LOC can have 21 builds per week, per https://scan.coverity.com/faq#frequency # Run once daily, since below 500k LOC can have 21 builds per week, per https://scan.coverity.com/faq#frequency
- cron: '0 0 * * *' - cron: '0 1 * * *'
# Support manual execution # Support manual execution
workflow_dispatch: workflow_dispatch:
concurrency:
group: coverity-${{ github.head_ref || github.ref }}
cancel-in-progress: true
permissions: permissions:
contents: read contents: read
jobs: jobs:
coverity: coverity:
if: github.repository == 'valkey-io/valkey' if: github.repository == 'valkey-io/valkey'

View File

@ -29,6 +29,10 @@ on:
description: "git branch or sha to use" description: "git branch or sha to use"
default: "unstable" default: "unstable"
concurrency:
group: daily-${{ github.head_ref || github.ref }}
cancel-in-progress: true
permissions: permissions:
contents: read contents: read
@ -40,7 +44,7 @@ jobs:
(github.event_name == 'schedule' && github.repository == 'valkey-io/valkey') || (github.event_name == 'schedule' && github.repository == 'valkey-io/valkey') ||
(github.event_name == 'pull_request' && (contains(github.event.pull_request.labels.*.name, 'run-extra-tests') || github.event.pull_request.base.ref != 'unstable'))) && (github.event_name == 'pull_request' && (contains(github.event.pull_request.labels.*.name, 'run-extra-tests') || github.event.pull_request.base.ref != 'unstable'))) &&
!contains(github.event.inputs.skipjobs, 'ubuntu') !contains(github.event.inputs.skipjobs, 'ubuntu')
timeout-minutes: 14400 timeout-minutes: 1440
steps: steps:
- name: prep - name: prep
if: github.event_name == 'workflow_dispatch' if: github.event_name == 'workflow_dispatch'
@ -56,12 +60,12 @@ jobs:
repository: ${{ env.GITHUB_REPOSITORY }} repository: ${{ env.GITHUB_REPOSITORY }}
ref: ${{ env.GITHUB_HEAD_REF }} ref: ${{ env.GITHUB_HEAD_REF }}
- name: make - name: make
run: make all-with-unit-tests SERVER_CFLAGS='-Werror -DSERVER_TEST' run: make all-with-unit-tests SERVER_CFLAGS='-Werror'
- name: testprep - name: testprep
run: sudo apt-get install tcl8.6 tclx run: sudo apt-get install tcl8.6 tclx
- name: test - name: test
if: true && !contains(github.event.inputs.skiptests, 'valkey') if: true && !contains(github.event.inputs.skiptests, 'valkey')
run: ./runtest --accurate --verbose --dump-logs ${{github.event.inputs.test_args}} run: ./runtest ${{ github.event_name != 'pull_request' && '--accurate' || '' }} --verbose --dump-logs ${{github.event.inputs.test_args}}
- name: module api test - name: module api test
if: true && !contains(github.event.inputs.skiptests, 'modules') if: true && !contains(github.event.inputs.skiptests, 'modules')
run: CFLAGS='-Werror' ./runtest-moduleapi --verbose --dump-logs ${{github.event.inputs.test_args}} run: CFLAGS='-Werror' ./runtest-moduleapi --verbose --dump-logs ${{github.event.inputs.test_args}}
@ -71,10 +75,7 @@ jobs:
- name: cluster tests - name: cluster tests
if: true && !contains(github.event.inputs.skiptests, 'cluster') if: true && !contains(github.event.inputs.skiptests, 'cluster')
run: ./runtest-cluster ${{github.event.inputs.cluster_test_args}} run: ./runtest-cluster ${{github.event.inputs.cluster_test_args}}
- name: legacy unit tests - name: unittest
if: true && !contains(github.event.inputs.skiptests, 'unittest')
run: ./src/valkey-server test all --accurate
- name: new unit tests
if: true && !contains(github.event.inputs.skiptests, 'unittest') if: true && !contains(github.event.inputs.skiptests, 'unittest')
run: ./src/valkey-unit-tests --accurate run: ./src/valkey-unit-tests --accurate
@ -85,8 +86,8 @@ jobs:
(github.event_name == 'schedule' && github.repository == 'valkey-io/valkey') || (github.event_name == 'schedule' && github.repository == 'valkey-io/valkey') ||
(github.event_name == 'pull_request' && (contains(github.event.pull_request.labels.*.name, 'run-extra-tests') || github.event.pull_request.base.ref != 'unstable'))) && (github.event_name == 'pull_request' && (contains(github.event.pull_request.labels.*.name, 'run-extra-tests') || github.event.pull_request.base.ref != 'unstable'))) &&
!contains(github.event.inputs.skipjobs, 'fortify') !contains(github.event.inputs.skipjobs, 'fortify')
container: ubuntu:lunar container: ubuntu:plucky
timeout-minutes: 14400 timeout-minutes: 1440
steps: steps:
- name: prep - name: prep
if: github.event_name == 'workflow_dispatch' if: github.event_name == 'workflow_dispatch'
@ -105,12 +106,12 @@ jobs:
run: | run: |
apt-get update && apt-get install -y make gcc-13 apt-get update && apt-get install -y make gcc-13
update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-13 100 update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-13 100
make all-with-unit-tests CC=gcc OPT=-O3 SERVER_CFLAGS='-Werror -DSERVER_TEST -U_FORTIFY_SOURCE -D_FORTIFY_SOURCE=3' make all-with-unit-tests CC=gcc OPT=-O3 SERVER_CFLAGS='-Werror -U_FORTIFY_SOURCE -D_FORTIFY_SOURCE=3'
- name: testprep - name: testprep
run: apt-get install -y tcl8.6 tclx procps run: apt-get install -y tcl8.6 tclx procps
- name: test - name: test
if: true && !contains(github.event.inputs.skiptests, 'valkey') if: true && !contains(github.event.inputs.skiptests, 'valkey')
run: ./runtest --accurate --verbose --dump-logs ${{github.event.inputs.test_args}} run: ./runtest ${{ github.event_name != 'pull_request' && '--accurate' || '' }} --verbose --dump-logs ${{github.event.inputs.test_args}}
- name: module api test - name: module api test
if: true && !contains(github.event.inputs.skiptests, 'modules') if: true && !contains(github.event.inputs.skiptests, 'modules')
run: CFLAGS='-Werror' ./runtest-moduleapi --verbose --dump-logs ${{github.event.inputs.test_args}} run: CFLAGS='-Werror' ./runtest-moduleapi --verbose --dump-logs ${{github.event.inputs.test_args}}
@ -120,10 +121,7 @@ jobs:
- name: cluster tests - name: cluster tests
if: true && !contains(github.event.inputs.skiptests, 'cluster') if: true && !contains(github.event.inputs.skiptests, 'cluster')
run: ./runtest-cluster ${{github.event.inputs.cluster_test_args}} run: ./runtest-cluster ${{github.event.inputs.cluster_test_args}}
- name: legacy unit tests - name: unittest
if: true && !contains(github.event.inputs.skiptests, 'unittest')
run: ./src/valkey-server test all --accurate
- name: new unit tests
if: true && !contains(github.event.inputs.skiptests, 'unittest') if: true && !contains(github.event.inputs.skiptests, 'unittest')
run: ./src/valkey-unit-tests --accurate run: ./src/valkey-unit-tests --accurate
@ -134,7 +132,7 @@ jobs:
(github.event_name == 'schedule' && github.repository == 'valkey-io/valkey') || (github.event_name == 'schedule' && github.repository == 'valkey-io/valkey') ||
(github.event_name == 'pull_request' && (contains(github.event.pull_request.labels.*.name, 'run-extra-tests') || github.event.pull_request.base.ref != 'unstable'))) && (github.event_name == 'pull_request' && (contains(github.event.pull_request.labels.*.name, 'run-extra-tests') || github.event.pull_request.base.ref != 'unstable'))) &&
!contains(github.event.inputs.skipjobs, 'malloc') !contains(github.event.inputs.skipjobs, 'malloc')
timeout-minutes: 14400 timeout-minutes: 1440
steps: steps:
- name: prep - name: prep
if: github.event_name == 'workflow_dispatch' if: github.event_name == 'workflow_dispatch'
@ -155,7 +153,7 @@ jobs:
run: sudo apt-get install tcl8.6 tclx run: sudo apt-get install tcl8.6 tclx
- name: test - name: test
if: true && !contains(github.event.inputs.skiptests, 'valkey') if: true && !contains(github.event.inputs.skiptests, 'valkey')
run: ./runtest --accurate --verbose --dump-logs ${{github.event.inputs.test_args}} run: ./runtest ${{ github.event_name != 'pull_request' && '--accurate' || '' }} --verbose --dump-logs ${{github.event.inputs.test_args}}
- name: module api test - name: module api test
if: true && !contains(github.event.inputs.skiptests, 'modules') if: true && !contains(github.event.inputs.skiptests, 'modules')
run: CFLAGS='-Werror' ./runtest-moduleapi --verbose --dump-logs ${{github.event.inputs.test_args}} run: CFLAGS='-Werror' ./runtest-moduleapi --verbose --dump-logs ${{github.event.inputs.test_args}}
@ -173,7 +171,7 @@ jobs:
(github.event_name == 'schedule' && github.repository == 'valkey-io/valkey') || (github.event_name == 'schedule' && github.repository == 'valkey-io/valkey') ||
(github.event_name == 'pull_request' && (contains(github.event.pull_request.labels.*.name, 'run-extra-tests') || github.event.pull_request.base.ref != 'unstable'))) && (github.event_name == 'pull_request' && (contains(github.event.pull_request.labels.*.name, 'run-extra-tests') || github.event.pull_request.base.ref != 'unstable'))) &&
!contains(github.event.inputs.skipjobs, 'malloc') !contains(github.event.inputs.skipjobs, 'malloc')
timeout-minutes: 14400 timeout-minutes: 1440
steps: steps:
- name: prep - name: prep
if: github.event_name == 'workflow_dispatch' if: github.event_name == 'workflow_dispatch'
@ -194,7 +192,7 @@ jobs:
run: sudo apt-get install tcl8.6 tclx run: sudo apt-get install tcl8.6 tclx
- name: test - name: test
if: true && !contains(github.event.inputs.skiptests, 'valkey') if: true && !contains(github.event.inputs.skiptests, 'valkey')
run: ./runtest --accurate --verbose --dump-logs ${{github.event.inputs.test_args}} run: ./runtest ${{ github.event_name != 'pull_request' && '--accurate' || '' }} --verbose --dump-logs ${{github.event.inputs.test_args}}
- name: module api test - name: module api test
if: true && !contains(github.event.inputs.skiptests, 'modules') if: true && !contains(github.event.inputs.skiptests, 'modules')
run: CFLAGS='-Werror' ./runtest-moduleapi --verbose --dump-logs ${{github.event.inputs.test_args}} run: CFLAGS='-Werror' ./runtest-moduleapi --verbose --dump-logs ${{github.event.inputs.test_args}}
@ -212,7 +210,7 @@ jobs:
(github.event_name == 'schedule' && github.repository == 'valkey-io/valkey') || (github.event_name == 'schedule' && github.repository == 'valkey-io/valkey') ||
(github.event_name == 'pull_request' && (contains(github.event.pull_request.labels.*.name, 'run-extra-tests') || github.event.pull_request.base.ref != 'unstable'))) && (github.event_name == 'pull_request' && (contains(github.event.pull_request.labels.*.name, 'run-extra-tests') || github.event.pull_request.base.ref != 'unstable'))) &&
!contains(github.event.inputs.skipjobs, '32bit') !contains(github.event.inputs.skipjobs, '32bit')
timeout-minutes: 14400 timeout-minutes: 1440
steps: steps:
- name: prep - name: prep
if: github.event_name == 'workflow_dispatch' if: github.event_name == 'workflow_dispatch'
@ -230,12 +228,12 @@ jobs:
- name: make - name: make
run: | run: |
sudo apt-get update && sudo apt-get install libc6-dev-i386 sudo apt-get update && sudo apt-get install libc6-dev-i386
make 32bit SERVER_CFLAGS='-Werror -DSERVER_TEST' make 32bit SERVER_CFLAGS='-Werror'
- name: testprep - name: testprep
run: sudo apt-get install tcl8.6 tclx run: sudo apt-get install tcl8.6 tclx
- name: test - name: test
if: true && !contains(github.event.inputs.skiptests, 'valkey') if: true && !contains(github.event.inputs.skiptests, 'valkey')
run: ./runtest --accurate --verbose --dump-logs ${{github.event.inputs.test_args}} run: ./runtest ${{ github.event_name != 'pull_request' && '--accurate' || '' }} --verbose --dump-logs ${{github.event.inputs.test_args}}
- name: module api test - name: module api test
if: true && !contains(github.event.inputs.skiptests, 'modules') if: true && !contains(github.event.inputs.skiptests, 'modules')
run: | run: |
@ -247,10 +245,7 @@ jobs:
- name: cluster tests - name: cluster tests
if: true && !contains(github.event.inputs.skiptests, 'cluster') if: true && !contains(github.event.inputs.skiptests, 'cluster')
run: ./runtest-cluster ${{github.event.inputs.cluster_test_args}} run: ./runtest-cluster ${{github.event.inputs.cluster_test_args}}
- name: legacy unit tests - name: unittest
if: true && !contains(github.event.inputs.skiptests, 'unittest')
run: ./src/valkey-server test all --accurate
- name: new unit tests
if: true && !contains(github.event.inputs.skiptests, 'unittest') if: true && !contains(github.event.inputs.skiptests, 'unittest')
run: ./src/valkey-unit-tests --accurate run: ./src/valkey-unit-tests --accurate
@ -261,7 +256,7 @@ jobs:
(github.event_name == 'schedule' && github.repository == 'valkey-io/valkey') || (github.event_name == 'schedule' && github.repository == 'valkey-io/valkey') ||
(github.event_name == 'pull_request' && (contains(github.event.pull_request.labels.*.name, 'run-extra-tests') || github.event.pull_request.base.ref != 'unstable'))) && (github.event_name == 'pull_request' && (contains(github.event.pull_request.labels.*.name, 'run-extra-tests') || github.event.pull_request.base.ref != 'unstable'))) &&
!contains(github.event.inputs.skipjobs, 'tls') !contains(github.event.inputs.skipjobs, 'tls')
timeout-minutes: 14400 timeout-minutes: 1440
steps: steps:
- name: prep - name: prep
if: github.event_name == 'workflow_dispatch' if: github.event_name == 'workflow_dispatch'
@ -286,7 +281,7 @@ jobs:
- name: test - name: test
if: true && !contains(github.event.inputs.skiptests, 'valkey') if: true && !contains(github.event.inputs.skiptests, 'valkey')
run: | run: |
./runtest --accurate --verbose --dump-logs --tls --dump-logs ${{github.event.inputs.test_args}} ./runtest ${{ github.event_name != 'pull_request' && '--accurate' || '' }} --verbose --dump-logs --tls --dump-logs ${{github.event.inputs.test_args}}
- name: module api test - name: module api test
if: true && !contains(github.event.inputs.skiptests, 'modules') if: true && !contains(github.event.inputs.skiptests, 'modules')
run: | run: |
@ -307,7 +302,7 @@ jobs:
(github.event_name == 'schedule' && github.repository == 'valkey-io/valkey') || (github.event_name == 'schedule' && github.repository == 'valkey-io/valkey') ||
(github.event_name == 'pull_request' && (contains(github.event.pull_request.labels.*.name, 'run-extra-tests') || github.event.pull_request.base.ref != 'unstable'))) && (github.event_name == 'pull_request' && (contains(github.event.pull_request.labels.*.name, 'run-extra-tests') || github.event.pull_request.base.ref != 'unstable'))) &&
!contains(github.event.inputs.skipjobs, 'tls') !contains(github.event.inputs.skipjobs, 'tls')
timeout-minutes: 14400 timeout-minutes: 1440
steps: steps:
- name: prep - name: prep
if: github.event_name == 'workflow_dispatch' if: github.event_name == 'workflow_dispatch'
@ -324,7 +319,7 @@ jobs:
ref: ${{ env.GITHUB_HEAD_REF }} ref: ${{ env.GITHUB_HEAD_REF }}
- name: make - name: make
run: | run: |
make BUILD_TLS=yes SERVER_CFLAGS='-Werror' make BUILD_TLS=yes SERVER_CFLAGS='-Werror' USE_FAST_FLOAT=yes
- name: testprep - name: testprep
run: | run: |
sudo apt-get install tcl8.6 tclx tcl-tls sudo apt-get install tcl8.6 tclx tcl-tls
@ -332,7 +327,7 @@ jobs:
- name: test - name: test
if: true && !contains(github.event.inputs.skiptests, 'valkey') if: true && !contains(github.event.inputs.skiptests, 'valkey')
run: | run: |
./runtest --accurate --verbose --dump-logs ${{github.event.inputs.test_args}} ./runtest ${{ github.event_name != 'pull_request' && '--accurate' || '' }} --verbose --dump-logs ${{github.event.inputs.test_args}}
- name: module api test - name: module api test
if: true && !contains(github.event.inputs.skiptests, 'modules') if: true && !contains(github.event.inputs.skiptests, 'modules')
run: | run: |
@ -353,7 +348,7 @@ jobs:
(github.event_name == 'schedule' && github.repository == 'valkey-io/valkey') || (github.event_name == 'schedule' && github.repository == 'valkey-io/valkey') ||
(github.event_name == 'pull_request' && (contains(github.event.pull_request.labels.*.name, 'run-extra-tests') || github.event.pull_request.base.ref != 'unstable'))) && (github.event_name == 'pull_request' && (contains(github.event.pull_request.labels.*.name, 'run-extra-tests') || github.event.pull_request.base.ref != 'unstable'))) &&
!contains(github.event.inputs.skipjobs, 'iothreads') !contains(github.event.inputs.skipjobs, 'iothreads')
timeout-minutes: 14400 timeout-minutes: 1440
steps: steps:
- name: prep - name: prep
if: github.event_name == 'workflow_dispatch' if: github.event_name == 'workflow_dispatch'
@ -375,10 +370,48 @@ jobs:
run: sudo apt-get install tcl8.6 tclx run: sudo apt-get install tcl8.6 tclx
- name: test - name: test
if: true && !contains(github.event.inputs.skiptests, 'valkey') if: true && !contains(github.event.inputs.skiptests, 'valkey')
run: ./runtest --config io-threads 2 --config events-per-io-thread 0 --accurate --verbose --tags network --dump-logs ${{github.event.inputs.test_args}} run: ./runtest --io-threads --accurate --verbose --tags network --dump-logs ${{github.event.inputs.test_args}}
- name: cluster tests - name: cluster tests
if: true && !contains(github.event.inputs.skiptests, 'cluster') if: true && !contains(github.event.inputs.skiptests, 'cluster')
run: ./runtest-cluster --config io-threads 2 --config events-per-io-thread 0 ${{github.event.inputs.cluster_test_args}} run: ./runtest-cluster --io-threads ${{github.event.inputs.cluster_test_args}}
test-ubuntu-tls-io-threads:
runs-on: ubuntu-latest
if: |
(github.event_name == 'workflow_dispatch' ||
(github.event_name == 'schedule' && github.repository == 'valkey-io/valkey') ||
(github.event_name == 'pull_request' && (contains(github.event.pull_request.labels.*.name, 'run-extra-tests') || github.event.pull_request.base.ref != 'unstable'))) &&
!contains(github.event.inputs.skipjobs, 'tls') && !contains(github.event.inputs.skipjobs, 'iothreads')
timeout-minutes: 1440
steps:
- name: prep
if: github.event_name == 'workflow_dispatch'
run: |
echo "GITHUB_REPOSITORY=${{github.event.inputs.use_repo}}" >> $GITHUB_ENV
echo "GITHUB_HEAD_REF=${{github.event.inputs.use_git_ref}}" >> $GITHUB_ENV
echo "skipjobs: ${{github.event.inputs.skipjobs}}"
echo "skiptests: ${{github.event.inputs.skiptests}}"
echo "test_args: ${{github.event.inputs.test_args}}"
echo "cluster_test_args: ${{github.event.inputs.cluster_test_args}}"
- uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1
with:
repository: ${{ env.GITHUB_REPOSITORY }}
ref: ${{ env.GITHUB_HEAD_REF }}
- name: make
run: |
make BUILD_TLS=yes SERVER_CFLAGS='-Werror'
- name: testprep
run: |
sudo apt-get install tcl8.6 tclx tcl-tls
./utils/gen-test-certs.sh
- name: test
if: true && !contains(github.event.inputs.skiptests, 'valkey')
run: |
./runtest --io-threads --tls --accurate --verbose --tags network --dump-logs ${{github.event.inputs.test_args}}
- name: cluster tests
if: true && !contains(github.event.inputs.skiptests, 'cluster')
run: |
./runtest-cluster --io-threads --tls ${{github.event.inputs.cluster_test_args}}
test-ubuntu-reclaim-cache:
runs-on: ubuntu-latest
@@ -387,7 +420,7 @@ jobs:
(github.event_name == 'schedule' && github.repository == 'valkey-io/valkey') ||
(github.event_name == 'pull_request' && (contains(github.event.pull_request.labels.*.name, 'run-extra-tests') || github.event.pull_request.base.ref != 'unstable'))) &&
!contains(github.event.inputs.skipjobs, 'specific')
-timeout-minutes: 14400
+timeout-minutes: 1440
steps:
- name: prep
if: github.event_name == 'workflow_dispatch'
@@ -463,7 +496,7 @@ jobs:
(github.event_name == 'schedule' && github.repository == 'valkey-io/valkey') ||
(github.event_name == 'pull_request' && github.event.pull_request.base.ref != 'unstable')) &&
!contains(github.event.inputs.skipjobs, 'valgrind') && !contains(github.event.inputs.skiptests, 'valkey')
-timeout-minutes: 14400
+timeout-minutes: 1440
steps:
- name: prep
if: github.event_name == 'workflow_dispatch'
@@ -479,7 +512,7 @@ jobs:
repository: ${{ env.GITHUB_REPOSITORY }}
ref: ${{ env.GITHUB_HEAD_REF }}
- name: make
-run: make valgrind SERVER_CFLAGS='-Werror -DSERVER_TEST'
+run: make valgrind SERVER_CFLAGS='-Werror'
- name: testprep
run: |
sudo apt-get update
@@ -495,7 +528,7 @@ jobs:
(github.event_name == 'schedule' && github.repository == 'valkey-io/valkey') ||
(github.event_name == 'pull_request' && github.event.pull_request.base.ref != 'unstable')) &&
!contains(github.event.inputs.skipjobs, 'valgrind') && !(contains(github.event.inputs.skiptests, 'modules') && contains(github.event.inputs.skiptests, 'unittest'))
-timeout-minutes: 14400
+timeout-minutes: 1440
steps:
- name: prep
if: github.event_name == 'workflow_dispatch'
@@ -511,7 +544,7 @@ jobs:
repository: ${{ env.GITHUB_REPOSITORY }}
ref: ${{ env.GITHUB_HEAD_REF }}
- name: make
-run: make valgrind SERVER_CFLAGS='-Werror -DSERVER_TEST'
+run: make valgrind all-with-unit-tests SERVER_CFLAGS='-Werror'
- name: testprep
run: |
sudo apt-get update
@@ -522,7 +555,7 @@ jobs:
- name: unittest
if: true && !contains(github.event.inputs.skiptests, 'unittest')
run: |
-valgrind --track-origins=yes --suppressions=./src/valgrind.sup --show-reachable=no --show-possibly-lost=no --leak-check=full --log-file=err.txt ./src/valkey-server test all --valgrind
+valgrind --track-origins=yes --suppressions=./src/valgrind.sup --show-reachable=no --show-possibly-lost=no --leak-check=full --log-file=err.txt ./src/valkey-unit-tests --valgrind
if grep -q 0x err.txt; then cat err.txt; exit 1; fi
test-valgrind-no-malloc-usable-size-test:
@@ -532,7 +565,7 @@ jobs:
(github.event_name == 'schedule' && github.repository == 'valkey-io/valkey') ||
(github.event_name == 'pull_request' && github.event.pull_request.base.ref != 'unstable')) &&
!contains(github.event.inputs.skipjobs, 'valgrind') && !contains(github.event.inputs.skiptests, 'valkey')
-timeout-minutes: 14400
+timeout-minutes: 1440
steps:
- name: prep
if: github.event_name == 'workflow_dispatch'
@@ -548,7 +581,7 @@ jobs:
repository: ${{ env.GITHUB_REPOSITORY }}
ref: ${{ env.GITHUB_HEAD_REF }}
- name: make
-run: make valgrind CFLAGS="-DNO_MALLOC_USABLE_SIZE -DSERVER_TEST" SERVER_CFLAGS='-Werror'
+run: make valgrind CFLAGS="-DNO_MALLOC_USABLE_SIZE" SERVER_CFLAGS='-Werror'
- name: testprep
run: |
sudo apt-get update
@@ -564,7 +597,7 @@ jobs:
(github.event_name == 'schedule' && github.repository == 'valkey-io/valkey') ||
(github.event_name == 'pull_request' && github.event.pull_request.base.ref != 'unstable')) &&
!contains(github.event.inputs.skipjobs, 'valgrind') && !(contains(github.event.inputs.skiptests, 'modules') && contains(github.event.inputs.skiptests, 'unittest'))
-timeout-minutes: 14400
+timeout-minutes: 1440
steps:
- name: prep
if: github.event_name == 'workflow_dispatch'
@@ -580,7 +613,7 @@ jobs:
repository: ${{ env.GITHUB_REPOSITORY }}
ref: ${{ env.GITHUB_HEAD_REF }}
- name: make
-run: make valgrind CFLAGS="-DNO_MALLOC_USABLE_SIZE -DSERVER_TEST" SERVER_CFLAGS='-Werror'
+run: make valgrind all-with-unit-tests CFLAGS="-DNO_MALLOC_USABLE_SIZE" SERVER_CFLAGS='-Werror'
- name: testprep
run: |
sudo apt-get update
@@ -591,7 +624,7 @@ jobs:
- name: unittest
if: true && !contains(github.event.inputs.skiptests, 'unittest')
run: |
-valgrind --track-origins=yes --suppressions=./src/valgrind.sup --show-reachable=no --show-possibly-lost=no --leak-check=full --log-file=err.txt ./src/valkey-server test all --valgrind
+valgrind --track-origins=yes --suppressions=./src/valgrind.sup --show-reachable=no --show-possibly-lost=no --leak-check=full --log-file=err.txt ./src/valkey-unit-tests --valgrind
if grep -q 0x err.txt; then cat err.txt; exit 1; fi
test-sanitizer-address:
@@ -601,8 +634,9 @@ jobs:
(github.event_name == 'schedule' && github.repository == 'valkey-io/valkey') ||
(github.event_name == 'pull_request' && github.event.pull_request.base.ref != 'unstable')) &&
!contains(github.event.inputs.skipjobs, 'sanitizer')
-timeout-minutes: 14400
+timeout-minutes: 1440
strategy:
+fail-fast: false
matrix:
compiler: [gcc, clang]
env:
@@ -622,7 +656,7 @@ jobs:
repository: ${{ env.GITHUB_REPOSITORY }}
ref: ${{ env.GITHUB_HEAD_REF }}
- name: make
-run: make all-with-unit-tests OPT=-O3 SANITIZER=address SERVER_CFLAGS='-DSERVER_TEST -Werror'
+run: make all-with-unit-tests OPT=-O3 SANITIZER=address SERVER_CFLAGS='-Werror'
- name: testprep
run: |
sudo apt-get update
@@ -639,10 +673,7 @@ jobs:
- name: cluster tests
if: true && !contains(github.event.inputs.skiptests, 'cluster')
run: ./runtest-cluster ${{github.event.inputs.cluster_test_args}}
-- name: legacy unit tests
-if: true && !contains(github.event.inputs.skiptests, 'unittest')
-run: ./src/valkey-server test all
-- name: new unit tests
+- name: unittest
if: true && !contains(github.event.inputs.skiptests, 'unittest')
run: ./src/valkey-unit-tests
@@ -653,8 +684,9 @@ jobs:
(github.event_name == 'schedule' && github.repository == 'valkey-io/valkey') ||
(github.event_name == 'pull_request' && github.event.pull_request.base.ref != 'unstable')) &&
!contains(github.event.inputs.skipjobs, 'sanitizer')
-timeout-minutes: 14400
+timeout-minutes: 1440
strategy:
+fail-fast: false
matrix:
compiler: [gcc, clang]
env:
@@ -674,7 +706,7 @@ jobs:
repository: ${{ env.GITHUB_REPOSITORY }}
ref: ${{ env.GITHUB_HEAD_REF }}
- name: make
-run: make all-with-unit-tests OPT=-O3 SANITIZER=undefined SERVER_CFLAGS='-DSERVER_TEST -Werror' LUA_DEBUG=yes # we (ab)use this flow to also check Lua C API violations
+run: make all-with-unit-tests OPT=-O3 SANITIZER=undefined SERVER_CFLAGS='-Werror' LUA_DEBUG=yes # we (ab)use this flow to also check Lua C API violations
- name: testprep
run: |
sudo apt-get update
@@ -691,13 +723,56 @@ jobs:
- name: cluster tests
if: true && !contains(github.event.inputs.skiptests, 'cluster')
run: ./runtest-cluster ${{github.event.inputs.cluster_test_args}}
-- name: legacy unit tests
-if: true && !contains(github.event.inputs.skiptests, 'unittest')
-run: ./src/valkey-server test all --accurate
-- name: new unit tests
+- name: unittest
if: true && !contains(github.event.inputs.skiptests, 'unittest')
run: ./src/valkey-unit-tests --accurate
test-sanitizer-force-defrag:
runs-on: ubuntu-latest
if: |
(github.event_name == 'workflow_dispatch' ||
(github.event_name == 'schedule' && github.repository == 'valkey-io/valkey') ||
(github.event_name == 'pull_request' && github.event.pull_request.base.ref != 'unstable')) &&
!contains(github.event.inputs.skipjobs, 'sanitizer')
timeout-minutes: 1440
strategy:
fail-fast: false
steps:
- name: prep
if: github.event_name == 'workflow_dispatch'
run: |
echo "GITHUB_REPOSITORY=${{github.event.inputs.use_repo}}" >> $GITHUB_ENV
echo "GITHUB_HEAD_REF=${{github.event.inputs.use_git_ref}}" >> $GITHUB_ENV
echo "skipjobs: ${{github.event.inputs.skipjobs}}"
echo "skiptests: ${{github.event.inputs.skiptests}}"
echo "test_args: ${{github.event.inputs.test_args}}"
echo "cluster_test_args: ${{github.event.inputs.cluster_test_args}}"
- uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1
with:
repository: ${{ env.GITHUB_REPOSITORY }}
ref: ${{ env.GITHUB_HEAD_REF }}
- name: make
run: make all-with-unit-tests OPT=-O3 SANITIZER=address DEBUG_FORCE_DEFRAG=yes USE_JEMALLOC=no SERVER_CFLAGS='-Werror'
- name: testprep
run: |
sudo apt-get update
sudo apt-get install tcl8.6 tclx -y
- name: test
if: true && !contains(github.event.inputs.skiptests, 'valkey')
run: ./runtest --accurate --verbose --dump-logs ${{github.event.inputs.test_args}}
- name: module api test
if: true && !contains(github.event.inputs.skiptests, 'modules')
run: CFLAGS='-Werror' ./runtest-moduleapi --verbose --dump-logs ${{github.event.inputs.test_args}}
- name: sentinel tests
if: true && !contains(github.event.inputs.skiptests, 'sentinel')
run: ./runtest-sentinel ${{github.event.inputs.cluster_test_args}}
- name: cluster tests
if: true && !contains(github.event.inputs.skiptests, 'cluster')
run: ./runtest-cluster ${{github.event.inputs.cluster_test_args}}
- name: unittest
if: true && !contains(github.event.inputs.skiptests, 'unittest')
run: ./src/valkey-unit-tests
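The force-defrag job is new in this change. To reproduce it locally, the build and unit-test steps can be run directly; a sketch taken from the job definition above:

```sh
# ASan build with forced active-defrag code paths and jemalloc disabled,
# as in the test-sanitizer-force-defrag job above.
make all-with-unit-tests OPT=-O3 SANITIZER=address DEBUG_FORCE_DEFRAG=yes USE_JEMALLOC=no SERVER_CFLAGS='-Werror'
./src/valkey-unit-tests
```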
test-rpm-distros-jemalloc:
if: |
(github.event_name == 'workflow_dispatch' ||
@@ -726,7 +801,7 @@ jobs:
runs-on: ubuntu-latest
container: ${{ matrix.container }}
-timeout-minutes: 14400
+timeout-minutes: 1440
steps:
- name: prep
@@ -753,7 +828,7 @@ jobs:
run: dnf -y install tcl tcltls
- name: test
if: true && !contains(github.event.inputs.skiptests, 'valkey')
-run: ./runtest --accurate --verbose --dump-logs ${{github.event.inputs.test_args}}
+run: ./runtest ${{ github.event_name != 'pull_request' && '--accurate' || '' }} --verbose --dump-logs ${{github.event.inputs.test_args}}
- name: module api test
if: true && !contains(github.event.inputs.skiptests, 'modules')
run: CFLAGS='-Werror' ./runtest-moduleapi --verbose --dump-logs ${{github.event.inputs.test_args}}
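A note on the expression that replaces the unconditional `--accurate` flag here and in the jobs below: GitHub Actions expressions have no ternary operator, so `&&`/`||` chaining is the usual way to pick between two values. A minimal, hypothetical step showing the idiom in isolation (the step name is illustrative only):

```yaml
steps:
  # Prints '--accurate' for scheduled/dispatch runs and an empty string on pull requests.
  - name: show-flag
    run: echo "${{ github.event_name != 'pull_request' && '--accurate' || '' }}"
```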
@@ -792,7 +867,7 @@ jobs:
runs-on: ubuntu-latest
container: ${{ matrix.container }}
-timeout-minutes: 14400
+timeout-minutes: 1440
steps:
- name: prep
@@ -822,7 +897,7 @@ jobs:
- name: test
if: true && !contains(github.event.inputs.skiptests, 'valkey')
run: |
-./runtest --accurate --verbose --dump-logs --tls-module --dump-logs ${{github.event.inputs.test_args}}
+./runtest ${{ github.event_name != 'pull_request' && '--accurate' || '' }} --verbose --dump-logs --tls-module --dump-logs ${{github.event.inputs.test_args}}
- name: module api test
if: true && !contains(github.event.inputs.skiptests, 'modules')
run: |
@@ -864,7 +939,7 @@ jobs:
runs-on: ubuntu-latest
container: ${{ matrix.container }}
-timeout-minutes: 14400
+timeout-minutes: 1440
steps:
- name: prep
@@ -915,7 +990,7 @@ jobs:
(github.event_name == 'schedule' && github.repository == 'valkey-io/valkey') ||
(github.event_name == 'pull_request' && (contains(github.event.pull_request.labels.*.name, 'run-extra-tests') || github.event.pull_request.base.ref != 'unstable'))) &&
!contains(github.event.inputs.skipjobs, 'macos') && !(contains(github.event.inputs.skiptests, 'valkey') && contains(github.event.inputs.skiptests, 'modules'))
-timeout-minutes: 14400
+timeout-minutes: 1440
steps:
- name: prep
if: github.event_name == 'workflow_dispatch'
@@ -934,7 +1009,7 @@ jobs:
run: make SERVER_CFLAGS='-Werror'
- name: test
if: true && !contains(github.event.inputs.skiptests, 'valkey')
-run: ./runtest --accurate --verbose --tags -ipv6 --clients 1 --no-latency --dump-logs ${{github.event.inputs.test_args}}
+run: ./runtest ${{ github.event_name != 'pull_request' && '--accurate' || '' }} --verbose --clients 1 --no-latency --dump-logs ${{github.event.inputs.test_args}}
- name: module api test
if: true && !contains(github.event.inputs.skiptests, 'modules')
run: CFLAGS='-Werror' ./runtest-moduleapi --verbose --clients 1 --no-latency --dump-logs ${{github.event.inputs.test_args}}
@@ -946,7 +1021,7 @@ jobs:
(github.event_name == 'schedule' && github.repository == 'valkey-io/valkey') ||
(github.event_name == 'pull_request' && (contains(github.event.pull_request.labels.*.name, 'run-extra-tests') || github.event.pull_request.base.ref != 'unstable'))) &&
!contains(github.event.inputs.skipjobs, 'macos') && !contains(github.event.inputs.skiptests, 'sentinel')
-timeout-minutes: 14400
+timeout-minutes: 1440
steps:
- name: prep
if: github.event_name == 'workflow_dispatch'
@@ -974,7 +1049,7 @@ jobs:
(github.event_name == 'schedule' && github.repository == 'valkey-io/valkey') ||
(github.event_name == 'pull_request' && (contains(github.event.pull_request.labels.*.name, 'run-extra-tests') || github.event.pull_request.base.ref != 'unstable'))) &&
!contains(github.event.inputs.skipjobs, 'macos') && !contains(github.event.inputs.skiptests, 'cluster')
-timeout-minutes: 14400
+timeout-minutes: 1440
steps:
- name: prep
if: github.event_name == 'workflow_dispatch'
@@ -997,15 +1072,16 @@ jobs:
build-macos:
strategy:
+fail-fast: false
matrix:
-os: [macos-12, macos-14]
+os: [macos-13, macos-14]
runs-on: ${{ matrix.os }}
if: |
(github.event_name == 'workflow_dispatch' ||
(github.event_name == 'schedule' && github.repository == 'valkey-io/valkey') ||
(github.event_name == 'pull_request' && (contains(github.event.pull_request.labels.*.name, 'run-extra-tests') || github.event.pull_request.base.ref != 'unstable'))) &&
!contains(github.event.inputs.skipjobs, 'macos')
-timeout-minutes: 14400
+timeout-minutes: 1440
steps:
- uses: maxim-lobanov/setup-xcode@60606e260d2fc5762a71e64e74b2174e8ea3c8bd # v1.6.0
with:
@@ -1024,16 +1100,16 @@ jobs:
repository: ${{ env.GITHUB_REPOSITORY }}
ref: ${{ env.GITHUB_HEAD_REF }}
- name: make
-run: make SERVER_CFLAGS='-Werror -DSERVER_TEST'
+run: make SERVER_CFLAGS='-Werror'
test-freebsd:
-runs-on: macos-12
+runs-on: macos-13
if: |
(github.event_name == 'workflow_dispatch' ||
(github.event_name == 'schedule' && github.repository == 'valkey-io/valkey') ||
(github.event_name == 'pull_request' && (contains(github.event.pull_request.labels.*.name, 'run-extra-tests') || github.event.pull_request.base.ref != 'unstable'))) &&
!contains(github.event.inputs.skipjobs, 'freebsd')
-timeout-minutes: 14400
+timeout-minutes: 1440
steps:
- name: prep
if: github.event_name == 'workflow_dispatch'
@@ -1086,7 +1162,7 @@ jobs:
run: apk add tcl procps tclx
- name: test
if: true && !contains(github.event.inputs.skiptests, 'valkey')
-run: ./runtest --accurate --verbose --dump-logs ${{github.event.inputs.test_args}}
+run: ./runtest ${{ github.event_name != 'pull_request' && '--accurate' || '' }} --verbose --dump-logs ${{github.event.inputs.test_args}}
- name: module api test
if: true && !contains(github.event.inputs.skiptests, 'modules')
run: CFLAGS='-Werror' ./runtest-moduleapi --verbose --dump-logs ${{github.event.inputs.test_args}}
@@ -1127,7 +1203,7 @@ jobs:
run: apk add tcl procps tclx
- name: test
if: true && !contains(github.event.inputs.skiptests, 'valkey')
-run: ./runtest --accurate --verbose --dump-logs ${{github.event.inputs.test_args}}
+run: ./runtest ${{ github.event_name != 'pull_request' && '--accurate' || '' }} --verbose --dump-logs ${{github.event.inputs.test_args}}
- name: module api test
if: true && !contains(github.event.inputs.skiptests, 'modules')
run: CFLAGS='-Werror' ./runtest-moduleapi --verbose --dump-logs ${{github.event.inputs.test_args}}
@@ -1140,11 +1216,11 @@ jobs:
reply-schemas-validator:
runs-on: ubuntu-latest
-timeout-minutes: 14400
+timeout-minutes: 1440
if: |
(github.event_name == 'workflow_dispatch' ||
(github.event_name == 'schedule' && github.repository == 'valkey-io/valkey') ||
-(github.event_name == 'pull_request' && github.event.pull_request.base.ref != 'unstable')) &&
+(github.event_name == 'pull_request' && (contains(github.event.pull_request.labels.*.name, 'run-extra-tests') || github.event.pull_request.base.ref != 'unstable'))) &&
!contains(github.event.inputs.skipjobs, 'reply-schema')
steps:
- name: prep
@@ -1186,7 +1262,7 @@ jobs:
notify-about-job-results:
runs-on: ubuntu-latest
if: always() && github.event_name == 'schedule' && github.repository == 'valkey-io/valkey'
-needs: [test-ubuntu-jemalloc, test-ubuntu-jemalloc-fortify, test-ubuntu-libc-malloc, test-ubuntu-no-malloc-usable-size, test-ubuntu-32bit, test-ubuntu-tls, test-ubuntu-tls-no-tls, test-ubuntu-io-threads, test-ubuntu-reclaim-cache, test-valgrind-test, test-valgrind-misc, test-valgrind-no-malloc-usable-size-test, test-valgrind-no-malloc-usable-size-misc, test-sanitizer-address, test-sanitizer-undefined, test-rpm-distros-jemalloc, test-rpm-distros-tls-module, test-rpm-distros-tls-module-no-tls, test-macos-latest, test-macos-latest-sentinel, test-macos-latest-cluster, build-macos, test-freebsd, test-alpine-jemalloc, test-alpine-libc-malloc, reply-schemas-validator]
+needs: [test-ubuntu-jemalloc, test-ubuntu-jemalloc-fortify, test-ubuntu-libc-malloc, test-ubuntu-no-malloc-usable-size, test-ubuntu-32bit, test-ubuntu-tls, test-ubuntu-tls-no-tls, test-ubuntu-io-threads, test-ubuntu-tls-io-threads, test-ubuntu-reclaim-cache, test-valgrind-test, test-valgrind-misc, test-valgrind-no-malloc-usable-size-test, test-valgrind-no-malloc-usable-size-misc, test-sanitizer-address, test-sanitizer-undefined, test-sanitizer-force-defrag, test-rpm-distros-jemalloc, test-rpm-distros-tls-module, test-rpm-distros-tls-module-no-tls, test-macos-latest, test-macos-latest-sentinel, test-macos-latest-cluster, build-macos, test-freebsd, test-alpine-jemalloc, test-alpine-libc-malloc, reply-schemas-validator]
steps:
- name: Collect job status
run: |


@@ -4,7 +4,11 @@ on:
pull_request:
push:
schedule:
-- cron: '0 0 * * *'
+- cron: '0 2 * * *'
+concurrency:
+group: external-${{ github.head_ref || github.ref }}
+cancel-in-progress: true
permissions:
contents: read
@@ -13,7 +17,7 @@ jobs:
test-external-standalone:
runs-on: ubuntu-latest
if: github.event_name != 'schedule' || github.repository == 'valkey-io/valkey'
-timeout-minutes: 14400
+timeout-minutes: 1440
steps:
- uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1
- name: Build
@@ -30,7 +34,7 @@ jobs:
--tags -slow
- name: Archive server log
if: ${{ failure() }}
-uses: actions/upload-artifact@a8a3f3ad30e3422c9c7b888a15615d19a852ae32 # v3.1.3
+uses: actions/upload-artifact@6f51ac03b9356f520e9adb1b1b7802705f340c2b # v4.5.0
with:
name: test-external-standalone-log
path: external-server.log
@@ -38,7 +42,7 @@ jobs:
test-external-cluster:
runs-on: ubuntu-latest
if: github.event_name != 'schedule' || github.repository == 'valkey-io/valkey'
-timeout-minutes: 14400
+timeout-minutes: 1440
steps:
- uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1
- name: Build
@@ -58,7 +62,7 @@ jobs:
--tags -slow
- name: Archive server log
if: ${{ failure() }}
-uses: actions/upload-artifact@a8a3f3ad30e3422c9c7b888a15615d19a852ae32 # v3.1.3
+uses: actions/upload-artifact@6f51ac03b9356f520e9adb1b1b7802705f340c2b # v4.5.0
with:
name: test-external-cluster-log
path: external-server.log
@@ -66,7 +70,7 @@ jobs:
test-external-nodebug:
runs-on: ubuntu-latest
if: github.event_name != 'schedule' || github.repository == 'valkey-io/valkey'
-timeout-minutes: 14400
+timeout-minutes: 1440
steps:
- uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1
- name: Build
@@ -82,7 +86,7 @@ jobs:
--tags "-slow -needs:debug"
- name: Archive server log
if: ${{ failure() }}
-uses: actions/upload-artifact@a8a3f3ad30e3422c9c7b888a15615d19a852ae32 # v3.1.3
+uses: actions/upload-artifact@6f51ac03b9356f520e9adb1b1b7802705f340c2b # v4.5.0
with:
name: test-external-nodebug-log
path: external-server.log


@@ -8,6 +8,10 @@ on:
paths:
- 'src/commands/*.json'
+concurrency:
+group: reply-schemas-linter-${{ github.head_ref || github.ref }}
+cancel-in-progress: true
permissions:
contents: read


@@ -9,6 +9,10 @@ on:
push:
pull_request:
+concurrency:
+group: spellcheck-${{ github.head_ref || github.ref }}
+cancel-in-progress: true
permissions:
contents: read
@@ -22,7 +26,7 @@ jobs:
uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1
- name: Install typos
-uses: taiki-e/install-action@cd5df4de2e75f3b819ba55f780f7bb8cd4a05a41 # v2.32.2
+uses: taiki-e/install-action@fe9759bf4432218c779595708e80a1aadc85cedc # v2.46.10
with:
tool: typos

.gitignore

@@ -48,3 +48,9 @@ redis.code-workspace
nodes*.conf
tests/cluster/tmp/*
tests/rdma/rdma-test
tags
build/
build-debug/
build-release/
cmake-build-debug/
cmake-build-release/


@@ -1,16 +0,0 @@
Hello! This file is just a placeholder, since this is the "unstable" branch
of Valkey, the place where all the development happens.
There is no release notes for this branch, it gets forked into another branch
every time there is a partial feature freeze in order to eventually create
a new stable release.
Usually "unstable" is stable enough for you to use it in development environments
however you should never use it in production environments. It is possible
to download the latest stable release here:
https://valkey.io/download/
More information is available at https://valkey.io
Happy hacking!

BUGS

@@ -1 +0,0 @@
Please check https://github.com/valkey-io/valkey/issues

CMakeLists.txt (new file)

@@ -0,0 +1,44 @@
cmake_minimum_required(VERSION 3.10)
# Must be done first
if (APPLE)
# Force clang compiler on macOS
find_program(CLANGPP "clang++")
find_program(CLANG "clang")
if (CLANG AND CLANGPP)
message(STATUS "Found ${CLANGPP}, ${CLANG}")
set(CMAKE_CXX_COMPILER ${CLANGPP})
set(CMAKE_C_COMPILER ${CLANG})
endif ()
endif ()
# Options
option(BUILD_UNIT_TESTS "Build valkey-unit-tests" OFF)
option(BUILD_TEST_MODULES "Build all test modules" OFF)
option(BUILD_EXAMPLE_MODULES "Build example modules" OFF)
set(CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} "${CMAKE_SOURCE_DIR}/cmake/Modules/")
project("valkey")
set(CMAKE_C_STANDARD 11)
set(CMAKE_C_STANDARD_REQUIRED ON)
set(CMAKE_C_EXTENSIONS ON)
include(ValkeySetup)
add_subdirectory(src)
add_subdirectory(tests)
# Include the packaging module
include(Packaging)
# Clear cached variables from the cache
unset(BUILD_TESTS CACHE)
unset(CLANGPP CACHE)
unset(CLANG CACHE)
unset(BUILD_RDMA_MODULE CACHE)
unset(BUILD_TLS_MODULE CACHE)
unset(BUILD_UNIT_TESTS CACHE)
unset(BUILD_TEST_MODULES CACHE)
unset(BUILD_EXAMPLE_MODULES CACHE)
unset(USE_TLS CACHE)
unset(DEBUG_FORCE_DEFRAG CACHE)
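The new CMake build is driven by the options declared above. As an illustration (the flag values are arbitrary; the option names come from this file), an out-of-tree configure-and-build might look like:

```sh
# Configure into build-release/ (one of the directories now git-ignored)
# and enable the optional unit-test binary.
cmake -S . -B build-release -DBUILD_UNIT_TESTS=ON
cmake --build build-release
```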


@@ -49,7 +49,7 @@ representative at an online or offline event.
Enforcement
Instances of abusive, harassing, or otherwise unacceptable behavior may be
reported to the community leaders responsible for enforcement at
-this email address: placeholderkv@gmail.com.
+this email address: maintainers@lists.valkey.io.
All complaints will be reviewed and investigated promptly and fairly.
All community leaders are obligated to respect the privacy and security of the
reporter of any incident.


@@ -79,10 +79,9 @@ you need to ensure that the contribution is in accordance with the DCO.
1. If it is a major feature or a semantical change, please don't start coding
straight away: if your feature is not a conceptual fit you'll lose a lot of
-time writing the code without any reason. Start by posting in the mailing list
-and creating an issue at Github with the description of, exactly, what you want
-to accomplish and why. Use cases are important for features to be accepted.
-Here you can see if there is consensus about your idea.
+time writing the code without any reason. Start by creating an issue at Github with the
+description of, exactly, what you want to accomplish and why. Use cases are important for
+features to be accepted. Here you can see if there is consensus about your idea.
2. If in step 1 you get an acknowledgment from the project leaders, use the following
procedure to submit a patch:

COPYING

@@ -2,6 +2,22 @@
BSD 3-Clause License
+Copyright (c) 2024-present, Futriix contributors
+All rights reserved.
+Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
+* Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
+* Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
+* Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+# License 2
+BSD 3-Clause License
Copyright (c) 2024-present, Valkey contributors
All rights reserved.
@@ -13,11 +29,11 @@ Redistribution and use in source and binary forms, with or without modification,
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-# License 2
+# License 3
BSD 3-Clause License
-Copyright (c) 2006-2020, Salvatore Sanfilippo
+Copyright (c) 2006-2020, Redis Ltd.
All rights reserved.
Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:


@@ -2,7 +2,9 @@
The Valkey project is managed by a Technical Steering Committee (TSC) composed of the maintainers of the Valkey repository.
The Valkey project includes all of the current and future repositories under the Valkey-io organization.
-Maintainers are defined as individuals with full commit access to a repository, which shall be in sync with the MAINTAINERS.md file in a given projects repository.
+Committers are defined as individuals with write access to the code within a repository.
+Maintainers are defined as individuals with full access to a repository and own its governance.
+Both maintainers and committers should be clearly listed in the MAINTAINERS.md file in a given projects repository.
Maintainers of other repositories within the Valkey project are not members of the TSC unless explicitly added.
## Technical Steering Committee


@@ -1 +0,0 @@
See README

Logo-Futriix.png (new binary file, 22 KiB; content not shown)

View File

@@ -16,8 +16,16 @@ Maintainers listed in alphabetical order by their github ID.
| Zhao Zhao | [soloestoy](https://github.com/soloestoy) | Alibaba |
| Viktor Söderqvist | [zuiderkwast](https://github.com/zuiderkwast) | Ericsson |
+## Current Committers
+Committers listed in alphabetical order by their github ID.
+| Committer | GitHub ID | Affiliation |
+| ------------------- | ----------------------------------------------- | ----------- |
+| Harkrishn Patro | [hpatro](https://github.com/hpatro) | Amazon |
+| Ran Shidlansik | [ranshid](https://github.com/ranshid) | Amazon |
-### Former Maintainers
+### Former Maintainers and Committers
| Maintainer | GitHub ID | Affiliation |
| ------------------- | ----------------------------------------------- | ----------- |
MANIFESTO

@@ -1,106 +0,0 @@
[Note: This was the manifesto of Redis. It does not represent the ideals of Valkey, but is
kept in remembrance for the ideals that Salvatore had for the project.]
Redis Manifesto
===============
1 - A DSL for Abstract Data Types. Redis is a DSL (Domain Specific Language)
that manipulates abstract data types and implemented as a TCP daemon.
Commands manipulate a key space where keys are binary-safe strings and
values are different kinds of abstract data types. Every data type
represents an abstract version of a fundamental data structure. For instance
Redis Lists are an abstract representation of linked lists. In Redis, the
essence of a data type isn't just the kind of operations that the data types
support, but also the space and time complexity of the data type and the
operations performed upon it.
2 - Memory storage is #1. The Redis data set, composed of defined key-value
pairs, is primarily stored in the computer's memory. The amount of memory in
all kinds of computers, including entry-level servers, is increasing
significantly each year. Memory is fast, and allows Redis to have very
predictable performance. Datasets composed of 10k or 40 millions keys will
perform similarly. Complex data types like Redis Sorted Sets are easy to
implement and manipulate in memory with good performance, making Redis very
simple. Redis will continue to explore alternative options (where data can
be optionally stored on disk, say) but the main goal of the project remains
the development of an in-memory database.
3 - Fundamental data structures for a fundamental API. The Redis API is a direct
consequence of fundamental data structures. APIs can often be arbitrary but
not an API that resembles the nature of fundamental data structures. If we
ever meet intelligent life forms from another part of the universe, they'll
likely know, understand and recognize the same basic data structures we have
in our computer science books. Redis will avoid intermediate layers in API,
so that the complexity is obvious and more complex operations can be
performed as the sum of the basic operations.
4 - We believe in code efficiency. Computers get faster and faster, yet we
believe that abusing computing capabilities is not wise: the amount of
operations you can do for a given amount of energy remains anyway a
significant parameter: it allows to do more with less computers and, at
the same time, having a smaller environmental impact. Similarly Redis is
able to "scale down" to smaller devices. It is perfectly usable in a
Raspberry Pi and other small ARM based computers. Faster code having
just the layers of abstractions that are really needed will also result,
often, in more predictable performances. We think likewise about memory
usage, one of the fundamental goals of the Redis project is to
incrementally build more and more memory efficient data structures, so that
problems that were not approachable in RAM in the past will be perfectly
fine to handle in the future.
5 - Code is like a poem; it's not just something we write to reach some
practical result. Sometimes people that are far from the Redis philosophy
suggest using other code written by other authors (frequently in other
languages) in order to implement something Redis currently lacks. But to us
this is like if Shakespeare decided to end Enrico IV using the Paradiso from
the Divina Commedia. Is using any external code a bad idea? Not at all. Like
in "One Thousand and One Nights" smaller self contained stories are embedded
in a bigger story, we'll be happy to use beautiful self contained libraries
when needed. At the same time, when writing the Redis story we're trying to
write smaller stories that will fit in to other code.
6 - We're against complexity. We believe designing systems is a fight against
complexity. We'll accept to fight the complexity when it's worthwhile but
we'll try hard to recognize when a small feature is not worth 1000s of lines
of code. Most of the time the best way to fight complexity is by not
creating it at all. Complexity is also a form of lock-in: code that is
very hard to understand cannot be modified by users in an independent way
regardless of the license. One of the main Redis goals is to remain
understandable, enough for a single programmer to have a clear idea of how
it works in detail just reading the source code for a couple of weeks.
7 - Threading is not a silver bullet. Instead of making Redis threaded we
believe on the idea of an efficient (mostly) single threaded Redis core.
Multiple of such cores, that may run in the same computer or may run
in multiple computers, are abstracted away as a single big system by
higher order protocols and features: Redis Cluster and the upcoming
Redis Proxy are our main goals. A shared nothing approach is not just
much simpler (see the previous point in this document), is also optimal
in NUMA systems. In the specific case of Redis it allows for each instance
to have a more limited amount of data, making the Redis persist-by-fork
approach more sounding. In the future we may explore parallelism only for
I/O, which is the low hanging fruit: minimal complexity could provide an
improved single process experience.
8 - Two levels of API. The Redis API has two levels: 1) a subset of the API fits
naturally into a distributed version of Redis and 2) a more complex API that
supports multi-key operations. Both are useful if used judiciously but
there's no way to make the more complex multi-keys API distributed in an
opaque way without violating our other principles. We don't want to provide
the illusion of something that will work magically when actually it can't in
all cases. Instead we'll provide commands to quickly migrate keys from one
instance to another to perform multi-key operations and expose the
trade-offs to the user.
9 - We optimize for joy. We believe writing code is a lot of hard work, and the
only way it can be worth is by enjoying it. When there is no longer joy in
writing code, the best thing to do is stop. To prevent this, we'll avoid
taking paths that will make Redis less of a joy to develop.
10 - All the above points are put together in what we call opportunistic
programming: trying to get the most for the user with minimal increases
in complexity (hanging fruits). Solve 95% of the problem with 5% of the
code when it is acceptable. Avoid a fixed schedule but follow the flow of
user requests, inspiration, Redis internal readiness for certain features
(sometimes many past changes reach a critical point making a previously
complex feature very easy to obtain).

README.md

@@ -1,261 +1,408 @@
[![codecov](https://codecov.io/gh/valkey-io/valkey/graph/badge.svg?token=KYYSJAYC5F)](https://codecov.io/gh/valkey-io/valkey) <!-- Improved compatibility of К началу link: See: https://github.com/othneildrew/Best-README-Template/pull/73 -->
<a id="readme-top"></a>
This README is under construction as we work to build a new community driven high performance key-value store. <!-- PROJECT LOGO -->
<br />
<div align="center">
<!-- <a href="https://github.com/othneildrew/Best-README-Template"> -->
<img src="Logo-Futriix.png" height=100></img>
</a>
This project was forked from the open source Redis project right before the transition to their new source available licenses. <h3 align="center">Futriix</h3>
This README is just a fast *quick start* document. We are currently working on a more permanent documentation page. <p align="center">
Futriix's полная документация (команды идентичны)
<br />
<a href="https://valkey.io/"><strong>Изучить полную документацию</strong></a>
<br />
<a href="">Сообщить об ошибке</a>
&middot;
<a href="">Предложение новой функциональности</a>
</p>
</div>
What is Valkey? ## Краткая документация проекта Futriix
--------------
Valkey is a high-performance data structure server that primarily serves key/value workloads.
It supports a wide range of native structures and an extensible plugin system for adding new data structures and access patterns.
Building Valkey <!-- TABLE OF CONTENTS -->
-------------- <br>
<details>
Valkey can be compiled and used on Linux, OSX, OpenBSD, NetBSD, FreeBSD. <summary><b>Содержание</b></summary>
We support big endian and little endian architectures, and both 32 bit <ol>
and 64 bit systems. <li>
<a href="#о-проекте">О проекте</a>
It may compile on Solaris derived systems (for instance SmartOS) but our </li>
support for this platform is *best effort* and Valkey is not guaranteed to <li><a href="#подготовка">Подготовка</a></li>
work as well as in Linux, OSX, and \*BSD. <li><a href="#компиляция">Компиляция</a></li>
<li><a href="#использование">Использование</a></li>
It is as simple as: <li><a href="#кластер">Кластер</a></li>
<li><a href="#дорожная-карта">Дорожная карта</a></li>
% make <li><a href="#вклад">Вклад</a></li>
<li><a href="#лицензия">Лицензия</a></li>
To build with TLS support, you'll need OpenSSL development libraries (e.g. <li><a href="#контакты">Контакты</a></li>
libssl-dev on Debian/Ubuntu) and run: </ol>
</details>
% make BUILD_TLS=yes
To build with experimental RDMA support you'll need RDMA development libraries
(e.g. librdmacm-dev and libibverbs-dev on Debian/Ubuntu). For now, Valkey only
supports RDMA as connection module mode. Run:
% make BUILD_RDMA=module
To build with systemd support, you'll need systemd development libraries (such
as libsystemd-dev on Debian/Ubuntu or systemd-devel on CentOS) and run:
% make USE_SYSTEMD=yes
To append a suffix to Valkey program names, use:
% make PROG_SUFFIX="-alt"
You can build a 32 bit Valkey binary using:
% make 32bit
After building Valkey, it is a good idea to test it using:
% make test
If TLS is built, running the tests with TLS enabled (you will need `tcl-tls`
installed):
% ./utils/gen-test-certs.sh
% ./runtest --tls
Fixing build problems with dependencies or cached build options <!-- ABOUT THE PROJECT -->
--------- ## О проекте
Valkey has some dependencies which are included in the `deps` directory. Проект Futriix является форком проекта Valkey.
`make` does not automatically rebuild dependencies even if something in Futriix-Распределённая СУБД на языке "C", построенная на базе [Valkey](https://valkey.io/), с поддержкой модулей на базе Искусственного интеллекта и модулей на языке Golang.
the source code of dependencies changes.
When you update the source code with `git pull` or when code inside the СУБД поддерживает модуль c распределённым [JSON](https://source.futriix.ru/gvsafronov/futriix-json), [ИИ-модуль "Виртуальный помощник"](), [SQL-модуль](https://source.futriix.ru/gvsafronov/fdx).
dependencies tree is modified in any other way, make sure to use the following
command in order to really clean everything and rebuild from scratch:
% make distclean Ниже приведён пример того, инструкции по настройке вашего проекта локально.
Чтобы запустить локальную копию проекта, выполните следующие простые шаги.
This will clean: jemalloc, lua, hiredis, linenoise and other dependencies.
Also if you force certain build options like 32bit target, no C compiler
optimizations (for debugging purposes), and other similar build time options,
those options are cached indefinitely until you issue a `make distclean`
command.
Fixing problems building 32 bit binaries ### Подготовка
---------
If after building Valkey with a 32 bit target you need to rebuild it Ниже приведены шаги, которые помогут вам скомпилировать и установить Futriix.
with a 64 bit target, or the other way around, you need to perform a * Устанавливаем язык программирования C, соопутствующие утилиты (autoconf и другие)
`make distclean` in the root directory of the Valkey distribution.
In case of build errors when trying to build a 32 bit binary of Valkey, try ```sh
the following steps: unix:$ sudo apt update && sudo apt upgrade
unix:$ sudo apt install build-essential nasm autotools-dev autoconf libjemalloc-dev tcl tcl-dev uuid-dev libcurl4-openssl-dev git
```
* Install the package libc6-dev-i386 (also try g++-multilib). * Устанавливаем язык программирования Golang по инструкции с [официального сайта](https://go.dev/doc/install)
* Try using the following command line instead of `make 32bit`:
`make CFLAGS="-m32 -march=native" LDFLAGS="-m32"`
Allocator ### Компиляция
---------
Selecting a non-default memory allocator when building Valkey is done by setting Для того, чтобы успешно скомпилировать проект, выполните шаги ниже:
the `MALLOC` environment variable. Valkey is compiled and linked against libc
malloc by default, with the exception of jemalloc being the default on Linux
systems. This default was picked because jemalloc has proven to have fewer
fragmentation problems than libc malloc.
To force compiling against libc malloc, use: 1. Скопировать репозиторий
```sh
git clone https://source.futriix.ru/gvsafronov/Futriix
```
2. Перейти в каталог с исходном кодом src
```sh
cd src/
```
<p align="right">(<a href="#readme-top">К началу</a>)</p>
% make MALLOC=libc
3. Скомпилировать Futriix с помощью утилиты Make
Futriix может быть скомпилирован для Linux, OSX, OpenBSD, NetBSD, FreeBSD.
Мы поддерживаем архитектуры endian и little endian, и 32-битные и 64-битные системы.
```sh
unix:$ make
```
Для сборки проекта с поддержкой TLS, вам необходима библиотека OpenSSL (например,
libssl-dev для Debian/Ubuntu).
Для сборки проекта с поддержкой TLS выпоните команды ниже:
```sh
unix:$ make BUILD_TLS=yes
```
To build TLS as Futriix module:
```sh
unix:$ make BUILD_TLS=module
```
Для сборки проекта с экспериментальной поддержкой RDMA вам необходимо установить библиотеку разработки RDMA
(например, librdmacm-dev and libibverbs-dev для Debian/Ubuntu).
Для сборки Futriix c поддержкой RDMA просто выполните следующие команды:
```sh
unix:$ make BUILD_RDMA=yes
```
To build RDMA as Futriix module:
```sh
unix:$ make BUILD_RDMA=module
```
Для сборки проекта с поддержкой systemd, вам необходимо установить соответсвующие библиотеки разработки (такие как
libsystemd-dev для Debian/Ubuntu или systemd-devel для CentOS) и выполнить следующие команды:
```sh
unix:$ make USE_SYSTEMD=yes
```
Для добавления суффикса в имя проекта Futriix, выполните следующие команды:
```sh
unix:$ make PROG_SUFFIX="-alt"
```
После сборки Futriix, мы рекомендуем запустить утилиту для проверки корректности сборки:
```sh
unix:$ make test
```
Команда выше запустит интегрированные в проект тесты. Additional tests are started using:
```sh
unix:$ make test-unit # Юнит-тесты
unix:$ make test-modules # Тесты модулей API
unix:$ make test-cluster # Тест Futriix для проверки работы кластера
```
Более подробную информацию вы найдёте ознакомившись со следующими источниками:
[tests/README.md](tests/README.md) а также [src/unit/README.md](src/unit/README.md).
<p align="right">(<a href="#readme-top">К началу</a>)</p>
## Исправление проблем сборки с зависимостями или кэшированными параметрами сборки.
Futriix содержит некоторые зависимости, которые хранятся в директории `deps`.
Утилита `make` автоматически не пересобирает зависимости даже если вносятся каие-либо изменения в код зависимостей.
Когда вы обновляете код проекта командой `git pull` или когда код внутри
дерева зависимостей изменен каким-либо другим способом, обязательно используйте следующее
команду для того, чтобы действительно все почистить и пересобрать с нуля:
```sh
unix:$ make distclean
```
В результате работы команды выше будут очищены: аллокатор памяти jemalloc, язык lua, библиотеку hiredis, библиотеку linenoise а также другие зависимости.
Кроме того, если вы принудительно используете определенные параметры сборки, такие как 32-битная версия для 32-битной системы, оптимизации компилятора C в данном случае не будут выполнены. Оптимизации (для целей отладки) и другие подобные параметры времени сборки,
кэшируются на неопределенный срок, пока вы не выполните команду `make distclean`.
<p align="right">(<a href="#readme-top">К началу</a>)</p>
## Аллокатор
Выбор аллокатора памяти не по умолчанию при сборке Futriix выполняется путем установки
параметра `MALLOC` переменной окружения. Futriix компилируется и компонуется с libc
malloc по умолчанию, за исключением jemalloc, который используется по умолчанию в дистрибутивах Linux.
Это значение по умолчанию было выбрано потому, что в jemalloc меньше
проблем c фрагментацией, чем libc malloc.
Чтобы принудительно скомпилировать libc malloc, выполните следующую команду:
```sh
unix:$ make MALLOC=libc
```
To compile against jemalloc on Mac OS X systems, use: To compile against jemalloc on Mac OS X systems, use:
```sh
unix:$ make MALLOC=jemalloc
```
## Monotonic clock
By default, Futriix uses the POSIX clock_gettime function as the
monotonic clock source. On most modern systems, the internal processor clock
can be used to improve performance. Cautions can be found here:
http://oliveryang.net/2015/09/pitfalls-of-TSC-usage/
To build with support for the processor's internal instruction clock, use:
```sh
unix:$ make CFLAGS="-DUSE_PROCESSOR_CLOCK"
```
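If Futriix keeps Valkey's startup logging (an assumption), the clock source that was actually selected can be confirmed from the server log:
```sh
unix:$ ./futriix-server --logfile futriix.log --daemonize yes
unix:$ grep -i "monotonic" futriix.log
```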
## Verbose build
Futriix builds with user-friendly colorized output by default.
If you want to see a more verbose output, use the following:
```sh
unix:$ make V=1
```
4. If you want to start the Futriix server with default parameters (without specifying a configuration file), run:
```sh
./futriix-server
```
5. You can also use the configuration file `futriix.conf`, located in the "Futriix" directory, to configure your server.
To start Futriix with a configuration file, use the command below:
```sh
./futriix-server /path/to/futriix.conf
```
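As in upstream Valkey (and presumably in Futriix as well), options from the configuration file can also be passed directly on the command line with exactly the same names, for example:
```sh
unix:$ ./futriix-server /path/to/futriix.conf --port 9880 --loglevel debug
```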
6. Run the futriix-cli utility (the Futriix client) to connect to a **local** Futriix server and start working with the instance:
```sh
./futriix-cli
```
7. To connect with futriix-cli to a specific node on the network, add the `-h` option to specify the remote host's IP address and the `-p` option to specify the port number:
```sh
./futriix-cli -h 11.164.22.7 -p 50000
```
<p align="right">(<a href="#readme-top">Back to top</a>)</p>

## Running Futriix with RDMA:

Note that RDMA support in Futriix is an experimental feature.
It may be changed or removed in any minor or major version.
Currently, it is only supported on Linux.
* Command to enable RDMA:
```sh
./src/futriix-server --protected-mode no \
--rdma-bind 192.168.122.100 --rdma-port 9880
```
* Running the RDMA module:
```sh
./src/futriix-server --protected-mode no \
--loadmodule src/Futriix-rdma.so --rdma-bind 192.168.122.100 --rdma-port 9880
```
The RDMA bind address/port can be changed with a runtime command:
```sh
192.168.122.100:9880> CONFIG SET rdma-port 9380
```
It is also possible to have both RDMA and TCP available at the same time; there is no
conflict between TCP(9880) and RDMA(9880), for example:
```sh
unix:$ ./src/futriix-server --protected-mode no \
--loadmodule src/Futriix-rdma.so --rdma-bind 192.168.122.100 --rdma-port 9880 \
--port 9880
```
Note: your network card (with IP address 192.168.122.100 in this example) must support
RDMA. To find out whether the server supports RDMA, run the command below:
```sh
unix:$ rdma res show    # requires a recent iproute2 package
```
Or the following command:
```sh
unix:$ ibv_devices
```
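A simple runtime sanity check, assuming the standard CONFIG GET command is reachable over the TCP listener:
```sh
unix:$ ./src/futriix-cli -h 192.168.122.100 -p 9880 CONFIG GET rdma-port
```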
<!-- USAGE EXAMPLES -->
## Usage
```sh
unix:$ cd src
unix:$ ./futriix-cli
127.0.0.1:futriix:~> ping
PONG
127.0.0.1:futriix:~> set foo bar
OK
127.0.0.1:futriix:~> get foo
"bar"
127.0.0.1:futriix:~> incr mycounter
(integer) 1
127.0.0.1:futriix:~> incr mycounter
(integer) 2
127.0.0.1:futriix:~>
```
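The session can be continued with the rest of the Redis-compatible command set; for example, key expiry (assuming the standard EXPIRE/TTL commands, which Futriix presumably inherits):
```sh
127.0.0.1:futriix:~> set page:views 10
OK
127.0.0.1:futriix:~> expire page:views 60
(integer) 1
127.0.0.1:futriix:~> ttl page:views
(integer) 60
```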
<p align="right">(<a href="#readme-top">Back to top</a>)</p>
## Cluster

1. Change into the Futriix directory:
```sh
unix:$ cd futriix
```
2. Open the futriix.conf configuration file in any text editor, for example nano:
```sh
unix:$ nano futriix/futriix.conf
```
3. Find the "active-replica" and "multi-master" parameters and set them to "yes". Then add the IP addresses of your cluster nodes to the configuration file. If everything is done correctly, the following lines should appear in `futriix.conf`:
```sh
active-replica yes
multi-master yes
replicaof 192.168.11.5 9880
replicaof 192.168.11.6 9880
replicaof 192.168.11.7 9880
```
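Once the nodes are running with this configuration, the replication topology can be checked from any node (assuming the Valkey-style INFO command is available):
```sh
unix:$ ./src/futriix-cli -h 192.168.11.5 -p 9880 INFO replication
```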
4. Save your changes and exit the editor using the following key combinations:
```sh
ctrl+O
ctrl+X
```
5. Go to the Futriix directory and run the `cluster.sh` script, first with the `pick` parameter (which assembles the cluster), then with the `run` parameter (which starts the cluster), as shown below:
```sh
unix:$ ./cluster pick
unix:$ ./cluster run
```
6. Make the `cluster.sh` script executable with the command below:
```sh
unix:$ chmod +x cluster.sh
```
7. To stop the cluster, run the `cluster.sh` script with the `stop` parameter:
```sh
unix:$ ./cluster stop
```
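The same script also provides a `watch` subcommand (see the cluster script diff later in this document), which repeatedly shows the CLUSTER NODES output of the first node:
```sh
unix:$ ./cluster watch
```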
<p align="right">(<a href="#readme-top">Back to top</a>)</p>
<!-- ROADMAP -->
## Roadmap
- [x] Add stored procedure support
- [x] Change the futriix-cli command-line prompt
- [x] Rewrite the cluster.sh script that assembles the Futriix cluster
- [x] Add a JSON module
- [ ] Add a module for running operating system terminal commands
- [ ] Implement Raft algorithm support
- [ ] Add SQL query language support
See the [open issues](https://source.futriix.ru/gvsafronov/Futriix/issues) for a full list of proposed features (and known problems).
<p align="right">(<a href="#readme-top">Back to top</a>)</p>
<!-- CONTRIBUTING -->
## Contributing
Contributions are what make the open source community such a great place to learn, be inspired, and create. Any contribution you make is **greatly appreciated**.
If you have a suggestion that would improve the project, fork the repository and open a pull request. You can also simply open an issue with the "enhancement" tag.
Don't forget to give the project a star. Thanks again!
1. Fork the project
2. Create your feature branch (`git checkout -b Feature/AmazingFeature`)
3. Commit your changes (`git commit -m 'Add some AmazingFeature'`)
4. Push to the branch (`git push origin Feature/AmazingFeature`)
5. Open a pull request
<!-- LICENSE -->
## License
The project is distributed under the BSD 3-Clause License. See `COPYING.txt` for details.
<p align="right">(<a href="#readme-top">Back to top</a>)</p>
<!-- CONTACT -->
## Contacts
Grigory Safronov - [E-mail](mailto:gvsafronov@yandex.ru)
Project link: [https://source.futriix.ru/gvsafronov/Futriix](https://source.futriix.ru/gvsafronov/Futriix)
<p align="right">(<a href="#readme-top">Back to top</a>)</p>

TLS.md (deleted file, 106 lines)
@ -1,106 +0,0 @@
TLS Support
===========
Getting Started
---------------
### Building
To build with TLS support you'll need OpenSSL development libraries (e.g.
libssl-dev on Debian/Ubuntu).
To build TLS support as Valkey built-in:
Run `make BUILD_TLS=yes`.
Or to build TLS as Valkey module:
Run `make BUILD_TLS=module`.
Note that sentinel mode does not support TLS module.
### Tests
To run Valkey test suite with TLS, you'll need TLS support for TCL (i.e.
`tcl-tls` package on Debian/Ubuntu).
1. Run `./utils/gen-test-certs.sh` to generate a root CA and a server
certificate.
2. Run `./runtest --tls` or `./runtest-cluster --tls` to run Valkey and Valkey
Cluster tests in TLS mode.
3. Run `./runtest --tls-module` or `./runtest-cluster --tls-module` to
run Valkey and Valkey cluster tests in TLS mode with Valkey module.
### Running manually
To manually run a Valkey server with TLS mode (assuming `gen-test-certs.sh` was
invoked so sample certificates/keys are available):
For TLS built-in mode:
    ./src/valkey-server --tls-port 6379 --port 0 \
        --tls-cert-file ./tests/tls/valkey.crt \
        --tls-key-file ./tests/tls/valkey.key \
        --tls-ca-cert-file ./tests/tls/ca.crt
For TLS module mode:
    ./src/valkey-server --tls-port 6379 --port 0 \
        --tls-cert-file ./tests/tls/valkey.crt \
        --tls-key-file ./tests/tls/valkey.key \
        --tls-ca-cert-file ./tests/tls/ca.crt \
        --loadmodule src/valkey-tls.so
To connect to this Valkey server with `valkey-cli`:
    ./src/valkey-cli --tls \
        --cert ./tests/tls/valkey.crt \
        --key ./tests/tls/valkey.key \
        --cacert ./tests/tls/ca.crt
Specifying `port 0` will disable TCP. It's also possible to have
both TCP and TLS available, but you'll need to assign different ports.
To make a Replica connect to the master using TLS, use `--tls-replication yes`,
and to make Valkey Cluster use TLS across nodes use `--tls-cluster yes`.
Connections
-----------
All socket operations now go through a connection abstraction layer that hides
I/O and read/write event handling from the caller.
**Multi-threading I/O is not currently supported for TLS**, as a TLS connection
needs to do its own manipulation of AE events which is not thread safe. The
solution is probably to manage independent AE loops for I/O threads and longer
term association of connections with threads. This may potentially improve
overall performance as well.
Sync IO for TLS is currently implemented in a hackish way, i.e. making the
socket blocking and configuring socket-level timeout. This means the timeout
value may not be so accurate, and there would be a lot of syscall overhead.
However I believe that getting rid of syncio completely in favor of pure async
work is probably a better move than trying to fix that. For replication it would
probably not be so hard. For cluster keys migration it might be more difficult,
but there are probably other good reasons to improve that part anyway.
To-Do List
----------
- [ ] valkey-benchmark support. The current implementation is a mix of using
hiredis for parsing and basic networking (establishing connections), but
directly manipulating sockets for most actions. This will need to be cleaned
up for proper TLS support. The best approach is probably to migrate to hiredis
async mode.
- [ ] valkey-cli `--slave` and `--rdb` support.
Multi-port
----------
Consider the implications of allowing TLS to be configured on a separate port,
making Valkey listening on multiple ports:
1. Startup banner port notification
2. Proctitle
3. How slaves announce themselves
4. Cluster bus port calculation

utils/create-cluster/create-cluster → cluster (executable file → normal file, 39 lines changed)
@ -1,16 +1,17 @@
-#!/bin/bash
+#!/usr/bin/env sh
 SCRIPT_DIR="$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )"
 # Settings
-BIN_PATH="$SCRIPT_DIR/../../src/"
+BIN_PATH="$SCRIPT_DIR/src"
 CLUSTER_HOST=127.0.0.1
-PORT=30000
+PORT=7000
 TIMEOUT=2000
 NODES=6
 REPLICAS=1
 PROTECTED_MODE=yes
 ADDITIONAL_OPTIONS=""
+CONFIG_PATH="./futriix.conf"
 # You may want to put the above config parameters into config.sh in order to
 # override the defaults without modifying this script.
@ -23,17 +24,17 @@ fi
 # Computed vars
 ENDPORT=$((PORT+NODES))
-if [ "$1" == "start" ]
+if [ "$1" == "pick" ]
 then
 while [ $((PORT < ENDPORT)) != "0" ]; do
 PORT=$((PORT+1))
 echo "Starting $PORT"
-$BIN_PATH/valkey-server --port $PORT --protected-mode $PROTECTED_MODE --cluster-enabled yes --cluster-config-file nodes-${PORT}.conf --cluster-node-timeout $TIMEOUT --appendonly yes --appendfilename appendonly-${PORT}.aof --appenddirname appendonlydir-${PORT} --dbfilename dump-${PORT}.rdb --logfile ${PORT}.log --daemonize yes --enable-protected-configs yes --enable-debug-command yes --enable-module-command yes ${ADDITIONAL_OPTIONS}
+$BIN_PATH/futriix-server ${CONFIG_PATH} --port $PORT --protected-mode $PROTECTED_MODE --cluster-enabled yes --cluster-config-file nodes-${PORT}.conf --cluster-node-timeout $TIMEOUT --appendonly yes --appendfilename appendonly-${PORT}.aof --appenddirname appendonlydir-${PORT} --dbfilename dump-${PORT}.rdb --logfile ${PORT}.log --daemonize yes --enable-protected-configs yes --enable-debug-command yes --enable-module-command yes ${ADDITIONAL_OPTIONS}
 done
 exit 0
 fi
-if [ "$1" == "create" ]
+if [ "$1" == "run" ]
 then
 HOSTS=""
 while [ $((PORT < ENDPORT)) != "0" ]; do
@ -44,7 +45,7 @@ then
 if [ "$2" == "-f" ]; then
 OPT_ARG="--cluster-yes"
 fi
-$BIN_PATH/valkey-cli --cluster create $HOSTS --cluster-replicas $REPLICAS $OPT_ARG
+$BIN_PATH/futriix-cli --cluster create $HOSTS --cluster-replicas $REPLICAS $OPT_ARG
 exit 0
 fi
@ -53,24 +54,24 @@ then
 while [ $((PORT < ENDPORT)) != "0" ]; do
 PORT=$((PORT+1))
 echo "Stopping $PORT"
-$BIN_PATH/valkey-cli -p $PORT shutdown nosave
+$BIN_PATH/futriix-cli -p $PORT shutdown nosave
 done
 exit 0
 fi
-if [ "$1" == "restart" ]
+if [ "$1" == "repick" ]
 then
 OLD_PORT=$PORT
 while [ $((PORT < ENDPORT)) != "0" ]; do
 PORT=$((PORT+1))
 echo "Stopping $PORT"
-$BIN_PATH/valkey-cli -p $PORT shutdown nosave
+$BIN_PATH/futriix-cli -p $PORT shutdown nosave
 done
 PORT=$OLD_PORT
 while [ $((PORT < ENDPORT)) != "0" ]; do
 PORT=$((PORT+1))
-echo "Starting $PORT"
+echo "picking $PORT"
-$BIN_PATH/valkey-server --port $PORT --protected-mode $PROTECTED_MODE --cluster-enabled yes --cluster-config-file nodes-${PORT}.conf --cluster-node-timeout $TIMEOUT --appendonly yes --appendfilename appendonly-${PORT}.aof --appenddirname appendonlydir-${PORT} --dbfilename dump-${PORT}.rdb --logfile ${PORT}.log --daemonize yes --enable-protected-configs yes --enable-debug-command yes --enable-module-command yes ${ADDITIONAL_OPTIONS}
+$BIN_PATH/futriix-server ${CONFIG_PATH} --port $PORT --protected-mode $PROTECTED_MODE --cluster-enabled yes --cluster-config-file nodes-${PORT}.conf --cluster-node-timeout $TIMEOUT --appendonly yes --appendfilename appendonly-${PORT}.aof --appenddirname appendonlydir-${PORT} --dbfilename dump-${PORT}.rdb --logfile ${PORT}.log --daemonize yes --enable-protected-configs yes --enable-debug-command yes --enable-module-command yes ${ADDITIONAL_OPTIONS}
 done
 exit 0
 fi
@ -81,7 +82,7 @@ then
 while [ 1 ]; do
 clear
 date
-$BIN_PATH/valkey-cli -p $PORT cluster nodes | head -30
+$BIN_PATH/futriix-cli -p $PORT cluster nodes | head -30
 sleep 1
 done
 exit 0
@ -130,14 +131,16 @@ then
 exit 0
 fi
-echo "Usage: $0 [start|create|stop|restart|watch|tail|tailall|clean|clean-logs|call]"
+echo ""
-echo "start -- Launch Valkey Cluster instances."
+echo "Usage: $0 [pick|run|stop|restart|watch|tail|tailall|clean|clean-logs|call]"
-echo "create [-f] -- Create a cluster using valkey-cli --cluster create."
+echo "pick -- Launch Futriix Cluster instances."
-echo "stop -- Stop Valkey Cluster instances."
+echo "run [-f] -- Create a cluster using futriix-cli --cluster create."
-echo "restart -- Restart Valkey Cluster instances."
+echo "stop -- Stop Futriix Cluster instances."
+echo "restart -- Restart Futriix Cluster instances."
 echo "watch -- Show CLUSTER NODES output (first 30 lines) of first node."
 echo "tail <id> -- Run tail -f of instance at base port + ID."
 echo "tailall -- Run tail -f for all the log files at once."
 echo "clean -- Remove all instances data, logs, configs."
 echo "clean-logs -- Remove just instances logs."
 echo "call <cmd> -- Call a command (up to 7 arguments) on all nodes."
+echo ""

cluster-experimental (new file, 192 lines)
@ -0,0 +1,192 @@
#!/usr/bin/env sh
# This script automatically picks (configures) and starts a Futriix cluster.
# It also prints a colored "OK"/"fail" status indicator for each step.
SCRIPT_DIR="$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )"
# Settings of color
SETCOLOR_SUCCESS="echo -en \\033[1;32m"
SETCOLOR_FAILURE="echo -en \\033[1;31m"
SETCOLOR_NORMAL="echo -en \\033[0;39m"
# Settings
BIN_PATH="$SCRIPT_DIR/src"
CLUSTER_HOST=127.0.0.1
PORT=7000
TIMEOUT=2000
NODES=6
REPLICAS=1
PROTECTED_MODE=yes
ADDITIONAL_OPTIONS=""
CONFIG_PATH="./futriix.conf"
# You may want to put the above config parameters into config.sh in order to
# override the defaults without modifying this script.
if [ -a config.sh ]
then
source "config.sh"
fi
# Computed vars
ENDPORT=$((PORT+NODES))
#if [ $? -eq 0 ]; then
# $SETCOLOR_SUCCESS
# echo -n "$(tput hpa $(tput cols))$(tput cub 6)[OK]"
# $SETCOLOR_NORMAL
# echo
# else
# $SETCOLOR_FAILURE
# echo -n "$(tput hpa $(tput cols))$(tput cub 6)[fail]"
# $SETCOLOR_NORMAL
# echo
#fi
if [ "$1" == "pick" ]
then
while [ $((PORT < ENDPORT)) != "0" ]; do
PORT=$((PORT+1))
echo "Starting $PORT"
yes | $BIN_PATH/futriix-server ${CONFIG_PATH} --port $PORT --protected-mode $PROTECTED_MODE --cluster-enabled yes --cluster-config-file nodes-${PORT}.conf --cluster-node-timeout $TIMEOUT --appendonly yes --appendfilename appendonly-${PORT}.aof --appenddirname appendonlydir-${PORT} --dbfilename dump-${PORT}.rdb --logfile ${PORT}.log --daemonize yes --enable-protected-configs yes --enable-debug-command yes --enable-module-command yes ${ADDITIONAL_OPTIONS} >/dev/null 2>&1
if [ $? -eq 0 ]; then
$SETCOLOR_SUCCESS
echo -n "$(tput hpa $(tput cols))$(tput cub 6)[OK]"
$SETCOLOR_NORMAL
echo
else
$SETCOLOR_FAILURE
echo -n "$(tput hpa $(tput cols))$(tput cub 6)[fail]"
$SETCOLOR_NORMAL
echo
fi
done
exit 0
fi
if [ "$1" == "run" ]
then
HOSTS=""
while [ $((PORT < ENDPORT)) != "0" ]; do
PORT=$((PORT+1))
HOSTS="$HOSTS $CLUSTER_HOST:$PORT"
done
OPT_ARG=""
if [ "$2" == "-f" ]; then
OPT_ARG="--cluster-yes"
fi
yes | $BIN_PATH/futriix-cli --cluster create $HOSTS --cluster-replicas $REPLICAS $OPT_ARG >/dev/null 2>&1
exit 0
fi
# Note: this status block is not tied to any command: for "pick" and "run" the
# script has already exited above, so it is disabled here like the similar
# commented-out block near the top of the script.
#if [ $? -eq 0 ]; then
#$SETCOLOR_SUCCESS
#echo -n "$(tput hpa $(tput cols))$(tput cub 6)[OK]"
#$SETCOLOR_NORMAL
#echo
#else
#$SETCOLOR_FAILURE
#echo -n "$(tput hpa $(tput cols))$(tput cub 6)[fail]"
#$SETCOLOR_NORMAL
#echo
#fi
if [ "$1" == "stop" ]
then
while [ $((PORT < ENDPORT)) != "0" ]; do
PORT=$((PORT+1))
echo "Stopping $PORT"
$BIN_PATH/futriix-cli -p $PORT shutdown nosave
done
exit 0
fi
if [ "$1" == "repick" ]
then
OLD_PORT=$PORT
while [ $((PORT < ENDPORT)) != "0" ]; do
PORT=$((PORT+1))
echo "Stopping $PORT"
$BIN_PATH/futriix-cli -p $PORT shutdown nosave
done
PORT=$OLD_PORT
while [ $((PORT < ENDPORT)) != "0" ]; do
PORT=$((PORT+1))
echo "picking $PORT"
$BIN_PATH/futriix-server ${CONFIG_PATH} --port $PORT --protected-mode $PROTECTED_MODE --cluster-enabled yes --cluster-config-file nodes-${PORT}.conf --cluster-node-timeout $TIMEOUT --appendonly yes --appendfilename appendonly-${PORT}.aof --appenddirname appendonlydir-${PORT} --dbfilename dump-${PORT}.rdb --logfile ${PORT}.log --daemonize yes --enable-protected-configs yes --enable-debug-command yes --enable-module-command yes ${ADDITIONAL_OPTIONS}
done
exit 0
fi
if [ "$1" == "watch" ]
then
PORT=$((PORT+1))
while [ 1 ]; do
clear
date
$BIN_PATH/futriix-cli -p $PORT cluster nodes | head -30
sleep 1
done
exit 0
fi
if [ "$1" == "tail" ]
then
INSTANCE=$2
PORT=$((PORT+INSTANCE))
tail -f ${PORT}.log
exit 0
fi
if [ "$1" == "tailall" ]
then
tail -f *.log
exit 0
fi
if [ "$1" == "call" ]
then
while [ $((PORT < ENDPORT)) != "0" ]; do
PORT=$((PORT+1))
$BIN_PATH/futriix-cli -p $PORT $2 $3 $4 $5 $6 $7 $8 $9
done
exit 0
fi
if [ "$1" == "clean" ]
then
echo "Cleaning *.log"
rm -rf *.log
echo "Cleaning appendonlydir-*"
rm -rf appendonlydir-*
echo "Cleaning dump-*.rdb"
rm -rf dump-*.rdb
echo "Cleaning nodes-*.conf"
rm -rf nodes-*.conf
exit 0
fi
if [ "$1" == "clean-logs" ]
then
echo "Cleaning *.log"
rm -rf *.log
exit 0
fi
echo ""
echo "Usage: $0 [pick|run|stop|restart|watch|tail|tailall|clean|clean-logs|call]"
echo "pick -- Launch Futriix Cluster instances."
echo "run [-f] -- Create a cluster using futriix-cli --cluster create."
echo "stop -- Stop Futriix Cluster instances."
echo "restart -- Restart Futriix Cluster instances."
echo "watch -- Show CLUSTER NODES output (first 30 lines) of first node."
echo "tail <id> -- Run tail -f of instance at base port + ID."
echo "tailall -- Run tail -f for all the log files at once."
echo "clean -- Remove all instances data, logs, configs."
echo "clean-logs -- Remove just instances logs."
echo "call <cmd> -- Call a command (up to 7 arguments) on all nodes."
echo ""

@ -0,0 +1,44 @@
set(CPACK_PACKAGE_NAME "valkey")
valkey_parse_version(CPACK_PACKAGE_VERSION_MAJOR CPACK_PACKAGE_VERSION_MINOR CPACK_PACKAGE_VERSION_PATCH)
set(CPACK_PACKAGE_CONTACT "maintainers@lists.valkey.io")
set(CPACK_PACKAGE_DESCRIPTION_SUMMARY "Valkey is an open source (BSD) high-performance key/value datastore")
set(CPACK_RESOURCE_FILE_LICENSE "${CMAKE_SOURCE_DIR}/COPYING")
set(CPACK_RESOURCE_FILE_README "${CMAKE_SOURCE_DIR}/README.md")
set(CPACK_STRIP_FILES TRUE)
valkey_get_distro_name(DISTRO_NAME)
message(STATUS "Current host distro: ${DISTRO_NAME}")
if (DISTRO_NAME MATCHES ubuntu
OR DISTRO_NAME MATCHES debian
OR DISTRO_NAME MATCHES mint)
message(STATUS "Adding target package for ${DISTRO_NAME}")
set(CPACK_PACKAGING_INSTALL_PREFIX "/opt/valkey")
# Debian related parameters
set(CPACK_DEBIAN_PACKAGE_MAINTAINER "Valkey contributors")
set(CPACK_DEBIAN_PACKAGE_SHLIBDEPS ON)
set(CPACK_DEBIAN_FILE_NAME DEB-DEFAULT)
set(CPACK_GENERATOR "DEB")
endif ()
include(CPack)
unset(DISTRO_NAME CACHE)
# ---------------------------------------------------
# Create a helper script for creating symbolic links
# ---------------------------------------------------
write_file(
${CMAKE_BINARY_DIR}/CreateSymlink.sh
"\
#!/bin/bash \n\
if [ -z \${DESTDIR} ]; then \n\
# Script is called during 'make install' \n\
PREFIX=${CMAKE_INSTALL_PREFIX}/bin \n\
else \n\
# Script is called during 'make package' \n\
PREFIX=\${DESTDIR}${CPACK_PACKAGING_INSTALL_PREFIX}/bin \n\
fi \n\
cd \$PREFIX \n\
ln -sf \$1 \$2")
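A sketch of how this packaging file would typically be exercised, assuming it is included from the top-level CMake build (the `package` target is generated by `include(CPack)`):
```sh
unix:$ mkdir build && cd build
unix:$ cmake .. && make package   # on Debian-family hosts this emits a .deb
```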

@ -0,0 +1,157 @@
# -------------------------------------------------
# Define the sources to be built
# -------------------------------------------------
# valkey-server source files
set(VALKEY_SERVER_SRCS
${CMAKE_SOURCE_DIR}/src/threads_mngr.c
${CMAKE_SOURCE_DIR}/src/adlist.c
${CMAKE_SOURCE_DIR}/src/quicklist.c
${CMAKE_SOURCE_DIR}/src/ae.c
${CMAKE_SOURCE_DIR}/src/anet.c
${CMAKE_SOURCE_DIR}/src/dict.c
${CMAKE_SOURCE_DIR}/src/hashtable.c
${CMAKE_SOURCE_DIR}/src/kvstore.c
${CMAKE_SOURCE_DIR}/src/sds.c
${CMAKE_SOURCE_DIR}/src/zmalloc.c
${CMAKE_SOURCE_DIR}/src/lzf_c.c
${CMAKE_SOURCE_DIR}/src/lzf_d.c
${CMAKE_SOURCE_DIR}/src/pqsort.c
${CMAKE_SOURCE_DIR}/src/zipmap.c
${CMAKE_SOURCE_DIR}/src/sha1.c
${CMAKE_SOURCE_DIR}/src/ziplist.c
${CMAKE_SOURCE_DIR}/src/release.c
${CMAKE_SOURCE_DIR}/src/memory_prefetch.c
${CMAKE_SOURCE_DIR}/src/io_threads.c
${CMAKE_SOURCE_DIR}/src/networking.c
${CMAKE_SOURCE_DIR}/src/util.c
${CMAKE_SOURCE_DIR}/src/object.c
${CMAKE_SOURCE_DIR}/src/db.c
${CMAKE_SOURCE_DIR}/src/replication.c
${CMAKE_SOURCE_DIR}/src/rdb.c
${CMAKE_SOURCE_DIR}/src/t_string.c
${CMAKE_SOURCE_DIR}/src/t_list.c
${CMAKE_SOURCE_DIR}/src/t_set.c
${CMAKE_SOURCE_DIR}/src/t_zset.c
${CMAKE_SOURCE_DIR}/src/t_hash.c
${CMAKE_SOURCE_DIR}/src/config.c
${CMAKE_SOURCE_DIR}/src/aof.c
${CMAKE_SOURCE_DIR}/src/pubsub.c
${CMAKE_SOURCE_DIR}/src/multi.c
${CMAKE_SOURCE_DIR}/src/debug.c
${CMAKE_SOURCE_DIR}/src/sort.c
${CMAKE_SOURCE_DIR}/src/intset.c
${CMAKE_SOURCE_DIR}/src/syncio.c
${CMAKE_SOURCE_DIR}/src/cluster.c
${CMAKE_SOURCE_DIR}/src/cluster_legacy.c
${CMAKE_SOURCE_DIR}/src/cluster_slot_stats.c
${CMAKE_SOURCE_DIR}/src/crc16.c
${CMAKE_SOURCE_DIR}/src/endianconv.c
${CMAKE_SOURCE_DIR}/src/commandlog.c
${CMAKE_SOURCE_DIR}/src/eval.c
${CMAKE_SOURCE_DIR}/src/bio.c
${CMAKE_SOURCE_DIR}/src/rio.c
${CMAKE_SOURCE_DIR}/src/rand.c
${CMAKE_SOURCE_DIR}/src/memtest.c
${CMAKE_SOURCE_DIR}/src/syscheck.c
${CMAKE_SOURCE_DIR}/src/crcspeed.c
${CMAKE_SOURCE_DIR}/src/crccombine.c
${CMAKE_SOURCE_DIR}/src/crc64.c
${CMAKE_SOURCE_DIR}/src/bitops.c
${CMAKE_SOURCE_DIR}/src/sentinel.c
${CMAKE_SOURCE_DIR}/src/notify.c
${CMAKE_SOURCE_DIR}/src/setproctitle.c
${CMAKE_SOURCE_DIR}/src/blocked.c
${CMAKE_SOURCE_DIR}/src/hyperloglog.c
${CMAKE_SOURCE_DIR}/src/latency.c
${CMAKE_SOURCE_DIR}/src/sparkline.c
${CMAKE_SOURCE_DIR}/src/valkey-check-rdb.c
${CMAKE_SOURCE_DIR}/src/valkey-check-aof.c
${CMAKE_SOURCE_DIR}/src/geo.c
${CMAKE_SOURCE_DIR}/src/lazyfree.c
${CMAKE_SOURCE_DIR}/src/module.c
${CMAKE_SOURCE_DIR}/src/evict.c
${CMAKE_SOURCE_DIR}/src/expire.c
${CMAKE_SOURCE_DIR}/src/geohash.c
${CMAKE_SOURCE_DIR}/src/geohash_helper.c
${CMAKE_SOURCE_DIR}/src/childinfo.c
${CMAKE_SOURCE_DIR}/src/allocator_defrag.c
${CMAKE_SOURCE_DIR}/src/defrag.c
${CMAKE_SOURCE_DIR}/src/siphash.c
${CMAKE_SOURCE_DIR}/src/rax.c
${CMAKE_SOURCE_DIR}/src/t_stream.c
${CMAKE_SOURCE_DIR}/src/listpack.c
${CMAKE_SOURCE_DIR}/src/localtime.c
${CMAKE_SOURCE_DIR}/src/lolwut.c
${CMAKE_SOURCE_DIR}/src/lolwut5.c
${CMAKE_SOURCE_DIR}/src/lolwut6.c
${CMAKE_SOURCE_DIR}/src/acl.c
${CMAKE_SOURCE_DIR}/src/tracking.c
${CMAKE_SOURCE_DIR}/src/socket.c
${CMAKE_SOURCE_DIR}/src/tls.c
${CMAKE_SOURCE_DIR}/src/rdma.c
${CMAKE_SOURCE_DIR}/src/sha256.c
${CMAKE_SOURCE_DIR}/src/timeout.c
${CMAKE_SOURCE_DIR}/src/setcpuaffinity.c
${CMAKE_SOURCE_DIR}/src/monotonic.c
${CMAKE_SOURCE_DIR}/src/mt19937-64.c
${CMAKE_SOURCE_DIR}/src/resp_parser.c
${CMAKE_SOURCE_DIR}/src/call_reply.c
${CMAKE_SOURCE_DIR}/src/script_lua.c
${CMAKE_SOURCE_DIR}/src/script.c
${CMAKE_SOURCE_DIR}/src/functions.c
${CMAKE_SOURCE_DIR}/src/scripting_engine.c
${CMAKE_SOURCE_DIR}/src/function_lua.c
${CMAKE_SOURCE_DIR}/src/commands.c
${CMAKE_SOURCE_DIR}/src/strl.c
${CMAKE_SOURCE_DIR}/src/connection.c
${CMAKE_SOURCE_DIR}/src/unix.c
${CMAKE_SOURCE_DIR}/src/server.c
${CMAKE_SOURCE_DIR}/src/logreqres.c)
# valkey-cli
set(VALKEY_CLI_SRCS
${CMAKE_SOURCE_DIR}/src/anet.c
${CMAKE_SOURCE_DIR}/src/adlist.c
${CMAKE_SOURCE_DIR}/src/dict.c
${CMAKE_SOURCE_DIR}/src/valkey-cli.c
${CMAKE_SOURCE_DIR}/src/zmalloc.c
${CMAKE_SOURCE_DIR}/src/release.c
${CMAKE_SOURCE_DIR}/src/ae.c
${CMAKE_SOURCE_DIR}/src/serverassert.c
${CMAKE_SOURCE_DIR}/src/crcspeed.c
${CMAKE_SOURCE_DIR}/src/crccombine.c
${CMAKE_SOURCE_DIR}/src/crc64.c
${CMAKE_SOURCE_DIR}/src/siphash.c
${CMAKE_SOURCE_DIR}/src/crc16.c
${CMAKE_SOURCE_DIR}/src/monotonic.c
${CMAKE_SOURCE_DIR}/src/cli_common.c
${CMAKE_SOURCE_DIR}/src/mt19937-64.c
${CMAKE_SOURCE_DIR}/src/strl.c
${CMAKE_SOURCE_DIR}/src/cli_commands.c)
# valkey-benchmark
set(VALKEY_BENCHMARK_SRCS
${CMAKE_SOURCE_DIR}/src/ae.c
${CMAKE_SOURCE_DIR}/src/anet.c
${CMAKE_SOURCE_DIR}/src/valkey-benchmark.c
${CMAKE_SOURCE_DIR}/src/adlist.c
${CMAKE_SOURCE_DIR}/src/dict.c
${CMAKE_SOURCE_DIR}/src/zmalloc.c
${CMAKE_SOURCE_DIR}/src/serverassert.c
${CMAKE_SOURCE_DIR}/src/release.c
${CMAKE_SOURCE_DIR}/src/crcspeed.c
${CMAKE_SOURCE_DIR}/src/crccombine.c
${CMAKE_SOURCE_DIR}/src/crc64.c
${CMAKE_SOURCE_DIR}/src/siphash.c
${CMAKE_SOURCE_DIR}/src/crc16.c
${CMAKE_SOURCE_DIR}/src/monotonic.c
${CMAKE_SOURCE_DIR}/src/cli_common.c
${CMAKE_SOURCE_DIR}/src/mt19937-64.c
${CMAKE_SOURCE_DIR}/src/strl.c)
# valkey-rdma module
set(VALKEY_RDMA_MODULE_SRCS ${CMAKE_SOURCE_DIR}/src/rdma.c)
# valkey-tls module
set(VALKEY_TLS_MODULE_SRCS ${CMAKE_SOURCE_DIR}/src/tls.c)

cmake/Modules/Utils.cmake (new file, 115 lines)
@ -0,0 +1,115 @@
# Return the current host distro name. For example: ubuntu, debian, amzn etc
function (valkey_get_distro_name DISTRO_NAME)
if (LINUX AND NOT APPLE)
execute_process(
COMMAND /bin/bash "-c" "cat /etc/os-release |grep ^ID=|cut -d = -f 2"
OUTPUT_VARIABLE _OUT_VAR
OUTPUT_STRIP_TRAILING_WHITESPACE)
# clean the output
string(REPLACE "\"" "" _OUT_VAR "${_OUT_VAR}")
string(REPLACE "." "" _OUT_VAR "${_OUT_VAR}")
set(${DISTRO_NAME}
"${_OUT_VAR}"
PARENT_SCOPE)
elseif (APPLE)
set(${DISTRO_NAME}
"darwin"
PARENT_SCOPE)
elseif (IS_FREEBSD)
set(${DISTRO_NAME}
"freebsd"
PARENT_SCOPE)
else ()
set(${DISTRO_NAME}
"unknown"
PARENT_SCOPE)
endif ()
endfunction ()
function (valkey_parse_version OUT_MAJOR OUT_MINOR OUT_PATCH)
# Read and parse package version from version.h file
file(STRINGS ${CMAKE_SOURCE_DIR}/src/version.h VERSION_LINES)
foreach (LINE ${VERSION_LINES})
string(FIND "${LINE}" "#define VALKEY_VERSION " VERSION_STR_POS)
if (VERSION_STR_POS GREATER -1)
string(REPLACE "#define VALKEY_VERSION " "" LINE "${LINE}")
string(REPLACE "\"" "" LINE "${LINE}")
# Change "." to ";" to make it a list
string(REPLACE "." ";" LINE "${LINE}")
list(GET LINE 0 _MAJOR)
list(GET LINE 1 _MINOR)
list(GET LINE 2 _PATCH)
message(STATUS "Valkey version: ${_MAJOR}.${_MINOR}.${_PATCH}")
# Set the output variables
set(${OUT_MAJOR}
${_MAJOR}
PARENT_SCOPE)
set(${OUT_MINOR}
${_MINOR}
PARENT_SCOPE)
set(${OUT_PATCH}
${_PATCH}
PARENT_SCOPE)
endif ()
endforeach ()
endfunction ()
# Given input argument `OPTION_VALUE`, check that the `OPTION_VALUE` is from the allowed values (one of:
# module/yes/no/1/0/on/off)
#
# Return value:
#
# If ARG is valid, return its number where:
#
# ~~~
# - `no` | `0` | `off` => return `0`
# - `yes` | `1` | `on` => return `1`
# - `module` => return `2`
# ~~~
function (valkey_parse_build_option OPTION_VALUE OUT_ARG_ENUM)
list(APPEND VALID_OPTIONS "yes")
list(APPEND VALID_OPTIONS "1")
list(APPEND VALID_OPTIONS "on")
list(APPEND VALID_OPTIONS "no")
list(APPEND VALID_OPTIONS "0")
list(APPEND VALID_OPTIONS "off")
list(APPEND VALID_OPTIONS "module")
string(TOLOWER "${OPTION_VALUE}" OPTION_VALUE)
list(FIND VALID_OPTIONS "${OPTION_VALUE}" OPT_INDEX)
if (OPT_INDEX EQUAL -1)
message(FATAL_ERROR "Invalid value passed '${OPTION_VALUE}'")
endif ()
if ("${OPTION_VALUE}" STREQUAL "yes"
OR "${OPTION_VALUE}" STREQUAL "1"
OR "${OPTION_VALUE}" STREQUAL "on")
set(${OUT_ARG_ENUM}
1
PARENT_SCOPE)
elseif (
"${OPTION_VALUE}" STREQUAL "no"
OR "${OPTION_VALUE}" STREQUAL "0"
OR "${OPTION_VALUE}" STREQUAL "off")
set(${OUT_ARG_ENUM}
0
PARENT_SCOPE)
else ()
set(${OUT_ARG_ENUM}
2
PARENT_SCOPE)
endif ()
endfunction ()
function (valkey_pkg_config PKGNAME OUT_VARIABLE)
if (NOT FOUND_PKGCONFIG)
# Locate pkg-config once
find_package(PkgConfig REQUIRED)
set(FOUND_PKGCONFIG 1)
endif ()
pkg_check_modules(__PREFIX REQUIRED ${PKGNAME})
message(STATUS "Found library for '${PKGNAME}': ${__PREFIX_LIBRARIES}")
set(${OUT_VARIABLE}
"${__PREFIX_LIBRARIES}"
PARENT_SCOPE)
endfunction ()

@ -0,0 +1,394 @@
include(CheckIncludeFiles)
include(ProcessorCount)
include(Utils)
set(CMAKE_LIBRARY_OUTPUT_DIRECTORY "${CMAKE_BINARY_DIR}/lib")
set(CMAKE_RUNTIME_OUTPUT_DIRECTORY "${CMAKE_BINARY_DIR}/bin")
set(CMAKE_ARCHIVE_OUTPUT_DIRECTORY "${CMAKE_BINARY_DIR}/lib")
# Generate compile_commands.json file for IDEs code completion support
set(CMAKE_EXPORT_COMPILE_COMMANDS 1)
processorcount(VALKEY_PROCESSOR_COUNT)
message(STATUS "Processor count: ${VALKEY_PROCESSOR_COUNT}")
# Installed executables will have these permissions
set(VALKEY_EXE_PERMISSIONS
OWNER_EXECUTE
OWNER_WRITE
OWNER_READ
GROUP_EXECUTE
GROUP_READ
WORLD_EXECUTE
WORLD_READ)
set(VALKEY_SERVER_CFLAGS "")
set(VALKEY_SERVER_LDFLAGS "")
# ----------------------------------------------------
# Helper functions & macros
# ----------------------------------------------------
macro (add_valkey_server_compiler_options value)
set(VALKEY_SERVER_CFLAGS "${VALKEY_SERVER_CFLAGS} ${value}")
endmacro ()
macro (add_valkey_server_linker_option value)
list(APPEND VALKEY_SERVER_LDFLAGS ${value})
endmacro ()
macro (get_valkey_server_linker_option return_value)
list(JOIN VALKEY_SERVER_LDFLAGS " " ${return_value})
endmacro ()
set(IS_FREEBSD 0)
if (CMAKE_SYSTEM_NAME MATCHES "^.*BSD$|DragonFly")
message(STATUS "Building for FreeBSD compatible system")
set(IS_FREEBSD 1)
include_directories("/usr/local/include")
add_valkey_server_compiler_options("-DUSE_BACKTRACE")
endif ()
# Helper function for creating symbolic link so that: link -> source
macro (valkey_create_symlink source link)
install(
CODE "execute_process( \
COMMAND /bin/bash ${CMAKE_BINARY_DIR}/CreateSymlink.sh \
${source} \
${link} \
)"
COMPONENT "valkey")
endmacro ()
# Install a binary
macro (valkey_install_bin target)
# Install cli tool and create a redis symbolic link
install(
TARGETS ${target}
DESTINATION ${CMAKE_INSTALL_BINDIR}
PERMISSIONS ${VALKEY_EXE_PERMISSIONS}
COMPONENT "valkey")
endmacro ()
# Helper function that defines, builds and installs `target`. In addition, it creates a symbolic link between the target
# and `link_name`
macro (valkey_build_and_install_bin target sources ld_flags libs link_name)
add_executable(${target} ${sources})
if (USE_JEMALLOC
OR USE_TCMALLOC
OR USE_TCMALLOC_MINIMAL)
# Using custom allocator
target_link_libraries(${target} ${ALLOCATOR_LIB})
endif ()
# Place this line last to ensure that ${ld_flags} is placed last on the linker line
target_link_libraries(${target} ${libs} ${ld_flags})
target_link_libraries(${target} hiredis)
if (USE_TLS)
# Add required libraries needed for TLS
target_link_libraries(${target} OpenSSL::SSL hiredis_ssl)
endif ()
if (IS_FREEBSD)
target_link_libraries(${target} execinfo)
endif ()
# Enable all warnings + fail on warning
target_compile_options(${target} PRIVATE -Werror -Wall)
# Install cli tool and create a redis symbolic link
valkey_install_bin(${target})
valkey_create_symlink(${target} ${link_name})
endmacro ()
# Helper function that defines, builds and installs `target` module.
macro (valkey_build_and_install_module target sources ld_flags libs)
add_library(${target} SHARED ${sources})
if (USE_JEMALLOC)
# Using jemalloc
target_link_libraries(${target} jemalloc)
endif ()
# Place this line last to ensure that ${ld_flags} is placed last on the linker line
target_link_libraries(${target} ${libs} ${ld_flags})
if (USE_TLS)
# Add required libraries needed for TLS
target_link_libraries(${target} OpenSSL::SSL hiredis_ssl)
endif ()
if (IS_FREEBSD)
target_link_libraries(${target} execinfo)
endif ()
# Install cli tool and create a redis symbolic link
valkey_install_bin(${target})
endmacro ()
# Determine if we are building in Release or Debug mode
if (CMAKE_BUILD_TYPE MATCHES Debug OR CMAKE_BUILD_TYPE MATCHES DebugFull)
set(VALKEY_DEBUG_BUILD 1)
set(VALKEY_RELEASE_BUILD 0)
message(STATUS "Building in debug mode")
else ()
set(VALKEY_DEBUG_BUILD 0)
set(VALKEY_RELEASE_BUILD 1)
message(STATUS "Building in release mode")
endif ()
# ----------------------------------------------------
# Helper functions - end
# ----------------------------------------------------
# ----------------------------------------------------
# Build options (allocator, tls, rdma et al)
# ----------------------------------------------------
if (NOT BUILD_MALLOC)
if (APPLE)
set(BUILD_MALLOC "libc")
elseif (UNIX)
set(BUILD_MALLOC "jemalloc")
endif ()
endif ()
# User may pass different allocator library. Using -DBUILD_MALLOC=<libname>, make sure it is a valid value
if (BUILD_MALLOC)
if ("${BUILD_MALLOC}" STREQUAL "jemalloc")
set(MALLOC_LIB "jemalloc")
set(ALLOCATOR_LIB "jemalloc")
add_valkey_server_compiler_options("-DUSE_JEMALLOC")
set(USE_JEMALLOC 1)
elseif ("${BUILD_MALLOC}" STREQUAL "libc")
set(MALLOC_LIB "libc")
elseif ("${BUILD_MALLOC}" STREQUAL "tcmalloc")
set(MALLOC_LIB "tcmalloc")
valkey_pkg_config(libtcmalloc ALLOCATOR_LIB)
add_valkey_server_compiler_options("-DUSE_TCMALLOC")
set(USE_TCMALLOC 1)
elseif ("${BUILD_MALLOC}" STREQUAL "tcmalloc_minimal")
set(MALLOC_LIB "tcmalloc_minimal")
valkey_pkg_config(libtcmalloc_minimal ALLOCATOR_LIB)
add_valkey_server_compiler_options("-DUSE_TCMALLOC")
set(USE_TCMALLOC_MINIMAL 1)
else ()
message(FATAL_ERROR "BUILD_MALLOC can be one of: jemalloc, libc, tcmalloc or tcmalloc_minimal")
endif ()
endif ()
message(STATUS "Using ${MALLOC_LIB}")
# TLS support
if (BUILD_TLS)
valkey_parse_build_option(${BUILD_TLS} USE_TLS)
if (USE_TLS EQUAL 1)
# Only search for OpenSSL if needed
find_package(OpenSSL REQUIRED)
message(STATUS "OpenSSL include dir: ${OPENSSL_INCLUDE_DIR}")
message(STATUS "OpenSSL libraries: ${OPENSSL_LIBRARIES}")
include_directories(${OPENSSL_INCLUDE_DIR})
endif ()
if (USE_TLS EQUAL 1)
add_valkey_server_compiler_options("-DUSE_OPENSSL=1")
add_valkey_server_compiler_options("-DBUILD_TLS_MODULE=0")
else ()
# An invalid value was provided for BUILD_TLS, so disable TLS
message(WARNING "BUILD_TLS can be one of: [ON | OFF | 1 | 0], but '${BUILD_TLS}' was provided")
message(STATUS "TLS support is disabled")
set(USE_TLS 0)
endif ()
else ()
# By default, TLS is disabled
message(STATUS "TLS is disabled")
set(USE_TLS 0)
endif ()
if (BUILD_RDMA)
set(BUILD_RDMA_MODULE 0)
# RDMA support (Linux only)
if (LINUX AND NOT APPLE)
valkey_parse_build_option(${BUILD_RDMA} USE_RDMA)
find_package(PkgConfig REQUIRED)
# Locate librdmacm & libibverbs, fail if we can't find them
valkey_pkg_config(librdmacm RDMACM_LIBS)
valkey_pkg_config(libibverbs IBVERBS_LIBS)
message(STATUS "${RDMACM_LIBS};${IBVERBS_LIBS}")
list(APPEND RDMA_LIBS "${RDMACM_LIBS};${IBVERBS_LIBS}")
if (USE_RDMA EQUAL 2) # Module
message(STATUS "Building RDMA as module")
add_valkey_server_compiler_options("-DUSE_RDMA=2")
set(BUILD_RDMA_MODULE 2)
elseif (USE_RDMA EQUAL 1) # Builtin
message(STATUS "Building RDMA as builtin")
add_valkey_server_compiler_options("-DUSE_RDMA=1")
add_valkey_server_compiler_options("-DBUILD_RDMA_MODULE=0")
list(APPEND SERVER_LIBS "${RDMA_LIBS}")
endif ()
else ()
message(WARNING "RDMA is only supported on Linux platforms")
endif ()
else ()
# By default, RDMA is disabled
message(STATUS "RDMA is disabled")
set(USE_RDMA 0)
endif ()
set(BUILDING_ARM64 0)
set(BUILDING_ARM32 0)
if ("${CMAKE_SYSTEM_PROCESSOR}" STREQUAL "arm64")
set(BUILDING_ARM64 1)
endif ()
if ("${CMAKE_SYSTEM_PROCESSOR}" STREQUAL "arm")
set(BUILDING_ARM32 1)
endif ()
message(STATUS "Building on ${CMAKE_HOST_SYSTEM_NAME}")
if (BUILDING_ARM64)
message(STATUS "Compiling valkey for ARM64")
add_valkey_server_linker_option("-funwind-tables")
endif ()
if (APPLE)
add_valkey_server_linker_option("-rdynamic")
add_valkey_server_linker_option("-ldl")
elseif (UNIX)
add_valkey_server_linker_option("-rdynamic")
add_valkey_server_linker_option("-pthread")
add_valkey_server_linker_option("-ldl")
add_valkey_server_linker_option("-lm")
endif ()
if (VALKEY_DEBUG_BUILD)
# Debug build: enable "-fno-omit-frame-pointer"
add_valkey_server_compiler_options("-fno-omit-frame-pointer")
endif ()
# Check for Atomic
check_include_files(stdatomic.h HAVE_C11_ATOMIC)
if (HAVE_C11_ATOMIC)
add_valkey_server_compiler_options("-std=gnu11")
else ()
add_valkey_server_compiler_options("-std=c99")
endif ()
# Sanitizer
if (BUILD_SANITIZER)
# Common CFLAGS
list(APPEND VALKEY_SANITAIZER_CFLAGS "-fno-sanitize-recover=all")
list(APPEND VALKEY_SANITAIZER_CFLAGS "-fno-omit-frame-pointer")
if ("${BUILD_SANITIZER}" STREQUAL "address")
list(APPEND VALKEY_SANITAIZER_CFLAGS "-fsanitize=address")
list(APPEND VALKEY_SANITAIZER_LDFLAGS "-fsanitize=address")
elseif ("${BUILD_SANITIZER}" STREQUAL "thread")
list(APPEND VALKEY_SANITAIZER_CFLAGS "-fsanitize=thread")
list(APPEND VALKEY_SANITAIZER_LDFLAGS "-fsanitize=thread")
elseif ("${BUILD_SANITIZER}" STREQUAL "undefined")
list(APPEND VALKEY_SANITAIZER_CFLAGS "-fsanitize=undefined")
list(APPEND VALKEY_SANITAIZER_LDFLAGS "-fsanitize=undefined")
else ()
message(FATAL_ERROR "Unknown sanitizer: ${BUILD_SANITIZER}")
endif ()
endif ()
include_directories("${CMAKE_SOURCE_DIR}/deps/hiredis")
include_directories("${CMAKE_SOURCE_DIR}/deps/linenoise")
include_directories("${CMAKE_SOURCE_DIR}/deps/lua/src")
include_directories("${CMAKE_SOURCE_DIR}/deps/hdr_histogram")
include_directories("${CMAKE_SOURCE_DIR}/deps/fpconv")
add_subdirectory("${CMAKE_SOURCE_DIR}/deps")
# Update linker flags for the allocator
if (USE_JEMALLOC)
include_directories("${CMAKE_SOURCE_DIR}/deps/jemalloc/include")
endif ()
# Common compiler flags
add_valkey_server_compiler_options("-pedantic")
# ----------------------------------------------------
# Build options (allocator, tls, rdma et al) - end
# ----------------------------------------------------
# -------------------------------------------------
# Code Generation section
# -------------------------------------------------
find_program(PYTHON_EXE python3)
if (PYTHON_EXE)
# Python based code generation
message(STATUS "Found python3: ${PYTHON_EXE}")
# Rule for generating commands.def file from json files
message(STATUS "Adding target generate_commands_def")
file(GLOB COMMAND_FILES_JSON "${CMAKE_SOURCE_DIR}/src/commands/*.json")
add_custom_command(
OUTPUT ${CMAKE_BINARY_DIR}/commands_def_generated
DEPENDS ${COMMAND_FILES_JSON}
COMMAND ${PYTHON_EXE} ${CMAKE_SOURCE_DIR}/utils/generate-command-code.py
COMMAND touch ${CMAKE_BINARY_DIR}/commands_def_generated
WORKING_DIRECTORY "${CMAKE_SOURCE_DIR}/src")
add_custom_target(generate_commands_def DEPENDS ${CMAKE_BINARY_DIR}/commands_def_generated)
# Rule for generating fmtargs.h
message(STATUS "Adding target generate_fmtargs_h")
add_custom_command(
OUTPUT ${CMAKE_BINARY_DIR}/fmtargs_generated
DEPENDS ${CMAKE_SOURCE_DIR}/utils/generate-fmtargs.py
COMMAND sed '/Everything/,$$d' fmtargs.h > fmtargs.h.tmp
COMMAND ${PYTHON_EXE} ${CMAKE_SOURCE_DIR}/utils/generate-fmtargs.py >> fmtargs.h.tmp
COMMAND mv fmtargs.h.tmp fmtargs.h
COMMAND touch ${CMAKE_BINARY_DIR}/fmtargs_generated
WORKING_DIRECTORY "${CMAKE_SOURCE_DIR}/src")
add_custom_target(generate_fmtargs_h DEPENDS ${CMAKE_BINARY_DIR}/fmtargs_generated)
# Rule for generating test_files.h
message(STATUS "Adding target generate_test_files_h")
file(GLOB UNIT_TEST_SRCS "${CMAKE_SOURCE_DIR}/src/unit/*.c")
add_custom_command(
OUTPUT ${CMAKE_BINARY_DIR}/test_files_generated
DEPENDS "${UNIT_TEST_SRCS};${CMAKE_SOURCE_DIR}/utils/generate-unit-test-header.py"
COMMAND ${PYTHON_EXE} ${CMAKE_SOURCE_DIR}/utils/generate-unit-test-header.py
COMMAND touch ${CMAKE_BINARY_DIR}/test_files_generated
WORKING_DIRECTORY "${CMAKE_SOURCE_DIR}/src")
add_custom_target(generate_test_files_h DEPENDS ${CMAKE_BINARY_DIR}/test_files_generated)
else ()
# Fake targets
add_custom_target(generate_commands_def)
add_custom_target(generate_fmtargs_h)
add_custom_target(generate_test_files_h)
endif ()
# Generate release.h file (always)
add_custom_target(
release_header
COMMAND sh -c '${CMAKE_SOURCE_DIR}/src/mkreleasehdr.sh'
WORKING_DIRECTORY "${CMAKE_SOURCE_DIR}/src")
# -------------------------------------------------
# Code Generation section - end
# -------------------------------------------------
# ----------------------------------------------------------
# All our source files are defined in SourceFiles.cmake file
# ----------------------------------------------------------
include(SourceFiles)
# Clear the below variables from the cache
unset(CMAKE_C_FLAGS CACHE)
unset(VALKEY_SERVER_LDFLAGS CACHE)
unset(VALKEY_SERVER_CFLAGS CACHE)
unset(PYTHON_EXE CACHE)
unset(HAVE_C11_ATOMIC CACHE)
unset(USE_TLS CACHE)
unset(USE_RDMA CACHE)
unset(BUILD_TLS CACHE)
unset(BUILD_RDMA CACHE)
unset(BUILD_MALLOC CACHE)
unset(USE_JEMALLOC CACHE)
unset(BUILD_TLS_MODULE CACHE)
unset(BUILD_TLS_BUILTIN CACHE)

deps/CMakeLists.txt (vendored, new file, 28 lines)
@ -0,0 +1,28 @@
if (USE_JEMALLOC)
add_subdirectory(jemalloc)
endif ()
add_subdirectory(lua)
# Set hiredis options. We need to override the defaults set in its OPTION(..) calls; we do this by setting them in the CACHE
set(BUILD_SHARED_LIBS
OFF
CACHE BOOL "Build shared libraries")
set(DISABLE_TESTS
ON
CACHE BOOL "If tests should be compiled or not")
if (USE_TLS) # Module or no module
message(STATUS "Building hiredis_ssl")
set(ENABLE_SSL
ON
CACHE BOOL "Should we test SSL connections")
endif ()
add_subdirectory(hiredis)
add_subdirectory(linenoise)
add_subdirectory(fpconv)
add_subdirectory(hdr_histogram)
# Clear any cached variables passed to hiredis from the cache
unset(BUILD_SHARED_LIBS CACHE)
unset(DISABLE_TESTS CACHE)
unset(ENABLE_SSL CACHE)

deps/Makefile (vendored, 7 lines changed)
@ -42,6 +42,7 @@ distclean:
 -(cd jemalloc && [ -f Makefile ] && $(MAKE) distclean) > /dev/null || true
 -(cd hdr_histogram && $(MAKE) clean) > /dev/null || true
 -(cd fpconv && $(MAKE) clean) > /dev/null || true
+-(cd fast_float_c_interface && $(MAKE) clean) > /dev/null || true
 -(rm -f .make-*)
 .PHONY: distclean
@ -116,3 +117,9 @@ jemalloc: .make-prerequisites
 cd jemalloc && $(MAKE) lib/libjemalloc.a
 .PHONY: jemalloc
+fast_float_c_interface: .make-prerequisites
+	@printf '%b %b\n' $(MAKECOLOR)MAKE$(ENDCOLOR) $(BINCOLOR)$@$(ENDCOLOR)
+	cd fast_float_c_interface && $(MAKE)
+.PHONY: fast_float_c_interface

deps/README.md (vendored, 16 lines changed)
@ -6,6 +6,7 @@ should be provided by the operating system.
 * **linenoise** is a readline replacement. It is developed by the same authors of Valkey but is managed as a separated project and updated as needed.
 * **lua** is Lua 5.1 with minor changes for security and additional libraries.
 * **hdr_histogram** Used for per-command latency tracking histograms.
+* **fast_float** is a replacement for strtod to convert strings to floats efficiently.
 How to upgrade the above dependencies
 ===
@ -94,6 +95,7 @@ and our version:
 1. Makefile is modified to allow a different compiler than GCC.
 2. We have the implementation source code, and directly link to the following external libraries: `lua_cjson.o`, `lua_struct.o`, `lua_cmsgpack.o` and `lua_bit.o`.
 3. There is a security fix in `ldo.c`, line 498: The check for `LUA_SIGNATURE[0]` is removed in order to avoid direct bytecode execution.
+4. In `lstring.c`, the luaS_newlstr function's hash calculation has been upgraded from a simple hash function to MurmurHash3, implemented within the same file, to enhance performance, particularly for operations involving large strings.
 Hdr_Histogram
 ---
@ -104,3 +106,17 @@ We use a customized version based on master branch commit e4448cf6d1cd08fff51981
 2. Copy updated files from newer version onto files in /hdr_histogram.
 3. Apply the changes from 1 above to the updated files.
+fast_float
+---
+The fast_float library provides fast header-only implementations for the C++ from_chars functions for `float` and `double` types as well as integer types. These functions convert ASCII strings representing decimal values (e.g., `1.3e10`) into binary types. The functions are much faster than comparable number-parsing functions from existing C++ standard libraries.
+Specifically, `fast_float` provides the following function to parse floating-point numbers with a C++17-like syntax (the library itself only requires C++11):
+template <typename T, typename UC = char, typename = FASTFLOAT_ENABLE_IF(is_supported_float_type<T>())>
+from_chars_result_t<UC> from_chars(UC const *first, UC const *last, T &value, chars_format fmt = chars_format::general);
+To upgrade the library,
+1. Check out https://github.com/fastfloat/fast_float/tree/main
+2. cd fast_float
+3. Invoke "python3 ./script/amalgamate.py --output fast_float.h"
+4. Copy fast_float.h file to "deps/fast_float/".

deps/fast_float/fast_float.h (vendored, new file, 3912 lines; diff suppressed because it is too large)

deps/fast_float_c_interface/Makefile (vendored, new file, 37 lines)
@ -0,0 +1,37 @@
CCCOLOR:="\033[34m"
SRCCOLOR:="\033[33m"
ENDCOLOR:="\033[0m"
CXX?=c++
# we need = instead of := so that $@ in QUIET_CXX gets evaluated in the rule and is assigned appropriate value.
TEMP:=$(CXX)
QUIET_CXX=@printf ' %b %b\n' $(CCCOLOR)C++$(ENDCOLOR) $(SRCCOLOR)$@$(ENDCOLOR) 1>&2;
CXX=$(QUIET_CXX)$(TEMP)
WARN=-Wall -W -Wno-missing-field-initializers
STD=-pedantic -std=c++11
OPT?=-O3
CLANG := $(findstring clang,$(shell sh -c '$(CC) --version | head -1'))
ifeq ($(OPT),-O3)
ifeq (clang,$(CLANG))
OPT+=-flto
else
OPT+=-flto=auto -ffat-lto-objects
endif
endif
# 1) Today src/Makefile passes -m32 flag for explicit 32-bit build on 64-bit machine, via CFLAGS. For 32-bit build on
# 32-bit machine and 64-bit on 64-bit machine, CFLAGS are empty. No other flags are set that can conflict with C++,
# therefore let's use CFLAGS without changes for now.
# 2) FASTFLOAT_ALLOWS_LEADING_PLUS allows +inf to be parsed as inf, instead of error.
CXXFLAGS=$(STD) $(OPT) $(WARN) -static -fPIC -fno-exceptions $(CFLAGS) -D FASTFLOAT_ALLOWS_LEADING_PLUS
.PHONY: all clean
all: fast_float_strtod.o
clean:
rm -f *.o || true;

@ -0,0 +1,24 @@
/*
* Copyright Valkey Contributors.
* All rights reserved.
* SPDX-License-Identifier: BSD 3-Clause
*/
#include "../fast_float/fast_float.h"
#include <cerrno>
#include <cstring> /* for strlen */
extern "C"
{
double fast_float_strtod(const char *str, const char** endptr)
{
double temp = 0;
auto answer = fast_float::from_chars(str, str + strlen(str), temp);
if (answer.ec != std::errc()) {
errno = (answer.ec == std::errc::result_out_of_range) ? ERANGE : EINVAL;
}
if (endptr) {
*endptr = answer.ptr;
}
return temp;
}
}

deps/fpconv/CMakeLists.txt (vendored, new file, 4 lines)
@ -0,0 +1,4 @@
project(fpconv)
set(SRCS "${CMAKE_CURRENT_LIST_DIR}/fpconv_dtoa.c" "${CMAKE_CURRENT_LIST_DIR}/fpconv_dtoa.h")
add_library(fpconv STATIC ${SRCS})

@ -6,7 +6,7 @@
 * [1] https://www.cs.tufts.edu/~nr/cs257/archive/florian-loitsch/printf.pdf
 * ----------------------------------------------------------------------------
 *
-* Copyright (c) 2021, Redis Labs
+* Copyright (c) 2021, Redis Ltd.
 * Copyright (c) 2013-2019, night-shift <as.smljk at gmail dot com>
 * Copyright (c) 2009, Florian Loitsch < florian.loitsch at inria dot fr >
 * All rights reserved.

deps/hdr_histogram/CMakeLists.txt (vendored, new file, 7 lines)
@ -0,0 +1,7 @@
project(hdr_histogram)
set(SRCS "${CMAKE_CURRENT_LIST_DIR}/hdr_histogram.c" "${CMAKE_CURRENT_LIST_DIR}/hdr_histogram.h"
"${CMAKE_CURRENT_LIST_DIR}/hdr_atomic.h" "${CMAKE_CURRENT_LIST_DIR}/hdr_redis_malloc.h")
add_library(hdr_histogram STATIC ${SRCS})
target_compile_definitions(hdr_histogram PRIVATE HDR_MALLOC_INCLUDE=\"hdr_redis_malloc.h\")

@ -1,13 +1,13 @@
 #ifndef HDR_MALLOC_H__
 #define HDR_MALLOC_H__
-void *zmalloc(size_t size);
+void *valkey_malloc(size_t size);
 void *zcalloc_num(size_t num, size_t size);
-void *zrealloc(void *ptr, size_t size);
+void *valkey_realloc(void *ptr, size_t size);
-void zfree(void *ptr);
+void valkey_free(void *ptr);
-#define hdr_malloc zmalloc
+#define hdr_malloc valkey_malloc
 #define hdr_calloc zcalloc_num
-#define hdr_realloc zrealloc
+#define hdr_realloc valkey_realloc
-#define hdr_free zfree
+#define hdr_free valkey_free
 #endif

@ -112,7 +112,7 @@ jobs:
     run: $GITHUB_WORKSPACE/test.sh
 freebsd:
-    runs-on: macos-12
+    runs-on: macos-13
     name: FreeBSD
     steps:
       - uses: actions/checkout@v3

View File

@ -1,4 +1,4 @@
Copyright (c) 2009-2011, Salvatore Sanfilippo <antirez at gmail dot com> Copyright (c) 2009-2011, Redis Ltd.
Copyright (c) 2010-2011, Pieter Noordhuis <pcnoordhuis at gmail dot com> Copyright (c) 2010-2011, Pieter Noordhuis <pcnoordhuis at gmail dot com>
All rights reserved. All rights reserved.

View File

@ -1,5 +1,5 @@
# Hiredis Makefile # Hiredis Makefile
# Copyright (C) 2010-2011 Salvatore Sanfilippo <antirez at gmail dot com> # Copyright (C) 2010-2011 Redis Ltd.
# Copyright (C) 2010-2011 Pieter Noordhuis <pcnoordhuis at gmail dot com> # Copyright (C) 2010-2011 Pieter Noordhuis <pcnoordhuis at gmail dot com>
# This file is released under the BSD license, see the COPYING file # This file is released under the BSD license, see the COPYING file

View File

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2009-2011, Salvatore Sanfilippo <antirez at gmail dot com> * Copyright (c) 2009-2011, Redis Ltd.
* Copyright (c) 2010-2011, Pieter Noordhuis <pcnoordhuis at gmail dot com> * Copyright (c) 2010-2011, Pieter Noordhuis <pcnoordhuis at gmail dot com>
* *
* All rights reserved. * All rights reserved.

View File

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2009-2011, Salvatore Sanfilippo <antirez at gmail dot com>
+ * Copyright (c) 2009-2011, Redis Ltd.
  * Copyright (c) 2010-2011, Pieter Noordhuis <pcnoordhuis at gmail dot com>
  *
  * All rights reserved.


@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2009-2011, Salvatore Sanfilippo <antirez at gmail dot com>
+ * Copyright (c) 2009-2011, Redis Ltd.
  * Copyright (c) 2010-2011, Pieter Noordhuis <pcnoordhuis at gmail dot com>
  *
  * All rights reserved.

deps/hiredis/dict.c vendored

@@ -5,7 +5,7 @@
  * tables of power of two in size are used, collisions are handled by
  * chaining. See the source code for more information... :)
  *
- * Copyright (c) 2006-2010, Salvatore Sanfilippo <antirez at gmail dot com>
+ * Copyright (c) 2006-2010, Redis Ltd.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without

deps/hiredis/dict.h vendored

@@ -5,7 +5,7 @@
  * tables of power of two in size are used, collisions are handled by
  * chaining. See the source code for more information... :)
  *
- * Copyright (c) 2006-2010, Salvatore Sanfilippo <antirez at gmail dot com>
+ * Copyright (c) 2006-2010, Redis Ltd.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without


@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2020, Salvatore Sanfilippo <antirez at gmail dot com>
+ * Copyright (c) 2020, Redis Ltd.
  * Copyright (c) 2020, Pieter Noordhuis <pcnoordhuis at gmail dot com>
  * Copyright (c) 2020, Matt Stancliff <matt at genges dot com>,
  *                     Jan-Erik Rediger <janerik at fnordig dot com>


@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2009-2011, Salvatore Sanfilippo <antirez at gmail dot com>
+ * Copyright (c) 2009-2011, Redis Ltd.
  * Copyright (c) 2010-2014, Pieter Noordhuis <pcnoordhuis at gmail dot com>
  * Copyright (c) 2015, Matt Stancliff <matt at genges dot com>,
  *                     Jan-Erik Rediger <janerik at fnordig dot com>


@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2009-2011, Salvatore Sanfilippo <antirez at gmail dot com>
+ * Copyright (c) 2009-2011, Redis Ltd.
  * Copyright (c) 2010-2014, Pieter Noordhuis <pcnoordhuis at gmail dot com>
  * Copyright (c) 2015, Matt Stancliff <matt at genges dot com>,
  *                     Jan-Erik Rediger <janerik at fnordig dot com>


@@ -1,6 +1,6 @@
 /*
- * Copyright (c) 2019, Redis Labs
+ * Copyright (c) 2019, Redis Ltd.
  *
  * All rights reserved.
  *

deps/hiredis/net.c vendored

@@ -1,6 +1,6 @@
 /* Extracted from anet.c to work properly with Hiredis error reporting.
  *
- * Copyright (c) 2009-2011, Salvatore Sanfilippo <antirez at gmail dot com>
+ * Copyright (c) 2009-2011, Redis Ltd.
  * Copyright (c) 2010-2014, Pieter Noordhuis <pcnoordhuis at gmail dot com>
  * Copyright (c) 2015, Matt Stancliff <matt at genges dot com>,
  *                     Jan-Erik Rediger <janerik at fnordig dot com>

deps/hiredis/net.h vendored

@@ -1,6 +1,6 @@
 /* Extracted from anet.c to work properly with Hiredis error reporting.
  *
- * Copyright (c) 2009-2011, Salvatore Sanfilippo <antirez at gmail dot com>
+ * Copyright (c) 2009-2011, Redis Ltd.
  * Copyright (c) 2010-2014, Pieter Noordhuis <pcnoordhuis at gmail dot com>
  * Copyright (c) 2015, Matt Stancliff <matt at genges dot com>,
  *                     Jan-Erik Rediger <janerik at fnordig dot com>

deps/hiredis/read.c vendored

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2009-2011, Salvatore Sanfilippo <antirez at gmail dot com>
+ * Copyright (c) 2009-2011, Redis Ltd.
  * Copyright (c) 2010-2011, Pieter Noordhuis <pcnoordhuis at gmail dot com>
  *
  * All rights reserved.

deps/hiredis/read.h vendored

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2009-2011, Salvatore Sanfilippo <antirez at gmail dot com>
+ * Copyright (c) 2009-2011, Redis Ltd.
  * Copyright (c) 2010-2011, Pieter Noordhuis <pcnoordhuis at gmail dot com>
  *
  * All rights reserved.

deps/hiredis/sds.c vendored

@@ -1,8 +1,7 @@
 /* SDSLib 2.0 -- A C dynamic strings library
  *
- * Copyright (c) 2006-2015, Salvatore Sanfilippo <antirez at gmail dot com>
+ * Copyright (c) 2006-2015, Redis Ltd.
  * Copyright (c) 2015, Oran Agra
- * Copyright (c) 2015, Redis Labs, Inc
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without

deps/hiredis/sds.h vendored

@@ -1,8 +1,7 @@
 /* SDSLib 2.0 -- A C dynamic strings library
  *
- * Copyright (c) 2006-2015, Salvatore Sanfilippo <antirez at gmail dot com>
+ * Copyright (c) 2006-2015, Redis Ltd.
  * Copyright (c) 2015, Oran Agra
- * Copyright (c) 2015, Redis Labs, Inc
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without


@@ -1,8 +1,7 @@
 /* SDSLib 2.0 -- A C dynamic strings library
  *
- * Copyright (c) 2006-2015, Salvatore Sanfilippo <antirez at gmail dot com>
+ * Copyright (c) 2006-2015, Redis Ltd.
  * Copyright (c) 2015, Oran Agra
- * Copyright (c) 2015, Redis Labs, Inc
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without

deps/hiredis/ssl.c vendored

@@ -1,7 +1,7 @@
 /*
- * Copyright (c) 2009-2011, Salvatore Sanfilippo <antirez at gmail dot com>
+ * Copyright (c) 2009-2011, Redis Ltd.
  * Copyright (c) 2010-2011, Pieter Noordhuis <pcnoordhuis at gmail dot com>
- * Copyright (c) 2019, Redis Labs
+ * Copyright (c) 2019, Redis Ltd.
  *
  * All rights reserved.
  *

deps/jemalloc/CMakeLists.txt vendored Normal file

@@ -0,0 +1,32 @@
project(jemalloc)

# Build jemalloc using configure && make install
set(JEMALLOC_INSTALL_DIR ${CMAKE_BINARY_DIR}/jemalloc-build)
set(JEMALLOC_SRC_DIR ${CMAKE_CURRENT_LIST_DIR})
if (NOT EXISTS ${JEMALLOC_INSTALL_DIR}/lib/libjemalloc.a)
    message(STATUS "Building jemalloc (custom build)")
    message(STATUS "JEMALLOC_SRC_DIR = ${JEMALLOC_SRC_DIR}")
    message(STATUS "JEMALLOC_INSTALL_DIR = ${JEMALLOC_INSTALL_DIR}")
    execute_process(
        COMMAND sh -c "${JEMALLOC_SRC_DIR}/configure --disable-cxx \
            --with-version=5.3.0-0-g0 --with-lg-quantum=3 --disable-cache-oblivious --with-jemalloc-prefix=je_ \
            --enable-static --disable-shared --prefix=${JEMALLOC_INSTALL_DIR}"
        WORKING_DIRECTORY ${JEMALLOC_SRC_DIR} RESULTS_VARIABLE CONFIGURE_RESULT)
    if (NOT ${CONFIGURE_RESULT} EQUAL 0)
        message(FATAL_ERROR "Jemalloc configure failed")
    endif ()
    execute_process(COMMAND make -j${VALKEY_PROCESSOR_COUNT} lib/libjemalloc.a install
                    WORKING_DIRECTORY "${JEMALLOC_SRC_DIR}" RESULTS_VARIABLE MAKE_RESULT)
    if (NOT ${MAKE_RESULT} EQUAL 0)
        message(FATAL_ERROR "Jemalloc build failed")
    endif ()
endif ()

# Import the compiled library as a CMake target
add_library(jemalloc STATIC IMPORTED GLOBAL)
set_target_properties(jemalloc PROPERTIES IMPORTED_LOCATION "${JEMALLOC_INSTALL_DIR}/lib/libjemalloc.a"
                                          INCLUDE_DIRECTORIES "${JEMALLOC_INSTALL_DIR}/include")


@@ -337,55 +337,4 @@ imalloc_fastpath(size_t size, void *(fallback_alloc)(size_t)) {
     return fallback_alloc(size);
 }
 
-JEMALLOC_ALWAYS_INLINE int
-iget_defrag_hint(tsdn_t *tsdn, void* ptr) {
-    int defrag = 0;
-    emap_alloc_ctx_t alloc_ctx;
-    emap_alloc_ctx_lookup(tsdn, &arena_emap_global, ptr, &alloc_ctx);
-    if (likely(alloc_ctx.slab)) {
-        /* Small allocation. */
-        edata_t *slab = emap_edata_lookup(tsdn, &arena_emap_global, ptr);
-        arena_t *arena = arena_get_from_edata(slab);
-        szind_t binind = edata_szind_get(slab);
-        unsigned binshard = edata_binshard_get(slab);
-        bin_t *bin = arena_get_bin(arena, binind, binshard);
-        malloc_mutex_lock(tsdn, &bin->lock);
-        arena_dalloc_bin_locked_info_t info;
-        arena_dalloc_bin_locked_begin(&info, binind);
-        /* Don't bother moving allocations from the slab currently used for new allocations */
-        if (slab != bin->slabcur) {
-            int free_in_slab = edata_nfree_get(slab);
-            if (free_in_slab) {
-                const bin_info_t *bin_info = &bin_infos[binind];
-                /* Find number of non-full slabs and the number of regs in them */
-                unsigned long curslabs = 0;
-                size_t curregs = 0;
-                /* Run on all bin shards (usually just one) */
-                for (uint32_t i = 0; i < bin_info->n_shards; i++) {
-                    bin_t *bb = arena_get_bin(arena, binind, i);
-                    curslabs += bb->stats.nonfull_slabs;
-                    /* Deduct the regs in full slabs (they're not part of the game) */
-                    unsigned long full_slabs = bb->stats.curslabs - bb->stats.nonfull_slabs;
-                    curregs += bb->stats.curregs - full_slabs * bin_info->nregs;
-                    if (bb->slabcur) {
-                        /* Remove slabcur from the overall utilization (not a candidate to move from) */
-                        curregs -= bin_info->nregs - edata_nfree_get(bb->slabcur);
-                        curslabs -= 1;
-                    }
-                }
-                /* Compare the utilization ratio of the slab in question to the total average
-                 * among non-full slabs. To avoid precision loss in division, we do that by
-                 * extrapolating the usage of the slab as if all slabs have the same usage.
-                 * If this slab is less used than the average, we'll prefer to move the data
-                 * to hopefully more used ones. To avoid stagnation when all slabs have the same
-                 * utilization, we give additional 12.5% weight to the decision to defrag. */
-                defrag = (bin_info->nregs - free_in_slab) * curslabs <= curregs + curregs / 8;
-            }
-        }
-        arena_dalloc_bin_locked_finish(tsdn, arena, bin, &info);
-        malloc_mutex_unlock(tsdn, &bin->lock);
-    }
-    return defrag;
-}
-
 #endif /* JEMALLOC_INTERNAL_INLINES_C_H */
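To make the removed heuristic concrete, here is a small worked example with invented numbers (a sketch; none of these values come from the source): a slab's utilization, extrapolated as if every non-full slab had the same usage, is compared against the bin's total used regions plus the 12.5% bias mentioned in the comment above.

```c
#include <stdio.h>

int main(void) {
    /* Hypothetical bin state, chosen only for illustration. */
    unsigned long nregs = 128;        /* regions per slab in this size class */
    unsigned long free_in_slab = 100; /* free regions in the candidate slab */
    unsigned long curslabs = 10;      /* non-full slabs, slabcur excluded */
    unsigned long curregs = 600;      /* used regions across those slabs */

    /* The candidate slab holds 28 used regions; extrapolated to 10 slabs
     * that is 280, which is <= 600 + 600/8 = 675, so the hint reports the
     * allocation as worth moving to a better-utilized slab. */
    int defrag = (nregs - free_in_slab) * curslabs <= curregs + curregs / 8;
    printf("defrag hint = %d\n", defrag); /* prints 1 */
    return 0;
}
```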


@@ -147,7 +147,3 @@
 #else
 #  define JEMALLOC_SYS_NOTHROW JEMALLOC_NOTHROW
 #endif
-
-/* This version of Jemalloc, modified for Redis, has the je_get_defrag_hint()
- * function. */
-#define JEMALLOC_FRAG_HINT


@@ -4474,12 +4474,3 @@ jemalloc_postfork_child(void) {
 }
 
 /******************************************************************************/
-
-/* Helps the application decide if a pointer is worth re-allocating in order to reduce fragmentation.
- * returns 1 if the allocation should be moved, and 0 if the allocation be kept.
- * If the application decides to re-allocate it should use MALLOCX_TCACHE_NONE when doing so. */
-JEMALLOC_EXPORT int JEMALLOC_NOTHROW
-get_defrag_hint(void* ptr) {
-    assert(ptr != NULL);
-    return iget_defrag_hint(TSDN_NULL, ptr);
-}

deps/linenoise/CMakeLists.txt vendored Normal file

@@ -0,0 +1,4 @@
project(linenoise)
set(SRCS "${CMAKE_CURRENT_LIST_DIR}/linenoise.c" "${CMAKE_CURRENT_LIST_DIR}/linenoise.h")
add_library(linenoise STATIC ${SRCS})


@@ -10,7 +10,7 @@
  *
  * ------------------------------------------------------------------------
  *
- * Copyright (c) 2010-2016, Salvatore Sanfilippo <antirez at gmail dot com>
+ * Copyright (c) 2010-2016, Redis Ltd.
  * Copyright (c) 2010-2013, Pieter Noordhuis <pcnoordhuis at gmail dot com>
  *
  * All rights reserved.


@@ -7,7 +7,7 @@
  *
  * ------------------------------------------------------------------------
  *
- * Copyright (c) 2010-2014, Salvatore Sanfilippo <antirez at gmail dot com>
+ * Copyright (c) 2010-2014, Redis Ltd.
  * Copyright (c) 2010-2013, Pieter Noordhuis <pcnoordhuis at gmail dot com>
  *
  * All rights reserved.

deps/lua/CMakeLists.txt vendored Normal file

@@ -0,0 +1,53 @@
project(lualib)
include(CheckFunctionExists)
set(LUA_SRC_DIR "${CMAKE_CURRENT_LIST_DIR}/src")
set(LUA_SRCS
${LUA_SRC_DIR}/fpconv.c
${LUA_SRC_DIR}/lbaselib.c
${LUA_SRC_DIR}/lmathlib.c
${LUA_SRC_DIR}/lstring.c
${LUA_SRC_DIR}/lparser.c
${LUA_SRC_DIR}/ldo.c
${LUA_SRC_DIR}/lzio.c
${LUA_SRC_DIR}/lmem.c
${LUA_SRC_DIR}/strbuf.c
${LUA_SRC_DIR}/lstrlib.c
${LUA_SRC_DIR}/lundump.c
${LUA_SRC_DIR}/lua_cmsgpack.c
${LUA_SRC_DIR}/loslib.c
${LUA_SRC_DIR}/lua_struct.c
${LUA_SRC_DIR}/ldebug.c
${LUA_SRC_DIR}/lobject.c
${LUA_SRC_DIR}/ldump.c
${LUA_SRC_DIR}/lua_cjson.c
${LUA_SRC_DIR}/ldblib.c
${LUA_SRC_DIR}/ltm.c
${LUA_SRC_DIR}/ltable.c
${LUA_SRC_DIR}/lstate.c
${LUA_SRC_DIR}/lua_bit.c
${LUA_SRC_DIR}/lua.c
${LUA_SRC_DIR}/loadlib.c
${LUA_SRC_DIR}/lcode.c
${LUA_SRC_DIR}/lapi.c
${LUA_SRC_DIR}/lgc.c
${LUA_SRC_DIR}/lvm.c
${LUA_SRC_DIR}/lfunc.c
${LUA_SRC_DIR}/lauxlib.c
${LUA_SRC_DIR}/ltablib.c
${LUA_SRC_DIR}/linit.c
${LUA_SRC_DIR}/lopcodes.c
${LUA_SRC_DIR}/llex.c
${LUA_SRC_DIR}/liolib.c)
add_library(lualib STATIC "${LUA_SRCS}")
target_include_directories(lualib PUBLIC "${LUA_SRC_DIR}")
target_compile_definitions(lualib PRIVATE ENABLE_CJSON_GLOBAL)
# Use mkstemp if available
check_function_exists(mkstemp HAVE_MKSTEMP)
if (HAVE_MKSTEMP)
target_compile_definitions(lualib PRIVATE LUA_USE_MKSTEMP)
endif ()
unset(HAVE_MKSTEMP CACHE)


@@ -6,6 +6,7 @@
 #include <string.h>
+#include <stdint.h>
 
 #define lstring_c
 #define LUA_CORE
@@ -71,14 +72,55 @@ static TString *newlstr (lua_State *L, const char *str, size_t l,
   return ts;
 }
 
+uint32_t murmur32(const uint8_t* key, size_t len, uint32_t seed) {
+  static const uint32_t c1 = 0xcc9e2d51;
+  static const uint32_t c2 = 0x1b873593;
+  static const uint32_t r1 = 15;
+  static const uint32_t r2 = 13;
+  static const uint32_t m = 5;
+  static const uint32_t n = 0xe6546b64;
+  uint32_t hash = seed;
+  const int nblocks = len / 4;
+  const uint32_t* blocks = (const uint32_t*) key;
+  for (int i = 0; i < nblocks; i++) {
+    uint32_t k = blocks[i];
+    k *= c1;
+    k = (k << r1) | (k >> (32 - r1));
+    k *= c2;
+    hash ^= k;
+    hash = ((hash << r2) | (hash >> (32 - r2))) * m + n;
+  }
+  const uint8_t* tail = (const uint8_t*) (key + nblocks * 4);
+  uint32_t k1 = 0;
+  switch (len & 3) {
+  case 3:
+    k1 ^= tail[2] << 16;
+  case 2:
+    k1 ^= tail[1] << 8;
+  case 1:
+    k1 ^= tail[0];
+    k1 *= c1;
+    k1 = (k1 << r1) | (k1 >> (32 - r1));
+    k1 *= c2;
+    hash ^= k1;
+  }
+  hash ^= len;
+  hash ^= (hash >> 16);
+  hash *= 0x85ebca6b;
+  hash ^= (hash >> 13);
+  hash *= 0xc2b2ae35;
+  hash ^= (hash >> 16);
+  return hash;
+}
+
 TString *luaS_newlstr (lua_State *L, const char *str, size_t l) {
   GCObject *o;
-  unsigned int h = cast(unsigned int, l);  /* seed */
-  size_t step = 1;
-  size_t l1;
-  for (l1=l; l1>=step; l1-=step)  /* compute hash */
-    h = h ^ ((h<<5)+(h>>2)+cast(unsigned char, str[l1-1]));
+  unsigned int h = murmur32((uint8_t *)str, l, (uint32_t)l);
   for (o = G(L)->strt.hash[lmod(h, G(L)->strt.size)];
        o != NULL;
        o = o->gch.next) {
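If you want to exercise the new string hash outside the server, the routine compiles standalone. The sketch below condenses the function above and seeds it with the string length exactly as luaS_newlstr() now does. One deliberate deviation, flagged as an assumption: blocks are read with memcpy rather than the uint32_t pointer cast, which avoids unaligned reads but yields the same MurmurHash3 value on little-endian machines.

```c
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint32_t murmur32(const uint8_t *key, size_t len, uint32_t seed) {
    const uint32_t c1 = 0xcc9e2d51, c2 = 0x1b873593;
    uint32_t hash = seed;
    const size_t nblocks = len / 4;
    for (size_t i = 0; i < nblocks; i++) {
        uint32_t k;
        memcpy(&k, key + i * 4, 4); /* same bytes, no unaligned access */
        k *= c1;
        k = (k << 15) | (k >> 17);
        k *= c2;
        hash ^= k;
        hash = ((hash << 13) | (hash >> 19)) * 5 + 0xe6546b64;
    }
    const uint8_t *tail = key + nblocks * 4;
    uint32_t k1 = 0;
    switch (len & 3) { /* intentional fall-through, as in the patch */
    case 3: k1 ^= (uint32_t)tail[2] << 16; /* fall through */
    case 2: k1 ^= (uint32_t)tail[1] << 8;  /* fall through */
    case 1: k1 ^= tail[0];
        k1 *= c1;
        k1 = (k1 << 15) | (k1 >> 17);
        k1 *= c2;
        hash ^= k1;
    }
    hash ^= (uint32_t)len;
    hash ^= hash >> 16;
    hash *= 0x85ebca6b;
    hash ^= hash >> 13;
    hash *= 0xc2b2ae35;
    hash ^= hash >> 16;
    return hash;
}

int main(void) {
    const char *s = "hello";
    size_t l = strlen(s);
    /* Seeded with the length, mirroring luaS_newlstr(). */
    printf("%08x\n", murmur32((const uint8_t *)s, l, (uint32_t)l));
    return 0;
}
```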


@@ -132,6 +132,7 @@ static int bit_tohex(lua_State *L)
   const char *hexdigits = "0123456789abcdef";
   char buf[8];
   int i;
+  if (n == INT32_MIN) n = INT32_MIN+1;
   if (n < 0) { n = -n; hexdigits = "0123456789ABCDEF"; }
   if (n > 8) n = 8;
   for (i = (int)n; --i >= 0; ) { buf[i] = hexdigits[b & 15]; b >>= 4; }
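The added guard matters because negating INT32_MIN is undefined behaviour in C: +2147483648 does not fit in a 32-bit signed integer, so the pre-existing `n = -n` could overflow. A minimal standalone illustration of the same clamp:

```c
#include <stdint.h>
#include <stdio.h>

int main(void) {
    int32_t n = INT32_MIN;
    if (n == INT32_MIN) n = INT32_MIN + 1; /* the patch's clamp */
    if (n < 0) n = -n;                     /* now well defined */
    printf("%d\n", n);                     /* prints 2147483647 */
    return 0;
}
```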


@@ -10,7 +10,7 @@
 #define LUACMSGPACK_NAME "cmsgpack"
 #define LUACMSGPACK_SAFE_NAME "cmsgpack_safe"
 #define LUACMSGPACK_VERSION "lua-cmsgpack 0.4.0"
-#define LUACMSGPACK_COPYRIGHT "Copyright (C) 2012, Salvatore Sanfilippo"
+#define LUACMSGPACK_COPYRIGHT "Copyright (C) 2012, Redis Ltd."
 
 /* Allows a preprocessor directive to override MAX_NESTING */
@@ -39,7 +39,7 @@
 /* =============================================================================
  * MessagePack implementation and bindings for Lua 5.1/5.2.
- * Copyright(C) 2012 Salvatore Sanfilippo <antirez@gmail.com>
+ * Copyright(C) 2012 Redis Ltd.
  *
  * http://github.com/antirez/lua-cmsgpack
  *
@@ -958,7 +958,7 @@ LUALIB_API int luaopen_cmsgpack_safe(lua_State *L) {
 }
 
 /******************************************************************************
- * Copyright (C) 2012 Salvatore Sanfilippo. All rights reserved.
+ * Copyright (C) 2012 Redis Ltd. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining
  * a copy of this software and associated documentation files (the


@@ -1,9 +1,9 @@
-# Valkey configuration file example.
+# Futriix configuration file example.
 #
 # Note that in order to read the configuration file, the server must be
 # started with the file path as first argument:
 #
-# ./valkey-server /path/to/valkey.conf
+# ./futriix-server /path/to/futriix.conf
 
 # Note on units: when memory size is needed, it is possible to specify
 # it in the usual form of 1k 5GB 4M and so forth:
@@ -49,7 +49,7 @@
 # Load modules at startup. If the server is not able to load modules
 # it will abort. It is possible to use multiple loadmodule directives.
 #
-# loadmodule /path/to/my_module.so
+loadmodule /home/grigoriy/futriix/futriixjson.so
 # loadmodule /path/to/other_module.so
 # loadmodule /path/to/args_module.so [arg [arg ...]]
@@ -62,7 +62,7 @@
 # Each address can be prefixed by "-", which means that the server will not fail to
 # start if the address is not available. Being not available only refers to
 # addresses that does not correspond to any network interface. Addresses that
-# are already in use will always fail, and unsupported protocols will always BE
+# are already in use will always fail, and unsupported protocols will always be
 # silently skipped.
 #
 # Examples:
@@ -134,9 +134,9 @@ protected-mode yes
 # enable-debug-command no
 # enable-module-command no
 
-# Accept connections on the specified port, default is 6379 (IANA #815344).
+# Accept connections on the specified port, default is 9880.
 # If port 0 is specified the server will not listen on a TCP socket.
-port 6379
+port 7007
 
 # TCP listen() backlog.
 #
@@ -154,6 +154,7 @@ tcp-backlog 511
 # on a unix socket when not specified.
 #
 # unixsocket /run/valkey.sock
+# unixsocketgroup wheel
 # unixsocketperm 700
 
 # Close the connection after a client is idle for N seconds (0 to disable)
@@ -196,8 +197,8 @@ tcp-keepalive 300
 # server to connected clients, primaries or cluster peers. These files should be
 # PEM formatted.
 #
-# tls-cert-file valkey.crt
-# tls-key-file valkey.key
+# tls-cert-file futriix.crt
+# tls-key-file futriix.key
 #
 # If the key file is encrypted using a passphrase, it can be included here
 # as well.
@@ -225,7 +226,7 @@ tcp-keepalive 300
 # required by older versions of OpenSSL (<3.0). Newer versions do not require
 # this configuration and recommend against it.
 #
-# tls-dh-params-file valkey.dh
+# tls-dh-params-file futriix.dh
 
 # Configure a CA certificate(s) bundle or directory to authenticate TLS/SSL
 # clients and peers. The server requires an explicit configuration of at least one
@@ -299,6 +300,54 @@ tcp-keepalive 300
 #
 # tls-session-cache-timeout 60
 
+################################### RDMA ######################################
+
+# Valkey Over RDMA is experimental, it may be changed or be removed in any minor or major version.
+# By default, RDMA is disabled. To enable it, the "rdma-port" configuration
+# directive can be used to define RDMA-listening ports.
+#
+# rdma-port 6379
+# rdma-bind 192.168.1.100
+
+# The RDMA receive transfer buffer is 1M by default. It can be set between 64K and 16M.
+# Note that page size aligned size is preferred.
+#
+# rdma-rx-size 1048576
+
+# The RDMA completion queue will use the completion vector to signal completion events
+# via hardware interrupts. A large number of hardware interrupts can affect CPU performance.
+# It is possible to tune the performance using rdma-completion-vector.
+#
+# Example 1. a) Pin hardware interrupt vectors [0, 3] to CPU [0, 3].
+#            b) Set CPU affinity for valkey to CPU [4, X].
+#            c) Any valkey server uses a random RDMA completion vector [-1].
+#               All valkey servers will not affect each other and will be isolated from kernel interrupts.
+#
+#    SYS    SYS    SYS    SYS    VALKEY  VALKEY       VALKEY
+#     |      |      |      |       |       |            |
+#    CPU0   CPU1   CPU2   CPU3   CPU4    CPU5    ...  CPUX
+#     |      |      |      |
+#   INTR0  INTR1  INTR2  INTR3
+#
+# Example 2. a) 1:1 pin hardware interrupt vectors [0, X] to CPU [0, X].
+#            b) Set CPU affinity for valkey [M] to CPU [M].
+#            c) Valkey server [M] uses RDMA completion vector [M].
+#               A single CPU [M] handles hardware interrupts, the RDMA completion vector [M],
+#               and the valkey server [M] within its context only.
+#               This avoids overhead and function calls across multiple CPUs, fully isolating
+#               each valkey server from one another.
+#
+#   VALKEY  VALKEY  VALKEY  VALKEY  VALKEY  VALKEY       VALKEY
+#     |       |       |       |       |       |            |
+#    CPU0    CPU1    CPU2    CPU3    CPU4    CPU5    ...  CPUX
+#     |       |       |       |       |       |            |
+#   INTR0   INTR1   INTR2   INTR3   INTR4   INTR5        INTRX
+#
+# Use 0 and positive numbers to specify the RDMA completion vector, or specify -1 to allow
+# the server to use a random vector for a new connection. The default vector is -1.
+#
+# rdma-completion-vector 0
+
 ################################# GENERAL #####################################
 
 # By default the server does not run as a daemon. Use 'yes' if you need it.
@@ -336,7 +385,7 @@ daemonize no
 #
 # Note that on modern Linux systems "/run/valkey.pid" is more conforming
 # and should be used instead.
-pidfile /var/run/valkey_6379.pid
+pidfile /var/run/futriix_9880.pid
 
 # Specify the server verbosity level.
 # This can be one of:
@@ -347,6 +396,23 @@ pidfile /var/run/valkey_6379.pid
 # nothing (nothing is logged)
 loglevel notice
 
+# Specify the logging format.
+# This can be one of:
+#
+# - legacy: the default, traditional log format
+# - logfmt: a structured log format; see https://www.brandur.org/logfmt
+#
+# log-format legacy
+
+# Specify the timestamp format used in logs using 'log-timestamp-format'.
+#
+# - legacy: default format
+# - iso8601: ISO 8601 extended date and time with time zone, on the form
+#   yyyy-mm-ddThh:mm:ss.sss±hh:mm
+# - milliseconds: milliseconds since the epoch
+#
+# log-timestamp-format legacy
+
 # Specify the log file name. Also the empty string can be used to force
 # the server to log on the standard output. Note that if you use standard
 # output for logging but daemonize, logs will be sent to /dev/null
@@ -386,6 +452,14 @@ databases 16
 # ASCII art logo in startup logs by setting the following option to yes.
 always-show-logo no
 
+# User data, including keys, values, client names, and ACL usernames, can be
+# logged as part of assertions and other error cases. To prevent sensitive user
+# information, such as PII, from being recorded in the server log file, this
+# user data is hidden from the log by default. If you need to log user data for
+# debugging or troubleshooting purposes, you can disable this feature by
+# changing the config value to no.
+hide-user-data-from-log yes
+
 # By default, the server modifies the process title (as seen in 'top' and 'ps') to
 # provide some runtime information. It is possible to disable this and leave
 # the process name as executed by setting the following to no.
@@ -475,6 +549,15 @@ rdbcompression yes
 # tell the loading code to skip the check.
 rdbchecksum yes
 
+# Valkey can try to load an RDB dump produced by a future version of Valkey.
+# This can only work on a best-effort basis, because future RDB versions may
+# contain information that's not known to the current version. If no new features
+# are used, it may be possible to import the data produced by a later version,
+# but loading is aborted if unknown information is encountered. Possible values
+# are 'strict' and 'relaxed'. This also applies to replication and the RESTORE
+# command.
+rdb-version-check strict
+
 # Enables or disables full sanitization checks for ziplist and listpack etc when
 # loading an RDB or RESTORE payload. This reduces the chances of a assertion or
 # crash later on while processing commands.
@@ -508,6 +591,9 @@ rdb-del-sync-files no
 # The working directory.
 #
+# The server log is written relative to this directory, if the 'logfile'
+# configuration directive is a relative path.
+#
 # The DB will be written inside this directory, with the filename specified
 # above using the 'dbfilename' configuration directive.
 #
@@ -517,6 +603,9 @@ rdb-del-sync-files no
 # 'cluster-config-file' configuration directive is a relative path.
 #
 # Note that you must specify a directory here, not a file name.
+# Note that modifying 'dir' during runtime may have unexpected behavior,
+# for example when a child process is running, related file operations may
+# have unexpected effects.
 dir ./
 
 ################################# REPLICATION #################################
@@ -658,17 +747,20 @@ repl-diskless-sync-max-replicas 0
 # fully loaded in memory, resulting in higher memory usage.
 # For this reason we have the following options:
 #
 # "disabled"    - Don't use diskless load (store the rdb file to the disk first)
 # "swapdb"      - Keep current db contents in RAM while parsing the data directly
 #                 from the socket. Replicas in this mode can keep serving current
 #                 dataset while replication is in progress, except for cases where
 #                 they can't recognize primary as having a data set from same
 #                 replication history.
 #                 Note that this requires sufficient memory, if you don't have it,
 #                 you risk an OOM kill.
 # "on-empty-db" - Use diskless load only when current dataset is empty. This is
 #                 safer and avoid having old and new dataset loaded side by side
 #                 during replication.
+# "flush-before-load" - [dangerous] Flush all data before parsing. Note that if
+#                       there's a problem before the replication succeeded you may
+#                       lose all your data.
 repl-diskless-load disabled
 
 # This dual channel replication sync feature optimizes the full synchronization process
@@ -687,6 +779,11 @@ repl-diskless-load disabled
 # memory to accommodate the buffer during synchronization. However, this tradeoff is
 # generally beneficial as it prevents potential performance degradation on the primary
 # server, which is typically handling more critical operations.
+#
+# When toggling this configuration on or off during an ongoing synchronization process,
+# it does not change the already running sync method. The new configuration will take
+# effect only for subsequent synchronization processes.
 dual-channel-replication-enabled no
 
 # Master send PINGs to its replicas in a predefined interval. It's possible to
@@ -734,7 +831,7 @@ repl-disable-tcp-nodelay no
 #
 # The backlog is only allocated if there is at least one replica connected.
 #
-# repl-backlog-size 1mb
+# repl-backlog-size 10mb
 
 # After a primary has no connected replicas for some time, the backlog will be
 # freed. The following option configures the amount of seconds that need to
@@ -784,6 +881,13 @@ replica-priority 100
 #
 # replica-ignore-disk-write-errors no
 
+# Make the primary forbid expiration and eviction.
+# This is useful for sync tools, because expiration and eviction may cause the data corruption.
+# Sync tools can mark their connections as importing source by CLIENT IMPORT-SOURCE.
+# NOTICE: Clients should avoid writing the same key on the source server and the destination server.
+#
+# import-mode no
+
 # -----------------------------------------------------------------------------
 # By default, Sentinel includes all replicas in its reports. A replica
 # can be excluded from Sentinel's announcements. An unannounced replica
@@ -1234,8 +1338,8 @@ acllog-max-len 128
 ############################# LAZY FREEING ####################################
 
-# The server has two primitives to delete keys. One is called DEL and is a blocking
-# deletion of the object. It means that the server stops processing new commands
+# When keys are deleted, the server has historically freed their memory using
+# blocking operations. It means that the server stopped processing new commands
 # in order to reclaim all the memory associated with an object in a synchronous
 # way. If the key deleted is associated with a small object, the time needed
 # in order to execute the DEL command is very small and comparable to most other
@@ -1243,15 +1347,16 @@ acllog-max-len 128
 # aggregated value containing millions of elements, the server can block for
 # a long time (even seconds) in order to complete the operation.
 #
-# For the above reasons the server also offers non blocking deletion primitives
-# such as UNLINK (non blocking DEL) and the ASYNC option of FLUSHALL and
-# FLUSHDB commands, in order to reclaim memory in background. Those commands
-# are executed in constant time. Another thread will incrementally free the
-# object in the background as fast as possible.
+# For the above reasons, lazy freeing (or asynchronous freeing), has been
+# introduced. With lazy freeing, keys are deleted in constant time. Another
+# thread will incrementally free the object in the background as fast as
+# possible.
 #
-# DEL, UNLINK and ASYNC option of FLUSHALL and FLUSHDB are user-controlled.
-# It's up to the design of the application to understand when it is a good
-# idea to use one or the other. However the server sometimes has to
+# Starting from Valkey 8.0, lazy freeing is enabled by default. It is possible
+# to retain the synchronous freeing behaviour by setting the lazyfree related
+# configuration directives to 'no'.
+#
+# Commands like DEL, FLUSHALL and FLUSHDB delete keys, but the server can also
 # delete keys or flush the whole database as a side effect of other operations.
 # Specifically the server deletes objects independently of a user call in the
 # following scenarios:
@@ -1271,29 +1376,35 @@ acllog-max-len 128
 # its primary, the content of the whole database is removed in order to
 # load the RDB file just transferred.
 #
-# In all the above cases the default is to delete objects in a blocking way,
-# like if DEL was called. However you can configure each case specifically
-# in order to instead release memory in a non-blocking way like if UNLINK
-# was called, using the following configuration directives.
+# In all the above cases, the default is to release memory in a non-blocking
+# way.
 
-lazyfree-lazy-eviction no
-lazyfree-lazy-expire no
-lazyfree-lazy-server-del no
-replica-lazy-flush no
+lazyfree-lazy-eviction yes
+lazyfree-lazy-expire yes
+lazyfree-lazy-server-del yes
+replica-lazy-flush yes
 
-# It is also possible, for the case when to replace the user code DEL calls
-# with UNLINK calls is not easy, to modify the default behavior of the DEL
-# command to act exactly like UNLINK, using the following configuration
-# directive:
+# For keys deleted using the DEL command, lazy freeing is controlled by the
+# configuration directive 'lazyfree-lazy-user-del'. The default is 'yes'. The
+# UNLINK command is identical to the DEL command, except that UNLINK always
+# frees the memory lazily, regardless of this configuration directive:
 
-lazyfree-lazy-user-del no
+lazyfree-lazy-user-del yes
 
 # FLUSHDB, FLUSHALL, SCRIPT FLUSH and FUNCTION FLUSH support both asynchronous and synchronous
 # deletion, which can be controlled by passing the [SYNC|ASYNC] flags into the
 # commands. When neither flag is passed, this directive will be used to determine
 # if the data should be deleted asynchronously.
+#
+# When a replica performs a node reset via CLUSTER RESET, the entire
+# database content is removed to allow the node to become an empty primary.
+# This directive also determines whether the data should be deleted asynchronously.
+#
+# There are many problems with running flush synchronously. Even in single CPU
+# environments, the thread managers should balance between the freeing and
+# serving incoming requests. The default value is yes.
 
-lazyfree-lazy-user-flush no
+lazyfree-lazy-user-flush yes
 
 ################################ THREADED I/O #################################
@@ -1325,7 +1436,23 @@ lazyfree-lazy-user-flush no
 # to thread the write and read syscall and transfer the client buffers to the
 # socket and to enable threading of reads and protocol parsing.
 #
-# NOTE 2: If you want to test the server speedup using valkey-benchmark, make
+# When multiple commands are parsed by the I/O threads and ready for execution,
+# we take advantage of knowing the next set of commands and prefetch their
+# required dictionary entries in a batch. This reduces memory access costs.
+#
+# The optimal batch size depends on the specific workflow of the user.
+# The default batch size is 16, which can be modified using the
+# 'prefetch-batch-max-size' config.
+#
+# When the config is set to 0, prefetching is disabled.
+#
+# prefetch-batch-max-size 16
+#
+# NOTE:
+# 1. The 'io-threads-do-reads' config is deprecated and has no effect. Please
+#    avoid using this config if possible.
+#
+# 2. If you want to test the server speedup using valkey-benchmark, make
 # sure you also run the benchmark itself in threaded mode, using the
 # --threads option to match the number of server threads, otherwise you'll not
 # be able to notice the improvements.
@@ -1365,7 +1492,7 @@ oom-score-adj-values 0 200 800
 
 #################### KERNEL transparent hugepage CONTROL ######################
 
 # Usually the kernel Transparent Huge Pages control is set to "madvise" or
-# or "never" by default (/sys/kernel/mm/transparent_hugepage/enabled), in which
+# "never" by default (/sys/kernel/mm/transparent_hugepage/enabled), in which
 # case this config has no effect. On systems in which it is set to "always",
 # the server will attempt to disable it specifically for the server process in order
 # to avoid latency problems specifically with fork(2) and CoW.
@@ -1398,7 +1525,7 @@ disable-thp yes
 #
 # Please check https://valkey.io/topics/persistence for more information.
 
-appendonly no
+appendonly yes
 
 # The base name of the append only file.
 #
@@ -1540,7 +1667,7 @@ aof-timestamp-enabled no
 # Maximum time to wait for replicas when shutting down, in seconds.
 #
 # During shut down, a grace period allows any lagging replicas to catch up with
-# the latest replication offset before the primary exists. This period can
+# the latest replication offset before the primary exits. This period can
 # prevent data loss, especially for deployments without configured disk backups.
 #
 # The 'shutdown-timeout' value is the grace period's duration in seconds. It is
@@ -1595,7 +1722,7 @@ aof-timestamp-enabled no
 # started as cluster nodes can. In order to start a server instance as a
 # cluster node enable the cluster support uncommenting the following:
 #
-# cluster-enabled yes
+cluster-enabled yes
 
 # Every cluster node has a cluster configuration file. This file is not
 # intended to be edited by hand. It is created and updated by each node.
@@ -1603,13 +1730,13 @@ aof-timestamp-enabled no
 # Make sure that instances running in the same system do not have
 # overlapping cluster configuration file names.
 #
-# cluster-config-file nodes-6379.conf
+cluster-config-file nodes.conf
 
 # Cluster node timeout is the amount of milliseconds a node must be unreachable
 # for it to be considered in failure state.
 # Most other internal time limits are a multiple of the node timeout.
 #
-# cluster-node-timeout 15000
+cluster-node-timeout 5000
 
 # The cluster port is the port that the cluster bus will listen for inbound connections on. When set
 # to the default value, 0, it will be bound to the command port + 10000. Setting this value requires
@@ -1851,29 +1978,70 @@ aof-timestamp-enabled no
 # cluster-announce-port 0
 # cluster-announce-bus-port 6380
 
-################################## SLOW LOG ###################################
+################################## COMMAND LOG ###################################
 
-# The server Slow Log is a system to log queries that exceeded a specified
-# execution time. The execution time does not include the I/O operations
-# like talking with the client, sending the reply and so forth,
-# but just the time needed to actually execute the command (this is the only
-# stage of command execution where the thread is blocked and can not serve
-# other requests in the meantime).
+# The Command Log system is used to record commands that consume significant resources
+# during server operation, including CPU, memory, and network bandwidth.
+# These commands and the data they access may lead to abnormal instance operations,
+# the commandlog can help users quickly and intuitively locate issues.
 #
-# You can configure the slow log with two parameters: one tells the server
-# what is the execution time, in microseconds, to exceed in order for the
-# command to get logged, and the other parameter is the length of the
-# slow log. When a new command is logged the oldest one is removed from the
-# queue of logged commands.
+# Currently, three types of command logs are supported:
+#
+# SLOW: Logs commands that exceed a specified execution time. This excludes time spent
+# on I/O operations like client communication and focuses solely on the command's
+# processing time, where the main thread is blocked.
+#
+# LARGE-REQUEST: Logs commands with requests exceeding a defined size. This helps
+# identify potentially problematic commands that send excessive data to the server.
+#
+# LARGE-REPLY: Logs commands that generate replies exceeding a defined size. This
+# helps identify commands that return unusually large amounts of data, which may
+# impact network performance or client processing.
+#
+# Each log type has two key parameters:
+# 1. A threshold value that determines when a command is logged. This threshold is specific
+#    to the type of log (e.g., execution time, request size, or reply size). A negative value disables
+#    logging. A value of 0 logs all commands.
+# 2. A maximum length that specifies the number of entries to retain in the log. Increasing
+#    the length allows more entries to be stored but consumes additional memory. To clear all
+#    entries for a specific log type and reclaim memory, use the `COMMANDLOG RESET`
+#    subcommand followed by the log type.
+#
+# SLOW Command Logs
+# The SLOW log records commands that exceed a specified execution time. The execution time
+# does not include I/O operations, such as client communication or sending responses.
+# It only measures the time spent executing the command, during which the thread is blocked
+# and cannot handle other requests.
+#
+# The threshold is measured in microseconds.
+#
+# Backward Compatibility: The parameter `slowlog-log-slower-than` is still supported but
+# deprecated in favor of `commandlog-execution-slower-than`.
 
-# The following time is expressed in microseconds, so 1000000 is equivalent
-# to one second. Note that a negative number disables the slow log, while
-# a value of zero forces the logging of every command.
-slowlog-log-slower-than 10000
+commandlog-execution-slower-than 10000
 
 # There is no limit to this length. Just be aware that it will consume memory.
-# You can reclaim memory used by the slow log with SLOWLOG RESET.
-slowlog-max-len 128
+# You can reclaim memory used by the slow log with SLOWLOG RESET or COMMANDLOG RESET SLOW.
+commandlog-slow-execution-max-len 128
+#
+# LARGE_REQUEST Command Logs
+# The LARGE_REQUEST log tracks commands with requests exceeding a specified size. The request size
+# includes the command itself and all its arguments. For example, in `SET KEY VALUE`, the size is
+# determined by the combined size of the key and value. Commands that consume excessive network
+# bandwidth or query buffer space are recorded here.
+#
+# The threshold is measured in bytes.
+commandlog-request-larger-than 1048576
+# Record the number of commands.
+commandlog-large-request-max-len 128
+#
+# LARGE_REPLY Command Logs
+# The LARGE_REPLY log records commands that produce replies exceeding a specified size. These replies
+# may consume significant network bandwidth or client output buffer space. Examples include commands
+# like `KEYS` or `HGETALL` that return large datasets. Even a `GET` command may qualify if the value
+# is substantial.
+#
+# The threshold is measured in bytes.
+commandlog-reply-larger-than 1048576
+commandlog-large-reply-max-len 128
 
 ################################ LATENCY MONITOR ##############################
@@ -2046,24 +2214,22 @@ hll-sparse-max-bytes 3000
 stream-node-max-bytes 4096
 stream-node-max-entries 100
 
-# Active rehashing uses 1 millisecond every 100 milliseconds of CPU time in
-# order to help rehashing the main server hash table (the one mapping top-level
-# keys to values). The hash table implementation the server uses (see dict.c)
-# performs a lazy rehashing: the more operation you run into a hash table
-# that is rehashing, the more rehashing "steps" are performed, so if the
-# server is idle the rehashing is never complete and some more memory is used
-# by the hash table.
-#
-# The default is to use this millisecond 10 times every second in order to
-# actively rehash the main dictionaries, freeing memory when possible.
-#
-# If unsure:
-# use "activerehashing no" if you have hard latency requirements and it is
-# not a good thing in your environment that the server can reply from time to time
-# to queries with 2 milliseconds delay.
-#
-# use "activerehashing yes" if you don't have such hard requirements but
-# want to free memory asap when possible.
+# Active rehashing uses 1% of the CPU time to help perform incremental rehashing
+# of the main server hash tables, the ones mapping top-level keys to values.
+#
+# If active rehashing is disabled and rehashing is needed, a hash table is
+# rehashed one "step" on every operation performed on the hash table (add, find,
+# etc.), so if the server is idle, the rehashing may never complete and some
+# more memory is used by the hash tables. Active rehashing helps prevent this.
+#
+# Active rehashing runs as a background task. Depending on the value of 'hz',
+# the frequency at which the server performs background tasks, active rehashing
+# can cause the server to freeze for a short time. For example, if 'hz' is set
+# to 10, active rehashing runs for up to one millisecond every 100 milliseconds.
+# If a freeze of one millisecond is not acceptable, you can increase 'hz' to let
+# active rehashing run more often. If instead 'hz' is set to 100, active
+# rehashing runs up to only 100 microseconds every 10 milliseconds. The total is
+# still 1% of the time.
 
 activerehashing yes
 
 # The client output buffer limits can be used to force disconnection of clients
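A quick back-of-the-envelope check of the figures quoted in the rehashing text above (a sketch only; it assumes, as that text describes, that active rehashing runs once per background-task cycle, i.e. 'hz' times per second, with a budget of 1% of each interval):

```c
#include <stdio.h>

int main(void) {
    int hzs[] = {10, 100};
    for (int i = 0; i < 2; i++) {
        int hz = hzs[i];
        double interval_ms = 1000.0 / hz;      /* time between cycles */
        double budget_us = interval_ms * 10.0; /* 1% of the interval, in us */
        printf("hz=%-3d interval=%6.1f ms rehash budget=%7.1f us\n",
               hz, interval_ms, budget_us);
    }
    return 0; /* hz=10 -> 1000 us per 100 ms; hz=100 -> 100 us per 10 ms */
}
```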
@@ -2271,9 +2437,8 @@ rdb-save-incremental-fsync yes
 # Fragmentation is a natural process that happens with every allocator (but
 # less so with Jemalloc, fortunately) and certain workloads. Normally a server
 # restart is needed in order to lower the fragmentation, or at least to flush
-# away all the data and create it again. However thanks to this feature
-# implemented by Oran Agra, this process can happen at runtime
-# in a "hot" way, while the server is running.
+# away all the data and create it again. However thanks to this feature, this
+# process can happen at runtime in a "hot" way, while the server is running.
 #
 # Basically when the fragmentation is over a certain level (see the
 # configuration options below) the server will start to create new copies of the
@@ -2311,18 +2476,23 @@ rdb-save-incremental-fsync yes
 # Maximum percentage of fragmentation at which we use maximum effort
 # active-defrag-threshold-upper 100
 
-# Minimal effort for defrag in CPU percentage, to be used when the lower
-# threshold is reached
+# Minimal effort for defrag in CPU percentage, not cycle time as the name might
+# suggest, to be used when the lower threshold is reached.
 # active-defrag-cycle-min 1
 
-# Maximal effort for defrag in CPU percentage, to be used when the upper
-# threshold is reached
+# Maximal effort for defrag in CPU percentage, not cycle time as the name might
+# suggest, to be used when the upper threshold is reached.
 # active-defrag-cycle-max 25
 
 # Maximum number of set/hash/zset/list fields that will be processed from
 # the main dictionary scan
 # active-defrag-max-scan-fields 1000
 
+# The time spent (in microseconds) of the periodic active defrag process. This
+# affects the latency impact of active defrag on client commands. Smaller numbers
+# will result in less latency impact at the cost of increased defrag overhead.
+# active-defrag-cycle-us 500
+
 # Jemalloc background thread for purging will be enabled by default
 jemalloc-bg-thread yes


@@ -14,46 +14,4 @@
 fi
 
 $MAKE -C tests/modules && \
-$TCLSH tests/test_helper.tcl \
-    --single unit/moduleapi/commandfilter \
-    --single unit/moduleapi/basics \
-    --single unit/moduleapi/fork \
-    --single unit/moduleapi/testrdb \
-    --single unit/moduleapi/infotest \
-    --single unit/moduleapi/moduleconfigs \
-    --single unit/moduleapi/infra \
-    --single unit/moduleapi/propagate \
-    --single unit/moduleapi/hooks \
-    --single unit/moduleapi/misc \
-    --single unit/moduleapi/blockonkeys \
-    --single unit/moduleapi/blockonbackground \
-    --single unit/moduleapi/scan \
-    --single unit/moduleapi/datatype \
-    --single unit/moduleapi/auth \
-    --single unit/moduleapi/keyspace_events \
-    --single unit/moduleapi/blockedclient \
-    --single unit/moduleapi/getkeys \
-    --single unit/moduleapi/test_lazyfree \
-    --single unit/moduleapi/defrag \
-    --single unit/moduleapi/keyspecs \
-    --single unit/moduleapi/hash \
-    --single unit/moduleapi/zset \
-    --single unit/moduleapi/list \
-    --single unit/moduleapi/stream \
-    --single unit/moduleapi/mallocsize \
-    --single unit/moduleapi/datatype2 \
-    --single unit/moduleapi/cluster \
-    --single unit/moduleapi/aclcheck \
-    --single unit/moduleapi/subcommands \
-    --single unit/moduleapi/reply \
-    --single unit/moduleapi/cmdintrospection \
-    --single unit/moduleapi/eventloop \
-    --single unit/moduleapi/timer \
-    --single unit/moduleapi/publish \
-    --single unit/moduleapi/usercall \
-    --single unit/moduleapi/postnotifications \
-    --single unit/moduleapi/async_rm_call \
-    --single unit/moduleapi/moduleauth \
-    --single unit/moduleapi/rdbloadsave \
-    --single unit/moduleapi/crash \
-    "${@}"
+$TCLSH tests/test_helper.tcl --moduleapi "${@}"


@@ -1,358 +0,0 @@
# Example sentinel.conf
# By default protected mode is disabled in sentinel mode. Sentinel is reachable
# from interfaces other than localhost. Make sure the sentinel instance is
# protected from the outside world via firewalling or other means.
protected-mode no
# port <sentinel-port>
# The port that this sentinel instance will run on
port 26379
# By default Valkey Sentinel does not run as a daemon. Use 'yes' if you need it.
# Note that Valkey will write a pid file in /var/run/valkey-sentinel.pid when
# daemonized.
daemonize no
# When running daemonized, Valkey Sentinel writes a pid file in
# /var/run/valkey-sentinel.pid by default. You can specify a custom pid file
# location here.
pidfile /var/run/valkey-sentinel.pid
# Specify the server verbosity level.
# This can be one of:
# debug (a lot of information, useful for development/testing)
# verbose (lots of rarely useful info, but not a mess like the debug level)
# notice (moderately verbose, what you want in production probably)
# warning (only very important / critical messages are logged)
# nothing (nothing is logged)
loglevel notice
# Specify the log file name. The empty string can also be used to force
# Sentinel to log to standard output. Note that if you use standard
# output for logging but daemonize, logs will be sent to /dev/null
logfile ""
# To enable logging to the system logger, just set 'syslog-enabled' to yes,
# and optionally update the other syslog parameters to suit your needs.
# syslog-enabled no
# Specify the syslog identity.
# syslog-ident sentinel
# Specify the syslog facility. Must be USER or between LOCAL0-LOCAL7.
# syslog-facility local0
# sentinel announce-ip <ip>
# sentinel announce-port <port>
#
# The above two configuration directives are useful in environments where,
# because of NAT, Sentinel is reachable from outside via a non-local address.
#
# When announce-ip is provided, the Sentinel will claim the specified IP address
# in HELLO messages used to gossip its presence, instead of auto-detecting the
# local address as it usually does.
#
# Similarly when announce-port is provided and is valid and non-zero, Sentinel
# will announce the specified TCP port.
#
# The two options don't need to be used together. If only announce-ip is
# provided, the Sentinel will announce the specified IP and the server port
# as specified by the "port" option. If only announce-port is provided, the
# Sentinel will announce the auto-detected local IP and the specified port.
#
# Example:
#
# sentinel announce-ip 1.2.3.4
# dir <working-directory>
# Every long-running process should have a well-defined working directory.
# Having Valkey Sentinel chdir to /tmp at startup is the simplest way to keep
# the process from interfering with administrative tasks such as
# unmounting filesystems.
dir /tmp
# sentinel monitor <master-name> <ip> <valkey-port> <quorum>
#
# Tells Sentinel to monitor this master, and to consider it in O_DOWN
# (Objectively Down) state only if at least <quorum> sentinels agree.
#
# Note that whatever the ODOWN quorum is, a Sentinel will need to be
# elected by the majority of the known Sentinels in order to
# start a failover, so no failover can be performed in minority.
#
# Replicas are auto-discovered, so you don't need to specify replicas in
# any way. Sentinel itself will rewrite this configuration file adding
# the replicas using additional configuration options.
# Also note that the configuration file is rewritten when a
# replica is promoted to master.
#
# Note: master name should not include special characters or spaces.
# The valid charset is A-z 0-9 and the three characters ".-_".
sentinel monitor mymaster 127.0.0.1 6379 2
# sentinel auth-pass <master-name> <password>
#
# Set the password to use to authenticate with the master and replicas.
# Useful if there is a password set in the Valkey instances to monitor.
#
# Note that the master password is also used for replicas, so it is not
# possible to set a different password on master and replica instances
# if you want to be able to monitor these instances with Sentinel.
#
# However you can mix Valkey instances without authentication enabled with
# Valkey instances requiring authentication (as long as the password is
# the same for all the instances requiring it), because the AUTH command
# will have no effect on Valkey instances with authentication switched off.
#
# Example:
#
# sentinel auth-pass mymaster MySUPER--secret-0123passw0rd
# sentinel auth-user <master-name> <username>
#
# This is useful in order to authenticate to instances having ACL capabilities,
# that is, running Valkey. When just auth-pass is provided, the
# Sentinel instance will authenticate to Valkey using the old "AUTH <pass>"
# method. When a username is also provided, it will use "AUTH <user> <pass>".
# On the Valkey server side, the ACL providing just minimal access for
# Sentinel instances should be configured along the following lines:
#
# user sentinel-user >somepassword +client +subscribe +publish \
# +ping +info +multi +slaveof +config +client +exec on
# sentinel down-after-milliseconds <master-name> <milliseconds>
#
# Number of milliseconds the master (or any attached replica or sentinel) should
# be unreachable (as in, no acceptable reply to PING, continuously, for the
# specified period) in order to consider it in S_DOWN state (Subjectively
# Down).
#
# Default is 30 seconds.
sentinel down-after-milliseconds mymaster 30000
# Sentinel's ACL users are defined in the following format:
#
# user <username> ... acl rules ...
#
# For example:
#
# user worker +@admin +@connection ~* on >ffa9203c493aa99
#
# For more information about ACL configuration please refer to the Valkey
# website at https://valkey.io/topics/acl and valkey server configuration
# template valkey.conf.
# ACL LOG
#
# The ACL Log tracks failed commands and authentication events associated
# with ACLs. The ACL Log is useful to troubleshoot failed commands blocked
# by ACLs. The ACL Log is stored in memory. You can reclaim memory with
# ACL LOG RESET. Define the maximum entry length of the ACL Log below.
acllog-max-len 128
# Using an external ACL file
#
# Instead of configuring users here in this file, it is possible to use
# a stand-alone file just listing users. The two methods cannot be mixed:
# if you configure users here and at the same time you activate the external
# ACL file, the server will refuse to start.
#
# The format of the external ACL user file is exactly the same as the
# format that is used inside valkey.conf to describe users.
#
# aclfile /etc/valkey/sentinel-users.acl
# requirepass <password>
#
# You can configure Sentinel itself to require a password, however when doing
# so Sentinel will try to authenticate with the same password to all the
# other Sentinels. So you need to configure all your Sentinels in a given
# group with the same "requirepass" password. Check the following documentation
# for more info: https://valkey.io/topics/sentinel
#
# IMPORTANT NOTE: "requirepass" is a compatibility
# layer on top of the ACL system. Its only effect is to set
# the password for the default user. Clients will still authenticate using
# AUTH <password> as usual, or more explicitly with AUTH default <password>
# if they follow the new protocol: both will work.
#
# New config files are advised to use separate authentication control for
# incoming connections (via ACL), and for outgoing connections (via
# sentinel-user and sentinel-pass)
#
# The requirepass option is not compatible with the aclfile option and the ACL
# LOAD command; these will cause requirepass to be ignored.
# sentinel sentinel-user <username>
#
# You can configure Sentinel to authenticate with other Sentinels using a
# specific username.
# sentinel sentinel-pass <password>
#
# The password for Sentinel to authenticate with other Sentinels. If sentinel-user
# is not configured, Sentinel will use the 'default' user with sentinel-pass to authenticate.
# sentinel parallel-syncs <master-name> <numreplicas>
#
# How many replicas we can reconfigure to point to the new master simultaneously
# during the failover. Use a low number if you use the replicas to serve queries,
# to avoid all the replicas being unreachable at about the same
# time while performing the synchronization with the master.
sentinel parallel-syncs mymaster 1
# sentinel failover-timeout <master-name> <milliseconds>
#
# Specifies the failover timeout in milliseconds. It is used in many ways:
#
# - The time needed to re-start a failover after a previous failover was
#   already tried against the same master by a given Sentinel is two
#   times the failover timeout.
#
# - The time needed for a replica replicating to a wrong master according
#   to a Sentinel's current configuration to be forced to replicate
#   with the right master is exactly the failover timeout (counting since
#   the moment a Sentinel detected the misconfiguration).
#
# - The time needed to cancel a failover that is already in progress but
#   did not produce any configuration change (SLAVEOF NO ONE not yet
#   acknowledged by the promoted replica).
#
# - The maximum time a failover in progress waits for all the replicas to be
#   reconfigured as replicas of the new master. However even after this time
#   the replicas will be reconfigured by the Sentinels anyway, but not with
#   the exact parallel-syncs progression as specified.
#
# Default is 3 minutes.
sentinel failover-timeout mymaster 180000
# SCRIPTS EXECUTION
#
# sentinel notification-script and sentinel reconfig-script are used in order
# to configure scripts that are called to notify the system administrator
# or to reconfigure clients after a failover. The scripts are executed
# with the following rules for error handling:
#
# If script exits with "1" the execution is retried later (up to a maximum
# number of times currently set to 10).
#
# If script exits with "2" (or an higher value) the script execution is
# not retried.
#
# If script terminates because it receives a signal the behavior is the same
# as exit code 1.
#
# A script has a maximum running time of 60 seconds. After this limit is
# reached the script is terminated with a SIGKILL and the execution retried.
# NOTIFICATION SCRIPT
#
# sentinel notification-script <master-name> <script-path>
#
# Call the specified notification script for any sentinel event that is
# generated in the WARNING level (for instance -sdown, -odown, and so forth).
# This script should notify the system administrator via email, SMS, or any
# other messaging system, that there is something wrong with the monitored
# Valkey systems.
#
# The script is called with just two arguments: the first is the event type
# and the second the event description.
#
# The script must exist and be executable in order for sentinel to start if
# this option is provided.
#
# Example:
#
# sentinel notification-script mymaster /var/valkey/notify.sh
# CLIENTS RECONFIGURATION SCRIPT
#
# sentinel client-reconfig-script <master-name> <script-path>
#
# When the master changes because of a failover, a script can be called in
# order to perform application-specific tasks to notify the clients that the
# configuration has changed and the master is at a different address.
#
# The following arguments are passed to the script:
#
# <master-name> <role> <state> <from-ip> <from-port> <to-ip> <to-port>
#
# <state> is currently always "start"
# <role> is either "leader" or "observer"
#
# The arguments from-ip, from-port, to-ip, to-port are used to communicate
# the old address of the master and the new address of the elected replica
# (now a master).
#
# This script should be resistant to multiple invocations.
#
# Example:
#
# sentinel client-reconfig-script mymaster /var/valkey/reconfig.sh
# SECURITY
#
# By default SENTINEL SET will not be able to change the notification-script
# and client-reconfig-script at runtime. This avoids a trivial security issue
# where clients can set the script to anything and trigger a failover in order
# to get the program executed.
sentinel deny-scripts-reconfig yes
# VALKEY COMMANDS RENAMING (DEPRECATED)
#
# WARNING: avoid using this option if possible, instead use ACLs.
#
# Sometimes the Valkey server has certain commands that are needed for Sentinel
# to work correctly renamed to unguessable strings. This is often the case
# for CONFIG and SLAVEOF in the context of providers that offer Valkey as
# a service and don't want the customers to reconfigure the instances outside
# of the administration console.
#
# In such cases it is possible to tell Sentinel to use different command names
# instead of the normal ones. For example, if the master "mymaster" and the
# associated replicas have "CONFIG" all renamed to "GUESSME", you could use:
#
# SENTINEL rename-command mymaster CONFIG GUESSME
#
# After such a configuration is set, every time Sentinel would use CONFIG it will
# use GUESSME instead. Note that there is no actual need to match the command's
# case, so writing "config guessme" is the same in the example above.
#
# SENTINEL SET can also be used in order to perform this configuration at runtime.
#
# In order to set a command back to its original name (undo the renaming), it
# is possible to just rename a command to itself:
#
# SENTINEL rename-command mymaster CONFIG CONFIG
# HOSTNAMES SUPPORT
#
# Normally Sentinel uses only IP addresses and requires SENTINEL MONITOR
# to specify an IP address. Also, it requires the Valkey replica-announce-ip
# keyword to specify only IP addresses.
#
# You may enable hostnames support by enabling resolve-hostnames. Note
# that you must make sure your DNS is configured properly and that DNS
# resolution does not introduce very long delays.
#
SENTINEL resolve-hostnames no
# When resolve-hostnames is enabled, Sentinel still uses IP addresses
# when exposing instances to users, configuration files, etc. If you want
# hostnames to be used when they are announced, enable announce-hostnames below.
#
SENTINEL announce-hostnames no
# When primary-reboot-down-after-period is set to 0, Sentinel does not fail over
# when receiving a -LOADING response from a primary. This was the only supported
# behavior before Redis OSS 7.0.
#
# Otherwise, Sentinel will use this value as the time (in ms) it is willing to
# accept a -LOADING response after a primary has been rebooted, before failing
# over.
SENTINEL primary-reboot-down-after-period myprimary 0


@@ -2,7 +2,7 @@ BasedOnStyle: LLVM
 IndentWidth: 4
 TabWidth: 4
 UseTab: Never
-ColumnLimit: 120
+ColumnLimit: 0
 PenaltyBreakComment: 300
 PenaltyBreakFirstLessLess: 120
 PenaltyBreakString: 100
@@ -30,3 +30,4 @@ SortIncludes: false
 AllowAllParametersOfDeclarationOnNextLine: false
 BinPackParameters: false
 AlignAfterOpenBracket: Align
+InsertNewlineAtEOF: true

src/CMakeLists.txt (new file, 93 lines)

@@ -0,0 +1,93 @@
project(valkey-server)
set(INSTALL_BIN_PATH ${CMAKE_INSTALL_PREFIX}/bin)
set_directory_properties(PROPERTIES CLEAN_NO_CUSTOM 1)
# Target: valkey-server
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${VALKEY_SERVER_CFLAGS}")
message(STATUS "CFLAGS: ${CMAKE_C_FLAGS}")
get_valkey_server_linker_option(VALKEY_SERVER_LDFLAGS)
list(APPEND SERVER_LIBS "fpconv")
list(APPEND SERVER_LIBS "lualib")
list(APPEND SERVER_LIBS "hdr_histogram")
valkey_build_and_install_bin(valkey-server "${VALKEY_SERVER_SRCS}" "${VALKEY_SERVER_LDFLAGS}" "${SERVER_LIBS}"
"redis-server")
add_dependencies(valkey-server generate_commands_def)
add_dependencies(valkey-server generate_fmtargs_h)
add_dependencies(valkey-server release_header)
if (VALKEY_RELEASE_BUILD)
# Enable LTO for Release build
set_property(TARGET valkey-server PROPERTY INTERPROCEDURAL_OPTIMIZATION TRUE)
endif ()
if (DEBUG_FORCE_DEFRAG)
message(STATUS "Forcing Active Defrag run on valkey-server")
target_compile_definitions(valkey-server PRIVATE DEBUG_FORCE_DEFRAG)
target_compile_definitions(valkey-server PRIVATE HAVE_DEFRAG)
endif ()
if (BUILD_SANITIZER)
# 'BUILD_SANITIZER' is defined in ValkeySetup module (based on user input)
# If defined, the variables 'VALKEY_SANITAIZER_CFLAGS' and 'VALKEY_SANITAIZER_LDFLAGS'
# are set with the link & compile flags required
message(STATUS "Adding sanitizer flags for target valkey-server")
target_compile_options(valkey-server PRIVATE ${VALKEY_SANITAIZER_CFLAGS})
target_link_options(valkey-server PRIVATE ${VALKEY_SANITAIZER_LDFLAGS})
endif ()
unset(BUILD_SANITIZER CACHE)
# Target: valkey-cli
list(APPEND CLI_LIBS "linenoise")
valkey_build_and_install_bin(valkey-cli "${VALKEY_CLI_SRCS}" "${VALKEY_SERVER_LDFLAGS}" "${CLI_LIBS}" "redis-cli")
add_dependencies(valkey-cli generate_commands_def)
add_dependencies(valkey-cli generate_fmtargs_h)
# Target: valkey-benchmark
list(APPEND BENCH_LIBS "hdr_histogram")
valkey_build_and_install_bin(valkey-benchmark "${VALKEY_BENCHMARK_SRCS}" "${VALKEY_SERVER_LDFLAGS}" "${BENCH_LIBS}"
"redis-benchmark")
add_dependencies(valkey-benchmark generate_commands_def)
add_dependencies(valkey-benchmark generate_fmtargs_h)
# Targets: valkey-sentinel, valkey-check-aof and valkey-check-rdb are just symbolic links
valkey_create_symlink("valkey-server" "valkey-sentinel")
valkey_create_symlink("valkey-server" "valkey-check-rdb")
valkey_create_symlink("valkey-server" "valkey-check-aof")
# Target valkey-rdma
if (BUILD_RDMA_MODULE)
set(MODULE_NAME "valkey-rdma")
message(STATUS "Building RDMA module")
add_library(${MODULE_NAME} SHARED "${VALKEY_RDMA_MODULE_SRCS}")
target_compile_options(${MODULE_NAME} PRIVATE -DBUILD_RDMA_MODULE=2 -DUSE_RDMA=1)
target_link_libraries(${MODULE_NAME} "${RDMA_LIBS}")
# remove the "lib" prefix from the module
set_target_properties(${MODULE_NAME} PROPERTIES PREFIX "")
valkey_install_bin(${MODULE_NAME})
endif ()
# Target valkey-tls (a module)
if (BUILD_TLS_MODULE)
message(STATUS "Building TLS as a module")
set(MODULE_NAME "valkey-tls")
add_library(${MODULE_NAME} SHARED ${VALKEY_TLS_MODULE_SRCS})
target_compile_options(${MODULE_NAME} PRIVATE -DUSE_OPENSSL=2 -DBUILD_TLS_MODULE=2)
if (APPLE)
# Some symbols can only be resolved during runtime (they exist in the executable)
target_link_options(${MODULE_NAME} PRIVATE -undefined dynamic_lookup)
endif ()
target_link_libraries(${MODULE_NAME} hiredis_ssl OpenSSL::SSL)
set_target_properties(${MODULE_NAME} PROPERTIES PREFIX "")
endif ()
if (BUILD_EXAMPLE_MODULES)
# Include the modules ("hello*")
message(STATUS "Building example modules")
add_subdirectory(modules)
endif ()
if (BUILD_UNIT_TESTS)
add_subdirectory(unit)
endif ()


@@ -1,5 +1,5 @@
 # Valkey Makefile
-# Copyright (C) 2009 Salvatore Sanfilippo <antirez at gmail dot com>
+# Copyright (C) 2009 Redis Ltd.
 # This file is released under the BSD license, see the COPYING file
 #
 # The Makefile composes the final FINAL_CFLAGS and FINAL_LDFLAGS using
@@ -25,7 +25,7 @@ ifeq ($(OPTIMIZATION),-O3)
 ifeq (clang,$(CLANG))
 OPTIMIZATION+=-flto
 else
-OPTIMIZATION+=-flto=auto
+OPTIMIZATION+=-flto=auto -ffat-lto-objects
 endif
 endif
 ifneq ($(OPTIMIZATION),-O0)
@@ -98,15 +98,6 @@ ifeq ($(USE_JEMALLOC),no)
 MALLOC=libc
 endif
-# Some unit tests compile files a second time to get access to static functions, the "--allow-multiple-definition" flag
-# allows us to do that without an error, by using the first instance of function. This behavior can also be used
-# to tweak behavior of code just for unit tests. The version of ld on MacOS apparently always does this.
-ifneq ($(uname_S),Darwin)
-ALLOW_DUPLICATE_FLAG=-Wl,--allow-multiple-definition
-else
-ALLOW_DUPLICATE_FLAG=
-endif
 ifdef SANITIZER
 ifeq ($(SANITIZER),address)
 MALLOC=libc
@@ -139,10 +130,12 @@ ifdef REDIS_LDFLAGS
 SERVER_LDFLAGS := $(REDIS_LDFLAGS)
 endif
-FINAL_CFLAGS=$(STD) $(WARN) $(OPT) $(DEBUG) $(CFLAGS) $(SERVER_CFLAGS)
-ifeq ($(SERVER_TEST),yes)
-FINAL_CFLAGS +=-DSERVER_TEST=1
-endif
+# Special case of forcing defrag to run even though we have no Jemalloc support
+ifeq ($(DEBUG_FORCE_DEFRAG), yes)
+SERVER_CFLAGS +=-DHAVE_DEFRAG -DDEBUG_FORCE_DEFRAG
+endif
+FINAL_CFLAGS=$(STD) $(WARN) $(OPT) $(DEBUG) $(CFLAGS) $(SERVER_CFLAGS)
 FINAL_LDFLAGS=$(LDFLAGS) $(OPT) $(SERVER_LDFLAGS) $(DEBUG)
 FINAL_LIBS=-lm
 DEBUG=-g -ggdb
@@ -337,26 +330,26 @@ ifeq ($(BUILD_TLS),module)
 TLS_MODULE_CFLAGS+=-DUSE_OPENSSL=$(BUILD_MODULE) $(OPENSSL_CFLAGS) -DBUILD_TLS_MODULE=$(BUILD_MODULE)
 endif
-BUILD_RDMA:=no
-RDMA_MODULE=
-RDMA_MODULE_NAME:=valkey-rdma$(PROG_SUFFIX).so
-RDMA_MODULE_CFLAGS:=$(FINAL_CFLAGS)
-ifeq ($(BUILD_RDMA),module)
-FINAL_CFLAGS+=-DUSE_RDMA=$(BUILD_MODULE)
-RDMA_PKGCONFIG := $(shell $(PKG_CONFIG) --exists librdmacm libibverbs && echo $$?)
+RDMA_LIBS=
+RDMA_PKGCONFIG := $(shell $(PKG_CONFIG) --exists librdmacm libibverbs && echo $$?)
 ifeq ($(RDMA_PKGCONFIG),0)
 RDMA_LIBS=$(shell $(PKG_CONFIG) --libs librdmacm libibverbs)
 else
 RDMA_LIBS=-lrdmacm -libverbs
 endif
-RDMA_MODULE=$(RDMA_MODULE_NAME)
-RDMA_MODULE_CFLAGS+=-DUSE_RDMA=$(BUILD_YES) -DBUILD_RDMA_MODULE $(RDMA_LIBS)
-else
-ifeq ($(BUILD_RDMA),no)
-# disable RDMA, do nothing
-else
-$(error "RDMA is only supported as module (BUILD_RDMA=module), or disabled (BUILD_RDMA=no)")
+ifeq ($(BUILD_RDMA),yes)
+FINAL_CFLAGS+=-DUSE_RDMA=$(BUILD_YES) -DBUILD_RDMA_MODULE=$(BUILD_NO)
+FINAL_LIBS += $(RDMA_LIBS)
 endif
+RDMA_MODULE=
+RDMA_MODULE_NAME:=valkey-rdma$(PROG_SUFFIX).so
+RDMA_MODULE_CFLAGS:=$(FINAL_CFLAGS)
+ifeq ($(BUILD_RDMA),module)
+FINAL_CFLAGS+=-DUSE_RDMA=$(BUILD_MODULE)
+RDMA_MODULE=$(RDMA_MODULE_NAME)
+RDMA_MODULE_CFLAGS+=-DUSE_RDMA=$(BUILD_MODULE) -DBUILD_RDMA_MODULE=$(BUILD_MODULE) $(RDMA_LIBS)
 endif
 ifndef V
@@ -381,7 +374,7 @@ else
 endef
 endif
 # Determine install/uninstall Redis symlinks for compatibility when
 # installing/uninstalling Valkey binaries (defaulting to `yes`)
 USE_REDIS_SYMLINKS?=yes
 ifeq ($(USE_REDIS_SYMLINKS),yes)
@@ -397,9 +390,9 @@ SERVER_AR=$(QUIET_AR)$(AR)
 SERVER_LD=$(QUIET_LINK)$(CC) $(FINAL_LDFLAGS)
 ENGINE_INSTALL=$(QUIET_INSTALL)$(INSTALL)
-CCCOLOR="\033[34m"
-LINKCOLOR="\033[34;1m"
-SRCCOLOR="\033[33m"
+CCCOLOR="\033[33m"
+LINKCOLOR="\033[36;1m"
+SRCCOLOR="\033[32m"
 BINCOLOR="\033[37;1m"
 MAKECOLOR="\033[32;1m"
 ENDCOLOR="\033[0m"
@@ -420,10 +413,10 @@ else
 GEN_COMMANDS_FLAGS=
 endif
-ENGINE_NAME=valkey
+ENGINE_NAME=futriix
 SERVER_NAME=$(ENGINE_NAME)-server$(PROG_SUFFIX)
 ENGINE_SENTINEL_NAME=$(ENGINE_NAME)-sentinel$(PROG_SUFFIX)
-ENGINE_SERVER_OBJ=threads_mngr.o adlist.o quicklist.o ae.o anet.o dict.o kvstore.o server.o sds.o zmalloc.o lzf_c.o lzf_d.o pqsort.o zipmap.o sha1.o ziplist.o release.o io_threads.o networking.o util.o object.o db.o replication.o rdb.o t_string.o t_list.o t_set.o t_zset.o t_hash.o config.o aof.o pubsub.o multi.o debug.o sort.o intset.o syncio.o cluster.o cluster_legacy.o cluster_slot_stats.o crc16.o endianconv.o slowlog.o eval.o bio.o rio.o rand.o memtest.o syscheck.o crcspeed.o crccombine.o crc64.o bitops.o sentinel.o notify.o setproctitle.o blocked.o hyperloglog.o latency.o sparkline.o valkey-check-rdb.o valkey-check-aof.o geo.o lazyfree.o module.o evict.o expire.o geohash.o geohash_helper.o childinfo.o defrag.o siphash.o rax.o t_stream.o listpack.o localtime.o lolwut.o lolwut5.o lolwut6.o acl.o tracking.o socket.o tls.o sha256.o timeout.o setcpuaffinity.o monotonic.o mt19937-64.o resp_parser.o call_reply.o script_lua.o script.o functions.o function_lua.o commands.o strl.o connection.o unix.o logreqres.o
+ENGINE_SERVER_OBJ=threads_mngr.o adlist.o quicklist.o ae.o anet.o dict.o hashtable.o kvstore.o server.o sds.o zmalloc.o lzf_c.o lzf_d.o pqsort.o zipmap.o sha1.o ziplist.o release.o memory_prefetch.o io_threads.o networking.o util.o object.o db.o replication.o rdb.o t_string.o t_list.o t_set.o t_zset.o t_hash.o config.o aof.o pubsub.o multi.o debug.o sort.o intset.o syncio.o cluster.o cluster_legacy.o cluster_slot_stats.o crc16.o endianconv.o commandlog.o eval.o bio.o rio.o rand.o memtest.o syscheck.o crcspeed.o crccombine.o crc64.o bitops.o sentinel.o notify.o setproctitle.o blocked.o hyperloglog.o latency.o sparkline.o valkey-check-rdb.o valkey-check-aof.o geo.o lazyfree.o module.o evict.o expire.o geohash.o geohash_helper.o childinfo.o allocator_defrag.o defrag.o siphash.o rax.o t_stream.o listpack.o localtime.o lolwut.o lolwut5.o lolwut6.o acl.o tracking.o socket.o tls.o sha256.o timeout.o setcpuaffinity.o monotonic.o mt19937-64.o resp_parser.o call_reply.o script_lua.o script.o functions.o function_lua.o commands.o strl.o connection.o unix.o logreqres.o rdma.o scripting_engine.o
 ENGINE_CLI_NAME=$(ENGINE_NAME)-cli$(PROG_SUFFIX)
 ENGINE_CLI_OBJ=anet.o adlist.o dict.o valkey-cli.o zmalloc.o release.o ae.o serverassert.o crcspeed.o crccombine.o crc64.o siphash.o crc16.o monotonic.o cli_common.o mt19937-64.o strl.o cli_commands.o
 ENGINE_BENCHMARK_NAME=$(ENGINE_NAME)-benchmark$(PROG_SUFFIX)
@@ -436,6 +429,17 @@ ENGINE_TEST_OBJ:=$(sort $(patsubst unit/%.c,unit/%.o,$(ENGINE_TEST_FILES)))
 ENGINE_UNIT_TESTS:=$(ENGINE_NAME)-unit-tests$(PROG_SUFFIX)
 ALL_SOURCES=$(sort $(patsubst %.o,%.c,$(ENGINE_SERVER_OBJ) $(ENGINE_CLI_OBJ) $(ENGINE_BENCHMARK_OBJ)))
+USE_FAST_FLOAT?=no
+ifeq ($(USE_FAST_FLOAT),yes)
+# valkey_strtod.h uses this flag to switch the valkey_strtod function to fast_float_strtod,
+# therefore let's pass it to the compiler for preprocessing.
+FINAL_CFLAGS += -D USE_FAST_FLOAT
+# next, let's build and add the actual library containing the fast_float_strtod function for linking.
+DEPENDENCY_TARGETS += fast_float_c_interface
+FAST_FLOAT_STRTOD_OBJECT := ../deps/fast_float_c_interface/fast_float_strtod.o
+FINAL_LIBS += $(FAST_FLOAT_STRTOD_OBJECT)
+endif
 all: $(SERVER_NAME) $(ENGINE_SENTINEL_NAME) $(ENGINE_CLI_NAME) $(ENGINE_BENCHMARK_NAME) $(ENGINE_CHECK_RDB_NAME) $(ENGINE_CHECK_AOF_NAME) $(TLS_MODULE) $(RDMA_MODULE)
 @echo ""
 @echo "Hint: It's a good idea to run 'make test' ;)"
@@ -494,7 +498,7 @@ $(ENGINE_LIB_NAME): $(ENGINE_SERVER_OBJ)
 # valkey-unit-tests
 $(ENGINE_UNIT_TESTS): $(ENGINE_TEST_OBJ) $(ENGINE_LIB_NAME)
-$(SERVER_LD) $(ALLOW_DUPLICATE_FLAG) -o $@ $^ ../deps/hiredis/libhiredis.a ../deps/lua/src/liblua.a ../deps/hdr_histogram/libhdrhistogram.a ../deps/fpconv/libfpconv.a $(FINAL_LIBS)
+$(SERVER_LD) -o $@ $^ ../deps/hiredis/libhiredis.a ../deps/lua/src/liblua.a ../deps/hdr_histogram/libhdrhistogram.a ../deps/fpconv/libfpconv.a $(FINAL_LIBS)
 # valkey-sentinel
 $(ENGINE_SENTINEL_NAME): $(SERVER_NAME)
@@ -513,7 +517,7 @@ $(TLS_MODULE_NAME): $(SERVER_NAME)
 $(QUIET_CC)$(CC) -o $@ tls.c -shared -fPIC $(TLS_MODULE_CFLAGS) $(TLS_CLIENT_LIBS)
 # valkey-rdma.so
-$(RDMA_MODULE_NAME): $(REDIS_SERVER_NAME)
+$(RDMA_MODULE_NAME): $(SERVER_NAME)
 $(QUIET_CC)$(CC) -o $@ rdma.c -shared -fPIC $(RDMA_MODULE_CFLAGS)
 # valkey-cli
@@ -600,7 +604,7 @@ bench: $(ENGINE_BENCHMARK_NAME)
 32bit:
 @echo ""
-@echo "WARNING: if it fails under Linux you probably need to install libc6-dev-i386"
+@echo "WARNING: if it fails under Linux you probably need to install libc6-dev-i386 and libstdc++-11-dev-i386-cross"
 @echo ""
 $(MAKE) all-with-unit-tests CFLAGS="-m32" LDFLAGS="-m32"

src/acl.c (164 changed lines)

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018, Salvatore Sanfilippo <antirez at gmail dot com>
+ * Copyright (c) 2018, Redis Ltd.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
@@ -29,6 +29,7 @@
 #include "server.h"
 #include "sha256.h"
+#include "module.h"
 #include <fcntl.h>
 #include <ctype.h>
@@ -297,11 +298,6 @@ int ACLListMatchSds(void *a, void *b) {
 return sdscmp(a, b) == 0;
 }
-/* Method to free list elements from ACL users password/patterns lists. */
-void ACLListFreeSds(void *item) {
-sdsfree(item);
-}
 /* Method to duplicate list elements from ACL users password/patterns lists. */
 void *ACLListDupSds(void *item) {
 return sdsdup(item);
@@ -374,7 +370,7 @@ aclSelector *ACLCreateSelector(int flags) {
 listSetFreeMethod(selector->patterns, ACLListFreeKeyPattern);
 listSetDupMethod(selector->patterns, ACLListDupKeyPattern);
 listSetMatchMethod(selector->channels, ACLListMatchSds);
-listSetFreeMethod(selector->channels, ACLListFreeSds);
+listSetFreeMethod(selector->channels, sdsfreeVoid);
 listSetDupMethod(selector->channels, ACLListDupSds);
 memset(selector->allowed_commands, 0, sizeof(selector->allowed_commands));
@@ -445,7 +441,7 @@ user *ACLCreateUser(const char *name, size_t namelen) {
 u->passwords = listCreate();
 u->acl_string = NULL;
 listSetMatchMethod(u->passwords, ACLListMatchSds);
-listSetFreeMethod(u->passwords, ACLListFreeSds);
+listSetFreeMethod(u->passwords, sdsfreeVoid);
 listSetDupMethod(u->passwords, ACLListDupSds);
 u->selectors = listCreate();
@@ -489,6 +485,11 @@ void ACLFreeUser(user *u) {
 zfree(u);
 }
+/* Used for generic free functions. */
+static void ACLFreeUserVoid(void *u) {
+    ACLFreeUser(u);
+}
 /* When a user is deleted we need to cycle the active
 * connections in order to kill all the pending ones that
 * are authenticated with such user. */
@@ -652,14 +653,15 @@ void ACLChangeSelectorPerm(aclSelector *selector, struct serverCommand *cmd, int
 unsigned long id = cmd->id;
 ACLSetSelectorCommandBit(selector, id, allow);
 ACLResetFirstArgsForCommand(selector, id);
-if (cmd->subcommands_dict) {
-dictEntry *de;
-dictIterator *di = dictGetSafeIterator(cmd->subcommands_dict);
-while ((de = dictNext(di)) != NULL) {
-struct serverCommand *sub = (struct serverCommand *)dictGetVal(de);
+if (cmd->subcommands_ht) {
+hashtableIterator iter;
+hashtableInitIterator(&iter, cmd->subcommands_ht, HASHTABLE_ITER_SAFE);
+void *next;
+while (hashtableNext(&iter, &next)) {
+struct serverCommand *sub = next;
 ACLSetSelectorCommandBit(selector, sub->id, allow);
 }
-dictReleaseIterator(di);
+hashtableResetIterator(&iter);
 }
 }
@@ -669,19 +671,20 @@ void ACLChangeSelectorPerm(aclSelector *selector, struct serverCommand *cmd, int
 * value. Since the category passed by the user may be non existing, the
 * function returns C_ERR if the category was not found, or C_OK if it was
 * found and the operation was performed. */
-void ACLSetSelectorCommandBitsForCategory(dict *commands, aclSelector *selector, uint64_t cflag, int value) {
-dictIterator *di = dictGetIterator(commands);
-dictEntry *de;
-while ((de = dictNext(di)) != NULL) {
-struct serverCommand *cmd = dictGetVal(de);
+void ACLSetSelectorCommandBitsForCategory(hashtable *commands, aclSelector *selector, uint64_t cflag, int value) {
+hashtableIterator iter;
+hashtableInitIterator(&iter, commands, 0);
+void *next;
+while (hashtableNext(&iter, &next)) {
+struct serverCommand *cmd = next;
 if (cmd->acl_categories & cflag) {
 ACLChangeSelectorPerm(selector, cmd, value);
 }
-if (cmd->subcommands_dict) {
-ACLSetSelectorCommandBitsForCategory(cmd->subcommands_dict, selector, cflag, value);
+if (cmd->subcommands_ht) {
+ACLSetSelectorCommandBitsForCategory(cmd->subcommands_ht, selector, cflag, value);
 }
 }
-dictReleaseIterator(di);
+hashtableResetIterator(&iter);
 }
 /* This function is responsible for recomputing the command bits for all selectors of the existing users.
@@ -732,26 +735,27 @@ int ACLSetSelectorCategory(aclSelector *selector, const char *category, int allo
 return C_OK;
 }
-void ACLCountCategoryBitsForCommands(dict *commands,
+void ACLCountCategoryBitsForCommands(hashtable *commands,
 aclSelector *selector,
 unsigned long *on,
 unsigned long *off,
 uint64_t cflag) {
-dictIterator *di = dictGetIterator(commands);
-dictEntry *de;
-while ((de = dictNext(di)) != NULL) {
-struct serverCommand *cmd = dictGetVal(de);
+hashtableIterator iter;
+hashtableInitIterator(&iter, commands, 0);
+void *next;
+while (hashtableNext(&iter, &next)) {
+struct serverCommand *cmd = next;
 if (cmd->acl_categories & cflag) {
 if (ACLGetSelectorCommandBit(selector, cmd->id))
 (*on)++;
 else
 (*off)++;
 }
-if (cmd->subcommands_dict) {
-ACLCountCategoryBitsForCommands(cmd->subcommands_dict, selector, on, off, cflag);
+if (cmd->subcommands_ht) {
+ACLCountCategoryBitsForCommands(cmd->subcommands_ht, selector, on, off, cflag);
 }
 }
-dictReleaseIterator(di);
+hashtableResetIterator(&iter);
 }
 /* Return the number of commands allowed (on) and denied (off) for the user 'u'
@@ -1074,6 +1078,7 @@ int ACLSetSelector(aclSelector *selector, const char *op, size_t oplen) {
 int flags = 0;
 size_t offset = 1;
 if (op[0] == '%') {
+int perm_ok = 1;
 for (; offset < oplen; offset++) {
 if (toupper(op[offset]) == 'R' && !(flags & ACL_READ_PERMISSION)) {
 flags |= ACL_READ_PERMISSION;
@@ -1083,10 +1088,14 @@ int ACLSetSelector(aclSelector *selector, const char *op, size_t oplen) {
 offset++;
 break;
 } else {
-errno = EINVAL;
-return C_ERR;
+perm_ok = 0;
+break;
 }
 }
+if (!flags || !perm_ok) {
+errno = EINVAL;
+return C_ERR;
+}
 } else {
 flags = ACL_ALL_PERMISSION;
 }
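The practical effect of the change above: an empty flag list after '%' (which previously slipped through with flags left at 0) now fails with EINVAL via the new !flags check, while unknown flags are still rejected, now through the perm_ok path. A few illustrative rule strings under the documented %<flags>~<pattern> key-permission syntax:

%R~cache:*  -> read-only access to keys matching cache:*
%W~queue:*  -> write-only access to keys matching queue:*
%RW~app:*   -> read/write access, equivalent to ~app:*
%X~app:*    -> rejected with EINVAL (unknown permission flag)
%~app:*     -> rejected with EINVAL (empty flag list, caught by the new !flags check)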
@@ -1163,7 +1172,7 @@ int ACLSetSelector(aclSelector *selector, const char *op, size_t oplen) {
 return C_ERR;
 }
-if (cmd->subcommands_dict) {
+if (cmd->subcommands_ht) {
 /* If user is trying to allow a valid subcommand we can just add its unique ID */
 cmd = ACLLookupCommand(op + 1);
 if (cmd == NULL) {
@@ -1951,7 +1960,7 @@ int ACLShouldKillPubsubClient(client *c, list *upcoming) {
 if (getClientType(c) == CLIENT_TYPE_PUBSUB) {
 /* Check for pattern violations. */
-dictIterator *di = dictGetIterator(c->pubsub_patterns);
+dictIterator *di = dictGetIterator(c->pubsub_data->pubsub_patterns);
 dictEntry *de;
 while (!kill && ((de = dictNext(di)) != NULL)) {
 o = dictGetKey(de);
@@ -1963,7 +1972,7 @@ int ACLShouldKillPubsubClient(client *c, list *upcoming) {
 /* Check for channel violations. */
 if (!kill) {
 /* Check for global channels violation. */
-di = dictGetIterator(c->pubsub_channels);
+di = dictGetIterator(c->pubsub_data->pubsub_channels);
 while (!kill && ((de = dictNext(di)) != NULL)) {
 o = dictGetKey(de);
@@ -1974,7 +1983,7 @@ }
 }
 if (!kill) {
 /* Check for shard channels violation. */
-di = dictGetIterator(c->pubsubshard_channels);
+di = dictGetIterator(c->pubsub_data->pubsubshard_channels);
 while (!kill && ((de = dictNext(di)) != NULL)) {
 o = dictGetKey(de);
 int res = ACLCheckChannelAgainstList(upcoming, o->ptr, sdslen(o->ptr), 0);
@@ -2442,12 +2451,12 @@ sds ACLLoadFromFile(const char *filename) {
 c->user = new_user;
 }
-if (user_channels) raxFreeWithCallback(user_channels, (void (*)(void *))listRelease);
-raxFreeWithCallback(old_users, (void (*)(void *))ACLFreeUser);
+if (user_channels) raxFreeWithCallback(user_channels, listReleaseVoid);
+raxFreeWithCallback(old_users, ACLFreeUserVoid);
 sdsfree(errors);
 return NULL;
 } else {
-raxFreeWithCallback(Users, (void (*)(void *))ACLFreeUser);
+raxFreeWithCallback(Users, ACLFreeUserVoid);
 Users = old_users;
 errors =
 sdscat(errors, "WARNING: ACL errors detected, no change to the previously active ACL rules was performed");
@@ -2678,7 +2687,7 @@ void addACLLogEntry(client *c, int reason, int context, int argpos, sds username
 /* if we have a real client from the network, use it (could be missing on module timers) */
 client *realclient = server.current_client ? server.current_client : c;
-le->cinfo = catClientInfoString(sdsempty(), realclient);
+le->cinfo = catClientInfoString(sdsempty(), realclient, 0);
 le->context = context;
 /* Try to match this entry with past ones, to see if we can just
@@ -2754,23 +2763,22 @@ sds getAclErrorMessage(int acl_res, user *user, struct serverCommand *cmd, sds e
 * ==========================================================================*/
 /* ACL CAT category */
-void aclCatWithFlags(client *c, dict *commands, uint64_t cflag, int *arraylen) {
-dictEntry *de;
-dictIterator *di = dictGetIterator(commands);
-while ((de = dictNext(di)) != NULL) {
-struct serverCommand *cmd = dictGetVal(de);
-if (cmd->flags & CMD_MODULE) continue;
+void aclCatWithFlags(client *c, hashtable *commands, uint64_t cflag, int *arraylen) {
+hashtableIterator iter;
+hashtableInitIterator(&iter, commands, 0);
+void *next;
+while (hashtableNext(&iter, &next)) {
+struct serverCommand *cmd = next;
 if (cmd->acl_categories & cflag) {
 addReplyBulkCBuffer(c, cmd->fullname, sdslen(cmd->fullname));
 (*arraylen)++;
 }
-if (cmd->subcommands_dict) {
-aclCatWithFlags(c, cmd->subcommands_dict, cflag, arraylen);
+if (cmd->subcommands_ht) {
+aclCatWithFlags(c, cmd->subcommands_ht, cflag, arraylen);
 }
 }
-dictReleaseIterator(di);
+hashtableResetIterator(&iter);
 }
 /* Add the formatted response from a single selector to the ACL GETUSER
@@ -3117,37 +3125,35 @@ void aclCommand(client *c) {
 addReply(c, shared.ok);
 } else if (c->argc == 2 && !strcasecmp(sub, "help")) {
-/* clang-format off */
 const char *help[] = {
 "CAT [<category>]",
 " List all commands that belong to <category>, or all command categories",
 " when no category is specified.",
 "DELUSER <username> [<username> ...]",
 " Delete a list of users.",
 "DRYRUN <username> <command> [<arg> ...]",
 " Returns whether the user can execute the given command without executing the command.",
 "GETUSER <username>",
 " Get the user's details.",
 "GENPASS [<bits>]",
 " Generate a secure 256-bit user password. The optional `bits` argument can",
 " be used to specify a different size.",
 "LIST",
 " Show users details in config file format.",
 "LOAD",
 " Reload users from the ACL file.",
 "LOG [<count> | RESET]",
 " Show the ACL log entries.",
 "SAVE",
 " Save the current config to the ACL file.",
 "SETUSER <username> <attribute> [<attribute> ...]",
 " Create or modify a user with the specified attributes.",
 "USERS",
 " List all the registered usernames.",
 "WHOAMI",
 " Return the current connection username.",
-NULL
+NULL,
 };
-/* clang-format on */
 addReplyHelp(c, help);
 } else {
 addReplySubcommandSyntaxError(c);


@@ -1,6 +1,6 @@
 /* adlist.c - A generic doubly linked list implementation
 *
- * Copyright (c) 2006-2010, Salvatore Sanfilippo <antirez at gmail dot com>
+ * Copyright (c) 2006-2010, Redis Ltd.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
@@ -77,6 +77,12 @@ void listRelease(list *list) {
 zfree(list);
 }
+/* Just like listRelease, but takes the list as a (void *).
+ * Useful as generic free callback. */
+void listReleaseVoid(void *l) {
+    listRelease((list *)l);
+}
 /* Add a new node to the list, to head, containing the specified 'value'
 * pointer as value.
 *
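A brief note on the adapter just added: it lets a list * be released through callbacks that expect a plain void (*)(void *), avoiding the function-pointer cast the old call sites used (calling a function through a mismatched function-pointer type is undefined behavior in C). The acl.c hunk earlier in this diff shows the intended call site:

if (user_channels) raxFreeWithCallback(user_channels, listReleaseVoid);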


@@ -1,6 +1,6 @@
 /* adlist.h - A generic doubly linked list implementation
 *
- * Copyright (c) 2006-2012, Salvatore Sanfilippo <antirez at gmail dot com>
+ * Copyright (c) 2006-2012, Redis Ltd.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
@@ -72,6 +72,7 @@ typedef struct list {
 /* Prototypes */
 list *listCreate(void);
 void listRelease(list *list);
+void listReleaseVoid(void *list);
 void listEmpty(list *list);
 list *listAddNodeHead(list *list, void *value);
 list *listAddNodeTail(list *list, void *value);


@@ -2,7 +2,7 @@
 * for the Jim's event-loop (Jim is a Tcl interpreter) but later translated
 * it in form of a library for easy reuse.
 *
- * Copyright (c) 2006-2010, Salvatore Sanfilippo <antirez at gmail dot com>
+ * Copyright (c) 2006-2010, Redis Ltd.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
@@ -63,14 +63,14 @@
 #endif
 #endif
 #define AE_LOCK(eventLoop) \
     if ((eventLoop)->flags & AE_PROTECT_POLL) { \
         assert(pthread_mutex_lock(&(eventLoop)->poll_mutex) == 0); \
     }
 #define AE_UNLOCK(eventLoop) \
     if ((eventLoop)->flags & AE_PROTECT_POLL) { \
         assert(pthread_mutex_unlock(&(eventLoop)->poll_mutex) == 0); \
     }
 aeEventLoop *aeCreateEventLoop(int setsize) {
@@ -85,7 +85,7 @@ aeEventLoop *aeCreateEventLoop(int setsize) {
 if (eventLoop->events == NULL || eventLoop->fired == NULL) goto err;
 eventLoop->setsize = setsize;
 eventLoop->timeEventHead = NULL;
-eventLoop->timeEventNextId = 0;
+eventLoop->timeEventNextId = 1;
 eventLoop->stop = 0;
 eventLoop->maxfd = -1;
 eventLoop->beforesleep = NULL;
@@ -363,7 +363,7 @@ static int processTimeEvents(aeEventLoop *eventLoop) {
 }
 if (te->when <= now) {
-int retval;
+long long retval;
 id = te->id;
 te->refcount++;


@@ -2,7 +2,7 @@
 * for the Jim's event-loop (Jim is a Tcl interpreter) but later translated
 * it in form of a library for easy reuse.
 *
- * Copyright (c) 2006-2012, Salvatore Sanfilippo <antirez at gmail dot com>
+ * Copyright (c) 2006-2012, Redis Ltd.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
@@ -42,12 +42,11 @@
 #define AE_NONE 0 /* No events registered. */
 #define AE_READABLE 1 /* Fire when descriptor is readable. */
 #define AE_WRITABLE 2 /* Fire when descriptor is writable. */
-#define AE_BARRIER \
-    4 /* With WRITABLE, never fire the event if the \
-         READABLE event already fired in the same event \
-         loop iteration. Useful when you want to persist \
-         things to disk before sending replies, and want \
-         to do that in a group fashion. */
+#define AE_BARRIER 4 /* With WRITABLE, never fire the event if the \
+                        READABLE event already fired in the same event \
+                        loop iteration. Useful when you want to persist \
+                        things to disk before sending replies, and want \
+                        to do that in a group fashion. */
 #define AE_FILE_EVENTS (1 << 0)
 #define AE_TIME_EVENTS (1 << 1)
@@ -68,7 +67,7 @@ struct aeEventLoop;
 /* Types and data structures */
 typedef void aeFileProc(struct aeEventLoop *eventLoop, int fd, void *clientData, int mask);
-typedef int aeTimeProc(struct aeEventLoop *eventLoop, long long id, void *clientData);
+typedef long long aeTimeProc(struct aeEventLoop *eventLoop, long long id, void *clientData);
 typedef void aeEventFinalizerProc(struct aeEventLoop *eventLoop, void *clientData);
 typedef void aeBeforeSleepProc(struct aeEventLoop *eventLoop);
 typedef void aeAfterSleepProc(struct aeEventLoop *eventLoop, int numevents);
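A minimal sketch of a timer callback under the widened typedef (exampleTimeProc and its 100 ms period are illustrative; aeCreateTimeEvent and AE_NOMORE are the existing ae API):

static long long exampleTimeProc(struct aeEventLoop *eventLoop, long long id, void *clientData) {
    (void)eventLoop;
    (void)id;
    (void)clientData;
    /* ... periodic work ... */
    return 100; /* fire again in 100 milliseconds; returning AE_NOMORE deletes the event */
}

/* Registration is unchanged, only the proc's return type is wider:
 * aeCreateTimeEvent(eventLoop, 100, exampleTimeProc, NULL, NULL); */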


@@ -1,6 +1,6 @@
 /* Linux epoll(2) based ae.c module
 *
- * Copyright (c) 2009-2012, Salvatore Sanfilippo <antirez at gmail dot com>
+ * Copyright (c) 2009-2012, Redis Ltd.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without


@@ -1,6 +1,6 @@
 /* Select()-based ae.c module.
 *
- * Copyright (c) 2009-2012, Salvatore Sanfilippo <antirez at gmail dot com>
+ * Copyright (c) 2009-2012, Redis Ltd.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without

src/allocator_defrag.c (new file, 477 lines)

@@ -0,0 +1,477 @@
/* Copyright 2024- Valkey contributors
* All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*/
/*
* This file implements allocator-specific defragmentation logic used
* within the Valkey engine. Below is the relationship between various
* components involved in allocation and defragmentation:
*
* Application code
* / \
* allocation / \ defrag
* / \
* zmalloc allocator_defrag
* / | \ / \
* / | \ / \
* / | \ / \
* libc tcmalloc jemalloc other
*
* Explanation:
* - **Application code**: High-level application logic that uses memory
* allocation and may trigger defragmentation.
* - **zmalloc**: An abstraction layer over the memory allocator, providing
* a uniform allocation interface to the application code. It can delegate
* to various underlying allocators (e.g., libc, tcmalloc, jemalloc, or others).
* It is not dependent on the defrag implementation logic, and it is possible to use a jemalloc
* version that does not support defrag.
* - **allocator_defrag**: This file contains allocator-specific logic for
* defragmentation, invoked from `defrag.c` when memory defragmentation is needed.
* Currently jemalloc is the only allocator with defrag logic implemented. It is possible that
* future implementations will include non-allocator defragmentation (think of data-structure
* compaction, for example).
* - **Underlying allocators**: These are the actual memory allocators, such as
* libc, tcmalloc, jemalloc, or other custom allocators. The defragmentation
* logic in `allocator_defrag` interacts with these allocators to reorganize
* memory and reduce fragmentation.
*
* The `defrag.c` file acts as the central entry point for defragmentation,
* invoking allocator-specific implementations provided here in `allocator_defrag.c`.
*
* Note: Developers working on `zmalloc` or `allocator_defrag` should refer to
* the other component to ensure both are using the same allocator configuration.
*/
#include "server.h"
#include "serverassert.h"
#include "allocator_defrag.h"
#if defined(HAVE_DEFRAG) && defined(USE_JEMALLOC)
#define STRINGIFY_(x) #x
#define STRINGIFY(x) STRINGIFY_(x)
#define BATCH_QUERY_ARGS_OUT 3
#define SLAB_NFREE(out, i) out[(i) * BATCH_QUERY_ARGS_OUT]
#define SLAB_LEN(out, i) out[(i) * BATCH_QUERY_ARGS_OUT + 2]
#define SLAB_NUM_REGS(out, i) out[(i) * BATCH_QUERY_ARGS_OUT + 1]
#define UTILIZATION_THRESHOLD_FACTOR_MILI (125) // 12.5% additional utilization
/*
* Represents a precomputed key for querying jemalloc statistics.
*
* The `jeMallctlKey` structure stores a key corresponding to a specific jemalloc
* statistics field name. This key is used with the `je_mallctlbymib` interface
* to query statistics more efficiently, bypassing the need for runtime string
* lookup and translation performed by `je_mallctl`.
*
* - `je_mallctlnametomib` is called once for each statistics field to precompute
* and store the key corresponding to the field name.
* - Subsequent queries use `je_mallctlbymib` with the stored key, avoiding the
* overhead of repeated string-based lookups.
*
*/
typedef struct jeMallctlKey {
size_t key[6]; /* The precomputed key used to query jemalloc statistics. */
size_t keylen; /* The length of the key array. */
} jeMallctlKey;
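For concreteness, a minimal sketch of how such a key is intended to be resolved once and then reused (jeQueryKeyInit/jeQueryReadSize are illustrative names, not code from this file; je_mallctlnametomib and je_mallctlbymib are the standard jemalloc interfaces):

static int jeQueryKeyInit(const char *name, jeMallctlKey *key_info) {
    key_info->keylen = sizeof(key_info->key) / sizeof(key_info->key[0]);
    /* Resolve a "stats.arenas.<i>.bins.<j>.curslabs"-style name once... */
    return je_mallctlnametomib(name, key_info->key, &key_info->keylen); /* 0 on success */
}

static int jeQueryReadSize(const jeMallctlKey *key_info, size_t *value) {
    size_t len = sizeof(*value);
    /* ...then query repeatedly by MIB, skipping the per-call string lookup. */
    return je_mallctlbymib(key_info->key, key_info->keylen, value, &len, NULL, 0);
}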
/* Stores MIB (Management Information Base) keys for jemalloc bin queries.
*
* This struct holds precomputed `jeMallctlKey` values for querying various
* jemalloc bin-related statistics efficiently.
*/
typedef struct jeBinInfoKeys {
jeMallctlKey curr_slabs; /* Key to query the current number of slabs in the bin. */
jeMallctlKey nonfull_slabs; /* Key to query the number of non-full slabs in the bin. */
jeMallctlKey curr_regs; /* Key to query the current number of regions in the bin. */
} jeBinInfoKeys;
/* Represents detailed information about a jemalloc bin.
*
* This struct provides metadata about a jemalloc bin, including the size of
* its regions, total number of regions, and related MIB keys for efficient
* queries.
*/
typedef struct jeBinInfo {
size_t reg_size; /* Size of each region in the bin. */
uint32_t nregs; /* Total number of regions in the bin. */
jeBinInfoKeys info_keys; /* Precomputed MIB keys for querying bin statistics. */
} jeBinInfo;
/* Represents the configuration for jemalloc bins.
*
* This struct contains information about the number of bins and metadata for
* each bin, as well as precomputed keys for batch utility queries and epoch updates.
*/
typedef struct jemallocCB {
unsigned nbins; /* Number of bins in the jemalloc configuration. */
jeBinInfo *bin_info; /* Array of `jeBinInfo` structs, one for each bin. */
jeMallctlKey util_batch_query; /* Key to query batch utilization information. */
jeMallctlKey epoch; /* Key to trigger statistics sync between threads. */
} jemallocCB;
/* Represents the latest usage statistics for a jemalloc bin.
*
* This struct tracks the current usage of a bin, including the number of slabs
* and regions, and calculates the number of full slabs from other fields.
*/
typedef struct jemallocBinUsageData {
size_t curr_slabs; /* Current number of slabs in the bin. */
size_t curr_nonfull_slabs; /* Current number of non-full slabs in the bin. */
size_t curr_regs; /* Current number of regions in the bin. */
} jemallocBinUsageData;
static int defrag_supported = 0;
/* Control block holding information about bins and query helpers.
 * This structure is initialized once by allocatorDefragInit and does not change afterwards. */
static jemallocCB je_cb = {0, NULL, {{0}, 0}, {{0}, 0}};
/* Holds the latest usage statistics for each bin. This structure is updated when calling
* allocatorDefragGetFragSmallbins and later is used to make a defrag decision for a memory pointer. */
static jemallocBinUsageData *je_usage_info = NULL;
/* -----------------------------------------------------------------------------
 * Alloc/free APIs that cooperate with defrag
* -------------------------------------------------------------------------- */
/* Allocation and free functions that bypass the thread cache
* and go straight to the allocator arena bins.
* Currently implemented only for jemalloc. Used for online defragmentation.
*/
void *allocatorDefragAlloc(size_t size) {
void *ptr = je_mallocx(size, MALLOCX_TCACHE_NONE);
return ptr;
}
void allocatorDefragFree(void *ptr, size_t size) {
if (ptr == NULL) return;
je_sdallocx(ptr, size, MALLOCX_TCACHE_NONE);
}
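/* Illustrative sketch (not part of the original file): a defrag pass would
 * relocate an allocation with the pair above via an alloc-copy-free sequence.
 * This helper is hypothetical; the real move logic lives in defrag.c. */
static inline void *exampleDefragRelocate(void *ptr, size_t size) {
    void *newptr = allocatorDefragAlloc(size); /* fresh region, thread cache bypassed */
    if (newptr == NULL) return ptr;            /* keep the old allocation on failure */
    memcpy(newptr, ptr, size);                 /* memcpy from <string.h>, pulled in via server.h */
    allocatorDefragFree(ptr, size);            /* hand the fragmented region back to the arena */
    return newptr;
}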
/* -----------------------------------------------------------------------------
* Helper functions for jemalloc translation between size and index
* -------------------------------------------------------------------------- */
/* Get the bin index in the bin array from the reg_size.
 *
 * This is a reverse-engineered mapping of reg_size -> binind. We need it because the utilization query
 * returns the size of the buffer and not the bin index, and we need the bin index to access its usage information.
 *
 * Note: in case a future PR returns the binind (a better API anyway), we can get rid of
 * these conversion functions.
 */
static inline unsigned jeSize2BinIndexLgQ3(size_t sz) {
    /* Number of bins in each power-of-2 size class group */
    const size_t size_class_group_size = 4;
    /* Smallest power-of-2 quantum for binning */
    const size_t lg_quantum_3_first_pow2 = 3;
/* Offset for exponential bins */
const size_t lg_quantum_3_offset = ((64 >> lg_quantum_3_first_pow2) - 1);
/* Small sizes (8-64 bytes) use linear binning */
if (sz <= 64) { // 64 = 1 << (lg_quantum_3_first_pow2 + 3)
return (sz >> 3) - 1; // Divide by 8 and subtract 1
}
/* For larger sizes, use exponential binning */
/* Calculate leading zeros of (sz - 1) to properly handle power-of-2 sizes */
unsigned leading_zeros = __builtin_clzll(sz - 1);
unsigned exp = 64 - leading_zeros; // Effective log2(sz)
/* Calculate the size's position within its group */
unsigned within_group_offset = size_class_group_size -
(((1ULL << exp) - sz) >> (exp - lg_quantum_3_first_pow2));
/* Calculate the final bin index */
return within_group_offset +
((exp - (lg_quantum_3_first_pow2 + 3)) - 1) * size_class_group_size +
lg_quantum_3_offset;
}
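/* Worked example (illustrative): for sz = 96, __builtin_clzll(95) = 57, so
 * exp = 7; within_group_offset = 4 - ((128 - 96) >> 4) = 2; the result is
 * 2 + ((7 - 6) - 1) * 4 + 7 = 9. This matches jemalloc's lg_quantum=3 size
 * classes: 8..64 map linearly to bins 0-7, then 80 -> 8, 96 -> 9, 112 -> 10,
 * 128 -> 11, and so on. */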
/* -----------------------------------------------------------------------------
* Interface functions to get fragmentation info from jemalloc
* -------------------------------------------------------------------------- */
#define ARENA_TO_QUERY MALLCTL_ARENAS_ALL
static inline void jeRefreshStats(const jemallocCB *je_cb) {
uint64_t epoch = 1; // Value doesn't matter
size_t sz = sizeof(epoch);
/* Refresh stats */
je_mallctlbymib(je_cb->epoch.key, je_cb->epoch.keylen, &epoch, &sz, &epoch, sz);
}
/* Extract key that corresponds to the given name for fast query. This should be called once for each key_name */
static inline int jeQueryKeyInit(const char *key_name, jeMallctlKey *key_info) {
key_info->keylen = sizeof(key_info->key) / sizeof(key_info->key[0]);
int res = je_mallctlnametomib(key_name, key_info->key, &key_info->keylen);
/* sanity check that returned value is not larger than provided */
assert(key_info->keylen <= sizeof(key_info->key) / sizeof(key_info->key[0]));
return res;
}
/* Query jemalloc control interface using previously extracted key (with jeQueryKeyInit) instead of name string.
* This interface (named MIB in jemalloc) is faster as it avoids string dict lookup at run-time. */
static inline int jeQueryCtlInterface(const jeMallctlKey *key_info, void *value) {
size_t sz = sizeof(size_t);
return je_mallctlbymib(key_info->key, key_info->keylen, value, &sz, NULL, 0);
}
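/* Illustrative usage (not part of the original file): the intended
 * init-once / query-many pattern for the two helpers above, shown with the
 * real jemalloc mallctl name "stats.allocated". Error handling is elided. */
static inline size_t exampleQueryAllocated(void) {
    static jeMallctlKey key;
    static int key_ready = 0;
    size_t allocated = 0;
    if (!key_ready && jeQueryKeyInit("stats.allocated", &key) == 0) key_ready = 1; /* translate the name once */
    if (key_ready) jeQueryCtlInterface(&key, &allocated);                          /* fast MIB-based query */
    return allocated;
}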
static inline int binQueryHelperInitialization(jeBinInfoKeys *helper, unsigned bin_index) {
char mallctl_name[128];
/* MIB for fetching the number of used regions in the bin */
snprintf(mallctl_name, sizeof(mallctl_name), "stats.arenas." STRINGIFY(ARENA_TO_QUERY) ".bins.%d.curregs", bin_index);
if (jeQueryKeyInit(mallctl_name, &helper->curr_regs) != 0) return -1;
/* MIB for fetching the number of current slabs in the bin */
snprintf(mallctl_name, sizeof(mallctl_name), "stats.arenas." STRINGIFY(ARENA_TO_QUERY) ".bins.%d.curslabs", bin_index);
if (jeQueryKeyInit(mallctl_name, &helper->curr_slabs) != 0) return -1;
/* MIB for fetching the number of non-full slabs in the bin */
snprintf(mallctl_name, sizeof(mallctl_name), "stats.arenas." STRINGIFY(ARENA_TO_QUERY) ".bins.%d.nonfull_slabs", bin_index);
if (jeQueryKeyInit(mallctl_name, &helper->nonfull_slabs) != 0) return -1;
return 0;
}
/* Initializes the defragmentation system for the jemalloc memory allocator.
*
* This function performs the necessary setup and initialization steps for the defragmentation system.
* It retrieves the configuration information for the jemalloc arenas and bins, and initializes the usage
* statistics data structure.
*
* return 0 on success, or a non-zero error code on failure.
*
* The initialization process involves the following steps:
* 1. Check if defragmentation is supported by the current jemalloc version.
* 2. Retrieve the arena bin configuration information using the `je_mallctlbymib` function.
 * 3. Initialize the `je_usage_info` structure with the bin usage statistics and configuration data.
* 4. Set the `defrag_supported` flag to indicate that defragmentation is enabled.
*
* Note: This function must be called before using any other defragmentation-related functionality.
* It should be called during the initialization phase of the code that uses the
* defragmentation feature.
*/
int allocatorDefragInit(void) {
char mallctl_name[100];
jeBinInfo *bin_info;
size_t sz;
int je_res;
/* Init should be called only once; fail on an unexpected repeated call */
assert(!defrag_supported);
/* Get the MIB of the per-pointer utilization query command used during the defrag scan over memory */
if (jeQueryKeyInit("experimental.utilization.batch_query", &je_cb.util_batch_query) != 0) return -1;
je_res = jeQueryKeyInit("epoch", &je_cb.epoch);
assert(je_res == 0);
jeRefreshStats(&je_cb);
/* Get the quantum for verification only; the current code assumes lg-quantum is 3 */
size_t jemalloc_quantum;
sz = sizeof(jemalloc_quantum);
je_mallctl("arenas.quantum", &jemalloc_quantum, &sz, NULL, 0);
/* lg-quantum should be 3 so jemalloc_quantum should be 1<<3 */
assert(jemalloc_quantum == 8);
sz = sizeof(je_cb.nbins);
je_res = je_mallctl("arenas.nbins", &je_cb.nbins, &sz, NULL, 0);
assert(je_res == 0 && je_cb.nbins != 0);
je_cb.bin_info = je_calloc(je_cb.nbins, sizeof(jeBinInfo));
assert(je_cb.bin_info != NULL);
je_usage_info = je_calloc(je_cb.nbins, sizeof(jemallocBinUsageData));
assert(je_usage_info != NULL);
for (unsigned j = 0; j < je_cb.nbins; j++) {
bin_info = &je_cb.bin_info[j];
/* The size of the current bin */
snprintf(mallctl_name, sizeof(mallctl_name), "arenas.bin.%d.size", j);
sz = sizeof(bin_info->reg_size);
je_res = je_mallctl(mallctl_name, &bin_info->reg_size, &sz, NULL, 0);
assert(je_res == 0);
/* Number of regions per slab */
snprintf(mallctl_name, sizeof(mallctl_name), "arenas.bin.%d.nregs", j);
sz = sizeof(bin_info->nregs);
je_res = je_mallctl(mallctl_name, &bin_info->nregs, &sz, NULL, 0);
assert(je_res == 0);
/* init bin specific fast query keys */
je_res = binQueryHelperInitialization(&bin_info->info_keys, j);
assert(je_res == 0);
/* verify the reverse map of reg_size to bin index */
assert(jeSize2BinIndexLgQ3(bin_info->reg_size) == j);
}
/* Defrag is supported; mark it to enable defrag queries */
defrag_supported = 1;
return 0;
}
/* Total size of memory consumed by unused regions in small bins (AKA external fragmentation).
 * The function also refreshes the stats epoch.
*
* return total fragmentation bytes
*/
unsigned long allocatorDefragGetFragSmallbins(void) {
assert(defrag_supported);
unsigned long frag = 0;
jeRefreshStats(&je_cb);
for (unsigned j = 0; j < je_cb.nbins; j++) {
jeBinInfo *bin_info = &je_cb.bin_info[j];
jemallocBinUsageData *bin_usage = &je_usage_info[j];
/* Number of used regions in the bin */
jeQueryCtlInterface(&bin_info->info_keys.curr_regs, &bin_usage->curr_regs);
/* Number of current slabs in the bin */
jeQueryCtlInterface(&bin_info->info_keys.curr_slabs, &bin_usage->curr_slabs);
/* Number of non-full slabs in the bin */
jeQueryCtlInterface(&bin_info->info_keys.nonfull_slabs, &bin_usage->curr_nonfull_slabs);
/* Calculate the fragmentation bytes for the current bin and add it to the total. */
frag += ((bin_info->nregs * bin_usage->curr_slabs) - bin_usage->curr_regs) * bin_info->reg_size;
}
return frag;
}
/* Determines whether defragmentation should be performed on a pointer based on jemalloc information.
*
* bin_info Pointer to the bin information structure.
* bin_usage Pointer to the bin usage structure.
 * nalloced Number of allocated regions in the slab containing the pointer.
*
* return 1 if defragmentation should be performed, 0 otherwise.
*
* This function checks the following conditions to determine if defragmentation should be performed:
* 1. If the number of allocated regions (nalloced) is equal to the total number of regions (bin_info->nregs),
* defragmentation is not necessary as moving regions is guaranteed not to change the fragmentation ratio.
* 2. If the number of non-full slabs (bin_usage->curr_nonfull_slabs) is less than 2, defragmentation is not performed
* because there is no other slab to move regions to.
 * 3. If slab utilization < 'avg utilization' * 1.125 (in code, 1.125 == (1000 + UTILIZATION_THRESHOLD_FACTOR_MILI) / 1000)
 *    then we should defrag. This is aligned with the previous je_defrag_hint implementation.
*/
static inline int makeDefragDecision(jeBinInfo *bin_info, jemallocBinUsageData *bin_usage, unsigned long nalloced) {
unsigned long curr_full_slabs = bin_usage->curr_slabs - bin_usage->curr_nonfull_slabs;
size_t allocated_nonfull = bin_usage->curr_regs - curr_full_slabs * bin_info->nregs;
if (bin_info->nregs == nalloced || bin_usage->curr_nonfull_slabs < 2 ||
1000 * nalloced * bin_usage->curr_nonfull_slabs > (1000 + UTILIZATION_THRESHOLD_FACTOR_MILI) * allocated_nonfull) {
return 0;
}
return 1;
}
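/* Worked example (illustrative): with nregs = 128, curr_slabs = 10,
 * curr_nonfull_slabs = 4 and curr_regs = 900, there are 6 full slabs, so
 * allocated_nonfull = 900 - 6 * 128 = 132 (on average ~33 regions per
 * non-full slab). A slab with nalloced = 30 gives 1000 * 30 * 4 = 120000,
 * which is not greater than 1125 * 132 = 148500, so the function
 * returns 1 (defrag). */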
/*
* Performs defragmentation analysis for a given ptr.
*
* ptr - ptr to memory region to be analyzed.
*
* return - the function returns 1 if defrag should be performed, 0 otherwise.
*/
int allocatorShouldDefrag(void *ptr) {
assert(defrag_supported);
size_t out[BATCH_QUERY_ARGS_OUT];
size_t out_sz = sizeof(out);
size_t in_sz = sizeof(ptr);
for (unsigned j = 0; j < BATCH_QUERY_ARGS_OUT; j++) {
out[j] = -1;
}
je_mallctlbymib(je_cb.util_batch_query.key,
je_cb.util_batch_query.keylen,
out, &out_sz,
&ptr, in_sz);
/* Sanity-check the batch query results before using them */
assert(SLAB_NUM_REGS(out, 0) > 0);
assert(SLAB_LEN(out, 0) > 0);
assert(SLAB_NFREE(out, 0) != (size_t)-1);
unsigned region_size = SLAB_LEN(out, 0) / SLAB_NUM_REGS(out, 0);
/* check that the allocation size is in range of small bins */
if (region_size > je_cb.bin_info[je_cb.nbins - 1].reg_size) {
return 0;
}
/* get the index based on quantum used */
unsigned binind = jeSize2BinIndexLgQ3(region_size);
/* make sure binind is in range and reverse map is correct */
assert(binind < je_cb.nbins && region_size == je_cb.bin_info[binind].reg_size);
return makeDefragDecision(&je_cb.bin_info[binind],
&je_usage_info[binind],
je_cb.bin_info[binind].nregs - SLAB_NFREE(out, 0));
}
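/* Illustrative note (not part of the original file): for a single queried
 * pointer the batch query fills out[0..2] with nfree, nregs and the slab
 * length in bytes (see the SLAB_* macros above). A result of {4, 128, 8192}
 * therefore describes a slab of 128 regions of 8192 / 128 = 64 bytes, of
 * which 128 - 4 = 124 are allocated; that last value is what gets passed to
 * makeDefragDecision above. */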
/* Utility function to get the fragmentation ratio from jemalloc.
* It is critical to do that by comparing only heap maps that belong to
* jemalloc, and skip ones the jemalloc keeps as spare. Since we use this
* fragmentation ratio in order to decide if a defrag action should be taken
* or not, a false detection can cause the defragmenter to waste a lot of CPU
* without the possibility of getting any results. */
float getAllocatorFragmentation(size_t *out_frag_bytes) {
size_t resident, active, allocated, frag_smallbins_bytes;
zmalloc_get_allocator_info(&allocated, &active, &resident, NULL, NULL);
frag_smallbins_bytes = allocatorDefragGetFragSmallbins();
/* Calculate the fragmentation ratio as the proportion of wasted memory in small
* bins (which are defraggable) relative to the total allocated memory (including large bins).
* This is because otherwise, if most of the memory usage is large bins, we may show high percentage,
* despite the fact it's not a lot of memory for the user. */
float frag_pct = (float)frag_smallbins_bytes / allocated * 100;
float rss_pct = ((float)resident / allocated) * 100 - 100;
size_t rss_bytes = resident - allocated;
if (out_frag_bytes) *out_frag_bytes = frag_smallbins_bytes;
serverLog(LL_DEBUG, "allocated=%zu, active=%zu, resident=%zu, frag=%.2f%% (%.2f%% rss), frag_bytes=%zu (%zu rss)",
allocated, active, resident, frag_pct, rss_pct, frag_smallbins_bytes, rss_bytes);
return frag_pct;
}
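/* Worked example (illustrative): with allocated = 4 GiB, resident = 5 GiB and
 * frag_smallbins_bytes = 200 MiB, frag_pct = 200 / 4096 * 100 ~= 4.9% while
 * rss_pct = 5 / 4 * 100 - 100 = 25%, i.e. the defraggable small-bin waste is
 * reported separately from the overall RSS overhead. */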
#elif defined(DEBUG_FORCE_DEFRAG)
int allocatorDefragInit(void) {
return 0;
}
void allocatorDefragFree(void *ptr, size_t size) {
UNUSED(size);
zfree(ptr);
}
__attribute__((malloc)) void *allocatorDefragAlloc(size_t size) {
    return zmalloc(size);
}
unsigned long allocatorDefragGetFragSmallbins(void) {
return 0;
}
int allocatorShouldDefrag(void *ptr) {
UNUSED(ptr);
return 1;
}
float getAllocatorFragmentation(size_t *out_frag_bytes) {
*out_frag_bytes = server.active_defrag_ignore_bytes + 1;
return server.active_defrag_threshold_upper;
}
#else
int allocatorDefragInit(void) {
return -1;
}
void allocatorDefragFree(void *ptr, size_t size) {
UNUSED(ptr);
UNUSED(size);
}
__attribute__((malloc)) void *allocatorDefragAlloc(size_t size) {
UNUSED(size);
return NULL;
}
unsigned long allocatorDefragGetFragSmallbins(void) {
return 0;
}
int allocatorShouldDefrag(void *ptr) {
UNUSED(ptr);
return 0;
}
float getAllocatorFragmentation(size_t *out_frag_bytes) {
UNUSED(out_frag_bytes);
return 0;
}
#endif

src/allocator_defrag.h Normal file
View File

@@ -0,0 +1,24 @@
#ifndef __ALLOCATOR_DEFRAG_H
#define __ALLOCATOR_DEFRAG_H
#if defined(USE_JEMALLOC)
#include <jemalloc/jemalloc.h>
/* We can enable the server defrag capabilities only if we are using Jemalloc
 * and the version that has the experimental.utilization namespace in mallctl. */
#if (defined(JEMALLOC_VERSION_MAJOR) && \
(JEMALLOC_VERSION_MAJOR > 5 || \
(JEMALLOC_VERSION_MAJOR == 5 && JEMALLOC_VERSION_MINOR > 2) || \
(JEMALLOC_VERSION_MAJOR == 5 && JEMALLOC_VERSION_MINOR == 2 && JEMALLOC_VERSION_BUGFIX >= 1))) || \
defined(DEBUG_FORCE_DEFRAG)
#define HAVE_DEFRAG
#endif
#endif
int allocatorDefragInit(void);
void allocatorDefragFree(void *ptr, size_t size);
__attribute__((malloc)) void *allocatorDefragAlloc(size_t size);
unsigned long allocatorDefragGetFragSmallbins(void);
int allocatorShouldDefrag(void *ptr);
float getAllocatorFragmentation(size_t *out_frag_bytes);
#endif /* __ALLOCATOR_DEFRAG_H */
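
For orientation, here is a minimal sketch of how a caller might drive the API declared above over one defrag cycle. It is illustrative only: `threshold`, `scanNextCandidate`, and `relocate` are hypothetical stand-ins, and the real driver plus its keyspace scan live in defrag.c:

void exampleDefragCycle(float threshold) {
    if (allocatorDefragInit() != 0) return;                  /* allocator lacks defrag support */
    size_t frag_bytes;
    if (getAllocatorFragmentation(&frag_bytes) < threshold) return; /* not fragmented enough */
    void *ptr;
    size_t size;
    while (scanNextCandidate(&ptr, &size)) {                 /* hypothetical keyspace scan */
        if (allocatorShouldDefrag(ptr)) relocate(ptr, size); /* alloc-copy-free move */
    }
}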

src/anet.c
View File

@@ -1,6 +1,6 @@
 /* anet.c -- Basic TCP socket stuff made a bit less boring
  *
- * Copyright (c) 2006-2012, Salvatore Sanfilippo <antirez at gmail dot com>
+ * Copyright (c) 2006-2012, Redis Ltd.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -45,6 +45,7 @@
 #include <errno.h>
 #include <stdarg.h>
 #include <stdio.h>
+#include <grp.h>
 #include "anet.h"
 #include "config.h"
@@ -69,17 +70,24 @@ int anetGetError(int fd) {
     return sockerr;
 }
 
-int anetSetBlock(char *err, int fd, int non_block) {
+static int anetGetSocketFlags(char *err, int fd) {
     int flags;
 
-    /* Set the socket blocking (if non_block is zero) or non-blocking.
-     * Note that fcntl(2) for F_GETFL and F_SETFL can't be
-     * interrupted by a signal. */
     if ((flags = fcntl(fd, F_GETFL)) == -1) {
         anetSetError(err, "fcntl(F_GETFL): %s", strerror(errno));
         return ANET_ERR;
     }
+    return flags;
+}
+
+int anetSetBlock(char *err, int fd, int non_block) {
+    int flags = anetGetSocketFlags(err, fd);
+    if (flags == ANET_ERR) {
+        return ANET_ERR;
+    }
 
     /* Check if this flag has been set or unset, if so,
      * then there is no need to call fcntl to set/unset it again. */
     if (!!(flags & O_NONBLOCK) == !!non_block) return ANET_OK;
@@ -104,6 +112,21 @@ int anetBlock(char *err, int fd) {
     return anetSetBlock(err, fd, 0);
 }
 
+int anetIsBlock(char *err, int fd) {
+    int flags = anetGetSocketFlags(err, fd);
+    if (flags == ANET_ERR) {
+        return ANET_ERR;
+    }
+
+    /* Check if the O_NONBLOCK flag is set */
+    if (flags & O_NONBLOCK) {
+        return 0; /* Socket is non-blocking */
+    } else {
+        return 1; /* Socket is blocking */
+    }
+}
+
 /* Enable the FD_CLOEXEC on the given fd to avoid fd leaks.
  * This function should be invoked for fd's on specific places
  * where fork + execve system calls are called. */
@@ -505,7 +528,7 @@ int anetTcpNonBlockBestEffortBindConnect(char *err, const char *addr, int port,
     return anetTcpGenericConnect(err, addr, port, source_addr, ANET_CONNECT_NONBLOCK | ANET_CONNECT_BE_BINDING);
 }
 
-static int anetListen(char *err, int s, struct sockaddr *sa, socklen_t len, int backlog, mode_t perm) {
+static int anetListen(char *err, int s, struct sockaddr *sa, socklen_t len, int backlog, mode_t perm, char *group) {
     if (bind(s, sa, len) == -1) {
         anetSetError(err, "bind: %s", strerror(errno));
         close(s);
@@ -514,6 +537,22 @@ static int anetListen(char *err, int s, struct sockaddr *sa, socklen_t len, int
     if (sa->sa_family == AF_LOCAL && perm) chmod(((struct sockaddr_un *)sa)->sun_path, perm);
 
+    if (sa->sa_family == AF_LOCAL && group != NULL) {
+        struct group *grp;
+        if ((grp = getgrnam(group)) == NULL) {
+            anetSetError(err, "getgrnam error for group '%s': %s", group, strerror(errno));
+            close(s);
+            return ANET_ERR;
+        }
+
+        /* Owner of the socket remains same. */
+        if (chown(((struct sockaddr_un *)sa)->sun_path, -1, grp->gr_gid) == -1) {
+            anetSetError(err, "chown error for group '%s': %s", group, strerror(errno));
+            close(s);
+            return ANET_ERR;
+        }
+    }
+
     if (listen(s, backlog) == -1) {
         anetSetError(err, "listen: %s", strerror(errno));
         close(s);
@@ -553,7 +592,7 @@ static int _anetTcpServer(char *err, int port, char *bindaddr, int af, int backl
         if (af == AF_INET6 && anetV6Only(err, s) == ANET_ERR) goto error;
         if (anetSetReuseAddr(err, s) == ANET_ERR) goto error;
-        if (anetListen(err, s, p->ai_addr, p->ai_addrlen, backlog, 0) == ANET_ERR) s = ANET_ERR;
+        if (anetListen(err, s, p->ai_addr, p->ai_addrlen, backlog, 0, NULL) == ANET_ERR) s = ANET_ERR;
         goto end;
     }
     if (p == NULL) {
@@ -577,7 +616,7 @@ int anetTcp6Server(char *err, int port, char *bindaddr, int backlog) {
     return _anetTcpServer(err, port, bindaddr, AF_INET6, backlog);
}
 
-int anetUnixServer(char *err, char *path, mode_t perm, int backlog) {
+int anetUnixServer(char *err, char *path, mode_t perm, int backlog, char *group) {
     int s;
     struct sockaddr_un sa;
@@ -593,7 +632,7 @@ int anetUnixServer(char *err, char *path, mode_t perm, int backlog) {
     memset(&sa, 0, sizeof(sa));
     sa.sun_family = AF_LOCAL;
     valkey_strlcpy(sa.sun_path, path, sizeof(sa.sun_path));
-    if (anetListen(err, s, (struct sockaddr *)&sa, sizeof(sa), backlog, perm) == ANET_ERR) return ANET_ERR;
+    if (anetListen(err, s, (struct sockaddr *)&sa, sizeof(sa), backlog, perm, group) == ANET_ERR) return ANET_ERR;
     return s;
 }

src/anet.h
View File

@@ -1,6 +1,6 @@
 /* anet.c -- Basic TCP socket stuff made a bit less boring
  *
- * Copyright (c) 2006-2012, Salvatore Sanfilippo <antirez at gmail dot com>
+ * Copyright (c) 2006-2012, Redis Ltd.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -56,11 +56,12 @@ int anetTcpNonBlockBestEffortBindConnect(char *err, const char *addr, int port,
 int anetResolve(char *err, char *host, char *ipbuf, size_t ipbuf_len, int flags);
 int anetTcpServer(char *err, int port, char *bindaddr, int backlog);
 int anetTcp6Server(char *err, int port, char *bindaddr, int backlog);
-int anetUnixServer(char *err, char *path, mode_t perm, int backlog);
+int anetUnixServer(char *err, char *path, mode_t perm, int backlog, char *group);
 int anetTcpAccept(char *err, int serversock, char *ip, size_t ip_len, int *port);
 int anetUnixAccept(char *err, int serversock);
 int anetNonBlock(char *err, int fd);
 int anetBlock(char *err, int fd);
+int anetIsBlock(char *err, int fd);
 int anetCloexec(int fd);
 int anetEnableTcpNoDelay(char *err, int fd);
 int anetDisableTcpNoDelay(char *err, int fd);
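
For context, a hedged usage sketch of the extended anetUnixServer signature (the socket path, permissions, backlog, and group name below are hypothetical values; passing NULL as the group preserves the old behavior):

char err[ANET_ERR_LEN];
int fd = anetUnixServer(err, "/run/futriix.sock", 0660, 511, "daemon");
if (fd == ANET_ERR) fprintf(stderr, "listen failed: %s\n", err); /* err is filled via anetSetError */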

src/aof.c
View File

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2009-2012, Salvatore Sanfilippo <antirez at gmail dot com>
+ * Copyright (c) 2009-2012, Redis Ltd.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -31,6 +31,7 @@
 #include "bio.h"
 #include "rio.h"
 #include "functions.h"
+#include "module.h"
 #include <signal.h>
 #include <fcntl.h>
@@ -423,7 +424,7 @@ void aofManifestFreeAndUpdate(aofManifest *am) {
  *     appendonly.aof.1.base.aof (server.aof_use_rdb_preamble is no)
  *     appendonly.aof.1.base.rdb (server.aof_use_rdb_preamble is yes)
  */
-sds getNewBaseFileNameAndMarkPreAsHistory(aofManifest *am) {
+sds getNewBaseFileNameAndMarkPreAsHistory(aofManifest *am, int aof_use_rdb_preamble) {
     serverAssert(am != NULL);
     if (am->base_aof_info) {
         serverAssert(am->base_aof_info->file_type == AOF_FILE_TYPE_BASE);
@@ -431,7 +432,7 @@ sds getNewBaseFileNameAndMarkPreAsHistory(aofManifest *am) {
         listAddNodeHead(am->history_aof_list, am->base_aof_info);
     }
 
-    char *format_suffix = server.aof_use_rdb_preamble ? RDB_FORMAT_SUFFIX : AOF_FORMAT_SUFFIX;
+    char *format_suffix = aof_use_rdb_preamble ? RDB_FORMAT_SUFFIX : AOF_FORMAT_SUFFIX;
 
     aofInfo *ai = aofInfoCreate();
     ai->file_name = sdscatprintf(sdsempty(), "%s.%lld%s%s", server.aof_filename, ++am->curr_base_file_seq,
@@ -476,7 +477,7 @@ sds getLastIncrAofName(aofManifest *am) {
     }
 
     /* Or return the last one. */
-    listNode *lastnode = listIndex(am->incr_aof_list, -1);
+    listNode *lastnode = listLast(am->incr_aof_list);
     aofInfo *ai = listNodeValue(lastnode);
     return ai->file_name;
 }
@@ -712,7 +713,7 @@ void aofOpenIfNeededOnServerStart(void) {
     /* If we start with an empty dataset, we will force create a BASE file. */
     size_t incr_aof_len = listLength(server.aof_manifest->incr_aof_list);
     if (!server.aof_manifest->base_aof_info && !incr_aof_len) {
-        sds base_name = getNewBaseFileNameAndMarkPreAsHistory(server.aof_manifest);
+        sds base_name = getNewBaseFileNameAndMarkPreAsHistory(server.aof_manifest, server.aof_use_rdb_preamble);
         sds base_filepath = makePath(server.aof_dirname, base_name);
         if (rewriteAppendOnlyFile(base_filepath) != C_OK) {
             exit(1);
@@ -1371,10 +1372,12 @@ struct client *createAOFClient(void) {
      */
     c->raw_flag = 0;
     c->flag.deny_blocking = 1;
+    c->flag.fake = 1;
 
     /* We set the fake client as a replica waiting for the synchronization
      * so that the server will not try to send replies to this client. */
-    c->repl_state = REPLICA_STATE_WAIT_BGSAVE_START;
+    initClientReplicationData(c);
+    c->repl_data->repl_state = REPLICA_STATE_WAIT_BGSAVE_START;
     return c;
 }
@@ -1529,10 +1532,11 @@ int loadSingleAppendOnlyFile(char *filename) {
         }
 
         /* Command lookup */
-        cmd = lookupCommand(argv, argc);
-        if (!cmd) {
-            serverLog(LL_WARNING, "Unknown command '%s' reading the append only file %s", (char *)argv[0]->ptr,
-                      filename);
+        sds err = NULL;
+        fakeClient->cmd = fakeClient->lastcmd = cmd = lookupCommand(argv, argc);
+        if ((!cmd && !commandCheckExistence(fakeClient, &err)) || (cmd && !commandCheckArity(cmd, argc, &err))) {
+            serverLog(LL_WARNING, "Error reading the append only file %s, error: %s", filename, err);
+            sdsfree(err);
             freeClientArgv(fakeClient);
             ret = AOF_FAILED;
             goto cleanup;
@@ -1541,7 +1545,6 @@ int loadSingleAppendOnlyFile(char *filename) {
         if (cmd->proc == multiCommand) valid_before_multi = valid_up_to;
 
         /* Run the command in the context of a fake client */
-        fakeClient->cmd = fakeClient->lastcmd = cmd;
         if (fakeClient->flag.multi && fakeClient->cmd->proc != execCommand) {
             /* Note: we don't have to attempt calling evalGetCommandFlags,
              * since this is AOF, the checks in processCommand are not made
@@ -1887,30 +1890,29 @@ int rewriteSortedSetObject(rio *r, robj *key, robj *o) {
         }
     } else if (o->encoding == OBJ_ENCODING_SKIPLIST) {
         zset *zs = o->ptr;
-        dictIterator *di = dictGetIterator(zs->dict);
-        dictEntry *de;
-        while ((de = dictNext(di)) != NULL) {
-            sds ele = dictGetKey(de);
-            double *score = dictGetVal(de);
+        hashtableIterator iter;
+        hashtableInitIterator(&iter, zs->ht, 0);
+        void *next;
+        while (hashtableNext(&iter, &next)) {
+            zskiplistNode *node = next;
             if (count == 0) {
                 int cmd_items = (items > AOF_REWRITE_ITEMS_PER_CMD) ? AOF_REWRITE_ITEMS_PER_CMD : items;
                 if (!rioWriteBulkCount(r, '*', 2 + cmd_items * 2) || !rioWriteBulkString(r, "ZADD", 4) ||
                     !rioWriteBulkObject(r, key)) {
-                    dictReleaseIterator(di);
+                    hashtableResetIterator(&iter);
                     return 0;
                 }
             }
-            if (!rioWriteBulkDouble(r, *score) || !rioWriteBulkString(r, ele, sdslen(ele))) {
-                dictReleaseIterator(di);
+            sds ele = node->ele;
+            if (!rioWriteBulkDouble(r, node->score) || !rioWriteBulkString(r, ele, sdslen(ele))) {
+                hashtableResetIterator(&iter);
                 return 0;
             }
             if (++count == AOF_REWRITE_ITEMS_PER_CMD) count = 0;
             items--;
         }
-        dictReleaseIterator(di);
+        hashtableResetIterator(&iter);
     } else {
         serverPanic("Unknown sorted zset encoding");
     }
@@ -1920,7 +1922,7 @@ int rewriteSortedSetObject(rio *r, robj *key, robj *o) {
 /* Write either the key or the value of the currently selected item of a hash.
  * The 'hi' argument passes a valid hash iterator.
  * The 'what' filed specifies if to write a key or a value and can be
- * either OBJ_HASH_KEY or OBJ_HASH_VALUE.
+ * either OBJ_HASH_FIELD or OBJ_HASH_VALUE.
  *
  * The function returns 0 on error, non-zero on success. */
 static int rioWriteHashIteratorCursor(rio *r, hashTypeIterator *hi, int what) {
@@ -1934,7 +1936,7 @@ static int rioWriteHashIteratorCursor(rio *r, hashTypeIterator *hi, int what) {
             return rioWriteBulkString(r, (char *)vstr, vlen);
         else
             return rioWriteBulkLongLong(r, vll);
-    } else if (hi->encoding == OBJ_ENCODING_HT) {
+    } else if (hi->encoding == OBJ_ENCODING_HASHTABLE) {
         sds value = hashTypeCurrentFromHashTable(hi, what);
         return rioWriteBulkString(r, value, sdslen(value));
     }
@@ -1946,30 +1948,30 @@ static int rioWriteHashIteratorCursor(rio *r, hashTypeIterator *hi, int what) {
 /* Emit the commands needed to rebuild a hash object.
  * The function returns 0 on error, 1 on success. */
 int rewriteHashObject(rio *r, robj *key, robj *o) {
-    hashTypeIterator *hi;
+    hashTypeIterator hi;
     long long count = 0, items = hashTypeLength(o);
 
-    hi = hashTypeInitIterator(o);
-    while (hashTypeNext(hi) != C_ERR) {
+    hashTypeInitIterator(o, &hi);
+    while (hashTypeNext(&hi) != C_ERR) {
         if (count == 0) {
             int cmd_items = (items > AOF_REWRITE_ITEMS_PER_CMD) ? AOF_REWRITE_ITEMS_PER_CMD : items;
             if (!rioWriteBulkCount(r, '*', 2 + cmd_items * 2) || !rioWriteBulkString(r, "HMSET", 5) ||
                 !rioWriteBulkObject(r, key)) {
-                hashTypeReleaseIterator(hi);
+                hashTypeResetIterator(&hi);
                 return 0;
            }
        }
 
-        if (!rioWriteHashIteratorCursor(r, hi, OBJ_HASH_KEY) || !rioWriteHashIteratorCursor(r, hi, OBJ_HASH_VALUE)) {
-            hashTypeReleaseIterator(hi);
+        if (!rioWriteHashIteratorCursor(r, &hi, OBJ_HASH_FIELD) || !rioWriteHashIteratorCursor(r, &hi, OBJ_HASH_VALUE)) {
+            hashTypeResetIterator(&hi);
             return 0;
        }
         if (++count == AOF_REWRITE_ITEMS_PER_CMD) count = 0;
         items--;
     }
-    hashTypeReleaseIterator(hi);
+    hashTypeResetIterator(&hi);
     return 1;
 }
@@ -2160,7 +2162,7 @@ int rewriteModuleObject(rio *r, robj *key, robj *o, int dbid) {
     ValkeyModuleIO io;
     moduleValue *mv = o->ptr;
     moduleType *mt = mv->type;
-    moduleInitIOContext(io, mt, r, key, dbid);
+    moduleInitIOContext(&io, mt, r, key, dbid);
     mt->aof_rewrite(&io, key, mv->value);
     if (io.ctx) {
         moduleFreeContext(io.ctx);
@@ -2189,7 +2191,6 @@ werr:
 }
 
 int rewriteAppendOnlyFileRio(rio *aof) {
-    dictEntry *de;
     int j;
     long key_count = 0;
     long long updated_time = 0;
@@ -2216,19 +2217,20 @@ int rewriteAppendOnlyFileRio(rio *aof) {
         if (rioWrite(aof, selectcmd, sizeof(selectcmd) - 1) == 0) goto werr;
         if (rioWriteBulkLongLong(aof, j) == 0) goto werr;
 
-        kvs_it = kvstoreIteratorInit(db->keys);
+        kvs_it = kvstoreIteratorInit(db->keys, HASHTABLE_ITER_SAFE | HASHTABLE_ITER_PREFETCH_VALUES);
         /* Iterate this DB writing every entry */
-        while ((de = kvstoreIteratorNext(kvs_it)) != NULL) {
+        void *next;
+        while (kvstoreIteratorNext(kvs_it, &next)) {
+            robj *o = next;
             sds keystr;
-            robj key, *o;
+            robj key;
             long long expiretime;
             size_t aof_bytes_before_key = aof->processed_bytes;
 
-            keystr = dictGetKey(de);
-            o = dictGetVal(de);
+            keystr = objectGetKey(o);
             initStaticStringObject(key, keystr);
-            expiretime = getExpire(db, &key);
+            expiretime = objectGetExpire(o);
 
             /* Save the key and associated value */
             if (o->type == OBJ_STRING) {
@@ -2445,6 +2447,7 @@ int rewriteAppendOnlyFileBackground(void) {
         serverLog(LL_NOTICE, "Background append only file rewriting started by pid %ld", (long)childpid);
         server.aof_rewrite_scheduled = 0;
         server.aof_rewrite_time_start = time(NULL);
+        server.aof_rewrite_use_rdb_preamble = server.aof_use_rdb_preamble;
         return C_OK;
     }
     return C_OK; /* unreached */
@@ -2557,7 +2560,7 @@ void backgroundRewriteDoneHandler(int exitcode, int bysignal) {
 
         /* Get a new BASE file name and mark the previous (if we have)
          * as the HISTORY type. */
-        sds new_base_filename = getNewBaseFileNameAndMarkPreAsHistory(temp_am);
+        sds new_base_filename = getNewBaseFileNameAndMarkPreAsHistory(temp_am, server.aof_rewrite_use_rdb_preamble);
         serverAssert(new_base_filename != NULL);
         new_base_filepath = makePath(server.aof_dirname, new_base_filename);

src/asciilogo.h
View File

@@ -1,6 +1,7 @@
 /*
- * Copyright (c) 2009-2012, Salvatore Sanfilippo <antirez at gmail dot com>
+ * Copyright (c) 2009-2012, Redis Ltd.
  * Copyright (c) 2024, Valkey contributors
+ * Copyright (c) 2025, Futriix contributors
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -28,25 +29,19 @@
  * POSSIBILITY OF SUCH DAMAGE.
  */
+/* clang-format off */
 const char *ascii_logo =
-    " .+^+. \n"
-    " .+#########+. \n"
-    " .+########+########+. Valkey %s (%s/%d) %s bit\n"
-    " .+########+' '+########+. \n"
-    " .########+' .+. '+########. Running in %s mode\n"
-    " |####+' .+#######+. '+####| Port: %d\n"
-    " |###| .+###############+. |###| PID: %ld \n"
-    " |###| |#####*'' ''*#####| |###| \n"
-    " |###| |####' .-. '####| |###| \n"
-    " |###| |###( (@@@) )###| |###| https://valkey.io \n"
-    " |###| |####. '-' .####| |###| \n"
-    " |###| |#####*. .*#####| |###| \n"
-    " |###| '+#####| |#####+' |###| \n"
-    " |####+. +##| |#+' .+####| \n"
-    " '#######+ |##| .+########' \n"
-    " '+###| |##| .+########+' \n"
-    " '| |####+########+' \n"
-    " +#########+' \n"
-    " '+v+' \n\n";
+    " \n "
+    "▗▞▀▀▘█ ▐▌ ■ ▄▄▄ ▄ ▄ ▄ ▄ \n"
+    "▐▌ ▀▄▄▞▘▗▄▟▙▄▖█ ▄ ▄ ▀▄▀ \n"
+    "▐▛▀▘ ▐▌ █ █ █ ▄▀ ▀▄ \n"
+    "▐▌ ▐▌ █ █ Futriix %s (%s/%d) %s bit\n"
+    " Running in %s mode\n"
+    " Port: %d\n"
+    " PID: %ld \n"
+    " \n"
+    " \n\n";
+/* clang-format off */

src/atomicvar.h
View File

@@ -1,197 +0,0 @@
/* This file implements atomic counters using c11 _Atomic, __atomic or __sync
* macros if available, otherwise we will throw an error when compile.
*
* The exported interface is composed of the following macros:
*
* atomicIncr(var,count) -- Increment the atomic counter
* atomicGetIncr(var,oldvalue_var,count) -- Get and increment the atomic counter
* atomicIncrGet(var,newvalue_var,count) -- Increment and get the atomic counter new value
* atomicDecr(var,count) -- Decrement the atomic counter
* atomicGet(var,dstvar) -- Fetch the atomic counter value
* atomicSet(var,value) -- Set the atomic counter value
* atomicGetWithSync(var,value) -- 'atomicGet' with inter-thread synchronization
* atomicSetWithSync(var,value) -- 'atomicSet' with inter-thread synchronization
*
* Atomic operations on flags.
* Flag type can be int, long, long long or their unsigned counterparts.
* The value of the flag can be 1 or 0.
*
* atomicFlagGetSet(var,oldvalue_var) -- Get and set the atomic counter value
*
* NOTE1: __atomic* and _Atomic implementations can be actually elaborated to support any value by changing the
* hardcoded new value passed to __atomic_exchange* from 1 to @param count
* i.e oldvalue_var = atomic_exchange_explicit(&var, count).
* However, in order to be compatible with the __sync functions family, we can use only 0 and 1.
* The only exchange alternative suggested by __sync is __sync_lock_test_and_set,
* But as described by the gnu manual for __sync_lock_test_and_set():
* https://gcc.gnu.org/onlinedocs/gcc/_005f_005fsync-Builtins.html
* "A target may support reduced functionality here by which the only valid value to store is the immediate constant 1.
* The exact value actually stored in *ptr is implementation defined." Hence, we can't rely on it for a any value other
* than 1. We eventually chose to implement this method with __sync_val_compare_and_swap since it satisfies
* functionality needed for atomicFlagGetSet (if the flag was 0 -> set to 1, if it's already 1 -> do nothing, but the
* final result is that the flag is set), and also it has a full barrier (__sync_lock_test_and_set has acquire barrier).
*
* NOTE2: Unlike other atomic type, which aren't guaranteed to be lock free, c11 atomic_flag does.
* To check whether a type is lock free, atomic_is_lock_free() can be used.
* It can be considered to limit the flag type to atomic_flag to improve performance.
*
* Never use return value from the macros, instead use the AtomicGetIncr()
* if you need to get the current value and increment it atomically, like
* in the following example:
*
* long oldvalue;
* atomicGetIncr(myvar,oldvalue,1);
* doSomethingWith(oldvalue);
*
* ----------------------------------------------------------------------------
*
* Copyright (c) 2015, Salvatore Sanfilippo <antirez at gmail dot com>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of Redis nor the names of its contributors may be used
* to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <pthread.h>
#include "config.h"
#ifndef __ATOMIC_VAR_H
#define __ATOMIC_VAR_H
/* Define serverAtomic for atomic variable. */
#define serverAtomic
/* To test the server with Helgrind (a Valgrind tool) it is useful to define
* the following macro, so that __sync macros are used: those can be detected
* by Helgrind (even if they are less efficient) so that no false positive
* is reported. */
// #define __ATOMIC_VAR_FORCE_SYNC_MACROS
/* There will be many false positives if we test the server with Helgrind, since
* Helgrind can't understand we have imposed ordering on the program, so
* we use macros in helgrind.h to tell Helgrind inter-thread happens-before
* relationship explicitly for avoiding false positives.
*
* For more details, please see: valgrind/helgrind.h and
* https://www.valgrind.org/docs/manual/hg-manual.html#hg-manual.effective-use
*
* These macros take effect only when 'make helgrind', and you must first
* install Valgrind in the default path configuration. */
#ifdef __ATOMIC_VAR_FORCE_SYNC_MACROS
#include <valgrind/helgrind.h>
#else
#define ANNOTATE_HAPPENS_BEFORE(v) ((void)v)
#define ANNOTATE_HAPPENS_AFTER(v) ((void)v)
#endif
#if !defined(__ATOMIC_VAR_FORCE_SYNC_MACROS) && defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201112L) && \
!defined(__STDC_NO_ATOMICS__)
/* Use '_Atomic' keyword if the compiler supports. */
#undef serverAtomic
#define serverAtomic _Atomic
/* Implementation using _Atomic in C11. */
#include <stdatomic.h>
#define atomicIncr(var, count) atomic_fetch_add_explicit(&var, (count), memory_order_relaxed)
#define atomicGetIncr(var, oldvalue_var, count) \
do { \
oldvalue_var = atomic_fetch_add_explicit(&var, (count), memory_order_relaxed); \
} while (0)
#define atomicIncrGet(var, newvalue_var, count) newvalue_var = atomicIncr(var, count) + count
#define atomicDecr(var, count) atomic_fetch_sub_explicit(&var, (count), memory_order_relaxed)
#define atomicGet(var, dstvar) \
do { \
dstvar = atomic_load_explicit(&var, memory_order_relaxed); \
} while (0)
#define atomicSet(var, value) atomic_store_explicit(&var, value, memory_order_relaxed)
#define atomicGetWithSync(var, dstvar) \
do { \
dstvar = atomic_load_explicit(&var, memory_order_seq_cst); \
} while (0)
#define atomicSetWithSync(var, value) atomic_store_explicit(&var, value, memory_order_seq_cst)
#define atomicFlagGetSet(var, oldvalue_var) oldvalue_var = atomic_exchange_explicit(&var, 1, memory_order_relaxed)
#define REDIS_ATOMIC_API "c11-builtin"
#elif !defined(__ATOMIC_VAR_FORCE_SYNC_MACROS) && \
(!defined(__clang__) || !defined(__APPLE__) || __apple_build_version__ > 4210057) && defined(__ATOMIC_RELAXED) && \
defined(__ATOMIC_SEQ_CST)
/* Implementation using __atomic macros. */
#define atomicIncr(var, count) __atomic_add_fetch(&var, (count), __ATOMIC_RELAXED)
#define atomicIncrGet(var, newvalue_var, count) newvalue_var = __atomic_add_fetch(&var, (count), __ATOMIC_RELAXED)
#define atomicGetIncr(var, oldvalue_var, count) \
do { \
oldvalue_var = __atomic_fetch_add(&var, (count), __ATOMIC_RELAXED); \
} while (0)
#define atomicDecr(var, count) __atomic_sub_fetch(&var, (count), __ATOMIC_RELAXED)
#define atomicGet(var, dstvar) \
do { \
dstvar = __atomic_load_n(&var, __ATOMIC_RELAXED); \
} while (0)
#define atomicSet(var, value) __atomic_store_n(&var, value, __ATOMIC_RELAXED)
#define atomicGetWithSync(var, dstvar) \
do { \
dstvar = __atomic_load_n(&var, __ATOMIC_SEQ_CST); \
} while (0)
#define atomicSetWithSync(var, value) __atomic_store_n(&var, value, __ATOMIC_SEQ_CST)
#define atomicFlagGetSet(var, oldvalue_var) oldvalue_var = __atomic_exchange_n(&var, 1, __ATOMIC_RELAXED)
#define REDIS_ATOMIC_API "atomic-builtin"
#elif defined(HAVE_ATOMIC)
/* Implementation using __sync macros. */
#define atomicIncr(var, count) __sync_add_and_fetch(&var, (count))
#define atomicIncrGet(var, newvalue_var, count) newvalue_var = __sync_add_and_fetch(&var, (count))
#define atomicGetIncr(var, oldvalue_var, count) \
do { \
oldvalue_var = __sync_fetch_and_add(&var, (count)); \
} while (0)
#define atomicDecr(var, count) __sync_sub_and_fetch(&var, (count))
#define atomicGet(var, dstvar) \
do { \
dstvar = __sync_sub_and_fetch(&var, 0); \
} while (0)
#define atomicSet(var, value) \
do { \
while (!__sync_bool_compare_and_swap(&var, var, value)); \
} while (0)
/* Actually the builtin issues a full memory barrier by default. */
#define atomicGetWithSync(var, dstvar) \
do { \
dstvar = __sync_sub_and_fetch(&var, 0, __sync_synchronize); \
ANNOTATE_HAPPENS_AFTER(&var); \
} while (0)
#define atomicSetWithSync(var, value) \
do { \
ANNOTATE_HAPPENS_BEFORE(&var); \
while (!__sync_bool_compare_and_swap(&var, var, value, __sync_synchronize)); \
} while (0)
#define atomicFlagGetSet(var, oldvalue_var) oldvalue_var = __sync_val_compare_and_swap(&var, 0, 1)
#define REDIS_ATOMIC_API "sync-builtin"
#else
#error "Unable to determine atomic operations for your platform"
#endif
#endif /* __ATOMIC_VAR_H */

View File

@@ -31,7 +31,7 @@
  *
  * ----------------------------------------------------------------------------
  *
- * Copyright (c) 2009-2012, Salvatore Sanfilippo <antirez at gmail dot com>
+ * Copyright (c) 2009-2012, Redis Ltd.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without

Some files were not shown because too many files have changed in this diff.