Compare commits

1 commit

Author:  Ping Xie
SHA1:    00a5be7972
Message: Valkey 8.0 RC1 release notes (#850)
         Signed-off-by: Ping Xie <pingxie@google.com>
         Signed-off-by: Ping Xie <pingxie@outlook.com>
         Signed-off-by: Madelyn Olson <madelyneolson@gmail.com>
         Co-authored-by: Binbin <binloveplay1314@qq.com>
         Co-authored-by: Madelyn Olson <madelyneolson@gmail.com>
Date:    2024-08-01 08:59:43 -07:00
450 changed files with 16466 additions and 34963 deletions


@ -1,76 +0,0 @@
format:
_help_line_width:
- How wide to allow formatted cmake files
line_width: 120
_help_tab_size:
- How many spaces to tab for indent
tab_size: 4
_help_use_tabchars:
- If true, lines are indented using tab characters (utf-8
- 0x09) instead of <tab_size> space characters (utf-8 0x20).
- In cases where the layout would require a fractional tab
- character, the behavior of the fractional indentation is
- governed by <fractional_tab_policy>
use_tabchars: false
_help_separate_ctrl_name_with_space:
- If true, separate flow control names from their parentheses
- with a space
separate_ctrl_name_with_space: true
_help_min_prefix_chars:
- If the statement spelling length (including space and
- parenthesis) is smaller than this amount, then force reject
- nested layouts.
min_prefix_chars: 4
_help_max_prefix_chars:
- If the statement spelling length (including space and
- parenthesis) is larger than the tab width by more than this
- amount, then force reject un-nested layouts.
max_prefix_chars: 10
_help_max_lines_hwrap:
- If a candidate layout is wrapped horizontally but it exceeds
- this many lines, then reject the layout.
max_lines_hwrap: 2
_help_line_ending:
- What style line endings to use in the output.
line_ending: unix
_help_command_case:
- Format command names consistently as 'lower' or 'upper' case
command_case: lower
_help_keyword_case:
- Format keywords consistently as 'lower' or 'upper' case
keyword_case: unchanged
_help_always_wrap:
- A list of command names which should always be wrapped
always_wrap: []
_help_enable_sort:
- If true, the argument lists which are known to be sortable
- will be sorted lexicographically
enable_sort: true
_help_autosort:
- If true, the parsers may infer whether or not an argument
- list is sortable (without annotation).
autosort: false
_help_require_valid_layout:
- By default, if cmake-format cannot successfully fit
- everything into the desired linewidth it will apply the
- last, most aggressive attempt that it made. If this flag is
- True, however, cmake-format will print error, exit with non-
- zero status code, and write-out nothing
require_valid_layout: false
_help_layout_passes:
- A dictionary mapping layout nodes to a list of wrap
- decisions. See the documentation for more information.
layout_passes: {}
encode:
_help_emit_byteorder_mark:
- If true, emit the unicode byte-order mark (BOM) at the start
- of the file
emit_byteorder_mark: false
_help_input_encoding:
- Specify the encoding of the input file. Defaults to utf-8
input_encoding: utf-8
_help_output_encoding:
- Specify the encoding of the output file. Defaults to utf-8.
- Note that cmake only claims to support utf-8 so be careful
- when using anything else
output_encoding: utf-8
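
This removed file is a cmake-format configuration dump. For reference, a config like this is consumed by the cmake-format CLI roughly as follows (a sketch; the config filename here is an assumption based on the tool's conventions):

    # Rewrite a CMake file in place using an explicit config
    cmake-format --config-files .cmake-format.yaml -i CMakeLists.txt
    # Or verify formatting without modifying anything
    cmake-format --config-files .cmake-format.yaml --check CMakeLists.txt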


@ -2,16 +2,18 @@
[files]
extend-exclude = [
".git/",
"deps/",
# crc16_slottable is primarily pre-generated random strings.
"src/crc16_slottable.h",
# 00-RELEASENOTES includes non-English names that might be flagged as typos in other contexts.
"00-RELEASENOTES",
]
ignore-hidden = false
[default.extend-words]
advices = "advices"
exat = "exat"
optin = "optin"
ro = "ro"
smove = "smove"
[type.c]
@ -20,7 +22,6 @@ extend-ignore-re = [
"D4C4DAA4", # sha1.c
"Georg Nees",
"\\[l\\]ist", # eval.c
'"LKE"', # test_rax.c
]
[type.tcl]
@ -28,23 +29,26 @@ extend-ignore-re = [
"DUMPed",
]
[type.sv.extend-identifiers]
# sv = .h
module_gil_acquring = "module_gil_acquring"
[type.c.extend-identifiers]
advices = "advices"
ang = "ang"
clen = "clen"
fle = "fle"
module_gil_acquring = "module_gil_acquring"
nd = "nd"
ot = "ot"
[type.tcl.extend-identifiers]
fo = "fo"
oll = "oll"
stressers = "stressers"
[type.sv.extend-identifiers]
# sv = .h
fo = "fo"
[type.sv.extend-words]
# sv = .h
fo = "fo"
seeked = "seeked"
[type.c.extend-words]
@ -55,6 +59,7 @@ limite = "limite"
pn = "pn"
seeked = "seeked"
tre = "tre"
ws = "ws"
[type.systemd.extend-words]
# systemd = .conf
@ -62,4 +67,5 @@ ake = "ake"
[type.tcl.extend-words]
fo = "fo"
lst = "lst"
tre = "tre"
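
This is the typos spell-checker config; the extend-words and extend-identifiers tables exempt strings that would otherwise be flagged. A sketch of running it locally (the config path is an assumption; typos also auto-discovers typos.toml at the repo root):

    typos --config typos.toml src/ tests/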


@ -1,17 +0,0 @@
# This is a file that can be used by git-blame to ignore some revisions.
# (git 2.23+, released in August 2019)
#
# Can be configured as follow:
#
# $ git config blame.ignoreRevsFile .git-blame-ignore-revs
#
# For more information you can look at git-blame(1) man page.
# Applied clang-format (#323)
c41dd77a3e93e02be3c4bc75d8c76b7b4169a4ce
# Removed terms `master` and `slave` from the source code (#591)
54c97479356ecf41b4b63733494a1be2ab919e17
# Set ColumnLimit to 0 and reformat (#1045)
af811748e7819a5ac31a6df4b21622aa58c64ae4
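
The header above documents the persistent per-clone setup; the same list can also be applied to a single blame run with a standard git flag (the source file here is just an example):

    git blame --ignore-revs-file .git-blame-ignore-revs src/server.c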


@ -9,9 +9,6 @@ contact_links:
- name: Chat with us on Matrix?
url: https://matrix.to/#/#valkey:matrix.org
about: We are on Matrix too!
- name: Chat with us on Slack?
url: https://join.slack.com/t/valkey-oss-developer/shared_invite/zt-2nxs51chx-EB9hu9Qdch3GMfRcztTSkQ
about: We are on Slack too!
- name: Documentation issue?
url: https://github.com/valkey-io/valkey-doc/issues
about: Report it on the valkey-doc repo.


@ -24,11 +24,11 @@ runs:
- name: Get targets
run: |
x86_arch=$(jq -c '[.linux_targets[] | select(.arch=="x86_64")]' .github/actions/generate-package-build-matrix/build-config.json)
x86_arch=$(jq -c '[.linux_targets[] | select(.arch=="x86_64")]' utils/releasetools/build-config.json)
x86_matrix=$(echo "{ \"distro\" : $x86_arch }" | jq -c .)
echo "X86_MATRIX=$x86_matrix" >> $GITHUB_ENV
arm_arch=$(jq -c '[.linux_targets[] | select(.arch=="arm64")]' .github/actions/generate-package-build-matrix/build-config.json)
arm_arch=$(jq -c '[.linux_targets[] | select(.arch=="arm64")]' utils/releasetools/build-config.json)
arm_matrix=$(echo "{ \"distro\" : $arm_arch }" | jq -c .)
echo "ARM_MATRIX=$arm_matrix" >> $GITHUB_ENV
shell: bash
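
Both sides of this change feed the same jq pipeline; only the config path moves. A minimal sketch of the data flow, with illustrative target values (the arch/target/platform fields match how matrix.distro is consumed later in these workflows):

    $ cat utils/releasetools/build-config.json
    {"linux_targets": [
      {"arch": "x86_64", "target": "ubuntu20.04", "platform": "focal"},
      {"arch": "arm64",  "target": "ubuntu20.04", "platform": "focal"}
    ]}
    $ jq -c '[.linux_targets[] | select(.arch=="x86_64")]' utils/releasetools/build-config.json
    [{"arch":"x86_64","target":"ubuntu20.04","platform":"focal"}]

The result is then wrapped as { "distro": [...] } and exported through $GITHUB_ENV so fromJSON can build the job matrix.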


@ -3,12 +3,7 @@ name: Build Release Packages
on:
release:
types: [published]
push:
paths:
- '.github/workflows/build-release-packages.yml'
- '.github/workflows/call-build-linux-arm-packages.yml'
- '.github/workflows/call-build-linux-x86-packages.yml'
- '.github/actions/generate-package-build-matrix/build-config.json'
workflow_dispatch:
inputs:
version:
@ -16,19 +11,17 @@ on:
required: true
permissions:
id-token: write
contents: read
jobs:
# This job provides the version metadata from the tag for the other jobs to use.
release-build-get-meta:
name: Get metadata to build
if: github.event_name == 'workflow_dispatch' || github.repository == 'valkey-io/valkey'
runs-on: ubuntu-latest
outputs:
version: ${{ steps.get_version.outputs.VERSION }}
is_test: ${{ steps.check-if-testing.outputs.IS_TEST }}
steps:
- run: |
echo "Version: ${{ inputs.version || github.ref_name }}"
shell: bash
@ -39,13 +32,8 @@ jobs:
- name: Get the version
id: get_version
run: |
if [[ "${{ github.event_name }}" == "push" ]]; then
VERSION=${{ github.ref_name }}
else
VERSION="${INPUT_VERSION}"
fi
VERSION="${INPUT_VERSION}"
if [ -z "${VERSION}" ]; then
echo "Error: No version specified"
exit 1
fi
echo "VERSION=$VERSION" >> $GITHUB_OUTPUT
@ -55,21 +43,8 @@ jobs:
# only ever be a tag
INPUT_VERSION: ${{ inputs.version || github.ref_name }}
- name: Check if we are testing
id: check-if-testing
run: |
if [[ "${{ github.event_name }}" == "push" ]]; then
echo "This is a test workflow -> We will upload to the Test S3 Bucket"
echo "IS_TEST=true" >> $GITHUB_OUTPUT
else
echo "This is a Release workflow -> We will upload to the Release S3 Bucket"
echo "IS_TEST=false" >> $GITHUB_OUTPUT
fi
shell: bash
generate-build-matrix:
name: Generating build matrix
if: github.event_name == 'workflow_dispatch' || github.repository == 'valkey-io/valkey'
runs-on: ubuntu-latest
outputs:
x86_64-build-matrix: ${{ steps.set-matrix.outputs.x86_64-build-matrix }}
@ -81,7 +56,7 @@ jobs:
- uses: ./.github/actions/generate-package-build-matrix
id: set-matrix
with:
ref: ${{ needs.release-build-get-meta.outputs.version }}
ref: ${{ inputs.version || github.ref_name }}
release-build-linux-x86-packages:
needs:
@ -92,10 +67,11 @@ jobs:
version: ${{ needs.release-build-get-meta.outputs.version }}
ref: ${{ inputs.version || github.ref_name }}
build_matrix: ${{ needs.generate-build-matrix.outputs.x86_64-build-matrix }}
region: us-west-2
secrets:
bucket_name: ${{ needs.release-build-get-meta.outputs.is_test == 'true' && secrets.AWS_S3_TEST_BUCKET || secrets.AWS_S3_BUCKET }}
role_to_assume: ${{ secrets.AWS_ROLE_TO_ASSUME }}
token: ${{ secrets.GITHUB_TOKEN }}
bucket: ${{ secrets.AWS_S3_BUCKET }}
access_key_id: ${{ secrets.AWS_S3_ACCESS_KEY_ID }}
secret_access_key: ${{ secrets.AWS_S3_ACCESS_KEY }}
release-build-linux-arm-packages:
needs:
@ -106,7 +82,8 @@ jobs:
version: ${{ needs.release-build-get-meta.outputs.version }}
ref: ${{ inputs.version || github.ref_name }}
build_matrix: ${{ needs.generate-build-matrix.outputs.arm64-build-matrix }}
region: us-west-2
secrets:
bucket_name: ${{ needs.release-build-get-meta.outputs.is_test == 'true' && secrets.AWS_S3_TEST_BUCKET || secrets.AWS_S3_BUCKET }}
role_to_assume: ${{ secrets.AWS_ROLE_TO_ASSUME }}
token: ${{ secrets.GITHUB_TOKEN }}
bucket: ${{ secrets.AWS_S3_BUCKET }}
access_key_id: ${{ secrets.AWS_S3_ACCESS_KEY_ID }}
secret_access_key: ${{ secrets.AWS_S3_ACCESS_KEY }}


@ -15,27 +15,28 @@ on:
description: The build targets to produce as a JSON matrix.
type: string
required: true
region:
description: The AWS region to push packages into.
type: string
required: true
secrets:
bucket_name:
description: The S3 bucket to push packages into.
required: true
role_to_assume:
description: The role to assume for the S3 bucket.
token:
description: The Github token or similar to authenticate with.
required: true
bucket:
description: The name of the S3 bucket to push packages into.
required: false
access_key_id:
description: The S3 access key id for the bucket.
required: false
secret_access_key:
description: The S3 secret access key for the bucket.
required: false
permissions:
id-token: write
contents: read
jobs:
build-valkey:
# Capture source tarball and generate checksum for it
name: Build package ${{ matrix.distro.target }} ${{ matrix.distro.arch }}
runs-on: "ubuntu-latest"
runs-on: 'ubuntu-latest'
strategy:
fail-fast: false
matrix: ${{ fromJSON(inputs.build_matrix) }}
@ -45,30 +46,34 @@ jobs:
with:
ref: ${{ inputs.version }}
- name: Configure AWS credentials
uses: aws-actions/configure-aws-credentials@v4
with:
aws-region: ${{ inputs.region }}
role-to-assume: ${{ secrets.role_to_assume }}
- name: Make Valkey
uses: uraimo/run-on-arch-action@v2
with:
arch: aarch64
distro: ${{matrix.distro.target}}
install: apt-get update && apt-get install -y build-essential libssl-dev libsystemd-dev
run: make -C src all BUILD_TLS=yes USE_SYSTEMD=yes
install: apt-get update && apt-get install -y build-essential libssl-dev
run: make -C src all BUILD_TLS=yes
- name: Create Tarball and SHA256sums
run: |
TAR_FILE_NAME=valkey-${{inputs.version}}-${{matrix.distro.platform}}-${{ matrix.distro.arch}}
mkdir -p "$TAR_FILE_NAME/bin" "$TAR_FILE_NAME/share"
rsync -av --exclude='*.c' --exclude='*.d' --exclude='*.o' src/valkey-* "$TAR_FILE_NAME/bin/"
cp -v /home/runner/work/valkey/valkey/COPYING "$TAR_FILE_NAME/share/LICENSE"
mkdir -p $TAR_FILE_NAME/bin $TAR_FILE_NAME/share
cp -rfv src/valkey-* $TAR_FILE_NAME/bin
cp -v /home/runner/work/valkey/valkey/COPYING $TAR_FILE_NAME/share/LICENSE
tar -czvf $TAR_FILE_NAME.tar.gz $TAR_FILE_NAME
sha256sum $TAR_FILE_NAME.tar.gz > $TAR_FILE_NAME.tar.gz.sha256
mkdir -p packages-files
cp -rfv $TAR_FILE_NAME.tar* packages-files/
- name: Install AWS cli.
run: |
sudo apt-get install -y awscli
- name: Configure AWS credentials
run: |
aws configure set region us-west-2
aws configure set aws_access_key_id ${{ secrets.access_key_id }}
aws configure set aws_secret_access_key ${{ secrets.secret_access_key }}
- name: Sync to S3
run: aws s3 sync packages-files s3://${{ secrets.bucket_name }}/releases/
run: aws s3 sync packages-files s3://${{secrets.bucket}}/releases/
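
Since each tarball is published alongside a .sha256 companion, downstream verification is a standard sha256sum check. A sketch with placeholder version/platform values (the bucket URL is whatever fronts the release bucket):

    wget https://<bucket-url>/releases/valkey-8.0.0-focal-arm64.tar.gz
    wget https://<bucket-url>/releases/valkey-8.0.0-focal-arm64.tar.gz.sha256
    sha256sum -c valkey-8.0.0-focal-arm64.tar.gz.sha256   # prints 'OK' on match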


@ -15,27 +15,28 @@ on:
description: The build targets to produce as a JSON matrix.
type: string
required: true
region:
description: The AWS region to upload the packages to.
type: string
required: true
secrets:
bucket_name:
description: The name of the S3 bucket to upload the packages to.
required: true
role_to_assume:
description: The role to assume for the S3 bucket.
token:
description: The Github token or similar to authenticate with.
required: true
bucket:
description: The name of the S3 bucket to push packages into.
required: false
access_key_id:
description: The S3 access key id for the bucket.
required: false
secret_access_key:
description: The S3 secret access key for the bucket.
required: false
permissions:
id-token: write
contents: read
jobs:
build-valkey:
# Capture source tarball and generate checksum for it
name: Build package ${{ matrix.distro.target }} ${{ matrix.distro.arch }}
runs-on: ${{matrix.distro.target}}
runs-on: 'ubuntu-latest'
strategy:
fail-fast: false
matrix: ${{ fromJSON(inputs.build_matrix) }}
@ -45,28 +46,28 @@ jobs:
with:
ref: ${{ inputs.version }}
- name: Configure AWS credentials
uses: aws-actions/configure-aws-credentials@v4
with:
aws-region: ${{ inputs.region }}
role-to-assume: ${{ secrets.role_to_assume }}
- name: Install dependencies
run: sudo apt-get update && sudo apt-get install -y build-essential libssl-dev libsystemd-dev
run: sudo apt-get update && sudo apt-get install -y build-essential libssl-dev jq wget awscli
- name: Make Valkey
run: make -C src all BUILD_TLS=yes USE_SYSTEMD=yes
run: make -C src all BUILD_TLS=yes
- name: Create Tarball and SHA256sums
run: |
TAR_FILE_NAME=valkey-${{inputs.version}}-${{matrix.distro.platform}}-${{ matrix.distro.arch}}
mkdir -p "$TAR_FILE_NAME/bin" "$TAR_FILE_NAME/share"
rsync -av --exclude='*.c' --exclude='*.d' --exclude='*.o' src/valkey-* "$TAR_FILE_NAME/bin/"
cp -v /home/runner/work/valkey/valkey/COPYING "$TAR_FILE_NAME/share/LICENSE"
mkdir -p $TAR_FILE_NAME/bin $TAR_FILE_NAME/share
cp -rfv src/valkey-* $TAR_FILE_NAME/bin
cp -v /home/runner/work/valkey/valkey/COPYING $TAR_FILE_NAME/share/LICENSE
tar -czvf $TAR_FILE_NAME.tar.gz $TAR_FILE_NAME
sha256sum $TAR_FILE_NAME.tar.gz > $TAR_FILE_NAME.tar.gz.sha256
mkdir -p packages-files
cp -rfv $TAR_FILE_NAME.tar* packages-files/
- name: Configure AWS credentials
run: |
aws configure set region us-west-2
aws configure set aws_access_key_id ${{ secrets.access_key_id }}
aws configure set aws_secret_access_key ${{ secrets.secret_access_key }}
- name: Sync to S3
run: aws s3 sync packages-files s3://${{ secrets.bucket_name }}/releases/
run: aws s3 sync packages-files s3://${{secrets.bucket}}/releases/


@ -2,10 +2,6 @@ name: CI
on: [push, pull_request]
concurrency:
group: ci-${{ github.head_ref || github.ref }}
cancel-in-progress: true
permissions:
contents: read
@ -17,16 +13,11 @@ jobs:
- name: make
# Fail build if there are warnings
# build with TLS just for compilation coverage
run: make -j4 all-with-unit-tests SERVER_CFLAGS='-Werror' BUILD_TLS=yes USE_FAST_FLOAT=yes
- name: install old server for compatibility testing
run: |
cd tests/tmp
wget https://download.valkey.io/releases/valkey-7.2.7-noble-x86_64.tar.gz
tar -xvf valkey-7.2.7-noble-x86_64.tar.gz
run: make all-with-unit-tests SERVER_CFLAGS='-Werror' BUILD_TLS=yes
- name: test
run: |
sudo apt-get install tcl8.6 tclx
./runtest --verbose --tags -slow --dump-logs --other-server-path tests/tmp/valkey-7.2.7-noble-x86_64/bin/valkey-server
./runtest --verbose --tags -slow --dump-logs
- name: module api test
run: CFLAGS='-Werror' ./runtest-moduleapi --verbose --dump-logs
- name: validate commands.def up to date
@ -39,38 +30,13 @@ jobs:
run: |
./src/valkey-unit-tests
test-ubuntu-latest-cmake:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1
- name: cmake and make
run: |
sudo apt-get install -y cmake libssl-dev
mkdir -p build-release
cd build-release
cmake -DCMAKE_BUILD_TYPE=Release .. -DBUILD_TLS=yes -DBUILD_UNIT_TESTS=yes
make -j$(nproc)
- name: test
run: |
sudo apt-get install -y tcl8.6 tclx
ln -sf $(pwd)/build-release/bin/valkey-server $(pwd)/src/valkey-server
ln -sf $(pwd)/build-release/bin/valkey-cli $(pwd)/src/valkey-cli
ln -sf $(pwd)/build-release/bin/valkey-benchmark $(pwd)/src/valkey-benchmark
ln -sf $(pwd)/build-release/bin/valkey-server $(pwd)/src/valkey-check-aof
ln -sf $(pwd)/build-release/bin/valkey-server $(pwd)/src/valkey-check-rdb
ln -sf $(pwd)/build-release/bin/valkey-server $(pwd)/src/valkey-sentinel
./runtest --verbose --tags -slow --dump-logs
- name: unit tests
run: |
./build-release/bin/valkey-unit-tests
test-sanitizer-address:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1
- name: make
# build with TLS module just for compilation coverage
run: make -j4 SANITIZER=address SERVER_CFLAGS='-Werror' BUILD_TLS=module
run: make SANITIZER=address SERVER_CFLAGS='-Werror' BUILD_TLS=module
- name: testprep
run: sudo apt-get install tcl8.6 tclx -y
- name: test
@ -82,14 +48,10 @@ jobs:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1
- name: prepare-development-libraries
run: sudo apt-get install librdmacm-dev libibverbs-dev
- name: make-rdma-module
run: make -j4 BUILD_RDMA=module
- name: make-rdma-builtin
- name: make
run: |
make distclean
make -j4 BUILD_RDMA=yes
sudo apt-get install librdmacm-dev libibverbs-dev
make BUILD_RDMA=module
- name: clone-rxe-kmod
run: |
mkdir -p tests/rdma/rxe
@ -110,37 +72,30 @@ jobs:
- name: make
run: |
apt-get update && apt-get install -y build-essential
make -j4 SERVER_CFLAGS='-Werror'
make SERVER_CFLAGS='-Werror'
build-macos-latest:
runs-on: macos-latest
steps:
- uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1
- name: make
# Build with additional upcoming features
run: make -j3 all-with-unit-tests SERVER_CFLAGS='-Werror' USE_FAST_FLOAT=yes
run: make SERVER_CFLAGS='-Werror'
build-32bit:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1
- name: make
# Fast float requires C++ 32-bit libraries to compile on 64-bit ubuntu
# machine i.e. "-cross" suffixed version. Cross-compiling c++ to 32-bit
# also requires multilib support for g++ compiler i.e. "-multilib"
# suffixed version of g++. g++-multilib generally includes libstdc++.
# *cross version as well, but it is also added explicitly just in case.
run: |
sudo apt-get update
sudo apt-get install libc6-dev-i386 libstdc++-11-dev-i386-cross gcc-multilib g++-multilib
make -j4 SERVER_CFLAGS='-Werror' 32bit USE_FAST_FLOAT=yes
sudo apt-get update && sudo apt-get install libc6-dev-i386
make SERVER_CFLAGS='-Werror' 32bit
build-libc-malloc:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1
- name: make
run: make -j4 SERVER_CFLAGS='-Werror' MALLOC=libc USE_FAST_FLOAT=yes
run: make SERVER_CFLAGS='-Werror' MALLOC=libc
build-almalinux8-jemalloc:
runs-on: ubuntu-latest
@ -150,8 +105,8 @@ jobs:
- name: make
run: |
dnf -y install epel-release gcc gcc-c++ make procps-ng which
make -j4 SERVER_CFLAGS='-Werror' USE_FAST_FLOAT=yes
dnf -y install epel-release gcc make procps-ng which
make -j SERVER_CFLAGS='-Werror'
format-yaml:
runs-on: ubuntu-latest


@ -1,15 +1,10 @@
name: Clang Format Check
on:
push:
pull_request:
paths:
- 'src/**'
concurrency:
group: clang-${{ github.head_ref || github.ref }}
cancel-in-progress: true
jobs:
clang-format-check:
runs-on: ubuntu-latest


@ -4,13 +4,9 @@ name: "Codecov"
# where each PR needs to be compared against the coverage of the head commit
on: [push, pull_request]
concurrency:
group: codecov-${{ github.head_ref || github.ref }}
cancel-in-progress: true
jobs:
code-coverage:
runs-on: ubuntu-22.04
runs-on: ubuntu-latest
steps:
- name: Checkout repository


@ -4,11 +4,7 @@ on:
pull_request:
schedule:
# run weekly new vulnerability was added to the database
- cron: '0 3 * * 0'
concurrency:
group: codeql-${{ github.head_ref || github.ref }}
cancel-in-progress: true
- cron: '0 0 * * 0'
permissions:
contents: read


@ -3,17 +3,11 @@ name: Coverity Scan
on:
schedule:
# Run once daily, since below 500k LOC can have 21 builds per week, per https://scan.coverity.com/faq#frequency
- cron: '0 1 * * *'
- cron: '0 0 * * *'
# Support manual execution
workflow_dispatch:
concurrency:
group: coverity-${{ github.head_ref || github.ref }}
cancel-in-progress: true
permissions:
contents: read
jobs:
coverity:
if: github.repository == 'valkey-io/valkey'


@ -29,10 +29,6 @@ on:
description: "git branch or sha to use"
default: "unstable"
concurrency:
group: daily-${{ github.head_ref || github.ref }}
cancel-in-progress: true
permissions:
contents: read
@ -44,7 +40,7 @@ jobs:
(github.event_name == 'schedule' && github.repository == 'valkey-io/valkey') ||
(github.event_name == 'pull_request' && (contains(github.event.pull_request.labels.*.name, 'run-extra-tests') || github.event.pull_request.base.ref != 'unstable'))) &&
!contains(github.event.inputs.skipjobs, 'ubuntu')
timeout-minutes: 1440
timeout-minutes: 14400
steps:
- name: prep
if: github.event_name == 'workflow_dispatch'
@ -60,12 +56,12 @@ jobs:
repository: ${{ env.GITHUB_REPOSITORY }}
ref: ${{ env.GITHUB_HEAD_REF }}
- name: make
run: make all-with-unit-tests SERVER_CFLAGS='-Werror'
run: make all-with-unit-tests SERVER_CFLAGS='-Werror -DSERVER_TEST'
- name: testprep
run: sudo apt-get install tcl8.6 tclx
- name: test
if: true && !contains(github.event.inputs.skiptests, 'valkey')
run: ./runtest ${{ github.event_name != 'pull_request' && '--accurate' || '' }} --verbose --dump-logs ${{github.event.inputs.test_args}}
run: ./runtest --accurate --verbose --dump-logs ${{github.event.inputs.test_args}}
- name: module api test
if: true && !contains(github.event.inputs.skiptests, 'modules')
run: CFLAGS='-Werror' ./runtest-moduleapi --verbose --dump-logs ${{github.event.inputs.test_args}}
@ -75,7 +71,10 @@ jobs:
- name: cluster tests
if: true && !contains(github.event.inputs.skiptests, 'cluster')
run: ./runtest-cluster ${{github.event.inputs.cluster_test_args}}
- name: unittest
- name: legacy unit tests
if: true && !contains(github.event.inputs.skiptests, 'unittest')
run: ./src/valkey-server test all --accurate
- name: new unit tests
if: true && !contains(github.event.inputs.skiptests, 'unittest')
run: ./src/valkey-unit-tests --accurate
@ -86,8 +85,8 @@ jobs:
(github.event_name == 'schedule' && github.repository == 'valkey-io/valkey') ||
(github.event_name == 'pull_request' && (contains(github.event.pull_request.labels.*.name, 'run-extra-tests') || github.event.pull_request.base.ref != 'unstable'))) &&
!contains(github.event.inputs.skipjobs, 'fortify')
container: ubuntu:plucky
timeout-minutes: 1440
container: ubuntu:lunar
timeout-minutes: 14400
steps:
- name: prep
if: github.event_name == 'workflow_dispatch'
@ -106,12 +105,12 @@ jobs:
run: |
apt-get update && apt-get install -y make gcc-13
update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-13 100
make all-with-unit-tests CC=gcc OPT=-O3 SERVER_CFLAGS='-Werror -U_FORTIFY_SOURCE -D_FORTIFY_SOURCE=3'
make all-with-unit-tests CC=gcc OPT=-O3 SERVER_CFLAGS='-Werror -DSERVER_TEST -U_FORTIFY_SOURCE -D_FORTIFY_SOURCE=3'
- name: testprep
run: apt-get install -y tcl8.6 tclx procps
- name: test
if: true && !contains(github.event.inputs.skiptests, 'valkey')
run: ./runtest ${{ github.event_name != 'pull_request' && '--accurate' || '' }} --verbose --dump-logs ${{github.event.inputs.test_args}}
run: ./runtest --accurate --verbose --dump-logs ${{github.event.inputs.test_args}}
- name: module api test
if: true && !contains(github.event.inputs.skiptests, 'modules')
run: CFLAGS='-Werror' ./runtest-moduleapi --verbose --dump-logs ${{github.event.inputs.test_args}}
@ -121,7 +120,10 @@ jobs:
- name: cluster tests
if: true && !contains(github.event.inputs.skiptests, 'cluster')
run: ./runtest-cluster ${{github.event.inputs.cluster_test_args}}
- name: unittest
- name: legacy unit tests
if: true && !contains(github.event.inputs.skiptests, 'unittest')
run: ./src/valkey-server test all --accurate
- name: new unit tests
if: true && !contains(github.event.inputs.skiptests, 'unittest')
run: ./src/valkey-unit-tests --accurate
@ -132,7 +134,7 @@ jobs:
(github.event_name == 'schedule' && github.repository == 'valkey-io/valkey') ||
(github.event_name == 'pull_request' && (contains(github.event.pull_request.labels.*.name, 'run-extra-tests') || github.event.pull_request.base.ref != 'unstable'))) &&
!contains(github.event.inputs.skipjobs, 'malloc')
timeout-minutes: 1440
timeout-minutes: 14400
steps:
- name: prep
if: github.event_name == 'workflow_dispatch'
@ -153,7 +155,7 @@ jobs:
run: sudo apt-get install tcl8.6 tclx
- name: test
if: true && !contains(github.event.inputs.skiptests, 'valkey')
run: ./runtest ${{ github.event_name != 'pull_request' && '--accurate' || '' }} --verbose --dump-logs ${{github.event.inputs.test_args}}
run: ./runtest --accurate --verbose --dump-logs ${{github.event.inputs.test_args}}
- name: module api test
if: true && !contains(github.event.inputs.skiptests, 'modules')
run: CFLAGS='-Werror' ./runtest-moduleapi --verbose --dump-logs ${{github.event.inputs.test_args}}
@ -171,7 +173,7 @@ jobs:
(github.event_name == 'schedule' && github.repository == 'valkey-io/valkey') ||
(github.event_name == 'pull_request' && (contains(github.event.pull_request.labels.*.name, 'run-extra-tests') || github.event.pull_request.base.ref != 'unstable'))) &&
!contains(github.event.inputs.skipjobs, 'malloc')
timeout-minutes: 1440
timeout-minutes: 14400
steps:
- name: prep
if: github.event_name == 'workflow_dispatch'
@ -192,7 +194,7 @@ jobs:
run: sudo apt-get install tcl8.6 tclx
- name: test
if: true && !contains(github.event.inputs.skiptests, 'valkey')
run: ./runtest ${{ github.event_name != 'pull_request' && '--accurate' || '' }} --verbose --dump-logs ${{github.event.inputs.test_args}}
run: ./runtest --accurate --verbose --dump-logs ${{github.event.inputs.test_args}}
- name: module api test
if: true && !contains(github.event.inputs.skiptests, 'modules')
run: CFLAGS='-Werror' ./runtest-moduleapi --verbose --dump-logs ${{github.event.inputs.test_args}}
@ -210,7 +212,7 @@ jobs:
(github.event_name == 'schedule' && github.repository == 'valkey-io/valkey') ||
(github.event_name == 'pull_request' && (contains(github.event.pull_request.labels.*.name, 'run-extra-tests') || github.event.pull_request.base.ref != 'unstable'))) &&
!contains(github.event.inputs.skipjobs, '32bit')
timeout-minutes: 1440
timeout-minutes: 14400
steps:
- name: prep
if: github.event_name == 'workflow_dispatch'
@ -228,12 +230,12 @@ jobs:
- name: make
run: |
sudo apt-get update && sudo apt-get install libc6-dev-i386
make 32bit SERVER_CFLAGS='-Werror'
make 32bit SERVER_CFLAGS='-Werror -DSERVER_TEST'
- name: testprep
run: sudo apt-get install tcl8.6 tclx
- name: test
if: true && !contains(github.event.inputs.skiptests, 'valkey')
run: ./runtest ${{ github.event_name != 'pull_request' && '--accurate' || '' }} --verbose --dump-logs ${{github.event.inputs.test_args}}
run: ./runtest --accurate --verbose --dump-logs ${{github.event.inputs.test_args}}
- name: module api test
if: true && !contains(github.event.inputs.skiptests, 'modules')
run: |
@ -245,7 +247,10 @@ jobs:
- name: cluster tests
if: true && !contains(github.event.inputs.skiptests, 'cluster')
run: ./runtest-cluster ${{github.event.inputs.cluster_test_args}}
- name: unittest
- name: legacy unit tests
if: true && !contains(github.event.inputs.skiptests, 'unittest')
run: ./src/valkey-server test all --accurate
- name: new unit tests
if: true && !contains(github.event.inputs.skiptests, 'unittest')
run: ./src/valkey-unit-tests --accurate
@ -256,7 +261,7 @@ jobs:
(github.event_name == 'schedule' && github.repository == 'valkey-io/valkey') ||
(github.event_name == 'pull_request' && (contains(github.event.pull_request.labels.*.name, 'run-extra-tests') || github.event.pull_request.base.ref != 'unstable'))) &&
!contains(github.event.inputs.skipjobs, 'tls')
timeout-minutes: 1440
timeout-minutes: 14400
steps:
- name: prep
if: github.event_name == 'workflow_dispatch'
@ -281,7 +286,7 @@ jobs:
- name: test
if: true && !contains(github.event.inputs.skiptests, 'valkey')
run: |
./runtest ${{ github.event_name != 'pull_request' && '--accurate' || '' }} --verbose --dump-logs --tls --dump-logs ${{github.event.inputs.test_args}}
./runtest --accurate --verbose --dump-logs --tls --dump-logs ${{github.event.inputs.test_args}}
- name: module api test
if: true && !contains(github.event.inputs.skiptests, 'modules')
run: |
@ -302,7 +307,7 @@ jobs:
(github.event_name == 'schedule' && github.repository == 'valkey-io/valkey') ||
(github.event_name == 'pull_request' && (contains(github.event.pull_request.labels.*.name, 'run-extra-tests') || github.event.pull_request.base.ref != 'unstable'))) &&
!contains(github.event.inputs.skipjobs, 'tls')
timeout-minutes: 1440
timeout-minutes: 14400
steps:
- name: prep
if: github.event_name == 'workflow_dispatch'
@ -319,7 +324,7 @@ jobs:
ref: ${{ env.GITHUB_HEAD_REF }}
- name: make
run: |
make BUILD_TLS=yes SERVER_CFLAGS='-Werror' USE_FAST_FLOAT=yes
make BUILD_TLS=yes SERVER_CFLAGS='-Werror'
- name: testprep
run: |
sudo apt-get install tcl8.6 tclx tcl-tls
@ -327,7 +332,7 @@ jobs:
- name: test
if: true && !contains(github.event.inputs.skiptests, 'valkey')
run: |
./runtest ${{ github.event_name != 'pull_request' && '--accurate' || '' }} --verbose --dump-logs ${{github.event.inputs.test_args}}
./runtest --accurate --verbose --dump-logs ${{github.event.inputs.test_args}}
- name: module api test
if: true && !contains(github.event.inputs.skiptests, 'modules')
run: |
@ -348,7 +353,7 @@ jobs:
(github.event_name == 'schedule' && github.repository == 'valkey-io/valkey') ||
(github.event_name == 'pull_request' && (contains(github.event.pull_request.labels.*.name, 'run-extra-tests') || github.event.pull_request.base.ref != 'unstable'))) &&
!contains(github.event.inputs.skipjobs, 'iothreads')
timeout-minutes: 1440
timeout-minutes: 14400
steps:
- name: prep
if: github.event_name == 'workflow_dispatch'
@ -370,48 +375,10 @@ jobs:
run: sudo apt-get install tcl8.6 tclx
- name: test
if: true && !contains(github.event.inputs.skiptests, 'valkey')
run: ./runtest --io-threads --accurate --verbose --tags network --dump-logs ${{github.event.inputs.test_args}}
run: ./runtest --config io-threads 2 --config events-per-io-thread 0 --accurate --verbose --tags network --dump-logs ${{github.event.inputs.test_args}}
- name: cluster tests
if: true && !contains(github.event.inputs.skiptests, 'cluster')
run: ./runtest-cluster --io-threads ${{github.event.inputs.cluster_test_args}}
test-ubuntu-tls-io-threads:
runs-on: ubuntu-latest
if: |
(github.event_name == 'workflow_dispatch' ||
(github.event_name == 'schedule' && github.repository == 'valkey-io/valkey') ||
(github.event_name == 'pull_request' && (contains(github.event.pull_request.labels.*.name, 'run-extra-tests') || github.event.pull_request.base.ref != 'unstable'))) &&
!contains(github.event.inputs.skipjobs, 'tls') && !contains(github.event.inputs.skipjobs, 'iothreads')
timeout-minutes: 1440
steps:
- name: prep
if: github.event_name == 'workflow_dispatch'
run: |
echo "GITHUB_REPOSITORY=${{github.event.inputs.use_repo}}" >> $GITHUB_ENV
echo "GITHUB_HEAD_REF=${{github.event.inputs.use_git_ref}}" >> $GITHUB_ENV
echo "skipjobs: ${{github.event.inputs.skipjobs}}"
echo "skiptests: ${{github.event.inputs.skiptests}}"
echo "test_args: ${{github.event.inputs.test_args}}"
echo "cluster_test_args: ${{github.event.inputs.cluster_test_args}}"
- uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1
with:
repository: ${{ env.GITHUB_REPOSITORY }}
ref: ${{ env.GITHUB_HEAD_REF }}
- name: make
run: |
make BUILD_TLS=yes SERVER_CFLAGS='-Werror'
- name: testprep
run: |
sudo apt-get install tcl8.6 tclx tcl-tls
./utils/gen-test-certs.sh
- name: test
if: true && !contains(github.event.inputs.skiptests, 'valkey')
run: |
./runtest --io-threads --tls --accurate --verbose --tags network --dump-logs ${{github.event.inputs.test_args}}
- name: cluster tests
if: true && !contains(github.event.inputs.skiptests, 'cluster')
run: |
./runtest-cluster --io-threads --tls ${{github.event.inputs.cluster_test_args}}
run: ./runtest-cluster --config io-threads 2 --config events-per-io-thread 0 ${{github.event.inputs.cluster_test_args}}
test-ubuntu-reclaim-cache:
runs-on: ubuntu-latest
@ -420,7 +387,7 @@ jobs:
(github.event_name == 'schedule' && github.repository == 'valkey-io/valkey') ||
(github.event_name == 'pull_request' && (contains(github.event.pull_request.labels.*.name, 'run-extra-tests') || github.event.pull_request.base.ref != 'unstable'))) &&
!contains(github.event.inputs.skipjobs, 'specific')
timeout-minutes: 1440
timeout-minutes: 14400
steps:
- name: prep
if: github.event_name == 'workflow_dispatch'
@ -496,7 +463,7 @@ jobs:
(github.event_name == 'schedule' && github.repository == 'valkey-io/valkey') ||
(github.event_name == 'pull_request' && github.event.pull_request.base.ref != 'unstable')) &&
!contains(github.event.inputs.skipjobs, 'valgrind') && !contains(github.event.inputs.skiptests, 'valkey')
timeout-minutes: 1440
timeout-minutes: 14400
steps:
- name: prep
if: github.event_name == 'workflow_dispatch'
@ -512,7 +479,7 @@ jobs:
repository: ${{ env.GITHUB_REPOSITORY }}
ref: ${{ env.GITHUB_HEAD_REF }}
- name: make
run: make valgrind SERVER_CFLAGS='-Werror'
run: make valgrind SERVER_CFLAGS='-Werror -DSERVER_TEST'
- name: testprep
run: |
sudo apt-get update
@ -528,7 +495,7 @@ jobs:
(github.event_name == 'schedule' && github.repository == 'valkey-io/valkey') ||
(github.event_name == 'pull_request' && github.event.pull_request.base.ref != 'unstable')) &&
!contains(github.event.inputs.skipjobs, 'valgrind') && !(contains(github.event.inputs.skiptests, 'modules') && contains(github.event.inputs.skiptests, 'unittest'))
timeout-minutes: 1440
timeout-minutes: 14400
steps:
- name: prep
if: github.event_name == 'workflow_dispatch'
@ -544,7 +511,7 @@ jobs:
repository: ${{ env.GITHUB_REPOSITORY }}
ref: ${{ env.GITHUB_HEAD_REF }}
- name: make
run: make valgrind all-with-unit-tests SERVER_CFLAGS='-Werror'
run: make valgrind SERVER_CFLAGS='-Werror -DSERVER_TEST'
- name: testprep
run: |
sudo apt-get update
@ -555,7 +522,7 @@ jobs:
- name: unittest
if: true && !contains(github.event.inputs.skiptests, 'unittest')
run: |
valgrind --track-origins=yes --suppressions=./src/valgrind.sup --show-reachable=no --show-possibly-lost=no --leak-check=full --log-file=err.txt ./src/valkey-unit-tests --valgrind
valgrind --track-origins=yes --suppressions=./src/valgrind.sup --show-reachable=no --show-possibly-lost=no --leak-check=full --log-file=err.txt ./src/valkey-server test all --valgrind
if grep -q 0x err.txt; then cat err.txt; exit 1; fi
test-valgrind-no-malloc-usable-size-test:
@ -565,7 +532,7 @@ jobs:
(github.event_name == 'schedule' && github.repository == 'valkey-io/valkey') ||
(github.event_name == 'pull_request' && github.event.pull_request.base.ref != 'unstable')) &&
!contains(github.event.inputs.skipjobs, 'valgrind') && !contains(github.event.inputs.skiptests, 'valkey')
timeout-minutes: 1440
timeout-minutes: 14400
steps:
- name: prep
if: github.event_name == 'workflow_dispatch'
@ -581,7 +548,7 @@ jobs:
repository: ${{ env.GITHUB_REPOSITORY }}
ref: ${{ env.GITHUB_HEAD_REF }}
- name: make
run: make valgrind CFLAGS="-DNO_MALLOC_USABLE_SIZE" SERVER_CFLAGS='-Werror'
run: make valgrind CFLAGS="-DNO_MALLOC_USABLE_SIZE -DSERVER_TEST" SERVER_CFLAGS='-Werror'
- name: testprep
run: |
sudo apt-get update
@ -597,7 +564,7 @@ jobs:
(github.event_name == 'schedule' && github.repository == 'valkey-io/valkey') ||
(github.event_name == 'pull_request' && github.event.pull_request.base.ref != 'unstable')) &&
!contains(github.event.inputs.skipjobs, 'valgrind') && !(contains(github.event.inputs.skiptests, 'modules') && contains(github.event.inputs.skiptests, 'unittest'))
timeout-minutes: 1440
timeout-minutes: 14400
steps:
- name: prep
if: github.event_name == 'workflow_dispatch'
@ -613,7 +580,7 @@ jobs:
repository: ${{ env.GITHUB_REPOSITORY }}
ref: ${{ env.GITHUB_HEAD_REF }}
- name: make
run: make valgrind all-with-unit-tests CFLAGS="-DNO_MALLOC_USABLE_SIZE" SERVER_CFLAGS='-Werror'
run: make valgrind CFLAGS="-DNO_MALLOC_USABLE_SIZE -DSERVER_TEST" SERVER_CFLAGS='-Werror'
- name: testprep
run: |
sudo apt-get update
@ -624,7 +591,7 @@ jobs:
- name: unittest
if: true && !contains(github.event.inputs.skiptests, 'unittest')
run: |
valgrind --track-origins=yes --suppressions=./src/valgrind.sup --show-reachable=no --show-possibly-lost=no --leak-check=full --log-file=err.txt ./src/valkey-unit-tests --valgrind
valgrind --track-origins=yes --suppressions=./src/valgrind.sup --show-reachable=no --show-possibly-lost=no --leak-check=full --log-file=err.txt ./src/valkey-server test all --valgrind
if grep -q 0x err.txt; then cat err.txt; exit 1; fi
test-sanitizer-address:
@ -634,9 +601,8 @@ jobs:
(github.event_name == 'schedule' && github.repository == 'valkey-io/valkey') ||
(github.event_name == 'pull_request' && github.event.pull_request.base.ref != 'unstable')) &&
!contains(github.event.inputs.skipjobs, 'sanitizer')
timeout-minutes: 1440
timeout-minutes: 14400
strategy:
fail-fast: false
matrix:
compiler: [gcc, clang]
env:
@ -656,7 +622,7 @@ jobs:
repository: ${{ env.GITHUB_REPOSITORY }}
ref: ${{ env.GITHUB_HEAD_REF }}
- name: make
run: make all-with-unit-tests OPT=-O3 SANITIZER=address SERVER_CFLAGS='-Werror'
run: make all-with-unit-tests OPT=-O3 SANITIZER=address SERVER_CFLAGS='-DSERVER_TEST -Werror'
- name: testprep
run: |
sudo apt-get update
@ -673,7 +639,10 @@ jobs:
- name: cluster tests
if: true && !contains(github.event.inputs.skiptests, 'cluster')
run: ./runtest-cluster ${{github.event.inputs.cluster_test_args}}
- name: unittest
- name: legacy unit tests
if: true && !contains(github.event.inputs.skiptests, 'unittest')
run: ./src/valkey-server test all
- name: new unit tests
if: true && !contains(github.event.inputs.skiptests, 'unittest')
run: ./src/valkey-unit-tests
@ -684,9 +653,8 @@ jobs:
(github.event_name == 'schedule' && github.repository == 'valkey-io/valkey') ||
(github.event_name == 'pull_request' && github.event.pull_request.base.ref != 'unstable')) &&
!contains(github.event.inputs.skipjobs, 'sanitizer')
timeout-minutes: 1440
timeout-minutes: 14400
strategy:
fail-fast: false
matrix:
compiler: [gcc, clang]
env:
@ -706,7 +674,7 @@ jobs:
repository: ${{ env.GITHUB_REPOSITORY }}
ref: ${{ env.GITHUB_HEAD_REF }}
- name: make
run: make all-with-unit-tests OPT=-O3 SANITIZER=undefined SERVER_CFLAGS='-Werror' LUA_DEBUG=yes # we (ab)use this flow to also check Lua C API violations
run: make all-with-unit-tests OPT=-O3 SANITIZER=undefined SERVER_CFLAGS='-DSERVER_TEST -Werror' LUA_DEBUG=yes # we (ab)use this flow to also check Lua C API violations
- name: testprep
run: |
sudo apt-get update
@ -723,56 +691,13 @@ jobs:
- name: cluster tests
if: true && !contains(github.event.inputs.skiptests, 'cluster')
run: ./runtest-cluster ${{github.event.inputs.cluster_test_args}}
- name: unittest
- name: legacy unit tests
if: true && !contains(github.event.inputs.skiptests, 'unittest')
run: ./src/valkey-server test all --accurate
- name: new unit tests
if: true && !contains(github.event.inputs.skiptests, 'unittest')
run: ./src/valkey-unit-tests --accurate
test-sanitizer-force-defrag:
runs-on: ubuntu-latest
if: |
(github.event_name == 'workflow_dispatch' ||
(github.event_name == 'schedule' && github.repository == 'valkey-io/valkey') ||
(github.event_name == 'pull_request' && github.event.pull_request.base.ref != 'unstable')) &&
!contains(github.event.inputs.skipjobs, 'sanitizer')
timeout-minutes: 1440
strategy:
fail-fast: false
steps:
- name: prep
if: github.event_name == 'workflow_dispatch'
run: |
echo "GITHUB_REPOSITORY=${{github.event.inputs.use_repo}}" >> $GITHUB_ENV
echo "GITHUB_HEAD_REF=${{github.event.inputs.use_git_ref}}" >> $GITHUB_ENV
echo "skipjobs: ${{github.event.inputs.skipjobs}}"
echo "skiptests: ${{github.event.inputs.skiptests}}"
echo "test_args: ${{github.event.inputs.test_args}}"
echo "cluster_test_args: ${{github.event.inputs.cluster_test_args}}"
- uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1
with:
repository: ${{ env.GITHUB_REPOSITORY }}
ref: ${{ env.GITHUB_HEAD_REF }}
- name: make
run: make all-with-unit-tests OPT=-O3 SANITIZER=address DEBUG_FORCE_DEFRAG=yes USE_JEMALLOC=no SERVER_CFLAGS='-Werror'
- name: testprep
run: |
sudo apt-get update
sudo apt-get install tcl8.6 tclx -y
- name: test
if: true && !contains(github.event.inputs.skiptests, 'valkey')
run: ./runtest --accurate --verbose --dump-logs ${{github.event.inputs.test_args}}
- name: module api test
if: true && !contains(github.event.inputs.skiptests, 'modules')
run: CFLAGS='-Werror' ./runtest-moduleapi --verbose --dump-logs ${{github.event.inputs.test_args}}
- name: sentinel tests
if: true && !contains(github.event.inputs.skiptests, 'sentinel')
run: ./runtest-sentinel ${{github.event.inputs.cluster_test_args}}
- name: cluster tests
if: true && !contains(github.event.inputs.skiptests, 'cluster')
run: ./runtest-cluster ${{github.event.inputs.cluster_test_args}}
- name: unittest
if: true && !contains(github.event.inputs.skiptests, 'unittest')
run: ./src/valkey-unit-tests
test-rpm-distros-jemalloc:
if: |
(github.event_name == 'workflow_dispatch' ||
@ -801,7 +726,7 @@ jobs:
runs-on: ubuntu-latest
container: ${{ matrix.container }}
timeout-minutes: 1440
timeout-minutes: 14400
steps:
- name: prep
@ -828,7 +753,7 @@ jobs:
run: dnf -y install tcl tcltls
- name: test
if: true && !contains(github.event.inputs.skiptests, 'valkey')
run: ./runtest ${{ github.event_name != 'pull_request' && '--accurate' || '' }} --verbose --dump-logs ${{github.event.inputs.test_args}}
run: ./runtest --accurate --verbose --dump-logs ${{github.event.inputs.test_args}}
- name: module api test
if: true && !contains(github.event.inputs.skiptests, 'modules')
run: CFLAGS='-Werror' ./runtest-moduleapi --verbose --dump-logs ${{github.event.inputs.test_args}}
@ -867,7 +792,7 @@ jobs:
runs-on: ubuntu-latest
container: ${{ matrix.container }}
timeout-minutes: 1440
timeout-minutes: 14400
steps:
- name: prep
@ -897,7 +822,7 @@ jobs:
- name: test
if: true && !contains(github.event.inputs.skiptests, 'valkey')
run: |
./runtest ${{ github.event_name != 'pull_request' && '--accurate' || '' }} --verbose --dump-logs --tls-module --dump-logs ${{github.event.inputs.test_args}}
./runtest --accurate --verbose --dump-logs --tls-module --dump-logs ${{github.event.inputs.test_args}}
- name: module api test
if: true && !contains(github.event.inputs.skiptests, 'modules')
run: |
@ -939,7 +864,7 @@ jobs:
runs-on: ubuntu-latest
container: ${{ matrix.container }}
timeout-minutes: 1440
timeout-minutes: 14400
steps:
- name: prep
@ -990,7 +915,7 @@ jobs:
(github.event_name == 'schedule' && github.repository == 'valkey-io/valkey') ||
(github.event_name == 'pull_request' && (contains(github.event.pull_request.labels.*.name, 'run-extra-tests') || github.event.pull_request.base.ref != 'unstable'))) &&
!contains(github.event.inputs.skipjobs, 'macos') && !(contains(github.event.inputs.skiptests, 'valkey') && contains(github.event.inputs.skiptests, 'modules'))
timeout-minutes: 1440
timeout-minutes: 14400
steps:
- name: prep
if: github.event_name == 'workflow_dispatch'
@ -1009,7 +934,7 @@ jobs:
run: make SERVER_CFLAGS='-Werror'
- name: test
if: true && !contains(github.event.inputs.skiptests, 'valkey')
run: ./runtest ${{ github.event_name != 'pull_request' && '--accurate' || '' }} --verbose --clients 1 --no-latency --dump-logs ${{github.event.inputs.test_args}}
run: ./runtest --accurate --verbose --tags -ipv6 --clients 1 --no-latency --dump-logs ${{github.event.inputs.test_args}}
- name: module api test
if: true && !contains(github.event.inputs.skiptests, 'modules')
run: CFLAGS='-Werror' ./runtest-moduleapi --verbose --clients 1 --no-latency --dump-logs ${{github.event.inputs.test_args}}
@ -1021,7 +946,7 @@ jobs:
(github.event_name == 'schedule' && github.repository == 'valkey-io/valkey') ||
(github.event_name == 'pull_request' && (contains(github.event.pull_request.labels.*.name, 'run-extra-tests') || github.event.pull_request.base.ref != 'unstable'))) &&
!contains(github.event.inputs.skipjobs, 'macos') && !contains(github.event.inputs.skiptests, 'sentinel')
timeout-minutes: 1440
timeout-minutes: 14400
steps:
- name: prep
if: github.event_name == 'workflow_dispatch'
@ -1049,7 +974,7 @@ jobs:
(github.event_name == 'schedule' && github.repository == 'valkey-io/valkey') ||
(github.event_name == 'pull_request' && (contains(github.event.pull_request.labels.*.name, 'run-extra-tests') || github.event.pull_request.base.ref != 'unstable'))) &&
!contains(github.event.inputs.skipjobs, 'macos') && !contains(github.event.inputs.skiptests, 'cluster')
timeout-minutes: 1440
timeout-minutes: 14400
steps:
- name: prep
if: github.event_name == 'workflow_dispatch'
@ -1072,16 +997,15 @@ jobs:
build-macos:
strategy:
fail-fast: false
matrix:
os: [macos-13, macos-14]
os: [macos-12, macos-14]
runs-on: ${{ matrix.os }}
if: |
(github.event_name == 'workflow_dispatch' ||
(github.event_name == 'schedule' && github.repository == 'valkey-io/valkey') ||
(github.event_name == 'pull_request' && (contains(github.event.pull_request.labels.*.name, 'run-extra-tests') || github.event.pull_request.base.ref != 'unstable'))) &&
!contains(github.event.inputs.skipjobs, 'macos')
timeout-minutes: 1440
timeout-minutes: 14400
steps:
- uses: maxim-lobanov/setup-xcode@60606e260d2fc5762a71e64e74b2174e8ea3c8bd # v1.6.0
with:
@ -1100,16 +1024,16 @@ jobs:
repository: ${{ env.GITHUB_REPOSITORY }}
ref: ${{ env.GITHUB_HEAD_REF }}
- name: make
run: make SERVER_CFLAGS='-Werror'
run: make SERVER_CFLAGS='-Werror -DSERVER_TEST'
test-freebsd:
runs-on: macos-13
runs-on: macos-12
if: |
(github.event_name == 'workflow_dispatch' ||
(github.event_name == 'schedule' && github.repository == 'valkey-io/valkey') ||
(github.event_name == 'pull_request' && (contains(github.event.pull_request.labels.*.name, 'run-extra-tests') || github.event.pull_request.base.ref != 'unstable'))) &&
!contains(github.event.inputs.skipjobs, 'freebsd')
timeout-minutes: 1440
timeout-minutes: 14400
steps:
- name: prep
if: github.event_name == 'workflow_dispatch'
@ -1162,7 +1086,7 @@ jobs:
run: apk add tcl procps tclx
- name: test
if: true && !contains(github.event.inputs.skiptests, 'valkey')
run: ./runtest ${{ github.event_name != 'pull_request' && '--accurate' || '' }} --verbose --dump-logs ${{github.event.inputs.test_args}}
run: ./runtest --accurate --verbose --dump-logs ${{github.event.inputs.test_args}}
- name: module api test
if: true && !contains(github.event.inputs.skiptests, 'modules')
run: CFLAGS='-Werror' ./runtest-moduleapi --verbose --dump-logs ${{github.event.inputs.test_args}}
@ -1203,7 +1127,7 @@ jobs:
run: apk add tcl procps tclx
- name: test
if: true && !contains(github.event.inputs.skiptests, 'valkey')
run: ./runtest ${{ github.event_name != 'pull_request' && '--accurate' || '' }} --verbose --dump-logs ${{github.event.inputs.test_args}}
run: ./runtest --accurate --verbose --dump-logs ${{github.event.inputs.test_args}}
- name: module api test
if: true && !contains(github.event.inputs.skiptests, 'modules')
run: CFLAGS='-Werror' ./runtest-moduleapi --verbose --dump-logs ${{github.event.inputs.test_args}}
@ -1216,11 +1140,11 @@ jobs:
reply-schemas-validator:
runs-on: ubuntu-latest
timeout-minutes: 1440
timeout-minutes: 14400
if: |
(github.event_name == 'workflow_dispatch' ||
(github.event_name == 'schedule' && github.repository == 'valkey-io/valkey') ||
(github.event_name == 'pull_request' && (contains(github.event.pull_request.labels.*.name, 'run-extra-tests') || github.event.pull_request.base.ref != 'unstable'))) &&
(github.event_name == 'pull_request' && github.event.pull_request.base.ref != 'unstable')) &&
!contains(github.event.inputs.skipjobs, 'reply-schema')
steps:
- name: prep
@ -1262,7 +1186,7 @@ jobs:
notify-about-job-results:
runs-on: ubuntu-latest
if: always() && github.event_name == 'schedule' && github.repository == 'valkey-io/valkey'
needs: [test-ubuntu-jemalloc, test-ubuntu-jemalloc-fortify, test-ubuntu-libc-malloc, test-ubuntu-no-malloc-usable-size, test-ubuntu-32bit, test-ubuntu-tls, test-ubuntu-tls-no-tls, test-ubuntu-io-threads, test-ubuntu-tls-io-threads, test-ubuntu-reclaim-cache, test-valgrind-test, test-valgrind-misc, test-valgrind-no-malloc-usable-size-test, test-valgrind-no-malloc-usable-size-misc, test-sanitizer-address, test-sanitizer-undefined, test-sanitizer-force-defrag, test-rpm-distros-jemalloc, test-rpm-distros-tls-module, test-rpm-distros-tls-module-no-tls, test-macos-latest, test-macos-latest-sentinel, test-macos-latest-cluster, build-macos, test-freebsd, test-alpine-jemalloc, test-alpine-libc-malloc, reply-schemas-validator]
needs: [test-ubuntu-jemalloc, test-ubuntu-jemalloc-fortify, test-ubuntu-libc-malloc, test-ubuntu-no-malloc-usable-size, test-ubuntu-32bit, test-ubuntu-tls, test-ubuntu-tls-no-tls, test-ubuntu-io-threads, test-ubuntu-reclaim-cache, test-valgrind-test, test-valgrind-misc, test-valgrind-no-malloc-usable-size-test, test-valgrind-no-malloc-usable-size-misc, test-sanitizer-address, test-sanitizer-undefined, test-rpm-distros-jemalloc, test-rpm-distros-tls-module, test-rpm-distros-tls-module-no-tls, test-macos-latest, test-macos-latest-sentinel, test-macos-latest-cluster, build-macos, test-freebsd, test-alpine-jemalloc, test-alpine-libc-malloc, reply-schemas-validator]
steps:
- name: Collect job status
run: |


@ -4,11 +4,7 @@ on:
pull_request:
push:
schedule:
- cron: '0 2 * * *'
concurrency:
group: external-${{ github.head_ref || github.ref }}
cancel-in-progress: true
- cron: '0 0 * * *'
permissions:
contents: read
@ -17,7 +13,7 @@ jobs:
test-external-standalone:
runs-on: ubuntu-latest
if: github.event_name != 'schedule' || github.repository == 'valkey-io/valkey'
timeout-minutes: 1440
timeout-minutes: 14400
steps:
- uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1
- name: Build
@ -34,7 +30,7 @@ jobs:
--tags -slow
- name: Archive server log
if: ${{ failure() }}
uses: actions/upload-artifact@6f51ac03b9356f520e9adb1b1b7802705f340c2b # v4.5.0
uses: actions/upload-artifact@a8a3f3ad30e3422c9c7b888a15615d19a852ae32 # v3.1.3
with:
name: test-external-standalone-log
path: external-server.log
@ -42,7 +38,7 @@ jobs:
test-external-cluster:
runs-on: ubuntu-latest
if: github.event_name != 'schedule' || github.repository == 'valkey-io/valkey'
timeout-minutes: 1440
timeout-minutes: 14400
steps:
- uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1
- name: Build
@ -62,7 +58,7 @@ jobs:
--tags -slow
- name: Archive server log
if: ${{ failure() }}
uses: actions/upload-artifact@6f51ac03b9356f520e9adb1b1b7802705f340c2b # v4.5.0
uses: actions/upload-artifact@a8a3f3ad30e3422c9c7b888a15615d19a852ae32 # v3.1.3
with:
name: test-external-cluster-log
path: external-server.log
@ -70,7 +66,7 @@ jobs:
test-external-nodebug:
runs-on: ubuntu-latest
if: github.event_name != 'schedule' || github.repository == 'valkey-io/valkey'
timeout-minutes: 1440
timeout-minutes: 14400
steps:
- uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1
- name: Build
@ -86,7 +82,7 @@ jobs:
--tags "-slow -needs:debug"
- name: Archive server log
if: ${{ failure() }}
uses: actions/upload-artifact@6f51ac03b9356f520e9adb1b1b7802705f340c2b # v4.5.0
uses: actions/upload-artifact@a8a3f3ad30e3422c9c7b888a15615d19a852ae32 # v3.1.3
with:
name: test-external-nodebug-log
path: external-server.log


@ -8,10 +8,6 @@ on:
paths:
- 'src/commands/*.json'
concurrency:
group: reply-schemas-linter-${{ github.head_ref || github.ref }}
cancel-in-progress: true
permissions:
contents: read


@ -9,10 +9,6 @@ on:
push:
pull_request:
concurrency:
group: spellcheck-${{ github.head_ref || github.ref }}
cancel-in-progress: true
permissions:
contents: read
@ -26,7 +22,7 @@ jobs:
uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1
- name: Install typos
uses: taiki-e/install-action@fe9759bf4432218c779595708e80a1aadc85cedc # v2.46.10
uses: taiki-e/install-action@cd5df4de2e75f3b819ba55f780f7bb8cd4a05a41 # v2.32.2
with:
tool: typos

.gitignore (vendored, 6 lines changed)

@ -48,9 +48,3 @@ redis.code-workspace
nodes*.conf
tests/cluster/tmp/*
tests/rdma/rdma-test
tags
build/
build-debug/
build-release/
cmake-build-debug/
cmake-build-release/

00-RELEASENOTES (new file, 289 lines)

@ -0,0 +1,289 @@
Valkey 8.0 release notes
========================
--------------------------------------------------------------------------------
Upgrade urgency levels:
LOW: No need to upgrade unless there are new features you want to use.
MODERATE: Program an upgrade of the server, but it's not urgent.
HIGH: There is a critical bug that may affect a subset of users. Upgrade!
CRITICAL: There is a critical bug affecting MOST USERS. Upgrade ASAP.
SECURITY: There are security fixes in the release.
--------------------------------------------------------------------------------
================================================================================
Valkey 8.0.0 RC1 - Released Thu 1 Aug 2024
================================================================================
Upgrade urgency LOW: This is the first release candidate of Valkey 8.0, with
performance, reliability, and observability improvements. It includes asynchronous
I/O threading, better cluster scaling reliability, dual primary-replica channel
for faster full synchronization, per-slot metrics for resource management, and
experimental RDMA support for increased throughput and reduced latency. This
release is fully compatible with Redis OSS 7.2.4.
Un-deprecated Commands - Cluster
================================
* Un-deprecate the `CLUSTER SLOTS` command. (#536)
New/Modified Commands - Core
============================
* Add `SCRIPT SHOW` sub-command to dump scripts via SHA1. (#617)
* Add `NOSCORES` option to `ZSCAN` command. (#324)
* Add `NOVALUES` option to `HSCAN` command. (Redis#12765)
* Expose Lua `os.clock()` API to allow scripts to determine how long the
script has been executing. (Redis#12971)
* Implement `CLIENT KILL MAXAGE <MAXAGE>`. (Redis#12299)
* Allow running `WAITAOF` in scripts, remove `NOSCRIPT` flag. (Redis#12977)
* Support `XREAD[GROUP]` with `BLOCK` option in scripts. (Redis#12596)
* Introduce `+` as a special ID for the last item in a stream for the `XREAD`
command. (Redis#7388, Redis#13117)
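
A short valkey-cli sketch of the commands and options listed above (key names and the script SHA are illustrative; replies abridged):

    127.0.0.1:6379> HSCAN myhash 0 NOVALUES     # field names only, no values
    127.0.0.1:6379> ZSCAN myzset 0 NOSCORES     # members only, no scores
    127.0.0.1:6379> SCRIPT SHOW e0e1f9fabfc9d4800c877a703b823ac0578ff831
    127.0.0.1:6379> CLIENT KILL MAXAGE 3600     # kill connections older than 3600s
    127.0.0.1:6379> XREAD COUNT 1 STREAMS mystream +   # '+' reads the stream's last entry
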
New/Modified Commands - Cluster
===============================
* Introduce `CLUSTER SLOT-STATS` command which allows tracking of per slot
metrics for key count, CPU utilization, network bytes in, and network
bytes out. (#20, #351)
* Add `TIMEOUT` option to `CLUSTER SETSLOT` command. (#556, #445)
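
A hedged sketch of the per-slot metrics command; the SLOTSRANGE and ORDERBY forms and the key-count metric name follow the slot-stats design discussion, and exact spellings may differ in this release candidate:

    127.0.0.1:6379> CLUSTER SLOT-STATS SLOTSRANGE 0 2
    127.0.0.1:6379> CLUSTER SLOT-STATS ORDERBY key-count LIMIT 3 DESC
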
New/Modified Metrics - Core
===========================
* Introduce per-client metrics for network traffic and command execution
in `CLIENT LIST` and `CLIENT INFO`. (#327)
* Add metrics for DB memory overhead and rehashing status to `INFO MEMORY`
and `MEMORY STATS`. (Redis#12913)
* Add `pubsub_clients` metric to `INFO CLIENTS`. (Redis#12849)
* Add metrics for client buffer limit disconnections to `INFO`. (Redis#12476)
* Add metrics for monitoring clients using `WATCH` command and watched keys.
(Redis#12966)
* Add allocator muzzy memory metrics to `INFO MEMORY` and `MEMORY STATS`.
(Redis#12996)
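
Several of these surface in existing INFO sections. An abridged, illustrative INFO CLIENTS reply showing the new counters (values are made up):

    127.0.0.1:6379> INFO clients
    # Clients
    connected_clients:3
    pubsub_clients:1
    watching_clients:0
    total_watched_keys:0
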
New Features - Core
===================
* Support replica redirect for read/write operations to the primary in
standalone mode. (#325)
* Add server config for cluster blacklist TTL. (#738)
* Add availability zone server config. (#700)
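
A sketch of the standalone redirect opt-in, assuming the CLIENT CAPA handshake introduced with #325 and the availability-zone directive from #700 (both names hedged; the REDIRECT error shape is illustrative):

    # On a replica: opt in, then writes redirect instead of failing
    127.0.0.1:6380> CLIENT CAPA redirect
    OK
    127.0.0.1:6380> SET key val
    (error) REDIRECT 127.0.0.1:6379
    # On any node: tag the server's zone for AZ-aware clients
    127.0.0.1:6379> CONFIG SET availability-zone us-east-1a
    OK
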
New Features - Cluster
======================
* Support IPv4 and IPv6 dual stack and client-specific IPs in clusters. (#736)
* Support `BY/GET` options for `SORT/SORT_RO` in cluster mode when pattern
implies a single slot. (Redis#12728)
Compatibility Improvements - Core
=================================
* Derive RDB and module child process names based on server start name for
compatibility. (#454)
* Update server identity in `serverPanic` output based on `extended-redis-compatibility`
config. (#415)
Compatibility Improvements - Sentinel
=====================================
* Accept `redis-sentinel` to start Valkey in sentinel mode. (#731)
Performance/Efficiency Improvements - Core
==========================================
* Introduce dual channel for more efficient full sync replication; a config
sketch follows at the end of this section. (#60)
* Introduce async IO threading for improved multi-threaded performance.
(#763, #758)
* Embed key directly in main dictionary entry for improved memory efficiency.
(#541)
* Use thread-local storage to reduce atomic contention in updating memory
metrics. (#674)
* Reduce redundant calls to `prepareClientToWrite` for continuous `addReply*`.
(#670)
* Optimize the logic for checking conversion to skip list during `ZADD` operations.
(#806)
* Optimize `sdsfree` with `zfree_with_size` to avoid redundant size calculation.
(#453)
* Combine events to eliminate redundant `kevent(2)` calls. (#638)
* Introduce shared query buffer for client reads to reduce memory usage. (#258)
* Optimize CRC64 performance for large batches by processing bytes in parallel.
(#350)
* Use `SOCK_NONBLOCK` to reduce system calls for outgoing connections. (#293)
* Enable `accept4()` detection on specific versions of various platforms. (#294)
* Convert CRC16 slot table to fixed-size array for improved memory efficiency.
(Redis#13112)
* Run `SCRIPT FLUSH` truly asynchronously and close Lua interpreter in a
background thread. (Redis#13087)
* Optimize `DEL` command to avoid redundant deletions for expired keys. (Redis#13080)
* Improve defragmentation for large bins to enhance memory efficiency. (Redis#12996)
* Optimize hash table resizing to include empty dictionaries. (Redis#12819)
* Reduce performance impact of dictionary rehashing by optimizing bucket processing.
(Redis#12899)
* Optimize performance for simultaneous client `[P|S]UNSUBSCRIBE`. (Redis#12838)
* Optimize CPU cache efficiency during dictionary rehashing. (Redis#5692)
* Optimize `ZRANGE` offset location from linear search to skip list jump. (Redis#12450)
* Fix `aeSetDontWait` timing to avoid unnecessary waits in `aeProcessEvent`. (Redis#12068)
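The dual channel full sync mentioned at the top of this section is opt-in; a
minimal sketch, assuming the Valkey 8 config name
`dual-channel-replication-enabled` (set on both the primary and the replica):

    valkey> CONFIG SET dual-channel-replication-enabled yes
    OK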
Performance/Efficiency Improvements - Cluster
=============================================
* Add lightweight cluster message header for Pub/Sub messages. (#654)
* Minor performance improvement in Valkey cluster by avoiding initializing the
key buffer in `getKeysResult`. (#631)
* Cache `CLUSTER SLOTS` response to improve throughput and reduce latency. (#53)
* Replace slots_to_channels radix tree with slot-specific dictionaries for
shard channels. (Redis#12804)
* Optimize `KEYS` command when pattern includes hashtag and implies a single
slot. (Redis#12754)
* Optimize `SCAN` command with `MATCH` when pattern implies a single slot.
(Redis#12536)
* Replace cluster metadata with slot specific dictionaries to reduce memory
usage when using Valkey cluster. (Redis#11695, Redis#12704)
Reliability Improvements - Core
===============================
* Limit tracking of custom errors (e.g. from Lua) while allowing normal errors
to be tracked. (#500, Redis#13141)
* Manage maximum number of new connections per cycle to prevent connection
storms. (Redis#12178)
Reliability Improvements - Cluster
==================================
* Reduce fail-over time in Valkey cluster when multiple sequential fail-overs
occur by resetting `failover_auth_time` when the new primary node goes
down. (#782)
* Restrict node failure marking to primaries with assigned slots. (#634)
* Enhance cluster meet reliability under link failures. (#461)
* Improve reliability of slot migration in Valkey clusters. (#445)
Usability Improvements - Core
=============================
* Re-brand and refine latency report messages. (#644)
* Optimize `ACL LOAD` to avoid disconnecting clients whose users are unchanged.
(Redis#12171)
Usability Improvements - Cluster
================================
* Adjust log levels for various cluster-related logs to improve clarity. (#633)
* Maintain deterministic ordering of replica(s) in `CLUSTER SLOTS` response. (#265)
Usability Improvements - CLI
============================
* Add prompt message when Ctrl-C is pressed in `valkey-cli`. (#702)
* Keep an in-memory history of all commands in `valkey-cli` so that sensitive
commands can be shown within the same session. (Redis#12862)
Module Improvements - Core
==========================
* Add `ValkeyModule_TryCalloc()` and `ValkeyModule_TryRealloc()` to handle
allocation failures gracefully. (Redis#12985)
* Make `ValkeyModule_Yield` thread-safe by handling events in the main thread.
(Redis#12905)
* Allow modules to declare new ACL categories. (Redis#12486)
Module Improvements - Cluster
=============================
* Add API `ValkeyModule_ClusterKeySlot` and `ValkeyModule_ClusterCanonicalKeyNameInSlot`.
(Redis#13069)
Behavior Changes - Core
=======================
* Re-brand the Lua debugger. (#603)
* Change default pidfile from `redis.pid` to `valkey.pid`. (#378)
* Abort transactions on nested `MULTI` or `WATCH` commands (see the example
after this list). (#723)
* Ensure keys that match the `SCAN` filter are not lazily expired and return
an error for invalid types. (#501)
* Rename `redis` in AOF logs and proc title to `valkey-aof-rewrite`. (#393)
* Change default syslog-ident from `redis` to `valkey`. (#390)
* Update `Redis` to `Valkey` in `serverLog` messages in server.c file. (#231)
* Remove `Redis` from various error reply messages. See GitHub PR for more
details. (#206)
* Reject empty strings for configs `dir`, `dbfilename`, and `cluster-config-file`.
(#636)
* Change key-spec flag from `RW` to `OW` for `SINTERSTORE` command. (Redis#12917)
* Return more precise error messages for some cases verifying keys during script
execution. (Redis#12707)
* Return errors for `BITCOUNT` and `BITPOS` with non-existing keys or invalid
arguments instead of zero. (Redis#11734)
* Validate `BITCOUNT` arguments before key existence check. (Redis#12394)
* Redact ACL username information and mark `*-key-file-pass` configs as
sensitive. (Redis#12860)
* Allow `MULTI/EXEC` to use a small amount of additional memory beyond the
used-memory limit. (Redis#12961)
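To illustrate the nested `MULTI` change above (an illustrative session; exact
error text may differ slightly):

    valkey> MULTI
    OK
    valkey> MULTI
    (error) ERR MULTI calls can not be nested
    valkey> EXEC
    (error) EXECABORT Transaction discarded because of previous errors.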
Behavior Changes - Cluster
==========================
* Allow `CLUSTER NODES/INFO/MYID/MYSHARDID` during loading state. (#596)
* Make cluster replicas return `ASK` and `TRYAGAIN` during slot migration. (#495)
Behavior Changes - Sentinel
===========================
* Replace `master-reboot-down-after-period` with `primary-reboot-down-after-period`
in `sentinel.conf`. (#647)
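For example, an existing sentinel.conf line is renamed as follows (a sketch;
`myprimary` is a placeholder for your monitored primary name):

    # Before
    sentinel master-reboot-down-after-period myprimary 0
    # After (Valkey 8)
    sentinel primary-reboot-down-after-period myprimary 0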
Bug Fixes - Core
================
* Fix a bug that caused LRU/LFU inconsistencies for some integer objects. (#250)
* Fix a bug where Valkey may use a sub-optimal encoding for some data types.
(Redis#13148)
* Fix propagation of `entries_read` by calling `streamPropagateGroupID`
unconditionally. (Redis#12898)
* Fix race condition issues between the main thread and module threads.
(Redis#12817)
* Wake blocked clients ASAP in next `beforeSleep` for `WAITAOF`. (Redis#12627)
* Fix crash in crash-report and improve thread management with RW locks.
(Redis#12623)
Bug Fixes - Cluster
===================
* Fix a bug where a shard returns incorrect slot information in the
`CLUSTER SHARDS` command on primary failure. (#790)
* Allow module authentication to succeed when the cluster is down. (#693)
* Fix `PONG` message processing for primary-ship tracking during fail-overs.
(Redis#13055)
* Prevent double freeing of cluster link with `DEBUG CLUSTERLINK KILL`.
(Redis#12930)
* Unsubscribe all clients from replica for shard channel if the primary
ownership changes. (Redis#12577)
Bug Fixes - Tooling
===================
* Fix `valkey-check-aof` misidentifying data in manifest format as MP-AOF.
(Redis#12958)
* Fix `valkey-cli` to respect the `--count` option without requiring
`--pattern`. (Redis#13092)
* Fix `valkey-benchmark` to distribute operations across all slots owned by
a node in cluster mode. (Redis#12986)
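For instance, benchmarking a cluster node now spreads keys across all slots
owned by that node (an illustrative invocation; host and port are placeholders):

    % ./src/valkey-benchmark --cluster -h 127.0.0.1 -p 7000 -t set,get -n 100000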
Internal Codebase Improvements
==============================
* Enable debug asserts for cluster and sentinel tests. (#588)
* Introduce a minimal debugger for Tcl integration test suite. (#683)
* Set up clang-format GitHub action for automated code formatting checks. (#538)
* Replace custom atomic logic with C11 _Atomics. (#490)
* Add fast fail option for Tcl test cases. (#482)
* Introduce a simple unit test framework. (#460, #344)
* Introduce Codecov for automated code coverage tracking. (#316)
* Remove deprecated `redis-trib` CLI program. (#281)
* Add `-fno-omit-frame-pointer` to default compilation flags to improve
debuggability. (Redis#12973)
* Refactor the per-slot dict-array db.c into a new kvstore data structure.
(Redis#12822)
* Unified database rehash method for both standalone and cluster modes.
(Redis#12848)
* Clarify and decouple the sampling logic in eviction to improve readability.
(Redis#12781)
* Rewrite large printf calls to smaller ones for readability. (Redis#12257)
Experimental
============
* Introduce Valkey Over RDMA transport (experimental). (#477)
We appreciate the efforts of all who contributed code to this release!
lan Slang, Binbin, Brennan, Chen Tianjie, Cui Fliter, Daniel House, Darren Jiang,
David Carlier, Debing Sun, Dingrui, Dmitry Polyakovsky, Eran Liberty, Gabi Ganam,
George Guimares, Guillaume Koenig, Guybe, Harkrishn Patro, Hassaan Khan, Hwang Si Yeon,
ICHINOSE Shogo, icy17, Ikko Eltociear Ashimine, iKun, Itamar Haber, Jachin, Jacob Murphy,
Jason Elbaum, Jeff Liu, John Sully, John Vandenberg, Jonathan Wright, Jonghoonpark, Joe Hu,
Josiah Carlson, Juho Kim, judeng, Jun Luo, K.G. Wang, Karthik Subbarao, Karthick Ariyaratnam,
kell0gg, Kyle Kim, Leibale Eidelman, LiiNen, Lipeng Zhu, Lior Kogan, Lior Lahav, Madelyn Olson,
Makdon, Maria Markova, Mason Hall, Matthew Douglass, meiravgri, michalbiesek, Mike Dolan,
Mikel Olasagasti Uranga, Moshe Kaplan, mwish, naglera, NAM UK KIM, Neal Gompa, nitaicaro,
Nir Rattner, Oran Agra, Ouri Half, Ozan Tezcan, Parth, PatrickJS, Pengfei Han, Pierre, Ping Xie,
poiuj, pshankinclarke, ranshid, Ronen Kalish, Roshan Khatri, Samuel Adetunji, Sankar, secwall,
Sergey Fedorov, Sher_Sun, Shivshankar, skyfirelee, Slava Koyfman, Subhi Al Hasan, sundb,
Ted Lyngmo, Thomas Fline, tison, Tom Morris, Tyler Bream, uriyage, Viktor Söderqvist, Vitaly,
Vitah Lin, VoletiRam, w. ian douglas, WangYu, Wen Hui, Wenwen Chen, Yaacov Hazan, Yanqi Lv,
Yehoshua Hershberg, Yves LeBras, zalj, Zhao Zhao, zhenwei pi, zisong.cw

1
BUGS Normal file
View File

@ -0,0 +1 @@
Please check https://github.com/valkey-io/valkey/issues

View File

@ -1,44 +0,0 @@
cmake_minimum_required(VERSION 3.10)
# Must be done first
if (APPLE)
    # Force clang compiler on macOS
    find_program(CLANGPP "clang++")
    find_program(CLANG "clang")
    if (CLANG AND CLANGPP)
        message(STATUS "Found ${CLANGPP}, ${CLANG}")
        set(CMAKE_CXX_COMPILER ${CLANGPP})
        set(CMAKE_C_COMPILER ${CLANG})
    endif ()
endif ()
# Options
option(BUILD_UNIT_TESTS "Build valkey-unit-tests" OFF)
option(BUILD_TEST_MODULES "Build all test modules" OFF)
option(BUILD_EXAMPLE_MODULES "Build example modules" OFF)
set(CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} "${CMAKE_SOURCE_DIR}/cmake/Modules/")
project("valkey")
set(CMAKE_C_STANDARD 11)
set(CMAKE_C_STANDARD_REQUIRED ON)
set(CMAKE_C_EXTENSIONS ON)
include(ValkeySetup)
add_subdirectory(src)
add_subdirectory(tests)
# Include the packaging module
include(Packaging)
# Clear cached variables from the cache
unset(BUILD_TESTS CACHE)
unset(CLANGPP CACHE)
unset(CLANG CACHE)
unset(BUILD_RDMA_MODULE CACHE)
unset(BUILD_TLS_MODULE CACHE)
unset(BUILD_UNIT_TESTS CACHE)
unset(BUILD_TEST_MODULES CACHE)
unset(BUILD_EXAMPLE_MODULES CACHE)
unset(USE_TLS CACHE)
unset(DEBUG_FORCE_DEFRAG CACHE)

View File

@ -49,7 +49,7 @@ representative at an online or offline event.
Enforcement
Instances of abusive, harassing, or otherwise unacceptable behavior may be
reported to the community leaders responsible for enforcement at
this email address: maintainers@lists.valkey.io.
this email address: placeholderkv@gmail.com.
All complaints will be reviewed and investigated promptly and fairly.
All community leaders are obligated to respect the privacy and security of the
reporter of any incident.

View File

@ -79,9 +79,10 @@ you need to ensure that the contribution is in accordance with the DCO.
1. If it is a major feature or a semantical change, please don't start coding
straight away: if your feature is not a conceptual fit you'll lose a lot of
time writing the code without any reason. Start by creating an issue at Github with the
description of, exactly, what you want to accomplish and why. Use cases are important for
features to be accepted. Here you can see if there is consensus about your idea.
time writing the code without any reason. Start by posting in the mailing list
and creating an issue at Github with the description of, exactly, what you want
to accomplish and why. Use cases are important for features to be accepted.
Here you can see if there is consensus about your idea.
2. If in step 1 you get an acknowledgment from the project leaders, use the following
procedure to submit a patch:

20
COPYING
View File

@ -2,22 +2,6 @@
BSD 3-Clause License
Copyright (c) 2024-present, Futriix contributors
All rights reserved.
Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
* Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# License 2
BSD 3-Clause License
Copyright (c) 2024-present, Valkey contributors
All rights reserved.
@ -29,11 +13,11 @@ Redistribution and use in source and binary forms, with or without modification,
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# License 3
# License 2
BSD 3-Clause License
Copyright (c) 2006-2020, Redis Ltd.
Copyright (c) 2006-2020, Salvatore Sanfilippo
All rights reserved.
Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:

View File

@ -2,9 +2,7 @@
The Valkey project is managed by a Technical Steering Committee (TSC) composed of the maintainers of the Valkey repository.
The Valkey project includes all of the current and future repositories under the Valkey-io organization.
Committers are defined as individuals with write access to the code within a repository.
Maintainers are defined as individuals with full access to a repository and own its governance.
Both maintainers and committers should be clearly listed in the MAINTAINERS.md file in a given project's repository.
Maintainers are defined as individuals with full commit access to a repository, which shall be in sync with the MAINTAINERS.md file in a given project's repository.
Maintainers of other repositories within the Valkey project are not members of the TSC unless explicitly added.
## Technical Steering Committee

1
INSTALL Normal file
View File

@ -0,0 +1 @@
See README

Binary file not shown.


View File

@ -16,16 +16,8 @@ Maintainers listed in alphabetical order by their github ID.
| Zhao Zhao | [soloestoy](https://github.com/soloestoy) | Alibaba |
| Viktor Söderqvist | [zuiderkwast](https://github.com/zuiderkwast) | Ericsson |
## Current Committers
Committers listed in alphabetical order by their github ID.
| Committer | GitHub ID | Affiliation |
| ------------------- | ----------------------------------------------- | ----------- |
| Harkrishn Patro | [hpatro](https://github.com/hpatro) | Amazon |
| Ran Shidlansik | [ranshid](https://github.com/ranshid) | Amazon |
### Former Maintainers and Committers
### Former Maintainers
| Maintainer | GitHub ID | Affiliation |
| ------------------- | ----------------------------------------------- | ----------- |

106
MANIFESTO Normal file
View File

@ -0,0 +1,106 @@
[Note: This was the manifesto of Redis. It does not represent the ideals of Valkey, but is
kept in remembrance for the ideals that Salvatore had for the project.]
Redis Manifesto
===============
1 - A DSL for Abstract Data Types. Redis is a DSL (Domain Specific Language)
that manipulates abstract data types and implemented as a TCP daemon.
Commands manipulate a key space where keys are binary-safe strings and
values are different kinds of abstract data types. Every data type
represents an abstract version of a fundamental data structure. For instance
Redis Lists are an abstract representation of linked lists. In Redis, the
essence of a data type isn't just the kind of operations that the data types
support, but also the space and time complexity of the data type and the
operations performed upon it.
2 - Memory storage is #1. The Redis data set, composed of defined key-value
pairs, is primarily stored in the computer's memory. The amount of memory in
all kinds of computers, including entry-level servers, is increasing
significantly each year. Memory is fast, and allows Redis to have very
predictable performance. Datasets composed of 10k or 40 millions keys will
perform similarly. Complex data types like Redis Sorted Sets are easy to
implement and manipulate in memory with good performance, making Redis very
simple. Redis will continue to explore alternative options (where data can
be optionally stored on disk, say) but the main goal of the project remains
the development of an in-memory database.
3 - Fundamental data structures for a fundamental API. The Redis API is a direct
consequence of fundamental data structures. APIs can often be arbitrary but
not an API that resembles the nature of fundamental data structures. If we
ever meet intelligent life forms from another part of the universe, they'll
likely know, understand and recognize the same basic data structures we have
in our computer science books. Redis will avoid intermediate layers in API,
so that the complexity is obvious and more complex operations can be
performed as the sum of the basic operations.
4 - We believe in code efficiency. Computers get faster and faster, yet we
believe that abusing computing capabilities is not wise: the amount of
operations you can do for a given amount of energy remains anyway a
significant parameter: it allows to do more with less computers and, at
the same time, having a smaller environmental impact. Similarly Redis is
able to "scale down" to smaller devices. It is perfectly usable in a
Raspberry Pi and other small ARM based computers. Faster code having
just the layers of abstractions that are really needed will also result,
often, in more predictable performances. We think likewise about memory
usage, one of the fundamental goals of the Redis project is to
incrementally build more and more memory efficient data structures, so that
problems that were not approachable in RAM in the past will be perfectly
fine to handle in the future.
5 - Code is like a poem; it's not just something we write to reach some
practical result. Sometimes people that are far from the Redis philosophy
suggest using other code written by other authors (frequently in other
languages) in order to implement something Redis currently lacks. But to us
this is like if Shakespeare decided to end Enrico IV using the Paradiso from
the Divina Commedia. Is using any external code a bad idea? Not at all. Like
in "One Thousand and One Nights" smaller self contained stories are embedded
in a bigger story, we'll be happy to use beautiful self contained libraries
when needed. At the same time, when writing the Redis story we're trying to
write smaller stories that will fit in to other code.
6 - We're against complexity. We believe designing systems is a fight against
complexity. We'll accept to fight the complexity when it's worthwhile but
we'll try hard to recognize when a small feature is not worth 1000s of lines
of code. Most of the time the best way to fight complexity is by not
creating it at all. Complexity is also a form of lock-in: code that is
very hard to understand cannot be modified by users in an independent way
regardless of the license. One of the main Redis goals is to remain
understandable, enough for a single programmer to have a clear idea of how
it works in detail just reading the source code for a couple of weeks.
7 - Threading is not a silver bullet. Instead of making Redis threaded we
believe on the idea of an efficient (mostly) single threaded Redis core.
Multiple of such cores, that may run in the same computer or may run
in multiple computers, are abstracted away as a single big system by
higher order protocols and features: Redis Cluster and the upcoming
Redis Proxy are our main goals. A shared nothing approach is not just
much simpler (see the previous point in this document), is also optimal
in NUMA systems. In the specific case of Redis it allows for each instance
to have a more limited amount of data, making the Redis persist-by-fork
approach more sounding. In the future we may explore parallelism only for
I/O, which is the low hanging fruit: minimal complexity could provide an
improved single process experience.
8 - Two levels of API. The Redis API has two levels: 1) a subset of the API fits
naturally into a distributed version of Redis and 2) a more complex API that
supports multi-key operations. Both are useful if used judiciously but
there's no way to make the more complex multi-keys API distributed in an
opaque way without violating our other principles. We don't want to provide
the illusion of something that will work magically when actually it can't in
all cases. Instead we'll provide commands to quickly migrate keys from one
instance to another to perform multi-key operations and expose the
trade-offs to the user.
9 - We optimize for joy. We believe writing code is a lot of hard work, and the
only way it can be worth is by enjoying it. When there is no longer joy in
writing code, the best thing to do is stop. To prevent this, we'll avoid
taking paths that will make Redis less of a joy to develop.
10 - All the above points are put together in what we call opportunistic
programming: trying to get the most for the user with minimal increases
in complexity (hanging fruits). Solve 95% of the problem with 5% of the
code when it is acceptable. Avoid a fixed schedule but follow the flow of
user requests, inspiration, Redis internal readiness for certain features
(sometimes many past changes reach a critical point making a previously
complex feature very easy to obtain).

539
README.md
View File

@ -1,408 +1,261 @@
<!-- Improved compatibility of back to top link: See: https://github.com/othneildrew/Best-README-Template/pull/73 -->
<a id="readme-top"></a>
[![codecov](https://codecov.io/gh/valkey-io/valkey/graph/badge.svg?token=KYYSJAYC5F)](https://codecov.io/gh/valkey-io/valkey)
<!-- PROJECT LOGO -->
<br />
<div align="center">
<!-- <a href="https://github.com/othneildrew/Best-README-Template"> -->
<img src="Logo-Futriix.png" height=100></img>
</a>
This README is under construction as we work to build a new community driven high performance key-value store.
<h3 align="center">Futriix</h3>
This project was forked from the open source Redis project right before the transition to their new source available licenses.
<p align="center">
Futriix's full documentation (the commands are identical)
<br />
<a href="https://valkey.io/"><strong>Изучить полную документацию</strong></a>
<br />
<a href="">Сообщить об ошибке</a>
&middot;
<a href="">Предложение новой функциональности</a>
</p>
</div>
This README is just a fast *quick start* document. We are currently working on a more permanent documentation page.
## Quick documentation for the Futriix project
What is Valkey?
--------------
Valkey is a high-performance data structure server that primarily serves key/value workloads.
It supports a wide range of native structures and an extensible plugin system for adding new data structures and access patterns.
<!-- TABLE OF CONTENTS -->
<br>
<details>
<summary><b>Contents</b></summary>
<ol>
<li>
<a href="#о-проекте">About the project</a>
</li>
<li><a href="#подготовка">Prerequisites</a></li>
<li><a href="#компиляция">Building</a></li>
<li><a href="#использование">Usage</a></li>
<li><a href="#кластер">Cluster</a></li>
<li><a href="#дорожная-карта">Roadmap</a></li>
<li><a href="#вклад">Contributing</a></li>
<li><a href="#лицензия">License</a></li>
<li><a href="#контакты">Contacts</a></li>
</ol>
</details>
Building Valkey
--------------
Valkey can be compiled and used on Linux, OSX, OpenBSD, NetBSD, FreeBSD.
We support big endian and little endian architectures, and both 32 bit
and 64 bit systems.
It may compile on Solaris derived systems (for instance SmartOS) but our
support for this platform is *best effort* and Valkey is not guaranteed to
work as well as in Linux, OSX, and \*BSD.
It is as simple as:
% make
To build with TLS support, you'll need OpenSSL development libraries (e.g.
libssl-dev on Debian/Ubuntu) and run:
% make BUILD_TLS=yes
To build with experimental RDMA support you'll need RDMA development libraries
(e.g. librdmacm-dev and libibverbs-dev on Debian/Ubuntu). For now, Valkey only
supports RDMA as connection module mode. Run:
% make BUILD_RDMA=module
To build with systemd support, you'll need systemd development libraries (such
as libsystemd-dev on Debian/Ubuntu or systemd-devel on CentOS) and run:
% make USE_SYSTEMD=yes
To append a suffix to Valkey program names, use:
% make PROG_SUFFIX="-alt"
You can build a 32 bit Valkey binary using:
% make 32bit
After building Valkey, it is a good idea to test it using:
% make test
If TLS is built, running the tests with TLS enabled (you will need `tcl-tls`
installed):
% ./utils/gen-test-certs.sh
% ./runtest --tls
<!-- ABOUT THE PROJECT -->
## About the project
Fixing build problems with dependencies or cached build options
---------
The Futriix project is a fork of the Valkey project.
Futriix is a distributed DBMS written in C, built on top of [Valkey](https://valkey.io/), with support for AI-based modules and modules written in Golang.
Valkey has some dependencies which are included in the `deps` directory.
`make` does not automatically rebuild dependencies even if something in
the source code of dependencies changes.
The DBMS supports a distributed [JSON](https://source.futriix.ru/gvsafronov/futriix-json) module, an [AI module "Virtual Assistant"](), and an [SQL module](https://source.futriix.ru/gvsafronov/fdx).
When you update the source code with `git pull` or when code inside the
dependencies tree is modified in any other way, make sure to use the following
command in order to really clean everything and rebuild from scratch:
Below are instructions for setting up the project locally.
To get a local copy up and running, follow these simple steps.
% make distclean
This will clean: jemalloc, lua, hiredis, linenoise and other dependencies.
Also if you force certain build options like 32bit target, no C compiler
optimizations (for debugging purposes), and other similar build time options,
those options are cached indefinitely until you issue a `make distclean`
command.
### Prerequisites
Fixing problems building 32 bit binaries
---------
The steps below will help you compile and install Futriix.
* Install the C toolchain and accompanying utilities (autoconf and others)
If after building Valkey with a 32 bit target you need to rebuild it
with a 64 bit target, or the other way around, you need to perform a
`make distclean` in the root directory of the Valkey distribution.
```sh
unix:$ sudo apt update && sudo apt upgrade
unix:$ sudo apt install build-essential nasm autotools-dev autoconf libjemalloc-dev tcl tcl-dev uuid-dev libcurl4-openssl-dev git
```
In case of build errors when trying to build a 32 bit binary of Valkey, try
the following steps:
* Install the Golang programming language following the instructions on the [official site](https://go.dev/doc/install)
* Install the package libc6-dev-i386 (also try g++-multilib).
* Try using the following command line instead of `make 32bit`:
`make CFLAGS="-m32 -march=native" LDFLAGS="-m32"`
### Building
Allocator
---------
To compile the project successfully, follow the steps below:
Selecting a non-default memory allocator when building Valkey is done by setting
the `MALLOC` environment variable. Valkey is compiled and linked against libc
malloc by default, with the exception of jemalloc being the default on Linux
systems. This default was picked because jemalloc has proven to have fewer
fragmentation problems than libc malloc.
1. Clone the repository
```sh
git clone https://source.futriix.ru/gvsafronov/Futriix
```
2. Change into the `src` source directory
```sh
cd src/
```
<p align="right">(<a href="#readme-top">К началу</a>)</p>
To force compiling against libc malloc, use:
3. Compile Futriix with the Make utility
Futriix can be compiled for Linux, OSX, OpenBSD, NetBSD, FreeBSD.
We support big endian and little endian architectures, and both 32-bit and 64-bit systems.
```sh
unix:$ make
```
To build with TLS support, you need the OpenSSL development libraries (e.g.
libssl-dev on Debian/Ubuntu).
To build with TLS support, run the commands below:
```sh
unix:$ make BUILD_TLS=yes
```
To build TLS as a Futriix module:
```sh
unix:$ make BUILD_TLS=module
```
To build with experimental RDMA support, you need to install the RDMA development
libraries (e.g. librdmacm-dev and libibverbs-dev on Debian/Ubuntu).
To build Futriix with RDMA support, simply run:
```sh
unix:$ make BUILD_RDMA=yes
```
To build RDMA as a Futriix module:
```sh
unix:$ make BUILD_RDMA=module
```
To build with systemd support, you need to install the corresponding development libraries (such as
libsystemd-dev on Debian/Ubuntu or systemd-devel on CentOS) and run:
```sh
unix:$ make USE_SYSTEMD=yes
```
To append a suffix to the Futriix program names, run:
```sh
unix:$ make PROG_SUFFIX="-alt"
```
After building Futriix, we recommend running the test suite to verify the build:
```sh
unix:$ make test
```
The command above runs the tests integrated into the project. Additional tests are started using:
```sh
unix:$ make test-unit # Unit tests
unix:$ make test-modules # Module API tests
unix:$ make test-cluster # Futriix cluster tests
```
For more details, see the following sources:
[tests/README.md](tests/README.md) and [src/unit/README.md](src/unit/README.md).
<p align="right">(<a href="#readme-top">К началу</a>)</p>
## Fixing build problems with dependencies or cached build options
Futriix has some dependencies, which are stored in the `deps` directory.
`make` does not automatically rebuild dependencies, even if the dependency
source code changes.
When you update the project code with `git pull`, or when code inside the
dependency tree is modified in any other way, make sure to use the following
command in order to really clean everything and rebuild from scratch:
```sh
unix:$ make distclean
```
The command above will clean: the jemalloc memory allocator, lua, the hiredis and linenoise libraries, and other dependencies.
Also, if you force certain build options, such as a 32-bit target, disabled C compiler
optimizations (for debugging purposes), and other similar build-time options,
they are cached indefinitely until you run `make distclean`.
<p align="right">(<a href="#readme-top">К началу</a>)</p>
## Allocator
Selecting a non-default memory allocator when building Futriix is done by setting
the `MALLOC` environment variable. Futriix is compiled and linked against libc
malloc by default, with the exception of jemalloc being the default on Linux
distributions. This default was picked because jemalloc has proven to have fewer
fragmentation problems than libc malloc.
To force compiling against libc malloc, run the following command:
```sh
unix:$ make MALLOC=libc
```
% make MALLOC=libc
To compile against jemalloc on Mac OS X systems, use:
```sh
unix:$ make MALLOC=jemalloc
```
## Monotonic clock
% make MALLOC=jemalloc
By default, Futriix uses the POSIX clock_gettime function as the
monotonic clock source. On most modern systems, the processor's internal clock
can be used to improve performance. Caveats can be found here:
Monotonic clock
---------------
By default, Valkey will build using the POSIX clock_gettime function as the
monotonic clock source. On most modern systems, the internal processor clock
can be used to improve performance. Cautions can be found here:
http://oliveryang.net/2015/09/pitfalls-of-TSC-usage/
To build with support for the processor's internal clock, use the command below:
To build with support for the processor's internal instruction clock, use:
```sh
unix:$ make CFLAGS="-DUSE_PROCESSOR_CLOCK"
```
% make CFLAGS="-DUSE_PROCESSOR_CLOCK"
## Verbose build
Verbose build
-------------
By default, Futriix produces user-friendly colorized output.
If you want to see more verbose output, run the following command:
Valkey will build with a user-friendly colorized output by default.
If you want to see a more verbose output, use the following:
```sh
unix:$ make V=1
```
4. If you want to start the Futriix server with default parameters (without specifying a configuration file), run the following command:
```sh
./futriix-server
```
5. You can also use the configuration file `futriix.conf`, located in the "Futriix" directory, to configure your server.
To start Futriix with a configuration file, use the command below:
```sh
./futriix-server /path/to/futriix.conf
```
6. Run the futriix-cli utility (the Futriix client) to connect to a **local** Futriix server and start working with the instance:
% make V=1
```sh
./futriix-cli
```
Running Valkey
-------------
7. To connect to a specific node on the network with the futriix-cli utility, add the `-h` option to specify the remote host by its IP address and the `-p` option to specify the port number:
To run Valkey with the default configuration, just type:
```sh
./futriix-cli -h 11.164.22.7 -p 50000
```
% cd src
% ./valkey-server
<p align="right">(<a href="#readme-top">К началу</a>)</p>
If you want to provide your valkey.conf, you have to run it using an additional
parameter (the path of the configuration file):
## Running Futriix with RDMA:
% cd src
% ./valkey-server /path/to/valkey.conf
Note that RDMA support in Futriix is an experimental feature.
It may be changed or removed in any minor or major version.
Currently, it is only supported on Linux.
It is possible to alter the Valkey configuration by passing parameters directly
as options using the command line. Examples:
* Command to enable RDMA:
```sh
./src/futriix-server --protected-mode no \
--rdma-bind 192.168.122.100 --rdma-port 9880
```
% ./valkey-server --port 9999 --replicaof 127.0.0.1 6379
% ./valkey-server /etc/valkey/6379.conf --loglevel debug
* RDMA module mode:
```sh
./src/futriix-server --protected-mode no \
--loadmodule src/Futriix-rdma.so --rdma-bind 192.168.122.100 --rdma-port 9880
```
The RDMA bind address/port can be changed with a runtime command:
All the options in valkey.conf are also supported as options using the command
line, with exactly the same name.
```sh
192.168.122.100:9880> CONFIG SET rdma-port 9380
```
Running Valkey with TLS:
------------------
It's also possible to have both RDMA and TCP available, and there is no
conflict between TCP(9880) and RDMA(9880), for example:
Please consult the [TLS.md](TLS.md) file for more information on
how to use Valkey with TLS.
```sh
unix:$ ./src/futriix-server --protected-mode no \
--loadmodule src/Futriix-rdma.so --rdma-bind 192.168.122.100 --rdma-port 9880 \
--port 9880
```
Running Valkey with RDMA:
------------------
Note: your network card (192.168.122.100 in this example) must support
RDMA. To check whether a server supports RDMA, run the command below:
Note that Valkey Over RDMA is an experimental feature.
It may be changed or removed in any minor or major version.
Currently, it is only supported on Linux.
```sh
unix:$ rdma res show (a new version iproute2 package)
```
To manually run a Valkey server with RDMA mode:
Or the command below:
% ./src/valkey-server --protected-mode no \
--loadmodule src/valkey-rdma.so bind=192.168.122.100 port=6379
```sh
unix:$ ibv_devices
```
It's possible to change the RDMA bind address/port with a runtime command:
<!-- USAGE EXAMPLES -->
## Usage
192.168.122.100:6379> CONFIG SET rdma.port 6380
unix:$ cd src
unix:$ ./futriix-cli
127.0.0.1:futriix:~> ping
It's also possible to have both RDMA and TCP available, and there is no
conflict between TCP(6379) and RDMA(6379). For example:
% ./src/valkey-server --protected-mode no \
--loadmodule src/valkey-rdma.so bind=192.168.122.100 port=6379 \
--port 6379
Note that the network card (192.168.122.100 in this example) should support
RDMA. To test whether a server supports RDMA:
% rdma res show (a new version iproute2 package)
Or:
% ibv_devices
Playing with Valkey
------------------
You can use valkey-cli to play with Valkey. Start a valkey-server instance,
then in another terminal try the following:
% cd src
% ./valkey-cli
valkey> ping
PONG
127.0.0.1:futriix:~> set foo bar
valkey> set foo bar
OK
127.0.0.1:futriix:~> get foo
valkey> get foo
"bar"
127.0.0.1:futriix:~> incr mycounter
valkey> incr mycounter
(integer) 1
127.0.0.1:futriix:~> incr mycounter
valkey> incr mycounter
(integer) 2
127.0.0.1:futriix:~>
valkey>
Installing Valkey
-----------------
<p align="right">(<a href="#readme-top">К началу</a>)</p>
In order to install Valkey binaries into /usr/local/bin, just use:
% make install
## Cluster
You can use `make PREFIX=/some/other/directory install` if you wish to use a
different destination.
_Note_: For compatibility with Redis, we create symlinks from the Redis names (`redis-server`, `redis-cli`, etc.) to the Valkey binaries installed by `make install`.
The symlinks are created in same directory as the Valkey binaries.
The symlinks are removed when using `make uninstall`.
The creation of the symlinks can be skipped by setting the makefile variable `USE_REDIS_SYMLINKS=no`.
1. Open the Futriix directory
`make install` will just install binaries in your system, but will not configure
init scripts and configuration files in the appropriate place. This is not
needed if you just want to play a bit with Valkey, but if you are installing
it the proper way for a production system, we have a script that does this
for Ubuntu and Debian systems:
```sh
unix:$ cd futriix
% cd utils
% ./install_server.sh
```
_Note_: `install_server.sh` will not work on Mac OSX; it is built for Linux only.
2. Open the futriix.conf configuration file in any text editor, for example nano, as shown below:
The script will ask you a few questions and will setup everything you need
to run Valkey properly as a background daemon that will start again on
system reboots.
```sh
unix:$ nano futriix/futriix.conf
You'll be able to stop and start Valkey using the script named
`/etc/init.d/valkey_<portnumber>`, for instance `/etc/init.d/valkey_6379`.
```
Code contributions
-----------------
Please see the [CONTRIBUTING.md][2]. For security bugs and vulnerabilities, please see [SECURITY.md][3].
3. Find the "active-replica" and "multi-master" parameters and set them to "yes". Then add the IP addresses of your cluster nodes to the configuration file. If you did everything correctly, your `futriix.conf` configuration file should contain lines like those shown below:
[1]: https://github.com/valkey-io/valkey/blob/unstable/COPYING
[2]: https://github.com/valkey-io/valkey/blob/unstable/CONTRIBUTING.md
[3]: https://github.com/valkey-io/valkey/blob/unstable/SECURITY.md
```sh
active-replica yes
multi-master yes
replicaof 192.168.11.5 9880
replicaof 192.168.11.6 9880
replicaof 192.168.11.7 9880
```
4. Save your changes and exit the editor using the key combinations below:
```sh
unix:$ ctrl+O
unix:$ ctrl+x
```
5. Go to the Futriix directory and run the `cluster.sh` script with the `pick` parameter (which assembles the cluster) and then the `run` parameter (which starts the cluster), as shown below:
```sh
unix:$ ./cluster pick
unix:$ ./cluster run
```
6. Make the `cluster.sh` script executable using the command below:
```sh
unix:$ chmod +x cluster.sh
```
7. To stop the cluster, run the `cluster.sh` script with the `stop` parameter:
```sh
unix:$ ./cluster stop
```
<p align="right">(<a href="#readme-top">К началу</a>)</p>
<!-- ROADMAP -->
## Roadmap
- [x] Add stored procedure support
- [x] Change the futriix-cli client command-line prompt
- [x] Rewrite the cluster.sh script that assembles the Futriix cluster
- [x] Add support for a JSON module
- [ ] Add support for a module that can run operating system terminal commands
- [ ] Implement support for the Raft algorithm
- [ ] Add support for the SQL query language
See the [open issues](https://source.futriix.ru/gvsafronov/Futriix/issues) for a full list of proposed features (and known issues).
<p align="right">(<a href="#readme-top">К началу</a>)</p>
<!-- CONTRIBUTING -->
## Contributing
Contributions are what make the open source community such an amazing place to learn, inspire, and create. Any contribution you make is **greatly appreciated**.
If you have a suggestion that would make this better, please fork the repository and create a pull request. You can also simply open an issue with the "enhancement" tag.
Don't forget to give the project a star! Thanks again!
1. Fork the project
2. Create your feature branch (`git checkout -b Feature/AmazingFeature`)
3. Commit your changes (`git commit -m 'Add some AmazingFeature'`)
4. Push to the branch (`git push origin Feature/AmazingFeature`)
5. Open a pull request
<!-- LICENSE -->
## License
The project is distributed under the BSD 3-Clause License. See the `COPYING.txt` file for details.
<p align="right">(<a href="#readme-top">К началу</a>)</p>
<!-- CONTACT -->
## Contacts
Grigory Safronov - [E-mail](gvsafronov@yandex.ru)
Project link: (https://source.futriix.ru/gvsafronov/Futriix)
<p align="right">(<a href="#readme-top">К началу</a>)</p>
Valkey is an open community project under LF Projects
-----------------
Valkey a Series of LF Projects, LLC
2810 N Church St, PMB 57274
Wilmington, Delaware 19802-4447

106
TLS.md Normal file
View File

@ -0,0 +1,106 @@
TLS Support
===========
Getting Started
---------------
### Building
To build with TLS support you'll need OpenSSL development libraries (e.g.
libssl-dev on Debian/Ubuntu).
To build TLS support as Valkey built-in:
Run `make BUILD_TLS=yes`.
Or to build TLS as Valkey module:
Run `make BUILD_TLS=module`.
Note that sentinel mode does not support TLS module.
### Tests
To run Valkey test suite with TLS, you'll need TLS support for TCL (i.e.
`tcl-tls` package on Debian/Ubuntu).
1. Run `./utils/gen-test-certs.sh` to generate a root CA and a server
certificate.
2. Run `./runtest --tls` or `./runtest-cluster --tls` to run Valkey and Valkey
Cluster tests in TLS mode.
3. Run `./runtest --tls-module` or `./runtest-cluster --tls-module` to
run Valkey and Valkey cluster tests in TLS mode with Valkey module.
### Running manually
To manually run a Valkey server with TLS mode (assuming `gen-test-certs.sh` was
invoked so sample certificates/keys are available):
For TLS built-in mode:
./src/valkey-server --tls-port 6379 --port 0 \
--tls-cert-file ./tests/tls/valkey.crt \
--tls-key-file ./tests/tls/valkey.key \
--tls-ca-cert-file ./tests/tls/ca.crt
For TLS module mode:
./src/valkey-server --tls-port 6379 --port 0 \
--tls-cert-file ./tests/tls/valkey.crt \
--tls-key-file ./tests/tls/valkey.key \
--tls-ca-cert-file ./tests/tls/ca.crt \
--loadmodule src/valkey-tls.so
To connect to this Valkey server with `valkey-cli`:
./src/valkey-cli --tls \
--cert ./tests/tls/valkey.crt \
--key ./tests/tls/valkey.key \
--cacert ./tests/tls/ca.crt
Specifying `port 0` will disable TCP. It's also possible to have
both TCP and TLS available, but you'll need to assign different ports.
To make a Replica connect to the master using TLS, use `--tls-replication yes`,
and to make Valkey Cluster use TLS across nodes use `--tls-cluster yes`.
Connections
-----------
All socket operations now go through a connection abstraction layer that hides
I/O and read/write event handling from the caller.
**Multi-threading I/O is not currently supported for TLS**, as a TLS connection
needs to do its own manipulation of AE events which is not thread safe. The
solution is probably to manage independent AE loops for I/O threads and longer
term association of connections with threads. This may potentially improve
overall performance as well.
Sync IO for TLS is currently implemented in a hackish way, i.e. making the
socket blocking and configuring socket-level timeout. This means the timeout
value may not be so accurate, and there would be a lot of syscall overhead.
However I believe that getting rid of syncio completely in favor of pure async
work is probably a better move than trying to fix that. For replication it would
probably not be so hard. For cluster keys migration it might be more difficult,
but there are probably other good reasons to improve that part anyway.
To-Do List
----------
- [ ] valkey-benchmark support. The current implementation is a mix of using
hiredis for parsing and basic networking (establishing connections), but
directly manipulating sockets for most actions. This will need to be cleaned
up for proper TLS support. The best approach is probably to migrate to hiredis
async mode.
- [ ] valkey-cli `--slave` and `--rdb` support.
Multi-port
----------
Consider the implications of allowing TLS to be configured on a separate port,
making Valkey listening on multiple ports:
1. Startup banner port notification
2. Proctitle
3. How slaves announce themselves
4. Cluster bus port calculation

View File

@ -1,192 +0,0 @@
#!/usr/bin/env sh
# This script automatically picks and starts a Futriix cluster.
# It also prints colored "OK" / "FAIL" status indicators.
SCRIPT_DIR="$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )"
# Settings of color
SETCOLOR_SUCCESS="echo -en \\033[1;32m"
SETCOLOR_FAILURE="echo -en \\033[1;31m"
SETCOLOR_NORMAL="echo -en \\033[0;39m"
# Settings
BIN_PATH="$SCRIPT_DIR/src"
CLUSTER_HOST=127.0.0.1
PORT=7000
TIMEOUT=2000
NODES=6
REPLICAS=1
PROTECTED_MODE=yes
ADDITIONAL_OPTIONS=""
CONFIG_PATH="./futriix.conf"
# You may want to put the above config parameters into config.sh in order to
# override the defaults without modifying this script.
if [ -a config.sh ]
then
source "config.sh"
fi
# Computed vars
ENDPORT=$((PORT+NODES))
#if [ $? -eq 0 ]; then
# $SETCOLOR_SUCCESS
# echo -n "$(tput hpa $(tput cols))$(tput cub 6)[OK]"
# $SETCOLOR_NORMAL
# echo
# else
# $SETCOLOR_FAILURE
# echo -n "$(tput hpa $(tput cols))$(tput cub 6)[fail]"
# $SETCOLOR_NORMAL
# echo
#fi
if [ "$1" == "pick" ]
then
while [ $((PORT < ENDPORT)) != "0" ]; do
PORT=$((PORT+1))
echo "Starting $PORT"
yes | $BIN_PATH/futriix-server ${CONFIG_PATH} --port $PORT --protected-mode $PROTECTED_MODE --cluster-enabled yes --cluster-config-file nodes-${PORT}.conf --cluster-node-timeout $TIMEOUT --appendonly yes --appendfilename appendonly-${PORT}.aof --appenddirname appendonlydir-${PORT} --dbfilename dump-${PORT}.rdb --logfile ${PORT}.log --daemonize yes --enable-protected-configs yes --enable-debug-command yes --enable-module-command yes ${ADDITIONAL_OPTIONS} >/dev/null 2>&1
if [ $? -eq 0 ]; then
$SETCOLOR_SUCCESS
echo -n "$(tput hpa $(tput cols))$(tput cub 6)[OK]"
$SETCOLOR_NORMAL
echo
else
$SETCOLOR_FAILURE
echo -n "$(tput hpa $(tput cols))$(tput cub 6)[fail]"
$SETCOLOR_NORMAL
echo
fi
done
exit 0
fi
if [ "$1" == "run" ]
then
HOSTS=""
while [ $((PORT < ENDPORT)) != "0" ]; do
PORT=$((PORT+1))
HOSTS="$HOSTS $CLUSTER_HOST:$PORT"
done
OPT_ARG=""
if [ "$2" == "-f" ]; then
OPT_ARG="--cluster-yes"
fi
yes | $BIN_PATH/futriix-cli --cluster create $HOSTS --cluster-replicas $REPLICAS $OPT_ARG >/dev/null 2>&1
exit 0
fi
if [ $? -eq 0 ]; then
$SETCOLOR_SUCCESS
echo -n "$(tput hpa $(tput cols))$(tput cub 6)[OK]"
$SETCOLOR_NORMAL
echo
else
$SETCOLOR_FAILURE
echo -n "$(tput hpa $(tput cols))$(tput cub 6)[fail]"
$SETCOLOR_NORMAL
echo
fi
if [ "$1" == "stop" ]
then
while [ $((PORT < ENDPORT)) != "0" ]; do
PORT=$((PORT+1))
echo "Stopping $PORT"
$BIN_PATH/futriix-cli -p $PORT shutdown nosave
done
exit 0
fi
if [ "$1" == "repick" ]
then
OLD_PORT=$PORT
while [ $((PORT < ENDPORT)) != "0" ]; do
PORT=$((PORT+1))
echo "Stopping $PORT"
$BIN_PATH/futriix-cli -p $PORT shutdown nosave
done
PORT=$OLD_PORT
while [ $((PORT < ENDPORT)) != "0" ]; do
PORT=$((PORT+1))
echo "picking $PORT"
$BIN_PATH/futriix-server ${CONFIG_PATH} --port $PORT --protected-mode $PROTECTED_MODE --cluster-enabled yes --cluster-config-file nodes-${PORT}.conf --cluster-node-timeout $TIMEOUT --appendonly yes --appendfilename appendonly-${PORT}.aof --appenddirname appendonlydir-${PORT} --dbfilename dump-${PORT}.rdb --logfile ${PORT}.log --daemonize yes --enable-protected-configs yes --enable-debug-command yes --enable-module-command yes ${ADDITIONAL_OPTIONS}
done
exit 0
fi
if [ "$1" == "watch" ]
then
PORT=$((PORT+1))
while [ 1 ]; do
clear
date
$BIN_PATH/futriix-cli -p $PORT cluster nodes | head -30
sleep 1
done
exit 0
fi
if [ "$1" == "tail" ]
then
INSTANCE=$2
PORT=$((PORT+INSTANCE))
tail -f ${PORT}.log
exit 0
fi
if [ "$1" == "tailall" ]
then
tail -f *.log
exit 0
fi
if [ "$1" == "call" ]
then
while [ $((PORT < ENDPORT)) != "0" ]; do
PORT=$((PORT+1))
$BIN_PATH/futriix-cli -p $PORT $2 $3 $4 $5 $6 $7 $8 $9
done
exit 0
fi
if [ "$1" == "clean" ]
then
echo "Cleaning *.log"
rm -rf *.log
echo "Cleaning appendonlydir-*"
rm -rf appendonlydir-*
echo "Cleaning dump-*.rdb"
rm -rf dump-*.rdb
echo "Cleaning nodes-*.conf"
rm -rf nodes-*.conf
exit 0
fi
if [ "$1" == "clean-logs" ]
then
echo "Cleaning *.log"
rm -rf *.log
exit 0
fi
echo ""
echo "Usage: $0 [pick|run|stop|restart|watch|tail|tailall|clean|clean-logs|call]"
echo "pick -- Launch Futriix Cluster instances."
echo "run [-f] -- Create a cluster using futriix-cli --cluster create."
echo "stop -- Stop Futriix Cluster instances."
echo "restart -- Restart Futriix Cluster instances."
echo "watch -- Show CLUSTER NODES output (first 30 lines) of first node."
echo "tail <id> -- Run tail -f of instance at base port + ID."
echo "tailall -- Run tail -f for all the log files at once."
echo "clean -- Remove all instances data, logs, configs."
echo "clean-logs -- Remove just instances logs."
echo "call <cmd> -- Call a command (up to 7 arguments) on all nodes."
echo ""

View File

@ -1,44 +0,0 @@
set(CPACK_PACKAGE_NAME "valkey")
valkey_parse_version(CPACK_PACKAGE_VERSION_MAJOR CPACK_PACKAGE_VERSION_MINOR CPACK_PACKAGE_VERSION_PATCH)
set(CPACK_PACKAGE_CONTACT "maintainers@lists.valkey.io")
set(CPACK_PACKAGE_DESCRIPTION_SUMMARY "Valkey is an open source (BSD) high-performance key/value datastore")
set(CPACK_RESOURCE_FILE_LICENSE "${CMAKE_SOURCE_DIR}/COPYING")
set(CPACK_RESOURCE_FILE_README "${CMAKE_SOURCE_DIR}/README.md")
set(CPACK_STRIP_FILES TRUE)
valkey_get_distro_name(DISTRO_NAME)
message(STATUS "Current host distro: ${DISTRO_NAME}")
if (DISTRO_NAME MATCHES ubuntu
    OR DISTRO_NAME MATCHES debian
    OR DISTRO_NAME MATCHES mint)
    message(STATUS "Adding target package for ${DISTRO_NAME}")
    set(CPACK_PACKAGING_INSTALL_PREFIX "/opt/valkey")
    # Debian related parameters
    set(CPACK_DEBIAN_PACKAGE_MAINTAINER "Valkey contributors")
    set(CPACK_DEBIAN_PACKAGE_SHLIBDEPS ON)
    set(CPACK_DEBIAN_FILE_NAME DEB-DEFAULT)
    set(CPACK_GENERATOR "DEB")
endif ()
include(CPack)
unset(DISTRO_NAME CACHE)
# ---------------------------------------------------
# Create a helper script for creating symbolic links
# ---------------------------------------------------
write_file(
${CMAKE_BINARY_DIR}/CreateSymlink.sh
"\
#!/bin/bash \n\
if [ -z \${DESTDIR} ]; then \n\
# Script is called during 'make install' \n\
PREFIX=${CMAKE_INSTALL_PREFIX}/bin \n\
else \n\
# Script is called during 'make package' \n\
PREFIX=\${DESTDIR}${CPACK_PACKAGING_INSTALL_PREFIX}/bin \n\
fi \n\
cd \$PREFIX \n\
ln -sf \$1 \$2")

View File

@ -1,157 +0,0 @@
# -------------------------------------------------
# Define the sources to be built
# -------------------------------------------------
# valkey-server source files
set(VALKEY_SERVER_SRCS
${CMAKE_SOURCE_DIR}/src/threads_mngr.c
${CMAKE_SOURCE_DIR}/src/adlist.c
${CMAKE_SOURCE_DIR}/src/quicklist.c
${CMAKE_SOURCE_DIR}/src/ae.c
${CMAKE_SOURCE_DIR}/src/anet.c
${CMAKE_SOURCE_DIR}/src/dict.c
${CMAKE_SOURCE_DIR}/src/hashtable.c
${CMAKE_SOURCE_DIR}/src/kvstore.c
${CMAKE_SOURCE_DIR}/src/sds.c
${CMAKE_SOURCE_DIR}/src/zmalloc.c
${CMAKE_SOURCE_DIR}/src/lzf_c.c
${CMAKE_SOURCE_DIR}/src/lzf_d.c
${CMAKE_SOURCE_DIR}/src/pqsort.c
${CMAKE_SOURCE_DIR}/src/zipmap.c
${CMAKE_SOURCE_DIR}/src/sha1.c
${CMAKE_SOURCE_DIR}/src/ziplist.c
${CMAKE_SOURCE_DIR}/src/release.c
${CMAKE_SOURCE_DIR}/src/memory_prefetch.c
${CMAKE_SOURCE_DIR}/src/io_threads.c
${CMAKE_SOURCE_DIR}/src/networking.c
${CMAKE_SOURCE_DIR}/src/util.c
${CMAKE_SOURCE_DIR}/src/object.c
${CMAKE_SOURCE_DIR}/src/db.c
${CMAKE_SOURCE_DIR}/src/replication.c
${CMAKE_SOURCE_DIR}/src/rdb.c
${CMAKE_SOURCE_DIR}/src/t_string.c
${CMAKE_SOURCE_DIR}/src/t_list.c
${CMAKE_SOURCE_DIR}/src/t_set.c
${CMAKE_SOURCE_DIR}/src/t_zset.c
${CMAKE_SOURCE_DIR}/src/t_hash.c
${CMAKE_SOURCE_DIR}/src/config.c
${CMAKE_SOURCE_DIR}/src/aof.c
${CMAKE_SOURCE_DIR}/src/pubsub.c
${CMAKE_SOURCE_DIR}/src/multi.c
${CMAKE_SOURCE_DIR}/src/debug.c
${CMAKE_SOURCE_DIR}/src/sort.c
${CMAKE_SOURCE_DIR}/src/intset.c
${CMAKE_SOURCE_DIR}/src/syncio.c
${CMAKE_SOURCE_DIR}/src/cluster.c
${CMAKE_SOURCE_DIR}/src/cluster_legacy.c
${CMAKE_SOURCE_DIR}/src/cluster_slot_stats.c
${CMAKE_SOURCE_DIR}/src/crc16.c
${CMAKE_SOURCE_DIR}/src/endianconv.c
${CMAKE_SOURCE_DIR}/src/commandlog.c
${CMAKE_SOURCE_DIR}/src/eval.c
${CMAKE_SOURCE_DIR}/src/bio.c
${CMAKE_SOURCE_DIR}/src/rio.c
${CMAKE_SOURCE_DIR}/src/rand.c
${CMAKE_SOURCE_DIR}/src/memtest.c
${CMAKE_SOURCE_DIR}/src/syscheck.c
${CMAKE_SOURCE_DIR}/src/crcspeed.c
${CMAKE_SOURCE_DIR}/src/crccombine.c
${CMAKE_SOURCE_DIR}/src/crc64.c
${CMAKE_SOURCE_DIR}/src/bitops.c
${CMAKE_SOURCE_DIR}/src/sentinel.c
${CMAKE_SOURCE_DIR}/src/notify.c
${CMAKE_SOURCE_DIR}/src/setproctitle.c
${CMAKE_SOURCE_DIR}/src/blocked.c
${CMAKE_SOURCE_DIR}/src/hyperloglog.c
${CMAKE_SOURCE_DIR}/src/latency.c
${CMAKE_SOURCE_DIR}/src/sparkline.c
${CMAKE_SOURCE_DIR}/src/valkey-check-rdb.c
${CMAKE_SOURCE_DIR}/src/valkey-check-aof.c
${CMAKE_SOURCE_DIR}/src/geo.c
${CMAKE_SOURCE_DIR}/src/lazyfree.c
${CMAKE_SOURCE_DIR}/src/module.c
${CMAKE_SOURCE_DIR}/src/evict.c
${CMAKE_SOURCE_DIR}/src/expire.c
${CMAKE_SOURCE_DIR}/src/geohash.c
${CMAKE_SOURCE_DIR}/src/geohash_helper.c
${CMAKE_SOURCE_DIR}/src/childinfo.c
${CMAKE_SOURCE_DIR}/src/allocator_defrag.c
${CMAKE_SOURCE_DIR}/src/defrag.c
${CMAKE_SOURCE_DIR}/src/siphash.c
${CMAKE_SOURCE_DIR}/src/rax.c
${CMAKE_SOURCE_DIR}/src/t_stream.c
${CMAKE_SOURCE_DIR}/src/listpack.c
${CMAKE_SOURCE_DIR}/src/localtime.c
${CMAKE_SOURCE_DIR}/src/lolwut.c
${CMAKE_SOURCE_DIR}/src/lolwut5.c
${CMAKE_SOURCE_DIR}/src/lolwut6.c
${CMAKE_SOURCE_DIR}/src/acl.c
${CMAKE_SOURCE_DIR}/src/tracking.c
${CMAKE_SOURCE_DIR}/src/socket.c
${CMAKE_SOURCE_DIR}/src/tls.c
${CMAKE_SOURCE_DIR}/src/rdma.c
${CMAKE_SOURCE_DIR}/src/sha256.c
${CMAKE_SOURCE_DIR}/src/timeout.c
${CMAKE_SOURCE_DIR}/src/setcpuaffinity.c
${CMAKE_SOURCE_DIR}/src/monotonic.c
${CMAKE_SOURCE_DIR}/src/mt19937-64.c
${CMAKE_SOURCE_DIR}/src/resp_parser.c
${CMAKE_SOURCE_DIR}/src/call_reply.c
${CMAKE_SOURCE_DIR}/src/script_lua.c
${CMAKE_SOURCE_DIR}/src/script.c
${CMAKE_SOURCE_DIR}/src/functions.c
${CMAKE_SOURCE_DIR}/src/scripting_engine.c
${CMAKE_SOURCE_DIR}/src/function_lua.c
${CMAKE_SOURCE_DIR}/src/commands.c
${CMAKE_SOURCE_DIR}/src/strl.c
${CMAKE_SOURCE_DIR}/src/connection.c
${CMAKE_SOURCE_DIR}/src/unix.c
${CMAKE_SOURCE_DIR}/src/server.c
${CMAKE_SOURCE_DIR}/src/logreqres.c)
# valkey-cli
set(VALKEY_CLI_SRCS
${CMAKE_SOURCE_DIR}/src/anet.c
${CMAKE_SOURCE_DIR}/src/adlist.c
${CMAKE_SOURCE_DIR}/src/dict.c
${CMAKE_SOURCE_DIR}/src/valkey-cli.c
${CMAKE_SOURCE_DIR}/src/zmalloc.c
${CMAKE_SOURCE_DIR}/src/release.c
${CMAKE_SOURCE_DIR}/src/ae.c
${CMAKE_SOURCE_DIR}/src/serverassert.c
${CMAKE_SOURCE_DIR}/src/crcspeed.c
${CMAKE_SOURCE_DIR}/src/crccombine.c
${CMAKE_SOURCE_DIR}/src/crc64.c
${CMAKE_SOURCE_DIR}/src/siphash.c
${CMAKE_SOURCE_DIR}/src/crc16.c
${CMAKE_SOURCE_DIR}/src/monotonic.c
${CMAKE_SOURCE_DIR}/src/cli_common.c
${CMAKE_SOURCE_DIR}/src/mt19937-64.c
${CMAKE_SOURCE_DIR}/src/strl.c
${CMAKE_SOURCE_DIR}/src/cli_commands.c)
# valkey-benchmark
set(VALKEY_BENCHMARK_SRCS
${CMAKE_SOURCE_DIR}/src/ae.c
${CMAKE_SOURCE_DIR}/src/anet.c
${CMAKE_SOURCE_DIR}/src/valkey-benchmark.c
${CMAKE_SOURCE_DIR}/src/adlist.c
${CMAKE_SOURCE_DIR}/src/dict.c
${CMAKE_SOURCE_DIR}/src/zmalloc.c
${CMAKE_SOURCE_DIR}/src/serverassert.c
${CMAKE_SOURCE_DIR}/src/release.c
${CMAKE_SOURCE_DIR}/src/crcspeed.c
${CMAKE_SOURCE_DIR}/src/crccombine.c
${CMAKE_SOURCE_DIR}/src/crc64.c
${CMAKE_SOURCE_DIR}/src/siphash.c
${CMAKE_SOURCE_DIR}/src/crc16.c
${CMAKE_SOURCE_DIR}/src/monotonic.c
${CMAKE_SOURCE_DIR}/src/cli_common.c
${CMAKE_SOURCE_DIR}/src/mt19937-64.c
${CMAKE_SOURCE_DIR}/src/strl.c)
# valkey-rdma module
set(VALKEY_RDMA_MODULE_SRCS ${CMAKE_SOURCE_DIR}/src/rdma.c)
# valkey-tls module
set(VALKEY_TLS_MODULE_SRCS ${CMAKE_SOURCE_DIR}/src/tls.c)

View File

@ -1,115 +0,0 @@
# Return the current host distro name, for example: ubuntu, debian, amzn, etc.
function (valkey_get_distro_name DISTRO_NAME)
if (LINUX AND NOT APPLE)
execute_process(
COMMAND /bin/bash "-c" "cat /etc/os-release |grep ^ID=|cut -d = -f 2"
OUTPUT_VARIABLE _OUT_VAR
OUTPUT_STRIP_TRAILING_WHITESPACE)
# clean the output
string(REPLACE "\"" "" _OUT_VAR "${_OUT_VAR}")
string(REPLACE "." "" _OUT_VAR "${_OUT_VAR}")
set(${DISTRO_NAME}
"${_OUT_VAR}"
PARENT_SCOPE)
elseif (APPLE)
set(${DISTRO_NAME}
"darwin"
PARENT_SCOPE)
elseif (IS_FREEBSD)
set(${DISTRO_NAME}
"freebsd"
PARENT_SCOPE)
else ()
set(${DISTRO_NAME}
"unknown"
PARENT_SCOPE)
endif ()
endfunction ()
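# Example usage (illustrative sketch; mirrors the packaging call site elsewhere
# in this build):
#
#   valkey_get_distro_name(DISTRO_NAME)
#   message(STATUS "Current host distro: ${DISTRO_NAME}")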
function (valkey_parse_version OUT_MAJOR OUT_MINOR OUT_PATCH)
# Read and parse package version from version.h file
file(STRINGS ${CMAKE_SOURCE_DIR}/src/version.h VERSION_LINES)
foreach (LINE ${VERSION_LINES})
string(FIND "${LINE}" "#define VALKEY_VERSION " VERSION_STR_POS)
if (VERSION_STR_POS GREATER -1)
string(REPLACE "#define VALKEY_VERSION " "" LINE "${LINE}")
string(REPLACE "\"" "" LINE "${LINE}")
# Change "." to ";" to make it a list
string(REPLACE "." ";" LINE "${LINE}")
list(GET LINE 0 _MAJOR)
list(GET LINE 1 _MINOR)
list(GET LINE 2 _PATCH)
message(STATUS "Valkey version: ${_MAJOR}.${_MINOR}.${_PATCH}")
# Set the output variables
set(${OUT_MAJOR}
${_MAJOR}
PARENT_SCOPE)
set(${OUT_MINOR}
${_MINOR}
PARENT_SCOPE)
set(${OUT_PATCH}
${_PATCH}
PARENT_SCOPE)
endif ()
endforeach ()
endfunction ()
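# Example usage (illustrative; the output variable names here are placeholders):
#
#   valkey_parse_version(VERSION_MAJOR VERSION_MINOR VERSION_PATCH)
#   message(STATUS "Building Valkey ${VERSION_MAJOR}.${VERSION_MINOR}.${VERSION_PATCH}")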
# Given input argument `OPTION_VALUE`, check that the `OPTION_VALUE` is one of the allowed values (one of:
# yes/no/on/off/1/0/module)
#
# Return value:
#
# If `OPTION_VALUE` is valid, return its mapped number in `OUT_ARG_ENUM`, where:
#
# ~~~
# - `no` | `0` | `off` => return `0`
# - `yes` | `1` | `on` => return `1`
# - `module` => return `2`
# ~~~
function (valkey_parse_build_option OPTION_VALUE OUT_ARG_ENUM)
list(APPEND VALID_OPTIONS "yes")
list(APPEND VALID_OPTIONS "1")
list(APPEND VALID_OPTIONS "on")
list(APPEND VALID_OPTIONS "no")
list(APPEND VALID_OPTIONS "0")
list(APPEND VALID_OPTIONS "off")
list(APPEND VALID_OPTIONS "module")
string(TOLOWER "${OPTION_VALUE}" OPTION_VALUE)
list(FIND VALID_OPTIONS "${OPTION_VALUE}" OPT_INDEX)
if (OPT_INDEX EQUAL -1)
message(FATAL_ERROR "Invalid value passed '${OPTION_VALUE}'")
endif ()
if ("${OPTION_VALUE}" STREQUAL "yes"
OR "${OPTION_VALUE}" STREQUAL "1"
OR "${OPTION_VALUE}" STREQUAL "on")
set(${OUT_ARG_ENUM}
1
PARENT_SCOPE)
elseif (
"${OPTION_VALUE}" STREQUAL "no"
OR "${OPTION_VALUE}" STREQUAL "0"
OR "${OPTION_VALUE}" STREQUAL "off")
set(${OUT_ARG_ENUM}
0
PARENT_SCOPE)
else ()
set(${OUT_ARG_ENUM}
2
PARENT_SCOPE)
endif ()
endfunction ()
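# Example usage (illustrative; mirrors the BUILD_TLS / BUILD_RDMA call sites
# elsewhere in this build):
#
#   valkey_parse_build_option(${BUILD_TLS} USE_TLS) # USE_TLS is now 0, 1 or 2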
function (valkey_pkg_config PKGNAME OUT_VARIABLE)
if (NOT FOUND_PKGCONFIG)
# Locate pkg-config once and cache the result so later calls skip the lookup
find_package(PkgConfig REQUIRED)
set(FOUND_PKGCONFIG 1 CACHE INTERNAL "pkg-config located")
endif ()
pkg_check_modules(__PREFIX REQUIRED ${PKGNAME})
message(STATUS "Found library for '${PKGNAME}': ${__PREFIX_LIBRARIES}")
set(${OUT_VARIABLE}
"${__PREFIX_LIBRARIES}"
PARENT_SCOPE)
endfunction ()
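# Example usage (illustrative; mirrors the RDMA call sites elsewhere in this
# build):
#
#   valkey_pkg_config(librdmacm RDMACM_LIBS)
#   valkey_pkg_config(libibverbs IBVERBS_LIBS)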

View File

@ -1,394 +0,0 @@
include(CheckIncludeFiles)
include(ProcessorCount)
include(Utils)
set(CMAKE_LIBRARY_OUTPUT_DIRECTORY "${CMAKE_BINARY_DIR}/lib")
set(CMAKE_RUNTIME_OUTPUT_DIRECTORY "${CMAKE_BINARY_DIR}/bin")
set(CMAKE_ARCHIVE_OUTPUT_DIRECTORY "${CMAKE_BINARY_DIR}/lib")
# Generate compile_commands.json file for IDEs code completion support
set(CMAKE_EXPORT_COMPILE_COMMANDS 1)
processorcount(VALKEY_PROCESSOR_COUNT)
message(STATUS "Processor count: ${VALKEY_PROCESSOR_COUNT}")
# Installed executables will have these permissions
set(VALKEY_EXE_PERMISSIONS
OWNER_EXECUTE
OWNER_WRITE
OWNER_READ
GROUP_EXECUTE
GROUP_READ
WORLD_EXECUTE
WORLD_READ)
set(VALKEY_SERVER_CFLAGS "")
set(VALKEY_SERVER_LDFLAGS "")
# ----------------------------------------------------
# Helper functions & macros
# ----------------------------------------------------
macro (add_valkey_server_compiler_options value)
set(VALKEY_SERVER_CFLAGS "${VALKEY_SERVER_CFLAGS} ${value}")
endmacro ()
macro (add_valkey_server_linker_option value)
list(APPEND VALKEY_SERVER_LDFLAGS ${value})
endmacro ()
macro (get_valkey_server_linker_option return_value)
list(JOIN VALKEY_SERVER_LDFLAGS " " ${return_value})
endmacro ()
set(IS_FREEBSD 0)
if (CMAKE_SYSTEM_NAME MATCHES "^.*BSD$|DragonFly")
message(STATUS "Building for FreeBSD compatible system")
set(IS_FREEBSD 1)
include_directories("/usr/local/include")
add_valkey_server_compiler_options("-DUSE_BACKTRACE")
endif ()
# Helper macro for creating a symbolic link so that: link -> source
macro (valkey_create_symlink source link)
install(
CODE "execute_process( \
COMMAND /bin/bash ${CMAKE_BINARY_DIR}/CreateSymlink.sh \
${source} \
${link} \
)"
COMPONENT "valkey")
endmacro ()
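# Example usage (illustrative; mirrors the symlinks created for the server
# binary elsewhere in this build):
#
#   valkey_create_symlink("valkey-server" "valkey-sentinel")
#   valkey_create_symlink("valkey-server" "valkey-check-rdb")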
# Install a binary
macro (valkey_install_bin target)
# Install the target binary
install(
TARGETS ${target}
DESTINATION ${CMAKE_INSTALL_BINDIR}
PERMISSIONS ${VALKEY_EXE_PERMISSIONS}
COMPONENT "valkey")
endmacro ()
# Helper function that defines, builds and installs `target`. In addition, it creates a symbolic link between the
# target and `link_name`
macro (valkey_build_and_install_bin target sources ld_flags libs link_name)
add_executable(${target} ${sources})
if (USE_JEMALLOC
OR USE_TCMALLOC
OR USE_TCMALLOC_MINIMAL)
# Using custom allocator
target_link_libraries(${target} ${ALLOCATOR_LIB})
endif ()
# Place this line last to ensure that ${ld_flags} is placed last on the linker line
target_link_libraries(${target} ${libs} ${ld_flags})
target_link_libraries(${target} hiredis)
if (USE_TLS)
# Add required libraries needed for TLS
target_link_libraries(${target} OpenSSL::SSL hiredis_ssl)
endif ()
if (IS_FREEBSD)
target_link_libraries(${target} execinfo)
endif ()
# Enable all warnings + fail on warning
target_compile_options(${target} PRIVATE -Werror -Wall)
# Install the binary and create a redis compatibility symbolic link
valkey_install_bin(${target})
valkey_create_symlink(${target} ${link_name})
endmacro ()
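# Example usage (illustrative; mirrors the valkey-server target definition
# elsewhere in this build):
#
#   valkey_build_and_install_bin(valkey-server "${VALKEY_SERVER_SRCS}"
#       "${VALKEY_SERVER_LDFLAGS}" "${SERVER_LIBS}" "redis-server")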
# Helper macro that defines, builds and installs the `target` module.
macro (valkey_build_and_install_module target sources ld_flags libs)
add_library(${target} SHARED ${sources})
if (USE_JEMALLOC)
# Using jemalloc
target_link_libraries(${target} jemalloc)
endif ()
# Place this line last to ensure that ${ld_flags} is placed last on the linker line
target_link_libraries(${target} ${libs} ${ld_flags})
if (USE_TLS)
# Add required libraries needed for TLS
target_link_libraries(${target} OpenSSL::SSL hiredis_ssl)
endif ()
if (IS_FREEBSD)
target_link_libraries(${target} execinfo)
endif ()
# Install the module binary (no symbolic link is created for modules)
valkey_install_bin(${target})
endmacro ()
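# Example usage (hypothetical; the module name and source list are placeholders):
#
#   valkey_build_and_install_module(my-module "${MY_MODULE_SRCS}" "" "")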
# Determine if we are building in Release or Debug mode
if (CMAKE_BUILD_TYPE MATCHES Debug OR CMAKE_BUILD_TYPE MATCHES DebugFull)
set(VALKEY_DEBUG_BUILD 1)
set(VALKEY_RELEASE_BUILD 0)
message(STATUS "Building in debug mode")
else ()
set(VALKEY_DEBUG_BUILD 0)
set(VALKEY_RELEASE_BUILD 1)
message(STATUS "Building in release mode")
endif ()
# ----------------------------------------------------
# Helper functions - end
# ----------------------------------------------------
# ----------------------------------------------------
# Build options (allocator, tls, rdma et al)
# ----------------------------------------------------
if (NOT BUILD_MALLOC)
if (APPLE)
set(BUILD_MALLOC "libc")
elseif (UNIX)
set(BUILD_MALLOC "jemalloc")
endif ()
endif ()
# The user may pass a different allocator library using -DBUILD_MALLOC=<libname>; make sure it is a valid value
if (BUILD_MALLOC)
if ("${BUILD_MALLOC}" STREQUAL "jemalloc")
set(MALLOC_LIB "jemalloc")
set(ALLOCATOR_LIB "jemalloc")
add_valkey_server_compiler_options("-DUSE_JEMALLOC")
set(USE_JEMALLOC 1)
elseif ("${BUILD_MALLOC}" STREQUAL "libc")
set(MALLOC_LIB "libc")
elseif ("${BUILD_MALLOC}" STREQUAL "tcmalloc")
set(MALLOC_LIB "tcmalloc")
valkey_pkg_config(libtcmalloc ALLOCATOR_LIB)
add_valkey_server_compiler_options("-DUSE_TCMALLOC")
set(USE_TCMALLOC 1)
elseif ("${BUILD_MALLOC}" STREQUAL "tcmalloc_minimal")
set(MALLOC_LIB "tcmalloc_minimal")
valkey_pkg_config(libtcmalloc_minimal ALLOCATOR_LIB)
add_valkey_server_compiler_options("-DUSE_TCMALLOC")
set(USE_TCMALLOC_MINIMAL 1)
else ()
message(FATAL_ERROR "BUILD_MALLOC can be one of: jemalloc, libc, tcmalloc or tcmalloc_minimal")
endif ()
endif ()
message(STATUS "Using ${MALLOC_LIB}")
# TLS support
if (BUILD_TLS)
valkey_parse_build_option(${BUILD_TLS} USE_TLS)
if (USE_TLS EQUAL 1)
# Only search for OpenSSL if needed
find_package(OpenSSL REQUIRED)
message(STATUS "OpenSSL include dir: ${OPENSSL_INCLUDE_DIR}")
message(STATUS "OpenSSL libraries: ${OPENSSL_LIBRARIES}")
include_directories(${OPENSSL_INCLUDE_DIR})
endif ()
if (USE_TLS EQUAL 1)
add_valkey_server_compiler_options("-DUSE_OPENSSL=1")
add_valkey_server_compiler_options("-DBUILD_TLS_MODULE=0")
else ()
# An invalid value was provided for BUILD_TLS. Warn the user and disable TLS
message(WARNING "BUILD_TLS can be one of: [ON | OFF | 1 | 0], but '${BUILD_TLS}' was provided")
message(STATUS "TLS support is disabled")
set(USE_TLS 0)
endif ()
else ()
# By default, TLS is disabled
message(STATUS "TLS is disabled")
set(USE_TLS 0)
endif ()
if (BUILD_RDMA)
set(BUILD_RDMA_MODULE 0)
# RDMA support (Linux only)
if (LINUX AND NOT APPLE)
valkey_parse_build_option(${BUILD_RDMA} USE_RDMA)
find_package(PkgConfig REQUIRED)
# Locate librdmacm & libibverbs, fail if we can't find them
valkey_pkg_config(librdmacm RDMACM_LIBS)
valkey_pkg_config(libibverbs IBVERBS_LIBS)
message(STATUS "${RDMACM_LIBS};${IBVERBS_LIBS}")
list(APPEND RDMA_LIBS "${RDMACM_LIBS};${IBVERBS_LIBS}")
if (USE_RDMA EQUAL 2) # Module
message(STATUS "Building RDMA as module")
add_valkey_server_compiler_options("-DUSE_RDMA=2")
set(BUILD_RDMA_MODULE 2)
elseif (USE_RDMA EQUAL 1) # Builtin
message(STATUS "Building RDMA as builtin")
add_valkey_server_compiler_options("-DUSE_RDMA=1")
add_valkey_server_compiler_options("-DBUILD_RDMA_MODULE=0")
list(APPEND SERVER_LIBS "${RDMA_LIBS}")
endif ()
else ()
message(WARNING "RDMA is only supported on Linux platforms")
endif ()
else ()
# By default, RDMA is disabled
message(STATUS "RDMA is disabled")
set(USE_RDMA 0)
endif ()
set(BUILDING_ARM64 0)
set(BUILDING_ARM32 0)
if ("${CMAKE_SYSTEM_PROCESSOR}" STREQUAL "arm64")
set(BUILDING_ARM64 1)
endif ()
if ("${CMAKE_SYSTEM_PROCESSOR}" STREQUAL "arm")
set(BUILDING_ARM32 1)
endif ()
message(STATUS "Building on ${CMAKE_HOST_SYSTEM_NAME}")
if (BUILDING_ARM64)
message(STATUS "Compiling valkey for ARM64")
add_valkey_server_linker_option("-funwind-tables")
endif ()
if (APPLE)
add_valkey_server_linker_option("-rdynamic")
add_valkey_server_linker_option("-ldl")
elseif (UNIX)
add_valkey_server_linker_option("-rdynamic")
add_valkey_server_linker_option("-pthread")
add_valkey_server_linker_option("-ldl")
add_valkey_server_linker_option("-lm")
endif ()
if (VALKEY_DEBUG_BUILD)
# Debug build: enable "-fno-omit-frame-pointer"
add_valkey_server_compiler_options("-fno-omit-frame-pointer")
endif ()
# Check for Atomic
check_include_files(stdatomic.h HAVE_C11_ATOMIC)
if (HAVE_C11_ATOMIC)
add_valkey_server_compiler_options("-std=gnu11")
else ()
add_valkey_server_compiler_options("-std=c99")
endif ()
# Sanitizer
if (BUILD_SANITIZER)
# Common CFLAGS
list(APPEND VALKEY_SANITAIZER_CFLAGS "-fno-sanitize-recover=all")
list(APPEND VALKEY_SANITAIZER_CFLAGS "-fno-omit-frame-pointer")
if ("${BUILD_SANITIZER}" STREQUAL "address")
list(APPEND VALKEY_SANITAIZER_CFLAGS "-fsanitize=address")
list(APPEND VALKEY_SANITAIZER_LDFLAGS "-fsanitize=address")
elseif ("${BUILD_SANITIZER}" STREQUAL "thread")
list(APPEND VALKEY_SANITAIZER_CFLAGS "-fsanitize=thread")
list(APPEND VALKEY_SANITAIZER_LDFLAGS "-fsanitize=thread")
elseif ("${BUILD_SANITIZER}" STREQUAL "undefined")
list(APPEND VALKEY_SANITAIZER_CFLAGS "-fsanitize=undefined")
list(APPEND VALKEY_SANITAIZER_LDFLAGS "-fsanitize=undefined")
else ()
message(FATAL_ERROR "Unknown sanitizer: ${BUILD_SANITIZER}")
endif ()
endif ()
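# Example (illustrative): configure with -DBUILD_SANITIZER=address (or thread /
# undefined) to add the matching -fsanitize compile and link flags above.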
include_directories("${CMAKE_SOURCE_DIR}/deps/hiredis")
include_directories("${CMAKE_SOURCE_DIR}/deps/linenoise")
include_directories("${CMAKE_SOURCE_DIR}/deps/lua/src")
include_directories("${CMAKE_SOURCE_DIR}/deps/hdr_histogram")
include_directories("${CMAKE_SOURCE_DIR}/deps/fpconv")
add_subdirectory("${CMAKE_SOURCE_DIR}/deps")
# Update include paths for the allocator
if (USE_JEMALLOC)
include_directories("${CMAKE_SOURCE_DIR}/deps/jemalloc/include")
endif ()
# Common compiler flags
add_valkey_server_compiler_options("-pedantic")
# ----------------------------------------------------
# Build options (allocator, tls, rdma et al) - end
# ----------------------------------------------------
# -------------------------------------------------
# Code Generation section
# -------------------------------------------------
find_program(PYTHON_EXE python3)
if (PYTHON_EXE)
# Python based code generation
message(STATUS "Found python3: ${PYTHON_EXE}")
# Rule for generating commands.def file from json files
message(STATUS "Adding target generate_commands_def")
file(GLOB COMMAND_FILES_JSON "${CMAKE_SOURCE_DIR}/src/commands/*.json")
add_custom_command(
OUTPUT ${CMAKE_BINARY_DIR}/commands_def_generated
DEPENDS ${COMMAND_FILES_JSON}
COMMAND ${PYTHON_EXE} ${CMAKE_SOURCE_DIR}/utils/generate-command-code.py
COMMAND touch ${CMAKE_BINARY_DIR}/commands_def_generated
WORKING_DIRECTORY "${CMAKE_SOURCE_DIR}/src")
add_custom_target(generate_commands_def DEPENDS ${CMAKE_BINARY_DIR}/commands_def_generated)
# Rule for generating fmtargs.h
message(STATUS "Adding target generate_fmtargs_h")
add_custom_command(
OUTPUT ${CMAKE_BINARY_DIR}/fmtargs_generated
DEPENDS ${CMAKE_SOURCE_DIR}/utils/generate-fmtargs.py
COMMAND sed '/Everything/,$$d' fmtargs.h > fmtargs.h.tmp
COMMAND ${PYTHON_EXE} ${CMAKE_SOURCE_DIR}/utils/generate-fmtargs.py >> fmtargs.h.tmp
COMMAND mv fmtargs.h.tmp fmtargs.h
COMMAND touch ${CMAKE_BINARY_DIR}/fmtargs_generated
WORKING_DIRECTORY "${CMAKE_SOURCE_DIR}/src")
add_custom_target(generate_fmtargs_h DEPENDS ${CMAKE_BINARY_DIR}/fmtargs_generated)
# Rule for generating test_files.h
message(STATUS "Adding target generate_test_files_h")
file(GLOB UNIT_TEST_SRCS "${CMAKE_SOURCE_DIR}/src/unit/*.c")
add_custom_command(
OUTPUT ${CMAKE_BINARY_DIR}/test_files_generated
DEPENDS "${UNIT_TEST_SRCS};${CMAKE_SOURCE_DIR}/utils/generate-unit-test-header.py"
COMMAND ${PYTHON_EXE} ${CMAKE_SOURCE_DIR}/utils/generate-unit-test-header.py
COMMAND touch ${CMAKE_BINARY_DIR}/test_files_generated
WORKING_DIRECTORY "${CMAKE_SOURCE_DIR}/src")
add_custom_target(generate_test_files_h DEPENDS ${CMAKE_BINARY_DIR}/test_files_generated)
else ()
# Fake targets
add_custom_target(generate_commands_def)
add_custom_target(generate_fmtargs_h)
add_custom_target(generate_test_files_h)
endif ()
# Generate release.h file (always)
add_custom_target(
release_header
COMMAND sh -c '${CMAKE_SOURCE_DIR}/src/mkreleasehdr.sh'
WORKING_DIRECTORY "${CMAKE_SOURCE_DIR}/src")
# -------------------------------------------------
# Code Generation section - end
# -------------------------------------------------
# ----------------------------------------------------------
# All our source files are defined in SourceFiles.cmake file
# ----------------------------------------------------------
include(SourceFiles)
# Clear the below variables from the cache
unset(CMAKE_C_FLAGS CACHE)
unset(VALKEY_SERVER_LDFLAGS CACHE)
unset(VALKEY_SERVER_CFLAGS CACHE)
unset(PYTHON_EXE CACHE)
unset(HAVE_C11_ATOMIC CACHE)
unset(USE_TLS CACHE)
unset(USE_RDMA CACHE)
unset(BUILD_TLS CACHE)
unset(BUILD_RDMA CACHE)
unset(BUILD_MALLOC CACHE)
unset(USE_JEMALLOC CACHE)
unset(BUILD_TLS_MODULE CACHE)
unset(BUILD_TLS_BUILTIN CACHE)

28
deps/CMakeLists.txt vendored
View File

@ -1,28 +0,0 @@
if (USE_JEMALLOC)
add_subdirectory(jemalloc)
endif ()
add_subdirectory(lua)
# Set hiredis options. We need to disable the defaults set in its option(..) calls; we do this by setting them in the CACHE
set(BUILD_SHARED_LIBS
OFF
CACHE BOOL "Build shared libraries")
set(DISABLE_TESTS
ON
CACHE BOOL "If tests should be compiled or not")
if (USE_TLS) # Module or no module
message(STATUS "Building hiredis_ssl")
set(ENABLE_SSL
ON
CACHE BOOL "Should we test SSL connections")
endif ()
add_subdirectory(hiredis)
add_subdirectory(linenoise)
add_subdirectory(fpconv)
add_subdirectory(hdr_histogram)
# Clear any cached variables passed to hiredis from the cache
unset(BUILD_SHARED_LIBS CACHE)
unset(DISABLE_TESTS CACHE)
unset(ENABLE_SSL CACHE)

7
deps/Makefile vendored
View File

@ -42,7 +42,6 @@ distclean:
-(cd jemalloc && [ -f Makefile ] && $(MAKE) distclean) > /dev/null || true
-(cd hdr_histogram && $(MAKE) clean) > /dev/null || true
-(cd fpconv && $(MAKE) clean) > /dev/null || true
-(cd fast_float_c_interface && $(MAKE) clean) > /dev/null || true
-(rm -f .make-*)
.PHONY: distclean
@ -117,9 +116,3 @@ jemalloc: .make-prerequisites
cd jemalloc && $(MAKE) lib/libjemalloc.a
.PHONY: jemalloc
fast_float_c_interface: .make-prerequisites
@printf '%b %b\n' $(MAKECOLOR)MAKE$(ENDCOLOR) $(BINCOLOR)$@$(ENDCOLOR)
cd fast_float_c_interface && $(MAKE)
.PHONY: fast_float_c_interface

16
deps/README.md vendored
View File

@ -6,7 +6,6 @@ should be provided by the operating system.
* **linenoise** is a readline replacement. It is developed by the same authors of Valkey but is managed as a separate project and updated as needed.
* **lua** is Lua 5.1 with minor changes for security and additional libraries.
* **hdr_histogram** Used for per-command latency tracking histograms.
* **fast_float** is a replacement for strtod to convert strings to floats efficiently.
How to upgrade the above dependencies
===
@ -95,7 +94,6 @@ and our version:
1. Makefile is modified to allow a different compiler than GCC.
2. We have the implementation source code, and directly link to the following external libraries: `lua_cjson.o`, `lua_struct.o`, `lua_cmsgpack.o` and `lua_bit.o`.
3. There is a security fix in `ldo.c`, line 498: The check for `LUA_SIGNATURE[0]` is removed in order to avoid direct bytecode execution.
4. In `lstring.c`, the luaS_newlstr function's hash calculation has been upgraded from a simple hash function to MurmurHash3, implemented within the same file, to enhance performance, particularly for operations involving large strings.
Hdr_Histogram
---
@ -106,17 +104,3 @@ We use a customized version based on master branch commit e4448cf6d1cd08fff51981
2. Copy updated files from newer version onto files in /hdr_histogram.
3. Apply the changes from 1 above to the updated files.
fast_float
---
The fast_float library provides fast header-only implementations for the C++ from_chars functions for `float` and `double` types as well as integer types. These functions convert ASCII strings representing decimal values (e.g., `1.3e10`) into binary types. The functions are much faster than comparable number-parsing functions from existing C++ standard libraries.
Specifically, `fast_float` provides the following function to parse floating-point numbers with a C++17-like syntax (the library itself only requires C++11):
template <typename T, typename UC = char, typename = FASTFLOAT_ENABLE_IF(is_supported_float_type<T>())>
from_chars_result_t<UC> from_chars(UC const *first, UC const *last, T &value, chars_format fmt = chars_format::general);
To upgrade the library,
1. Check out https://github.com/fastfloat/fast_float/tree/main
2. cd fast_float
3. Invoke "python3 ./script/amalgamate.py --output fast_float.h"
4. Copy the fast_float.h file to "deps/fast_float/".

File diff suppressed because it is too large

View File

@ -1,37 +0,0 @@
CCCOLOR:="\033[34m"
SRCCOLOR:="\033[33m"
ENDCOLOR:="\033[0m"
CXX?=c++
# we need = instead of := so that $@ in QUIET_CXX gets evaluated in the rule and is assigned an appropriate value.
TEMP:=$(CXX)
QUIET_CXX=@printf ' %b %b\n' $(CCCOLOR)C++$(ENDCOLOR) $(SRCCOLOR)$@$(ENDCOLOR) 1>&2;
CXX=$(QUIET_CXX)$(TEMP)
WARN=-Wall -W -Wno-missing-field-initializers
STD=-pedantic -std=c++11
OPT?=-O3
CLANG := $(findstring clang,$(shell sh -c '$(CC) --version | head -1'))
ifeq ($(OPT),-O3)
ifeq (clang,$(CLANG))
OPT+=-flto
else
OPT+=-flto=auto -ffat-lto-objects
endif
endif
# 1) Today src/Makefile passes -m32 flag for explicit 32-bit build on 64-bit machine, via CFLAGS. For 32-bit build on
# 32-bit machine and 64-bit on 64-bit machine, CFLAGS are empty. No other flags are set that can conflict with C++,
# therefore let's use CFLAGS without changes for now.
# 2) FASTFLOAT_ALLOWS_LEADING_PLUS allows +inf to be parsed as inf, instead of error.
CXXFLAGS=$(STD) $(OPT) $(WARN) -static -fPIC -fno-exceptions $(CFLAGS) -D FASTFLOAT_ALLOWS_LEADING_PLUS
.PHONY: all clean
all: fast_float_strtod.o
clean:
rm -f *.o || true;

View File

@ -1,24 +0,0 @@
/*
* Copyright Valkey Contributors.
* All rights reserved.
* SPDX-License-Identifier: BSD 3-Clause
*/
#include "../fast_float/fast_float.h"
#include <cerrno>
#include <cstring> /* strlen() */
extern "C"
{
double fast_float_strtod(const char *str, const char** endptr)
{
double temp = 0;
auto answer = fast_float::from_chars(str, str + strlen(str), temp);
if (answer.ec != std::errc()) {
errno = (answer.ec == std::errc::result_out_of_range) ? ERANGE : EINVAL;
}
if (endptr) {
*endptr = answer.ptr;
}
return temp;
}
}

View File

@ -1,4 +0,0 @@
project(fpconv)
set(SRCS "${CMAKE_CURRENT_LIST_DIR}/fpconv_dtoa.c" "${CMAKE_CURRENT_LIST_DIR}/fpconv_dtoa.h")
add_library(fpconv STATIC ${SRCS})

View File

@ -6,7 +6,7 @@
* [1] https://www.cs.tufts.edu/~nr/cs257/archive/florian-loitsch/printf.pdf
* ----------------------------------------------------------------------------
*
* Copyright (c) 2021, Redis Ltd.
* Copyright (c) 2021, Redis Labs
* Copyright (c) 2013-2019, night-shift <as.smljk at gmail dot com>
* Copyright (c) 2009, Florian Loitsch < florian.loitsch at inria dot fr >
* All rights reserved.

View File

@ -1,7 +0,0 @@
project(hdr_histogram)
set(SRCS "${CMAKE_CURRENT_LIST_DIR}/hdr_histogram.c" "${CMAKE_CURRENT_LIST_DIR}/hdr_histogram.h"
"${CMAKE_CURRENT_LIST_DIR}/hdr_atomic.h" "${CMAKE_CURRENT_LIST_DIR}/hdr_redis_malloc.h")
add_library(hdr_histogram STATIC ${SRCS})
target_compile_definitions(hdr_histogram PRIVATE HDR_MALLOC_INCLUDE=\"hdr_redis_malloc.h\")

View File

@ -1,13 +1,13 @@
#ifndef HDR_MALLOC_H__
#define HDR_MALLOC_H__
void *valkey_malloc(size_t size);
void *zmalloc(size_t size);
void *zcalloc_num(size_t num, size_t size);
void *valkey_realloc(void *ptr, size_t size);
void valkey_free(void *ptr);
void *zrealloc(void *ptr, size_t size);
void zfree(void *ptr);
#define hdr_malloc valkey_malloc
#define hdr_malloc zmalloc
#define hdr_calloc zcalloc_num
#define hdr_realloc valkey_realloc
#define hdr_free valkey_free
#define hdr_realloc zrealloc
#define hdr_free zfree
#endif

View File

@ -112,7 +112,7 @@ jobs:
run: $GITHUB_WORKSPACE/test.sh
freebsd:
runs-on: macos-13
runs-on: macos-12
name: FreeBSD
steps:
- uses: actions/checkout@v3

View File

@ -1,4 +1,4 @@
Copyright (c) 2009-2011, Redis Ltd.
Copyright (c) 2009-2011, Salvatore Sanfilippo <antirez at gmail dot com>
Copyright (c) 2010-2011, Pieter Noordhuis <pcnoordhuis at gmail dot com>
All rights reserved.

View File

@ -1,5 +1,5 @@
# Hiredis Makefile
# Copyright (C) 2010-2011 Redis Ltd.
# Copyright (C) 2010-2011 Salvatore Sanfilippo <antirez at gmail dot com>
# Copyright (C) 2010-2011 Pieter Noordhuis <pcnoordhuis at gmail dot com>
# This file is released under the BSD license, see the COPYING file

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2009-2011, Redis Ltd.
* Copyright (c) 2009-2011, Salvatore Sanfilippo <antirez at gmail dot com>
* Copyright (c) 2010-2011, Pieter Noordhuis <pcnoordhuis at gmail dot com>
*
* All rights reserved.

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2009-2011, Redis Ltd.
* Copyright (c) 2009-2011, Salvatore Sanfilippo <antirez at gmail dot com>
* Copyright (c) 2010-2011, Pieter Noordhuis <pcnoordhuis at gmail dot com>
*
* All rights reserved.

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2009-2011, Redis Ltd.
* Copyright (c) 2009-2011, Salvatore Sanfilippo <antirez at gmail dot com>
* Copyright (c) 2010-2011, Pieter Noordhuis <pcnoordhuis at gmail dot com>
*
* All rights reserved.

2
deps/hiredis/dict.c vendored
View File

@ -5,7 +5,7 @@
* tables of power of two in size are used, collisions are handled by
* chaining. See the source code for more information... :)
*
* Copyright (c) 2006-2010, Redis Ltd.
* Copyright (c) 2006-2010, Salvatore Sanfilippo <antirez at gmail dot com>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without

2
deps/hiredis/dict.h vendored
View File

@ -5,7 +5,7 @@
* tables of power of two in size are used, collisions are handled by
* chaining. See the source code for more information... :)
*
* Copyright (c) 2006-2010, Redis Ltd.
* Copyright (c) 2006-2010, Salvatore Sanfilippo <antirez at gmail dot com>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2020, Redis Ltd.
* Copyright (c) 2020, Salvatore Sanfilippo <antirez at gmail dot com>
* Copyright (c) 2020, Pieter Noordhuis <pcnoordhuis at gmail dot com>
* Copyright (c) 2020, Matt Stancliff <matt at genges dot com>,
* Jan-Erik Rediger <janerik at fnordig dot com>

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2009-2011, Redis Ltd.
* Copyright (c) 2009-2011, Salvatore Sanfilippo <antirez at gmail dot com>
* Copyright (c) 2010-2014, Pieter Noordhuis <pcnoordhuis at gmail dot com>
* Copyright (c) 2015, Matt Stancliff <matt at genges dot com>,
* Jan-Erik Rediger <janerik at fnordig dot com>

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2009-2011, Redis Ltd.
* Copyright (c) 2009-2011, Salvatore Sanfilippo <antirez at gmail dot com>
* Copyright (c) 2010-2014, Pieter Noordhuis <pcnoordhuis at gmail dot com>
* Copyright (c) 2015, Matt Stancliff <matt at genges dot com>,
* Jan-Erik Rediger <janerik at fnordig dot com>

View File

@ -1,6 +1,6 @@
/*
* Copyright (c) 2019, Redis Ltd.
* Copyright (c) 2019, Redis Labs
*
* All rights reserved.
*

2
deps/hiredis/net.c vendored
View File

@ -1,6 +1,6 @@
/* Extracted from anet.c to work properly with Hiredis error reporting.
*
* Copyright (c) 2009-2011, Redis Ltd.
* Copyright (c) 2009-2011, Salvatore Sanfilippo <antirez at gmail dot com>
* Copyright (c) 2010-2014, Pieter Noordhuis <pcnoordhuis at gmail dot com>
* Copyright (c) 2015, Matt Stancliff <matt at genges dot com>,
* Jan-Erik Rediger <janerik at fnordig dot com>

2
deps/hiredis/net.h vendored
View File

@ -1,6 +1,6 @@
/* Extracted from anet.c to work properly with Hiredis error reporting.
*
* Copyright (c) 2009-2011, Redis Ltd.
* Copyright (c) 2009-2011, Salvatore Sanfilippo <antirez at gmail dot com>
* Copyright (c) 2010-2014, Pieter Noordhuis <pcnoordhuis at gmail dot com>
* Copyright (c) 2015, Matt Stancliff <matt at genges dot com>,
* Jan-Erik Rediger <janerik at fnordig dot com>

2
deps/hiredis/read.c vendored
View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2009-2011, Redis Ltd.
* Copyright (c) 2009-2011, Salvatore Sanfilippo <antirez at gmail dot com>
* Copyright (c) 2010-2011, Pieter Noordhuis <pcnoordhuis at gmail dot com>
*
* All rights reserved.

2
deps/hiredis/read.h vendored
View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2009-2011, Redis Ltd.
* Copyright (c) 2009-2011, Salvatore Sanfilippo <antirez at gmail dot com>
* Copyright (c) 2010-2011, Pieter Noordhuis <pcnoordhuis at gmail dot com>
*
* All rights reserved.

3
deps/hiredis/sds.c vendored
View File

@ -1,7 +1,8 @@
/* SDSLib 2.0 -- A C dynamic strings library
*
* Copyright (c) 2006-2015, Redis Ltd.
* Copyright (c) 2006-2015, Salvatore Sanfilippo <antirez at gmail dot com>
* Copyright (c) 2015, Oran Agra
* Copyright (c) 2015, Redis Labs, Inc
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without

3
deps/hiredis/sds.h vendored
View File

@ -1,7 +1,8 @@
/* SDSLib 2.0 -- A C dynamic strings library
*
* Copyright (c) 2006-2015, Redis Ltd.
* Copyright (c) 2006-2015, Salvatore Sanfilippo <antirez at gmail dot com>
* Copyright (c) 2015, Oran Agra
* Copyright (c) 2015, Redis Labs, Inc
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without

View File

@ -1,7 +1,8 @@
/* SDSLib 2.0 -- A C dynamic strings library
*
* Copyright (c) 2006-2015, Redis Ltd.
* Copyright (c) 2006-2015, Salvatore Sanfilippo <antirez at gmail dot com>
* Copyright (c) 2015, Oran Agra
* Copyright (c) 2015, Redis Labs, Inc
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without

4
deps/hiredis/ssl.c vendored
View File

@ -1,7 +1,7 @@
/*
* Copyright (c) 2009-2011, Redis Ltd.
* Copyright (c) 2009-2011, Salvatore Sanfilippo <antirez at gmail dot com>
* Copyright (c) 2010-2011, Pieter Noordhuis <pcnoordhuis at gmail dot com>
* Copyright (c) 2019, Redis Ltd.
* Copyright (c) 2019, Redis Labs
*
* All rights reserved.
*

View File

@ -1,32 +0,0 @@
project(jemalloc)
# Build jemalloc using configure && make install
set(JEMALLOC_INSTALL_DIR ${CMAKE_BINARY_DIR}/jemalloc-build)
set(JEMALLOC_SRC_DIR ${CMAKE_CURRENT_LIST_DIR})
if (NOT EXISTS ${JEMALLOC_INSTALL_DIR}/lib/libjemalloc.a)
message(STATUS "Building jemalloc (custom build)")
message(STATUS "JEMALLOC_SRC_DIR = ${JEMALLOC_SRC_DIR}")
message(STATUS "JEMALLOC_INSTALL_DIR = ${JEMALLOC_INSTALL_DIR}")
execute_process(
COMMAND sh -c "${JEMALLOC_SRC_DIR}/configure --disable-cxx \
--with-version=5.3.0-0-g0 --with-lg-quantum=3 --disable-cache-oblivious --with-jemalloc-prefix=je_ \
--enable-static --disable-shared --prefix=${JEMALLOC_INSTALL_DIR}"
WORKING_DIRECTORY ${JEMALLOC_SRC_DIR} RESULTS_VARIABLE CONFIGURE_RESULT)
if (NOT ${CONFIGURE_RESULT} EQUAL 0)
message(FATAL_ERROR "Jemalloc configure failed")
endif ()
execute_process(COMMAND make -j${VALKEY_PROCESSOR_COUNT} lib/libjemalloc.a install
WORKING_DIRECTORY "${JEMALLOC_SRC_DIR}" RESULTS_VARIABLE MAKE_RESULT)
if (NOT ${MAKE_RESULT} EQUAL 0)
message(FATAL_ERROR "Jemalloc build failed")
endif ()
endif ()
# Import the compiled library as a CMake target
add_library(jemalloc STATIC IMPORTED GLOBAL)
set_target_properties(jemalloc PROPERTIES IMPORTED_LOCATION "${JEMALLOC_INSTALL_DIR}/lib/libjemalloc.a"
INCLUDE_DIRECTORIES "${JEMALLOC_INSTALL_DIR}/include")
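# Consumers link the imported target like any regular CMake library, e.g.
# (illustrative): target_link_libraries(valkey-server jemalloc)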

View File

@ -337,4 +337,55 @@ imalloc_fastpath(size_t size, void *(fallback_alloc)(size_t)) {
return fallback_alloc(size);
}
JEMALLOC_ALWAYS_INLINE int
iget_defrag_hint(tsdn_t *tsdn, void* ptr) {
int defrag = 0;
emap_alloc_ctx_t alloc_ctx;
emap_alloc_ctx_lookup(tsdn, &arena_emap_global, ptr, &alloc_ctx);
if (likely(alloc_ctx.slab)) {
/* Small allocation. */
edata_t *slab = emap_edata_lookup(tsdn, &arena_emap_global, ptr);
arena_t *arena = arena_get_from_edata(slab);
szind_t binind = edata_szind_get(slab);
unsigned binshard = edata_binshard_get(slab);
bin_t *bin = arena_get_bin(arena, binind, binshard);
malloc_mutex_lock(tsdn, &bin->lock);
arena_dalloc_bin_locked_info_t info;
arena_dalloc_bin_locked_begin(&info, binind);
/* Don't bother moving allocations from the slab currently used for new allocations */
if (slab != bin->slabcur) {
int free_in_slab = edata_nfree_get(slab);
if (free_in_slab) {
const bin_info_t *bin_info = &bin_infos[binind];
/* Find number of non-full slabs and the number of regs in them */
unsigned long curslabs = 0;
size_t curregs = 0;
/* Run on all bin shards (usually just one) */
for (uint32_t i=0; i< bin_info->n_shards; i++) {
bin_t *bb = arena_get_bin(arena, binind, i);
curslabs += bb->stats.nonfull_slabs;
/* Deduct the regs in full slabs (they're not part of the game) */
unsigned long full_slabs = bb->stats.curslabs - bb->stats.nonfull_slabs;
curregs += bb->stats.curregs - full_slabs * bin_info->nregs;
if (bb->slabcur) {
/* Remove slabcur from the overall utilization (not a candidate to move from) */
curregs -= bin_info->nregs - edata_nfree_get(bb->slabcur);
curslabs -= 1;
}
}
/* Compare the utilization ratio of the slab in question to the total average
* among non-full slabs. To avoid precision loss in division, we do that by
* extrapolating the usage of the slab as if all slabs have the same usage.
* If this slab is less used than the average, we'll prefer to move the data
* to hopefully more used ones. To avoid stagnation when all slabs have the same
* utilization, we give additional 12.5% weight to the decision to defrag. */
defrag = (bin_info->nregs - free_in_slab) * curslabs <= curregs + curregs / 8;
}
}
arena_dalloc_bin_locked_finish(tsdn, arena, bin, &info);
malloc_mutex_unlock(tsdn, &bin->lock);
}
return defrag;
}
#endif /* JEMALLOC_INTERNAL_INLINES_C_H */

View File

@ -147,3 +147,7 @@
#else
# define JEMALLOC_SYS_NOTHROW JEMALLOC_NOTHROW
#endif
/* This version of Jemalloc, modified for Redis, has the je_get_defrag_hint()
* function. */
#define JEMALLOC_FRAG_HINT

View File

@ -4474,3 +4474,12 @@ jemalloc_postfork_child(void) {
}
/******************************************************************************/
/* Helps the application decide if a pointer is worth re-allocating in order to reduce fragmentation.
* returns 1 if the allocation should be moved, and 0 if the allocation should be kept.
* If the application decides to re-allocate it should use MALLOCX_TCACHE_NONE when doing so. */
JEMALLOC_EXPORT int JEMALLOC_NOTHROW
get_defrag_hint(void* ptr) {
assert(ptr != NULL);
return iget_defrag_hint(TSDN_NULL, ptr);
}

View File

@ -1,4 +0,0 @@
project(linenoise)
set(SRCS "${CMAKE_CURRENT_LIST_DIR}/linenoise.c" "${CMAKE_CURRENT_LIST_DIR}/linenoise.h")
add_library(linenoise STATIC ${SRCS})

View File

@ -10,7 +10,7 @@
*
* ------------------------------------------------------------------------
*
* Copyright (c) 2010-2016, Redis Ltd.
* Copyright (c) 2010-2016, Salvatore Sanfilippo <antirez at gmail dot com>
* Copyright (c) 2010-2013, Pieter Noordhuis <pcnoordhuis at gmail dot com>
*
* All rights reserved.

View File

@ -7,7 +7,7 @@
*
* ------------------------------------------------------------------------
*
* Copyright (c) 2010-2014, Redis Ltd.
* Copyright (c) 2010-2014, Salvatore Sanfilippo <antirez at gmail dot com>
* Copyright (c) 2010-2013, Pieter Noordhuis <pcnoordhuis at gmail dot com>
*
* All rights reserved.

View File

@ -1,53 +0,0 @@
project(lualib)
include(CheckFunctionExists)
set(LUA_SRC_DIR "${CMAKE_CURRENT_LIST_DIR}/src")
set(LUA_SRCS
${LUA_SRC_DIR}/fpconv.c
${LUA_SRC_DIR}/lbaselib.c
${LUA_SRC_DIR}/lmathlib.c
${LUA_SRC_DIR}/lstring.c
${LUA_SRC_DIR}/lparser.c
${LUA_SRC_DIR}/ldo.c
${LUA_SRC_DIR}/lzio.c
${LUA_SRC_DIR}/lmem.c
${LUA_SRC_DIR}/strbuf.c
${LUA_SRC_DIR}/lstrlib.c
${LUA_SRC_DIR}/lundump.c
${LUA_SRC_DIR}/lua_cmsgpack.c
${LUA_SRC_DIR}/loslib.c
${LUA_SRC_DIR}/lua_struct.c
${LUA_SRC_DIR}/ldebug.c
${LUA_SRC_DIR}/lobject.c
${LUA_SRC_DIR}/ldump.c
${LUA_SRC_DIR}/lua_cjson.c
${LUA_SRC_DIR}/ldblib.c
${LUA_SRC_DIR}/ltm.c
${LUA_SRC_DIR}/ltable.c
${LUA_SRC_DIR}/lstate.c
${LUA_SRC_DIR}/lua_bit.c
${LUA_SRC_DIR}/lua.c
${LUA_SRC_DIR}/loadlib.c
${LUA_SRC_DIR}/lcode.c
${LUA_SRC_DIR}/lapi.c
${LUA_SRC_DIR}/lgc.c
${LUA_SRC_DIR}/lvm.c
${LUA_SRC_DIR}/lfunc.c
${LUA_SRC_DIR}/lauxlib.c
${LUA_SRC_DIR}/ltablib.c
${LUA_SRC_DIR}/linit.c
${LUA_SRC_DIR}/lopcodes.c
${LUA_SRC_DIR}/llex.c
${LUA_SRC_DIR}/liolib.c)
add_library(lualib STATIC "${LUA_SRCS}")
target_include_directories(lualib PUBLIC "${LUA_SRC_DIR}")
target_compile_definitions(lualib PRIVATE ENABLE_CJSON_GLOBAL)
# Use mkstemp if available
check_function_exists(mkstemp HAVE_MKSTEMP)
if (HAVE_MKSTEMP)
target_compile_definitions(lualib PRIVATE LUA_USE_MKSTEMP)
endif ()
unset(HAVE_MKSTEMP CACHE)

View File

@ -6,7 +6,6 @@
#include <string.h>
#include <stdint.h>
#define lstring_c
#define LUA_CORE
@ -72,55 +71,14 @@ static TString *newlstr (lua_State *L, const char *str, size_t l,
return ts;
}
uint32_t murmur32(const uint8_t* key, size_t len, uint32_t seed) {
static const uint32_t c1 = 0xcc9e2d51;
static const uint32_t c2 = 0x1b873593;
static const uint32_t r1 = 15;
static const uint32_t r2 = 13;
static const uint32_t m = 5;
static const uint32_t n = 0xe6546b64;
uint32_t hash = seed;
const int nblocks = len / 4;
const uint32_t* blocks = (const uint32_t*) key;
for (int i = 0; i < nblocks; i++) {
uint32_t k = blocks[i];
k *= c1;
k = (k << r1) | (k >> (32 - r1));
k *= c2;
hash ^= k;
hash = ((hash << r2) | (hash >> (32 - r2))) * m + n;
}
const uint8_t* tail = (const uint8_t*) (key + nblocks * 4);
uint32_t k1 = 0;
switch (len & 3) {
case 3:
k1 ^= tail[2] << 16;
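/* fall through */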
case 2:
k1 ^= tail[1] << 8;
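/* fall through */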
case 1:
k1 ^= tail[0];
k1 *= c1;
k1 = (k1 << r1) | (k1 >> (32 - r1));
k1 *= c2;
hash ^= k1;
}
hash ^= len;
hash ^= (hash >> 16);
hash *= 0x85ebca6b;
hash ^= (hash >> 13);
hash *= 0xc2b2ae35;
hash ^= (hash >> 16);
return hash;
}
TString *luaS_newlstr (lua_State *L, const char *str, size_t l) {
GCObject *o;
unsigned int h = murmur32((uint8_t *)str, l, (uint32_t)l);
unsigned int h = cast(unsigned int, l); /* seed */
size_t step = 1;
size_t l1;
for (l1=l; l1>=step; l1-=step) /* compute hash */
h = h ^ ((h<<5)+(h>>2)+cast(unsigned char, str[l1-1]));
for (o = G(L)->strt.hash[lmod(h, G(L)->strt.size)];
o != NULL;
o = o->gch.next) {

View File

@ -132,7 +132,6 @@ static int bit_tohex(lua_State *L)
const char *hexdigits = "0123456789abcdef";
char buf[8];
int i;
if (n == INT32_MIN) n = INT32_MIN+1;
if (n < 0) { n = -n; hexdigits = "0123456789ABCDEF"; }
if (n > 8) n = 8;
for (i = (int)n; --i >= 0; ) { buf[i] = hexdigits[b & 15]; b >>= 4; }

View File

@ -10,7 +10,7 @@
#define LUACMSGPACK_NAME "cmsgpack"
#define LUACMSGPACK_SAFE_NAME "cmsgpack_safe"
#define LUACMSGPACK_VERSION "lua-cmsgpack 0.4.0"
#define LUACMSGPACK_COPYRIGHT "Copyright (C) 2012, Redis Ltd."
#define LUACMSGPACK_COPYRIGHT "Copyright (C) 2012, Salvatore Sanfilippo"
#define LUACMSGPACK_DESCRIPTION "MessagePack C implementation for Lua"
/* Allows a preprocessor directive to override MAX_NESTING */
@ -39,7 +39,7 @@
/* =============================================================================
* MessagePack implementation and bindings for Lua 5.1/5.2.
* Copyright(C) 2012 Redis Ltd.
* Copyright(C) 2012 Salvatore Sanfilippo <antirez@gmail.com>
*
* http://github.com/antirez/lua-cmsgpack
*
@ -958,7 +958,7 @@ LUALIB_API int luaopen_cmsgpack_safe(lua_State *L) {
}
/******************************************************************************
* Copyright (C) 2012 Redis Ltd. All rights reserved.
* Copyright (C) 2012 Salvatore Sanfilippo. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the

View File

@ -14,4 +14,46 @@ then
fi
$MAKE -C tests/modules && \
$TCLSH tests/test_helper.tcl --moduleapi "${@}"
$TCLSH tests/test_helper.tcl \
--single unit/moduleapi/commandfilter \
--single unit/moduleapi/basics \
--single unit/moduleapi/fork \
--single unit/moduleapi/testrdb \
--single unit/moduleapi/infotest \
--single unit/moduleapi/moduleconfigs \
--single unit/moduleapi/infra \
--single unit/moduleapi/propagate \
--single unit/moduleapi/hooks \
--single unit/moduleapi/misc \
--single unit/moduleapi/blockonkeys \
--single unit/moduleapi/blockonbackground \
--single unit/moduleapi/scan \
--single unit/moduleapi/datatype \
--single unit/moduleapi/auth \
--single unit/moduleapi/keyspace_events \
--single unit/moduleapi/blockedclient \
--single unit/moduleapi/getkeys \
--single unit/moduleapi/test_lazyfree \
--single unit/moduleapi/defrag \
--single unit/moduleapi/keyspecs \
--single unit/moduleapi/hash \
--single unit/moduleapi/zset \
--single unit/moduleapi/list \
--single unit/moduleapi/stream \
--single unit/moduleapi/mallocsize \
--single unit/moduleapi/datatype2 \
--single unit/moduleapi/cluster \
--single unit/moduleapi/aclcheck \
--single unit/moduleapi/subcommands \
--single unit/moduleapi/reply \
--single unit/moduleapi/cmdintrospection \
--single unit/moduleapi/eventloop \
--single unit/moduleapi/timer \
--single unit/moduleapi/publish \
--single unit/moduleapi/usercall \
--single unit/moduleapi/postnotifications \
--single unit/moduleapi/async_rm_call \
--single unit/moduleapi/moduleauth \
--single unit/moduleapi/rdbloadsave \
--single unit/moduleapi/crash \
"${@}"

358
sentinel.conf Normal file
View File

@ -0,0 +1,358 @@
# Example sentinel.conf
# By default protected mode is disabled in sentinel mode. Sentinel is reachable
# from interfaces other than localhost. Make sure the sentinel instance is
# protected from the outside world via firewalling or other means.
protected-mode no
# port <sentinel-port>
# The port that this sentinel instance will run on
port 26379
# By default Valkey Sentinel does not run as a daemon. Use 'yes' if you need it.
# Note that Valkey will write a pid file in /var/run/valkey-sentinel.pid when
# daemonized.
daemonize no
# When running daemonized, Valkey Sentinel writes a pid file in
# /var/run/valkey-sentinel.pid by default. You can specify a custom pid file
# location here.
pidfile /var/run/valkey-sentinel.pid
# Specify the server verbosity level.
# This can be one of:
# debug (a lot of information, useful for development/testing)
# verbose (many rarely useful info, but not a mess like the debug level)
# notice (moderately verbose, what you want in production probably)
# warning (only very important / critical messages are logged)
# nothing (nothing is logged)
loglevel notice
# Specify the log file name. The empty string can also be used to force
# Sentinel to log to standard output. Note that if you use standard
# output for logging but daemonize, logs will be sent to /dev/null
logfile ""
# To enable logging to the system logger, just set 'syslog-enabled' to yes,
# and optionally update the other syslog parameters to suit your needs.
# syslog-enabled no
# Specify the syslog identity.
# syslog-ident sentinel
# Specify the syslog facility. Must be USER or between LOCAL0-LOCAL7.
# syslog-facility local0
# sentinel announce-ip <ip>
# sentinel announce-port <port>
#
# The above two configuration directives are useful in environments where,
# because of NAT, Sentinel is reachable from outside via a non-local address.
#
# When announce-ip is provided, the Sentinel will claim the specified IP address
# in HELLO messages used to gossip its presence, instead of auto-detecting the
# local address as it usually does.
#
# Similarly when announce-port is provided and is valid and non-zero, Sentinel
# will announce the specified TCP port.
#
# The two options don't need to be used together; if only announce-ip is
# provided, the Sentinel will announce the specified IP and the server port
# as specified by the "port" option. If only announce-port is provided, the
# Sentinel will announce the auto-detected local IP and the specified port.
#
# Example:
#
# sentinel announce-ip 1.2.3.4
# dir <working-directory>
# Every long running process should have a well-defined working directory.
# For Valkey Sentinel, chdir-ing to /tmp at startup is the simplest way
# for the process not to interfere with administrative tasks such as
# unmounting filesystems.
dir /tmp
# sentinel monitor <master-name> <ip> <valkey-port> <quorum>
#
# Tells Sentinel to monitor this master, and to consider it in O_DOWN
# (Objectively Down) state only if at least <quorum> sentinels agree.
#
# Note that whatever the ODOWN quorum is, a Sentinel will need to
# be elected by the majority of the known Sentinels in order to
# start a failover, so no failover can be performed in minority.
#
# Replicas are auto-discovered, so you don't need to specify replicas in
# any way. Sentinel itself will rewrite this configuration file adding
# the replicas using additional configuration options.
# Also note that the configuration file is rewritten when a
# replica is promoted to master.
#
# Note: master name should not include special characters or spaces.
# The valid charset is A-z 0-9 and the three characters ".-_".
sentinel monitor mymaster 127.0.0.1 6379 2
# sentinel auth-pass <master-name> <password>
#
# Set the password to use to authenticate with the master and replicas.
# Useful if there is a password set in the Valkey instances to monitor.
#
# Note that the master password is also used for replicas, so it is not
# possible to set different passwords for master and replica instances
# if you want to be able to monitor these instances with Sentinel.
#
# However you can have Valkey instances without the authentication enabled
# mixed with Valkey instances requiring the authentication (as long as the
# password set is the same for all the instances requiring the password) as
# the AUTH command will have no effect in Valkey instances with authentication
# switched off.
#
# Example:
#
# sentinel auth-pass mymaster MySUPER--secret-0123passw0rd
# sentinel auth-user <master-name> <username>
#
# This is useful in order to authenticate to instances having ACL capabilities,
# that is, running Valkey. When just auth-pass is provided the
# Sentinel instance will authenticate to Valkey using the old "AUTH <pass>"
# method. When a username is also provided, it will use "AUTH <user> <pass>".
# On the Valkey server side, an ACL providing just minimal access to
# Sentinel instances should be configured along the following lines:
#
# user sentinel-user >somepassword +client +subscribe +publish \
# +ping +info +multi +slaveof +config +client +exec on
# sentinel down-after-milliseconds <master-name> <milliseconds>
#
# Number of milliseconds the master (or any attached replica or sentinel) should
# be unreachable (as in, no acceptable reply to PING, continuously, for the
# specified period) in order to consider it in S_DOWN state (Subjectively
# Down).
#
# Default is 30 seconds.
sentinel down-after-milliseconds mymaster 30000
# Sentinel's ACL users are defined in the following format:
#
# user <username> ... acl rules ...
#
# For example:
#
# user worker +@admin +@connection ~* on >ffa9203c493aa99
#
# For more information about ACL configuration please refer to the Valkey
# website at https://valkey.io/topics/acl and valkey server configuration
# template valkey.conf.
# ACL LOG
#
# The ACL Log tracks failed commands and authentication events associated
# with ACLs. The ACL Log is useful to troubleshoot failed commands blocked
# by ACLs. The ACL Log is stored in memory. You can reclaim memory with
# ACL LOG RESET. Define the maximum entry length of the ACL Log below.
acllog-max-len 128
# Using an external ACL file
#
# Instead of configuring users here in this file, it is possible to use
# a stand-alone file just listing users. The two methods cannot be mixed:
# if you configure users here and at the same time you activate the external
# ACL file, the server will refuse to start.
#
# The format of the external ACL user file is exactly the same as the
# format that is used inside valkey.conf to describe users.
#
# aclfile /etc/valkey/sentinel-users.acl
# requirepass <password>
#
# You can configure Sentinel itself to require a password, however when doing
# so Sentinel will try to authenticate with the same password to all the
# other Sentinels. So you need to configure all your Sentinels in a given
# group with the same "requirepass" password. Check the following documentation
# for more info: https://valkey.io/topics/sentinel
#
# IMPORTANT NOTE: "requirepass" is a compatibility
# layer on top of the ACL system. The option's effect will be just setting
# the password for the default user. Clients will still authenticate using
# AUTH <password> as usual, or more explicitly with AUTH default <password>
# if they follow the new protocol: both will work.
#
# New config files are advised to use separate authentication control for
# incoming connections (via ACL), and for outgoing connections (via
# sentinel-user and sentinel-pass)
#
# The requirepass option is not compatible with the aclfile option and the ACL LOAD
# command; these will cause requirepass to be ignored.
# sentinel sentinel-user <username>
#
# You can configure Sentinel to authenticate with other Sentinels with a specific
# username.
# sentinel sentinel-pass <password>
#
# The password for Sentinel to authenticate with other Sentinels. If sentinel-user
# is not configured, Sentinel will use 'default' user with sentinel-pass to authenticate.
# sentinel parallel-syncs <master-name> <numreplicas>
#
# How many replicas we can reconfigure to point to the new replica simultaneously
# during the failover. Use a low number if you use the replicas to serve queries,
# to avoid all the replicas being unreachable at about the same
# time while performing the synchronization with the master.
sentinel parallel-syncs mymaster 1
# sentinel failover-timeout <master-name> <milliseconds>
#
# Specifies the failover timeout in milliseconds. It is used in many ways:
#
# - The time needed to re-start a failover after a previous failover was
# already tried against the same master by a given Sentinel, is two
# times the failover timeout.
#
# - The time needed for a replica replicating to a wrong master according
# to a Sentinel's current configuration, to be forced to replicate
# with the right master, is exactly the failover timeout (counting since
# the moment a Sentinel detected the misconfiguration).
#
# - The time needed to cancel a failover that is already in progress but
# did not produce any configuration change (SLAVEOF NO ONE not yet
# acknowledged by the promoted replica).
#
# - The maximum time a failover in progress waits for all the replicas to be
# reconfigured as replicas of the new master. However even after this time
# the replicas will be reconfigured by the Sentinels anyway, but not with
# the exact parallel-syncs progression as specified.
#
# Default is 3 minutes.
sentinel failover-timeout mymaster 180000
# SCRIPTS EXECUTION
#
# sentinel notification-script and sentinel reconfig-script are used in order
# to configure scripts that are called to notify the system administrator
# or to reconfigure clients after a failover. The scripts are executed
# with the following rules for error handling:
#
# If script exits with "1" the execution is retried later (up to a maximum
# number of times currently set to 10).
#
# If script exits with "2" (or an higher value) the script execution is
# not retried.
#
# If the script terminates because it receives a signal the behavior is the same
# as exit code 1.
#
# A script has a maximum running time of 60 seconds. After this limit is
# reached the script is terminated with a SIGKILL and the execution retried.
# NOTIFICATION SCRIPT
#
# sentinel notification-script <master-name> <script-path>
#
# Call the specified notification script for any sentinel event that is
# generated in the WARNING level (for instance -sdown, -odown, and so forth).
# This script should notify the system administrator via email, SMS, or any
# other messaging system, that there is something wrong with the monitored
# Valkey systems.
#
# The script is called with just two arguments: the first is the event type
# and the second the event description.
#
# The script must exist and be executable in order for sentinel to start if
# this option is provided.
#
# Example:
#
# sentinel notification-script mymaster /var/valkey/notify.sh
# CLIENTS RECONFIGURATION SCRIPT
#
# sentinel client-reconfig-script <master-name> <script-path>
#
# When the master changes because of a failover, a script can be called in
# order to perform application-specific tasks to notify the clients that the
# configuration has changed and the master is at a different address.
#
# The following arguments are passed to the script:
#
# <master-name> <role> <state> <from-ip> <from-port> <to-ip> <to-port>
#
# <state> is currently always "start"
# <role> is either "leader" or "observer"
#
# The arguments from-ip, from-port, to-ip, to-port are used to communicate
# the old address of the master and the new address of the elected replica
# (now a master).
#
# This script should be resistant to multiple invocations.
#
# Example:
#
# sentinel client-reconfig-script mymaster /var/valkey/reconfig.sh
# SECURITY
#
# By default SENTINEL SET will not be able to change the notification-script
# and client-reconfig-script at runtime. This avoids a trivial security issue
# where clients can set the script to anything and trigger a failover in order
# to get the program executed.
sentinel deny-scripts-reconfig yes
# VALKEY COMMANDS RENAMING (DEPRECATED)
#
# WARNING: avoid using this option if possible, instead use ACLs.
#
# Sometimes the Valkey server has certain commands, that are needed for Sentinel
# to work correctly, renamed to unguessable strings. This is often the case
# of CONFIG and SLAVEOF in the context of providers that provide Valkey as
# a service, and don't want the customers to reconfigure the instances outside
# of the administration console.
#
# In such cases it is possible to tell Sentinel to use different command names
# instead of the normal ones. For example, if the master "mymaster" and the
# associated replicas have "CONFIG" renamed to "GUESSME", you could use:
#
# SENTINEL rename-command mymaster CONFIG GUESSME
#
# After such configuration is set, every time Sentinel would use CONFIG it will
# use GUESSME instead. Note that there is no actual need to respect the command
# case, so writing "config guessme" is the same in the example above.
#
# SENTINEL SET can also be used in order to perform this configuration at runtime.
#
# In order to set a command back to its original name (undo the renaming), it
# is possible to just rename a command to itself:
#
# SENTINEL rename-command mymaster CONFIG CONFIG
# HOSTNAMES SUPPORT
#
# Normally Sentinel uses only IP addresses and requires SENTINEL MONITOR
# to specify an IP address. Also, it requires the Valkey replica-announce-ip
# keyword to specify only IP addresses.
#
# You may enable hostnames support by enabling resolve-hostnames. Note
# that you must make sure your DNS is configured properly and that DNS
# resolution does not introduce very long delays.
#
SENTINEL resolve-hostnames no
# When resolve-hostnames is enabled, Sentinel still uses IP addresses
# when exposing instances to users, configuration files, etc. If you want
# to retain the hostnames when announced, enable announce-hostnames below.
#
SENTINEL announce-hostnames no
# When primary-reboot-down-after-period is set to 0, Sentinel does not fail over
# when receiving a -LOADING response from a primary. This was the only supported
# behavior before Redis OSS 7.0.
#
# Otherwise, Sentinel will use this value as the time (in ms) it is willing to
# accept a -LOADING response after a primary has been rebooted, before failing
# over.
SENTINEL primary-reboot-down-after-period myprimary 0

View File

@ -2,7 +2,7 @@ BasedOnStyle: LLVM
IndentWidth: 4
TabWidth: 4
UseTab: Never
ColumnLimit: 0
ColumnLimit: 120
PenaltyBreakComment: 300
PenaltyBreakFirstLessLess: 120
PenaltyBreakString: 100
@ -30,4 +30,3 @@ SortIncludes: false
AllowAllParametersOfDeclarationOnNextLine: false
BinPackParameters: false
AlignAfterOpenBracket: Align
InsertNewlineAtEOF: true

View File

@ -1,93 +0,0 @@
project(valkey-server)
set(INSTALL_BIN_PATH ${CMAKE_INSTALL_PREFIX}/bin)
set_directory_properties(PROPERTIES CLEAN_NO_CUSTOM 1)
# Target: valkey-server
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${VALKEY_SERVER_CFLAGS}")
message(STATUS "CFLAGS: ${CMAKE_C_FLAGS}")
get_valkey_server_linker_option(VALKEY_SERVER_LDFLAGS)
list(APPEND SERVER_LIBS "fpconv")
list(APPEND SERVER_LIBS "lualib")
list(APPEND SERVER_LIBS "hdr_histogram")
valkey_build_and_install_bin(valkey-server "${VALKEY_SERVER_SRCS}" "${VALKEY_SERVER_LDFLAGS}" "${SERVER_LIBS}"
"redis-server")
add_dependencies(valkey-server generate_commands_def)
add_dependencies(valkey-server generate_fmtargs_h)
add_dependencies(valkey-server release_header)
if (VALKEY_RELEASE_BUILD)
# Enable LTO for Release build
set_property(TARGET valkey-server PROPERTY INTERPROCEDURAL_OPTIMIZATION TRUE)
endif ()
if (DEBUG_FORCE_DEFRAG)
message(STATUS "Forcing Active Defrag run on valkey-server")
target_compile_definitions(valkey-server PRIVATE DEBUG_FORCE_DEFRAG)
target_compile_definitions(valkey-server PRIVATE HAVE_DEFRAG)
endif ()
if (BUILD_SANITIZER)
# 'BUILD_SANITIZER' is defined in ValkeySetup module (based on user input)
# If defined, the variables 'VALKEY_SANITAIZER_CFLAGS' and 'VALKEY_SANITAIZER_LDFLAGS'
# are set with the link & compile flags required
message(STATUS "Adding sanitizer flags for target valkey-server")
target_compile_options(valkey-server PRIVATE ${VALKEY_SANITAIZER_CFLAGS})
target_link_options(valkey-server PRIVATE ${VALKEY_SANITAIZER_LDFLAGS})
endif ()
unset(BUILD_SANITIZER CACHE)
# Target: valkey-cli
list(APPEND CLI_LIBS "linenoise")
valkey_build_and_install_bin(valkey-cli "${VALKEY_CLI_SRCS}" "${VALKEY_SERVER_LDFLAGS}" "${CLI_LIBS}" "redis-cli")
add_dependencies(valkey-cli generate_commands_def)
add_dependencies(valkey-cli generate_fmtargs_h)
# Target: valkey-benchmark
list(APPEND BENCH_LIBS "hdr_histogram")
valkey_build_and_install_bin(valkey-benchmark "${VALKEY_BENCHMARK_SRCS}" "${VALKEY_SERVER_LDFLAGS}" "${BENCH_LIBS}"
"redis-benchmark")
add_dependencies(valkey-benchmark generate_commands_def)
add_dependencies(valkey-benchmark generate_fmtargs_h)
# Targets: valkey-sentinel, valkey-check-aof and valkey-check-rdb are just symbolic links
valkey_create_symlink("valkey-server" "valkey-sentinel")
valkey_create_symlink("valkey-server" "valkey-check-rdb")
valkey_create_symlink("valkey-server" "valkey-check-aof")
# Target valkey-rdma
if (BUILD_RDMA_MODULE)
set(MODULE_NAME "valkey-rdma")
message(STATUS "Building RDMA module")
add_library(${MODULE_NAME} SHARED "${VALKEY_RDMA_MODULE_SRCS}")
target_compile_options(${MODULE_NAME} PRIVATE -DBUILD_RDMA_MODULE=2 -DUSE_RDMA=1)
target_link_libraries(${MODULE_NAME} "${RDMA_LIBS}")
# remove the "lib" prefix from the module
set_target_properties(${MODULE_NAME} PROPERTIES PREFIX "")
valkey_install_bin(${MODULE_NAME})
endif ()
# Target valkey-tls (a module)
if (BUILD_TLS_MODULE)
message(STATUS "Building TLS as a module")
set(MODULE_NAME "valkey-tls")
add_library(${MODULE_NAME} SHARED ${VALKEY_TLS_MODULE_SRCS})
target_compile_options(${MODULE_NAME} PRIVATE -DUSE_OPENSSL=2 -DBUILD_TLS_MODULE=2)
if (APPLE)
# Some symbols can only be resolved during runtime (they exist in the executable)
target_link_options(${MODULE_NAME} PRIVATE -undefined dynamic_lookup)
endif ()
target_link_libraries(${MODULE_NAME} hiredis_ssl OpenSSL::SSL)
set_target_properties(${MODULE_NAME} PROPERTIES PREFIX "")
endif ()
if (BUILD_EXAMPLE_MODULES)
# Include the modules ("hello*")
message(STATUS "Building example modules")
add_subdirectory(modules)
endif ()
if (BUILD_UNIT_TESTS)
add_subdirectory(unit)
endif ()

View File

@ -1,5 +1,5 @@
# Valkey Makefile
# Copyright (C) 2009 Redis Ltd.
# Copyright (C) 2009 Salvatore Sanfilippo <antirez at gmail dot com>
# This file is released under the BSD license, see the COPYING file
#
# The Makefile composes the final FINAL_CFLAGS and FINAL_LDFLAGS using
@ -25,7 +25,7 @@ ifeq ($(OPTIMIZATION),-O3)
ifeq (clang,$(CLANG))
OPTIMIZATION+=-flto
else
OPTIMIZATION+=-flto=auto -ffat-lto-objects
OPTIMIZATION+=-flto=auto
endif
endif
ifneq ($(OPTIMIZATION),-O0)
@ -98,6 +98,15 @@ ifeq ($(USE_JEMALLOC),no)
MALLOC=libc
endif
# Some unit tests compile files a second time to get access to static functions. The "--allow-multiple-definition" flag
# allows us to do that without an error, by using the first instance of a function. This behavior can also be used
# to tweak the behavior of code just for unit tests. The version of ld on macOS apparently always does this.
ifneq ($(uname_S),Darwin)
ALLOW_DUPLICATE_FLAG=-Wl,--allow-multiple-definition
else
ALLOW_DUPLICATE_FLAG=
endif
ifdef SANITIZER
ifeq ($(SANITIZER),address)
MALLOC=libc
@ -130,12 +139,10 @@ ifdef REDIS_LDFLAGS
SERVER_LDFLAGS := $(REDIS_LDFLAGS)
endif
# Special case of forcing defrag to run even though we have no Jemalloc support
ifeq ($(DEBUG_FORCE_DEFRAG), yes)
SERVER_CFLAGS +=-DHAVE_DEFRAG -DDEBUG_FORCE_DEFRAG
endif
FINAL_CFLAGS=$(STD) $(WARN) $(OPT) $(DEBUG) $(CFLAGS) $(SERVER_CFLAGS)
ifeq ($(SERVER_TEST),yes)
FINAL_CFLAGS +=-DSERVER_TEST=1
endif
FINAL_LDFLAGS=$(LDFLAGS) $(OPT) $(SERVER_LDFLAGS) $(DEBUG)
FINAL_LIBS=-lm
DEBUG=-g -ggdb
@ -330,26 +337,26 @@ ifeq ($(BUILD_TLS),module)
TLS_MODULE_CFLAGS+=-DUSE_OPENSSL=$(BUILD_MODULE) $(OPENSSL_CFLAGS) -DBUILD_TLS_MODULE=$(BUILD_MODULE)
endif
RDMA_LIBS=
RDMA_PKGCONFIG := $(shell $(PKG_CONFIG) --exists librdmacm libibverbs && echo $$?)
ifeq ($(RDMA_PKGCONFIG),0)
RDMA_LIBS=$(shell $(PKG_CONFIG) --libs librdmacm libibverbs)
else
RDMA_LIBS=-lrdmacm -libverbs
endif
ifeq ($(BUILD_RDMA),yes)
FINAL_CFLAGS+=-DUSE_RDMA=$(BUILD_YES) -DBUILD_RDMA_MODULE=$(BUILD_NO)
FINAL_LIBS += $(RDMA_LIBS)
endif
BUILD_RDMA:=no
RDMA_MODULE=
RDMA_MODULE_NAME:=valkey-rdma$(PROG_SUFFIX).so
RDMA_MODULE_CFLAGS:=$(FINAL_CFLAGS)
ifeq ($(BUILD_RDMA),module)
FINAL_CFLAGS+=-DUSE_RDMA=$(BUILD_MODULE)
RDMA_PKGCONFIG := $(shell $(PKG_CONFIG) --exists librdmacm libibverbs && echo $$?)
ifeq ($(RDMA_PKGCONFIG),0)
RDMA_LIBS=$(shell $(PKG_CONFIG) --libs librdmacm libibverbs)
else
RDMA_LIBS=-lrdmacm -libverbs
endif
RDMA_MODULE=$(RDMA_MODULE_NAME)
RDMA_MODULE_CFLAGS+=-DUSE_RDMA=$(BUILD_MODULE) -DBUILD_RDMA_MODULE=$(BUILD_MODULE) $(RDMA_LIBS)
RDMA_MODULE_CFLAGS+=-DUSE_RDMA=$(BUILD_YES) -DBUILD_RDMA_MODULE $(RDMA_LIBS)
else
ifeq ($(BUILD_RDMA),no)
# disable RDMA, do nothing
else
$(error "RDMA is only supported as module (BUILD_RDMA=module), or disabled (BUILD_RDMA=no)")
endif
endif
ifndef V
@ -374,7 +381,7 @@ else
endef
endif
# Determine install/uninstall Redis symlinks for compatibility when
# Determine install/uninstall Redis symlinks for compatibility when
# installing/uninstalling Valkey binaries (defaulting to `yes`)
USE_REDIS_SYMLINKS?=yes
ifeq ($(USE_REDIS_SYMLINKS),yes)
@ -390,9 +397,9 @@ SERVER_AR=$(QUIET_AR)$(AR)
SERVER_LD=$(QUIET_LINK)$(CC) $(FINAL_LDFLAGS)
ENGINE_INSTALL=$(QUIET_INSTALL)$(INSTALL)
CCCOLOR="\033[33m"
LINKCOLOR="\033[36;1m"
SRCCOLOR="\033[32m"
CCCOLOR="\033[34m"
LINKCOLOR="\033[34;1m"
SRCCOLOR="\033[33m"
BINCOLOR="\033[37;1m"
MAKECOLOR="\033[32;1m"
ENDCOLOR="\033[0m"
@ -413,10 +420,10 @@ else
GEN_COMMANDS_FLAGS=
endif
ENGINE_NAME=futriix
ENGINE_NAME=valkey
SERVER_NAME=$(ENGINE_NAME)-server$(PROG_SUFFIX)
ENGINE_SENTINEL_NAME=$(ENGINE_NAME)-sentinel$(PROG_SUFFIX)
ENGINE_SERVER_OBJ=threads_mngr.o adlist.o quicklist.o ae.o anet.o dict.o hashtable.o kvstore.o server.o sds.o zmalloc.o lzf_c.o lzf_d.o pqsort.o zipmap.o sha1.o ziplist.o release.o memory_prefetch.o io_threads.o networking.o util.o object.o db.o replication.o rdb.o t_string.o t_list.o t_set.o t_zset.o t_hash.o config.o aof.o pubsub.o multi.o debug.o sort.o intset.o syncio.o cluster.o cluster_legacy.o cluster_slot_stats.o crc16.o endianconv.o commandlog.o eval.o bio.o rio.o rand.o memtest.o syscheck.o crcspeed.o crccombine.o crc64.o bitops.o sentinel.o notify.o setproctitle.o blocked.o hyperloglog.o latency.o sparkline.o valkey-check-rdb.o valkey-check-aof.o geo.o lazyfree.o module.o evict.o expire.o geohash.o geohash_helper.o childinfo.o allocator_defrag.o defrag.o siphash.o rax.o t_stream.o listpack.o localtime.o lolwut.o lolwut5.o lolwut6.o acl.o tracking.o socket.o tls.o sha256.o timeout.o setcpuaffinity.o monotonic.o mt19937-64.o resp_parser.o call_reply.o script_lua.o script.o functions.o function_lua.o commands.o strl.o connection.o unix.o logreqres.o rdma.o scripting_engine.o
ENGINE_SERVER_OBJ=threads_mngr.o adlist.o quicklist.o ae.o anet.o dict.o kvstore.o server.o sds.o zmalloc.o lzf_c.o lzf_d.o pqsort.o zipmap.o sha1.o ziplist.o release.o io_threads.o networking.o util.o object.o db.o replication.o rdb.o t_string.o t_list.o t_set.o t_zset.o t_hash.o config.o aof.o pubsub.o multi.o debug.o sort.o intset.o syncio.o cluster.o cluster_legacy.o cluster_slot_stats.o crc16.o endianconv.o slowlog.o eval.o bio.o rio.o rand.o memtest.o syscheck.o crcspeed.o crccombine.o crc64.o bitops.o sentinel.o notify.o setproctitle.o blocked.o hyperloglog.o latency.o sparkline.o valkey-check-rdb.o valkey-check-aof.o geo.o lazyfree.o module.o evict.o expire.o geohash.o geohash_helper.o childinfo.o defrag.o siphash.o rax.o t_stream.o listpack.o localtime.o lolwut.o lolwut5.o lolwut6.o acl.o tracking.o socket.o tls.o sha256.o timeout.o setcpuaffinity.o monotonic.o mt19937-64.o resp_parser.o call_reply.o script_lua.o script.o functions.o function_lua.o commands.o strl.o connection.o unix.o logreqres.o
ENGINE_CLI_NAME=$(ENGINE_NAME)-cli$(PROG_SUFFIX)
ENGINE_CLI_OBJ=anet.o adlist.o dict.o valkey-cli.o zmalloc.o release.o ae.o serverassert.o crcspeed.o crccombine.o crc64.o siphash.o crc16.o monotonic.o cli_common.o mt19937-64.o strl.o cli_commands.o
ENGINE_BENCHMARK_NAME=$(ENGINE_NAME)-benchmark$(PROG_SUFFIX)
@ -429,17 +436,6 @@ ENGINE_TEST_OBJ:=$(sort $(patsubst unit/%.c,unit/%.o,$(ENGINE_TEST_FILES)))
ENGINE_UNIT_TESTS:=$(ENGINE_NAME)-unit-tests$(PROG_SUFFIX)
ALL_SOURCES=$(sort $(patsubst %.o,%.c,$(ENGINE_SERVER_OBJ) $(ENGINE_CLI_OBJ) $(ENGINE_BENCHMARK_OBJ)))
USE_FAST_FLOAT?=no
ifeq ($(USE_FAST_FLOAT),yes)
# valkey_strtod.h uses this flag to switch valkey_strtod function to fast_float_strtod,
# therefore let's pass it to compiler for preprocessing.
FINAL_CFLAGS += -D USE_FAST_FLOAT
# next, let's build and add actual library containing fast_float_strtod function for linking.
DEPENDENCY_TARGETS += fast_float_c_interface
FAST_FLOAT_STRTOD_OBJECT := ../deps/fast_float_c_interface/fast_float_strtod.o
FINAL_LIBS += $(FAST_FLOAT_STRTOD_OBJECT)
endif
all: $(SERVER_NAME) $(ENGINE_SENTINEL_NAME) $(ENGINE_CLI_NAME) $(ENGINE_BENCHMARK_NAME) $(ENGINE_CHECK_RDB_NAME) $(ENGINE_CHECK_AOF_NAME) $(TLS_MODULE) $(RDMA_MODULE)
@echo ""
@echo "Hint: It's a good idea to run 'make test' ;)"
@ -498,7 +494,7 @@ $(ENGINE_LIB_NAME): $(ENGINE_SERVER_OBJ)
# valkey-unit-tests
$(ENGINE_UNIT_TESTS): $(ENGINE_TEST_OBJ) $(ENGINE_LIB_NAME)
$(SERVER_LD) -o $@ $^ ../deps/hiredis/libhiredis.a ../deps/lua/src/liblua.a ../deps/hdr_histogram/libhdrhistogram.a ../deps/fpconv/libfpconv.a $(FINAL_LIBS)
$(SERVER_LD) $(ALLOW_DUPLICATE_FLAG) -o $@ $^ ../deps/hiredis/libhiredis.a ../deps/lua/src/liblua.a ../deps/hdr_histogram/libhdrhistogram.a ../deps/fpconv/libfpconv.a $(FINAL_LIBS)
# valkey-sentinel
$(ENGINE_SENTINEL_NAME): $(SERVER_NAME)
@ -517,7 +513,7 @@ $(TLS_MODULE_NAME): $(SERVER_NAME)
$(QUIET_CC)$(CC) -o $@ tls.c -shared -fPIC $(TLS_MODULE_CFLAGS) $(TLS_CLIENT_LIBS)
# valkey-rdma.so
$(RDMA_MODULE_NAME): $(SERVER_NAME)
$(RDMA_MODULE_NAME): $(REDIS_SERVER_NAME)
$(QUIET_CC)$(CC) -o $@ rdma.c -shared -fPIC $(RDMA_MODULE_CFLAGS)
# valkey-cli
@ -604,7 +600,7 @@ bench: $(ENGINE_BENCHMARK_NAME)
32bit:
@echo ""
@echo "WARNING: if it fails under Linux you probably need to install libc6-dev-i386 and libstdc++-11-dev-i386-cross"
@echo "WARNING: if it fails under Linux you probably need to install libc6-dev-i386"
@echo ""
$(MAKE) all-with-unit-tests CFLAGS="-m32" LDFLAGS="-m32"

164
src/acl.c
View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2018, Redis Ltd.
* Copyright (c) 2018, Salvatore Sanfilippo <antirez at gmail dot com>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -29,7 +29,6 @@
#include "server.h"
#include "sha256.h"
#include "module.h"
#include <fcntl.h>
#include <ctype.h>
@ -298,6 +297,11 @@ int ACLListMatchSds(void *a, void *b) {
return sdscmp(a, b) == 0;
}
/* Method to free list elements from ACL users password/patterns lists. */
void ACLListFreeSds(void *item) {
sdsfree(item);
}
/* Method to duplicate list elements from ACL users password/patterns lists. */
void *ACLListDupSds(void *item) {
return sdsdup(item);
@ -370,7 +374,7 @@ aclSelector *ACLCreateSelector(int flags) {
listSetFreeMethod(selector->patterns, ACLListFreeKeyPattern);
listSetDupMethod(selector->patterns, ACLListDupKeyPattern);
listSetMatchMethod(selector->channels, ACLListMatchSds);
listSetFreeMethod(selector->channels, sdsfreeVoid);
listSetFreeMethod(selector->channels, ACLListFreeSds);
listSetDupMethod(selector->channels, ACLListDupSds);
memset(selector->allowed_commands, 0, sizeof(selector->allowed_commands));
@ -441,7 +445,7 @@ user *ACLCreateUser(const char *name, size_t namelen) {
u->passwords = listCreate();
u->acl_string = NULL;
listSetMatchMethod(u->passwords, ACLListMatchSds);
listSetFreeMethod(u->passwords, sdsfreeVoid);
listSetFreeMethod(u->passwords, ACLListFreeSds);
listSetDupMethod(u->passwords, ACLListDupSds);
u->selectors = listCreate();
@ -485,11 +489,6 @@ void ACLFreeUser(user *u) {
zfree(u);
}
/* Used for generic free functions. */
static void ACLFreeUserVoid(void *u) {
ACLFreeUser(u);
}
/* When a user is deleted we need to cycle the active
* connections in order to kill all the pending ones that
* are authenticated with such user. */
@ -653,15 +652,14 @@ void ACLChangeSelectorPerm(aclSelector *selector, struct serverCommand *cmd, int
unsigned long id = cmd->id;
ACLSetSelectorCommandBit(selector, id, allow);
ACLResetFirstArgsForCommand(selector, id);
if (cmd->subcommands_ht) {
hashtableIterator iter;
hashtableInitIterator(&iter, cmd->subcommands_ht, HASHTABLE_ITER_SAFE);
void *next;
while (hashtableNext(&iter, &next)) {
struct serverCommand *sub = next;
if (cmd->subcommands_dict) {
dictEntry *de;
dictIterator *di = dictGetSafeIterator(cmd->subcommands_dict);
while ((de = dictNext(di)) != NULL) {
struct serverCommand *sub = (struct serverCommand *)dictGetVal(de);
ACLSetSelectorCommandBit(selector, sub->id, allow);
}
hashtableResetIterator(&iter);
dictReleaseIterator(di);
}
}
@ -671,20 +669,19 @@ void ACLChangeSelectorPerm(aclSelector *selector, struct serverCommand *cmd, int
* value. Since the category passed by the user may be non existing, the
* function returns C_ERR if the category was not found, or C_OK if it was
* found and the operation was performed. */
void ACLSetSelectorCommandBitsForCategory(hashtable *commands, aclSelector *selector, uint64_t cflag, int value) {
hashtableIterator iter;
hashtableInitIterator(&iter, commands, 0);
void *next;
while (hashtableNext(&iter, &next)) {
struct serverCommand *cmd = next;
void ACLSetSelectorCommandBitsForCategory(dict *commands, aclSelector *selector, uint64_t cflag, int value) {
dictIterator *di = dictGetIterator(commands);
dictEntry *de;
while ((de = dictNext(di)) != NULL) {
struct serverCommand *cmd = dictGetVal(de);
if (cmd->acl_categories & cflag) {
ACLChangeSelectorPerm(selector, cmd, value);
}
if (cmd->subcommands_ht) {
ACLSetSelectorCommandBitsForCategory(cmd->subcommands_ht, selector, cflag, value);
if (cmd->subcommands_dict) {
ACLSetSelectorCommandBitsForCategory(cmd->subcommands_dict, selector, cflag, value);
}
}
hashtableResetIterator(&iter);
dictReleaseIterator(di);
}
/* This function is responsible for recomputing the command bits for all selectors of the existing users.
@ -735,27 +732,26 @@ int ACLSetSelectorCategory(aclSelector *selector, const char *category, int allo
return C_OK;
}
void ACLCountCategoryBitsForCommands(hashtable *commands,
void ACLCountCategoryBitsForCommands(dict *commands,
aclSelector *selector,
unsigned long *on,
unsigned long *off,
uint64_t cflag) {
hashtableIterator iter;
hashtableInitIterator(&iter, commands, 0);
void *next;
while (hashtableNext(&iter, &next)) {
struct serverCommand *cmd = next;
dictIterator *di = dictGetIterator(commands);
dictEntry *de;
while ((de = dictNext(di)) != NULL) {
struct serverCommand *cmd = dictGetVal(de);
if (cmd->acl_categories & cflag) {
if (ACLGetSelectorCommandBit(selector, cmd->id))
(*on)++;
else
(*off)++;
}
if (cmd->subcommands_ht) {
ACLCountCategoryBitsForCommands(cmd->subcommands_ht, selector, on, off, cflag);
if (cmd->subcommands_dict) {
ACLCountCategoryBitsForCommands(cmd->subcommands_dict, selector, on, off, cflag);
}
}
hashtableResetIterator(&iter);
dictReleaseIterator(di);
}
/* Return the number of commands allowed (on) and denied (off) for the user 'u'
@ -1078,7 +1074,6 @@ int ACLSetSelector(aclSelector *selector, const char *op, size_t oplen) {
int flags = 0;
size_t offset = 1;
if (op[0] == '%') {
int perm_ok = 1;
for (; offset < oplen; offset++) {
if (toupper(op[offset]) == 'R' && !(flags & ACL_READ_PERMISSION)) {
flags |= ACL_READ_PERMISSION;
@ -1088,14 +1083,10 @@ int ACLSetSelector(aclSelector *selector, const char *op, size_t oplen) {
offset++;
break;
} else {
perm_ok = 0;
break;
errno = EINVAL;
return C_ERR;
}
}
if (!flags || !perm_ok) {
errno = EINVAL;
return C_ERR;
}
} else {
flags = ACL_ALL_PERMISSION;
}
@ -1172,7 +1163,7 @@ int ACLSetSelector(aclSelector *selector, const char *op, size_t oplen) {
return C_ERR;
}
if (cmd->subcommands_ht) {
if (cmd->subcommands_dict) {
/* If user is trying to allow a valid subcommand we can just add its unique ID */
cmd = ACLLookupCommand(op + 1);
if (cmd == NULL) {
@ -1960,7 +1951,7 @@ int ACLShouldKillPubsubClient(client *c, list *upcoming) {
if (getClientType(c) == CLIENT_TYPE_PUBSUB) {
/* Check for pattern violations. */
dictIterator *di = dictGetIterator(c->pubsub_data->pubsub_patterns);
dictIterator *di = dictGetIterator(c->pubsub_patterns);
dictEntry *de;
while (!kill && ((de = dictNext(di)) != NULL)) {
o = dictGetKey(de);
@ -1972,7 +1963,7 @@ int ACLShouldKillPubsubClient(client *c, list *upcoming) {
/* Check for channel violations. */
if (!kill) {
/* Check for global channels violation. */
di = dictGetIterator(c->pubsub_data->pubsub_channels);
di = dictGetIterator(c->pubsub_channels);
while (!kill && ((de = dictNext(di)) != NULL)) {
o = dictGetKey(de);
@ -1983,7 +1974,7 @@ int ACLShouldKillPubsubClient(client *c, list *upcoming) {
}
if (!kill) {
/* Check for shard channels violation. */
di = dictGetIterator(c->pubsub_data->pubsubshard_channels);
di = dictGetIterator(c->pubsubshard_channels);
while (!kill && ((de = dictNext(di)) != NULL)) {
o = dictGetKey(de);
int res = ACLCheckChannelAgainstList(upcoming, o->ptr, sdslen(o->ptr), 0);
@ -2451,12 +2442,12 @@ sds ACLLoadFromFile(const char *filename) {
c->user = new_user;
}
if (user_channels) raxFreeWithCallback(user_channels, listReleaseVoid);
raxFreeWithCallback(old_users, ACLFreeUserVoid);
if (user_channels) raxFreeWithCallback(user_channels, (void (*)(void *))listRelease);
raxFreeWithCallback(old_users, (void (*)(void *))ACLFreeUser);
sdsfree(errors);
return NULL;
} else {
raxFreeWithCallback(Users, ACLFreeUserVoid);
raxFreeWithCallback(Users, (void (*)(void *))ACLFreeUser);
Users = old_users;
errors =
sdscat(errors, "WARNING: ACL errors detected, no change to the previously active ACL rules was performed");
@ -2687,7 +2678,7 @@ void addACLLogEntry(client *c, int reason, int context, int argpos, sds username
/* if we have a real client from the network, use it (could be missing on module timers) */
client *realclient = server.current_client ? server.current_client : c;
le->cinfo = catClientInfoString(sdsempty(), realclient, 0);
le->cinfo = catClientInfoString(sdsempty(), realclient);
le->context = context;
/* Try to match this entry with past ones, to see if we can just
@ -2763,22 +2754,23 @@ sds getAclErrorMessage(int acl_res, user *user, struct serverCommand *cmd, sds e
* ==========================================================================*/
/* ACL CAT category */
void aclCatWithFlags(client *c, hashtable *commands, uint64_t cflag, int *arraylen) {
hashtableIterator iter;
hashtableInitIterator(&iter, commands, 0);
void *next;
while (hashtableNext(&iter, &next)) {
struct serverCommand *cmd = next;
void aclCatWithFlags(client *c, dict *commands, uint64_t cflag, int *arraylen) {
dictEntry *de;
dictIterator *di = dictGetIterator(commands);
while ((de = dictNext(di)) != NULL) {
struct serverCommand *cmd = dictGetVal(de);
if (cmd->flags & CMD_MODULE) continue;
if (cmd->acl_categories & cflag) {
addReplyBulkCBuffer(c, cmd->fullname, sdslen(cmd->fullname));
(*arraylen)++;
}
if (cmd->subcommands_ht) {
aclCatWithFlags(c, cmd->subcommands_ht, cflag, arraylen);
if (cmd->subcommands_dict) {
aclCatWithFlags(c, cmd->subcommands_dict, cflag, arraylen);
}
}
hashtableResetIterator(&iter);
dictReleaseIterator(di);
}
/* Add the formatted response from a single selector to the ACL GETUSER
@ -3125,35 +3117,37 @@ void aclCommand(client *c) {
addReply(c, shared.ok);
} else if (c->argc == 2 && !strcasecmp(sub, "help")) {
/* clang-format off */
const char *help[] = {
"CAT [<category>]",
" List all commands that belong to <category>, or all command categories",
" when no category is specified.",
"DELUSER <username> [<username> ...]",
" Delete a list of users.",
"DRYRUN <username> <command> [<arg> ...]",
" Returns whether the user can execute the given command without executing the command.",
"GETUSER <username>",
" Get the user's details.",
"GENPASS [<bits>]",
" Generate a secure 256-bit user password. The optional `bits` argument can",
" be used to specify a different size.",
"LIST",
" Show users details in config file format.",
"LOAD",
" Reload users from the ACL file.",
"LOG [<count> | RESET]",
" Show the ACL log entries.",
"SAVE",
" Save the current config to the ACL file.",
"SETUSER <username> <attribute> [<attribute> ...]",
" Create or modify a user with the specified attributes.",
"USERS",
" List all the registered usernames.",
"WHOAMI",
" Return the current connection username.",
NULL,
"CAT [<category>]",
" List all commands that belong to <category>, or all command categories",
" when no category is specified.",
"DELUSER <username> [<username> ...]",
" Delete a list of users.",
"DRYRUN <username> <command> [<arg> ...]",
" Returns whether the user can execute the given command without executing the command.",
"GETUSER <username>",
" Get the user's details.",
"GENPASS [<bits>]",
" Generate a secure 256-bit user password. The optional `bits` argument can",
" be used to specify a different size.",
"LIST",
" Show users details in config file format.",
"LOAD",
" Reload users from the ACL file.",
"LOG [<count> | RESET]",
" Show the ACL log entries.",
"SAVE",
" Save the current config to the ACL file.",
"SETUSER <username> <attribute> [<attribute> ...]",
" Create or modify a user with the specified attributes.",
"USERS",
" List all the registered usernames.",
"WHOAMI",
" Return the current connection username.",
NULL
};
/* clang-format on */
addReplyHelp(c, help);
} else {
addReplySubcommandSyntaxError(c);

View File

@ -1,6 +1,6 @@
/* adlist.c - A generic doubly linked list implementation
*
* Copyright (c) 2006-2010, Redis Ltd.
* Copyright (c) 2006-2010, Salvatore Sanfilippo <antirez at gmail dot com>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -77,12 +77,6 @@ void listRelease(list *list) {
zfree(list);
}
/* Just like listRelease, but takes the list as a (void *).
* Useful as generic free callback. */
void listReleaseVoid(void *l) {
listRelease((list *)l);
}
/* Add a new node to the list, to head, containing the specified 'value'
* pointer as value.
*

View File

@ -1,6 +1,6 @@
/* adlist.h - A generic doubly linked list implementation
*
* Copyright (c) 2006-2012, Redis Ltd.
* Copyright (c) 2006-2012, Salvatore Sanfilippo <antirez at gmail dot com>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -72,7 +72,6 @@ typedef struct list {
/* Prototypes */
list *listCreate(void);
void listRelease(list *list);
void listReleaseVoid(void *list);
void listEmpty(list *list);
list *listAddNodeHead(list *list, void *value);
list *listAddNodeTail(list *list, void *value);

View File

@ -2,7 +2,7 @@
* for the Jim's event-loop (Jim is a Tcl interpreter) but later translated
* it in form of a library for easy reuse.
*
* Copyright (c) 2006-2010, Redis Ltd.
* Copyright (c) 2006-2010, Salvatore Sanfilippo <antirez at gmail dot com>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -63,14 +63,14 @@
#endif
#endif
#define AE_LOCK(eventLoop) \
if ((eventLoop)->flags & AE_PROTECT_POLL) { \
assert(pthread_mutex_lock(&(eventLoop)->poll_mutex) == 0); \
#define AE_LOCK(eventLoop) \
if ((eventLoop)->flags & AE_PROTECT_POLL) { \
assert(pthread_mutex_lock(&(eventLoop)->poll_mutex) == 0); \
}
#define AE_UNLOCK(eventLoop) \
if ((eventLoop)->flags & AE_PROTECT_POLL) { \
assert(pthread_mutex_unlock(&(eventLoop)->poll_mutex) == 0); \
#define AE_UNLOCK(eventLoop) \
if ((eventLoop)->flags & AE_PROTECT_POLL) { \
assert(pthread_mutex_unlock(&(eventLoop)->poll_mutex) == 0); \
}
aeEventLoop *aeCreateEventLoop(int setsize) {
@ -85,7 +85,7 @@ aeEventLoop *aeCreateEventLoop(int setsize) {
if (eventLoop->events == NULL || eventLoop->fired == NULL) goto err;
eventLoop->setsize = setsize;
eventLoop->timeEventHead = NULL;
eventLoop->timeEventNextId = 1;
eventLoop->timeEventNextId = 0;
eventLoop->stop = 0;
eventLoop->maxfd = -1;
eventLoop->beforesleep = NULL;
@ -363,7 +363,7 @@ static int processTimeEvents(aeEventLoop *eventLoop) {
}
if (te->when <= now) {
long long retval;
int retval;
id = te->id;
te->refcount++;

View File

@ -2,7 +2,7 @@
* for the Jim's event-loop (Jim is a Tcl interpreter) but later translated
* it in form of a library for easy reuse.
*
* Copyright (c) 2006-2012, Redis Ltd.
* Copyright (c) 2006-2012, Salvatore Sanfilippo <antirez at gmail dot com>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -42,11 +42,12 @@
#define AE_NONE 0 /* No events registered. */
#define AE_READABLE 1 /* Fire when descriptor is readable. */
#define AE_WRITABLE 2 /* Fire when descriptor is writable. */
#define AE_BARRIER 4 /* With WRITABLE, never fire the event if the \
READABLE event already fired in the same event \
loop iteration. Useful when you want to persist \
things to disk before sending replies, and want \
to do that in a group fashion. */
#define AE_BARRIER \
4 /* With WRITABLE, never fire the event if the \
READABLE event already fired in the same event \
loop iteration. Useful when you want to persist \
things to disk before sending replies, and want \
to do that in a group fashion. */
#define AE_FILE_EVENTS (1 << 0)
#define AE_TIME_EVENTS (1 << 1)
@ -67,7 +68,7 @@ struct aeEventLoop;
/* Types and data structures */
typedef void aeFileProc(struct aeEventLoop *eventLoop, int fd, void *clientData, int mask);
typedef long long aeTimeProc(struct aeEventLoop *eventLoop, long long id, void *clientData);
typedef int aeTimeProc(struct aeEventLoop *eventLoop, long long id, void *clientData);
typedef void aeEventFinalizerProc(struct aeEventLoop *eventLoop, void *clientData);
typedef void aeBeforeSleepProc(struct aeEventLoop *eventLoop);
typedef void aeAfterSleepProc(struct aeEventLoop *eventLoop, int numevents);

View File

@ -1,6 +1,6 @@
/* Linux epoll(2) based ae.c module
*
* Copyright (c) 2009-2012, Redis Ltd.
* Copyright (c) 2009-2012, Salvatore Sanfilippo <antirez at gmail dot com>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without

View File

@ -1,6 +1,6 @@
/* Select()-based ae.c module.
*
* Copyright (c) 2009-2012, Redis Ltd.
* Copyright (c) 2009-2012, Salvatore Sanfilippo <antirez at gmail dot com>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without

View File

@ -1,477 +0,0 @@
/* Copyright 2024- Valkey contributors
* All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*/
/*
* This file implements allocator-specific defragmentation logic used
* within the Valkey engine. Below is the relationship between various
* components involved in allocation and defragmentation:
*
* Application code
* / \
* allocation / \ defrag
* / \
* zmalloc allocator_defrag
* / | \ / \
* / | \ / \
* / | \ / \
* libc tcmalloc jemalloc other
*
* Explanation:
* - **Application code**: High-level application logic that uses memory
* allocation and may trigger defragmentation.
* - **zmalloc**: An abstraction layer over the memory allocator, providing
* a uniform allocation interface to the application code. It can delegate
* to various underlying allocators (e.g., libc, tcmalloc, jemalloc, or others).
 * It does not depend on the defrag implementation logic, and it is possible to use a
 * jemalloc version that does not support defrag.
* - **allocator_defrag**: This file contains allocator-specific logic for
* defragmentation, invoked from `defrag.c` when memory defragmentation is needed.
 * Currently jemalloc is the only allocator with implemented defrag logic. It is possible that
 * a future implementation will include non-allocator defragmentation (think of data-structure
 * compaction, for example).
* - **Underlying allocators**: These are the actual memory allocators, such as
* libc, tcmalloc, jemalloc, or other custom allocators. The defragmentation
* logic in `allocator_defrag` interacts with these allocators to reorganize
* memory and reduce fragmentation.
*
* The `defrag.c` file acts as the central entry point for defragmentation,
* invoking allocator-specific implementations provided here in `allocator_defrag.c`.
*
* Note: Developers working on `zmalloc` or `allocator_defrag` should refer to
* the other component to ensure both are using the same allocator configuration.
*/
#include "server.h"
#include "serverassert.h"
#include "allocator_defrag.h"
#if defined(HAVE_DEFRAG) && defined(USE_JEMALLOC)
#define STRINGIFY_(x) #x
#define STRINGIFY(x) STRINGIFY_(x)
#define BATCH_QUERY_ARGS_OUT 3
#define SLAB_NFREE(out, i) out[(i) * BATCH_QUERY_ARGS_OUT]
#define SLAB_LEN(out, i) out[(i) * BATCH_QUERY_ARGS_OUT + 2]
#define SLAB_NUM_REGS(out, i) out[(i) * BATCH_QUERY_ARGS_OUT + 1]
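/* A layout sketch implied by the macros above: the batch utilization query
 * writes BATCH_QUERY_ARGS_OUT values per input pointer, so for pointer i the
 * code reads out[3*i] (free regions in the slab), out[3*i + 1] (regions per
 * slab) and out[3*i + 2] (slab length in bytes). */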
#define UTILIZATION_THRESHOLD_FACTOR_MILI (125) // 12.5% additional utilization
/*
* Represents a precomputed key for querying jemalloc statistics.
*
* The `jeMallctlKey` structure stores a key corresponding to a specific jemalloc
* statistics field name. This key is used with the `je_mallctlbymib` interface
* to query statistics more efficiently, bypassing the need for runtime string
* lookup and translation performed by `je_mallctl`.
*
* - `je_mallctlnametomib` is called once for each statistics field to precompute
* and store the key corresponding to the field name.
* - Subsequent queries use `je_mallctlbymib` with the stored key, avoiding the
* overhead of repeated string-based lookups.
*
*/
typedef struct jeMallctlKey {
size_t key[6]; /* The precomputed key used to query jemalloc statistics. */
size_t keylen; /* The length of the key array. */
} jeMallctlKey;
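/* A minimal usage sketch (illustrative only; error handling omitted). The MIB
 * is resolved from the name once, then reused for every subsequent query:
 *
 *   jeMallctlKey k;
 *   k.keylen = sizeof(k.key) / sizeof(k.key[0]);
 *   je_mallctlnametomib("arenas.nbins", k.key, &k.keylen);   // once
 *   unsigned nbins;
 *   size_t sz = sizeof(nbins);
 *   je_mallctlbymib(k.key, k.keylen, &nbins, &sz, NULL, 0);  // per query
 */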
/* Stores MIB (Management Information Base) keys for jemalloc bin queries.
*
* This struct holds precomputed `jeMallctlKey` values for querying various
* jemalloc bin-related statistics efficiently.
*/
typedef struct jeBinInfoKeys {
jeMallctlKey curr_slabs; /* Key to query the current number of slabs in the bin. */
jeMallctlKey nonfull_slabs; /* Key to query the number of non-full slabs in the bin. */
jeMallctlKey curr_regs; /* Key to query the current number of regions in the bin. */
} jeBinInfoKeys;
/* Represents detailed information about a jemalloc bin.
*
* This struct provides metadata about a jemalloc bin, including the size of
* its regions, total number of regions, and related MIB keys for efficient
* queries.
*/
typedef struct jeBinInfo {
size_t reg_size; /* Size of each region in the bin. */
uint32_t nregs; /* Total number of regions in the bin. */
jeBinInfoKeys info_keys; /* Precomputed MIB keys for querying bin statistics. */
} jeBinInfo;
/* Represents the configuration for jemalloc bins.
*
* This struct contains information about the number of bins and metadata for
* each bin, as well as precomputed keys for batch utility queries and epoch updates.
*/
typedef struct jemallocCB {
unsigned nbins; /* Number of bins in the jemalloc configuration. */
jeBinInfo *bin_info; /* Array of `jeBinInfo` structs, one for each bin. */
jeMallctlKey util_batch_query; /* Key to query batch utilization information. */
jeMallctlKey epoch; /* Key to trigger statistics sync between threads. */
} jemallocCB;
/* Represents the latest usage statistics for a jemalloc bin.
*
* This struct tracks the current usage of a bin, including the number of slabs
* and regions, and calculates the number of full slabs from other fields.
*/
typedef struct jemallocBinUsageData {
size_t curr_slabs; /* Current number of slabs in the bin. */
size_t curr_nonfull_slabs; /* Current number of non-full slabs in the bin. */
size_t curr_regs; /* Current number of regions in the bin. */
} jemallocBinUsageData;
static int defrag_supported = 0;
/* Control block holding information about bins and query helpers.
 * This structure is initialized once when calling allocatorDefragInit. It does not change afterwards. */
static jemallocCB je_cb = {0, NULL, {{0}, 0}, {{0}, 0}};
/* Holds the latest usage statistics for each bin. This structure is updated when calling
* allocatorDefragGetFragSmallbins and later is used to make a defrag decision for a memory pointer. */
static jemallocBinUsageData *je_usage_info = NULL;
/* -----------------------------------------------------------------------------
* Alloc/Free API that are cooperative with defrag
* -------------------------------------------------------------------------- */
/* Allocation and free functions that bypass the thread cache
* and go straight to the allocator arena bins.
* Currently implemented only for jemalloc. Used for online defragmentation.
*/
void *allocatorDefragAlloc(size_t size) {
void *ptr = je_mallocx(size, MALLOCX_TCACHE_NONE);
return ptr;
}
void allocatorDefragFree(void *ptr, size_t size) {
if (ptr == NULL) return;
je_sdallocx(ptr, size, MALLOCX_TCACHE_NONE);
}
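/* A minimal relocation sketch showing how these two calls pair up with
 * allocatorShouldDefrag (illustrative only; the real flow lives in defrag.c):
 *
 *   if (allocatorShouldDefrag(ptr)) {
 *       void *newptr = allocatorDefragAlloc(size);
 *       memcpy(newptr, ptr, size);
 *       allocatorDefragFree(ptr, size);
 *       ptr = newptr;
 *   }
 */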
/* -----------------------------------------------------------------------------
* Helper functions for jemalloc translation between size and index
* -------------------------------------------------------------------------- */
/* Get the bin index in bin array from the reg_size.
*
 * This is a reverse-engineered mapping of reg_size -> binind. We need this information because the utilization query
 * returns the size of the buffer and not the bin index, and we need the bin index to access its usage information.
 *
 * Note: in case a future PR returns the binind (that is a better API anyway) we can get rid of
 * these conversion functions.
*/
static inline unsigned jeSize2BinIndexLgQ3(size_t sz) {
/* Smallest power-of-2 quantum for binning */
const size_t size_class_group_size = 4;
/* Number of bins in each power-of-2 size class group */
const size_t lg_quantum_3_first_pow2 = 3;
/* Offset for exponential bins */
const size_t lg_quantum_3_offset = ((64 >> lg_quantum_3_first_pow2) - 1);
/* Small sizes (8-64 bytes) use linear binning */
if (sz <= 64) { // 64 = 1 << (lg_quantum_3_first_pow2 + 3)
return (sz >> 3) - 1; // Divide by 8 and subtract 1
}
/* For larger sizes, use exponential binning */
/* Calculate leading zeros of (sz - 1) to properly handle power-of-2 sizes */
unsigned leading_zeros = __builtin_clzll(sz - 1);
unsigned exp = 64 - leading_zeros; // Effective log2(sz)
/* Calculate the size's position within its group */
unsigned within_group_offset = size_class_group_size -
(((1ULL << exp) - sz) >> (exp - lg_quantum_3_first_pow2));
/* Calculate the final bin index */
return within_group_offset +
((exp - (lg_quantum_3_first_pow2 + 3)) - 1) * size_class_group_size +
lg_quantum_3_offset;
}
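/* Worked example (assuming the lg_quantum=3 classes above): for sz == 96,
 * leading_zeros = clzll(95) = 57 so exp = 7, within_group_offset =
 * 4 - ((128 - 96) >> 4) = 2, and the result is 2 + ((7 - 6) - 1) * 4 + 7 = 9,
 * matching the class sequence 8..64 (bins 0-7), then 80, 96, ... (bins 8, 9, ...). */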
/* -----------------------------------------------------------------------------
* Interface functions to get fragmentation info from jemalloc
* -------------------------------------------------------------------------- */
#define ARENA_TO_QUERY MALLCTL_ARENAS_ALL
static inline void jeRefreshStats(const jemallocCB *je_cb) {
uint64_t epoch = 1; // Value doesn't matter
size_t sz = sizeof(epoch);
/* Refresh stats */
je_mallctlbymib(je_cb->epoch.key, je_cb->epoch.keylen, &epoch, &sz, &epoch, sz);
}
/* Extract key that corresponds to the given name for fast query. This should be called once for each key_name */
static inline int jeQueryKeyInit(const char *key_name, jeMallctlKey *key_info) {
key_info->keylen = sizeof(key_info->key) / sizeof(key_info->key[0]);
int res = je_mallctlnametomib(key_name, key_info->key, &key_info->keylen);
/* sanity check that returned value is not larger than provided */
assert(key_info->keylen <= sizeof(key_info->key) / sizeof(key_info->key[0]));
return res;
}
/* Query jemalloc control interface using previously extracted key (with jeQueryKeyInit) instead of name string.
* This interface (named MIB in jemalloc) is faster as it avoids string dict lookup at run-time. */
static inline int jeQueryCtlInterface(const jeMallctlKey *key_info, void *value) {
size_t sz = sizeof(size_t);
return je_mallctlbymib(key_info->key, key_info->keylen, value, &sz, NULL, 0);
}
static inline int binQueryHelperInitialization(jeBinInfoKeys *helper, unsigned bin_index) {
char mallctl_name[128];
    /* MIB to fetch the number of used regions in the bin */
snprintf(mallctl_name, sizeof(mallctl_name), "stats.arenas." STRINGIFY(ARENA_TO_QUERY) ".bins.%d.curregs", bin_index);
if (jeQueryKeyInit(mallctl_name, &helper->curr_regs) != 0) return -1;
    /* MIB to fetch the number of current slabs in the bin */
snprintf(mallctl_name, sizeof(mallctl_name), "stats.arenas." STRINGIFY(ARENA_TO_QUERY) ".bins.%d.curslabs", bin_index);
if (jeQueryKeyInit(mallctl_name, &helper->curr_slabs) != 0) return -1;
    /* MIB to fetch the number of nonfull slabs in the bin */
snprintf(mallctl_name, sizeof(mallctl_name), "stats.arenas." STRINGIFY(ARENA_TO_QUERY) ".bins.%d.nonfull_slabs", bin_index);
if (jeQueryKeyInit(mallctl_name, &helper->nonfull_slabs) != 0) return -1;
return 0;
}
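/* For bin index 3, for example, the first name above expands to
 * "stats.arenas." STRINGIFY(ARENA_TO_QUERY) ".bins.3.curregs"; resolving the
 * keys here once means the recurring stats queries never repeat the string
 * lookup. */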
/* Initializes the defragmentation system for the jemalloc memory allocator.
*
* This function performs the necessary setup and initialization steps for the defragmentation system.
* It retrieves the configuration information for the jemalloc arenas and bins, and initializes the usage
* statistics data structure.
*
* return 0 on success, or a non-zero error code on failure.
*
* The initialization process involves the following steps:
* 1. Check if defragmentation is supported by the current jemalloc version.
* 2. Retrieve the arena bin configuration information using the `je_mallctlbymib` function.
* 3. Initialize the `usage_latest` structure with the bin usage statistics and configuration data.
* 4. Set the `defrag_supported` flag to indicate that defragmentation is enabled.
*
* Note: This function must be called before using any other defragmentation-related functionality.
* It should be called during the initialization phase of the code that uses the
* defragmentation feature.
*/
int allocatorDefragInit(void) {
char mallctl_name[100];
jeBinInfo *bin_info;
size_t sz;
int je_res;
    /* The init should be called only once; fail on an unexpected call. */
assert(!defrag_supported);
/* Get the mib of the per memory pointers query command that is used during defrag scan over memory */
if (jeQueryKeyInit("experimental.utilization.batch_query", &je_cb.util_batch_query) != 0) return -1;
je_res = jeQueryKeyInit("epoch", &je_cb.epoch);
assert(je_res == 0);
jeRefreshStats(&je_cb);
    /* Get quantum for verification only; the current code assumes lg-quantum is 3. */
size_t jemalloc_quantum;
sz = sizeof(jemalloc_quantum);
je_mallctl("arenas.quantum", &jemalloc_quantum, &sz, NULL, 0);
/* lg-quantum should be 3 so jemalloc_quantum should be 1<<3 */
assert(jemalloc_quantum == 8);
sz = sizeof(je_cb.nbins);
je_res = je_mallctl("arenas.nbins", &je_cb.nbins, &sz, NULL, 0);
assert(je_res == 0 && je_cb.nbins != 0);
je_cb.bin_info = je_calloc(je_cb.nbins, sizeof(jeBinInfo));
assert(je_cb.bin_info != NULL);
je_usage_info = je_calloc(je_cb.nbins, sizeof(jemallocBinUsageData));
assert(je_usage_info != NULL);
for (unsigned j = 0; j < je_cb.nbins; j++) {
bin_info = &je_cb.bin_info[j];
/* The size of the current bin */
snprintf(mallctl_name, sizeof(mallctl_name), "arenas.bin.%d.size", j);
sz = sizeof(bin_info->reg_size);
je_res = je_mallctl(mallctl_name, &bin_info->reg_size, &sz, NULL, 0);
assert(je_res == 0);
/* Number of regions per slab */
snprintf(mallctl_name, sizeof(mallctl_name), "arenas.bin.%d.nregs", j);
sz = sizeof(bin_info->nregs);
je_res = je_mallctl(mallctl_name, &bin_info->nregs, &sz, NULL, 0);
assert(je_res == 0);
/* init bin specific fast query keys */
je_res = binQueryHelperInitialization(&bin_info->info_keys, j);
assert(je_res == 0);
/* verify the reverse map of reg_size to bin index */
assert(jeSize2BinIndexLgQ3(bin_info->reg_size) == j);
}
    /* Defrag is supported; mark it to enable defrag queries. */
defrag_supported = 1;
return 0;
}
/* Total size of memory consumed by unused regions in small bins (AKA external fragmentation).
 * The function will also refresh the epoch.
*
* return total fragmentation bytes
*/
unsigned long allocatorDefragGetFragSmallbins(void) {
assert(defrag_supported);
unsigned long frag = 0;
jeRefreshStats(&je_cb);
for (unsigned j = 0; j < je_cb.nbins; j++) {
jeBinInfo *bin_info = &je_cb.bin_info[j];
jemallocBinUsageData *bin_usage = &je_usage_info[j];
        /* Number of current regions in the bin */
jeQueryCtlInterface(&bin_info->info_keys.curr_regs, &bin_usage->curr_regs);
/* Number of current slabs in the bin */
jeQueryCtlInterface(&bin_info->info_keys.curr_slabs, &bin_usage->curr_slabs);
/* Number of non full slabs in the bin */
jeQueryCtlInterface(&bin_info->info_keys.nonfull_slabs, &bin_usage->curr_nonfull_slabs);
/* Calculate the fragmentation bytes for the current bin and add it to the total. */
frag += ((bin_info->nregs * bin_usage->curr_slabs) - bin_usage->curr_regs) * bin_info->reg_size;
}
return frag;
}
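/* Worked example with illustrative numbers: a bin with reg_size == 64 and
 * nregs == 128 holding curr_slabs == 10 and curr_regs == 1000 has a capacity
 * of 1280 regions, so (1280 - 1000) * 64 = 17920 fragmentation bytes are
 * added to the total for that bin. */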
/* Determines whether defragmentation should be performed on a pointer based on jemalloc information.
*
* bin_info Pointer to the bin information structure.
* bin_usage Pointer to the bin usage structure.
* nalloced Number of allocated regions in the bin.
*
* return 1 if defragmentation should be performed, 0 otherwise.
*
* This function checks the following conditions to determine if defragmentation should be performed:
* 1. If the number of allocated regions (nalloced) is equal to the total number of regions (bin_info->nregs),
* defragmentation is not necessary as moving regions is guaranteed not to change the fragmentation ratio.
* 2. If the number of non-full slabs (bin_usage->curr_nonfull_slabs) is less than 2, defragmentation is not performed
* because there is no other slab to move regions to.
* 3. If slab utilization < 'avg utilization'*1.125 [code 1.125 == (1000+UTILIZATION_THRESHOLD_FACTOR_MILI)/1000]
 * then we should defrag. This is aligned with the previous je_defrag_hint implementation.
*/
static inline int makeDefragDecision(jeBinInfo *bin_info, jemallocBinUsageData *bin_usage, unsigned long nalloced) {
unsigned long curr_full_slabs = bin_usage->curr_slabs - bin_usage->curr_nonfull_slabs;
size_t allocated_nonfull = bin_usage->curr_regs - curr_full_slabs * bin_info->nregs;
if (bin_info->nregs == nalloced || bin_usage->curr_nonfull_slabs < 2 ||
1000 * nalloced * bin_usage->curr_nonfull_slabs > (1000 + UTILIZATION_THRESHOLD_FACTOR_MILI) * allocated_nonfull) {
return 0;
}
return 1;
}
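/* Worked example with illustrative numbers: nregs == 128, curr_slabs == 10,
 * curr_nonfull_slabs == 4 and curr_regs == 1000 give 6 full slabs and
 * allocated_nonfull == 1000 - 6 * 128 = 232. A slab with nalloced == 50
 * passes (1000 * 50 * 4 = 200000 <= 1125 * 232 = 261000) and is defragged;
 * one with nalloced == 120 (480000 > 261000) is left alone. */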
/*
* Performs defragmentation analysis for a given ptr.
*
* ptr - ptr to memory region to be analyzed.
*
* return - the function returns 1 if defrag should be performed, 0 otherwise.
*/
int allocatorShouldDefrag(void *ptr) {
assert(defrag_supported);
size_t out[BATCH_QUERY_ARGS_OUT];
size_t out_sz = sizeof(out);
size_t in_sz = sizeof(ptr);
for (unsigned j = 0; j < BATCH_QUERY_ARGS_OUT; j++) {
out[j] = -1;
}
je_mallctlbymib(je_cb.util_batch_query.key,
je_cb.util_batch_query.keylen,
out, &out_sz,
&ptr, in_sz);
/* handle results with appropriate quantum value */
assert(SLAB_NUM_REGS(out, 0) > 0);
assert(SLAB_LEN(out, 0) > 0);
assert(SLAB_NFREE(out, 0) != (size_t)-1);
unsigned region_size = SLAB_LEN(out, 0) / SLAB_NUM_REGS(out, 0);
/* check that the allocation size is in range of small bins */
if (region_size > je_cb.bin_info[je_cb.nbins - 1].reg_size) {
return 0;
}
/* get the index based on quantum used */
unsigned binind = jeSize2BinIndexLgQ3(region_size);
/* make sure binind is in range and reverse map is correct */
assert(binind < je_cb.nbins && region_size == je_cb.bin_info[binind].reg_size);
return makeDefragDecision(&je_cb.bin_info[binind],
&je_usage_info[binind],
je_cb.bin_info[binind].nregs - SLAB_NFREE(out, 0));
}
/* Utility function to get the fragmentation ratio from jemalloc.
* It is critical to do that by comparing only heap maps that belong to
 * jemalloc, and skip the ones jemalloc keeps as spare. Since we use this
* fragmentation ratio in order to decide if a defrag action should be taken
* or not, a false detection can cause the defragmenter to waste a lot of CPU
* without the possibility of getting any results. */
float getAllocatorFragmentation(size_t *out_frag_bytes) {
size_t resident, active, allocated, frag_smallbins_bytes;
zmalloc_get_allocator_info(&allocated, &active, &resident, NULL, NULL);
frag_smallbins_bytes = allocatorDefragGetFragSmallbins();
/* Calculate the fragmentation ratio as the proportion of wasted memory in small
* bins (which are defraggable) relative to the total allocated memory (including large bins).
* This is because otherwise, if most of the memory usage is large bins, we may show high percentage,
* despite the fact it's not a lot of memory for the user. */
float frag_pct = (float)frag_smallbins_bytes / allocated * 100;
float rss_pct = ((float)resident / allocated) * 100 - 100;
size_t rss_bytes = resident - allocated;
if (out_frag_bytes) *out_frag_bytes = frag_smallbins_bytes;
serverLog(LL_DEBUG, "allocated=%zu, active=%zu, resident=%zu, frag=%.2f%% (%.2f%% rss), frag_bytes=%zu (%zu rss)",
allocated, active, resident, frag_pct, rss_pct, frag_smallbins_bytes, rss_bytes);
return frag_pct;
}
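/* Worked example with illustrative numbers: allocated == 100MB with
 * frag_smallbins_bytes == 5MB gives frag_pct == 5%, and resident == 130MB
 * gives rss_pct == 30% (130 / 100 * 100 - 100). */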
#elif defined(DEBUG_FORCE_DEFRAG)
int allocatorDefragInit(void) {
return 0;
}
void allocatorDefragFree(void *ptr, size_t size) {
UNUSED(size);
zfree(ptr);
}
__attribute__((malloc)) void *allocatorDefragAlloc(size_t size) {
    return zmalloc(size);
}
unsigned long allocatorDefragGetFragSmallbins(void) {
return 0;
}
int allocatorShouldDefrag(void *ptr) {
UNUSED(ptr);
return 1;
}
float getAllocatorFragmentation(size_t *out_frag_bytes) {
*out_frag_bytes = server.active_defrag_ignore_bytes + 1;
return server.active_defrag_threshold_upper;
}
#else
int allocatorDefragInit(void) {
return -1;
}
void allocatorDefragFree(void *ptr, size_t size) {
UNUSED(ptr);
UNUSED(size);
}
__attribute__((malloc)) void *allocatorDefragAlloc(size_t size) {
UNUSED(size);
return NULL;
}
unsigned long allocatorDefragGetFragSmallbins(void) {
return 0;
}
int allocatorShouldDefrag(void *ptr) {
UNUSED(ptr);
return 0;
}
float getAllocatorFragmentation(size_t *out_frag_bytes) {
UNUSED(out_frag_bytes);
return 0;
}
#endif

View File

@ -1,24 +0,0 @@
#ifndef __ALLOCATOR_DEFRAG_H
#define __ALLOCATOR_DEFRAG_H
#if defined(USE_JEMALLOC)
#include <jemalloc/jemalloc.h>
/* We can enable the server defrag capabilities only if we are using Jemalloc
* and the version that has the experimental.utilization namespace in mallctl . */
#if (defined(JEMALLOC_VERSION_MAJOR) && \
(JEMALLOC_VERSION_MAJOR > 5 || \
(JEMALLOC_VERSION_MAJOR == 5 && JEMALLOC_VERSION_MINOR > 2) || \
(JEMALLOC_VERSION_MAJOR == 5 && JEMALLOC_VERSION_MINOR == 2 && JEMALLOC_VERSION_BUGFIX >= 1))) || \
defined(DEBUG_FORCE_DEFRAG)
#define HAVE_DEFRAG
#endif
#endif
int allocatorDefragInit(void);
void allocatorDefragFree(void *ptr, size_t size);
__attribute__((malloc)) void *allocatorDefragAlloc(size_t size);
unsigned long allocatorDefragGetFragSmallbins(void);
int allocatorShouldDefrag(void *ptr);
float getAllocatorFragmentation(size_t *out_frag_bytes);
#endif /* __ALLOCATOR_DEFRAG_H */

View File

@ -1,6 +1,6 @@
/* anet.c -- Basic TCP socket stuff made a bit less boring
*
* Copyright (c) 2006-2012, Redis Ltd.
* Copyright (c) 2006-2012, Salvatore Sanfilippo <antirez at gmail dot com>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -45,7 +45,6 @@
#include <errno.h>
#include <stdarg.h>
#include <stdio.h>
#include <grp.h>
#include "anet.h"
#include "config.h"
@ -70,24 +69,17 @@ int anetGetError(int fd) {
return sockerr;
}
static int anetGetSocketFlags(char *err, int fd) {
int anetSetBlock(char *err, int fd, int non_block) {
int flags;
/* Set the socket blocking (if non_block is zero) or non-blocking.
* Note that fcntl(2) for F_GETFL and F_SETFL can't be
* interrupted by a signal. */
if ((flags = fcntl(fd, F_GETFL)) == -1) {
anetSetError(err, "fcntl(F_GETFL): %s", strerror(errno));
return ANET_ERR;
}
return flags;
}
int anetSetBlock(char *err, int fd, int non_block) {
int flags = anetGetSocketFlags(err, fd);
if (flags == ANET_ERR) {
return ANET_ERR;
}
/* Check if this flag has been set or unset, if so,
* then there is no need to call fcntl to set/unset it again. */
if (!!(flags & O_NONBLOCK) == !!non_block) return ANET_OK;
@ -112,21 +104,6 @@ int anetBlock(char *err, int fd) {
return anetSetBlock(err, fd, 0);
}
int anetIsBlock(char *err, int fd) {
int flags = anetGetSocketFlags(err, fd);
if (flags == ANET_ERR) {
return ANET_ERR;
}
/* Check if the O_NONBLOCK flag is set */
if (flags & O_NONBLOCK) {
return 0; /* Socket is non-blocking */
} else {
return 1; /* Socket is blocking */
}
}
/* Enable the FD_CLOEXEC on the given fd to avoid fd leaks.
* This function should be invoked for fd's on specific places
* where fork + execve system calls are called. */
@ -528,7 +505,7 @@ int anetTcpNonBlockBestEffortBindConnect(char *err, const char *addr, int port,
return anetTcpGenericConnect(err, addr, port, source_addr, ANET_CONNECT_NONBLOCK | ANET_CONNECT_BE_BINDING);
}
static int anetListen(char *err, int s, struct sockaddr *sa, socklen_t len, int backlog, mode_t perm, char *group) {
static int anetListen(char *err, int s, struct sockaddr *sa, socklen_t len, int backlog, mode_t perm) {
if (bind(s, sa, len) == -1) {
anetSetError(err, "bind: %s", strerror(errno));
close(s);
@ -537,22 +514,6 @@ static int anetListen(char *err, int s, struct sockaddr *sa, socklen_t len, int
if (sa->sa_family == AF_LOCAL && perm) chmod(((struct sockaddr_un *)sa)->sun_path, perm);
if (sa->sa_family == AF_LOCAL && group != NULL) {
struct group *grp;
if ((grp = getgrnam(group)) == NULL) {
anetSetError(err, "getgrnam error for group '%s': %s", group, strerror(errno));
close(s);
return ANET_ERR;
}
/* Owner of the socket remains same. */
if (chown(((struct sockaddr_un *)sa)->sun_path, -1, grp->gr_gid) == -1) {
anetSetError(err, "chown error for group '%s': %s", group, strerror(errno));
close(s);
return ANET_ERR;
}
}
if (listen(s, backlog) == -1) {
anetSetError(err, "listen: %s", strerror(errno));
close(s);
@ -592,7 +553,7 @@ static int _anetTcpServer(char *err, int port, char *bindaddr, int af, int backl
if (af == AF_INET6 && anetV6Only(err, s) == ANET_ERR) goto error;
if (anetSetReuseAddr(err, s) == ANET_ERR) goto error;
if (anetListen(err, s, p->ai_addr, p->ai_addrlen, backlog, 0, NULL) == ANET_ERR) s = ANET_ERR;
if (anetListen(err, s, p->ai_addr, p->ai_addrlen, backlog, 0) == ANET_ERR) s = ANET_ERR;
goto end;
}
if (p == NULL) {
@ -616,7 +577,7 @@ int anetTcp6Server(char *err, int port, char *bindaddr, int backlog) {
return _anetTcpServer(err, port, bindaddr, AF_INET6, backlog);
}
int anetUnixServer(char *err, char *path, mode_t perm, int backlog, char *group) {
int anetUnixServer(char *err, char *path, mode_t perm, int backlog) {
int s;
struct sockaddr_un sa;
@ -632,7 +593,7 @@ int anetUnixServer(char *err, char *path, mode_t perm, int backlog, char *group)
memset(&sa, 0, sizeof(sa));
sa.sun_family = AF_LOCAL;
valkey_strlcpy(sa.sun_path, path, sizeof(sa.sun_path));
if (anetListen(err, s, (struct sockaddr *)&sa, sizeof(sa), backlog, perm, group) == ANET_ERR) return ANET_ERR;
if (anetListen(err, s, (struct sockaddr *)&sa, sizeof(sa), backlog, perm) == ANET_ERR) return ANET_ERR;
return s;
}

View File

@ -1,6 +1,6 @@
/* anet.c -- Basic TCP socket stuff made a bit less boring
*
* Copyright (c) 2006-2012, Redis Ltd.
* Copyright (c) 2006-2012, Salvatore Sanfilippo <antirez at gmail dot com>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -56,12 +56,11 @@ int anetTcpNonBlockBestEffortBindConnect(char *err, const char *addr, int port,
int anetResolve(char *err, char *host, char *ipbuf, size_t ipbuf_len, int flags);
int anetTcpServer(char *err, int port, char *bindaddr, int backlog);
int anetTcp6Server(char *err, int port, char *bindaddr, int backlog);
int anetUnixServer(char *err, char *path, mode_t perm, int backlog, char *group);
int anetUnixServer(char *err, char *path, mode_t perm, int backlog);
int anetTcpAccept(char *err, int serversock, char *ip, size_t ip_len, int *port);
int anetUnixAccept(char *err, int serversock);
int anetNonBlock(char *err, int fd);
int anetBlock(char *err, int fd);
int anetIsBlock(char *err, int fd);
int anetCloexec(int fd);
int anetEnableTcpNoDelay(char *err, int fd);
int anetDisableTcpNoDelay(char *err, int fd);

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2009-2012, Redis Ltd.
* Copyright (c) 2009-2012, Salvatore Sanfilippo <antirez at gmail dot com>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -31,7 +31,6 @@
#include "bio.h"
#include "rio.h"
#include "functions.h"
#include "module.h"
#include <signal.h>
#include <fcntl.h>
@ -424,7 +423,7 @@ void aofManifestFreeAndUpdate(aofManifest *am) {
* appendonly.aof.1.base.aof (server.aof_use_rdb_preamble is no)
* appendonly.aof.1.base.rdb (server.aof_use_rdb_preamble is yes)
*/
sds getNewBaseFileNameAndMarkPreAsHistory(aofManifest *am, int aof_use_rdb_preamble) {
sds getNewBaseFileNameAndMarkPreAsHistory(aofManifest *am) {
serverAssert(am != NULL);
if (am->base_aof_info) {
serverAssert(am->base_aof_info->file_type == AOF_FILE_TYPE_BASE);
@ -432,7 +431,7 @@ sds getNewBaseFileNameAndMarkPreAsHistory(aofManifest *am, int aof_use_rdb_pream
listAddNodeHead(am->history_aof_list, am->base_aof_info);
}
char *format_suffix = aof_use_rdb_preamble ? RDB_FORMAT_SUFFIX : AOF_FORMAT_SUFFIX;
char *format_suffix = server.aof_use_rdb_preamble ? RDB_FORMAT_SUFFIX : AOF_FORMAT_SUFFIX;
aofInfo *ai = aofInfoCreate();
ai->file_name = sdscatprintf(sdsempty(), "%s.%lld%s%s", server.aof_filename, ++am->curr_base_file_seq,
@ -477,7 +476,7 @@ sds getLastIncrAofName(aofManifest *am) {
}
/* Or return the last one. */
listNode *lastnode = listLast(am->incr_aof_list);
listNode *lastnode = listIndex(am->incr_aof_list, -1);
aofInfo *ai = listNodeValue(lastnode);
return ai->file_name;
}
@ -713,7 +712,7 @@ void aofOpenIfNeededOnServerStart(void) {
/* If we start with an empty dataset, we will force create a BASE file. */
size_t incr_aof_len = listLength(server.aof_manifest->incr_aof_list);
if (!server.aof_manifest->base_aof_info && !incr_aof_len) {
sds base_name = getNewBaseFileNameAndMarkPreAsHistory(server.aof_manifest, server.aof_use_rdb_preamble);
sds base_name = getNewBaseFileNameAndMarkPreAsHistory(server.aof_manifest);
sds base_filepath = makePath(server.aof_dirname, base_name);
if (rewriteAppendOnlyFile(base_filepath) != C_OK) {
exit(1);
@ -1372,12 +1371,10 @@ struct client *createAOFClient(void) {
*/
c->raw_flag = 0;
c->flag.deny_blocking = 1;
c->flag.fake = 1;
/* We set the fake client as a replica waiting for the synchronization
* so that the server will not try to send replies to this client. */
initClientReplicationData(c);
c->repl_data->repl_state = REPLICA_STATE_WAIT_BGSAVE_START;
c->repl_state = REPLICA_STATE_WAIT_BGSAVE_START;
return c;
}
@ -1532,11 +1529,10 @@ int loadSingleAppendOnlyFile(char *filename) {
}
/* Command lookup */
sds err = NULL;
fakeClient->cmd = fakeClient->lastcmd = cmd = lookupCommand(argv, argc);
if ((!cmd && !commandCheckExistence(fakeClient, &err)) || (cmd && !commandCheckArity(cmd, argc, &err))) {
serverLog(LL_WARNING, "Error reading the append only file %s, error: %s", filename, err);
sdsfree(err);
cmd = lookupCommand(argv, argc);
if (!cmd) {
serverLog(LL_WARNING, "Unknown command '%s' reading the append only file %s", (char *)argv[0]->ptr,
filename);
freeClientArgv(fakeClient);
ret = AOF_FAILED;
goto cleanup;
@ -1545,6 +1541,7 @@ int loadSingleAppendOnlyFile(char *filename) {
if (cmd->proc == multiCommand) valid_before_multi = valid_up_to;
/* Run the command in the context of a fake client */
fakeClient->cmd = fakeClient->lastcmd = cmd;
if (fakeClient->flag.multi && fakeClient->cmd->proc != execCommand) {
/* Note: we don't have to attempt calling evalGetCommandFlags,
* since this is AOF, the checks in processCommand are not made
@ -1890,29 +1887,30 @@ int rewriteSortedSetObject(rio *r, robj *key, robj *o) {
}
} else if (o->encoding == OBJ_ENCODING_SKIPLIST) {
zset *zs = o->ptr;
hashtableIterator iter;
hashtableInitIterator(&iter, zs->ht, 0);
void *next;
while (hashtableNext(&iter, &next)) {
zskiplistNode *node = next;
dictIterator *di = dictGetIterator(zs->dict);
dictEntry *de;
while ((de = dictNext(di)) != NULL) {
sds ele = dictGetKey(de);
double *score = dictGetVal(de);
if (count == 0) {
int cmd_items = (items > AOF_REWRITE_ITEMS_PER_CMD) ? AOF_REWRITE_ITEMS_PER_CMD : items;
if (!rioWriteBulkCount(r, '*', 2 + cmd_items * 2) || !rioWriteBulkString(r, "ZADD", 4) ||
!rioWriteBulkObject(r, key)) {
hashtableResetIterator(&iter);
dictReleaseIterator(di);
return 0;
}
}
sds ele = node->ele;
if (!rioWriteBulkDouble(r, node->score) || !rioWriteBulkString(r, ele, sdslen(ele))) {
hashtableResetIterator(&iter);
if (!rioWriteBulkDouble(r, *score) || !rioWriteBulkString(r, ele, sdslen(ele))) {
dictReleaseIterator(di);
return 0;
}
if (++count == AOF_REWRITE_ITEMS_PER_CMD) count = 0;
items--;
}
hashtableResetIterator(&iter);
dictReleaseIterator(di);
} else {
serverPanic("Unknown sorted zset encoding");
}
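A quick arithmetic check of the batching above: each full batch emits one ZADD carrying AOF_REWRITE_ITEMS_PER_CMD score/member pairs, so the multibulk header written by rioWriteBulkCount is '*' with 2 + cmd_items * 2 elements (command name, key, then a score and a member per item). Assuming the usual AOF_REWRITE_ITEMS_PER_CMD of 64, a 150-member sorted set is rewritten as three commands: two ZADDs of 64 pairs (130 bulk elements each) and one of 22 pairs (46 bulk elements).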
@ -1922,7 +1920,7 @@ int rewriteSortedSetObject(rio *r, robj *key, robj *o) {
/* Write either the key or the value of the currently selected item of a hash.
* The 'hi' argument passes a valid hash iterator.
* The 'what' field specifies whether to write a key or a value and can be
* either OBJ_HASH_FIELD or OBJ_HASH_VALUE.
* either OBJ_HASH_KEY or OBJ_HASH_VALUE.
*
* The function returns 0 on error, non-zero on success. */
static int rioWriteHashIteratorCursor(rio *r, hashTypeIterator *hi, int what) {
@ -1936,7 +1934,7 @@ static int rioWriteHashIteratorCursor(rio *r, hashTypeIterator *hi, int what) {
return rioWriteBulkString(r, (char *)vstr, vlen);
else
return rioWriteBulkLongLong(r, vll);
} else if (hi->encoding == OBJ_ENCODING_HASHTABLE) {
} else if (hi->encoding == OBJ_ENCODING_HT) {
sds value = hashTypeCurrentFromHashTable(hi, what);
return rioWriteBulkString(r, value, sdslen(value));
}
@ -1948,30 +1946,30 @@ static int rioWriteHashIteratorCursor(rio *r, hashTypeIterator *hi, int what) {
/* Emit the commands needed to rebuild a hash object.
* The function returns 0 on error, 1 on success. */
int rewriteHashObject(rio *r, robj *key, robj *o) {
hashTypeIterator hi;
hashTypeIterator *hi;
long long count = 0, items = hashTypeLength(o);
hashTypeInitIterator(o, &hi);
while (hashTypeNext(&hi) != C_ERR) {
hi = hashTypeInitIterator(o);
while (hashTypeNext(hi) != C_ERR) {
if (count == 0) {
int cmd_items = (items > AOF_REWRITE_ITEMS_PER_CMD) ? AOF_REWRITE_ITEMS_PER_CMD : items;
if (!rioWriteBulkCount(r, '*', 2 + cmd_items * 2) || !rioWriteBulkString(r, "HMSET", 5) ||
!rioWriteBulkObject(r, key)) {
hashTypeResetIterator(&hi);
hashTypeReleaseIterator(hi);
return 0;
}
}
if (!rioWriteHashIteratorCursor(r, &hi, OBJ_HASH_FIELD) || !rioWriteHashIteratorCursor(r, &hi, OBJ_HASH_VALUE)) {
hashTypeResetIterator(&hi);
if (!rioWriteHashIteratorCursor(r, hi, OBJ_HASH_KEY) || !rioWriteHashIteratorCursor(r, hi, OBJ_HASH_VALUE)) {
hashTypeReleaseIterator(hi);
return 0;
}
if (++count == AOF_REWRITE_ITEMS_PER_CMD) count = 0;
items--;
}
hashTypeResetIterator(&hi);
hashTypeReleaseIterator(hi);
return 1;
}
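To make the emitted format concrete, a small hash {f1: v1, f2: v2} stored at key "h" fits in a single batch (2 + 2 * 2 = 6 bulk elements), so the rewrite produces the RESP text below (key and field names illustrative):

    *6\r\n$5\r\nHMSET\r\n$1\r\nh\r\n$2\r\nf1\r\n$2\r\nv1\r\n$2\r\nf2\r\n$2\r\nv2\r\n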
@ -2162,7 +2160,7 @@ int rewriteModuleObject(rio *r, robj *key, robj *o, int dbid) {
ValkeyModuleIO io;
moduleValue *mv = o->ptr;
moduleType *mt = mv->type;
moduleInitIOContext(&io, mt, r, key, dbid);
moduleInitIOContext(io, mt, r, key, dbid);
mt->aof_rewrite(&io, key, mv->value);
if (io.ctx) {
moduleFreeContext(io.ctx);
@ -2191,6 +2189,7 @@ werr:
}
int rewriteAppendOnlyFileRio(rio *aof) {
dictEntry *de;
int j;
long key_count = 0;
long long updated_time = 0;
@ -2217,20 +2216,19 @@ int rewriteAppendOnlyFileRio(rio *aof) {
if (rioWrite(aof, selectcmd, sizeof(selectcmd) - 1) == 0) goto werr;
if (rioWriteBulkLongLong(aof, j) == 0) goto werr;
kvs_it = kvstoreIteratorInit(db->keys, HASHTABLE_ITER_SAFE | HASHTABLE_ITER_PREFETCH_VALUES);
kvs_it = kvstoreIteratorInit(db->keys);
/* Iterate this DB writing every entry */
void *next;
while (kvstoreIteratorNext(kvs_it, &next)) {
robj *o = next;
while ((de = kvstoreIteratorNext(kvs_it)) != NULL) {
sds keystr;
robj key;
robj key, *o;
long long expiretime;
size_t aof_bytes_before_key = aof->processed_bytes;
keystr = objectGetKey(o);
keystr = dictGetKey(de);
o = dictGetVal(de);
initStaticStringObject(key, keystr);
expiretime = objectGetExpire(o);
expiretime = getExpire(db, &key);
/* Save the key and associated value */
if (o->type == OBJ_STRING) {
@ -2447,7 +2445,6 @@ int rewriteAppendOnlyFileBackground(void) {
serverLog(LL_NOTICE, "Background append only file rewriting started by pid %ld", (long)childpid);
server.aof_rewrite_scheduled = 0;
server.aof_rewrite_time_start = time(NULL);
server.aof_rewrite_use_rdb_preamble = server.aof_use_rdb_preamble;
return C_OK;
}
return C_OK; /* unreached */
@ -2560,7 +2557,7 @@ void backgroundRewriteDoneHandler(int exitcode, int bysignal) {
/* Get a new BASE file name and mark the previous (if we have)
* as the HISTORY type. */
sds new_base_filename = getNewBaseFileNameAndMarkPreAsHistory(temp_am, server.aof_rewrite_use_rdb_preamble);
sds new_base_filename = getNewBaseFileNameAndMarkPreAsHistory(temp_am);
serverAssert(new_base_filename != NULL);
new_base_filepath = makePath(server.aof_dirname, new_base_filename);

View File

@ -1,7 +1,6 @@
/*
* Copyright (c) 2009-2012, Redis Ltd.
* Copyright (c) 2009-2012, Salvatore Sanfilippo <antirez at gmail dot com>
* Copyright (c) 2024, Valkey contributors
* Copyright (c) 2025, Futriix contributors
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -29,19 +28,25 @@
* POSSIBILITY OF SUCH DAMAGE.
*/
/* clang-format off */
const char *ascii_logo =
" \n "
"▗▞▀▀▘█ ▐▌ ■ ▄▄▄ ▄ ▄ ▄ ▄ \n"
"▐▌ ▀▄▄▞▘▗▄▟▙▄▖█ ▄ ▄ ▀▄▀ \n"
"▐▛▀▘ ▐▌ █ █ █ ▄▀ ▀▄ \n"
"▐▌ ▐▌ █ █ Futriix %s (%s/%d) %s bit\n"
" Running in %s mode\n"
" Port: %d\n"
" PID: %ld \n"
" \n"
" \n\n";
" .+^+. \n"
" .+#########+. \n"
" .+########+########+. Valkey %s (%s/%d) %s bit\n"
" .+########+' '+########+. \n"
" .########+' .+. '+########. Running in %s mode\n"
" |####+' .+#######+. '+####| Port: %d\n"
" |###| .+###############+. |###| PID: %ld \n"
" |###| |#####*'' ''*#####| |###| \n"
" |###| |####' .-. '####| |###| \n"
" |###| |###( (@@@) )###| |###| https://valkey.io \n"
" |###| |####. '-' .####| |###| \n"
" |###| |#####*. .*#####| |###| \n"
" |###| '+#####| |#####+' |###| \n"
" |####+. +##| |#+' .+####| \n"
" '#######+ |##| .+########' \n"
" '+###| |##| .+########+' \n"
" '| |####+########+' \n"
" +#########+' \n"
" '+v+' \n\n";
/* clang-format on */

197
src/atomicvar.h Normal file
View File

@ -0,0 +1,197 @@
/* This file implements atomic counters using C11 _Atomic, __atomic or __sync
* macros if available; otherwise compilation fails with an error.
*
* The exported interface is composed of the following macros:
*
* atomicIncr(var,count) -- Increment the atomic counter
* atomicGetIncr(var,oldvalue_var,count) -- Get and increment the atomic counter
* atomicIncrGet(var,newvalue_var,count) -- Increment and get the atomic counter new value
* atomicDecr(var,count) -- Decrement the atomic counter
* atomicGet(var,dstvar) -- Fetch the atomic counter value
* atomicSet(var,value) -- Set the atomic counter value
* atomicGetWithSync(var,value) -- 'atomicGet' with inter-thread synchronization
* atomicSetWithSync(var,value) -- 'atomicSet' with inter-thread synchronization
*
* Atomic operations on flags.
* Flag type can be int, long, long long or their unsigned counterparts.
* The value of the flag can be 1 or 0.
*
* atomicFlagGetSet(var,oldvalue_var) -- Get and set the atomic counter value
*
* NOTE1: The __atomic* and _Atomic implementations could actually be extended to support any value by changing the
* hardcoded new value passed to __atomic_exchange* from 1 to @param count,
* i.e. oldvalue_var = atomic_exchange_explicit(&var, count).
* However, in order to stay compatible with the __sync function family, we can use only 0 and 1.
* The only exchange alternative offered by __sync is __sync_lock_test_and_set,
* but as described by the GNU manual for __sync_lock_test_and_set():
* https://gcc.gnu.org/onlinedocs/gcc/_005f_005fsync-Builtins.html
* "A target may support reduced functionality here by which the only valid value to store is the immediate constant 1.
* The exact value actually stored in *ptr is implementation defined." Hence, we can't rely on it for any value other
* than 1. We eventually chose to implement this method with __sync_val_compare_and_swap since it satisfies the
* functionality needed for atomicFlagGetSet (if the flag was 0 -> set to 1; if it's already 1 -> do nothing, but the
* final result is that the flag is set), and it also has a full barrier (__sync_lock_test_and_set has only an acquire barrier).
*
* NOTE2: Unlike the other atomic types, which aren't guaranteed to be lock-free, C11 atomic_flag is.
* To check whether a type is lock-free, atomic_is_lock_free() can be used.
* Limiting the flag type to atomic_flag could be considered as a performance improvement.
*
* Never use the return value of these macros; instead use atomicGetIncr()
* if you need to get the current value and increment it atomically, as
* in the following example:
*
* long oldvalue;
* atomicGetIncr(myvar,oldvalue,1);
* doSomethingWith(oldvalue);
*
* ----------------------------------------------------------------------------
*
* Copyright (c) 2015, Salvatore Sanfilippo <antirez at gmail dot com>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of Redis nor the names of its contributors may be used
* to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <pthread.h>
#include "config.h"
#ifndef __ATOMIC_VAR_H
#define __ATOMIC_VAR_H
/* Define serverAtomic for atomic variable. */
#define serverAtomic
/* To test the server with Helgrind (a Valgrind tool) it is useful to define
* the following macro, forcing use of the __sync macros: Helgrind can detect
* those (even if they are less efficient), so no false positives are
* reported. */
// #define __ATOMIC_VAR_FORCE_SYNC_MACROS
/* There will be many false positives if we test the server with Helgrind, since
* Helgrind can't see the ordering we have imposed on the program, so
* we use the macros in helgrind.h to tell Helgrind about inter-thread
* happens-before relationships explicitly, avoiding false positives.
*
* For more details, please see: valgrind/helgrind.h and
* https://www.valgrind.org/docs/manual/hg-manual.html#hg-manual.effective-use
*
* These macros take effect only when building with 'make helgrind', and you
* must first install Valgrind in the default path configuration. */
#ifdef __ATOMIC_VAR_FORCE_SYNC_MACROS
#include <valgrind/helgrind.h>
#else
#define ANNOTATE_HAPPENS_BEFORE(v) ((void)v)
#define ANNOTATE_HAPPENS_AFTER(v) ((void)v)
#endif
#if !defined(__ATOMIC_VAR_FORCE_SYNC_MACROS) && defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201112L) && \
!defined(__STDC_NO_ATOMICS__)
/* Use '_Atomic' keyword if the compiler supports. */
#undef serverAtomic
#define serverAtomic _Atomic
/* Implementation using _Atomic in C11. */
#include <stdatomic.h>
#define atomicIncr(var, count) atomic_fetch_add_explicit(&var, (count), memory_order_relaxed)
#define atomicGetIncr(var, oldvalue_var, count) \
do { \
oldvalue_var = atomic_fetch_add_explicit(&var, (count), memory_order_relaxed); \
} while (0)
#define atomicIncrGet(var, newvalue_var, count) newvalue_var = atomicIncr(var, count) + count
#define atomicDecr(var, count) atomic_fetch_sub_explicit(&var, (count), memory_order_relaxed)
#define atomicGet(var, dstvar) \
do { \
dstvar = atomic_load_explicit(&var, memory_order_relaxed); \
} while (0)
#define atomicSet(var, value) atomic_store_explicit(&var, value, memory_order_relaxed)
#define atomicGetWithSync(var, dstvar) \
do { \
dstvar = atomic_load_explicit(&var, memory_order_seq_cst); \
} while (0)
#define atomicSetWithSync(var, value) atomic_store_explicit(&var, value, memory_order_seq_cst)
#define atomicFlagGetSet(var, oldvalue_var) oldvalue_var = atomic_exchange_explicit(&var, 1, memory_order_relaxed)
#define REDIS_ATOMIC_API "c11-builtin"
#elif !defined(__ATOMIC_VAR_FORCE_SYNC_MACROS) && \
(!defined(__clang__) || !defined(__APPLE__) || __apple_build_version__ > 4210057) && defined(__ATOMIC_RELAXED) && \
defined(__ATOMIC_SEQ_CST)
/* Implementation using __atomic macros. */
#define atomicIncr(var, count) __atomic_add_fetch(&var, (count), __ATOMIC_RELAXED)
#define atomicIncrGet(var, newvalue_var, count) newvalue_var = __atomic_add_fetch(&var, (count), __ATOMIC_RELAXED)
#define atomicGetIncr(var, oldvalue_var, count) \
do { \
oldvalue_var = __atomic_fetch_add(&var, (count), __ATOMIC_RELAXED); \
} while (0)
#define atomicDecr(var, count) __atomic_sub_fetch(&var, (count), __ATOMIC_RELAXED)
#define atomicGet(var, dstvar) \
do { \
dstvar = __atomic_load_n(&var, __ATOMIC_RELAXED); \
} while (0)
#define atomicSet(var, value) __atomic_store_n(&var, value, __ATOMIC_RELAXED)
#define atomicGetWithSync(var, dstvar) \
do { \
dstvar = __atomic_load_n(&var, __ATOMIC_SEQ_CST); \
} while (0)
#define atomicSetWithSync(var, value) __atomic_store_n(&var, value, __ATOMIC_SEQ_CST)
#define atomicFlagGetSet(var, oldvalue_var) oldvalue_var = __atomic_exchange_n(&var, 1, __ATOMIC_RELAXED)
#define REDIS_ATOMIC_API "atomic-builtin"
#elif defined(HAVE_ATOMIC)
/* Implementation using __sync macros. */
#define atomicIncr(var, count) __sync_add_and_fetch(&var, (count))
#define atomicIncrGet(var, newvalue_var, count) newvalue_var = __sync_add_and_fetch(&var, (count))
#define atomicGetIncr(var, oldvalue_var, count) \
do { \
oldvalue_var = __sync_fetch_and_add(&var, (count)); \
} while (0)
#define atomicDecr(var, count) __sync_sub_and_fetch(&var, (count))
#define atomicGet(var, dstvar) \
do { \
dstvar = __sync_sub_and_fetch(&var, 0); \
} while (0)
#define atomicSet(var, value) \
do { \
while (!__sync_bool_compare_and_swap(&var, var, value)); \
} while (0)
/* Actually the builtin issues a full memory barrier by default. */
#define atomicGetWithSync(var, dstvar) \
do { \
dstvar = __sync_sub_and_fetch(&var, 0, __sync_synchronize); \
ANNOTATE_HAPPENS_AFTER(&var); \
} while (0)
#define atomicSetWithSync(var, value) \
do { \
ANNOTATE_HAPPENS_BEFORE(&var); \
while (!__sync_bool_compare_and_swap(&var, var, value, __sync_synchronize)); \
} while (0)
#define atomicFlagGetSet(var, oldvalue_var) oldvalue_var = __sync_val_compare_and_swap(&var, 0, 1)
#define REDIS_ATOMIC_API "sync-builtin"
#else
#error "Unable to determine atomic operations for your platform"
#endif
#endif /* __ATOMIC_VAR_H */
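A short usage sketch of the exported macros (variable names are illustrative; semantics follow the documentation at the top of this file):

    serverAtomic long long counter = 0;
    long long oldval, newval;
    atomicIncr(counter, 1);               /* counter == 1 */
    atomicGetIncr(counter, oldval, 1);    /* oldval == 1, counter == 2 */
    atomicIncrGet(counter, newval, 3);    /* newval == 5 */
    atomicGet(counter, oldval);           /* oldval == 5 */
    atomicSet(counter, 0);

    serverAtomic int flag = 0;
    int was_set;
    atomicFlagGetSet(flag, was_set);      /* was_set == 0, flag is now 1 */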

View File

@ -31,7 +31,7 @@
*
* ----------------------------------------------------------------------------
*
* Copyright (c) 2009-2012, Redis Ltd.
* Copyright (c) 2009-2012, Salvatore Sanfilippo <antirez at gmail dot com>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2009-2012, Redis Ltd.
* Copyright (c) 2009-2012, Salvatore Sanfilippo <antirez at gmail dot com>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without

View File

@ -1,6 +1,6 @@
/* Bit operations.
*
* Copyright (c) 2009-2012, Redis Ltd.
* Copyright (c) 2009-2012, Salvatore Sanfilippo <antirez at gmail dot com>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -486,7 +486,7 @@ robj *lookupStringForBitCommand(client *c, uint64_t maxbit, int *dirty) {
if (o == NULL) {
o = createObject(OBJ_STRING, sdsnewlen(NULL, byte + 1));
dbAdd(c->db, c->argv[1], &o);
dbAdd(c->db, c->argv[1], o);
if (dirty) *dirty = 1;
} else {
o = dbUnshareStringValue(c->db, c->argv[1], o);
@ -772,8 +772,9 @@ void bitopCommand(client *c) {
/* Store the computed value into the target key */
if (maxlen) {
o = createObject(OBJ_STRING, res);
setKey(c, c->db, targetkey, &o, 0);
setKey(c, c->db, targetkey, o, 0);
notifyKeyspaceEvent(NOTIFY_STRING, "set", targetkey, c->db->id);
decrRefCount(o);
server.dirty++;
} else if (dbDelete(c->db, targetkey)) {
signalModifiedKey(c, c->db, targetkey);

View File

@ -1,6 +1,6 @@
/* blocked.c - generic support for blocking operations like BLPOP & WAIT.
*
* Copyright (c) 2009-2012, Redis Ltd.
* Copyright (c) 2009-2012, Salvatore Sanfilippo <antirez at gmail dot com>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -61,11 +61,10 @@
*/
#include "server.h"
#include "commandlog.h"
#include "slowlog.h"
#include "latency.h"
#include "monotonic.h"
#include "cluster_slot_stats.h"
#include "module.h"
/* forward declarations */
static void unblockClientWaitingData(client *c);
@ -75,25 +74,13 @@ static void moduleUnblockClientOnKey(client *c, robj *key);
static void releaseBlockedEntry(client *c, dictEntry *de, int remove_key);
void initClientBlockingState(client *c) {
if (c->bstate) return;
c->bstate = zmalloc(sizeof(blockingState));
c->bstate->btype = BLOCKED_NONE;
c->bstate->timeout = 0;
c->bstate->unblock_on_nokey = 0;
c->bstate->keys = dictCreate(&objectKeyHeapPointerValueDictType);
c->bstate->numreplicas = 0;
c->bstate->numlocal = 0;
c->bstate->reploffset = 0;
c->bstate->generic_blocked_list_node = NULL;
c->bstate->module_blocked_handle = NULL;
c->bstate->async_rm_call_handle = NULL;
}
void freeClientBlockingState(client *c) {
if (!c->bstate) return;
dictRelease(c->bstate->keys);
zfree(c->bstate);
c->bstate = NULL;
c->bstate.btype = BLOCKED_NONE;
c->bstate.timeout = 0;
c->bstate.keys = dictCreate(&objectKeyHeapPointerValueDictType);
c->bstate.numreplicas = 0;
c->bstate.reploffset = 0;
c->bstate.unblock_on_nokey = 0;
c->bstate.async_rm_call_handle = NULL;
}
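A note on the structural difference visible here: one side of this diff stores blocking state behind a pointer (client->bstate is a heap-allocated blockingState *, created lazily by initClientBlockingState() and released by freeClientBlockingState()), while the other embeds the struct in the client itself (client->bstate is a blockingState value, reset in place). This is why accesses throughout the rest of the file alternate between c->bstate->field and c->bstate.field.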
/* Block a client for the specific operation type. Once the CLIENT_BLOCKED
@ -103,10 +90,8 @@ void blockClient(client *c, int btype) {
/* Primary client should never be blocked unless pause or module */
serverAssert(!(c->flag.primary && btype != BLOCKED_MODULE && btype != BLOCKED_POSTPONE));
initClientBlockingState(c);
c->flag.blocked = 1;
c->bstate->btype = btype;
c->bstate.btype = btype;
if (!c->flag.module)
server.blocked_clients++; /* We count blocked client stats on regular clients and not on module clients */
server.blocked_clients_by_type[btype]++;
@ -117,31 +102,19 @@ void blockClient(client *c, int btype) {
* it will attempt to reprocess the command, which will update the statistics.
* However, in case the client timed out, or a module blocked client is being unblocked,
* the command will not be reprocessed and we need to update the stats ourselves.
* This function will make updates to the commandstats, slot-stats, commandlog and monitors.
* The failed_or_rejected parameter indicates that the blocked command either failed internally or was
* rejected/aborted externally. In case the command was rejected, the value ERROR_COMMAND_REJECTED should be passed.
* In case the command failed internally, ERROR_COMMAND_FAILED should be passed.
* A value of zero indicates no error was reported after the command was unblocked */
void updateStatsOnUnblock(client *c, long blocked_us, long reply_us, int failed_or_rejected) {
c->duration += blocked_us + reply_us;
c->lastcmd->microseconds += c->duration;
clusterSlotStatsAddCpuDuration(c, c->duration);
* This function will make updates to the commandstats, slot-stats, slowlog and monitors.*/
void updateStatsOnUnblock(client *c, long blocked_us, long reply_us, int had_errors) {
const ustime_t total_cmd_duration = c->duration + blocked_us + reply_us;
c->lastcmd->microseconds += total_cmd_duration;
clusterSlotStatsAddCpuDuration(c, total_cmd_duration);
c->lastcmd->calls++;
c->commands_processed++;
server.stat_numcommands++;
debugServerAssertWithInfo(c, NULL, failed_or_rejected >= 0 && failed_or_rejected <= ERROR_COMMAND_FAILED);
if (failed_or_rejected) {
if (failed_or_rejected & ERROR_COMMAND_FAILED)
c->lastcmd->failed_calls++;
else if (failed_or_rejected & ERROR_COMMAND_REJECTED)
c->lastcmd->rejected_calls++;
else
debugServerAssertWithInfo(c, NULL, 0);
}
if (had_errors) c->lastcmd->failed_calls++;
if (server.latency_tracking_enabled)
updateCommandLatencyHistogram(&(c->lastcmd->latency_histogram), c->duration * 1000);
/* Log the command into the commandlog if needed. */
commandlogPushCurrentCommand(c, c->lastcmd);
updateCommandLatencyHistogram(&(c->lastcmd->latency_histogram), total_cmd_duration * 1000);
/* Log the command into the Slow log if needed. */
slowlogPushCurrentCommand(c, c->lastcmd, total_cmd_duration);
c->duration = 0;
/* Log the reply duration event. */
latencyAddSampleIfNeeded("command-unblocking", reply_us / 1000);
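For reference, a call site under the failed_or_rejected convention looks like the module-unblock path later in this file, where the caller classifies the outcome before reporting it:

    updateStatsOnUnblock(c, 0, elapsedUs(replyTimer),
                         (server.stat_total_error_replies != prev_error_replies) ? ERROR_COMMAND_FAILED : 0);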
@ -210,18 +183,17 @@ void queueClientForReprocessing(client *c) {
/* Unblock a client calling the right function depending on the kind
* of operation the client is blocking for. */
void unblockClient(client *c, int queue_for_reprocessing) {
if (c->bstate->btype == BLOCKED_LIST || c->bstate->btype == BLOCKED_ZSET || c->bstate->btype == BLOCKED_STREAM) {
if (c->bstate.btype == BLOCKED_LIST || c->bstate.btype == BLOCKED_ZSET || c->bstate.btype == BLOCKED_STREAM) {
unblockClientWaitingData(c);
} else if (c->bstate->btype == BLOCKED_WAIT) {
} else if (c->bstate.btype == BLOCKED_WAIT) {
unblockClientWaitingReplicas(c);
} else if (c->bstate->btype == BLOCKED_MODULE) {
} else if (c->bstate.btype == BLOCKED_MODULE) {
if (moduleClientIsBlockedOnKeys(c)) unblockClientWaitingData(c);
unblockClientFromModule(c);
} else if (c->bstate->btype == BLOCKED_POSTPONE) {
serverAssert(c->bstate->postponed_list_node);
listDelNode(server.postponed_clients, c->bstate->postponed_list_node);
c->bstate->postponed_list_node = NULL;
} else if (c->bstate->btype == BLOCKED_SHUTDOWN) {
} else if (c->bstate.btype == BLOCKED_POSTPONE) {
listDelNode(server.postponed_clients, c->postponed_list_node);
c->postponed_list_node = NULL;
} else if (c->bstate.btype == BLOCKED_SHUTDOWN) {
/* No special cleanup. */
} else {
serverPanic("Unknown btype in unblockClient().");
@ -229,7 +201,8 @@ void unblockClient(client *c, int queue_for_reprocessing) {
/* Reset the client for a new query, unless the client has pending command to process
* or in case a shutdown operation was canceled and we are still in the processCommand sequence */
if (!c->flag.pending_command && c->bstate->btype != BLOCKED_SHUTDOWN) {
if (!c->flag.pending_command && c->bstate.btype != BLOCKED_SHUTDOWN) {
freeClientOriginalArgv(c);
/* Clients that are not blocked on keys are not reprocessed so we must
* call reqresAppendResponse here (for clients blocked on key,
* unblockClientOnKey is called, which eventually calls processCommand,
@ -240,12 +213,12 @@ void unblockClient(client *c, int queue_for_reprocessing) {
/* We count blocked client stats on regular clients and not on module clients */
if (!c->flag.module) server.blocked_clients--;
server.blocked_clients_by_type[c->bstate->btype]--;
server.blocked_clients_by_type[c->bstate.btype]--;
/* Clear the flags, and put the client in the unblocked list so that
* we'll process new commands in its query buffer ASAP. */
c->flag.blocked = 0;
c->bstate->btype = BLOCKED_NONE;
c->bstate->unblock_on_nokey = 0;
c->bstate.btype = BLOCKED_NONE;
c->bstate.unblock_on_nokey = 0;
removeClientFromTimeoutTable(c);
if (queue_for_reprocessing) queueClientForReprocessing(c);
}
@ -254,22 +227,22 @@ void unblockClient(client *c, int queue_for_reprocessing) {
* send it a reply of some kind. After this function is called,
* unblockClient() will be called with the same client as argument. */
void replyToBlockedClientTimedOut(client *c) {
if (c->bstate->btype == BLOCKED_LIST || c->bstate->btype == BLOCKED_ZSET || c->bstate->btype == BLOCKED_STREAM) {
if (c->bstate.btype == BLOCKED_LIST || c->bstate.btype == BLOCKED_ZSET || c->bstate.btype == BLOCKED_STREAM) {
addReplyNullArray(c);
updateStatsOnUnblock(c, 0, 0, 0);
} else if (c->bstate->btype == BLOCKED_WAIT) {
} else if (c->bstate.btype == BLOCKED_WAIT) {
if (c->cmd->proc == waitCommand) {
addReplyLongLong(c, replicationCountAcksByOffset(c->bstate->reploffset));
addReplyLongLong(c, replicationCountAcksByOffset(c->bstate.reploffset));
} else if (c->cmd->proc == waitaofCommand) {
addReplyArrayLen(c, 2);
addReplyLongLong(c, server.fsynced_reploff >= c->bstate->reploffset);
addReplyLongLong(c, replicationCountAOFAcksByOffset(c->bstate->reploffset));
addReplyLongLong(c, server.fsynced_reploff >= c->bstate.reploffset);
addReplyLongLong(c, replicationCountAOFAcksByOffset(c->bstate.reploffset));
} else if (c->cmd->proc == clusterCommand) {
addReplyErrorObject(c, shared.noreplicaserr);
} else {
serverPanic("Unknown wait command %s in replyToBlockedClientTimedOut().", c->cmd->declared_name);
}
} else if (c->bstate->btype == BLOCKED_MODULE) {
} else if (c->bstate.btype == BLOCKED_MODULE) {
moduleBlockedClientTimedOut(c, 0);
} else {
serverPanic("Unknown btype in replyToBlockedClientTimedOut().");
@ -285,7 +258,7 @@ void replyToClientsBlockedOnShutdown(void) {
listRewind(server.clients, &li);
while ((ln = listNext(&li))) {
client *c = listNodeValue(ln);
if (c->flag.blocked && c->bstate->btype == BLOCKED_SHUTDOWN) {
if (c->flag.blocked && c->bstate.btype == BLOCKED_SHUTDOWN) {
addReplyError(c, "Errors trying to SHUTDOWN. Check logs.");
unblockClient(c, 1);
}
@ -312,7 +285,7 @@ void disconnectAllBlockedClients(void) {
* command processing will start from scratch, and the command will
* be either executed or rejected (unlike LIST blocked clients, for
* which the command is already in progress, in a way). */
if (c->bstate->btype == BLOCKED_POSTPONE) continue;
if (c->bstate.btype == BLOCKED_POSTPONE) continue;
unblockClientOnError(c, "-UNBLOCKED force unblock from blocking operation, "
"instance state changed (master -> replica?)");
@ -397,17 +370,15 @@ void blockForKeys(client *c, int btype, robj **keys, int numkeys, mstime_t timeo
list *l;
int j;
initClientBlockingState(c);
if (!c->flag.reprocessing_command) {
/* If the client is re-processing the command, we do not set the timeout
* because we need to retain the client's original timeout. */
c->bstate->timeout = timeout;
c->bstate.timeout = timeout;
}
for (j = 0; j < numkeys; j++) {
/* If the key already exists in the dictionary ignore it. */
if (!(client_blocked_entry = dictAddRaw(c->bstate->keys, keys[j], NULL))) {
if (!(client_blocked_entry = dictAddRaw(c->bstate.keys, keys[j], NULL))) {
continue;
}
incrRefCount(keys[j]);
@ -424,7 +395,7 @@ void blockForKeys(client *c, int btype, robj **keys, int numkeys, mstime_t timeo
l = dictGetVal(db_blocked_existing_entry);
}
listAddNodeTail(l, c);
dictSetVal(c->bstate->keys, client_blocked_entry, listLast(l));
dictSetVal(c->bstate.keys, client_blocked_entry, listLast(l));
/* We need to add the key to blocking_keys_unblock_on_nokey, if the client
* wants to be awakened if key is deleted (like XREADGROUP) */
@ -438,7 +409,7 @@ void blockForKeys(client *c, int btype, robj **keys, int numkeys, mstime_t timeo
}
}
}
c->bstate->unblock_on_nokey = unblock_on_nokey;
c->bstate.unblock_on_nokey = unblock_on_nokey;
/* Currently we assume key blocking will require reprocessing the command.
* However, modules have a different way to handle the reprocessing,
* which does not require setting the pending command flag */
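To tie the pieces together, a BLPOP-style caller would block on its key arguments roughly like this (a sketch under the signature shown above; the argument slicing is hypothetical):

    /* Block on keys argv[1..argc-2] until data arrives or 'timeout' fires;
     * 0 = do not unblock when a key is deleted (lists keep waiting). */
    blockForKeys(c, BLOCKED_LIST, c->argv + 1, c->argc - 2, timeout, 0);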
@ -452,15 +423,15 @@ static void unblockClientWaitingData(client *c) {
dictEntry *de;
dictIterator *di;
if (dictSize(c->bstate->keys) == 0) return;
if (dictSize(c->bstate.keys) == 0) return;
di = dictGetIterator(c->bstate->keys);
di = dictGetIterator(c->bstate.keys);
/* The client may wait for multiple keys, so unblock it for every key. */
while ((de = dictNext(di)) != NULL) {
releaseBlockedEntry(c, de, 0);
}
dictReleaseIterator(di);
dictEmpty(c->bstate->keys, NULL);
dictEmpty(c->bstate.keys, NULL);
}
static blocking_type getBlockedTypeByType(int type) {
@ -559,7 +530,7 @@ static void releaseBlockedEntry(client *c, dictEntry *de, int remove_key) {
if (listLength(l) == 0) {
dictDelete(c->db->blocking_keys, key);
dictDelete(c->db->blocking_keys_unblock_on_nokey, key);
} else if (c->bstate->unblock_on_nokey) {
} else if (c->bstate.unblock_on_nokey) {
unblock_on_nokey_entry = dictFind(c->db->blocking_keys_unblock_on_nokey, key);
/* it is not possible to have a client blocked on nokey with no matching entry */
serverAssertWithInfo(c, key, unblock_on_nokey_entry != NULL);
@ -568,7 +539,7 @@ static void releaseBlockedEntry(client *c, dictEntry *de, int remove_key) {
dictDelete(c->db->blocking_keys_unblock_on_nokey, key);
}
}
if (remove_key) dictDelete(c->bstate->keys, key);
if (remove_key) dictDelete(c->bstate.keys, key);
}
void signalKeyAsReady(serverDb *db, robj *key, int type) {
@ -606,9 +577,9 @@ static void handleClientsBlockedOnKey(readyList *rl) {
* module is trying to accomplish right now.
* 3. In case of an XREADGROUP call we will want to unblock on any change in object type,
* or in case the key was deleted, since the group is no longer valid.
if ((o != NULL && (receiver->bstate->btype == getBlockedTypeByType(o->type))) ||
(o != NULL && (receiver->bstate->btype == BLOCKED_MODULE)) || (receiver->bstate->unblock_on_nokey)) {
if (receiver->bstate->btype != BLOCKED_MODULE)
if ((o != NULL && (receiver->bstate.btype == getBlockedTypeByType(o->type))) ||
(o != NULL && (receiver->bstate.btype == BLOCKED_MODULE)) || (receiver->bstate.unblock_on_nokey)) {
if (receiver->bstate.btype != BLOCKED_MODULE)
unblockClientOnKey(receiver, rl->key);
else
moduleUnblockClientOnKey(receiver, rl->key);
@ -619,17 +590,11 @@ static void handleClientsBlockedOnKey(readyList *rl) {
/* block a client for replica acknowledgement */
void blockClientForReplicaAck(client *c, mstime_t timeout, long long offset, long numreplicas, int numlocal) {
initClientBlockingState(c);
c->bstate->timeout = timeout;
c->bstate->reploffset = offset;
c->bstate->numreplicas = numreplicas;
c->bstate->numlocal = numlocal;
c->bstate.timeout = timeout;
c->bstate.reploffset = offset;
c->bstate.numreplicas = numreplicas;
c->bstate.numlocal = numlocal;
listAddNodeHead(server.clients_waiting_acks, c);
/* Note that we remember the linked list node where the client is stored,
* this way removing the client in unblockClientWaitingReplicas() will not
* require a linear scan, but just a constant time operation. */
serverAssert(c->bstate->client_waiting_acks_list_node == NULL);
c->bstate->client_waiting_acks_list_node = listFirst(server.clients_waiting_acks);
blockClient(c, BLOCKED_WAIT);
}
@ -637,12 +602,10 @@ void blockClientForReplicaAck(client *c, mstime_t timeout, long long offset, lon
* requesting, to avoid processing client commands which will be processed later
* when it is ready to accept them. */
void blockPostponeClient(client *c) {
initClientBlockingState(c);
c->bstate->timeout = 0;
c->bstate.timeout = 0;
blockClient(c, BLOCKED_POSTPONE);
listAddNodeTail(server.postponed_clients, c);
serverAssert(c->bstate->postponed_list_node == NULL);
c->bstate->postponed_list_node = listLast(server.postponed_clients);
c->postponed_list_node = listLast(server.postponed_clients);
/* Mark this client to execute its command */
c->flag.pending_command = 1;
}
@ -659,13 +622,13 @@ void blockClientShutdown(client *c) {
static void unblockClientOnKey(client *c, robj *key) {
dictEntry *de;
de = dictFind(c->bstate->keys, key);
de = dictFind(c->bstate.keys, key);
releaseBlockedEntry(c, de, 1);
/* Only in case of blocking API calls we might be blocked on several keys;
however we should force unblock all the blocked keys */
serverAssert(c->bstate->btype == BLOCKED_STREAM || c->bstate->btype == BLOCKED_LIST ||
c->bstate->btype == BLOCKED_ZSET);
serverAssert(c->bstate.btype == BLOCKED_STREAM || c->bstate.btype == BLOCKED_LIST ||
c->bstate.btype == BLOCKED_ZSET);
/* We need to unblock the client before calling processCommandAndResetClient
* because it checks the CLIENT_BLOCKED flag */
@ -708,8 +671,7 @@ static void moduleUnblockClientOnKey(client *c, robj *key) {
elapsedStart(&replyTimer);
if (moduleTryServeClientBlockedOnKey(c, key)) {
updateStatsOnUnblock(c, 0, elapsedUs(replyTimer),
((server.stat_total_error_replies != prev_error_replies) ? ERROR_COMMAND_FAILED : 0));
updateStatsOnUnblock(c, 0, elapsedUs(replyTimer), server.stat_total_error_replies != prev_error_replies);
moduleUnblockClient(c);
}
/* We need to call afterCommand even if the client was not unblocked
@ -727,7 +689,7 @@ static void moduleUnblockClientOnKey(client *c, robj *key) {
* command with timeout reply. */
void unblockClientOnTimeout(client *c) {
/* The client has been unblocked (it is in the moduleUnblocked list), return ASAP. */
if (c->bstate->btype == BLOCKED_MODULE && isModuleClientUnblocked(c)) return;
if (c->bstate.btype == BLOCKED_MODULE && isModuleClientUnblocked(c)) return;
replyToBlockedClientTimedOut(c);
if (c->flag.pending_command) c->flag.pending_command = 0;
@ -738,7 +700,7 @@ void unblockClientOnTimeout(client *c) {
* If err_str is provided it will be used to reply to the blocked client */
void unblockClientOnError(client *c, const char *err_str) {
if (err_str) addReplyError(c, err_str);
updateStatsOnUnblock(c, 0, 0, ERROR_COMMAND_REJECTED);
updateStatsOnUnblock(c, 0, 0, 1);
if (c->flag.pending_command) c->flag.pending_command = 0;
unblockClient(c, 1);
}

Some files were not shown because too many files have changed in this diff.