Refactor repository and build scripts

This commit includes updates that affect the build, testing, and
deployment of Tile38.

- The root-level build.sh has been broken up into multiple
  scripts, which now live in the "scripts" directory.

- The vendor directory has been updated to follow the Go modules
  rules, so `make` should work in isolated environments. Some
  vendored packages may also have been updated to a later
  version, where needed. A sketch of the vendored build
  environment follows this list.

- The Makefile has been updated to allow for building single
  binaries such as `make tile38-server`. There is some scaffolding
  during the build process (version stamping and core code
  generation), so from now on all binaries should be made using
  make. For example, to run a development version of the
  tile38-cli binary, do this:
     make tile38-cli && ./tile38-cli
  not this:
     go run cmd/tile38-cli/main.go

- The Travis CI docker push script has been updated to address a
  change in Docker Hub's JSON repository metadata output, which
  in turn fixes a bug where new Tile38 versions were not being
  properly pushed to Docker Hub.
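
As a rough sketch, the module-aware, vendored environment that the
new scripts/build.sh sets up looks like this (the flags are taken
from that script; the binary name is only an example):

   # Resolve every import from ./vendor, with no network access.
   export GO111MODULE=on       # force module mode
   export GOFLAGS=-mod=vendor  # build against the vendor directory
   export CGO_ENABLED=0        # produce a static, CGO-free binary
   go build -o tile38-server cmd/tile38-server/*.go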
tidwall 2019-11-18 10:33:15 -07:00
parent 4b17a1b828
commit cfc65a13f6
1013 changed files with 323746 additions and 192 deletions

.travis.yml

@@ -10,4 +10,4 @@ script:
- make test
after_success:
- ./build.sh travis-docker-push
- scripts/travis-docker-push.sh

Makefile

@@ -1,21 +1,46 @@
all:
	@./build.sh
clean:
	rm -f tile38-server
	rm -f tile38-cli
	rm -f tile38-benchmark
	rm -f tile38-luamemtest
all: tile38-server tile38-cli tile38-benchmark tile38-luamemtest
.PHONY: tile38-server
tile38-server:
	@./scripts/build.sh tile38-server
.PHONY: tile38-cli
tile38-cli:
	@./scripts/build.sh tile38-cli
.PHONY: tile38-benchmark
tile38-benchmark:
	@./scripts/build.sh tile38-benchmark
.PHONY: tile38-luamemtest
tile38-luamemtest:
	@./scripts/build.sh tile38-luamemtest
test:
	@./build.sh test
cover:
	@./build.sh cover
	@./scripts/test.sh
package:
	@rm -rf packages/
	@scripts/package.sh Windows windows amd64
	@scripts/package.sh Mac darwin amd64
	@scripts/package.sh Linux linux amd64
	@scripts/package.sh FreeBSD freebsd amd64
	@scripts/package.sh ARM linux arm
	@scripts/package.sh ARM64 linux arm64
clean:
	rm -rf tile38-server tile38-cli tile38-benchmark tile38-luamemtest
distclean: clean
	rm -rf packages/
install: all
	cp tile38-server /usr/local/bin
	cp tile38-cli /usr/local/bin
	cp tile38-benchmark /usr/local/bin
uninstall:
	rm -f /usr/local/bin/tile38-server
	rm -f /usr/local/bin/tile38-cli
	rm -f /usr/local/bin/tile38-benchmark
package:
	@./build.sh package
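
For illustration, a hypothetical session against the new targets
(paths and target names as defined in the Makefile above):

   make tile38-server && ./tile38-server   # build one binary and run it
   make package                            # cross-compile archives into packages/
   sudo make install                       # copy the binaries to /usr/local/bin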

README.md

@@ -5,7 +5,6 @@
</p>
<p align="center">
<a href="https://tile38.com/slack"><img src="https://img.shields.io/badge/slack-channel-orange.svg" alt="Slack Channel"></a>
<a href="https://github.com/tidwall/tile38/releases"><img src="https://img.shields.io/badge/version-1.19.0-green.svg?" alt="Version"></a>
<a href="https://travis-ci.org/tidwall/tile38"><img src="https://travis-ci.org/tidwall/tile38.svg?branch=master" alt="Build Status"></a>
<a href="https://hub.docker.com/r/tile38/tile38"><img src="https://img.shields.io/badge/docker-ready-blue.svg" alt="Docker Ready"></a>
</p>

build.sh

@@ -1,178 +0,0 @@
#!/bin/bash
set -e
cd $(dirname "${BASH_SOURCE[0]}")
OD="$(pwd)"
VERSION=1.19.0
PROTECTED_MODE="no"
# Hardcode some values to the core package
LDFLAGS="$LDFLAGS -X github.com/tidwall/tile38/core.Version=${VERSION}"
if [ -d ".git" ]; then
    LDFLAGS="$LDFLAGS -X github.com/tidwall/tile38/core.GitSHA=$(git rev-parse --short HEAD)"
fi
LDFLAGS="$LDFLAGS -X github.com/tidwall/tile38/core.BuildTime=$(date +%FT%T%z)"
if [ "$PROTECTED_MODE" == "no" ]; then
    LDFLAGS="$LDFLAGS -X github.com/tidwall/tile38/core.ProtectedMode=no"
fi
if [ "$1" == "update-version" ]; then
    # update the versions in the README.md
    sed -i '' "s/version-[0-9]*\.[0-9]*\.[0-9]*/version-$VERSION/g" README.md
    exit
fi
if [ "$1" == "travis-docker-push" ]; then
    # GIT_VERSION - always the last version number, like 1.12.1.
    export GIT_VERSION=$(git describe --tags --abbrev=0)
    # GIT_COMMIT_SHORT - the short git commit number, like a718ef0.
    export GIT_COMMIT_SHORT=$(git rev-parse --short HEAD)
    # DOCKER_REPO - the base repository name to push the docker build to.
    export DOCKER_REPO=$DOCKER_USER/tile38
    if [ "$TRAVIS_PULL_REQUEST" != "false" ]; then
        # never push from a pull request
        echo "Not pushing, on a PR or not running in Travis CI"
    elif [ "$TRAVIS_BRANCH" != "master" ]; then
        # only the master branch will work
        echo "Not pushing, not on master"
    else
        push(){
            docker tag $DOCKER_REPO:$GIT_COMMIT_SHORT $DOCKER_REPO:$1
            docker push $DOCKER_REPO:$1
            echo "Pushed $DOCKER_REPO:$1"
        }
        # docker login
        echo $DOCKER_PASSWORD | docker login -u $DOCKER_LOGIN --password-stdin
        # build the docker image
        docker build -f Dockerfile -t $DOCKER_REPO:$GIT_COMMIT_SHORT .
        if [ "$(curl -s https://hub.docker.com/v2/repositories/$DOCKER_REPO/tags/$GIT_VERSION/ | grep "$GIT_VERSION" | grep "repository")" == "" ]; then
            # push the newest tag
            push "$GIT_VERSION"
            push "latest"
        fi
        push "edge"
    fi
    exit
fi
# Check go install
if [ "$(which go)" == "" ]; then
    echo "error: Go is not installed. Please download and follow installation instructions at https://golang.org/dl to continue."
    exit 1
fi
# Check go version
GOVERS="$(go version | cut -d " " -f 3)"
if [ "$GOVERS" != "devel" ]; then
    vercomp () {
        if [[ $1 == $2 ]]
        then
            echo "0"
            return
        fi
        local IFS=.
        local i ver1=($1) ver2=($2)
        # fill empty fields in ver1 with zeros
        for ((i=${#ver1[@]}; i<${#ver2[@]}; i++))
        do
            ver1[i]=0
        done
        for ((i=0; i<${#ver1[@]}; i++))
        do
            if [[ -z ${ver2[i]} ]]
            then
                # fill empty fields in ver2 with zeros
                ver2[i]=0
            fi
            if ((10#${ver1[i]} > 10#${ver2[i]}))
            then
                echo "1"
                return
            fi
            if ((10#${ver1[i]} < 10#${ver2[i]}))
            then
                echo "-1"
                return
            fi
        done
        echo "0"
        return
    }
    GOVERS="${GOVERS:2}"
    EQRES=$(vercomp "$GOVERS" "1.7")
    if [ "$EQRES" == "-1" ]; then
        echo "error: Go '1.7' or greater is required and '$GOVERS' is currently installed. Please upgrade Go at https://golang.org/dl to continue."
        exit 1
    fi
fi
package(){
    echo Packaging $1 Binary
    bdir=tile38-${VERSION}-$2-$3
    rm -rf packages/$bdir && mkdir -p packages/$bdir
    GOOS=$2 GOARCH=$3 ./build.sh
    if [ "$2" == "windows" ]; then
        mv tile38-server packages/$bdir/tile38-server.exe
        mv tile38-cli packages/$bdir/tile38-cli.exe
        mv tile38-benchmark packages/$bdir/tile38-benchmark.exe
    else
        mv tile38-server packages/$bdir
        mv tile38-cli packages/$bdir
        mv tile38-benchmark packages/$bdir
    fi
    cp README.md packages/$bdir
    cp CHANGELOG.md packages/$bdir
    cp LICENSE packages/$bdir
    cd packages
    if [ "$2" == "linux" ]; then
        tar -zcf $bdir.tar.gz $bdir
    else
        zip -r -q $bdir.zip $bdir
    fi
    rm -rf $bdir
    cd ..
}
if [ "$1" == "package" ]; then
    rm -rf packages/
    package "Windows" "windows" "amd64"
    package "Mac" "darwin" "amd64"
    package "Linux" "linux" "amd64"
    package "FreeBSD" "freebsd" "amd64"
    package "ARM" "linux" "arm"
    package "ARM64" "linux" "arm64"
    exit
fi
# generate the core package
core/gen.sh
export CGO_ENABLED=0
# build and store objects into original directory.
go build -ldflags "$LDFLAGS -extldflags '-static'" -o "$OD/tile38-server" cmd/tile38-server/*.go
go build -ldflags "$LDFLAGS -extldflags '-static'" -o "$OD/tile38-cli" cmd/tile38-cli/*.go
go build -ldflags "$LDFLAGS -extldflags '-static'" -o "$OD/tile38-benchmark" cmd/tile38-benchmark/*.go
go build -ldflags "$LDFLAGS -extldflags '-static'" -o "$OD/tile38-luamemtest" cmd/tile38-luamemtest/*.go
# test if requested
if [ "$1" == "test" ]; then
    cd tests && go test && cd ..
    go test $(go list ./... | grep -v /vendor/ | grep -v /tests)
fi
# cover if requested
if [ "$1" == "cover" ]; then
    $OD/tile38-server -p 9876 -d "$TMP" -q &
    PID=$!
    function testend {
        kill $PID &
    }
    trap testend EXIT
    go test -cover $(go list ./... | grep -v /vendor/)
fi

scripts/build.sh Executable file

@@ -0,0 +1,37 @@
#!/bin/bash
set -e
cd $(dirname "${BASH_SOURCE[0]}")/..
if [ "$1" == "" ]; then
    echo "error: missing argument (binary name)"
    exit 1
fi
# Check the Go installation
if [ "$(which go)" == "" ]; then
    echo "error: Go is not installed. Please download and follow installation"\
         "instructions at https://golang.org/dl to continue."
    exit 1
fi
# Hardcode some values to the core package.
if [ -d ".git" ]; then
    VERSION=$(git describe --tags --abbrev=0)
    GITSHA=$(git rev-parse --short HEAD)
    LDFLAGS="$LDFLAGS -X github.com/tidwall/tile38/core.Version=${VERSION}"
    LDFLAGS="$LDFLAGS -X github.com/tidwall/tile38/core.GitSHA=${GITSHA}"
fi
LDFLAGS="$LDFLAGS -X github.com/tidwall/tile38/core.BuildTime=$(date +%FT%T%z)"
# Generate the core package
core/gen.sh
# Set final Go environment options
LDFLAGS="$LDFLAGS -extldflags '-static'"
export CGO_ENABLED=0
export GO111MODULE=on
export GOFLAGS=-mod=vendor
# Build and store objects into original directory.
go build -ldflags "$LDFLAGS" -o $1 cmd/$1/*.go
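
The make targets are thin wrappers around this script: each passes a
single binary name, and the script handles the scaffolding itself
(version stamping from git, core package generation, vendored module
flags). A hypothetical direct invocation:

   ./scripts/build.sh tile38-cli && ./tile38-cli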

scripts/package.sh Executable file

@@ -0,0 +1,46 @@
#!/bin/bash
set -e
cd $(dirname "${BASH_SOURCE[0]}")/..
PLATFORM="$1"
GOOS="$2"
GOARCH="$3"
VERSION=$(git describe --tags --abbrev=0)
echo Packaging $PLATFORM Binary
# Remove previous build directory, if needed.
bdir=tile38-$VERSION-$GOOS-$GOARCH
rm -rf packages/$bdir && mkdir -p packages/$bdir
# Make the binaries.
GOOS=$GOOS GOARCH=$GOARCH make all
rm -f tile38-luamemtest # not needed
# Copy the executable binaries.
if [ "$GOOS" == "windows" ]; then
    mv tile38-server packages/$bdir/tile38-server.exe
    mv tile38-cli packages/$bdir/tile38-cli.exe
    mv tile38-benchmark packages/$bdir/tile38-benchmark.exe
else
    mv tile38-server packages/$bdir
    mv tile38-cli packages/$bdir
    mv tile38-benchmark packages/$bdir
fi
# Copy documentation and license.
cp README.md packages/$bdir
cp CHANGELOG.md packages/$bdir
cp LICENSE packages/$bdir
# Compress the package.
cd packages
if [ "$GOOS" == "linux" ]; then
    tar -zcf $bdir.tar.gz $bdir
else
    zip -r -q $bdir.zip $bdir
fi
# Remove build directory.
rm -rf $bdir
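
A sketch of a single-platform run and the artifact it should produce,
with the version coming from `git describe`:

   scripts/package.sh Linux linux amd64
   # -> packages/tile38-<latest-tag>-linux-amd64.tar.gz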

scripts/test.sh Executable file

@@ -0,0 +1,7 @@
#!/bin/bash
set -e
cd $(dirname "${BASH_SOURCE[0]}")/..
cd tests && go test && cd ..
go test $(go list ./... | grep -v /vendor/ | grep -v /tests)

scripts/travis-docker-push.sh Executable file

@@ -0,0 +1,35 @@
#!/bin/bash
set -e
cd $(dirname "${BASH_SOURCE[0]}")/..
# GIT_VERSION - always the last version number, like 1.12.1.
export GIT_VERSION=$(git describe --tags --abbrev=0)
# GIT_COMMIT_SHORT - the short git commit number, like a718ef0.
export GIT_COMMIT_SHORT=$(git rev-parse --short HEAD)
# DOCKER_REPO - the base repository name to push the docker build to.
export DOCKER_REPO=$DOCKER_USER/tile38
if [ "$TRAVIS_PULL_REQUEST" != "false" ]; then
    # never push from a pull request
    echo "Not pushing, on a PR or not running in Travis CI"
elif [ "$TRAVIS_BRANCH" != "master" ]; then
    # only the master branch will work
    echo "Not pushing, not on master"
else
    push(){
        docker tag $DOCKER_REPO:$GIT_COMMIT_SHORT $DOCKER_REPO:$1
        docker push $DOCKER_REPO:$1
        echo "Pushed $DOCKER_REPO:$1"
    }
    # docker login
    echo $DOCKER_PASSWORD | docker login -u $DOCKER_LOGIN --password-stdin
    # build the docker image
    docker build -f Dockerfile -t $DOCKER_REPO:$GIT_COMMIT_SHORT .
    if [ "$(curl -s https://hub.docker.com/v2/repositories/$DOCKER_REPO/tags/$GIT_VERSION/ | grep "digest")" == "" ]; then
        # push the newest tag
        push "$GIT_VERSION"
        push "latest"
    fi
    push "edge"
fi
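
The fix mentioned in the commit message is the curl check above: the
old build.sh grepped Docker Hub's tag JSON for "repository", while the
v2 API now reports a "digest" field for existing tags. A manual check,
assuming the public tile38/tile38 repository and the current 1.19.0
version:

   # Non-empty output means the tag already exists on Docker Hub,
   # so the script will not push it again.
   curl -s https://hub.docker.com/v2/repositories/tile38/tile38/tags/1.19.0/ | grep digest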

vendor/github.com/Shopify/sarama/.gitignore generated vendored Normal file

@@ -0,0 +1,24 @@
# Compiled Object files, Static and Dynamic libs (Shared Objects)
*.o
*.a
*.so
*.test
# Folders
_obj
_test
.vagrant
# Architecture specific extensions/prefixes
*.[568vq]
[568vq].out
*.cgo1.go
*.cgo2.c
_cgo_defun.c
_cgo_gotypes.go
_cgo_export.*
_testmain.go
*.exe

vendor/github.com/Shopify/sarama/.travis.yml generated vendored Normal file

@@ -0,0 +1,34 @@
language: go
go:
- 1.7.x
- 1.8.x
- 1.9.x
env:
global:
- KAFKA_PEERS=localhost:9091,localhost:9092,localhost:9093,localhost:9094,localhost:9095
- TOXIPROXY_ADDR=http://localhost:8474
- KAFKA_INSTALL_ROOT=/home/travis/kafka
- KAFKA_HOSTNAME=localhost
- DEBUG=true
matrix:
- KAFKA_VERSION=0.9.0.1
- KAFKA_VERSION=0.10.2.1
- KAFKA_VERSION=0.11.0.1
before_install:
- export REPOSITORY_ROOT=${TRAVIS_BUILD_DIR}
- vagrant/install_cluster.sh
- vagrant/boot_cluster.sh
- vagrant/create_topics.sh
install:
- make install_dependencies
script:
- make test
- make vet
- make errcheck
- make fmt
sudo: false

vendor/github.com/Shopify/sarama/CHANGELOG.md generated vendored Normal file

@@ -0,0 +1,418 @@
# Changelog
#### Version 1.13.0 (2017-10-04)
New Features:
- Support for FetchRequest version 3
([#905](https://github.com/Shopify/sarama/pull/905)).
- Permit setting version on mock FetchResponses
([#939](https://github.com/Shopify/sarama/pull/939)).
- Add a configuration option to support storing only minimal metadata for
extremely large clusters
([#937](https://github.com/Shopify/sarama/pull/937)).
- Add `PartitionOffsetManager.ResetOffset` for backtracking tracked offsets
([#932](https://github.com/Shopify/sarama/pull/932)).
Improvements:
- Provide the block-level timestamp when consuming compressed messages
([#885](https://github.com/Shopify/sarama/issues/885)).
- `Client.Replicas` and `Client.InSyncReplicas` now respect the order returned
by the broker, which can be meaningful
([#930](https://github.com/Shopify/sarama/pull/930)).
- Use a `Ticker` to reduce consumer timer overhead at the cost of higher
variance in the actual timeout
([#933](https://github.com/Shopify/sarama/pull/933)).
Bug Fixes:
- Gracefully handle messages with negative timestamps
([#907](https://github.com/Shopify/sarama/pull/907)).
- Raise a proper error when encountering an unknown message version
([#940](https://github.com/Shopify/sarama/pull/940)).
#### Version 1.12.0 (2017-05-08)
New Features:
- Added support for the `ApiVersions` request and response pair, and Kafka
version 0.10.2 ([#867](https://github.com/Shopify/sarama/pull/867)). Note
that you still need to specify the Kafka version in the Sarama configuration
for the time being.
- Added a `Brokers` method to the Client which returns the complete set of
active brokers ([#813](https://github.com/Shopify/sarama/pull/813)).
- Added an `InSyncReplicas` method to the Client which returns the set of all
in-sync broker IDs for the given partition, now that the Kafka versions for
which this was misleading are no longer in our supported set
([#872](https://github.com/Shopify/sarama/pull/872)).
- Added a `NewCustomHashPartitioner` method which allows constructing a hash
partitioner with a custom hash method in case the default (FNV-1a) is not
suitable
([#837](https://github.com/Shopify/sarama/pull/837),
[#841](https://github.com/Shopify/sarama/pull/841)).
Improvements:
- Recognize more Kafka error codes
([#859](https://github.com/Shopify/sarama/pull/859)).
Bug Fixes:
- Fix an issue where decoding a malformed FetchRequest would not return the
correct error ([#818](https://github.com/Shopify/sarama/pull/818)).
- Respect ordering of group protocols in JoinGroupRequests. This fix is
transparent if you're using the `AddGroupProtocol` or
`AddGroupProtocolMetadata` helpers; otherwise you will need to switch from
the `GroupProtocols` field (now deprecated) to use `OrderedGroupProtocols`
([#812](https://github.com/Shopify/sarama/issues/812)).
- Fix an alignment-related issue with atomics on 32-bit architectures
([#859](https://github.com/Shopify/sarama/pull/859)).
#### Version 1.11.0 (2016-12-20)
_Important:_ As of Sarama 1.11 it is necessary to set the config value of
`Producer.Return.Successes` to true in order to use the SyncProducer. Previous
versions would silently override this value when instantiating a SyncProducer
which led to unexpected values and data races.
New Features:
- Metrics! Thanks to Sébastien Launay for all his work on this feature
([#701](https://github.com/Shopify/sarama/pull/701),
[#746](https://github.com/Shopify/sarama/pull/746),
[#766](https://github.com/Shopify/sarama/pull/766)).
- Add support for LZ4 compression
([#786](https://github.com/Shopify/sarama/pull/786)).
- Add support for ListOffsetRequest v1 and Kafka 0.10.1
([#775](https://github.com/Shopify/sarama/pull/775)).
- Added a `HighWaterMarks` method to the Consumer which aggregates the
`HighWaterMarkOffset` values of its child topic/partitions
([#769](https://github.com/Shopify/sarama/pull/769)).
Bug Fixes:
- Fixed producing when using timestamps, compression and Kafka 0.10
([#759](https://github.com/Shopify/sarama/pull/759)).
- Added missing decoder methods to DescribeGroups response
([#756](https://github.com/Shopify/sarama/pull/756)).
- Fix producer shutdown when `Return.Errors` is disabled
([#787](https://github.com/Shopify/sarama/pull/787)).
- Don't mutate configuration in SyncProducer
([#790](https://github.com/Shopify/sarama/pull/790)).
- Fix crash on SASL initialization failure
([#795](https://github.com/Shopify/sarama/pull/795)).
#### Version 1.10.1 (2016-08-30)
Bug Fixes:
- Fix the documentation for `HashPartitioner` which was incorrect
([#717](https://github.com/Shopify/sarama/pull/717)).
- Permit client creation even when it is limited by ACLs
([#722](https://github.com/Shopify/sarama/pull/722)).
- Several fixes to the consumer timer optimization code, regressions introduced
in v1.10.0. Go's timers are finicky
([#730](https://github.com/Shopify/sarama/pull/730),
[#733](https://github.com/Shopify/sarama/pull/733),
[#734](https://github.com/Shopify/sarama/pull/734)).
- Handle consuming compressed relative offsets with Kafka 0.10
([#735](https://github.com/Shopify/sarama/pull/735)).
#### Version 1.10.0 (2016-08-02)
_Important:_ As of Sarama 1.10 it is necessary to tell Sarama the version of
Kafka you are running against (via the `config.Version` value) in order to use
features that may not be compatible with old Kafka versions. If you don't
specify this value it will default to 0.8.2 (the minimum supported), and trying
to use more recent features (like the offset manager) will fail with an error.
_Also:_ The offset-manager's behaviour has been changed to match the upstream
java consumer (see [#705](https://github.com/Shopify/sarama/pull/705) and
[#713](https://github.com/Shopify/sarama/pull/713)). If you use the
offset-manager, please ensure that you are committing one *greater* than the
last consumed message offset or else you may end up consuming duplicate
messages.
New Features:
- Support for Kafka 0.10
([#672](https://github.com/Shopify/sarama/pull/672),
[#678](https://github.com/Shopify/sarama/pull/678),
[#681](https://github.com/Shopify/sarama/pull/681), and others).
- Support for configuring the target Kafka version
([#676](https://github.com/Shopify/sarama/pull/676)).
- Batch producing support in the SyncProducer
([#677](https://github.com/Shopify/sarama/pull/677)).
- Extend producer mock to allow setting expectations on message contents
([#667](https://github.com/Shopify/sarama/pull/667)).
Improvements:
- Support `nil` compressed messages for deleting in compacted topics
([#634](https://github.com/Shopify/sarama/pull/634)).
- Pre-allocate decoding errors, greatly reducing heap usage and GC time against
misbehaving brokers ([#690](https://github.com/Shopify/sarama/pull/690)).
- Re-use consumer expiry timers, removing one allocation per consumed message
([#707](https://github.com/Shopify/sarama/pull/707)).
Bug Fixes:
- Actually default the client ID to "sarama" like we say we do
([#664](https://github.com/Shopify/sarama/pull/664)).
- Fix a rare issue where `Client.Leader` could return the wrong error
([#685](https://github.com/Shopify/sarama/pull/685)).
- Fix a possible tight loop in the consumer
([#693](https://github.com/Shopify/sarama/pull/693)).
- Match upstream's offset-tracking behaviour
([#705](https://github.com/Shopify/sarama/pull/705)).
- Report UnknownTopicOrPartition errors from the offset manager
([#706](https://github.com/Shopify/sarama/pull/706)).
- Fix possible negative partition value from the HashPartitioner
([#709](https://github.com/Shopify/sarama/pull/709)).
#### Version 1.9.0 (2016-05-16)
New Features:
- Add support for custom offset manager retention durations
([#602](https://github.com/Shopify/sarama/pull/602)).
- Publish low-level mocks to enable testing of third-party producer/consumer
implementations ([#570](https://github.com/Shopify/sarama/pull/570)).
- Declare support for Golang 1.6
([#611](https://github.com/Shopify/sarama/pull/611)).
- Support for SASL plain-text auth
([#648](https://github.com/Shopify/sarama/pull/648)).
Improvements:
- Simplified broker locking scheme slightly
([#604](https://github.com/Shopify/sarama/pull/604)).
- Documentation cleanup
([#605](https://github.com/Shopify/sarama/pull/605),
[#621](https://github.com/Shopify/sarama/pull/621),
[#654](https://github.com/Shopify/sarama/pull/654)).
Bug Fixes:
- Fix race condition shutting down the OffsetManager
([#658](https://github.com/Shopify/sarama/pull/658)).
#### Version 1.8.0 (2016-02-01)
New Features:
- Full support for Kafka 0.9:
- All protocol messages and fields
([#586](https://github.com/Shopify/sarama/pull/586),
[#588](https://github.com/Shopify/sarama/pull/588),
[#590](https://github.com/Shopify/sarama/pull/590)).
- Verified that TLS support works
([#581](https://github.com/Shopify/sarama/pull/581)).
- Fixed the OffsetManager compatibility
([#585](https://github.com/Shopify/sarama/pull/585)).
Improvements:
- Optimize for fewer system calls when reading from the network
([#584](https://github.com/Shopify/sarama/pull/584)).
- Automatically retry `InvalidMessage` errors to match upstream behaviour
([#589](https://github.com/Shopify/sarama/pull/589)).
#### Version 1.7.0 (2015-12-11)
New Features:
- Preliminary support for Kafka 0.9
([#572](https://github.com/Shopify/sarama/pull/572)). This comes with several
caveats:
- Protocol-layer support is mostly in place
([#577](https://github.com/Shopify/sarama/pull/577)), however Kafka 0.9
renamed some messages and fields, which we did not in order to preserve API
compatibility.
- The producer and consumer work against 0.9, but the offset manager does
not ([#573](https://github.com/Shopify/sarama/pull/573)).
- TLS support may or may not work
([#581](https://github.com/Shopify/sarama/pull/581)).
Improvements:
- Don't wait for request timeouts on dead brokers, greatly speeding recovery
when the TCP connection is left hanging
([#548](https://github.com/Shopify/sarama/pull/548)).
- Refactored part of the producer. The new version provides a much more elegant
solution to [#449](https://github.com/Shopify/sarama/pull/449). It is also
slightly more efficient, and much more precise in calculating batch sizes
when compression is used
([#549](https://github.com/Shopify/sarama/pull/549),
[#550](https://github.com/Shopify/sarama/pull/550),
[#551](https://github.com/Shopify/sarama/pull/551)).
Bug Fixes:
- Fix race condition in consumer test mock
([#553](https://github.com/Shopify/sarama/pull/553)).
#### Version 1.6.1 (2015-09-25)
Bug Fixes:
- Fix panic that could occur if a user-supplied message value failed to encode
([#449](https://github.com/Shopify/sarama/pull/449)).
#### Version 1.6.0 (2015-09-04)
New Features:
- Implementation of a consumer offset manager using the APIs introduced in
Kafka 0.8.2. The API is designed mainly for integration into a future
high-level consumer, not for direct use, although it is *possible* to use it
directly.
([#461](https://github.com/Shopify/sarama/pull/461)).
Improvements:
- CRC32 calculation is much faster on machines with SSE4.2 instructions,
removing a major hotspot from most profiles
([#255](https://github.com/Shopify/sarama/pull/255)).
Bug Fixes:
- Make protocol decoding more robust against some malformed packets generated
by go-fuzz ([#523](https://github.com/Shopify/sarama/pull/523),
[#525](https://github.com/Shopify/sarama/pull/525)) or found in other ways
([#528](https://github.com/Shopify/sarama/pull/528)).
- Fix a potential race condition panic in the consumer on shutdown
([#529](https://github.com/Shopify/sarama/pull/529)).
#### Version 1.5.0 (2015-08-17)
New Features:
- TLS-encrypted network connections are now supported. This feature is subject
to change when Kafka releases built-in TLS support, but for now this is
enough to work with TLS-terminating proxies
([#154](https://github.com/Shopify/sarama/pull/154)).
Improvements:
- The consumer will not block if a single partition is not drained by the user;
all other partitions will continue to consume normally
([#485](https://github.com/Shopify/sarama/pull/485)).
- Formatting of error strings has been much improved
([#495](https://github.com/Shopify/sarama/pull/495)).
- Internal refactoring of the producer for code cleanliness and to enable
future work ([#300](https://github.com/Shopify/sarama/pull/300)).
Bug Fixes:
- Fix a potential deadlock in the consumer on shutdown
([#475](https://github.com/Shopify/sarama/pull/475)).
#### Version 1.4.3 (2015-07-21)
Bug Fixes:
- Don't include the partitioner in the producer's "fetch partitions"
circuit-breaker ([#466](https://github.com/Shopify/sarama/pull/466)).
- Don't retry messages until the broker is closed when abandoning a broker in
the producer ([#468](https://github.com/Shopify/sarama/pull/468)).
- Update the import path for snappy-go, it has moved again and the API has
changed slightly ([#486](https://github.com/Shopify/sarama/pull/486)).
#### Version 1.4.2 (2015-05-27)
Bug Fixes:
- Update the import path for snappy-go, it has moved from google code to github
([#456](https://github.com/Shopify/sarama/pull/456)).
#### Version 1.4.1 (2015-05-25)
Improvements:
- Optimizations when decoding snappy messages, thanks to John Potocny
([#446](https://github.com/Shopify/sarama/pull/446)).
Bug Fixes:
- Fix hypothetical race conditions on producer shutdown
([#450](https://github.com/Shopify/sarama/pull/450),
[#451](https://github.com/Shopify/sarama/pull/451)).
#### Version 1.4.0 (2015-05-01)
New Features:
- The consumer now implements `Topics()` and `Partitions()` methods to enable
users to dynamically choose what topics/partitions to consume without
instantiating a full client
([#431](https://github.com/Shopify/sarama/pull/431)).
- The partition-consumer now exposes the high water mark offset value returned
by the broker via the `HighWaterMarkOffset()` method ([#339](https://github.com/Shopify/sarama/pull/339)).
- Added a `kafka-console-consumer` tool capable of handling multiple
partitions, and deprecated the now-obsolete `kafka-console-partitionConsumer`
([#439](https://github.com/Shopify/sarama/pull/439),
[#442](https://github.com/Shopify/sarama/pull/442)).
Improvements:
- The producer's logging during retry scenarios is more consistent, more
useful, and slightly less verbose
([#429](https://github.com/Shopify/sarama/pull/429)).
- The client now shuffles its initial list of seed brokers in order to prevent
thundering herd on the first broker in the list
([#441](https://github.com/Shopify/sarama/pull/441)).
Bug Fixes:
- The producer now correctly manages its state if retries occur when it is
shutting down, fixing several instances of confusing behaviour and at least
one potential deadlock ([#419](https://github.com/Shopify/sarama/pull/419)).
- The consumer now handles messages for different partitions asynchronously,
making it much more resilient to specific user code ordering
([#325](https://github.com/Shopify/sarama/pull/325)).
#### Version 1.3.0 (2015-04-16)
New Features:
- The client now tracks consumer group coordinators using
ConsumerMetadataRequests similar to how it tracks partition leadership using
regular MetadataRequests ([#411](https://github.com/Shopify/sarama/pull/411)).
This adds two methods to the client API:
- `Coordinator(consumerGroup string) (*Broker, error)`
- `RefreshCoordinator(consumerGroup string) error`
Improvements:
- ConsumerMetadataResponses now automatically create a Broker object out of the
ID/address/port combination for the Coordinator; accessing the fields
individually has been deprecated
([#413](https://github.com/Shopify/sarama/pull/413)).
- Much improved handling of `OffsetOutOfRange` errors in the consumer.
Consumers will fail to start if the provided offset is out of range
([#418](https://github.com/Shopify/sarama/pull/418))
and they will automatically shut down if the offset falls out of range
([#424](https://github.com/Shopify/sarama/pull/424)).
- Small performance improvement in encoding and decoding protocol messages
([#427](https://github.com/Shopify/sarama/pull/427)).
Bug Fixes:
- Fix a rare race condition in the client's background metadata refresher if
it happens to be activated while the client is being closed
([#422](https://github.com/Shopify/sarama/pull/422)).
#### Version 1.2.0 (2015-04-07)
Improvements:
- The producer's behaviour when `Flush.Frequency` is set is now more intuitive
([#389](https://github.com/Shopify/sarama/pull/389)).
- The producer is now somewhat more memory-efficient during and after retrying
messages due to an improved queue implementation
([#396](https://github.com/Shopify/sarama/pull/396)).
- The consumer produces much more useful logging output when leadership
changes ([#385](https://github.com/Shopify/sarama/pull/385)).
- The client's `GetOffset` method will now automatically refresh metadata and
retry once in the event of stale information or similar
([#394](https://github.com/Shopify/sarama/pull/394)).
- Broker connections now have support for using TCP keepalives
([#407](https://github.com/Shopify/sarama/issues/407)).
Bug Fixes:
- The OffsetCommitRequest message now correctly implements all three possible
API versions ([#390](https://github.com/Shopify/sarama/pull/390),
[#400](https://github.com/Shopify/sarama/pull/400)).
#### Version 1.1.0 (2015-03-20)
Improvements:
- Wrap the producer's partitioner call in a circuit-breaker so that repeatedly
broken topics don't choke throughput
([#373](https://github.com/Shopify/sarama/pull/373)).
Bug Fixes:
- Fix the producer's internal reference counting in certain unusual scenarios
([#367](https://github.com/Shopify/sarama/pull/367)).
- Fix the consumer's internal reference counting in certain unusual scenarios
([#369](https://github.com/Shopify/sarama/pull/369)).
- Fix a condition where the producer's internal control messages could have
gotten stuck ([#368](https://github.com/Shopify/sarama/pull/368)).
- Fix an issue where invalid partition lists would be cached when asking for
metadata for a non-existant topic ([#372](https://github.com/Shopify/sarama/pull/372)).
#### Version 1.0.0 (2015-03-17)
Version 1.0.0 is the first tagged version, and is almost a complete rewrite. The primary differences with previous untagged versions are:
- The producer has been rewritten; there is now a `SyncProducer` with a blocking API, and an `AsyncProducer` that is non-blocking.
- The consumer has been rewritten to only open one connection per broker instead of one connection per partition.
- The main types of Sarama are now interfaces to make depedency injection easy; mock implementations for `Consumer`, `SyncProducer` and `AsyncProducer` are provided in the `github.com/Shopify/sarama/mocks` package.
- For most uses cases, it is no longer necessary to open a `Client`; this will be done for you.
- All the configuration values have been unified in the `Config` struct.
- Much improved test suite.

vendor/github.com/Shopify/sarama/LICENSE generated vendored Normal file

@@ -0,0 +1,20 @@
Copyright (c) 2013 Evan Huus
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

vendor/github.com/Shopify/sarama/Makefile generated vendored Normal file

@@ -0,0 +1,21 @@
default: fmt vet errcheck test
test:
go test -v -timeout 60s -race ./...
vet:
go vet ./...
errcheck:
errcheck github.com/Shopify/sarama/...
fmt:
@if [ -n "$$(go fmt ./...)" ]; then echo 'Please run go fmt on your code.' && exit 1; fi
install_dependencies: install_errcheck get
install_errcheck:
go get github.com/kisielk/errcheck
get:
go get -t

vendor/github.com/Shopify/sarama/README.md generated vendored Normal file

@@ -0,0 +1,38 @@
sarama
======
[![GoDoc](https://godoc.org/github.com/Shopify/sarama?status.png)](https://godoc.org/github.com/Shopify/sarama)
[![Build Status](https://travis-ci.org/Shopify/sarama.svg?branch=master)](https://travis-ci.org/Shopify/sarama)
Sarama is an MIT-licensed Go client library for [Apache Kafka](https://kafka.apache.org/) version 0.8 (and later).
### Getting started
- API documentation and examples are available via [godoc](https://godoc.org/github.com/Shopify/sarama).
- Mocks for testing are available in the [mocks](./mocks) subpackage.
- The [examples](./examples) directory contains more elaborate example applications.
- The [tools](./tools) directory contains command line tools that can be useful for testing, diagnostics, and instrumentation.
You might also want to look at the [Frequently Asked Questions](https://github.com/Shopify/sarama/wiki/Frequently-Asked-Questions).
### Compatibility and API stability
Sarama provides a "2 releases + 2 months" compatibility guarantee: we support
the two latest stable releases of Kafka and Go, and we provide a two month
grace period for older releases. This means we currently officially support
Go 1.9 through 1.7, and Kafka 0.11 through 0.9, although older releases are
still likely to work.
Sarama follows semantic versioning and provides API stability via the gopkg.in service.
You can import a version with a guaranteed stable API via http://gopkg.in/Shopify/sarama.v1.
A changelog is available [here](CHANGELOG.md).
### Contributing
* Get started by checking our [contribution guidelines](https://github.com/Shopify/sarama/blob/master/.github/CONTRIBUTING.md).
* Read the [Sarama wiki](https://github.com/Shopify/sarama/wiki) for more
technical and design details.
* The [Kafka Protocol Specification](https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol)
contains a wealth of useful information.
* For more general issues, there is [a google group](https://groups.google.com/forum/#!forum/kafka-clients) for Kafka client developers.
* If you have any questions, just ask!

vendor/github.com/Shopify/sarama/Vagrantfile generated vendored Normal file

@@ -0,0 +1,20 @@
# -*- mode: ruby -*-
# vi: set ft=ruby :
# Vagrantfile API/syntax version. Don't touch unless you know what you're doing!
VAGRANTFILE_API_VERSION = "2"
# We have 5 * 192MB ZK processes and 5 * 320MB Kafka processes => 2560MB
MEMORY = 3072
Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
config.vm.box = "ubuntu/trusty64"
config.vm.provision :shell, path: "vagrant/provision.sh"
config.vm.network "private_network", ip: "192.168.100.67"
config.vm.provider "virtualbox" do |v|
v.memory = MEMORY
end
end

vendor/github.com/Shopify/sarama/api_versions_request.go generated vendored Normal file

@@ -0,0 +1,24 @@
package sarama
type ApiVersionsRequest struct {
}
func (r *ApiVersionsRequest) encode(pe packetEncoder) error {
return nil
}
func (r *ApiVersionsRequest) decode(pd packetDecoder, version int16) (err error) {
return nil
}
func (r *ApiVersionsRequest) key() int16 {
return 18
}
func (r *ApiVersionsRequest) version() int16 {
return 0
}
func (r *ApiVersionsRequest) requiredVersion() KafkaVersion {
return V0_10_0_0
}

vendor/github.com/Shopify/sarama/api_versions_response.go generated vendored Normal file

@@ -0,0 +1,87 @@
package sarama
type ApiVersionsResponseBlock struct {
ApiKey int16
MinVersion int16
MaxVersion int16
}
func (b *ApiVersionsResponseBlock) encode(pe packetEncoder) error {
pe.putInt16(b.ApiKey)
pe.putInt16(b.MinVersion)
pe.putInt16(b.MaxVersion)
return nil
}
func (b *ApiVersionsResponseBlock) decode(pd packetDecoder) error {
var err error
if b.ApiKey, err = pd.getInt16(); err != nil {
return err
}
if b.MinVersion, err = pd.getInt16(); err != nil {
return err
}
if b.MaxVersion, err = pd.getInt16(); err != nil {
return err
}
return nil
}
type ApiVersionsResponse struct {
Err KError
ApiVersions []*ApiVersionsResponseBlock
}
func (r *ApiVersionsResponse) encode(pe packetEncoder) error {
pe.putInt16(int16(r.Err))
if err := pe.putArrayLength(len(r.ApiVersions)); err != nil {
return err
}
for _, apiVersion := range r.ApiVersions {
if err := apiVersion.encode(pe); err != nil {
return err
}
}
return nil
}
func (r *ApiVersionsResponse) decode(pd packetDecoder, version int16) error {
kerr, err := pd.getInt16()
if err != nil {
return err
}
r.Err = KError(kerr)
numBlocks, err := pd.getArrayLength()
if err != nil {
return err
}
r.ApiVersions = make([]*ApiVersionsResponseBlock, numBlocks)
for i := 0; i < numBlocks; i++ {
block := new(ApiVersionsResponseBlock)
if err := block.decode(pd); err != nil {
return err
}
r.ApiVersions[i] = block
}
return nil
}
func (r *ApiVersionsResponse) key() int16 {
return 18
}
func (r *ApiVersionsResponse) version() int16 {
return 0
}
func (r *ApiVersionsResponse) requiredVersion() KafkaVersion {
return V0_10_0_0
}

vendor/github.com/Shopify/sarama/async_producer.go generated vendored Normal file

@@ -0,0 +1,904 @@
package sarama
import (
"fmt"
"sync"
"time"
"github.com/eapache/go-resiliency/breaker"
"github.com/eapache/queue"
)
// AsyncProducer publishes Kafka messages using a non-blocking API. It routes messages
// to the correct broker for the provided topic-partition, refreshing metadata as appropriate,
// and parses responses for errors. You must read from the Errors() channel or the
// producer will deadlock. You must call Close() or AsyncClose() on a producer to avoid
// leaks: it will not be garbage-collected automatically when it passes out of
// scope.
type AsyncProducer interface {
// AsyncClose triggers a shutdown of the producer. The shutdown has completed
// when both the Errors and Successes channels have been closed. When calling
// AsyncClose, you *must* continue to read from those channels in order to
// drain the results of any messages in flight.
AsyncClose()
// Close shuts down the producer and waits for any buffered messages to be
// flushed. You must call this function before a producer object passes out of
// scope, as it may otherwise leak memory. You must call this before calling
// Close on the underlying client.
Close() error
// Input is the input channel for the user to write messages to that they
// wish to send.
Input() chan<- *ProducerMessage
// Successes is the success output channel back to the user when Return.Successes is
// enabled. If Return.Successes is true, you MUST read from this channel or the
// Producer will deadlock. It is suggested that you send and read messages
// together in a single select statement.
Successes() <-chan *ProducerMessage
// Errors is the error output channel back to the user. You MUST read from this
// channel or the Producer will deadlock when the channel is full. Alternatively,
// you can set Producer.Return.Errors in your config to false, which prevents
// errors to be returned.
Errors() <-chan *ProducerError
}
type asyncProducer struct {
client Client
conf *Config
ownClient bool
errors chan *ProducerError
input, successes, retries chan *ProducerMessage
inFlight sync.WaitGroup
brokers map[*Broker]chan<- *ProducerMessage
brokerRefs map[chan<- *ProducerMessage]int
brokerLock sync.Mutex
}
// NewAsyncProducer creates a new AsyncProducer using the given broker addresses and configuration.
func NewAsyncProducer(addrs []string, conf *Config) (AsyncProducer, error) {
client, err := NewClient(addrs, conf)
if err != nil {
return nil, err
}
p, err := NewAsyncProducerFromClient(client)
if err != nil {
return nil, err
}
p.(*asyncProducer).ownClient = true
return p, nil
}
// NewAsyncProducerFromClient creates a new Producer using the given client. It is still
// necessary to call Close() on the underlying client when shutting down this producer.
func NewAsyncProducerFromClient(client Client) (AsyncProducer, error) {
// Check that we are not dealing with a closed Client before processing any other arguments
if client.Closed() {
return nil, ErrClosedClient
}
p := &asyncProducer{
client: client,
conf: client.Config(),
errors: make(chan *ProducerError),
input: make(chan *ProducerMessage),
successes: make(chan *ProducerMessage),
retries: make(chan *ProducerMessage),
brokers: make(map[*Broker]chan<- *ProducerMessage),
brokerRefs: make(map[chan<- *ProducerMessage]int),
}
// launch our singleton dispatchers
go withRecover(p.dispatcher)
go withRecover(p.retryHandler)
return p, nil
}
type flagSet int8
const (
syn flagSet = 1 << iota // first message from partitionProducer to brokerProducer
fin // final message from partitionProducer to brokerProducer and back
shutdown // start the shutdown process
)
// ProducerMessage is the collection of elements passed to the Producer in order to send a message.
type ProducerMessage struct {
Topic string // The Kafka topic for this message.
// The partitioning key for this message. Pre-existing Encoders include
// StringEncoder and ByteEncoder.
Key Encoder
// The actual message to store in Kafka. Pre-existing Encoders include
// StringEncoder and ByteEncoder.
Value Encoder
// This field is used to hold arbitrary data you wish to include so it
// will be available when receiving on the Successes and Errors channels.
// Sarama completely ignores this field and is only to be used for
// pass-through data.
Metadata interface{}
// Below this point are filled in by the producer as the message is processed
// Offset is the offset of the message stored on the broker. This is only
// guaranteed to be defined if the message was successfully delivered and
// RequiredAcks is not NoResponse.
Offset int64
// Partition is the partition that the message was sent to. This is only
// guaranteed to be defined if the message was successfully delivered.
Partition int32
// Timestamp is the timestamp assigned to the message by the broker. This
// is only guaranteed to be defined if the message was successfully
// delivered, RequiredAcks is not NoResponse, and the Kafka broker is at
// least version 0.10.0.
Timestamp time.Time
retries int
flags flagSet
}
const producerMessageOverhead = 26 // the metadata overhead of CRC, flags, etc.
func (m *ProducerMessage) byteSize() int {
size := producerMessageOverhead
if m.Key != nil {
size += m.Key.Length()
}
if m.Value != nil {
size += m.Value.Length()
}
return size
}
func (m *ProducerMessage) clear() {
m.flags = 0
m.retries = 0
}
// ProducerError is the type of error generated when the producer fails to deliver a message.
// It contains the original ProducerMessage as well as the actual error value.
type ProducerError struct {
Msg *ProducerMessage
Err error
}
func (pe ProducerError) Error() string {
return fmt.Sprintf("kafka: Failed to produce message to topic %s: %s", pe.Msg.Topic, pe.Err)
}
// ProducerErrors is a type that wraps a batch of "ProducerError"s and implements the Error interface.
// It can be returned from the Producer's Close method to avoid the need to manually drain the Errors channel
// when closing a producer.
type ProducerErrors []*ProducerError
func (pe ProducerErrors) Error() string {
return fmt.Sprintf("kafka: Failed to deliver %d messages.", len(pe))
}
func (p *asyncProducer) Errors() <-chan *ProducerError {
return p.errors
}
func (p *asyncProducer) Successes() <-chan *ProducerMessage {
return p.successes
}
func (p *asyncProducer) Input() chan<- *ProducerMessage {
return p.input
}
func (p *asyncProducer) Close() error {
p.AsyncClose()
if p.conf.Producer.Return.Successes {
go withRecover(func() {
for range p.successes {
}
})
}
var errors ProducerErrors
if p.conf.Producer.Return.Errors {
for event := range p.errors {
errors = append(errors, event)
}
} else {
<-p.errors
}
if len(errors) > 0 {
return errors
}
return nil
}
func (p *asyncProducer) AsyncClose() {
go withRecover(p.shutdown)
}
// singleton
// dispatches messages by topic
func (p *asyncProducer) dispatcher() {
handlers := make(map[string]chan<- *ProducerMessage)
shuttingDown := false
for msg := range p.input {
if msg == nil {
Logger.Println("Something tried to send a nil message, it was ignored.")
continue
}
if msg.flags&shutdown != 0 {
shuttingDown = true
p.inFlight.Done()
continue
} else if msg.retries == 0 {
if shuttingDown {
// we can't just call returnError here because that decrements the wait group,
// which hasn't been incremented yet for this message, and shouldn't be
pErr := &ProducerError{Msg: msg, Err: ErrShuttingDown}
if p.conf.Producer.Return.Errors {
p.errors <- pErr
} else {
Logger.Println(pErr)
}
continue
}
p.inFlight.Add(1)
}
if msg.byteSize() > p.conf.Producer.MaxMessageBytes {
p.returnError(msg, ErrMessageSizeTooLarge)
continue
}
handler := handlers[msg.Topic]
if handler == nil {
handler = p.newTopicProducer(msg.Topic)
handlers[msg.Topic] = handler
}
handler <- msg
}
for _, handler := range handlers {
close(handler)
}
}
// one per topic
// partitions messages, then dispatches them by partition
type topicProducer struct {
parent *asyncProducer
topic string
input <-chan *ProducerMessage
breaker *breaker.Breaker
handlers map[int32]chan<- *ProducerMessage
partitioner Partitioner
}
func (p *asyncProducer) newTopicProducer(topic string) chan<- *ProducerMessage {
input := make(chan *ProducerMessage, p.conf.ChannelBufferSize)
tp := &topicProducer{
parent: p,
topic: topic,
input: input,
breaker: breaker.New(3, 1, 10*time.Second),
handlers: make(map[int32]chan<- *ProducerMessage),
partitioner: p.conf.Producer.Partitioner(topic),
}
go withRecover(tp.dispatch)
return input
}
func (tp *topicProducer) dispatch() {
for msg := range tp.input {
if msg.retries == 0 {
if err := tp.partitionMessage(msg); err != nil {
tp.parent.returnError(msg, err)
continue
}
}
handler := tp.handlers[msg.Partition]
if handler == nil {
handler = tp.parent.newPartitionProducer(msg.Topic, msg.Partition)
tp.handlers[msg.Partition] = handler
}
handler <- msg
}
for _, handler := range tp.handlers {
close(handler)
}
}
func (tp *topicProducer) partitionMessage(msg *ProducerMessage) error {
var partitions []int32
err := tp.breaker.Run(func() (err error) {
if tp.partitioner.RequiresConsistency() {
partitions, err = tp.parent.client.Partitions(msg.Topic)
} else {
partitions, err = tp.parent.client.WritablePartitions(msg.Topic)
}
return
})
if err != nil {
return err
}
numPartitions := int32(len(partitions))
if numPartitions == 0 {
return ErrLeaderNotAvailable
}
choice, err := tp.partitioner.Partition(msg, numPartitions)
if err != nil {
return err
} else if choice < 0 || choice >= numPartitions {
return ErrInvalidPartition
}
msg.Partition = partitions[choice]
return nil
}
// one per partition per topic
// dispatches messages to the appropriate broker
// also responsible for maintaining message order during retries
type partitionProducer struct {
parent *asyncProducer
topic string
partition int32
input <-chan *ProducerMessage
leader *Broker
breaker *breaker.Breaker
output chan<- *ProducerMessage
// highWatermark tracks the "current" retry level, which is the only one where we actually let messages through,
// all other messages get buffered in retryState[msg.retries].buf to preserve ordering
// retryState[msg.retries].expectChaser simply tracks whether we've seen a fin message for a given level (and
// therefore whether our buffer is complete and safe to flush)
highWatermark int
retryState []partitionRetryState
}
type partitionRetryState struct {
buf []*ProducerMessage
expectChaser bool
}
func (p *asyncProducer) newPartitionProducer(topic string, partition int32) chan<- *ProducerMessage {
input := make(chan *ProducerMessage, p.conf.ChannelBufferSize)
pp := &partitionProducer{
parent: p,
topic: topic,
partition: partition,
input: input,
breaker: breaker.New(3, 1, 10*time.Second),
retryState: make([]partitionRetryState, p.conf.Producer.Retry.Max+1),
}
go withRecover(pp.dispatch)
return input
}
func (pp *partitionProducer) dispatch() {
// try to prefetch the leader; if this doesn't work, we'll do a proper call to `updateLeader`
// on the first message
pp.leader, _ = pp.parent.client.Leader(pp.topic, pp.partition)
if pp.leader != nil {
pp.output = pp.parent.getBrokerProducer(pp.leader)
pp.parent.inFlight.Add(1) // we're generating a syn message; track it so we don't shut down while it's still inflight
pp.output <- &ProducerMessage{Topic: pp.topic, Partition: pp.partition, flags: syn}
}
for msg := range pp.input {
if msg.retries > pp.highWatermark {
// a new, higher, retry level; handle it and then back off
pp.newHighWatermark(msg.retries)
time.Sleep(pp.parent.conf.Producer.Retry.Backoff)
} else if pp.highWatermark > 0 {
// we are retrying something (else highWatermark would be 0) but this message is not a *new* retry level
if msg.retries < pp.highWatermark {
// in fact this message is not even the current retry level, so buffer it for now (unless it's a just a fin)
if msg.flags&fin == fin {
pp.retryState[msg.retries].expectChaser = false
pp.parent.inFlight.Done() // this fin is now handled and will be garbage collected
} else {
pp.retryState[msg.retries].buf = append(pp.retryState[msg.retries].buf, msg)
}
continue
} else if msg.flags&fin == fin {
// this message is of the current retry level (msg.retries == highWatermark) and the fin flag is set,
// meaning this retry level is done and we can go down (at least) one level and flush that
pp.retryState[pp.highWatermark].expectChaser = false
pp.flushRetryBuffers()
pp.parent.inFlight.Done() // this fin is now handled and will be garbage collected
continue
}
}
// if we made it this far then the current msg contains real data, and can be sent to the next goroutine
// without breaking any of our ordering guarantees
if pp.output == nil {
if err := pp.updateLeader(); err != nil {
pp.parent.returnError(msg, err)
time.Sleep(pp.parent.conf.Producer.Retry.Backoff)
continue
}
Logger.Printf("producer/leader/%s/%d selected broker %d\n", pp.topic, pp.partition, pp.leader.ID())
}
pp.output <- msg
}
if pp.output != nil {
pp.parent.unrefBrokerProducer(pp.leader, pp.output)
}
}
func (pp *partitionProducer) newHighWatermark(hwm int) {
Logger.Printf("producer/leader/%s/%d state change to [retrying-%d]\n", pp.topic, pp.partition, hwm)
pp.highWatermark = hwm
// send off a fin so that we know when everything "in between" has made it
// back to us and we can safely flush the backlog (otherwise we risk re-ordering messages)
pp.retryState[pp.highWatermark].expectChaser = true
pp.parent.inFlight.Add(1) // we're generating a fin message; track it so we don't shut down while it's still inflight
pp.output <- &ProducerMessage{Topic: pp.topic, Partition: pp.partition, flags: fin, retries: pp.highWatermark - 1}
// a new HWM means that our current broker selection is out of date
Logger.Printf("producer/leader/%s/%d abandoning broker %d\n", pp.topic, pp.partition, pp.leader.ID())
pp.parent.unrefBrokerProducer(pp.leader, pp.output)
pp.output = nil
}
func (pp *partitionProducer) flushRetryBuffers() {
Logger.Printf("producer/leader/%s/%d state change to [flushing-%d]\n", pp.topic, pp.partition, pp.highWatermark)
for {
pp.highWatermark--
if pp.output == nil {
if err := pp.updateLeader(); err != nil {
pp.parent.returnErrors(pp.retryState[pp.highWatermark].buf, err)
goto flushDone
}
Logger.Printf("producer/leader/%s/%d selected broker %d\n", pp.topic, pp.partition, pp.leader.ID())
}
for _, msg := range pp.retryState[pp.highWatermark].buf {
pp.output <- msg
}
flushDone:
pp.retryState[pp.highWatermark].buf = nil
if pp.retryState[pp.highWatermark].expectChaser {
Logger.Printf("producer/leader/%s/%d state change to [retrying-%d]\n", pp.topic, pp.partition, pp.highWatermark)
break
} else if pp.highWatermark == 0 {
Logger.Printf("producer/leader/%s/%d state change to [normal]\n", pp.topic, pp.partition)
break
}
}
}
func (pp *partitionProducer) updateLeader() error {
return pp.breaker.Run(func() (err error) {
if err = pp.parent.client.RefreshMetadata(pp.topic); err != nil {
return err
}
if pp.leader, err = pp.parent.client.Leader(pp.topic, pp.partition); err != nil {
return err
}
pp.output = pp.parent.getBrokerProducer(pp.leader)
pp.parent.inFlight.Add(1) // we're generating a syn message; track it so we don't shut down while it's still inflight
pp.output <- &ProducerMessage{Topic: pp.topic, Partition: pp.partition, flags: syn}
return nil
})
}
// one per broker; also constructs an associated flusher
func (p *asyncProducer) newBrokerProducer(broker *Broker) chan<- *ProducerMessage {
var (
input = make(chan *ProducerMessage)
bridge = make(chan *produceSet)
responses = make(chan *brokerProducerResponse)
)
bp := &brokerProducer{
parent: p,
broker: broker,
input: input,
output: bridge,
responses: responses,
buffer: newProduceSet(p),
currentRetries: make(map[string]map[int32]error),
}
go withRecover(bp.run)
// minimal bridge to make the network response `select`able
go withRecover(func() {
for set := range bridge {
request := set.buildRequest()
response, err := broker.Produce(request)
responses <- &brokerProducerResponse{
set: set,
err: err,
res: response,
}
}
close(responses)
})
return input
}
type brokerProducerResponse struct {
set *produceSet
err error
res *ProduceResponse
}
// groups messages together into appropriately-sized batches for sending to the broker
// handles state related to retries etc
type brokerProducer struct {
parent *asyncProducer
broker *Broker
input <-chan *ProducerMessage
output chan<- *produceSet
responses <-chan *brokerProducerResponse
buffer *produceSet
timer <-chan time.Time
timerFired bool
closing error
currentRetries map[string]map[int32]error
}
func (bp *brokerProducer) run() {
var output chan<- *produceSet
Logger.Printf("producer/broker/%d starting up\n", bp.broker.ID())
for {
select {
case msg := <-bp.input:
if msg == nil {
bp.shutdown()
return
}
if msg.flags&syn == syn {
Logger.Printf("producer/broker/%d state change to [open] on %s/%d\n",
bp.broker.ID(), msg.Topic, msg.Partition)
if bp.currentRetries[msg.Topic] == nil {
bp.currentRetries[msg.Topic] = make(map[int32]error)
}
bp.currentRetries[msg.Topic][msg.Partition] = nil
bp.parent.inFlight.Done()
continue
}
if reason := bp.needsRetry(msg); reason != nil {
bp.parent.retryMessage(msg, reason)
if bp.closing == nil && msg.flags&fin == fin {
// we were retrying this partition but we can start processing again
delete(bp.currentRetries[msg.Topic], msg.Partition)
Logger.Printf("producer/broker/%d state change to [closed] on %s/%d\n",
bp.broker.ID(), msg.Topic, msg.Partition)
}
continue
}
if bp.buffer.wouldOverflow(msg) {
if err := bp.waitForSpace(msg); err != nil {
bp.parent.retryMessage(msg, err)
continue
}
}
if err := bp.buffer.add(msg); err != nil {
bp.parent.returnError(msg, err)
continue
}
if bp.parent.conf.Producer.Flush.Frequency > 0 && bp.timer == nil {
bp.timer = time.After(bp.parent.conf.Producer.Flush.Frequency)
}
case <-bp.timer:
bp.timerFired = true
case output <- bp.buffer:
bp.rollOver()
case response := <-bp.responses:
bp.handleResponse(response)
}
if bp.timerFired || bp.buffer.readyToFlush() {
output = bp.output
} else {
output = nil
}
}
}
func (bp *brokerProducer) shutdown() {
for !bp.buffer.empty() {
select {
case response := <-bp.responses:
bp.handleResponse(response)
case bp.output <- bp.buffer:
bp.rollOver()
}
}
close(bp.output)
for response := range bp.responses {
bp.handleResponse(response)
}
Logger.Printf("producer/broker/%d shut down\n", bp.broker.ID())
}
func (bp *brokerProducer) needsRetry(msg *ProducerMessage) error {
if bp.closing != nil {
return bp.closing
}
return bp.currentRetries[msg.Topic][msg.Partition]
}
func (bp *brokerProducer) waitForSpace(msg *ProducerMessage) error {
Logger.Printf("producer/broker/%d maximum request accumulated, waiting for space\n", bp.broker.ID())
for {
select {
case response := <-bp.responses:
bp.handleResponse(response)
// handling a response can change our state, so re-check some things
if reason := bp.needsRetry(msg); reason != nil {
return reason
} else if !bp.buffer.wouldOverflow(msg) {
return nil
}
case bp.output <- bp.buffer:
bp.rollOver()
return nil
}
}
}
func (bp *brokerProducer) rollOver() {
bp.timer = nil
bp.timerFired = false
bp.buffer = newProduceSet(bp.parent)
}
func (bp *brokerProducer) handleResponse(response *brokerProducerResponse) {
if response.err != nil {
bp.handleError(response.set, response.err)
} else {
bp.handleSuccess(response.set, response.res)
}
if bp.buffer.empty() {
bp.rollOver() // this can happen if the response invalidated our buffer
}
}
func (bp *brokerProducer) handleSuccess(sent *produceSet, response *ProduceResponse) {
// we iterate through the blocks in the request set, not the response, so that we notice
// if the response is missing a block completely
sent.eachPartition(func(topic string, partition int32, msgs []*ProducerMessage) {
if response == nil {
// this only happens when RequiredAcks is NoResponse, so we have to assume success
bp.parent.returnSuccesses(msgs)
return
}
block := response.GetBlock(topic, partition)
if block == nil {
bp.parent.returnErrors(msgs, ErrIncompleteResponse)
return
}
switch block.Err {
// Success
case ErrNoError:
if bp.parent.conf.Version.IsAtLeast(V0_10_0_0) && !block.Timestamp.IsZero() {
for _, msg := range msgs {
msg.Timestamp = block.Timestamp
}
}
for i, msg := range msgs {
msg.Offset = block.Offset + int64(i)
}
bp.parent.returnSuccesses(msgs)
// Retriable errors
case ErrInvalidMessage, ErrUnknownTopicOrPartition, ErrLeaderNotAvailable, ErrNotLeaderForPartition,
ErrRequestTimedOut, ErrNotEnoughReplicas, ErrNotEnoughReplicasAfterAppend:
Logger.Printf("producer/broker/%d state change to [retrying] on %s/%d because %v\n",
bp.broker.ID(), topic, partition, block.Err)
bp.currentRetries[topic][partition] = block.Err
bp.parent.retryMessages(msgs, block.Err)
bp.parent.retryMessages(bp.buffer.dropPartition(topic, partition), block.Err)
// Other non-retriable errors
default:
bp.parent.returnErrors(msgs, block.Err)
}
})
}
func (bp *brokerProducer) handleError(sent *produceSet, err error) {
switch err.(type) {
case PacketEncodingError:
sent.eachPartition(func(topic string, partition int32, msgs []*ProducerMessage) {
bp.parent.returnErrors(msgs, err)
})
default:
Logger.Printf("producer/broker/%d state change to [closing] because %s\n", bp.broker.ID(), err)
bp.parent.abandonBrokerConnection(bp.broker)
_ = bp.broker.Close()
bp.closing = err
sent.eachPartition(func(topic string, partition int32, msgs []*ProducerMessage) {
bp.parent.retryMessages(msgs, err)
})
bp.buffer.eachPartition(func(topic string, partition int32, msgs []*ProducerMessage) {
bp.parent.retryMessages(msgs, err)
})
bp.rollOver()
}
}
// singleton
// effectively a "bridge" between the flushers and the dispatcher in order to avoid deadlock
// based on https://godoc.org/github.com/eapache/channels#InfiniteChannel
func (p *asyncProducer) retryHandler() {
var msg *ProducerMessage
buf := queue.New()
for {
if buf.Length() == 0 {
msg = <-p.retries
} else {
select {
case msg = <-p.retries:
case p.input <- buf.Peek().(*ProducerMessage):
buf.Remove()
continue
}
}
if msg == nil {
return
}
buf.Add(msg)
}
}
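// The loop above is an unbounded buffer between two channels: it always stays
// receptive on p.retries while opportunistically draining the queue back into
// p.input. A generic sketch of the same bridge, assuming hypothetical `in`
// and `out` channels of *ProducerMessage (illustrative only):
//
//	buf := queue.New()
//	for {
//		if buf.Length() == 0 {
//			buf.Add(<-in) // nothing buffered: block on input only
//			continue
//		}
//		select {
//		case v := <-in:
//			buf.Add(v)
//		case out <- buf.Peek().(*ProducerMessage):
//			buf.Remove()
//		}
//	}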
// utility functions
func (p *asyncProducer) shutdown() {
Logger.Println("Producer shutting down.")
p.inFlight.Add(1)
p.input <- &ProducerMessage{flags: shutdown}
p.inFlight.Wait()
if p.ownClient {
err := p.client.Close()
if err != nil {
Logger.Println("producer/shutdown failed to close the embedded client:", err)
}
}
close(p.input)
close(p.retries)
close(p.errors)
close(p.successes)
}
func (p *asyncProducer) returnError(msg *ProducerMessage, err error) {
msg.clear()
pErr := &ProducerError{Msg: msg, Err: err}
if p.conf.Producer.Return.Errors {
p.errors <- pErr
} else {
Logger.Println(pErr)
}
p.inFlight.Done()
}
func (p *asyncProducer) returnErrors(batch []*ProducerMessage, err error) {
for _, msg := range batch {
p.returnError(msg, err)
}
}
func (p *asyncProducer) returnSuccesses(batch []*ProducerMessage) {
for _, msg := range batch {
if p.conf.Producer.Return.Successes {
msg.clear()
p.successes <- msg
}
p.inFlight.Done()
}
}
func (p *asyncProducer) retryMessage(msg *ProducerMessage, err error) {
if msg.retries >= p.conf.Producer.Retry.Max {
p.returnError(msg, err)
} else {
msg.retries++
p.retries <- msg
}
}
func (p *asyncProducer) retryMessages(batch []*ProducerMessage, err error) {
for _, msg := range batch {
p.retryMessage(msg, err)
}
}
func (p *asyncProducer) getBrokerProducer(broker *Broker) chan<- *ProducerMessage {
p.brokerLock.Lock()
defer p.brokerLock.Unlock()
bp := p.brokers[broker]
if bp == nil {
bp = p.newBrokerProducer(broker)
p.brokers[broker] = bp
p.brokerRefs[bp] = 0
}
p.brokerRefs[bp]++
return bp
}
func (p *asyncProducer) unrefBrokerProducer(broker *Broker, bp chan<- *ProducerMessage) {
p.brokerLock.Lock()
defer p.brokerLock.Unlock()
p.brokerRefs[bp]--
if p.brokerRefs[bp] == 0 {
close(bp)
delete(p.brokerRefs, bp)
if p.brokers[broker] == bp {
delete(p.brokers, broker)
}
}
}
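// getBrokerProducer and unrefBrokerProducer implement simple reference
// counting: every partitionProducer that targets a broker holds one reference
// to the shared input channel, and when the count drops to zero the channel
// is closed, which lets the brokerProducer goroutines drain their buffers and
// shut down.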
func (p *asyncProducer) abandonBrokerConnection(broker *Broker) {
p.brokerLock.Lock()
defer p.brokerLock.Unlock()
delete(p.brokers, broker)
}

685
vendor/github.com/Shopify/sarama/broker.go generated vendored Normal file
View File

@ -0,0 +1,685 @@
package sarama
import (
"crypto/tls"
"encoding/binary"
"fmt"
"io"
"net"
"strconv"
"sync"
"sync/atomic"
"time"
"github.com/rcrowley/go-metrics"
)
// Broker represents a single Kafka broker connection. All operations on this object are entirely concurrency-safe.
type Broker struct {
id int32
addr string
conf *Config
correlationID int32
conn net.Conn
connErr error
lock sync.Mutex
opened int32
responses chan responsePromise
done chan bool
incomingByteRate metrics.Meter
requestRate metrics.Meter
requestSize metrics.Histogram
requestLatency metrics.Histogram
outgoingByteRate metrics.Meter
responseRate metrics.Meter
responseSize metrics.Histogram
brokerIncomingByteRate metrics.Meter
brokerRequestRate metrics.Meter
brokerRequestSize metrics.Histogram
brokerRequestLatency metrics.Histogram
brokerOutgoingByteRate metrics.Meter
brokerResponseRate metrics.Meter
brokerResponseSize metrics.Histogram
}
type responsePromise struct {
requestTime time.Time
correlationID int32
packets chan []byte
errors chan error
}
// NewBroker creates and returns a Broker targeting the given host:port address.
// This does not attempt to actually connect, you have to call Open() for that.
func NewBroker(addr string) *Broker {
return &Broker{id: -1, addr: addr}
}
// Open tries to connect to the Broker if it is not already connected or connecting, but does not block
// waiting for the connection to complete. This means that any subsequent operations on the broker will
// block waiting for the connection to succeed or fail. To get the effect of a fully synchronous Open call,
// follow it by a call to Connected(). The only errors Open will return directly are ConfigurationError or
// AlreadyConnected. If conf is nil, the result of NewConfig() is used.
func (b *Broker) Open(conf *Config) error {
if !atomic.CompareAndSwapInt32(&b.opened, 0, 1) {
return ErrAlreadyConnected
}
if conf == nil {
conf = NewConfig()
}
err := conf.Validate()
if err != nil {
return err
}
b.lock.Lock()
go withRecover(func() {
defer b.lock.Unlock()
dialer := net.Dialer{
Timeout: conf.Net.DialTimeout,
KeepAlive: conf.Net.KeepAlive,
}
if conf.Net.TLS.Enable {
b.conn, b.connErr = tls.DialWithDialer(&dialer, "tcp", b.addr, conf.Net.TLS.Config)
} else {
b.conn, b.connErr = dialer.Dial("tcp", b.addr)
}
if b.connErr != nil {
Logger.Printf("Failed to connect to broker %s: %s\n", b.addr, b.connErr)
b.conn = nil
atomic.StoreInt32(&b.opened, 0)
return
}
b.conn = newBufConn(b.conn)
b.conf = conf
// Create or reuse the global metrics shared between brokers
b.incomingByteRate = metrics.GetOrRegisterMeter("incoming-byte-rate", conf.MetricRegistry)
b.requestRate = metrics.GetOrRegisterMeter("request-rate", conf.MetricRegistry)
b.requestSize = getOrRegisterHistogram("request-size", conf.MetricRegistry)
b.requestLatency = getOrRegisterHistogram("request-latency-in-ms", conf.MetricRegistry)
b.outgoingByteRate = metrics.GetOrRegisterMeter("outgoing-byte-rate", conf.MetricRegistry)
b.responseRate = metrics.GetOrRegisterMeter("response-rate", conf.MetricRegistry)
b.responseSize = getOrRegisterHistogram("response-size", conf.MetricRegistry)
// Do not gather metrics for seeded broker (only used during bootstrap) because they share
// the same id (-1) and are already exposed through the global metrics above
if b.id >= 0 {
b.brokerIncomingByteRate = getOrRegisterBrokerMeter("incoming-byte-rate", b, conf.MetricRegistry)
b.brokerRequestRate = getOrRegisterBrokerMeter("request-rate", b, conf.MetricRegistry)
b.brokerRequestSize = getOrRegisterBrokerHistogram("request-size", b, conf.MetricRegistry)
b.brokerRequestLatency = getOrRegisterBrokerHistogram("request-latency-in-ms", b, conf.MetricRegistry)
b.brokerOutgoingByteRate = getOrRegisterBrokerMeter("outgoing-byte-rate", b, conf.MetricRegistry)
b.brokerResponseRate = getOrRegisterBrokerMeter("response-rate", b, conf.MetricRegistry)
b.brokerResponseSize = getOrRegisterBrokerHistogram("response-size", b, conf.MetricRegistry)
}
if conf.Net.SASL.Enable {
b.connErr = b.sendAndReceiveSASLPlainAuth()
if b.connErr != nil {
err = b.conn.Close()
if err == nil {
Logger.Printf("Closed connection to broker %s\n", b.addr)
} else {
Logger.Printf("Error while closing connection to broker %s: %s\n", b.addr, err)
}
b.conn = nil
atomic.StoreInt32(&b.opened, 0)
return
}
}
b.done = make(chan bool)
b.responses = make(chan responsePromise, b.conf.Net.MaxOpenRequests-1)
if b.id >= 0 {
Logger.Printf("Connected to broker at %s (registered as #%d)\n", b.addr, b.id)
} else {
Logger.Printf("Connected to broker at %s (unregistered)\n", b.addr)
}
go withRecover(b.responseReceiver)
})
return nil
}
// Connected returns true if the broker is connected and false otherwise. If the broker is not
// connected but it had tried to connect, the error from that connection attempt is also returned.
func (b *Broker) Connected() (bool, error) {
b.lock.Lock()
defer b.lock.Unlock()
return b.conn != nil, b.connErr
}
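// Since Open is asynchronous, a synchronous connect can be approximated by
// calling Connected immediately afterwards: Connected acquires the same lock
// the dial goroutine holds, so it returns only once the attempt has finished.
// A minimal usage sketch, assuming a placeholder address (illustrative only):
//
//	b := NewBroker("localhost:9092")
//	if err := b.Open(NewConfig()); err != nil {
//		panic(err)
//	}
//	if ok, err := b.Connected(); !ok {
//		panic(err)
//	}
//	defer b.Close()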
func (b *Broker) Close() error {
b.lock.Lock()
defer b.lock.Unlock()
if b.conn == nil {
return ErrNotConnected
}
close(b.responses)
<-b.done
err := b.conn.Close()
b.conn = nil
b.connErr = nil
b.done = nil
b.responses = nil
if err == nil {
Logger.Printf("Closed connection to broker %s\n", b.addr)
} else {
Logger.Printf("Error while closing connection to broker %s: %s\n", b.addr, err)
}
atomic.StoreInt32(&b.opened, 0)
return err
}
// ID returns the broker ID retrieved from Kafka's metadata, or -1 if that is not known.
func (b *Broker) ID() int32 {
return b.id
}
// Addr returns the broker address as either retrieved from Kafka's metadata or passed to NewBroker.
func (b *Broker) Addr() string {
return b.addr
}
func (b *Broker) GetMetadata(request *MetadataRequest) (*MetadataResponse, error) {
response := new(MetadataResponse)
err := b.sendAndReceive(request, response)
if err != nil {
return nil, err
}
return response, nil
}
func (b *Broker) GetConsumerMetadata(request *ConsumerMetadataRequest) (*ConsumerMetadataResponse, error) {
response := new(ConsumerMetadataResponse)
err := b.sendAndReceive(request, response)
if err != nil {
return nil, err
}
return response, nil
}
func (b *Broker) GetAvailableOffsets(request *OffsetRequest) (*OffsetResponse, error) {
response := new(OffsetResponse)
err := b.sendAndReceive(request, response)
if err != nil {
return nil, err
}
return response, nil
}
func (b *Broker) Produce(request *ProduceRequest) (*ProduceResponse, error) {
var response *ProduceResponse
var err error
if request.RequiredAcks == NoResponse {
err = b.sendAndReceive(request, nil)
} else {
response = new(ProduceResponse)
err = b.sendAndReceive(request, response)
}
if err != nil {
return nil, err
}
return response, nil
}
func (b *Broker) Fetch(request *FetchRequest) (*FetchResponse, error) {
response := new(FetchResponse)
err := b.sendAndReceive(request, response)
if err != nil {
return nil, err
}
return response, nil
}
func (b *Broker) CommitOffset(request *OffsetCommitRequest) (*OffsetCommitResponse, error) {
response := new(OffsetCommitResponse)
err := b.sendAndReceive(request, response)
if err != nil {
return nil, err
}
return response, nil
}
func (b *Broker) FetchOffset(request *OffsetFetchRequest) (*OffsetFetchResponse, error) {
response := new(OffsetFetchResponse)
err := b.sendAndReceive(request, response)
if err != nil {
return nil, err
}
return response, nil
}
func (b *Broker) JoinGroup(request *JoinGroupRequest) (*JoinGroupResponse, error) {
response := new(JoinGroupResponse)
err := b.sendAndReceive(request, response)
if err != nil {
return nil, err
}
return response, nil
}
func (b *Broker) SyncGroup(request *SyncGroupRequest) (*SyncGroupResponse, error) {
response := new(SyncGroupResponse)
err := b.sendAndReceive(request, response)
if err != nil {
return nil, err
}
return response, nil
}
func (b *Broker) LeaveGroup(request *LeaveGroupRequest) (*LeaveGroupResponse, error) {
response := new(LeaveGroupResponse)
err := b.sendAndReceive(request, response)
if err != nil {
return nil, err
}
return response, nil
}
func (b *Broker) Heartbeat(request *HeartbeatRequest) (*HeartbeatResponse, error) {
response := new(HeartbeatResponse)
err := b.sendAndReceive(request, response)
if err != nil {
return nil, err
}
return response, nil
}
func (b *Broker) ListGroups(request *ListGroupsRequest) (*ListGroupsResponse, error) {
response := new(ListGroupsResponse)
err := b.sendAndReceive(request, response)
if err != nil {
return nil, err
}
return response, nil
}
func (b *Broker) DescribeGroups(request *DescribeGroupsRequest) (*DescribeGroupsResponse, error) {
response := new(DescribeGroupsResponse)
err := b.sendAndReceive(request, response)
if err != nil {
return nil, err
}
return response, nil
}
func (b *Broker) ApiVersions(request *ApiVersionsRequest) (*ApiVersionsResponse, error) {
response := new(ApiVersionsResponse)
err := b.sendAndReceive(request, response)
if err != nil {
return nil, err
}
return response, nil
}
func (b *Broker) send(rb protocolBody, promiseResponse bool) (*responsePromise, error) {
b.lock.Lock()
defer b.lock.Unlock()
if b.conn == nil {
if b.connErr != nil {
return nil, b.connErr
}
return nil, ErrNotConnected
}
if !b.conf.Version.IsAtLeast(rb.requiredVersion()) {
return nil, ErrUnsupportedVersion
}
req := &request{correlationID: b.correlationID, clientID: b.conf.ClientID, body: rb}
buf, err := encode(req, b.conf.MetricRegistry)
if err != nil {
return nil, err
}
err = b.conn.SetWriteDeadline(time.Now().Add(b.conf.Net.WriteTimeout))
if err != nil {
return nil, err
}
requestTime := time.Now()
bytes, err := b.conn.Write(buf)
b.updateOutgoingCommunicationMetrics(bytes)
if err != nil {
return nil, err
}
b.correlationID++
if !promiseResponse {
// Record request latency without the response
b.updateRequestLatencyMetrics(time.Since(requestTime))
return nil, nil
}
promise := responsePromise{requestTime, req.correlationID, make(chan []byte), make(chan error)}
b.responses <- promise
return &promise, nil
}
func (b *Broker) sendAndReceive(req protocolBody, res versionedDecoder) error {
promise, err := b.send(req, res != nil)
if err != nil {
return err
}
if promise == nil {
return nil
}
select {
case buf := <-promise.packets:
return versionedDecode(buf, res, req.version())
case err = <-promise.errors:
return err
}
}
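// Requests are pipelined: send may be called again before earlier responses
// arrive. In-flight requests are bounded by Net.MaxOpenRequests, because the
// b.responses channel is buffered with capacity MaxOpenRequests-1 and the
// responseReceiver works on at most one promise at a time.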
func (b *Broker) decode(pd packetDecoder) (err error) {
b.id, err = pd.getInt32()
if err != nil {
return err
}
host, err := pd.getString()
if err != nil {
return err
}
port, err := pd.getInt32()
if err != nil {
return err
}
b.addr = net.JoinHostPort(host, fmt.Sprint(port))
if _, _, err := net.SplitHostPort(b.addr); err != nil {
return err
}
return nil
}
func (b *Broker) encode(pe packetEncoder) (err error) {
host, portstr, err := net.SplitHostPort(b.addr)
if err != nil {
return err
}
port, err := strconv.Atoi(portstr)
if err != nil {
return err
}
pe.putInt32(b.id)
err = pe.putString(host)
if err != nil {
return err
}
pe.putInt32(int32(port))
return nil
}
func (b *Broker) responseReceiver() {
var dead error
header := make([]byte, 8)
for response := range b.responses {
if dead != nil {
response.errors <- dead
continue
}
err := b.conn.SetReadDeadline(time.Now().Add(b.conf.Net.ReadTimeout))
if err != nil {
dead = err
response.errors <- err
continue
}
bytesReadHeader, err := io.ReadFull(b.conn, header)
requestLatency := time.Since(response.requestTime)
if err != nil {
b.updateIncomingCommunicationMetrics(bytesReadHeader, requestLatency)
dead = err
response.errors <- err
continue
}
decodedHeader := responseHeader{}
err = decode(header, &decodedHeader)
if err != nil {
b.updateIncomingCommunicationMetrics(bytesReadHeader, requestLatency)
dead = err
response.errors <- err
continue
}
if decodedHeader.correlationID != response.correlationID {
b.updateIncomingCommunicationMetrics(bytesReadHeader, requestLatency)
// TODO if decoded ID < cur ID, discard until we catch up
// TODO if decoded ID > cur ID, save it so when cur ID catches up we have a response
dead = PacketDecodingError{fmt.Sprintf("correlation ID didn't match, wanted %d, got %d", response.correlationID, decodedHeader.correlationID)}
response.errors <- dead
continue
}
buf := make([]byte, decodedHeader.length-4)
bytesReadBody, err := io.ReadFull(b.conn, buf)
b.updateIncomingCommunicationMetrics(bytesReadHeader+bytesReadBody, requestLatency)
if err != nil {
dead = err
response.errors <- err
continue
}
response.packets <- buf
}
close(b.done)
}
func (b *Broker) sendAndReceiveSASLPlainHandshake() error {
rb := &SaslHandshakeRequest{"PLAIN"}
req := &request{correlationID: b.correlationID, clientID: b.conf.ClientID, body: rb}
buf, err := encode(req, b.conf.MetricRegistry)
if err != nil {
return err
}
err = b.conn.SetWriteDeadline(time.Now().Add(b.conf.Net.WriteTimeout))
if err != nil {
return err
}
requestTime := time.Now()
bytes, err := b.conn.Write(buf)
b.updateOutgoingCommunicationMetrics(bytes)
if err != nil {
Logger.Printf("Failed to send SASL handshake %s: %s\n", b.addr, err.Error())
return err
}
b.correlationID++
// wait for the response
header := make([]byte, 8) // response header
_, err = io.ReadFull(b.conn, header)
if err != nil {
Logger.Printf("Failed to read SASL handshake header : %s\n", err.Error())
return err
}
length := binary.BigEndian.Uint32(header[:4])
payload := make([]byte, length-4)
n, err := io.ReadFull(b.conn, payload)
if err != nil {
Logger.Printf("Failed to read SASL handshake payload : %s\n", err.Error())
return err
}
b.updateIncomingCommunicationMetrics(n+8, time.Since(requestTime))
res := &SaslHandshakeResponse{}
err = versionedDecode(payload, res, 0)
if err != nil {
Logger.Printf("Failed to parse SASL handshake : %s\n", err.Error())
return err
}
if res.Err != ErrNoError {
Logger.Printf("Invalid SASL Mechanism : %s\n", res.Err.Error())
return res.Err
}
Logger.Print("Successful SASL handshake")
return nil
}
// Kafka 0.10.0 plans to support SASL Plain and Kerberos as per PR #812 (KIP-43)/(JIRA KAFKA-3149)
// Some hosted Kafka services such as IBM Message Hub already offer SASL/PLAIN auth with Kafka 0.9
//
// In SASL Plain, Kafka expects the auth header to be in the following format
// Message format (from https://tools.ietf.org/html/rfc4616):
//
// message = [authzid] UTF8NUL authcid UTF8NUL passwd
// authcid = 1*SAFE ; MUST accept up to 255 octets
// authzid = 1*SAFE ; MUST accept up to 255 octets
// passwd = 1*SAFE ; MUST accept up to 255 octets
// UTF8NUL = %x00 ; UTF-8 encoded NUL character
//
// SAFE = UTF1 / UTF2 / UTF3 / UTF4
// ;; any UTF-8 encoded Unicode character except NUL
//
// When credentials are valid, Kafka returns a 4 byte array of null characters.
// When credentials are invalid, Kafka closes the connection. This does not seem to be the ideal way
// of responding to bad credentials, but that's how it's done today.
func (b *Broker) sendAndReceiveSASLPlainAuth() error {
if b.conf.Net.SASL.Handshake {
handshakeErr := b.sendAndReceiveSASLPlainHandshake()
if handshakeErr != nil {
Logger.Printf("Error while performing SASL handshake %s\n", b.addr)
return handshakeErr
}
}
length := 1 + len(b.conf.Net.SASL.User) + 1 + len(b.conf.Net.SASL.Password)
authBytes := make([]byte, length+4) // 4-byte length header + auth data
binary.BigEndian.PutUint32(authBytes, uint32(length))
copy(authBytes[4:], []byte("\x00"+b.conf.Net.SASL.User+"\x00"+b.conf.Net.SASL.Password))
err := b.conn.SetWriteDeadline(time.Now().Add(b.conf.Net.WriteTimeout))
if err != nil {
Logger.Printf("Failed to set write deadline when doing SASL auth with broker %s: %s\n", b.addr, err.Error())
return err
}
requestTime := time.Now()
bytesWritten, err := b.conn.Write(authBytes)
b.updateOutgoingCommunicationMetrics(bytesWritten)
if err != nil {
Logger.Printf("Failed to write SASL auth header to broker %s: %s\n", b.addr, err.Error())
return err
}
header := make([]byte, 4)
n, err := io.ReadFull(b.conn, header)
b.updateIncomingCommunicationMetrics(n, time.Since(requestTime))
// If the credentials are valid, we would get a 4 byte response filled with null characters.
// Otherwise, the broker closes the connection and we get an EOF
if err != nil {
Logger.Printf("Failed to read response while authenticating with SASL to broker %s: %s\n", b.addr, err.Error())
return err
}
Logger.Printf("SASL authentication successful with broker %s:%v - %v\n", b.addr, n, header)
return nil
}
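// Enabling SASL/PLAIN is purely a matter of configuration. A minimal sketch,
// with placeholder credentials (illustrative only):
//
//	conf := NewConfig()
//	conf.Net.SASL.Enable = true
//	conf.Net.SASL.User = "alice"
//	conf.Net.SASL.Password = "secret"
//	// TLS is strongly advisable alongside PLAIN, which otherwise sends the
//	// credentials in clear text:
//	conf.Net.TLS.Enable = true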
func (b *Broker) updateIncomingCommunicationMetrics(bytes int, requestLatency time.Duration) {
b.updateRequestLatencyMetrics(requestLatency)
b.responseRate.Mark(1)
if b.brokerResponseRate != nil {
b.brokerResponseRate.Mark(1)
}
responseSize := int64(bytes)
b.incomingByteRate.Mark(responseSize)
if b.brokerIncomingByteRate != nil {
b.brokerIncomingByteRate.Mark(responseSize)
}
b.responseSize.Update(responseSize)
if b.brokerResponseSize != nil {
b.brokerResponseSize.Update(responseSize)
}
}
func (b *Broker) updateRequestLatencyMetrics(requestLatency time.Duration) {
requestLatencyInMs := int64(requestLatency / time.Millisecond)
b.requestLatency.Update(requestLatencyInMs)
if b.brokerRequestLatency != nil {
b.brokerRequestLatency.Update(requestLatencyInMs)
}
}
func (b *Broker) updateOutgoingCommunicationMetrics(bytes int) {
b.requestRate.Mark(1)
if b.brokerRequestRate != nil {
b.brokerRequestRate.Mark(1)
}
requestSize := int64(bytes)
b.outgoingByteRate.Mark(requestSize)
if b.brokerOutgoingByteRate != nil {
b.brokerOutgoingByteRate.Mark(requestSize)
}
b.requestSize.Update(requestSize)
if b.brokerRequestSize != nil {
b.brokerRequestSize.Update(requestSize)
}
}

794
vendor/github.com/Shopify/sarama/client.go generated vendored Normal file
View File

@ -0,0 +1,794 @@
package sarama
import (
"math/rand"
"sort"
"sync"
"time"
)
// Client is a generic Kafka client. It manages connections to one or more Kafka brokers.
// You MUST call Close() on a client to avoid leaks; it will not be garbage-collected
// automatically when it passes out of scope. It is safe to share a client amongst many
// users; however, Kafka will process requests from a single client strictly in serial,
// so it is generally more efficient to use the default of one client per producer/consumer.
type Client interface {
// Config returns the Config struct of the client. This struct should not be
// altered after it has been created.
Config() *Config
// Brokers returns the current set of active brokers as retrieved from cluster metadata.
Brokers() []*Broker
// Topics returns the set of available topics as retrieved from cluster metadata.
Topics() ([]string, error)
// Partitions returns the sorted list of all partition IDs for the given topic.
Partitions(topic string) ([]int32, error)
// WritablePartitions returns the sorted list of all writable partition IDs for
// the given topic, where "writable" means "having a valid leader accepting
// writes".
WritablePartitions(topic string) ([]int32, error)
// Leader returns the broker object that is the leader of the current
// topic/partition, as determined by querying the cluster metadata.
Leader(topic string, partitionID int32) (*Broker, error)
// Replicas returns the set of all replica IDs for the given partition.
Replicas(topic string, partitionID int32) ([]int32, error)
// InSyncReplicas returns the set of all in-sync replica IDs for the given
// partition. In-sync replicas are replicas which are fully caught up with
// the partition leader.
InSyncReplicas(topic string, partitionID int32) ([]int32, error)
// RefreshMetadata takes a list of topics and queries the cluster to refresh the
// available metadata for those topics. If no topics are provided, it will refresh
// metadata for all topics.
RefreshMetadata(topics ...string) error
// GetOffset queries the cluster to get the most recent available offset at the
// given time on the topic/partition combination. Time should be OffsetOldest for
// the earliest available offset, OffsetNewest for the offset of the message that
// will be produced next, or a time.
GetOffset(topic string, partitionID int32, time int64) (int64, error)
// Coordinator returns the coordinating broker for a consumer group. It will
// return a locally cached value if it's available. You can call
// RefreshCoordinator to update the cached value. This function only works on
// Kafka 0.8.2 and higher.
Coordinator(consumerGroup string) (*Broker, error)
// RefreshCoordinator retrieves the coordinator for a consumer group and stores it
// in local cache. This function only works on Kafka 0.8.2 and higher.
RefreshCoordinator(consumerGroup string) error
// Close shuts down all broker connections managed by this client. It is required
// to call this function before a client object passes out of scope, as it will
// otherwise leak memory. You must close any Producers or Consumers using a client
// before you close the client.
Close() error
// Closed returns true if the client has already had Close called on it
Closed() bool
}
const (
// OffsetNewest stands for the log head offset, i.e. the offset that will be
// assigned to the next message that will be produced to the partition. You
// can send this to a client's GetOffset method to get this offset, or when
// calling ConsumePartition to start consuming new messages.
OffsetNewest int64 = -1
// OffsetOldest stands for the oldest offset available on the broker for a
// partition. You can send this to a client's GetOffset method to get this
// offset, or when calling ConsumePartition to start consuming from the
// oldest offset that is still available on the broker.
OffsetOldest int64 = -2
)
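// A minimal end-to-end sketch of the Client interface, assuming a placeholder
// broker address and topic (illustrative only):
//
//	client, err := NewClient([]string{"localhost:9092"}, NewConfig())
//	if err != nil {
//		panic(err)
//	}
//	defer client.Close()
//	partitions, err := client.Partitions("my-topic")
//	if err != nil {
//		panic(err)
//	}
//	for _, p := range partitions {
//		if newest, err := client.GetOffset("my-topic", p, OffsetNewest); err == nil {
//			Logger.Printf("partition %d: next offset %d\n", p, newest)
//		}
//	}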
type client struct {
conf *Config
closer, closed chan none // for shutting down background metadata updater
// the broker addresses given to us through the constructor are not guaranteed to be returned in
// the cluster metadata (I *think* it only returns brokers who are currently leading partitions?)
// so we store them separately
seedBrokers []*Broker
deadSeeds []*Broker
brokers map[int32]*Broker // maps broker ids to brokers
metadata map[string]map[int32]*PartitionMetadata // maps topics to partition ids to metadata
coordinators map[string]int32 // Maps consumer group names to coordinating broker IDs
// If the number of partitions is large, we can get some churn calling cachedPartitions,
// so the result is cached. It is important to update this value whenever metadata is changed
cachedPartitionsResults map[string][maxPartitionIndex][]int32
lock sync.RWMutex // protects access to the maps that hold cluster state.
}
// NewClient creates a new Client. It connects to one of the given broker addresses
// and uses that broker to automatically fetch metadata on the rest of the Kafka cluster. If metadata cannot
// be retrieved from any of the given broker addresses, the client is not created.
func NewClient(addrs []string, conf *Config) (Client, error) {
Logger.Println("Initializing new client")
if conf == nil {
conf = NewConfig()
}
if err := conf.Validate(); err != nil {
return nil, err
}
if len(addrs) < 1 {
return nil, ConfigurationError("You must provide at least one broker address")
}
client := &client{
conf: conf,
closer: make(chan none),
closed: make(chan none),
brokers: make(map[int32]*Broker),
metadata: make(map[string]map[int32]*PartitionMetadata),
cachedPartitionsResults: make(map[string][maxPartitionIndex][]int32),
coordinators: make(map[string]int32),
}
random := rand.New(rand.NewSource(time.Now().UnixNano()))
for _, index := range random.Perm(len(addrs)) {
client.seedBrokers = append(client.seedBrokers, NewBroker(addrs[index]))
}
if conf.Metadata.Full {
// do an initial fetch of all cluster metadata by specifying an empty list of topics
err := client.RefreshMetadata()
switch err {
case nil:
break
case ErrLeaderNotAvailable, ErrReplicaNotAvailable, ErrTopicAuthorizationFailed, ErrClusterAuthorizationFailed:
// indicates that maybe part of the cluster is down, but is not fatal to creating the client
Logger.Println(err)
default:
close(client.closed) // we haven't started the background updater yet, so we have to do this manually
_ = client.Close()
return nil, err
}
}
go withRecover(client.backgroundMetadataUpdater)
Logger.Println("Successfully initialized new client")
return client, nil
}
func (client *client) Config() *Config {
return client.conf
}
func (client *client) Brokers() []*Broker {
client.lock.RLock()
defer client.lock.RUnlock()
brokers := make([]*Broker, 0)
for _, broker := range client.brokers {
brokers = append(brokers, broker)
}
return brokers
}
func (client *client) Close() error {
if client.Closed() {
// Chances are this is being called from a defer() and the error will go unobserved
// so we go ahead and log the event in this case.
Logger.Printf("Close() called on already closed client")
return ErrClosedClient
}
// shutdown and wait for the background thread before we take the lock, to avoid races
close(client.closer)
<-client.closed
client.lock.Lock()
defer client.lock.Unlock()
Logger.Println("Closing Client")
for _, broker := range client.brokers {
safeAsyncClose(broker)
}
for _, broker := range client.seedBrokers {
safeAsyncClose(broker)
}
client.brokers = nil
client.metadata = nil
return nil
}
func (client *client) Closed() bool {
return client.brokers == nil
}
func (client *client) Topics() ([]string, error) {
if client.Closed() {
return nil, ErrClosedClient
}
client.lock.RLock()
defer client.lock.RUnlock()
ret := make([]string, 0, len(client.metadata))
for topic := range client.metadata {
ret = append(ret, topic)
}
return ret, nil
}
func (client *client) Partitions(topic string) ([]int32, error) {
if client.Closed() {
return nil, ErrClosedClient
}
partitions := client.cachedPartitions(topic, allPartitions)
if len(partitions) == 0 {
err := client.RefreshMetadata(topic)
if err != nil {
return nil, err
}
partitions = client.cachedPartitions(topic, allPartitions)
}
if partitions == nil {
return nil, ErrUnknownTopicOrPartition
}
return partitions, nil
}
func (client *client) WritablePartitions(topic string) ([]int32, error) {
if client.Closed() {
return nil, ErrClosedClient
}
partitions := client.cachedPartitions(topic, writablePartitions)
// len==0 catches when it's nil (no such topic) and the odd case when every single
// partition is undergoing leader election simultaneously. Callers have to be able to handle
// this function returning an empty slice (which is a valid return value) but catching it
// here the first time (note we *don't* catch it below where we return ErrUnknownTopicOrPartition) triggers
// a metadata refresh as a nicety so callers can just try again and don't have to manually
// trigger a refresh (otherwise they'd just keep getting a stale cached copy).
if len(partitions) == 0 {
err := client.RefreshMetadata(topic)
if err != nil {
return nil, err
}
partitions = client.cachedPartitions(topic, writablePartitions)
}
if partitions == nil {
return nil, ErrUnknownTopicOrPartition
}
return partitions, nil
}
func (client *client) Replicas(topic string, partitionID int32) ([]int32, error) {
if client.Closed() {
return nil, ErrClosedClient
}
metadata := client.cachedMetadata(topic, partitionID)
if metadata == nil {
err := client.RefreshMetadata(topic)
if err != nil {
return nil, err
}
metadata = client.cachedMetadata(topic, partitionID)
}
if metadata == nil {
return nil, ErrUnknownTopicOrPartition
}
if metadata.Err == ErrReplicaNotAvailable {
return nil, metadata.Err
}
return dupInt32Slice(metadata.Replicas), nil
}
func (client *client) InSyncReplicas(topic string, partitionID int32) ([]int32, error) {
if client.Closed() {
return nil, ErrClosedClient
}
metadata := client.cachedMetadata(topic, partitionID)
if metadata == nil {
err := client.RefreshMetadata(topic)
if err != nil {
return nil, err
}
metadata = client.cachedMetadata(topic, partitionID)
}
if metadata == nil {
return nil, ErrUnknownTopicOrPartition
}
if metadata.Err == ErrReplicaNotAvailable {
return nil, metadata.Err
}
return dupInt32Slice(metadata.Isr), nil
}
func (client *client) Leader(topic string, partitionID int32) (*Broker, error) {
if client.Closed() {
return nil, ErrClosedClient
}
leader, err := client.cachedLeader(topic, partitionID)
if leader == nil {
err = client.RefreshMetadata(topic)
if err != nil {
return nil, err
}
leader, err = client.cachedLeader(topic, partitionID)
}
return leader, err
}
func (client *client) RefreshMetadata(topics ...string) error {
if client.Closed() {
return ErrClosedClient
}
// Prior to 0.8.2, Kafka will throw exceptions on an empty topic and not return a proper
// error. This handles the case by returning an error instead of sending it
// off to Kafka. See: https://github.com/Shopify/sarama/pull/38#issuecomment-26362310
for _, topic := range topics {
if len(topic) == 0 {
return ErrInvalidTopic // this is the error that 0.8.2 and later correctly return
}
}
return client.tryRefreshMetadata(topics, client.conf.Metadata.Retry.Max)
}
func (client *client) GetOffset(topic string, partitionID int32, time int64) (int64, error) {
if client.Closed() {
return -1, ErrClosedClient
}
offset, err := client.getOffset(topic, partitionID, time)
if err != nil {
if err := client.RefreshMetadata(topic); err != nil {
return -1, err
}
return client.getOffset(topic, partitionID, time)
}
return offset, err
}
func (client *client) Coordinator(consumerGroup string) (*Broker, error) {
if client.Closed() {
return nil, ErrClosedClient
}
coordinator := client.cachedCoordinator(consumerGroup)
if coordinator == nil {
if err := client.RefreshCoordinator(consumerGroup); err != nil {
return nil, err
}
coordinator = client.cachedCoordinator(consumerGroup)
}
if coordinator == nil {
return nil, ErrConsumerCoordinatorNotAvailable
}
_ = coordinator.Open(client.conf)
return coordinator, nil
}
func (client *client) RefreshCoordinator(consumerGroup string) error {
if client.Closed() {
return ErrClosedClient
}
response, err := client.getConsumerMetadata(consumerGroup, client.conf.Metadata.Retry.Max)
if err != nil {
return err
}
client.lock.Lock()
defer client.lock.Unlock()
client.registerBroker(response.Coordinator)
client.coordinators[consumerGroup] = response.Coordinator.ID()
return nil
}
// private broker management helpers
// registerBroker makes sure a broker received by a Metadata or Coordinator request is registered
// in the brokers map. It returns the broker that is registered, which may be the provided broker,
// or a previously registered Broker instance. You must hold the write lock before calling this function.
func (client *client) registerBroker(broker *Broker) {
if client.brokers[broker.ID()] == nil {
client.brokers[broker.ID()] = broker
Logger.Printf("client/brokers registered new broker #%d at %s", broker.ID(), broker.Addr())
} else if broker.Addr() != client.brokers[broker.ID()].Addr() {
safeAsyncClose(client.brokers[broker.ID()])
client.brokers[broker.ID()] = broker
Logger.Printf("client/brokers replaced registered broker #%d with %s", broker.ID(), broker.Addr())
}
}
// deregisterBroker removes a broker from the seedBrokers list, and if it's
// not a seed broker, removes it from the brokers map completely.
func (client *client) deregisterBroker(broker *Broker) {
client.lock.Lock()
defer client.lock.Unlock()
if len(client.seedBrokers) > 0 && broker == client.seedBrokers[0] {
client.deadSeeds = append(client.deadSeeds, broker)
client.seedBrokers = client.seedBrokers[1:]
} else {
// we do this so that our loop in `tryRefreshMetadata` doesn't go on forever,
// but we really shouldn't have to; once that loop is made better this case can be
// removed, and the function generally can be renamed from `deregisterBroker` to
// `nextSeedBroker` or something
Logger.Printf("client/brokers deregistered broker #%d at %s", broker.ID(), broker.Addr())
delete(client.brokers, broker.ID())
}
}
func (client *client) resurrectDeadBrokers() {
client.lock.Lock()
defer client.lock.Unlock()
Logger.Printf("client/brokers resurrecting %d dead seed brokers", len(client.deadSeeds))
client.seedBrokers = append(client.seedBrokers, client.deadSeeds...)
client.deadSeeds = nil
}
func (client *client) any() *Broker {
client.lock.RLock()
defer client.lock.RUnlock()
if len(client.seedBrokers) > 0 {
_ = client.seedBrokers[0].Open(client.conf)
return client.seedBrokers[0]
}
// not guaranteed to be random *or* deterministic
for _, broker := range client.brokers {
_ = broker.Open(client.conf)
return broker
}
return nil
}
// private caching/lazy metadata helpers
type partitionType int
const (
allPartitions partitionType = iota
writablePartitions
// If you add any more types, update the partition cache in update()
// Ensure this is the last partition type value
maxPartitionIndex
)
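// partitionType doubles as an array index: the client caches one precomputed
// []int32 per (topic, partitionType) pair in cachedPartitionsResults, which
// is why maxPartitionIndex must remain the last value. Conceptually:
//
//	var cache [maxPartitionIndex][]int32
//	cache[allPartitions] = []int32{0, 1, 2}   // every known partition
//	cache[writablePartitions] = []int32{0, 2} // those with a live leader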
func (client *client) cachedMetadata(topic string, partitionID int32) *PartitionMetadata {
client.lock.RLock()
defer client.lock.RUnlock()
partitions := client.metadata[topic]
if partitions != nil {
return partitions[partitionID]
}
return nil
}
func (client *client) cachedPartitions(topic string, partitionSet partitionType) []int32 {
client.lock.RLock()
defer client.lock.RUnlock()
partitions, exists := client.cachedPartitionsResults[topic]
if !exists {
return nil
}
return partitions[partitionSet]
}
func (client *client) setPartitionCache(topic string, partitionSet partitionType) []int32 {
partitions := client.metadata[topic]
if partitions == nil {
return nil
}
ret := make([]int32, 0, len(partitions))
for _, partition := range partitions {
if partitionSet == writablePartitions && partition.Err == ErrLeaderNotAvailable {
continue
}
ret = append(ret, partition.ID)
}
sort.Sort(int32Slice(ret))
return ret
}
func (client *client) cachedLeader(topic string, partitionID int32) (*Broker, error) {
client.lock.RLock()
defer client.lock.RUnlock()
partitions := client.metadata[topic]
if partitions != nil {
metadata, ok := partitions[partitionID]
if ok {
if metadata.Err == ErrLeaderNotAvailable {
return nil, ErrLeaderNotAvailable
}
b := client.brokers[metadata.Leader]
if b == nil {
return nil, ErrLeaderNotAvailable
}
_ = b.Open(client.conf)
return b, nil
}
}
return nil, ErrUnknownTopicOrPartition
}
func (client *client) getOffset(topic string, partitionID int32, time int64) (int64, error) {
broker, err := client.Leader(topic, partitionID)
if err != nil {
return -1, err
}
request := &OffsetRequest{}
if client.conf.Version.IsAtLeast(V0_10_1_0) {
request.Version = 1
}
request.AddBlock(topic, partitionID, time, 1)
response, err := broker.GetAvailableOffsets(request)
if err != nil {
_ = broker.Close()
return -1, err
}
block := response.GetBlock(topic, partitionID)
if block == nil {
_ = broker.Close()
return -1, ErrIncompleteResponse
}
if block.Err != ErrNoError {
return -1, block.Err
}
if len(block.Offsets) != 1 {
return -1, ErrOffsetOutOfRange
}
return block.Offsets[0], nil
}
// core metadata update logic
func (client *client) backgroundMetadataUpdater() {
defer close(client.closed)
if client.conf.Metadata.RefreshFrequency == time.Duration(0) {
return
}
ticker := time.NewTicker(client.conf.Metadata.RefreshFrequency)
defer ticker.Stop()
for {
select {
case <-ticker.C:
topics := []string{}
if !client.conf.Metadata.Full {
if specificTopics, err := client.Topics(); err != nil {
Logger.Println("Client background metadata topic load:", err)
break
} else if len(specificTopics) == 0 {
Logger.Println("Client background metadata update: no specific topics to update")
break
} else {
topics = specificTopics
}
}
if err := client.RefreshMetadata(topics...); err != nil {
Logger.Println("Client background metadata update:", err)
}
case <-client.closer:
return
}
}
}
func (client *client) tryRefreshMetadata(topics []string, attemptsRemaining int) error {
retry := func(err error) error {
if attemptsRemaining > 0 {
Logger.Printf("client/metadata retrying after %dms... (%d attempts remaining)\n", client.conf.Metadata.Retry.Backoff/time.Millisecond, attemptsRemaining)
time.Sleep(client.conf.Metadata.Retry.Backoff)
return client.tryRefreshMetadata(topics, attemptsRemaining-1)
}
return err
}
for broker := client.any(); broker != nil; broker = client.any() {
if len(topics) > 0 {
Logger.Printf("client/metadata fetching metadata for %v from broker %s\n", topics, broker.addr)
} else {
Logger.Printf("client/metadata fetching metadata for all topics from broker %s\n", broker.addr)
}
response, err := broker.GetMetadata(&MetadataRequest{Topics: topics})
switch err.(type) {
case nil:
// valid response, use it
shouldRetry, err := client.updateMetadata(response)
if shouldRetry {
Logger.Println("client/metadata found some partitions to be leaderless")
return retry(err) // note: err can be nil
}
return err
case PacketEncodingError:
// didn't even send, return the error
return err
default:
// some other error, remove that broker and try again
Logger.Println("client/metadata got error from broker while fetching metadata:", err)
_ = broker.Close()
client.deregisterBroker(broker)
}
}
Logger.Println("client/metadata no available broker to send metadata request to")
client.resurrectDeadBrokers()
return retry(ErrOutOfBrokers)
}
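// tryRefreshMetadata retries itself recursively with a fixed backoff while
// cycling through brokers via client.any(). The retry shape, stripped to a
// skeleton with hypothetical doRequest/isFatal helpers (illustrative only):
//
//	func try(attemptsRemaining int) error {
//		err := doRequest()
//		if err == nil || isFatal(err) || attemptsRemaining == 0 {
//			return err
//		}
//		time.Sleep(backoff)
//		return try(attemptsRemaining - 1)
//	}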
// if no fatal error, returns a list of topics that need retrying due to ErrLeaderNotAvailable
func (client *client) updateMetadata(data *MetadataResponse) (retry bool, err error) {
client.lock.Lock()
defer client.lock.Unlock()
// For all the brokers we received:
// - if it is a new ID, save it
// - if it is an existing ID, but the address we have is stale, discard the old one and save it
// - otherwise ignore it, replacing our existing one would just bounce the connection
for _, broker := range data.Brokers {
client.registerBroker(broker)
}
for _, topic := range data.Topics {
delete(client.metadata, topic.Name)
delete(client.cachedPartitionsResults, topic.Name)
switch topic.Err {
case ErrNoError:
break
case ErrInvalidTopic, ErrTopicAuthorizationFailed: // don't retry, don't store partial results
err = topic.Err
continue
case ErrUnknownTopicOrPartition: // retry, do not store partial partition results
err = topic.Err
retry = true
continue
case ErrLeaderNotAvailable: // retry, but store partial partition results
retry = true
break
default: // don't retry, don't store partial results
Logger.Printf("Unexpected topic-level metadata error: %s", topic.Err)
err = topic.Err
continue
}
client.metadata[topic.Name] = make(map[int32]*PartitionMetadata, len(topic.Partitions))
for _, partition := range topic.Partitions {
client.metadata[topic.Name][partition.ID] = partition
if partition.Err == ErrLeaderNotAvailable {
retry = true
}
}
var partitionCache [maxPartitionIndex][]int32
partitionCache[allPartitions] = client.setPartitionCache(topic.Name, allPartitions)
partitionCache[writablePartitions] = client.setPartitionCache(topic.Name, writablePartitions)
client.cachedPartitionsResults[topic.Name] = partitionCache
}
return
}
func (client *client) cachedCoordinator(consumerGroup string) *Broker {
client.lock.RLock()
defer client.lock.RUnlock()
if coordinatorID, ok := client.coordinators[consumerGroup]; ok {
return client.brokers[coordinatorID]
}
return nil
}
func (client *client) getConsumerMetadata(consumerGroup string, attemptsRemaining int) (*ConsumerMetadataResponse, error) {
retry := func(err error) (*ConsumerMetadataResponse, error) {
if attemptsRemaining > 0 {
Logger.Printf("client/coordinator retrying after %dms... (%d attempts remaining)\n", client.conf.Metadata.Retry.Backoff/time.Millisecond, attemptsRemaining)
time.Sleep(client.conf.Metadata.Retry.Backoff)
return client.getConsumerMetadata(consumerGroup, attemptsRemaining-1)
}
return nil, err
}
for broker := client.any(); broker != nil; broker = client.any() {
Logger.Printf("client/coordinator requesting coordinator for consumergroup %s from %s\n", consumerGroup, broker.Addr())
request := new(ConsumerMetadataRequest)
request.ConsumerGroup = consumerGroup
response, err := broker.GetConsumerMetadata(request)
if err != nil {
Logger.Printf("client/coordinator request to broker %s failed: %s\n", broker.Addr(), err)
switch err.(type) {
case PacketEncodingError:
return nil, err
default:
_ = broker.Close()
client.deregisterBroker(broker)
continue
}
}
switch response.Err {
case ErrNoError:
Logger.Printf("client/coordinator coordinator for consumergroup %s is #%d (%s)\n", consumerGroup, response.Coordinator.ID(), response.Coordinator.Addr())
return response, nil
case ErrConsumerCoordinatorNotAvailable:
Logger.Printf("client/coordinator coordinator for consumer group %s is not available\n", consumerGroup)
// This is very ugly, but this scenario will only happen once per cluster.
// The __consumer_offsets topic only has to be created one time.
// The number of partitions is not configurable, but partition 0 should always exist.
if _, err := client.Leader("__consumer_offsets", 0); err != nil {
Logger.Printf("client/coordinator the __consumer_offsets topic is not initialized completely yet. Waiting 2 seconds...\n")
time.Sleep(2 * time.Second)
}
return retry(ErrConsumerCoordinatorNotAvailable)
default:
return nil, response.Err
}
}
Logger.Println("client/coordinator no available broker to send consumer metadata request to")
client.resurrectDeadBrokers()
return retry(ErrOutOfBrokers)
}

442
vendor/github.com/Shopify/sarama/config.go generated vendored Normal file
View File

@ -0,0 +1,442 @@
package sarama
import (
"crypto/tls"
"regexp"
"time"
"github.com/rcrowley/go-metrics"
)
const defaultClientID = "sarama"
var validID = regexp.MustCompile(`\A[A-Za-z0-9._-]+\z`)
// Config is used to pass multiple configuration options to Sarama's constructors.
type Config struct {
// Net is the namespace for network-level properties used by the Broker, and
// shared by the Client/Producer/Consumer.
Net struct {
// How many outstanding requests a connection is allowed to have before
// sending on it blocks (default 5).
MaxOpenRequests int
// All three of the below configurations are similar to the
// `socket.timeout.ms` setting in the JVM Kafka client. All of them default
// to 30 seconds.
DialTimeout time.Duration // How long to wait for the initial connection.
ReadTimeout time.Duration // How long to wait for a response.
WriteTimeout time.Duration // How long to wait for a transmit.
TLS struct {
// Whether or not to use TLS when connecting to the broker
// (defaults to false).
Enable bool
// The TLS configuration to use for secure connections if
// enabled (defaults to nil).
Config *tls.Config
}
// SASL based authentication with broker. While there are multiple SASL authentication methods
// the current implementation is limited to plaintext (SASL/PLAIN) authentication
SASL struct {
// Whether or not to use SASL authentication when connecting to the broker
// (defaults to false).
Enable bool
// Whether or not to send the Kafka SASL handshake first if enabled
// (defaults to true). You should only set this to false if you're using
// a non-Kafka SASL proxy.
Handshake bool
// Username and password for SASL/PLAIN authentication.
User string
Password string
}
// KeepAlive specifies the keep-alive period for an active network connection.
// If zero, keep-alives are disabled. (default is 0: disabled).
KeepAlive time.Duration
}
// Metadata is the namespace for metadata management properties used by the
// Client, and shared by the Producer/Consumer.
Metadata struct {
Retry struct {
// The total number of times to retry a metadata request when the
// cluster is in the middle of a leader election (default 3).
Max int
// How long to wait for leader election to occur before retrying
// (default 250ms). Similar to the JVM's `retry.backoff.ms`.
Backoff time.Duration
}
// How frequently to refresh the cluster metadata in the background.
// Defaults to 10 minutes. Set to 0 to disable. Similar to
// `topic.metadata.refresh.interval.ms` in the JVM version.
RefreshFrequency time.Duration
// Whether to maintain a full set of metadata for all topics, or just
// the minimal set that has been necessary so far. The full set is simpler
// and usually more convenient, but can take up a substantial amount of
// memory if you have many topics and partitions. Defaults to true.
Full bool
}
// Producer is the namespace for configuration related to producing messages,
// used by the Producer.
Producer struct {
// The maximum permitted size of a message (defaults to 1000000). Should be
// set equal to or smaller than the broker's `message.max.bytes`.
MaxMessageBytes int
// The level of acknowledgement reliability needed from the broker (defaults
// to WaitForLocal). Equivalent to the `request.required.acks` setting of the
// JVM producer.
RequiredAcks RequiredAcks
// The maximum duration the broker will wait the receipt of the number of
// RequiredAcks (defaults to 10 seconds). This is only relevant when
// RequiredAcks is set to WaitForAll or a number > 1. Only supports
// millisecond resolution, nanoseconds will be truncated. Equivalent to
// the JVM producer's `request.timeout.ms` setting.
Timeout time.Duration
// The type of compression to use on messages (defaults to no compression).
// Similar to `compression.codec` setting of the JVM producer.
Compression CompressionCodec
// Generates partitioners for choosing the partition to send messages to
// (defaults to hashing the message key). Similar to the `partitioner.class`
// setting for the JVM producer.
Partitioner PartitionerConstructor
// Return specifies what channels will be populated. If they are set to true,
// you must read from the respective channels to prevent deadlock. If,
// however, this config is used to create a `SyncProducer`, both must be set
// to true and you shall not read from the channels since the producer does
// this internally.
Return struct {
// If enabled, successfully delivered messages will be returned on the
// Successes channel (default disabled).
Successes bool
// If enabled, messages that failed to deliver will be returned on the
// Errors channel, including error (default enabled).
Errors bool
}
// The following config options control how often messages are batched up and
// sent to the broker. By default, messages are sent as fast as possible, and
// all messages received while the current batch is in-flight are placed
// into the subsequent batch.
Flush struct {
// The best-effort number of bytes needed to trigger a flush. Use the
// global sarama.MaxRequestSize to set a hard upper limit.
Bytes int
// The best-effort number of messages needed to trigger a flush. Use
// `MaxMessages` to set a hard upper limit.
Messages int
// The best-effort frequency of flushes. Equivalent to
// `queue.buffering.max.ms` setting of JVM producer.
Frequency time.Duration
// The maximum number of messages the producer will send in a single
// broker request. Defaults to 0 for unlimited. Similar to
// `queue.buffering.max.messages` in the JVM producer.
MaxMessages int
}
Retry struct {
// The total number of times to retry sending a message (default 3).
// Similar to the `message.send.max.retries` setting of the JVM producer.
Max int
// How long to wait for the cluster to settle between retries
// (default 100ms). Similar to the `retry.backoff.ms` setting of the
// JVM producer.
Backoff time.Duration
}
}
// Consumer is the namespace for configuration related to consuming messages,
// used by the Consumer.
//
// Note that Sarama's Consumer type does not currently support automatic
// consumer-group rebalancing and offset tracking. For Zookeeper-based
// tracking (Kafka 0.8.2 and earlier), the https://github.com/wvanbergen/kafka
// library builds on Sarama to add this support. For Kafka-based tracking
// (Kafka 0.9 and later), the https://github.com/bsm/sarama-cluster library
// builds on Sarama to add this support.
Consumer struct {
Retry struct {
// How long to wait after a failing to read from a partition before
// trying again (default 2s).
Backoff time.Duration
}
// Fetch is the namespace for controlling how many bytes are retrieved by any
// given request.
Fetch struct {
// The minimum number of message bytes to fetch in a request - the broker
// will wait until at least this many are available. The default is 1,
// as 0 causes the consumer to spin when no messages are available.
// Equivalent to the JVM's `fetch.min.bytes`.
Min int32
// The default number of message bytes to fetch from the broker in each
// request (default 32768). This should be larger than the majority of
// your messages, or else the consumer will spend a lot of time
// negotiating sizes and not actually consuming. Similar to the JVM's
// `fetch.message.max.bytes`.
Default int32
// The maximum number of message bytes to fetch from the broker in a
// single request. Messages larger than this will return
// ErrMessageTooLarge and will not be consumable, so you must be sure
// this is at least as large as your largest message. Defaults to 0
// (no limit). Similar to the JVM's `fetch.message.max.bytes`. The
// global `sarama.MaxResponseSize` still applies.
Max int32
}
// The maximum amount of time the broker will wait for Consumer.Fetch.Min
// bytes to become available before it returns fewer than that anyways. The
// default is 250ms, since 0 causes the consumer to spin when no events are
// available. 100-500ms is a reasonable range for most cases. Kafka only
// supports precision up to milliseconds; nanoseconds will be truncated.
// Equivalent to the JVM's `fetch.wait.max.ms`.
MaxWaitTime time.Duration
// The maximum amount of time the consumer expects a message takes to
// process for the user. If writing to the Messages channel takes longer
// than this, that partition will stop fetching more messages until it
// can proceed again.
// Note that, since the Messages channel is buffered, the actual grace time is
// (MaxProcessingTime * ChannelBufferSize). Defaults to 100ms.
// If a message is not written to the Messages channel between two ticks
// of the expiryTicker then a timeout is detected.
// Using a ticker instead of a timer to detect timeouts should typically
// result in many fewer calls to Timer functions which may result in a
// significant performance improvement if many messages are being sent
// and timeouts are infrequent.
// The disadvantage of using a ticker instead of a timer is that
// timeouts will be less accurate. That is, the effective timeout could
// be between `MaxProcessingTime` and `2 * MaxProcessingTime`. For
// example, if `MaxProcessingTime` is 100ms then a delay of 180ms
// between two messages being sent may not be recognized as a timeout.
MaxProcessingTime time.Duration
// Return specifies what channels will be populated. If they are set to true,
// you must read from them to prevent deadlock.
Return struct {
// If enabled, any errors that occurred while consuming are returned on
// the Errors channel (default disabled).
Errors bool
}
// Offsets specifies configuration for how and when to commit consumed
// offsets. This currently requires the manual use of an OffsetManager
// but will eventually be automated.
Offsets struct {
// How frequently to commit updated offsets. Defaults to 1s.
CommitInterval time.Duration
// The initial offset to use if no offset was previously committed.
// Should be OffsetNewest or OffsetOldest. Defaults to OffsetNewest.
Initial int64
// The retention duration for committed offsets. If zero, disabled
// (in which case the `offsets.retention.minutes` option on the
// broker will be used). Kafka only supports precision up to
// milliseconds; nanoseconds will be truncated. Requires Kafka
// broker version 0.9.0 or later.
// (default is 0: disabled).
Retention time.Duration
}
}
// A user-provided string sent with every request to the brokers for logging,
// debugging, and auditing purposes. Defaults to "sarama", but you should
// probably set it to something specific to your application.
ClientID string
// The number of events to buffer in internal and external channels. This
// permits the producer and consumer to continue processing some messages
// in the background while user code is working, greatly improving throughput.
// Defaults to 256.
ChannelBufferSize int
// The version of Kafka that Sarama will assume it is running against.
// Defaults to the oldest supported stable version. Since Kafka provides
// backwards-compatibility, setting it to a version older than you have
// will not break anything, although it may prevent you from using the
// latest features. Setting it to a version greater than you are actually
// running may lead to random breakage.
Version KafkaVersion
// The registry to define metrics into.
// Defaults to a local registry.
// If you want to disable metrics gathering, set "metrics.UseNilMetrics" to "true"
// prior to starting Sarama.
// See Examples on how to use the metrics registry
MetricRegistry metrics.Registry
}
// NewConfig returns a new configuration instance with sane defaults.
func NewConfig() *Config {
c := &Config{}
c.Net.MaxOpenRequests = 5
c.Net.DialTimeout = 30 * time.Second
c.Net.ReadTimeout = 30 * time.Second
c.Net.WriteTimeout = 30 * time.Second
c.Net.SASL.Handshake = true
c.Metadata.Retry.Max = 3
c.Metadata.Retry.Backoff = 250 * time.Millisecond
c.Metadata.RefreshFrequency = 10 * time.Minute
c.Metadata.Full = true
c.Producer.MaxMessageBytes = 1000000
c.Producer.RequiredAcks = WaitForLocal
c.Producer.Timeout = 10 * time.Second
c.Producer.Partitioner = NewHashPartitioner
c.Producer.Retry.Max = 3
c.Producer.Retry.Backoff = 100 * time.Millisecond
c.Producer.Return.Errors = true
c.Consumer.Fetch.Min = 1
c.Consumer.Fetch.Default = 32768
c.Consumer.Retry.Backoff = 2 * time.Second
c.Consumer.MaxWaitTime = 250 * time.Millisecond
c.Consumer.MaxProcessingTime = 100 * time.Millisecond
c.Consumer.Return.Errors = false
c.Consumer.Offsets.CommitInterval = 1 * time.Second
c.Consumer.Offsets.Initial = OffsetNewest
c.ClientID = defaultClientID
c.ChannelBufferSize = 256
c.Version = minVersion
c.MetricRegistry = metrics.NewRegistry()
return c
}
// Validate checks a Config instance. It will return a
// ConfigurationError if the specified values don't make sense.
func (c *Config) Validate() error {
// some configuration values warrant a warning but should not fail validation outright; check those first
if !c.Net.TLS.Enable && c.Net.TLS.Config != nil {
Logger.Println("Net.TLS is disabled but a non-nil configuration was provided.")
}
if !c.Net.SASL.Enable {
if c.Net.SASL.User != "" {
Logger.Println("Net.SASL is disabled but a non-empty username was provided.")
}
if c.Net.SASL.Password != "" {
Logger.Println("Net.SASL is disabled but a non-empty password was provided.")
}
}
if c.Producer.RequiredAcks > 1 {
Logger.Println("Producer.RequiredAcks > 1 is deprecated and will raise an exception with kafka >= 0.8.2.0.")
}
if c.Producer.MaxMessageBytes >= int(MaxRequestSize) {
Logger.Println("Producer.MaxMessageBytes must be smaller than MaxRequestSize; it will be ignored.")
}
if c.Producer.Flush.Bytes >= int(MaxRequestSize) {
Logger.Println("Producer.Flush.Bytes must be smaller than MaxRequestSize; it will be ignored.")
}
if (c.Producer.Flush.Bytes > 0 || c.Producer.Flush.Messages > 0) && c.Producer.Flush.Frequency == 0 {
Logger.Println("Producer.Flush: Bytes or Messages are set, but Frequency is not; messages may not get flushed.")
}
if c.Producer.Timeout%time.Millisecond != 0 {
Logger.Println("Producer.Timeout only supports millisecond resolution; nanoseconds will be truncated.")
}
if c.Consumer.MaxWaitTime < 100*time.Millisecond {
Logger.Println("Consumer.MaxWaitTime is very low, which can cause high CPU and network usage. See documentation for details.")
}
if c.Consumer.MaxWaitTime%time.Millisecond != 0 {
Logger.Println("Consumer.MaxWaitTime only supports millisecond precision; nanoseconds will be truncated.")
}
if c.Consumer.Offsets.Retention%time.Millisecond != 0 {
Logger.Println("Consumer.Offsets.Retention only supports millisecond precision; nanoseconds will be truncated.")
}
if c.ClientID == defaultClientID {
Logger.Println("ClientID is the default of 'sarama', you should consider setting it to something application-specific.")
}
// validate Net values
switch {
case c.Net.MaxOpenRequests <= 0:
return ConfigurationError("Net.MaxOpenRequests must be > 0")
case c.Net.DialTimeout <= 0:
return ConfigurationError("Net.DialTimeout must be > 0")
case c.Net.ReadTimeout <= 0:
return ConfigurationError("Net.ReadTimeout must be > 0")
case c.Net.WriteTimeout <= 0:
return ConfigurationError("Net.WriteTimeout must be > 0")
case c.Net.KeepAlive < 0:
return ConfigurationError("Net.KeepAlive must be >= 0")
case c.Net.SASL.Enable && c.Net.SASL.User == "":
return ConfigurationError("Net.SASL.User must not be empty when SASL is enabled")
case c.Net.SASL.Enable && c.Net.SASL.Password == "":
return ConfigurationError("Net.SASL.Password must not be empty when SASL is enabled")
}
// validate the Metadata values
switch {
case c.Metadata.Retry.Max < 0:
return ConfigurationError("Metadata.Retry.Max must be >= 0")
case c.Metadata.Retry.Backoff < 0:
return ConfigurationError("Metadata.Retry.Backoff must be >= 0")
case c.Metadata.RefreshFrequency < 0:
return ConfigurationError("Metadata.RefreshFrequency must be >= 0")
}
// validate the Producer values
switch {
case c.Producer.MaxMessageBytes <= 0:
return ConfigurationError("Producer.MaxMessageBytes must be > 0")
case c.Producer.RequiredAcks < -1:
return ConfigurationError("Producer.RequiredAcks must be >= -1")
case c.Producer.Timeout <= 0:
return ConfigurationError("Producer.Timeout must be > 0")
case c.Producer.Partitioner == nil:
return ConfigurationError("Producer.Partitioner must not be nil")
case c.Producer.Flush.Bytes < 0:
return ConfigurationError("Producer.Flush.Bytes must be >= 0")
case c.Producer.Flush.Messages < 0:
return ConfigurationError("Producer.Flush.Messages must be >= 0")
case c.Producer.Flush.Frequency < 0:
return ConfigurationError("Producer.Flush.Frequency must be >= 0")
case c.Producer.Flush.MaxMessages < 0:
return ConfigurationError("Producer.Flush.MaxMessages must be >= 0")
case c.Producer.Flush.MaxMessages > 0 && c.Producer.Flush.MaxMessages < c.Producer.Flush.Messages:
return ConfigurationError("Producer.Flush.MaxMessages must be >= Producer.Flush.Messages when set")
case c.Producer.Retry.Max < 0:
return ConfigurationError("Producer.Retry.Max must be >= 0")
case c.Producer.Retry.Backoff < 0:
return ConfigurationError("Producer.Retry.Backoff must be >= 0")
}
if c.Producer.Compression == CompressionLZ4 && !c.Version.IsAtLeast(V0_10_0_0) {
return ConfigurationError("lz4 compression requires Version >= V0_10_0_0")
}
// validate the Consumer values
switch {
case c.Consumer.Fetch.Min <= 0:
return ConfigurationError("Consumer.Fetch.Min must be > 0")
case c.Consumer.Fetch.Default <= 0:
return ConfigurationError("Consumer.Fetch.Default must be > 0")
case c.Consumer.Fetch.Max < 0:
return ConfigurationError("Consumer.Fetch.Max must be >= 0")
case c.Consumer.MaxWaitTime < 1*time.Millisecond:
return ConfigurationError("Consumer.MaxWaitTime must be >= 1ms")
case c.Consumer.MaxProcessingTime <= 0:
return ConfigurationError("Consumer.MaxProcessingTime must be > 0")
case c.Consumer.Retry.Backoff < 0:
return ConfigurationError("Consumer.Retry.Backoff must be >= 0")
case c.Consumer.Offsets.CommitInterval <= 0:
return ConfigurationError("Consumer.Offsets.CommitInterval must be > 0")
case c.Consumer.Offsets.Initial != OffsetOldest && c.Consumer.Offsets.Initial != OffsetNewest:
return ConfigurationError("Consumer.Offsets.Initial must be OffsetOldest or OffsetNewest")
}
// validate misc shared values
switch {
case c.ChannelBufferSize < 0:
return ConfigurationError("ChannelBufferSize must be >= 0")
case !validID.MatchString(c.ClientID):
return ConfigurationError("ClientID is invalid")
}
return nil
}
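
As a quick orientation for reviewers, here is a minimal sketch of the intended usage of this config surface: start from NewConfig's defaults, override a few fields, and let Validate catch mistakes before any network activity. The client ID and the overridden values below are arbitrary examples, not recommendations.

package main

import (
	"log"
	"time"

	"github.com/Shopify/sarama"
)

func main() {
	// Start from the sane defaults and override only what we need.
	config := sarama.NewConfig()
	config.ClientID = "example-app" // avoid the default of "sarama"
	config.Consumer.Return.Errors = true // we promise to read the Errors channel
	config.Consumer.Offsets.Initial = sarama.OffsetOldest
	config.Consumer.MaxWaitTime = 500 * time.Millisecond

	// Validate catches nonsensical values before any connection is attempted.
	if err := config.Validate(); err != nil {
		log.Fatalf("invalid sarama config: %v", err)
	}
	log.Println("config OK")
}
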

749
vendor/github.com/Shopify/sarama/consumer.go generated vendored Normal file

@ -0,0 +1,749 @@
package sarama
import (
"errors"
"fmt"
"sync"
"sync/atomic"
"time"
)
// ConsumerMessage encapsulates a Kafka message returned by the consumer.
type ConsumerMessage struct {
Key, Value []byte
Topic string
Partition int32
Offset int64
Timestamp time.Time // only set if kafka is version 0.10+, inner message timestamp
BlockTimestamp time.Time // only set if kafka is version 0.10+, outer (compressed) block timestamp
}
// ConsumerError is what is provided to the user when an error occurs.
// It wraps an error and includes the topic and partition.
type ConsumerError struct {
Topic string
Partition int32
Err error
}
func (ce ConsumerError) Error() string {
return fmt.Sprintf("kafka: error while consuming %s/%d: %s", ce.Topic, ce.Partition, ce.Err)
}
// ConsumerErrors is a type that wraps a batch of errors and implements the Error interface.
// It can be returned from the PartitionConsumer's Close methods to avoid the need to manually drain errors
// when stopping.
type ConsumerErrors []*ConsumerError
func (ce ConsumerErrors) Error() string {
return fmt.Sprintf("kafka: %d errors while consuming", len(ce))
}
// Consumer manages PartitionConsumers which process Kafka messages from brokers. You MUST call Close()
// on a consumer to avoid leaks, it will not be garbage-collected automatically when it passes out of
// scope.
//
// Sarama's Consumer type does not currently support automatic consumer-group rebalancing and offset tracking.
// For Zookeeper-based tracking (Kafka 0.8.2 and earlier), the https://github.com/wvanbergen/kafka library
// builds on Sarama to add this support. For Kafka-based tracking (Kafka 0.9 and later), the
// https://github.com/bsm/sarama-cluster library builds on Sarama to add this support.
type Consumer interface {
// Topics returns the set of available topics as retrieved from the cluster
// metadata. This method is the same as Client.Topics(), and is provided for
// convenience.
Topics() ([]string, error)
// Partitions returns the sorted list of all partition IDs for the given topic.
// This method is the same as Client.Partitions(), and is provided for convenience.
Partitions(topic string) ([]int32, error)
// ConsumePartition creates a PartitionConsumer on the given topic/partition with
// the given offset. It will return an error if this Consumer is already consuming
// on the given topic/partition. Offset can be a literal offset, or OffsetNewest
// or OffsetOldest
ConsumePartition(topic string, partition int32, offset int64) (PartitionConsumer, error)
// HighWaterMarks returns the current high water marks for each topic and partition.
// Consistency between partitions is not guaranteed since high water marks are updated separately.
HighWaterMarks() map[string]map[int32]int64
// Close shuts down the consumer. It must be called after all child
// PartitionConsumers have already been closed.
Close() error
}
type consumer struct {
client Client
conf *Config
ownClient bool
lock sync.Mutex
children map[string]map[int32]*partitionConsumer
brokerConsumers map[*Broker]*brokerConsumer
}
// NewConsumer creates a new consumer using the given broker addresses and configuration.
func NewConsumer(addrs []string, config *Config) (Consumer, error) {
client, err := NewClient(addrs, config)
if err != nil {
return nil, err
}
c, err := NewConsumerFromClient(client)
if err != nil {
return nil, err
}
c.(*consumer).ownClient = true
return c, nil
}
// NewConsumerFromClient creates a new consumer using the given client. It is still
// necessary to call Close() on the underlying client when shutting down this consumer.
func NewConsumerFromClient(client Client) (Consumer, error) {
// Check that we are not dealing with a closed Client before processing any other arguments
if client.Closed() {
return nil, ErrClosedClient
}
c := &consumer{
client: client,
conf: client.Config(),
children: make(map[string]map[int32]*partitionConsumer),
brokerConsumers: make(map[*Broker]*brokerConsumer),
}
return c, nil
}
func (c *consumer) Close() error {
if c.ownClient {
return c.client.Close()
}
return nil
}
func (c *consumer) Topics() ([]string, error) {
return c.client.Topics()
}
func (c *consumer) Partitions(topic string) ([]int32, error) {
return c.client.Partitions(topic)
}
func (c *consumer) ConsumePartition(topic string, partition int32, offset int64) (PartitionConsumer, error) {
child := &partitionConsumer{
consumer: c,
conf: c.conf,
topic: topic,
partition: partition,
messages: make(chan *ConsumerMessage, c.conf.ChannelBufferSize),
errors: make(chan *ConsumerError, c.conf.ChannelBufferSize),
feeder: make(chan *FetchResponse, 1),
trigger: make(chan none, 1),
dying: make(chan none),
fetchSize: c.conf.Consumer.Fetch.Default,
}
if err := child.chooseStartingOffset(offset); err != nil {
return nil, err
}
var leader *Broker
var err error
if leader, err = c.client.Leader(child.topic, child.partition); err != nil {
return nil, err
}
if err := c.addChild(child); err != nil {
return nil, err
}
go withRecover(child.dispatcher)
go withRecover(child.responseFeeder)
child.broker = c.refBrokerConsumer(leader)
child.broker.input <- child
return child, nil
}
func (c *consumer) HighWaterMarks() map[string]map[int32]int64 {
c.lock.Lock()
defer c.lock.Unlock()
hwms := make(map[string]map[int32]int64)
for topic, p := range c.children {
hwm := make(map[int32]int64, len(p))
for partition, pc := range p {
hwm[partition] = pc.HighWaterMarkOffset()
}
hwms[topic] = hwm
}
return hwms
}
func (c *consumer) addChild(child *partitionConsumer) error {
c.lock.Lock()
defer c.lock.Unlock()
topicChildren := c.children[child.topic]
if topicChildren == nil {
topicChildren = make(map[int32]*partitionConsumer)
c.children[child.topic] = topicChildren
}
if topicChildren[child.partition] != nil {
return ConfigurationError("That topic/partition is already being consumed")
}
topicChildren[child.partition] = child
return nil
}
func (c *consumer) removeChild(child *partitionConsumer) {
c.lock.Lock()
defer c.lock.Unlock()
delete(c.children[child.topic], child.partition)
}
func (c *consumer) refBrokerConsumer(broker *Broker) *brokerConsumer {
c.lock.Lock()
defer c.lock.Unlock()
bc := c.brokerConsumers[broker]
if bc == nil {
bc = c.newBrokerConsumer(broker)
c.brokerConsumers[broker] = bc
}
bc.refs++
return bc
}
func (c *consumer) unrefBrokerConsumer(brokerWorker *brokerConsumer) {
c.lock.Lock()
defer c.lock.Unlock()
brokerWorker.refs--
if brokerWorker.refs == 0 {
close(brokerWorker.input)
if c.brokerConsumers[brokerWorker.broker] == brokerWorker {
delete(c.brokerConsumers, brokerWorker.broker)
}
}
}
func (c *consumer) abandonBrokerConsumer(brokerWorker *brokerConsumer) {
c.lock.Lock()
defer c.lock.Unlock()
delete(c.brokerConsumers, brokerWorker.broker)
}
// PartitionConsumer
// PartitionConsumer processes Kafka messages from a given topic and partition. You MUST call one of Close() or
// AsyncClose() on a PartitionConsumer to avoid leaks; it will not be garbage-collected automatically when it passes out
// of scope.
//
// The simplest way of using a PartitionConsumer is to loop over its Messages channel using a for/range
// loop. The PartitionConsumer will only stop itself in one case: when the offset being consumed is reported
// as out of range by the brokers. In this case you should decide what you want to do (try a different offset,
// notify a human, etc) and handle it appropriately. For all other error cases, it will just keep retrying.
// By default, it logs these errors to sarama.Logger; if you want to be notified directly of all errors, set
// your config's Consumer.Return.Errors to true and read from the Errors channel, using a select statement
// or a separate goroutine. Check out the Consumer examples to see implementations of these different approaches.
//
// To terminate such a for/range loop while the loop is executing, call AsyncClose. This will kick off the process of
// consumer tear-down and return immediately. Continue to loop, servicing the Messages channel until the teardown
// process initiated by AsyncClose closes it (thus terminating the for/range loop). If you've already ceased reading Messages, call
// Close; this will signal the PartitionConsumer's goroutines to begin shutting down (just like AsyncClose), but will
// also drain the Messages channel, harvest all errors & return them once cleanup has completed.
type PartitionConsumer interface {
// AsyncClose initiates a shutdown of the PartitionConsumer. This method will return immediately, after which you
// should continue to service the 'Messages' and 'Errors' channels until they are empty. It is required to call this
function (or Close) before a consumer object passes out of scope, as it will otherwise leak memory. You must call
// this before calling Close on the underlying client.
AsyncClose()
// Close stops the PartitionConsumer from fetching messages. It will initiate a shutdown just like AsyncClose, drain
// the Messages channel, harvest any errors & return them to the caller. Note that if you are continuing to service
// the Messages channel when this function is called, you will be competing with Close for messages; consider
// calling AsyncClose, instead. It is required to call this function (or AsyncClose) before a consumer object passes
// out of scope, as it will otherwise leak memory. You must call this before calling Close on the underlying client.
Close() error
// Messages returns the read channel for the messages that are returned by
// the broker.
Messages() <-chan *ConsumerMessage
// Errors returns a read channel of errors that occurred during consuming, if
// enabled. By default, errors are logged and not returned over this channel.
// If you want to implement any custom error handling, set your config's
// Consumer.Return.Errors setting to true, and read from this channel.
Errors() <-chan *ConsumerError
// HighWaterMarkOffset returns the high water mark offset of the partition,
// i.e. the offset that will be used for the next message that will be produced.
// You can use this to determine how far behind the processing is.
HighWaterMarkOffset() int64
}
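
The doc comments above describe the canonical usage pattern; a minimal, self-contained sketch of it follows. The broker address, topic, and partition are placeholders.

package main

import (
	"log"
	"os"
	"os/signal"

	"github.com/Shopify/sarama"
)

func main() {
	consumer, err := sarama.NewConsumer([]string{"localhost:9092"}, sarama.NewConfig())
	if err != nil {
		log.Fatal(err)
	}
	defer consumer.Close()

	pc, err := consumer.ConsumePartition("events", 0, sarama.OffsetNewest)
	if err != nil {
		log.Fatal(err)
	}
	defer pc.Close()

	signals := make(chan os.Signal, 1)
	signal.Notify(signals, os.Interrupt)

	// With the default config, consume errors are logged rather than returned,
	// so we only need to service the Messages channel here.
	for {
		select {
		case msg := <-pc.Messages():
			// lag = high water mark - (offset just consumed + 1)
			lag := pc.HighWaterMarkOffset() - msg.Offset - 1
			log.Printf("offset=%d lag=%d value=%s", msg.Offset, lag, msg.Value)
		case <-signals:
			return
		}
	}
}
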
type partitionConsumer struct {
highWaterMarkOffset int64 // must be at the top of the struct because https://golang.org/pkg/sync/atomic/#pkg-note-BUG
consumer *consumer
conf *Config
topic string
partition int32
broker *brokerConsumer
messages chan *ConsumerMessage
errors chan *ConsumerError
feeder chan *FetchResponse
trigger, dying chan none
responseResult error
fetchSize int32
offset int64
}
var errTimedOut = errors.New("timed out feeding messages to the user") // not user-facing
func (child *partitionConsumer) sendError(err error) {
cErr := &ConsumerError{
Topic: child.topic,
Partition: child.partition,
Err: err,
}
if child.conf.Consumer.Return.Errors {
child.errors <- cErr
} else {
Logger.Println(cErr)
}
}
func (child *partitionConsumer) dispatcher() {
for range child.trigger {
select {
case <-child.dying:
close(child.trigger)
case <-time.After(child.conf.Consumer.Retry.Backoff):
if child.broker != nil {
child.consumer.unrefBrokerConsumer(child.broker)
child.broker = nil
}
Logger.Printf("consumer/%s/%d finding new broker\n", child.topic, child.partition)
if err := child.dispatch(); err != nil {
child.sendError(err)
child.trigger <- none{}
}
}
}
if child.broker != nil {
child.consumer.unrefBrokerConsumer(child.broker)
}
child.consumer.removeChild(child)
close(child.feeder)
}
func (child *partitionConsumer) dispatch() error {
if err := child.consumer.client.RefreshMetadata(child.topic); err != nil {
return err
}
var leader *Broker
var err error
if leader, err = child.consumer.client.Leader(child.topic, child.partition); err != nil {
return err
}
child.broker = child.consumer.refBrokerConsumer(leader)
child.broker.input <- child
return nil
}
func (child *partitionConsumer) chooseStartingOffset(offset int64) error {
newestOffset, err := child.consumer.client.GetOffset(child.topic, child.partition, OffsetNewest)
if err != nil {
return err
}
oldestOffset, err := child.consumer.client.GetOffset(child.topic, child.partition, OffsetOldest)
if err != nil {
return err
}
switch {
case offset == OffsetNewest:
child.offset = newestOffset
case offset == OffsetOldest:
child.offset = oldestOffset
case offset >= oldestOffset && offset <= newestOffset:
child.offset = offset
default:
return ErrOffsetOutOfRange
}
return nil
}
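
chooseStartingOffset is why ConsumePartition returns ErrOffsetOutOfRange when a stored offset has fallen outside the broker's retained range. A caller resuming from a saved position can handle that case explicitly; a sketch, where the source of savedOffset (a file, a database, etc.) is left to the application:

package consumerutil

import "github.com/Shopify/sarama"

// resumePartition tries to resume from a previously saved offset and falls
// back to the oldest available offset if the saved one has been truncated
// away by retention.
func resumePartition(c sarama.Consumer, topic string, partition int32, savedOffset int64) (sarama.PartitionConsumer, error) {
	pc, err := c.ConsumePartition(topic, partition, savedOffset)
	if err == sarama.ErrOffsetOutOfRange {
		// the log has moved past our saved position; start from the oldest offset
		return c.ConsumePartition(topic, partition, sarama.OffsetOldest)
	}
	return pc, err
}
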
func (child *partitionConsumer) Messages() <-chan *ConsumerMessage {
return child.messages
}
func (child *partitionConsumer) Errors() <-chan *ConsumerError {
return child.errors
}
func (child *partitionConsumer) AsyncClose() {
// this triggers whatever broker owns this child to abandon it and close its trigger channel, which causes
// the dispatcher to exit its loop, which removes it from the consumer, then closes its 'messages' and
// 'errors' channels (alternatively, if the child is already at the dispatcher for some reason, that will
// also just close itself)
close(child.dying)
}
func (child *partitionConsumer) Close() error {
child.AsyncClose()
go withRecover(func() {
for range child.messages {
// drain
}
})
var errors ConsumerErrors
for err := range child.errors {
errors = append(errors, err)
}
if len(errors) > 0 {
return errors
}
return nil
}
func (child *partitionConsumer) HighWaterMarkOffset() int64 {
return atomic.LoadInt64(&child.highWaterMarkOffset)
}
func (child *partitionConsumer) responseFeeder() {
var msgs []*ConsumerMessage
msgSent := false
feederLoop:
for response := range child.feeder {
msgs, child.responseResult = child.parseResponse(response)
expiryTicker := time.NewTicker(child.conf.Consumer.MaxProcessingTime)
for i, msg := range msgs {
messageSelect:
select {
case child.messages <- msg:
msgSent = true
case <-expiryTicker.C:
if !msgSent {
child.responseResult = errTimedOut
child.broker.acks.Done()
for _, msg = range msgs[i:] {
child.messages <- msg
}
child.broker.input <- child
continue feederLoop
} else {
// a message was sent since the last tick, so the consumer is not
// stalled; reset the flag and retry sending the current message
msgSent = false
goto messageSelect
}
}
}
expiryTicker.Stop()
child.broker.acks.Done()
}
close(child.messages)
close(child.errors)
}
func (child *partitionConsumer) parseResponse(response *FetchResponse) ([]*ConsumerMessage, error) {
block := response.GetBlock(child.topic, child.partition)
if block == nil {
return nil, ErrIncompleteResponse
}
if block.Err != ErrNoError {
return nil, block.Err
}
if len(block.MsgSet.Messages) == 0 {
// We got no messages. If we got a trailing one then we need to ask for more data.
// Otherwise we just poll again and wait for one to be produced...
if block.MsgSet.PartialTrailingMessage {
if child.conf.Consumer.Fetch.Max > 0 && child.fetchSize == child.conf.Consumer.Fetch.Max {
// we can't ask for more data, we've hit the configured limit
child.sendError(ErrMessageTooLarge)
child.offset++ // skip this one so we can keep processing future messages
} else {
child.fetchSize *= 2
if child.conf.Consumer.Fetch.Max > 0 && child.fetchSize > child.conf.Consumer.Fetch.Max {
child.fetchSize = child.conf.Consumer.Fetch.Max
}
}
}
return nil, nil
}
// we got messages, reset our fetch size in case it was increased for a previous request
child.fetchSize = child.conf.Consumer.Fetch.Default
atomic.StoreInt64(&child.highWaterMarkOffset, block.HighWaterMarkOffset)
incomplete := false
prelude := true
var messages []*ConsumerMessage
for _, msgBlock := range block.MsgSet.Messages {
for _, msg := range msgBlock.Messages() {
offset := msg.Offset
if msg.Msg.Version >= 1 {
baseOffset := msgBlock.Offset - msgBlock.Messages()[len(msgBlock.Messages())-1].Offset
offset += baseOffset
}
if prelude && offset < child.offset {
continue
}
prelude = false
if offset >= child.offset {
messages = append(messages, &ConsumerMessage{
Topic: child.topic,
Partition: child.partition,
Key: msg.Msg.Key,
Value: msg.Msg.Value,
Offset: offset,
Timestamp: msg.Msg.Timestamp,
BlockTimestamp: msgBlock.Msg.Timestamp,
})
child.offset = offset + 1
} else {
incomplete = true
}
}
}
if incomplete || len(messages) == 0 {
return nil, ErrIncompleteResponse
}
return messages, nil
}
// brokerConsumer
type brokerConsumer struct {
consumer *consumer
broker *Broker
input chan *partitionConsumer
newSubscriptions chan []*partitionConsumer
wait chan none
subscriptions map[*partitionConsumer]none
acks sync.WaitGroup
refs int
}
func (c *consumer) newBrokerConsumer(broker *Broker) *brokerConsumer {
bc := &brokerConsumer{
consumer: c,
broker: broker,
input: make(chan *partitionConsumer),
newSubscriptions: make(chan []*partitionConsumer),
wait: make(chan none),
subscriptions: make(map[*partitionConsumer]none),
refs: 0,
}
go withRecover(bc.subscriptionManager)
go withRecover(bc.subscriptionConsumer)
return bc
}
func (bc *brokerConsumer) subscriptionManager() {
var buffer []*partitionConsumer
// The subscriptionManager constantly accepts new subscriptions on `input` (even when the main subscriptionConsumer
// goroutine is in the middle of a network request) and batches them up. The main worker goroutine picks
// up a batch of new subscriptions between every network request by reading from `newSubscriptions`, so we give
// it nil if no new subscriptions are available. We also write to `wait` only when new subscriptions are available,
// so the main goroutine can block waiting for work if it has none.
for {
if len(buffer) > 0 {
select {
case event, ok := <-bc.input:
if !ok {
goto done
}
buffer = append(buffer, event)
case bc.newSubscriptions <- buffer:
buffer = nil
case bc.wait <- none{}:
}
} else {
select {
case event, ok := <-bc.input:
if !ok {
goto done
}
buffer = append(buffer, event)
case bc.newSubscriptions <- nil:
}
}
}
done:
close(bc.wait)
if len(buffer) > 0 {
bc.newSubscriptions <- buffer
}
close(bc.newSubscriptions)
}
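
The pattern described above — one goroutine buffering incoming work while another alternates between draining the batch and doing a blocking round-trip — is generic. A simplified, self-contained sketch (without the `wait` channel, and using ints in place of partition consumers):

package main

import "fmt"

// batcher accumulates items from in while the consumer is busy, and hands the
// consumer either the accumulated batch or nil (meaning "no new work") on out.
func batcher(in <-chan int, out chan<- []int) {
	var buffer []int
	for {
		if len(buffer) > 0 {
			select {
			case v, ok := <-in:
				if !ok {
					out <- buffer // flush what we have before shutting down
					close(out)
					return
				}
				buffer = append(buffer, v)
			case out <- buffer:
				buffer = nil
			}
		} else {
			select {
			case v, ok := <-in:
				if !ok {
					close(out)
					return
				}
				buffer = append(buffer, v)
			case out <- nil:
			}
		}
	}
}

func main() {
	in := make(chan int)
	out := make(chan []int)
	go batcher(in, out)
	go func() {
		for i := 0; i < 5; i++ {
			in <- i
		}
		close(in)
	}()
	for batch := range out {
		if batch == nil {
			continue // no new work; the real code does a fetch round-trip here
		}
		fmt.Println("batch:", batch)
	}
}
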
func (bc *brokerConsumer) subscriptionConsumer() {
<-bc.wait // wait for our first piece of work
// the subscriptionManager ensures we will get nil right away if no new subscriptions are available
for newSubscriptions := range bc.newSubscriptions {
bc.updateSubscriptions(newSubscriptions)
if len(bc.subscriptions) == 0 {
// We're about to be shut down or we're about to receive more subscriptions.
// Either way, the signal just hasn't propagated to our goroutine yet.
<-bc.wait
continue
}
response, err := bc.fetchNewMessages()
if err != nil {
Logger.Printf("consumer/broker/%d disconnecting due to error processing FetchRequest: %s\n", bc.broker.ID(), err)
bc.abort(err)
return
}
bc.acks.Add(len(bc.subscriptions))
for child := range bc.subscriptions {
child.feeder <- response
}
bc.acks.Wait()
bc.handleResponses()
}
}
func (bc *brokerConsumer) updateSubscriptions(newSubscriptions []*partitionConsumer) {
for _, child := range newSubscriptions {
bc.subscriptions[child] = none{}
Logger.Printf("consumer/broker/%d added subscription to %s/%d\n", bc.broker.ID(), child.topic, child.partition)
}
for child := range bc.subscriptions {
select {
case <-child.dying:
Logger.Printf("consumer/broker/%d closed dead subscription to %s/%d\n", bc.broker.ID(), child.topic, child.partition)
close(child.trigger)
delete(bc.subscriptions, child)
default:
break
}
}
}
func (bc *brokerConsumer) handleResponses() {
// handles the response codes left for us by our subscriptions, and abandons ones that have been closed
for child := range bc.subscriptions {
result := child.responseResult
child.responseResult = nil
switch result {
case nil:
break
case errTimedOut:
Logger.Printf("consumer/broker/%d abandoned subscription to %s/%d because consuming was taking too long\n",
bc.broker.ID(), child.topic, child.partition)
delete(bc.subscriptions, child)
case ErrOffsetOutOfRange:
// there's no point in retrying this; it will just fail the same way again,
// so shut it down and force the user to choose what to do
child.sendError(result)
Logger.Printf("consumer/%s/%d shutting down because %s\n", child.topic, child.partition, result)
close(child.trigger)
delete(bc.subscriptions, child)
case ErrUnknownTopicOrPartition, ErrNotLeaderForPartition, ErrLeaderNotAvailable, ErrReplicaNotAvailable:
// not an error, but does need redispatching
Logger.Printf("consumer/broker/%d abandoned subscription to %s/%d because %s\n",
bc.broker.ID(), child.topic, child.partition, result)
child.trigger <- none{}
delete(bc.subscriptions, child)
default:
// unknown error; tell the user and try redispatching
child.sendError(result)
Logger.Printf("consumer/broker/%d abandoned subscription to %s/%d because %s\n",
bc.broker.ID(), child.topic, child.partition, result)
child.trigger <- none{}
delete(bc.subscriptions, child)
}
}
}
func (bc *brokerConsumer) abort(err error) {
bc.consumer.abandonBrokerConsumer(bc)
_ = bc.broker.Close() // we don't care about the error this might return, we already have one
for child := range bc.subscriptions {
child.sendError(err)
child.trigger <- none{}
}
for newSubscriptions := range bc.newSubscriptions {
if len(newSubscriptions) == 0 {
<-bc.wait
continue
}
for _, child := range newSubscriptions {
child.sendError(err)
child.trigger <- none{}
}
}
}
func (bc *brokerConsumer) fetchNewMessages() (*FetchResponse, error) {
request := &FetchRequest{
MinBytes: bc.consumer.conf.Consumer.Fetch.Min,
MaxWaitTime: int32(bc.consumer.conf.Consumer.MaxWaitTime / time.Millisecond),
}
if bc.consumer.conf.Version.IsAtLeast(V0_10_0_0) {
request.Version = 2
}
if bc.consumer.conf.Version.IsAtLeast(V0_10_1_0) {
request.Version = 3
request.MaxBytes = MaxResponseSize
}
for child := range bc.subscriptions {
request.AddBlock(child.topic, child.partition, child.offset, child.fetchSize)
}
return bc.broker.Fetch(request)
}

94
vendor/github.com/Shopify/sarama/consumer_group_members.go generated vendored Normal file

@ -0,0 +1,94 @@
package sarama
type ConsumerGroupMemberMetadata struct {
Version int16
Topics []string
UserData []byte
}
func (m *ConsumerGroupMemberMetadata) encode(pe packetEncoder) error {
pe.putInt16(m.Version)
if err := pe.putStringArray(m.Topics); err != nil {
return err
}
if err := pe.putBytes(m.UserData); err != nil {
return err
}
return nil
}
func (m *ConsumerGroupMemberMetadata) decode(pd packetDecoder) (err error) {
if m.Version, err = pd.getInt16(); err != nil {
return
}
if m.Topics, err = pd.getStringArray(); err != nil {
return
}
if m.UserData, err = pd.getBytes(); err != nil {
return
}
return nil
}
type ConsumerGroupMemberAssignment struct {
Version int16
Topics map[string][]int32
UserData []byte
}
func (m *ConsumerGroupMemberAssignment) encode(pe packetEncoder) error {
pe.putInt16(m.Version)
if err := pe.putArrayLength(len(m.Topics)); err != nil {
return err
}
for topic, partitions := range m.Topics {
if err := pe.putString(topic); err != nil {
return err
}
if err := pe.putInt32Array(partitions); err != nil {
return err
}
}
if err := pe.putBytes(m.UserData); err != nil {
return err
}
return nil
}
func (m *ConsumerGroupMemberAssignment) decode(pd packetDecoder) (err error) {
if m.Version, err = pd.getInt16(); err != nil {
return
}
var topicLen int
if topicLen, err = pd.getArrayLength(); err != nil {
return
}
m.Topics = make(map[string][]int32, topicLen)
for i := 0; i < topicLen; i++ {
var topic string
if topic, err = pd.getString(); err != nil {
return
}
if m.Topics[topic], err = pd.getInt32Array(); err != nil {
return
}
}
if m.UserData, err = pd.getBytes(); err != nil {
return
}
return nil
}

26
vendor/github.com/Shopify/sarama/consumer_metadata_request.go generated vendored Normal file

@ -0,0 +1,26 @@
package sarama
type ConsumerMetadataRequest struct {
ConsumerGroup string
}
func (r *ConsumerMetadataRequest) encode(pe packetEncoder) error {
return pe.putString(r.ConsumerGroup)
}
func (r *ConsumerMetadataRequest) decode(pd packetDecoder, version int16) (err error) {
r.ConsumerGroup, err = pd.getString()
return err
}
func (r *ConsumerMetadataRequest) key() int16 {
return 10
}
func (r *ConsumerMetadataRequest) version() int16 {
return 0
}
func (r *ConsumerMetadataRequest) requiredVersion() KafkaVersion {
return V0_8_2_0
}

85
vendor/github.com/Shopify/sarama/consumer_metadata_response.go generated vendored Normal file

@ -0,0 +1,85 @@
package sarama
import (
"net"
"strconv"
)
type ConsumerMetadataResponse struct {
Err KError
Coordinator *Broker
CoordinatorID int32 // deprecated: use Coordinator.ID()
CoordinatorHost string // deprecated: use Coordinator.Addr()
CoordinatorPort int32 // deprecated: use Coordinator.Addr()
}
func (r *ConsumerMetadataResponse) decode(pd packetDecoder, version int16) (err error) {
tmp, err := pd.getInt16()
if err != nil {
return err
}
r.Err = KError(tmp)
coordinator := new(Broker)
if err := coordinator.decode(pd); err != nil {
return err
}
if coordinator.addr == ":0" {
return nil
}
r.Coordinator = coordinator
// this can all go away in 2.0, but we have to fill in deprecated fields to maintain
// backwards compatibility
host, portstr, err := net.SplitHostPort(r.Coordinator.Addr())
if err != nil {
return err
}
port, err := strconv.ParseInt(portstr, 10, 32)
if err != nil {
return err
}
r.CoordinatorID = r.Coordinator.ID()
r.CoordinatorHost = host
r.CoordinatorPort = int32(port)
return nil
}
func (r *ConsumerMetadataResponse) encode(pe packetEncoder) error {
pe.putInt16(int16(r.Err))
if r.Coordinator != nil {
host, portstr, err := net.SplitHostPort(r.Coordinator.Addr())
if err != nil {
return err
}
port, err := strconv.ParseInt(portstr, 10, 32)
if err != nil {
return err
}
pe.putInt32(r.Coordinator.ID())
if err := pe.putString(host); err != nil {
return err
}
pe.putInt32(int32(port))
return nil
}
pe.putInt32(r.CoordinatorID)
if err := pe.putString(r.CoordinatorHost); err != nil {
return err
}
pe.putInt32(r.CoordinatorPort)
return nil
}
func (r *ConsumerMetadataResponse) key() int16 {
return 10
}
func (r *ConsumerMetadataResponse) version() int16 {
return 0
}
func (r *ConsumerMetadataResponse) requiredVersion() KafkaVersion {
return V0_8_2_0
}
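
For reviewers unfamiliar with this request type: it is how a client locates a consumer group's coordinator. A sketch of issuing it directly through a Broker via the GetConsumerMetadata helper; the address and group name are placeholders:

package main

import (
	"log"

	"github.com/Shopify/sarama"
)

func main() {
	broker := sarama.NewBroker("localhost:9092") // placeholder address
	if err := broker.Open(sarama.NewConfig()); err != nil {
		log.Fatal(err)
	}
	defer broker.Close()

	resp, err := broker.GetConsumerMetadata(&sarama.ConsumerMetadataRequest{
		ConsumerGroup: "example-group",
	})
	if err != nil {
		log.Fatal(err)
	}
	if resp.Err != sarama.ErrNoError {
		log.Fatalf("broker returned %v", resp.Err)
	}
	log.Printf("coordinator for example-group is %s (id %d)",
		resp.Coordinator.Addr(), resp.Coordinator.ID())
}
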

37
vendor/github.com/Shopify/sarama/crc32_field.go generated vendored Normal file

@ -0,0 +1,37 @@
package sarama
import (
"encoding/binary"
"fmt"
"hash/crc32"
)
// crc32Field implements the pushEncoder and pushDecoder interfaces for calculating CRC32s.
type crc32Field struct {
startOffset int
}
func (c *crc32Field) saveOffset(in int) {
c.startOffset = in
}
func (c *crc32Field) reserveLength() int {
return 4
}
func (c *crc32Field) run(curOffset int, buf []byte) error {
crc := crc32.ChecksumIEEE(buf[c.startOffset+4 : curOffset])
binary.BigEndian.PutUint32(buf[c.startOffset:], crc)
return nil
}
func (c *crc32Field) check(curOffset int, buf []byte) error {
crc := crc32.ChecksumIEEE(buf[c.startOffset+4 : curOffset])
expected := binary.BigEndian.Uint32(buf[c.startOffset:])
if crc != expected {
return PacketDecodingError{fmt.Sprintf("CRC didn't match expected %#x got %#x", expected, crc)}
}
return nil
}
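
The framing crc32Field implements is simple enough to show with the standard library alone: a 4-byte big-endian CRC32-IEEE prefix computed over everything after it. A standalone sketch:

package main

import (
	"encoding/binary"
	"fmt"
	"hash/crc32"
)

// frame prepends a big-endian CRC32-IEEE of payload, mirroring the layout
// that crc32Field.run produces inside an encoded Kafka message.
func frame(payload []byte) []byte {
	buf := make([]byte, 4+len(payload))
	copy(buf[4:], payload)
	binary.BigEndian.PutUint32(buf, crc32.ChecksumIEEE(buf[4:]))
	return buf
}

// verify re-computes the checksum the way crc32Field.check does.
func verify(buf []byte) bool {
	return len(buf) >= 4 &&
		binary.BigEndian.Uint32(buf) == crc32.ChecksumIEEE(buf[4:])
}

func main() {
	b := frame([]byte("hello"))
	fmt.Printf("% x -> valid=%v\n", b, verify(b))
	b[5] ^= 0xff // corrupt one payload byte
	fmt.Printf("% x -> valid=%v\n", b, verify(b))
}
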

30
vendor/github.com/Shopify/sarama/describe_groups_request.go generated vendored Normal file

@ -0,0 +1,30 @@
package sarama
type DescribeGroupsRequest struct {
Groups []string
}
func (r *DescribeGroupsRequest) encode(pe packetEncoder) error {
return pe.putStringArray(r.Groups)
}
func (r *DescribeGroupsRequest) decode(pd packetDecoder, version int16) (err error) {
r.Groups, err = pd.getStringArray()
return
}
func (r *DescribeGroupsRequest) key() int16 {
return 15
}
func (r *DescribeGroupsRequest) version() int16 {
return 0
}
func (r *DescribeGroupsRequest) requiredVersion() KafkaVersion {
return V0_9_0_0
}
func (r *DescribeGroupsRequest) AddGroup(group string) {
r.Groups = append(r.Groups, group)
}

187
vendor/github.com/Shopify/sarama/describe_groups_response.go generated vendored Normal file

@ -0,0 +1,187 @@
package sarama
type DescribeGroupsResponse struct {
Groups []*GroupDescription
}
func (r *DescribeGroupsResponse) encode(pe packetEncoder) error {
if err := pe.putArrayLength(len(r.Groups)); err != nil {
return err
}
for _, groupDescription := range r.Groups {
if err := groupDescription.encode(pe); err != nil {
return err
}
}
return nil
}
func (r *DescribeGroupsResponse) decode(pd packetDecoder, version int16) (err error) {
n, err := pd.getArrayLength()
if err != nil {
return err
}
r.Groups = make([]*GroupDescription, n)
for i := 0; i < n; i++ {
r.Groups[i] = new(GroupDescription)
if err := r.Groups[i].decode(pd); err != nil {
return err
}
}
return nil
}
func (r *DescribeGroupsResponse) key() int16 {
return 15
}
func (r *DescribeGroupsResponse) version() int16 {
return 0
}
func (r *DescribeGroupsResponse) requiredVersion() KafkaVersion {
return V0_9_0_0
}
type GroupDescription struct {
Err KError
GroupId string
State string
ProtocolType string
Protocol string
Members map[string]*GroupMemberDescription
}
func (gd *GroupDescription) encode(pe packetEncoder) error {
pe.putInt16(int16(gd.Err))
if err := pe.putString(gd.GroupId); err != nil {
return err
}
if err := pe.putString(gd.State); err != nil {
return err
}
if err := pe.putString(gd.ProtocolType); err != nil {
return err
}
if err := pe.putString(gd.Protocol); err != nil {
return err
}
if err := pe.putArrayLength(len(gd.Members)); err != nil {
return err
}
for memberId, groupMemberDescription := range gd.Members {
if err := pe.putString(memberId); err != nil {
return err
}
if err := groupMemberDescription.encode(pe); err != nil {
return err
}
}
return nil
}
func (gd *GroupDescription) decode(pd packetDecoder) (err error) {
kerr, err := pd.getInt16()
if err != nil {
return err
}
gd.Err = KError(kerr)
if gd.GroupId, err = pd.getString(); err != nil {
return
}
if gd.State, err = pd.getString(); err != nil {
return
}
if gd.ProtocolType, err = pd.getString(); err != nil {
return
}
if gd.Protocol, err = pd.getString(); err != nil {
return
}
n, err := pd.getArrayLength()
if err != nil {
return err
}
if n == 0 {
return nil
}
gd.Members = make(map[string]*GroupMemberDescription)
for i := 0; i < n; i++ {
memberId, err := pd.getString()
if err != nil {
return err
}
gd.Members[memberId] = new(GroupMemberDescription)
if err := gd.Members[memberId].decode(pd); err != nil {
return err
}
}
return nil
}
type GroupMemberDescription struct {
ClientId string
ClientHost string
MemberMetadata []byte
MemberAssignment []byte
}
func (gmd *GroupMemberDescription) encode(pe packetEncoder) error {
if err := pe.putString(gmd.ClientId); err != nil {
return err
}
if err := pe.putString(gmd.ClientHost); err != nil {
return err
}
if err := pe.putBytes(gmd.MemberMetadata); err != nil {
return err
}
if err := pe.putBytes(gmd.MemberAssignment); err != nil {
return err
}
return nil
}
func (gmd *GroupMemberDescription) decode(pd packetDecoder) (err error) {
if gmd.ClientId, err = pd.getString(); err != nil {
return
}
if gmd.ClientHost, err = pd.getString(); err != nil {
return
}
if gmd.MemberMetadata, err = pd.getBytes(); err != nil {
return
}
if gmd.MemberAssignment, err = pd.getBytes(); err != nil {
return
}
return nil
}
func (gmd *GroupMemberDescription) GetMemberAssignment() (*ConsumerGroupMemberAssignment, error) {
assignment := new(ConsumerGroupMemberAssignment)
err := decode(gmd.MemberAssignment, assignment)
return assignment, err
}
func (gmd *GroupMemberDescription) GetMemberMetadata() (*ConsumerGroupMemberMetadata, error) {
metadata := new(ConsumerGroupMemberMetadata)
err := decode(gmd.MemberMetadata, metadata)
return metadata, err
}
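
GetMemberAssignment and GetMemberMetadata are the exported path to the ConsumerGroupMember* structs defined earlier in this commit. A sketch of describing a group and decoding each member's assignment; the address and group name are placeholders, and the broker must be the group's coordinator:

package main

import (
	"log"

	"github.com/Shopify/sarama"
)

func main() {
	broker := sarama.NewBroker("localhost:9092") // placeholder; should be the coordinator
	if err := broker.Open(sarama.NewConfig()); err != nil {
		log.Fatal(err)
	}
	defer broker.Close()

	req := &sarama.DescribeGroupsRequest{}
	req.AddGroup("example-group")
	resp, err := broker.DescribeGroups(req)
	if err != nil {
		log.Fatal(err)
	}
	for _, group := range resp.Groups {
		for memberID, member := range group.Members {
			assignment, err := member.GetMemberAssignment()
			if err != nil {
				log.Printf("member %s: cannot decode assignment: %v", memberID, err)
				continue
			}
			// Topics maps topic name -> owned partition IDs.
			log.Printf("member %s owns %v", memberID, assignment.Topics)
		}
	}
}
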

14
vendor/github.com/Shopify/sarama/dev.yml generated vendored Normal file

@ -0,0 +1,14 @@
name: sarama

up:
  - go:
      version: '1.8'

commands:
  test:
    run: make test
    desc: 'run unit tests'

packages:
  - git@github.com:Shopify/dev-shopify.git

89
vendor/github.com/Shopify/sarama/encoder_decoder.go generated vendored Normal file

@ -0,0 +1,89 @@
package sarama
import (
"fmt"
"github.com/rcrowley/go-metrics"
)
// Encoder is the interface that wraps the basic Encode method.
// Anything implementing Encoder can be turned into bytes using Kafka's encoding rules.
type encoder interface {
encode(pe packetEncoder) error
}
// Encode takes an Encoder and turns it into bytes while potentially recording metrics.
func encode(e encoder, metricRegistry metrics.Registry) ([]byte, error) {
if e == nil {
return nil, nil
}
var prepEnc prepEncoder
var realEnc realEncoder
err := e.encode(&prepEnc)
if err != nil {
return nil, err
}
if prepEnc.length < 0 || prepEnc.length > int(MaxRequestSize) {
return nil, PacketEncodingError{fmt.Sprintf("invalid request size (%d)", prepEnc.length)}
}
realEnc.raw = make([]byte, prepEnc.length)
realEnc.registry = metricRegistry
err = e.encode(&realEnc)
if err != nil {
return nil, err
}
return realEnc.raw, nil
}
// Decoder is the interface that wraps the basic Decode method.
// Anything implementing Decoder can be extracted from bytes using Kafka's encoding rules.
type decoder interface {
decode(pd packetDecoder) error
}
type versionedDecoder interface {
decode(pd packetDecoder, version int16) error
}
// Decode takes bytes and a Decoder and fills the fields of the decoder from the bytes,
// interpreted using Kafka's encoding rules.
func decode(buf []byte, in decoder) error {
if buf == nil {
return nil
}
helper := realDecoder{raw: buf}
err := in.decode(&helper)
if err != nil {
return err
}
if helper.off != len(buf) {
return PacketDecodingError{"invalid length"}
}
return nil
}
func versionedDecode(buf []byte, in versionedDecoder, version int16) error {
if buf == nil {
return nil
}
helper := realDecoder{raw: buf}
err := in.decode(&helper, version)
if err != nil {
return err
}
if helper.off != len(buf) {
return PacketDecodingError{"invalid length"}
}
return nil
}

221
vendor/github.com/Shopify/sarama/errors.go generated vendored Normal file

@ -0,0 +1,221 @@
package sarama
import (
"errors"
"fmt"
)
// ErrOutOfBrokers is the error returned when the client has run out of brokers to talk to because all of them errored
// or otherwise failed to respond.
var ErrOutOfBrokers = errors.New("kafka: client has run out of available brokers to talk to (Is your cluster reachable?)")
// ErrClosedClient is the error returned when a method is called on a client that has been closed.
var ErrClosedClient = errors.New("kafka: tried to use a client that was closed")
// ErrIncompleteResponse is the error returned when the server returns a syntactically valid response, but it does
// not contain the expected information.
var ErrIncompleteResponse = errors.New("kafka: response did not contain all the expected topic/partition blocks")
// ErrInvalidPartition is the error returned when a partitioner returns an invalid partition index
// (meaning one outside of the range [0...numPartitions-1]).
var ErrInvalidPartition = errors.New("kafka: partitioner returned an invalid partition index")
// ErrAlreadyConnected is the error returned when calling Open() on a Broker that is already connected or connecting.
var ErrAlreadyConnected = errors.New("kafka: broker connection already initiated")
// ErrNotConnected is the error returned when trying to send or call Close() on a Broker that is not connected.
var ErrNotConnected = errors.New("kafka: broker not connected")
// ErrInsufficientData is returned when decoding and the packet is truncated. This can be expected
// when requesting messages, since as an optimization the server is allowed to return a partial message at the end
// of the message set.
var ErrInsufficientData = errors.New("kafka: insufficient data to decode packet, more bytes expected")
// ErrShuttingDown is returned when a producer receives a message during shutdown.
var ErrShuttingDown = errors.New("kafka: message received by producer in process of shutting down")
// ErrMessageTooLarge is returned when the next message to consume is larger than the configured Consumer.Fetch.Max
var ErrMessageTooLarge = errors.New("kafka: message is larger than Consumer.Fetch.Max")
// PacketEncodingError is returned from a failure while encoding a Kafka packet. This can happen, for example,
// if you try to encode a string over 2^15 characters in length, since Kafka's encoding rules do not permit that.
type PacketEncodingError struct {
Info string
}
func (err PacketEncodingError) Error() string {
return fmt.Sprintf("kafka: error encoding packet: %s", err.Info)
}
// PacketDecodingError is returned when there was an error (other than truncated data) decoding the Kafka broker's response.
// This can be a bad CRC or length field, or any other invalid value.
type PacketDecodingError struct {
Info string
}
func (err PacketDecodingError) Error() string {
return fmt.Sprintf("kafka: error decoding packet: %s", err.Info)
}
// ConfigurationError is the type of error returned from a constructor (e.g. NewClient, or NewConsumer)
// when the specified configuration is invalid.
type ConfigurationError string
func (err ConfigurationError) Error() string {
return "kafka: invalid configuration (" + string(err) + ")"
}
// KError is the type of error that can be returned directly by the Kafka broker.
// See https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol#AGuideToTheKafkaProtocol-ErrorCodes
type KError int16
// Numeric error codes returned by the Kafka server.
const (
ErrNoError KError = 0
ErrUnknown KError = -1
ErrOffsetOutOfRange KError = 1
ErrInvalidMessage KError = 2
ErrUnknownTopicOrPartition KError = 3
ErrInvalidMessageSize KError = 4
ErrLeaderNotAvailable KError = 5
ErrNotLeaderForPartition KError = 6
ErrRequestTimedOut KError = 7
ErrBrokerNotAvailable KError = 8
ErrReplicaNotAvailable KError = 9
ErrMessageSizeTooLarge KError = 10
ErrStaleControllerEpochCode KError = 11
ErrOffsetMetadataTooLarge KError = 12
ErrNetworkException KError = 13
ErrOffsetsLoadInProgress KError = 14
ErrConsumerCoordinatorNotAvailable KError = 15
ErrNotCoordinatorForConsumer KError = 16
ErrInvalidTopic KError = 17
ErrMessageSetSizeTooLarge KError = 18
ErrNotEnoughReplicas KError = 19
ErrNotEnoughReplicasAfterAppend KError = 20
ErrInvalidRequiredAcks KError = 21
ErrIllegalGeneration KError = 22
ErrInconsistentGroupProtocol KError = 23
ErrInvalidGroupId KError = 24
ErrUnknownMemberId KError = 25
ErrInvalidSessionTimeout KError = 26
ErrRebalanceInProgress KError = 27
ErrInvalidCommitOffsetSize KError = 28
ErrTopicAuthorizationFailed KError = 29
ErrGroupAuthorizationFailed KError = 30
ErrClusterAuthorizationFailed KError = 31
ErrInvalidTimestamp KError = 32
ErrUnsupportedSASLMechanism KError = 33
ErrIllegalSASLState KError = 34
ErrUnsupportedVersion KError = 35
ErrTopicAlreadyExists KError = 36
ErrInvalidPartitions KError = 37
ErrInvalidReplicationFactor KError = 38
ErrInvalidReplicaAssignment KError = 39
ErrInvalidConfig KError = 40
ErrNotController KError = 41
ErrInvalidRequest KError = 42
ErrUnsupportedForMessageFormat KError = 43
ErrPolicyViolation KError = 44
)
func (err KError) Error() string {
// Error messages stolen/adapted from
// https://kafka.apache.org/protocol#protocol_error_codes
switch err {
case ErrNoError:
return "kafka server: Not an error, why are you printing me?"
case ErrUnknown:
return "kafka server: Unexpected (unknown?) server error."
case ErrOffsetOutOfRange:
return "kafka server: The requested offset is outside the range of offsets maintained by the server for the given topic/partition."
case ErrInvalidMessage:
return "kafka server: Message contents does not match its CRC."
case ErrUnknownTopicOrPartition:
return "kafka server: Request was for a topic or partition that does not exist on this broker."
case ErrInvalidMessageSize:
return "kafka server: The message has a negative size."
case ErrLeaderNotAvailable:
return "kafka server: In the middle of a leadership election, there is currently no leader for this partition and hence it is unavailable for writes."
case ErrNotLeaderForPartition:
return "kafka server: Tried to send a message to a replica that is not the leader for some partition. Your metadata is out of date."
case ErrRequestTimedOut:
return "kafka server: Request exceeded the user-specified time limit in the request."
case ErrBrokerNotAvailable:
return "kafka server: Broker not available. Not a client facing error, we should never receive this!!!"
case ErrReplicaNotAvailable:
return "kafka server: Replica information not available, one or more brokers are down."
case ErrMessageSizeTooLarge:
return "kafka server: Message was too large, server rejected it to avoid allocation error."
case ErrStaleControllerEpochCode:
return "kafka server: StaleControllerEpochCode (internal error code for broker-to-broker communication)."
case ErrOffsetMetadataTooLarge:
return "kafka server: Specified a string larger than the configured maximum for offset metadata."
case ErrNetworkException:
return "kafka server: The server disconnected before a response was received."
case ErrOffsetsLoadInProgress:
return "kafka server: The broker is still loading offsets after a leader change for that offset's topic partition."
case ErrConsumerCoordinatorNotAvailable:
return "kafka server: Offset's topic has not yet been created."
case ErrNotCoordinatorForConsumer:
return "kafka server: Request was for a consumer group that is not coordinated by this broker."
case ErrInvalidTopic:
return "kafka server: The request attempted to perform an operation on an invalid topic."
case ErrMessageSetSizeTooLarge:
return "kafka server: The request included message batch larger than the configured segment size on the server."
case ErrNotEnoughReplicas:
return "kafka server: Messages are rejected since there are fewer in-sync replicas than required."
case ErrNotEnoughReplicasAfterAppend:
return "kafka server: Messages are written to the log, but to fewer in-sync replicas than required."
case ErrInvalidRequiredAcks:
return "kafka server: The number of required acks is invalid (should be either -1, 0, or 1)."
case ErrIllegalGeneration:
return "kafka server: The provided generation id is not the current generation."
case ErrInconsistentGroupProtocol:
return "kafka server: The provider group protocol type is incompatible with the other members."
case ErrInvalidGroupId:
return "kafka server: The provided group id was empty."
case ErrUnknownMemberId:
return "kafka server: The provided member is not known in the current generation."
case ErrInvalidSessionTimeout:
return "kafka server: The provided session timeout is outside the allowed range."
case ErrRebalanceInProgress:
return "kafka server: A rebalance for the group is in progress. Please re-join the group."
case ErrInvalidCommitOffsetSize:
return "kafka server: The provided commit metadata was too large."
case ErrTopicAuthorizationFailed:
return "kafka server: The client is not authorized to access this topic."
case ErrGroupAuthorizationFailed:
return "kafka server: The client is not authorized to access this group."
case ErrClusterAuthorizationFailed:
return "kafka server: The client is not authorized to send this request type."
case ErrInvalidTimestamp:
return "kafka server: The timestamp of the message is out of acceptable range."
case ErrUnsupportedSASLMechanism:
return "kafka server: The broker does not support the requested SASL mechanism."
case ErrIllegalSASLState:
return "kafka server: Request is not valid given the current SASL state."
case ErrUnsupportedVersion:
return "kafka server: The version of API is not supported."
case ErrTopicAlreadyExists:
return "kafka server: Topic with this name already exists."
case ErrInvalidPartitions:
return "kafka server: Number of partitions is invalid."
case ErrInvalidReplicationFactor:
return "kafka server: Replication-factor is invalid."
case ErrInvalidReplicaAssignment:
return "kafka server: Replica assignment is invalid."
case ErrInvalidConfig:
return "kafka server: Configuration is invalid."
case ErrNotController:
return "kafka server: This is not the correct controller for this cluster."
case ErrInvalidRequest:
return "kafka server: This most likely occurs because of a request being malformed by the client library or the message was sent to an incompatible broker. See the broker logs for more details."
case ErrUnsupportedForMessageFormat:
return "kafka server: The requested operation is not supported by the message format version."
case ErrPolicyViolation:
return "kafka server: Request parameters do not satisfy the configured policy."
}
return fmt.Sprintf("Unknown error, how did this happen? Error code = %d", err)
}
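
Client code can match on these directly, since KError values are comparable and the sentinel errors above are package-level variables. A small sketch of dispatching on the three kinds of errors sarama surfaces:

package main

import (
	"log"

	"github.com/Shopify/sarama"
)

// classify distinguishes broker-side KError codes, typed configuration
// errors, and the package-level sentinel errors.
func classify(err error) {
	switch e := err.(type) {
	case nil:
		return
	case sarama.KError:
		if e == sarama.ErrOffsetOutOfRange {
			log.Println("broker rejected the offset; pick a new starting point")
			return
		}
		log.Printf("broker error code %d: %v", int16(e), e)
	case sarama.ConfigurationError:
		log.Printf("bad configuration: %v", e)
	default:
		if err == sarama.ErrOutOfBrokers {
			log.Println("no reachable brokers; check the address list")
			return
		}
		log.Printf("other error: %v", err)
	}
}

func main() {
	classify(sarama.ErrOffsetOutOfRange)
	classify(sarama.ConfigurationError("Net.DialTimeout must be > 0"))
	classify(sarama.ErrOutOfBrokers)
}
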

150
vendor/github.com/Shopify/sarama/fetch_request.go generated vendored Normal file

@ -0,0 +1,150 @@
package sarama
type fetchRequestBlock struct {
fetchOffset int64
maxBytes int32
}
func (b *fetchRequestBlock) encode(pe packetEncoder) error {
pe.putInt64(b.fetchOffset)
pe.putInt32(b.maxBytes)
return nil
}
func (b *fetchRequestBlock) decode(pd packetDecoder) (err error) {
if b.fetchOffset, err = pd.getInt64(); err != nil {
return err
}
if b.maxBytes, err = pd.getInt32(); err != nil {
return err
}
return nil
}
// FetchRequest (API key 1) will fetch Kafka messages. Version 3 introduced the MaxBytes field. See
// https://issues.apache.org/jira/browse/KAFKA-2063 for a discussion of the issues leading up to that. The KIP is at
// https://cwiki.apache.org/confluence/display/KAFKA/KIP-74%3A+Add+Fetch+Response+Size+Limit+in+Bytes
type FetchRequest struct {
MaxWaitTime int32
MinBytes int32
MaxBytes int32
Version int16
blocks map[string]map[int32]*fetchRequestBlock
}
func (r *FetchRequest) encode(pe packetEncoder) (err error) {
pe.putInt32(-1) // replica ID is always -1 for clients
pe.putInt32(r.MaxWaitTime)
pe.putInt32(r.MinBytes)
if r.Version == 3 {
pe.putInt32(r.MaxBytes)
}
err = pe.putArrayLength(len(r.blocks))
if err != nil {
return err
}
for topic, blocks := range r.blocks {
err = pe.putString(topic)
if err != nil {
return err
}
err = pe.putArrayLength(len(blocks))
if err != nil {
return err
}
for partition, block := range blocks {
pe.putInt32(partition)
err = block.encode(pe)
if err != nil {
return err
}
}
}
return nil
}
func (r *FetchRequest) decode(pd packetDecoder, version int16) (err error) {
r.Version = version
if _, err = pd.getInt32(); err != nil {
return err
}
if r.MaxWaitTime, err = pd.getInt32(); err != nil {
return err
}
if r.MinBytes, err = pd.getInt32(); err != nil {
return err
}
if r.Version == 3 {
if r.MaxBytes, err = pd.getInt32(); err != nil {
return err
}
}
topicCount, err := pd.getArrayLength()
if err != nil {
return err
}
if topicCount == 0 {
return nil
}
r.blocks = make(map[string]map[int32]*fetchRequestBlock)
for i := 0; i < topicCount; i++ {
topic, err := pd.getString()
if err != nil {
return err
}
partitionCount, err := pd.getArrayLength()
if err != nil {
return err
}
r.blocks[topic] = make(map[int32]*fetchRequestBlock)
for j := 0; j < partitionCount; j++ {
partition, err := pd.getInt32()
if err != nil {
return err
}
fetchBlock := &fetchRequestBlock{}
if err = fetchBlock.decode(pd); err != nil {
return err
}
r.blocks[topic][partition] = fetchBlock
}
}
return nil
}
func (r *FetchRequest) key() int16 {
return 1
}
func (r *FetchRequest) version() int16 {
return r.Version
}
func (r *FetchRequest) requiredVersion() KafkaVersion {
switch r.Version {
case 1:
return V0_9_0_0
case 2:
return V0_10_0_0
case 3:
return V0_10_1_0
default:
return minVersion
}
}
func (r *FetchRequest) AddBlock(topic string, partitionID int32, fetchOffset int64, maxBytes int32) {
if r.blocks == nil {
r.blocks = make(map[string]map[int32]*fetchRequestBlock)
}
if r.blocks[topic] == nil {
r.blocks[topic] = make(map[int32]*fetchRequestBlock)
}
tmp := new(fetchRequestBlock)
tmp.maxBytes = maxBytes
tmp.fetchOffset = fetchOffset
r.blocks[topic][partitionID] = tmp
}
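
AddBlock plus Broker.Fetch is the low-level path the consumer uses internally; issuing a one-off fetch by hand looks roughly like this. The address, topic, and offsets are placeholders, and the broker must be the partition leader:

package main

import (
	"log"

	"github.com/Shopify/sarama"
)

func main() {
	broker := sarama.NewBroker("localhost:9092") // placeholder; must lead the partition
	if err := broker.Open(sarama.NewConfig()); err != nil {
		log.Fatal(err)
	}
	defer broker.Close()

	req := &sarama.FetchRequest{
		MinBytes:    1,
		MaxWaitTime: 250, // milliseconds, as encoded on the wire
	}
	req.AddBlock("events", 0, 0 /* fetchOffset */, 32768 /* maxBytes */)

	resp, err := broker.Fetch(req)
	if err != nil {
		log.Fatal(err)
	}
	block := resp.GetBlock("events", 0)
	if block == nil || block.Err != sarama.ErrNoError {
		log.Fatalf("bad block: %+v", block)
	}
	for _, msgBlock := range block.MsgSet.Messages {
		log.Printf("offset %d: %s", msgBlock.Offset, msgBlock.Msg.Value)
	}
}
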

210
vendor/github.com/Shopify/sarama/fetch_response.go generated vendored Normal file

@ -0,0 +1,210 @@
package sarama
import "time"
type FetchResponseBlock struct {
Err KError
HighWaterMarkOffset int64
MsgSet MessageSet
}
func (b *FetchResponseBlock) decode(pd packetDecoder) (err error) {
tmp, err := pd.getInt16()
if err != nil {
return err
}
b.Err = KError(tmp)
b.HighWaterMarkOffset, err = pd.getInt64()
if err != nil {
return err
}
msgSetSize, err := pd.getInt32()
if err != nil {
return err
}
msgSetDecoder, err := pd.getSubset(int(msgSetSize))
if err != nil {
return err
}
err = (&b.MsgSet).decode(msgSetDecoder)
return err
}
func (b *FetchResponseBlock) encode(pe packetEncoder) (err error) {
pe.putInt16(int16(b.Err))
pe.putInt64(b.HighWaterMarkOffset)
pe.push(&lengthField{})
err = b.MsgSet.encode(pe)
if err != nil {
return err
}
return pe.pop()
}
type FetchResponse struct {
Blocks map[string]map[int32]*FetchResponseBlock
ThrottleTime time.Duration
Version int16 // v1 requires 0.9+, v2 requires 0.10+
}
func (r *FetchResponse) decode(pd packetDecoder, version int16) (err error) {
r.Version = version
if r.Version >= 1 {
throttle, err := pd.getInt32()
if err != nil {
return err
}
r.ThrottleTime = time.Duration(throttle) * time.Millisecond
}
numTopics, err := pd.getArrayLength()
if err != nil {
return err
}
r.Blocks = make(map[string]map[int32]*FetchResponseBlock, numTopics)
for i := 0; i < numTopics; i++ {
name, err := pd.getString()
if err != nil {
return err
}
numBlocks, err := pd.getArrayLength()
if err != nil {
return err
}
r.Blocks[name] = make(map[int32]*FetchResponseBlock, numBlocks)
for j := 0; j < numBlocks; j++ {
id, err := pd.getInt32()
if err != nil {
return err
}
block := new(FetchResponseBlock)
err = block.decode(pd)
if err != nil {
return err
}
r.Blocks[name][id] = block
}
}
return nil
}
func (r *FetchResponse) encode(pe packetEncoder) (err error) {
if r.Version >= 1 {
pe.putInt32(int32(r.ThrottleTime / time.Millisecond))
}
err = pe.putArrayLength(len(r.Blocks))
if err != nil {
return err
}
for topic, partitions := range r.Blocks {
err = pe.putString(topic)
if err != nil {
return err
}
err = pe.putArrayLength(len(partitions))
if err != nil {
return err
}
for id, block := range partitions {
pe.putInt32(id)
err = block.encode(pe)
if err != nil {
return err
}
}
}
return nil
}
func (r *FetchResponse) key() int16 {
return 1
}
func (r *FetchResponse) version() int16 {
return r.Version
}
func (r *FetchResponse) requiredVersion() KafkaVersion {
switch r.Version {
case 1:
return V0_9_0_0
case 2:
return V0_10_0_0
default:
return minVersion
}
}
func (r *FetchResponse) GetBlock(topic string, partition int32) *FetchResponseBlock {
if r.Blocks == nil {
return nil
}
if r.Blocks[topic] == nil {
return nil
}
return r.Blocks[topic][partition]
}
func (r *FetchResponse) AddError(topic string, partition int32, err KError) {
if r.Blocks == nil {
r.Blocks = make(map[string]map[int32]*FetchResponseBlock)
}
partitions, ok := r.Blocks[topic]
if !ok {
partitions = make(map[int32]*FetchResponseBlock)
r.Blocks[topic] = partitions
}
frb, ok := partitions[partition]
if !ok {
frb = new(FetchResponseBlock)
partitions[partition] = frb
}
frb.Err = err
}
func (r *FetchResponse) AddMessage(topic string, partition int32, key, value Encoder, offset int64) {
if r.Blocks == nil {
r.Blocks = make(map[string]map[int32]*FetchResponseBlock)
}
partitions, ok := r.Blocks[topic]
if !ok {
partitions = make(map[int32]*FetchResponseBlock)
r.Blocks[topic] = partitions
}
frb, ok := partitions[partition]
if !ok {
frb = new(FetchResponseBlock)
partitions[partition] = frb
}
var kb []byte
var vb []byte
if key != nil {
kb, _ = key.Encode()
}
if value != nil {
vb, _ = value.Encode()
}
msg := &Message{Key: kb, Value: vb}
msgBlock := &MessageBlock{Msg: msg, Offset: offset}
frb.MsgSet.Messages = append(frb.MsgSet.Messages, msgBlock)
}
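
A short sketch of how a test might build a canned FetchResponse with the helpers above; the topic, error code, and offsets are illustrative:

package main

import (
	"fmt"

	"github.com/Shopify/sarama"
)

func main() {
	res := &sarama.FetchResponse{}
	// StringEncoder is sarama's trivial Encoder over a string payload.
	res.AddMessage("events", 0, nil, sarama.StringEncoder("hello"), 42)
	res.AddError("events", 1, sarama.ErrNotLeaderForPartition)

	if blk := res.GetBlock("events", 0); blk != nil {
		fmt.Println(len(blk.MsgSet.Messages)) // 1
	}
}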

47
vendor/github.com/Shopify/sarama/heartbeat_request.go generated vendored Normal file

@ -0,0 +1,47 @@
package sarama
type HeartbeatRequest struct {
GroupId string
GenerationId int32
MemberId string
}
func (r *HeartbeatRequest) encode(pe packetEncoder) error {
if err := pe.putString(r.GroupId); err != nil {
return err
}
pe.putInt32(r.GenerationId)
if err := pe.putString(r.MemberId); err != nil {
return err
}
return nil
}
func (r *HeartbeatRequest) decode(pd packetDecoder, version int16) (err error) {
if r.GroupId, err = pd.getString(); err != nil {
return
}
if r.GenerationId, err = pd.getInt32(); err != nil {
return
}
if r.MemberId, err = pd.getString(); err != nil {
return
}
return nil
}
func (r *HeartbeatRequest) key() int16 {
return 12
}
func (r *HeartbeatRequest) version() int16 {
return 0
}
func (r *HeartbeatRequest) requiredVersion() KafkaVersion {
return V0_9_0_0
}

32
vendor/github.com/Shopify/sarama/heartbeat_response.go generated vendored Normal file

@ -0,0 +1,32 @@
package sarama
type HeartbeatResponse struct {
Err KError
}
func (r *HeartbeatResponse) encode(pe packetEncoder) error {
pe.putInt16(int16(r.Err))
return nil
}
func (r *HeartbeatResponse) decode(pd packetDecoder, version int16) error {
kerr, err := pd.getInt16()
if err != nil {
return err
}
r.Err = KError(kerr)
return nil
}
func (r *HeartbeatResponse) key() int16 {
return 12
}
func (r *HeartbeatResponse) version() int16 {
return 0
}
func (r *HeartbeatResponse) requiredVersion() KafkaVersion {
return V0_9_0_0
}

143
vendor/github.com/Shopify/sarama/join_group_request.go generated vendored Normal file

@ -0,0 +1,143 @@
package sarama
type GroupProtocol struct {
Name string
Metadata []byte
}
func (p *GroupProtocol) decode(pd packetDecoder) (err error) {
p.Name, err = pd.getString()
if err != nil {
return err
}
p.Metadata, err = pd.getBytes()
return err
}
func (p *GroupProtocol) encode(pe packetEncoder) (err error) {
if err := pe.putString(p.Name); err != nil {
return err
}
if err := pe.putBytes(p.Metadata); err != nil {
return err
}
return nil
}
type JoinGroupRequest struct {
GroupId string
SessionTimeout int32
MemberId string
ProtocolType string
GroupProtocols map[string][]byte // deprecated; use OrderedGroupProtocols
OrderedGroupProtocols []*GroupProtocol
}
func (r *JoinGroupRequest) encode(pe packetEncoder) error {
if err := pe.putString(r.GroupId); err != nil {
return err
}
pe.putInt32(r.SessionTimeout)
if err := pe.putString(r.MemberId); err != nil {
return err
}
if err := pe.putString(r.ProtocolType); err != nil {
return err
}
if len(r.GroupProtocols) > 0 {
if len(r.OrderedGroupProtocols) > 0 {
return PacketEncodingError{"cannot specify both GroupProtocols and OrderedGroupProtocols on JoinGroupRequest"}
}
if err := pe.putArrayLength(len(r.GroupProtocols)); err != nil {
return err
}
for name, metadata := range r.GroupProtocols {
if err := pe.putString(name); err != nil {
return err
}
if err := pe.putBytes(metadata); err != nil {
return err
}
}
} else {
if err := pe.putArrayLength(len(r.OrderedGroupProtocols)); err != nil {
return err
}
for _, protocol := range r.OrderedGroupProtocols {
if err := protocol.encode(pe); err != nil {
return err
}
}
}
return nil
}
func (r *JoinGroupRequest) decode(pd packetDecoder, version int16) (err error) {
if r.GroupId, err = pd.getString(); err != nil {
return
}
if r.SessionTimeout, err = pd.getInt32(); err != nil {
return
}
if r.MemberId, err = pd.getString(); err != nil {
return
}
if r.ProtocolType, err = pd.getString(); err != nil {
return
}
n, err := pd.getArrayLength()
if err != nil {
return err
}
if n == 0 {
return nil
}
r.GroupProtocols = make(map[string][]byte)
for i := 0; i < n; i++ {
protocol := &GroupProtocol{}
if err := protocol.decode(pd); err != nil {
return err
}
r.GroupProtocols[protocol.Name] = protocol.Metadata
r.OrderedGroupProtocols = append(r.OrderedGroupProtocols, protocol)
}
return nil
}
func (r *JoinGroupRequest) key() int16 {
return 11
}
func (r *JoinGroupRequest) version() int16 {
return 0
}
func (r *JoinGroupRequest) requiredVersion() KafkaVersion {
return V0_9_0_0
}
func (r *JoinGroupRequest) AddGroupProtocol(name string, metadata []byte) {
r.OrderedGroupProtocols = append(r.OrderedGroupProtocols, &GroupProtocol{
Name: name,
Metadata: metadata,
})
}
func (r *JoinGroupRequest) AddGroupProtocolMetadata(name string, metadata *ConsumerGroupMemberMetadata) error {
bin, err := encode(metadata, nil)
if err != nil {
return err
}
r.AddGroupProtocol(name, bin)
return nil
}
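
A sketch of filling a JoinGroupRequest with the helpers above; ConsumerGroupMemberMetadata is defined in another vendored sarama file, and the group, topic, and protocol names are made up:

package main

import "github.com/Shopify/sarama"

func main() {
	req := &sarama.JoinGroupRequest{
		GroupId:        "example-group",
		SessionTimeout: 30000, // milliseconds
		ProtocolType:   "consumer",
	}
	meta := &sarama.ConsumerGroupMemberMetadata{Topics: []string{"events"}}
	// AddGroupProtocolMetadata encodes the metadata and appends an ordered protocol.
	if err := req.AddGroupProtocolMetadata("range", meta); err != nil {
		panic(err)
	}
}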

115
vendor/github.com/Shopify/sarama/join_group_response.go generated vendored Normal file

@ -0,0 +1,115 @@
package sarama
type JoinGroupResponse struct {
Err KError
GenerationId int32
GroupProtocol string
LeaderId string
MemberId string
Members map[string][]byte
}
func (r *JoinGroupResponse) GetMembers() (map[string]ConsumerGroupMemberMetadata, error) {
members := make(map[string]ConsumerGroupMemberMetadata, len(r.Members))
for id, bin := range r.Members {
meta := new(ConsumerGroupMemberMetadata)
if err := decode(bin, meta); err != nil {
return nil, err
}
members[id] = *meta
}
return members, nil
}
func (r *JoinGroupResponse) encode(pe packetEncoder) error {
pe.putInt16(int16(r.Err))
pe.putInt32(r.GenerationId)
if err := pe.putString(r.GroupProtocol); err != nil {
return err
}
if err := pe.putString(r.LeaderId); err != nil {
return err
}
if err := pe.putString(r.MemberId); err != nil {
return err
}
if err := pe.putArrayLength(len(r.Members)); err != nil {
return err
}
for memberId, memberMetadata := range r.Members {
if err := pe.putString(memberId); err != nil {
return err
}
if err := pe.putBytes(memberMetadata); err != nil {
return err
}
}
return nil
}
func (r *JoinGroupResponse) decode(pd packetDecoder, version int16) (err error) {
kerr, err := pd.getInt16()
if err != nil {
return err
}
r.Err = KError(kerr)
if r.GenerationId, err = pd.getInt32(); err != nil {
return
}
if r.GroupProtocol, err = pd.getString(); err != nil {
return
}
if r.LeaderId, err = pd.getString(); err != nil {
return
}
if r.MemberId, err = pd.getString(); err != nil {
return
}
n, err := pd.getArrayLength()
if err != nil {
return err
}
if n == 0 {
return nil
}
r.Members = make(map[string][]byte)
for i := 0; i < n; i++ {
memberId, err := pd.getString()
if err != nil {
return err
}
memberMetadata, err := pd.getBytes()
if err != nil {
return err
}
r.Members[memberId] = memberMetadata
}
return nil
}
func (r *JoinGroupResponse) key() int16 {
return 11
}
func (r *JoinGroupResponse) version() int16 {
return 0
}
func (r *JoinGroupResponse) requiredVersion() KafkaVersion {
return V0_9_0_0
}


@ -0,0 +1,40 @@
package sarama
type LeaveGroupRequest struct {
GroupId string
MemberId string
}
func (r *LeaveGroupRequest) encode(pe packetEncoder) error {
if err := pe.putString(r.GroupId); err != nil {
return err
}
if err := pe.putString(r.MemberId); err != nil {
return err
}
return nil
}
func (r *LeaveGroupRequest) decode(pd packetDecoder, version int16) (err error) {
if r.GroupId, err = pd.getString(); err != nil {
return
}
if r.MemberId, err = pd.getString(); err != nil {
return
}
return nil
}
func (r *LeaveGroupRequest) key() int16 {
return 13
}
func (r *LeaveGroupRequest) version() int16 {
return 0
}
func (r *LeaveGroupRequest) requiredVersion() KafkaVersion {
return V0_9_0_0
}


@ -0,0 +1,32 @@
package sarama
type LeaveGroupResponse struct {
Err KError
}
func (r *LeaveGroupResponse) encode(pe packetEncoder) error {
pe.putInt16(int16(r.Err))
return nil
}
func (r *LeaveGroupResponse) decode(pd packetDecoder, version int16) (err error) {
kerr, err := pd.getInt16()
if err != nil {
return err
}
r.Err = KError(kerr)
return nil
}
func (r *LeaveGroupResponse) key() int16 {
return 13
}
func (r *LeaveGroupResponse) version() int16 {
return 0
}
func (r *LeaveGroupResponse) requiredVersion() KafkaVersion {
return V0_9_0_0
}

29
vendor/github.com/Shopify/sarama/length_field.go generated vendored Normal file

@ -0,0 +1,29 @@
package sarama
import "encoding/binary"
// lengthField implements the PushEncoder and PushDecoder interfaces for calculating 4-byte lengths.
type lengthField struct {
startOffset int
}
func (l *lengthField) saveOffset(in int) {
l.startOffset = in
}
func (l *lengthField) reserveLength() int {
return 4
}
func (l *lengthField) run(curOffset int, buf []byte) error {
binary.BigEndian.PutUint32(buf[l.startOffset:], uint32(curOffset-l.startOffset-4))
return nil
}
func (l *lengthField) check(curOffset int, buf []byte) error {
if uint32(curOffset-l.startOffset-4) != binary.BigEndian.Uint32(buf[l.startOffset:]) {
return PacketDecodingError{"length field invalid"}
}
return nil
}
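
The push/pop machinery reserves 4 bytes up front (reserveLength) and backfills them in run with the number of bytes written after the reservation. A standalone sketch of the same arithmetic using only the standard library:

package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	payload := []byte("kafka")
	buf := make([]byte, 4+len(payload)) // the 4 reserved length bytes, then the payload
	copy(buf[4:], payload)
	// run(curOffset, buf) computes curOffset - startOffset - 4; with startOffset 0
	// and curOffset len(buf), that is simply the payload length.
	binary.BigEndian.PutUint32(buf, uint32(len(buf)-4))
	fmt.Println(binary.BigEndian.Uint32(buf)) // 5
}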


@ -0,0 +1,24 @@
package sarama
type ListGroupsRequest struct {
}
func (r *ListGroupsRequest) encode(pe packetEncoder) error {
return nil
}
func (r *ListGroupsRequest) decode(pd packetDecoder, version int16) (err error) {
return nil
}
func (r *ListGroupsRequest) key() int16 {
return 16
}
func (r *ListGroupsRequest) version() int16 {
return 0
}
func (r *ListGroupsRequest) requiredVersion() KafkaVersion {
return V0_9_0_0
}


@ -0,0 +1,69 @@
package sarama
type ListGroupsResponse struct {
Err KError
Groups map[string]string
}
func (r *ListGroupsResponse) encode(pe packetEncoder) error {
pe.putInt16(int16(r.Err))
if err := pe.putArrayLength(len(r.Groups)); err != nil {
return err
}
for groupId, protocolType := range r.Groups {
if err := pe.putString(groupId); err != nil {
return err
}
if err := pe.putString(protocolType); err != nil {
return err
}
}
return nil
}
func (r *ListGroupsResponse) decode(pd packetDecoder, version int16) error {
kerr, err := pd.getInt16()
if err != nil {
return err
}
r.Err = KError(kerr)
n, err := pd.getArrayLength()
if err != nil {
return err
}
if n == 0 {
return nil
}
r.Groups = make(map[string]string)
for i := 0; i < n; i++ {
groupId, err := pd.getString()
if err != nil {
return err
}
protocolType, err := pd.getString()
if err != nil {
return err
}
r.Groups[groupId] = protocolType
}
return nil
}
func (r *ListGroupsResponse) key() int16 {
return 16
}
func (r *ListGroupsResponse) version() int16 {
return 0
}
func (r *ListGroupsResponse) requiredVersion() KafkaVersion {
return V0_9_0_0
}

216
vendor/github.com/Shopify/sarama/message.go generated vendored Normal file

@ -0,0 +1,216 @@
package sarama
import (
"bytes"
"compress/gzip"
"fmt"
"io/ioutil"
"time"
"github.com/eapache/go-xerial-snappy"
"github.com/pierrec/lz4"
)
// CompressionCodec represents the various compression codecs recognized by Kafka in messages.
type CompressionCodec int8
// only the last two bits are really used
const compressionCodecMask int8 = 0x03
const (
CompressionNone CompressionCodec = 0
CompressionGZIP CompressionCodec = 1
CompressionSnappy CompressionCodec = 2
CompressionLZ4 CompressionCodec = 3
)
type Message struct {
Codec CompressionCodec // codec used to compress the message contents
Key []byte // the message key, may be nil
Value []byte // the message contents
Set *MessageSet // the message set a message might wrap
Version int8 // v1 requires Kafka 0.10
Timestamp time.Time // the timestamp of the message (version 1+ only)
compressedCache []byte
compressedSize int // used for computing the compression ratio metrics
}
func (m *Message) encode(pe packetEncoder) error {
pe.push(&crc32Field{})
pe.putInt8(m.Version)
attributes := int8(m.Codec) & compressionCodecMask
pe.putInt8(attributes)
if m.Version >= 1 {
timestamp := int64(-1)
if !m.Timestamp.Before(time.Unix(0, 0)) {
timestamp = m.Timestamp.UnixNano() / int64(time.Millisecond)
} else if !m.Timestamp.IsZero() {
return PacketEncodingError{fmt.Sprintf("invalid timestamp (%v)", m.Timestamp)}
}
pe.putInt64(timestamp)
}
err := pe.putBytes(m.Key)
if err != nil {
return err
}
var payload []byte
if m.compressedCache != nil {
payload = m.compressedCache
m.compressedCache = nil
} else if m.Value != nil {
switch m.Codec {
case CompressionNone:
payload = m.Value
case CompressionGZIP:
var buf bytes.Buffer
writer := gzip.NewWriter(&buf)
if _, err = writer.Write(m.Value); err != nil {
return err
}
if err = writer.Close(); err != nil {
return err
}
m.compressedCache = buf.Bytes()
payload = m.compressedCache
case CompressionSnappy:
tmp := snappy.Encode(m.Value)
m.compressedCache = tmp
payload = m.compressedCache
case CompressionLZ4:
var buf bytes.Buffer
writer := lz4.NewWriter(&buf)
if _, err = writer.Write(m.Value); err != nil {
return err
}
if err = writer.Close(); err != nil {
return err
}
m.compressedCache = buf.Bytes()
payload = m.compressedCache
default:
return PacketEncodingError{fmt.Sprintf("unsupported compression codec (%d)", m.Codec)}
}
// Keep in mind the compressed payload size for metric gathering
m.compressedSize = len(payload)
}
if err = pe.putBytes(payload); err != nil {
return err
}
return pe.pop()
}
func (m *Message) decode(pd packetDecoder) (err error) {
err = pd.push(&crc32Field{})
if err != nil {
return err
}
m.Version, err = pd.getInt8()
if err != nil {
return err
}
if m.Version > 1 {
return PacketDecodingError{fmt.Sprintf("unknown magic byte (%v)", m.Version)}
}
attribute, err := pd.getInt8()
if err != nil {
return err
}
m.Codec = CompressionCodec(attribute & compressionCodecMask)
if m.Version == 1 {
millis, err := pd.getInt64()
if err != nil {
return err
}
// negative timestamps are invalid; in these cases we should return
// a zero time
timestamp := time.Time{}
if millis >= 0 {
timestamp = time.Unix(millis/1000, (millis%1000)*int64(time.Millisecond))
}
m.Timestamp = timestamp
}
m.Key, err = pd.getBytes()
if err != nil {
return err
}
m.Value, err = pd.getBytes()
if err != nil {
return err
}
// Required for deep equal assertion during tests but might be useful
// for future metrics about the compression ratio in fetch requests
m.compressedSize = len(m.Value)
switch m.Codec {
case CompressionNone:
// nothing to do
case CompressionGZIP:
if m.Value == nil {
break
}
reader, err := gzip.NewReader(bytes.NewReader(m.Value))
if err != nil {
return err
}
if m.Value, err = ioutil.ReadAll(reader); err != nil {
return err
}
if err := m.decodeSet(); err != nil {
return err
}
case CompressionSnappy:
if m.Value == nil {
break
}
if m.Value, err = snappy.Decode(m.Value); err != nil {
return err
}
if err := m.decodeSet(); err != nil {
return err
}
case CompressionLZ4:
if m.Value == nil {
break
}
reader := lz4.NewReader(bytes.NewReader(m.Value))
if m.Value, err = ioutil.ReadAll(reader); err != nil {
return err
}
if err := m.decodeSet(); err != nil {
return err
}
default:
return PacketDecodingError{fmt.Sprintf("invalid compression specified (%d)", m.Codec)}
}
return pd.pop()
}
// decodes a message set from a previously encoded bulk-message
func (m *Message) decodeSet() (err error) {
pd := realDecoder{raw: m.Value}
m.Set = &MessageSet{}
return m.Set.decode(&pd)
}
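
The CompressionGZIP branches above compress the value on encode and transparently inflate it on decode. A self-contained sketch of that roundtrip with the same standard-library calls:

package main

import (
	"bytes"
	"compress/gzip"
	"fmt"
	"io/ioutil"
)

func main() {
	// Compress, mirroring the CompressionGZIP case of Message.encode.
	var buf bytes.Buffer
	w := gzip.NewWriter(&buf)
	if _, err := w.Write([]byte("payload")); err != nil {
		panic(err)
	}
	if err := w.Close(); err != nil {
		panic(err)
	}

	// Decompress, mirroring the CompressionGZIP case of Message.decode.
	r, err := gzip.NewReader(bytes.NewReader(buf.Bytes()))
	if err != nil {
		panic(err)
	}
	out, err := ioutil.ReadAll(r)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out)) // payload
}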

89
vendor/github.com/Shopify/sarama/message_set.go generated vendored Normal file

@ -0,0 +1,89 @@
package sarama
type MessageBlock struct {
Offset int64
Msg *Message
}
// Messages is a convenience helper which returns either all the messages
// wrapped in this block's set, or the block itself if it wraps no set
func (msb *MessageBlock) Messages() []*MessageBlock {
if msb.Msg.Set != nil {
return msb.Msg.Set.Messages
}
return []*MessageBlock{msb}
}
func (msb *MessageBlock) encode(pe packetEncoder) error {
pe.putInt64(msb.Offset)
pe.push(&lengthField{})
err := msb.Msg.encode(pe)
if err != nil {
return err
}
return pe.pop()
}
func (msb *MessageBlock) decode(pd packetDecoder) (err error) {
if msb.Offset, err = pd.getInt64(); err != nil {
return err
}
if err = pd.push(&lengthField{}); err != nil {
return err
}
msb.Msg = new(Message)
if err = msb.Msg.decode(pd); err != nil {
return err
}
if err = pd.pop(); err != nil {
return err
}
return nil
}
type MessageSet struct {
PartialTrailingMessage bool // whether the set on the wire contained an incomplete trailing MessageBlock
Messages []*MessageBlock
}
func (ms *MessageSet) encode(pe packetEncoder) error {
for i := range ms.Messages {
err := ms.Messages[i].encode(pe)
if err != nil {
return err
}
}
return nil
}
func (ms *MessageSet) decode(pd packetDecoder) (err error) {
ms.Messages = nil
for pd.remaining() > 0 {
msb := new(MessageBlock)
err = msb.decode(pd)
switch err {
case nil:
ms.Messages = append(ms.Messages, msb)
case ErrInsufficientData:
// As an optimization the server is allowed to return a partial message at the
// end of the message set. Clients should handle this case, so we just flag the set as partial and stop decoding.
ms.PartialTrailingMessage = true
return nil
default:
return err
}
}
return nil
}
func (ms *MessageSet) addMessage(msg *Message) {
block := new(MessageBlock)
block.Msg = msg
ms.Messages = append(ms.Messages, block)
}

52
vendor/github.com/Shopify/sarama/metadata_request.go generated vendored Normal file

@ -0,0 +1,52 @@
package sarama
type MetadataRequest struct {
Topics []string
}
func (r *MetadataRequest) encode(pe packetEncoder) error {
err := pe.putArrayLength(len(r.Topics))
if err != nil {
return err
}
for i := range r.Topics {
err = pe.putString(r.Topics[i])
if err != nil {
return err
}
}
return nil
}
func (r *MetadataRequest) decode(pd packetDecoder, version int16) error {
topicCount, err := pd.getArrayLength()
if err != nil {
return err
}
if topicCount == 0 {
return nil
}
r.Topics = make([]string, topicCount)
for i := range r.Topics {
topic, err := pd.getString()
if err != nil {
return err
}
r.Topics[i] = topic
}
return nil
}
func (r *MetadataRequest) key() int16 {
return 3
}
func (r *MetadataRequest) version() int16 {
return 0
}
func (r *MetadataRequest) requiredVersion() KafkaVersion {
return minVersion
}

239
vendor/github.com/Shopify/sarama/metadata_response.go generated vendored Normal file

@ -0,0 +1,239 @@
package sarama
type PartitionMetadata struct {
Err KError
ID int32
Leader int32
Replicas []int32
Isr []int32
}
func (pm *PartitionMetadata) decode(pd packetDecoder) (err error) {
tmp, err := pd.getInt16()
if err != nil {
return err
}
pm.Err = KError(tmp)
pm.ID, err = pd.getInt32()
if err != nil {
return err
}
pm.Leader, err = pd.getInt32()
if err != nil {
return err
}
pm.Replicas, err = pd.getInt32Array()
if err != nil {
return err
}
pm.Isr, err = pd.getInt32Array()
if err != nil {
return err
}
return nil
}
func (pm *PartitionMetadata) encode(pe packetEncoder) (err error) {
pe.putInt16(int16(pm.Err))
pe.putInt32(pm.ID)
pe.putInt32(pm.Leader)
err = pe.putInt32Array(pm.Replicas)
if err != nil {
return err
}
err = pe.putInt32Array(pm.Isr)
if err != nil {
return err
}
return nil
}
type TopicMetadata struct {
Err KError
Name string
Partitions []*PartitionMetadata
}
func (tm *TopicMetadata) decode(pd packetDecoder) (err error) {
tmp, err := pd.getInt16()
if err != nil {
return err
}
tm.Err = KError(tmp)
tm.Name, err = pd.getString()
if err != nil {
return err
}
n, err := pd.getArrayLength()
if err != nil {
return err
}
tm.Partitions = make([]*PartitionMetadata, n)
for i := 0; i < n; i++ {
tm.Partitions[i] = new(PartitionMetadata)
err = tm.Partitions[i].decode(pd)
if err != nil {
return err
}
}
return nil
}
func (tm *TopicMetadata) encode(pe packetEncoder) (err error) {
pe.putInt16(int16(tm.Err))
err = pe.putString(tm.Name)
if err != nil {
return err
}
err = pe.putArrayLength(len(tm.Partitions))
if err != nil {
return err
}
for _, pm := range tm.Partitions {
err = pm.encode(pe)
if err != nil {
return err
}
}
return nil
}
type MetadataResponse struct {
Brokers []*Broker
Topics []*TopicMetadata
}
func (r *MetadataResponse) decode(pd packetDecoder, version int16) (err error) {
n, err := pd.getArrayLength()
if err != nil {
return err
}
r.Brokers = make([]*Broker, n)
for i := 0; i < n; i++ {
r.Brokers[i] = new(Broker)
err = r.Brokers[i].decode(pd)
if err != nil {
return err
}
}
n, err = pd.getArrayLength()
if err != nil {
return err
}
r.Topics = make([]*TopicMetadata, n)
for i := 0; i < n; i++ {
r.Topics[i] = new(TopicMetadata)
err = r.Topics[i].decode(pd)
if err != nil {
return err
}
}
return nil
}
func (r *MetadataResponse) encode(pe packetEncoder) error {
err := pe.putArrayLength(len(r.Brokers))
if err != nil {
return err
}
for _, broker := range r.Brokers {
err = broker.encode(pe)
if err != nil {
return err
}
}
err = pe.putArrayLength(len(r.Topics))
if err != nil {
return err
}
for _, tm := range r.Topics {
err = tm.encode(pe)
if err != nil {
return err
}
}
return nil
}
func (r *MetadataResponse) key() int16 {
return 3
}
func (r *MetadataResponse) version() int16 {
return 0
}
func (r *MetadataResponse) requiredVersion() KafkaVersion {
return minVersion
}
// testing API
func (r *MetadataResponse) AddBroker(addr string, id int32) {
r.Brokers = append(r.Brokers, &Broker{id: id, addr: addr})
}
func (r *MetadataResponse) AddTopic(topic string, err KError) *TopicMetadata {
var tmatch *TopicMetadata
for _, tm := range r.Topics {
if tm.Name == topic {
tmatch = tm
goto foundTopic
}
}
tmatch = new(TopicMetadata)
tmatch.Name = topic
r.Topics = append(r.Topics, tmatch)
foundTopic:
tmatch.Err = err
return tmatch
}
func (r *MetadataResponse) AddTopicPartition(topic string, partition, brokerID int32, replicas, isr []int32, err KError) {
tmatch := r.AddTopic(topic, ErrNoError)
var pmatch *PartitionMetadata
for _, pm := range tmatch.Partitions {
if pm.ID == partition {
pmatch = pm
goto foundPartition
}
}
pmatch = new(PartitionMetadata)
pmatch.ID = partition
tmatch.Partitions = append(tmatch.Partitions, pmatch)
foundPartition:
pmatch.Leader = brokerID
pmatch.Replicas = replicas
pmatch.Isr = isr
pmatch.Err = err
}
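
A small sketch of this testing API; the broker address, topic, and IDs are illustrative:

package main

import "github.com/Shopify/sarama"

func main() {
	res := &sarama.MetadataResponse{}
	res.AddBroker("localhost:9092", 0)
	// Upserts the topic and partition entries via the goto-based helpers above.
	res.AddTopicPartition("events", 0, 0, []int32{0}, []int32{0}, sarama.ErrNoError)
	_ = res
}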

51
vendor/github.com/Shopify/sarama/metrics.go generated vendored Normal file

@ -0,0 +1,51 @@
package sarama
import (
"fmt"
"strings"
"github.com/rcrowley/go-metrics"
)
// Use exponentially decaying reservoir for sampling histograms with the same defaults as the Java library:
// 1028 elements, which offers a 99.9% confidence level with a 5% margin of error assuming a normal distribution,
// and an alpha factor of 0.015, which heavily biases the reservoir to the past 5 minutes of measurements.
// See https://github.com/dropwizard/metrics/blob/v3.1.0/metrics-core/src/main/java/com/codahale/metrics/ExponentiallyDecayingReservoir.java#L38
const (
metricsReservoirSize = 1028
metricsAlphaFactor = 0.015
)
func getOrRegisterHistogram(name string, r metrics.Registry) metrics.Histogram {
return r.GetOrRegister(name, func() metrics.Histogram {
return metrics.NewHistogram(metrics.NewExpDecaySample(metricsReservoirSize, metricsAlphaFactor))
}).(metrics.Histogram)
}
func getMetricNameForBroker(name string, broker *Broker) string {
// Use broker id like the Java client as it does not contain '.' or ':' characters that
// can be interpreted as special characters by monitoring tools (e.g. Graphite)
return fmt.Sprintf(name+"-for-broker-%d", broker.ID())
}
func getOrRegisterBrokerMeter(name string, broker *Broker, r metrics.Registry) metrics.Meter {
return metrics.GetOrRegisterMeter(getMetricNameForBroker(name, broker), r)
}
func getOrRegisterBrokerHistogram(name string, broker *Broker, r metrics.Registry) metrics.Histogram {
return getOrRegisterHistogram(getMetricNameForBroker(name, broker), r)
}
func getMetricNameForTopic(name string, topic string) string {
// Convert dot to _ since reporters like Graphite typically use dot to represent hierarchy
// cf. KAFKA-1902 and KAFKA-2337
return fmt.Sprintf(name+"-for-topic-%s", strings.Replace(topic, ".", "_", -1))
}
func getOrRegisterTopicMeter(name string, topic string, r metrics.Registry) metrics.Meter {
return metrics.GetOrRegisterMeter(getMetricNameForTopic(name, topic), r)
}
func getOrRegisterTopicHistogram(name string, topic string, r metrics.Registry) metrics.Histogram {
return getOrRegisterHistogram(getMetricNameForTopic(name, topic), r)
}
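
A sketch of the naming convention these helpers enforce, using the same go-metrics calls; the metric and topic names are illustrative:

package main

import (
	"fmt"
	"strings"

	metrics "github.com/rcrowley/go-metrics"
)

func main() {
	r := metrics.NewRegistry()
	// Same mangling as getMetricNameForTopic: dots become underscores so that
	// Graphite-style reporters don't split the topic into hierarchy levels.
	name := fmt.Sprintf("records-per-request-for-topic-%s",
		strings.Replace("my.topic", ".", "_", -1))
	metrics.GetOrRegisterMeter(name, r).Mark(1)
}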

324
vendor/github.com/Shopify/sarama/mockbroker.go generated vendored Normal file

@ -0,0 +1,324 @@
package sarama
import (
"bytes"
"encoding/binary"
"fmt"
"io"
"net"
"reflect"
"strconv"
"sync"
"time"
"github.com/davecgh/go-spew/spew"
)
const (
expectationTimeout = 500 * time.Millisecond
)
type requestHandlerFunc func(req *request) (res encoder)
// RequestNotifierFunc is invoked when a mock broker processes a request successfully
// and provides the number of bytes read and written.
type RequestNotifierFunc func(bytesRead, bytesWritten int)
// MockBroker is a mock Kafka broker that is used in unit tests. It is exposed
// to facilitate testing of higher level or specialized consumers and producers
// built on top of Sarama. Note that it does not 'mimic' the Kafka API protocol,
// but rather provides a facility to do that. It takes care of the TCP
// transport, request unmarshaling, response marshaling, and makes it the test
// writer responsibility to program correct according to the Kafka API protocol
// MockBroker behaviour.
//
// MockBroker is implemented as a TCP server listening on a kernel-selected
// localhost port that can accept many connections. It reads Kafka requests
// from that connection and returns responses programmed by the SetHandlerByMap
// function. If a MockBroker receives a request that it has no programmed
// response for, then it returns nothing and the request times out.
//
// A set of MockRequest builders to define mappings used by MockBroker is
// provided by Sarama. But users can develop MockRequests of their own and use
// them along with or instead of the standard ones.
//
// When running tests with MockBroker it is strongly recommended to specify
// a timeout to `go test` so that if the broker hangs waiting for a response,
// the test panics.
//
// It is not necessary to prefix message length or correlation ID to your
// response bytes; the server does that automatically as a convenience.
type MockBroker struct {
brokerID int32
port int32
closing chan none
stopper chan none
expectations chan encoder
listener net.Listener
t TestReporter
latency time.Duration
handler requestHandlerFunc
notifier RequestNotifierFunc
history []RequestResponse
lock sync.Mutex
}
// RequestResponse represents a Request/Response pair processed by MockBroker.
type RequestResponse struct {
Request protocolBody
Response encoder
}
// SetLatency makes broker pause for the specified period every time before
// replying.
func (b *MockBroker) SetLatency(latency time.Duration) {
b.latency = latency
}
// SetHandlerByMap defines mapping of Request types to MockResponses. When a
// request is received by the broker, it looks up the request type in the map
// and uses the found MockResponse instance to generate an appropriate reply.
// If the request type is not found in the map then nothing is sent.
func (b *MockBroker) SetHandlerByMap(handlerMap map[string]MockResponse) {
b.setHandler(func(req *request) (res encoder) {
reqTypeName := reflect.TypeOf(req.body).Elem().Name()
mockResponse := handlerMap[reqTypeName]
if mockResponse == nil {
return nil
}
return mockResponse.For(req.body)
})
}
// SetNotifier sets a function that will get invoked whenever a request has been
// processed successfully and will provide the number of bytes read and written.
func (b *MockBroker) SetNotifier(notifier RequestNotifierFunc) {
b.lock.Lock()
b.notifier = notifier
b.lock.Unlock()
}
// BrokerID returns broker ID assigned to the broker.
func (b *MockBroker) BrokerID() int32 {
return b.brokerID
}
// History returns a slice of RequestResponse pairs in the order they were
// processed by the broker. Note that in case of multiple connections to the
// broker the order expected by a test can be different from the order recorded
// in the history, unless some synchronization is implemented in the test.
func (b *MockBroker) History() []RequestResponse {
b.lock.Lock()
history := make([]RequestResponse, len(b.history))
copy(history, b.history)
b.lock.Unlock()
return history
}
// Port returns the TCP port number the broker is listening for requests on.
func (b *MockBroker) Port() int32 {
return b.port
}
// Addr returns the broker connection string in the form "<address>:<port>".
func (b *MockBroker) Addr() string {
return b.listener.Addr().String()
}
// Close terminates the broker, blocking until it stops internal goroutines and
// releases all resources.
func (b *MockBroker) Close() {
close(b.expectations)
if len(b.expectations) > 0 {
buf := bytes.NewBufferString(fmt.Sprintf("mockbroker/%d: not all expectations were satisfied! Still waiting on:\n", b.BrokerID()))
for e := range b.expectations {
_, _ = buf.WriteString(spew.Sdump(e))
}
b.t.Error(buf.String())
}
close(b.closing)
<-b.stopper
}
// setHandler sets the specified function as the request handler. Whenever
// a mock broker reads a request from the wire it passes the request to the
// function and sends back whatever the handler function returns.
func (b *MockBroker) setHandler(handler requestHandlerFunc) {
b.lock.Lock()
b.handler = handler
b.lock.Unlock()
}
func (b *MockBroker) serverLoop() {
defer close(b.stopper)
var err error
var conn net.Conn
go func() {
<-b.closing
err := b.listener.Close()
if err != nil {
b.t.Error(err)
}
}()
wg := &sync.WaitGroup{}
i := 0
for conn, err = b.listener.Accept(); err == nil; conn, err = b.listener.Accept() {
wg.Add(1)
go b.handleRequests(conn, i, wg)
i++
}
wg.Wait()
Logger.Printf("*** mockbroker/%d: listener closed, err=%v", b.BrokerID(), err)
}
func (b *MockBroker) handleRequests(conn net.Conn, idx int, wg *sync.WaitGroup) {
defer wg.Done()
defer func() {
_ = conn.Close()
}()
Logger.Printf("*** mockbroker/%d/%d: connection opened", b.BrokerID(), idx)
var err error
abort := make(chan none)
defer close(abort)
go func() {
select {
case <-b.closing:
_ = conn.Close()
case <-abort:
}
}()
resHeader := make([]byte, 8)
for {
req, bytesRead, err := decodeRequest(conn)
if err != nil {
Logger.Printf("*** mockbroker/%d/%d: invalid request: err=%+v, %+v", b.brokerID, idx, err, spew.Sdump(req))
b.serverError(err)
break
}
if b.latency > 0 {
time.Sleep(b.latency)
}
b.lock.Lock()
res := b.handler(req)
b.history = append(b.history, RequestResponse{req.body, res})
b.lock.Unlock()
if res == nil {
Logger.Printf("*** mockbroker/%d/%d: ignored %v", b.brokerID, idx, spew.Sdump(req))
continue
}
Logger.Printf("*** mockbroker/%d/%d: served %v -> %v", b.brokerID, idx, req, res)
encodedRes, err := encode(res, nil)
if err != nil {
b.serverError(err)
break
}
if len(encodedRes) == 0 {
b.lock.Lock()
if b.notifier != nil {
b.notifier(bytesRead, 0)
}
b.lock.Unlock()
continue
}
binary.BigEndian.PutUint32(resHeader, uint32(len(encodedRes)+4))
binary.BigEndian.PutUint32(resHeader[4:], uint32(req.correlationID))
if _, err = conn.Write(resHeader); err != nil {
b.serverError(err)
break
}
if _, err = conn.Write(encodedRes); err != nil {
b.serverError(err)
break
}
b.lock.Lock()
if b.notifier != nil {
b.notifier(bytesRead, len(resHeader)+len(encodedRes))
}
b.lock.Unlock()
}
Logger.Printf("*** mockbroker/%d/%d: connection closed, err=%v", b.BrokerID(), idx, err)
}
func (b *MockBroker) defaultRequestHandler(req *request) (res encoder) {
select {
case res, ok := <-b.expectations:
if !ok {
return nil
}
return res
case <-time.After(expectationTimeout):
return nil
}
}
func (b *MockBroker) serverError(err error) {
isConnectionClosedError := false
if _, ok := err.(*net.OpError); ok {
isConnectionClosedError = true
} else if err == io.EOF {
isConnectionClosedError = true
} else if err.Error() == "use of closed network connection" {
isConnectionClosedError = true
}
if isConnectionClosedError {
return
}
b.t.Errorf(err.Error())
}
// NewMockBroker launches a fake Kafka broker. It takes a TestReporter as provided by the
// test framework and a channel of responses to use. If an error occurs it is
// simply logged to the TestReporter and the broker exits.
func NewMockBroker(t TestReporter, brokerID int32) *MockBroker {
return NewMockBrokerAddr(t, brokerID, "localhost:0")
}
// NewMockBrokerAddr behaves like NewMockBroker but listens on the address you give
// it rather than just some ephemeral port.
func NewMockBrokerAddr(t TestReporter, brokerID int32, addr string) *MockBroker {
var err error
broker := &MockBroker{
closing: make(chan none),
stopper: make(chan none),
t: t,
brokerID: brokerID,
expectations: make(chan encoder, 512),
}
broker.handler = broker.defaultRequestHandler
broker.listener, err = net.Listen("tcp", addr)
if err != nil {
t.Fatal(err)
}
Logger.Printf("*** mockbroker/%d listening on %s\n", brokerID, broker.listener.Addr().String())
_, portStr, err := net.SplitHostPort(broker.listener.Addr().String())
if err != nil {
t.Fatal(err)
}
tmp, err := strconv.ParseInt(portStr, 10, 32)
if err != nil {
t.Fatal(err)
}
broker.port = int32(tmp)
go broker.serverLoop()
return broker
}
func (b *MockBroker) Returns(e encoder) {
b.expectations <- e
}
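
A hypothetical test showing the expectations flow served by defaultRequestHandler above; it assumes sarama.NewClient from elsewhere in the package, and each queued response answers exactly one request, FIFO:

package example

import (
	"testing"

	"github.com/Shopify/sarama"
)

func TestAgainstMockBroker(t *testing.T) {
	b := sarama.NewMockBroker(t, 1)
	defer b.Close()

	// NewClient's initial metadata refresh will consume this queued response.
	meta := new(sarama.MetadataResponse)
	meta.AddBroker(b.Addr(), b.BrokerID())
	b.Returns(meta)

	client, err := sarama.NewClient([]string{b.Addr()}, nil)
	if err != nil {
		t.Fatal(err)
	}
	client.Close()
}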

463
vendor/github.com/Shopify/sarama/mockresponses.go generated vendored Normal file

@ -0,0 +1,463 @@
package sarama
import (
"fmt"
)
// TestReporter has methods matching go's testing.T to avoid importing
// `testing` in the main part of the library.
type TestReporter interface {
Error(...interface{})
Errorf(string, ...interface{})
Fatal(...interface{})
Fatalf(string, ...interface{})
}
// MockResponse is a response builder interface; it defines one method that
// allows generating a response based on a request body. MockResponses are used
// to program behavior of MockBroker in tests.
type MockResponse interface {
For(reqBody versionedDecoder) (res encoder)
}
// MockWrapper is a mock response builder that returns a particular concrete
// response regardless of the actual request passed to the `For` method.
type MockWrapper struct {
res encoder
}
func (mw *MockWrapper) For(reqBody versionedDecoder) (res encoder) {
return mw.res
}
func NewMockWrapper(res encoder) *MockWrapper {
return &MockWrapper{res: res}
}
// MockSequence is a mock response builder that is created from a sequence of
// concrete responses. Every time a `MockBroker` calls its `For` method,
// the next response from the sequence is returned. When the end of the
// sequence is reached the last element from the sequence is returned.
type MockSequence struct {
responses []MockResponse
}
func NewMockSequence(responses ...interface{}) *MockSequence {
ms := &MockSequence{}
ms.responses = make([]MockResponse, len(responses))
for i, res := range responses {
switch res := res.(type) {
case MockResponse:
ms.responses[i] = res
case encoder:
ms.responses[i] = NewMockWrapper(res)
default:
panic(fmt.Sprintf("Unexpected response type: %T", res))
}
}
return ms
}
func (mc *MockSequence) For(reqBody versionedDecoder) (res encoder) {
res = mc.responses[0].For(reqBody)
if len(mc.responses) > 1 {
mc.responses = mc.responses[1:]
}
return res
}
// MockMetadataResponse is a `MetadataResponse` builder.
type MockMetadataResponse struct {
leaders map[string]map[int32]int32
brokers map[string]int32
t TestReporter
}
func NewMockMetadataResponse(t TestReporter) *MockMetadataResponse {
return &MockMetadataResponse{
leaders: make(map[string]map[int32]int32),
brokers: make(map[string]int32),
t: t,
}
}
func (mmr *MockMetadataResponse) SetLeader(topic string, partition, brokerID int32) *MockMetadataResponse {
partitions := mmr.leaders[topic]
if partitions == nil {
partitions = make(map[int32]int32)
mmr.leaders[topic] = partitions
}
partitions[partition] = brokerID
return mmr
}
func (mmr *MockMetadataResponse) SetBroker(addr string, brokerID int32) *MockMetadataResponse {
mmr.brokers[addr] = brokerID
return mmr
}
func (mmr *MockMetadataResponse) For(reqBody versionedDecoder) encoder {
metadataRequest := reqBody.(*MetadataRequest)
metadataResponse := &MetadataResponse{}
for addr, brokerID := range mmr.brokers {
metadataResponse.AddBroker(addr, brokerID)
}
if len(metadataRequest.Topics) == 0 {
for topic, partitions := range mmr.leaders {
for partition, brokerID := range partitions {
metadataResponse.AddTopicPartition(topic, partition, brokerID, nil, nil, ErrNoError)
}
}
return metadataResponse
}
for _, topic := range metadataRequest.Topics {
for partition, brokerID := range mmr.leaders[topic] {
metadataResponse.AddTopicPartition(topic, partition, brokerID, nil, nil, ErrNoError)
}
}
return metadataResponse
}
// MockOffsetResponse is an `OffsetResponse` builder.
type MockOffsetResponse struct {
offsets map[string]map[int32]map[int64]int64
t TestReporter
}
func NewMockOffsetResponse(t TestReporter) *MockOffsetResponse {
return &MockOffsetResponse{
offsets: make(map[string]map[int32]map[int64]int64),
t: t,
}
}
func (mor *MockOffsetResponse) SetOffset(topic string, partition int32, time, offset int64) *MockOffsetResponse {
partitions := mor.offsets[topic]
if partitions == nil {
partitions = make(map[int32]map[int64]int64)
mor.offsets[topic] = partitions
}
times := partitions[partition]
if times == nil {
times = make(map[int64]int64)
partitions[partition] = times
}
times[time] = offset
return mor
}
func (mor *MockOffsetResponse) For(reqBody versionedDecoder) encoder {
offsetRequest := reqBody.(*OffsetRequest)
offsetResponse := &OffsetResponse{}
for topic, partitions := range offsetRequest.blocks {
for partition, block := range partitions {
offset := mor.getOffset(topic, partition, block.time)
offsetResponse.AddTopicPartition(topic, partition, offset)
}
}
return offsetResponse
}
func (mor *MockOffsetResponse) getOffset(topic string, partition int32, time int64) int64 {
partitions := mor.offsets[topic]
if partitions == nil {
mor.t.Errorf("missing topic: %s", topic)
}
times := partitions[partition]
if times == nil {
mor.t.Errorf("missing partition: %d", partition)
}
offset, ok := times[time]
if !ok {
mor.t.Errorf("missing time: %d", time)
}
return offset
}
// MockFetchResponse is a `FetchResponse` builder.
type MockFetchResponse struct {
messages map[string]map[int32]map[int64]Encoder
highWaterMarks map[string]map[int32]int64
t TestReporter
batchSize int
version int16
}
func NewMockFetchResponse(t TestReporter, batchSize int) *MockFetchResponse {
return &MockFetchResponse{
messages: make(map[string]map[int32]map[int64]Encoder),
highWaterMarks: make(map[string]map[int32]int64),
t: t,
batchSize: batchSize,
}
}
func (mfr *MockFetchResponse) SetVersion(version int16) *MockFetchResponse {
mfr.version = version
return mfr
}
func (mfr *MockFetchResponse) SetMessage(topic string, partition int32, offset int64, msg Encoder) *MockFetchResponse {
partitions := mfr.messages[topic]
if partitions == nil {
partitions = make(map[int32]map[int64]Encoder)
mfr.messages[topic] = partitions
}
messages := partitions[partition]
if messages == nil {
messages = make(map[int64]Encoder)
partitions[partition] = messages
}
messages[offset] = msg
return mfr
}
func (mfr *MockFetchResponse) SetHighWaterMark(topic string, partition int32, offset int64) *MockFetchResponse {
partitions := mfr.highWaterMarks[topic]
if partitions == nil {
partitions = make(map[int32]int64)
mfr.highWaterMarks[topic] = partitions
}
partitions[partition] = offset
return mfr
}
func (mfr *MockFetchResponse) For(reqBody versionedDecoder) encoder {
fetchRequest := reqBody.(*FetchRequest)
res := &FetchResponse{
Version: mfr.version,
}
for topic, partitions := range fetchRequest.blocks {
for partition, block := range partitions {
initialOffset := block.fetchOffset
offset := initialOffset
maxOffset := initialOffset + int64(mfr.getMessageCount(topic, partition))
for i := 0; i < mfr.batchSize && offset < maxOffset; {
msg := mfr.getMessage(topic, partition, offset)
if msg != nil {
res.AddMessage(topic, partition, nil, msg, offset)
i++
}
offset++
}
fb := res.GetBlock(topic, partition)
if fb == nil {
res.AddError(topic, partition, ErrNoError)
fb = res.GetBlock(topic, partition)
}
fb.HighWaterMarkOffset = mfr.getHighWaterMark(topic, partition)
}
}
return res
}
func (mfr *MockFetchResponse) getMessage(topic string, partition int32, offset int64) Encoder {
partitions := mfr.messages[topic]
if partitions == nil {
return nil
}
messages := partitions[partition]
if messages == nil {
return nil
}
return messages[offset]
}
func (mfr *MockFetchResponse) getMessageCount(topic string, partition int32) int {
partitions := mfr.messages[topic]
if partitions == nil {
return 0
}
messages := partitions[partition]
if messages == nil {
return 0
}
return len(messages)
}
func (mfr *MockFetchResponse) getHighWaterMark(topic string, partition int32) int64 {
partitions := mfr.highWaterMarks[topic]
if partitions == nil {
return 0
}
return partitions[partition]
}
// MockConsumerMetadataResponse is a `ConsumerMetadataResponse` builder.
type MockConsumerMetadataResponse struct {
coordinators map[string]interface{}
t TestReporter
}
func NewMockConsumerMetadataResponse(t TestReporter) *MockConsumerMetadataResponse {
return &MockConsumerMetadataResponse{
coordinators: make(map[string]interface{}),
t: t,
}
}
func (mr *MockConsumerMetadataResponse) SetCoordinator(group string, broker *MockBroker) *MockConsumerMetadataResponse {
mr.coordinators[group] = broker
return mr
}
func (mr *MockConsumerMetadataResponse) SetError(group string, kerror KError) *MockConsumerMetadataResponse {
mr.coordinators[group] = kerror
return mr
}
func (mr *MockConsumerMetadataResponse) For(reqBody versionedDecoder) encoder {
req := reqBody.(*ConsumerMetadataRequest)
group := req.ConsumerGroup
res := &ConsumerMetadataResponse{}
v := mr.coordinators[group]
switch v := v.(type) {
case *MockBroker:
res.Coordinator = &Broker{id: v.BrokerID(), addr: v.Addr()}
case KError:
res.Err = v
}
return res
}
// MockOffsetCommitResponse is a `OffsetCommitResponse` builder.
type MockOffsetCommitResponse struct {
errors map[string]map[string]map[int32]KError
t TestReporter
}
func NewMockOffsetCommitResponse(t TestReporter) *MockOffsetCommitResponse {
return &MockOffsetCommitResponse{t: t}
}
func (mr *MockOffsetCommitResponse) SetError(group, topic string, partition int32, kerror KError) *MockOffsetCommitResponse {
if mr.errors == nil {
mr.errors = make(map[string]map[string]map[int32]KError)
}
topics := mr.errors[group]
if topics == nil {
topics = make(map[string]map[int32]KError)
mr.errors[group] = topics
}
partitions := topics[topic]
if partitions == nil {
partitions = make(map[int32]KError)
topics[topic] = partitions
}
partitions[partition] = kerror
return mr
}
func (mr *MockOffsetCommitResponse) For(reqBody versionedDecoder) encoder {
req := reqBody.(*OffsetCommitRequest)
group := req.ConsumerGroup
res := &OffsetCommitResponse{}
for topic, partitions := range req.blocks {
for partition := range partitions {
res.AddError(topic, partition, mr.getError(group, topic, partition))
}
}
return res
}
func (mr *MockOffsetCommitResponse) getError(group, topic string, partition int32) KError {
topics := mr.errors[group]
if topics == nil {
return ErrNoError
}
partitions := topics[topic]
if partitions == nil {
return ErrNoError
}
kerror, ok := partitions[partition]
if !ok {
return ErrNoError
}
return kerror
}
// MockProduceResponse is a `ProduceResponse` builder.
type MockProduceResponse struct {
errors map[string]map[int32]KError
t TestReporter
}
func NewMockProduceResponse(t TestReporter) *MockProduceResponse {
return &MockProduceResponse{t: t}
}
func (mr *MockProduceResponse) SetError(topic string, partition int32, kerror KError) *MockProduceResponse {
if mr.errors == nil {
mr.errors = make(map[string]map[int32]KError)
}
partitions := mr.errors[topic]
if partitions == nil {
partitions = make(map[int32]KError)
mr.errors[topic] = partitions
}
partitions[partition] = kerror
return mr
}
func (mr *MockProduceResponse) For(reqBody versionedDecoder) encoder {
req := reqBody.(*ProduceRequest)
res := &ProduceResponse{}
for topic, partitions := range req.msgSets {
for partition := range partitions {
res.AddTopicPartition(topic, partition, mr.getError(topic, partition))
}
}
return res
}
func (mr *MockProduceResponse) getError(topic string, partition int32) KError {
partitions := mr.errors[topic]
if partitions == nil {
return ErrNoError
}
kerror, ok := partitions[partition]
if !ok {
return ErrNoError
}
return kerror
}
// MockOffsetFetchResponse is a `OffsetFetchResponse` builder.
type MockOffsetFetchResponse struct {
offsets map[string]map[string]map[int32]*OffsetFetchResponseBlock
t TestReporter
}
func NewMockOffsetFetchResponse(t TestReporter) *MockOffsetFetchResponse {
return &MockOffsetFetchResponse{t: t}
}
func (mr *MockOffsetFetchResponse) SetOffset(group, topic string, partition int32, offset int64, metadata string, kerror KError) *MockOffsetFetchResponse {
if mr.offsets == nil {
mr.offsets = make(map[string]map[string]map[int32]*OffsetFetchResponseBlock)
}
topics := mr.offsets[group]
if topics == nil {
topics = make(map[string]map[int32]*OffsetFetchResponseBlock)
mr.offsets[group] = topics
}
partitions := topics[topic]
if partitions == nil {
partitions = make(map[int32]*OffsetFetchResponseBlock)
topics[topic] = partitions
}
partitions[partition] = &OffsetFetchResponseBlock{offset, metadata, kerror}
return mr
}
func (mr *MockOffsetFetchResponse) For(reqBody versionedDecoder) encoder {
req := reqBody.(*OffsetFetchRequest)
group := req.ConsumerGroup
res := &OffsetFetchResponse{}
for topic, partitions := range mr.offsets[group] {
for partition, block := range partitions {
res.AddBlock(topic, partition, block)
}
}
return res
}
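
A hypothetical test wiring SetHandlerByMap to the builders defined in this file; the topic and offsets are illustrative, and OffsetOldest/OffsetNewest come from elsewhere in the package:

package example

import (
	"testing"

	"github.com/Shopify/sarama"
)

func TestWithHandlerMap(t *testing.T) {
	b := sarama.NewMockBroker(t, 1)
	defer b.Close()

	// Request type names are matched by reflection in SetHandlerByMap.
	b.SetHandlerByMap(map[string]sarama.MockResponse{
		"MetadataRequest": sarama.NewMockMetadataResponse(t).
			SetBroker(b.Addr(), b.BrokerID()).
			SetLeader("events", 0, b.BrokerID()),
		"OffsetRequest": sarama.NewMockOffsetResponse(t).
			SetOffset("events", 0, sarama.OffsetOldest, 0).
			SetOffset("events", 0, sarama.OffsetNewest, 1),
	})
	// A consumer pointed at b.Addr() would now see partition 0 of "events".
}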


@ -0,0 +1,190 @@
package sarama
// ReceiveTime is a special value for the timestamp field of Offset Commit Requests which
// tells the broker to set the timestamp to the time at which the request was received.
// The timestamp is only used if message version 1 is used, which requires kafka 0.8.2.
const ReceiveTime int64 = -1
// GroupGenerationUndefined is a special value for the group generation field of
// Offset Commit Requests that should be used when a consumer group does not rely
// on Kafka for partition management.
const GroupGenerationUndefined = -1
type offsetCommitRequestBlock struct {
offset int64
timestamp int64
metadata string
}
func (b *offsetCommitRequestBlock) encode(pe packetEncoder, version int16) error {
pe.putInt64(b.offset)
if version == 1 {
pe.putInt64(b.timestamp)
} else if b.timestamp != 0 {
Logger.Println("Non-zero timestamp specified for OffsetCommitRequest not v1, it will be ignored")
}
return pe.putString(b.metadata)
}
func (b *offsetCommitRequestBlock) decode(pd packetDecoder, version int16) (err error) {
if b.offset, err = pd.getInt64(); err != nil {
return err
}
if version == 1 {
if b.timestamp, err = pd.getInt64(); err != nil {
return err
}
}
b.metadata, err = pd.getString()
return err
}
type OffsetCommitRequest struct {
ConsumerGroup string
ConsumerGroupGeneration int32 // v1 or later
ConsumerID string // v1 or later
RetentionTime int64 // v2 or later
// Version can be:
// - 0 (kafka 0.8.1 and later)
// - 1 (kafka 0.8.2 and later)
// - 2 (kafka 0.9.0 and later)
Version int16
blocks map[string]map[int32]*offsetCommitRequestBlock
}
func (r *OffsetCommitRequest) encode(pe packetEncoder) error {
if r.Version < 0 || r.Version > 2 {
return PacketEncodingError{"invalid or unsupported OffsetCommitRequest version field"}
}
if err := pe.putString(r.ConsumerGroup); err != nil {
return err
}
if r.Version >= 1 {
pe.putInt32(r.ConsumerGroupGeneration)
if err := pe.putString(r.ConsumerID); err != nil {
return err
}
} else {
if r.ConsumerGroupGeneration != 0 {
Logger.Println("Non-zero ConsumerGroupGeneration specified for OffsetCommitRequest v0, it will be ignored")
}
if r.ConsumerID != "" {
Logger.Println("Non-empty ConsumerID specified for OffsetCommitRequest v0, it will be ignored")
}
}
if r.Version >= 2 {
pe.putInt64(r.RetentionTime)
} else if r.RetentionTime != 0 {
Logger.Println("Non-zero RetentionTime specified for OffsetCommitRequest version <2, it will be ignored")
}
if err := pe.putArrayLength(len(r.blocks)); err != nil {
return err
}
for topic, partitions := range r.blocks {
if err := pe.putString(topic); err != nil {
return err
}
if err := pe.putArrayLength(len(partitions)); err != nil {
return err
}
for partition, block := range partitions {
pe.putInt32(partition)
if err := block.encode(pe, r.Version); err != nil {
return err
}
}
}
return nil
}
func (r *OffsetCommitRequest) decode(pd packetDecoder, version int16) (err error) {
r.Version = version
if r.ConsumerGroup, err = pd.getString(); err != nil {
return err
}
if r.Version >= 1 {
if r.ConsumerGroupGeneration, err = pd.getInt32(); err != nil {
return err
}
if r.ConsumerID, err = pd.getString(); err != nil {
return err
}
}
if r.Version >= 2 {
if r.RetentionTime, err = pd.getInt64(); err != nil {
return err
}
}
topicCount, err := pd.getArrayLength()
if err != nil {
return err
}
if topicCount == 0 {
return nil
}
r.blocks = make(map[string]map[int32]*offsetCommitRequestBlock)
for i := 0; i < topicCount; i++ {
topic, err := pd.getString()
if err != nil {
return err
}
partitionCount, err := pd.getArrayLength()
if err != nil {
return err
}
r.blocks[topic] = make(map[int32]*offsetCommitRequestBlock)
for j := 0; j < partitionCount; j++ {
partition, err := pd.getInt32()
if err != nil {
return err
}
block := &offsetCommitRequestBlock{}
if err := block.decode(pd, r.Version); err != nil {
return err
}
r.blocks[topic][partition] = block
}
}
return nil
}
func (r *OffsetCommitRequest) key() int16 {
return 8
}
func (r *OffsetCommitRequest) version() int16 {
return r.Version
}
func (r *OffsetCommitRequest) requiredVersion() KafkaVersion {
switch r.Version {
case 1:
return V0_8_2_0
case 2:
return V0_9_0_0
default:
return minVersion
}
}
func (r *OffsetCommitRequest) AddBlock(topic string, partitionID int32, offset int64, timestamp int64, metadata string) {
if r.blocks == nil {
r.blocks = make(map[string]map[int32]*offsetCommitRequestBlock)
}
if r.blocks[topic] == nil {
r.blocks[topic] = make(map[int32]*offsetCommitRequestBlock)
}
r.blocks[topic][partitionID] = &offsetCommitRequestBlock{offset, timestamp, metadata}
}
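
A sketch of committing a single offset with the helper above; ReceiveTime and GroupGenerationUndefined are the constants defined at the top of this file, while the group and topic are made up:

package main

import "github.com/Shopify/sarama"

func main() {
	req := &sarama.OffsetCommitRequest{
		Version:                 1, // v1 requires Kafka 0.8.2+, per requiredVersion above
		ConsumerGroup:           "example-group",
		ConsumerGroupGeneration: sarama.GroupGenerationUndefined,
	}
	// ReceiveTime asks the broker to stamp the commit with its own clock (v1 only).
	req.AddBlock("events", 0, 42, sarama.ReceiveTime, "")
	_ = req
}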


@ -0,0 +1,85 @@
package sarama
type OffsetCommitResponse struct {
Errors map[string]map[int32]KError
}
func (r *OffsetCommitResponse) AddError(topic string, partition int32, kerror KError) {
if r.Errors == nil {
r.Errors = make(map[string]map[int32]KError)
}
partitions := r.Errors[topic]
if partitions == nil {
partitions = make(map[int32]KError)
r.Errors[topic] = partitions
}
partitions[partition] = kerror
}
func (r *OffsetCommitResponse) encode(pe packetEncoder) error {
if err := pe.putArrayLength(len(r.Errors)); err != nil {
return err
}
for topic, partitions := range r.Errors {
if err := pe.putString(topic); err != nil {
return err
}
if err := pe.putArrayLength(len(partitions)); err != nil {
return err
}
for partition, kerror := range partitions {
pe.putInt32(partition)
pe.putInt16(int16(kerror))
}
}
return nil
}
func (r *OffsetCommitResponse) decode(pd packetDecoder, version int16) (err error) {
numTopics, err := pd.getArrayLength()
if err != nil || numTopics == 0 {
return err
}
r.Errors = make(map[string]map[int32]KError, numTopics)
for i := 0; i < numTopics; i++ {
name, err := pd.getString()
if err != nil {
return err
}
numErrors, err := pd.getArrayLength()
if err != nil {
return err
}
r.Errors[name] = make(map[int32]KError, numErrors)
for j := 0; j < numErrors; j++ {
id, err := pd.getInt32()
if err != nil {
return err
}
tmp, err := pd.getInt16()
if err != nil {
return err
}
r.Errors[name][id] = KError(tmp)
}
}
return nil
}
func (r *OffsetCommitResponse) key() int16 {
return 8
}
func (r *OffsetCommitResponse) version() int16 {
return 0
}
func (r *OffsetCommitResponse) requiredVersion() KafkaVersion {
return minVersion
}


@ -0,0 +1,81 @@
package sarama
type OffsetFetchRequest struct {
ConsumerGroup string
Version int16
partitions map[string][]int32
}
func (r *OffsetFetchRequest) encode(pe packetEncoder) (err error) {
if r.Version < 0 || r.Version > 1 {
return PacketEncodingError{"invalid or unsupported OffsetFetchRequest version field"}
}
if err = pe.putString(r.ConsumerGroup); err != nil {
return err
}
if err = pe.putArrayLength(len(r.partitions)); err != nil {
return err
}
for topic, partitions := range r.partitions {
if err = pe.putString(topic); err != nil {
return err
}
if err = pe.putInt32Array(partitions); err != nil {
return err
}
}
return nil
}
func (r *OffsetFetchRequest) decode(pd packetDecoder, version int16) (err error) {
r.Version = version
if r.ConsumerGroup, err = pd.getString(); err != nil {
return err
}
partitionCount, err := pd.getArrayLength()
if err != nil {
return err
}
if partitionCount == 0 {
return nil
}
r.partitions = make(map[string][]int32)
for i := 0; i < partitionCount; i++ {
topic, err := pd.getString()
if err != nil {
return err
}
partitions, err := pd.getInt32Array()
if err != nil {
return err
}
r.partitions[topic] = partitions
}
return nil
}
func (r *OffsetFetchRequest) key() int16 {
return 9
}
func (r *OffsetFetchRequest) version() int16 {
return r.Version
}
func (r *OffsetFetchRequest) requiredVersion() KafkaVersion {
switch r.Version {
case 1:
return V0_8_2_0
default:
return minVersion
}
}
func (r *OffsetFetchRequest) AddPartition(topic string, partitionID int32) {
if r.partitions == nil {
r.partitions = make(map[string][]int32)
}
r.partitions[topic] = append(r.partitions[topic], partitionID)
}


@ -0,0 +1,143 @@
package sarama
type OffsetFetchResponseBlock struct {
Offset int64
Metadata string
Err KError
}
func (b *OffsetFetchResponseBlock) decode(pd packetDecoder) (err error) {
b.Offset, err = pd.getInt64()
if err != nil {
return err
}
b.Metadata, err = pd.getString()
if err != nil {
return err
}
tmp, err := pd.getInt16()
if err != nil {
return err
}
b.Err = KError(tmp)
return nil
}
func (b *OffsetFetchResponseBlock) encode(pe packetEncoder) (err error) {
pe.putInt64(b.Offset)
err = pe.putString(b.Metadata)
if err != nil {
return err
}
pe.putInt16(int16(b.Err))
return nil
}
type OffsetFetchResponse struct {
Blocks map[string]map[int32]*OffsetFetchResponseBlock
}
func (r *OffsetFetchResponse) encode(pe packetEncoder) error {
if err := pe.putArrayLength(len(r.Blocks)); err != nil {
return err
}
for topic, partitions := range r.Blocks {
if err := pe.putString(topic); err != nil {
return err
}
if err := pe.putArrayLength(len(partitions)); err != nil {
return err
}
for partition, block := range partitions {
pe.putInt32(partition)
if err := block.encode(pe); err != nil {
return err
}
}
}
return nil
}
func (r *OffsetFetchResponse) decode(pd packetDecoder, version int16) (err error) {
numTopics, err := pd.getArrayLength()
if err != nil || numTopics == 0 {
return err
}
r.Blocks = make(map[string]map[int32]*OffsetFetchResponseBlock, numTopics)
for i := 0; i < numTopics; i++ {
name, err := pd.getString()
if err != nil {
return err
}
numBlocks, err := pd.getArrayLength()
if err != nil {
return err
}
if numBlocks == 0 {
r.Blocks[name] = nil
continue
}
r.Blocks[name] = make(map[int32]*OffsetFetchResponseBlock, numBlocks)
for j := 0; j < numBlocks; j++ {
id, err := pd.getInt32()
if err != nil {
return err
}
block := new(OffsetFetchResponseBlock)
err = block.decode(pd)
if err != nil {
return err
}
r.Blocks[name][id] = block
}
}
return nil
}
func (r *OffsetFetchResponse) key() int16 {
return 9
}
func (r *OffsetFetchResponse) version() int16 {
return 0
}
func (r *OffsetFetchResponse) requiredVersion() KafkaVersion {
return minVersion
}
func (r *OffsetFetchResponse) GetBlock(topic string, partition int32) *OffsetFetchResponseBlock {
if r.Blocks == nil {
return nil
}
if r.Blocks[topic] == nil {
return nil
}
return r.Blocks[topic][partition]
}
func (r *OffsetFetchResponse) AddBlock(topic string, partition int32, block *OffsetFetchResponseBlock) {
if r.Blocks == nil {
r.Blocks = make(map[string]map[int32]*OffsetFetchResponseBlock)
}
partitions := r.Blocks[topic]
if partitions == nil {
partitions = make(map[int32]*OffsetFetchResponseBlock)
r.Blocks[topic] = partitions
}
partitions[partition] = block
}

560
vendor/github.com/Shopify/sarama/offset_manager.go generated vendored Normal file

@ -0,0 +1,560 @@
package sarama
import (
"sync"
"time"
)
// Offset Manager
// OffsetManager uses Kafka to store and fetch consumed partition offsets.
type OffsetManager interface {
// ManagePartition creates a PartitionOffsetManager on the given topic/partition.
// It will return an error if this OffsetManager is already managing the given
// topic/partition.
ManagePartition(topic string, partition int32) (PartitionOffsetManager, error)
// Close stops the OffsetManager from managing offsets. It is required to call
// this function before an OffsetManager object passes out of scope, as it
// will otherwise leak memory. You must call this after all the
// PartitionOffsetManagers are closed.
Close() error
}
type offsetManager struct {
client Client
conf *Config
group string
lock sync.Mutex
poms map[string]map[int32]*partitionOffsetManager
boms map[*Broker]*brokerOffsetManager
}
// NewOffsetManagerFromClient creates a new OffsetManager from the given client.
// It is still necessary to call Close() on the underlying client when finished with the offset manager.
func NewOffsetManagerFromClient(group string, client Client) (OffsetManager, error) {
// Check that we are not dealing with a closed Client before processing any other arguments
if client.Closed() {
return nil, ErrClosedClient
}
om := &offsetManager{
client: client,
conf: client.Config(),
group: group,
poms: make(map[string]map[int32]*partitionOffsetManager),
boms: make(map[*Broker]*brokerOffsetManager),
}
return om, nil
}
func (om *offsetManager) ManagePartition(topic string, partition int32) (PartitionOffsetManager, error) {
pom, err := om.newPartitionOffsetManager(topic, partition)
if err != nil {
return nil, err
}
om.lock.Lock()
defer om.lock.Unlock()
topicManagers := om.poms[topic]
if topicManagers == nil {
topicManagers = make(map[int32]*partitionOffsetManager)
om.poms[topic] = topicManagers
}
if topicManagers[partition] != nil {
return nil, ConfigurationError("That topic/partition is already being managed")
}
topicManagers[partition] = pom
return pom, nil
}
func (om *offsetManager) Close() error {
return nil
}
func (om *offsetManager) refBrokerOffsetManager(broker *Broker) *brokerOffsetManager {
om.lock.Lock()
defer om.lock.Unlock()
bom := om.boms[broker]
if bom == nil {
bom = om.newBrokerOffsetManager(broker)
om.boms[broker] = bom
}
bom.refs++
return bom
}
func (om *offsetManager) unrefBrokerOffsetManager(bom *brokerOffsetManager) {
om.lock.Lock()
defer om.lock.Unlock()
bom.refs--
if bom.refs == 0 {
close(bom.updateSubscriptions)
if om.boms[bom.broker] == bom {
delete(om.boms, bom.broker)
}
}
}
func (om *offsetManager) abandonBroker(bom *brokerOffsetManager) {
om.lock.Lock()
defer om.lock.Unlock()
delete(om.boms, bom.broker)
}
func (om *offsetManager) abandonPartitionOffsetManager(pom *partitionOffsetManager) {
om.lock.Lock()
defer om.lock.Unlock()
delete(om.poms[pom.topic], pom.partition)
if len(om.poms[pom.topic]) == 0 {
delete(om.poms, pom.topic)
}
}
// Partition Offset Manager
// PartitionOffsetManager uses Kafka to store and fetch consumed partition offsets. You MUST call Close()
// on a partition offset manager to avoid leaks; it will not be garbage-collected automatically when it passes
// out of scope.
type PartitionOffsetManager interface {
// NextOffset returns the next offset that should be consumed for the managed
// partition, accompanied by metadata which can be used to reconstruct the state
// of the partition consumer when it resumes. NextOffset() will return
// `config.Consumer.Offsets.Initial` and an empty metadata string if no offset
// was committed for this partition yet.
NextOffset() (int64, string)
// MarkOffset marks the provided offset, alongside a metadata string
// that represents the state of the partition consumer at that point in time. The
// metadata string can be used by another consumer to restore that state, so it
// can resume consumption.
//
// To follow upstream conventions, you are expected to mark the offset of the
// next message to read, not the last message read. Thus, when calling `MarkOffset`
// you should typically add one to the offset of the last consumed message.
//
// Note: calling MarkOffset does not necessarily commit the offset to the backend
// store immediately for efficiency reasons, and it may never be committed if
// your application crashes. This means that you may end up processing the same
// message twice, and your processing should ideally be idempotent.
MarkOffset(offset int64, metadata string)
// ResetOffset resets to the provided offset, alongside a metadata string that
// represents the state of the partition consumer at that point in time. Reset
// acts as a counterpart to MarkOffset, the difference being that it allows
// resetting an offset to an earlier or smaller value, where MarkOffset only
// allows incrementing the offset. See MarkOffset for more details.
ResetOffset(offset int64, metadata string)
// Errors returns a read channel of errors that occur during offset management, if
// enabled. By default, errors are logged and not returned over this channel. If
// you want to implement any custom error handling, set your config's
// Consumer.Return.Errors setting to true, and read from this channel.
Errors() <-chan *ConsumerError
// AsyncClose initiates a shutdown of the PartitionOffsetManager. This method will
// return immediately, after which you should wait until the 'errors' channel has
// been drained and closed. It is required to call this function, or Close, before
// a consumer object passes out of scope, as it will otherwise leak memory. You
// must call this before calling Close on the underlying client.
AsyncClose()
// Close stops the PartitionOffsetManager from managing offsets. It is required to
// call this function (or AsyncClose) before a PartitionOffsetManager object
// passes out of scope, as it will otherwise leak memory. You must call this
// before calling Close on the underlying client.
Close() error
}
type partitionOffsetManager struct {
parent *offsetManager
topic string
partition int32
lock sync.Mutex
offset int64
metadata string
dirty bool
clean sync.Cond
broker *brokerOffsetManager
errors chan *ConsumerError
rebalance chan none
dying chan none
}
func (om *offsetManager) newPartitionOffsetManager(topic string, partition int32) (*partitionOffsetManager, error) {
pom := &partitionOffsetManager{
parent: om,
topic: topic,
partition: partition,
errors: make(chan *ConsumerError, om.conf.ChannelBufferSize),
rebalance: make(chan none, 1),
dying: make(chan none),
}
pom.clean.L = &pom.lock
if err := pom.selectBroker(); err != nil {
return nil, err
}
if err := pom.fetchInitialOffset(om.conf.Metadata.Retry.Max); err != nil {
return nil, err
}
pom.broker.updateSubscriptions <- pom
go withRecover(pom.mainLoop)
return pom, nil
}
func (pom *partitionOffsetManager) mainLoop() {
for {
select {
case <-pom.rebalance:
if err := pom.selectBroker(); err != nil {
pom.handleError(err)
pom.rebalance <- none{}
} else {
pom.broker.updateSubscriptions <- pom
}
case <-pom.dying:
if pom.broker != nil {
select {
case <-pom.rebalance:
case pom.broker.updateSubscriptions <- pom:
}
pom.parent.unrefBrokerOffsetManager(pom.broker)
}
pom.parent.abandonPartitionOffsetManager(pom)
close(pom.errors)
return
}
}
}
func (pom *partitionOffsetManager) selectBroker() error {
if pom.broker != nil {
pom.parent.unrefBrokerOffsetManager(pom.broker)
pom.broker = nil
}
var broker *Broker
var err error
if err = pom.parent.client.RefreshCoordinator(pom.parent.group); err != nil {
return err
}
if broker, err = pom.parent.client.Coordinator(pom.parent.group); err != nil {
return err
}
pom.broker = pom.parent.refBrokerOffsetManager(broker)
return nil
}
func (pom *partitionOffsetManager) fetchInitialOffset(retries int) error {
request := new(OffsetFetchRequest)
request.Version = 1
request.ConsumerGroup = pom.parent.group
request.AddPartition(pom.topic, pom.partition)
response, err := pom.broker.broker.FetchOffset(request)
if err != nil {
return err
}
block := response.GetBlock(pom.topic, pom.partition)
if block == nil {
return ErrIncompleteResponse
}
switch block.Err {
case ErrNoError:
pom.offset = block.Offset
pom.metadata = block.Metadata
return nil
case ErrNotCoordinatorForConsumer:
if retries <= 0 {
return block.Err
}
if err := pom.selectBroker(); err != nil {
return err
}
return pom.fetchInitialOffset(retries - 1)
case ErrOffsetsLoadInProgress:
if retries <= 0 {
return block.Err
}
time.Sleep(pom.parent.conf.Metadata.Retry.Backoff)
return pom.fetchInitialOffset(retries - 1)
default:
return block.Err
}
}
func (pom *partitionOffsetManager) handleError(err error) {
cErr := &ConsumerError{
Topic: pom.topic,
Partition: pom.partition,
Err: err,
}
if pom.parent.conf.Consumer.Return.Errors {
pom.errors <- cErr
} else {
Logger.Println(cErr)
}
}
func (pom *partitionOffsetManager) Errors() <-chan *ConsumerError {
return pom.errors
}
func (pom *partitionOffsetManager) MarkOffset(offset int64, metadata string) {
pom.lock.Lock()
defer pom.lock.Unlock()
if offset > pom.offset {
pom.offset = offset
pom.metadata = metadata
pom.dirty = true
}
}
func (pom *partitionOffsetManager) ResetOffset(offset int64, metadata string) {
pom.lock.Lock()
defer pom.lock.Unlock()
if offset <= pom.offset {
pom.offset = offset
pom.metadata = metadata
pom.dirty = true
}
}
func (pom *partitionOffsetManager) updateCommitted(offset int64, metadata string) {
pom.lock.Lock()
defer pom.lock.Unlock()
if pom.offset == offset && pom.metadata == metadata {
pom.dirty = false
pom.clean.Signal()
}
}
func (pom *partitionOffsetManager) NextOffset() (int64, string) {
pom.lock.Lock()
defer pom.lock.Unlock()
if pom.offset >= 0 {
return pom.offset, pom.metadata
}
return pom.parent.conf.Consumer.Offsets.Initial, ""
}
func (pom *partitionOffsetManager) AsyncClose() {
go func() {
pom.lock.Lock()
defer pom.lock.Unlock()
for pom.dirty {
pom.clean.Wait()
}
close(pom.dying)
}()
}
func (pom *partitionOffsetManager) Close() error {
pom.AsyncClose()
var errors ConsumerErrors
for err := range pom.errors {
errors = append(errors, err)
}
if len(errors) > 0 {
return errors
}
return nil
}
// Broker Offset Manager
type brokerOffsetManager struct {
parent *offsetManager
broker *Broker
timer *time.Ticker
updateSubscriptions chan *partitionOffsetManager
subscriptions map[*partitionOffsetManager]none
refs int
}
func (om *offsetManager) newBrokerOffsetManager(broker *Broker) *brokerOffsetManager {
bom := &brokerOffsetManager{
parent: om,
broker: broker,
timer: time.NewTicker(om.conf.Consumer.Offsets.CommitInterval),
updateSubscriptions: make(chan *partitionOffsetManager),
subscriptions: make(map[*partitionOffsetManager]none),
}
go withRecover(bom.mainLoop)
return bom
}
func (bom *brokerOffsetManager) mainLoop() {
for {
select {
case <-bom.timer.C:
if len(bom.subscriptions) > 0 {
bom.flushToBroker()
}
case s, ok := <-bom.updateSubscriptions:
if !ok {
bom.timer.Stop()
return
}
if _, ok := bom.subscriptions[s]; ok {
delete(bom.subscriptions, s)
} else {
bom.subscriptions[s] = none{}
}
}
}
}
func (bom *brokerOffsetManager) flushToBroker() {
request := bom.constructRequest()
if request == nil {
return
}
response, err := bom.broker.CommitOffset(request)
if err != nil {
bom.abort(err)
return
}
for s := range bom.subscriptions {
if request.blocks[s.topic] == nil || request.blocks[s.topic][s.partition] == nil {
continue
}
var err KError
var ok bool
if response.Errors[s.topic] == nil {
s.handleError(ErrIncompleteResponse)
delete(bom.subscriptions, s)
s.rebalance <- none{}
continue
}
if err, ok = response.Errors[s.topic][s.partition]; !ok {
s.handleError(ErrIncompleteResponse)
delete(bom.subscriptions, s)
s.rebalance <- none{}
continue
}
switch err {
case ErrNoError:
block := request.blocks[s.topic][s.partition]
s.updateCommitted(block.offset, block.metadata)
case ErrNotLeaderForPartition, ErrLeaderNotAvailable,
ErrConsumerCoordinatorNotAvailable, ErrNotCoordinatorForConsumer:
// not a critical error, we just need to redispatch
delete(bom.subscriptions, s)
s.rebalance <- none{}
case ErrOffsetMetadataTooLarge, ErrInvalidCommitOffsetSize:
// nothing we can do about this, just tell the user and carry on
s.handleError(err)
case ErrOffsetsLoadInProgress:
// nothing wrong but we didn't commit, we'll get it next time round
break
case ErrUnknownTopicOrPartition:
// let the user know *and* try redispatching - if topic-auto-create is
// enabled, redispatching should trigger a metadata request and create the
// topic; if not then re-dispatching won't help, but we've let the user
// know and it shouldn't hurt either (see https://github.com/Shopify/sarama/issues/706)
fallthrough
default:
// dunno, tell the user and try redispatching
s.handleError(err)
delete(bom.subscriptions, s)
s.rebalance <- none{}
}
}
}
func (bom *brokerOffsetManager) constructRequest() *OffsetCommitRequest {
var r *OffsetCommitRequest
var perPartitionTimestamp int64
if bom.parent.conf.Consumer.Offsets.Retention == 0 {
perPartitionTimestamp = ReceiveTime
r = &OffsetCommitRequest{
Version: 1,
ConsumerGroup: bom.parent.group,
ConsumerGroupGeneration: GroupGenerationUndefined,
}
} else {
r = &OffsetCommitRequest{
Version: 2,
RetentionTime: int64(bom.parent.conf.Consumer.Offsets.Retention / time.Millisecond),
ConsumerGroup: bom.parent.group,
ConsumerGroupGeneration: GroupGenerationUndefined,
}
}
for s := range bom.subscriptions {
s.lock.Lock()
if s.dirty {
r.AddBlock(s.topic, s.partition, s.offset, perPartitionTimestamp, s.metadata)
}
s.lock.Unlock()
}
if len(r.blocks) > 0 {
return r
}
return nil
}
func (bom *brokerOffsetManager) abort(err error) {
_ = bom.broker.Close() // we don't care about the error this might return, we already have one
bom.parent.abandonBroker(bom)
for pom := range bom.subscriptions {
pom.handleError(err)
pom.rebalance <- none{}
}
for s := range bom.updateSubscriptions {
if _, ok := bom.subscriptions[s]; !ok {
s.handleError(err)
s.rebalance <- none{}
}
}
bom.subscriptions = make(map[*partitionOffsetManager]none)
}
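A minimal lifecycle sketch (not part of the vendored file) following the Close-ordering rules the doc comments above prescribe: close each PartitionOffsetManager first, then the OffsetManager, and only then the underlying Client. NewClient and NewConfig are assumed from elsewhere in the package.

import "github.com/Shopify/sarama"

func consumeWithManagedOffsets(brokers []string, group, topic string, partition int32) error {
	client, err := sarama.NewClient(brokers, sarama.NewConfig())
	if err != nil {
		return err
	}
	defer client.Close() // deferred first, so it runs last

	om, err := sarama.NewOffsetManagerFromClient(group, client)
	if err != nil {
		return err
	}
	defer om.Close()

	pom, err := om.ManagePartition(topic, partition)
	if err != nil {
		return err
	}
	defer pom.Close() // blocks until the last marked offset is committed

	next, _ := pom.NextOffset() // where to resume consuming
	// ... consume the message at offset `next` ...
	pom.MarkOffset(next+1, "") // mark the offset of the *next* message to read
	return nil
}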

132
vendor/github.com/Shopify/sarama/offset_request.go generated vendored Normal file
View File

@ -0,0 +1,132 @@
package sarama
type offsetRequestBlock struct {
time int64
maxOffsets int32 // Only used in version 0
}
func (b *offsetRequestBlock) encode(pe packetEncoder, version int16) error {
pe.putInt64(int64(b.time))
if version == 0 {
pe.putInt32(b.maxOffsets)
}
return nil
}
func (b *offsetRequestBlock) decode(pd packetDecoder, version int16) (err error) {
if b.time, err = pd.getInt64(); err != nil {
return err
}
if version == 0 {
if b.maxOffsets, err = pd.getInt32(); err != nil {
return err
}
}
return nil
}
type OffsetRequest struct {
Version int16
blocks map[string]map[int32]*offsetRequestBlock
}
func (r *OffsetRequest) encode(pe packetEncoder) error {
pe.putInt32(-1) // replica ID is always -1 for clients
err := pe.putArrayLength(len(r.blocks))
if err != nil {
return err
}
for topic, partitions := range r.blocks {
err = pe.putString(topic)
if err != nil {
return err
}
err = pe.putArrayLength(len(partitions))
if err != nil {
return err
}
for partition, block := range partitions {
pe.putInt32(partition)
if err = block.encode(pe, r.Version); err != nil {
return err
}
}
}
return nil
}
func (r *OffsetRequest) decode(pd packetDecoder, version int16) error {
r.Version = version
// Ignore replica ID
if _, err := pd.getInt32(); err != nil {
return err
}
blockCount, err := pd.getArrayLength()
if err != nil {
return err
}
if blockCount == 0 {
return nil
}
r.blocks = make(map[string]map[int32]*offsetRequestBlock)
for i := 0; i < blockCount; i++ {
topic, err := pd.getString()
if err != nil {
return err
}
partitionCount, err := pd.getArrayLength()
if err != nil {
return err
}
r.blocks[topic] = make(map[int32]*offsetRequestBlock)
for j := 0; j < partitionCount; j++ {
partition, err := pd.getInt32()
if err != nil {
return err
}
block := &offsetRequestBlock{}
if err := block.decode(pd, version); err != nil {
return err
}
r.blocks[topic][partition] = block
}
}
return nil
}
func (r *OffsetRequest) key() int16 {
return 2
}
func (r *OffsetRequest) version() int16 {
return r.Version
}
func (r *OffsetRequest) requiredVersion() KafkaVersion {
switch r.Version {
case 1:
return V0_10_1_0
default:
return minVersion
}
}
func (r *OffsetRequest) AddBlock(topic string, partitionID int32, time int64, maxOffsets int32) {
if r.blocks == nil {
r.blocks = make(map[string]map[int32]*offsetRequestBlock)
}
if r.blocks[topic] == nil {
r.blocks[topic] = make(map[int32]*offsetRequestBlock)
}
tmp := new(offsetRequestBlock)
tmp.time = time
if r.Version == 0 {
tmp.maxOffsets = maxOffsets
}
r.blocks[topic][partitionID] = tmp
}
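A minimal sketch (not part of the vendored file): querying a broker for a partition's newest offset with a Version 0 request, where maxOffsets is honored. Broker.GetAvailableOffsets and the OffsetNewest sentinel are assumptions taken from elsewhere in the package.

import "github.com/Shopify/sarama"

func newestOffset(broker *sarama.Broker, topic string, partition int32) (int64, error) {
	req := &sarama.OffsetRequest{} // Version 0
	req.AddBlock(topic, partition, sarama.OffsetNewest, 1) // ask for at most one offset
	resp, err := broker.GetAvailableOffsets(req)
	if err != nil {
		return 0, err
	}
	block := resp.GetBlock(topic, partition)
	if block == nil {
		return 0, sarama.ErrIncompleteResponse
	}
	if block.Err != sarama.ErrNoError {
		return 0, block.Err
	}
	if len(block.Offsets) == 0 {
		return 0, sarama.ErrIncompleteResponse
	}
	return block.Offsets[0], nil
}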

174
vendor/github.com/Shopify/sarama/offset_response.go generated vendored Normal file
View File

@ -0,0 +1,174 @@
package sarama
type OffsetResponseBlock struct {
Err KError
Offsets []int64 // Version 0
Offset int64 // Version 1
Timestamp int64 // Version 1
}
func (b *OffsetResponseBlock) decode(pd packetDecoder, version int16) (err error) {
tmp, err := pd.getInt16()
if err != nil {
return err
}
b.Err = KError(tmp)
if version == 0 {
b.Offsets, err = pd.getInt64Array()
return err
}
b.Timestamp, err = pd.getInt64()
if err != nil {
return err
}
b.Offset, err = pd.getInt64()
if err != nil {
return err
}
// For backwards compatibility put the offset in the offsets array too
b.Offsets = []int64{b.Offset}
return nil
}
func (b *OffsetResponseBlock) encode(pe packetEncoder, version int16) (err error) {
pe.putInt16(int16(b.Err))
if version == 0 {
return pe.putInt64Array(b.Offsets)
}
pe.putInt64(b.Timestamp)
pe.putInt64(b.Offset)
return nil
}
type OffsetResponse struct {
Version int16
Blocks map[string]map[int32]*OffsetResponseBlock
}
func (r *OffsetResponse) decode(pd packetDecoder, version int16) (err error) {
numTopics, err := pd.getArrayLength()
if err != nil {
return err
}
r.Blocks = make(map[string]map[int32]*OffsetResponseBlock, numTopics)
for i := 0; i < numTopics; i++ {
name, err := pd.getString()
if err != nil {
return err
}
numBlocks, err := pd.getArrayLength()
if err != nil {
return err
}
r.Blocks[name] = make(map[int32]*OffsetResponseBlock, numBlocks)
for j := 0; j < numBlocks; j++ {
id, err := pd.getInt32()
if err != nil {
return err
}
block := new(OffsetResponseBlock)
err = block.decode(pd, version)
if err != nil {
return err
}
r.Blocks[name][id] = block
}
}
return nil
}
func (r *OffsetResponse) GetBlock(topic string, partition int32) *OffsetResponseBlock {
if r.Blocks == nil {
return nil
}
if r.Blocks[topic] == nil {
return nil
}
return r.Blocks[topic][partition]
}
func (r *OffsetResponse) encode(pe packetEncoder) (err error) {
if err = pe.putArrayLength(len(r.Blocks)); err != nil {
return err
}
for topic, partitions := range r.Blocks {
if err = pe.putString(topic); err != nil {
return err
}
if err = pe.putArrayLength(len(partitions)); err != nil {
return err
}
for partition, block := range partitions {
pe.putInt32(partition)
if err = block.encode(pe, r.version()); err != nil {
return err
}
}
}
return nil
}
func (r *OffsetResponse) key() int16 {
return 2
}
func (r *OffsetResponse) version() int16 {
return r.Version
}
func (r *OffsetResponse) requiredVersion() KafkaVersion {
switch r.Version {
case 1:
return V0_10_1_0
default:
return minVersion
}
}
// testing API
func (r *OffsetResponse) AddTopicPartition(topic string, partition int32, offset int64) {
if r.Blocks == nil {
r.Blocks = make(map[string]map[int32]*OffsetResponseBlock)
}
byTopic, ok := r.Blocks[topic]
if !ok {
byTopic = make(map[int32]*OffsetResponseBlock)
r.Blocks[topic] = byTopic
}
byTopic[partition] = &OffsetResponseBlock{Offsets: []int64{offset}, Offset: offset}
}
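A short sketch (not part of the vendored file) of the testing API above: hand-building a canned response, as a test harness or broker mock might.

import "github.com/Shopify/sarama"

func cannedOffsetResponse() *sarama.OffsetResponse {
	resp := new(sarama.OffsetResponse)
	resp.AddTopicPartition("my_topic", 0, 42) // partition 0 reports offset 42
	return resp
}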

45
vendor/github.com/Shopify/sarama/packet_decoder.go generated vendored Normal file
View File

@ -0,0 +1,45 @@
package sarama
// PacketDecoder is the interface providing helpers for reading with Kafka's encoding rules.
// Types implementing Decoder only need to worry about calling methods like GetString,
// not about how a string is represented in Kafka.
type packetDecoder interface {
// Primitives
getInt8() (int8, error)
getInt16() (int16, error)
getInt32() (int32, error)
getInt64() (int64, error)
getArrayLength() (int, error)
// Collections
getBytes() ([]byte, error)
getString() (string, error)
getInt32Array() ([]int32, error)
getInt64Array() ([]int64, error)
getStringArray() ([]string, error)
// Subsets
remaining() int
getSubset(length int) (packetDecoder, error)
// Stacks, see PushDecoder
push(in pushDecoder) error
pop() error
}
// PushDecoder is the interface for decoding fields like CRCs and lengths where the validity
// of the field depends on what is after it in the packet. Start them with PacketDecoder.Push() where
// the actual value is located in the packet, then PacketDecoder.Pop() them when all the bytes they
// depend upon have been decoded.
type pushDecoder interface {
// Saves the offset into the input buffer as the location to actually read the calculated value when able.
saveOffset(in int)
// Returns the length of data to reserve for the input of this decoder (e.g. 4 bytes for a CRC32).
reserveLength() int
// Indicates that all required data is now available to calculate and check the field.
// SaveOffset is guaranteed to have been called first. The implementation should read ReserveLength() bytes
// of data from the saved offset, and verify it based on the data between the saved offset and curOffset.
check(curOffset int, buf []byte) error
}
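An illustrative pushDecoder (a sketch written as if inside package sarama, since these interfaces are unexported): it reserves 4 bytes for a big-endian length prefix and, on pop, checks that exactly that many bytes followed it. The vendored lengthField type plays this role for real requests.

import (
	"encoding/binary"
	"fmt"
)

// exampleLengthField verifies a 4-byte length prefix against the bytes decoded after it.
type exampleLengthField struct {
	startOffset int
}

func (l *exampleLengthField) saveOffset(in int) { l.startOffset = in }

func (l *exampleLengthField) reserveLength() int { return 4 }

func (l *exampleLengthField) check(curOffset int, buf []byte) error {
	want := int32(binary.BigEndian.Uint32(buf[l.startOffset:]))
	if got := int32(curOffset - l.startOffset - 4); want != got {
		return PacketDecodingError{fmt.Sprintf("length field said %d, but %d bytes followed", want, got)}
	}
	return nil
}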

50
vendor/github.com/Shopify/sarama/packet_encoder.go generated vendored Normal file
View File

@ -0,0 +1,50 @@
package sarama
import "github.com/rcrowley/go-metrics"
// PacketEncoder is the interface providing helpers for writing with Kafka's encoding rules.
// Types implementing Encoder only need to worry about calling methods like PutString,
// not about how a string is represented in Kafka.
type packetEncoder interface {
// Primitives
putInt8(in int8)
putInt16(in int16)
putInt32(in int32)
putInt64(in int64)
putArrayLength(in int) error
// Collections
putBytes(in []byte) error
putRawBytes(in []byte) error
putString(in string) error
putStringArray(in []string) error
putInt32Array(in []int32) error
putInt64Array(in []int64) error
// Provide the current offset to record the batch size metric
offset() int
// Stacks, see PushEncoder
push(in pushEncoder)
pop() error
// To record metrics when provided
metricRegistry() metrics.Registry
}
// PushEncoder is the interface for encoding fields like CRCs and lengths where the value
// of the field depends on what is encoded after it in the packet. Start them with PacketEncoder.Push() where
// the actual value is located in the packet, then PacketEncoder.Pop() them when all the bytes they
// depend upon have been written.
type pushEncoder interface {
// Saves the offset into the input buffer as the location to actually write the calculated value when able.
saveOffset(in int)
// Returns the length of data to reserve for the output of this encoder (e.g. 4 bytes for a CRC32).
reserveLength() int
// Indicates that all required data is now available to calculate and write the field.
// SaveOffset is guaranteed to have been called first. The implementation should write ReserveLength() bytes
// of data to the saved offset, based on the data between the saved offset and curOffset.
run(curOffset int, buf []byte) error
}

135
vendor/github.com/Shopify/sarama/partitioner.go generated vendored Normal file
View File

@ -0,0 +1,135 @@
package sarama
import (
"hash"
"hash/fnv"
"math/rand"
"time"
)
// Partitioner is anything that, given a Kafka message and a number of partitions indexed [0...numPartitions-1],
// decides to which partition to send the message. RandomPartitioner, RoundRobinPartitioner and HashPartitioner are provided
// as simple default implementations.
type Partitioner interface {
// Partition takes a message and partition count and chooses a partition
Partition(message *ProducerMessage, numPartitions int32) (int32, error)
// RequiresConsistency indicates to the user of the partitioner whether the
// mapping of key->partition is consistent or not. Specifically, if a
// partitioner requires consistency then it must be allowed to choose from all
// partitions (even ones known to be unavailable), and its choice must be
// respected by the caller. The obvious example is the HashPartitioner.
RequiresConsistency() bool
}
// PartitionerConstructor is the type for a function capable of constructing new Partitioners.
type PartitionerConstructor func(topic string) Partitioner
type manualPartitioner struct{}
// NewManualPartitioner returns a Partitioner which uses the partition manually set in the provided
// ProducerMessage's Partition field as the partition to produce to.
func NewManualPartitioner(topic string) Partitioner {
return new(manualPartitioner)
}
func (p *manualPartitioner) Partition(message *ProducerMessage, numPartitions int32) (int32, error) {
return message.Partition, nil
}
func (p *manualPartitioner) RequiresConsistency() bool {
return true
}
type randomPartitioner struct {
generator *rand.Rand
}
// NewRandomPartitioner returns a Partitioner which chooses a random partition each time.
func NewRandomPartitioner(topic string) Partitioner {
p := new(randomPartitioner)
p.generator = rand.New(rand.NewSource(time.Now().UTC().UnixNano()))
return p
}
func (p *randomPartitioner) Partition(message *ProducerMessage, numPartitions int32) (int32, error) {
return int32(p.generator.Intn(int(numPartitions))), nil
}
func (p *randomPartitioner) RequiresConsistency() bool {
return false
}
type roundRobinPartitioner struct {
partition int32
}
// NewRoundRobinPartitioner returns a Partitioner which walks through the available partitions one at a time.
func NewRoundRobinPartitioner(topic string) Partitioner {
return &roundRobinPartitioner{}
}
func (p *roundRobinPartitioner) Partition(message *ProducerMessage, numPartitions int32) (int32, error) {
if p.partition >= numPartitions {
p.partition = 0
}
ret := p.partition
p.partition++
return ret, nil
}
func (p *roundRobinPartitioner) RequiresConsistency() bool {
return false
}
type hashPartitioner struct {
random Partitioner
hasher hash.Hash32
}
// NewCustomHashPartitioner is a wrapper around NewHashPartitioner, allowing the use of a custom hasher.
// The argument is a function that returns an instance implementing the hash.Hash32 interface. This ensures that
// each partition dispatcher gets its own hasher, avoiding concurrency issues from sharing an instance.
func NewCustomHashPartitioner(hasher func() hash.Hash32) PartitionerConstructor {
return func(topic string) Partitioner {
p := new(hashPartitioner)
p.random = NewRandomPartitioner(topic)
p.hasher = hasher()
return p
}
}
// NewHashPartitioner returns a Partitioner which behaves as follows. If the message's key is nil then a
// random partition is chosen. Otherwise the FNV-1a hash of the encoded bytes of the message key is used,
// modulo the number of partitions. This ensures that messages with the same key always end up on the
// same partition.
func NewHashPartitioner(topic string) Partitioner {
p := new(hashPartitioner)
p.random = NewRandomPartitioner(topic)
p.hasher = fnv.New32a()
return p
}
func (p *hashPartitioner) Partition(message *ProducerMessage, numPartitions int32) (int32, error) {
if message.Key == nil {
return p.random.Partition(message, numPartitions)
}
bytes, err := message.Key.Encode()
if err != nil {
return -1, err
}
p.hasher.Reset()
_, err = p.hasher.Write(bytes)
if err != nil {
return -1, err
}
partition := int32(p.hasher.Sum32()) % numPartitions
if partition < 0 {
partition = -partition
}
return partition, nil
}
func (p *hashPartitioner) RequiresConsistency() bool {
return true
}
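A minimal sketch (not part of the vendored file) of wiring these partitioners into a producer configuration; Config and NewConfig are assumed from elsewhere in the package. Producer.Partitioner takes a PartitionerConstructor, so a constructor is assigned, not an instance.

import (
	"hash"
	"hash/crc32"

	"github.com/Shopify/sarama"
)

func partitionerConfig() *sarama.Config {
	cfg := sarama.NewConfig()
	// The default is NewHashPartitioner (FNV-1a). A custom 32-bit hash can be
	// swapped in via NewCustomHashPartitioner; the factory function gives each
	// partition dispatcher its own hasher instance.
	cfg.Producer.Partitioner = sarama.NewCustomHashPartitioner(func() hash.Hash32 {
		return crc32.NewIEEE()
	})
	return cfg
}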

121
vendor/github.com/Shopify/sarama/prep_encoder.go generated vendored Normal file
View File

@ -0,0 +1,121 @@
package sarama
import (
"fmt"
"math"
"github.com/rcrowley/go-metrics"
)
type prepEncoder struct {
length int
}
// primitives
func (pe *prepEncoder) putInt8(in int8) {
pe.length++
}
func (pe *prepEncoder) putInt16(in int16) {
pe.length += 2
}
func (pe *prepEncoder) putInt32(in int32) {
pe.length += 4
}
func (pe *prepEncoder) putInt64(in int64) {
pe.length += 8
}
func (pe *prepEncoder) putArrayLength(in int) error {
if in > math.MaxInt32 {
return PacketEncodingError{fmt.Sprintf("array too long (%d)", in)}
}
pe.length += 4
return nil
}
// arrays
func (pe *prepEncoder) putBytes(in []byte) error {
pe.length += 4
if in == nil {
return nil
}
if len(in) > math.MaxInt32 {
return PacketEncodingError{fmt.Sprintf("byteslice too long (%d)", len(in))}
}
pe.length += len(in)
return nil
}
func (pe *prepEncoder) putRawBytes(in []byte) error {
if len(in) > math.MaxInt32 {
return PacketEncodingError{fmt.Sprintf("byteslice too long (%d)", len(in))}
}
pe.length += len(in)
return nil
}
func (pe *prepEncoder) putString(in string) error {
pe.length += 2
if len(in) > math.MaxInt16 {
return PacketEncodingError{fmt.Sprintf("string too long (%d)", len(in))}
}
pe.length += len(in)
return nil
}
func (pe *prepEncoder) putStringArray(in []string) error {
err := pe.putArrayLength(len(in))
if err != nil {
return err
}
for _, str := range in {
if err := pe.putString(str); err != nil {
return err
}
}
return nil
}
func (pe *prepEncoder) putInt32Array(in []int32) error {
err := pe.putArrayLength(len(in))
if err != nil {
return err
}
pe.length += 4 * len(in)
return nil
}
func (pe *prepEncoder) putInt64Array(in []int64) error {
err := pe.putArrayLength(len(in))
if err != nil {
return err
}
pe.length += 8 * len(in)
return nil
}
func (pe *prepEncoder) offset() int {
return pe.length
}
// stackable
func (pe *prepEncoder) push(in pushEncoder) {
pe.length += in.reserveLength()
}
func (pe *prepEncoder) pop() error {
return nil
}
// we do not record metrics during the prep encoder pass
func (pe *prepEncoder) metricRegistry() metrics.Registry {
return nil
}
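A sketch of the two-pass encoding flow this type enables (written as if inside package sarama, since these types are unexported, and assuming the package's encoder interface): a prepEncoder pass measures the exact buffer size without writing anything, then a realEncoder pass fills a buffer of that size. The package's unexported encode helper, called from produce_set.go below, works the same way.

func encodeTwoPass(e encoder) ([]byte, error) {
	var prep prepEncoder
	if err := e.encode(&prep); err != nil { // pass 1: measure
		return nil, err
	}
	re := &realEncoder{raw: make([]byte, prep.length)}
	if err := e.encode(re); err != nil { // pass 2: write
		return nil, err
	}
	return re.raw, nil
}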

209
vendor/github.com/Shopify/sarama/produce_request.go generated vendored Normal file
View File

@ -0,0 +1,209 @@
package sarama
import "github.com/rcrowley/go-metrics"
// RequiredAcks is used in Produce Requests to tell the broker how many replica acknowledgements
// it must see before responding. Any of the constants defined here are valid. On broker versions
// prior to 0.8.2.0 any other positive int16 is also valid (the broker will wait for that many
// acknowledgements) but in 0.8.2.0 and later this will raise an exception (it has been replaced
// by the `min.insync.replicas` setting in the broker configuration).
type RequiredAcks int16
const (
// NoResponse doesn't send any response; the TCP ACK is all you get.
NoResponse RequiredAcks = 0
// WaitForLocal waits for only the local commit to succeed before responding.
WaitForLocal RequiredAcks = 1
// WaitForAll waits for all in-sync replicas to commit before responding.
// The minimum number of in-sync replicas is configured on the broker via
// the `min.insync.replicas` configuration key.
WaitForAll RequiredAcks = -1
)
type ProduceRequest struct {
RequiredAcks RequiredAcks
Timeout int32
Version int16 // v1 requires Kafka 0.9, v2 requires Kafka 0.10
msgSets map[string]map[int32]*MessageSet
}
func (r *ProduceRequest) encode(pe packetEncoder) error {
pe.putInt16(int16(r.RequiredAcks))
pe.putInt32(r.Timeout)
err := pe.putArrayLength(len(r.msgSets))
if err != nil {
return err
}
metricRegistry := pe.metricRegistry()
var batchSizeMetric metrics.Histogram
var compressionRatioMetric metrics.Histogram
if metricRegistry != nil {
batchSizeMetric = getOrRegisterHistogram("batch-size", metricRegistry)
compressionRatioMetric = getOrRegisterHistogram("compression-ratio", metricRegistry)
}
totalRecordCount := int64(0)
for topic, partitions := range r.msgSets {
err = pe.putString(topic)
if err != nil {
return err
}
err = pe.putArrayLength(len(partitions))
if err != nil {
return err
}
topicRecordCount := int64(0)
var topicCompressionRatioMetric metrics.Histogram
if metricRegistry != nil {
topicCompressionRatioMetric = getOrRegisterTopicHistogram("compression-ratio", topic, metricRegistry)
}
for id, msgSet := range partitions {
startOffset := pe.offset()
pe.putInt32(id)
pe.push(&lengthField{})
err = msgSet.encode(pe)
if err != nil {
return err
}
err = pe.pop()
if err != nil {
return err
}
if metricRegistry != nil {
for _, messageBlock := range msgSet.Messages {
// Is this a fake "message" wrapping real messages?
if messageBlock.Msg.Set != nil {
topicRecordCount += int64(len(messageBlock.Msg.Set.Messages))
} else {
// A single uncompressed message
topicRecordCount++
}
// Better safe than sorry when computing the compression ratio
if messageBlock.Msg.compressedSize != 0 {
compressionRatio := float64(len(messageBlock.Msg.Value)) /
float64(messageBlock.Msg.compressedSize)
// Histograms do not support decimal values, so multiply the ratio by 100 for better precision
intCompressionRatio := int64(100 * compressionRatio)
compressionRatioMetric.Update(intCompressionRatio)
topicCompressionRatioMetric.Update(intCompressionRatio)
}
}
batchSize := int64(pe.offset() - startOffset)
batchSizeMetric.Update(batchSize)
getOrRegisterTopicHistogram("batch-size", topic, metricRegistry).Update(batchSize)
}
}
if topicRecordCount > 0 {
getOrRegisterTopicMeter("record-send-rate", topic, metricRegistry).Mark(topicRecordCount)
getOrRegisterTopicHistogram("records-per-request", topic, metricRegistry).Update(topicRecordCount)
totalRecordCount += topicRecordCount
}
}
if totalRecordCount > 0 {
metrics.GetOrRegisterMeter("record-send-rate", metricRegistry).Mark(totalRecordCount)
getOrRegisterHistogram("records-per-request", metricRegistry).Update(totalRecordCount)
}
return nil
}
func (r *ProduceRequest) decode(pd packetDecoder, version int16) error {
requiredAcks, err := pd.getInt16()
if err != nil {
return err
}
r.RequiredAcks = RequiredAcks(requiredAcks)
if r.Timeout, err = pd.getInt32(); err != nil {
return err
}
topicCount, err := pd.getArrayLength()
if err != nil {
return err
}
if topicCount == 0 {
return nil
}
r.msgSets = make(map[string]map[int32]*MessageSet)
for i := 0; i < topicCount; i++ {
topic, err := pd.getString()
if err != nil {
return err
}
partitionCount, err := pd.getArrayLength()
if err != nil {
return err
}
r.msgSets[topic] = make(map[int32]*MessageSet)
for j := 0; j < partitionCount; j++ {
partition, err := pd.getInt32()
if err != nil {
return err
}
messageSetSize, err := pd.getInt32()
if err != nil {
return err
}
msgSetDecoder, err := pd.getSubset(int(messageSetSize))
if err != nil {
return err
}
msgSet := &MessageSet{}
err = msgSet.decode(msgSetDecoder)
if err != nil {
return err
}
r.msgSets[topic][partition] = msgSet
}
}
return nil
}
func (r *ProduceRequest) key() int16 {
return 0
}
func (r *ProduceRequest) version() int16 {
return r.Version
}
func (r *ProduceRequest) requiredVersion() KafkaVersion {
switch r.Version {
case 1:
return V0_9_0_0
case 2:
return V0_10_0_0
default:
return minVersion
}
}
func (r *ProduceRequest) AddMessage(topic string, partition int32, msg *Message) {
if r.msgSets == nil {
r.msgSets = make(map[string]map[int32]*MessageSet)
}
if r.msgSets[topic] == nil {
r.msgSets[topic] = make(map[int32]*MessageSet)
}
set := r.msgSets[topic][partition]
if set == nil {
set = new(MessageSet)
r.msgSets[topic][partition] = set
}
set.addMessage(msg)
}
func (r *ProduceRequest) AddSet(topic string, partition int32, set *MessageSet) {
if r.msgSets == nil {
r.msgSets = make(map[string]map[int32]*MessageSet)
}
if r.msgSets[topic] == nil {
r.msgSets[topic] = make(map[int32]*MessageSet)
}
r.msgSets[topic][partition] = set
}
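A minimal sketch (not part of the vendored file): the RequiredAcks values documented above are normally set on the producer configuration rather than on a raw ProduceRequest; Config and NewConfig are assumed from elsewhere in the package.

import (
	"time"

	"github.com/Shopify/sarama"
)

func ackConfig() *sarama.Config {
	cfg := sarama.NewConfig()
	cfg.Producer.RequiredAcks = sarama.WaitForAll // wait for all in-sync replicas
	cfg.Producer.Timeout = 10 * time.Second       // how long the broker may wait for those acks
	return cfg
}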

159
vendor/github.com/Shopify/sarama/produce_response.go generated vendored Normal file
View File

@ -0,0 +1,159 @@
package sarama
import "time"
type ProduceResponseBlock struct {
Err KError
Offset int64
// only provided if Version >= 2 and the broker is configured with `LogAppendTime`
Timestamp time.Time
}
func (b *ProduceResponseBlock) decode(pd packetDecoder, version int16) (err error) {
tmp, err := pd.getInt16()
if err != nil {
return err
}
b.Err = KError(tmp)
b.Offset, err = pd.getInt64()
if err != nil {
return err
}
if version >= 2 {
if millis, err := pd.getInt64(); err != nil {
return err
} else if millis != -1 {
b.Timestamp = time.Unix(millis/1000, (millis%1000)*int64(time.Millisecond))
}
}
return nil
}
type ProduceResponse struct {
Blocks map[string]map[int32]*ProduceResponseBlock
Version int16
ThrottleTime time.Duration // only provided if Version >= 1
}
func (r *ProduceResponse) decode(pd packetDecoder, version int16) (err error) {
r.Version = version
numTopics, err := pd.getArrayLength()
if err != nil {
return err
}
r.Blocks = make(map[string]map[int32]*ProduceResponseBlock, numTopics)
for i := 0; i < numTopics; i++ {
name, err := pd.getString()
if err != nil {
return err
}
numBlocks, err := pd.getArrayLength()
if err != nil {
return err
}
r.Blocks[name] = make(map[int32]*ProduceResponseBlock, numBlocks)
for j := 0; j < numBlocks; j++ {
id, err := pd.getInt32()
if err != nil {
return err
}
block := new(ProduceResponseBlock)
err = block.decode(pd, version)
if err != nil {
return err
}
r.Blocks[name][id] = block
}
}
if r.Version >= 1 {
millis, err := pd.getInt32()
if err != nil {
return err
}
r.ThrottleTime = time.Duration(millis) * time.Millisecond
}
return nil
}
func (r *ProduceResponse) encode(pe packetEncoder) error {
err := pe.putArrayLength(len(r.Blocks))
if err != nil {
return err
}
for topic, partitions := range r.Blocks {
err = pe.putString(topic)
if err != nil {
return err
}
err = pe.putArrayLength(len(partitions))
if err != nil {
return err
}
for id, prb := range partitions {
pe.putInt32(id)
pe.putInt16(int16(prb.Err))
pe.putInt64(prb.Offset)
}
}
if r.Version >= 1 {
pe.putInt32(int32(r.ThrottleTime / time.Millisecond))
}
return nil
}
func (r *ProduceResponse) key() int16 {
return 0
}
func (r *ProduceResponse) version() int16 {
return r.Version
}
func (r *ProduceResponse) requiredVersion() KafkaVersion {
switch r.Version {
case 1:
return V0_9_0_0
case 2:
return V0_10_0_0
default:
return minVersion
}
}
func (r *ProduceResponse) GetBlock(topic string, partition int32) *ProduceResponseBlock {
if r.Blocks == nil {
return nil
}
if r.Blocks[topic] == nil {
return nil
}
return r.Blocks[topic][partition]
}
// Testing API
func (r *ProduceResponse) AddTopicPartition(topic string, partition int32, err KError) {
if r.Blocks == nil {
r.Blocks = make(map[string]map[int32]*ProduceResponseBlock)
}
byTopic, ok := r.Blocks[topic]
if !ok {
byTopic = make(map[int32]*ProduceResponseBlock)
r.Blocks[topic] = byTopic
}
byTopic[partition] = &ProduceResponseBlock{Err: err}
}

176
vendor/github.com/Shopify/sarama/produce_set.go generated vendored Normal file
View File

@ -0,0 +1,176 @@
package sarama
import "time"
type partitionSet struct {
msgs []*ProducerMessage
setToSend *MessageSet
bufferBytes int
}
type produceSet struct {
parent *asyncProducer
msgs map[string]map[int32]*partitionSet
bufferBytes int
bufferCount int
}
func newProduceSet(parent *asyncProducer) *produceSet {
return &produceSet{
msgs: make(map[string]map[int32]*partitionSet),
parent: parent,
}
}
func (ps *produceSet) add(msg *ProducerMessage) error {
var err error
var key, val []byte
if msg.Key != nil {
if key, err = msg.Key.Encode(); err != nil {
return err
}
}
if msg.Value != nil {
if val, err = msg.Value.Encode(); err != nil {
return err
}
}
partitions := ps.msgs[msg.Topic]
if partitions == nil {
partitions = make(map[int32]*partitionSet)
ps.msgs[msg.Topic] = partitions
}
set := partitions[msg.Partition]
if set == nil {
set = &partitionSet{setToSend: new(MessageSet)}
partitions[msg.Partition] = set
}
set.msgs = append(set.msgs, msg)
msgToSend := &Message{Codec: CompressionNone, Key: key, Value: val}
if ps.parent.conf.Version.IsAtLeast(V0_10_0_0) {
if msg.Timestamp.IsZero() {
msgToSend.Timestamp = time.Now()
} else {
msgToSend.Timestamp = msg.Timestamp
}
msgToSend.Version = 1
}
set.setToSend.addMessage(msgToSend)
size := producerMessageOverhead + len(key) + len(val)
set.bufferBytes += size
ps.bufferBytes += size
ps.bufferCount++
return nil
}
func (ps *produceSet) buildRequest() *ProduceRequest {
req := &ProduceRequest{
RequiredAcks: ps.parent.conf.Producer.RequiredAcks,
Timeout: int32(ps.parent.conf.Producer.Timeout / time.Millisecond),
}
if ps.parent.conf.Version.IsAtLeast(V0_10_0_0) {
req.Version = 2
}
for topic, partitionSet := range ps.msgs {
for partition, set := range partitionSet {
if ps.parent.conf.Producer.Compression == CompressionNone {
req.AddSet(topic, partition, set.setToSend)
} else {
// When compression is enabled, the entire set for each partition is compressed
// and sent as the payload of a single fake "message" with the appropriate codec
// set and no key. When the server sees a message with a compression codec, it
// decompresses the payload and treats the result as its message set.
payload, err := encode(set.setToSend, ps.parent.conf.MetricRegistry)
if err != nil {
Logger.Println(err) // if this happens, it's basically our fault.
panic(err)
}
compMsg := &Message{
Codec: ps.parent.conf.Producer.Compression,
Key: nil,
Value: payload,
Set: set.setToSend, // Provide the underlying message set for accurate metrics
}
if ps.parent.conf.Version.IsAtLeast(V0_10_0_0) {
compMsg.Version = 1
compMsg.Timestamp = set.setToSend.Messages[0].Msg.Timestamp
}
req.AddMessage(topic, partition, compMsg)
}
}
}
return req
}
func (ps *produceSet) eachPartition(cb func(topic string, partition int32, msgs []*ProducerMessage)) {
for topic, partitionSet := range ps.msgs {
for partition, set := range partitionSet {
cb(topic, partition, set.msgs)
}
}
}
func (ps *produceSet) dropPartition(topic string, partition int32) []*ProducerMessage {
if ps.msgs[topic] == nil {
return nil
}
set := ps.msgs[topic][partition]
if set == nil {
return nil
}
ps.bufferBytes -= set.bufferBytes
ps.bufferCount -= len(set.msgs)
delete(ps.msgs[topic], partition)
return set.msgs
}
func (ps *produceSet) wouldOverflow(msg *ProducerMessage) bool {
switch {
// Would we overflow our maximum possible size-on-the-wire? 10KiB is arbitrary overhead for safety.
case ps.bufferBytes+msg.byteSize() >= int(MaxRequestSize-(10*1024)):
return true
// Would we overflow the size-limit of a compressed message-batch for this partition?
case ps.parent.conf.Producer.Compression != CompressionNone &&
ps.msgs[msg.Topic] != nil && ps.msgs[msg.Topic][msg.Partition] != nil &&
ps.msgs[msg.Topic][msg.Partition].bufferBytes+msg.byteSize() >= ps.parent.conf.Producer.MaxMessageBytes:
return true
// Would we overflow simply in number of messages?
case ps.parent.conf.Producer.Flush.MaxMessages > 0 && ps.bufferCount >= ps.parent.conf.Producer.Flush.MaxMessages:
return true
default:
return false
}
}
func (ps *produceSet) readyToFlush() bool {
switch {
// If we don't have any messages, nothing else matters
case ps.empty():
return false
// If all three config values are 0, we always flush as-fast-as-possible
case ps.parent.conf.Producer.Flush.Frequency == 0 && ps.parent.conf.Producer.Flush.Bytes == 0 && ps.parent.conf.Producer.Flush.Messages == 0:
return true
// If we've passed the message trigger-point
case ps.parent.conf.Producer.Flush.Messages > 0 && ps.bufferCount >= ps.parent.conf.Producer.Flush.Messages:
return true
// If we've passed the byte trigger-point
case ps.parent.conf.Producer.Flush.Bytes > 0 && ps.bufferBytes >= ps.parent.conf.Producer.Flush.Bytes:
return true
default:
return false
}
}
func (ps *produceSet) empty() bool {
return ps.bufferCount == 0
}
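A minimal sketch (not part of the vendored file) of the three flush trigger-points readyToFlush checks, expressed as producer configuration; whichever threshold is crossed first causes a flush.

import (
	"time"

	"github.com/Shopify/sarama"
)

func flushConfig() *sarama.Config {
	cfg := sarama.NewConfig()
	cfg.Producer.Flush.Messages = 100                     // flush after 100 buffered messages...
	cfg.Producer.Flush.Bytes = 64 * 1024                  // ...or 64 KiB of buffered payload...
	cfg.Producer.Flush.Frequency = 500 * time.Millisecond // ...or every 500ms
	return cfg
}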

260
vendor/github.com/Shopify/sarama/real_decoder.go generated vendored Normal file
View File

@ -0,0 +1,260 @@
package sarama
import (
"encoding/binary"
"math"
)
var errInvalidArrayLength = PacketDecodingError{"invalid array length"}
var errInvalidByteSliceLength = PacketDecodingError{"invalid byteslice length"}
var errInvalidStringLength = PacketDecodingError{"invalid string length"}
var errInvalidSubsetSize = PacketDecodingError{"invalid subset size"}
type realDecoder struct {
raw []byte
off int
stack []pushDecoder
}
// primitives
func (rd *realDecoder) getInt8() (int8, error) {
if rd.remaining() < 1 {
rd.off = len(rd.raw)
return -1, ErrInsufficientData
}
tmp := int8(rd.raw[rd.off])
rd.off++
return tmp, nil
}
func (rd *realDecoder) getInt16() (int16, error) {
if rd.remaining() < 2 {
rd.off = len(rd.raw)
return -1, ErrInsufficientData
}
tmp := int16(binary.BigEndian.Uint16(rd.raw[rd.off:]))
rd.off += 2
return tmp, nil
}
func (rd *realDecoder) getInt32() (int32, error) {
if rd.remaining() < 4 {
rd.off = len(rd.raw)
return -1, ErrInsufficientData
}
tmp := int32(binary.BigEndian.Uint32(rd.raw[rd.off:]))
rd.off += 4
return tmp, nil
}
func (rd *realDecoder) getInt64() (int64, error) {
if rd.remaining() < 8 {
rd.off = len(rd.raw)
return -1, ErrInsufficientData
}
tmp := int64(binary.BigEndian.Uint64(rd.raw[rd.off:]))
rd.off += 8
return tmp, nil
}
func (rd *realDecoder) getArrayLength() (int, error) {
if rd.remaining() < 4 {
rd.off = len(rd.raw)
return -1, ErrInsufficientData
}
tmp := int(binary.BigEndian.Uint32(rd.raw[rd.off:]))
rd.off += 4
if tmp > rd.remaining() {
rd.off = len(rd.raw)
return -1, ErrInsufficientData
} else if tmp > 2*math.MaxUint16 {
return -1, errInvalidArrayLength
}
return tmp, nil
}
// collections
func (rd *realDecoder) getBytes() ([]byte, error) {
tmp, err := rd.getInt32()
if err != nil {
return nil, err
}
n := int(tmp)
switch {
case n < -1:
return nil, errInvalidByteSliceLength
case n == -1:
return nil, nil
case n == 0:
return make([]byte, 0), nil
case n > rd.remaining():
rd.off = len(rd.raw)
return nil, ErrInsufficientData
}
tmpStr := rd.raw[rd.off : rd.off+n]
rd.off += n
return tmpStr, nil
}
func (rd *realDecoder) getString() (string, error) {
tmp, err := rd.getInt16()
if err != nil {
return "", err
}
n := int(tmp)
switch {
case n < -1:
return "", errInvalidStringLength
case n == -1:
return "", nil
case n == 0:
return "", nil
case n > rd.remaining():
rd.off = len(rd.raw)
return "", ErrInsufficientData
}
tmpStr := string(rd.raw[rd.off : rd.off+n])
rd.off += n
return tmpStr, nil
}
func (rd *realDecoder) getInt32Array() ([]int32, error) {
if rd.remaining() < 4 {
rd.off = len(rd.raw)
return nil, ErrInsufficientData
}
n := int(binary.BigEndian.Uint32(rd.raw[rd.off:]))
rd.off += 4
if rd.remaining() < 4*n {
rd.off = len(rd.raw)
return nil, ErrInsufficientData
}
if n == 0 {
return nil, nil
}
if n < 0 {
return nil, errInvalidArrayLength
}
ret := make([]int32, n)
for i := range ret {
ret[i] = int32(binary.BigEndian.Uint32(rd.raw[rd.off:]))
rd.off += 4
}
return ret, nil
}
func (rd *realDecoder) getInt64Array() ([]int64, error) {
if rd.remaining() < 4 {
rd.off = len(rd.raw)
return nil, ErrInsufficientData
}
n := int(binary.BigEndian.Uint32(rd.raw[rd.off:]))
rd.off += 4
if rd.remaining() < 8*n {
rd.off = len(rd.raw)
return nil, ErrInsufficientData
}
if n == 0 {
return nil, nil
}
if n < 0 {
return nil, errInvalidArrayLength
}
ret := make([]int64, n)
for i := range ret {
ret[i] = int64(binary.BigEndian.Uint64(rd.raw[rd.off:]))
rd.off += 8
}
return ret, nil
}
func (rd *realDecoder) getStringArray() ([]string, error) {
if rd.remaining() < 4 {
rd.off = len(rd.raw)
return nil, ErrInsufficientData
}
n := int(binary.BigEndian.Uint32(rd.raw[rd.off:]))
rd.off += 4
if n == 0 {
return nil, nil
}
if n < 0 {
return nil, errInvalidArrayLength
}
ret := make([]string, n)
for i := range ret {
str, err := rd.getString()
if err != nil {
return nil, err
}
ret[i] = str
}
return ret, nil
}
// subsets
func (rd *realDecoder) remaining() int {
return len(rd.raw) - rd.off
}
func (rd *realDecoder) getSubset(length int) (packetDecoder, error) {
if length < 0 {
return nil, errInvalidSubsetSize
} else if length > rd.remaining() {
rd.off = len(rd.raw)
return nil, ErrInsufficientData
}
start := rd.off
rd.off += length
return &realDecoder{raw: rd.raw[start:rd.off]}, nil
}
// stacks
func (rd *realDecoder) push(in pushDecoder) error {
in.saveOffset(rd.off)
reserve := in.reserveLength()
if rd.remaining() < reserve {
rd.off = len(rd.raw)
return ErrInsufficientData
}
rd.stack = append(rd.stack, in)
rd.off += reserve
return nil
}
func (rd *realDecoder) pop() error {
// this is Go's ugly pop pattern (the inverse of append)
in := rd.stack[len(rd.stack)-1]
rd.stack = rd.stack[:len(rd.stack)-1]
return in.check(rd.off, rd.raw)
}

129
vendor/github.com/Shopify/sarama/real_encoder.go generated vendored Normal file
View File

@ -0,0 +1,129 @@
package sarama
import (
"encoding/binary"
"github.com/rcrowley/go-metrics"
)
type realEncoder struct {
raw []byte
off int
stack []pushEncoder
registry metrics.Registry
}
// primitives
func (re *realEncoder) putInt8(in int8) {
re.raw[re.off] = byte(in)
re.off++
}
func (re *realEncoder) putInt16(in int16) {
binary.BigEndian.PutUint16(re.raw[re.off:], uint16(in))
re.off += 2
}
func (re *realEncoder) putInt32(in int32) {
binary.BigEndian.PutUint32(re.raw[re.off:], uint32(in))
re.off += 4
}
func (re *realEncoder) putInt64(in int64) {
binary.BigEndian.PutUint64(re.raw[re.off:], uint64(in))
re.off += 8
}
func (re *realEncoder) putArrayLength(in int) error {
re.putInt32(int32(in))
return nil
}
// collection
func (re *realEncoder) putRawBytes(in []byte) error {
copy(re.raw[re.off:], in)
re.off += len(in)
return nil
}
func (re *realEncoder) putBytes(in []byte) error {
if in == nil {
re.putInt32(-1)
return nil
}
re.putInt32(int32(len(in)))
copy(re.raw[re.off:], in)
re.off += len(in)
return nil
}
func (re *realEncoder) putString(in string) error {
re.putInt16(int16(len(in)))
copy(re.raw[re.off:], in)
re.off += len(in)
return nil
}
func (re *realEncoder) putStringArray(in []string) error {
err := re.putArrayLength(len(in))
if err != nil {
return err
}
for _, val := range in {
if err := re.putString(val); err != nil {
return err
}
}
return nil
}
func (re *realEncoder) putInt32Array(in []int32) error {
err := re.putArrayLength(len(in))
if err != nil {
return err
}
for _, val := range in {
re.putInt32(val)
}
return nil
}
func (re *realEncoder) putInt64Array(in []int64) error {
err := re.putArrayLength(len(in))
if err != nil {
return err
}
for _, val := range in {
re.putInt64(val)
}
return nil
}
func (re *realEncoder) offset() int {
return re.off
}
// stacks
func (re *realEncoder) push(in pushEncoder) {
in.saveOffset(re.off)
re.off += in.reserveLength()
re.stack = append(re.stack, in)
}
func (re *realEncoder) pop() error {
// this is Go's ugly pop pattern (the inverse of append)
in := re.stack[len(re.stack)-1]
re.stack = re.stack[:len(re.stack)-1]
return in.run(re.off, re.raw)
}
// we do record metrics during the real encoder pass
func (re *realEncoder) metricRegistry() metrics.Registry {
return re.registry
}

119
vendor/github.com/Shopify/sarama/request.go generated vendored Normal file
View File

@ -0,0 +1,119 @@
package sarama
import (
"encoding/binary"
"fmt"
"io"
)
type protocolBody interface {
encoder
versionedDecoder
key() int16
version() int16
requiredVersion() KafkaVersion
}
type request struct {
correlationID int32
clientID string
body protocolBody
}
func (r *request) encode(pe packetEncoder) (err error) {
pe.push(&lengthField{})
pe.putInt16(r.body.key())
pe.putInt16(r.body.version())
pe.putInt32(r.correlationID)
err = pe.putString(r.clientID)
if err != nil {
return err
}
err = r.body.encode(pe)
if err != nil {
return err
}
return pe.pop()
}
func (r *request) decode(pd packetDecoder) (err error) {
var key int16
if key, err = pd.getInt16(); err != nil {
return err
}
var version int16
if version, err = pd.getInt16(); err != nil {
return err
}
if r.correlationID, err = pd.getInt32(); err != nil {
return err
}
r.clientID, err = pd.getString()
r.body = allocateBody(key, version)
if r.body == nil {
return PacketDecodingError{fmt.Sprintf("unknown request key (%d)", key)}
}
return r.body.decode(pd, version)
}
func decodeRequest(r io.Reader) (req *request, bytesRead int, err error) {
lengthBytes := make([]byte, 4)
if _, err := io.ReadFull(r, lengthBytes); err != nil {
return nil, bytesRead, err
}
bytesRead += len(lengthBytes)
length := int32(binary.BigEndian.Uint32(lengthBytes))
if length <= 4 || length > MaxRequestSize {
return nil, bytesRead, PacketDecodingError{fmt.Sprintf("message of length %d too large or too small", length)}
}
encodedReq := make([]byte, length)
if _, err := io.ReadFull(r, encodedReq); err != nil {
return nil, bytesRead, err
}
bytesRead += len(encodedReq)
req = &request{}
if err := decode(encodedReq, req); err != nil {
return nil, bytesRead, err
}
return req, bytesRead, nil
}
func allocateBody(key, version int16) protocolBody {
switch key {
case 0:
return &ProduceRequest{}
case 1:
return &FetchRequest{}
case 2:
return &OffsetRequest{Version: version}
case 3:
return &MetadataRequest{}
case 8:
return &OffsetCommitRequest{Version: version}
case 9:
return &OffsetFetchRequest{}
case 10:
return &ConsumerMetadataRequest{}
case 11:
return &JoinGroupRequest{}
case 12:
return &HeartbeatRequest{}
case 13:
return &LeaveGroupRequest{}
case 14:
return &SyncGroupRequest{}
case 15:
return &DescribeGroupsRequest{}
case 16:
return &ListGroupsRequest{}
case 17:
return &SaslHandshakeRequest{}
case 18:
return &ApiVersionsRequest{}
}
return nil
}
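A round-trip sketch (written as if inside package sarama, since request and decodeRequest are unexported): encode frames the body with a 4-byte length via lengthField, and decodeRequest reads the frame back from any io.Reader. The unexported encode helper is assumed to be the same one produce_set.go calls above.

import "bytes"

func roundTripRequest() (*request, error) {
	req := &request{correlationID: 1, clientID: "example", body: &ApiVersionsRequest{}}
	raw, err := encode(req, nil) // nil metrics registry is fine for a sketch
	if err != nil {
		return nil, err
	}
	decoded, _, err := decodeRequest(bytes.NewReader(raw))
	return decoded, err
}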

21
vendor/github.com/Shopify/sarama/response_header.go generated vendored Normal file
View File

@ -0,0 +1,21 @@
package sarama
import "fmt"
type responseHeader struct {
length int32
correlationID int32
}
func (r *responseHeader) decode(pd packetDecoder) (err error) {
r.length, err = pd.getInt32()
if err != nil {
return err
}
if r.length <= 4 || r.length > MaxResponseSize {
return PacketDecodingError{fmt.Sprintf("message of length %d too large or too small", r.length)}
}
r.correlationID, err = pd.getInt32()
return err
}

99
vendor/github.com/Shopify/sarama/sarama.go generated vendored Normal file
View File

@ -0,0 +1,99 @@
/*
Package sarama is a pure Go client library for dealing with Apache Kafka (versions 0.8 and later). It includes a high-level
API for easily producing and consuming messages, and a low-level API for controlling bytes on the wire when the high-level
API is insufficient. Usage examples for the high-level APIs are provided inline with their full documentation.
To produce messages, use either the AsyncProducer or the SyncProducer. The AsyncProducer accepts messages on a channel
and produces them asynchronously in the background as efficiently as possible; it is preferred in most cases.
The SyncProducer provides a method which will block until Kafka acknowledges the message as produced. This can be
useful but comes with two caveats: it will generally be less efficient, and the actual durability guarantees
depend on the configured value of `Producer.RequiredAcks`. There are configurations where a message acknowledged by the
SyncProducer can still sometimes be lost.
To consume messages, use the Consumer. Note that Sarama's Consumer implementation does not currently support automatic
consumer-group rebalancing and offset tracking. For Zookeeper-based tracking (Kafka 0.8.2 and earlier), the
https://github.com/wvanbergen/kafka library builds on Sarama to add this support. For Kafka-based tracking (Kafka 0.9
and later), the https://github.com/bsm/sarama-cluster library builds on Sarama to add this support.
For lower-level needs, the Broker and Request/Response objects permit precise control over each connection
and message sent on the wire; the Client provides higher-level metadata management that is shared between
the producers and the consumer. The Request/Response objects and properties are mostly undocumented, as they line up
exactly with the protocol fields documented by Kafka at
https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol
Metrics are exposed through https://github.com/rcrowley/go-metrics library in a local registry.
Broker related metrics:
+----------------------------------------------+------------+---------------------------------------------------------------+
| Name | Type | Description |
+----------------------------------------------+------------+---------------------------------------------------------------+
| incoming-byte-rate | meter | Bytes/second read off all brokers |
| incoming-byte-rate-for-broker-<broker-id> | meter | Bytes/second read off a given broker |
| outgoing-byte-rate | meter | Bytes/second written off all brokers |
| outgoing-byte-rate-for-broker-<broker-id> | meter | Bytes/second written off a given broker |
| request-rate | meter | Requests/second sent to all brokers |
| request-rate-for-broker-<broker-id> | meter | Requests/second sent to a given broker |
| request-size | histogram | Distribution of the request size in bytes for all brokers |
| request-size-for-broker-<broker-id> | histogram | Distribution of the request size in bytes for a given broker |
| request-latency-in-ms | histogram | Distribution of the request latency in ms for all brokers |
| request-latency-in-ms-for-broker-<broker-id> | histogram | Distribution of the request latency in ms for a given broker |
| response-rate | meter | Responses/second received from all brokers |
| response-rate-for-broker-<broker-id> | meter | Responses/second received from a given broker |
| response-size | histogram | Distribution of the response size in bytes for all brokers |
| response-size-for-broker-<broker-id> | histogram | Distribution of the response size in bytes for a given broker |
+----------------------------------------------+------------+---------------------------------------------------------------+
Note that we do not gather specific metrics for seed brokers but they are part of the "all brokers" metrics.
Producer related metrics:
+-------------------------------------------+------------+--------------------------------------------------------------------------------------+
| Name | Type | Description |
+-------------------------------------------+------------+--------------------------------------------------------------------------------------+
| batch-size | histogram | Distribution of the number of bytes sent per partition per request for all topics |
| batch-size-for-topic-<topic> | histogram | Distribution of the number of bytes sent per partition per request for a given topic |
| record-send-rate | meter | Records/second sent to all topics |
| record-send-rate-for-topic-<topic> | meter | Records/second sent to a given topic |
| records-per-request | histogram | Distribution of the number of records sent per request for all topics |
| records-per-request-for-topic-<topic> | histogram | Distribution of the number of records sent per request for a given topic |
| compression-ratio | histogram | Distribution of the compression ratio times 100 of record batches for all topics |
| compression-ratio-for-topic-<topic> | histogram | Distribution of the compression ratio times 100 of record batches for a given topic |
+-------------------------------------------+------------+--------------------------------------------------------------------------------------+
*/
package sarama
import (
"io/ioutil"
"log"
)
// Logger is the instance of a StdLogger interface that Sarama writes connection
// management events to. By default it is set to discard all log messages via ioutil.Discard,
// but you can set it to redirect wherever you want.
var Logger StdLogger = log.New(ioutil.Discard, "[Sarama] ", log.LstdFlags)
// StdLogger is used to log error messages.
type StdLogger interface {
Print(v ...interface{})
Printf(format string, v ...interface{})
Println(v ...interface{})
}
// PanicHandler is called for recovering from panics spawned internally to the library (and thus
// not recoverable by the caller's goroutine). Defaults to nil, which means panics are not recovered.
var PanicHandler func(interface{})
// MaxRequestSize is the maximum size (in bytes) of any request that Sarama will attempt to send. Trying
// to send a request larger than this will result in a PacketEncodingError. The default of 100 MiB is aligned
// with Kafka's default `socket.request.max.bytes`, which is the largest request the broker will attempt
// to process.
var MaxRequestSize int32 = 100 * 1024 * 1024
// MaxResponseSize is the maximum size (in bytes) of any response that Sarama will attempt to parse. If
// a broker returns a response message larger than this value, Sarama will return a PacketDecodingError to
// protect the client from running out of memory. Please note that brokers do not have any natural limit on
// the size of responses they send. In particular, they can send arbitrarily large fetch responses to consumers
// (see https://issues.apache.org/jira/browse/KAFKA-2063).
var MaxResponseSize int32 = 100 * 1024 * 1024
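
As a concrete sketch of the producer flow described in the package comment, a minimal SyncProducer round trip could look like the following; the broker address "localhost:9092" and topic "events" are illustrative placeholders, not values used anywhere in this repository:

    package main

    import (
        "log"

        "github.com/Shopify/sarama"
    )

    func main() {
        cfg := sarama.NewConfig()
        cfg.Producer.Return.Successes = true // required by the SyncProducer
        producer, err := sarama.NewSyncProducer([]string{"localhost:9092"}, cfg)
        if err != nil {
            log.Fatalln(err)
        }
        defer producer.Close()

        partition, offset, err := producer.SendMessage(&sarama.ProducerMessage{
            Topic: "events",
            Value: sarama.StringEncoder("hello"),
        })
        if err != nil {
            log.Fatalln(err)
        }
        log.Printf("stored at partition=%d, offset=%d", partition, offset)
    }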

33
vendor/github.com/Shopify/sarama/sasl_handshake_request.go generated vendored Normal file
View File

@ -0,0 +1,33 @@
package sarama
type SaslHandshakeRequest struct {
Mechanism string
}
func (r *SaslHandshakeRequest) encode(pe packetEncoder) error {
if err := pe.putString(r.Mechanism); err != nil {
return err
}
return nil
}
func (r *SaslHandshakeRequest) decode(pd packetDecoder, version int16) (err error) {
if r.Mechanism, err = pd.getString(); err != nil {
return err
}
return nil
}
func (r *SaslHandshakeRequest) key() int16 {
return 17
}
func (r *SaslHandshakeRequest) version() int16 {
return 0
}
func (r *SaslHandshakeRequest) requiredVersion() KafkaVersion {
return V0_10_0_0
}

38
vendor/github.com/Shopify/sarama/sasl_handshake_response.go generated vendored Normal file
View File

@ -0,0 +1,38 @@
package sarama
type SaslHandshakeResponse struct {
Err KError
EnabledMechanisms []string
}
func (r *SaslHandshakeResponse) encode(pe packetEncoder) error {
pe.putInt16(int16(r.Err))
return pe.putStringArray(r.EnabledMechanisms)
}
func (r *SaslHandshakeResponse) decode(pd packetDecoder, version int16) error {
kerr, err := pd.getInt16()
if err != nil {
return err
}
r.Err = KError(kerr)
if r.EnabledMechanisms, err = pd.getStringArray(); err != nil {
return err
}
return nil
}
func (r *SaslHandshakeResponse) key() int16 {
return 17
}
func (r *SaslHandshakeResponse) version() int16 {
return 0
}
func (r *SaslHandshakeResponse) requiredVersion() KafkaVersion {
return V0_10_0_0
}

100
vendor/github.com/Shopify/sarama/sync_group_request.go generated vendored Normal file
View File

@ -0,0 +1,100 @@
package sarama
type SyncGroupRequest struct {
GroupId string
GenerationId int32
MemberId string
GroupAssignments map[string][]byte
}
func (r *SyncGroupRequest) encode(pe packetEncoder) error {
if err := pe.putString(r.GroupId); err != nil {
return err
}
pe.putInt32(r.GenerationId)
if err := pe.putString(r.MemberId); err != nil {
return err
}
if err := pe.putArrayLength(len(r.GroupAssignments)); err != nil {
return err
}
for memberId, memberAssignment := range r.GroupAssignments {
if err := pe.putString(memberId); err != nil {
return err
}
if err := pe.putBytes(memberAssignment); err != nil {
return err
}
}
return nil
}
func (r *SyncGroupRequest) decode(pd packetDecoder, version int16) (err error) {
if r.GroupId, err = pd.getString(); err != nil {
return
}
if r.GenerationId, err = pd.getInt32(); err != nil {
return
}
if r.MemberId, err = pd.getString(); err != nil {
return
}
n, err := pd.getArrayLength()
if err != nil {
return err
}
if n == 0 {
return nil
}
r.GroupAssignments = make(map[string][]byte)
for i := 0; i < n; i++ {
memberId, err := pd.getString()
if err != nil {
return err
}
memberAssignment, err := pd.getBytes()
if err != nil {
return err
}
r.GroupAssignments[memberId] = memberAssignment
}
return nil
}
func (r *SyncGroupRequest) key() int16 {
return 14
}
func (r *SyncGroupRequest) version() int16 {
return 0
}
func (r *SyncGroupRequest) requiredVersion() KafkaVersion {
return V0_9_0_0
}
func (r *SyncGroupRequest) AddGroupAssignment(memberId string, memberAssignment []byte) {
if r.GroupAssignments == nil {
r.GroupAssignments = make(map[string][]byte)
}
r.GroupAssignments[memberId] = memberAssignment
}
func (r *SyncGroupRequest) AddGroupAssignmentMember(memberId string, memberAssignment *ConsumerGroupMemberAssignment) error {
bin, err := encode(memberAssignment, nil)
if err != nil {
return err
}
r.AddGroupAssignment(memberId, bin)
return nil
}

41
vendor/github.com/Shopify/sarama/sync_group_response.go generated vendored Normal file
View File

@ -0,0 +1,41 @@
package sarama
type SyncGroupResponse struct {
Err KError
MemberAssignment []byte
}
func (r *SyncGroupResponse) GetMemberAssignment() (*ConsumerGroupMemberAssignment, error) {
assignment := new(ConsumerGroupMemberAssignment)
err := decode(r.MemberAssignment, assignment)
return assignment, err
}
func (r *SyncGroupResponse) encode(pe packetEncoder) error {
pe.putInt16(int16(r.Err))
return pe.putBytes(r.MemberAssignment)
}
func (r *SyncGroupResponse) decode(pd packetDecoder, version int16) (err error) {
kerr, err := pd.getInt16()
if err != nil {
return err
}
r.Err = KError(kerr)
r.MemberAssignment, err = pd.getBytes()
return
}
func (r *SyncGroupResponse) key() int16 {
return 14
}
func (r *SyncGroupResponse) version() int16 {
return 0
}
func (r *SyncGroupResponse) requiredVersion() KafkaVersion {
return V0_9_0_0
}

164
vendor/github.com/Shopify/sarama/sync_producer.go generated vendored Normal file
View File

@ -0,0 +1,164 @@
package sarama
import "sync"
// SyncProducer publishes Kafka messages, blocking until they have been acknowledged. It routes messages to the correct
// broker, refreshing metadata as appropriate, and parses responses for errors. You must call Close() on a producer
// to avoid leaks; it may not be garbage-collected automatically when it passes out of scope.
//
// The SyncProducer comes with two caveats: it will generally be less efficient than the AsyncProducer, and the actual
// durability guarantee provided when a message is acknowledged depends on the configured value of `Producer.RequiredAcks`.
// There are configurations where a message acknowledged by the SyncProducer can still sometimes be lost.
//
// For implementation reasons, the SyncProducer requires `Producer.Return.Errors` and `Producer.Return.Successes` to
// be set to true in its configuration.
type SyncProducer interface {
// SendMessage produces a given message, and returns only when it either has
// succeeded or failed to produce. It will return the partition and the offset
// of the produced message, or an error if the message failed to produce.
SendMessage(msg *ProducerMessage) (partition int32, offset int64, err error)
// SendMessages produces a given set of messages, and returns only when all
// messages in the set have either succeeded or failed. Note that messages
// can succeed and fail individually; if some succeed and some fail,
// SendMessages will return an error.
SendMessages(msgs []*ProducerMessage) error
// Close shuts down the producer and waits for any buffered messages to be
// flushed. You must call this function before a producer object passes out of
// scope, as it may otherwise leak memory. You must call this before calling
// Close on the underlying client.
Close() error
}
type syncProducer struct {
producer *asyncProducer
wg sync.WaitGroup
}
// NewSyncProducer creates a new SyncProducer using the given broker addresses and configuration.
func NewSyncProducer(addrs []string, config *Config) (SyncProducer, error) {
if config == nil {
config = NewConfig()
config.Producer.Return.Successes = true
}
if err := verifyProducerConfig(config); err != nil {
return nil, err
}
p, err := NewAsyncProducer(addrs, config)
if err != nil {
return nil, err
}
return newSyncProducerFromAsyncProducer(p.(*asyncProducer)), nil
}
// NewSyncProducerFromClient creates a new SyncProducer using the given client. It is still
// necessary to call Close() on the underlying client when shutting down this producer.
func NewSyncProducerFromClient(client Client) (SyncProducer, error) {
if err := verifyProducerConfig(client.Config()); err != nil {
return nil, err
}
p, err := NewAsyncProducerFromClient(client)
if err != nil {
return nil, err
}
return newSyncProducerFromAsyncProducer(p.(*asyncProducer)), nil
}
func newSyncProducerFromAsyncProducer(p *asyncProducer) *syncProducer {
sp := &syncProducer{producer: p}
sp.wg.Add(2)
go withRecover(sp.handleSuccesses)
go withRecover(sp.handleErrors)
return sp
}
func verifyProducerConfig(config *Config) error {
if !config.Producer.Return.Errors {
return ConfigurationError("Producer.Return.Errors must be true to be used in a SyncProducer")
}
if !config.Producer.Return.Successes {
return ConfigurationError("Producer.Return.Successes must be true to be used in a SyncProducer")
}
return nil
}
func (sp *syncProducer) SendMessage(msg *ProducerMessage) (partition int32, offset int64, err error) {
oldMetadata := msg.Metadata
defer func() {
msg.Metadata = oldMetadata
}()
expectation := make(chan *ProducerError, 1)
msg.Metadata = expectation
sp.producer.Input() <- msg
if err := <-expectation; err != nil {
return -1, -1, err.Err
}
return msg.Partition, msg.Offset, nil
}
func (sp *syncProducer) SendMessages(msgs []*ProducerMessage) error {
savedMetadata := make([]interface{}, len(msgs))
for i := range msgs {
savedMetadata[i] = msgs[i].Metadata
}
defer func() {
for i := range msgs {
msgs[i].Metadata = savedMetadata[i]
}
}()
expectations := make(chan chan *ProducerError, len(msgs))
go func() {
for _, msg := range msgs {
expectation := make(chan *ProducerError, 1)
msg.Metadata = expectation
sp.producer.Input() <- msg
expectations <- expectation
}
close(expectations)
}()
var errors ProducerErrors
for expectation := range expectations {
if err := <-expectation; err != nil {
errors = append(errors, err)
}
}
if len(errors) > 0 {
return errors
}
return nil
}
func (sp *syncProducer) handleSuccesses() {
defer sp.wg.Done()
for msg := range sp.producer.Successes() {
expectation := msg.Metadata.(chan *ProducerError)
expectation <- nil
}
}
func (sp *syncProducer) handleErrors() {
defer sp.wg.Done()
for err := range sp.producer.Errors() {
expectation := err.Msg.Metadata.(chan *ProducerError)
expectation <- err
}
}
func (sp *syncProducer) Close() error {
sp.producer.AsyncClose()
sp.wg.Wait()
return nil
}
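
Building on the sketch after sarama.go above, SendMessages batches several messages and reports per-message failures as one aggregated error; `producer` is the SyncProducer from that earlier sketch:

    msgs := []*sarama.ProducerMessage{
        {Topic: "events", Value: sarama.StringEncoder("a")},
        {Topic: "events", Value: sarama.StringEncoder("b")},
    }
    if err := producer.SendMessages(msgs); err != nil {
        // Messages succeed or fail individually; the returned error
        // aggregates only the failed ones.
        if perrs, ok := err.(sarama.ProducerErrors); ok {
            for _, perr := range perrs {
                log.Println("failed:", perr.Err)
            }
        }
    }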

150
vendor/github.com/Shopify/sarama/utils.go generated vendored Normal file
View File

@ -0,0 +1,150 @@
package sarama
import (
"bufio"
"net"
)
type none struct{}
// make []int32 sortable so we can sort partition numbers
type int32Slice []int32
func (slice int32Slice) Len() int {
return len(slice)
}
func (slice int32Slice) Less(i, j int) bool {
return slice[i] < slice[j]
}
func (slice int32Slice) Swap(i, j int) {
slice[i], slice[j] = slice[j], slice[i]
}
func dupInt32Slice(input []int32) []int32 {
ret := make([]int32, 0, len(input))
for _, val := range input {
ret = append(ret, val)
}
return ret
}
func withRecover(fn func()) {
defer func() {
handler := PanicHandler
if handler != nil {
if err := recover(); err != nil {
handler(err)
}
}
}()
fn()
}
func safeAsyncClose(b *Broker) {
tmp := b // local var prevents clobbering in goroutine
go withRecover(func() {
if connected, _ := tmp.Connected(); connected {
if err := tmp.Close(); err != nil {
Logger.Println("Error closing broker", tmp.ID(), ":", err)
}
}
})
}
// Encoder is a simple interface for any type that can be encoded as an array of bytes
// in order to be sent as the key or value of a Kafka message. Length() is provided as an
// optimization, and must return the same as len() on the result of Encode().
type Encoder interface {
Encode() ([]byte, error)
Length() int
}
// make strings and byte slices encodable for convenience so they can be used as keys
// and/or values in kafka messages
// StringEncoder implements the Encoder interface for Go strings so that they can be used
// as the Key or Value in a ProducerMessage.
type StringEncoder string
func (s StringEncoder) Encode() ([]byte, error) {
return []byte(s), nil
}
func (s StringEncoder) Length() int {
return len(s)
}
// ByteEncoder implements the Encoder interface for Go byte slices so that they can be used
// as the Key or Value in a ProducerMessage.
type ByteEncoder []byte
func (b ByteEncoder) Encode() ([]byte, error) {
return b, nil
}
func (b ByteEncoder) Length() int {
return len(b)
}
// bufConn wraps a net.Conn with a buffer for reads to reduce the number of
// reads that trigger syscalls.
type bufConn struct {
net.Conn
buf *bufio.Reader
}
func newBufConn(conn net.Conn) *bufConn {
return &bufConn{
Conn: conn,
buf: bufio.NewReader(conn),
}
}
func (bc *bufConn) Read(b []byte) (n int, err error) {
return bc.buf.Read(b)
}
// KafkaVersion instances represent versions of the upstream Kafka broker.
type KafkaVersion struct {
// it's a struct rather than just typing the array directly to make it opaque and stop people
// generating their own arbitrary versions
version [4]uint
}
func newKafkaVersion(major, minor, veryMinor, patch uint) KafkaVersion {
return KafkaVersion{
version: [4]uint{major, minor, veryMinor, patch},
}
}
// IsAtLeast returns true if and only if the version it is called on is
// greater than or equal to the version passed in:
// V1.IsAtLeast(V2) // false
// V2.IsAtLeast(V1) // true
func (v KafkaVersion) IsAtLeast(other KafkaVersion) bool {
for i := range v.version {
if v.version[i] > other.version[i] {
return true
} else if v.version[i] < other.version[i] {
return false
}
}
return true
}
// Effective constants defining the supported kafka versions.
var (
V0_8_2_0 = newKafkaVersion(0, 8, 2, 0)
V0_8_2_1 = newKafkaVersion(0, 8, 2, 1)
V0_8_2_2 = newKafkaVersion(0, 8, 2, 2)
V0_9_0_0 = newKafkaVersion(0, 9, 0, 0)
V0_9_0_1 = newKafkaVersion(0, 9, 0, 1)
V0_10_0_0 = newKafkaVersion(0, 10, 0, 0)
V0_10_0_1 = newKafkaVersion(0, 10, 0, 1)
V0_10_1_0 = newKafkaVersion(0, 10, 1, 0)
V0_10_2_0 = newKafkaVersion(0, 10, 2, 0)
minVersion = V0_8_2_0
)
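
These KafkaVersion constants gate protocol features; for example, the SASL handshake request earlier in this diff requires V0_10_0_0 and the SyncGroup request requires V0_9_0_0. A small sketch of version gating through sarama's Config.Version field:

    cfg := sarama.NewConfig()
    cfg.Version = sarama.V0_10_0_0
    if cfg.Version.IsAtLeast(sarama.V0_9_0_0) {
        // Consumer-group requests such as SyncGroup (key 14) are available.
    }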

202
vendor/github.com/aws/aws-sdk-go/LICENSE.txt generated vendored Normal file
View File

@ -0,0 +1,202 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

3
vendor/github.com/aws/aws-sdk-go/NOTICE.txt generated vendored Normal file
View File

@ -0,0 +1,3 @@
AWS SDK for Go
Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
Copyright 2014-2015 Stripe, Inc.

145
vendor/github.com/aws/aws-sdk-go/aws/awserr/error.go generated vendored Normal file
View File

@ -0,0 +1,145 @@
// Package awserr represents API error interface accessors for the SDK.
package awserr
// An Error wraps lower level errors with code, message and an original error.
// The underlying concrete error type may also satisfy other interfaces which
// can be used to obtain more specific information about the error.
//
// Calling Error() or String() will always include the full information about
// an error based on its underlying type.
//
// Example:
//
// output, err := s3manage.Upload(svc, input, opts)
// if err != nil {
// if awsErr, ok := err.(awserr.Error); ok {
// // Get error details
// log.Println("Error:", awsErr.Code(), awsErr.Message())
//
// // Prints out full error message, including original error if there was one.
// log.Println("Error:", awsErr.Error())
//
// // Get original error
// if origErr := awsErr.OrigErr(); origErr != nil {
// // operate on original error.
// }
// } else {
// fmt.Println(err.Error())
// }
// }
//
type Error interface {
// Satisfy the generic error interface.
error
// Returns the short phrase depicting the classification of the error.
Code() string
// Returns the error details message.
Message() string
// Returns the original error if one was set. Nil is returned if not set.
OrigErr() error
}
// BatchError is a batch of errors which also wraps lower level errors with
// code, message, and original errors. Calling Error() will include all errors
// that occurred in the batch.
//
// Deprecated: Replaced with BatchedErrors. Only defined for backwards
// compatibility.
type BatchError interface {
// Satisfy the generic error interface.
error
// Returns the short phrase depicting the classification of the error.
Code() string
// Returns the error details message.
Message() string
// Returns the original error if one was set. Nil is returned if not set.
OrigErrs() []error
}
// BatchedErrors is a batch of errors which also wraps lower level errors with
// code, message, and original errors. Calling Error() will include all errors
// that occurred in the batch.
//
// Replaces BatchError
type BatchedErrors interface {
// Satisfy the base Error interface.
Error
// Returns the original error if one was set. Nil is returned if not set.
OrigErrs() []error
}
// New returns an Error object described by the code, message, and origErr.
//
// If origErr satisfies the Error interface it will not be wrapped within a new
// Error object and will instead be returned.
func New(code, message string, origErr error) Error {
var errs []error
if origErr != nil {
errs = append(errs, origErr)
}
return newBaseError(code, message, errs)
}
// NewBatchError returns a BatchedErrors with a collection of errors as an
// array of errors.
func NewBatchError(code, message string, errs []error) BatchedErrors {
return newBaseError(code, message, errs)
}
// A RequestFailure is an interface to extract request failure information from
// an Error such as the request ID of the failed request returned by a service.
// RequestFailures may not always have a requestID value if the request failed
// prior to reaching the service such as a connection error.
//
// Example:
//
// output, err := s3manage.Upload(svc, input, opts)
// if err != nil {
// if reqerr, ok := err.(RequestFailure); ok {
// log.Println("Request failed", reqerr.Code(), reqerr.Message(), reqerr.RequestID())
// } else {
// log.Println("Error:", err.Error())
// }
// }
//
// Combined with awserr.Error:
//
// output, err := s3manage.Upload(svc, input, opts)
// if err != nil {
// if awsErr, ok := err.(awserr.Error); ok {
// // Generic AWS Error with Code, Message, and original error (if any)
// fmt.Println(awsErr.Code(), awsErr.Message(), awsErr.OrigErr())
//
// if reqErr, ok := err.(awserr.RequestFailure); ok {
// // A service error occurred
// fmt.Println(reqErr.StatusCode(), reqErr.RequestID())
// }
// } else {
// fmt.Println(err.Error())
// }
// }
//
type RequestFailure interface {
Error
// The status code of the HTTP response.
StatusCode() int
// The request ID returned by the service for a request failure. This will
// be empty if no request ID is available such as the request failed due
// to a connection error.
RequestID() string
}
// NewRequestFailure returns a new request error wrapper for the given Error
// provided.
func NewRequestFailure(err Error, statusCode int, reqID string) RequestFailure {
return newRequestError(err, statusCode, reqID)
}
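
A brief sketch of how these accessors compose in practice, assuming fmt and errors are imported; the code and message strings are illustrative:

    opErr := errors.New("dial tcp 127.0.0.1:443: connection refused")
    err := awserr.New("RequestError", "send request failed", opErr)
    if aerr, ok := err.(awserr.Error); ok {
        fmt.Println(aerr.Code())    // RequestError
        fmt.Println(aerr.Message()) // send request failed
        fmt.Println(aerr.OrigErr()) // the wrapped dial error
    }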

194
vendor/github.com/aws/aws-sdk-go/aws/awserr/types.go generated vendored Normal file
View File

@ -0,0 +1,194 @@
package awserr
import "fmt"
// SprintError returns a string of the formatted error code.
//
// Both extra and origErr are optional; when either is present its line is
// added to the output, and when absent it is simply omitted.
func SprintError(code, message, extra string, origErr error) string {
msg := fmt.Sprintf("%s: %s", code, message)
if extra != "" {
msg = fmt.Sprintf("%s\n\t%s", msg, extra)
}
if origErr != nil {
msg = fmt.Sprintf("%s\ncaused by: %s", msg, origErr.Error())
}
return msg
}
// A baseError wraps the code and message which defines an error. It also
// can be used to wrap an original error object.
//
// Should be used as the root for errors satisfying the awserr.Error. Also
// for any error which does not fit into a specific error wrapper type.
type baseError struct {
// Classification of error
code string
// Detailed information about error
message string
// Optional original error this error is based off of. Allows building
// chained errors.
errs []error
}
// newBaseError returns an error object for the code, message, and errors.
//
// code is a short, whitespace-free phrase depicting the classification of
// the error that is being created.
//
// message is the free flow string containing detailed information about the
// error.
//
// origErrs is the error objects which will be nested under the new errors to
// be returned.
func newBaseError(code, message string, origErrs []error) *baseError {
b := &baseError{
code: code,
message: message,
errs: origErrs,
}
return b
}
// Error returns the string representation of the error.
//
// See ErrorWithExtra for formatting.
//
// Satisfies the error interface.
func (b baseError) Error() string {
size := len(b.errs)
if size > 0 {
return SprintError(b.code, b.message, "", errorList(b.errs))
}
return SprintError(b.code, b.message, "", nil)
}
// String returns the string representation of the error.
// Alias for Error to satisfy the stringer interface.
func (b baseError) String() string {
return b.Error()
}
// Code returns the short phrase depicting the classification of the error.
func (b baseError) Code() string {
return b.code
}
// Message returns the error details message.
func (b baseError) Message() string {
return b.message
}
// OrigErr returns the original error if one was set. Nil is returned if no
// error was set. This only returns the first element in the list. If the full
// list is needed, use BatchedErrors.
func (b baseError) OrigErr() error {
switch len(b.errs) {
case 0:
return nil
case 1:
return b.errs[0]
default:
if err, ok := b.errs[0].(Error); ok {
return NewBatchError(err.Code(), err.Message(), b.errs[1:])
}
return NewBatchError("BatchedErrors",
"multiple errors occurred", b.errs)
}
}
// OrigErrs returns the original errors if one was set. An empty slice is
// returned if no error was set.
func (b baseError) OrigErrs() []error {
return b.errs
}
// So that the Error interface type can be included as an anonymous field
// in the requestError struct and not conflict with the error.Error() method.
type awsError Error
// A requestError wraps a request or service error.
//
// Composed of baseError for code, message, and original error.
type requestError struct {
awsError
statusCode int
requestID string
}
// newRequestError returns a wrapped error with additional information for
// request status code, and service requestID.
//
// Should be used to wrap all requests which involve service calls, even if
// the request failed without a service response but had an HTTP status code
// that may be meaningful.
//
// Also wraps original errors via the baseError.
func newRequestError(err Error, statusCode int, requestID string) *requestError {
return &requestError{
awsError: err,
statusCode: statusCode,
requestID: requestID,
}
}
// Error returns the string representation of the error.
// Satisfies the error interface.
func (r requestError) Error() string {
extra := fmt.Sprintf("status code: %d, request id: %s",
r.statusCode, r.requestID)
return SprintError(r.Code(), r.Message(), extra, r.OrigErr())
}
// String returns the string representation of the error.
// Alias for Error to satisfy the stringer interface.
func (r requestError) String() string {
return r.Error()
}
// StatusCode returns the wrapped status code for the error
func (r requestError) StatusCode() int {
return r.statusCode
}
// RequestID returns the wrapped requestID
func (r requestError) RequestID() string {
return r.requestID
}
// OrigErrs returns the original errors if one was set. An empty slice is
// returned if no error was set.
func (r requestError) OrigErrs() []error {
if b, ok := r.awsError.(BatchedErrors); ok {
return b.OrigErrs()
}
return []error{r.OrigErr()}
}
// An error list that satisfies the Go error interface
type errorList []error
// Error returns the string representation of the error.
//
// Satisfies the error interface.
func (e errorList) Error() string {
msg := ""
// How do we want to handle the array size being zero
if size := len(e); size > 0 {
for i := 0; i < size; i++ {
msg += fmt.Sprintf("%s", e[i].Error())
// We check the next index to see if it is within the slice.
// If it is, then we append a newline. We do this because unit tests
// could break with an additional trailing '\n'.
if i+1 < size {
msg += "\n"
}
}
}
return msg
}
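
For reference, SprintError produces output in the shape below (a tab-indented extra line, then a caused-by line); the values are illustrative:

    msg := awserr.SprintError("Throttling", "rate exceeded", "retrying in 2s", errors.New("503"))
    // Throttling: rate exceeded
    //     retrying in 2s
    // caused by: 503
    fmt.Println(msg)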

108
vendor/github.com/aws/aws-sdk-go/aws/awsutil/copy.go generated vendored Normal file
View File

@ -0,0 +1,108 @@
package awsutil
import (
"io"
"reflect"
"time"
)
// Copy deeply copies a src structure to dst. Useful for copying request and
// response structures.
//
// Can copy between structs of different type, but will only copy fields which
// are assignable, and exist in both structs. Fields which are not assignable,
// or do not exist in both structs are ignored.
func Copy(dst, src interface{}) {
dstval := reflect.ValueOf(dst)
if !dstval.IsValid() {
panic("Copy dst cannot be nil")
}
rcopy(dstval, reflect.ValueOf(src), true)
}
// CopyOf returns a copy of src while also allocating the memory for dst.
// src must be a pointer type or this operation will fail.
func CopyOf(src interface{}) (dst interface{}) {
dsti := reflect.New(reflect.TypeOf(src).Elem())
dst = dsti.Interface()
rcopy(dsti, reflect.ValueOf(src), true)
return
}
// rcopy performs a recursive copy of values from the source to destination.
//
// root is used to skip certain aspects of the copy which are not valid
// for the root node of an object.
func rcopy(dst, src reflect.Value, root bool) {
if !src.IsValid() {
return
}
switch src.Kind() {
case reflect.Ptr:
if _, ok := src.Interface().(io.Reader); ok {
if dst.Kind() == reflect.Ptr && dst.Elem().CanSet() {
dst.Elem().Set(src)
} else if dst.CanSet() {
dst.Set(src)
}
} else {
e := src.Type().Elem()
if dst.CanSet() && !src.IsNil() {
if _, ok := src.Interface().(*time.Time); !ok {
dst.Set(reflect.New(e))
} else {
tempValue := reflect.New(e)
tempValue.Elem().Set(src.Elem())
// Sets time.Time's unexported values
dst.Set(tempValue)
}
}
if src.Elem().IsValid() {
// Keep the current root state since the depth hasn't changed
rcopy(dst.Elem(), src.Elem(), root)
}
}
case reflect.Struct:
t := dst.Type()
for i := 0; i < t.NumField(); i++ {
name := t.Field(i).Name
srcVal := src.FieldByName(name)
dstVal := dst.FieldByName(name)
if srcVal.IsValid() && dstVal.CanSet() {
rcopy(dstVal, srcVal, false)
}
}
case reflect.Slice:
if src.IsNil() {
break
}
s := reflect.MakeSlice(src.Type(), src.Len(), src.Cap())
dst.Set(s)
for i := 0; i < src.Len(); i++ {
rcopy(dst.Index(i), src.Index(i), false)
}
case reflect.Map:
if src.IsNil() {
break
}
s := reflect.MakeMap(src.Type())
dst.Set(s)
for _, k := range src.MapKeys() {
v := src.MapIndex(k)
v2 := reflect.New(v.Type()).Elem()
rcopy(v2, v, false)
dst.SetMapIndex(k, v2)
}
default:
// Assign the value if possible. If its not assignable, the value would
// need to be converted and the impact of that may be unexpected, or is
// not compatible with the dst type.
if src.Type().AssignableTo(dst.Type()) {
dst.Set(src)
}
}
}
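
A minimal sketch of Copy and CopyOf on a hypothetical struct type:

    type widget struct {
        Name string
        Tags []string
    }

    src := &widget{Name: "a", Tags: []string{"x", "y"}}
    var dst widget
    awsutil.Copy(&dst, src) // deep copy into an existing value

    clone := awsutil.CopyOf(src).(*widget) // allocates dst; src must be a pointer
    clone.Tags[0] = "z"                    // does not affect src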

27
vendor/github.com/aws/aws-sdk-go/aws/awsutil/equal.go generated vendored Normal file
View File

@ -0,0 +1,27 @@
package awsutil
import (
"reflect"
)
// DeepEqual reports whether the two values are deeply equal, like
// reflect.DeepEqual. In addition, it dereferences the input values when
// possible, so the comparison will not fail merely because one parameter is a
// pointer and the other is not.
//
// DeepEqual will not perform indirection of nested values of the input parameters.
func DeepEqual(a, b interface{}) bool {
ra := reflect.Indirect(reflect.ValueOf(a))
rb := reflect.Indirect(reflect.ValueOf(b))
if raValid, rbValid := ra.IsValid(), rb.IsValid(); !raValid && !rbValid {
// If both elements are nil and of the same type, they are equal.
// If they are of different types, they are not equal.
return reflect.TypeOf(a) == reflect.TypeOf(b)
} else if raValid != rbValid {
// Both values must be valid to be equal
return false
}
return reflect.DeepEqual(ra.Interface(), rb.Interface())
}
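
A quick sketch of the pointer-indirection behavior:

    s := "tile38"
    awsutil.DeepEqual(s, &s)  // true: &s is dereferenced before comparison
    awsutil.DeepEqual(s, nil) // false: only one of the two values is valid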

222
vendor/github.com/aws/aws-sdk-go/aws/awsutil/path_value.go generated vendored Normal file
View File

@ -0,0 +1,222 @@
package awsutil
import (
"reflect"
"regexp"
"strconv"
"strings"
"github.com/jmespath/go-jmespath"
)
var indexRe = regexp.MustCompile(`(.+)\[(-?\d+)?\]$`)
// rValuesAtPath returns a slice of values found in value v. The values
// in v are explored recursively so all nested values are collected.
func rValuesAtPath(v interface{}, path string, createPath, caseSensitive, nilTerm bool) []reflect.Value {
pathparts := strings.Split(path, "||")
if len(pathparts) > 1 {
for _, pathpart := range pathparts {
vals := rValuesAtPath(v, pathpart, createPath, caseSensitive, nilTerm)
if len(vals) > 0 {
return vals
}
}
return nil
}
values := []reflect.Value{reflect.Indirect(reflect.ValueOf(v))}
components := strings.Split(path, ".")
for len(values) > 0 && len(components) > 0 {
var index *int64
var indexStar bool
c := strings.TrimSpace(components[0])
if c == "" { // no actual component, illegal syntax
return nil
} else if caseSensitive && c != "*" && strings.ToLower(c[0:1]) == c[0:1] {
// TODO normalize case for user
return nil // don't support unexported fields
}
// parse this component
if m := indexRe.FindStringSubmatch(c); m != nil {
c = m[1]
if m[2] == "" {
index = nil
indexStar = true
} else {
i, _ := strconv.ParseInt(m[2], 10, 32)
index = &i
indexStar = false
}
}
nextvals := []reflect.Value{}
for _, value := range values {
// pull component name out of struct member
if value.Kind() != reflect.Struct {
continue
}
if c == "*" { // pull all members
for i := 0; i < value.NumField(); i++ {
if f := reflect.Indirect(value.Field(i)); f.IsValid() {
nextvals = append(nextvals, f)
}
}
continue
}
value = value.FieldByNameFunc(func(name string) bool {
if c == name {
return true
} else if !caseSensitive && strings.ToLower(name) == strings.ToLower(c) {
return true
}
return false
})
if nilTerm && value.Kind() == reflect.Ptr && len(components[1:]) == 0 {
if !value.IsNil() {
value.Set(reflect.Zero(value.Type()))
}
return []reflect.Value{value}
}
if createPath && value.Kind() == reflect.Ptr && value.IsNil() {
// TODO if the value is the terminus it should not be created
// if the value to be set to its position is nil.
value.Set(reflect.New(value.Type().Elem()))
value = value.Elem()
} else {
value = reflect.Indirect(value)
}
if value.Kind() == reflect.Slice || value.Kind() == reflect.Map {
if !createPath && value.IsNil() {
value = reflect.ValueOf(nil)
}
}
if value.IsValid() {
nextvals = append(nextvals, value)
}
}
values = nextvals
if indexStar || index != nil {
nextvals = []reflect.Value{}
for _, valItem := range values {
value := reflect.Indirect(valItem)
if value.Kind() != reflect.Slice {
continue
}
if indexStar { // grab all indices
for i := 0; i < value.Len(); i++ {
idx := reflect.Indirect(value.Index(i))
if idx.IsValid() {
nextvals = append(nextvals, idx)
}
}
continue
}
// pull out index
i := int(*index)
if i >= value.Len() { // check out of bounds
if createPath {
// TODO resize slice
} else {
continue
}
} else if i < 0 { // support negative indexing
i = value.Len() + i
}
value = reflect.Indirect(value.Index(i))
if value.Kind() == reflect.Slice || value.Kind() == reflect.Map {
if !createPath && value.IsNil() {
value = reflect.ValueOf(nil)
}
}
if value.IsValid() {
nextvals = append(nextvals, value)
}
}
values = nextvals
}
components = components[1:]
}
return values
}
// ValuesAtPath returns a list of values at the case insensitive lexical
// path inside of a structure.
func ValuesAtPath(i interface{}, path string) ([]interface{}, error) {
result, err := jmespath.Search(path, i)
if err != nil {
return nil, err
}
v := reflect.ValueOf(result)
if !v.IsValid() || (v.Kind() == reflect.Ptr && v.IsNil()) {
return nil, nil
}
if s, ok := result.([]interface{}); ok {
return s, err
}
if v.Kind() == reflect.Map && v.Len() == 0 {
return nil, nil
}
if v.Kind() == reflect.Slice {
out := make([]interface{}, v.Len())
for i := 0; i < v.Len(); i++ {
out[i] = v.Index(i).Interface()
}
return out, nil
}
return []interface{}{result}, nil
}
// SetValueAtPath sets a value at the case insensitive lexical path inside
// of a structure.
func SetValueAtPath(i interface{}, path string, v interface{}) {
if rvals := rValuesAtPath(i, path, true, false, v == nil); rvals != nil {
for _, rval := range rvals {
if rval.Kind() == reflect.Ptr && rval.IsNil() {
continue
}
setValue(rval, v)
}
}
}
func setValue(dstVal reflect.Value, src interface{}) {
if dstVal.Kind() == reflect.Ptr {
dstVal = reflect.Indirect(dstVal)
}
srcVal := reflect.ValueOf(src)
if !srcVal.IsValid() { // src is literal nil
if dstVal.CanAddr() {
// Convert to pointer so that pointer's value can be nil'ed
// dstVal = dstVal.Addr()
}
dstVal.Set(reflect.Zero(dstVal.Type()))
} else if srcVal.Kind() == reflect.Ptr {
if srcVal.IsNil() {
srcVal = reflect.Zero(dstVal.Type())
} else {
srcVal = reflect.ValueOf(src).Elem()
}
dstVal.Set(srcVal)
} else {
dstVal.Set(srcVal)
}
}
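
A sketch of ValuesAtPath with a JMESPath-style flattening projection; the types here are hypothetical stand-ins for SDK response structs:

    type instance struct{ State string }
    type output struct{ Instances []instance }

    out := output{Instances: []instance{{State: "running"}, {State: "pending"}}}
    states, err := awsutil.ValuesAtPath(out, "Instances[].State")
    if err == nil {
        fmt.Println(states) // [running pending]
    }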

113
vendor/github.com/aws/aws-sdk-go/aws/awsutil/prettify.go generated vendored Normal file
View File

@ -0,0 +1,113 @@
package awsutil
import (
"bytes"
"fmt"
"io"
"reflect"
"strings"
)
// Prettify returns the string representation of a value.
func Prettify(i interface{}) string {
var buf bytes.Buffer
prettify(reflect.ValueOf(i), 0, &buf)
return buf.String()
}
// prettify will recursively walk value v to build a textual
// representation of the value.
func prettify(v reflect.Value, indent int, buf *bytes.Buffer) {
for v.Kind() == reflect.Ptr {
v = v.Elem()
}
switch v.Kind() {
case reflect.Struct:
strtype := v.Type().String()
if strtype == "time.Time" {
fmt.Fprintf(buf, "%s", v.Interface())
break
} else if strings.HasPrefix(strtype, "io.") {
buf.WriteString("<buffer>")
break
}
buf.WriteString("{\n")
names := []string{}
for i := 0; i < v.Type().NumField(); i++ {
name := v.Type().Field(i).Name
f := v.Field(i)
if name[0:1] == strings.ToLower(name[0:1]) {
continue // ignore unexported fields
}
if (f.Kind() == reflect.Ptr || f.Kind() == reflect.Slice || f.Kind() == reflect.Map) && f.IsNil() {
continue // ignore unset fields
}
names = append(names, name)
}
for i, n := range names {
val := v.FieldByName(n)
buf.WriteString(strings.Repeat(" ", indent+2))
buf.WriteString(n + ": ")
prettify(val, indent+2, buf)
if i < len(names)-1 {
buf.WriteString(",\n")
}
}
buf.WriteString("\n" + strings.Repeat(" ", indent) + "}")
case reflect.Slice:
strtype := v.Type().String()
if strtype == "[]uint8" {
fmt.Fprintf(buf, "<binary> len %d", v.Len())
break
}
nl, id, id2 := "", "", ""
if v.Len() > 3 {
nl, id, id2 = "\n", strings.Repeat(" ", indent), strings.Repeat(" ", indent+2)
}
buf.WriteString("[" + nl)
for i := 0; i < v.Len(); i++ {
buf.WriteString(id2)
prettify(v.Index(i), indent+2, buf)
if i < v.Len()-1 {
buf.WriteString("," + nl)
}
}
buf.WriteString(nl + id + "]")
case reflect.Map:
buf.WriteString("{\n")
for i, k := range v.MapKeys() {
buf.WriteString(strings.Repeat(" ", indent+2))
buf.WriteString(k.String() + ": ")
prettify(v.MapIndex(k), indent+2, buf)
if i < v.Len()-1 {
buf.WriteString(",\n")
}
}
buf.WriteString("\n" + strings.Repeat(" ", indent) + "}")
default:
if !v.IsValid() {
fmt.Fprint(buf, "<invalid value>")
return
}
format := "%v"
switch v.Interface().(type) {
case string:
format = "%q"
case io.ReadSeeker, io.Reader:
format = "buffer(%p)"
}
fmt.Fprintf(buf, format, v.Interface())
}
}

88
vendor/github.com/aws/aws-sdk-go/aws/awsutil/string_value.go generated vendored Normal file
View File

@ -0,0 +1,88 @@
package awsutil
import (
"bytes"
"fmt"
"reflect"
"strings"
)
// StringValue returns the string representation of a value.
func StringValue(i interface{}) string {
var buf bytes.Buffer
stringValue(reflect.ValueOf(i), 0, &buf)
return buf.String()
}
func stringValue(v reflect.Value, indent int, buf *bytes.Buffer) {
for v.Kind() == reflect.Ptr {
v = v.Elem()
}
switch v.Kind() {
case reflect.Struct:
buf.WriteString("{\n")
for i := 0; i < v.Type().NumField(); i++ {
ft := v.Type().Field(i)
fv := v.Field(i)
if ft.Name[0:1] == strings.ToLower(ft.Name[0:1]) {
continue // ignore unexported fields
}
if (fv.Kind() == reflect.Ptr || fv.Kind() == reflect.Slice) && fv.IsNil() {
continue // ignore unset fields
}
buf.WriteString(strings.Repeat(" ", indent+2))
buf.WriteString(ft.Name + ": ")
if tag := ft.Tag.Get("sensitive"); tag == "true" {
buf.WriteString("<sensitive>")
} else {
stringValue(fv, indent+2, buf)
}
buf.WriteString(",\n")
}
buf.WriteString("\n" + strings.Repeat(" ", indent) + "}")
case reflect.Slice:
nl, id, id2 := "", "", ""
if v.Len() > 3 {
nl, id, id2 = "\n", strings.Repeat(" ", indent), strings.Repeat(" ", indent+2)
}
buf.WriteString("[" + nl)
for i := 0; i < v.Len(); i++ {
buf.WriteString(id2)
stringValue(v.Index(i), indent+2, buf)
if i < v.Len()-1 {
buf.WriteString("," + nl)
}
}
buf.WriteString(nl + id + "]")
case reflect.Map:
buf.WriteString("{\n")
for i, k := range v.MapKeys() {
buf.WriteString(strings.Repeat(" ", indent+2))
buf.WriteString(k.String() + ": ")
stringValue(v.MapIndex(k), indent+2, buf)
if i < v.Len()-1 {
buf.WriteString(",\n")
}
}
buf.WriteString("\n" + strings.Repeat(" ", indent) + "}")
default:
format := "%v"
switch v.Interface().(type) {
case string:
format = "%q"
}
fmt.Fprintf(buf, format, v.Interface())
}
}

96
vendor/github.com/aws/aws-sdk-go/aws/client/client.go generated vendored Normal file
View File

@ -0,0 +1,96 @@
package client
import (
"fmt"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/client/metadata"
"github.com/aws/aws-sdk-go/aws/request"
)
// A Config provides configuration to a service client instance.
type Config struct {
Config *aws.Config
Handlers request.Handlers
Endpoint string
SigningRegion string
SigningName string
// States that the signing name did not come from a modeled source but
// was derived based on other data. Used by service client constructors
// to determine if the signing name can be overridden based on metadata the
// service has.
SigningNameDerived bool
}
// ConfigProvider provides a generic way for a service client to receive
// the ClientConfig without circular dependencies.
type ConfigProvider interface {
ClientConfig(serviceName string, cfgs ...*aws.Config) Config
}
// ConfigNoResolveEndpointProvider same as ConfigProvider except it will not
// resolve the endpoint automatically. The service client's endpoint must be
// provided via the aws.Config.Endpoint field.
type ConfigNoResolveEndpointProvider interface {
ClientConfigNoResolveEndpoint(cfgs ...*aws.Config) Config
}
// A Client implements the base client request and response handling
// used by all service clients.
type Client struct {
request.Retryer
metadata.ClientInfo
Config aws.Config
Handlers request.Handlers
}
// New will return a pointer to a new initialized service client.
func New(cfg aws.Config, info metadata.ClientInfo, handlers request.Handlers, options ...func(*Client)) *Client {
svc := &Client{
Config: cfg,
ClientInfo: info,
Handlers: handlers.Copy(),
}
switch retryer, ok := cfg.Retryer.(request.Retryer); {
case ok:
svc.Retryer = retryer
case cfg.Retryer != nil && cfg.Logger != nil:
s := fmt.Sprintf("WARNING: %T does not implement request.Retryer; using DefaultRetryer instead", cfg.Retryer)
cfg.Logger.Log(s)
fallthrough
default:
maxRetries := aws.IntValue(cfg.MaxRetries)
if cfg.MaxRetries == nil || maxRetries == aws.UseServiceDefaultRetries {
maxRetries = 3
}
svc.Retryer = DefaultRetryer{NumMaxRetries: maxRetries}
}
svc.AddDebugHandlers()
for _, option := range options {
option(svc)
}
return svc
}
// NewRequest returns a new Request pointer for the service API
// operation and parameters.
func (c *Client) NewRequest(operation *request.Operation, params interface{}, data interface{}) *request.Request {
return request.New(c.Config, c.ClientInfo, c.Handlers, c.Retryer, operation, params, data)
}
// AddDebugHandlers injects debug logging handlers into the service to log request
// debug information.
func (c *Client) AddDebugHandlers() {
if !c.Config.LogLevel.AtLeast(aws.LogDebug) {
return
}
c.Handlers.Send.PushFrontNamed(LogHTTPRequestHandler)
c.Handlers.Send.PushBackNamed(LogHTTPResponseHandler)
}
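
A sketch of constructing a base Client directly; the service name is illustrative, and real service clients normally do this inside their generated constructors:

    c := client.New(
        *aws.NewConfig().WithMaxRetries(5),
        metadata.ClientInfo{ServiceName: "example"},
        request.Handlers{},
    )
    // c.Retryer is now a DefaultRetryer with NumMaxRetries == 5.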

116
vendor/github.com/aws/aws-sdk-go/aws/client/default_retryer.go generated vendored Normal file
View File

@ -0,0 +1,116 @@
package client
import (
"strconv"
"time"
"github.com/aws/aws-sdk-go/aws/request"
"github.com/aws/aws-sdk-go/internal/sdkrand"
)
// DefaultRetryer implements basic retry logic using exponential backoff for
// most services. If you want to implement custom retry logic, implement the
// request.Retryer interface or create a structure type that composes this
// struct and override the specific methods. For example, to override only
// the MaxRetries method:
//
// type retryer struct {
// client.DefaultRetryer
// }
//
// // This implementation always has 100 max retries
// func (d retryer) MaxRetries() int { return 100 }
type DefaultRetryer struct {
NumMaxRetries int
}
// MaxRetries returns the maximum number of retries the service will use to make
// an individual API request.
func (d DefaultRetryer) MaxRetries() int {
return d.NumMaxRetries
}
// RetryRules returns the delay duration before retrying this request again
func (d DefaultRetryer) RetryRules(r *request.Request) time.Duration {
// Set the upper limit of delay in retrying at ~five minutes
minTime := 30
throttle := d.shouldThrottle(r)
if throttle {
if delay, ok := getRetryDelay(r); ok {
return delay
}
minTime = 500
}
retryCount := r.RetryCount
if throttle && retryCount > 8 {
retryCount = 8
} else if retryCount > 13 {
retryCount = 13
}
delay := (1 << uint(retryCount)) * (sdkrand.SeededRand.Intn(minTime) + minTime)
return time.Duration(delay) * time.Millisecond
}
// ShouldRetry returns true if the request should be retried.
func (d DefaultRetryer) ShouldRetry(r *request.Request) bool {
// If one of the other handlers already set the retry state
// we don't want to override it based on the service's state
if r.Retryable != nil {
return *r.Retryable
}
if r.HTTPResponse.StatusCode >= 500 && r.HTTPResponse.StatusCode != 501 {
return true
}
return r.IsErrorRetryable() || d.shouldThrottle(r)
}
// shouldThrottle returns true if the request should be throttled.
func (d DefaultRetryer) shouldThrottle(r *request.Request) bool {
switch r.HTTPResponse.StatusCode {
case 429:
case 502:
case 503:
case 504:
default:
return r.IsErrorThrottle()
}
return true
}
// getRetryDelay looks at the Retry-After header (RFC 7231) for how long to
// wait before attempting another request
func getRetryDelay(r *request.Request) (time.Duration, bool) {
if !canUseRetryAfterHeader(r) {
return 0, false
}
delayStr := r.HTTPResponse.Header.Get("Retry-After")
if len(delayStr) == 0 {
return 0, false
}
delay, err := strconv.Atoi(delayStr)
if err != nil {
return 0, false
}
return time.Duration(delay) * time.Second, true
}
// canUseRetryAfterHeader reports whether the Retry-After header pertains to
// the response's status code.
func canUseRetryAfterHeader(r *request.Request) bool {
switch r.HTTPResponse.StatusCode {
case 429:
case 503:
default:
return false
}
return true
}
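
Following the override pattern from the DefaultRetryer doc comment, a sketch of composing it while capping retries; the type name is hypothetical:

    // cappedRetryer composes DefaultRetryer but always reports 10 max retries.
    type cappedRetryer struct {
        client.DefaultRetryer
    }

    func (c cappedRetryer) MaxRetries() int { return 10 }

It can then be supplied through aws.Config's Retryer field, which client.New type-asserts to request.Retryer.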

190
vendor/github.com/aws/aws-sdk-go/aws/client/logger.go generated vendored Normal file
View File

@ -0,0 +1,190 @@
package client
import (
"bytes"
"fmt"
"io"
"io/ioutil"
"net/http/httputil"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/request"
)
const logReqMsg = `DEBUG: Request %s/%s Details:
---[ REQUEST POST-SIGN ]-----------------------------
%s
-----------------------------------------------------`
const logReqErrMsg = `DEBUG ERROR: Request %s/%s:
---[ REQUEST DUMP ERROR ]-----------------------------
%s
------------------------------------------------------`
type logWriter struct {
// Logger is what we will use to log the payload of a response.
Logger aws.Logger
// buf stores the contents of what has been read
buf *bytes.Buffer
}
func (logger *logWriter) Write(b []byte) (int, error) {
return logger.buf.Write(b)
}
type teeReaderCloser struct {
// io.Reader will be a tee reader that is used during logging.
// This structure will read from a body and write the contents to a logger.
io.Reader
// Source is used just to close when we are done reading.
Source io.ReadCloser
}
func (reader *teeReaderCloser) Close() error {
return reader.Source.Close()
}
// LogHTTPRequestHandler is an SDK request handler to log the HTTP request sent
// to a service. Will include the HTTP request body if the LogLevel of the
// request matches LogDebugWithHTTPBody.
var LogHTTPRequestHandler = request.NamedHandler{
Name: "awssdk.client.LogRequest",
Fn: logRequest,
}
func logRequest(r *request.Request) {
logBody := r.Config.LogLevel.Matches(aws.LogDebugWithHTTPBody)
bodySeekable := aws.IsReaderSeekable(r.Body)
b, err := httputil.DumpRequestOut(r.HTTPRequest, logBody)
if err != nil {
r.Config.Logger.Log(fmt.Sprintf(logReqErrMsg,
r.ClientInfo.ServiceName, r.Operation.Name, err))
return
}
if logBody {
if !bodySeekable {
r.SetReaderBody(aws.ReadSeekCloser(r.HTTPRequest.Body))
}
// Reset the request body because dumpRequest will re-wrap the r.HTTPRequest's
// Body as a NoOpCloser and will not be reset after read by the HTTP
// client reader.
r.ResetBody()
}
r.Config.Logger.Log(fmt.Sprintf(logReqMsg,
r.ClientInfo.ServiceName, r.Operation.Name, string(b)))
}
// LogHTTPRequestHeaderHandler is an SDK request handler to log the HTTP request sent
// to a service. Will only log the HTTP request's headers. The request payload
// will not be read.
var LogHTTPRequestHeaderHandler = request.NamedHandler{
Name: "awssdk.client.LogRequestHeader",
Fn: logRequestHeader,
}
func logRequestHeader(r *request.Request) {
b, err := httputil.DumpRequestOut(r.HTTPRequest, false)
if err != nil {
r.Config.Logger.Log(fmt.Sprintf(logReqErrMsg,
r.ClientInfo.ServiceName, r.Operation.Name, err))
return
}
r.Config.Logger.Log(fmt.Sprintf(logReqMsg,
r.ClientInfo.ServiceName, r.Operation.Name, string(b)))
}
const logRespMsg = `DEBUG: Response %s/%s Details:
---[ RESPONSE ]--------------------------------------
%s
-----------------------------------------------------`
const logRespErrMsg = `DEBUG ERROR: Response %s/%s:
---[ RESPONSE DUMP ERROR ]-----------------------------
%s
-----------------------------------------------------`
// LogHTTPResponseHandler is an SDK request handler to log the HTTP response
// received from a service. Will include the HTTP response body if the LogLevel
// of the request matches LogDebugWithHTTPBody.
var LogHTTPResponseHandler = request.NamedHandler{
Name: "awssdk.client.LogResponse",
Fn: logResponse,
}
func logResponse(r *request.Request) {
lw := &logWriter{r.Config.Logger, bytes.NewBuffer(nil)}
if r.HTTPResponse == nil {
lw.Logger.Log(fmt.Sprintf(logRespErrMsg,
r.ClientInfo.ServiceName, r.Operation.Name, "request's HTTPResponse is nil"))
return
}
logBody := r.Config.LogLevel.Matches(aws.LogDebugWithHTTPBody)
if logBody {
r.HTTPResponse.Body = &teeReaderCloser{
Reader: io.TeeReader(r.HTTPResponse.Body, lw),
Source: r.HTTPResponse.Body,
}
}
handlerFn := func(req *request.Request) {
b, err := httputil.DumpResponse(req.HTTPResponse, false)
if err != nil {
lw.Logger.Log(fmt.Sprintf(logRespErrMsg,
req.ClientInfo.ServiceName, req.Operation.Name, err))
return
}
lw.Logger.Log(fmt.Sprintf(logRespMsg,
req.ClientInfo.ServiceName, req.Operation.Name, string(b)))
if logBody {
b, err := ioutil.ReadAll(lw.buf)
if err != nil {
lw.Logger.Log(fmt.Sprintf(logRespErrMsg,
req.ClientInfo.ServiceName, req.Operation.Name, err))
return
}
lw.Logger.Log(string(b))
}
}
const handlerName = "awsdk.client.LogResponse.ResponseBody"
r.Handlers.Unmarshal.SetBackNamed(request.NamedHandler{
Name: handlerName, Fn: handlerFn,
})
r.Handlers.UnmarshalError.SetBackNamed(request.NamedHandler{
Name: handlerName, Fn: handlerFn,
})
}
// LogHTTPResponseHeaderHandler is an SDK request handler to log the HTTP
// response received from a service. Will only log the HTTP response's headers.
// The response payload will not be read.
var LogHTTPResponseHeaderHandler = request.NamedHandler{
Name: "awssdk.client.LogResponseHeader",
Fn: logResponseHeader,
}
func logResponseHeader(r *request.Request) {
if r.Config.Logger == nil {
return
}
b, err := httputil.DumpResponse(r.HTTPResponse, false)
if err != nil {
r.Config.Logger.Log(fmt.Sprintf(logRespErrMsg,
r.ClientInfo.ServiceName, r.Operation.Name, err))
return
}
r.Config.Logger.Log(fmt.Sprintf(logRespMsg,
r.ClientInfo.ServiceName, r.Operation.Name, string(b)))
}

13
vendor/github.com/aws/aws-sdk-go/aws/client/metadata/client_info.go generated vendored Normal file
View File

@ -0,0 +1,13 @@
package metadata
// ClientInfo wraps immutable data from the client.Client structure.
type ClientInfo struct {
ServiceName string
ServiceID string
APIVersion string
Endpoint string
SigningName string
SigningRegion string
JSONVersion string
TargetPrefix string
}

536
vendor/github.com/aws/aws-sdk-go/aws/config.go generated vendored Normal file
View File

@ -0,0 +1,536 @@
package aws
import (
"net/http"
"time"
"github.com/aws/aws-sdk-go/aws/credentials"
"github.com/aws/aws-sdk-go/aws/endpoints"
)
// UseServiceDefaultRetries instructs the config to use the service's own
// default number of retries. This will be the default action if
// Config.MaxRetries is nil also.
const UseServiceDefaultRetries = -1
// RequestRetryer is an alias for a type that implements the request.Retryer
// interface.
type RequestRetryer interface{}
// A Config provides service configuration for service clients. By default,
// all clients will use the defaults.DefaultConfig structure.
//
// // Create Session with MaxRetry configuration to be shared by multiple
// // service clients.
// sess := session.Must(session.NewSession(&aws.Config{
// MaxRetries: aws.Int(3),
// }))
//
// // Create S3 service client with a specific Region.
// svc := s3.New(sess, &aws.Config{
// Region: aws.String("us-west-2"),
// })
type Config struct {
// Enables verbose error printing of all credential chain errors.
// Should be used when wanting to see all errors while attempting to
// retrieve credentials.
CredentialsChainVerboseErrors *bool
// The credentials object to use when signing requests. Defaults to a
// chain of credential providers to search for credentials in environment
// variables, shared credential file, and EC2 Instance Roles.
Credentials *credentials.Credentials
// An optional endpoint URL (hostname only or fully qualified URI)
// that overrides the default generated endpoint for a client. Set this
// to `""` to use the default generated endpoint.
//
// Note: You must still provide a `Region` value when specifying an
// endpoint for a client.
Endpoint *string
// The resolver to use for looking up endpoints for AWS service clients
// to use based on region.
EndpointResolver endpoints.Resolver
// EnforceShouldRetryCheck is used in the AfterRetryHandler to always call
// ShouldRetry regardless of whether request.Retryable is set. This will
// utilize the ShouldRetry method of custom retryers. If EnforceShouldRetryCheck
// is not set, then ShouldRetry will only be called if request.Retryable is nil.
// Proper handling of the request.Retryable field is important when setting this field.
EnforceShouldRetryCheck *bool
// The region to send requests to. This parameter is required and must
// be configured globally or on a per-client basis unless otherwise
// noted. A full list of regions is found in the "Regions and Endpoints"
// document.
//
// See http://docs.aws.amazon.com/general/latest/gr/rande.html for AWS
// Regions and Endpoints.
Region *string
// Set this to `true` to disable SSL when sending requests. Defaults
// to `false`.
DisableSSL *bool
// The HTTP client to use when sending requests. Defaults to
// `http.DefaultClient`.
HTTPClient *http.Client
// An integer value representing the logging level. The default log level
// is zero (LogOff), which represents no logging. To enable logging set
// to a LogLevel Value.
LogLevel *LogLevelType
// The logger writer interface to write logging messages to. Defaults to
// standard out.
Logger Logger
// The maximum number of times that a request will be retried for failures.
// Defaults to -1, which defers the max retry setting to the service
// specific configuration.
MaxRetries *int
// Retryer guides how HTTP requests should be retried in case of
// recoverable failures.
//
// When nil or the value does not implement the request.Retryer interface,
// the client.DefaultRetryer will be used.
//
// When both Retryer and MaxRetries are non-nil, the former is used and
// the latter ignored.
//
// To set the Retryer field in a type-safe manner and with chaining, use
// the request.WithRetryer helper function:
//
// cfg := request.WithRetryer(aws.NewConfig(), myRetryer)
//
Retryer RequestRetryer
// Disables semantic parameter validation, which validates input for
// missing required fields and/or other semantic request input errors.
DisableParamValidation *bool
// Disables the computation of request and response checksums, e.g.,
// CRC32 checksums in Amazon DynamoDB.
DisableComputeChecksums *bool
// Set this to `true` to force the request to use path-style addressing,
// i.e., `http://s3.amazonaws.com/BUCKET/KEY`. By default, the S3 client
// will use virtual hosted bucket addressing when possible
// (`http://BUCKET.s3.amazonaws.com/KEY`).
//
// Note: This configuration option is specific to the Amazon S3 service.
//
// See http://docs.aws.amazon.com/AmazonS3/latest/dev/VirtualHosting.html
// for Amazon S3: Virtual Hosting of Buckets
S3ForcePathStyle *bool
// Set this to `true` to disable the SDK adding the `Expect: 100-Continue`
// header to PUT requests over 2MB of content. 100-Continue instructs the
// HTTP client not to send the body until the service responds with a
// `continue` status. This is useful to prevent sending the request body
// until after the request is authenticated and validated.
//
// http://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectPUT.html
//
// 100-Continue is only enabled for Go 1.6 and above. See `http.Transport`'s
// `ExpectContinueTimeout` for information on adjusting the continue wait
// timeout. https://golang.org/pkg/net/http/#Transport
//
// You should use this flag to disable 100-Continue if you experience issues
// with proxies or third party S3 compatible services.
S3Disable100Continue *bool
// Set this to `true` to enable the S3 Accelerate feature. All operations
// compatible with S3 Accelerate will use the accelerate endpoint for
// requests. Requests not compatible will fall back to normal S3 requests.
//
// The bucket must be enabled for accelerate to be used with an S3 client with
// accelerate enabled. If the bucket is not enabled for accelerate an error
// will be returned. The bucket name must be DNS compatible to also work
// with accelerate.
S3UseAccelerate *bool
// S3DisableContentMD5Validation config option is temporarily disabled,
// For S3 GetObject API calls, #1837.
//
// Set this to `true` to disable the S3 service client from automatically
// adding the ContentMD5 to S3 Object Put and Upload API calls. This option
// will also disable the SDK from performing object ContentMD5 validation
// on GetObject API calls.
S3DisableContentMD5Validation *bool
// Set this to `true` to disable the EC2Metadata client from overriding the
// default http.Client's Timeout. This is helpful if you do not want the
// EC2Metadata client to create a new http.Client. This option is only
// meaningful if you're not already using a custom HTTP client with the
// SDK. Enabled by default.
//
// Must be set and provided to the session.NewSession() in order to disable
// the EC2Metadata overriding the timeout for default credentials chain.
//
// Example:
// sess := session.Must(session.NewSession(aws.NewConfig()
// .WithEC2MetadataDisableTimeoutOverride(true)))
//
// svc := s3.New(sess)
//
EC2MetadataDisableTimeoutOverride *bool
// Instructs the endpoint to be generated for a service client to
// be the dual stack endpoint. The dual stack endpoint will support
// both IPv4 and IPv6 addressing.
//
// Setting this for a service which does not support dual stack will fail
// to make requests. It is not recommended to set this value on the session
// as it will apply to all service clients created with the session, even
// services which don't support dual stack endpoints.
//
// If the Endpoint config value is also provided the UseDualStack flag
// will be ignored.
//
// For example:
//
// sess := session.Must(session.NewSession())
//
// svc := s3.New(sess, &aws.Config{
// UseDualStack: aws.Bool(true),
// })
UseDualStack *bool
// SleepDelay is an override for the func the SDK will call when sleeping
// during the lifecycle of a request. Specifically this will be used for
// request delays. This value should only be used for testing. To adjust
// the delay of a request see the aws/client.DefaultRetryer and
// aws/request.Retryer.
//
// SleepDelay will prevent any Context from being used for canceling retry
// delay of an API operation. It is recommended to not use SleepDelay at all
// and specify a Retryer instead.
SleepDelay func(time.Duration)
// DisableRestProtocolURICleaning will not clean the URL path when making rest protocol requests.
// Will default to false. This would only be used for empty directory names in s3 requests.
//
// Example:
// sess := session.Must(session.NewSession(&aws.Config{
// DisableRestProtocolURICleaning: aws.Bool(true),
// }))
//
// svc := s3.New(sess)
// out, err := svc.GetObject(&s3.GetObjectInput {
// Bucket: aws.String("bucketname"),
// Key: aws.String("//foo//bar//moo"),
// })
DisableRestProtocolURICleaning *bool
// EnableEndpointDiscovery will allow for endpoint discovery on operations that
// have the definition in its model. By default, endpoint discovery is off.
//
// Example:
// sess := session.Must(session.NewSession(&aws.Config{
// EnableEndpointDiscovery: aws.Bool(true),
// }))
//
// svc := s3.New(sess)
// out, err := svc.GetObject(&s3.GetObjectInput {
// Bucket: aws.String("bucketname"),
// Key: aws.String("/foo/bar/moo"),
// })
EnableEndpointDiscovery *bool
// DisableEndpointHostPrefix will disable the SDK's behavior of prefixing
// request endpoint hosts with modeled information.
//
// Disabling this feature is useful when you want to use local endpoints
// for testing that do not support the modeled host prefix pattern.
DisableEndpointHostPrefix *bool
}
// NewConfig returns a new Config pointer that can be chained with builder
// methods to set multiple configuration values inline without using pointers.
//
// // Create Session with MaxRetry configuration to be shared by multiple
// // service clients.
// sess := session.Must(session.NewSession(aws.NewConfig().
// WithMaxRetries(3),
// ))
//
// // Create S3 service client with a specific Region.
// svc := s3.New(sess, aws.NewConfig().
// WithRegion("us-west-2"),
// )
func NewConfig() *Config {
return &Config{}
}
// WithCredentialsChainVerboseErrors sets a config verbose errors boolean,
// returning a Config pointer.
func (c *Config) WithCredentialsChainVerboseErrors(verboseErrs bool) *Config {
c.CredentialsChainVerboseErrors = &verboseErrs
return c
}
// WithCredentials sets a config Credentials value returning a Config pointer
// for chaining.
func (c *Config) WithCredentials(creds *credentials.Credentials) *Config {
c.Credentials = creds
return c
}
// WithEndpoint sets a config Endpoint value returning a Config pointer for
// chaining.
func (c *Config) WithEndpoint(endpoint string) *Config {
c.Endpoint = &endpoint
return c
}
// WithEndpointResolver sets a config EndpointResolver value returning a
// Config pointer for chaining.
func (c *Config) WithEndpointResolver(resolver endpoints.Resolver) *Config {
c.EndpointResolver = resolver
return c
}
// WithRegion sets a config Region value returning a Config pointer for
// chaining.
func (c *Config) WithRegion(region string) *Config {
c.Region = &region
return c
}
// WithDisableSSL sets a config DisableSSL value returning a Config pointer
// for chaining.
func (c *Config) WithDisableSSL(disable bool) *Config {
c.DisableSSL = &disable
return c
}
// WithHTTPClient sets a config HTTPClient value returning a Config pointer
// for chaining.
func (c *Config) WithHTTPClient(client *http.Client) *Config {
c.HTTPClient = client
return c
}
// WithMaxRetries sets a config MaxRetries value returning a Config pointer
// for chaining.
func (c *Config) WithMaxRetries(max int) *Config {
c.MaxRetries = &max
return c
}
// WithDisableParamValidation sets a config DisableParamValidation value
// returning a Config pointer for chaining.
func (c *Config) WithDisableParamValidation(disable bool) *Config {
c.DisableParamValidation = &disable
return c
}
// WithDisableComputeChecksums sets a config DisableComputeChecksums value
// returning a Config pointer for chaining.
func (c *Config) WithDisableComputeChecksums(disable bool) *Config {
c.DisableComputeChecksums = &disable
return c
}
// WithLogLevel sets a config LogLevel value returning a Config pointer for
// chaining.
func (c *Config) WithLogLevel(level LogLevelType) *Config {
c.LogLevel = &level
return c
}
// WithLogger sets a config Logger value returning a Config pointer for
// chaining.
func (c *Config) WithLogger(logger Logger) *Config {
c.Logger = logger
return c
}
// WithS3ForcePathStyle sets a config S3ForcePathStyle value returning a Config
// pointer for chaining.
func (c *Config) WithS3ForcePathStyle(force bool) *Config {
c.S3ForcePathStyle = &force
return c
}
// WithS3Disable100Continue sets a config S3Disable100Continue value returning
// a Config pointer for chaining.
func (c *Config) WithS3Disable100Continue(disable bool) *Config {
c.S3Disable100Continue = &disable
return c
}
// WithS3UseAccelerate sets a config S3UseAccelerate value returning a Config
// pointer for chaining.
func (c *Config) WithS3UseAccelerate(enable bool) *Config {
c.S3UseAccelerate = &enable
return c
}
// WithS3DisableContentMD5Validation sets a config
// S3DisableContentMD5Validation value returning a Config pointer for chaining.
func (c *Config) WithS3DisableContentMD5Validation(enable bool) *Config {
c.S3DisableContentMD5Validation = &enable
return c
}
// WithUseDualStack sets a config UseDualStack value returning a Config
// pointer for chaining.
func (c *Config) WithUseDualStack(enable bool) *Config {
c.UseDualStack = &enable
return c
}
// WithEC2MetadataDisableTimeoutOverride sets a config EC2MetadataDisableTimeoutOverride value
// returning a Config pointer for chaining.
func (c *Config) WithEC2MetadataDisableTimeoutOverride(enable bool) *Config {
c.EC2MetadataDisableTimeoutOverride = &enable
return c
}
// WithSleepDelay overrides the function used to sleep while waiting for the
// next retry. Defaults to time.Sleep.
func (c *Config) WithSleepDelay(fn func(time.Duration)) *Config {
c.SleepDelay = fn
return c
}
// WithEndpointDiscovery will set whether or not to use endpoint discovery.
func (c *Config) WithEndpointDiscovery(t bool) *Config {
c.EnableEndpointDiscovery = &t
return c
}
// WithDisableEndpointHostPrefix will set whether or not to use modeled host prefix
// when making requests.
func (c *Config) WithDisableEndpointHostPrefix(t bool) *Config {
c.DisableEndpointHostPrefix = &t
return c
}
// MergeIn merges the passed in configs into the existing config object.
func (c *Config) MergeIn(cfgs ...*Config) {
for _, other := range cfgs {
mergeInConfig(c, other)
}
}
func mergeInConfig(dst *Config, other *Config) {
if other == nil {
return
}
if other.CredentialsChainVerboseErrors != nil {
dst.CredentialsChainVerboseErrors = other.CredentialsChainVerboseErrors
}
if other.Credentials != nil {
dst.Credentials = other.Credentials
}
if other.Endpoint != nil {
dst.Endpoint = other.Endpoint
}
if other.EndpointResolver != nil {
dst.EndpointResolver = other.EndpointResolver
}
if other.Region != nil {
dst.Region = other.Region
}
if other.DisableSSL != nil {
dst.DisableSSL = other.DisableSSL
}
if other.HTTPClient != nil {
dst.HTTPClient = other.HTTPClient
}
if other.LogLevel != nil {
dst.LogLevel = other.LogLevel
}
if other.Logger != nil {
dst.Logger = other.Logger
}
if other.MaxRetries != nil {
dst.MaxRetries = other.MaxRetries
}
if other.Retryer != nil {
dst.Retryer = other.Retryer
}
if other.DisableParamValidation != nil {
dst.DisableParamValidation = other.DisableParamValidation
}
if other.DisableComputeChecksums != nil {
dst.DisableComputeChecksums = other.DisableComputeChecksums
}
if other.S3ForcePathStyle != nil {
dst.S3ForcePathStyle = other.S3ForcePathStyle
}
if other.S3Disable100Continue != nil {
dst.S3Disable100Continue = other.S3Disable100Continue
}
if other.S3UseAccelerate != nil {
dst.S3UseAccelerate = other.S3UseAccelerate
}
if other.S3DisableContentMD5Validation != nil {
dst.S3DisableContentMD5Validation = other.S3DisableContentMD5Validation
}
if other.UseDualStack != nil {
dst.UseDualStack = other.UseDualStack
}
if other.EC2MetadataDisableTimeoutOverride != nil {
dst.EC2MetadataDisableTimeoutOverride = other.EC2MetadataDisableTimeoutOverride
}
if other.SleepDelay != nil {
dst.SleepDelay = other.SleepDelay
}
if other.DisableRestProtocolURICleaning != nil {
dst.DisableRestProtocolURICleaning = other.DisableRestProtocolURICleaning
}
if other.EnforceShouldRetryCheck != nil {
dst.EnforceShouldRetryCheck = other.EnforceShouldRetryCheck
}
if other.EnableEndpointDiscovery != nil {
dst.EnableEndpointDiscovery = other.EnableEndpointDiscovery
}
if other.DisableEndpointHostPrefix != nil {
dst.DisableEndpointHostPrefix = other.DisableEndpointHostPrefix
}
}
// Copy will return a shallow copy of the Config object. If any additional
// configurations are provided they will be merged into the new config returned.
func (c *Config) Copy(cfgs ...*Config) *Config {
dst := &Config{}
dst.MergeIn(c)
for _, cfg := range cfgs {
dst.MergeIn(cfg)
}
return dst
}
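A short sketch (illustrative, not in this diff) of how the chainable With* setters and the Copy/MergeIn semantics above combine; fields left nil in an override keep the base values:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
)

func main() {
	// Base config built with the chainable With* helpers defined above.
	base := aws.NewConfig().
		WithRegion("us-west-2").
		WithMaxRetries(3)

	// Copy merges the override into a shallow copy of base; fields left
	// nil in the override keep their base values (see mergeInConfig).
	perCall := base.Copy(aws.NewConfig().WithMaxRetries(10))

	fmt.Println(aws.StringValue(perCall.Region))  // "us-west-2" (inherited)
	fmt.Println(aws.IntValue(perCall.MaxRetries)) // 10 (overridden)
}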

37
vendor/github.com/aws/aws-sdk-go/aws/context_1_5.go generated vendored Normal file
View File

@ -0,0 +1,37 @@
// +build !go1.9
package aws
import "time"
// Context is a copy of the Go v1.7 stdlib's context.Context interface.
// It is represented as a SDK interface to enable you to use the "WithContext"
// API methods with Go v1.6 and a Context type such as golang.org/x/net/context.
//
// See https://golang.org/pkg/context on how to use contexts.
type Context interface {
// Deadline returns the time when work done on behalf of this context
// should be canceled. Deadline returns ok==false when no deadline is
// set. Successive calls to Deadline return the same results.
Deadline() (deadline time.Time, ok bool)
// Done returns a channel that's closed when work done on behalf of this
// context should be canceled. Done may return nil if this context can
// never be canceled. Successive calls to Done return the same value.
Done() <-chan struct{}
// Err returns a non-nil error value after Done is closed. Err returns
// Canceled if the context was canceled or DeadlineExceeded if the
// context's deadline passed. No other values for Err are defined.
// After Done is closed, successive calls to Err return the same value.
Err() error
// Value returns the value associated with this context for key, or nil
// if no value is associated with key. Successive calls to Value with
// the same key returns the same result.
//
// Use context values only for request-scoped data that transits
// processes and API boundaries, not for passing optional parameters to
// functions.
Value(key interface{}) interface{}
}

11
vendor/github.com/aws/aws-sdk-go/aws/context_1_9.go generated vendored Normal file
View File

@ -0,0 +1,11 @@
// +build go1.9
package aws
import "context"
// Context is an alias of the Go stdlib's context.Context interface.
// It can be used within the SDK's API operation "WithContext" methods.
//
// See https://golang.org/pkg/context on how to use contexts.
type Context = context.Context

56
vendor/github.com/aws/aws-sdk-go/aws/context_background_1_5.go generated vendored Normal file
View File

@ -0,0 +1,56 @@
// +build !go1.7
package aws
import "time"
// An emptyCtx is a copy of the Go 1.7 context.emptyCtx type. This is copied to
// provide a 1.6 and 1.5 safe version of context that is compatible with Go
// 1.7's Context.
//
// An emptyCtx is never canceled, has no values, and has no deadline. It is not
// struct{}, since vars of this type must have distinct addresses.
type emptyCtx int
func (*emptyCtx) Deadline() (deadline time.Time, ok bool) {
return
}
func (*emptyCtx) Done() <-chan struct{} {
return nil
}
func (*emptyCtx) Err() error {
return nil
}
func (*emptyCtx) Value(key interface{}) interface{} {
return nil
}
func (e *emptyCtx) String() string {
switch e {
case backgroundCtx:
return "aws.BackgroundContext"
}
return "unknown empty Context"
}
var (
backgroundCtx = new(emptyCtx)
)
// BackgroundContext returns a context that will never be canceled, has no
// values, and no deadline. This context is used by the SDK to provide
// backwards compatibility with non-context API operations and functionality.
//
// Go 1.6 and before:
// This context function is equivalent to context.Background in the Go stdlib.
//
// Go 1.7 and later:
// The context returned will be the value returned by context.Background()
//
// See https://golang.org/pkg/context for more information on Contexts.
func BackgroundContext() Context {
return backgroundCtx
}

20
vendor/github.com/aws/aws-sdk-go/aws/context_background_1_7.go generated vendored Normal file
View File

@ -0,0 +1,20 @@
// +build go1.7
package aws
import "context"
// BackgroundContext returns a context that will never be canceled, has no
// values, and no deadline. This context is used by the SDK to provide
// backwards compatibility with non-context API operations and functionality.
//
// Go 1.6 and before:
// This context function is equivalent to context.Background in the Go stdlib.
//
// Go 1.7 and later:
// The context returned will be the value returned by context.Background()
//
// See https://golang.org/pkg/context for more information on Contexts.
func BackgroundContext() Context {
return context.Background()
}

24
vendor/github.com/aws/aws-sdk-go/aws/context_sleep.go generated vendored Normal file
View File

@ -0,0 +1,24 @@
package aws
import (
"time"
)
// SleepWithContext will wait for the timer duration to expire, or for the
// context to be canceled, whichever happens first. If the context is canceled the Context's
// error will be returned.
//
// Expects Context to always return a non-nil error if the Done channel is closed.
func SleepWithContext(ctx Context, dur time.Duration) error {
t := time.NewTimer(dur)
defer t.Stop()
select {
case <-t.C:
break
case <-ctx.Done():
return ctx.Err()
}
return nil
}
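A small sketch (not part of the vendored code) showing SleepWithContext being cut short by a context deadline; with Go 1.7+ a standard context.Context satisfies aws.Context directly:

package main

import (
	"context"
	"fmt"
	"time"

	"github.com/aws/aws-sdk-go/aws"
)

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond)
	defer cancel()

	// The requested 5s sleep is cut short when the deadline fires and the
	// context's error is returned instead of nil.
	if err := aws.SleepWithContext(ctx, 5*time.Second); err != nil {
		fmt.Println("sleep interrupted:", err) // context.DeadlineExceeded
	}
}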

387
vendor/github.com/aws/aws-sdk-go/aws/convert_types.go generated vendored Normal file
View File

@ -0,0 +1,387 @@
package aws
import "time"
// String returns a pointer to the string value passed in.
func String(v string) *string {
return &v
}
// StringValue returns the value of the string pointer passed in or
// "" if the pointer is nil.
func StringValue(v *string) string {
if v != nil {
return *v
}
return ""
}
// StringSlice converts a slice of string values into a slice of
// string pointers
func StringSlice(src []string) []*string {
dst := make([]*string, len(src))
for i := 0; i < len(src); i++ {
dst[i] = &(src[i])
}
return dst
}
// StringValueSlice converts a slice of string pointers into a slice of
// string values
func StringValueSlice(src []*string) []string {
dst := make([]string, len(src))
for i := 0; i < len(src); i++ {
if src[i] != nil {
dst[i] = *(src[i])
}
}
return dst
}
// StringMap converts a string map of string values into a string
// map of string pointers
func StringMap(src map[string]string) map[string]*string {
dst := make(map[string]*string)
for k, val := range src {
v := val
dst[k] = &v
}
return dst
}
// StringValueMap converts a string map of string pointers into a string
// map of string values
func StringValueMap(src map[string]*string) map[string]string {
dst := make(map[string]string)
for k, val := range src {
if val != nil {
dst[k] = *val
}
}
return dst
}
// Bool returns a pointer to the bool value passed in.
func Bool(v bool) *bool {
return &v
}
// BoolValue returns the value of the bool pointer passed in or
// false if the pointer is nil.
func BoolValue(v *bool) bool {
if v != nil {
return *v
}
return false
}
// BoolSlice converts a slice of bool values into a slice of
// bool pointers
func BoolSlice(src []bool) []*bool {
dst := make([]*bool, len(src))
for i := 0; i < len(src); i++ {
dst[i] = &(src[i])
}
return dst
}
// BoolValueSlice converts a slice of bool pointers into a slice of
// bool values
func BoolValueSlice(src []*bool) []bool {
dst := make([]bool, len(src))
for i := 0; i < len(src); i++ {
if src[i] != nil {
dst[i] = *(src[i])
}
}
return dst
}
// BoolMap converts a string map of bool values into a string
// map of bool pointers
func BoolMap(src map[string]bool) map[string]*bool {
dst := make(map[string]*bool)
for k, val := range src {
v := val
dst[k] = &v
}
return dst
}
// BoolValueMap converts a string map of bool pointers into a string
// map of bool values
func BoolValueMap(src map[string]*bool) map[string]bool {
dst := make(map[string]bool)
for k, val := range src {
if val != nil {
dst[k] = *val
}
}
return dst
}
// Int returns a pointer to the int value passed in.
func Int(v int) *int {
return &v
}
// IntValue returns the value of the int pointer passed in or
// 0 if the pointer is nil.
func IntValue(v *int) int {
if v != nil {
return *v
}
return 0
}
// IntSlice converts a slice of int values into a slice of
// int pointers
func IntSlice(src []int) []*int {
dst := make([]*int, len(src))
for i := 0; i < len(src); i++ {
dst[i] = &(src[i])
}
return dst
}
// IntValueSlice converts a slice of int pointers into a slice of
// int values
func IntValueSlice(src []*int) []int {
dst := make([]int, len(src))
for i := 0; i < len(src); i++ {
if src[i] != nil {
dst[i] = *(src[i])
}
}
return dst
}
// IntMap converts a string map of int values into a string
// map of int pointers
func IntMap(src map[string]int) map[string]*int {
dst := make(map[string]*int)
for k, val := range src {
v := val
dst[k] = &v
}
return dst
}
// IntValueMap converts a string map of int pointers into a string
// map of int values
func IntValueMap(src map[string]*int) map[string]int {
dst := make(map[string]int)
for k, val := range src {
if val != nil {
dst[k] = *val
}
}
return dst
}
// Int64 returns a pointer to the int64 value passed in.
func Int64(v int64) *int64 {
return &v
}
// Int64Value returns the value of the int64 pointer passed in or
// 0 if the pointer is nil.
func Int64Value(v *int64) int64 {
if v != nil {
return *v
}
return 0
}
// Int64Slice converts a slice of int64 values into a slice of
// int64 pointers
func Int64Slice(src []int64) []*int64 {
dst := make([]*int64, len(src))
for i := 0; i < len(src); i++ {
dst[i] = &(src[i])
}
return dst
}
// Int64ValueSlice converts a slice of int64 pointers into a slice of
// int64 values
func Int64ValueSlice(src []*int64) []int64 {
dst := make([]int64, len(src))
for i := 0; i < len(src); i++ {
if src[i] != nil {
dst[i] = *(src[i])
}
}
return dst
}
// Int64Map converts a string map of int64 values into a string
// map of int64 pointers
func Int64Map(src map[string]int64) map[string]*int64 {
dst := make(map[string]*int64)
for k, val := range src {
v := val
dst[k] = &v
}
return dst
}
// Int64ValueMap converts a string map of int64 pointers into a string
// map of int64 values
func Int64ValueMap(src map[string]*int64) map[string]int64 {
dst := make(map[string]int64)
for k, val := range src {
if val != nil {
dst[k] = *val
}
}
return dst
}
// Float64 returns a pointer to the float64 value passed in.
func Float64(v float64) *float64 {
return &v
}
// Float64Value returns the value of the float64 pointer passed in or
// 0 if the pointer is nil.
func Float64Value(v *float64) float64 {
if v != nil {
return *v
}
return 0
}
// Float64Slice converts a slice of float64 values into a slice of
// float64 pointers
func Float64Slice(src []float64) []*float64 {
dst := make([]*float64, len(src))
for i := 0; i < len(src); i++ {
dst[i] = &(src[i])
}
return dst
}
// Float64ValueSlice converts a slice of float64 pointers into a slice of
// float64 values
func Float64ValueSlice(src []*float64) []float64 {
dst := make([]float64, len(src))
for i := 0; i < len(src); i++ {
if src[i] != nil {
dst[i] = *(src[i])
}
}
return dst
}
// Float64Map converts a string map of float64 values into a string
// map of float64 pointers
func Float64Map(src map[string]float64) map[string]*float64 {
dst := make(map[string]*float64)
for k, val := range src {
v := val
dst[k] = &v
}
return dst
}
// Float64ValueMap converts a string map of float64 pointers into a string
// map of float64 values
func Float64ValueMap(src map[string]*float64) map[string]float64 {
dst := make(map[string]float64)
for k, val := range src {
if val != nil {
dst[k] = *val
}
}
return dst
}
// Time returns a pointer to the time.Time value passed in.
func Time(v time.Time) *time.Time {
return &v
}
// TimeValue returns the value of the time.Time pointer passed in or
// time.Time{} if the pointer is nil.
func TimeValue(v *time.Time) time.Time {
if v != nil {
return *v
}
return time.Time{}
}
// SecondsTimeValue converts an int64 pointer to a time.Time value
// representing seconds since Epoch or time.Time{} if the pointer is nil.
func SecondsTimeValue(v *int64) time.Time {
if v != nil {
return time.Unix((*v / 1000), 0)
}
return time.Time{}
}
// MillisecondsTimeValue converts an int64 pointer to a time.Time value
// representing milliseconds since Epoch or time.Time{} if the pointer is nil.
func MillisecondsTimeValue(v *int64) time.Time {
if v != nil {
return time.Unix(0, (*v * 1000000))
}
return time.Time{}
}
// TimeUnixMilli returns a Unix timestamp in milliseconds from "January 1, 1970 UTC".
// The result is undefined if the Unix time cannot be represented by an int64.
// This includes calling TimeUnixMilli on a zero Time.
//
// This utility is useful for service APIs such as CloudWatch Logs which require
// their unix time values to be in milliseconds.
//
// See Go stdlib https://golang.org/pkg/time/#Time.UnixNano for more information.
func TimeUnixMilli(t time.Time) int64 {
return t.UnixNano() / int64(time.Millisecond/time.Nanosecond)
}
// TimeSlice converts a slice of time.Time values into a slice of
// time.Time pointers
func TimeSlice(src []time.Time) []*time.Time {
dst := make([]*time.Time, len(src))
for i := 0; i < len(src); i++ {
dst[i] = &(src[i])
}
return dst
}
// TimeValueSlice converts a slice of time.Time pointers into a slice of
// time.Time values
func TimeValueSlice(src []*time.Time) []time.Time {
dst := make([]time.Time, len(src))
for i := 0; i < len(src); i++ {
if src[i] != nil {
dst[i] = *(src[i])
}
}
return dst
}
// TimeMap converts a string map of time.Time values into a string
// map of time.Time pointers
func TimeMap(src map[string]time.Time) map[string]*time.Time {
dst := make(map[string]*time.Time)
for k, val := range src {
v := val
dst[k] = &v
}
return dst
}
// TimeValueMap converts a string map of time.Time pointers into a string
// map of time.Time values
func TimeValueMap(src map[string]*time.Time) map[string]time.Time {
dst := make(map[string]time.Time)
for k, val := range src {
if val != nil {
dst[k] = *val
}
}
return dst
}
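An illustrative sketch of the pointer-conversion helpers above; the values are arbitrary:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
)

func main() {
	// AWS APIs model optional fields as pointers; these helpers convert
	// between values and pointers without manual temporaries.
	name := aws.String("tile38")       // *string
	fmt.Println(aws.StringValue(name)) // "tile38"
	fmt.Println(aws.StringValue(nil))  // "" -- nil-safe zero value

	ints := aws.Int64Slice([]int64{1, 2, 3}) // []*int64
	fmt.Println(aws.Int64ValueSlice(ints))   // [1 2 3]
}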

228
vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go generated vendored Normal file
View File

@ -0,0 +1,228 @@
package corehandlers
import (
"bytes"
"fmt"
"io/ioutil"
"net/http"
"net/url"
"regexp"
"strconv"
"time"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/aws/credentials"
"github.com/aws/aws-sdk-go/aws/request"
)
// Interface for matching types which also have a Len method.
type lener interface {
Len() int
}
// BuildContentLengthHandler builds the content length of a request based on the body,
// or will use the HTTPRequest.Header's "Content-Length" if defined. If unable
// to determine the request body's length and no "Content-Length" was specified, an error is set on the request.
//
// The Content-Length will only be added to the request if the length of the body
// is greater than 0. If the body is empty or the current `Content-Length`
// header is <= 0, the header will also be stripped.
var BuildContentLengthHandler = request.NamedHandler{Name: "core.BuildContentLengthHandler", Fn: func(r *request.Request) {
var length int64
if slength := r.HTTPRequest.Header.Get("Content-Length"); slength != "" {
length, _ = strconv.ParseInt(slength, 10, 64)
} else {
if r.Body != nil {
var err error
length, err = aws.SeekerLen(r.Body)
if err != nil {
r.Error = awserr.New(request.ErrCodeSerialization, "failed to get request body's length", err)
return
}
}
}
if length > 0 {
r.HTTPRequest.ContentLength = length
r.HTTPRequest.Header.Set("Content-Length", fmt.Sprintf("%d", length))
} else {
r.HTTPRequest.ContentLength = 0
r.HTTPRequest.Header.Del("Content-Length")
}
}}
var reStatusCode = regexp.MustCompile(`^(\d{3})`)
// ValidateReqSigHandler is a request handler to ensure that the request's
// signature doesn't expire before it is sent. This can happen when a request
// is built and signed significantly before it is sent, or when significant delays
// occur when retrying requests that would cause the signature to expire.
var ValidateReqSigHandler = request.NamedHandler{
Name: "core.ValidateReqSigHandler",
Fn: func(r *request.Request) {
// Requests with anonymous credentials are never signed; skip the check.
if r.Config.Credentials == credentials.AnonymousCredentials {
return
}
signedTime := r.Time
if !r.LastSignedAt.IsZero() {
signedTime = r.LastSignedAt
}
// 5 minutes to allow for some clock skew/delays in transmission.
// Would be improved with aws/aws-sdk-go#423
if signedTime.Add(5 * time.Minute).After(time.Now()) {
return
}
fmt.Println("request expired, resigning")
r.Sign()
},
}
// SendHandler is a request handler to send service request using HTTP client.
var SendHandler = request.NamedHandler{
Name: "core.SendHandler",
Fn: func(r *request.Request) {
sender := sendFollowRedirects
if r.DisableFollowRedirects {
sender = sendWithoutFollowRedirects
}
if request.NoBody == r.HTTPRequest.Body {
// Strip off the request body if the NoBody reader was used as a
// place holder for a request body. This prevents the SDK from
// making requests with a request body when it would be invalid
// to do so.
//
// Use a shallow copy of the http.Request to ensure the race condition
// of transport on Body will not trigger
reqOrig, reqCopy := r.HTTPRequest, *r.HTTPRequest
reqCopy.Body = nil
r.HTTPRequest = &reqCopy
defer func() {
r.HTTPRequest = reqOrig
}()
}
var err error
r.HTTPResponse, err = sender(r)
if err != nil {
handleSendError(r, err)
}
},
}
func sendFollowRedirects(r *request.Request) (*http.Response, error) {
return r.Config.HTTPClient.Do(r.HTTPRequest)
}
func sendWithoutFollowRedirects(r *request.Request) (*http.Response, error) {
transport := r.Config.HTTPClient.Transport
if transport == nil {
transport = http.DefaultTransport
}
return transport.RoundTrip(r.HTTPRequest)
}
func handleSendError(r *request.Request, err error) {
// Prevent leaking if an HTTPResponse was returned. Clean up
// the body.
if r.HTTPResponse != nil {
r.HTTPResponse.Body.Close()
}
// Capture the case where url.Error is returned for error processing
// response. e.g. 301 without location header comes back as string
// error and r.HTTPResponse is nil. Other URL redirect errors will
// come back in a similar manner.
if e, ok := err.(*url.Error); ok && e.Err != nil {
if s := reStatusCode.FindStringSubmatch(e.Err.Error()); s != nil {
code, _ := strconv.ParseInt(s[1], 10, 64)
r.HTTPResponse = &http.Response{
StatusCode: int(code),
Status: http.StatusText(int(code)),
Body: ioutil.NopCloser(bytes.NewReader([]byte{})),
}
return
}
}
if r.HTTPResponse == nil {
// Add a dummy request response object to ensure the HTTPResponse
// value is consistent.
r.HTTPResponse = &http.Response{
StatusCode: int(0),
Status: http.StatusText(int(0)),
Body: ioutil.NopCloser(bytes.NewReader([]byte{})),
}
}
// Catch all other request errors.
r.Error = awserr.New("RequestError", "send request failed", err)
r.Retryable = aws.Bool(true) // network errors are retryable
// Override the error with a context canceled error, if the context was canceled.
ctx := r.Context()
select {
case <-ctx.Done():
r.Error = awserr.New(request.CanceledErrorCode,
"request context canceled", ctx.Err())
r.Retryable = aws.Bool(false)
default:
}
}
// ValidateResponseHandler is a request handler to validate service response.
var ValidateResponseHandler = request.NamedHandler{Name: "core.ValidateResponseHandler", Fn: func(r *request.Request) {
if r.HTTPResponse.StatusCode == 0 || r.HTTPResponse.StatusCode >= 300 {
// this may be replaced by an UnmarshalError handler
r.Error = awserr.New("UnknownError", "unknown error", nil)
}
}}
// AfterRetryHandler performs final checks to determine if the request should
// be retried and how long to delay.
var AfterRetryHandler = request.NamedHandler{Name: "core.AfterRetryHandler", Fn: func(r *request.Request) {
// If one of the other handlers already set the retry state
// we don't want to override it based on the service's state
if r.Retryable == nil || aws.BoolValue(r.Config.EnforceShouldRetryCheck) {
r.Retryable = aws.Bool(r.ShouldRetry(r))
}
if r.WillRetry() {
r.RetryDelay = r.RetryRules(r)
if sleepFn := r.Config.SleepDelay; sleepFn != nil {
// Support SleepDelay for backwards compatibility and testing
sleepFn(r.RetryDelay)
} else if err := aws.SleepWithContext(r.Context(), r.RetryDelay); err != nil {
r.Error = awserr.New(request.CanceledErrorCode,
"request context canceled", err)
r.Retryable = aws.Bool(false)
return
}
// when the expired token exception occurs the credentials
// need to be expired locally so that the next request to
// get credentials will trigger a credentials refresh.
if r.IsErrorExpired() {
r.Config.Credentials.Expire()
}
r.RetryCount++
r.Error = nil
}
}}
// ValidateEndpointHandler is a request handler to validate a request had the
// appropriate Region and Endpoint set. Will set r.Error if the endpoint or
// region is not valid.
var ValidateEndpointHandler = request.NamedHandler{Name: "core.ValidateEndpointHandler", Fn: func(r *request.Request) {
if r.ClientInfo.SigningRegion == "" && aws.StringValue(r.Config.Region) == "" {
r.Error = aws.ErrMissingRegion
} else if r.ClientInfo.Endpoint == "" {
r.Error = aws.ErrMissingEndpoint
}
}}
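A hedged sketch (not in this diff) of the EnforceShouldRetryCheck path in AfterRetryHandler above: with the flag set, a custom retryer's ShouldRetry is consulted even when request.Retryable was already set. The neverRetryer type is hypothetical:

package main

import (
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/client"
	"github.com/aws/aws-sdk-go/aws/request"
	"github.com/aws/aws-sdk-go/aws/session"
)

// neverRetryer embeds DefaultRetryer (for MaxRetries) but refuses every
// retry; a contrived policy used only to show the handler wiring.
type neverRetryer struct {
	client.DefaultRetryer
}

func (neverRetryer) ShouldRetry(*request.Request) bool         { return false }
func (neverRetryer) RetryRules(*request.Request) time.Duration { return 0 }

func main() {
	// EnforceShouldRetryCheck makes AfterRetryHandler consult the custom
	// retryer even when another handler already set request.Retryable.
	sess := session.Must(session.NewSession(&aws.Config{
		Retryer:                 neverRetryer{},
		EnforceShouldRetryCheck: aws.Bool(true),
	}))
	_ = sess
}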

17
vendor/github.com/aws/aws-sdk-go/aws/corehandlers/param_validator.go generated vendored Normal file
View File

@ -0,0 +1,17 @@
package corehandlers
import "github.com/aws/aws-sdk-go/aws/request"
// ValidateParametersHandler is a request handler to validate the input parameters.
// Validating parameters only has meaning if done prior to the request being sent.
var ValidateParametersHandler = request.NamedHandler{Name: "core.ValidateParametersHandler", Fn: func(r *request.Request) {
if !r.ParamsFilled() {
return
}
if v, ok := r.Params.(request.Validator); ok {
if err := v.Validate(); err != nil {
r.Error = err
}
}
}}
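An illustrative sketch of the request.Validator contract that ValidateParametersHandler checks; putParams is a hypothetical input type:

package main

import (
	"errors"
	"fmt"
)

// putParams is a hypothetical input type; any value assigned to a request's
// Params that implements Validate() error (the request.Validator interface)
// is checked by ValidateParametersHandler before the request is sent.
type putParams struct {
	Key string
}

func (p *putParams) Validate() error {
	if p.Key == "" {
		return errors.New("Key is a required field")
	}
	return nil
}

func main() {
	p := &putParams{}
	fmt.Println(p.Validate()) // "Key is a required field"
}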

37
vendor/github.com/aws/aws-sdk-go/aws/corehandlers/user_agent.go generated vendored Normal file
View File

@ -0,0 +1,37 @@
package corehandlers
import (
"os"
"runtime"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/request"
)
// SDKVersionUserAgentHandler is a request handler for adding the SDK Version
// to the user agent.
var SDKVersionUserAgentHandler = request.NamedHandler{
Name: "core.SDKVersionUserAgentHandler",
Fn: request.MakeAddToUserAgentHandler(aws.SDKName, aws.SDKVersion,
runtime.Version(), runtime.GOOS, runtime.GOARCH),
}
const execEnvVar = `AWS_EXECUTION_ENV`
const execEnvUAKey = `exec-env`
// AddHostExecEnvUserAgentHander is a request handler appending the SDK's
// execution environment to the user agent.
//
// If the environment variable AWS_EXECUTION_ENV is set, its value will be
// appended to the user agent string.
var AddHostExecEnvUserAgentHander = request.NamedHandler{
Name: "core.AddHostExecEnvUserAgentHander",
Fn: func(r *request.Request) {
v := os.Getenv(execEnvVar)
if len(v) == 0 {
return
}
request.AddToUserAgent(r, execEnvUAKey+"/"+v)
},
}
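A trivial sketch of the exec-env mechanism above; the AWS_Lambda_go1.x value is illustrative:

package main

import (
	"fmt"
	"os"
)

func main() {
	// When AWS_EXECUTION_ENV is set, AddHostExecEnvUserAgentHander appends
	// "exec-env/<value>" to every request's User-Agent header.
	os.Setenv("AWS_EXECUTION_ENV", "AWS_Lambda_go1.x") // illustrative value
	fmt.Println("User-Agent suffix: exec-env/" + os.Getenv("AWS_EXECUTION_ENV"))
}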

100
vendor/github.com/aws/aws-sdk-go/aws/credentials/chain_provider.go generated vendored Normal file
View File

@ -0,0 +1,100 @@
package credentials
import (
"github.com/aws/aws-sdk-go/aws/awserr"
)
var (
// ErrNoValidProvidersFoundInChain is returned when there are no valid
// providers in the ChainProvider.
//
// This has been deprecated. For verbose error messaging set
// aws.Config.CredentialsChainVerboseErrors to true.
ErrNoValidProvidersFoundInChain = awserr.New("NoCredentialProviders",
`no valid providers in chain. Deprecated.
For verbose messaging see aws.Config.CredentialsChainVerboseErrors`,
nil)
)
// A ChainProvider will search for a provider which returns credentials
// and cache that provider until Retrieve is called again.
//
// The ChainProvider provides a way of chaining multiple providers together
// which will pick the first available using priority order of the Providers
// in the list.
//
// If none of the Providers retrieve valid credentials Value, ChainProvider's
// Retrieve() will return the error ErrNoValidProvidersFoundInChain.
//
// If a Provider is found which returns valid credentials Value ChainProvider
// will cache that Provider for all calls to IsExpired(), until Retrieve is
// called again.
//
// Example of ChainProvider to be used with an EnvProvider and EC2RoleProvider.
// In this example EnvProvider will first check if any credentials are available
// via the environment variables. If there are none ChainProvider will check
// the next Provider in the list, EC2RoleProvider in this case. If EC2RoleProvider
// does not return any credentials ChainProvider will return the error
// ErrNoValidProvidersFoundInChain
//
// creds := credentials.NewChainCredentials(
// []credentials.Provider{
// &credentials.EnvProvider{},
// &ec2rolecreds.EC2RoleProvider{
// Client: ec2metadata.New(sess),
// },
// })
//
// // Usage of ChainCredentials with aws.Config
// svc := ec2.New(session.Must(session.NewSession(&aws.Config{
// Credentials: creds,
// })))
//
type ChainProvider struct {
Providers []Provider
curr Provider
VerboseErrors bool
}
// NewChainCredentials returns a pointer to a new Credentials object
// wrapping a chain of providers.
func NewChainCredentials(providers []Provider) *Credentials {
return NewCredentials(&ChainProvider{
Providers: append([]Provider{}, providers...),
})
}
// Retrieve returns the credentials value, or an error if no provider
// returned a value without error.
//
// If a provider is found it will be cached and any calls to IsExpired()
// will return the expired state of the cached provider.
func (c *ChainProvider) Retrieve() (Value, error) {
var errs []error
for _, p := range c.Providers {
creds, err := p.Retrieve()
if err == nil {
c.curr = p
return creds, nil
}
errs = append(errs, err)
}
c.curr = nil
var err error
err = ErrNoValidProvidersFoundInChain
if c.VerboseErrors {
err = awserr.NewBatchError("NoCredentialProviders", "no valid providers in chain", errs)
}
return Value{}, err
}
// IsExpired will return the expired state of the currently cached provider
// if there is one. If there is no current provider, true will be returned.
func (c *ChainProvider) IsExpired() bool {
if c.curr != nil {
return c.curr.IsExpired()
}
return true
}

292
vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials.go generated vendored Normal file
View File

@ -0,0 +1,292 @@
// Package credentials provides credential retrieval and management
//
// The Credentials is the primary method of getting access to and managing
// credentials Values. Using dependency injection retrieval of the credential
// values is handled by an object which satisfies the Provider interface.
//
// By default the Credentials.Get() will cache the successful result of a
// Provider's Retrieve() until Provider.IsExpired() returns true, at which
// point Credentials will call the Provider's Retrieve() to get a new credential Value.
//
// The Provider is responsible for determining when credentials Value have expired.
// It is also important to note that Credentials will always call Retrieve the
// first time Credentials.Get() is called.
//
// Example of using the environment variable credentials.
//
// creds := credentials.NewEnvCredentials()
//
// // Retrieve the credentials value
// credValue, err := creds.Get()
// if err != nil {
// // handle error
// }
//
// Example of forcing credentials to expire and be refreshed on the next Get().
// This may be helpful to proactively expire credentials and refresh them sooner
// than they would naturally expire on their own.
//
// creds := credentials.NewCredentials(&ec2rolecreds.EC2RoleProvider{})
// creds.Expire()
// credsValue, err := creds.Get()
// // New credentials will be retrieved instead of from cache.
//
//
// Custom Provider
//
// Each Provider built into this package also provides a helper method to generate
// a Credentials pointer setup with the provider. To use a custom Provider just
// create a type which satisfies the Provider interface and pass it to the
// NewCredentials method.
//
// type MyProvider struct{}
// func (m *MyProvider) Retrieve() (Value, error) {...}
// func (m *MyProvider) IsExpired() bool {...}
//
// creds := credentials.NewCredentials(&MyProvider{})
// credValue, err := creds.Get()
//
package credentials
import (
"fmt"
"github.com/aws/aws-sdk-go/aws/awserr"
"sync"
"time"
)
// AnonymousCredentials is an empty Credential object that can be used as
// dummy placeholder credentials for requests that do not need to be signed.
//
// This Credentials can be used to configure a service to not sign requests
// when making service API calls. For example, when accessing public
// s3 buckets.
//
// svc := s3.New(session.Must(session.NewSession(&aws.Config{
// Credentials: credentials.AnonymousCredentials,
// })))
// // Access public S3 buckets.
var AnonymousCredentials = NewStaticCredentials("", "", "")
// A Value is the AWS credentials value for individual credential fields.
type Value struct {
// AWS Access key ID
AccessKeyID string
// AWS Secret Access Key
SecretAccessKey string
// AWS Session Token
SessionToken string
// Provider used to get credentials
ProviderName string
}
// A Provider is the interface for any component which will provide credentials
// Value. A provider is required to manage its own Expired state, and what
// being expired means.
//
// The Provider should not need to implement its own mutexes, because
// that will be managed by Credentials.
type Provider interface {
// Retrieve returns nil if it successfully retrieved the value.
// An error is returned if the value was not obtainable, or is empty.
Retrieve() (Value, error)
// IsExpired returns if the credentials are no longer valid, and need
// to be retrieved.
IsExpired() bool
}
// An Expirer is an interface that Providers can implement to expose the expiration
// time, if known. If the Provider cannot accurately provide this info,
// it should not implement this interface.
type Expirer interface {
// The time at which the credentials are no longer valid
ExpiresAt() time.Time
}
// An ErrorProvider is a stub credentials provider that always returns an
// error. It is used by the SDK when constructing a known provider is not
// possible due to an error.
type ErrorProvider struct {
// The error to be returned from Retrieve
Err error
// The provider name to set on the Retrieved returned Value
ProviderName string
}
// Retrieve will always return the error that the ErrorProvider was created with.
func (p ErrorProvider) Retrieve() (Value, error) {
return Value{ProviderName: p.ProviderName}, p.Err
}
// IsExpired will always return false; an ErrorProvider never expires.
func (p ErrorProvider) IsExpired() bool {
return false
}
// An Expiry provides shared expiration logic to be used by credentials
// providers to implement expiry functionality.
//
// The best method to use this struct is as an anonymous field within the
// provider's struct.
//
// Example:
// type EC2RoleProvider struct {
// Expiry
// ...
// }
type Expiry struct {
// The date/time at which the credentials expire.
expiration time.Time
// If set will be used by IsExpired to determine the current time.
// Defaults to time.Now if CurrentTime is not set. Available for testing
// to be able to mock out the current time.
CurrentTime func() time.Time
}
// SetExpiration sets the expiration IsExpired will check when called.
//
// If window is greater than 0 the expiration time will be reduced by the
// window value.
//
// Using a window is helpful to trigger credentials to expire sooner than
// the expiration time given to ensure no requests are made with expired
// tokens.
func (e *Expiry) SetExpiration(expiration time.Time, window time.Duration) {
e.expiration = expiration
if window > 0 {
e.expiration = e.expiration.Add(-window)
}
}
// IsExpired returns if the credentials are expired.
func (e *Expiry) IsExpired() bool {
curTime := e.CurrentTime
if curTime == nil {
curTime = time.Now
}
return e.expiration.Before(curTime())
}
// ExpiresAt returns the expiration time of the credential
func (e *Expiry) ExpiresAt() time.Time {
return e.expiration
}
// A Credentials provides concurrency safe retrieval of AWS credentials Value.
// Credentials will cache the credentials value until they expire. Once the value
// expires the next Get will attempt to retrieve valid credentials.
//
// Credentials is safe to use across multiple goroutines and will manage the
// synchronous state so the Providers do not need to implement their own
// synchronization.
//
// The first Credentials.Get() will always call Provider.Retrieve() to get the
// first instance of the credentials Value. All calls to Get() after that
// will return the cached credentials Value until IsExpired() returns true.
type Credentials struct {
creds Value
forceRefresh bool
m sync.RWMutex
provider Provider
}
// NewCredentials returns a pointer to a new Credentials with the provider set.
func NewCredentials(provider Provider) *Credentials {
return &Credentials{
provider: provider,
forceRefresh: true,
}
}
// Get returns the credentials value, or error if the credentials Value failed
// to be retrieved.
//
// Will return the cached credentials Value if it has not expired. If the
// credentials Value has expired the Provider's Retrieve() will be called
// to refresh the credentials.
//
// If Credentials.Expire() was called the credentials Value will be force
// expired, and the next call to Get() will cause them to be refreshed.
func (c *Credentials) Get() (Value, error) {
// Check the cached credentials first with just the read lock.
c.m.RLock()
if !c.isExpired() {
creds := c.creds
c.m.RUnlock()
return creds, nil
}
c.m.RUnlock()
// Credentials are expired; retrieve the credentials while taking the full
// lock.
c.m.Lock()
defer c.m.Unlock()
if c.isExpired() {
creds, err := c.provider.Retrieve()
if err != nil {
return Value{}, err
}
c.creds = creds
c.forceRefresh = false
}
return c.creds, nil
}
// Expire expires the credentials and forces them to be retrieved on the
// next call to Get().
//
// This will override the Provider's expired state, and force Credentials
// to call the Provider's Retrieve().
func (c *Credentials) Expire() {
c.m.Lock()
defer c.m.Unlock()
c.forceRefresh = true
}
// IsExpired returns if the credentials are no longer valid, and need
// to be retrieved.
//
// If the Credentials were forced to be expired with Expire() this will
// reflect that override.
func (c *Credentials) IsExpired() bool {
c.m.RLock()
defer c.m.RUnlock()
return c.isExpired()
}
// isExpired helper method wrapping the definition of expired credentials.
func (c *Credentials) isExpired() bool {
return c.forceRefresh || c.provider.IsExpired()
}
// ExpiresAt provides access to the functionality of the Expirer interface of
// the underlying Provider, if it supports that interface. Otherwise, it returns
// an error.
func (c *Credentials) ExpiresAt() (time.Time, error) {
c.m.RLock()
defer c.m.RUnlock()
expirer, ok := c.provider.(Expirer)
if !ok {
return time.Time{}, awserr.New("ProviderNotExpirer",
fmt.Sprintf("provider %s does not support ExpiresAt()", c.creds.ProviderName),
nil)
}
if c.forceRefresh {
// set expiration time to the distant past
return time.Time{}, nil
}
return expirer.ExpiresAt(), nil
}
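A sketch (not part of the vendored file) of a custom Provider embedding Expiry, as the doc comments above suggest; tokenProvider and its placeholder values are hypothetical:

package main

import (
	"fmt"
	"time"

	"github.com/aws/aws-sdk-go/aws/credentials"
)

// tokenProvider is a hypothetical Provider that embeds credentials.Expiry
// to inherit IsExpired/SetExpiration, as the Expiry docs above suggest.
type tokenProvider struct {
	credentials.Expiry
}

func (p *tokenProvider) Retrieve() (credentials.Value, error) {
	// Pretend a token valid for one hour was fetched; the 5-minute window
	// makes IsExpired flip early so callers refresh before hard expiry.
	p.SetExpiration(time.Now().Add(time.Hour), 5*time.Minute)
	return credentials.Value{
		AccessKeyID:     "AKID",   // placeholder
		SecretAccessKey: "SECRET", // placeholder
		ProviderName:    "tokenProvider",
	}, nil
}

func main() {
	creds := credentials.NewCredentials(&tokenProvider{})
	v, err := creds.Get() // the first Get always calls Retrieve
	fmt.Println(v.ProviderName, err)
}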

178
vendor/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider.go generated vendored Normal file
View File

@ -0,0 +1,178 @@
package ec2rolecreds
import (
"bufio"
"encoding/json"
"fmt"
"strings"
"time"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/aws/client"
"github.com/aws/aws-sdk-go/aws/credentials"
"github.com/aws/aws-sdk-go/aws/ec2metadata"
"github.com/aws/aws-sdk-go/internal/sdkuri"
)
// ProviderName provides a name of EC2Role provider
const ProviderName = "EC2RoleProvider"
// An EC2RoleProvider retrieves credentials from the EC2 service, and keeps
// track of whether those credentials are expired.
//
// Example of how to configure the EC2RoleProvider with a custom http.Client,
// Endpoint, or ExpiryWindow:
//
// p := &ec2rolecreds.EC2RoleProvider{
// // Pass in a custom timeout to be used when requesting
// // IAM EC2 Role credentials.
// Client: ec2metadata.New(sess, aws.Config{
// HTTPClient: &http.Client{Timeout: 10 * time.Second},
// }),
//
// // Do not use early expiry of credentials. If a non-zero value is
// // specified the credentials will be expired early
// ExpiryWindow: 0,
// }
type EC2RoleProvider struct {
credentials.Expiry
// Required EC2Metadata client to use when connecting to EC2 metadata service.
Client *ec2metadata.EC2Metadata
// ExpiryWindow will allow the credentials to trigger refreshing prior to
// the credentials actually expiring. This is beneficial so race conditions
// with expiring credentials do not cause requests to fail unexpectedly
// due to ExpiredTokenException exceptions.
//
// So an ExpiryWindow of 10s would cause calls to IsExpired() to return true
// 10 seconds before the credentials are actually expired.
//
// If ExpiryWindow is 0 or less it will be ignored.
ExpiryWindow time.Duration
}
// NewCredentials returns a pointer to a new Credentials object wrapping
// the EC2RoleProvider. Takes a ConfigProvider to create an EC2Metadata client.
// The ConfigProvider is satisfied by the session.Session type.
func NewCredentials(c client.ConfigProvider, options ...func(*EC2RoleProvider)) *credentials.Credentials {
p := &EC2RoleProvider{
Client: ec2metadata.New(c),
}
for _, option := range options {
option(p)
}
return credentials.NewCredentials(p)
}
// NewCredentialsWithClient returns a pointer to a new Credentials object wrapping
// the EC2RoleProvider. Takes an EC2Metadata client to use when connecting to EC2
// metadata service.
func NewCredentialsWithClient(client *ec2metadata.EC2Metadata, options ...func(*EC2RoleProvider)) *credentials.Credentials {
p := &EC2RoleProvider{
Client: client,
}
for _, option := range options {
option(p)
}
return credentials.NewCredentials(p)
}
// Retrieve retrieves credentials from the EC2 service.
// An error will be returned if the request fails, or the desired
// credentials cannot be extracted.
func (m *EC2RoleProvider) Retrieve() (credentials.Value, error) {
credsList, err := requestCredList(m.Client)
if err != nil {
return credentials.Value{ProviderName: ProviderName}, err
}
if len(credsList) == 0 {
return credentials.Value{ProviderName: ProviderName}, awserr.New("EmptyEC2RoleList", "empty EC2 Role list", nil)
}
credsName := credsList[0]
roleCreds, err := requestCred(m.Client, credsName)
if err != nil {
return credentials.Value{ProviderName: ProviderName}, err
}
m.SetExpiration(roleCreds.Expiration, m.ExpiryWindow)
return credentials.Value{
AccessKeyID: roleCreds.AccessKeyID,
SecretAccessKey: roleCreds.SecretAccessKey,
SessionToken: roleCreds.Token,
ProviderName: ProviderName,
}, nil
}
// An ec2RoleCredRespBody provides the shape for unmarshaling credential
// request responses.
type ec2RoleCredRespBody struct {
// Success State
Expiration time.Time
AccessKeyID string
SecretAccessKey string
Token string
// Error state
Code string
Message string
}
const iamSecurityCredsPath = "iam/security-credentials/"
// requestCredList requests a list of credentials from the EC2 service. If
// there are no credentials, or there is an error making or receiving the
// request, an error is returned.
func requestCredList(client *ec2metadata.EC2Metadata) ([]string, error) {
resp, err := client.GetMetadata(iamSecurityCredsPath)
if err != nil {
return nil, awserr.New("EC2RoleRequestError", "no EC2 instance role found", err)
}
credsList := []string{}
s := bufio.NewScanner(strings.NewReader(resp))
for s.Scan() {
credsList = append(credsList, s.Text())
}
if err := s.Err(); err != nil {
return nil, awserr.New("SerializationError", "failed to read EC2 instance role from metadata service", err)
}
return credsList, nil
}
// requestCred requests the credentials for a specific role name from the EC2 service.
//
// If the credentials cannot be found, or there is an error reading the
// response, an error will be returned.
func requestCred(client *ec2metadata.EC2Metadata, credsName string) (ec2RoleCredRespBody, error) {
resp, err := client.GetMetadata(sdkuri.PathJoin(iamSecurityCredsPath, credsName))
if err != nil {
return ec2RoleCredRespBody{},
awserr.New("EC2RoleRequestError",
fmt.Sprintf("failed to get %s EC2 instance role credentials", credsName),
err)
}
respCreds := ec2RoleCredRespBody{}
if err := json.NewDecoder(strings.NewReader(resp)).Decode(&respCreds); err != nil {
return ec2RoleCredRespBody{},
awserr.New("SerializationError",
fmt.Sprintf("failed to decode %s EC2 instance role credentials", credsName),
err)
}
if respCreds.Code != "Success" {
// If an error code was returned something failed requesting the role.
return ec2RoleCredRespBody{}, awserr.New(respCreds.Code, respCreds.Message, nil)
}
return respCreds, nil
}
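A brief usage sketch (not in this diff) of the functional options accepted by NewCredentials above; the 10-second window is illustrative:

package main

import (
	"time"

	"github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds"
	"github.com/aws/aws-sdk-go/aws/session"
)

func main() {
	sess := session.Must(session.NewSession())

	// A 10s ExpiryWindow makes IsExpired flip ten seconds before the role
	// credentials actually expire, avoiding ExpiredTokenException races.
	creds := ec2rolecreds.NewCredentials(sess, func(p *ec2rolecreds.EC2RoleProvider) {
		p.ExpiryWindow = 10 * time.Second
	})
	_ = creds
}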

Some files were not shown because too many files have changed in this diff.