Update vendoring to use golang/dep

commit a1a37d335a8e89ac89d85c00c8585d3fc02e064a
Author: Josh Baker <joshbaker77@gmail.com>
Date:   Thu Oct 5 07:36:54 2017 -0700

    use symlink instead of copy

commit 96399c2c92620f633611c778e5473200bfd48d41
Author: Josh Baker <joshbaker77@gmail.com>
Date:   Thu Oct 5 07:19:26 2017 -0700

    use dep for vendoring
Josh Baker committed 2017-10-05 07:40:19 -07:00
parent d4d51a8191, commit 26d0083faf
2590 changed files with 817759 additions and 18203 deletions

Gopkg.lock (new file, generated)

@ -0,0 +1,207 @@
# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'.
[[projects]]
name = "github.com/Shopify/sarama"
packages = ["."]
revision = "bbdbe644099b7fdc8327d5cc69c030945188b2e9"
version = "v1.13.0"
[[projects]]
name = "github.com/davecgh/go-spew"
packages = ["spew"]
revision = "346938d642f2ec3594ed81d874461961cd0faa76"
version = "v1.1.0"
[[projects]]
name = "github.com/eapache/go-resiliency"
packages = ["breaker"]
revision = "6800482f2c813e689c88b7ed3282262385011890"
version = "v1.0.0"
[[projects]]
branch = "master"
name = "github.com/eapache/go-xerial-snappy"
packages = ["."]
revision = "bb955e01b9346ac19dc29eb16586c90ded99a98c"
[[projects]]
name = "github.com/eapache/queue"
packages = ["."]
revision = "ded5959c0d4e360646dc9e9908cff48666781367"
version = "v1.0.2"
[[projects]]
name = "github.com/eclipse/paho.mqtt.golang"
packages = [".","packets"]
revision = "aff15770515e3c57fc6109da73d42b0d46f7f483"
version = "v1.1.0"
[[projects]]
name = "github.com/garyburd/redigo"
packages = ["internal","redis"]
revision = "433969511232c397de61b1442f9fd49ec06ae9ba"
version = "v1.1.0"
[[projects]]
branch = "master"
name = "github.com/golang/protobuf"
packages = ["proto","ptypes","ptypes/any","ptypes/duration","ptypes/timestamp"]
revision = "130e6b02ab059e7b717a096f397c5b60111cae74"
[[projects]]
branch = "master"
name = "github.com/golang/snappy"
packages = ["."]
revision = "553a641470496b2327abcac10b36396bd98e45c9"
[[projects]]
branch = "master"
name = "github.com/peterh/liner"
packages = ["."]
revision = "a37ad39843113264dae84a5d89fcee28f50b35c6"
[[projects]]
name = "github.com/pierrec/lz4"
packages = ["."]
revision = "08c27939df1bd95e881e2c2367a749964ad1fceb"
version = "v1.0.1"
[[projects]]
name = "github.com/pierrec/xxHash"
packages = ["xxHash32"]
revision = "f051bb7f1d1aaf1b5a665d74fb6b0217712c69f7"
version = "v0.1.1"
[[projects]]
name = "github.com/pmezard/go-difflib"
packages = ["difflib"]
revision = "792786c7400a136282c1664665ae0a8db921c6c2"
version = "v1.0.0"
[[projects]]
branch = "master"
name = "github.com/rcrowley/go-metrics"
packages = ["."]
revision = "1f30fe9094a513ce4c700b9a54458bbb0c96996c"
[[projects]]
branch = "master"
name = "github.com/streadway/amqp"
packages = ["."]
revision = "cefed15a0bd808d13947f228770a81b06ebe8e45"
[[projects]]
name = "github.com/stretchr/testify"
packages = ["assert"]
revision = "69483b4bd14f5845b5a1e55bca19e954e827f1d0"
version = "v1.1.4"
[[projects]]
branch = "master"
name = "github.com/tidwall/btree"
packages = ["."]
revision = "9876f1454cf0993a53d74c27196993e345f50dd1"
[[projects]]
branch = "master"
name = "github.com/tidwall/buntdb"
packages = ["."]
revision = "b67b1b8c1658cb01502801c14e33c61e6c4cbb95"
[[projects]]
name = "github.com/tidwall/gjson"
packages = ["."]
revision = "5a69e67cfd8f6f9b0044ed49f5079d0eeed28653"
version = "v1.0.1"
[[projects]]
branch = "master"
name = "github.com/tidwall/grect"
packages = ["."]
revision = "ba9a043346eba55344e40d66a5e74cfda3a9d293"
[[projects]]
branch = "master"
name = "github.com/tidwall/match"
packages = ["."]
revision = "1731857f09b1f38450e2c12409748407822dc6be"
[[projects]]
branch = "master"
name = "github.com/tidwall/redbench"
packages = ["."]
revision = "637a608ebec1acbf049c2e4a5eda6c2d72aa3af1"
[[projects]]
branch = "master"
name = "github.com/tidwall/redcon"
packages = ["."]
revision = "3df12143a4fe57c9f0d7f0f37e29ad95bc37f9a7"
[[projects]]
branch = "master"
name = "github.com/tidwall/resp"
packages = ["."]
revision = "b2b1a7ca20e34ad839fdb81f78e67522c99959f0"
[[projects]]
branch = "master"
name = "github.com/tidwall/rtree"
packages = ["."]
revision = "d4a8a3d30d5729f85edfba1745241f3a621d0359"
[[projects]]
name = "github.com/tidwall/sjson"
packages = ["."]
revision = "6a22caf2fd45d5e2119bfc3717e984f15a7eb7ee"
version = "v1.0.0"
[[projects]]
branch = "master"
name = "github.com/tidwall/tinyqueue"
packages = ["."]
revision = "1feaf062ef04a231c9126f99a68eaa579fd0e390"
[[projects]]
branch = "master"
name = "golang.org/x/crypto"
packages = ["ssh/terminal"]
revision = "9419663f5a44be8b34ca85f08abc5fe1be11f8a3"
[[projects]]
branch = "master"
name = "golang.org/x/net"
packages = ["context","http2","http2/hpack","idna","internal/timeseries","lex/httplex","proxy","trace","websocket"]
revision = "a04bdaca5b32abe1c069418fb7088ae607de5bd0"
[[projects]]
branch = "master"
name = "golang.org/x/sys"
packages = ["unix","windows"]
revision = "314a259e304ff91bd6985da2a7149bbf91237993"
[[projects]]
branch = "master"
name = "golang.org/x/text"
packages = ["collate","collate/build","internal/colltab","internal/gen","internal/tag","internal/triegen","internal/ucd","language","secure/bidirule","transform","unicode/bidi","unicode/cldr","unicode/norm","unicode/rangetable"]
revision = "d82c1812e304abfeeabd31e995a115a2855bf642"
[[projects]]
branch = "master"
name = "google.golang.org/genproto"
packages = ["googleapis/rpc/status"]
revision = "f676e0f3ac6395ff1a529ae59a6670878a8371a6"
[[projects]]
name = "google.golang.org/grpc"
packages = [".","codes","connectivity","credentials","grpclb/grpc_lb_v1/messages","grpclog","internal","keepalive","metadata","naming","peer","stats","status","tap","transport"]
revision = "f92cdcd7dcdc69e81b2d7b338479a19a8723cfa3"
version = "v1.6.0"
[solve-meta]
analyzer-name = "dep"
analyzer-version = 1
inputs-digest = "03a37ac805762eaebe7dbf8d7b1a079894ff6b0f67f4d2f38d3b9348dd5af40f"
solver-name = "gps-cdcl"
solver-version = 1

Gopkg.toml (new file)

@ -0,0 +1,94 @@
# Gopkg.toml example
#
# Refer to https://github.com/golang/dep/blob/master/docs/Gopkg.toml.md
# for detailed Gopkg.toml documentation.
#
# required = ["github.com/user/thing/cmd/thing"]
# ignored = ["github.com/user/project/pkgX", "bitbucket.org/user/project/pkgA/pkgY"]
#
# [[constraint]]
# name = "github.com/user/project"
# version = "1.0.0"
#
# [[constraint]]
# name = "github.com/user/project2"
# branch = "dev"
# source = "github.com/myfork/project2"
#
# [[override]]
# name = "github.com/x/y"
# version = "2.4.0"
[[constraint]]
name = "github.com/Shopify/sarama"
version = "1.13.0"
[[constraint]]
name = "github.com/eclipse/paho.mqtt.golang"
version = "1.1.0"
[[constraint]]
name = "github.com/garyburd/redigo"
version = "1.1.0"
[[constraint]]
branch = "master"
name = "github.com/golang/protobuf"
[[constraint]]
branch = "master"
name = "github.com/peterh/liner"
[[constraint]]
branch = "master"
name = "github.com/streadway/amqp"
[[constraint]]
name = "github.com/stretchr/testify"
version = "1.1.4"
[[constraint]]
branch = "master"
name = "github.com/tidwall/btree"
[[constraint]]
branch = "master"
name = "github.com/tidwall/buntdb"
[[constraint]]
name = "github.com/tidwall/gjson"
version = "1.0.1"
[[constraint]]
branch = "master"
name = "github.com/tidwall/redbench"
[[constraint]]
branch = "master"
name = "github.com/tidwall/redcon"
[[constraint]]
branch = "master"
name = "github.com/tidwall/resp"
[[constraint]]
name = "github.com/tidwall/sjson"
version = "1.0.0"
[[constraint]]
branch = "master"
name = "github.com/tidwall/tinyqueue"
[[constraint]]
branch = "master"
name = "golang.org/x/crypto"
[[constraint]]
branch = "master"
name = "golang.org/x/net"
[[constraint]]
name = "google.golang.org/grpc"
version = "1.6.0"


@ -113,23 +113,6 @@ if [ "$1" == "package" ]; then
exit
fi
if [ "$1" == "vendor" ]; then
pkg="$2"
if [ "$pkg" == "" ]; then
echo "no package specified"
exit
fi
if [ ! -d "$GOPATH/src/$pkg" ]; then
echo "invalid package"
exit
fi
rm -rf vendor/$pkg/
mkdir -p vendor/$pkg/
cp -rf $GOPATH/src/$pkg/* vendor/$pkg/
rm -rf vendor/$pkg/.git
exit
fi
# temp directory for storing isolated environment.
TMP="$(mktemp -d -t tile38.XXXX)"
function rmtemp {
@ -137,18 +120,12 @@ function rmtemp {
}
trap rmtemp EXIT
if [ "$NOCOPY" != "1" ]; then
# copy all files to an isloated directory.
WD="$TMP/src/github.com/tidwall/tile38"
export GOPATH="$TMP"
for file in `find . -type f`; do
# TODO: use .gitignore to ignore, or possibly just use git to determine the file list.
if [[ "$file" != "." && "$file" != ./.git* && "$file" != ./data* && "$file" != ./tile38-* ]]; then
mkdir -p "$WD/$(dirname "${file}")"
cp -P "$file" "$WD/$(dirname "${file}")"
fi
done
cd $WD
if [ "$NOLINK" != "1" ]; then
# symlink root to isolated directory
mkdir -p "$TMP/go/src/github.com/tidwall"
ln -s $OD "$TMP/go/src/github.com/tidwall/tile38"
export GOPATH="$TMP/go"
cd "$TMP/go/src/github.com/tidwall/tile38"
fi
# build and store objects into original directory.


@ -0,0 +1,31 @@
# Contributing
Contributions are always welcome, both reporting issues and submitting pull requests!
### Reporting issues
Please make sure to include any potentially useful information in the issue, so we can pinpoint the issue faster without going back and forth.
- What SHA of Sarama are you running? If this is not the latest SHA on the master branch, please check whether the problem persists with the latest version.
- You can set `sarama.Logger` to a [log.Logger](http://golang.org/pkg/log/#Logger) instance to capture debug output (see the sketch below). Please include it in your issue description.
- Also look at the logs of the Kafka broker you are connected to. If you see anything out of the ordinary, please include it.
Also, please include the following information about your environment, so we can help you faster:
- What version of Kafka are you using?
- What version of Go are you using?
- What are the values of your Producer/Consumer/Client configuration?
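
For illustration (not part of the upstream file), a minimal sketch of routing Sarama's debug output to stdout so it can be pasted into an issue report; it assumes only the standard library log package:

package main

import (
	"log"
	"os"

	"github.com/Shopify/sarama"
)

func main() {
	// Send Sarama's internal debug logging to stdout; attach the captured
	// output to the issue description.
	sarama.Logger = log.New(os.Stdout, "[sarama] ", log.LstdFlags)
}
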
### Submitting pull requests
We will gladly accept bug fixes or additions to this library. Please fork this library, commit & push your changes, and open a pull request. Because this library is in production use by many people and applications, we code review all additions. To make the review process go as smoothly as possible, please consider the following.
- If you plan to work on something major, please open an issue to discuss the design first.
- Don't break backwards compatibility. If you really have to, open an issue to discuss this first.
- Make sure to use the `go fmt` command to format your code according to the standards. Even better, set up your editor to do this for you when saving.
- Run [go vet](https://godoc.org/golang.org/x/tools/cmd/vet) to detect any suspicious constructs in your code that could be bugs.
- Explicitly handle all error return values. If you really want to ignore an error value, you can assign it to `_`. You can use [errcheck](https://github.com/kisielk/errcheck) to verify whether you have handled all errors.
- You may also want to run [golint](https://github.com/golang/lint) as well to detect style problems.
- Add tests that cover the changes you made. Make sure to run `go test` with the `-race` argument to test for race conditions.
- Make sure your code is supported by all the Go versions we support. You can rely on [Travis CI](https://travis-ci.org/Shopify/sarama) for testing older Go versions.


@ -0,0 +1,20 @@
##### Versions
*Please specify real version numbers or git SHAs, not just "Latest" since that changes fairly regularly.*
Sarama Version:
Kafka Version:
Go Version:
##### Configuration
What configuration values are you using for Sarama and Kafka?
##### Logs
When filing an issue please provide logs from Sarama and Kafka if at all
possible. You can set `sarama.Logger` to a `log.Logger` to capture Sarama debug
output.
##### Problem Description

vendor/github.com/Shopify/sarama/.gitignore (new file, generated, vendored)

@ -0,0 +1,24 @@
# Compiled Object files, Static and Dynamic libs (Shared Objects)
*.o
*.a
*.so
*.test
# Folders
_obj
_test
.vagrant
# Architecture specific extensions/prefixes
*.[568vq]
[568vq].out
*.cgo1.go
*.cgo2.c
_cgo_defun.c
_cgo_gotypes.go
_cgo_export.*
_testmain.go
*.exe

vendor/github.com/Shopify/sarama/.travis.yml (new file, generated, vendored)

@ -0,0 +1,34 @@
language: go
go:
- 1.7.x
- 1.8.x
- 1.9.x
env:
global:
- KAFKA_PEERS=localhost:9091,localhost:9092,localhost:9093,localhost:9094,localhost:9095
- TOXIPROXY_ADDR=http://localhost:8474
- KAFKA_INSTALL_ROOT=/home/travis/kafka
- KAFKA_HOSTNAME=localhost
- DEBUG=true
matrix:
- KAFKA_VERSION=0.9.0.1
- KAFKA_VERSION=0.10.2.1
- KAFKA_VERSION=0.11.0.1
before_install:
- export REPOSITORY_ROOT=${TRAVIS_BUILD_DIR}
- vagrant/install_cluster.sh
- vagrant/boot_cluster.sh
- vagrant/create_topics.sh
install:
- make install_dependencies
script:
- make test
- make vet
- make errcheck
- make fmt
sudo: false


@ -1,5 +1,68 @@
# Changelog
#### Version 1.13.0 (2017-10-04)
New Features:
- Support for FetchRequest version 3
([#905](https://github.com/Shopify/sarama/pull/905)).
- Permit setting version on mock FetchResponses
([#939](https://github.com/Shopify/sarama/pull/939)).
- Add a configuration option to support storing only minimal metadata for
extremely large clusters
([#937](https://github.com/Shopify/sarama/pull/937)).
- Add `PartitionOffsetManager.ResetOffset` for backtracking tracked offsets
([#932](https://github.com/Shopify/sarama/pull/932)).
Improvements:
- Provide the block-level timestamp when consuming compressed messages
([#885](https://github.com/Shopify/sarama/issues/885)).
- `Client.Replicas` and `Client.InSyncReplicas` now respect the order returned
by the broker, which can be meaningful
([#930](https://github.com/Shopify/sarama/pull/930)).
- Use a `Ticker` to reduce consumer timer overhead at the cost of higher
variance in the actual timeout
([#933](https://github.com/Shopify/sarama/pull/933)).
Bug Fixes:
- Gracefully handle messages with negative timestamps
([#907](https://github.com/Shopify/sarama/pull/907)).
- Raise a proper error when encountering an unknown message version
([#940](https://github.com/Shopify/sarama/pull/940)).
#### Version 1.12.0 (2017-05-08)
New Features:
- Added support for the `ApiVersions` request and response pair, and Kafka
version 0.10.2 ([#867](https://github.com/Shopify/sarama/pull/867)). Note
that you still need to specify the Kafka version in the Sarama configuration
for the time being.
- Added a `Brokers` method to the Client which returns the complete set of
active brokers ([#813](https://github.com/Shopify/sarama/pull/813)).
- Added an `InSyncReplicas` method to the Client which returns the set of all
in-sync broker IDs for the given partition, now that the Kafka versions for
which this was misleading are no longer in our supported set
([#872](https://github.com/Shopify/sarama/pull/872)).
- Added a `NewCustomHashPartitioner` method which allows constructing a hash
partitioner with a custom hash method in case the default (FNV-1a) is not
suitable
([#837](https://github.com/Shopify/sarama/pull/837),
[#841](https://github.com/Shopify/sarama/pull/841)).
Improvements:
- Recognize more Kafka error codes
([#859](https://github.com/Shopify/sarama/pull/859)).
Bug Fixes:
- Fix an issue where decoding a malformed FetchRequest would not return the
correct error ([#818](https://github.com/Shopify/sarama/pull/818)).
- Respect ordering of group protocols in JoinGroupRequests. This fix is
transparent if you're using the `AddGroupProtocol` or
`AddGroupProtocolMetadata` helpers; otherwise you will need to switch from
the `GroupProtocols` field (now deprecated) to use `OrderedGroupProtocols`
([#812](https://github.com/Shopify/sarama/issues/812)).
- Fix an alignment-related issue with atomics on 32-bit architectures
([#859](https://github.com/Shopify/sarama/pull/859)).
#### Version 1.11.0 (2016-12-20)
_Important:_ As of Sarama 1.11 it is necessary to set the config value of

vendor/github.com/Shopify/sarama/Makefile (new file, generated, vendored)

@ -0,0 +1,21 @@
default: fmt vet errcheck test
test:
go test -v -timeout 60s -race ./...
vet:
go vet ./...
errcheck:
errcheck github.com/Shopify/sarama/...
fmt:
@if [ -n "$$(go fmt ./...)" ]; then echo 'Please run go fmt on your code.' && exit 1; fi
install_dependencies: install_errcheck get
install_errcheck:
go get github.com/kisielk/errcheck
get:
go get -t


@ -13,12 +13,14 @@ Sarama is an MIT-licensed Go client library for [Apache Kafka](https://kafka.apa
- The [examples](./examples) directory contains more elaborate example applications.
- The [tools](./tools) directory contains command line tools that can be useful for testing, diagnostics, and instrumentation.
You might also want to look at the [Frequently Asked Questions](https://github.com/Shopify/sarama/wiki/Frequently-Asked-Questions).
### Compatibility and API stability
Sarama provides a "2 releases + 2 months" compatibility guarantee: we support
the two latest stable releases of Kafka and Go, and we provide a two month
grace period for older releases. This means we currently officially support
Go 1.7 and 1.6, and Kafka 0.10.0 and 0.9.0, although older releases are
Go 1.9 through 1.7, and Kafka 0.11 through 0.9, although older releases are
still likely to work.
Sarama follows semantic versioning and provides API stability via the gopkg.in service.

vendor/github.com/Shopify/sarama/Vagrantfile (new file, generated, vendored)

@ -0,0 +1,20 @@
# -*- mode: ruby -*-
# vi: set ft=ruby :
# Vagrantfile API/syntax version. Don't touch unless you know what you're doing!
VAGRANTFILE_API_VERSION = "2"
# We have 5 * 192MB ZK processes and 5 * 320MB Kafka processes => 2560MB
MEMORY = 3072
Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
config.vm.box = "ubuntu/trusty64"
config.vm.provision :shell, path: "vagrant/provision.sh"
config.vm.network "private_network", ip: "192.168.100.67"
config.vm.provider "virtualbox" do |v|
v.memory = MEMORY
end
end


@ -33,7 +33,7 @@ type AsyncProducer interface {
// wish to send.
Input() chan<- *ProducerMessage
// Successes is the success output channel back to the user when AckSuccesses is
// Successes is the success output channel back to the user when Return.Successes is
// enabled. If Return.Successes is true, you MUST read from this channel or the
// Producer will deadlock. It is suggested that you send and read messages
// together in a single select statement.
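
As an aside (not part of this diff), a minimal sketch of the send-and-read pattern the comment above describes; the broker address and topic name are placeholders:

package main

import (
	"log"

	"github.com/Shopify/sarama"
)

func main() {
	config := sarama.NewConfig()
	config.Producer.Return.Successes = true

	producer, err := sarama.NewAsyncProducer([]string{"localhost:9092"}, config)
	if err != nil {
		log.Fatalln(err)
	}
	defer producer.Close()

	msg := &sarama.ProducerMessage{Topic: "my_topic", Value: sarama.StringEncoder("payload")}
	for sent := false; !sent; {
		// Sending and reading in a single select prevents the deadlock the
		// comment warns about when Return.Successes is enabled.
		select {
		case producer.Input() <- msg:
			sent = true
		case s := <-producer.Successes():
			log.Println("delivered to partition", s.Partition, "at offset", s.Offset)
		case e := <-producer.Errors():
			log.Println("delivery failed:", e.Err)
		}
	}
}
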


@ -355,6 +355,17 @@ func (b *Broker) DescribeGroups(request *DescribeGroupsRequest) (*DescribeGroups
return response, nil
}
func (b *Broker) ApiVersions(request *ApiVersionsRequest) (*ApiVersionsResponse, error) {
response := new(ApiVersionsResponse)
err := b.sendAndReceive(request, response)
if err != nil {
return nil, err
}
return response, nil
}
func (b *Broker) send(rb protocolBody, promiseResponse bool) (*responsePromise, error) {
b.lock.Lock()
defer b.lock.Unlock()
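
For illustration only (not part of this diff), a hedged sketch of calling the new ApiVersions method against a single broker; the address is a placeholder and defaults are used for everything else:

package main

import (
	"log"

	"github.com/Shopify/sarama"
)

func main() {
	// Connect to one broker and ask which API versions it supports.
	broker := sarama.NewBroker("localhost:9092")
	if err := broker.Open(sarama.NewConfig()); err != nil {
		log.Fatalln(err)
	}
	defer broker.Close()

	response, err := broker.ApiVersions(&sarama.ApiVersionsRequest{})
	if err != nil {
		log.Fatalln(err)
	}
	log.Printf("supported API versions: %+v", response)
}
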


@ -284,6 +284,19 @@ var brokerTestTable = []struct {
t.Error("DescribeGroups request got no response!")
}
}},
{"ApiVersionsRequest",
[]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
func(t *testing.T, broker *Broker) {
request := ApiVersionsRequest{}
response, err := broker.ApiVersions(&request)
if err != nil {
t.Error(err)
}
if response == nil {
t.Error("ApiVersions request got no response!")
}
}},
}
func validateBrokerMetrics(t *testing.T, broker *Broker, mockBrokerMetrics brokerMetrics) {


@ -38,6 +38,11 @@ type Client interface {
// Replicas returns the set of all replica IDs for the given partition.
Replicas(topic string, partitionID int32) ([]int32, error)
// InSyncReplicas returns the set of all in-sync replica IDs for the given
// partition. In-sync replicas are replicas which are fully caught up with
// the partition leader.
InSyncReplicas(topic string, partitionID int32) ([]int32, error)
// RefreshMetadata takes a list of topics and queries the cluster to refresh the
// available metadata for those topics. If no topics are provided, it will refresh
// metadata for all topics.
@ -136,18 +141,20 @@ func NewClient(addrs []string, conf *Config) (Client, error) {
client.seedBrokers = append(client.seedBrokers, NewBroker(addrs[index]))
}
// do an initial fetch of all cluster metadata by specifying an empty list of topics
err := client.RefreshMetadata()
switch err {
case nil:
break
case ErrLeaderNotAvailable, ErrReplicaNotAvailable, ErrTopicAuthorizationFailed, ErrClusterAuthorizationFailed:
// indicates that maybe part of the cluster is down, but is not fatal to creating the client
Logger.Println(err)
default:
close(client.closed) // we haven't started the background updater yet, so we have to do this manually
_ = client.Close()
return nil, err
if conf.Metadata.Full {
// do an initial fetch of all cluster metadata by specifying an empty list of topics
err := client.RefreshMetadata()
switch err {
case nil:
break
case ErrLeaderNotAvailable, ErrReplicaNotAvailable, ErrTopicAuthorizationFailed, ErrClusterAuthorizationFailed:
// indicates that maybe part of the cluster is down, but is not fatal to creating the client
Logger.Println(err)
default:
close(client.closed) // we haven't started the background updater yet, so we have to do this manually
_ = client.Close()
return nil, err
}
}
go withRecover(client.backgroundMetadataUpdater)
@ -292,7 +299,32 @@ func (client *client) Replicas(topic string, partitionID int32) ([]int32, error)
if metadata.Err == ErrReplicaNotAvailable {
return nil, metadata.Err
}
return dupeAndSort(metadata.Replicas), nil
return dupInt32Slice(metadata.Replicas), nil
}
func (client *client) InSyncReplicas(topic string, partitionID int32) ([]int32, error) {
if client.Closed() {
return nil, ErrClosedClient
}
metadata := client.cachedMetadata(topic, partitionID)
if metadata == nil {
err := client.RefreshMetadata(topic)
if err != nil {
return nil, err
}
metadata = client.cachedMetadata(topic, partitionID)
}
if metadata == nil {
return nil, ErrUnknownTopicOrPartition
}
if metadata.Err == ErrReplicaNotAvailable {
return nil, metadata.Err
}
return dupInt32Slice(metadata.Isr), nil
}
func (client *client) Leader(topic string, partitionID int32) (*Broker, error) {
@ -575,7 +607,20 @@ func (client *client) backgroundMetadataUpdater() {
for {
select {
case <-ticker.C:
if err := client.RefreshMetadata(); err != nil {
topics := []string{}
if !client.conf.Metadata.Full {
if specificTopics, err := client.Topics(); err != nil {
Logger.Println("Client background metadata topic load:", err)
break
} else if len(specificTopics) == 0 {
Logger.Println("Client background metadata update: no specific topics to update")
break
} else {
topics = specificTopics
}
}
if err := client.RefreshMetadata(topics...); err != nil {
Logger.Println("Client background metadata update:", err)
}
case <-client.closer:
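
For illustration only (not part of this diff), a sketch of the new Client.InSyncReplicas call; the broker address, topic, and partition are placeholders:

package main

import (
	"log"

	"github.com/Shopify/sarama"
)

func main() {
	client, err := sarama.NewClient([]string{"localhost:9092"}, sarama.NewConfig())
	if err != nil {
		log.Fatalln(err)
	}
	defer client.Close()

	// Returns the in-sync replica IDs in the order reported by the broker,
	// matching the Replicas ordering change above.
	isr, err := client.InSyncReplicas("my_topic", 0)
	if err != nil {
		log.Fatalln(err)
	}
	log.Println("in-sync replica IDs:", isr)
}
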


@ -188,12 +188,23 @@ func TestClientMetadata(t *testing.T) {
replicas, err = client.Replicas("my_topic", 0)
if err != nil {
t.Error(err)
} else if replicas[0] != 1 {
t.Error("Incorrect (or unsorted) replica")
} else if replicas[1] != 3 {
t.Error("Incorrect (or unsorted) replica")
} else if replicas[0] != 3 {
t.Error("Incorrect (or sorted) replica")
} else if replicas[1] != 1 {
t.Error("Incorrect (or sorted) replica")
} else if replicas[2] != 5 {
t.Error("Incorrect (or unsorted) replica")
t.Error("Incorrect (or sorted) replica")
}
isr, err = client.InSyncReplicas("my_topic", 0)
if err != nil {
t.Error(err)
} else if len(isr) != 2 {
t.Error("Client returned incorrect ISRs for partition:", isr)
} else if isr[0] != 5 {
t.Error("Incorrect (or sorted) ISR:", isr)
} else if isr[1] != 1 {
t.Error("Incorrect (or sorted) ISR:", isr)
}
leader.Close()


@ -72,6 +72,12 @@ type Config struct {
// Defaults to 10 minutes. Set to 0 to disable. Similar to
// `topic.metadata.refresh.interval.ms` in the JVM version.
RefreshFrequency time.Duration
// Whether to maintain a full set of metadata for all topics, or just
// the minimal set that has been necessary so far. The full set is simpler
// and usually more convenient, but can take up a substantial amount of
// memory if you have many topics and partitions. Defaults to true.
Full bool
}
// Producer is the namespace for configuration related to producing messages,
@ -99,7 +105,10 @@ type Config struct {
Partitioner PartitionerConstructor
// Return specifies what channels will be populated. If they are set to true,
// you must read from the respective channels to prevent deadlock.
// you must read from the respective channels to prevent deadlock. If,
// however, this config is used to create a `SyncProducer`, both must be set
// to true and you shall not read from the channels since the producer does
// this internally.
Return struct {
// If enabled, successfully delivered messages will be returned on the
// Successes channel (default disabled).
@ -187,11 +196,23 @@ type Config struct {
// Equivalent to the JVM's `fetch.wait.max.ms`.
MaxWaitTime time.Duration
// The maximum amount of time the consumer expects a message takes to process
// for the user. If writing to the Messages channel takes longer than this,
// that partition will stop fetching more messages until it can proceed again.
// The maximum amount of time the consumer expects a message takes to
// process for the user. If writing to the Messages channel takes longer
// than this, that partition will stop fetching more messages until it
// can proceed again.
// Note that, since the Messages channel is buffered, the actual grace time is
// (MaxProcessingTime * ChanneBufferSize). Defaults to 100ms.
// If a message is not written to the Messages channel between two ticks
// of the expiryTicker then a timeout is detected.
// Using a ticker instead of a timer to detect timeouts should typically
// result in many fewer calls to Timer functions which may result in a
// significant performance improvement if many messages are being sent
// and timeouts are infrequent.
// The disadvantage of using a ticker instead of a timer is that
// timeouts will be less accurate. That is, the effective timeout could
// be between `MaxProcessingTime` and `2 * MaxProcessingTime`. For
// example, if `MaxProcessingTime` is 100ms then a delay of 180ms
// between two messages being sent may not be recognized as a timeout.
MaxProcessingTime time.Duration
// Return specifies what channels will be populated. If they are set to true,
@ -260,6 +281,7 @@ func NewConfig() *Config {
c.Metadata.Retry.Max = 3
c.Metadata.Retry.Backoff = 250 * time.Millisecond
c.Metadata.RefreshFrequency = 10 * time.Minute
c.Metadata.Full = true
c.Producer.MaxMessageBytes = 1000000
c.Producer.RequiredAcks = WaitForLocal
@ -310,6 +332,9 @@ func (c *Config) Validate() error {
if c.Producer.Flush.Bytes >= int(MaxRequestSize) {
Logger.Println("Producer.Flush.Bytes must be smaller than MaxRequestSize; it will be ignored.")
}
if (c.Producer.Flush.Bytes > 0 || c.Producer.Flush.Messages > 0) && c.Producer.Flush.Frequency == 0 {
Logger.Println("Producer.Flush: Bytes or Messages are set, but Frequency is not; messages may not get flushed.")
}
if c.Producer.Timeout%time.Millisecond != 0 {
Logger.Println("Producer.Timeout only supports millisecond resolution; nanoseconds will be truncated.")
}
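
For illustration only (not part of this diff), a sketch combining the two options documented above; the values are placeholders, not recommendations:

package main

import (
	"fmt"
	"time"

	"github.com/Shopify/sarama"
)

func main() {
	config := sarama.NewConfig()

	// Keep only the metadata that has actually been requested instead of the
	// full cluster metadata (the new option defaults to true).
	config.Metadata.Full = false

	// Grace period for a slow reader of the Messages channel. Because the
	// consumer now uses a Ticker, the effective timeout falls between this
	// value and twice this value.
	config.Consumer.MaxProcessingTime = 250 * time.Millisecond

	fmt.Println(config.Validate())
}
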


@ -10,11 +10,12 @@ import (
// ConsumerMessage encapsulates a Kafka message returned by the consumer.
type ConsumerMessage struct {
Key, Value []byte
Topic string
Partition int32
Offset int64
Timestamp time.Time // only set if kafka is version 0.10+
Key, Value []byte
Topic string
Partition int32
Offset int64
Timestamp time.Time // only set if kafka is version 0.10+, inner message timestamp
BlockTimestamp time.Time // only set if kafka is version 0.10+, outer (compressed) block timestamp
}
// ConsumerError is what is provided to the user when an error occurs.
@ -246,9 +247,9 @@ func (c *consumer) abandonBrokerConsumer(brokerWorker *brokerConsumer) {
// PartitionConsumer
// PartitionConsumer processes Kafka messages from a given topic and partition. You MUST call Close()
// or AsyncClose() on a PartitionConsumer to avoid leaks, it will not be garbage-collected automatically
// when it passes out of scope.
// PartitionConsumer processes Kafka messages from a given topic and partition. You MUST call one of Close() or
// AsyncClose() on a PartitionConsumer to avoid leaks; it will not be garbage-collected automatically when it passes out
// of scope.
//
// The simplest way of using a PartitionConsumer is to loop over its Messages channel using a for/range
// loop. The PartitionConsumer will only stop itself in one case: when the offset being consumed is reported
@ -257,19 +258,25 @@ func (c *consumer) abandonBrokerConsumer(brokerWorker *brokerConsumer) {
// By default, it logs these errors to sarama.Logger; if you want to be notified directly of all errors, set
// your config's Consumer.Return.Errors to true and read from the Errors channel, using a select statement
// or a separate goroutine. Check out the Consumer examples to see implementations of these different approaches.
//
// To terminate such a for/range loop while the loop is executing, call AsyncClose. This will kick off the process of
// consumer tear-down & return imediately. Continue to loop, servicing the Messages channel until the teardown process
// AsyncClose initiated closes it (thus terminating the for/range loop). If you've already ceased reading Messages, call
// Close; this will signal the PartitionConsumer's goroutines to begin shutting down (just like AsyncClose), but will
// also drain the Messages channel, harvest all errors & return them once cleanup has completed.
type PartitionConsumer interface {
// AsyncClose initiates a shutdown of the PartitionConsumer. This method will
// return immediately, after which you should wait until the 'messages' and
// 'errors' channel are drained. It is required to call this function, or
// Close before a consumer object passes out of scope, as it will otherwise
// leak memory. You must call this before calling Close on the underlying client.
// AsyncClose initiates a shutdown of the PartitionConsumer. This method will return immediately, after which you
// should continue to service the 'Messages' and 'Errors' channels until they are empty. It is required to call this
// function, or Close before a consumer object passes out of scope, as it will otherwise leak memory. You must call
// this before calling Close on the underlying client.
AsyncClose()
// Close stops the PartitionConsumer from fetching messages. It is required to
// call this function (or AsyncClose) before a consumer object passes out of
// scope, as it will otherwise leak memory. You must call this before calling
// Close on the underlying client.
// Close stops the PartitionConsumer from fetching messages. It will initiate a shutdown just like AsyncClose, drain
// the Messages channel, harvest any errors & return them to the caller. Note that if you are continuing to service
// the Messages channel when this function is called, you will be competing with Close for messages; consider
// calling AsyncClose, instead. It is required to call this function (or AsyncClose) before a consumer object passes
// out of scope, as it will otherwise leak memory. You must call this before calling Close on the underlying client.
Close() error
// Messages returns the read channel for the messages that are returned by
@ -289,10 +296,11 @@ type PartitionConsumer interface {
}
type partitionConsumer struct {
consumer *consumer
conf *Config
topic string
partition int32
highWaterMarkOffset int64 // must be at the top of the struct because https://golang.org/pkg/sync/atomic/#pkg-note-BUG
consumer *consumer
conf *Config
topic string
partition int32
broker *brokerConsumer
messages chan *ConsumerMessage
@ -302,9 +310,8 @@ type partitionConsumer struct {
trigger, dying chan none
responseResult error
fetchSize int32
offset int64
highWaterMarkOffset int64
fetchSize int32
offset int64
}
var errTimedOut = errors.New("timed out feeding messages to the user") // not user-facing
@ -433,35 +440,37 @@ func (child *partitionConsumer) HighWaterMarkOffset() int64 {
func (child *partitionConsumer) responseFeeder() {
var msgs []*ConsumerMessage
expiryTimer := time.NewTimer(child.conf.Consumer.MaxProcessingTime)
expireTimedOut := false
msgSent := false
feederLoop:
for response := range child.feeder {
msgs, child.responseResult = child.parseResponse(response)
expiryTicker := time.NewTicker(child.conf.Consumer.MaxProcessingTime)
for i, msg := range msgs {
if !expiryTimer.Stop() && !expireTimedOut {
// expiryTimer was expired; clear out the waiting msg
<-expiryTimer.C
}
expiryTimer.Reset(child.conf.Consumer.MaxProcessingTime)
expireTimedOut = false
messageSelect:
select {
case child.messages <- msg:
case <-expiryTimer.C:
expireTimedOut = true
child.responseResult = errTimedOut
child.broker.acks.Done()
for _, msg = range msgs[i:] {
child.messages <- msg
msgSent = true
case <-expiryTicker.C:
if !msgSent {
child.responseResult = errTimedOut
child.broker.acks.Done()
for _, msg = range msgs[i:] {
child.messages <- msg
}
child.broker.input <- child
continue feederLoop
} else {
// current message has not been sent, return to select
// statement
msgSent = false
goto messageSelect
}
child.broker.input <- child
continue feederLoop
}
}
expiryTicker.Stop()
child.broker.acks.Done()
}
@ -520,12 +529,13 @@ func (child *partitionConsumer) parseResponse(response *FetchResponse) ([]*Consu
if offset >= child.offset {
messages = append(messages, &ConsumerMessage{
Topic: child.topic,
Partition: child.partition,
Key: msg.Msg.Key,
Value: msg.Msg.Value,
Offset: offset,
Timestamp: msg.Msg.Timestamp,
Topic: child.topic,
Partition: child.partition,
Key: msg.Msg.Key,
Value: msg.Msg.Value,
Offset: offset,
Timestamp: msg.Msg.Timestamp,
BlockTimestamp: msgBlock.Msg.Timestamp,
})
child.offset = offset + 1
} else {
@ -726,6 +736,10 @@ func (bc *brokerConsumer) fetchNewMessages() (*FetchResponse, error) {
if bc.consumer.conf.Version.IsAtLeast(V0_10_0_0) {
request.Version = 2
}
if bc.consumer.conf.Version.IsAtLeast(V0_10_1_0) {
request.Version = 3
request.MaxBytes = MaxResponseSize
}
for child := range bc.subscriptions {
request.AddBlock(child.topic, child.partition, child.offset, child.fetchSize)
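
For illustration only (not part of this diff), a sketch of the consume loop described by the PartitionConsumer and ConsumerMessage doc-comment changes above; the broker address and topic are placeholders, and Version is set so the new BlockTimestamp field is populated:

package main

import (
	"log"

	"github.com/Shopify/sarama"
)

func main() {
	config := sarama.NewConfig()
	config.Version = sarama.V0_10_0_0

	master, err := sarama.NewConsumer([]string{"localhost:9092"}, config)
	if err != nil {
		log.Fatalln(err)
	}
	defer master.Close()

	pc, err := master.ConsumePartition("my_topic", 0, sarama.OffsetOldest)
	if err != nil {
		log.Fatalln(err)
	}
	defer pc.Close() // or AsyncClose plus draining, as discussed above

	// Loops until the Messages channel is closed.
	for msg := range pc.Messages() {
		// Timestamp is the inner message timestamp; BlockTimestamp is the
		// outer (compressed) block timestamp added in this change.
		log.Println(msg.Offset, msg.Timestamp, msg.BlockTimestamp)
	}
}
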


@ -803,6 +803,48 @@ func TestConsumerOffsetOutOfRange(t *testing.T) {
broker0.Close()
}
func TestConsumerExpiryTicker(t *testing.T) {
// Given
broker0 := NewMockBroker(t, 0)
fetchResponse1 := &FetchResponse{}
for i := 1; i <= 8; i++ {
fetchResponse1.AddMessage("my_topic", 0, nil, testMsg, int64(i))
}
broker0.SetHandlerByMap(map[string]MockResponse{
"MetadataRequest": NewMockMetadataResponse(t).
SetBroker(broker0.Addr(), broker0.BrokerID()).
SetLeader("my_topic", 0, broker0.BrokerID()),
"OffsetRequest": NewMockOffsetResponse(t).
SetOffset("my_topic", 0, OffsetNewest, 1234).
SetOffset("my_topic", 0, OffsetOldest, 1),
"FetchRequest": NewMockSequence(fetchResponse1),
})
config := NewConfig()
config.ChannelBufferSize = 0
config.Consumer.MaxProcessingTime = 10 * time.Millisecond
master, err := NewConsumer([]string{broker0.Addr()}, config)
if err != nil {
t.Fatal(err)
}
// When
consumer, err := master.ConsumePartition("my_topic", 0, 1)
if err != nil {
t.Fatal(err)
}
// Then: messages with offsets 1 through 8 are read
for i := 1; i <= 8; i++ {
assertMessageOffset(t, <-consumer.Messages(), int64(i))
time.Sleep(2 * time.Millisecond)
}
safeClose(t, consumer)
safeClose(t, master)
broker0.Close()
}
func assertMessageOffset(t *testing.T, msg *ConsumerMessage, expectedOffset int64) {
if msg.Offset != expectedOffset {
t.Errorf("Incorrect message offset: expected=%d, actual=%d", expectedOffset, msg.Offset)


@ -2,8 +2,8 @@ package sarama
import (
"encoding/binary"
"github.com/klauspost/crc32"
"fmt"
"hash/crc32"
)
// crc32Field implements the pushEncoder and pushDecoder interfaces for calculating CRC32s.
@ -28,8 +28,9 @@ func (c *crc32Field) run(curOffset int, buf []byte) error {
func (c *crc32Field) check(curOffset int, buf []byte) error {
crc := crc32.ChecksumIEEE(buf[c.startOffset+4 : curOffset])
if crc != binary.BigEndian.Uint32(buf[c.startOffset:]) {
return PacketDecodingError{"CRC didn't match"}
expected := binary.BigEndian.Uint32(buf[c.startOffset:])
if crc != expected {
return PacketDecodingError{fmt.Sprintf("CRC didn't match expected %#x got %#x", expected, crc)}
}
return nil


@ -1,7 +1,8 @@
name: sarama
up:
- go: 1.7.3
- go:
version: '1.8'
commands:
test:


@ -108,12 +108,20 @@ const (
ErrUnsupportedSASLMechanism KError = 33
ErrIllegalSASLState KError = 34
ErrUnsupportedVersion KError = 35
ErrTopicAlreadyExists KError = 36
ErrInvalidPartitions KError = 37
ErrInvalidReplicationFactor KError = 38
ErrInvalidReplicaAssignment KError = 39
ErrInvalidConfig KError = 40
ErrNotController KError = 41
ErrInvalidRequest KError = 42
ErrUnsupportedForMessageFormat KError = 43
ErrPolicyViolation KError = 44
)
func (err KError) Error() string {
// Error messages stolen/adapted from
// https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol
// https://kafka.apache.org/protocol#protocol_error_codes
switch err {
case ErrNoError:
return "kafka server: Not an error, why are you printing me?"
@ -189,8 +197,24 @@ func (err KError) Error() string {
return "kafka server: Request is not valid given the current SASL state."
case ErrUnsupportedVersion:
return "kafka server: The version of API is not supported."
case ErrTopicAlreadyExists:
return "kafka server: Topic with this name already exists."
case ErrInvalidPartitions:
return "kafka server: Number of partitions is invalid."
case ErrInvalidReplicationFactor:
return "kafka server: Replication-factor is invalid."
case ErrInvalidReplicaAssignment:
return "kafka server: Replica assignment is invalid."
case ErrInvalidConfig:
return "kafka server: Configuration is invalid."
case ErrNotController:
return "kafka server: This is not the correct controller for this cluster."
case ErrInvalidRequest:
return "kafka server: This most likely occurs because of a request being malformed by the client library or the message was sent to an incompatible broker. See the broker logs for more details."
case ErrUnsupportedForMessageFormat:
return "kafka server: The requested operation is not supported by the message format version."
case ErrPolicyViolation:
return "kafka server: Request parameters do not satisfy the configured policy."
}
return fmt.Sprintf("Unknown error, how did this happen? Error code = %d", err)

vendor/github.com/Shopify/sarama/examples/README.md (new file, generated, vendored)

@ -0,0 +1,9 @@
# Sarama examples
This folder contains example applications to demonstrate the use of Sarama. For code snippet examples on how to use the different types in Sarama, see [Sarama's API documentation on godoc.org](https://godoc.org/github.com/Shopify/sarama)
In these examples, we use `github.com/Shopify/sarama` as import path. We do this to ensure all the examples are up to date with the latest changes in Sarama. For your own applications, you may want to use `gopkg.in/Shopify/sarama.v1` to lock into a stable API version.
#### HTTP server
[http_server](./http_server) is a simple HTTP server that uses the sync producer to produce data as part of the request handling cycle, as well as the async producer to maintain an access log. It also uses the [mocks subpackage](https://godoc.org/github.com/Shopify/sarama/mocks) to test both.


@ -0,0 +1,2 @@
http_server
http_server.test


@ -0,0 +1,7 @@
# HTTP server example
This HTTP server example shows you how to use the AsyncProducer and SyncProducer, and how to test them using mocks. The server simply sends the data of the HTTP request's query string to Kafka, and sends a 200 result if that succeeds. For every request, it will also send an access log entry to Kafka in the background.
If you need to know whether a message was successfully sent to the Kafka cluster before you can send your HTTP response, using the `SyncProducer` is probably the simplest way to achieve this. If you don't care, e.g. for the access log, using the `AsyncProducer` lets you fire and forget: you can send the HTTP response while the message is being produced in the background.
One important thing to note is that both the `SyncProducer` and `AsyncProducer` are **thread-safe**. Go's `http.Server` handles requests concurrently in different goroutines, but you can use a single producer safely. This will actually achieve efficiency gains as the producer will be able to batch messages from concurrent requests together.


@ -0,0 +1,247 @@
package main
import (
"github.com/Shopify/sarama"
"crypto/tls"
"crypto/x509"
"encoding/json"
"flag"
"fmt"
"io/ioutil"
"log"
"net/http"
"os"
"strings"
"time"
)
var (
addr = flag.String("addr", ":8080", "The address to bind to")
brokers = flag.String("brokers", os.Getenv("KAFKA_PEERS"), "The Kafka brokers to connect to, as a comma separated list")
verbose = flag.Bool("verbose", false, "Turn on Sarama logging")
certFile = flag.String("certificate", "", "The optional certificate file for client authentication")
keyFile = flag.String("key", "", "The optional key file for client authentication")
caFile = flag.String("ca", "", "The optional certificate authority file for TLS client authentication")
verifySsl = flag.Bool("verify", false, "Optional verify ssl certificates chain")
)
func main() {
flag.Parse()
if *verbose {
sarama.Logger = log.New(os.Stdout, "[sarama] ", log.LstdFlags)
}
if *brokers == "" {
flag.PrintDefaults()
os.Exit(1)
}
brokerList := strings.Split(*brokers, ",")
log.Printf("Kafka brokers: %s", strings.Join(brokerList, ", "))
server := &Server{
DataCollector: newDataCollector(brokerList),
AccessLogProducer: newAccessLogProducer(brokerList),
}
defer func() {
if err := server.Close(); err != nil {
log.Println("Failed to close server", err)
}
}()
log.Fatal(server.Run(*addr))
}
func createTlsConfiguration() (t *tls.Config) {
if *certFile != "" && *keyFile != "" && *caFile != "" {
cert, err := tls.LoadX509KeyPair(*certFile, *keyFile)
if err != nil {
log.Fatal(err)
}
caCert, err := ioutil.ReadFile(*caFile)
if err != nil {
log.Fatal(err)
}
caCertPool := x509.NewCertPool()
caCertPool.AppendCertsFromPEM(caCert)
t = &tls.Config{
Certificates: []tls.Certificate{cert},
RootCAs: caCertPool,
InsecureSkipVerify: *verifySsl,
}
}
// will be nil by default if nothing is provided
return t
}
type Server struct {
DataCollector sarama.SyncProducer
AccessLogProducer sarama.AsyncProducer
}
func (s *Server) Close() error {
if err := s.DataCollector.Close(); err != nil {
log.Println("Failed to shut down data collector cleanly", err)
}
if err := s.AccessLogProducer.Close(); err != nil {
log.Println("Failed to shut down access log producer cleanly", err)
}
return nil
}
func (s *Server) Handler() http.Handler {
return s.withAccessLog(s.collectQueryStringData())
}
func (s *Server) Run(addr string) error {
httpServer := &http.Server{
Addr: addr,
Handler: s.Handler(),
}
log.Printf("Listening for requests on %s...\n", addr)
return httpServer.ListenAndServe()
}
func (s *Server) collectQueryStringData() http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
if r.URL.Path != "/" {
http.NotFound(w, r)
return
}
// We are not setting a message key, which means that all messages will
// be distributed randomly over the different partitions.
partition, offset, err := s.DataCollector.SendMessage(&sarama.ProducerMessage{
Topic: "important",
Value: sarama.StringEncoder(r.URL.RawQuery),
})
if err != nil {
w.WriteHeader(http.StatusInternalServerError)
fmt.Fprintf(w, "Failed to store your data:, %s", err)
} else {
// The tuple (topic, partition, offset) can be used as a unique identifier
// for a message in a Kafka cluster.
fmt.Fprintf(w, "Your data is stored with unique identifier important/%d/%d", partition, offset)
}
})
}
type accessLogEntry struct {
Method string `json:"method"`
Host string `json:"host"`
Path string `json:"path"`
IP string `json:"ip"`
ResponseTime float64 `json:"response_time"`
encoded []byte
err error
}
func (ale *accessLogEntry) ensureEncoded() {
if ale.encoded == nil && ale.err == nil {
ale.encoded, ale.err = json.Marshal(ale)
}
}
func (ale *accessLogEntry) Length() int {
ale.ensureEncoded()
return len(ale.encoded)
}
func (ale *accessLogEntry) Encode() ([]byte, error) {
ale.ensureEncoded()
return ale.encoded, ale.err
}
func (s *Server) withAccessLog(next http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
started := time.Now()
next.ServeHTTP(w, r)
entry := &accessLogEntry{
Method: r.Method,
Host: r.Host,
Path: r.RequestURI,
IP: r.RemoteAddr,
ResponseTime: float64(time.Since(started)) / float64(time.Second),
}
// We will use the client's IP address as key. This will cause
// all the access log entries of the same IP address to end up
// on the same partition.
s.AccessLogProducer.Input() <- &sarama.ProducerMessage{
Topic: "access_log",
Key: sarama.StringEncoder(r.RemoteAddr),
Value: entry,
}
})
}
func newDataCollector(brokerList []string) sarama.SyncProducer {
// For the data collector, we are looking for strong consistency semantics.
// Because we don't change the flush settings, sarama will try to produce messages
// as fast as possible to keep latency low.
config := sarama.NewConfig()
config.Producer.RequiredAcks = sarama.WaitForAll // Wait for all in-sync replicas to ack the message
config.Producer.Retry.Max = 10 // Retry up to 10 times to produce the message
config.Producer.Return.Successes = true
tlsConfig := createTlsConfiguration()
if tlsConfig != nil {
config.Net.TLS.Config = tlsConfig
config.Net.TLS.Enable = true
}
// On the broker side, you may want to change the following settings to get
// stronger consistency guarantees:
// - For your broker, set `unclean.leader.election.enable` to false
// - For the topic, you could increase `min.insync.replicas`.
producer, err := sarama.NewSyncProducer(brokerList, config)
if err != nil {
log.Fatalln("Failed to start Sarama producer:", err)
}
return producer
}
func newAccessLogProducer(brokerList []string) sarama.AsyncProducer {
// For the access log, we are looking for AP semantics, with high throughput.
// By creating batches of compressed messages, we reduce network I/O at a cost of more latency.
config := sarama.NewConfig()
tlsConfig := createTlsConfiguration()
if tlsConfig != nil {
config.Net.TLS.Enable = true
config.Net.TLS.Config = tlsConfig
}
config.Producer.RequiredAcks = sarama.WaitForLocal // Only wait for the leader to ack
config.Producer.Compression = sarama.CompressionSnappy // Compress messages
config.Producer.Flush.Frequency = 500 * time.Millisecond // Flush batches every 500ms
producer, err := sarama.NewAsyncProducer(brokerList, config)
if err != nil {
log.Fatalln("Failed to start Sarama producer:", err)
}
// We will just log to STDOUT if we're not able to produce messages.
// Note: messages will only be returned here after all retry attempts are exhausted.
go func() {
for err := range producer.Errors() {
log.Println("Failed to write access log entry:", err)
}
}()
return producer
}


@ -0,0 +1,109 @@
package main
import (
"io"
"net/http"
"net/http/httptest"
"testing"
"github.com/Shopify/sarama"
"github.com/Shopify/sarama/mocks"
)
// In normal operation, we expect one access log entry,
// and one data collector entry. Let's assume both will succeed.
// We should return a HTTP 200 status.
func TestCollectSuccessfully(t *testing.T) {
dataCollectorMock := mocks.NewSyncProducer(t, nil)
dataCollectorMock.ExpectSendMessageAndSucceed()
accessLogProducerMock := mocks.NewAsyncProducer(t, nil)
accessLogProducerMock.ExpectInputAndSucceed()
// Now, use dependency injection to use the mocks.
s := &Server{
DataCollector: dataCollectorMock,
AccessLogProducer: accessLogProducerMock,
}
// The Server's Close call is important; it will call Close on
// the two mock producers, which will then validate whether all
// expectations are resolved.
defer safeClose(t, s)
req, err := http.NewRequest("GET", "http://example.com/?data", nil)
if err != nil {
t.Fatal(err)
}
res := httptest.NewRecorder()
s.Handler().ServeHTTP(res, req)
if res.Code != 200 {
t.Errorf("Expected HTTP status 200, found %d", res.Code)
}
if string(res.Body.Bytes()) != "Your data is stored with unique identifier important/0/1" {
t.Error("Unexpected response body", res.Body)
}
}
// Now, let's see if we handle the case of not being able to produce
// to the data collector properly. In this case we should return a 500 status.
func TestCollectionFailure(t *testing.T) {
dataCollectorMock := mocks.NewSyncProducer(t, nil)
dataCollectorMock.ExpectSendMessageAndFail(sarama.ErrRequestTimedOut)
accessLogProducerMock := mocks.NewAsyncProducer(t, nil)
accessLogProducerMock.ExpectInputAndSucceed()
s := &Server{
DataCollector: dataCollectorMock,
AccessLogProducer: accessLogProducerMock,
}
defer safeClose(t, s)
req, err := http.NewRequest("GET", "http://example.com/?data", nil)
if err != nil {
t.Fatal(err)
}
res := httptest.NewRecorder()
s.Handler().ServeHTTP(res, req)
if res.Code != 500 {
t.Errorf("Expected HTTP status 500, found %d", res.Code)
}
}
// We don't expect any data collector calls because the path is wrong,
// so we are not setting any expectations on the dataCollectorMock. It
// will still generate an access log entry though.
func TestWrongPath(t *testing.T) {
dataCollectorMock := mocks.NewSyncProducer(t, nil)
accessLogProducerMock := mocks.NewAsyncProducer(t, nil)
accessLogProducerMock.ExpectInputAndSucceed()
s := &Server{
DataCollector: dataCollectorMock,
AccessLogProducer: accessLogProducerMock,
}
defer safeClose(t, s)
req, err := http.NewRequest("GET", "http://example.com/wrong?data", nil)
if err != nil {
t.Fatal(err)
}
res := httptest.NewRecorder()
s.Handler().ServeHTTP(res, req)
if res.Code != 404 {
t.Errorf("Expected HTTP status 404, found %d", res.Code)
}
}
func safeClose(t *testing.T, o io.Closer) {
if err := o.Close(); err != nil {
t.Error(err)
}
}


@ -21,9 +21,13 @@ func (b *fetchRequestBlock) decode(pd packetDecoder) (err error) {
return nil
}
// FetchRequest (API key 1) will fetch Kafka messages. Version 3 introduced the MaxBytes field. See
// https://issues.apache.org/jira/browse/KAFKA-2063 for a discussion of the issues leading up to that. The KIP is at
// https://cwiki.apache.org/confluence/display/KAFKA/KIP-74%3A+Add+Fetch+Response+Size+Limit+in+Bytes
type FetchRequest struct {
MaxWaitTime int32
MinBytes int32
MaxBytes int32
Version int16
blocks map[string]map[int32]*fetchRequestBlock
}
@ -32,6 +36,9 @@ func (r *FetchRequest) encode(pe packetEncoder) (err error) {
pe.putInt32(-1) // replica ID is always -1 for clients
pe.putInt32(r.MaxWaitTime)
pe.putInt32(r.MinBytes)
if r.Version == 3 {
pe.putInt32(r.MaxBytes)
}
err = pe.putArrayLength(len(r.blocks))
if err != nil {
return err
@ -67,6 +74,11 @@ func (r *FetchRequest) decode(pd packetDecoder, version int16) (err error) {
if r.MinBytes, err = pd.getInt32(); err != nil {
return err
}
if r.Version == 3 {
if r.MaxBytes, err = pd.getInt32(); err != nil {
return err
}
}
topicCount, err := pd.getArrayLength()
if err != nil {
return err
@ -114,6 +126,8 @@ func (r *FetchRequest) requiredVersion() KafkaVersion {
return V0_9_0_0
case 2:
return V0_10_0_0
case 3:
return V0_10_1_0
default:
return minVersion
}


@ -1,11 +1,36 @@
package sarama
type GroupProtocol struct {
Name string
Metadata []byte
}
func (p *GroupProtocol) decode(pd packetDecoder) (err error) {
p.Name, err = pd.getString()
if err != nil {
return err
}
p.Metadata, err = pd.getBytes()
return err
}
func (p *GroupProtocol) encode(pe packetEncoder) (err error) {
if err := pe.putString(p.Name); err != nil {
return err
}
if err := pe.putBytes(p.Metadata); err != nil {
return err
}
return nil
}
type JoinGroupRequest struct {
GroupId string
SessionTimeout int32
MemberId string
ProtocolType string
GroupProtocols map[string][]byte
GroupId string
SessionTimeout int32
MemberId string
ProtocolType string
GroupProtocols map[string][]byte // deprecated; use OrderedGroupProtocols
OrderedGroupProtocols []*GroupProtocol
}
func (r *JoinGroupRequest) encode(pe packetEncoder) error {
@ -20,16 +45,31 @@ func (r *JoinGroupRequest) encode(pe packetEncoder) error {
return err
}
if err := pe.putArrayLength(len(r.GroupProtocols)); err != nil {
return err
}
for name, metadata := range r.GroupProtocols {
if err := pe.putString(name); err != nil {
if len(r.GroupProtocols) > 0 {
if len(r.OrderedGroupProtocols) > 0 {
return PacketDecodingError{"cannot specify both GroupProtocols and OrderedGroupProtocols on JoinGroupRequest"}
}
if err := pe.putArrayLength(len(r.GroupProtocols)); err != nil {
return err
}
if err := pe.putBytes(metadata); err != nil {
for name, metadata := range r.GroupProtocols {
if err := pe.putString(name); err != nil {
return err
}
if err := pe.putBytes(metadata); err != nil {
return err
}
}
} else {
if err := pe.putArrayLength(len(r.OrderedGroupProtocols)); err != nil {
return err
}
for _, protocol := range r.OrderedGroupProtocols {
if err := protocol.encode(pe); err != nil {
return err
}
}
}
return nil
@ -62,16 +102,12 @@ func (r *JoinGroupRequest) decode(pd packetDecoder, version int16) (err error) {
r.GroupProtocols = make(map[string][]byte)
for i := 0; i < n; i++ {
name, err := pd.getString()
if err != nil {
protocol := &GroupProtocol{}
if err := protocol.decode(pd); err != nil {
return err
}
metadata, err := pd.getBytes()
if err != nil {
return err
}
r.GroupProtocols[name] = metadata
r.GroupProtocols[protocol.Name] = protocol.Metadata
r.OrderedGroupProtocols = append(r.OrderedGroupProtocols, protocol)
}
return nil
@ -90,11 +126,10 @@ func (r *JoinGroupRequest) requiredVersion() KafkaVersion {
}
func (r *JoinGroupRequest) AddGroupProtocol(name string, metadata []byte) {
if r.GroupProtocols == nil {
r.GroupProtocols = make(map[string][]byte)
}
r.GroupProtocols[name] = metadata
r.OrderedGroupProtocols = append(r.OrderedGroupProtocols, &GroupProtocol{
Name: name,
Metadata: metadata,
})
}
func (r *JoinGroupRequest) AddGroupProtocolMetadata(name string, metadata *ConsumerGroupMemberMetadata) error {


@ -23,19 +23,35 @@ var (
)
func TestJoinGroupRequest(t *testing.T) {
var request *JoinGroupRequest
request = new(JoinGroupRequest)
request := new(JoinGroupRequest)
request.GroupId = "TestGroup"
request.SessionTimeout = 100
request.ProtocolType = "consumer"
testRequest(t, "no protocols", request, joinGroupRequestNoProtocols)
}
request = new(JoinGroupRequest)
func TestJoinGroupRequestOneProtocol(t *testing.T) {
request := new(JoinGroupRequest)
request.GroupId = "TestGroup"
request.SessionTimeout = 100
request.MemberId = "OneProtocol"
request.ProtocolType = "consumer"
request.AddGroupProtocol("one", []byte{0x01, 0x02, 0x03})
testRequest(t, "one protocol", request, joinGroupRequestOneProtocol)
packet := testRequestEncode(t, "one protocol", request, joinGroupRequestOneProtocol)
request.GroupProtocols = make(map[string][]byte)
request.GroupProtocols["one"] = []byte{0x01, 0x02, 0x03}
testRequestDecode(t, "one protocol", request, packet)
}
func TestJoinGroupRequestDeprecatedEncode(t *testing.T) {
request := new(JoinGroupRequest)
request.GroupId = "TestGroup"
request.SessionTimeout = 100
request.MemberId = "OneProtocol"
request.ProtocolType = "consumer"
request.GroupProtocols = make(map[string][]byte)
request.GroupProtocols["one"] = []byte{0x01, 0x02, 0x03}
packet := testRequestEncode(t, "one protocol", request, joinGroupRequestOneProtocol)
request.AddGroupProtocol("one", []byte{0x01, 0x02, 0x03})
testRequestDecode(t, "one protocol", request, packet)
}


@ -45,7 +45,15 @@ func (m *Message) encode(pe packetEncoder) error {
pe.putInt8(attributes)
if m.Version >= 1 {
pe.putInt64(m.Timestamp.UnixNano() / int64(time.Millisecond))
timestamp := int64(-1)
if !m.Timestamp.Before(time.Unix(0, 0)) {
timestamp = m.Timestamp.UnixNano() / int64(time.Millisecond)
} else if !m.Timestamp.IsZero() {
return PacketEncodingError{fmt.Sprintf("invalid timestamp (%v)", m.Timestamp)}
}
pe.putInt64(timestamp)
}
err := pe.putBytes(m.Key)
@ -114,18 +122,30 @@ func (m *Message) decode(pd packetDecoder) (err error) {
return err
}
if m.Version > 1 {
return PacketDecodingError{fmt.Sprintf("unknown magic byte (%v)", m.Version)}
}
attribute, err := pd.getInt8()
if err != nil {
return err
}
m.Codec = CompressionCodec(attribute & compressionCodecMask)
if m.Version >= 1 {
if m.Version == 1 {
millis, err := pd.getInt64()
if err != nil {
return err
}
m.Timestamp = time.Unix(millis/1000, (millis%1000)*int64(time.Millisecond))
// negative timestamps are invalid, in these cases we should return
// a zero time
timestamp := time.Time{}
if millis >= 0 {
timestamp = time.Unix(millis/1000, (millis%1000)*int64(time.Millisecond))
}
m.Timestamp = timestamp
}
m.Key, err = pd.getBytes()

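The two hunks above change v1 timestamp handling: on encode, an unset (zero) timestamp is written as -1 and any other pre-epoch timestamp is rejected; on decode, a negative wire value maps back to a zero time. A standalone sketch of that round trip, using only the standard library (the helpers below are illustrative, not part of the package):

```go
package main

import (
	"fmt"
	"time"
)

// encodeTimestamp mirrors the v1 encode rule from the diff above:
// a zero time becomes -1, any other pre-epoch time is an error.
func encodeTimestamp(t time.Time) (int64, error) {
	if !t.Before(time.Unix(0, 0)) {
		return t.UnixNano() / int64(time.Millisecond), nil
	}
	if t.IsZero() {
		return -1, nil
	}
	return 0, fmt.Errorf("invalid timestamp (%v)", t)
}

// decodeTimestamp mirrors the v1 decode rule: negative millis map to the zero time.
func decodeTimestamp(millis int64) time.Time {
	if millis < 0 {
		return time.Time{}
	}
	return time.Unix(millis/1000, (millis%1000)*int64(time.Millisecond))
}

func main() {
	ms, _ := encodeTimestamp(time.Time{})
	fmt.Println(ms)                  // -1 for an unset timestamp
	fmt.Println(decodeTimestamp(-1)) // round-trips back to the zero time
}
```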
View File

@ -1,6 +1,8 @@
package sarama
import (
"runtime"
"strings"
"testing"
"time"
)
@ -13,6 +15,21 @@ var (
0xFF, 0xFF, 0xFF, 0xFF, // key
0xFF, 0xFF, 0xFF, 0xFF} // value
emptyV1Message = []byte{
204, 47, 121, 217, // CRC
0x01, // magic version byte
0x00, // attribute flags
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // timestamp
0xFF, 0xFF, 0xFF, 0xFF, // key
0xFF, 0xFF, 0xFF, 0xFF} // value
emptyV2Message = []byte{
167, 236, 104, 3, // CRC
0x02, // magic version byte
0x00, // attribute flags
0xFF, 0xFF, 0xFF, 0xFF, // key
0xFF, 0xFF, 0xFF, 0xFF} // value
emptyGzipMessage = []byte{
97, 79, 149, 90, //CRC
0x00, // magic version byte
@ -24,6 +41,17 @@ var (
0x08,
0, 0, 9, 110, 136, 0, 255, 1, 0, 0, 255, 255, 0, 0, 0, 0, 0, 0, 0, 0}
emptyGzipMessage18 = []byte{
132, 99, 80, 148, //CRC
0x00, // magic version byte
0x01, // attribute flags
0xFF, 0xFF, 0xFF, 0xFF, // key
// value
0x00, 0x00, 0x00, 0x17,
0x1f, 0x8b,
0x08,
0, 0, 0, 0, 0, 0, 255, 1, 0, 0, 255, 255, 0, 0, 0, 0, 0, 0, 0, 0}
emptyLZ4Message = []byte{
132, 219, 238, 101, // CRC
0x01, // version byte
@ -79,7 +107,11 @@ func TestMessageEncoding(t *testing.T) {
message.Value = []byte{}
message.Codec = CompressionGZIP
testEncodable(t, "empty gzip", &message, emptyGzipMessage)
if strings.HasPrefix(runtime.Version(), "go1.8") || strings.HasPrefix(runtime.Version(), "go1.9") {
testEncodable(t, "empty gzip", &message, emptyGzipMessage18)
} else {
testEncodable(t, "empty gzip", &message, emptyGzipMessage)
}
message.Value = []byte{}
message.Codec = CompressionLZ4
@ -163,3 +195,19 @@ func TestMessageDecodingBulkLZ4(t *testing.T) {
t.Errorf("Decoding produced a set with %d messages, but 2 were expected.", len(message.Set.Messages))
}
}
func TestMessageDecodingVersion1(t *testing.T) {
message := Message{Version: 1}
testDecodable(t, "decoding empty v1 message", &message, emptyV1Message)
}
func TestMessageDecodingUnknownVersions(t *testing.T) {
message := Message{Version: 2}
err := decode(emptyV2Message, &message)
if err == nil {
t.Error("Decoding did not produce an error for an unknown magic byte")
}
if err.Error() != "kafka: error decoding packet: unknown magic byte (2)" {
t.Error("Decoding an unknown magic byte produced an unknown error ", err)
}
}

View File

@ -180,6 +180,7 @@ type MockFetchResponse struct {
highWaterMarks map[string]map[int32]int64
t TestReporter
batchSize int
version int16
}
func NewMockFetchResponse(t TestReporter, batchSize int) *MockFetchResponse {
@ -191,6 +192,11 @@ func NewMockFetchResponse(t TestReporter, batchSize int) *MockFetchResponse {
}
}
func (mfr *MockFetchResponse) SetVersion(version int16) *MockFetchResponse {
mfr.version = version
return mfr
}
func (mfr *MockFetchResponse) SetMessage(topic string, partition int32, offset int64, msg Encoder) *MockFetchResponse {
partitions := mfr.messages[topic]
if partitions == nil {
@ -218,7 +224,9 @@ func (mfr *MockFetchResponse) SetHighWaterMark(topic string, partition int32, of
func (mfr *MockFetchResponse) For(reqBody versionedDecoder) encoder {
fetchRequest := reqBody.(*FetchRequest)
res := &FetchResponse{}
res := &FetchResponse{
Version: mfr.version,
}
for topic, partitions := range fetchRequest.blocks {
for partition, block := range partitions {
initialOffset := block.fetchOffset

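The new SetVersion setter lets tests control which FetchResponse version the mock answers with. A hedged sketch of how a test might wire it up, using the exported helpers shown above (handing the response to a MockBroker is only hinted at in a comment):

```go
package sarama_test

import (
	"testing"

	"github.com/Shopify/sarama"
)

// Sketch: configuring the mock fetch response to answer with a specific
// FetchResponse version. Only SetVersion is new in this diff; the other
// calls are the pre-existing helpers shown above.
func TestMockFetchResponseVersionSketch(t *testing.T) {
	res := sarama.NewMockFetchResponse(t, 1).
		SetVersion(2). // responses built by For() now carry Version: 2
		SetMessage("my_topic", 0, 123, sarama.StringEncoder("hello"))
	_ = res // typically handed to a MockBroker, e.g. via SetHandlerByMap
}
```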
View File

@ -20,7 +20,7 @@ type Consumer struct {
// NewConsumer returns a new mock Consumer instance. The t argument should
// be the *testing.T instance of your test method. An error will be written to it if
// an expectation is violated. The config argument is currently unused and can be set to nil.
// an expectation is violated. The config argument can be set to nil.
func NewConsumer(t ErrorReporter, config *sarama.Config) *Consumer {
if config == nil {
config = sarama.NewConfig()
@ -178,6 +178,7 @@ func (c *Consumer) ExpectConsumePartition(topic string, partition int32, offset
// Errors and Messages channel, you should specify what values will be provided on these
// channels using YieldMessage and YieldError.
type PartitionConsumer struct {
highWaterMarkOffset int64 // must be at the top of the struct because https://golang.org/pkg/sync/atomic/#pkg-note-BUG
l sync.Mutex
t ErrorReporter
topic string
@ -189,7 +190,6 @@ type PartitionConsumer struct {
consumed bool
errorsShouldBeDrained bool
messagesShouldBeDrained bool
highWaterMarkOffset int64
}
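Moving highWaterMarkOffset to the top of the struct follows the sync/atomic note it links: 64-bit atomic operations need 64-bit alignment, and on 32-bit platforms only the first word of an allocated struct is guaranteed to have it. A small, self-contained illustration of the same pattern (not part of the mocks package):

```go
package main

import (
	"fmt"
	"sync/atomic"
)

// counterHolder keeps its int64 first so atomic operations on it are safe
// on 32-bit platforms, mirroring the highWaterMarkOffset placement above.
type counterHolder struct {
	counter int64 // 64-bit field first: guaranteed 8-byte alignment
	name    string
}

func main() {
	h := &counterHolder{name: "example"}
	atomic.AddInt64(&h.counter, 1)
	fmt.Println(atomic.LoadInt64(&h.counter))
}
```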
///////////////////////////////////////////////////

View File

@ -151,6 +151,13 @@ type PartitionOffsetManager interface {
// message twice, and your processing should ideally be idempotent.
MarkOffset(offset int64, metadata string)
// ResetOffset resets to the provided offset, alongside a metadata string that
// represents the state of the partition consumer at that point in time. Reset
// acts as a counterpart to MarkOffset, the difference being that it allows
// resetting an offset to an earlier or smaller value, where MarkOffset only
// allows incrementing the offset. See MarkOffset for more details.
ResetOffset(offset int64, metadata string)
// Errors returns a read channel of errors that occur during offset management, if
// enabled. By default, errors are logged and not returned over this channel. If
// you want to implement any custom error handling, set your config's
@ -329,6 +336,17 @@ func (pom *partitionOffsetManager) MarkOffset(offset int64, metadata string) {
}
}
func (pom *partitionOffsetManager) ResetOffset(offset int64, metadata string) {
pom.lock.Lock()
defer pom.lock.Unlock()
if offset <= pom.offset {
pom.offset = offset
pom.metadata = metadata
pom.dirty = true
}
}
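A short usage sketch of the new ResetOffset as the counterpart of MarkOffset; constructing the PartitionOffsetManager (via OffsetManager.ManagePartition) is elided and the offsets are illustrative:

```go
package main

import "github.com/Shopify/sarama"

// rewind illustrates ResetOffset as the counterpart of MarkOffset:
// MarkOffset only moves the tracked offset forward, while ResetOffset
// may move it back to an earlier value.
func rewind(pom sarama.PartitionOffsetManager) {
	pom.MarkOffset(42, "processed batch") // advance as usual
	pom.ResetOffset(10, "replay from 10") // rewind to an earlier offset
	offset, metadata := pom.NextOffset()
	_ = offset   // 10 after the reset, as in the test below
	_ = metadata // "replay from 10"
}

func main() {} // obtaining pom via OffsetManager.ManagePartition is elided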
func (pom *partitionOffsetManager) updateCommitted(offset int64, metadata string) {
pom.lock.Lock()
defer pom.lock.Unlock()

View File

@ -204,6 +204,70 @@ func TestPartitionOffsetManagerNextOffset(t *testing.T) {
safeClose(t, testClient)
}
func TestPartitionOffsetManagerResetOffset(t *testing.T) {
om, testClient, broker, coordinator := initOffsetManager(t)
pom := initPartitionOffsetManager(t, om, coordinator, 5, "original_meta")
ocResponse := new(OffsetCommitResponse)
ocResponse.AddError("my_topic", 0, ErrNoError)
coordinator.Returns(ocResponse)
expected := int64(1)
pom.ResetOffset(expected, "modified_meta")
actual, meta := pom.NextOffset()
if actual != expected {
t.Errorf("Expected offset %v. Actual: %v", expected, actual)
}
if meta != "modified_meta" {
t.Errorf("Expected metadata \"modified_meta\". Actual: %q", meta)
}
safeClose(t, pom)
safeClose(t, om)
safeClose(t, testClient)
broker.Close()
coordinator.Close()
}
func TestPartitionOffsetManagerResetOffsetWithRetention(t *testing.T) {
om, testClient, broker, coordinator := initOffsetManager(t)
testClient.Config().Consumer.Offsets.Retention = time.Hour
pom := initPartitionOffsetManager(t, om, coordinator, 5, "original_meta")
ocResponse := new(OffsetCommitResponse)
ocResponse.AddError("my_topic", 0, ErrNoError)
handler := func(req *request) (res encoder) {
if req.body.version() != 2 {
t.Errorf("Expected to be using version 2. Actual: %v", req.body.version())
}
offsetCommitRequest := req.body.(*OffsetCommitRequest)
if offsetCommitRequest.RetentionTime != (60 * 60 * 1000) {
t.Errorf("Expected an hour retention time. Actual: %v", offsetCommitRequest.RetentionTime)
}
return ocResponse
}
coordinator.setHandler(handler)
expected := int64(1)
pom.ResetOffset(expected, "modified_meta")
actual, meta := pom.NextOffset()
if actual != expected {
t.Errorf("Expected offset %v. Actual: %v", expected, actual)
}
if meta != "modified_meta" {
t.Errorf("Expected metadata \"modified_meta\". Actual: %q", meta)
}
safeClose(t, pom)
safeClose(t, om)
safeClose(t, testClient)
broker.Close()
coordinator.Close()
}
func TestPartitionOffsetManagerMarkOffset(t *testing.T) {
om, testClient, broker, coordinator := initOffsetManager(t)
pom := initPartitionOffsetManager(t, om, coordinator, 5, "original_meta")

View File

@ -87,13 +87,14 @@ type hashPartitioner struct {
hasher hash.Hash32
}
// NewCustomHashPartitioner is a wrapper around NewHashPartitioner,
// allowing the use of custom hasher
func NewCustomHashPartitioner(hasher hash.Hash32) PartitionerConstructor {
// NewCustomHashPartitioner is a wrapper around NewHashPartitioner, allowing the use of a custom hasher.
// The argument is a function that provides a hash.Hash32 instance. This ensures that each partition
// dispatcher gets its own hasher, avoiding the concurrency issues that come with sharing a single instance.
func NewCustomHashPartitioner(hasher func() hash.Hash32) PartitionerConstructor {
return func(topic string) Partitioner {
p := new(hashPartitioner)
p.random = NewRandomPartitioner(topic)
p.hasher = hasher
p.hasher = hasher()
return p
}
}
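Because the constructor now takes a factory rather than an instance, callers pass fnv.New32a itself instead of fnv.New32a(). A sketch of the updated call sites, mirroring the test changes below; the topic name and partition count are illustrative:

```go
package main

import (
	"fmt"
	"hash/fnv"

	"github.com/Shopify/sarama"
)

func main() {
	// Pass the constructor (fnv.New32a), not an instance (fnv.New32a()):
	// each partition dispatcher calls it to get its own hash.Hash32.
	config := sarama.NewConfig()
	config.Producer.Partitioner = sarama.NewCustomHashPartitioner(fnv.New32a)

	partitioner := sarama.NewCustomHashPartitioner(fnv.New32a)("example-topic")
	choice, err := partitioner.Partition(&sarama.ProducerMessage{Key: sarama.StringEncoder("k")}, 4)
	fmt.Println(choice, err)
}
```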

View File

@ -73,7 +73,7 @@ func TestRoundRobinPartitioner(t *testing.T) {
func TestNewHashPartitionerWithHasher(t *testing.T) {
// use the current default hasher fnv.New32a()
partitioner := NewCustomHashPartitioner(fnv.New32a())("mytopic")
partitioner := NewCustomHashPartitioner(fnv.New32a)("mytopic")
choice, err := partitioner.Partition(&ProducerMessage{}, 1)
if err != nil {
@ -104,7 +104,7 @@ func TestNewHashPartitionerWithHasher(t *testing.T) {
func TestHashPartitionerWithHasherMinInt32(t *testing.T) {
// use the current default hasher fnv.New32a()
partitioner := NewCustomHashPartitioner(fnv.New32a())("mytopic")
partitioner := NewCustomHashPartitioner(fnv.New32a)("mytopic")
msg := ProducerMessage{}
// "1468509572224" generates 2147483648 (uint32) result from Sum32 function

View File

@ -50,7 +50,11 @@ func testVersionDecodable(t *testing.T, name string, out versionedDecoder, in []
}
func testRequest(t *testing.T, name string, rb protocolBody, expected []byte) {
// Encoder request
packet := testRequestEncode(t, name, rb, expected)
testRequestDecode(t, name, rb, packet)
}
func testRequestEncode(t *testing.T, name string, rb protocolBody, expected []byte) []byte {
req := &request{correlationID: 123, clientID: "foo", body: rb}
packet, err := encode(req, nil)
headerSize := 14 + len("foo")
@ -59,7 +63,10 @@ func testRequest(t *testing.T, name string, rb protocolBody, expected []byte) {
} else if !bytes.Equal(packet[headerSize:], expected) {
t.Error("Encoding", name, "failed\ngot ", packet[headerSize:], "\nwant", expected)
}
// Decoder request
return packet
}
func testRequestDecode(t *testing.T, name string, rb protocolBody, packet []byte) {
decoded, n, err := decodeRequest(bytes.NewReader(packet))
if err != nil {
t.Error("Failed to decode request", err)

View File

@ -3,7 +3,6 @@ package sarama
import (
"bufio"
"net"
"sort"
)
type none struct{}
@ -23,13 +22,11 @@ func (slice int32Slice) Swap(i, j int) {
slice[i], slice[j] = slice[j], slice[i]
}
func dupeAndSort(input []int32) []int32 {
func dupInt32Slice(input []int32) []int32 {
ret := make([]int32, 0, len(input))
for _, val := range input {
ret = append(ret, val)
}
sort.Sort(int32Slice(ret))
return ret
}
@ -148,5 +145,6 @@ var (
V0_10_0_0 = newKafkaVersion(0, 10, 0, 0)
V0_10_0_1 = newKafkaVersion(0, 10, 0, 1)
V0_10_1_0 = newKafkaVersion(0, 10, 1, 0)
V0_10_2_0 = newKafkaVersion(0, 10, 2, 0)
minVersion = V0_8_2_0
)
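The newly added V0_10_2_0 constant can be selected on a client config like any other KafkaVersion; a minimal sketch:

```go
package main

import "github.com/Shopify/sarama"

func main() {
	// Opt in to the protocol version constant added in the diff above.
	config := sarama.NewConfig()
	config.Version = sarama.V0_10_2_0
	_ = config
}
```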

22
vendor/github.com/Shopify/sarama/vagrant/boot_cluster.sh generated vendored Executable file
View File

@ -0,0 +1,22 @@
#!/bin/sh
set -ex
# Launch and wait for toxiproxy
${REPOSITORY_ROOT}/vagrant/run_toxiproxy.sh &
while ! nc -q 1 localhost 2181 </dev/null; do echo "Waiting"; sleep 1; done
while ! nc -q 1 localhost 9092 </dev/null; do echo "Waiting"; sleep 1; done
# Launch and wait for Zookeeper
for i in 1 2 3 4 5; do
KAFKA_PORT=`expr $i + 9090`
cd ${KAFKA_INSTALL_ROOT}/kafka-${KAFKA_PORT} && bin/zookeeper-server-start.sh -daemon config/zookeeper.properties
done
while ! nc -q 1 localhost 21805 </dev/null; do echo "Waiting"; sleep 1; done
# Launch and wait for Kafka
for i in 1 2 3 4 5; do
KAFKA_PORT=`expr $i + 9090`
cd ${KAFKA_INSTALL_ROOT}/kafka-${KAFKA_PORT} && bin/kafka-server-start.sh -daemon config/server.properties
done
while ! nc -q 1 localhost 29095 </dev/null; do echo "Waiting"; sleep 1; done

8
vendor/github.com/Shopify/sarama/vagrant/create_topics.sh generated vendored Executable file
View File

@ -0,0 +1,8 @@
#!/bin/sh
set -ex
cd ${KAFKA_INSTALL_ROOT}/kafka-9092
bin/kafka-topics.sh --create --partitions 1 --replication-factor 3 --topic test.1 --zookeeper localhost:2181
bin/kafka-topics.sh --create --partitions 4 --replication-factor 3 --topic test.4 --zookeeper localhost:2181
bin/kafka-topics.sh --create --partitions 64 --replication-factor 3 --topic test.64 --zookeeper localhost:2181

49
vendor/github.com/Shopify/sarama/vagrant/install_cluster.sh generated vendored Executable file
View File

@ -0,0 +1,49 @@
#!/bin/sh
set -ex
TOXIPROXY_VERSION=2.0.0
mkdir -p ${KAFKA_INSTALL_ROOT}
if [ ! -f ${KAFKA_INSTALL_ROOT}/kafka-${KAFKA_VERSION}.tgz ]; then
wget --quiet http://apache.mirror.gtcomm.net/kafka/${KAFKA_VERSION}/kafka_2.11-${KAFKA_VERSION}.tgz -O ${KAFKA_INSTALL_ROOT}/kafka-${KAFKA_VERSION}.tgz
fi
if [ ! -f ${KAFKA_INSTALL_ROOT}/toxiproxy-${TOXIPROXY_VERSION} ]; then
wget --quiet https://github.com/Shopify/toxiproxy/releases/download/v${TOXIPROXY_VERSION}/toxiproxy-server-linux-amd64 -O ${KAFKA_INSTALL_ROOT}/toxiproxy-${TOXIPROXY_VERSION}
chmod +x ${KAFKA_INSTALL_ROOT}/toxiproxy-${TOXIPROXY_VERSION}
fi
rm -f ${KAFKA_INSTALL_ROOT}/toxiproxy
ln -s ${KAFKA_INSTALL_ROOT}/toxiproxy-${TOXIPROXY_VERSION} ${KAFKA_INSTALL_ROOT}/toxiproxy
for i in 1 2 3 4 5; do
ZK_PORT=`expr $i + 2180`
ZK_PORT_REAL=`expr $i + 21800`
KAFKA_PORT=`expr $i + 9090`
KAFKA_PORT_REAL=`expr $i + 29090`
# unpack kafka
mkdir -p ${KAFKA_INSTALL_ROOT}/kafka-${KAFKA_PORT}
tar xzf ${KAFKA_INSTALL_ROOT}/kafka-${KAFKA_VERSION}.tgz -C ${KAFKA_INSTALL_ROOT}/kafka-${KAFKA_PORT} --strip-components 1
# broker configuration
cp ${REPOSITORY_ROOT}/vagrant/server.properties ${KAFKA_INSTALL_ROOT}/kafka-${KAFKA_PORT}/config/
sed -i s/KAFKAID/${KAFKA_PORT}/g ${KAFKA_INSTALL_ROOT}/kafka-${KAFKA_PORT}/config/server.properties
sed -i s/KAFKAPORT/${KAFKA_PORT_REAL}/g ${KAFKA_INSTALL_ROOT}/kafka-${KAFKA_PORT}/config/server.properties
sed -i s/KAFKA_HOSTNAME/${KAFKA_HOSTNAME}/g ${KAFKA_INSTALL_ROOT}/kafka-${KAFKA_PORT}/config/server.properties
sed -i s/ZK_PORT/${ZK_PORT}/g ${KAFKA_INSTALL_ROOT}/kafka-${KAFKA_PORT}/config/server.properties
KAFKA_DATADIR="${KAFKA_INSTALL_ROOT}/kafka-${KAFKA_PORT}/data"
mkdir -p ${KAFKA_DATADIR}
sed -i s#KAFKA_DATADIR#${KAFKA_DATADIR}#g ${KAFKA_INSTALL_ROOT}/kafka-${KAFKA_PORT}/config/server.properties
# zookeeper configuration
cp ${REPOSITORY_ROOT}/vagrant/zookeeper.properties ${KAFKA_INSTALL_ROOT}/kafka-${KAFKA_PORT}/config/
sed -i s/KAFKAID/${KAFKA_PORT}/g ${KAFKA_INSTALL_ROOT}/kafka-${KAFKA_PORT}/config/zookeeper.properties
sed -i s/ZK_PORT/${ZK_PORT_REAL}/g ${KAFKA_INSTALL_ROOT}/kafka-${KAFKA_PORT}/config/zookeeper.properties
ZK_DATADIR="${KAFKA_INSTALL_ROOT}/zookeeper-${ZK_PORT}"
mkdir -p ${ZK_DATADIR}
sed -i s#ZK_DATADIR#${ZK_DATADIR}#g ${KAFKA_INSTALL_ROOT}/kafka-${KAFKA_PORT}/config/zookeeper.properties
echo $i > ${KAFKA_INSTALL_ROOT}/zookeeper-${ZK_PORT}/myid
done

9
vendor/github.com/Shopify/sarama/vagrant/kafka.conf generated vendored Normal file
View File

@ -0,0 +1,9 @@
start on started zookeeper-ZK_PORT
stop on stopping zookeeper-ZK_PORT
# Use a script instead of exec (using env stanza leaks KAFKA_HEAP_OPTS from zookeeper)
script
sleep 2
export KAFKA_HEAP_OPTS="-Xmx320m"
exec /opt/kafka-KAFKAID/bin/kafka-server-start.sh /opt/kafka-KAFKAID/config/server.properties
end script

15
vendor/github.com/Shopify/sarama/vagrant/provision.sh generated vendored Executable file
View File

@ -0,0 +1,15 @@
#!/bin/sh
set -ex
apt-get update
yes | apt-get install default-jre
export KAFKA_INSTALL_ROOT=/opt
export KAFKA_HOSTNAME=192.168.100.67
export KAFKA_VERSION=0.9.0.1
export REPOSITORY_ROOT=/vagrant
sh /vagrant/vagrant/install_cluster.sh
sh /vagrant/vagrant/setup_services.sh
sh /vagrant/vagrant/create_topics.sh

22
vendor/github.com/Shopify/sarama/vagrant/run_toxiproxy.sh generated vendored Executable file
View File

@ -0,0 +1,22 @@
#!/bin/sh
set -ex
${KAFKA_INSTALL_ROOT}/toxiproxy -port 8474 -host 0.0.0.0 &
PID=$!
while ! nc -q 1 localhost 8474 </dev/null; do echo "Waiting"; sleep 1; done
wget -O/dev/null -S --post-data='{"name":"zk1", "upstream":"localhost:21801", "listen":"0.0.0.0:2181"}' localhost:8474/proxies
wget -O/dev/null -S --post-data='{"name":"zk2", "upstream":"localhost:21802", "listen":"0.0.0.0:2182"}' localhost:8474/proxies
wget -O/dev/null -S --post-data='{"name":"zk3", "upstream":"localhost:21803", "listen":"0.0.0.0:2183"}' localhost:8474/proxies
wget -O/dev/null -S --post-data='{"name":"zk4", "upstream":"localhost:21804", "listen":"0.0.0.0:2184"}' localhost:8474/proxies
wget -O/dev/null -S --post-data='{"name":"zk5", "upstream":"localhost:21805", "listen":"0.0.0.0:2185"}' localhost:8474/proxies
wget -O/dev/null -S --post-data='{"name":"kafka1", "upstream":"localhost:29091", "listen":"0.0.0.0:9091"}' localhost:8474/proxies
wget -O/dev/null -S --post-data='{"name":"kafka2", "upstream":"localhost:29092", "listen":"0.0.0.0:9092"}' localhost:8474/proxies
wget -O/dev/null -S --post-data='{"name":"kafka3", "upstream":"localhost:29093", "listen":"0.0.0.0:9093"}' localhost:8474/proxies
wget -O/dev/null -S --post-data='{"name":"kafka4", "upstream":"localhost:29094", "listen":"0.0.0.0:9094"}' localhost:8474/proxies
wget -O/dev/null -S --post-data='{"name":"kafka5", "upstream":"localhost:29095", "listen":"0.0.0.0:9095"}' localhost:8474/proxies
wait $PID

View File

@ -0,0 +1,127 @@
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# see kafka.server.KafkaConfig for additional details and defaults
############################# Server Basics #############################
# The id of the broker. This must be set to a unique integer for each broker.
broker.id=KAFKAID
reserved.broker.max.id=10000
############################# Socket Server Settings #############################
# The port the socket server listens on
port=KAFKAPORT
# Hostname the broker will bind to. If not set, the server will bind to all interfaces
host.name=localhost
# Hostname the broker will advertise to producers and consumers. If not set, it uses the
# value for "host.name" if configured. Otherwise, it will use the value returned from
# java.net.InetAddress.getCanonicalHostName().
advertised.host.name=KAFKA_HOSTNAME
advertised.port=KAFKAID
# The port to publish to ZooKeeper for clients to use. If this is not set,
# it will publish the same port that the broker binds to.
# advertised.port=<port accessible by clients>
# The number of threads handling network requests
num.network.threads=2
# The number of threads doing disk I/O
num.io.threads=8
# The send buffer (SO_SNDBUF) used by the socket server
socket.send.buffer.bytes=1048576
# The receive buffer (SO_RCVBUF) used by the socket server
socket.receive.buffer.bytes=1048576
# The maximum size of a request that the socket server will accept (protection against OOM)
socket.request.max.bytes=104857600
############################# Log Basics #############################
# A comma-separated list of directories under which to store log files
log.dirs=KAFKA_DATADIR
# The default number of log partitions per topic. More partitions allow greater
# parallelism for consumption, but this will also result in more files across
# the brokers.
num.partitions=2
# Create new topics with a replication factor of 2 so failover can be tested
# more easily.
default.replication.factor=2
auto.create.topics.enable=false
delete.topic.enable=true
############################# Log Flush Policy #############################
# Messages are immediately written to the filesystem but by default we only fsync() to sync
# the OS cache lazily. The following configurations control the flush of data to disk.
# There are a few important trade-offs here:
# 1. Durability: Unflushed data may be lost if you are not using replication.
# 2. Latency: Very large flush intervals may lead to latency spikes when the flush does occur as there will be a lot of data to flush.
# 3. Throughput: The flush is generally the most expensive operation, and a small flush interval may lead to excessive seeks.
# The settings below allow one to configure the flush policy to flush data after a period of time or
# every N messages (or both). This can be done globally and overridden on a per-topic basis.
# The number of messages to accept before forcing a flush of data to disk
#log.flush.interval.messages=10000
# The maximum amount of time a message can sit in a log before we force a flush
#log.flush.interval.ms=1000
############################# Log Retention Policy #############################
# The following configurations control the disposal of log segments. The policy can
# be set to delete segments after a period of time, or after a given size has accumulated.
# A segment will be deleted whenever *either* of these criteria is met. Deletion always happens
# from the end of the log.
# The minimum age of a log file to be eligible for deletion
log.retention.hours=168
# A size-based retention policy for logs. Segments are pruned from the log as long as the remaining
# segments don't drop below log.retention.bytes.
log.retention.bytes=268435456
# The maximum size of a log segment file. When this size is reached a new log segment will be created.
log.segment.bytes=268435456
# The interval at which log segments are checked to see if they can be deleted according
# to the retention policies
log.retention.check.interval.ms=60000
# By default the log cleaner is disabled and the log retention policy will default to just delete segments after their retention expires.
# If log.cleaner.enable=true is set the cleaner will be enabled and individual logs can then be marked for log compaction.
log.cleaner.enable=false
############################# Zookeeper #############################
# Zookeeper connection string (see zookeeper docs for details).
# This is a comma-separated list of host:port pairs, each corresponding to a zk
# server. e.g. "127.0.0.1:3000,127.0.0.1:3001,127.0.0.1:3002".
# You can also append an optional chroot string to the urls to specify the
# root directory for all kafka znodes.
zookeeper.connect=localhost:ZK_PORT
# Timeout in ms for connecting to zookeeper
zookeeper.session.timeout.ms=3000
zookeeper.connection.timeout.ms=3000

29
vendor/github.com/Shopify/sarama/vagrant/setup_services.sh generated vendored Executable file
View File

@ -0,0 +1,29 @@
#!/bin/sh
set -ex
stop toxiproxy || true
cp ${REPOSITORY_ROOT}/vagrant/toxiproxy.conf /etc/init/toxiproxy.conf
cp ${REPOSITORY_ROOT}/vagrant/run_toxiproxy.sh ${KAFKA_INSTALL_ROOT}/
start toxiproxy
for i in 1 2 3 4 5; do
ZK_PORT=`expr $i + 2180`
KAFKA_PORT=`expr $i + 9090`
stop zookeeper-${ZK_PORT} || true
# set up zk service
cp ${REPOSITORY_ROOT}/vagrant/zookeeper.conf /etc/init/zookeeper-${ZK_PORT}.conf
sed -i s/KAFKAID/${KAFKA_PORT}/g /etc/init/zookeeper-${ZK_PORT}.conf
# set up kafka service
cp ${REPOSITORY_ROOT}/vagrant/kafka.conf /etc/init/kafka-${KAFKA_PORT}.conf
sed -i s/KAFKAID/${KAFKA_PORT}/g /etc/init/kafka-${KAFKA_PORT}.conf
sed -i s/ZK_PORT/${ZK_PORT}/g /etc/init/kafka-${KAFKA_PORT}.conf
start zookeeper-${ZK_PORT}
done
# Wait for the last kafka node to finish booting
while ! nc -q 1 localhost 29095 </dev/null; do echo "Waiting"; sleep 1; done

View File

@ -0,0 +1,6 @@
start on started networking
stop on shutdown
env KAFKA_INSTALL_ROOT=/opt
exec /opt/run_toxiproxy.sh

View File

@ -0,0 +1,7 @@
start on started toxiproxy
stop on stopping toxiproxy
script
export KAFKA_HEAP_OPTS="-Xmx192m"
exec /opt/kafka-KAFKAID/bin/zookeeper-server-start.sh /opt/kafka-KAFKAID/config/zookeeper.properties
end script

View File

@ -0,0 +1,36 @@
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# the directory where the snapshot is stored.
dataDir=ZK_DATADIR
# the port at which the clients will connect
clientPort=ZK_PORT
# disable the per-ip limit on the number of connections since this is a non-production config
maxClientCnxns=0
# The number of milliseconds of each tick
tickTime=2000
# The number of ticks that the initial synchronization phase can take
initLimit=10
# The number of ticks that can pass between
# sending a request and getting an acknowledgement
syncLimit=5
server.1=localhost:2281:2381
server.2=localhost:2282:2382
server.3=localhost:2283:2383
server.4=localhost:2284:2384
server.5=localhost:2285:2385

View File

@ -1,22 +0,0 @@
The MIT License (MIT)
Copyright (c) 2014 Shopify
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

View File

@ -1,161 +0,0 @@
# toxiproxy-go
This is the Go client library for the
[Toxiproxy](https://github.com/shopify/toxiproxy) API. Please read the [usage
section in the Toxiproxy README](https://github.com/shopify/toxiproxy#usage)
before attempting to use the client.
This client is compatible with Toxiproxy 2.x, for the latest 1.x client see
[v1.2.1](https://github.com/Shopify/toxiproxy/tree/v1.2.1/client).
## Changes in Toxiproxy-go Client 2.x
In order to make use of the 2.0 api, and to make usage a little easier, the
client api has changed:
- `client.NewProxy()` no longer accepts a proxy as an argument.
- `proxy.Create()` is removed in favour of using `proxy.Save()`.
- Proxies can be created in a single call using `client.CreateProxy()`.
- `proxy.Disable()` and `proxy.Enable()` have been added to simplify taking
down a proxy.
- `proxy.ToxicsUpstream` and `proxy.ToxicsDownstream` have been merged into a
single `ActiveToxics` list.
- `proxy.Toxics()` no longer requires a direction to be specified, and will
return toxics for both directions.
- `proxy.SetToxic()` has been replaced by `proxy.AddToxic()`,
`proxy.UpdateToxic()`, and `proxy.RemoveToxic()`.
## Usage
For detailed API docs please [see the Godoc
documentation](http://godoc.org/github.com/Shopify/toxiproxy/client).
First import toxiproxy and create a new client:
```go
import "github.com/Shopify/toxiproxy/client"
client := toxiproxy.NewClient("localhost:8474")
```
You can then create a new proxy using the client:
```go
proxy := client.CreateProxy("redis", "localhost:26379", "localhost:6379")
```
For large amounts of proxies, they can also be created using a configuration file:
```go
var config []toxiproxy.Proxy
data, _ := ioutil.ReadFile("config.json")
json.Unmarshal(data, &config)
proxies, err = client.Populate(config)
```
```json
[{
"name": "redis",
"listen": "localhost:26379",
"upstream": "localhost:6379"
}]
```
Toxics can be added as follows:
```go
// Add 1s latency to 100% of downstream connections
proxy.AddToxic("latency_down", "latency", "downstream", 1.0, toxiproxy.Attributes{
"latency": 1000,
})
// Change downstream latency to add 100ms of jitter
proxy.UpdateToxic("latency_down", 1.0, toxiproxy.Attributes{
"jitter": 100,
})
// Remove the latency toxic
proxy.RemoveToxic("latency_down")
```
The proxy can be taken down using `Disable()`:
```go
proxy.Disable()
```
When a proxy is no longer needed, it can be cleaned up with `Delete()`:
```go
proxy.Delete()
```
## Full Example
```go
import (
"net/http"
"testing"
"time"
"github.com/Shopify/toxiproxy/client"
"github.com/garyburd/redigo/redis"
)
var toxiClient *toxiproxy.Client
var proxies map[string]*toxiproxy.Proxy
func init() {
var err error
toxiClient = toxiproxy.NewClient("localhost:8474")
proxies, err = toxiClient.Populate([]toxiproxy.Proxy{{
Name: "redis",
Listen: "localhost:26379",
Upstream: "localhost:6379",
}})
if err != nil {
panic(err)
}
// Alternatively, create the proxies manually with
// toxiClient.CreateProxy("redis", "localhost:26379", "localhost:6379")
}
func TestRedisBackendDown(t *testing.T) {
proxies["redis"].Disable()
defer proxies["redis"].Enable()
// Test that redis is down
_, err := redis.Dial("tcp", ":26379")
if err == nil {
t.Fatal("Connection to redis did not fail")
}
}
func TestRedisBackendSlow(t *testing.T) {
proxies["redis"].AddToxic("", "latency", "", 1, toxiproxy.Attributes{
"latency": 1000,
})
defer proxies["redis"].RemoveToxic("latency_downstream")
// Test that redis is slow
start := time.Now()
conn, err := redis.Dial("tcp", ":26379")
if err != nil {
t.Fatal("Connection to redis failed", err)
}
_, err = conn.Do("GET", "test")
if err != nil {
t.Fatal("Redis command failed", err)
} else if time.Since(start) < 900*time.Millisecond {
t.Fatal("Redis command did not take long enough:", time.Since(start))
}
}
func TestEphemeralProxy(t *testing.T) {
proxy, _ := toxiClient.CreateProxy("test", "", "google.com:80")
defer proxy.Delete()
// Test connection through proxy.Listen
resp, err := http.Get("http://" + proxy.Listen)
if err != nil {
t.Fatal(err)
} else if resp.StatusCode != 200 {
t.Fatal("Proxy to google failed:", resp.StatusCode)
}
}
```

View File

@ -1,362 +0,0 @@
// Package Toxiproxy provides a client wrapper around the Toxiproxy HTTP API for
// testing the resiliency of Go applications.
//
// For use with Toxiproxy 2.x
package toxiproxy
import (
"bytes"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"net/http"
"strings"
)
// Client holds information about where to connect to Toxiproxy.
type Client struct {
endpoint string
}
type Attributes map[string]interface{}
type Toxic struct {
Name string `json:"name"`
Type string `json:"type"`
Stream string `json:"stream,omitempty"`
Toxicity float32 `json:"toxicity"`
Attributes Attributes `json:"attributes"`
}
type Toxics []Toxic
type Proxy struct {
Name string `json:"name"` // The name of the proxy
Listen string `json:"listen"` // The address the proxy listens on
Upstream string `json:"upstream"` // The upstream address to proxy to
Enabled bool `json:"enabled"` // Whether the proxy is enabled
ActiveToxics Toxics `json:"toxics"` // The toxics active on this proxy
client *Client
created bool // True if this proxy exists on the server
}
// NewClient creates a new client which provides the base of all communication
// with Toxiproxy. Endpoint is the address to the proxy (e.g. localhost:8474 if
// not overridden)
func NewClient(endpoint string) *Client {
if !strings.HasPrefix(endpoint, "http://") {
endpoint = "http://" + endpoint
}
return &Client{endpoint: endpoint}
}
// Proxies returns a map with all the proxies and their toxics.
func (client *Client) Proxies() (map[string]*Proxy, error) {
resp, err := http.Get(client.endpoint + "/proxies")
if err != nil {
return nil, err
}
err = checkError(resp, http.StatusOK, "Proxies")
if err != nil {
return nil, err
}
proxies := make(map[string]*Proxy)
err = json.NewDecoder(resp.Body).Decode(&proxies)
if err != nil {
return nil, err
}
for _, proxy := range proxies {
proxy.client = client
proxy.created = true
}
return proxies, nil
}
// Generates a new uncommitted proxy instance. In order to use the result, the
// proxy fields will need to be set and have `Save()` called.
func (client *Client) NewProxy() *Proxy {
return &Proxy{
client: client,
}
}
// CreateProxy instantiates a new proxy and starts listening on the specified address.
// This is an alias for `NewProxy()` + `proxy.Save()`
func (client *Client) CreateProxy(name, listen, upstream string) (*Proxy, error) {
proxy := &Proxy{
Name: name,
Listen: listen,
Upstream: upstream,
Enabled: true,
client: client,
}
err := proxy.Save()
if err != nil {
return nil, err
}
return proxy, nil
}
// Proxy returns a proxy by name.
func (client *Client) Proxy(name string) (*Proxy, error) {
// TODO url encode
resp, err := http.Get(client.endpoint + "/proxies/" + name)
if err != nil {
return nil, err
}
err = checkError(resp, http.StatusOK, "Proxy")
if err != nil {
return nil, err
}
proxy := new(Proxy)
err = json.NewDecoder(resp.Body).Decode(proxy)
if err != nil {
return nil, err
}
proxy.client = client
proxy.created = true
return proxy, nil
}
// Create a list of proxies using a configuration list. If a proxy already exists, it will be replaced
// with the specified configuration. For large amounts of proxies, `config` can be loaded from a file.
// Returns a list of the successfully created proxies.
func (client *Client) Populate(config []Proxy) ([]*Proxy, error) {
proxies := struct {
Proxies []*Proxy `json:"proxies"`
}{}
request, err := json.Marshal(config)
if err != nil {
return nil, err
}
resp, err := http.Post(client.endpoint+"/populate", "application/json", bytes.NewReader(request))
if err != nil {
return nil, err
}
// The response body may need to be read twice; we want to return both the proxy list and any errors
var body bytes.Buffer
tee := io.TeeReader(resp.Body, &body)
err = json.NewDecoder(tee).Decode(&proxies)
if err != nil {
return nil, err
}
resp.Body = ioutil.NopCloser(&body)
err = checkError(resp, http.StatusCreated, "Populate")
return proxies.Proxies, err
}
// Save saves changes to a proxy such as its enabled status or upstream port.
func (proxy *Proxy) Save() error {
request, err := json.Marshal(proxy)
if err != nil {
return err
}
var resp *http.Response
if proxy.created {
resp, err = http.Post(proxy.client.endpoint+"/proxies/"+proxy.Name, "text/plain", bytes.NewReader(request))
} else {
resp, err = http.Post(proxy.client.endpoint+"/proxies", "application/json", bytes.NewReader(request))
}
if err != nil {
return err
}
if proxy.created {
err = checkError(resp, http.StatusOK, "Save")
} else {
err = checkError(resp, http.StatusCreated, "Create")
}
if err != nil {
return err
}
err = json.NewDecoder(resp.Body).Decode(proxy)
if err != nil {
return err
}
proxy.created = true
return nil
}
// Enable a proxy again after it has been disabled.
func (proxy *Proxy) Enable() error {
proxy.Enabled = true
return proxy.Save()
}
// Disable a proxy so that no connections can pass through. This will drop all active connections.
func (proxy *Proxy) Disable() error {
proxy.Enabled = false
return proxy.Save()
}
// Delete a proxy completely and close all existing connections through it. All information about
// the proxy such as listen port and active toxics will be deleted as well. If you just wish to
// stop and later enable a proxy, use `Disable()` and `Enable()`.
func (proxy *Proxy) Delete() error {
httpClient := &http.Client{}
req, err := http.NewRequest("DELETE", proxy.client.endpoint+"/proxies/"+proxy.Name, nil)
if err != nil {
return err
}
resp, err := httpClient.Do(req)
if err != nil {
return err
}
return checkError(resp, http.StatusNoContent, "Delete")
}
// Toxics returns a map of all the active toxics and their attributes.
func (proxy *Proxy) Toxics() (Toxics, error) {
resp, err := http.Get(proxy.client.endpoint + "/proxies/" + proxy.Name + "/toxics")
if err != nil {
return nil, err
}
err = checkError(resp, http.StatusOK, "Toxics")
if err != nil {
return nil, err
}
toxics := make(Toxics, 0)
err = json.NewDecoder(resp.Body).Decode(&toxics)
if err != nil {
return nil, err
}
return toxics, nil
}
// AddToxic adds a toxic to the given stream direction.
// If a name is not specified, it will default to <type>_<stream>.
// If a stream is not specified, it will default to downstream.
// See https://github.com/Shopify/toxiproxy#toxics for a list of all Toxic types.
func (proxy *Proxy) AddToxic(name, typeName, stream string, toxicity float32, attrs Attributes) (*Toxic, error) {
toxic := Toxic{name, typeName, stream, toxicity, attrs}
if toxic.Toxicity == -1 {
toxic.Toxicity = 1 // Just to be consistent with a toxicity of -1 using the default
}
request, err := json.Marshal(&toxic)
if err != nil {
return nil, err
}
resp, err := http.Post(proxy.client.endpoint+"/proxies/"+proxy.Name+"/toxics", "application/json", bytes.NewReader(request))
if err != nil {
return nil, err
}
err = checkError(resp, http.StatusOK, "AddToxic")
if err != nil {
return nil, err
}
result := &Toxic{}
err = json.NewDecoder(resp.Body).Decode(result)
if err != nil {
return nil, err
}
return result, nil
}
// UpdateToxic sets the parameters for an existing toxic with the given name.
// If toxicity is set to -1, the current value will be used.
func (proxy *Proxy) UpdateToxic(name string, toxicity float32, attrs Attributes) (*Toxic, error) {
toxic := map[string]interface{}{
"attributes": attrs,
}
if toxicity != -1 {
toxic["toxicity"] = toxicity
}
request, err := json.Marshal(&toxic)
if err != nil {
return nil, err
}
resp, err := http.Post(proxy.client.endpoint+"/proxies/"+proxy.Name+"/toxics/"+name, "application/json", bytes.NewReader(request))
if err != nil {
return nil, err
}
err = checkError(resp, http.StatusOK, "UpdateToxic")
if err != nil {
return nil, err
}
result := &Toxic{}
err = json.NewDecoder(resp.Body).Decode(result)
if err != nil {
return nil, err
}
return result, nil
}
// RemoveToxic removes the toxic with the given name.
func (proxy *Proxy) RemoveToxic(name string) error {
httpClient := &http.Client{}
req, err := http.NewRequest("DELETE", proxy.client.endpoint+"/proxies/"+proxy.Name+"/toxics/"+name, nil)
if err != nil {
return err
}
resp, err := httpClient.Do(req)
if err != nil {
return err
}
return checkError(resp, http.StatusNoContent, "RemoveToxic")
}
// ResetState resets the state of all proxies and toxics in Toxiproxy.
func (client *Client) ResetState() error {
resp, err := http.Post(client.endpoint+"/reset", "text/plain", bytes.NewReader([]byte{}))
if err != nil {
return err
}
return checkError(resp, http.StatusNoContent, "ResetState")
}
type ApiError struct {
Message string `json:"error"`
Status int `json:"status"`
}
func (err *ApiError) Error() string {
return fmt.Sprintf("HTTP %d: %s", err.Status, err.Message)
}
func checkError(resp *http.Response, expectedCode int, caller string) error {
if resp.StatusCode != expectedCode {
apiError := new(ApiError)
err := json.NewDecoder(resp.Body).Decode(apiError)
if err != nil {
apiError.Message = fmt.Sprintf("Unexpected response code, expected %d", expectedCode)
apiError.Status = resp.StatusCode
}
return fmt.Errorf("%s: %v", caller, apiError)
}
return nil
}

View File

@ -1,152 +0,0 @@
// Copyright (c) 2015-2016 Dave Collins <dave@davec.name>
//
// Permission to use, copy, modify, and distribute this software for any
// purpose with or without fee is hereby granted, provided that the above
// copyright notice and this permission notice appear in all copies.
//
// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
// NOTE: Due to the following build constraints, this file will only be compiled
// when the code is not running on Google App Engine, compiled by GopherJS, and
// "-tags safe" is not added to the go build command line. The "disableunsafe"
// tag is deprecated and thus should not be used.
// +build !js,!appengine,!safe,!disableunsafe
package spew
import (
"reflect"
"unsafe"
)
const (
// UnsafeDisabled is a build-time constant which specifies whether or
// not access to the unsafe package is available.
UnsafeDisabled = false
// ptrSize is the size of a pointer on the current arch.
ptrSize = unsafe.Sizeof((*byte)(nil))
)
var (
// offsetPtr, offsetScalar, and offsetFlag are the offsets for the
// internal reflect.Value fields. These values are valid before golang
// commit ecccf07e7f9d which changed the format. They are also valid
// after commit 82f48826c6c7 which changed the format again to mirror
// the original format. Code in the init function updates these offsets
// as necessary.
offsetPtr = uintptr(ptrSize)
offsetScalar = uintptr(0)
offsetFlag = uintptr(ptrSize * 2)
// flagKindWidth and flagKindShift indicate various bits that the
// reflect package uses internally to track kind information.
//
// flagRO indicates whether or not the value field of a reflect.Value is
// read-only.
//
// flagIndir indicates whether the value field of a reflect.Value is
// the actual data or a pointer to the data.
//
// These values are valid before golang commit 90a7c3c86944 which
// changed their positions. Code in the init function updates these
// flags as necessary.
flagKindWidth = uintptr(5)
flagKindShift = uintptr(flagKindWidth - 1)
flagRO = uintptr(1 << 0)
flagIndir = uintptr(1 << 1)
)
func init() {
// Older versions of reflect.Value stored small integers directly in the
// ptr field (which is named val in the older versions). Versions
// between commits ecccf07e7f9d and 82f48826c6c7 added a new field named
// scalar for this purpose which unfortunately came before the flag
// field, so the offset of the flag field is different for those
// versions.
//
// This code constructs a new reflect.Value from a known small integer
// and checks if the size of the reflect.Value struct indicates it has
// the scalar field. When it does, the offsets are updated accordingly.
vv := reflect.ValueOf(0xf00)
if unsafe.Sizeof(vv) == (ptrSize * 4) {
offsetScalar = ptrSize * 2
offsetFlag = ptrSize * 3
}
// Commit 90a7c3c86944 changed the flag positions such that the low
// order bits are the kind. This code extracts the kind from the flags
// field and ensures it's the correct type. When it's not, the flag
// order has been changed to the newer format, so the flags are updated
// accordingly.
upf := unsafe.Pointer(uintptr(unsafe.Pointer(&vv)) + offsetFlag)
upfv := *(*uintptr)(upf)
flagKindMask := uintptr((1<<flagKindWidth - 1) << flagKindShift)
if (upfv&flagKindMask)>>flagKindShift != uintptr(reflect.Int) {
flagKindShift = 0
flagRO = 1 << 5
flagIndir = 1 << 6
// Commit adf9b30e5594 modified the flags to separate the
// flagRO flag into two bits which specifies whether or not the
// field is embedded. This causes flagIndir to move over a bit
// and means that flagRO is the combination of either of the
// original flagRO bit and the new bit.
//
// This code detects the change by extracting what used to be
// the indirect bit to ensure it's set. When it's not, the flag
// order has been changed to the newer format, so the flags are
// updated accordingly.
if upfv&flagIndir == 0 {
flagRO = 3 << 5
flagIndir = 1 << 7
}
}
}
// unsafeReflectValue converts the passed reflect.Value into one that bypasses
// the typical safety restrictions preventing access to unaddressable and
// unexported data. It works by digging the raw pointer to the underlying
// value out of the protected value and generating a new unprotected (unsafe)
// reflect.Value to it.
//
// This allows us to check for implementations of the Stringer and error
// interfaces to be used for pretty printing ordinarily unaddressable and
// inaccessible values such as unexported struct fields.
func unsafeReflectValue(v reflect.Value) (rv reflect.Value) {
indirects := 1
vt := v.Type()
upv := unsafe.Pointer(uintptr(unsafe.Pointer(&v)) + offsetPtr)
rvf := *(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&v)) + offsetFlag))
if rvf&flagIndir != 0 {
vt = reflect.PtrTo(v.Type())
indirects++
} else if offsetScalar != 0 {
// The value is in the scalar field when it's not one of the
// reference types.
switch vt.Kind() {
case reflect.Uintptr:
case reflect.Chan:
case reflect.Func:
case reflect.Map:
case reflect.Ptr:
case reflect.UnsafePointer:
default:
upv = unsafe.Pointer(uintptr(unsafe.Pointer(&v)) +
offsetScalar)
}
}
pv := reflect.NewAt(vt, upv)
rv = pv
for i := 0; i < indirects; i++ {
rv = rv.Elem()
}
return rv
}

View File

@ -1,38 +0,0 @@
// Copyright (c) 2015-2016 Dave Collins <dave@davec.name>
//
// Permission to use, copy, modify, and distribute this software for any
// purpose with or without fee is hereby granted, provided that the above
// copyright notice and this permission notice appear in all copies.
//
// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
// NOTE: Due to the following build constraints, this file will only be compiled
// when the code is running on Google App Engine, compiled by GopherJS, or
// "-tags safe" is added to the go build command line. The "disableunsafe"
// tag is deprecated and thus should not be used.
// +build js appengine safe disableunsafe
package spew
import "reflect"
const (
// UnsafeDisabled is a build-time constant which specifies whether or
// not access to the unsafe package is available.
UnsafeDisabled = true
)
// unsafeReflectValue typically converts the passed reflect.Value into one
// that bypasses the typical safety restrictions preventing access to
// unaddressable and unexported data. However, doing this relies on access to
// the unsafe package. This is a stub version which simply returns the passed
// reflect.Value when the unsafe package is not available.
func unsafeReflectValue(v reflect.Value) reflect.Value {
return v
}

View File

@ -1,341 +0,0 @@
/*
* Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
package spew
import (
"bytes"
"fmt"
"io"
"reflect"
"sort"
"strconv"
)
// Some constants in the form of bytes to avoid string overhead. This mirrors
// the technique used in the fmt package.
var (
panicBytes = []byte("(PANIC=")
plusBytes = []byte("+")
iBytes = []byte("i")
trueBytes = []byte("true")
falseBytes = []byte("false")
interfaceBytes = []byte("(interface {})")
commaNewlineBytes = []byte(",\n")
newlineBytes = []byte("\n")
openBraceBytes = []byte("{")
openBraceNewlineBytes = []byte("{\n")
closeBraceBytes = []byte("}")
asteriskBytes = []byte("*")
colonBytes = []byte(":")
colonSpaceBytes = []byte(": ")
openParenBytes = []byte("(")
closeParenBytes = []byte(")")
spaceBytes = []byte(" ")
pointerChainBytes = []byte("->")
nilAngleBytes = []byte("<nil>")
maxNewlineBytes = []byte("<max depth reached>\n")
maxShortBytes = []byte("<max>")
circularBytes = []byte("<already shown>")
circularShortBytes = []byte("<shown>")
invalidAngleBytes = []byte("<invalid>")
openBracketBytes = []byte("[")
closeBracketBytes = []byte("]")
percentBytes = []byte("%")
precisionBytes = []byte(".")
openAngleBytes = []byte("<")
closeAngleBytes = []byte(">")
openMapBytes = []byte("map[")
closeMapBytes = []byte("]")
lenEqualsBytes = []byte("len=")
capEqualsBytes = []byte("cap=")
)
// hexDigits is used to map a decimal value to a hex digit.
var hexDigits = "0123456789abcdef"
// catchPanic handles any panics that might occur during the handleMethods
// calls.
func catchPanic(w io.Writer, v reflect.Value) {
if err := recover(); err != nil {
w.Write(panicBytes)
fmt.Fprintf(w, "%v", err)
w.Write(closeParenBytes)
}
}
// handleMethods attempts to call the Error and String methods on the underlying
// type the passed reflect.Value represents and outputs the result to Writer w.
//
// It handles panics in any called methods by catching and displaying the error
// as the formatted value.
func handleMethods(cs *ConfigState, w io.Writer, v reflect.Value) (handled bool) {
// We need an interface to check if the type implements the error or
// Stringer interface. However, the reflect package won't give us an
// interface on certain things like unexported struct fields in order
// to enforce visibility rules. We use unsafe, when it's available,
// to bypass these restrictions since this package does not mutate the
// values.
if !v.CanInterface() {
if UnsafeDisabled {
return false
}
v = unsafeReflectValue(v)
}
// Choose whether or not to do error and Stringer interface lookups against
// the base type or a pointer to the base type depending on settings.
// Technically calling one of these methods with a pointer receiver can
// mutate the value; however, types which choose to satisfy an error or
// Stringer interface with a pointer receiver should not be mutating their
// state inside these interface methods.
if !cs.DisablePointerMethods && !UnsafeDisabled && !v.CanAddr() {
v = unsafeReflectValue(v)
}
if v.CanAddr() {
v = v.Addr()
}
// Is it an error or Stringer?
switch iface := v.Interface().(type) {
case error:
defer catchPanic(w, v)
if cs.ContinueOnMethod {
w.Write(openParenBytes)
w.Write([]byte(iface.Error()))
w.Write(closeParenBytes)
w.Write(spaceBytes)
return false
}
w.Write([]byte(iface.Error()))
return true
case fmt.Stringer:
defer catchPanic(w, v)
if cs.ContinueOnMethod {
w.Write(openParenBytes)
w.Write([]byte(iface.String()))
w.Write(closeParenBytes)
w.Write(spaceBytes)
return false
}
w.Write([]byte(iface.String()))
return true
}
return false
}
// printBool outputs a boolean value as true or false to Writer w.
func printBool(w io.Writer, val bool) {
if val {
w.Write(trueBytes)
} else {
w.Write(falseBytes)
}
}
// printInt outputs a signed integer value to Writer w.
func printInt(w io.Writer, val int64, base int) {
w.Write([]byte(strconv.FormatInt(val, base)))
}
// printUint outputs an unsigned integer value to Writer w.
func printUint(w io.Writer, val uint64, base int) {
w.Write([]byte(strconv.FormatUint(val, base)))
}
// printFloat outputs a floating point value using the specified precision,
// which is expected to be 32 or 64bit, to Writer w.
func printFloat(w io.Writer, val float64, precision int) {
w.Write([]byte(strconv.FormatFloat(val, 'g', -1, precision)))
}
// printComplex outputs a complex value using the specified float precision
// for the real and imaginary parts to Writer w.
func printComplex(w io.Writer, c complex128, floatPrecision int) {
r := real(c)
w.Write(openParenBytes)
w.Write([]byte(strconv.FormatFloat(r, 'g', -1, floatPrecision)))
i := imag(c)
if i >= 0 {
w.Write(plusBytes)
}
w.Write([]byte(strconv.FormatFloat(i, 'g', -1, floatPrecision)))
w.Write(iBytes)
w.Write(closeParenBytes)
}
// printHexPtr outputs a uintptr formatted as hexadecimal with a leading '0x'
// prefix to Writer w.
func printHexPtr(w io.Writer, p uintptr) {
// Null pointer.
num := uint64(p)
if num == 0 {
w.Write(nilAngleBytes)
return
}
// Max uint64 is 16 bytes in hex + 2 bytes for '0x' prefix
buf := make([]byte, 18)
// It's simpler to construct the hex string right to left.
base := uint64(16)
i := len(buf) - 1
for num >= base {
buf[i] = hexDigits[num%base]
num /= base
i--
}
buf[i] = hexDigits[num]
// Add '0x' prefix.
i--
buf[i] = 'x'
i--
buf[i] = '0'
// Strip unused leading bytes.
buf = buf[i:]
w.Write(buf)
}
// valuesSorter implements sort.Interface to allow a slice of reflect.Value
// elements to be sorted.
type valuesSorter struct {
values []reflect.Value
strings []string // either nil or same len as values
cs *ConfigState
}
// newValuesSorter initializes a valuesSorter instance, which holds a set of
// surrogate keys on which the data should be sorted. It uses flags in
// ConfigState to decide if and how to populate those surrogate keys.
func newValuesSorter(values []reflect.Value, cs *ConfigState) sort.Interface {
vs := &valuesSorter{values: values, cs: cs}
if canSortSimply(vs.values[0].Kind()) {
return vs
}
if !cs.DisableMethods {
vs.strings = make([]string, len(values))
for i := range vs.values {
b := bytes.Buffer{}
if !handleMethods(cs, &b, vs.values[i]) {
vs.strings = nil
break
}
vs.strings[i] = b.String()
}
}
if vs.strings == nil && cs.SpewKeys {
vs.strings = make([]string, len(values))
for i := range vs.values {
vs.strings[i] = Sprintf("%#v", vs.values[i].Interface())
}
}
return vs
}
// canSortSimply tests whether a reflect.Kind is a primitive that can be sorted
// directly, or whether it should be considered for sorting by surrogate keys
// (if the ConfigState allows it).
func canSortSimply(kind reflect.Kind) bool {
// This switch parallels valueSortLess, except for the default case.
switch kind {
case reflect.Bool:
return true
case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
return true
case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:
return true
case reflect.Float32, reflect.Float64:
return true
case reflect.String:
return true
case reflect.Uintptr:
return true
case reflect.Array:
return true
}
return false
}
// Len returns the number of values in the slice. It is part of the
// sort.Interface implementation.
func (s *valuesSorter) Len() int {
return len(s.values)
}
// Swap swaps the values at the passed indices. It is part of the
// sort.Interface implementation.
func (s *valuesSorter) Swap(i, j int) {
s.values[i], s.values[j] = s.values[j], s.values[i]
if s.strings != nil {
s.strings[i], s.strings[j] = s.strings[j], s.strings[i]
}
}
// valueSortLess returns whether the first value should sort before the second
// value. It is used by valueSorter.Less as part of the sort.Interface
// implementation.
func valueSortLess(a, b reflect.Value) bool {
switch a.Kind() {
case reflect.Bool:
return !a.Bool() && b.Bool()
case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
return a.Int() < b.Int()
case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:
return a.Uint() < b.Uint()
case reflect.Float32, reflect.Float64:
return a.Float() < b.Float()
case reflect.String:
return a.String() < b.String()
case reflect.Uintptr:
return a.Uint() < b.Uint()
case reflect.Array:
// Compare the contents of both arrays.
l := a.Len()
for i := 0; i < l; i++ {
av := a.Index(i)
bv := b.Index(i)
if av.Interface() == bv.Interface() {
continue
}
return valueSortLess(av, bv)
}
}
return a.String() < b.String()
}
// Less returns whether the value at index i should sort before the
// value at index j. It is part of the sort.Interface implementation.
func (s *valuesSorter) Less(i, j int) bool {
if s.strings == nil {
return valueSortLess(s.values[i], s.values[j])
}
return s.strings[i] < s.strings[j]
}
// sortValues is a sort function that handles both native types and any type that
// can be converted to error or Stringer. Other inputs are sorted according to
// their Value.String() value to ensure display stability.
func sortValues(values []reflect.Value, cs *ConfigState) {
if len(values) == 0 {
return
}
sort.Sort(newValuesSorter(values, cs))
}
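A minimal usage sketch (not part of the vendored file, assuming the standard github.com/davecgh/go-spew/spew import path): sortValues itself is unexported, but its effect is reachable through the SortKeys option, which routes map keys through this sorter so repeated dumps of the same map are deterministic.

package main

import "github.com/davecgh/go-spew/spew"

func main() {
	// SortKeys makes spew sort map keys (via sortValues) before printing,
	// giving stable, diffable output across runs.
	cs := spew.ConfigState{Indent: " ", SortKeys: true}
	cs.Dump(map[string]int{"b": 2, "a": 1, "c": 3})
}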

View File

@ -1,298 +0,0 @@
/*
* Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
package spew_test
import (
"fmt"
"reflect"
"testing"
"github.com/davecgh/go-spew/spew"
)
// custom type to test Stringer interface on non-pointer receiver.
type stringer string
// String implements the Stringer interface for testing invocation of custom
// stringers on types with non-pointer receivers.
func (s stringer) String() string {
return "stringer " + string(s)
}
// custom type to test Stringer interface on pointer receiver.
type pstringer string
// String implements the Stringer interface for testing invocation of custom
// stringers on types with only pointer receivers.
func (s *pstringer) String() string {
return "stringer " + string(*s)
}
// xref1 and xref2 are cross referencing structs for testing circular reference
// detection.
type xref1 struct {
ps2 *xref2
}
type xref2 struct {
ps1 *xref1
}
// indirCir1, indirCir2, and indirCir3 are used to generate an indirect circular
// reference for testing detection.
type indirCir1 struct {
ps2 *indirCir2
}
type indirCir2 struct {
ps3 *indirCir3
}
type indirCir3 struct {
ps1 *indirCir1
}
// embed is used to test embedded structures.
type embed struct {
a string
}
// embedwrap is used to test embedded structures.
type embedwrap struct {
*embed
e *embed
}
// panicer is used to intentionally cause a panic for testing that spew
// properly handles it.
type panicer int
func (p panicer) String() string {
panic("test panic")
}
// customError is used to test custom error interface invocation.
type customError int
func (e customError) Error() string {
return fmt.Sprintf("error: %d", int(e))
}
// stringizeWants converts a slice of wanted test output into a format suitable
// for a test error message.
func stringizeWants(wants []string) string {
s := ""
for i, want := range wants {
if i > 0 {
s += fmt.Sprintf("want%d: %s", i+1, want)
} else {
s += "want: " + want
}
}
return s
}
// testFailed returns whether or not a test failed by checking if the result
// of the test is in the slice of wanted strings.
func testFailed(result string, wants []string) bool {
for _, want := range wants {
if result == want {
return false
}
}
return true
}
type sortableStruct struct {
x int
}
func (ss sortableStruct) String() string {
return fmt.Sprintf("ss.%d", ss.x)
}
type unsortableStruct struct {
x int
}
type sortTestCase struct {
input []reflect.Value
expected []reflect.Value
}
func helpTestSortValues(tests []sortTestCase, cs *spew.ConfigState, t *testing.T) {
getInterfaces := func(values []reflect.Value) []interface{} {
interfaces := []interface{}{}
for _, v := range values {
interfaces = append(interfaces, v.Interface())
}
return interfaces
}
for _, test := range tests {
spew.SortValues(test.input, cs)
// reflect.DeepEqual cannot really make sense of reflect.Value,
// probably because of all the pointer tricks. For instance,
// v(2.0) != v(2.0) on a 32-bit system. Turn them into interface{}
// instead.
input := getInterfaces(test.input)
expected := getInterfaces(test.expected)
if !reflect.DeepEqual(input, expected) {
t.Errorf("Sort mismatch:\n %v != %v", input, expected)
}
}
}
// TestSortValues ensures the sort functionality for reflect.Value based sorting
// works as intended.
func TestSortValues(t *testing.T) {
v := reflect.ValueOf
a := v("a")
b := v("b")
c := v("c")
embedA := v(embed{"a"})
embedB := v(embed{"b"})
embedC := v(embed{"c"})
tests := []sortTestCase{
// No values.
{
[]reflect.Value{},
[]reflect.Value{},
},
// Bools.
{
[]reflect.Value{v(false), v(true), v(false)},
[]reflect.Value{v(false), v(false), v(true)},
},
// Ints.
{
[]reflect.Value{v(2), v(1), v(3)},
[]reflect.Value{v(1), v(2), v(3)},
},
// Uints.
{
[]reflect.Value{v(uint8(2)), v(uint8(1)), v(uint8(3))},
[]reflect.Value{v(uint8(1)), v(uint8(2)), v(uint8(3))},
},
// Floats.
{
[]reflect.Value{v(2.0), v(1.0), v(3.0)},
[]reflect.Value{v(1.0), v(2.0), v(3.0)},
},
// Strings.
{
[]reflect.Value{b, a, c},
[]reflect.Value{a, b, c},
},
// Array
{
[]reflect.Value{v([3]int{3, 2, 1}), v([3]int{1, 3, 2}), v([3]int{1, 2, 3})},
[]reflect.Value{v([3]int{1, 2, 3}), v([3]int{1, 3, 2}), v([3]int{3, 2, 1})},
},
// Uintptrs.
{
[]reflect.Value{v(uintptr(2)), v(uintptr(1)), v(uintptr(3))},
[]reflect.Value{v(uintptr(1)), v(uintptr(2)), v(uintptr(3))},
},
// SortableStructs.
{
// Note: not sorted - DisableMethods is set.
[]reflect.Value{v(sortableStruct{2}), v(sortableStruct{1}), v(sortableStruct{3})},
[]reflect.Value{v(sortableStruct{2}), v(sortableStruct{1}), v(sortableStruct{3})},
},
// UnsortableStructs.
{
// Note: not sorted - SpewKeys is false.
[]reflect.Value{v(unsortableStruct{2}), v(unsortableStruct{1}), v(unsortableStruct{3})},
[]reflect.Value{v(unsortableStruct{2}), v(unsortableStruct{1}), v(unsortableStruct{3})},
},
// Invalid.
{
[]reflect.Value{embedB, embedA, embedC},
[]reflect.Value{embedB, embedA, embedC},
},
}
cs := spew.ConfigState{DisableMethods: true, SpewKeys: false}
helpTestSortValues(tests, &cs, t)
}
// TestSortValuesWithMethods ensures the sort functionality for reflect.Value
// based sorting works as intended when using string methods.
func TestSortValuesWithMethods(t *testing.T) {
v := reflect.ValueOf
a := v("a")
b := v("b")
c := v("c")
tests := []sortTestCase{
// Ints.
{
[]reflect.Value{v(2), v(1), v(3)},
[]reflect.Value{v(1), v(2), v(3)},
},
// Strings.
{
[]reflect.Value{b, a, c},
[]reflect.Value{a, b, c},
},
// SortableStructs.
{
[]reflect.Value{v(sortableStruct{2}), v(sortableStruct{1}), v(sortableStruct{3})},
[]reflect.Value{v(sortableStruct{1}), v(sortableStruct{2}), v(sortableStruct{3})},
},
// UnsortableStructs.
{
// Note: not sorted - SpewKeys is false.
[]reflect.Value{v(unsortableStruct{2}), v(unsortableStruct{1}), v(unsortableStruct{3})},
[]reflect.Value{v(unsortableStruct{2}), v(unsortableStruct{1}), v(unsortableStruct{3})},
},
}
cs := spew.ConfigState{DisableMethods: false, SpewKeys: false}
helpTestSortValues(tests, &cs, t)
}
// TestSortValuesWithSpew ensures the sort functionality for reflect.Value
// based sorting works as intended when using spew to stringify keys.
func TestSortValuesWithSpew(t *testing.T) {
v := reflect.ValueOf
a := v("a")
b := v("b")
c := v("c")
tests := []sortTestCase{
// Ints.
{
[]reflect.Value{v(2), v(1), v(3)},
[]reflect.Value{v(1), v(2), v(3)},
},
// Strings.
{
[]reflect.Value{b, a, c},
[]reflect.Value{a, b, c},
},
// SortableStructs.
{
[]reflect.Value{v(sortableStruct{2}), v(sortableStruct{1}), v(sortableStruct{3})},
[]reflect.Value{v(sortableStruct{1}), v(sortableStruct{2}), v(sortableStruct{3})},
},
// UnsortableStructs.
{
[]reflect.Value{v(unsortableStruct{2}), v(unsortableStruct{1}), v(unsortableStruct{3})},
[]reflect.Value{v(unsortableStruct{1}), v(unsortableStruct{2}), v(unsortableStruct{3})},
},
}
cs := spew.ConfigState{DisableMethods: true, SpewKeys: true}
helpTestSortValues(tests, &cs, t)
}

View File

@ -1,306 +0,0 @@
/*
* Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
package spew
import (
"bytes"
"fmt"
"io"
"os"
)
// ConfigState houses the configuration options used by spew to format and
// display values. There is a global instance, Config, that is used to control
// all top-level Formatter and Dump functionality. Each ConfigState instance
// provides methods equivalent to the top-level functions.
//
// The zero value for ConfigState provides no indentation. You would typically
// want to set it to a space or a tab.
//
// Alternatively, you can use NewDefaultConfig to get a ConfigState instance
// with default settings. See the documentation of NewDefaultConfig for default
// values.
type ConfigState struct {
// Indent specifies the string to use for each indentation level. The
// global config instance that all top-level functions use set this to a
// single space by default. If you would like more indentation, you might
// set this to a tab with "\t" or perhaps two spaces with " ".
Indent string
// MaxDepth controls the maximum number of levels to descend into nested
// data structures. The default, 0, means there is no limit.
//
// NOTE: Circular data structures are properly detected, so it is not
// necessary to set this value unless you specifically want to limit deeply
// nested data structures.
MaxDepth int
// DisableMethods specifies whether or not error and Stringer interfaces are
// invoked for types that implement them.
DisableMethods bool
// DisablePointerMethods specifies whether or not to check for and invoke
// error and Stringer interfaces on types which only accept a pointer
// receiver when the current type is not a pointer.
//
// NOTE: This might be an unsafe action since calling one of these methods
// with a pointer receiver could technically mutate the value, however,
// in practice, types which choose to satisfy an error or Stringer
// interface with a pointer receiver should not be mutating their state
// inside these interface methods. As a result, this option relies on
// access to the unsafe package, so it will not have any effect when
// running in environments without access to the unsafe package such as
// Google App Engine or with the "safe" build tag specified.
DisablePointerMethods bool
// DisablePointerAddresses specifies whether to disable the printing of
// pointer addresses. This is useful when diffing data structures in tests.
DisablePointerAddresses bool
// DisableCapacities specifies whether to disable the printing of capacities
// for arrays, slices, maps and channels. This is useful when diffing
// data structures in tests.
DisableCapacities bool
// ContinueOnMethod specifies whether or not recursion should continue once
// a custom error or Stringer interface is invoked. The default, false,
// means it will print the results of invoking the custom error or Stringer
// interface and return immediately instead of continuing to recurse into
// the internals of the data type.
//
// NOTE: This flag does not have any effect if method invocation is disabled
// via the DisableMethods or DisablePointerMethods options.
ContinueOnMethod bool
// SortKeys specifies map keys should be sorted before being printed. Use
// this to have a more deterministic, diffable output. Note that only
// native types (bool, int, uint, floats, uintptr and string) and types
// that support the error or Stringer interfaces (if methods are
// enabled) are supported, with other types sorted according to the
// reflect.Value.String() output which guarantees display stability.
SortKeys bool
// SpewKeys specifies that, as a last resort attempt, map keys should
// be spewed to strings and sorted by those strings. This is only
// considered if SortKeys is true.
SpewKeys bool
}
// Config is the active configuration of the top-level functions.
// The configuration can be changed by modifying the contents of spew.Config.
var Config = ConfigState{Indent: " "}
// Errorf is a wrapper for fmt.Errorf that treats each argument as if it were
// passed with a Formatter interface returned by c.NewFormatter. It returns
// the formatted string as a value that satisfies error. See NewFormatter
// for formatting details.
//
// This function is shorthand for the following syntax:
//
// fmt.Errorf(format, c.NewFormatter(a), c.NewFormatter(b))
func (c *ConfigState) Errorf(format string, a ...interface{}) (err error) {
return fmt.Errorf(format, c.convertArgs(a)...)
}
// Fprint is a wrapper for fmt.Fprint that treats each argument as if it were
// passed with a Formatter interface returned by c.NewFormatter. It returns
// the number of bytes written and any write error encountered. See
// NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
// fmt.Fprint(w, c.NewFormatter(a), c.NewFormatter(b))
func (c *ConfigState) Fprint(w io.Writer, a ...interface{}) (n int, err error) {
return fmt.Fprint(w, c.convertArgs(a)...)
}
// Fprintf is a wrapper for fmt.Fprintf that treats each argument as if it were
// passed with a Formatter interface returned by c.NewFormatter. It returns
// the number of bytes written and any write error encountered. See
// NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
// fmt.Fprintf(w, format, c.NewFormatter(a), c.NewFormatter(b))
func (c *ConfigState) Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) {
return fmt.Fprintf(w, format, c.convertArgs(a)...)
}
// Fprintln is a wrapper for fmt.Fprintln that treats each argument as if it
// were passed with a Formatter interface returned by c.NewFormatter. See
// NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
// fmt.Fprintln(w, c.NewFormatter(a), c.NewFormatter(b))
func (c *ConfigState) Fprintln(w io.Writer, a ...interface{}) (n int, err error) {
return fmt.Fprintln(w, c.convertArgs(a)...)
}
// Print is a wrapper for fmt.Print that treats each argument as if it were
// passed with a Formatter interface returned by c.NewFormatter. It returns
// the number of bytes written and any write error encountered. See
// NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
// fmt.Print(c.NewFormatter(a), c.NewFormatter(b))
func (c *ConfigState) Print(a ...interface{}) (n int, err error) {
return fmt.Print(c.convertArgs(a)...)
}
// Printf is a wrapper for fmt.Printf that treats each argument as if it were
// passed with a Formatter interface returned by c.NewFormatter. It returns
// the number of bytes written and any write error encountered. See
// NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
// fmt.Printf(format, c.NewFormatter(a), c.NewFormatter(b))
func (c *ConfigState) Printf(format string, a ...interface{}) (n int, err error) {
return fmt.Printf(format, c.convertArgs(a)...)
}
// Println is a wrapper for fmt.Println that treats each argument as if it were
// passed with a Formatter interface returned by c.NewFormatter. It returns
// the number of bytes written and any write error encountered. See
// NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
// fmt.Println(c.NewFormatter(a), c.NewFormatter(b))
func (c *ConfigState) Println(a ...interface{}) (n int, err error) {
return fmt.Println(c.convertArgs(a)...)
}
// Sprint is a wrapper for fmt.Sprint that treats each argument as if it were
// passed with a Formatter interface returned by c.NewFormatter. It returns
// the resulting string. See NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
// fmt.Sprint(c.NewFormatter(a), c.NewFormatter(b))
func (c *ConfigState) Sprint(a ...interface{}) string {
return fmt.Sprint(c.convertArgs(a)...)
}
// Sprintf is a wrapper for fmt.Sprintf that treats each argument as if it were
// passed with a Formatter interface returned by c.NewFormatter. It returns
// the resulting string. See NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
// fmt.Sprintf(format, c.NewFormatter(a), c.NewFormatter(b))
func (c *ConfigState) Sprintf(format string, a ...interface{}) string {
return fmt.Sprintf(format, c.convertArgs(a)...)
}
// Sprintln is a wrapper for fmt.Sprintln that treats each argument as if it
// were passed with a Formatter interface returned by c.NewFormatter. It
// returns the resulting string. See NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
// fmt.Sprintln(c.NewFormatter(a), c.NewFormatter(b))
func (c *ConfigState) Sprintln(a ...interface{}) string {
return fmt.Sprintln(c.convertArgs(a)...)
}
/*
NewFormatter returns a custom formatter that satisfies the fmt.Formatter
interface. As a result, it integrates cleanly with standard fmt package
printing functions. The formatter is useful for inline printing of smaller data
types similar to the standard %v format specifier.
The custom formatter only responds to the %v (most compact), %+v (adds pointer
addresses), %#v (adds types), and %#+v (adds types and pointer addresses) verb
combinations. Any other verbs such as %x and %q will be sent to the
standard fmt package for formatting. In addition, the custom formatter ignores
the width and precision arguments (however they will still work on the format
specifiers not handled by the custom formatter).
Typically this function shouldn't be called directly. It is much easier to make
use of the custom formatter by calling one of the convenience functions such as
c.Printf, c.Println, or c.Fprintf.
*/
func (c *ConfigState) NewFormatter(v interface{}) fmt.Formatter {
return newFormatter(c, v)
}
// Fdump formats and displays the passed arguments to io.Writer w. It formats
// exactly the same as Dump.
func (c *ConfigState) Fdump(w io.Writer, a ...interface{}) {
fdump(c, w, a...)
}
/*
Dump displays the passed parameters to standard out with newlines, customizable
indentation, and additional debug information such as complete types and all
pointer addresses used to indirect to the final value. It provides the
following features over the built-in printing facilities provided by the fmt
package:
* Pointers are dereferenced and followed
* Circular data structures are detected and handled properly
* Custom Stringer/error interfaces are optionally invoked, including
on unexported types
* Custom types which only implement the Stringer/error interfaces via
a pointer receiver are optionally invoked when passing non-pointer
variables
* Byte arrays and slices are dumped like the hexdump -C command which
includes offsets, byte values in hex, and ASCII output
The configuration options are controlled by modifying the public members
of c. See ConfigState for options documentation.
See Fdump if you would prefer dumping to an arbitrary io.Writer or Sdump to
get the formatted result as a string.
*/
func (c *ConfigState) Dump(a ...interface{}) {
fdump(c, os.Stdout, a...)
}
// Sdump returns a string with the passed arguments formatted exactly the same
// as Dump.
func (c *ConfigState) Sdump(a ...interface{}) string {
var buf bytes.Buffer
fdump(c, &buf, a...)
return buf.String()
}
// convertArgs accepts a slice of arguments and returns a slice of the same
// length with each argument converted to a spew Formatter interface using
// the ConfigState associated with s.
func (c *ConfigState) convertArgs(args []interface{}) (formatters []interface{}) {
formatters = make([]interface{}, len(args))
for index, arg := range args {
formatters[index] = newFormatter(c, arg)
}
return formatters
}
// NewDefaultConfig returns a ConfigState with the following default settings.
//
// Indent: " "
// MaxDepth: 0
// DisableMethods: false
// DisablePointerMethods: false
// ContinueOnMethod: false
// SortKeys: false
func NewDefaultConfig() *ConfigState {
return &ConfigState{Indent: " "}
}
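A short sketch of the instance-based usage described above (illustrative, not part of the vendored source; assumes the standard go-spew import path). Options set on a local ConfigState leave the package-level spew.Config untouched:

package main

import "github.com/davecgh/go-spew/spew"

func main() {
	type point struct{ X, Y int }
	p := &point{X: 1, Y: 2}

	// These options apply only to this instance's methods, not to the
	// top-level spew functions driven by the global Config.
	cs := spew.ConfigState{
		Indent:                  "\t",
		DisablePointerAddresses: true, // stable output for test diffs
		DisableCapacities:       true, // omit cap= for slices, maps, chans
	}
	_ = cs.Sdump(p) // typically compared against expected output in tests
}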

View File

@ -1,211 +0,0 @@
/*
* Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
/*
Package spew implements a deep pretty printer for Go data structures to aid in
debugging.
A quick overview of the additional features spew provides over the built-in
printing facilities for Go data types is as follows:
* Pointers are dereferenced and followed
* Circular data structures are detected and handled properly
* Custom Stringer/error interfaces are optionally invoked, including
on unexported types
* Custom types which only implement the Stringer/error interfaces via
a pointer receiver are optionally invoked when passing non-pointer
variables
* Byte arrays and slices are dumped like the hexdump -C command which
includes offsets, byte values in hex, and ASCII output (only when using
Dump style)
There are two different approaches spew allows for dumping Go data structures:
* Dump style which prints with newlines, customizable indentation,
and additional debug information such as types and all pointer addresses
used to indirect to the final value
* A custom Formatter interface that integrates cleanly with the standard fmt
package and replaces %v, %+v, %#v, and %#+v to provide inline printing
similar to the default %v while providing the additional functionality
outlined above and passing unsupported format verbs such as %x and %q
along to fmt
Quick Start
This section demonstrates how to quickly get started with spew. See the
sections below for further details on formatting and configuration options.
To dump a variable with full newlines, indentation, type, and pointer
information use Dump, Fdump, or Sdump:
spew.Dump(myVar1, myVar2, ...)
spew.Fdump(someWriter, myVar1, myVar2, ...)
str := spew.Sdump(myVar1, myVar2, ...)
Alternatively, if you would prefer to use format strings with a compacted inline
printing style, use the convenience wrappers Printf, Fprintf, etc with
%v (most compact), %+v (adds pointer addresses), %#v (adds types), or
%#+v (adds types and pointer addresses):
spew.Printf("myVar1: %v -- myVar2: %+v", myVar1, myVar2)
spew.Printf("myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
spew.Fprintf(someWriter, "myVar1: %v -- myVar2: %+v", myVar1, myVar2)
spew.Fprintf(someWriter, "myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
Configuration Options
Configuration of spew is handled by fields in the ConfigState type. For
convenience, all of the top-level functions use a global state available
via the spew.Config global.
It is also possible to create a ConfigState instance that provides methods
equivalent to the top-level functions. This allows multiple configurations to be
used concurrently. See the ConfigState documentation for more details.
The following configuration options are available:
* Indent
String to use for each indentation level for Dump functions.
It is a single space by default. A popular alternative is "\t".
* MaxDepth
Maximum number of levels to descend into nested data structures.
There is no limit by default.
* DisableMethods
Disables invocation of error and Stringer interface methods.
Method invocation is enabled by default.
* DisablePointerMethods
Disables invocation of error and Stringer interface methods on types
which only accept pointer receivers from non-pointer variables.
Pointer method invocation is enabled by default.
* DisablePointerAddresses
DisablePointerAddresses specifies whether to disable the printing of
pointer addresses. This is useful when diffing data structures in tests.
* DisableCapacities
DisableCapacities specifies whether to disable the printing of
capacities for arrays, slices, maps and channels. This is useful when
diffing data structures in tests.
* ContinueOnMethod
Enables recursion into types after invoking error and Stringer interface
methods. Recursion after method invocation is disabled by default.
* SortKeys
Specifies map keys should be sorted before being printed. Use
this to have a more deterministic, diffable output. Note that
only native types (bool, int, uint, floats, uintptr and string)
and types which implement error or Stringer interfaces are
supported with other types sorted according to the
reflect.Value.String() output which guarantees display
stability. Natural map order is used by default.
* SpewKeys
Specifies that, as a last resort attempt, map keys should be
spewed to strings and sorted by those strings. This is only
considered if SortKeys is true.
Dump Usage
Simply call spew.Dump with a list of variables you want to dump:
spew.Dump(myVar1, myVar2, ...)
You may also call spew.Fdump if you would prefer to output to an arbitrary
io.Writer. For example, to dump to standard error:
spew.Fdump(os.Stderr, myVar1, myVar2, ...)
A third option is to call spew.Sdump to get the formatted output as a string:
str := spew.Sdump(myVar1, myVar2, ...)
Sample Dump Output
See the Dump example for details on the setup of the types and variables being
shown here.
(main.Foo) {
unexportedField: (*main.Bar)(0xf84002e210)({
flag: (main.Flag) flagTwo,
data: (uintptr) <nil>
}),
ExportedField: (map[interface {}]interface {}) (len=1) {
(string) (len=3) "one": (bool) true
}
}
Byte (and uint8) arrays and slices are displayed uniquely like the hexdump -C
command as shown.
([]uint8) (len=32 cap=32) {
00000000 11 12 13 14 15 16 17 18 19 1a 1b 1c 1d 1e 1f 20 |............... |
00000010 21 22 23 24 25 26 27 28 29 2a 2b 2c 2d 2e 2f 30 |!"#$%&'()*+,-./0|
00000020 31 32 |12|
}
Custom Formatter
Spew provides a custom formatter that implements the fmt.Formatter interface
so that it integrates cleanly with standard fmt package printing functions. The
formatter is useful for inline printing of smaller data types similar to the
standard %v format specifier.
The custom formatter only responds to the %v (most compact), %+v (adds pointer
addresses), %#v (adds types), or %#+v (adds types and pointer addresses) verb
combinations. Any other verbs such as %x and %q will be sent to the
standard fmt package for formatting. In addition, the custom formatter ignores
the width and precision arguments (however they will still work on the format
specifiers not handled by the custom formatter).
Custom Formatter Usage
The simplest way to make use of the spew custom formatter is to call one of the
convenience functions such as spew.Printf, spew.Println, or spew.Fprintf. The
functions have syntax you are most likely already familiar with:
spew.Printf("myVar1: %v -- myVar2: %+v", myVar1, myVar2)
spew.Printf("myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
spew.Println(myVar, myVar2)
spew.Fprintf(os.Stderr, "myVar1: %v -- myVar2: %+v", myVar1, myVar2)
spew.Fprintf(os.Stderr, "myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
See the Index for the full list of convenience functions.
Sample Formatter Output
Double pointer to a uint8:
%v: <**>5
%+v: <**>(0xf8400420d0->0xf8400420c8)5
%#v: (**uint8)5
%#+v: (**uint8)(0xf8400420d0->0xf8400420c8)5
Pointer to circular struct with a uint8 field and a pointer to itself:
%v: <*>{1 <*><shown>}
%+v: <*>(0xf84003e260){ui8:1 c:<*>(0xf84003e260)<shown>}
%#v: (*main.circular){ui8:(uint8)1 c:(*main.circular)<shown>}
%#+v: (*main.circular)(0xf84003e260){ui8:(uint8)1 c:(*main.circular)(0xf84003e260)<shown>}
See the Printf example for details on the setup of variables being shown
here.
Errors
Since it is possible for custom Stringer/error interfaces to panic, spew
detects them and handles them internally by printing the panic information
inline with the output. Since spew is intended to provide deep pretty printing
capabilities on structures, it intentionally does not return any errors.
*/
package spew
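A hedged illustration of the Formatter integration outlined above (not part of the vendored file; only the spew package itself is assumed): NewFormatter values can be handed directly to the standard fmt functions.

package main

import (
	"fmt"

	"github.com/davecgh/go-spew/spew"
)

func main() {
	type pair struct{ A, B int }
	v := pair{A: 1, B: 2}

	// %v, %+v, %#v and %#+v are handled by the spew formatter; other verbs
	// such as %x or %q fall through to the standard fmt package.
	fmt.Printf("compact: %v, typed: %#v\n", spew.NewFormatter(v), spew.NewFormatter(v))
}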

View File

@ -1,509 +0,0 @@
/*
* Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
package spew
import (
"bytes"
"encoding/hex"
"fmt"
"io"
"os"
"reflect"
"regexp"
"strconv"
"strings"
)
var (
// uint8Type is a reflect.Type representing a uint8. It is used to
// convert cgo types to uint8 slices for hexdumping.
uint8Type = reflect.TypeOf(uint8(0))
// cCharRE is a regular expression that matches a cgo char.
// It is used to detect character arrays to hexdump them.
cCharRE = regexp.MustCompile("^.*\\._Ctype_char$")
// cUnsignedCharRE is a regular expression that matches a cgo unsigned
// char. It is used to detect unsigned character arrays to hexdump
// them.
cUnsignedCharRE = regexp.MustCompile("^.*\\._Ctype_unsignedchar$")
// cUint8tCharRE is a regular expression that matches a cgo uint8_t.
// It is used to detect uint8_t arrays to hexdump them.
cUint8tCharRE = regexp.MustCompile("^.*\\._Ctype_uint8_t$")
)
// dumpState contains information about the state of a dump operation.
type dumpState struct {
w io.Writer
depth int
pointers map[uintptr]int
ignoreNextType bool
ignoreNextIndent bool
cs *ConfigState
}
// indent performs indentation according to the depth level and cs.Indent
// option.
func (d *dumpState) indent() {
if d.ignoreNextIndent {
d.ignoreNextIndent = false
return
}
d.w.Write(bytes.Repeat([]byte(d.cs.Indent), d.depth))
}
// unpackValue returns values inside of non-nil interfaces when possible.
// This is useful for data types like structs, arrays, slices, and maps which
// can contain varying types packed inside an interface.
func (d *dumpState) unpackValue(v reflect.Value) reflect.Value {
if v.Kind() == reflect.Interface && !v.IsNil() {
v = v.Elem()
}
return v
}
// dumpPtr handles formatting of pointers by indirecting them as necessary.
func (d *dumpState) dumpPtr(v reflect.Value) {
// Remove pointers at or below the current depth from map used to detect
// circular refs.
for k, depth := range d.pointers {
if depth >= d.depth {
delete(d.pointers, k)
}
}
// Keep list of all dereferenced pointers to show later.
pointerChain := make([]uintptr, 0)
// Figure out how many levels of indirection there are by dereferencing
// pointers and unpacking interfaces down the chain while detecting circular
// references.
nilFound := false
cycleFound := false
indirects := 0
ve := v
for ve.Kind() == reflect.Ptr {
if ve.IsNil() {
nilFound = true
break
}
indirects++
addr := ve.Pointer()
pointerChain = append(pointerChain, addr)
if pd, ok := d.pointers[addr]; ok && pd < d.depth {
cycleFound = true
indirects--
break
}
d.pointers[addr] = d.depth
ve = ve.Elem()
if ve.Kind() == reflect.Interface {
if ve.IsNil() {
nilFound = true
break
}
ve = ve.Elem()
}
}
// Display type information.
d.w.Write(openParenBytes)
d.w.Write(bytes.Repeat(asteriskBytes, indirects))
d.w.Write([]byte(ve.Type().String()))
d.w.Write(closeParenBytes)
// Display pointer information.
if !d.cs.DisablePointerAddresses && len(pointerChain) > 0 {
d.w.Write(openParenBytes)
for i, addr := range pointerChain {
if i > 0 {
d.w.Write(pointerChainBytes)
}
printHexPtr(d.w, addr)
}
d.w.Write(closeParenBytes)
}
// Display dereferenced value.
d.w.Write(openParenBytes)
switch {
case nilFound == true:
d.w.Write(nilAngleBytes)
case cycleFound == true:
d.w.Write(circularBytes)
default:
d.ignoreNextType = true
d.dump(ve)
}
d.w.Write(closeParenBytes)
}
// dumpSlice handles formatting of arrays and slices. Byte (uint8 under
// reflection) arrays and slices are dumped in hexdump -C fashion.
func (d *dumpState) dumpSlice(v reflect.Value) {
// Determine whether this type should be hex dumped or not. Also,
// for types which should be hexdumped, try to use the underlying data
// first, then fall back to trying to convert them to a uint8 slice.
var buf []uint8
doConvert := false
doHexDump := false
numEntries := v.Len()
if numEntries > 0 {
vt := v.Index(0).Type()
vts := vt.String()
switch {
// C types that need to be converted.
case cCharRE.MatchString(vts):
fallthrough
case cUnsignedCharRE.MatchString(vts):
fallthrough
case cUint8tCharRE.MatchString(vts):
doConvert = true
// Try to use existing uint8 slices and fall back to converting
// and copying if that fails.
case vt.Kind() == reflect.Uint8:
// We need an addressable interface to convert the type
// to a byte slice. However, the reflect package won't
// give us an interface on certain things like
// unexported struct fields in order to enforce
// visibility rules. We use unsafe, when available, to
// bypass these restrictions since this package does not
// mutate the values.
vs := v
if !vs.CanInterface() || !vs.CanAddr() {
vs = unsafeReflectValue(vs)
}
if !UnsafeDisabled {
vs = vs.Slice(0, numEntries)
// Use the existing uint8 slice if it can be
// type asserted.
iface := vs.Interface()
if slice, ok := iface.([]uint8); ok {
buf = slice
doHexDump = true
break
}
}
// The underlying data needs to be converted if it can't
// be type asserted to a uint8 slice.
doConvert = true
}
// Copy and convert the underlying type if needed.
if doConvert && vt.ConvertibleTo(uint8Type) {
// Convert and copy each element into a uint8 byte
// slice.
buf = make([]uint8, numEntries)
for i := 0; i < numEntries; i++ {
vv := v.Index(i)
buf[i] = uint8(vv.Convert(uint8Type).Uint())
}
doHexDump = true
}
}
// Hexdump the entire slice as needed.
if doHexDump {
indent := strings.Repeat(d.cs.Indent, d.depth)
str := indent + hex.Dump(buf)
str = strings.Replace(str, "\n", "\n"+indent, -1)
str = strings.TrimRight(str, d.cs.Indent)
d.w.Write([]byte(str))
return
}
// Recursively call dump for each item.
for i := 0; i < numEntries; i++ {
d.dump(d.unpackValue(v.Index(i)))
if i < (numEntries - 1) {
d.w.Write(commaNewlineBytes)
} else {
d.w.Write(newlineBytes)
}
}
}
// dump is the main workhorse for dumping a value. It uses the passed reflect
// value to figure out what kind of object we are dealing with and formats it
// appropriately. It is a recursive function, however circular data structures
// are detected and handled properly.
func (d *dumpState) dump(v reflect.Value) {
// Handle invalid reflect values immediately.
kind := v.Kind()
if kind == reflect.Invalid {
d.w.Write(invalidAngleBytes)
return
}
// Handle pointers specially.
if kind == reflect.Ptr {
d.indent()
d.dumpPtr(v)
return
}
// Print type information unless already handled elsewhere.
if !d.ignoreNextType {
d.indent()
d.w.Write(openParenBytes)
d.w.Write([]byte(v.Type().String()))
d.w.Write(closeParenBytes)
d.w.Write(spaceBytes)
}
d.ignoreNextType = false
// Display length and capacity if the built-in len and cap functions
// work with the value's kind and the len/cap itself is non-zero.
valueLen, valueCap := 0, 0
switch v.Kind() {
case reflect.Array, reflect.Slice, reflect.Chan:
valueLen, valueCap = v.Len(), v.Cap()
case reflect.Map, reflect.String:
valueLen = v.Len()
}
if valueLen != 0 || !d.cs.DisableCapacities && valueCap != 0 {
d.w.Write(openParenBytes)
if valueLen != 0 {
d.w.Write(lenEqualsBytes)
printInt(d.w, int64(valueLen), 10)
}
if !d.cs.DisableCapacities && valueCap != 0 {
if valueLen != 0 {
d.w.Write(spaceBytes)
}
d.w.Write(capEqualsBytes)
printInt(d.w, int64(valueCap), 10)
}
d.w.Write(closeParenBytes)
d.w.Write(spaceBytes)
}
// Call Stringer/error interfaces if they exist and the handle methods flag
// is enabled
if !d.cs.DisableMethods {
if (kind != reflect.Invalid) && (kind != reflect.Interface) {
if handled := handleMethods(d.cs, d.w, v); handled {
return
}
}
}
switch kind {
case reflect.Invalid:
// Do nothing. We should never get here since invalid has already
// been handled above.
case reflect.Bool:
printBool(d.w, v.Bool())
case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
printInt(d.w, v.Int(), 10)
case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:
printUint(d.w, v.Uint(), 10)
case reflect.Float32:
printFloat(d.w, v.Float(), 32)
case reflect.Float64:
printFloat(d.w, v.Float(), 64)
case reflect.Complex64:
printComplex(d.w, v.Complex(), 32)
case reflect.Complex128:
printComplex(d.w, v.Complex(), 64)
case reflect.Slice:
if v.IsNil() {
d.w.Write(nilAngleBytes)
break
}
fallthrough
case reflect.Array:
d.w.Write(openBraceNewlineBytes)
d.depth++
if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) {
d.indent()
d.w.Write(maxNewlineBytes)
} else {
d.dumpSlice(v)
}
d.depth--
d.indent()
d.w.Write(closeBraceBytes)
case reflect.String:
d.w.Write([]byte(strconv.Quote(v.String())))
case reflect.Interface:
// The only time we should get here is for nil interfaces due to
// unpackValue calls.
if v.IsNil() {
d.w.Write(nilAngleBytes)
}
case reflect.Ptr:
// Do nothing. We should never get here since pointers have already
// been handled above.
case reflect.Map:
// nil maps should be indicated as different than empty maps
if v.IsNil() {
d.w.Write(nilAngleBytes)
break
}
d.w.Write(openBraceNewlineBytes)
d.depth++
if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) {
d.indent()
d.w.Write(maxNewlineBytes)
} else {
numEntries := v.Len()
keys := v.MapKeys()
if d.cs.SortKeys {
sortValues(keys, d.cs)
}
for i, key := range keys {
d.dump(d.unpackValue(key))
d.w.Write(colonSpaceBytes)
d.ignoreNextIndent = true
d.dump(d.unpackValue(v.MapIndex(key)))
if i < (numEntries - 1) {
d.w.Write(commaNewlineBytes)
} else {
d.w.Write(newlineBytes)
}
}
}
d.depth--
d.indent()
d.w.Write(closeBraceBytes)
case reflect.Struct:
d.w.Write(openBraceNewlineBytes)
d.depth++
if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) {
d.indent()
d.w.Write(maxNewlineBytes)
} else {
vt := v.Type()
numFields := v.NumField()
for i := 0; i < numFields; i++ {
d.indent()
vtf := vt.Field(i)
d.w.Write([]byte(vtf.Name))
d.w.Write(colonSpaceBytes)
d.ignoreNextIndent = true
d.dump(d.unpackValue(v.Field(i)))
if i < (numFields - 1) {
d.w.Write(commaNewlineBytes)
} else {
d.w.Write(newlineBytes)
}
}
}
d.depth--
d.indent()
d.w.Write(closeBraceBytes)
case reflect.Uintptr:
printHexPtr(d.w, uintptr(v.Uint()))
case reflect.UnsafePointer, reflect.Chan, reflect.Func:
printHexPtr(d.w, v.Pointer())
// There were not any other types at the time this code was written, but
// fall back to letting the default fmt package handle it in case any new
// types are added.
default:
if v.CanInterface() {
fmt.Fprintf(d.w, "%v", v.Interface())
} else {
fmt.Fprintf(d.w, "%v", v.String())
}
}
}
// fdump is a helper function to consolidate the logic from the various public
// methods which take varying writers and config states.
func fdump(cs *ConfigState, w io.Writer, a ...interface{}) {
for _, arg := range a {
if arg == nil {
w.Write(interfaceBytes)
w.Write(spaceBytes)
w.Write(nilAngleBytes)
w.Write(newlineBytes)
continue
}
d := dumpState{w: w, cs: cs}
d.pointers = make(map[uintptr]int)
d.dump(reflect.ValueOf(arg))
d.w.Write(newlineBytes)
}
}
// Fdump formats and displays the passed arguments to io.Writer w. It formats
// exactly the same as Dump.
func Fdump(w io.Writer, a ...interface{}) {
fdump(&Config, w, a...)
}
// Sdump returns a string with the passed arguments formatted exactly the same
// as Dump.
func Sdump(a ...interface{}) string {
var buf bytes.Buffer
fdump(&Config, &buf, a...)
return buf.String()
}
/*
Dump displays the passed parameters to standard out with newlines, customizable
indentation, and additional debug information such as complete types and all
pointer addresses used to indirect to the final value. It provides the
following features over the built-in printing facilities provided by the fmt
package:
* Pointers are dereferenced and followed
* Circular data structures are detected and handled properly
* Custom Stringer/error interfaces are optionally invoked, including
on unexported types
* Custom types which only implement the Stringer/error interfaces via
a pointer receiver are optionally invoked when passing non-pointer
variables
* Byte arrays and slices are dumped like the hexdump -C command which
includes offsets, byte values in hex, and ASCII output
The configuration options are controlled by an exported package global,
spew.Config. See ConfigState for options documentation.
See Fdump if you would prefer dumping to an arbitrary io.Writer or Sdump to
get the formatted result as a string.
*/
func Dump(a ...interface{}) {
fdump(&Config, os.Stdout, a...)
}
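A brief sketch of how Dump, Fdump, and Sdump relate, per the comments above (illustrative, assuming the usual import path); the []byte argument also exercises the hexdump-style output handled by dumpSlice:

package main

import (
	"bytes"
	"fmt"
	"os"

	"github.com/davecgh/go-spew/spew"
)

func main() {
	data := []byte("abc")

	spew.Dump(data)             // writes to os.Stdout
	spew.Fdump(os.Stderr, data) // same formatting, any io.Writer

	// Sdump returns the identical output as a string.
	var buf bytes.Buffer
	spew.Fdump(&buf, data)
	fmt.Println(buf.String() == spew.Sdump(data)) // true
}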

File diff suppressed because it is too large

View File

@ -1,99 +0,0 @@
// Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
//
// Permission to use, copy, modify, and distribute this software for any
// purpose with or without fee is hereby granted, provided that the above
// copyright notice and this permission notice appear in all copies.
//
// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
// NOTE: Due to the following build constraints, this file will only be compiled
// when both cgo is supported and "-tags testcgo" is added to the go test
// command line. This means the cgo tests are only added (and hence run) when
// specifically requested. This configuration is used because spew itself
// does not require cgo to run even though it does handle certain cgo types
// specially. Rather than forcing all clients to require cgo and an external
// C compiler just to run the tests, this scheme makes them optional.
// +build cgo,testcgo
package spew_test
import (
"fmt"
"github.com/davecgh/go-spew/spew/testdata"
)
func addCgoDumpTests() {
// C char pointer.
v := testdata.GetCgoCharPointer()
nv := testdata.GetCgoNullCharPointer()
pv := &v
vcAddr := fmt.Sprintf("%p", v)
vAddr := fmt.Sprintf("%p", pv)
pvAddr := fmt.Sprintf("%p", &pv)
vt := "*testdata._Ctype_char"
vs := "116"
addDumpTest(v, "("+vt+")("+vcAddr+")("+vs+")\n")
addDumpTest(pv, "(*"+vt+")("+vAddr+"->"+vcAddr+")("+vs+")\n")
addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+"->"+vcAddr+")("+vs+")\n")
addDumpTest(nv, "("+vt+")(<nil>)\n")
// C char array.
v2, v2l, v2c := testdata.GetCgoCharArray()
v2Len := fmt.Sprintf("%d", v2l)
v2Cap := fmt.Sprintf("%d", v2c)
v2t := "[6]testdata._Ctype_char"
v2s := "(len=" + v2Len + " cap=" + v2Cap + ") " +
"{\n 00000000 74 65 73 74 32 00 " +
" |test2.|\n}"
addDumpTest(v2, "("+v2t+") "+v2s+"\n")
// C unsigned char array.
v3, v3l, v3c := testdata.GetCgoUnsignedCharArray()
v3Len := fmt.Sprintf("%d", v3l)
v3Cap := fmt.Sprintf("%d", v3c)
v3t := "[6]testdata._Ctype_unsignedchar"
v3t2 := "[6]testdata._Ctype_uchar"
v3s := "(len=" + v3Len + " cap=" + v3Cap + ") " +
"{\n 00000000 74 65 73 74 33 00 " +
" |test3.|\n}"
addDumpTest(v3, "("+v3t+") "+v3s+"\n", "("+v3t2+") "+v3s+"\n")
// C signed char array.
v4, v4l, v4c := testdata.GetCgoSignedCharArray()
v4Len := fmt.Sprintf("%d", v4l)
v4Cap := fmt.Sprintf("%d", v4c)
v4t := "[6]testdata._Ctype_schar"
v4t2 := "testdata._Ctype_schar"
v4s := "(len=" + v4Len + " cap=" + v4Cap + ") " +
"{\n (" + v4t2 + ") 116,\n (" + v4t2 + ") 101,\n (" + v4t2 +
") 115,\n (" + v4t2 + ") 116,\n (" + v4t2 + ") 52,\n (" + v4t2 +
") 0\n}"
addDumpTest(v4, "("+v4t+") "+v4s+"\n")
// C uint8_t array.
v5, v5l, v5c := testdata.GetCgoUint8tArray()
v5Len := fmt.Sprintf("%d", v5l)
v5Cap := fmt.Sprintf("%d", v5c)
v5t := "[6]testdata._Ctype_uint8_t"
v5s := "(len=" + v5Len + " cap=" + v5Cap + ") " +
"{\n 00000000 74 65 73 74 35 00 " +
" |test5.|\n}"
addDumpTest(v5, "("+v5t+") "+v5s+"\n")
// C typedefed unsigned char array.
v6, v6l, v6c := testdata.GetCgoTypdefedUnsignedCharArray()
v6Len := fmt.Sprintf("%d", v6l)
v6Cap := fmt.Sprintf("%d", v6c)
v6t := "[6]testdata._Ctype_custom_uchar_t"
v6s := "(len=" + v6Len + " cap=" + v6Cap + ") " +
"{\n 00000000 74 65 73 74 36 00 " +
" |test6.|\n}"
addDumpTest(v6, "("+v6t+") "+v6s+"\n")
}

View File

@ -1,26 +0,0 @@
// Copyright (c) 2013 Dave Collins <dave@davec.name>
//
// Permission to use, copy, modify, and distribute this software for any
// purpose with or without fee is hereby granted, provided that the above
// copyright notice and this permission notice appear in all copies.
//
// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
// NOTE: Due to the following build constraints, this file will only be compiled
// when either cgo is not supported or "-tags testcgo" is not added to the go
// test command line. This file intentionally does not setup any cgo tests in
// this scenario.
// +build !cgo !testcgo
package spew_test
func addCgoDumpTests() {
// Don't add any tests for cgo since this file is only compiled when
// there should not be any cgo tests.
}

View File

@ -1,226 +0,0 @@
/*
* Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
package spew_test
import (
"fmt"
"github.com/davecgh/go-spew/spew"
)
type Flag int
const (
flagOne Flag = iota
flagTwo
)
var flagStrings = map[Flag]string{
flagOne: "flagOne",
flagTwo: "flagTwo",
}
func (f Flag) String() string {
if s, ok := flagStrings[f]; ok {
return s
}
return fmt.Sprintf("Unknown flag (%d)", int(f))
}
type Bar struct {
data uintptr
}
type Foo struct {
unexportedField Bar
ExportedField map[interface{}]interface{}
}
// This example demonstrates how to use Dump to dump variables to stdout.
func ExampleDump() {
// The following package level declarations are assumed for this example:
/*
type Flag int
const (
flagOne Flag = iota
flagTwo
)
var flagStrings = map[Flag]string{
flagOne: "flagOne",
flagTwo: "flagTwo",
}
func (f Flag) String() string {
if s, ok := flagStrings[f]; ok {
return s
}
return fmt.Sprintf("Unknown flag (%d)", int(f))
}
type Bar struct {
data uintptr
}
type Foo struct {
unexportedField Bar
ExportedField map[interface{}]interface{}
}
*/
// Setup some sample data structures for the example.
bar := Bar{uintptr(0)}
s1 := Foo{bar, map[interface{}]interface{}{"one": true}}
f := Flag(5)
b := []byte{
0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18,
0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, 0x20,
0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28,
0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, 0x30,
0x31, 0x32,
}
// Dump!
spew.Dump(s1, f, b)
// Output:
// (spew_test.Foo) {
// unexportedField: (spew_test.Bar) {
// data: (uintptr) <nil>
// },
// ExportedField: (map[interface {}]interface {}) (len=1) {
// (string) (len=3) "one": (bool) true
// }
// }
// (spew_test.Flag) Unknown flag (5)
// ([]uint8) (len=34 cap=34) {
// 00000000 11 12 13 14 15 16 17 18 19 1a 1b 1c 1d 1e 1f 20 |............... |
// 00000010 21 22 23 24 25 26 27 28 29 2a 2b 2c 2d 2e 2f 30 |!"#$%&'()*+,-./0|
// 00000020 31 32 |12|
// }
//
}
// This example demonstrates how to use Printf to display a variable with a
// format string and inline formatting.
func ExamplePrintf() {
// Create a double pointer to a uint8.
ui8 := uint8(5)
pui8 := &ui8
ppui8 := &pui8
// Create a circular data type.
type circular struct {
ui8 uint8
c *circular
}
c := circular{ui8: 1}
c.c = &c
// Print!
spew.Printf("ppui8: %v\n", ppui8)
spew.Printf("circular: %v\n", c)
// Output:
// ppui8: <**>5
// circular: {1 <*>{1 <*><shown>}}
}
// This example demonstrates how to use a ConfigState.
func ExampleConfigState() {
// Modify the indent level of the ConfigState only. The global
// configuration is not modified.
scs := spew.ConfigState{Indent: "\t"}
// Output using the ConfigState instance.
v := map[string]int{"one": 1}
scs.Printf("v: %v\n", v)
scs.Dump(v)
// Output:
// v: map[one:1]
// (map[string]int) (len=1) {
// (string) (len=3) "one": (int) 1
// }
}
// This example demonstrates how to use ConfigState.Dump to dump variables to
// stdout.
func ExampleConfigState_Dump() {
// See the top-level Dump example for details on the types used in this
// example.
// Create two ConfigState instances with different indentation.
scs := spew.ConfigState{Indent: "\t"}
scs2 := spew.ConfigState{Indent: " "}
// Setup some sample data structures for the example.
bar := Bar{uintptr(0)}
s1 := Foo{bar, map[interface{}]interface{}{"one": true}}
// Dump using the ConfigState instances.
scs.Dump(s1)
scs2.Dump(s1)
// Output:
// (spew_test.Foo) {
// unexportedField: (spew_test.Bar) {
// data: (uintptr) <nil>
// },
// ExportedField: (map[interface {}]interface {}) (len=1) {
// (string) (len=3) "one": (bool) true
// }
// }
// (spew_test.Foo) {
// unexportedField: (spew_test.Bar) {
// data: (uintptr) <nil>
// },
// ExportedField: (map[interface {}]interface {}) (len=1) {
// (string) (len=3) "one": (bool) true
// }
// }
//
}
// This example demonstrates how to use ConfigState.Printf to display a variable
// with a format string and inline formatting.
func ExampleConfigState_Printf() {
// See the top-level Dump example for details on the types used in this
// example.
// Create two ConfigState instances and modify the method handling of the
// first ConfigState only.
scs := spew.NewDefaultConfig()
scs2 := spew.NewDefaultConfig()
scs.DisableMethods = true
// Alternatively
// scs := spew.ConfigState{Indent: " ", DisableMethods: true}
// scs2 := spew.ConfigState{Indent: " "}
// This is of type Flag which implements a Stringer and has raw value 1.
f := flagTwo
// Dump using the ConfigState instances.
scs.Printf("f: %v\n", f)
scs2.Printf("f: %v\n", f)
// Output:
// f: 1
// f: flagTwo
}

View File

@ -1,419 +0,0 @@
/*
* Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
package spew
import (
"bytes"
"fmt"
"reflect"
"strconv"
"strings"
)
// supportedFlags is a list of all the character flags supported by fmt package.
const supportedFlags = "0-+# "
// formatState implements the fmt.Formatter interface and contains information
// about the state of a formatting operation. The NewFormatter function can
// be used to get a new Formatter which can be used directly as arguments
// in standard fmt package printing calls.
type formatState struct {
value interface{}
fs fmt.State
depth int
pointers map[uintptr]int
ignoreNextType bool
cs *ConfigState
}
// buildDefaultFormat recreates the original format string without precision
// and width information to pass in to fmt.Sprintf in the case of an
// unrecognized type. Unless new types are added to the language, this
// function won't ever be called.
func (f *formatState) buildDefaultFormat() (format string) {
buf := bytes.NewBuffer(percentBytes)
for _, flag := range supportedFlags {
if f.fs.Flag(int(flag)) {
buf.WriteRune(flag)
}
}
buf.WriteRune('v')
format = buf.String()
return format
}
// constructOrigFormat recreates the original format string including precision
// and width information to pass along to the standard fmt package. This allows
// automatic deferral of all format strings this package doesn't support.
func (f *formatState) constructOrigFormat(verb rune) (format string) {
buf := bytes.NewBuffer(percentBytes)
for _, flag := range supportedFlags {
if f.fs.Flag(int(flag)) {
buf.WriteRune(flag)
}
}
if width, ok := f.fs.Width(); ok {
buf.WriteString(strconv.Itoa(width))
}
if precision, ok := f.fs.Precision(); ok {
buf.Write(precisionBytes)
buf.WriteString(strconv.Itoa(precision))
}
buf.WriteRune(verb)
format = buf.String()
return format
}
// unpackValue returns values inside of non-nil interfaces when possible and
// ensures that types for values which have been unpacked from an interface
// are displayed when the show types flag is also set.
// This is useful for data types like structs, arrays, slices, and maps which
// can contain varying types packed inside an interface.
func (f *formatState) unpackValue(v reflect.Value) reflect.Value {
if v.Kind() == reflect.Interface {
f.ignoreNextType = false
if !v.IsNil() {
v = v.Elem()
}
}
return v
}
// formatPtr handles formatting of pointers by indirecting them as necessary.
func (f *formatState) formatPtr(v reflect.Value) {
// Display nil if top level pointer is nil.
showTypes := f.fs.Flag('#')
if v.IsNil() && (!showTypes || f.ignoreNextType) {
f.fs.Write(nilAngleBytes)
return
}
// Remove pointers at or below the current depth from map used to detect
// circular refs.
for k, depth := range f.pointers {
if depth >= f.depth {
delete(f.pointers, k)
}
}
// Keep list of all dereferenced pointers to possibly show later.
pointerChain := make([]uintptr, 0)
// Figure out how many levels of indirection there are by dereferencing
// pointers and unpacking interfaces down the chain while detecting circular
// references.
nilFound := false
cycleFound := false
indirects := 0
ve := v
for ve.Kind() == reflect.Ptr {
if ve.IsNil() {
nilFound = true
break
}
indirects++
addr := ve.Pointer()
pointerChain = append(pointerChain, addr)
if pd, ok := f.pointers[addr]; ok && pd < f.depth {
cycleFound = true
indirects--
break
}
f.pointers[addr] = f.depth
ve = ve.Elem()
if ve.Kind() == reflect.Interface {
if ve.IsNil() {
nilFound = true
break
}
ve = ve.Elem()
}
}
// Display type or indirection level depending on flags.
if showTypes && !f.ignoreNextType {
f.fs.Write(openParenBytes)
f.fs.Write(bytes.Repeat(asteriskBytes, indirects))
f.fs.Write([]byte(ve.Type().String()))
f.fs.Write(closeParenBytes)
} else {
if nilFound || cycleFound {
indirects += strings.Count(ve.Type().String(), "*")
}
f.fs.Write(openAngleBytes)
f.fs.Write([]byte(strings.Repeat("*", indirects)))
f.fs.Write(closeAngleBytes)
}
// Display pointer information depending on flags.
if f.fs.Flag('+') && (len(pointerChain) > 0) {
f.fs.Write(openParenBytes)
for i, addr := range pointerChain {
if i > 0 {
f.fs.Write(pointerChainBytes)
}
printHexPtr(f.fs, addr)
}
f.fs.Write(closeParenBytes)
}
// Display dereferenced value.
switch {
case nilFound:
f.fs.Write(nilAngleBytes)
case cycleFound:
f.fs.Write(circularShortBytes)
default:
f.ignoreNextType = true
f.format(ve)
}
}
// format is the main workhorse for providing the Formatter interface. It
// uses the passed reflect value to figure out what kind of object we are
// dealing with and formats it appropriately. It is a recursive function,
// however circular data structures are detected and handled properly.
func (f *formatState) format(v reflect.Value) {
// Handle invalid reflect values immediately.
kind := v.Kind()
if kind == reflect.Invalid {
f.fs.Write(invalidAngleBytes)
return
}
// Handle pointers specially.
if kind == reflect.Ptr {
f.formatPtr(v)
return
}
// Print type information unless already handled elsewhere.
if !f.ignoreNextType && f.fs.Flag('#') {
f.fs.Write(openParenBytes)
f.fs.Write([]byte(v.Type().String()))
f.fs.Write(closeParenBytes)
}
f.ignoreNextType = false
// Call Stringer/error interfaces if they exist and the handle methods
// flag is enabled.
if !f.cs.DisableMethods {
if (kind != reflect.Invalid) && (kind != reflect.Interface) {
if handled := handleMethods(f.cs, f.fs, v); handled {
return
}
}
}
switch kind {
case reflect.Invalid:
// Do nothing. We should never get here since invalid has already
// been handled above.
case reflect.Bool:
printBool(f.fs, v.Bool())
case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
printInt(f.fs, v.Int(), 10)
case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:
printUint(f.fs, v.Uint(), 10)
case reflect.Float32:
printFloat(f.fs, v.Float(), 32)
case reflect.Float64:
printFloat(f.fs, v.Float(), 64)
case reflect.Complex64:
printComplex(f.fs, v.Complex(), 32)
case reflect.Complex128:
printComplex(f.fs, v.Complex(), 64)
case reflect.Slice:
if v.IsNil() {
f.fs.Write(nilAngleBytes)
break
}
fallthrough
case reflect.Array:
f.fs.Write(openBracketBytes)
f.depth++
if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) {
f.fs.Write(maxShortBytes)
} else {
numEntries := v.Len()
for i := 0; i < numEntries; i++ {
if i > 0 {
f.fs.Write(spaceBytes)
}
f.ignoreNextType = true
f.format(f.unpackValue(v.Index(i)))
}
}
f.depth--
f.fs.Write(closeBracketBytes)
case reflect.String:
f.fs.Write([]byte(v.String()))
case reflect.Interface:
// The only time we should get here is for nil interfaces due to
// unpackValue calls.
if v.IsNil() {
f.fs.Write(nilAngleBytes)
}
case reflect.Ptr:
// Do nothing. We should never get here since pointers have already
// been handled above.
case reflect.Map:
// nil maps should be indicated as different from empty maps
if v.IsNil() {
f.fs.Write(nilAngleBytes)
break
}
f.fs.Write(openMapBytes)
f.depth++
if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) {
f.fs.Write(maxShortBytes)
} else {
keys := v.MapKeys()
if f.cs.SortKeys {
sortValues(keys, f.cs)
}
for i, key := range keys {
if i > 0 {
f.fs.Write(spaceBytes)
}
f.ignoreNextType = true
f.format(f.unpackValue(key))
f.fs.Write(colonBytes)
f.ignoreNextType = true
f.format(f.unpackValue(v.MapIndex(key)))
}
}
f.depth--
f.fs.Write(closeMapBytes)
case reflect.Struct:
numFields := v.NumField()
f.fs.Write(openBraceBytes)
f.depth++
if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) {
f.fs.Write(maxShortBytes)
} else {
vt := v.Type()
for i := 0; i < numFields; i++ {
if i > 0 {
f.fs.Write(spaceBytes)
}
vtf := vt.Field(i)
if f.fs.Flag('+') || f.fs.Flag('#') {
f.fs.Write([]byte(vtf.Name))
f.fs.Write(colonBytes)
}
f.format(f.unpackValue(v.Field(i)))
}
}
f.depth--
f.fs.Write(closeBraceBytes)
case reflect.Uintptr:
printHexPtr(f.fs, uintptr(v.Uint()))
case reflect.UnsafePointer, reflect.Chan, reflect.Func:
printHexPtr(f.fs, v.Pointer())
// There were not any other types at the time this code was written, but
// fall back to letting the default fmt package handle it if any get added.
default:
format := f.buildDefaultFormat()
if v.CanInterface() {
fmt.Fprintf(f.fs, format, v.Interface())
} else {
fmt.Fprintf(f.fs, format, v.String())
}
}
}
// Format satisfies the fmt.Formatter interface. See NewFormatter for usage
// details.
func (f *formatState) Format(fs fmt.State, verb rune) {
f.fs = fs
// Use standard formatting for verbs that are not v.
if verb != 'v' {
format := f.constructOrigFormat(verb)
fmt.Fprintf(fs, format, f.value)
return
}
if f.value == nil {
if fs.Flag('#') {
fs.Write(interfaceBytes)
}
fs.Write(nilAngleBytes)
return
}
f.format(reflect.ValueOf(f.value))
}
// newFormatter is a helper function to consolidate the logic from the various
// public methods which take varying config states.
func newFormatter(cs *ConfigState, v interface{}) fmt.Formatter {
fs := &formatState{value: v, cs: cs}
fs.pointers = make(map[uintptr]int)
return fs
}
/*
NewFormatter returns a custom formatter that satisfies the fmt.Formatter
interface. As a result, it integrates cleanly with standard fmt package
printing functions. The formatter is useful for inline printing of smaller data
types similar to the standard %v format specifier.
The custom formatter only responds to the %v (most compact), %+v (adds pointer
addresses), %#v (adds types), or %#+v (adds types and pointer addresses) verb
combinations. Any other verbs such as %x and %q will be sent to the
standard fmt package for formatting. In addition, the custom formatter ignores
the width and precision arguments (however they will still work on the format
specifiers not handled by the custom formatter).
Typically this function shouldn't be called directly. It is much easier to make
use of the custom formatter by calling one of the convenience functions such as
Printf, Println, or Fprintf.
*/
func NewFormatter(v interface{}) fmt.Formatter {
return newFormatter(&Config, v)
}
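// A minimal usage sketch (not part of the upstream file); the point struct and
// values are hypothetical:
//
//	type point struct{ X, Y int }
//	p := &point{X: 1, Y: 2}
//	fmt.Printf("%v\n", spew.NewFormatter(p))  // most compact
//	fmt.Printf("%+v\n", spew.NewFormatter(p)) // adds pointer addresses
//	fmt.Printf("%#v\n", spew.NewFormatter(p)) // adds types
//
// The package-level convenience functions (Printf, Println, and so on) apply
// this wrapping to their arguments automatically.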

File diff suppressed because it is too large

View File

@ -1,87 +0,0 @@
/*
* Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
/*
This test file is part of the spew package rather than the spew_test
package because it needs access to internals to properly test certain cases
which are not possible via the public interface since they should never happen.
*/
package spew
import (
"bytes"
"reflect"
"testing"
)
// dummyFmtState implements a fake fmt.State to use for testing invalid
// reflect.Value handling. This is necessary because the fmt package catches
// invalid values before invoking the formatter on them.
type dummyFmtState struct {
bytes.Buffer
}
func (dfs *dummyFmtState) Flag(f int) bool {
if f == int('+') {
return true
}
return false
}
func (dfs *dummyFmtState) Precision() (int, bool) {
return 0, false
}
func (dfs *dummyFmtState) Width() (int, bool) {
return 0, false
}
// TestInvalidReflectValue ensures the dump and formatter code handles an
// invalid reflect value properly. This needs access to internal state since it
// should never happen in real code and therefore can't be tested via the public
// API.
func TestInvalidReflectValue(t *testing.T) {
i := 1
// Dump invalid reflect value.
v := new(reflect.Value)
buf := new(bytes.Buffer)
d := dumpState{w: buf, cs: &Config}
d.dump(*v)
s := buf.String()
want := "<invalid>"
if s != want {
t.Errorf("InvalidReflectValue #%d\n got: %s want: %s", i, s, want)
}
i++
// Formatter invalid reflect value.
buf2 := new(dummyFmtState)
f := formatState{value: *v, cs: &Config, fs: buf2}
f.format(*v)
s = buf2.String()
want = "<invalid>"
if s != want {
t.Errorf("InvalidReflectValue #%d got: %s want: %s", i, s, want)
}
}
// SortValues makes the internal sortValues function available to the test
// package.
func SortValues(values []reflect.Value, cs *ConfigState) {
sortValues(values, cs)
}

View File

@ -1,102 +0,0 @@
// Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
// Permission to use, copy, modify, and distribute this software for any
// purpose with or without fee is hereby granted, provided that the above
// copyright notice and this permission notice appear in all copies.
// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
// NOTE: Due to the following build constraints, this file will only be compiled
// when the code is not running on Google App Engine, compiled by GopherJS, and
// "-tags safe" is not added to the go build command line. The "disableunsafe"
// tag is deprecated and thus should not be used.
// +build !js,!appengine,!safe,!disableunsafe
/*
This test file is part of the spew package rather than the spew_test
package because it needs access to internals to properly test certain cases
which are not possible via the public interface since they should never happen.
*/
package spew
import (
"bytes"
"reflect"
"testing"
"unsafe"
)
// changeKind uses unsafe to intentionally change the kind of a reflect.Value to
// the maximum kind value which does not exist. This is needed to test the
// fallback code which punts to the standard fmt library for new types that
// might get added to the language.
func changeKind(v *reflect.Value, readOnly bool) {
rvf := (*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(v)) + offsetFlag))
*rvf = *rvf | ((1<<flagKindWidth - 1) << flagKindShift)
if readOnly {
*rvf |= flagRO
} else {
*rvf &= ^uintptr(flagRO)
}
}
// TestAddedReflectValue tests functionality of the dump and formatter code which
// falls back to the standard fmt library for new types that might get added to
// the language.
func TestAddedReflectValue(t *testing.T) {
i := 1
// Dump using a reflect.Value that is exported.
v := reflect.ValueOf(int8(5))
changeKind(&v, false)
buf := new(bytes.Buffer)
d := dumpState{w: buf, cs: &Config}
d.dump(v)
s := buf.String()
want := "(int8) 5"
if s != want {
t.Errorf("TestAddedReflectValue #%d\n got: %s want: %s", i, s, want)
}
i++
// Dump using a reflect.Value that is not exported.
changeKind(&v, true)
buf.Reset()
d.dump(v)
s = buf.String()
want = "(int8) <int8 Value>"
if s != want {
t.Errorf("TestAddedReflectValue #%d\n got: %s want: %s", i, s, want)
}
i++
// Formatter using a reflect.Value that is exported.
changeKind(&v, false)
buf2 := new(dummyFmtState)
f := formatState{value: v, cs: &Config, fs: buf2}
f.format(v)
s = buf2.String()
want = "5"
if s != want {
t.Errorf("TestAddedReflectValue #%d got: %s want: %s", i, s, want)
}
i++
// Formatter using a reflect.Value that is not exported.
changeKind(&v, true)
buf2.Reset()
f = formatState{value: v, cs: &Config, fs: buf2}
f.format(v)
s = buf2.String()
want = "<int8 Value>"
if s != want {
t.Errorf("TestAddedReflectValue #%d got: %s want: %s", i, s, want)
}
}

View File

@ -1,148 +0,0 @@
/*
* Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
package spew
import (
"fmt"
"io"
)
// Errorf is a wrapper for fmt.Errorf that treats each argument as if it were
// passed with a default Formatter interface returned by NewFormatter. It
// returns the formatted string as a value that satisfies error. See
// NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
// fmt.Errorf(format, spew.NewFormatter(a), spew.NewFormatter(b))
func Errorf(format string, a ...interface{}) (err error) {
return fmt.Errorf(format, convertArgs(a)...)
}
// Fprint is a wrapper for fmt.Fprint that treats each argument as if it were
// passed with a default Formatter interface returned by NewFormatter. It
// returns the number of bytes written and any write error encountered. See
// NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
// fmt.Fprint(w, spew.NewFormatter(a), spew.NewFormatter(b))
func Fprint(w io.Writer, a ...interface{}) (n int, err error) {
return fmt.Fprint(w, convertArgs(a)...)
}
// Fprintf is a wrapper for fmt.Fprintf that treats each argument as if it were
// passed with a default Formatter interface returned by NewFormatter. It
// returns the number of bytes written and any write error encountered. See
// NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
// fmt.Fprintf(w, format, spew.NewFormatter(a), spew.NewFormatter(b))
func Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) {
return fmt.Fprintf(w, format, convertArgs(a)...)
}
// Fprintln is a wrapper for fmt.Fprintln that treats each argument as if it
// were passed with a default Formatter interface returned by NewFormatter. See
// NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
// fmt.Fprintln(w, spew.NewFormatter(a), spew.NewFormatter(b))
func Fprintln(w io.Writer, a ...interface{}) (n int, err error) {
return fmt.Fprintln(w, convertArgs(a)...)
}
// Print is a wrapper for fmt.Print that treats each argument as if it were
// passed with a default Formatter interface returned by NewFormatter. It
// returns the number of bytes written and any write error encountered. See
// NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
// fmt.Print(spew.NewFormatter(a), spew.NewFormatter(b))
func Print(a ...interface{}) (n int, err error) {
return fmt.Print(convertArgs(a)...)
}
// Printf is a wrapper for fmt.Printf that treats each argument as if it were
// passed with a default Formatter interface returned by NewFormatter. It
// returns the number of bytes written and any write error encountered. See
// NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
// fmt.Printf(format, spew.NewFormatter(a), spew.NewFormatter(b))
func Printf(format string, a ...interface{}) (n int, err error) {
return fmt.Printf(format, convertArgs(a)...)
}
// Println is a wrapper for fmt.Println that treats each argument as if it were
// passed with a default Formatter interface returned by NewFormatter. It
// returns the number of bytes written and any write error encountered. See
// NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
// fmt.Println(spew.NewFormatter(a), spew.NewFormatter(b))
func Println(a ...interface{}) (n int, err error) {
return fmt.Println(convertArgs(a)...)
}
// Sprint is a wrapper for fmt.Sprint that treats each argument as if it were
// passed with a default Formatter interface returned by NewFormatter. It
// returns the resulting string. See NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
// fmt.Sprint(spew.NewFormatter(a), spew.NewFormatter(b))
func Sprint(a ...interface{}) string {
return fmt.Sprint(convertArgs(a)...)
}
// Sprintf is a wrapper for fmt.Sprintf that treats each argument as if it were
// passed with a default Formatter interface returned by NewFormatter. It
// returns the resulting string. See NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
// fmt.Sprintf(format, spew.NewFormatter(a), spew.NewFormatter(b))
func Sprintf(format string, a ...interface{}) string {
return fmt.Sprintf(format, convertArgs(a)...)
}
// Sprintln is a wrapper for fmt.Sprintln that treats each argument as if it
// were passed with a default Formatter interface returned by NewFormatter. It
// returns the resulting string. See NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
// fmt.Sprintln(spew.NewFormatter(a), spew.NewFormatter(b))
func Sprintln(a ...interface{}) string {
return fmt.Sprintln(convertArgs(a)...)
}
// convertArgs accepts a slice of arguments and returns a slice of the same
// length with each argument converted to a default spew Formatter interface.
func convertArgs(args []interface{}) (formatters []interface{}) {
formatters = make([]interface{}, len(args))
for index, arg := range args {
formatters[index] = NewFormatter(arg)
}
return formatters
}
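// Illustrative note (not part of the upstream file; a and b are hypothetical
// values): every wrapper above funnels its arguments through convertArgs, so
//
//	spew.Printf("%v %v\n", a, b)
//
// behaves the same as
//
//	fmt.Printf("%v %v\n", spew.NewFormatter(a), spew.NewFormatter(b))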

View File

@ -1,320 +0,0 @@
/*
* Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
package spew_test
import (
"bytes"
"fmt"
"io/ioutil"
"os"
"testing"
"github.com/davecgh/go-spew/spew"
)
// spewFunc is used to identify which public function of the spew package or
// ConfigState a test applies to.
type spewFunc int
const (
fCSFdump spewFunc = iota
fCSFprint
fCSFprintf
fCSFprintln
fCSPrint
fCSPrintln
fCSSdump
fCSSprint
fCSSprintf
fCSSprintln
fCSErrorf
fCSNewFormatter
fErrorf
fFprint
fFprintln
fPrint
fPrintln
fSdump
fSprint
fSprintf
fSprintln
)
// Map of spewFunc values to names for pretty printing.
var spewFuncStrings = map[spewFunc]string{
fCSFdump: "ConfigState.Fdump",
fCSFprint: "ConfigState.Fprint",
fCSFprintf: "ConfigState.Fprintf",
fCSFprintln: "ConfigState.Fprintln",
fCSSdump: "ConfigState.Sdump",
fCSPrint: "ConfigState.Print",
fCSPrintln: "ConfigState.Println",
fCSSprint: "ConfigState.Sprint",
fCSSprintf: "ConfigState.Sprintf",
fCSSprintln: "ConfigState.Sprintln",
fCSErrorf: "ConfigState.Errorf",
fCSNewFormatter: "ConfigState.NewFormatter",
fErrorf: "spew.Errorf",
fFprint: "spew.Fprint",
fFprintln: "spew.Fprintln",
fPrint: "spew.Print",
fPrintln: "spew.Println",
fSdump: "spew.Sdump",
fSprint: "spew.Sprint",
fSprintf: "spew.Sprintf",
fSprintln: "spew.Sprintln",
}
func (f spewFunc) String() string {
if s, ok := spewFuncStrings[f]; ok {
return s
}
return fmt.Sprintf("Unknown spewFunc (%d)", int(f))
}
// spewTest is used to describe a test to be performed against the public
// functions of the spew package or ConfigState.
type spewTest struct {
cs *spew.ConfigState
f spewFunc
format string
in interface{}
want string
}
// spewTests houses the tests to be performed against the public functions of
// the spew package and ConfigState.
//
// These tests are only intended to ensure the public functions are exercised
// and are intentionally not exhaustive of types. The exhaustive type
// tests are handled in the dump and format tests.
var spewTests []spewTest
// redirStdout is a helper function to return the standard output from f as a
// byte slice.
func redirStdout(f func()) ([]byte, error) {
tempFile, err := ioutil.TempFile("", "ss-test")
if err != nil {
return nil, err
}
fileName := tempFile.Name()
defer os.Remove(fileName) // Ignore error
origStdout := os.Stdout
os.Stdout = tempFile
f()
os.Stdout = origStdout
tempFile.Close()
return ioutil.ReadFile(fileName)
}
func initSpewTests() {
// Config states with various settings.
scsDefault := spew.NewDefaultConfig()
scsNoMethods := &spew.ConfigState{Indent: " ", DisableMethods: true}
scsNoPmethods := &spew.ConfigState{Indent: " ", DisablePointerMethods: true}
scsMaxDepth := &spew.ConfigState{Indent: " ", MaxDepth: 1}
scsContinue := &spew.ConfigState{Indent: " ", ContinueOnMethod: true}
scsNoPtrAddr := &spew.ConfigState{DisablePointerAddresses: true}
scsNoCap := &spew.ConfigState{DisableCapacities: true}
// Variables for tests on types which implement Stringer interface with and
// without a pointer receiver.
ts := stringer("test")
tps := pstringer("test")
type ptrTester struct {
s *struct{}
}
tptr := &ptrTester{s: &struct{}{}}
// depthTester is used to test max depth handling for structs, arrays, slices,
// and maps.
type depthTester struct {
ic indirCir1
arr [1]string
slice []string
m map[string]int
}
dt := depthTester{indirCir1{nil}, [1]string{"arr"}, []string{"slice"},
map[string]int{"one": 1}}
// Variable for tests on types which implement error interface.
te := customError(10)
spewTests = []spewTest{
{scsDefault, fCSFdump, "", int8(127), "(int8) 127\n"},
{scsDefault, fCSFprint, "", int16(32767), "32767"},
{scsDefault, fCSFprintf, "%v", int32(2147483647), "2147483647"},
{scsDefault, fCSFprintln, "", int(2147483647), "2147483647\n"},
{scsDefault, fCSPrint, "", int64(9223372036854775807), "9223372036854775807"},
{scsDefault, fCSPrintln, "", uint8(255), "255\n"},
{scsDefault, fCSSdump, "", uint8(64), "(uint8) 64\n"},
{scsDefault, fCSSprint, "", complex(1, 2), "(1+2i)"},
{scsDefault, fCSSprintf, "%v", complex(float32(3), 4), "(3+4i)"},
{scsDefault, fCSSprintln, "", complex(float64(5), 6), "(5+6i)\n"},
{scsDefault, fCSErrorf, "%#v", uint16(65535), "(uint16)65535"},
{scsDefault, fCSNewFormatter, "%v", uint32(4294967295), "4294967295"},
{scsDefault, fErrorf, "%v", uint64(18446744073709551615), "18446744073709551615"},
{scsDefault, fFprint, "", float32(3.14), "3.14"},
{scsDefault, fFprintln, "", float64(6.28), "6.28\n"},
{scsDefault, fPrint, "", true, "true"},
{scsDefault, fPrintln, "", false, "false\n"},
{scsDefault, fSdump, "", complex(-10, -20), "(complex128) (-10-20i)\n"},
{scsDefault, fSprint, "", complex(-1, -2), "(-1-2i)"},
{scsDefault, fSprintf, "%v", complex(float32(-3), -4), "(-3-4i)"},
{scsDefault, fSprintln, "", complex(float64(-5), -6), "(-5-6i)\n"},
{scsNoMethods, fCSFprint, "", ts, "test"},
{scsNoMethods, fCSFprint, "", &ts, "<*>test"},
{scsNoMethods, fCSFprint, "", tps, "test"},
{scsNoMethods, fCSFprint, "", &tps, "<*>test"},
{scsNoPmethods, fCSFprint, "", ts, "stringer test"},
{scsNoPmethods, fCSFprint, "", &ts, "<*>stringer test"},
{scsNoPmethods, fCSFprint, "", tps, "test"},
{scsNoPmethods, fCSFprint, "", &tps, "<*>stringer test"},
{scsMaxDepth, fCSFprint, "", dt, "{{<max>} [<max>] [<max>] map[<max>]}"},
{scsMaxDepth, fCSFdump, "", dt, "(spew_test.depthTester) {\n" +
" ic: (spew_test.indirCir1) {\n <max depth reached>\n },\n" +
" arr: ([1]string) (len=1 cap=1) {\n <max depth reached>\n },\n" +
" slice: ([]string) (len=1 cap=1) {\n <max depth reached>\n },\n" +
" m: (map[string]int) (len=1) {\n <max depth reached>\n }\n}\n"},
{scsContinue, fCSFprint, "", ts, "(stringer test) test"},
{scsContinue, fCSFdump, "", ts, "(spew_test.stringer) " +
"(len=4) (stringer test) \"test\"\n"},
{scsContinue, fCSFprint, "", te, "(error: 10) 10"},
{scsContinue, fCSFdump, "", te, "(spew_test.customError) " +
"(error: 10) 10\n"},
{scsNoPtrAddr, fCSFprint, "", tptr, "<*>{<*>{}}"},
{scsNoPtrAddr, fCSSdump, "", tptr, "(*spew_test.ptrTester)({\ns: (*struct {})({\n})\n})\n"},
{scsNoCap, fCSSdump, "", make([]string, 0, 10), "([]string) {\n}\n"},
{scsNoCap, fCSSdump, "", make([]string, 1, 10), "([]string) (len=1) {\n(string) \"\"\n}\n"},
}
}
// TestSpew executes all of the tests described by spewTests.
func TestSpew(t *testing.T) {
initSpewTests()
t.Logf("Running %d tests", len(spewTests))
for i, test := range spewTests {
buf := new(bytes.Buffer)
switch test.f {
case fCSFdump:
test.cs.Fdump(buf, test.in)
case fCSFprint:
test.cs.Fprint(buf, test.in)
case fCSFprintf:
test.cs.Fprintf(buf, test.format, test.in)
case fCSFprintln:
test.cs.Fprintln(buf, test.in)
case fCSPrint:
b, err := redirStdout(func() { test.cs.Print(test.in) })
if err != nil {
t.Errorf("%v #%d %v", test.f, i, err)
continue
}
buf.Write(b)
case fCSPrintln:
b, err := redirStdout(func() { test.cs.Println(test.in) })
if err != nil {
t.Errorf("%v #%d %v", test.f, i, err)
continue
}
buf.Write(b)
case fCSSdump:
str := test.cs.Sdump(test.in)
buf.WriteString(str)
case fCSSprint:
str := test.cs.Sprint(test.in)
buf.WriteString(str)
case fCSSprintf:
str := test.cs.Sprintf(test.format, test.in)
buf.WriteString(str)
case fCSSprintln:
str := test.cs.Sprintln(test.in)
buf.WriteString(str)
case fCSErrorf:
err := test.cs.Errorf(test.format, test.in)
buf.WriteString(err.Error())
case fCSNewFormatter:
fmt.Fprintf(buf, test.format, test.cs.NewFormatter(test.in))
case fErrorf:
err := spew.Errorf(test.format, test.in)
buf.WriteString(err.Error())
case fFprint:
spew.Fprint(buf, test.in)
case fFprintln:
spew.Fprintln(buf, test.in)
case fPrint:
b, err := redirStdout(func() { spew.Print(test.in) })
if err != nil {
t.Errorf("%v #%d %v", test.f, i, err)
continue
}
buf.Write(b)
case fPrintln:
b, err := redirStdout(func() { spew.Println(test.in) })
if err != nil {
t.Errorf("%v #%d %v", test.f, i, err)
continue
}
buf.Write(b)
case fSdump:
str := spew.Sdump(test.in)
buf.WriteString(str)
case fSprint:
str := spew.Sprint(test.in)
buf.WriteString(str)
case fSprintf:
str := spew.Sprintf(test.format, test.in)
buf.WriteString(str)
case fSprintln:
str := spew.Sprintln(test.in)
buf.WriteString(str)
default:
t.Errorf("%v #%d unrecognized function", test.f, i)
continue
}
s := buf.String()
if test.want != s {
t.Errorf("ConfigState #%d\n got: %s want: %s", i, s, test.want)
continue
}
}
}

View File

@ -1,82 +0,0 @@
// Copyright (c) 2013 Dave Collins <dave@davec.name>
//
// Permission to use, copy, modify, and distribute this software for any
// purpose with or without fee is hereby granted, provided that the above
// copyright notice and this permission notice appear in all copies.
//
// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
// NOTE: Due to the following build constraints, this file will only be compiled
// when both cgo is supported and "-tags testcgo" is added to the go test
// command line. This code should really only be in the dumpcgo_test.go file,
// but unfortunately Go will not allow cgo in test files, so this is a
// workaround to allow cgo types to be tested. This configuration is used
// because spew itself does not require cgo to run even though it does handle
// certain cgo types specially. Rather than forcing all clients to require cgo
// and an external C compiler just to run the tests, this scheme makes them
// optional.
// +build cgo,testcgo
package testdata
/*
#include <stdint.h>
typedef unsigned char custom_uchar_t;
char *ncp = 0;
char *cp = "test";
char ca[6] = {'t', 'e', 's', 't', '2', '\0'};
unsigned char uca[6] = {'t', 'e', 's', 't', '3', '\0'};
signed char sca[6] = {'t', 'e', 's', 't', '4', '\0'};
uint8_t ui8ta[6] = {'t', 'e', 's', 't', '5', '\0'};
custom_uchar_t tuca[6] = {'t', 'e', 's', 't', '6', '\0'};
*/
import "C"
// GetCgoNullCharPointer returns a null char pointer via cgo. This is only
// used for tests.
func GetCgoNullCharPointer() interface{} {
return C.ncp
}
// GetCgoCharPointer returns a char pointer via cgo. This is only used for
// tests.
func GetCgoCharPointer() interface{} {
return C.cp
}
// GetCgoCharArray returns a char array via cgo and the array's len and cap.
// This is only used for tests.
func GetCgoCharArray() (interface{}, int, int) {
return C.ca, len(C.ca), cap(C.ca)
}
// GetCgoUnsignedCharArray returns an unsigned char array via cgo and the
// array's len and cap. This is only used for tests.
func GetCgoUnsignedCharArray() (interface{}, int, int) {
return C.uca, len(C.uca), cap(C.uca)
}
// GetCgoSignedCharArray returns a signed char array via cgo and the array's len
// and cap. This is only used for tests.
func GetCgoSignedCharArray() (interface{}, int, int) {
return C.sca, len(C.sca), cap(C.sca)
}
// GetCgoUint8tArray returns a uint8_t array via cgo and the array's len and
// cap. This is only used for tests.
func GetCgoUint8tArray() (interface{}, int, int) {
return C.ui8ta, len(C.ui8ta), cap(C.ui8ta)
}
// GetCgoTypdefedUnsignedCharArray returns a typedefed unsigned char array via
// cgo and the array's len and cap. This is only used for tests.
func GetCgoTypdefedUnsignedCharArray() (interface{}, int, int) {
return C.tuca, len(C.tuca), cap(C.tuca)
}

View File

@ -1,87 +0,0 @@
# crc32
CRC32 hash with x64 optimizations
This package is a drop-in replacement for the standard library `hash/crc32` package. It adds SSE 4.2 optimizations on x64 platforms, for a 10x speedup.
[![Build Status](https://travis-ci.org/klauspost/crc32.svg?branch=master)](https://travis-ci.org/klauspost/crc32)
# usage
Install using `go get github.com/klauspost/crc32`. This library is based on Go 1.5 code and requires Go 1.3 or newer.
Replace `import "hash/crc32"` with `import "github.com/klauspost/crc32"` and you are good to go.
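As a rough sketch of the drop-in usage above (the input data here is only illustrative):
```
package main

import (
	"fmt"

	"github.com/klauspost/crc32"
)

func main() {
	data := []byte("hello, world")
	// IEEE polynomial, same call as in hash/crc32.
	fmt.Printf("%08x\n", crc32.ChecksumIEEE(data))
	// Castagnoli (CRC-32C) via MakeTable, also identical to the standard library API.
	fmt.Printf("%08x\n", crc32.Checksum(data, crc32.MakeTable(crc32.Castagnoli)))
}
```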
# changes
* Oct 20, 2016: Changes have been merged to upstream Go. Package updated to match.
* Dec 4, 2015: Uses the "slice-by-8" trick more extensively, which gives a 1.5 to 2.5x speedup if assembler is unavailable.
# performance
For *Go 1.7* performance is equivalent to the standard library, so if you are on Go 1.7 you can switch back to `hash/crc32`.
For IEEE tables (the most common), there is approximately a factor-of-10 speedup with the "CLMUL" (carry-less multiplication) instruction:
```
benchmark old ns/op new ns/op delta
BenchmarkCrc32KB 99955 10258 -89.74%
benchmark old MB/s new MB/s speedup
BenchmarkCrc32KB 327.83 3194.20 9.74x
```
For other tables and "CLMUL" capable machines the performance is the same as the standard library.
Here are some detailed benchmarks comparing against the Go 1.5 standard library, with and without assembler enabled.
```
Std: Standard Go 1.5 library
Crc: Indicates IEEE type CRC.
40B: Size of each slice encoded.
NoAsm: Assembler was disabled (i.e. not an AMD64 or SSE 4.2+ capable machine).
Castagnoli: Castagnoli CRC type.
BenchmarkStdCrc40B-4 10000000 158 ns/op 252.88 MB/s
BenchmarkCrc40BNoAsm-4 20000000 105 ns/op 377.38 MB/s (slice8)
BenchmarkCrc40B-4 20000000 105 ns/op 378.77 MB/s (slice8)
BenchmarkStdCrc1KB-4 500000 3604 ns/op 284.10 MB/s
BenchmarkCrc1KBNoAsm-4 1000000 1463 ns/op 699.79 MB/s (slice8)
BenchmarkCrc1KB-4 3000000 396 ns/op 2583.69 MB/s (asm)
BenchmarkStdCrc8KB-4 200000 11417 ns/op 717.48 MB/s (slice8)
BenchmarkCrc8KBNoAsm-4 200000 11317 ns/op 723.85 MB/s (slice8)
BenchmarkCrc8KB-4 500000 2919 ns/op 2805.73 MB/s (asm)
BenchmarkStdCrc32KB-4 30000 45749 ns/op 716.24 MB/s (slice8)
BenchmarkCrc32KBNoAsm-4 30000 45109 ns/op 726.42 MB/s (slice8)
BenchmarkCrc32KB-4 100000 11497 ns/op 2850.09 MB/s (asm)
BenchmarkStdNoAsmCastagnol40B-4 10000000 161 ns/op 246.94 MB/s
BenchmarkStdCastagnoli40B-4 50000000 28.4 ns/op 1410.69 MB/s (asm)
BenchmarkCastagnoli40BNoAsm-4 20000000 100 ns/op 398.01 MB/s (slice8)
BenchmarkCastagnoli40B-4 50000000 28.2 ns/op 1419.54 MB/s (asm)
BenchmarkStdNoAsmCastagnoli1KB-4 500000 3622 ns/op 282.67 MB/s
BenchmarkStdCastagnoli1KB-4 10000000 144 ns/op 7099.78 MB/s (asm)
BenchmarkCastagnoli1KBNoAsm-4 1000000 1475 ns/op 694.14 MB/s (slice8)
BenchmarkCastagnoli1KB-4 10000000 146 ns/op 6993.35 MB/s (asm)
BenchmarkStdNoAsmCastagnoli8KB-4 50000 28781 ns/op 284.63 MB/s
BenchmarkStdCastagnoli8KB-4 1000000 1029 ns/op 7957.89 MB/s (asm)
BenchmarkCastagnoli8KBNoAsm-4 200000 11410 ns/op 717.94 MB/s (slice8)
BenchmarkCastagnoli8KB-4 1000000 1000 ns/op 8188.71 MB/s (asm)
BenchmarkStdNoAsmCastagnoli32KB-4 10000 115426 ns/op 283.89 MB/s
BenchmarkStdCastagnoli32KB-4 300000 4065 ns/op 8059.13 MB/s (asm)
BenchmarkCastagnoli32KBNoAsm-4 30000 45171 ns/op 725.41 MB/s (slice8)
BenchmarkCastagnoli32KB-4 500000 4077 ns/op 8035.89 MB/s (asm)
```
The IEEE assembler optimizations have been submitted and will be part of the Go 1.6 standard library.
However, the improved use of slice-by-8 has not, but will probably be submitted for Go 1.7.
# license
Standard Go license. Changes are Copyright (c) 2015 Klaus Post under same conditions.

View File

@ -1,207 +0,0 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package crc32 implements the 32-bit cyclic redundancy check, or CRC-32,
// checksum. See http://en.wikipedia.org/wiki/Cyclic_redundancy_check for
// information.
//
// Polynomials are represented in LSB-first form, also known as reversed representation.
//
// See http://en.wikipedia.org/wiki/Mathematics_of_cyclic_redundancy_checks#Reversed_representations_and_reciprocal_polynomials
// for information.
package crc32
import (
"hash"
"sync"
)
// The size of a CRC-32 checksum in bytes.
const Size = 4
// Predefined polynomials.
const (
// IEEE is by far and away the most common CRC-32 polynomial.
// Used by ethernet (IEEE 802.3), v.42, fddi, gzip, zip, png, ...
IEEE = 0xedb88320
// Castagnoli's polynomial, used in iSCSI.
// Has better error detection characteristics than IEEE.
// http://dx.doi.org/10.1109/26.231911
Castagnoli = 0x82f63b78
// Koopman's polynomial.
// Also has better error detection characteristics than IEEE.
// http://dx.doi.org/10.1109/DSN.2002.1028931
Koopman = 0xeb31d82e
)
// Table is a 256-word table representing the polynomial for efficient processing.
type Table [256]uint32
// This file makes use of functions implemented in architecture-specific files.
// The interface that they implement is as follows:
//
// // archAvailableIEEE reports whether an architecture-specific CRC32-IEEE
// // algorithm is available.
// archAvailableIEEE() bool
//
// // archInitIEEE initializes the architecture-specific CRC32-IEEE algorithm.
// // It can only be called if archAvailableIEEE() returns true.
// archInitIEEE()
//
// // archUpdateIEEE updates the given CRC32-IEEE. It can only be called if
// // archInitIEEE() was previously called.
// archUpdateIEEE(crc uint32, p []byte) uint32
//
// // archAvailableCastagnoli reports whether an architecture-specific
// // CRC32-C algorithm is available.
// archAvailableCastagnoli() bool
//
// // archInitCastagnoli initializes the architecture-specific CRC32-C
// // algorithm. It can only be called if archAvailableCastagnoli() returns
// // true.
// archInitCastagnoli()
//
// // archUpdateCastagnoli updates the given CRC32-C. It can only be called
// // if archInitCastagnoli() was previously called.
// archUpdateCastagnoli(crc uint32, p []byte) uint32
// castagnoliTable points to a lazily initialized Table for the Castagnoli
// polynomial. MakeTable will always return this value when asked to make a
// Castagnoli table so we can compare against it to find when the caller is
// using this polynomial.
var castagnoliTable *Table
var castagnoliTable8 *slicing8Table
var castagnoliArchImpl bool
var updateCastagnoli func(crc uint32, p []byte) uint32
var castagnoliOnce sync.Once
func castagnoliInit() {
castagnoliTable = simpleMakeTable(Castagnoli)
castagnoliArchImpl = archAvailableCastagnoli()
if castagnoliArchImpl {
archInitCastagnoli()
updateCastagnoli = archUpdateCastagnoli
} else {
// Initialize the slicing-by-8 table.
castagnoliTable8 = slicingMakeTable(Castagnoli)
updateCastagnoli = func(crc uint32, p []byte) uint32 {
return slicingUpdate(crc, castagnoliTable8, p)
}
}
}
// IEEETable is the table for the IEEE polynomial.
var IEEETable = simpleMakeTable(IEEE)
// ieeeTable8 is the slicing8Table for IEEE
var ieeeTable8 *slicing8Table
var ieeeArchImpl bool
var updateIEEE func(crc uint32, p []byte) uint32
var ieeeOnce sync.Once
func ieeeInit() {
ieeeArchImpl = archAvailableIEEE()
if ieeeArchImpl {
archInitIEEE()
updateIEEE = archUpdateIEEE
} else {
// Initialize the slicing-by-8 table.
ieeeTable8 = slicingMakeTable(IEEE)
updateIEEE = func(crc uint32, p []byte) uint32 {
return slicingUpdate(crc, ieeeTable8, p)
}
}
}
// MakeTable returns a Table constructed from the specified polynomial.
// The contents of this Table must not be modified.
func MakeTable(poly uint32) *Table {
switch poly {
case IEEE:
ieeeOnce.Do(ieeeInit)
return IEEETable
case Castagnoli:
castagnoliOnce.Do(castagnoliInit)
return castagnoliTable
}
return simpleMakeTable(poly)
}
// digest represents the partial evaluation of a checksum.
type digest struct {
crc uint32
tab *Table
}
// New creates a new hash.Hash32 computing the CRC-32 checksum
// using the polynomial represented by the Table.
// Its Sum method will lay the value out in big-endian byte order.
func New(tab *Table) hash.Hash32 {
if tab == IEEETable {
ieeeOnce.Do(ieeeInit)
}
return &digest{0, tab}
}
// NewIEEE creates a new hash.Hash32 computing the CRC-32 checksum
// using the IEEE polynomial.
// Its Sum method will lay the value out in big-endian byte order.
func NewIEEE() hash.Hash32 { return New(IEEETable) }
func (d *digest) Size() int { return Size }
func (d *digest) BlockSize() int { return 1 }
func (d *digest) Reset() { d.crc = 0 }
// Update returns the result of adding the bytes in p to the crc.
func Update(crc uint32, tab *Table, p []byte) uint32 {
switch tab {
case castagnoliTable:
return updateCastagnoli(crc, p)
case IEEETable:
// Unfortunately, because IEEETable is exported, IEEE may be used without a
// call to MakeTable. We have to make sure it gets initialized in that case.
ieeeOnce.Do(ieeeInit)
return updateIEEE(crc, p)
default:
return simpleUpdate(crc, tab, p)
}
}
func (d *digest) Write(p []byte) (n int, err error) {
switch d.tab {
case castagnoliTable:
d.crc = updateCastagnoli(d.crc, p)
case IEEETable:
// We only create digest objects through New() which takes care of
// initialization in this case.
d.crc = updateIEEE(d.crc, p)
default:
d.crc = simpleUpdate(d.crc, d.tab, p)
}
return len(p), nil
}
func (d *digest) Sum32() uint32 { return d.crc }
func (d *digest) Sum(in []byte) []byte {
s := d.Sum32()
return append(in, byte(s>>24), byte(s>>16), byte(s>>8), byte(s))
}
// Checksum returns the CRC-32 checksum of data
// using the polynomial represented by the Table.
func Checksum(data []byte, tab *Table) uint32 { return Update(0, tab, data) }
// ChecksumIEEE returns the CRC-32 checksum of data
// using the IEEE polynomial.
func ChecksumIEEE(data []byte) uint32 {
ieeeOnce.Do(ieeeInit)
return updateIEEE(0, data)
}
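// Illustrative usage sketch (not part of the original file), written from the
// point of view of an importing package:
//
//	sum := crc32.ChecksumIEEE([]byte("hello"))
//
//	// Streaming via hash.Hash32 yields the same result as the one-shot call.
//	h := crc32.NewIEEE()
//	h.Write([]byte("hel"))
//	h.Write([]byte("lo"))
//	_ = h.Sum32() == sum // true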

View File

@ -1,230 +0,0 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build !appengine,!gccgo
// AMD64-specific hardware-assisted CRC32 algorithms. See crc32.go for a
// description of the interface that each architecture-specific file
// implements.
package crc32
import "unsafe"
// This file contains the code to call the SSE 4.2 version of the Castagnoli
// and IEEE CRC.
// haveSSE41/haveSSE42/haveCLMUL are defined in crc32_amd64.s and use
// CPUID to test for SSE 4.1, 4.2 and CLMUL support.
func haveSSE41() bool
func haveSSE42() bool
func haveCLMUL() bool
// castagnoliSSE42 is defined in crc32_amd64.s and uses the SSE4.2 CRC32
// instruction.
//go:noescape
func castagnoliSSE42(crc uint32, p []byte) uint32
// castagnoliSSE42Triple is defined in crc32_amd64.s and uses the SSE4.2 CRC32
// instruction.
//go:noescape
func castagnoliSSE42Triple(
crcA, crcB, crcC uint32,
a, b, c []byte,
rounds uint32,
) (retA uint32, retB uint32, retC uint32)
// ieeeCLMUL is defined in crc32_amd64.s and uses the PCLMULQDQ
// instruction as well as SSE 4.1.
//go:noescape
func ieeeCLMUL(crc uint32, p []byte) uint32
var sse42 = haveSSE42()
var useFastIEEE = haveCLMUL() && haveSSE41()
const castagnoliK1 = 168
const castagnoliK2 = 1344
type sse42Table [4]Table
var castagnoliSSE42TableK1 *sse42Table
var castagnoliSSE42TableK2 *sse42Table
func archAvailableCastagnoli() bool {
return sse42
}
func archInitCastagnoli() {
if !sse42 {
panic("arch-specific Castagnoli not available")
}
castagnoliSSE42TableK1 = new(sse42Table)
castagnoliSSE42TableK2 = new(sse42Table)
// See description in updateCastagnoli.
// t[0][i] = CRC(i000, O)
// t[1][i] = CRC(0i00, O)
// t[2][i] = CRC(00i0, O)
// t[3][i] = CRC(000i, O)
// where O is a sequence of K zeros.
var tmp [castagnoliK2]byte
for b := 0; b < 4; b++ {
for i := 0; i < 256; i++ {
val := uint32(i) << uint32(b*8)
castagnoliSSE42TableK1[b][i] = castagnoliSSE42(val, tmp[:castagnoliK1])
castagnoliSSE42TableK2[b][i] = castagnoliSSE42(val, tmp[:])
}
}
}
// castagnoliShift computes the CRC32-C of K1 or K2 zeroes (depending on the
// table given) with the given initial crc value. This corresponds to
// CRC(crc, O) in the description in updateCastagnoli.
func castagnoliShift(table *sse42Table, crc uint32) uint32 {
return table[3][crc>>24] ^
table[2][(crc>>16)&0xFF] ^
table[1][(crc>>8)&0xFF] ^
table[0][crc&0xFF]
}
func archUpdateCastagnoli(crc uint32, p []byte) uint32 {
if !sse42 {
panic("not available")
}
// This method is inspired by the algorithm in Intel's white paper:
// "Fast CRC Computation for iSCSI Polynomial Using CRC32 Instruction"
// The same strategy of splitting the buffer in three is used but the
// combining calculation is different; the complete derivation is explained
// below.
//
// -- The basic idea --
//
// The CRC32 instruction (available in SSE4.2) can process 8 bytes at a
// time. In recent Intel architectures the instruction takes 3 cycles;
// however the processor can pipeline up to three instructions if they
// don't depend on each other.
//
// Roughly this means that we can process three buffers in about the same
// time we can process one buffer.
//
// The idea is then to split the buffer in three, CRC the three pieces
// separately and then combine the results.
//
// Combining the results requires precomputed tables, so we must choose a
// fixed buffer length to optimize. The longer the length, the faster; but
// only buffers longer than this length will use the optimization. We choose
// two cutoffs and compute tables for both:
// - one around 512: 168*3=504
// - one around 4KB: 1344*3=4032
//
// -- The nitty gritty --
//
// Let CRC(I, X) be the non-inverted CRC32-C of the sequence X (with
// initial non-inverted CRC I). This function has the following properties:
// (a) CRC(I, AB) = CRC(CRC(I, A), B)
// (b) CRC(I, A xor B) = CRC(I, A) xor CRC(0, B)
//
// Say we want to compute CRC(I, ABC) where A, B, C are three sequences of
// K bytes each, where K is a fixed constant. Let O be the sequence of K zero
// bytes.
//
// CRC(I, ABC) = CRC(I, ABO xor C)
// = CRC(I, ABO) xor CRC(0, C)
// = CRC(CRC(I, AB), O) xor CRC(0, C)
// = CRC(CRC(I, AO xor B), O) xor CRC(0, C)
// = CRC(CRC(I, AO) xor CRC(0, B), O) xor CRC(0, C)
// = CRC(CRC(CRC(I, A), O) xor CRC(0, B), O) xor CRC(0, C)
//
// The castagnoliSSE42Triple function can compute CRC(I, A), CRC(0, B),
// and CRC(0, C) efficiently. We just need to find a way to quickly compute
// CRC(uvwx, O) given a 4-byte initial value uvwx. We can precompute these
// values; since we can't have a 32-bit table, we break it up into four
// 8-bit tables:
//
// CRC(uvwx, O) = CRC(u000, O) xor
// CRC(0v00, O) xor
// CRC(00w0, O) xor
// CRC(000x, O)
//
// We can compute tables corresponding to the four terms for all 8-bit
// values.
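// As an aside, property (a) above can be observed through the exported API
// (illustrative sketch only; a and b are arbitrary byte slices, and Update
// works on the inverted representation, but the same composition identity
// holds):
//
//	tab := crc32.MakeTable(crc32.Castagnoli)
//	ab := append(append([]byte{}, a...), b...)
//	_ = crc32.Update(crc32.Update(0, tab, a), tab, b) == crc32.Update(0, tab, ab) // true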
crc = ^crc
// If a buffer is long enough to use the optimization, process the first few
// bytes to align the buffer to an 8 byte boundary (if necessary).
if len(p) >= castagnoliK1*3 {
delta := int(uintptr(unsafe.Pointer(&p[0])) & 7)
if delta != 0 {
delta = 8 - delta
crc = castagnoliSSE42(crc, p[:delta])
p = p[delta:]
}
}
// Process 3*K2 at a time.
for len(p) >= castagnoliK2*3 {
// Compute CRC(I, A), CRC(0, B), and CRC(0, C).
crcA, crcB, crcC := castagnoliSSE42Triple(
crc, 0, 0,
p, p[castagnoliK2:], p[castagnoliK2*2:],
castagnoliK2/24)
// CRC(I, AB) = CRC(CRC(I, A), O) xor CRC(0, B)
crcAB := castagnoliShift(castagnoliSSE42TableK2, crcA) ^ crcB
// CRC(I, ABC) = CRC(CRC(I, AB), O) xor CRC(0, C)
crc = castagnoliShift(castagnoliSSE42TableK2, crcAB) ^ crcC
p = p[castagnoliK2*3:]
}
// Process 3*K1 at a time.
for len(p) >= castagnoliK1*3 {
// Compute CRC(I, A), CRC(0, B), and CRC(0, C).
crcA, crcB, crcC := castagnoliSSE42Triple(
crc, 0, 0,
p, p[castagnoliK1:], p[castagnoliK1*2:],
castagnoliK1/24)
// CRC(I, AB) = CRC(CRC(I, A), O) xor CRC(0, B)
crcAB := castagnoliShift(castagnoliSSE42TableK1, crcA) ^ crcB
// CRC(I, ABC) = CRC(CRC(I, AB), O) xor CRC(0, C)
crc = castagnoliShift(castagnoliSSE42TableK1, crcAB) ^ crcC
p = p[castagnoliK1*3:]
}
// Use the simple implementation for what's left.
crc = castagnoliSSE42(crc, p)
return ^crc
}
func archAvailableIEEE() bool {
return useFastIEEE
}
var archIeeeTable8 *slicing8Table
func archInitIEEE() {
if !useFastIEEE {
panic("not available")
}
// We still use slicing-by-8 for small buffers.
archIeeeTable8 = slicingMakeTable(IEEE)
}
func archUpdateIEEE(crc uint32, p []byte) uint32 {
if !useFastIEEE {
panic("not available")
}
if len(p) >= 64 {
left := len(p) & 15
do := len(p) - left
crc = ^ieeeCLMUL(^crc, p[:do])
p = p[do:]
}
if len(p) == 0 {
return crc
}
return slicingUpdate(crc, archIeeeTable8, p)
}

View File

@ -1,319 +0,0 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build gc
#define NOSPLIT 4
#define RODATA 8
// castagnoliSSE42 updates the (non-inverted) crc with the given buffer.
//
// func castagnoliSSE42(crc uint32, p []byte) uint32
TEXT ·castagnoliSSE42(SB), NOSPLIT, $0
MOVL crc+0(FP), AX // CRC value
MOVQ p+8(FP), SI // data pointer
MOVQ p_len+16(FP), CX // len(p)
// If there are fewer than 8 bytes to process, skip alignment.
CMPQ CX, $8
JL less_than_8
MOVQ SI, BX
ANDQ $7, BX
JZ aligned
// Process the first few bytes to 8-byte align the input.
// BX = 8 - BX. We need to process this many bytes to align.
SUBQ $1, BX
XORQ $7, BX
BTQ $0, BX
JNC align_2
CRC32B (SI), AX
DECQ CX
INCQ SI
align_2:
BTQ $1, BX
JNC align_4
// CRC32W (SI), AX
BYTE $0x66; BYTE $0xf2; BYTE $0x0f; BYTE $0x38; BYTE $0xf1; BYTE $0x06
SUBQ $2, CX
ADDQ $2, SI
align_4:
BTQ $2, BX
JNC aligned
// CRC32L (SI), AX
BYTE $0xf2; BYTE $0x0f; BYTE $0x38; BYTE $0xf1; BYTE $0x06
SUBQ $4, CX
ADDQ $4, SI
aligned:
// The input is now 8-byte aligned and we can process 8-byte chunks.
CMPQ CX, $8
JL less_than_8
CRC32Q (SI), AX
ADDQ $8, SI
SUBQ $8, CX
JMP aligned
less_than_8:
// We may have some bytes left over; process 4 bytes, then 2, then 1.
BTQ $2, CX
JNC less_than_4
// CRC32L (SI), AX
BYTE $0xf2; BYTE $0x0f; BYTE $0x38; BYTE $0xf1; BYTE $0x06
ADDQ $4, SI
less_than_4:
BTQ $1, CX
JNC less_than_2
// CRC32W (SI), AX
BYTE $0x66; BYTE $0xf2; BYTE $0x0f; BYTE $0x38; BYTE $0xf1; BYTE $0x06
ADDQ $2, SI
less_than_2:
BTQ $0, CX
JNC done
CRC32B (SI), AX
done:
MOVL AX, ret+32(FP)
RET
// castagnoliSSE42Triple updates three (non-inverted) crcs with (24*rounds)
// bytes from each buffer.
//
// func castagnoliSSE42Triple(
// crc1, crc2, crc3 uint32,
// a, b, c []byte,
// rounds uint32,
// ) (retA uint32, retB uint32, retC uint32)
TEXT ·castagnoliSSE42Triple(SB), NOSPLIT, $0
MOVL crcA+0(FP), AX
MOVL crcB+4(FP), CX
MOVL crcC+8(FP), DX
MOVQ a+16(FP), R8 // data pointer
MOVQ b+40(FP), R9 // data pointer
MOVQ c+64(FP), R10 // data pointer
MOVL rounds+88(FP), R11
loop:
CRC32Q (R8), AX
CRC32Q (R9), CX
CRC32Q (R10), DX
CRC32Q 8(R8), AX
CRC32Q 8(R9), CX
CRC32Q 8(R10), DX
CRC32Q 16(R8), AX
CRC32Q 16(R9), CX
CRC32Q 16(R10), DX
ADDQ $24, R8
ADDQ $24, R9
ADDQ $24, R10
DECQ R11
JNZ loop
MOVL AX, retA+96(FP)
MOVL CX, retB+100(FP)
MOVL DX, retC+104(FP)
RET
// func haveSSE42() bool
TEXT ·haveSSE42(SB), NOSPLIT, $0
XORQ AX, AX
INCL AX
CPUID
SHRQ $20, CX
ANDQ $1, CX
MOVB CX, ret+0(FP)
RET
// func haveCLMUL() bool
TEXT ·haveCLMUL(SB), NOSPLIT, $0
XORQ AX, AX
INCL AX
CPUID
SHRQ $1, CX
ANDQ $1, CX
MOVB CX, ret+0(FP)
RET
// func haveSSE41() bool
TEXT ·haveSSE41(SB), NOSPLIT, $0
XORQ AX, AX
INCL AX
CPUID
SHRQ $19, CX
ANDQ $1, CX
MOVB CX, ret+0(FP)
RET
// CRC32 polynomial data
//
// These constants are lifted from the
// Linux kernel, since they avoid the costly
// PSHUFB 16 byte reversal proposed in the
// original Intel paper.
DATA r2r1kp<>+0(SB)/8, $0x154442bd4
DATA r2r1kp<>+8(SB)/8, $0x1c6e41596
DATA r4r3kp<>+0(SB)/8, $0x1751997d0
DATA r4r3kp<>+8(SB)/8, $0x0ccaa009e
DATA rupolykp<>+0(SB)/8, $0x1db710641
DATA rupolykp<>+8(SB)/8, $0x1f7011641
DATA r5kp<>+0(SB)/8, $0x163cd6124
GLOBL r2r1kp<>(SB), RODATA, $16
GLOBL r4r3kp<>(SB), RODATA, $16
GLOBL rupolykp<>(SB), RODATA, $16
GLOBL r5kp<>(SB), RODATA, $8
// Based on http://www.intel.com/content/dam/www/public/us/en/documents/white-papers/fast-crc-computation-generic-polynomials-pclmulqdq-paper.pdf
// len(p) must be at least 64, and must be a multiple of 16.
// func ieeeCLMUL(crc uint32, p []byte) uint32
TEXT ·ieeeCLMUL(SB), NOSPLIT, $0
MOVL crc+0(FP), X0 // Initial CRC value
MOVQ p+8(FP), SI // data pointer
MOVQ p_len+16(FP), CX // len(p)
MOVOU (SI), X1
MOVOU 16(SI), X2
MOVOU 32(SI), X3
MOVOU 48(SI), X4
PXOR X0, X1
ADDQ $64, SI // buf+=64
SUBQ $64, CX // len-=64
CMPQ CX, $64 // Less than 64 bytes left
JB remain64
MOVOA r2r1kp<>+0(SB), X0
loopback64:
MOVOA X1, X5
MOVOA X2, X6
MOVOA X3, X7
MOVOA X4, X8
PCLMULQDQ $0, X0, X1
PCLMULQDQ $0, X0, X2
PCLMULQDQ $0, X0, X3
PCLMULQDQ $0, X0, X4
// Load next early
MOVOU (SI), X11
MOVOU 16(SI), X12
MOVOU 32(SI), X13
MOVOU 48(SI), X14
PCLMULQDQ $0x11, X0, X5
PCLMULQDQ $0x11, X0, X6
PCLMULQDQ $0x11, X0, X7
PCLMULQDQ $0x11, X0, X8
PXOR X5, X1
PXOR X6, X2
PXOR X7, X3
PXOR X8, X4
PXOR X11, X1
PXOR X12, X2
PXOR X13, X3
PXOR X14, X4
ADDQ $0x40, DI
ADDQ $64, SI // buf+=64
SUBQ $64, CX // len-=64
CMPQ CX, $64 // Less than 64 bytes left?
JGE loopback64
// Fold result into a single register (X1)
remain64:
MOVOA r4r3kp<>+0(SB), X0
MOVOA X1, X5
PCLMULQDQ $0, X0, X1
PCLMULQDQ $0x11, X0, X5
PXOR X5, X1
PXOR X2, X1
MOVOA X1, X5
PCLMULQDQ $0, X0, X1
PCLMULQDQ $0x11, X0, X5
PXOR X5, X1
PXOR X3, X1
MOVOA X1, X5
PCLMULQDQ $0, X0, X1
PCLMULQDQ $0x11, X0, X5
PXOR X5, X1
PXOR X4, X1
// If there is less than 16 bytes left we are done
CMPQ CX, $16
JB finish
// Encode 16 bytes
remain16:
MOVOU (SI), X10
MOVOA X1, X5
PCLMULQDQ $0, X0, X1
PCLMULQDQ $0x11, X0, X5
PXOR X5, X1
PXOR X10, X1
SUBQ $16, CX
ADDQ $16, SI
CMPQ CX, $16
JGE remain16
finish:
// Fold final result into 32 bits and return it
PCMPEQB X3, X3
PCLMULQDQ $1, X1, X0
PSRLDQ $8, X1
PXOR X0, X1
MOVOA X1, X2
MOVQ r5kp<>+0(SB), X0
// Creates 32 bit mask. Note that we don't care about upper half.
PSRLQ $32, X3
PSRLDQ $4, X2
PAND X3, X1
PCLMULQDQ $0, X0, X1
PXOR X2, X1
MOVOA rupolykp<>+0(SB), X0
MOVOA X1, X2
PAND X3, X1
PCLMULQDQ $0x10, X0, X1
PAND X3, X1
PCLMULQDQ $0, X0, X1
PXOR X2, X1
// PEXTRD $1, X1, AX (SSE 4.1)
BYTE $0x66; BYTE $0x0f; BYTE $0x3a
BYTE $0x16; BYTE $0xc8; BYTE $0x01
MOVL AX, ret+32(FP)
RET

View File

@ -1,43 +0,0 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build !appengine,!gccgo
package crc32
// This file contains the code to call the SSE 4.2 version of the Castagnoli
// CRC.
// haveSSE42 is defined in crc32_amd64p32.s and uses CPUID to test for SSE 4.2
// support.
func haveSSE42() bool
// castagnoliSSE42 is defined in crc32_amd64p32.s and uses the SSE4.2 CRC32
// instruction.
//go:noescape
func castagnoliSSE42(crc uint32, p []byte) uint32
var sse42 = haveSSE42()
func archAvailableCastagnoli() bool {
return sse42
}
func archInitCastagnoli() {
if !sse42 {
panic("not available")
}
// No initialization necessary.
}
func archUpdateCastagnoli(crc uint32, p []byte) uint32 {
if !sse42 {
panic("not available")
}
return castagnoliSSE42(crc, p)
}
func archAvailableIEEE() bool { return false }
func archInitIEEE() { panic("not available") }
func archUpdateIEEE(crc uint32, p []byte) uint32 { panic("not available") }

View File

@ -1,67 +0,0 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build gc
#define NOSPLIT 4
#define RODATA 8
// func castagnoliSSE42(crc uint32, p []byte) uint32
TEXT ·castagnoliSSE42(SB), NOSPLIT, $0
MOVL crc+0(FP), AX // CRC value
MOVL p+4(FP), SI // data pointer
MOVL p_len+8(FP), CX // len(p)
NOTL AX
// If there are fewer than 8 bytes to process, we do it byte-by-byte.
CMPQ CX, $8
JL cleanup
// Process individual bytes until the input is 8-byte aligned.
startup:
MOVQ SI, BX
ANDQ $7, BX
JZ aligned
CRC32B (SI), AX
DECQ CX
INCQ SI
JMP startup
aligned:
// The input is now 8-byte aligned and we can process 8-byte chunks.
CMPQ CX, $8
JL cleanup
CRC32Q (SI), AX
ADDQ $8, SI
SUBQ $8, CX
JMP aligned
cleanup:
// We may have some bytes left over that we process one at a time.
CMPQ CX, $0
JE done
CRC32B (SI), AX
INCQ SI
DECQ CX
JMP cleanup
done:
NOTL AX
MOVL AX, ret+16(FP)
RET
// func haveSSE42() bool
TEXT ·haveSSE42(SB), NOSPLIT, $0
XORQ AX, AX
INCL AX
CPUID
SHRQ $20, CX
ANDQ $1, CX
MOVB CX, ret+0(FP)
RET

View File

@ -1,89 +0,0 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// This file contains CRC32 algorithms that are not specific to any architecture
// and don't use hardware acceleration.
//
// The simple (and slow) CRC32 implementation only uses a 256*4-byte table.
//
// The slicing-by-8 algorithm is a faster implementation that uses a bigger
// table (8*256*4 bytes).
package crc32
// simpleMakeTable allocates and constructs a Table for the specified
// polynomial. The table is suitable for use with the simple algorithm
// (simpleUpdate).
func simpleMakeTable(poly uint32) *Table {
t := new(Table)
simplePopulateTable(poly, t)
return t
}
// simplePopulateTable constructs a Table for the specified polynomial, suitable
// for use with simpleUpdate.
func simplePopulateTable(poly uint32, t *Table) {
for i := 0; i < 256; i++ {
crc := uint32(i)
for j := 0; j < 8; j++ {
if crc&1 == 1 {
crc = (crc >> 1) ^ poly
} else {
crc >>= 1
}
}
t[i] = crc
}
}
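// Illustrative check (not part of the original file): for the IEEE polynomial
// 0xedb88320, the table populated by simplePopulateTable starts
//
//	t[0] = 0x00000000
//	t[1] = 0x77073096
//	t[2] = 0xee0e612c
//
// which matches the widely published CRC-32 lookup table.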
// simpleUpdate uses the simple algorithm to update the CRC, given a table that
// was previously computed using simpleMakeTable.
func simpleUpdate(crc uint32, tab *Table, p []byte) uint32 {
crc = ^crc
for _, v := range p {
crc = tab[byte(crc)^v] ^ (crc >> 8)
}
return ^crc
}
// Use slicing-by-8 when payload >= this value.
const slicing8Cutoff = 16
// slicing8Table is an array of 8 Tables, used by the slicing-by-8 algorithm.
type slicing8Table [8]Table
// slicingMakeTable constructs a slicing8Table for the specified polynomial. The
// table is suitable for use with the slicing-by-8 algorithm (slicingUpdate).
func slicingMakeTable(poly uint32) *slicing8Table {
t := new(slicing8Table)
simplePopulateTable(poly, &t[0])
for i := 0; i < 256; i++ {
crc := t[0][i]
for j := 1; j < 8; j++ {
crc = t[0][crc&0xFF] ^ (crc >> 8)
t[j][i] = crc
}
}
return t
}
// slicingUpdate uses the slicing-by-8 algorithm to update the CRC, given a
// table that was previously computed using slicingMakeTable.
func slicingUpdate(crc uint32, tab *slicing8Table, p []byte) uint32 {
if len(p) >= slicing8Cutoff {
crc = ^crc
for len(p) > 8 {
crc ^= uint32(p[0]) | uint32(p[1])<<8 | uint32(p[2])<<16 | uint32(p[3])<<24
crc = tab[0][p[7]] ^ tab[1][p[6]] ^ tab[2][p[5]] ^ tab[3][p[4]] ^
tab[4][crc>>24] ^ tab[5][(crc>>16)&0xFF] ^
tab[6][(crc>>8)&0xFF] ^ tab[7][crc&0xFF]
p = p[8:]
}
crc = ^crc
}
if len(p) == 0 {
return crc
}
return simpleUpdate(crc, &tab[0], p)
}

View File

@ -1,15 +0,0 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build !amd64,!amd64p32,!s390x
package crc32
func archAvailableIEEE() bool { return false }
func archInitIEEE() { panic("not available") }
func archUpdateIEEE(crc uint32, p []byte) uint32 { panic("not available") }
func archAvailableCastagnoli() bool { return false }
func archInitCastagnoli() { panic("not available") }
func archUpdateCastagnoli(crc uint32, p []byte) uint32 { panic("not available") }

View File

@ -1,91 +0,0 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build s390x
package crc32
const (
vxMinLen = 64
vxAlignMask = 15 // align to 16 bytes
)
// hasVectorFacility reports whether the machine has the z/Architecture
// vector facility installed and enabled.
func hasVectorFacility() bool
var hasVX = hasVectorFacility()
// vectorizedCastagnoli implements CRC32 using vector instructions.
// It is defined in crc32_s390x.s.
//go:noescape
func vectorizedCastagnoli(crc uint32, p []byte) uint32
// vectorizedIEEE implements CRC32 using vector instructions.
// It is defined in crc32_s390x.s.
//go:noescape
func vectorizedIEEE(crc uint32, p []byte) uint32
func archAvailableCastagnoli() bool {
return hasVX
}
var archCastagnoliTable8 *slicing8Table
func archInitCastagnoli() {
if !hasVX {
panic("not available")
}
// We still use slicing-by-8 for small buffers.
archCastagnoliTable8 = slicingMakeTable(Castagnoli)
}
// archUpdateCastagnoli calculates the checksum of p using
// vectorizedCastagnoli.
func archUpdateCastagnoli(crc uint32, p []byte) uint32 {
if !hasVX {
panic("not available")
}
// Use vectorized function if data length is above threshold.
if len(p) >= vxMinLen {
aligned := len(p) & ^vxAlignMask
crc = vectorizedCastagnoli(crc, p[:aligned])
p = p[aligned:]
}
if len(p) == 0 {
return crc
}
return slicingUpdate(crc, archCastagnoliTable8, p)
}
func archAvailableIEEE() bool {
return hasVX
}
var archIeeeTable8 *slicing8Table
func archInitIEEE() {
if !hasVX {
panic("not available")
}
// We still use slicing-by-8 for small buffers.
archIeeeTable8 = slicingMakeTable(IEEE)
}
// archUpdateIEEE calculates the checksum of p using vectorizedIEEE.
func archUpdateIEEE(crc uint32, p []byte) uint32 {
if !hasVX {
panic("not available")
}
// Use vectorized function if data length is above threshold.
if len(p) >= vxMinLen {
aligned := len(p) & ^vxAlignMask
crc = vectorizedIEEE(crc, p[:aligned])
p = p[aligned:]
}
if len(p) == 0 {
return crc
}
return slicingUpdate(crc, archIeeeTable8, p)
}
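The `len(p) & ^vxAlignMask` step above rounds the length down to a multiple of 16 so the vector routine only ever sees whole 16-byte blocks, and the remainder falls through to slicing-by-8. A small standalone sketch of that arithmetic (not part of the vendored file):

```go
package main

import "fmt"

func main() {
	const vxAlignMask = 15 // align to 16 bytes, as in crc32_s390x.go above
	for _, n := range []int{64, 100, 130, 1000} {
		aligned := n & ^vxAlignMask // largest multiple of 16 not exceeding n
		fmt.Printf("len=%4d -> vectorized=%4d bytes, slicing-by-8 tail=%2d bytes\n",
			n, aligned, n-aligned)
	}
}
```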

View File

@ -1,249 +0,0 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build s390x
#include "textflag.h"
// Vector register range containing CRC-32 constants
#define CONST_PERM_LE2BE V9
#define CONST_R2R1 V10
#define CONST_R4R3 V11
#define CONST_R5 V12
#define CONST_RU_POLY V13
#define CONST_CRC_POLY V14
// The CRC-32 constant block contains reduction constants to fold and
// process particular chunks of the input data stream in parallel.
//
// Note that the constant definitions below are extended in order to compute
// intermediate results with a single VECTOR GALOIS FIELD MULTIPLY instruction.
// The rightmost doubleword can be 0 to prevent contribution to the result or
// can be multiplied by 1 to perform an XOR without the need for a separate
// VECTOR EXCLUSIVE OR instruction.
//
// The polynomials used are bit-reflected:
//
// IEEE: P'(x) = 0x0edb88320
// Castagnoli: P'(x) = 0x082f63b78
// IEEE polynomial constants
DATA ·crcleconskp+0(SB)/8, $0x0F0E0D0C0B0A0908 // LE-to-BE mask
DATA ·crcleconskp+8(SB)/8, $0x0706050403020100
DATA ·crcleconskp+16(SB)/8, $0x00000001c6e41596 // R2
DATA ·crcleconskp+24(SB)/8, $0x0000000154442bd4 // R1
DATA ·crcleconskp+32(SB)/8, $0x00000000ccaa009e // R4
DATA ·crcleconskp+40(SB)/8, $0x00000001751997d0 // R3
DATA ·crcleconskp+48(SB)/8, $0x0000000000000000
DATA ·crcleconskp+56(SB)/8, $0x0000000163cd6124 // R5
DATA ·crcleconskp+64(SB)/8, $0x0000000000000000
DATA ·crcleconskp+72(SB)/8, $0x00000001F7011641 // u'
DATA ·crcleconskp+80(SB)/8, $0x0000000000000000
DATA ·crcleconskp+88(SB)/8, $0x00000001DB710641 // P'(x) << 1
GLOBL ·crcleconskp(SB), RODATA, $144
// Castagnoli polynomial constants
DATA ·crccleconskp+0(SB)/8, $0x0F0E0D0C0B0A0908 // LE-to-BE mask
DATA ·crccleconskp+8(SB)/8, $0x0706050403020100
DATA ·crccleconskp+16(SB)/8, $0x000000009e4addf8 // R2
DATA ·crccleconskp+24(SB)/8, $0x00000000740eef02 // R1
DATA ·crccleconskp+32(SB)/8, $0x000000014cd00bd6 // R4
DATA ·crccleconskp+40(SB)/8, $0x00000000f20c0dfe // R3
DATA ·crccleconskp+48(SB)/8, $0x0000000000000000
DATA ·crccleconskp+56(SB)/8, $0x00000000dd45aab8 // R5
DATA ·crccleconskp+64(SB)/8, $0x0000000000000000
DATA ·crccleconskp+72(SB)/8, $0x00000000dea713f1 // u'
DATA ·crccleconskp+80(SB)/8, $0x0000000000000000
DATA ·crccleconskp+88(SB)/8, $0x0000000105ec76f0 // P'(x) << 1
GLOBL ·crccleconskp(SB), RODATA, $144
// func hasVectorFacility() bool
TEXT ·hasVectorFacility(SB), NOSPLIT, $24-1
MOVD $x-24(SP), R1
XC $24, 0(R1), 0(R1) // clear the storage
MOVD $2, R0 // R0 is the number of double words stored -1
WORD $0xB2B01000 // STFLE 0(R1)
XOR R0, R0 // reset the value of R0
MOVBZ z-8(SP), R1
AND $0x40, R1
BEQ novector
vectorinstalled:
// check if the vector instruction has been enabled
VLEIB $0, $0xF, V16
VLGVB $0, V16, R1
CMPBNE R1, $0xF, novector
MOVB $1, ret+0(FP) // have vx
RET
novector:
MOVB $0, ret+0(FP) // no vx
RET
// The CRC-32 function(s) use these calling conventions:
//
// Parameters:
//
// R2: Initial CRC value, typically ~0; and final CRC (return) value.
// R3: Input buffer pointer, performance might be improved if the
// buffer is on a doubleword boundary.
// R4: Length of the buffer, must be 64 bytes or greater.
//
// Register usage:
//
// R5: CRC-32 constant pool base pointer.
// V0: Initial CRC value and intermediate constants and results.
// V1..V4: Data for CRC computation.
// V5..V8: Next data chunks that are fetched from the input buffer.
//
// V9..V14: CRC-32 constants.
// func vectorizedIEEE(crc uint32, p []byte) uint32
TEXT ·vectorizedIEEE(SB), NOSPLIT, $0
MOVWZ crc+0(FP), R2 // R2 stores the CRC value
MOVD p+8(FP), R3 // data pointer
MOVD p_len+16(FP), R4 // len(p)
MOVD $·crcleconskp(SB), R5
BR vectorizedBody<>(SB)
// func vectorizedCastagnoli(crc uint32, p []byte) uint32
TEXT ·vectorizedCastagnoli(SB), NOSPLIT, $0
MOVWZ crc+0(FP), R2 // R2 stores the CRC value
MOVD p+8(FP), R3 // data pointer
MOVD p_len+16(FP), R4 // len(p)
// R5: crc-32 constant pool base pointer, constant is used to reduce crc
MOVD $·crccleconskp(SB), R5
BR vectorizedBody<>(SB)
TEXT vectorizedBody<>(SB), NOSPLIT, $0
XOR $0xffffffff, R2 // NOTW R2
VLM 0(R5), CONST_PERM_LE2BE, CONST_CRC_POLY
// Load the initial CRC value into the rightmost word of V0
VZERO V0
VLVGF $3, R2, V0
// Crash if the input size is less than 64 bytes.
CMP R4, $64
BLT crash
// Load a 64-byte data chunk and XOR with CRC
VLM 0(R3), V1, V4 // 64-bytes into V1..V4
// Reflect the data if the CRC operation is in the bit-reflected domain
VPERM V1, V1, CONST_PERM_LE2BE, V1
VPERM V2, V2, CONST_PERM_LE2BE, V2
VPERM V3, V3, CONST_PERM_LE2BE, V3
VPERM V4, V4, CONST_PERM_LE2BE, V4
VX V0, V1, V1 // V1 ^= CRC
ADD $64, R3 // BUF = BUF + 64
ADD $(-64), R4
// Check remaining buffer size and jump to proper folding method
CMP R4, $64
BLT less_than_64bytes
fold_64bytes_loop:
// Load the next 64-byte data chunk into V5 to V8
VLM 0(R3), V5, V8
VPERM V5, V5, CONST_PERM_LE2BE, V5
VPERM V6, V6, CONST_PERM_LE2BE, V6
VPERM V7, V7, CONST_PERM_LE2BE, V7
VPERM V8, V8, CONST_PERM_LE2BE, V8
// Perform a GF(2) multiplication of the doublewords in V1 with
// the reduction constants in V0. The intermediate result is
// then folded (accumulated) with the next data chunk in V5 and
// stored in V1. Repeat this step for the register contents
// in V2, V3, and V4 respectively.
VGFMAG CONST_R2R1, V1, V5, V1
VGFMAG CONST_R2R1, V2, V6, V2
VGFMAG CONST_R2R1, V3, V7, V3
VGFMAG CONST_R2R1, V4, V8, V4
// Adjust buffer pointer and length for next loop
ADD $64, R3 // BUF = BUF + 64
ADD $(-64), R4 // LEN = LEN - 64
CMP R4, $64
BGE fold_64bytes_loop
less_than_64bytes:
// Fold V1 to V4 into a single 128-bit value in V1
VGFMAG CONST_R4R3, V1, V2, V1
VGFMAG CONST_R4R3, V1, V3, V1
VGFMAG CONST_R4R3, V1, V4, V1
// Check whether to continue with 64-bit folding
CMP R4, $16
BLT final_fold
fold_16bytes_loop:
VL 0(R3), V2 // Load next data chunk
VPERM V2, V2, CONST_PERM_LE2BE, V2
VGFMAG CONST_R4R3, V1, V2, V1 // Fold next data chunk
// Adjust buffer pointer and size for folding next data chunk
ADD $16, R3
ADD $-16, R4
// Process remaining data chunks
CMP R4, $16
BGE fold_16bytes_loop
final_fold:
VLEIB $7, $0x40, V9
VSRLB V9, CONST_R4R3, V0
VLEIG $0, $1, V0
VGFMG V0, V1, V1
VLEIB $7, $0x20, V9 // Shift by words
VSRLB V9, V1, V2 // Store remaining bits in V2
VUPLLF V1, V1 // Split rightmost doubleword
VGFMAG CONST_R5, V1, V2, V1 // V1 = (V1 * R5) XOR V2
// The input values to the Barrett reduction are the degree-63 polynomial
// in V1 (R(x)), the degree-32 generator polynomial, and the reduction
// constant u. The Barrett reduction result is the CRC value of R(x) mod
// P(x).
//
// The Barrett reduction algorithm is defined as:
//
// 1. T1(x) = floor( R(x) / x^32 ) GF2MUL u
// 2. T2(x) = floor( T1(x) / x^32 ) GF2MUL P(x)
// 3. C(x) = R(x) XOR T2(x) mod x^32
//
// Note: To compensate for the division by x^32, use the vector unpack
// instruction to move the leftmost word into the leftmost doubleword
// of the vector register. The rightmost doubleword is multiplied
// by zero so it does not contribute to the intermediate results.
// T1(x) = floor( R(x) / x^32 ) GF2MUL u
VUPLLF V1, V2
VGFMG CONST_RU_POLY, V2, V2
// Compute the GF(2) product of the CRC polynomial in VO with T1(x) in
// V2 and XOR the intermediate result, T2(x), with the value in V1.
// The final result is in the rightmost word of V2.
VUPLLF V2, V2
VGFMAG CONST_CRC_POLY, V2, V1, V2
done:
VLGVF $2, V2, R2
XOR $0xffffffff, R2 // NOTW R2
MOVWZ R2, ret + 32(FP)
RET
crash:
MOVD $0, (R0) // input size is less than 64 bytes

View File

@ -1,284 +0,0 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package crc32
import (
crand "crypto/rand"
"hash"
mrand "math/rand"
"testing"
)
type test struct {
ieee, castagnoli uint32
in string
}
var golden = []test{
{0x0, 0x0, ""},
{0xe8b7be43, 0xc1d04330, "a"},
{0x9e83486d, 0xe2a22936, "ab"},
{0x352441c2, 0x364b3fb7, "abc"},
{0xed82cd11, 0x92c80a31, "abcd"},
{0x8587d865, 0xc450d697, "abcde"},
{0x4b8e39ef, 0x53bceff1, "abcdef"},
{0x312a6aa6, 0xe627f441, "abcdefg"},
{0xaeef2a50, 0xa9421b7, "abcdefgh"},
{0x8da988af, 0x2ddc99fc, "abcdefghi"},
{0x3981703a, 0xe6599437, "abcdefghij"},
{0x6b9cdfe7, 0xb2cc01fe, "Discard medicine more than two years old."},
{0xc90ef73f, 0xe28207f, "He who has a shady past knows that nice guys finish last."},
{0xb902341f, 0xbe93f964, "I wouldn't marry him with a ten foot pole."},
{0x42080e8, 0x9e3be0c3, "Free! Free!/A trip/to Mars/for 900/empty jars/Burma Shave"},
{0x154c6d11, 0xf505ef04, "The days of the digital watch are numbered. -Tom Stoppard"},
{0x4c418325, 0x85d3dc82, "Nepal premier won't resign."},
{0x33955150, 0xc5142380, "For every action there is an equal and opposite government program."},
{0x26216a4b, 0x75eb77dd, "His money is twice tainted: 'taint yours and 'taint mine."},
{0x1abbe45e, 0x91ebe9f7, "There is no reason for any individual to have a computer in their home. -Ken Olsen, 1977"},
{0xc89a94f7, 0xf0b1168e, "It's a tiny change to the code and not completely disgusting. - Bob Manchek"},
{0xab3abe14, 0x572b74e2, "size: a.out: bad magic"},
{0xbab102b6, 0x8a58a6d5, "The major problem is with sendmail. -Mark Horton"},
{0x999149d7, 0x9c426c50, "Give me a rock, paper and scissors and I will move the world. CCFestoon"},
{0x6d52a33c, 0x735400a4, "If the enemy is within range, then so are you."},
{0x90631e8d, 0xbec49c95, "It's well we cannot hear the screams/That we create in others' dreams."},
{0x78309130, 0xa95a2079, "You remind me of a TV show, but that's all right: I watch it anyway."},
{0x7d0a377f, 0xde2e65c5, "C is as portable as Stonehedge!!"},
{0x8c79fd79, 0x297a88ed, "Even if I could be Shakespeare, I think I should still choose to be Faraday. - A. Huxley"},
{0xa20b7167, 0x66ed1d8b, "The fugacity of a constituent in a mixture of gases at a given temperature is proportional to its mole fraction. Lewis-Randall Rule"},
{0x8e0bb443, 0xdcded527, "How can you write a big system without C++? -Paul Glick"},
}
// testGoldenIEEE verifies that the given function returns
// correct IEEE checksums.
func testGoldenIEEE(t *testing.T, crcFunc func(b []byte) uint32) {
for _, g := range golden {
if crc := crcFunc([]byte(g.in)); crc != g.ieee {
t.Errorf("IEEE(%s) = 0x%x want 0x%x", g.in, crc, g.ieee)
}
}
}
// testGoldenCastagnoli verifies that the given function returns
// correct Castagnoli checksums.
func testGoldenCastagnoli(t *testing.T, crcFunc func(b []byte) uint32) {
for _, g := range golden {
if crc := crcFunc([]byte(g.in)); crc != g.castagnoli {
t.Errorf("Castagnoli(%s) = 0x%x want 0x%x", g.in, crc, g.castagnoli)
}
}
}
// testCrossCheck generates random buffers of various lengths and verifies that
// the two "update" functions return the same result.
func testCrossCheck(t *testing.T, crcFunc1, crcFunc2 func(crc uint32, b []byte) uint32) {
// The AMD64 implementation has some cutoffs at lengths 168*3=504 and
// 1344*3=4032. We should make sure lengths around these values are in the
// list.
lengths := []int{0, 1, 2, 3, 4, 5, 10, 16, 50, 100, 128,
500, 501, 502, 503, 504, 505, 512, 1000, 1024, 2000,
4030, 4031, 4032, 4033, 4036, 4040, 4048, 4096, 5000, 10000}
for _, length := range lengths {
p := make([]byte, length)
_, _ = crand.Read(p)
crcInit := uint32(mrand.Int63())
crc1 := crcFunc1(crcInit, p)
crc2 := crcFunc2(crcInit, p)
if crc1 != crc2 {
t.Errorf("mismatch: 0x%x vs 0x%x (buffer length %d)", crc1, crc2, length)
}
}
}
// TestSimple tests the simple generic algorithm.
func TestSimple(t *testing.T) {
tab := simpleMakeTable(IEEE)
testGoldenIEEE(t, func(b []byte) uint32 {
return simpleUpdate(0, tab, b)
})
tab = simpleMakeTable(Castagnoli)
testGoldenCastagnoli(t, func(b []byte) uint32 {
return simpleUpdate(0, tab, b)
})
}
// TestSlicing tests the slicing-by-8 algorithm.
func TestSlicing(t *testing.T) {
tab := slicingMakeTable(IEEE)
testGoldenIEEE(t, func(b []byte) uint32 {
return slicingUpdate(0, tab, b)
})
tab = slicingMakeTable(Castagnoli)
testGoldenCastagnoli(t, func(b []byte) uint32 {
return slicingUpdate(0, tab, b)
})
// Cross-check various polys against the simple algorithm.
for _, poly := range []uint32{IEEE, Castagnoli, Koopman, 0xD5828281} {
t1 := simpleMakeTable(poly)
f1 := func(crc uint32, b []byte) uint32 {
return simpleUpdate(crc, t1, b)
}
t2 := slicingMakeTable(poly)
f2 := func(crc uint32, b []byte) uint32 {
return slicingUpdate(crc, t2, b)
}
testCrossCheck(t, f1, f2)
}
}
func TestArchIEEE(t *testing.T) {
if !archAvailableIEEE() {
t.Skip("Arch-specific IEEE not available.")
}
archInitIEEE()
slicingTable := slicingMakeTable(IEEE)
testCrossCheck(t, archUpdateIEEE, func(crc uint32, b []byte) uint32 {
return slicingUpdate(crc, slicingTable, b)
})
}
func TestArchCastagnoli(t *testing.T) {
if !archAvailableCastagnoli() {
t.Skip("Arch-specific Castagnoli not available.")
}
archInitCastagnoli()
slicingTable := slicingMakeTable(Castagnoli)
testCrossCheck(t, archUpdateCastagnoli, func(crc uint32, b []byte) uint32 {
return slicingUpdate(crc, slicingTable, b)
})
}
func TestGolden(t *testing.T) {
testGoldenIEEE(t, ChecksumIEEE)
// Some implementations have special code to deal with misaligned
// data; test that as well.
for delta := 1; delta <= 7; delta++ {
testGoldenIEEE(t, func(b []byte) uint32 {
ieee := NewIEEE()
d := delta
if d >= len(b) {
d = len(b)
}
ieee.Write(b[:d])
ieee.Write(b[d:])
return ieee.Sum32()
})
}
castagnoliTab := MakeTable(Castagnoli)
if castagnoliTab == nil {
t.Errorf("nil Castagnoli Table")
}
testGoldenCastagnoli(t, func(b []byte) uint32 {
castagnoli := New(castagnoliTab)
castagnoli.Write(b)
return castagnoli.Sum32()
})
// Some implementations have special code to deal with misaligned
// data; test that as well.
for delta := 1; delta <= 7; delta++ {
testGoldenCastagnoli(t, func(b []byte) uint32 {
castagnoli := New(castagnoliTab)
d := delta
if d >= len(b) {
d = len(b)
}
castagnoli.Write(b[:d])
castagnoli.Write(b[d:])
return castagnoli.Sum32()
})
}
}
func BenchmarkIEEECrc40B(b *testing.B) {
benchmark(b, NewIEEE(), 40, 0)
}
func BenchmarkIEEECrc1KB(b *testing.B) {
benchmark(b, NewIEEE(), 1<<10, 0)
}
func BenchmarkIEEECrc4KB(b *testing.B) {
benchmark(b, NewIEEE(), 4<<10, 0)
}
func BenchmarkIEEECrc32KB(b *testing.B) {
benchmark(b, NewIEEE(), 32<<10, 0)
}
func BenchmarkCastagnoliCrc15B(b *testing.B) {
benchmark(b, New(MakeTable(Castagnoli)), 15, 0)
}
func BenchmarkCastagnoliCrc15BMisaligned(b *testing.B) {
benchmark(b, New(MakeTable(Castagnoli)), 15, 1)
}
func BenchmarkCastagnoliCrc40B(b *testing.B) {
benchmark(b, New(MakeTable(Castagnoli)), 40, 0)
}
func BenchmarkCastagnoliCrc40BMisaligned(b *testing.B) {
benchmark(b, New(MakeTable(Castagnoli)), 40, 1)
}
func BenchmarkCastagnoliCrc512(b *testing.B) {
benchmark(b, New(MakeTable(Castagnoli)), 512, 0)
}
func BenchmarkCastagnoliCrc512Misaligned(b *testing.B) {
benchmark(b, New(MakeTable(Castagnoli)), 512, 1)
}
func BenchmarkCastagnoliCrc1KB(b *testing.B) {
benchmark(b, New(MakeTable(Castagnoli)), 1<<10, 0)
}
func BenchmarkCastagnoliCrc1KBMisaligned(b *testing.B) {
benchmark(b, New(MakeTable(Castagnoli)), 1<<10, 1)
}
func BenchmarkCastagnoliCrc4KB(b *testing.B) {
benchmark(b, New(MakeTable(Castagnoli)), 4<<10, 0)
}
func BenchmarkCastagnoliCrc4KBMisaligned(b *testing.B) {
benchmark(b, New(MakeTable(Castagnoli)), 4<<10, 1)
}
func BenchmarkCastagnoliCrc32KB(b *testing.B) {
benchmark(b, New(MakeTable(Castagnoli)), 32<<10, 0)
}
func BenchmarkCastagnoliCrc32KBMisaligned(b *testing.B) {
benchmark(b, New(MakeTable(Castagnoli)), 32<<10, 1)
}
func benchmark(b *testing.B, h hash.Hash32, n, alignment int64) {
b.SetBytes(n)
data := make([]byte, n+alignment)
data = data[alignment:]
for i := range data {
data[i] = byte(i)
}
in := make([]byte, 0, h.Size())
// Warm up
h.Reset()
h.Write(data)
h.Sum(in)
b.ResetTimer()
for i := 0; i < b.N; i++ {
h.Reset()
h.Write(data)
h.Sum(in)
}
}
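The golden values above can be spot-checked against the public hash/crc32 API of the standard library; a minimal sketch (not part of the vendored tests) verifying the "abc" entry for both polynomials:

```go
package main

import (
	"fmt"
	"hash/crc32"
)

func main() {
	// Expected from the golden table: IEEE("abc") = 0x352441c2,
	// Castagnoli("abc") = 0x364b3fb7.
	fmt.Printf("IEEE:       %#08x\n", crc32.ChecksumIEEE([]byte("abc")))
	castagnoli := crc32.MakeTable(crc32.Castagnoli)
	fmt.Printf("Castagnoli: %#08x\n", crc32.Checksum([]byte("abc"), castagnoli))
}
```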

View File

@ -1,28 +0,0 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package crc32_test
import (
"fmt"
"hash/crc32"
)
func ExampleMakeTable() {
// In this package, the CRC polynomial is represented in reversed notation,
// or LSB-first representation.
//
// LSB-first representation is a hexadecimal number with n bits, in which the
// most significant bit represents the coefficient of x⁰ and the least significant
// bit represents the coefficient of xⁿ⁻¹ (the coefficient for xⁿ is implicit).
//
// For example, CRC32-Q, as defined by the following polynomial,
// x³²+ x³¹+ x²⁴+ x²²+ x¹⁶+ x¹⁴+ x⁸+ x⁷+ x⁵+ x³+ x¹+ x⁰
// has the reversed notation 0b11010101100000101000001010000001, so the value
// that should be passed to MakeTable is 0xD5828281.
crc32q := crc32.MakeTable(0xD5828281)
fmt.Printf("%08x\n", crc32.Checksum([]byte("Hello world"), crc32q))
// Output:
// 2964d064
}
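The reversed value can also be derived mechanically: write the polynomial MSB-first with the implicit x³² term dropped, which for CRC-32Q gives 0x814141AB (a constant supplied here for illustration, not taken from the vendored file), and bit-reverse it. A minimal sketch:

```go
package main

import (
	"fmt"
	"math/bits"
)

func main() {
	// CRC-32Q in normal (MSB-first) notation, x^32 term implicit:
	// x^31+x^24+x^22+x^16+x^14+x^8+x^7+x^5+x^3+x^1+x^0.
	const crc32qNormal uint32 = 0x814141AB
	// Bit-reversing yields the LSB-first value MakeTable expects.
	fmt.Printf("%#08x\n", bits.Reverse32(crc32qNormal)) // 0xd5828281
}
```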

View File

@ -1,73 +0,0 @@
{
"comment": "",
"ignore": "",
"package": [
{
"checksumSHA1": "lz/w/ZDHqS152Nc3i6+vn24oz4Q=",
"path": "github.com/Shopify/toxiproxy/client",
"revision": "411706b9d0c8ad721cd13cc8c45fdaa013635152",
"revisionTime": "2017-02-02T13:02:56Z"
},
{
"checksumSHA1": "WRu+mhZ2PfQu27qRr69HVGjtN4Q=",
"path": "github.com/davecgh/go-spew/spew",
"revision": "346938d642f2ec3594ed81d874461961cd0faa76",
"revisionTime": "2016-10-29T20:57:26Z"
},
{
"checksumSHA1": "a2yC46a1qsJomgY6rb+FkTFiqmE=",
"path": "github.com/davecgh/go-spew/spew/testdata",
"revision": "346938d642f2ec3594ed81d874461961cd0faa76",
"revisionTime": "2016-10-29T20:57:26Z"
},
{
"checksumSHA1": "427LRiuP2yQto6Bc40H3bBDO7PY=",
"path": "github.com/eapache/go-resiliency/breaker",
"revision": "b86b1ec0dd4209a588dc1285cdd471e73525c0b3",
"revisionTime": "2016-01-04T19:15:39Z"
},
{
"checksumSHA1": "+yXwwVFcfyVGIWpwcvEefGOadnA=",
"path": "github.com/eapache/go-xerial-snappy",
"revision": "bb955e01b9346ac19dc29eb16586c90ded99a98c",
"revisionTime": "2016-06-09T14:24:08Z"
},
{
"checksumSHA1": "rmP8g6I7+mJaZRGpNHtep8F1oiE=",
"path": "github.com/eapache/queue",
"revision": "44cc805cf13205b55f69e14bcb69867d1ae92f98",
"revisionTime": "2016-08-05T00:47:13Z"
},
{
"checksumSHA1": "IhK3rKOSR3UfWHe5JmYv7Fnmkrk=",
"path": "github.com/golang/snappy",
"revision": "553a641470496b2327abcac10b36396bd98e45c9",
"revisionTime": "2017-02-15T23:32:05Z"
},
{
"checksumSHA1": "jcHJL7j40puiZOV3qwnlsud2aoI=",
"path": "github.com/klauspost/crc32",
"revision": "1bab8b35b6bb565f92cbc97939610af9369f942a",
"revisionTime": "2017-02-10T14:05:23Z"
},
{
"checksumSHA1": "CZsJm4ihvctDj9xhGgfkKe0jIDg=",
"path": "github.com/pierrec/lz4",
"revision": "90290f74b1b4d9c097f0a3b3c7eba2ef3875c699",
"revisionTime": "2017-02-26T14:26:21Z"
},
{
"checksumSHA1": "Xxrev8i6UGVVl6IiHMu7Na8yGYM=",
"path": "github.com/pierrec/xxHash/xxHash32",
"revision": "5a004441f897722c627870a981d02b29924215fa",
"revisionTime": "2016-01-12T16:53:51Z"
},
{
"checksumSHA1": "TJ1UtXLL59LZBuC0IbNq5DYykrQ=",
"path": "github.com/rcrowley/go-metrics",
"revision": "1f30fe9094a513ce4c700b9a54458bbb0c96996c",
"revisionTime": "2016-11-28T21:05:44Z"
}
],
"rootPath": "github.com/Shopify/sarama"
}

22
vendor/github.com/davecgh/go-spew/.gitignore generated vendored Normal file
View File

@ -0,0 +1,22 @@
# Compiled Object files, Static and Dynamic libs (Shared Objects)
*.o
*.a
*.so
# Folders
_obj
_test
# Architecture specific extensions/prefixes
*.[568vq]
[568vq].out
*.cgo1.go
*.cgo2.c
_cgo_defun.c
_cgo_gotypes.go
_cgo_export.*
_testmain.go
*.exe

14
vendor/github.com/davecgh/go-spew/.travis.yml generated vendored Normal file
View File

@ -0,0 +1,14 @@
language: go
go:
- 1.5.4
- 1.6.3
- 1.7
install:
- go get -v golang.org/x/tools/cmd/cover
script:
- go test -v -tags=safe ./spew
- go test -v -tags=testcgo ./spew -covermode=count -coverprofile=profile.cov
after_success:
- go get -v github.com/mattn/goveralls
- export PATH=$PATH:$HOME/gopath/bin
- goveralls -coverprofile=profile.cov -service=travis-ci

205
vendor/github.com/davecgh/go-spew/README.md generated vendored Normal file
View File

@ -0,0 +1,205 @@
go-spew
=======
[![Build Status](https://img.shields.io/travis/davecgh/go-spew.svg)]
(https://travis-ci.org/davecgh/go-spew) [![ISC License]
(http://img.shields.io/badge/license-ISC-blue.svg)](http://copyfree.org) [![Coverage Status]
(https://img.shields.io/coveralls/davecgh/go-spew.svg)]
(https://coveralls.io/r/davecgh/go-spew?branch=master)
Go-spew implements a deep pretty printer for Go data structures to aid in
debugging. A comprehensive suite of tests with 100% test coverage is provided
to ensure proper functionality. See `test_coverage.txt` for the gocov coverage
report. Go-spew is licensed under the liberal ISC license, so it may be used in
open source or commercial projects.
If you're interested in reading about how this package came to life and some
of the challenges involved in providing a deep pretty printer, there is a blog
post about it
[here](https://web.archive.org/web/20160304013555/https://blog.cyphertite.com/go-spew-a-journey-into-dumping-go-data-structures/).
## Documentation
[![GoDoc](https://img.shields.io/badge/godoc-reference-blue.svg)]
(http://godoc.org/github.com/davecgh/go-spew/spew)
Full `go doc` style documentation for the project can be viewed online without
installing this package by using the excellent GoDoc site here:
http://godoc.org/github.com/davecgh/go-spew/spew
You can also view the documentation locally once the package is installed with
the `godoc` tool by running `godoc -http=":6060"` and pointing your browser to
http://localhost:6060/pkg/github.com/davecgh/go-spew/spew
## Installation
```bash
$ go get -u github.com/davecgh/go-spew/spew
```
## Quick Start
Add this import line to the file you're working in:
```Go
import "github.com/davecgh/go-spew/spew"
```
To dump a variable with full newlines, indentation, type, and pointer
information use Dump, Fdump, or Sdump:
```Go
spew.Dump(myVar1, myVar2, ...)
spew.Fdump(someWriter, myVar1, myVar2, ...)
str := spew.Sdump(myVar1, myVar2, ...)
```
Alternatively, if you would prefer to use format strings with a compacted inline
printing style, use the convenience wrappers Printf, Fprintf, etc with %v (most
compact), %+v (adds pointer addresses), %#v (adds types), or %#+v (adds types
and pointer addresses):
```Go
spew.Printf("myVar1: %v -- myVar2: %+v", myVar1, myVar2)
spew.Printf("myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
spew.Fprintf(someWriter, "myVar1: %v -- myVar2: %+v", myVar1, myVar2)
spew.Fprintf(someWriter, "myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
```
## Debugging a Web Application Example
Here is an example of how you can use `spew.Sdump()` to help debug a web application. Please be sure to wrap your output using the `html.EscapeString()` function for safety reasons. You should also only use this debugging technique in a development environment, never in production.
```Go
package main
import (
"fmt"
"html"
"net/http"
"github.com/davecgh/go-spew/spew"
)
func handler(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "text/html")
fmt.Fprintf(w, "Hi there, %s!", r.URL.Path[1:])
fmt.Fprintf(w, "<!--\n" + html.EscapeString(spew.Sdump(w)) + "\n-->")
}
func main() {
http.HandleFunc("/", handler)
http.ListenAndServe(":8080", nil)
}
```
## Sample Dump Output
```
(main.Foo) {
unexportedField: (*main.Bar)(0xf84002e210)({
flag: (main.Flag) flagTwo,
data: (uintptr) <nil>
}),
ExportedField: (map[interface {}]interface {}) {
(string) "one": (bool) true
}
}
([]uint8) {
00000000 11 12 13 14 15 16 17 18 19 1a 1b 1c 1d 1e 1f 20 |............... |
00000010 21 22 23 24 25 26 27 28 29 2a 2b 2c 2d 2e 2f 30 |!"#$%&'()*+,-./0|
00000020 31 32 |12|
}
```
## Sample Formatter Output
Double pointer to a uint8:
```
%v: <**>5
%+v: <**>(0xf8400420d0->0xf8400420c8)5
%#v: (**uint8)5
%#+v: (**uint8)(0xf8400420d0->0xf8400420c8)5
```
Pointer to circular struct with a uint8 field and a pointer to itself:
```
%v: <*>{1 <*><shown>}
%+v: <*>(0xf84003e260){ui8:1 c:<*>(0xf84003e260)<shown>}
%#v: (*main.circular){ui8:(uint8)1 c:(*main.circular)<shown>}
%#+v: (*main.circular)(0xf84003e260){ui8:(uint8)1 c:(*main.circular)(0xf84003e260)<shown>}
```
## Configuration Options
Configuration of spew is handled by fields in the ConfigState type. For
convenience, all of the top-level functions use a global state available via the
spew.Config global.
It is also possible to create a ConfigState instance that provides methods
equivalent to the top-level functions. This allows concurrent configuration
options. See the ConfigState documentation for more details.
```
* Indent
String to use for each indentation level for Dump functions.
It is a single space by default. A popular alternative is "\t".
* MaxDepth
Maximum number of levels to descend into nested data structures.
There is no limit by default.
* DisableMethods
Disables invocation of error and Stringer interface methods.
Method invocation is enabled by default.
* DisablePointerMethods
Disables invocation of error and Stringer interface methods on types
which only accept pointer receivers from non-pointer variables. This option
relies on access to the unsafe package, so it will not have any effect when
running in environments without access to the unsafe package such as Google
App Engine or with the "safe" build tag specified.
Pointer method invocation is enabled by default.
* DisablePointerAddresses
DisablePointerAddresses specifies whether to disable the printing of
pointer addresses. This is useful when diffing data structures in tests.
* DisableCapacities
DisableCapacities specifies whether to disable the printing of capacities
for arrays, slices, maps and channels. This is useful when diffing data
structures in tests.
* ContinueOnMethod
Enables recursion into types after invoking error and Stringer interface
methods. Recursion after method invocation is disabled by default.
* SortKeys
Specifies map keys should be sorted before being printed. Use
this to have a more deterministic, diffable output. Note that
only native types (bool, int, uint, floats, uintptr and string)
and types which implement error or Stringer interfaces are supported,
with other types sorted according to the reflect.Value.String() output
which guarantees display stability. Natural map order is used by
default.
* SpewKeys
SpewKeys specifies that, as a last resort attempt, map keys should be
spewed to strings and sorted by those strings. This is only considered
if SortKeys is true.
```
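A minimal sketch (not part of the upstream README) showing a dedicated `ConfigState` next to the global `spew.Config`:

```Go
package main

import "github.com/davecgh/go-spew/spew"

func main() {
	// A private ConfigState, independent of the global spew.Config.
	cfg := spew.ConfigState{
		Indent:   "\t", // tab indentation instead of the default single space
		MaxDepth: 2,    // stop descending after two levels
		SortKeys: true, // deterministic, diffable map output
	}
	data := map[string][]int{"b": {2}, "a": {1}}
	cfg.Dump(data)

	// The top-level functions consult the global spew.Config.
	spew.Config.DisablePointerAddresses = true
	spew.Dump(&data)
}
```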
## Unsafe Package Dependency
This package relies on the unsafe package to perform some of the more advanced
features, however it also supports a "limited" mode which allows it to work in
environments where the unsafe package is not available. By default, it will
operate in this mode on Google App Engine and when compiled with GopherJS. The
"safe" build tag may also be specified to force the package to build without
using the unsafe package.
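For instance, safe mode can be forced explicitly at build or test time (a minimal sketch mirroring the Travis configuration shown earlier):

```bash
# Build and test go-spew without the unsafe package ("limited" mode).
go build -tags safe github.com/davecgh/go-spew/spew
go test -tags safe github.com/davecgh/go-spew/spew
```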
## License
Go-spew is licensed under the [copyfree](http://copyfree.org) ISC License.

22
vendor/github.com/davecgh/go-spew/cov_report.sh generated vendored Normal file
View File

@ -0,0 +1,22 @@
#!/bin/sh
# This script uses gocov to generate a test coverage report.
# The gocov tool may be obtained with the following command:
# go get github.com/axw/gocov/gocov
#
# It will be installed to $GOPATH/bin, so ensure that location is in your $PATH.
# Check for gocov.
if ! type gocov >/dev/null 2>&1; then
echo >&2 "This script requires the gocov tool."
echo >&2 "You may obtain it with the following command:"
echo >&2 "go get github.com/axw/gocov/gocov"
exit 1
fi
# Only run the cgo tests if gcc is installed.
if type gcc >/dev/null 2>&1; then
(cd spew && gocov test -tags testcgo | gocov report)
else
(cd spew && gocov test | gocov report)
fi

View File

@ -41,9 +41,9 @@ var (
// after commit 82f48826c6c7 which changed the format again to mirror
// the original format. Code in the init function updates these offsets
// as necessary.
offsetPtr = ptrSize
offsetPtr = uintptr(ptrSize)
offsetScalar = uintptr(0)
offsetFlag = ptrSize * 2
offsetFlag = uintptr(ptrSize * 2)
// flagKindWidth and flagKindShift indicate various bits that the
// reflect package uses internally to track kind information.
@ -58,7 +58,7 @@ var (
// changed their positions. Code in the init function updates these
// flags as necessary.
flagKindWidth = uintptr(5)
flagKindShift = flagKindWidth - 1
flagKindShift = uintptr(flagKindWidth - 1)
flagRO = uintptr(1 << 0)
flagIndir = uintptr(1 << 1)
)

View File

@ -180,7 +180,7 @@ func printComplex(w io.Writer, c complex128, floatPrecision int) {
w.Write(closeParenBytes)
}
// printHexPtr outputs a uintptr formatted as hexadecimal with a leading '0x'
// printHexPtr outputs a uintptr formatted as hexidecimal with a leading '0x'
// prefix to Writer w.
func printHexPtr(w io.Writer, p uintptr) {
// Null pointer.

View File

@ -35,16 +35,16 @@ var (
// cCharRE is a regular expression that matches a cgo char.
// It is used to detect character arrays to hexdump them.
cCharRE = regexp.MustCompile(`^.*\._Ctype_char$`)
cCharRE = regexp.MustCompile("^.*\\._Ctype_char$")
// cUnsignedCharRE is a regular expression that matches a cgo unsigned
// char. It is used to detect unsigned character arrays to hexdump
// them.
cUnsignedCharRE = regexp.MustCompile(`^.*\._Ctype_unsignedchar$`)
cUnsignedCharRE = regexp.MustCompile("^.*\\._Ctype_unsignedchar$")
// cUint8tCharRE is a regular expression that matches a cgo uint8_t.
// It is used to detect uint8_t arrays to hexdump them.
cUint8tCharRE = regexp.MustCompile(`^.*\._Ctype_uint8_t$`)
cUint8tCharRE = regexp.MustCompile("^.*\\._Ctype_uint8_t$")
)
// dumpState contains information about the state of a dump operation.
@ -143,10 +143,10 @@ func (d *dumpState) dumpPtr(v reflect.Value) {
// Display dereferenced value.
d.w.Write(openParenBytes)
switch {
case nilFound:
case nilFound == true:
d.w.Write(nilAngleBytes)
case cycleFound:
case cycleFound == true:
d.w.Write(circularBytes)
default:

View File

@ -182,10 +182,10 @@ func (f *formatState) formatPtr(v reflect.Value) {
// Display dereferenced value.
switch {
case nilFound:
case nilFound == true:
f.fs.Write(nilAngleBytes)
case cycleFound:
case cycleFound == true:
f.fs.Write(circularShortBytes)
default:

View File

@ -1536,14 +1536,14 @@ func TestPrintSortedKeys(t *testing.T) {
t.Errorf("Sorted keys mismatch 3:\n %v %v", s, expected)
}
s = cfg.Sprint(map[testStruct]int{{1}: 1, {3}: 3, {2}: 2})
s = cfg.Sprint(map[testStruct]int{testStruct{1}: 1, testStruct{3}: 3, testStruct{2}: 2})
expected = "map[ts.1:1 ts.2:2 ts.3:3]"
if s != expected {
t.Errorf("Sorted keys mismatch 4:\n %v %v", s, expected)
}
if !spew.UnsafeDisabled {
s = cfg.Sprint(map[testStructP]int{{1}: 1, {3}: 3, {2}: 2})
s = cfg.Sprint(map[testStructP]int{testStructP{1}: 1, testStructP{3}: 3, testStructP{2}: 2})
expected = "map[ts.1:1 ts.2:2 ts.3:3]"
if s != expected {
t.Errorf("Sorted keys mismatch 5:\n %v %v", s, expected)

View File

@ -36,7 +36,10 @@ type dummyFmtState struct {
}
func (dfs *dummyFmtState) Flag(f int) bool {
return f == int('+')
if f == int('+') {
return true
}
return false
}
func (dfs *dummyFmtState) Precision() (int, bool) {

61
vendor/github.com/davecgh/go-spew/test_coverage.txt generated vendored Normal file
View File

@ -0,0 +1,61 @@
github.com/davecgh/go-spew/spew/dump.go dumpState.dump 100.00% (88/88)
github.com/davecgh/go-spew/spew/format.go formatState.format 100.00% (82/82)
github.com/davecgh/go-spew/spew/format.go formatState.formatPtr 100.00% (52/52)
github.com/davecgh/go-spew/spew/dump.go dumpState.dumpPtr 100.00% (44/44)
github.com/davecgh/go-spew/spew/dump.go dumpState.dumpSlice 100.00% (39/39)
github.com/davecgh/go-spew/spew/common.go handleMethods 100.00% (30/30)
github.com/davecgh/go-spew/spew/common.go printHexPtr 100.00% (18/18)
github.com/davecgh/go-spew/spew/common.go unsafeReflectValue 100.00% (13/13)
github.com/davecgh/go-spew/spew/format.go formatState.constructOrigFormat 100.00% (12/12)
github.com/davecgh/go-spew/spew/dump.go fdump 100.00% (11/11)
github.com/davecgh/go-spew/spew/format.go formatState.Format 100.00% (11/11)
github.com/davecgh/go-spew/spew/common.go init 100.00% (10/10)
github.com/davecgh/go-spew/spew/common.go printComplex 100.00% (9/9)
github.com/davecgh/go-spew/spew/common.go valuesSorter.Less 100.00% (8/8)
github.com/davecgh/go-spew/spew/format.go formatState.buildDefaultFormat 100.00% (7/7)
github.com/davecgh/go-spew/spew/format.go formatState.unpackValue 100.00% (5/5)
github.com/davecgh/go-spew/spew/dump.go dumpState.indent 100.00% (4/4)
github.com/davecgh/go-spew/spew/common.go catchPanic 100.00% (4/4)
github.com/davecgh/go-spew/spew/config.go ConfigState.convertArgs 100.00% (4/4)
github.com/davecgh/go-spew/spew/spew.go convertArgs 100.00% (4/4)
github.com/davecgh/go-spew/spew/format.go newFormatter 100.00% (3/3)
github.com/davecgh/go-spew/spew/dump.go Sdump 100.00% (3/3)
github.com/davecgh/go-spew/spew/common.go printBool 100.00% (3/3)
github.com/davecgh/go-spew/spew/common.go sortValues 100.00% (3/3)
github.com/davecgh/go-spew/spew/config.go ConfigState.Sdump 100.00% (3/3)
github.com/davecgh/go-spew/spew/dump.go dumpState.unpackValue 100.00% (3/3)
github.com/davecgh/go-spew/spew/spew.go Printf 100.00% (1/1)
github.com/davecgh/go-spew/spew/spew.go Println 100.00% (1/1)
github.com/davecgh/go-spew/spew/spew.go Sprint 100.00% (1/1)
github.com/davecgh/go-spew/spew/spew.go Sprintf 100.00% (1/1)
github.com/davecgh/go-spew/spew/spew.go Sprintln 100.00% (1/1)
github.com/davecgh/go-spew/spew/common.go printFloat 100.00% (1/1)
github.com/davecgh/go-spew/spew/config.go NewDefaultConfig 100.00% (1/1)
github.com/davecgh/go-spew/spew/common.go printInt 100.00% (1/1)
github.com/davecgh/go-spew/spew/common.go printUint 100.00% (1/1)
github.com/davecgh/go-spew/spew/common.go valuesSorter.Len 100.00% (1/1)
github.com/davecgh/go-spew/spew/common.go valuesSorter.Swap 100.00% (1/1)
github.com/davecgh/go-spew/spew/config.go ConfigState.Errorf 100.00% (1/1)
github.com/davecgh/go-spew/spew/config.go ConfigState.Fprint 100.00% (1/1)
github.com/davecgh/go-spew/spew/config.go ConfigState.Fprintf 100.00% (1/1)
github.com/davecgh/go-spew/spew/config.go ConfigState.Fprintln 100.00% (1/1)
github.com/davecgh/go-spew/spew/config.go ConfigState.Print 100.00% (1/1)
github.com/davecgh/go-spew/spew/config.go ConfigState.Printf 100.00% (1/1)
github.com/davecgh/go-spew/spew/config.go ConfigState.Println 100.00% (1/1)
github.com/davecgh/go-spew/spew/config.go ConfigState.Sprint 100.00% (1/1)
github.com/davecgh/go-spew/spew/config.go ConfigState.Sprintf 100.00% (1/1)
github.com/davecgh/go-spew/spew/config.go ConfigState.Sprintln 100.00% (1/1)
github.com/davecgh/go-spew/spew/config.go ConfigState.NewFormatter 100.00% (1/1)
github.com/davecgh/go-spew/spew/config.go ConfigState.Fdump 100.00% (1/1)
github.com/davecgh/go-spew/spew/config.go ConfigState.Dump 100.00% (1/1)
github.com/davecgh/go-spew/spew/dump.go Fdump 100.00% (1/1)
github.com/davecgh/go-spew/spew/dump.go Dump 100.00% (1/1)
github.com/davecgh/go-spew/spew/spew.go Fprintln 100.00% (1/1)
github.com/davecgh/go-spew/spew/format.go NewFormatter 100.00% (1/1)
github.com/davecgh/go-spew/spew/spew.go Errorf 100.00% (1/1)
github.com/davecgh/go-spew/spew/spew.go Fprint 100.00% (1/1)
github.com/davecgh/go-spew/spew/spew.go Fprintf 100.00% (1/1)
github.com/davecgh/go-spew/spew/spew.go Print 100.00% (1/1)
github.com/davecgh/go-spew/spew ------------------------------- 100.00% (505/505)

24
vendor/github.com/eapache/go-resiliency/.gitignore generated vendored Normal file
View File

@ -0,0 +1,24 @@
# Compiled Object files, Static and Dynamic libs (Shared Objects)
*.o
*.a
*.so
# Folders
_obj
_test
# Architecture specific extensions/prefixes
*.[568vq]
[568vq].out
*.cgo1.go
*.cgo2.c
_cgo_defun.c
_cgo_gotypes.go
_cgo_export.*
_testmain.go
*.exe
*.test
*.prof

7
vendor/github.com/eapache/go-resiliency/.travis.yml generated vendored Normal file
View File

@ -0,0 +1,7 @@
language: go
go:
- 1.1
- 1.2
- 1.3
- 1.4

Some files were not shown because too many files have changed in this diff.