Updated to Go modules

parent 69b146effd
commit b27f32a66e
Gopkg.lock (generated): 514 lines removed
@@ -1,514 +0,0 @@
|
||||
# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'.
|
||||
|
||||
|
||||
[[projects]]
|
||||
digest = "1:db7100aae319b808f819ee6d1d2e400d234b0a335bff2b70392f1e3a79ccd63d"
|
||||
name = "github.com/Shopify/sarama"
|
||||
packages = ["."]
|
||||
pruneopts = ""
|
||||
revision = "bbdbe644099b7fdc8327d5cc69c030945188b2e9"
|
||||
version = "v1.13.0"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:718dd81b0dcfc49323360bcc7b4f940c9d5f27106d17eddb25a714e9b2563d24"
|
||||
name = "github.com/aws/aws-sdk-go"
|
||||
packages = [
|
||||
"aws",
|
||||
"aws/awserr",
|
||||
"aws/awsutil",
|
||||
"aws/client",
|
||||
"aws/client/metadata",
|
||||
"aws/corehandlers",
|
||||
"aws/credentials",
|
||||
"aws/credentials/ec2rolecreds",
|
||||
"aws/credentials/endpointcreds",
|
||||
"aws/credentials/processcreds",
|
||||
"aws/credentials/stscreds",
|
||||
"aws/csm",
|
||||
"aws/defaults",
|
||||
"aws/ec2metadata",
|
||||
"aws/endpoints",
|
||||
"aws/request",
|
||||
"aws/session",
|
||||
"aws/signer/v4",
|
||||
"internal/ini",
|
||||
"internal/sdkio",
|
||||
"internal/sdkrand",
|
||||
"internal/sdkuri",
|
||||
"internal/shareddefaults",
|
||||
"private/protocol",
|
||||
"private/protocol/query",
|
||||
"private/protocol/query/queryutil",
|
||||
"private/protocol/rest",
|
||||
"private/protocol/xml/xmlutil",
|
||||
"service/sqs",
|
||||
"service/sts",
|
||||
]
|
||||
pruneopts = ""
|
||||
revision = "8b705a6dec722bcda3a9309c0924d4eca24f7c72"
|
||||
version = "v1.17.14"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:545ae40d6dde46043a71bdfd7f9a17f2353ce16277c83ac685af231b4b7c4beb"
|
||||
name = "github.com/cespare/xxhash"
|
||||
packages = ["."]
|
||||
pruneopts = ""
|
||||
revision = "de209a9ffae3256185a6bb135d1a0ada7b2b5f09"
|
||||
version = "v2.1.0"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:56c130d885a4aacae1dd9c7b71cfe39912c7ebc1ff7d2b46083c8812996dc43b"
|
||||
name = "github.com/davecgh/go-spew"
|
||||
packages = ["spew"]
|
||||
pruneopts = ""
|
||||
revision = "346938d642f2ec3594ed81d874461961cd0faa76"
|
||||
version = "v1.1.0"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:f714fa0ab4449a2fe13d156446ac1c1e16bc85334e9be320d42bf8bee362ba45"
|
||||
name = "github.com/eapache/go-resiliency"
|
||||
packages = ["breaker"]
|
||||
pruneopts = ""
|
||||
revision = "6800482f2c813e689c88b7ed3282262385011890"
|
||||
version = "v1.0.0"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
digest = "1:1f7503fa58a852a1416556ae2ddb219b49a1304fd408391948e2e3676514c48d"
|
||||
name = "github.com/eapache/go-xerial-snappy"
|
||||
packages = ["."]
|
||||
pruneopts = ""
|
||||
revision = "bb955e01b9346ac19dc29eb16586c90ded99a98c"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:c05dc14dd75a9697b8410ea13445ceb40669448f789afe955351ad34bc998cd0"
|
||||
name = "github.com/eapache/queue"
|
||||
packages = ["."]
|
||||
pruneopts = ""
|
||||
revision = "ded5959c0d4e360646dc9e9908cff48666781367"
|
||||
version = "v1.0.2"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:eecb3e6cef98036972582ffff7d3e340aef15f075236da353aa3e7fb798fdb21"
|
||||
name = "github.com/eclipse/paho.mqtt.golang"
|
||||
packages = [
|
||||
".",
|
||||
"packets",
|
||||
]
|
||||
pruneopts = ""
|
||||
revision = "aff15770515e3c57fc6109da73d42b0d46f7f483"
|
||||
version = "v1.1.0"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
digest = "1:27854310d59099f8dcc61dd8af4a69f0a3597f001154b2fb4d1c41baf2e31ec1"
|
||||
name = "github.com/golang/protobuf"
|
||||
packages = [
|
||||
"proto",
|
||||
"ptypes",
|
||||
"ptypes/any",
|
||||
"ptypes/duration",
|
||||
"ptypes/timestamp",
|
||||
]
|
||||
pruneopts = ""
|
||||
revision = "130e6b02ab059e7b717a096f397c5b60111cae74"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
digest = "1:09307dfb1aa3f49a2bf869dcfa4c6c06ecd3c207221bd1c1a1141f0e51f209eb"
|
||||
name = "github.com/golang/snappy"
|
||||
packages = ["."]
|
||||
pruneopts = ""
|
||||
revision = "553a641470496b2327abcac10b36396bd98e45c9"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
digest = "1:af269c8136b9422554bd4f1c0a024a7ccde3ef18482131ce10c336af80228e88"
|
||||
name = "github.com/gomodule/redigo"
|
||||
packages = ["redis"]
|
||||
pruneopts = ""
|
||||
revision = "e8fc0692a7e26a05b06517348ed466349062eb47"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:13fe471d0ed891e8544eddfeeb0471fd3c9f2015609a1c000aefdedf52a19d40"
|
||||
name = "github.com/jmespath/go-jmespath"
|
||||
packages = ["."]
|
||||
pruneopts = ""
|
||||
revision = "c2b33e84"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
digest = "1:75dddee0eb82002b5aff6937fdf6d544b85322d2414524a521768fe4b4e5ed3d"
|
||||
name = "github.com/mmcloughlin/geohash"
|
||||
packages = ["."]
|
||||
pruneopts = ""
|
||||
revision = "f7f2bcae3294530249c63fcb6fb6d5e83eee4e73"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:f04a78a43f55f089c919beee8ec4a1495dee1bd271548da2cb44bf44699a6a61"
|
||||
name = "github.com/nats-io/go-nats"
|
||||
packages = [
|
||||
".",
|
||||
"encoders/builtin",
|
||||
"util",
|
||||
]
|
||||
pruneopts = ""
|
||||
revision = "fb0396ee0bdb8018b0fef30d6d1de798ce99cd05"
|
||||
version = "v1.6.0"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:be61e8224b84064109eaba8157cbb4bbe6ca12443e182b6624fdfa1c0dcf53d9"
|
||||
name = "github.com/nats-io/nuid"
|
||||
packages = ["."]
|
||||
pruneopts = ""
|
||||
revision = "289cccf02c178dc782430d534e3c1f5b72af807f"
|
||||
version = "v1.0.0"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
digest = "1:30f72985e574101b71666d6e601e7564bd02d95164da59ca17363ad194137969"
|
||||
name = "github.com/peterh/liner"
|
||||
packages = ["."]
|
||||
pruneopts = ""
|
||||
revision = "a37ad39843113264dae84a5d89fcee28f50b35c6"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:2118aac9bc7ff09544626d5d1c7f7d4fb92a558702b00da5572ccc80ae7caf2b"
|
||||
name = "github.com/pierrec/lz4"
|
||||
packages = ["."]
|
||||
pruneopts = ""
|
||||
revision = "08c27939df1bd95e881e2c2367a749964ad1fceb"
|
||||
version = "v1.0.1"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:ff95a6c61f34f32e57833783059c80274d84e9c74e6e315c3dc2e93e9bf3dab9"
|
||||
name = "github.com/pierrec/xxHash"
|
||||
packages = ["xxHash32"]
|
||||
pruneopts = ""
|
||||
revision = "f051bb7f1d1aaf1b5a665d74fb6b0217712c69f7"
|
||||
version = "v0.1.1"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
digest = "1:5e3f4c2357e1e95ede9f36dc027b4b5a2cca463c8899344c22ebeb7c0abab8d5"
|
||||
name = "github.com/rcrowley/go-metrics"
|
||||
packages = ["."]
|
||||
pruneopts = ""
|
||||
revision = "1f30fe9094a513ce4c700b9a54458bbb0c96996c"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
digest = "1:10332fc7c87ee09af278e9c1899f1349f5c54dd22843ad2030e6556d05c5983e"
|
||||
name = "github.com/streadway/amqp"
|
||||
packages = ["."]
|
||||
pruneopts = ""
|
||||
revision = "cefed15a0bd808d13947f228770a81b06ebe8e45"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
digest = "1:b28f2f9253cbb1bf2bcb3c0ab7421d2f88a245386199a6668b0a66eb09ce3e1f"
|
||||
name = "github.com/tidwall/btree"
|
||||
packages = ["."]
|
||||
pruneopts = ""
|
||||
revision = "9876f1454cf0993a53d74c27196993e345f50dd1"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:4d2ec831fbaaf74fd75d2d9fe107e605c92489ec6cef6d36e1f23b678e9f2bd4"
|
||||
name = "github.com/tidwall/buntdb"
|
||||
packages = ["."]
|
||||
pruneopts = ""
|
||||
revision = "6249481c29c2cd96f53b691b74ac1893f72774c2"
|
||||
version = "v1.1.0"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:eab1a01c55a3428f83e16e92f902ffbeae143e19e080a6a1117532f7908f7579"
|
||||
name = "github.com/tidwall/geoindex"
|
||||
packages = [
|
||||
".",
|
||||
"child",
|
||||
]
|
||||
pruneopts = ""
|
||||
revision = "6fc1984907cad925af47d09fdc0cadc70f875cfe"
|
||||
version = "v1.1.0"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:ddb305f09be3613fd1bf9fd8d6d0713f2fd28b5af596437b3d7de2366bbee870"
|
||||
name = "github.com/tidwall/geojson"
|
||||
packages = [
|
||||
".",
|
||||
"geo",
|
||||
"geometry",
|
||||
]
|
||||
pruneopts = ""
|
||||
revision = "09ce8fa8548966071daf8df6bfc692cf756ff8cc"
|
||||
version = "v1.1.7"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:30e9a79822702670b96d3461aca7da11b8cc6e7954eb4e859e886559ed4802a4"
|
||||
name = "github.com/tidwall/gjson"
|
||||
packages = ["."]
|
||||
pruneopts = ""
|
||||
revision = "c5e72cdf74dff23857243dd662c465b810891c21"
|
||||
version = "v1.3.2"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
digest = "1:bf59f997bab72b8ecd044aed35d706edb6abd6128afe0502c94398b2374f1f3f"
|
||||
name = "github.com/tidwall/grect"
|
||||
packages = ["."]
|
||||
pruneopts = ""
|
||||
revision = "ba9a043346eba55344e40d66a5e74cfda3a9d293"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
digest = "1:96eb1cfd440166e1313d61021adba328e0dfeeac426abf9cc8c9879019b99b59"
|
||||
name = "github.com/tidwall/lotsa"
|
||||
packages = ["."]
|
||||
pruneopts = ""
|
||||
revision = "a03631ac7f1cd37f159fca01ff6d600b3536d3cf"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
digest = "1:4db4f92bb9cb04cfc4fccb36aba2598b02a988008c4cc0692b241214ad8ac96e"
|
||||
name = "github.com/tidwall/match"
|
||||
packages = ["."]
|
||||
pruneopts = ""
|
||||
revision = "1731857f09b1f38450e2c12409748407822dc6be"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
digest = "1:7eed51dcae60e95dbde54662594ef90a7cbf3b7e3f0de32f84f0213b695967ff"
|
||||
name = "github.com/tidwall/pretty"
|
||||
packages = ["."]
|
||||
pruneopts = ""
|
||||
revision = "65a9db5fad5105a89e17f38adcc9878685be6d78"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:3deeba766e407673583fcef7135199c081c4a236071511b6c4cac412335bcecc"
|
||||
name = "github.com/tidwall/rbang"
|
||||
packages = ["."]
|
||||
pruneopts = ""
|
||||
revision = "55391bcd9942773f84554000f0c9600345e3ef92"
|
||||
version = "v1.1.0"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
digest = "1:e84d0aa788bd55e938ebbaa62782385ca4da00b63c1d6bf23270c031a2ae9a88"
|
||||
name = "github.com/tidwall/redbench"
|
||||
packages = ["."]
|
||||
pruneopts = ""
|
||||
revision = "17c5b5b864a4b072481036ac689913156f5bb81c"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
digest = "1:3d97df307101403f6647217a1af92bdb1dc15d7d4c2c92280faeeb98c4fce0f2"
|
||||
name = "github.com/tidwall/redcon"
|
||||
packages = ["."]
|
||||
pruneopts = ""
|
||||
revision = "3df12143a4fe57c9f0d7f0f37e29ad95bc37f9a7"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
digest = "1:f0bb37e804b0c1901995b54f51a63f23ff0bb67747a1f8d37a666f394025bbc8"
|
||||
name = "github.com/tidwall/resp"
|
||||
packages = ["."]
|
||||
pruneopts = ""
|
||||
revision = "b2b1a7ca20e34ad839fdb81f78e67522c99959f0"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:2351ccd20a2fc2ba55096db53b98f6dc4451d2f68b72ab744dd1550adf98e85f"
|
||||
name = "github.com/tidwall/rhh"
|
||||
packages = ["."]
|
||||
pruneopts = ""
|
||||
revision = "86b588640216d02f9c15d34f1f3d546f082dd65e"
|
||||
version = "v1.1.0"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
digest = "1:5d9d865e55b95f001e52a7f5d1f812e8a80f0f05d5b04ede006f24206ebba33c"
|
||||
name = "github.com/tidwall/rtree"
|
||||
packages = [
|
||||
".",
|
||||
"base",
|
||||
]
|
||||
pruneopts = ""
|
||||
revision = "6cd427091e0e662cb4f8e2c9eb1a41e1c46ff0d3"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:ca969d3e75ed5b3003f4f5864bb5c13d99471ef57f9049bf78562d7ee1ac019c"
|
||||
name = "github.com/tidwall/sjson"
|
||||
packages = ["."]
|
||||
pruneopts = ""
|
||||
revision = "48d34adceb39a5bd6ed7c12f38c78cd425436442"
|
||||
version = "v1.0.2"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
digest = "1:251d31d1270dfc5d995c5ff7ee26dc783a34392b2d692e97e273146d082e25bd"
|
||||
name = "github.com/tidwall/tinybtree"
|
||||
packages = ["."]
|
||||
pruneopts = ""
|
||||
revision = "de5932d649b50053050d43056146b960f3d90ca5"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
digest = "1:9d6562efe571b54b2ec08ed598e4ba08d77b966dc2103a4300ae0cd0286dd6c3"
|
||||
name = "github.com/tidwall/tinyqueue"
|
||||
packages = ["."]
|
||||
pruneopts = ""
|
||||
revision = "1e39f55115634cad2c504631c8bfcc292f2c9c55"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
digest = "1:9d71091ff8756d88318a4334be685d311b10e1a01c0290ce743187b3bfb1b3f6"
|
||||
name = "github.com/yuin/gopher-lua"
|
||||
packages = [
|
||||
".",
|
||||
"ast",
|
||||
"parse",
|
||||
"pm",
|
||||
]
|
||||
pruneopts = ""
|
||||
revision = "eb1c7299435cc746b72514f37f74a5154dfe460f"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
digest = "1:d2438f1c85d855408197edcbac2ba137513738ccefbd12396b0af99d5449880b"
|
||||
name = "golang.org/x/crypto"
|
||||
packages = ["ssh/terminal"]
|
||||
pruneopts = ""
|
||||
revision = "9419663f5a44be8b34ca85f08abc5fe1be11f8a3"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
digest = "1:898bc7c802c1e0c20cecd65811e90b7b9bc5651b4a07aefd159451bfb200b2b3"
|
||||
name = "golang.org/x/net"
|
||||
packages = [
|
||||
"context",
|
||||
"http2",
|
||||
"http2/hpack",
|
||||
"idna",
|
||||
"internal/timeseries",
|
||||
"lex/httplex",
|
||||
"proxy",
|
||||
"trace",
|
||||
"websocket",
|
||||
]
|
||||
pruneopts = ""
|
||||
revision = "a04bdaca5b32abe1c069418fb7088ae607de5bd0"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
digest = "1:e9f555036bb1a2f61074131371313348581fa1b643c0e5f8c0a436bf7ce6db69"
|
||||
name = "golang.org/x/sys"
|
||||
packages = [
|
||||
"unix",
|
||||
"windows",
|
||||
]
|
||||
pruneopts = ""
|
||||
revision = "314a259e304ff91bd6985da2a7149bbf91237993"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
digest = "1:de5f2b854c46e933cd32e9c6e1f4bcf79d38587037f9a35178dde028779d9067"
|
||||
name = "golang.org/x/text"
|
||||
packages = [
|
||||
"collate",
|
||||
"collate/build",
|
||||
"internal/colltab",
|
||||
"internal/gen",
|
||||
"internal/tag",
|
||||
"internal/triegen",
|
||||
"internal/ucd",
|
||||
"language",
|
||||
"secure/bidirule",
|
||||
"transform",
|
||||
"unicode/bidi",
|
||||
"unicode/cldr",
|
||||
"unicode/norm",
|
||||
"unicode/rangetable",
|
||||
]
|
||||
pruneopts = ""
|
||||
revision = "d82c1812e304abfeeabd31e995a115a2855bf642"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
digest = "1:180913ea45cbe0072abce387a686b929908f8213106a735fe1d1273ae5239648"
|
||||
name = "google.golang.org/genproto"
|
||||
packages = ["googleapis/rpc/status"]
|
||||
pruneopts = ""
|
||||
revision = "f676e0f3ac6395ff1a529ae59a6670878a8371a6"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:b3c989821c14a572c49a8091fd1e832a5e53d3ae2fbf90ed3ea46cef4863aad9"
|
||||
name = "google.golang.org/grpc"
|
||||
packages = [
|
||||
".",
|
||||
"codes",
|
||||
"connectivity",
|
||||
"credentials",
|
||||
"grpclb/grpc_lb_v1/messages",
|
||||
"grpclog",
|
||||
"internal",
|
||||
"keepalive",
|
||||
"metadata",
|
||||
"naming",
|
||||
"peer",
|
||||
"stats",
|
||||
"status",
|
||||
"tap",
|
||||
"transport",
|
||||
]
|
||||
pruneopts = ""
|
||||
revision = "f92cdcd7dcdc69e81b2d7b338479a19a8723cfa3"
|
||||
version = "v1.6.0"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
digest = "1:d14d9e6f479cbbac36f556199034db558ec5755b59dcf9b44fa373611e97c7be"
|
||||
name = "layeh.com/gopher-json"
|
||||
packages = ["."]
|
||||
pruneopts = ""
|
||||
revision = "c128cc74278be889c4381681712931976fe0d88b"
|
||||
|
||||
[solve-meta]
|
||||
analyzer-name = "dep"
|
||||
analyzer-version = 1
|
||||
input-imports = [
|
||||
"github.com/Shopify/sarama",
|
||||
"github.com/aws/aws-sdk-go/aws",
|
||||
"github.com/aws/aws-sdk-go/aws/credentials",
|
||||
"github.com/aws/aws-sdk-go/aws/session",
|
||||
"github.com/aws/aws-sdk-go/service/sqs",
|
||||
"github.com/eclipse/paho.mqtt.golang",
|
||||
"github.com/golang/protobuf/proto",
|
||||
"github.com/gomodule/redigo/redis",
|
||||
"github.com/mmcloughlin/geohash",
|
||||
"github.com/nats-io/go-nats",
|
||||
"github.com/peterh/liner",
|
||||
"github.com/streadway/amqp",
|
||||
"github.com/tidwall/btree",
|
||||
"github.com/tidwall/buntdb",
|
||||
"github.com/tidwall/geoindex",
|
||||
"github.com/tidwall/geojson",
|
||||
"github.com/tidwall/geojson/geo",
|
||||
"github.com/tidwall/geojson/geometry",
|
||||
"github.com/tidwall/gjson",
|
||||
"github.com/tidwall/lotsa",
|
||||
"github.com/tidwall/match",
|
||||
"github.com/tidwall/pretty",
|
||||
"github.com/tidwall/rbang",
|
||||
"github.com/tidwall/redbench",
|
||||
"github.com/tidwall/redcon",
|
||||
"github.com/tidwall/resp",
|
||||
"github.com/tidwall/rhh",
|
||||
"github.com/tidwall/sjson",
|
||||
"github.com/tidwall/tinybtree",
|
||||
"github.com/yuin/gopher-lua",
|
||||
"golang.org/x/crypto/ssh/terminal",
|
||||
"golang.org/x/net/context",
|
||||
"google.golang.org/grpc",
|
||||
"layeh.com/gopher-json",
|
||||
]
|
||||
solver-name = "gps-cdcl"
|
||||
solver-version = 1
|
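Under Go modules, the pinned revisions and digests recorded in the Gopkg.lock above move into the go.mod and go.sum files added later in this commit, and the checksums can be re-verified on demand. A minimal sketch of the module-side counterparts, assuming a Go 1.13+ toolchain (these are standard Go commands, not part of this commit):

# Rough module-mode counterparts of 'dep ensure' / 'dep status'.
go mod download      # fetch every module listed in go.mod
go mod verify        # re-hash the downloads against the go.sum entries
go list -m all       # print the resolved module graph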
Gopkg.toml: 102 lines removed
@@ -1,102 +0,0 @@
# Gopkg.toml example
#
# Refer to https://github.com/golang/dep/blob/master/docs/Gopkg.toml.md
# for detailed Gopkg.toml documentation.
#
# required = ["github.com/user/thing/cmd/thing"]
# ignored = ["github.com/user/project/pkgX", "bitbucket.org/user/project/pkgA/pkgY"]
#
# [[constraint]]
#   name = "github.com/user/project"
#   version = "1.0.0"
#
# [[constraint]]
#   name = "github.com/user/project2"
#   branch = "dev"
#   source = "github.com/myfork/project2"
#
# [[override]]
#   name = "github.com/x/y"
#   version = "2.4.0"

required = [
  "github.com/tidwall/lotsa",
  "github.com/mmcloughlin/geohash"
]

[[constraint]]
  branch = "master"
  name = "github.com/tidwall/tinybtree"

[[constraint]]
  name = "github.com/tidwall/rhh"
  version = "1.1.0"

[[constraint]]
  name = "github.com/tidwall/geojson"
  version = "1.1.7"

[[constraint]]
  name = "github.com/Shopify/sarama"
  version = "1.13.0"

[[constraint]]
  name = "github.com/eclipse/paho.mqtt.golang"
  version = "1.1.0"

[[constraint]]
  branch = "master"
  name = "github.com/golang/protobuf"

[[constraint]]
  branch = "master"
  name = "github.com/peterh/liner"

[[constraint]]
  branch = "master"
  name = "github.com/gomodule/redigo"

[[constraint]]
  branch = "master"
  name = "github.com/streadway/amqp"

[[constraint]]
  branch = "master"
  name = "github.com/tidwall/btree"

[[constraint]]
  name = "github.com/tidwall/buntdb"
  version = "1.1.0"

[[constraint]]
  name = "github.com/tidwall/gjson"
  version = "1.3.2"

[[constraint]]
  branch = "master"
  name = "github.com/tidwall/redbench"

[[constraint]]
  branch = "master"
  name = "github.com/tidwall/redcon"

[[constraint]]
  branch = "master"
  name = "github.com/tidwall/resp"

[[constraint]]
  name = "github.com/tidwall/sjson"
  version = "1.0.0"

[[constraint]]
  branch = "master"
  name = "golang.org/x/crypto"

[[constraint]]
  branch = "master"
  name = "golang.org/x/net"

[[constraint]]
  name = "google.golang.org/grpc"
  version = "1.6.0"
build.sh: 21 lines changed
@@ -147,21 +147,6 @@ if [ "$1" == "package" ]; then
    exit
fi

# temp directory for storing isolated environment.
TMP="$(mktemp -d -t tile38.XXXX)"
function rmtemp {
    rm -rf "$TMP"
}
trap rmtemp EXIT

if [ "$NOLINK" != "1" ]; then
    # symlink root to isolated directory
    mkdir -p "$TMP/go/src/github.com/tidwall"
    ln -s $OD "$TMP/go/src/github.com/tidwall/tile38"
    export GOPATH="$TMP/go"
    cd "$TMP/go/src/github.com/tidwall/tile38"
fi

# generate the core package
core/gen.sh

@@ -175,12 +160,6 @@ go build -ldflags "$LDFLAGS -extldflags '-static'" -o "$OD/tile38-luamemtest" cm

# test if requested
if [ "$1" == "test" ]; then
    $OD/tile38-server -p 9876 -d "$TMP" -q &
    PID=$!
    function testend {
        kill $PID &
    }
    trap testend EXIT
    cd tests && go test && cd ..
    go test $(go list ./... | grep -v /vendor/ | grep -v /tests)
fi
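The lines removed from build.sh above are the GOPATH symlink setup that the dep-based build required; with the go.mod added below, a build can run straight from the repository checkout. A hedged sketch of the module-mode equivalent (paths and flags are illustrative and not taken from build.sh):

# Assumed module-mode build and test from the repo root.
# GO111MODULE=on is only needed on Go toolchains older than 1.16.
export GO111MODULE=on
go build -o tile38-server ./cmd/tile38-server
go test $(go list ./... | grep -v /vendor/ | grep -v /tests)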
go.mod (new file): 51 lines added
@@ -0,0 +1,51 @@
module github.com/tidwall/tile38

go 1.13

require (
    github.com/Shopify/sarama v1.13.0
    github.com/aws/aws-sdk-go v1.17.14
    github.com/cespare/xxhash v1.1.0 // indirect
    github.com/davecgh/go-spew v1.1.0 // indirect
    github.com/eapache/go-resiliency v1.0.0 // indirect
    github.com/eapache/go-xerial-snappy v0.0.0-20160609142408-bb955e01b934 // indirect
    github.com/eapache/queue v1.0.2 // indirect
    github.com/eclipse/paho.mqtt.golang v1.1.0
    github.com/golang/protobuf v0.0.0-20170920220647-130e6b02ab05
    github.com/golang/snappy v0.0.0-20170215233205-553a64147049 // indirect
    github.com/gomodule/redigo v2.0.1-0.20181026001555-e8fc0692a7e2+incompatible
    github.com/mmcloughlin/geohash v0.0.0-20181009053802-f7f2bcae3294
    github.com/nats-io/go-nats v1.6.0
    github.com/nats-io/nuid v1.0.0 // indirect
    github.com/peterh/liner v1.0.1-0.20170902204657-a37ad3984311
    github.com/pierrec/lz4 v1.0.1 // indirect
    github.com/pierrec/xxHash v0.1.1 // indirect
    github.com/rcrowley/go-metrics v0.0.0-20161128210544-1f30fe9094a5 // indirect
    github.com/streadway/amqp v0.0.0-20170926065634-cefed15a0bd8
    github.com/tidwall/btree v0.0.0-20170113224114-9876f1454cf0
    github.com/tidwall/buntdb v1.1.0
    github.com/tidwall/geoindex v1.1.0
    github.com/tidwall/geojson v1.1.7
    github.com/tidwall/gjson v1.3.2
    github.com/tidwall/grect v0.0.0-20161006141115-ba9a043346eb // indirect
    github.com/tidwall/lotsa v0.0.0-20180225195211-a03631ac7f1c // indirect
    github.com/tidwall/match v1.0.1
    github.com/tidwall/pretty v1.0.0
    github.com/tidwall/rbang v1.1.0
    github.com/tidwall/redbench v0.0.0-20181110173744-17c5b5b864a4
    github.com/tidwall/redcon v0.0.0-20171003141744-3df12143a4fe
    github.com/tidwall/resp v0.0.0-20160908231031-b2b1a7ca20e3
    github.com/tidwall/rhh v1.1.0
    github.com/tidwall/rtree v0.0.0-20180113144539-6cd427091e0e // indirect
    github.com/tidwall/sjson v1.0.2
    github.com/tidwall/tinybtree v0.0.0-20181217131827-de5932d649b5
    github.com/tidwall/tinyqueue v0.0.0-20180302190814-1e39f5511563 // indirect
    github.com/yuin/gopher-lua v0.0.0-20170915035107-eb1c7299435c
    golang.org/x/crypto v0.0.0-20170930174604-9419663f5a44
    golang.org/x/net v0.0.0-20171004034648-a04bdaca5b32
    golang.org/x/sys v0.0.0-20170927054621-314a259e304f // indirect
    golang.org/x/text v0.1.1-0.20171005092100-d82c1812e304 // indirect
    google.golang.org/genproto v0.0.0-20171002232614-f676e0f3ac63 // indirect
    google.golang.org/grpc v1.6.0
    layeh.com/gopher-json v0.0.0-20161224164157-c128cc74278b
)
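A go.mod like the one above is normally generated rather than written by hand; on a Go 1.13 toolchain, go mod init can seed the requirements from the existing dep metadata. A minimal sketch of such a migration, run from the repository root (the exact commands used for this commit are an assumption):

# Hypothetical dep-to-modules migration flow.
go mod init github.com/tidwall/tile38   # converts the Gopkg.lock pins into go.mod requirements
go mod tidy                             # completes the require list and writes go.sum
git rm Gopkg.toml Gopkg.lock            # the dep metadata is no longer needed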
go.sum (new file): 95 lines added
@@ -0,0 +1,95 @@
|
||||
github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
|
||||
github.com/Shopify/sarama v1.13.0 h1:R+4WFsmMzUxN2uiGzWXoY9apBAQnARC+B+wYvy/kC3k=
|
||||
github.com/Shopify/sarama v1.13.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo=
|
||||
github.com/aws/aws-sdk-go v1.17.14 h1:IjqZDIQoLyZ48A93BxVrZOaIGgZPRi4nXt6WQUMJplY=
|
||||
github.com/aws/aws-sdk-go v1.17.14/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
|
||||
github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko=
|
||||
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
|
||||
github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8=
|
||||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/eapache/go-resiliency v1.0.0 h1:XPZo5qMI0LGzIqT9wRq6dPv2vEuo9MWCar1wHY8Kuf4=
|
||||
github.com/eapache/go-resiliency v1.0.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs=
|
||||
github.com/eapache/go-xerial-snappy v0.0.0-20160609142408-bb955e01b934 h1:oGLoaVIefp3tiOgi7+KInR/nNPvEpPM6GFo+El7fd14=
|
||||
github.com/eapache/go-xerial-snappy v0.0.0-20160609142408-bb955e01b934/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU=
|
||||
github.com/eapache/queue v1.0.2 h1:jRJXCx6uciOfN69MfZCC9EZlGRqqHhwlyb6GBeNow+c=
|
||||
github.com/eapache/queue v1.0.2/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I=
|
||||
github.com/eclipse/paho.mqtt.golang v1.1.0 h1:Em29HD1CwLHdRFnX7yfg+kBjHHw6DSDok9I+ia4znT4=
|
||||
github.com/eclipse/paho.mqtt.golang v1.1.0/go.mod h1:H9keYFcgq3Qr5OUJm/JZI/i6U7joQ8SYLhZwfeOo6Ts=
|
||||
github.com/golang/protobuf v0.0.0-20170920220647-130e6b02ab05 h1:Kesru7U6Mhpf/x7rthxAKnr586VFmoE2NdEvkOKvfjg=
|
||||
github.com/golang/protobuf v0.0.0-20170920220647-130e6b02ab05/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/golang/snappy v0.0.0-20170215233205-553a64147049 h1:K9KHZbXKpGydfDN0aZrsoHpLJlZsBrGMFWbgLDGnPZk=
|
||||
github.com/golang/snappy v0.0.0-20170215233205-553a64147049/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
|
||||
github.com/gomodule/redigo v2.0.1-0.20181026001555-e8fc0692a7e2+incompatible h1:H4S5GVLXZxCnS6q3+HrRBu/ObgobnAHg92tWG8cLfX8=
|
||||
github.com/gomodule/redigo v2.0.1-0.20181026001555-e8fc0692a7e2+incompatible/go.mod h1:B4C85qUVwatsJoIUNIfCRsp7qO0iAmpGFZ4EELWSbC4=
|
||||
github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af h1:pmfjZENx5imkbgOkpRUYLnmbU7UEFbjtDA2hxJ1ichM=
|
||||
github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
|
||||
github.com/mmcloughlin/geohash v0.0.0-20181009053802-f7f2bcae3294 h1:QlTAK00UrY80KK9Da+foE04AjxhXFrgp87aZB6yfU5c=
|
||||
github.com/mmcloughlin/geohash v0.0.0-20181009053802-f7f2bcae3294/go.mod h1:oNZxQo5yWJh0eMQEP/8hwQuVx9Z9tjwFUqcTB1SmG0c=
|
||||
github.com/nats-io/go-nats v1.6.0 h1:FznPwMfrVwGnSCh7JTXyJDRW0TIkD4Tr+M1LPJt9T70=
|
||||
github.com/nats-io/go-nats v1.6.0/go.mod h1:+t7RHT5ApZebkrQdnn6AhQJmhJJiKAvJUio1PiiCtj0=
|
||||
github.com/nats-io/nuid v1.0.0 h1:44QGdhbiANq8ZCbUkdn6W5bqtg+mHuDE4wOUuxxndFs=
|
||||
github.com/nats-io/nuid v1.0.0/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c=
|
||||
github.com/peterh/liner v1.0.1-0.20170902204657-a37ad3984311 h1:IQrJrnseUVEdTXQpnWjks3LRNuYyydpK+A4k6oYXYHk=
|
||||
github.com/peterh/liner v1.0.1-0.20170902204657-a37ad3984311/go.mod h1:xIteQHvHuaLYG9IFj6mSxM0fCKrs34IrEQUhOYuGPHc=
|
||||
github.com/pierrec/lz4 v1.0.1 h1:w6GMGWSsCI04fTM8wQRdnW74MuJISakuUU0onU0TYB4=
|
||||
github.com/pierrec/lz4 v1.0.1/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
|
||||
github.com/pierrec/xxHash v0.1.1 h1:KP4NrV9023xp3M4FkTYfcXqWigsOCImL1ANJ7sh5vg4=
|
||||
github.com/pierrec/xxHash v0.1.1/go.mod h1:w2waW5Zoa/Wc4Yqe0wgrIYAGKqRMf7czn2HNKXmuL+I=
|
||||
github.com/rcrowley/go-metrics v0.0.0-20161128210544-1f30fe9094a5 h1:gwcdIpH6NU2iF8CmcqD+CP6+1CkRBOhHaPR+iu6raBY=
|
||||
github.com/rcrowley/go-metrics v0.0.0-20161128210544-1f30fe9094a5/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
|
||||
github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
|
||||
github.com/streadway/amqp v0.0.0-20170926065634-cefed15a0bd8 h1:q2L3Zhh0RscQeFIJRFshSq3DtZPE0ts8q3F+oyUxw/c=
|
||||
github.com/streadway/amqp v0.0.0-20170926065634-cefed15a0bd8/go.mod h1:1WNBiOZtZQLpVAyu0iTduoJL9hEsMloAK5XWrtW0xdY=
|
||||
github.com/tidwall/btree v0.0.0-20170113224114-9876f1454cf0 h1:QnyrPZZvPmR0AtJCxxfCtI1qN+fYpKTKJ/5opWmZ34k=
|
||||
github.com/tidwall/btree v0.0.0-20170113224114-9876f1454cf0/go.mod h1:huei1BkDWJ3/sLXmO+bsCNELL+Bp2Kks9OLyQFkzvA8=
|
||||
github.com/tidwall/buntdb v1.1.0 h1:H6LzK59KiNjf1nHVPFrYj4Qnl8d8YLBsYamdL8N+Bao=
|
||||
github.com/tidwall/buntdb v1.1.0/go.mod h1:Y39xhcDW10WlyYXeLgGftXVbjtM0QP+/kpz8xl9cbzE=
|
||||
github.com/tidwall/geoindex v1.1.0 h1:d/pGCgKUonfQINd1235kKqx9gWBU4N7GjDS9WvbPvLY=
|
||||
github.com/tidwall/geoindex v1.1.0/go.mod h1:3gTa91BW+eiVIipuR6aU1Y9Sa0q75b1teE/NP2vfsTc=
|
||||
github.com/tidwall/geojson v1.1.7 h1:uNeIRbzYzGpFw88CLajyrN3d0To5GcMW5YJSmPqkhH0=
|
||||
github.com/tidwall/geojson v1.1.7/go.mod h1:tBjfxeALRFLc25LLpjtWzy2nIrNmW1ze1EAhLtd8+QQ=
|
||||
github.com/tidwall/gjson v1.3.2 h1:+7p3qQFaH3fOMXAJSrdZwGKcOO/lYdGS0HqGhPqDdTI=
|
||||
github.com/tidwall/gjson v1.3.2/go.mod h1:P256ACg0Mn+j1RXIDXoss50DeIABTYK1PULOJHhxOls=
|
||||
github.com/tidwall/grect v0.0.0-20161006141115-ba9a043346eb h1:5NSYaAdrnblKByzd7XByQEJVT8+9v0W/tIY0Oo4OwrE=
|
||||
github.com/tidwall/grect v0.0.0-20161006141115-ba9a043346eb/go.mod h1:lKYYLFIr9OIgdgrtgkZ9zgRxRdvPYsExnYBsEAd8W5M=
|
||||
github.com/tidwall/lotsa v0.0.0-20180225195211-a03631ac7f1c/go.mod h1:X6NiU+4yHA3fE3Puvpnn1XMDrFZrE9JO2/w+UMuqgR8=
|
||||
github.com/tidwall/match v1.0.0/go.mod h1:LujAq0jyVjBy028G1WhWfIzbpQfMO8bBZ6Tyb0+pL9E=
|
||||
github.com/tidwall/match v1.0.1 h1:PnKP62LPNxHKTwvHHZZzdOAOCtsJTjo6dZLCwpKm5xc=
|
||||
github.com/tidwall/match v1.0.1/go.mod h1:LujAq0jyVjBy028G1WhWfIzbpQfMO8bBZ6Tyb0+pL9E=
|
||||
github.com/tidwall/pretty v0.0.0-20180105212114-65a9db5fad51/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk=
|
||||
github.com/tidwall/pretty v1.0.0 h1:HsD+QiTn7sK6flMKIvNmpqz1qrpP3Ps6jOKIKMooyg4=
|
||||
github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk=
|
||||
github.com/tidwall/rbang v1.1.0 h1:egck3fUlFlNj93odJC1TdcqFCcqfWq0qMH6HTwpXWUQ=
|
||||
github.com/tidwall/rbang v1.1.0/go.mod h1:aMGOM1Wj50tooEO/0aO9j+7gyHUs3bUW0t4Q+xiuOjg=
|
||||
github.com/tidwall/redbench v0.0.0-20181110173744-17c5b5b864a4 h1:W7pJCltjA/nrESHhV47Y01p2gFdInOY9qkq69uEzN0o=
|
||||
github.com/tidwall/redbench v0.0.0-20181110173744-17c5b5b864a4/go.mod h1:zxcRGCq/JcqV48YjK9WxBNJL7JSpMzbLXaHvMcnanKQ=
|
||||
github.com/tidwall/redcon v0.0.0-20171003141744-3df12143a4fe h1:GtV/vMtn3FYTSRAoIgQqcjc4/Huw16xy5t7djUnzc6k=
|
||||
github.com/tidwall/redcon v0.0.0-20171003141744-3df12143a4fe/go.mod h1:bdYBm4rlcWpst2XMwKVzWDF9CoUxEbUmM7CQrKeOZas=
|
||||
github.com/tidwall/resp v0.0.0-20160908231031-b2b1a7ca20e3 h1:+weN0RLHfv5fugOSyHPxbY9feCqi6JnLBFsq8Jvx7/E=
|
||||
github.com/tidwall/resp v0.0.0-20160908231031-b2b1a7ca20e3/go.mod h1:18xEj855iMY2bK6tNF2A4x+nZy5gWO1iO7OOl3jETKw=
|
||||
github.com/tidwall/rhh v1.1.0 h1:U+3RGzEB6VoBBkLlsAaF3ThjwZ0JGibplJhDin5Ub/Y=
|
||||
github.com/tidwall/rhh v1.1.0/go.mod h1:37/ybjMQQ4nDztczc//g/WBd2X51vogKHWL3xC8kY24=
|
||||
github.com/tidwall/rtree v0.0.0-20180113144539-6cd427091e0e h1:+NL1GDIUOKxVfbp2KoJQD9cTQ6dyP2co9q4yzmT9FZo=
|
||||
github.com/tidwall/rtree v0.0.0-20180113144539-6cd427091e0e/go.mod h1:/h+UnNGt0IhNNJLkGikcdcJqm66zGD/uJGMRxK/9+Ao=
|
||||
github.com/tidwall/sjson v1.0.2 h1:WHiiu9LsxPZazjIUPC1EGBuUqQVWJksZszl9BasNNjg=
|
||||
github.com/tidwall/sjson v1.0.2/go.mod h1:bURseu1nuBkFpIES5cz6zBtjmYeOQmEESshn7VpF15Y=
|
||||
github.com/tidwall/tinybtree v0.0.0-20181217131827-de5932d649b5 h1:NaGfypx6656w6iRmXK0buWGuYashbcFttdPe00DKUcE=
|
||||
github.com/tidwall/tinybtree v0.0.0-20181217131827-de5932d649b5/go.mod h1:0aFQG6KLQz3j57CeVgXlmKO3RSQ3myhJn2H+r84IgSY=
|
||||
github.com/tidwall/tinyqueue v0.0.0-20180302190814-1e39f5511563 h1:Otn9S136ELckZ3KKDyCkxapfufrqDqwmGjcHfAyXRrE=
|
||||
github.com/tidwall/tinyqueue v0.0.0-20180302190814-1e39f5511563/go.mod h1:mLqSmt7Dv/CNneF2wfcChfN1rvapyQr01LGKnKex0DQ=
|
||||
github.com/yuin/gopher-lua v0.0.0-20170915035107-eb1c7299435c h1:BXhbeVl63cTUicr+Q/D0/BNPw59IsIcyv2cB1/xHRps=
|
||||
github.com/yuin/gopher-lua v0.0.0-20170915035107-eb1c7299435c/go.mod h1:aEV29XrmTYFr3CiRxZeGHpkvbwq+prZduBqMaascyCU=
|
||||
golang.org/x/crypto v0.0.0-20170930174604-9419663f5a44 h1:9lP3x0pW80sDI6t1UMSLA4to18W7R7imwAI/sWS9S8Q=
|
||||
golang.org/x/crypto v0.0.0-20170930174604-9419663f5a44/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
||||
golang.org/x/net v0.0.0-20171004034648-a04bdaca5b32 h1:NjAulLPqFTaOxQu5S4qUMqscSu+mQdu+wMY0nfqSkuk=
|
||||
golang.org/x/net v0.0.0-20171004034648-a04bdaca5b32/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/sys v0.0.0-20170927054621-314a259e304f h1:iUy6hSM2lPBGm2d9HgXq1GqYPwcJvA8ihnWauXggYMs=
|
||||
golang.org/x/sys v0.0.0-20170927054621-314a259e304f/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/text v0.1.1-0.20171005092100-d82c1812e304 h1:O2dKpvCsgtI9C6I1Byy3L6t4dfkwGmLFeXPT6NMySx4=
|
||||
golang.org/x/text v0.1.1-0.20171005092100-d82c1812e304/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
google.golang.org/genproto v0.0.0-20171002232614-f676e0f3ac63 h1:yNBw5bwywOTguAu+h6SkCUaWdEZ7ZXgfiwb2YTN1eQw=
|
||||
google.golang.org/genproto v0.0.0-20171002232614-f676e0f3ac63/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
|
||||
google.golang.org/grpc v1.6.0 h1:vaySXtNtPrLJFCiET8QXtfBrqq16ynklmFGaZwLcd1M=
|
||||
google.golang.org/grpc v1.6.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
|
||||
layeh.com/gopher-json v0.0.0-20161224164157-c128cc74278b h1:ZfaWvT/nGlYq3Id9DDvsgmRzAfPotitRJmD7ymWZPK0=
|
||||
layeh.com/gopher-json v0.0.0-20161224164157-c128cc74278b/go.mod h1:ivKkcY8Zxw5ba0jldhZCYYQfGdb2K6u9tbYK1AwMIBc=
|
vendor/github.com/Shopify/sarama/.github/CONTRIBUTING.md (generated, vendored): 31 lines removed
@@ -1,31 +0,0 @@
# Contributing

Contributions are always welcome, both reporting issues and submitting pull requests!

### Reporting issues

Please make sure to include any potentially useful information in the issue, so we can pinpoint the issue faster without going back and forth.

- What SHA of Sarama are you running? If this is not the latest SHA on the master branch, please try if the problem persists with the latest version.
- You can set `sarama.Logger` to a [log.Logger](http://golang.org/pkg/log/#Logger) instance to capture debug output. Please include it in your issue description.
- Also look at the logs of the Kafka broker you are connected to. If you see anything out of the ordinary, please include it.

Also, please include the following information about your environment, so we can help you faster:

- What version of Kafka are you using?
- What version of Go are you using?
- What are the values of your Producer/Consumer/Client configuration?


### Submitting pull requests

We will gladly accept bug fixes, or additions to this library. Please fork this library, commit & push your changes, and open a pull request. Because this library is in production use by many people and applications, we code review all additions. To make the review process go as smooth as possible, please consider the following.

- If you plan to work on something major, please open an issue to discuss the design first.
- Don't break backwards compatibility. If you really have to, open an issue to discuss this first.
- Make sure to use the `go fmt` command to format your code according to the standards. Even better, set up your editor to do this for you when saving.
- Run [go vet](https://godoc.org/golang.org/x/tools/cmd/vet) to detect any suspicious constructs in your code that could be bugs.
- Explicitly handle all error return values. If you really want to ignore an error value, you can assign it to `_`. You can use [errcheck](https://github.com/kisielk/errcheck) to verify whether you have handled all errors.
- You may also want to run [golint](https://github.com/golang/lint) as well to detect style problems.
- Add tests that cover the changes you made. Make sure to run `go test` with the `-race` argument to test for race conditions.
- Make sure your code is supported by all the Go versions we support. You can rely on [Travis CI](https://travis-ci.org/Shopify/sarama) for testing older Go versions
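The pre-review checklist in the deleted guide above boils down to a handful of commands; a consolidated sketch (the tool names are the ones the guide links to, the exact invocations are assumptions):

# Assumed invocations of the checks the Sarama contributing guide asks for.
gofmt -l .
go vet ./...
errcheck ./...
golint ./...
go test -race ./...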
vendor/github.com/Shopify/sarama/.github/ISSUE_TEMPLATE.md (generated, vendored): 20 lines removed
@@ -1,20 +0,0 @@
##### Versions

*Please specify real version numbers or git SHAs, not just "Latest" since that changes fairly regularly.*
Sarama Version:
Kafka Version:
Go Version:

##### Configuration

What configuration values are you using for Sarama and Kafka?

##### Logs

When filing an issue please provide logs from Sarama and Kafka if at all
possible. You can set `sarama.Logger` to a `log.Logger` to capture Sarama debug
output.

##### Problem Description
vendor/github.com/Shopify/sarama/api_versions_request_test.go (generated, vendored): 14 lines removed
@@ -1,14 +0,0 @@
package sarama

import "testing"

var (
    apiVersionRequest = []byte{}
)

func TestApiVersionsRequest(t *testing.T) {
    var request *ApiVersionsRequest

    request = new(ApiVersionsRequest)
    testRequest(t, "basic", request, apiVersionRequest)
}
vendor/github.com/Shopify/sarama/api_versions_response_test.go (generated, vendored): 32 lines removed
@@ -1,32 +0,0 @@
package sarama

import "testing"

var (
    apiVersionResponse = []byte{
        0x00, 0x00,
        0x00, 0x00, 0x00, 0x01,
        0x00, 0x03,
        0x00, 0x02,
        0x00, 0x01,
    }
)

func TestApiVersionsResponse(t *testing.T) {
    var response *ApiVersionsResponse

    response = new(ApiVersionsResponse)
    testVersionDecodable(t, "no error", response, apiVersionResponse, 0)
    if response.Err != ErrNoError {
        t.Error("Decoding error failed: no error expected but found", response.Err)
    }
    if response.ApiVersions[0].ApiKey != 0x03 {
        t.Error("Decoding error: expected 0x03 but got", response.ApiVersions[0].ApiKey)
    }
    if response.ApiVersions[0].MinVersion != 0x02 {
        t.Error("Decoding error: expected 0x02 but got", response.ApiVersions[0].MinVersion)
    }
    if response.ApiVersions[0].MaxVersion != 0x01 {
        t.Error("Decoding error: expected 0x01 but got", response.ApiVersions[0].MaxVersion)
    }
}
vendor/github.com/Shopify/sarama/async_producer_test.go (generated, vendored): 841 lines removed
@@ -1,841 +0,0 @@
|
||||
package sarama
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"log"
|
||||
"os"
|
||||
"os/signal"
|
||||
"sync"
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
const TestMessage = "ABC THE MESSAGE"
|
||||
|
||||
func closeProducer(t *testing.T, p AsyncProducer) {
|
||||
var wg sync.WaitGroup
|
||||
p.AsyncClose()
|
||||
|
||||
wg.Add(2)
|
||||
go func() {
|
||||
for range p.Successes() {
|
||||
t.Error("Unexpected message on Successes()")
|
||||
}
|
||||
wg.Done()
|
||||
}()
|
||||
go func() {
|
||||
for msg := range p.Errors() {
|
||||
t.Error(msg.Err)
|
||||
}
|
||||
wg.Done()
|
||||
}()
|
||||
wg.Wait()
|
||||
}
|
||||
|
||||
func expectResults(t *testing.T, p AsyncProducer, successes, errors int) {
|
||||
expect := successes + errors
|
||||
for expect > 0 {
|
||||
select {
|
||||
case msg := <-p.Errors():
|
||||
if msg.Msg.flags != 0 {
|
||||
t.Error("Message had flags set")
|
||||
}
|
||||
errors--
|
||||
expect--
|
||||
if errors < 0 {
|
||||
t.Error(msg.Err)
|
||||
}
|
||||
case msg := <-p.Successes():
|
||||
if msg.flags != 0 {
|
||||
t.Error("Message had flags set")
|
||||
}
|
||||
successes--
|
||||
expect--
|
||||
if successes < 0 {
|
||||
t.Error("Too many successes")
|
||||
}
|
||||
}
|
||||
}
|
||||
if successes != 0 || errors != 0 {
|
||||
t.Error("Unexpected successes", successes, "or errors", errors)
|
||||
}
|
||||
}
|
||||
|
||||
type testPartitioner chan *int32
|
||||
|
||||
func (p testPartitioner) Partition(msg *ProducerMessage, numPartitions int32) (int32, error) {
|
||||
part := <-p
|
||||
if part == nil {
|
||||
return 0, errors.New("BOOM")
|
||||
}
|
||||
|
||||
return *part, nil
|
||||
}
|
||||
|
||||
func (p testPartitioner) RequiresConsistency() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
func (p testPartitioner) feed(partition int32) {
|
||||
p <- &partition
|
||||
}
|
||||
|
||||
type flakyEncoder bool
|
||||
|
||||
func (f flakyEncoder) Length() int {
|
||||
return len(TestMessage)
|
||||
}
|
||||
|
||||
func (f flakyEncoder) Encode() ([]byte, error) {
|
||||
if !bool(f) {
|
||||
return nil, errors.New("flaky encoding error")
|
||||
}
|
||||
return []byte(TestMessage), nil
|
||||
}
|
||||
|
||||
func TestAsyncProducer(t *testing.T) {
|
||||
seedBroker := NewMockBroker(t, 1)
|
||||
leader := NewMockBroker(t, 2)
|
||||
|
||||
metadataResponse := new(MetadataResponse)
|
||||
metadataResponse.AddBroker(leader.Addr(), leader.BrokerID())
|
||||
metadataResponse.AddTopicPartition("my_topic", 0, leader.BrokerID(), nil, nil, ErrNoError)
|
||||
seedBroker.Returns(metadataResponse)
|
||||
|
||||
prodSuccess := new(ProduceResponse)
|
||||
prodSuccess.AddTopicPartition("my_topic", 0, ErrNoError)
|
||||
leader.Returns(prodSuccess)
|
||||
|
||||
config := NewConfig()
|
||||
config.Producer.Flush.Messages = 10
|
||||
config.Producer.Return.Successes = true
|
||||
producer, err := NewAsyncProducer([]string{seedBroker.Addr()}, config)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
for i := 0; i < 10; i++ {
|
||||
producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: nil, Value: StringEncoder(TestMessage), Metadata: i}
|
||||
}
|
||||
for i := 0; i < 10; i++ {
|
||||
select {
|
||||
case msg := <-producer.Errors():
|
||||
t.Error(msg.Err)
|
||||
if msg.Msg.flags != 0 {
|
||||
t.Error("Message had flags set")
|
||||
}
|
||||
case msg := <-producer.Successes():
|
||||
if msg.flags != 0 {
|
||||
t.Error("Message had flags set")
|
||||
}
|
||||
if msg.Metadata.(int) != i {
|
||||
t.Error("Message metadata did not match")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
closeProducer(t, producer)
|
||||
leader.Close()
|
||||
seedBroker.Close()
|
||||
}
|
||||
|
||||
func TestAsyncProducerMultipleFlushes(t *testing.T) {
|
||||
seedBroker := NewMockBroker(t, 1)
|
||||
leader := NewMockBroker(t, 2)
|
||||
|
||||
metadataResponse := new(MetadataResponse)
|
||||
metadataResponse.AddBroker(leader.Addr(), leader.BrokerID())
|
||||
metadataResponse.AddTopicPartition("my_topic", 0, leader.BrokerID(), nil, nil, ErrNoError)
|
||||
seedBroker.Returns(metadataResponse)
|
||||
|
||||
prodSuccess := new(ProduceResponse)
|
||||
prodSuccess.AddTopicPartition("my_topic", 0, ErrNoError)
|
||||
leader.Returns(prodSuccess)
|
||||
leader.Returns(prodSuccess)
|
||||
leader.Returns(prodSuccess)
|
||||
|
||||
config := NewConfig()
|
||||
config.Producer.Flush.Messages = 5
|
||||
config.Producer.Return.Successes = true
|
||||
producer, err := NewAsyncProducer([]string{seedBroker.Addr()}, config)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
for flush := 0; flush < 3; flush++ {
|
||||
for i := 0; i < 5; i++ {
|
||||
producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: nil, Value: StringEncoder(TestMessage)}
|
||||
}
|
||||
expectResults(t, producer, 5, 0)
|
||||
}
|
||||
|
||||
closeProducer(t, producer)
|
||||
leader.Close()
|
||||
seedBroker.Close()
|
||||
}
|
||||
|
||||
func TestAsyncProducerMultipleBrokers(t *testing.T) {
|
||||
seedBroker := NewMockBroker(t, 1)
|
||||
leader0 := NewMockBroker(t, 2)
|
||||
leader1 := NewMockBroker(t, 3)
|
||||
|
||||
metadataResponse := new(MetadataResponse)
|
||||
metadataResponse.AddBroker(leader0.Addr(), leader0.BrokerID())
|
||||
metadataResponse.AddBroker(leader1.Addr(), leader1.BrokerID())
|
||||
metadataResponse.AddTopicPartition("my_topic", 0, leader0.BrokerID(), nil, nil, ErrNoError)
|
||||
metadataResponse.AddTopicPartition("my_topic", 1, leader1.BrokerID(), nil, nil, ErrNoError)
|
||||
seedBroker.Returns(metadataResponse)
|
||||
|
||||
prodResponse0 := new(ProduceResponse)
|
||||
prodResponse0.AddTopicPartition("my_topic", 0, ErrNoError)
|
||||
leader0.Returns(prodResponse0)
|
||||
|
||||
prodResponse1 := new(ProduceResponse)
|
||||
prodResponse1.AddTopicPartition("my_topic", 1, ErrNoError)
|
||||
leader1.Returns(prodResponse1)
|
||||
|
||||
config := NewConfig()
|
||||
config.Producer.Flush.Messages = 5
|
||||
config.Producer.Return.Successes = true
|
||||
config.Producer.Partitioner = NewRoundRobinPartitioner
|
||||
producer, err := NewAsyncProducer([]string{seedBroker.Addr()}, config)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
for i := 0; i < 10; i++ {
|
||||
producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: nil, Value: StringEncoder(TestMessage)}
|
||||
}
|
||||
expectResults(t, producer, 10, 0)
|
||||
|
||||
closeProducer(t, producer)
|
||||
leader1.Close()
|
||||
leader0.Close()
|
||||
seedBroker.Close()
|
||||
}
|
||||
|
||||
func TestAsyncProducerCustomPartitioner(t *testing.T) {
|
||||
seedBroker := NewMockBroker(t, 1)
|
||||
leader := NewMockBroker(t, 2)
|
||||
|
||||
metadataResponse := new(MetadataResponse)
|
||||
metadataResponse.AddBroker(leader.Addr(), leader.BrokerID())
|
||||
metadataResponse.AddTopicPartition("my_topic", 0, leader.BrokerID(), nil, nil, ErrNoError)
|
||||
seedBroker.Returns(metadataResponse)
|
||||
|
||||
prodResponse := new(ProduceResponse)
|
||||
prodResponse.AddTopicPartition("my_topic", 0, ErrNoError)
|
||||
leader.Returns(prodResponse)
|
||||
|
||||
config := NewConfig()
|
||||
config.Producer.Flush.Messages = 2
|
||||
config.Producer.Return.Successes = true
|
||||
config.Producer.Partitioner = func(topic string) Partitioner {
|
||||
p := make(testPartitioner)
|
||||
go func() {
|
||||
p.feed(0)
|
||||
p <- nil
|
||||
p <- nil
|
||||
p <- nil
|
||||
p.feed(0)
|
||||
}()
|
||||
return p
|
||||
}
|
||||
producer, err := NewAsyncProducer([]string{seedBroker.Addr()}, config)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
for i := 0; i < 5; i++ {
|
||||
producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: nil, Value: StringEncoder(TestMessage)}
|
||||
}
|
||||
expectResults(t, producer, 2, 3)
|
||||
|
||||
closeProducer(t, producer)
|
||||
leader.Close()
|
||||
seedBroker.Close()
|
||||
}
|
||||
|
||||
func TestAsyncProducerFailureRetry(t *testing.T) {
|
||||
seedBroker := NewMockBroker(t, 1)
|
||||
leader1 := NewMockBroker(t, 2)
|
||||
leader2 := NewMockBroker(t, 3)
|
||||
|
||||
metadataLeader1 := new(MetadataResponse)
|
||||
metadataLeader1.AddBroker(leader1.Addr(), leader1.BrokerID())
|
||||
metadataLeader1.AddTopicPartition("my_topic", 0, leader1.BrokerID(), nil, nil, ErrNoError)
|
||||
seedBroker.Returns(metadataLeader1)
|
||||
|
||||
config := NewConfig()
|
||||
config.Producer.Flush.Messages = 10
|
||||
config.Producer.Return.Successes = true
|
||||
config.Producer.Retry.Backoff = 0
|
||||
producer, err := NewAsyncProducer([]string{seedBroker.Addr()}, config)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
seedBroker.Close()
|
||||
|
||||
for i := 0; i < 10; i++ {
|
||||
producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: nil, Value: StringEncoder(TestMessage)}
|
||||
}
|
||||
prodNotLeader := new(ProduceResponse)
|
||||
prodNotLeader.AddTopicPartition("my_topic", 0, ErrNotLeaderForPartition)
|
||||
leader1.Returns(prodNotLeader)
|
||||
|
||||
metadataLeader2 := new(MetadataResponse)
|
||||
metadataLeader2.AddBroker(leader2.Addr(), leader2.BrokerID())
|
||||
metadataLeader2.AddTopicPartition("my_topic", 0, leader2.BrokerID(), nil, nil, ErrNoError)
|
||||
leader1.Returns(metadataLeader2)
|
||||
|
||||
prodSuccess := new(ProduceResponse)
|
||||
prodSuccess.AddTopicPartition("my_topic", 0, ErrNoError)
|
||||
leader2.Returns(prodSuccess)
|
||||
expectResults(t, producer, 10, 0)
|
||||
leader1.Close()
|
||||
|
||||
for i := 0; i < 10; i++ {
|
||||
producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: nil, Value: StringEncoder(TestMessage)}
|
||||
}
|
||||
leader2.Returns(prodSuccess)
|
||||
expectResults(t, producer, 10, 0)
|
||||
|
||||
leader2.Close()
|
||||
closeProducer(t, producer)
|
||||
}
|
||||
|
||||
func TestAsyncProducerEncoderFailures(t *testing.T) {
|
||||
seedBroker := NewMockBroker(t, 1)
|
||||
leader := NewMockBroker(t, 2)
|
||||
|
||||
metadataResponse := new(MetadataResponse)
|
||||
metadataResponse.AddBroker(leader.Addr(), leader.BrokerID())
|
||||
metadataResponse.AddTopicPartition("my_topic", 0, leader.BrokerID(), nil, nil, ErrNoError)
|
||||
seedBroker.Returns(metadataResponse)
|
||||
|
||||
prodSuccess := new(ProduceResponse)
|
||||
prodSuccess.AddTopicPartition("my_topic", 0, ErrNoError)
|
||||
leader.Returns(prodSuccess)
|
||||
leader.Returns(prodSuccess)
|
||||
leader.Returns(prodSuccess)
|
||||
|
||||
config := NewConfig()
|
||||
config.Producer.Flush.Messages = 1
|
||||
config.Producer.Return.Successes = true
|
||||
config.Producer.Partitioner = NewManualPartitioner
|
||||
producer, err := NewAsyncProducer([]string{seedBroker.Addr()}, config)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
for flush := 0; flush < 3; flush++ {
|
||||
producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: flakyEncoder(true), Value: flakyEncoder(false)}
|
||||
producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: flakyEncoder(false), Value: flakyEncoder(true)}
|
||||
producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: flakyEncoder(true), Value: flakyEncoder(true)}
|
||||
expectResults(t, producer, 1, 2)
|
||||
}
|
||||
|
||||
closeProducer(t, producer)
|
||||
leader.Close()
|
||||
seedBroker.Close()
|
||||
}
|
||||
|
||||
// If a Kafka broker becomes unavailable and then returns back in service, then
|
||||
// producer reconnects to it and continues sending messages.
|
||||
func TestAsyncProducerBrokerBounce(t *testing.T) {
|
||||
// Given
|
||||
seedBroker := NewMockBroker(t, 1)
|
||||
leader := NewMockBroker(t, 2)
|
||||
leaderAddr := leader.Addr()
|
||||
|
||||
metadataResponse := new(MetadataResponse)
|
||||
metadataResponse.AddBroker(leaderAddr, leader.BrokerID())
|
||||
metadataResponse.AddTopicPartition("my_topic", 0, leader.BrokerID(), nil, nil, ErrNoError)
|
||||
seedBroker.Returns(metadataResponse)
|
||||
|
||||
prodSuccess := new(ProduceResponse)
|
||||
prodSuccess.AddTopicPartition("my_topic", 0, ErrNoError)
|
||||
|
||||
config := NewConfig()
|
||||
config.Producer.Flush.Messages = 1
|
||||
config.Producer.Return.Successes = true
|
||||
config.Producer.Retry.Backoff = 0
|
||||
producer, err := NewAsyncProducer([]string{seedBroker.Addr()}, config)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: nil, Value: StringEncoder(TestMessage)}
|
||||
leader.Returns(prodSuccess)
|
||||
expectResults(t, producer, 1, 0)
|
||||
|
||||
// When: a broker connection gets reset by a broker (network glitch, restart, you name it).
|
||||
leader.Close() // producer should get EOF
|
||||
leader = NewMockBrokerAddr(t, 2, leaderAddr) // start it up again right away for giggles
|
||||
seedBroker.Returns(metadataResponse) // tell it to go to broker 2 again
|
||||
|
||||
// Then: a produced message goes through the new broker connection.
|
||||
producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: nil, Value: StringEncoder(TestMessage)}
|
||||
leader.Returns(prodSuccess)
|
||||
expectResults(t, producer, 1, 0)
|
||||
|
||||
closeProducer(t, producer)
|
||||
seedBroker.Close()
|
||||
leader.Close()
|
||||
}
|
||||
|
||||
func TestAsyncProducerBrokerBounceWithStaleMetadata(t *testing.T) {
|
||||
seedBroker := NewMockBroker(t, 1)
|
||||
leader1 := NewMockBroker(t, 2)
|
||||
leader2 := NewMockBroker(t, 3)
|
||||
|
||||
metadataLeader1 := new(MetadataResponse)
|
||||
metadataLeader1.AddBroker(leader1.Addr(), leader1.BrokerID())
|
||||
metadataLeader1.AddTopicPartition("my_topic", 0, leader1.BrokerID(), nil, nil, ErrNoError)
|
||||
seedBroker.Returns(metadataLeader1)
|
||||
|
||||
config := NewConfig()
|
||||
config.Producer.Flush.Messages = 10
|
||||
config.Producer.Return.Successes = true
|
||||
config.Producer.Retry.Max = 3
|
||||
config.Producer.Retry.Backoff = 0
|
||||
producer, err := NewAsyncProducer([]string{seedBroker.Addr()}, config)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
for i := 0; i < 10; i++ {
|
||||
producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: nil, Value: StringEncoder(TestMessage)}
|
||||
}
|
||||
leader1.Close() // producer should get EOF
|
||||
seedBroker.Returns(metadataLeader1) // tell it to go to leader1 again even though it's still down
|
||||
seedBroker.Returns(metadataLeader1) // tell it to go to leader1 again even though it's still down
|
||||
|
||||
// ok fine, tell it to go to leader2 finally
|
||||
metadataLeader2 := new(MetadataResponse)
|
||||
metadataLeader2.AddBroker(leader2.Addr(), leader2.BrokerID())
|
||||
metadataLeader2.AddTopicPartition("my_topic", 0, leader2.BrokerID(), nil, nil, ErrNoError)
|
||||
seedBroker.Returns(metadataLeader2)
|
||||
|
||||
prodSuccess := new(ProduceResponse)
|
||||
prodSuccess.AddTopicPartition("my_topic", 0, ErrNoError)
|
||||
leader2.Returns(prodSuccess)
|
||||
expectResults(t, producer, 10, 0)
|
||||
seedBroker.Close()
|
||||
leader2.Close()
|
||||
|
||||
closeProducer(t, producer)
|
||||
}
|
||||
|
||||
func TestAsyncProducerMultipleRetries(t *testing.T) {
|
||||
seedBroker := NewMockBroker(t, 1)
|
||||
leader1 := NewMockBroker(t, 2)
|
||||
leader2 := NewMockBroker(t, 3)
|
||||
|
||||
metadataLeader1 := new(MetadataResponse)
|
||||
metadataLeader1.AddBroker(leader1.Addr(), leader1.BrokerID())
|
||||
metadataLeader1.AddTopicPartition("my_topic", 0, leader1.BrokerID(), nil, nil, ErrNoError)
|
||||
seedBroker.Returns(metadataLeader1)
|
||||
|
||||
config := NewConfig()
|
||||
config.Producer.Flush.Messages = 10
|
||||
config.Producer.Return.Successes = true
|
||||
config.Producer.Retry.Max = 4
|
||||
config.Producer.Retry.Backoff = 0
|
||||
producer, err := NewAsyncProducer([]string{seedBroker.Addr()}, config)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
for i := 0; i < 10; i++ {
|
||||
producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: nil, Value: StringEncoder(TestMessage)}
|
||||
}
|
||||
prodNotLeader := new(ProduceResponse)
|
||||
prodNotLeader.AddTopicPartition("my_topic", 0, ErrNotLeaderForPartition)
|
||||
leader1.Returns(prodNotLeader)
|
||||
|
||||
metadataLeader2 := new(MetadataResponse)
|
||||
metadataLeader2.AddBroker(leader2.Addr(), leader2.BrokerID())
|
||||
metadataLeader2.AddTopicPartition("my_topic", 0, leader2.BrokerID(), nil, nil, ErrNoError)
|
||||
seedBroker.Returns(metadataLeader2)
|
||||
leader2.Returns(prodNotLeader)
|
||||
seedBroker.Returns(metadataLeader1)
|
||||
leader1.Returns(prodNotLeader)
|
||||
seedBroker.Returns(metadataLeader1)
|
||||
leader1.Returns(prodNotLeader)
|
||||
seedBroker.Returns(metadataLeader2)
|
||||
|
||||
prodSuccess := new(ProduceResponse)
|
||||
prodSuccess.AddTopicPartition("my_topic", 0, ErrNoError)
|
||||
leader2.Returns(prodSuccess)
|
||||
expectResults(t, producer, 10, 0)
|
||||
|
||||
for i := 0; i < 10; i++ {
|
||||
producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: nil, Value: StringEncoder(TestMessage)}
|
||||
}
|
||||
leader2.Returns(prodSuccess)
|
||||
expectResults(t, producer, 10, 0)
|
||||
|
||||
seedBroker.Close()
|
||||
leader1.Close()
|
||||
leader2.Close()
|
||||
closeProducer(t, producer)
|
||||
}
|
||||
|
||||
func TestAsyncProducerOutOfRetries(t *testing.T) {
|
||||
t.Skip("Enable once bug #294 is fixed.")
|
||||
|
||||
seedBroker := NewMockBroker(t, 1)
|
||||
leader := NewMockBroker(t, 2)
|
||||
|
||||
metadataResponse := new(MetadataResponse)
|
||||
metadataResponse.AddBroker(leader.Addr(), leader.BrokerID())
|
||||
metadataResponse.AddTopicPartition("my_topic", 0, leader.BrokerID(), nil, nil, ErrNoError)
|
||||
seedBroker.Returns(metadataResponse)
|
||||
|
||||
config := NewConfig()
|
||||
config.Producer.Flush.Messages = 10
|
||||
config.Producer.Return.Successes = true
|
||||
config.Producer.Retry.Backoff = 0
|
||||
config.Producer.Retry.Max = 0
|
||||
producer, err := NewAsyncProducer([]string{seedBroker.Addr()}, config)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
for i := 0; i < 10; i++ {
|
||||
producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: nil, Value: StringEncoder(TestMessage)}
|
||||
}
|
||||
|
||||
prodNotLeader := new(ProduceResponse)
|
||||
prodNotLeader.AddTopicPartition("my_topic", 0, ErrNotLeaderForPartition)
|
||||
leader.Returns(prodNotLeader)
|
||||
|
||||
for i := 0; i < 10; i++ {
|
||||
select {
|
||||
case msg := <-producer.Errors():
|
||||
if msg.Err != ErrNotLeaderForPartition {
|
||||
t.Error(msg.Err)
|
||||
}
|
||||
case <-producer.Successes():
|
||||
t.Error("Unexpected success")
|
||||
}
|
||||
}
|
||||
|
||||
seedBroker.Returns(metadataResponse)
|
||||
|
||||
for i := 0; i < 10; i++ {
|
||||
producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: nil, Value: StringEncoder(TestMessage)}
|
||||
}
|
||||
|
||||
prodSuccess := new(ProduceResponse)
|
||||
prodSuccess.AddTopicPartition("my_topic", 0, ErrNoError)
|
||||
leader.Returns(prodSuccess)
|
||||
|
||||
expectResults(t, producer, 10, 0)
|
||||
|
||||
leader.Close()
|
||||
seedBroker.Close()
|
||||
safeClose(t, producer)
|
||||
}
|
||||
|
||||
func TestAsyncProducerRetryWithReferenceOpen(t *testing.T) {
|
||||
seedBroker := NewMockBroker(t, 1)
|
||||
leader := NewMockBroker(t, 2)
|
||||
leaderAddr := leader.Addr()
|
||||
|
||||
metadataResponse := new(MetadataResponse)
|
||||
metadataResponse.AddBroker(leaderAddr, leader.BrokerID())
|
||||
metadataResponse.AddTopicPartition("my_topic", 0, leader.BrokerID(), nil, nil, ErrNoError)
|
||||
metadataResponse.AddTopicPartition("my_topic", 1, leader.BrokerID(), nil, nil, ErrNoError)
|
||||
seedBroker.Returns(metadataResponse)
|
||||
|
||||
config := NewConfig()
|
||||
config.Producer.Return.Successes = true
|
||||
config.Producer.Retry.Backoff = 0
|
||||
config.Producer.Retry.Max = 1
|
||||
config.Producer.Partitioner = NewRoundRobinPartitioner
|
||||
producer, err := NewAsyncProducer([]string{seedBroker.Addr()}, config)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// prime partition 0
|
||||
producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: nil, Value: StringEncoder(TestMessage)}
|
||||
prodSuccess := new(ProduceResponse)
|
||||
prodSuccess.AddTopicPartition("my_topic", 0, ErrNoError)
|
||||
leader.Returns(prodSuccess)
|
||||
expectResults(t, producer, 1, 0)
|
||||
|
||||
// prime partition 1
|
||||
producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: nil, Value: StringEncoder(TestMessage)}
|
||||
prodSuccess = new(ProduceResponse)
|
||||
prodSuccess.AddTopicPartition("my_topic", 1, ErrNoError)
|
||||
leader.Returns(prodSuccess)
|
||||
expectResults(t, producer, 1, 0)
|
||||
|
||||
// reboot the broker (the producer will get EOF on its existing connection)
|
||||
leader.Close()
|
||||
leader = NewMockBrokerAddr(t, 2, leaderAddr)
|
||||
|
||||
// send another message on partition 0 to trigger the EOF and retry
|
||||
producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: nil, Value: StringEncoder(TestMessage)}
|
||||
|
||||
// tell partition 0 to go to that broker again
|
||||
seedBroker.Returns(metadataResponse)
|
||||
|
||||
// succeed this time
|
||||
prodSuccess = new(ProduceResponse)
|
||||
prodSuccess.AddTopicPartition("my_topic", 0, ErrNoError)
|
||||
leader.Returns(prodSuccess)
|
||||
expectResults(t, producer, 1, 0)
|
||||
|
||||
// shutdown
|
||||
closeProducer(t, producer)
|
||||
seedBroker.Close()
|
||||
leader.Close()
|
||||
}
|
||||
|
||||
func TestAsyncProducerFlusherRetryCondition(t *testing.T) {
|
||||
seedBroker := NewMockBroker(t, 1)
|
||||
leader := NewMockBroker(t, 2)
|
||||
|
||||
metadataResponse := new(MetadataResponse)
|
||||
metadataResponse.AddBroker(leader.Addr(), leader.BrokerID())
|
||||
metadataResponse.AddTopicPartition("my_topic", 0, leader.BrokerID(), nil, nil, ErrNoError)
|
||||
metadataResponse.AddTopicPartition("my_topic", 1, leader.BrokerID(), nil, nil, ErrNoError)
|
||||
seedBroker.Returns(metadataResponse)
|
||||
|
||||
config := NewConfig()
|
||||
config.Producer.Flush.Messages = 5
|
||||
config.Producer.Return.Successes = true
|
||||
config.Producer.Retry.Backoff = 0
|
||||
config.Producer.Retry.Max = 1
|
||||
config.Producer.Partitioner = NewManualPartitioner
|
||||
producer, err := NewAsyncProducer([]string{seedBroker.Addr()}, config)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// prime partitions
|
||||
for p := int32(0); p < 2; p++ {
|
||||
for i := 0; i < 5; i++ {
|
||||
producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: nil, Value: StringEncoder(TestMessage), Partition: p}
|
||||
}
|
||||
prodSuccess := new(ProduceResponse)
|
||||
prodSuccess.AddTopicPartition("my_topic", p, ErrNoError)
|
||||
leader.Returns(prodSuccess)
|
||||
expectResults(t, producer, 5, 0)
|
||||
}
|
||||
|
||||
// send more messages on partition 0
|
||||
for i := 0; i < 5; i++ {
|
||||
producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: nil, Value: StringEncoder(TestMessage), Partition: 0}
|
||||
}
|
||||
prodNotLeader := new(ProduceResponse)
|
||||
prodNotLeader.AddTopicPartition("my_topic", 0, ErrNotLeaderForPartition)
|
||||
leader.Returns(prodNotLeader)
|
||||
|
||||
time.Sleep(50 * time.Millisecond)
|
||||
|
||||
leader.SetHandlerByMap(map[string]MockResponse{
|
||||
"ProduceRequest": NewMockProduceResponse(t).
|
||||
SetError("my_topic", 0, ErrNoError),
|
||||
})
|
||||
|
||||
// tell partition 0 to go to that broker again
|
||||
seedBroker.Returns(metadataResponse)
|
||||
|
||||
// succeed this time
|
||||
expectResults(t, producer, 5, 0)
|
||||
|
||||
// put five more through
|
||||
for i := 0; i < 5; i++ {
|
||||
producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: nil, Value: StringEncoder(TestMessage), Partition: 0}
|
||||
}
|
||||
expectResults(t, producer, 5, 0)
|
||||
|
||||
// shutdown
|
||||
closeProducer(t, producer)
|
||||
seedBroker.Close()
|
||||
leader.Close()
|
||||
}
|
||||
|
||||
func TestAsyncProducerRetryShutdown(t *testing.T) {
|
||||
seedBroker := NewMockBroker(t, 1)
|
||||
leader := NewMockBroker(t, 2)
|
||||
|
||||
metadataLeader := new(MetadataResponse)
|
||||
metadataLeader.AddBroker(leader.Addr(), leader.BrokerID())
|
||||
metadataLeader.AddTopicPartition("my_topic", 0, leader.BrokerID(), nil, nil, ErrNoError)
|
||||
seedBroker.Returns(metadataLeader)
|
||||
|
||||
config := NewConfig()
|
||||
config.Producer.Flush.Messages = 10
|
||||
config.Producer.Return.Successes = true
|
||||
config.Producer.Retry.Backoff = 0
|
||||
producer, err := NewAsyncProducer([]string{seedBroker.Addr()}, config)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
for i := 0; i < 10; i++ {
|
||||
producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: nil, Value: StringEncoder(TestMessage)}
|
||||
}
|
||||
producer.AsyncClose()
|
||||
time.Sleep(5 * time.Millisecond) // let the shutdown goroutine kick in
|
||||
|
||||
producer.Input() <- &ProducerMessage{Topic: "FOO"}
|
||||
if err := <-producer.Errors(); err.Err != ErrShuttingDown {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
prodNotLeader := new(ProduceResponse)
|
||||
prodNotLeader.AddTopicPartition("my_topic", 0, ErrNotLeaderForPartition)
|
||||
leader.Returns(prodNotLeader)
|
||||
|
||||
seedBroker.Returns(metadataLeader)
|
||||
|
||||
prodSuccess := new(ProduceResponse)
|
||||
prodSuccess.AddTopicPartition("my_topic", 0, ErrNoError)
|
||||
leader.Returns(prodSuccess)
|
||||
expectResults(t, producer, 10, 0)
|
||||
|
||||
seedBroker.Close()
|
||||
leader.Close()
|
||||
|
||||
// wait for the async-closed producer to shut down fully
|
||||
for err := range producer.Errors() {
|
||||
t.Error(err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestAsyncProducerNoReturns(t *testing.T) {
|
||||
seedBroker := NewMockBroker(t, 1)
|
||||
leader := NewMockBroker(t, 2)
|
||||
|
||||
metadataLeader := new(MetadataResponse)
|
||||
metadataLeader.AddBroker(leader.Addr(), leader.BrokerID())
|
||||
metadataLeader.AddTopicPartition("my_topic", 0, leader.BrokerID(), nil, nil, ErrNoError)
|
||||
seedBroker.Returns(metadataLeader)
|
||||
|
||||
config := NewConfig()
|
||||
config.Producer.Flush.Messages = 10
|
||||
config.Producer.Return.Successes = false
|
||||
config.Producer.Return.Errors = false
|
||||
config.Producer.Retry.Backoff = 0
|
||||
producer, err := NewAsyncProducer([]string{seedBroker.Addr()}, config)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
for i := 0; i < 10; i++ {
|
||||
producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: nil, Value: StringEncoder(TestMessage)}
|
||||
}
|
||||
|
||||
wait := make(chan bool)
|
||||
go func() {
|
||||
if err := producer.Close(); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
close(wait)
|
||||
}()
|
||||
|
||||
prodSuccess := new(ProduceResponse)
|
||||
prodSuccess.AddTopicPartition("my_topic", 0, ErrNoError)
|
||||
leader.Returns(prodSuccess)
|
||||
|
||||
<-wait
|
||||
seedBroker.Close()
|
||||
leader.Close()
|
||||
}
|
||||
|
||||
// This example shows how to use the producer while simultaneously
|
||||
// reading the Errors channel to know about any failures.
|
||||
func ExampleAsyncProducer_select() {
|
||||
producer, err := NewAsyncProducer([]string{"localhost:9092"}, nil)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
defer func() {
|
||||
if err := producer.Close(); err != nil {
|
||||
log.Fatalln(err)
|
||||
}
|
||||
}()
|
||||
|
||||
// Trap SIGINT to trigger a shutdown.
|
||||
signals := make(chan os.Signal, 1)
|
||||
signal.Notify(signals, os.Interrupt)
|
||||
|
||||
var enqueued, errors int
|
||||
ProducerLoop:
|
||||
for {
|
||||
select {
|
||||
case producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: nil, Value: StringEncoder("testing 123")}:
|
||||
enqueued++
|
||||
case err := <-producer.Errors():
|
||||
log.Println("Failed to produce message", err)
|
||||
errors++
|
||||
case <-signals:
|
||||
break ProducerLoop
|
||||
}
|
||||
}
|
||||
|
||||
log.Printf("Enqueued: %d; errors: %d\n", enqueued, errors)
|
||||
}
|
||||
|
||||
// This example shows how to use the producer with separate goroutines
|
||||
// reading from the Successes and Errors channels. Note that in order
|
||||
// for the Successes channel to be populated, you have to set
|
||||
// config.Producer.Return.Successes to true.
|
||||
func ExampleAsyncProducer_goroutines() {
|
||||
config := NewConfig()
|
||||
config.Producer.Return.Successes = true
|
||||
producer, err := NewAsyncProducer([]string{"localhost:9092"}, config)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
// Trap SIGINT to trigger a graceful shutdown.
|
||||
signals := make(chan os.Signal, 1)
|
||||
signal.Notify(signals, os.Interrupt)
|
||||
|
||||
var (
|
||||
wg sync.WaitGroup
|
||||
enqueued, successes, errors int
|
||||
)
|
||||
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
for range producer.Successes() {
|
||||
successes++
|
||||
}
|
||||
}()
|
||||
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
for err := range producer.Errors() {
|
||||
log.Println(err)
|
||||
errors++
|
||||
}
|
||||
}()
|
||||
|
||||
ProducerLoop:
|
||||
for {
|
||||
message := &ProducerMessage{Topic: "my_topic", Value: StringEncoder("testing 123")}
|
||||
select {
|
||||
case producer.Input() <- message:
|
||||
enqueued++
|
||||
|
||||
case <-signals:
|
||||
producer.AsyncClose() // Trigger a shutdown of the producer.
|
||||
break ProducerLoop
|
||||
}
|
||||
}
|
||||
|
||||
wg.Wait()
|
||||
|
||||
log.Printf("Successfully produced: %d; errors: %d\n", successes, errors)
|
||||
}
|
328
vendor/github.com/Shopify/sarama/broker_test.go
generated
vendored
328
vendor/github.com/Shopify/sarama/broker_test.go
generated
vendored
@ -1,328 +0,0 @@
|
||||
package sarama
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
func ExampleBroker() {
|
||||
broker := NewBroker("localhost:9092")
|
||||
err := broker.Open(nil)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
request := MetadataRequest{Topics: []string{"myTopic"}}
|
||||
response, err := broker.GetMetadata(&request)
|
||||
if err != nil {
|
||||
_ = broker.Close()
|
||||
panic(err)
|
||||
}
|
||||
|
||||
fmt.Println("There are", len(response.Topics), "topics active in the cluster.")
|
||||
|
||||
if err = broker.Close(); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
|
||||
type mockEncoder struct {
|
||||
bytes []byte
|
||||
}
|
||||
|
||||
func (m mockEncoder) encode(pe packetEncoder) error {
|
||||
return pe.putRawBytes(m.bytes)
|
||||
}
|
||||
|
||||
type brokerMetrics struct {
|
||||
bytesRead int
|
||||
bytesWritten int
|
||||
}
|
||||
|
||||
func TestBrokerAccessors(t *testing.T) {
|
||||
broker := NewBroker("abc:123")
|
||||
|
||||
if broker.ID() != -1 {
|
||||
t.Error("New broker didn't have an ID of -1.")
|
||||
}
|
||||
|
||||
if broker.Addr() != "abc:123" {
|
||||
t.Error("New broker didn't have the correct address")
|
||||
}
|
||||
|
||||
broker.id = 34
|
||||
if broker.ID() != 34 {
|
||||
t.Error("Manually setting broker ID did not take effect.")
|
||||
}
|
||||
}
|
||||
|
||||
func TestSimpleBrokerCommunication(t *testing.T) {
|
||||
for _, tt := range brokerTestTable {
|
||||
Logger.Printf("Testing broker communication for %s", tt.name)
|
||||
mb := NewMockBroker(t, 0)
|
||||
mb.Returns(&mockEncoder{tt.response})
|
||||
pendingNotify := make(chan brokerMetrics)
|
||||
// Register a callback to be notified about successful requests
|
||||
mb.SetNotifier(func(bytesRead, bytesWritten int) {
|
||||
pendingNotify <- brokerMetrics{bytesRead, bytesWritten}
|
||||
})
|
||||
broker := NewBroker(mb.Addr())
|
||||
// Set the broker id in order to validate local broker metrics
|
||||
broker.id = 0
|
||||
conf := NewConfig()
|
||||
conf.Version = V0_10_0_0
|
||||
err := broker.Open(conf)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
tt.runner(t, broker)
|
||||
err = broker.Close()
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
// Wait up to 500 ms for the remote broker to process the request and
|
||||
// notify us about the metrics
|
||||
timeout := 500 * time.Millisecond
|
||||
select {
|
||||
case mockBrokerMetrics := <-pendingNotify:
|
||||
validateBrokerMetrics(t, broker, mockBrokerMetrics)
|
||||
case <-time.After(timeout):
|
||||
t.Errorf("No request received for: %s after waiting for %v", tt.name, timeout)
|
||||
}
|
||||
mb.Close()
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
// We're not testing encoding/decoding here, so most of the requests/responses will be empty for simplicity's sake
|
||||
var brokerTestTable = []struct {
|
||||
name string
|
||||
response []byte
|
||||
runner func(*testing.T, *Broker)
|
||||
}{
|
||||
{"MetadataRequest",
|
||||
[]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
|
||||
func(t *testing.T, broker *Broker) {
|
||||
request := MetadataRequest{}
|
||||
response, err := broker.GetMetadata(&request)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
if response == nil {
|
||||
t.Error("Metadata request got no response!")
|
||||
}
|
||||
}},
|
||||
|
||||
{"ConsumerMetadataRequest",
|
||||
[]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 't', 0x00, 0x00, 0x00, 0x00},
|
||||
func(t *testing.T, broker *Broker) {
|
||||
request := ConsumerMetadataRequest{}
|
||||
response, err := broker.GetConsumerMetadata(&request)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
if response == nil {
|
||||
t.Error("Consumer Metadata request got no response!")
|
||||
}
|
||||
}},
|
||||
|
||||
{"ProduceRequest (NoResponse)",
|
||||
[]byte{},
|
||||
func(t *testing.T, broker *Broker) {
|
||||
request := ProduceRequest{}
|
||||
request.RequiredAcks = NoResponse
|
||||
response, err := broker.Produce(&request)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
if response != nil {
|
||||
t.Error("Produce request with NoResponse got a response!")
|
||||
}
|
||||
}},
|
||||
|
||||
{"ProduceRequest (WaitForLocal)",
|
||||
[]byte{0x00, 0x00, 0x00, 0x00},
|
||||
func(t *testing.T, broker *Broker) {
|
||||
request := ProduceRequest{}
|
||||
request.RequiredAcks = WaitForLocal
|
||||
response, err := broker.Produce(&request)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
if response == nil {
|
||||
t.Error("Produce request without NoResponse got no response!")
|
||||
}
|
||||
}},
|
||||
|
||||
{"FetchRequest",
|
||||
[]byte{0x00, 0x00, 0x00, 0x00},
|
||||
func(t *testing.T, broker *Broker) {
|
||||
request := FetchRequest{}
|
||||
response, err := broker.Fetch(&request)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
if response == nil {
|
||||
t.Error("Fetch request got no response!")
|
||||
}
|
||||
}},
|
||||
|
||||
{"OffsetFetchRequest",
|
||||
[]byte{0x00, 0x00, 0x00, 0x00},
|
||||
func(t *testing.T, broker *Broker) {
|
||||
request := OffsetFetchRequest{}
|
||||
response, err := broker.FetchOffset(&request)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
if response == nil {
|
||||
t.Error("OffsetFetch request got no response!")
|
||||
}
|
||||
}},
|
||||
|
||||
{"OffsetCommitRequest",
|
||||
[]byte{0x00, 0x00, 0x00, 0x00},
|
||||
func(t *testing.T, broker *Broker) {
|
||||
request := OffsetCommitRequest{}
|
||||
response, err := broker.CommitOffset(&request)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
if response == nil {
|
||||
t.Error("OffsetCommit request got no response!")
|
||||
}
|
||||
}},
|
||||
|
||||
{"OffsetRequest",
|
||||
[]byte{0x00, 0x00, 0x00, 0x00},
|
||||
func(t *testing.T, broker *Broker) {
|
||||
request := OffsetRequest{}
|
||||
response, err := broker.GetAvailableOffsets(&request)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
if response == nil {
|
||||
t.Error("Offset request got no response!")
|
||||
}
|
||||
}},
|
||||
|
||||
{"JoinGroupRequest",
|
||||
[]byte{0x00, 0x17, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
|
||||
func(t *testing.T, broker *Broker) {
|
||||
request := JoinGroupRequest{}
|
||||
response, err := broker.JoinGroup(&request)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
if response == nil {
|
||||
t.Error("JoinGroup request got no response!")
|
||||
}
|
||||
}},
|
||||
|
||||
{"SyncGroupRequest",
|
||||
[]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
|
||||
func(t *testing.T, broker *Broker) {
|
||||
request := SyncGroupRequest{}
|
||||
response, err := broker.SyncGroup(&request)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
if response == nil {
|
||||
t.Error("SyncGroup request got no response!")
|
||||
}
|
||||
}},
|
||||
|
||||
{"LeaveGroupRequest",
|
||||
[]byte{0x00, 0x00},
|
||||
func(t *testing.T, broker *Broker) {
|
||||
request := LeaveGroupRequest{}
|
||||
response, err := broker.LeaveGroup(&request)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
if response == nil {
|
||||
t.Error("LeaveGroup request got no response!")
|
||||
}
|
||||
}},
|
||||
|
||||
{"HeartbeatRequest",
|
||||
[]byte{0x00, 0x00},
|
||||
func(t *testing.T, broker *Broker) {
|
||||
request := HeartbeatRequest{}
|
||||
response, err := broker.Heartbeat(&request)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
if response == nil {
|
||||
t.Error("Heartbeat request got no response!")
|
||||
}
|
||||
}},
|
||||
|
||||
{"ListGroupsRequest",
|
||||
[]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
|
||||
func(t *testing.T, broker *Broker) {
|
||||
request := ListGroupsRequest{}
|
||||
response, err := broker.ListGroups(&request)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
if response == nil {
|
||||
t.Error("ListGroups request got no response!")
|
||||
}
|
||||
}},
|
||||
|
||||
{"DescribeGroupsRequest",
|
||||
[]byte{0x00, 0x00, 0x00, 0x00},
|
||||
func(t *testing.T, broker *Broker) {
|
||||
request := DescribeGroupsRequest{}
|
||||
response, err := broker.DescribeGroups(&request)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
if response == nil {
|
||||
t.Error("DescribeGroups request got no response!")
|
||||
}
|
||||
}},
|
||||
|
||||
{"ApiVersionsRequest",
|
||||
[]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
|
||||
func(t *testing.T, broker *Broker) {
|
||||
request := ApiVersionsRequest{}
|
||||
response, err := broker.ApiVersions(&request)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
if response == nil {
|
||||
t.Error("ApiVersions request got no response!")
|
||||
}
|
||||
}},
|
||||
}
|
||||
|
||||
func validateBrokerMetrics(t *testing.T, broker *Broker, mockBrokerMetrics brokerMetrics) {
|
||||
metricValidators := newMetricValidators()
|
||||
mockBrokerBytesRead := mockBrokerMetrics.bytesRead
|
||||
mockBrokerBytesWritten := mockBrokerMetrics.bytesWritten
|
||||
|
||||
// Check that the number of bytes sent corresponds to what the mock broker received
|
||||
metricValidators.registerForAllBrokers(broker, countMeterValidator("incoming-byte-rate", mockBrokerBytesWritten))
|
||||
if mockBrokerBytesWritten == 0 {
|
||||
// This a ProduceRequest with NoResponse
|
||||
metricValidators.registerForAllBrokers(broker, countMeterValidator("response-rate", 0))
|
||||
metricValidators.registerForAllBrokers(broker, countHistogramValidator("response-size", 0))
|
||||
metricValidators.registerForAllBrokers(broker, minMaxHistogramValidator("response-size", 0, 0))
|
||||
} else {
|
||||
metricValidators.registerForAllBrokers(broker, countMeterValidator("response-rate", 1))
|
||||
metricValidators.registerForAllBrokers(broker, countHistogramValidator("response-size", 1))
|
||||
metricValidators.registerForAllBrokers(broker, minMaxHistogramValidator("response-size", mockBrokerBytesWritten, mockBrokerBytesWritten))
|
||||
}
|
||||
|
||||
// Check that the number of bytes received corresponds to what the mock broker sent
|
||||
metricValidators.registerForAllBrokers(broker, countMeterValidator("outgoing-byte-rate", mockBrokerBytesRead))
|
||||
metricValidators.registerForAllBrokers(broker, countMeterValidator("request-rate", 1))
|
||||
metricValidators.registerForAllBrokers(broker, countHistogramValidator("request-size", 1))
|
||||
metricValidators.registerForAllBrokers(broker, minMaxHistogramValidator("request-size", mockBrokerBytesRead, mockBrokerBytesRead))
|
||||
|
||||
// Run the validators
|
||||
metricValidators.run(t, broker.conf.MetricRegistry)
|
||||
}
|
619
vendor/github.com/Shopify/sarama/client_test.go
generated
vendored
619
vendor/github.com/Shopify/sarama/client_test.go
generated
vendored
@ -1,619 +0,0 @@
|
||||
package sarama
|
||||
|
||||
import (
|
||||
"io"
|
||||
"sync"
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
func safeClose(t testing.TB, c io.Closer) {
|
||||
err := c.Close()
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestSimpleClient(t *testing.T) {
|
||||
seedBroker := NewMockBroker(t, 1)
|
||||
|
||||
seedBroker.Returns(new(MetadataResponse))
|
||||
|
||||
client, err := NewClient([]string{seedBroker.Addr()}, nil)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
seedBroker.Close()
|
||||
safeClose(t, client)
|
||||
}
|
||||
|
||||
func TestCachedPartitions(t *testing.T) {
|
||||
seedBroker := NewMockBroker(t, 1)
|
||||
|
||||
replicas := []int32{3, 1, 5}
|
||||
isr := []int32{5, 1}
|
||||
|
||||
metadataResponse := new(MetadataResponse)
|
||||
metadataResponse.AddBroker("localhost:12345", 2)
|
||||
metadataResponse.AddTopicPartition("my_topic", 0, 2, replicas, isr, ErrNoError)
|
||||
metadataResponse.AddTopicPartition("my_topic", 1, 2, replicas, isr, ErrLeaderNotAvailable)
|
||||
seedBroker.Returns(metadataResponse)
|
||||
|
||||
config := NewConfig()
|
||||
config.Metadata.Retry.Max = 0
|
||||
c, err := NewClient([]string{seedBroker.Addr()}, config)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
client := c.(*client)
|
||||
|
||||
// Verify they aren't cached the same
|
||||
allP := client.cachedPartitionsResults["my_topic"][allPartitions]
|
||||
writeP := client.cachedPartitionsResults["my_topic"][writablePartitions]
|
||||
if len(allP) == len(writeP) {
|
||||
t.Fatal("Invalid lengths!")
|
||||
}
|
||||
|
||||
tmp := client.cachedPartitionsResults["my_topic"]
|
||||
// Verify we actually use the cache at all!
|
||||
tmp[allPartitions] = []int32{1, 2, 3, 4}
|
||||
client.cachedPartitionsResults["my_topic"] = tmp
|
||||
if 4 != len(client.cachedPartitions("my_topic", allPartitions)) {
|
||||
t.Fatal("Not using the cache!")
|
||||
}
|
||||
|
||||
seedBroker.Close()
|
||||
safeClose(t, client)
|
||||
}
|
||||
|
||||
func TestClientDoesntCachePartitionsForTopicsWithErrors(t *testing.T) {
|
||||
seedBroker := NewMockBroker(t, 1)
|
||||
|
||||
replicas := []int32{seedBroker.BrokerID()}
|
||||
|
||||
metadataResponse := new(MetadataResponse)
|
||||
metadataResponse.AddBroker(seedBroker.Addr(), seedBroker.BrokerID())
|
||||
metadataResponse.AddTopicPartition("my_topic", 1, replicas[0], replicas, replicas, ErrNoError)
|
||||
metadataResponse.AddTopicPartition("my_topic", 2, replicas[0], replicas, replicas, ErrNoError)
|
||||
seedBroker.Returns(metadataResponse)
|
||||
|
||||
config := NewConfig()
|
||||
config.Metadata.Retry.Max = 0
|
||||
client, err := NewClient([]string{seedBroker.Addr()}, config)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
metadataResponse = new(MetadataResponse)
|
||||
metadataResponse.AddTopic("unknown", ErrUnknownTopicOrPartition)
|
||||
seedBroker.Returns(metadataResponse)
|
||||
|
||||
partitions, err := client.Partitions("unknown")
|
||||
|
||||
if err != ErrUnknownTopicOrPartition {
|
||||
t.Error("Expected ErrUnknownTopicOrPartition, found", err)
|
||||
}
|
||||
if partitions != nil {
|
||||
t.Errorf("Should return nil as partition list, found %v", partitions)
|
||||
}
|
||||
|
||||
// Should still use the cache of a known topic
|
||||
partitions, err = client.Partitions("my_topic")
|
||||
if err != nil {
|
||||
t.Errorf("Expected no error, found %v", err)
|
||||
}
|
||||
|
||||
metadataResponse = new(MetadataResponse)
|
||||
metadataResponse.AddTopic("unknown", ErrUnknownTopicOrPartition)
|
||||
seedBroker.Returns(metadataResponse)
|
||||
|
||||
// Should not use cache for unknown topic
|
||||
partitions, err = client.Partitions("unknown")
|
||||
if err != ErrUnknownTopicOrPartition {
|
||||
t.Error("Expected ErrUnknownTopicOrPartition, found", err)
|
||||
}
|
||||
if partitions != nil {
|
||||
t.Errorf("Should return nil as partition list, found %v", partitions)
|
||||
}
|
||||
|
||||
seedBroker.Close()
|
||||
safeClose(t, client)
|
||||
}
|
||||
|
||||
func TestClientSeedBrokers(t *testing.T) {
|
||||
seedBroker := NewMockBroker(t, 1)
|
||||
|
||||
metadataResponse := new(MetadataResponse)
|
||||
metadataResponse.AddBroker("localhost:12345", 2)
|
||||
seedBroker.Returns(metadataResponse)
|
||||
|
||||
client, err := NewClient([]string{seedBroker.Addr()}, nil)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
seedBroker.Close()
|
||||
safeClose(t, client)
|
||||
}
|
||||
|
||||
func TestClientMetadata(t *testing.T) {
|
||||
seedBroker := NewMockBroker(t, 1)
|
||||
leader := NewMockBroker(t, 5)
|
||||
|
||||
replicas := []int32{3, 1, 5}
|
||||
isr := []int32{5, 1}
|
||||
|
||||
metadataResponse := new(MetadataResponse)
|
||||
metadataResponse.AddBroker(leader.Addr(), leader.BrokerID())
|
||||
metadataResponse.AddTopicPartition("my_topic", 0, leader.BrokerID(), replicas, isr, ErrNoError)
|
||||
metadataResponse.AddTopicPartition("my_topic", 1, leader.BrokerID(), replicas, isr, ErrLeaderNotAvailable)
|
||||
seedBroker.Returns(metadataResponse)
|
||||
|
||||
config := NewConfig()
|
||||
config.Metadata.Retry.Max = 0
|
||||
client, err := NewClient([]string{seedBroker.Addr()}, config)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
topics, err := client.Topics()
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
} else if len(topics) != 1 || topics[0] != "my_topic" {
|
||||
t.Error("Client returned incorrect topics:", topics)
|
||||
}
|
||||
|
||||
parts, err := client.Partitions("my_topic")
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
} else if len(parts) != 2 || parts[0] != 0 || parts[1] != 1 {
|
||||
t.Error("Client returned incorrect partitions for my_topic:", parts)
|
||||
}
|
||||
|
||||
parts, err = client.WritablePartitions("my_topic")
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
} else if len(parts) != 1 || parts[0] != 0 {
|
||||
t.Error("Client returned incorrect writable partitions for my_topic:", parts)
|
||||
}
|
||||
|
||||
tst, err := client.Leader("my_topic", 0)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
} else if tst.ID() != 5 {
|
||||
t.Error("Leader for my_topic had incorrect ID.")
|
||||
}
|
||||
|
||||
replicas, err = client.Replicas("my_topic", 0)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
} else if replicas[0] != 3 {
|
||||
t.Error("Incorrect (or sorted) replica")
|
||||
} else if replicas[1] != 1 {
|
||||
t.Error("Incorrect (or sorted) replica")
|
||||
} else if replicas[2] != 5 {
|
||||
t.Error("Incorrect (or sorted) replica")
|
||||
}
|
||||
|
||||
isr, err = client.InSyncReplicas("my_topic", 0)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
} else if len(isr) != 2 {
|
||||
t.Error("Client returned incorrect ISRs for partition:", isr)
|
||||
} else if isr[0] != 5 {
|
||||
t.Error("Incorrect (or sorted) ISR:", isr)
|
||||
} else if isr[1] != 1 {
|
||||
t.Error("Incorrect (or sorted) ISR:", isr)
|
||||
}
|
||||
|
||||
leader.Close()
|
||||
seedBroker.Close()
|
||||
safeClose(t, client)
|
||||
}
|
||||
|
||||
func TestClientGetOffset(t *testing.T) {
|
||||
seedBroker := NewMockBroker(t, 1)
|
||||
leader := NewMockBroker(t, 2)
|
||||
leaderAddr := leader.Addr()
|
||||
|
||||
metadata := new(MetadataResponse)
|
||||
metadata.AddTopicPartition("foo", 0, leader.BrokerID(), nil, nil, ErrNoError)
|
||||
metadata.AddBroker(leaderAddr, leader.BrokerID())
|
||||
seedBroker.Returns(metadata)
|
||||
|
||||
client, err := NewClient([]string{seedBroker.Addr()}, nil)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
offsetResponse := new(OffsetResponse)
|
||||
offsetResponse.AddTopicPartition("foo", 0, 123)
|
||||
leader.Returns(offsetResponse)
|
||||
|
||||
offset, err := client.GetOffset("foo", 0, OffsetNewest)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
if offset != 123 {
|
||||
t.Error("Unexpected offset, got ", offset)
|
||||
}
|
||||
|
||||
leader.Close()
|
||||
seedBroker.Returns(metadata)
|
||||
|
||||
leader = NewMockBrokerAddr(t, 2, leaderAddr)
|
||||
offsetResponse = new(OffsetResponse)
|
||||
offsetResponse.AddTopicPartition("foo", 0, 456)
|
||||
leader.Returns(offsetResponse)
|
||||
|
||||
offset, err = client.GetOffset("foo", 0, OffsetNewest)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
if offset != 456 {
|
||||
t.Error("Unexpected offset, got ", offset)
|
||||
}
|
||||
|
||||
seedBroker.Close()
|
||||
leader.Close()
|
||||
safeClose(t, client)
|
||||
}
|
||||
|
||||
func TestClientReceivingUnknownTopic(t *testing.T) {
|
||||
seedBroker := NewMockBroker(t, 1)
|
||||
|
||||
metadataResponse1 := new(MetadataResponse)
|
||||
seedBroker.Returns(metadataResponse1)
|
||||
|
||||
config := NewConfig()
|
||||
config.Metadata.Retry.Max = 1
|
||||
config.Metadata.Retry.Backoff = 0
|
||||
client, err := NewClient([]string{seedBroker.Addr()}, config)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
metadataUnknownTopic := new(MetadataResponse)
|
||||
metadataUnknownTopic.AddTopic("new_topic", ErrUnknownTopicOrPartition)
|
||||
seedBroker.Returns(metadataUnknownTopic)
|
||||
seedBroker.Returns(metadataUnknownTopic)
|
||||
|
||||
if err := client.RefreshMetadata("new_topic"); err != ErrUnknownTopicOrPartition {
|
||||
t.Error("ErrUnknownTopicOrPartition expected, got", err)
|
||||
}
|
||||
|
||||
// If we are asking for the leader of a partition of the non-existing topic.
|
||||
// we will request metadata again.
|
||||
seedBroker.Returns(metadataUnknownTopic)
|
||||
seedBroker.Returns(metadataUnknownTopic)
|
||||
|
||||
if _, err = client.Leader("new_topic", 1); err != ErrUnknownTopicOrPartition {
|
||||
t.Error("Expected ErrUnknownTopicOrPartition, got", err)
|
||||
}
|
||||
|
||||
safeClose(t, client)
|
||||
seedBroker.Close()
|
||||
}
|
||||
|
||||
func TestClientReceivingPartialMetadata(t *testing.T) {
|
||||
seedBroker := NewMockBroker(t, 1)
|
||||
leader := NewMockBroker(t, 5)
|
||||
|
||||
metadataResponse1 := new(MetadataResponse)
|
||||
metadataResponse1.AddBroker(leader.Addr(), leader.BrokerID())
|
||||
seedBroker.Returns(metadataResponse1)
|
||||
|
||||
config := NewConfig()
|
||||
config.Metadata.Retry.Max = 0
|
||||
client, err := NewClient([]string{seedBroker.Addr()}, config)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
replicas := []int32{leader.BrokerID(), seedBroker.BrokerID()}
|
||||
|
||||
metadataPartial := new(MetadataResponse)
|
||||
metadataPartial.AddTopic("new_topic", ErrLeaderNotAvailable)
|
||||
metadataPartial.AddTopicPartition("new_topic", 0, leader.BrokerID(), replicas, replicas, ErrNoError)
|
||||
metadataPartial.AddTopicPartition("new_topic", 1, -1, replicas, []int32{}, ErrLeaderNotAvailable)
|
||||
seedBroker.Returns(metadataPartial)
|
||||
|
||||
if err := client.RefreshMetadata("new_topic"); err != nil {
|
||||
t.Error("ErrLeaderNotAvailable should not make RefreshMetadata respond with an error")
|
||||
}
|
||||
|
||||
// Even though the metadata was incomplete, we should be able to get the leader of a partition
|
||||
// for which we did get a useful response, without doing additional requests.
|
||||
|
||||
partition0Leader, err := client.Leader("new_topic", 0)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
} else if partition0Leader.Addr() != leader.Addr() {
|
||||
t.Error("Unexpected leader returned", partition0Leader.Addr())
|
||||
}
|
||||
|
||||
// If we are asking for the leader of a partition that didn't have a leader before,
|
||||
// we will do another metadata request.
|
||||
|
||||
seedBroker.Returns(metadataPartial)
|
||||
|
||||
// Still no leader for the partition, so asking for it should return an error.
|
||||
_, err = client.Leader("new_topic", 1)
|
||||
if err != ErrLeaderNotAvailable {
|
||||
t.Error("Expected ErrLeaderNotAvailable, got", err)
|
||||
}
|
||||
|
||||
safeClose(t, client)
|
||||
seedBroker.Close()
|
||||
leader.Close()
|
||||
}
|
||||
|
||||
func TestClientRefreshBehaviour(t *testing.T) {
|
||||
seedBroker := NewMockBroker(t, 1)
|
||||
leader := NewMockBroker(t, 5)
|
||||
|
||||
metadataResponse1 := new(MetadataResponse)
|
||||
metadataResponse1.AddBroker(leader.Addr(), leader.BrokerID())
|
||||
seedBroker.Returns(metadataResponse1)
|
||||
|
||||
metadataResponse2 := new(MetadataResponse)
|
||||
metadataResponse2.AddTopicPartition("my_topic", 0xb, leader.BrokerID(), nil, nil, ErrNoError)
|
||||
seedBroker.Returns(metadataResponse2)
|
||||
|
||||
client, err := NewClient([]string{seedBroker.Addr()}, nil)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
parts, err := client.Partitions("my_topic")
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
} else if len(parts) != 1 || parts[0] != 0xb {
|
||||
t.Error("Client returned incorrect partitions for my_topic:", parts)
|
||||
}
|
||||
|
||||
tst, err := client.Leader("my_topic", 0xb)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
} else if tst.ID() != 5 {
|
||||
t.Error("Leader for my_topic had incorrect ID.")
|
||||
}
|
||||
|
||||
leader.Close()
|
||||
seedBroker.Close()
|
||||
safeClose(t, client)
|
||||
}
|
||||
|
||||
func TestClientResurrectDeadSeeds(t *testing.T) {
|
||||
initialSeed := NewMockBroker(t, 0)
|
||||
emptyMetadata := new(MetadataResponse)
|
||||
initialSeed.Returns(emptyMetadata)
|
||||
|
||||
conf := NewConfig()
|
||||
conf.Metadata.Retry.Backoff = 0
|
||||
conf.Metadata.RefreshFrequency = 0
|
||||
c, err := NewClient([]string{initialSeed.Addr()}, conf)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
initialSeed.Close()
|
||||
|
||||
client := c.(*client)
|
||||
|
||||
seed1 := NewMockBroker(t, 1)
|
||||
seed2 := NewMockBroker(t, 2)
|
||||
seed3 := NewMockBroker(t, 3)
|
||||
addr1 := seed1.Addr()
|
||||
addr2 := seed2.Addr()
|
||||
addr3 := seed3.Addr()
|
||||
|
||||
// Overwrite the seed brokers with a fixed ordering to make this test deterministic.
|
||||
safeClose(t, client.seedBrokers[0])
|
||||
client.seedBrokers = []*Broker{NewBroker(addr1), NewBroker(addr2), NewBroker(addr3)}
|
||||
client.deadSeeds = []*Broker{}
|
||||
|
||||
wg := sync.WaitGroup{}
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
if err := client.RefreshMetadata(); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
wg.Done()
|
||||
}()
|
||||
seed1.Close()
|
||||
seed2.Close()
|
||||
|
||||
seed1 = NewMockBrokerAddr(t, 1, addr1)
|
||||
seed2 = NewMockBrokerAddr(t, 2, addr2)
|
||||
|
||||
seed3.Close()
|
||||
|
||||
seed1.Close()
|
||||
seed2.Returns(emptyMetadata)
|
||||
|
||||
wg.Wait()
|
||||
|
||||
if len(client.seedBrokers) != 2 {
|
||||
t.Error("incorrect number of live seeds")
|
||||
}
|
||||
if len(client.deadSeeds) != 1 {
|
||||
t.Error("incorrect number of dead seeds")
|
||||
}
|
||||
|
||||
safeClose(t, c)
|
||||
}
|
||||
|
||||
func TestClientCoordinatorWithConsumerOffsetsTopic(t *testing.T) {
|
||||
seedBroker := NewMockBroker(t, 1)
|
||||
staleCoordinator := NewMockBroker(t, 2)
|
||||
freshCoordinator := NewMockBroker(t, 3)
|
||||
|
||||
replicas := []int32{staleCoordinator.BrokerID(), freshCoordinator.BrokerID()}
|
||||
metadataResponse1 := new(MetadataResponse)
|
||||
metadataResponse1.AddBroker(staleCoordinator.Addr(), staleCoordinator.BrokerID())
|
||||
metadataResponse1.AddBroker(freshCoordinator.Addr(), freshCoordinator.BrokerID())
|
||||
metadataResponse1.AddTopicPartition("__consumer_offsets", 0, replicas[0], replicas, replicas, ErrNoError)
|
||||
seedBroker.Returns(metadataResponse1)
|
||||
|
||||
client, err := NewClient([]string{seedBroker.Addr()}, nil)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
coordinatorResponse1 := new(ConsumerMetadataResponse)
|
||||
coordinatorResponse1.Err = ErrConsumerCoordinatorNotAvailable
|
||||
seedBroker.Returns(coordinatorResponse1)
|
||||
|
||||
coordinatorResponse2 := new(ConsumerMetadataResponse)
|
||||
coordinatorResponse2.CoordinatorID = staleCoordinator.BrokerID()
|
||||
coordinatorResponse2.CoordinatorHost = "127.0.0.1"
|
||||
coordinatorResponse2.CoordinatorPort = staleCoordinator.Port()
|
||||
|
||||
seedBroker.Returns(coordinatorResponse2)
|
||||
|
||||
broker, err := client.Coordinator("my_group")
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
if staleCoordinator.Addr() != broker.Addr() {
|
||||
t.Errorf("Expected coordinator to have address %s, found %s", staleCoordinator.Addr(), broker.Addr())
|
||||
}
|
||||
|
||||
if staleCoordinator.BrokerID() != broker.ID() {
|
||||
t.Errorf("Expected coordinator to have ID %d, found %d", staleCoordinator.BrokerID(), broker.ID())
|
||||
}
|
||||
|
||||
// Grab the cached value
|
||||
broker2, err := client.Coordinator("my_group")
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
if broker2.Addr() != broker.Addr() {
|
||||
t.Errorf("Expected the coordinator to be the same, but found %s vs. %s", broker2.Addr(), broker.Addr())
|
||||
}
|
||||
|
||||
coordinatorResponse3 := new(ConsumerMetadataResponse)
|
||||
coordinatorResponse3.CoordinatorID = freshCoordinator.BrokerID()
|
||||
coordinatorResponse3.CoordinatorHost = "127.0.0.1"
|
||||
coordinatorResponse3.CoordinatorPort = freshCoordinator.Port()
|
||||
|
||||
seedBroker.Returns(coordinatorResponse3)
|
||||
|
||||
// Refresh the locally cahced value because it's stale
|
||||
if err := client.RefreshCoordinator("my_group"); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
// Grab the fresh value
|
||||
broker3, err := client.Coordinator("my_group")
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
if broker3.Addr() != freshCoordinator.Addr() {
|
||||
t.Errorf("Expected the freshCoordinator to be returned, but found %s.", broker3.Addr())
|
||||
}
|
||||
|
||||
freshCoordinator.Close()
|
||||
staleCoordinator.Close()
|
||||
seedBroker.Close()
|
||||
safeClose(t, client)
|
||||
}
|
||||
|
||||
func TestClientCoordinatorWithoutConsumerOffsetsTopic(t *testing.T) {
|
||||
seedBroker := NewMockBroker(t, 1)
|
||||
coordinator := NewMockBroker(t, 2)
|
||||
|
||||
metadataResponse1 := new(MetadataResponse)
|
||||
seedBroker.Returns(metadataResponse1)
|
||||
|
||||
config := NewConfig()
|
||||
config.Metadata.Retry.Max = 1
|
||||
config.Metadata.Retry.Backoff = 0
|
||||
client, err := NewClient([]string{seedBroker.Addr()}, config)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
coordinatorResponse1 := new(ConsumerMetadataResponse)
|
||||
coordinatorResponse1.Err = ErrConsumerCoordinatorNotAvailable
|
||||
seedBroker.Returns(coordinatorResponse1)
|
||||
|
||||
metadataResponse2 := new(MetadataResponse)
|
||||
metadataResponse2.AddTopic("__consumer_offsets", ErrUnknownTopicOrPartition)
|
||||
seedBroker.Returns(metadataResponse2)
|
||||
|
||||
replicas := []int32{coordinator.BrokerID()}
|
||||
metadataResponse3 := new(MetadataResponse)
|
||||
metadataResponse3.AddTopicPartition("__consumer_offsets", 0, replicas[0], replicas, replicas, ErrNoError)
|
||||
seedBroker.Returns(metadataResponse3)
|
||||
|
||||
coordinatorResponse2 := new(ConsumerMetadataResponse)
|
||||
coordinatorResponse2.CoordinatorID = coordinator.BrokerID()
|
||||
coordinatorResponse2.CoordinatorHost = "127.0.0.1"
|
||||
coordinatorResponse2.CoordinatorPort = coordinator.Port()
|
||||
|
||||
seedBroker.Returns(coordinatorResponse2)
|
||||
|
||||
broker, err := client.Coordinator("my_group")
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
if coordinator.Addr() != broker.Addr() {
|
||||
t.Errorf("Expected coordinator to have address %s, found %s", coordinator.Addr(), broker.Addr())
|
||||
}
|
||||
|
||||
if coordinator.BrokerID() != broker.ID() {
|
||||
t.Errorf("Expected coordinator to have ID %d, found %d", coordinator.BrokerID(), broker.ID())
|
||||
}
|
||||
|
||||
coordinator.Close()
|
||||
seedBroker.Close()
|
||||
safeClose(t, client)
|
||||
}
|
||||
|
||||
func TestClientAutorefreshShutdownRace(t *testing.T) {
|
||||
seedBroker := NewMockBroker(t, 1)
|
||||
|
||||
metadataResponse := new(MetadataResponse)
|
||||
seedBroker.Returns(metadataResponse)
|
||||
|
||||
conf := NewConfig()
|
||||
conf.Metadata.RefreshFrequency = 100 * time.Millisecond
|
||||
client, err := NewClient([]string{seedBroker.Addr()}, conf)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Wait for the background refresh to kick in
|
||||
time.Sleep(110 * time.Millisecond)
|
||||
|
||||
done := make(chan none)
|
||||
go func() {
|
||||
// Close the client
|
||||
if err := client.Close(); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
close(done)
|
||||
}()
|
||||
|
||||
// Wait for the Close to kick in
|
||||
time.Sleep(10 * time.Millisecond)
|
||||
|
||||
// Then return some metadata to the still-running background thread
|
||||
leader := NewMockBroker(t, 2)
|
||||
metadataResponse.AddBroker(leader.Addr(), leader.BrokerID())
|
||||
metadataResponse.AddTopicPartition("foo", 0, leader.BrokerID(), []int32{2}, []int32{2}, ErrNoError)
|
||||
seedBroker.Returns(metadataResponse)
|
||||
|
||||
<-done
|
||||
|
||||
seedBroker.Close()
|
||||
|
||||
// give the update time to happen so we get a panic if it's still running (which it shouldn't)
|
||||
time.Sleep(10 * time.Millisecond)
|
||||
}
|
70
vendor/github.com/Shopify/sarama/config_test.go
generated
vendored
70
vendor/github.com/Shopify/sarama/config_test.go
generated
vendored
@ -1,70 +0,0 @@
|
||||
package sarama
|
||||
|
||||
import (
|
||||
"os"
|
||||
"testing"
|
||||
|
||||
"github.com/rcrowley/go-metrics"
|
||||
)
|
||||
|
||||
func TestDefaultConfigValidates(t *testing.T) {
|
||||
config := NewConfig()
|
||||
if err := config.Validate(); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
if config.MetricRegistry == nil {
|
||||
t.Error("Expected non nil metrics.MetricRegistry, got nil")
|
||||
}
|
||||
}
|
||||
|
||||
func TestInvalidClientIDConfigValidates(t *testing.T) {
|
||||
config := NewConfig()
|
||||
config.ClientID = "foo:bar"
|
||||
if err := config.Validate(); string(err.(ConfigurationError)) != "ClientID is invalid" {
|
||||
t.Error("Expected invalid ClientID, got ", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestEmptyClientIDConfigValidates(t *testing.T) {
|
||||
config := NewConfig()
|
||||
config.ClientID = ""
|
||||
if err := config.Validate(); string(err.(ConfigurationError)) != "ClientID is invalid" {
|
||||
t.Error("Expected invalid ClientID, got ", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestLZ4ConfigValidation(t *testing.T) {
|
||||
config := NewConfig()
|
||||
config.Producer.Compression = CompressionLZ4
|
||||
if err := config.Validate(); string(err.(ConfigurationError)) != "lz4 compression requires Version >= V0_10_0_0" {
|
||||
t.Error("Expected invalid lz4/kakfa version error, got ", err)
|
||||
}
|
||||
config.Version = V0_10_0_0
|
||||
if err := config.Validate(); err != nil {
|
||||
t.Error("Expected lz4 to work, got ", err)
|
||||
}
|
||||
}
|
||||
|
||||
// This example shows how to integrate with an existing registry as well as publishing metrics
|
||||
// on the standard output
|
||||
func ExampleConfig_metrics() {
|
||||
// Our application registry
|
||||
appMetricRegistry := metrics.NewRegistry()
|
||||
appGauge := metrics.GetOrRegisterGauge("m1", appMetricRegistry)
|
||||
appGauge.Update(1)
|
||||
|
||||
config := NewConfig()
|
||||
// Use a prefix registry instead of the default local one
|
||||
config.MetricRegistry = metrics.NewPrefixedChildRegistry(appMetricRegistry, "sarama.")
|
||||
|
||||
// Simulate a metric created by sarama without starting a broker
|
||||
saramaGauge := metrics.GetOrRegisterGauge("m2", config.MetricRegistry)
|
||||
saramaGauge.Update(2)
|
||||
|
||||
metrics.WriteOnce(appMetricRegistry, os.Stdout)
|
||||
// Output:
|
||||
// gauge m1
|
||||
// value: 1
|
||||
// gauge sarama.m2
|
||||
// value: 2
|
||||
}
|
73
vendor/github.com/Shopify/sarama/consumer_group_members_test.go
generated
vendored
73
vendor/github.com/Shopify/sarama/consumer_group_members_test.go
generated
vendored
@ -1,73 +0,0 @@
|
||||
package sarama
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"reflect"
|
||||
"testing"
|
||||
)
|
||||
|
||||
var (
|
||||
groupMemberMetadata = []byte{
|
||||
0, 1, // Version
|
||||
0, 0, 0, 2, // Topic array length
|
||||
0, 3, 'o', 'n', 'e', // Topic one
|
||||
0, 3, 't', 'w', 'o', // Topic two
|
||||
0, 0, 0, 3, 0x01, 0x02, 0x03, // Userdata
|
||||
}
|
||||
groupMemberAssignment = []byte{
|
||||
0, 1, // Version
|
||||
0, 0, 0, 1, // Topic array length
|
||||
0, 3, 'o', 'n', 'e', // Topic one
|
||||
0, 0, 0, 3, // Topic one, partition array length
|
||||
0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 4, // 0, 2, 4
|
||||
0, 0, 0, 3, 0x01, 0x02, 0x03, // Userdata
|
||||
}
|
||||
)
|
||||
|
||||
func TestConsumerGroupMemberMetadata(t *testing.T) {
|
||||
meta := &ConsumerGroupMemberMetadata{
|
||||
Version: 1,
|
||||
Topics: []string{"one", "two"},
|
||||
UserData: []byte{0x01, 0x02, 0x03},
|
||||
}
|
||||
|
||||
buf, err := encode(meta, nil)
|
||||
if err != nil {
|
||||
t.Error("Failed to encode data", err)
|
||||
} else if !bytes.Equal(groupMemberMetadata, buf) {
|
||||
t.Errorf("Encoded data does not match expectation\nexpected: %v\nactual: %v", groupMemberMetadata, buf)
|
||||
}
|
||||
|
||||
meta2 := new(ConsumerGroupMemberMetadata)
|
||||
err = decode(buf, meta2)
|
||||
if err != nil {
|
||||
t.Error("Failed to decode data", err)
|
||||
} else if !reflect.DeepEqual(meta, meta2) {
|
||||
t.Errorf("Encoded data does not match expectation\nexpected: %v\nactual: %v", meta, meta2)
|
||||
}
|
||||
}
|
||||
|
||||
func TestConsumerGroupMemberAssignment(t *testing.T) {
|
||||
amt := &ConsumerGroupMemberAssignment{
|
||||
Version: 1,
|
||||
Topics: map[string][]int32{
|
||||
"one": {0, 2, 4},
|
||||
},
|
||||
UserData: []byte{0x01, 0x02, 0x03},
|
||||
}
|
||||
|
||||
buf, err := encode(amt, nil)
|
||||
if err != nil {
|
||||
t.Error("Failed to encode data", err)
|
||||
} else if !bytes.Equal(groupMemberAssignment, buf) {
|
||||
t.Errorf("Encoded data does not match expectation\nexpected: %v\nactual: %v", groupMemberAssignment, buf)
|
||||
}
|
||||
|
||||
amt2 := new(ConsumerGroupMemberAssignment)
|
||||
err = decode(buf, amt2)
|
||||
if err != nil {
|
||||
t.Error("Failed to decode data", err)
|
||||
} else if !reflect.DeepEqual(amt, amt2) {
|
||||
t.Errorf("Encoded data does not match expectation\nexpected: %v\nactual: %v", amt, amt2)
|
||||
}
|
||||
}
|
19
vendor/github.com/Shopify/sarama/consumer_metadata_request_test.go
generated
vendored
19
vendor/github.com/Shopify/sarama/consumer_metadata_request_test.go
generated
vendored
@ -1,19 +0,0 @@
|
||||
package sarama
|
||||
|
||||
import "testing"
|
||||
|
||||
var (
|
||||
consumerMetadataRequestEmpty = []byte{
|
||||
0x00, 0x00}
|
||||
|
||||
consumerMetadataRequestString = []byte{
|
||||
0x00, 0x06, 'f', 'o', 'o', 'b', 'a', 'r'}
|
||||
)
|
||||
|
||||
func TestConsumerMetadataRequest(t *testing.T) {
|
||||
request := new(ConsumerMetadataRequest)
|
||||
testRequest(t, "empty string", request, consumerMetadataRequestEmpty)
|
||||
|
||||
request.ConsumerGroup = "foobar"
|
||||
testRequest(t, "with string", request, consumerMetadataRequestString)
|
||||
}
|
35
vendor/github.com/Shopify/sarama/consumer_metadata_response_test.go
generated
vendored
35
vendor/github.com/Shopify/sarama/consumer_metadata_response_test.go
generated
vendored
@ -1,35 +0,0 @@
|
||||
package sarama
|
||||
|
||||
import "testing"
|
||||
|
||||
var (
|
||||
consumerMetadataResponseError = []byte{
|
||||
0x00, 0x0E,
|
||||
0x00, 0x00, 0x00, 0x00,
|
||||
0x00, 0x00,
|
||||
0x00, 0x00, 0x00, 0x00}
|
||||
|
||||
consumerMetadataResponseSuccess = []byte{
|
||||
0x00, 0x00,
|
||||
0x00, 0x00, 0x00, 0xAB,
|
||||
0x00, 0x03, 'f', 'o', 'o',
|
||||
0x00, 0x00, 0xCC, 0xDD}
|
||||
)
|
||||
|
||||
func TestConsumerMetadataResponseError(t *testing.T) {
|
||||
response := ConsumerMetadataResponse{Err: ErrOffsetsLoadInProgress}
|
||||
testResponse(t, "error", &response, consumerMetadataResponseError)
|
||||
}
|
||||
|
||||
func TestConsumerMetadataResponseSuccess(t *testing.T) {
|
||||
broker := NewBroker("foo:52445")
|
||||
broker.id = 0xAB
|
||||
response := ConsumerMetadataResponse{
|
||||
Coordinator: broker,
|
||||
CoordinatorID: 0xAB,
|
||||
CoordinatorHost: "foo",
|
||||
CoordinatorPort: 0xCCDD,
|
||||
Err: ErrNoError,
|
||||
}
|
||||
testResponse(t, "success", &response, consumerMetadataResponseSuccess)
|
||||
}
|
896
vendor/github.com/Shopify/sarama/consumer_test.go
generated
vendored
896
vendor/github.com/Shopify/sarama/consumer_test.go
generated
vendored
@ -1,896 +0,0 @@
|
||||
package sarama
|
||||
|
||||
import (
|
||||
"log"
|
||||
"os"
|
||||
"os/signal"
|
||||
"sync"
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
var testMsg = StringEncoder("Foo")
|
||||
|
||||
// If a particular offset is provided then messages are consumed starting from
|
||||
// that offset.
|
||||
func TestConsumerOffsetManual(t *testing.T) {
|
||||
// Given
|
||||
broker0 := NewMockBroker(t, 0)
|
||||
|
||||
mockFetchResponse := NewMockFetchResponse(t, 1)
|
||||
for i := 0; i < 10; i++ {
|
||||
mockFetchResponse.SetMessage("my_topic", 0, int64(i+1234), testMsg)
|
||||
}
|
||||
|
||||
broker0.SetHandlerByMap(map[string]MockResponse{
|
||||
"MetadataRequest": NewMockMetadataResponse(t).
|
||||
SetBroker(broker0.Addr(), broker0.BrokerID()).
|
||||
SetLeader("my_topic", 0, broker0.BrokerID()),
|
||||
"OffsetRequest": NewMockOffsetResponse(t).
|
||||
SetOffset("my_topic", 0, OffsetOldest, 0).
|
||||
SetOffset("my_topic", 0, OffsetNewest, 2345),
|
||||
"FetchRequest": mockFetchResponse,
|
||||
})
|
||||
|
||||
// When
|
||||
master, err := NewConsumer([]string{broker0.Addr()}, nil)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
consumer, err := master.ConsumePartition("my_topic", 0, 1234)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Then: messages starting from offset 1234 are consumed.
|
||||
for i := 0; i < 10; i++ {
|
||||
select {
|
||||
case message := <-consumer.Messages():
|
||||
assertMessageOffset(t, message, int64(i+1234))
|
||||
case err := <-consumer.Errors():
|
||||
t.Error(err)
|
||||
}
|
||||
}
|
||||
|
||||
safeClose(t, consumer)
|
||||
safeClose(t, master)
|
||||
broker0.Close()
|
||||
}
|
||||
|
||||
// If `OffsetNewest` is passed as the initial offset then the first consumed
|
||||
// message is indeed corresponds to the offset that broker claims to be the
|
||||
// newest in its metadata response.
|
||||
func TestConsumerOffsetNewest(t *testing.T) {
|
||||
// Given
|
||||
broker0 := NewMockBroker(t, 0)
|
||||
broker0.SetHandlerByMap(map[string]MockResponse{
|
||||
"MetadataRequest": NewMockMetadataResponse(t).
|
||||
SetBroker(broker0.Addr(), broker0.BrokerID()).
|
||||
SetLeader("my_topic", 0, broker0.BrokerID()),
|
||||
"OffsetRequest": NewMockOffsetResponse(t).
|
||||
SetOffset("my_topic", 0, OffsetNewest, 10).
|
||||
SetOffset("my_topic", 0, OffsetOldest, 7),
|
||||
"FetchRequest": NewMockFetchResponse(t, 1).
|
||||
SetMessage("my_topic", 0, 9, testMsg).
|
||||
SetMessage("my_topic", 0, 10, testMsg).
|
||||
SetMessage("my_topic", 0, 11, testMsg).
|
||||
SetHighWaterMark("my_topic", 0, 14),
|
||||
})
|
||||
|
||||
master, err := NewConsumer([]string{broker0.Addr()}, nil)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// When
|
||||
consumer, err := master.ConsumePartition("my_topic", 0, OffsetNewest)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Then
|
||||
assertMessageOffset(t, <-consumer.Messages(), 10)
|
||||
if hwmo := consumer.HighWaterMarkOffset(); hwmo != 14 {
|
||||
t.Errorf("Expected high water mark offset 14, found %d", hwmo)
|
||||
}
|
||||
|
||||
safeClose(t, consumer)
|
||||
safeClose(t, master)
|
||||
broker0.Close()
|
||||
}
|
||||
|
||||
// It is possible to close a partition consumer and create the same anew.
|
||||
func TestConsumerRecreate(t *testing.T) {
|
||||
// Given
|
||||
broker0 := NewMockBroker(t, 0)
|
||||
broker0.SetHandlerByMap(map[string]MockResponse{
|
||||
"MetadataRequest": NewMockMetadataResponse(t).
|
||||
SetBroker(broker0.Addr(), broker0.BrokerID()).
|
||||
SetLeader("my_topic", 0, broker0.BrokerID()),
|
||||
"OffsetRequest": NewMockOffsetResponse(t).
|
||||
SetOffset("my_topic", 0, OffsetOldest, 0).
|
||||
SetOffset("my_topic", 0, OffsetNewest, 1000),
|
||||
"FetchRequest": NewMockFetchResponse(t, 1).
|
||||
SetMessage("my_topic", 0, 10, testMsg),
|
||||
})
|
||||
|
||||
c, err := NewConsumer([]string{broker0.Addr()}, nil)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
pc, err := c.ConsumePartition("my_topic", 0, 10)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
assertMessageOffset(t, <-pc.Messages(), 10)
|
||||
|
||||
// When
|
||||
safeClose(t, pc)
|
||||
pc, err = c.ConsumePartition("my_topic", 0, 10)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Then
|
||||
assertMessageOffset(t, <-pc.Messages(), 10)
|
||||
|
||||
safeClose(t, pc)
|
||||
safeClose(t, c)
|
||||
broker0.Close()
|
||||
}
|
||||
|
||||
// An attempt to consume the same partition twice should fail.
|
||||
func TestConsumerDuplicate(t *testing.T) {
|
||||
// Given
|
||||
broker0 := NewMockBroker(t, 0)
|
||||
broker0.SetHandlerByMap(map[string]MockResponse{
|
||||
"MetadataRequest": NewMockMetadataResponse(t).
|
||||
SetBroker(broker0.Addr(), broker0.BrokerID()).
|
||||
SetLeader("my_topic", 0, broker0.BrokerID()),
|
||||
"OffsetRequest": NewMockOffsetResponse(t).
|
||||
SetOffset("my_topic", 0, OffsetOldest, 0).
|
||||
SetOffset("my_topic", 0, OffsetNewest, 1000),
|
||||
"FetchRequest": NewMockFetchResponse(t, 1),
|
||||
})
|
||||
|
||||
config := NewConfig()
|
||||
config.ChannelBufferSize = 0
|
||||
c, err := NewConsumer([]string{broker0.Addr()}, config)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
pc1, err := c.ConsumePartition("my_topic", 0, 0)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// When
|
||||
pc2, err := c.ConsumePartition("my_topic", 0, 0)
|
||||
|
||||
// Then
|
||||
if pc2 != nil || err != ConfigurationError("That topic/partition is already being consumed") {
|
||||
t.Fatal("A partition cannot be consumed twice at the same time")
|
||||
}
|
||||
|
||||
safeClose(t, pc1)
|
||||
safeClose(t, c)
|
||||
broker0.Close()
|
||||
}
|
||||
|
||||
// If the consumer fails to refresh metadata it keeps retrying with the frequency
|
||||
// specified by `Config.Consumer.Retry.Backoff`.
|
||||
func TestConsumerLeaderRefreshError(t *testing.T) {
|
||||
// Given
|
||||
broker0 := NewMockBroker(t, 100)
|
||||
|
||||
// Stage 1: my_topic/0 served by broker0
|
||||
Logger.Printf(" STAGE 1")
|
||||
|
||||
broker0.SetHandlerByMap(map[string]MockResponse{
|
||||
"MetadataRequest": NewMockMetadataResponse(t).
|
||||
SetBroker(broker0.Addr(), broker0.BrokerID()).
|
||||
SetLeader("my_topic", 0, broker0.BrokerID()),
|
||||
"OffsetRequest": NewMockOffsetResponse(t).
|
||||
SetOffset("my_topic", 0, OffsetOldest, 123).
|
||||
SetOffset("my_topic", 0, OffsetNewest, 1000),
|
||||
"FetchRequest": NewMockFetchResponse(t, 1).
|
||||
SetMessage("my_topic", 0, 123, testMsg),
|
||||
})
|
||||
|
||||
config := NewConfig()
|
||||
config.Net.ReadTimeout = 100 * time.Millisecond
|
||||
config.Consumer.Retry.Backoff = 200 * time.Millisecond
|
||||
config.Consumer.Return.Errors = true
|
||||
config.Metadata.Retry.Max = 0
|
||||
c, err := NewConsumer([]string{broker0.Addr()}, config)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
pc, err := c.ConsumePartition("my_topic", 0, OffsetOldest)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
assertMessageOffset(t, <-pc.Messages(), 123)
|
||||
|
||||
// Stage 2: broker0 says that it is no longer the leader for my_topic/0,
|
||||
// but the requests to retrieve metadata fail with network timeout.
|
||||
Logger.Printf(" STAGE 2")
|
||||
|
||||
fetchResponse2 := &FetchResponse{}
|
||||
fetchResponse2.AddError("my_topic", 0, ErrNotLeaderForPartition)
|
||||
|
||||
broker0.SetHandlerByMap(map[string]MockResponse{
|
||||
"FetchRequest": NewMockWrapper(fetchResponse2),
|
||||
})
|
||||
|
||||
if consErr := <-pc.Errors(); consErr.Err != ErrOutOfBrokers {
|
||||
t.Errorf("Unexpected error: %v", consErr.Err)
|
||||
}
|
||||
|
||||
// Stage 3: finally the metadata returned by broker0 tells that broker1 is
|
||||
// a new leader for my_topic/0. Consumption resumes.
|
||||
|
||||
Logger.Printf(" STAGE 3")
|
||||
|
||||
broker1 := NewMockBroker(t, 101)
|
||||
|
||||
broker1.SetHandlerByMap(map[string]MockResponse{
|
||||
"FetchRequest": NewMockFetchResponse(t, 1).
|
||||
SetMessage("my_topic", 0, 124, testMsg),
|
||||
})
|
||||
broker0.SetHandlerByMap(map[string]MockResponse{
|
||||
"MetadataRequest": NewMockMetadataResponse(t).
|
||||
SetBroker(broker0.Addr(), broker0.BrokerID()).
|
||||
SetBroker(broker1.Addr(), broker1.BrokerID()).
|
||||
SetLeader("my_topic", 0, broker1.BrokerID()),
|
||||
})
|
||||
|
||||
assertMessageOffset(t, <-pc.Messages(), 124)
|
||||
|
||||
safeClose(t, pc)
|
||||
safeClose(t, c)
|
||||
broker1.Close()
|
||||
broker0.Close()
|
||||
}
|
||||
|
||||
func TestConsumerInvalidTopic(t *testing.T) {
|
||||
// Given
|
||||
broker0 := NewMockBroker(t, 100)
|
||||
broker0.SetHandlerByMap(map[string]MockResponse{
|
||||
"MetadataRequest": NewMockMetadataResponse(t).
|
||||
SetBroker(broker0.Addr(), broker0.BrokerID()),
|
||||
})
|
||||
|
||||
c, err := NewConsumer([]string{broker0.Addr()}, nil)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// When
|
||||
pc, err := c.ConsumePartition("my_topic", 0, OffsetOldest)
|
||||
|
||||
// Then
|
||||
if pc != nil || err != ErrUnknownTopicOrPartition {
|
||||
t.Errorf("Should fail with, err=%v", err)
|
||||
}
|
||||
|
||||
safeClose(t, c)
|
||||
broker0.Close()
|
||||
}
|
||||
|
||||
// Nothing bad happens if a partition consumer that has no leader assigned at
|
||||
// the moment is closed.
|
||||
func TestConsumerClosePartitionWithoutLeader(t *testing.T) {
|
||||
// Given
|
||||
broker0 := NewMockBroker(t, 100)
|
||||
broker0.SetHandlerByMap(map[string]MockResponse{
|
||||
"MetadataRequest": NewMockMetadataResponse(t).
|
||||
SetBroker(broker0.Addr(), broker0.BrokerID()).
|
||||
SetLeader("my_topic", 0, broker0.BrokerID()),
|
||||
"OffsetRequest": NewMockOffsetResponse(t).
|
||||
SetOffset("my_topic", 0, OffsetOldest, 123).
|
||||
SetOffset("my_topic", 0, OffsetNewest, 1000),
|
||||
"FetchRequest": NewMockFetchResponse(t, 1).
|
||||
SetMessage("my_topic", 0, 123, testMsg),
|
||||
})
|
||||
|
||||
config := NewConfig()
|
||||
config.Net.ReadTimeout = 100 * time.Millisecond
|
||||
config.Consumer.Retry.Backoff = 100 * time.Millisecond
|
||||
config.Consumer.Return.Errors = true
|
||||
config.Metadata.Retry.Max = 0
|
||||
c, err := NewConsumer([]string{broker0.Addr()}, config)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
pc, err := c.ConsumePartition("my_topic", 0, OffsetOldest)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
assertMessageOffset(t, <-pc.Messages(), 123)
|
||||
|
||||
// broker0 says that it is no longer the leader for my_topic/0, but the
|
||||
// requests to retrieve metadata fail with network timeout.
|
||||
fetchResponse2 := &FetchResponse{}
|
||||
fetchResponse2.AddError("my_topic", 0, ErrNotLeaderForPartition)
|
||||
|
||||
broker0.SetHandlerByMap(map[string]MockResponse{
|
||||
"FetchRequest": NewMockWrapper(fetchResponse2),
|
||||
})
|
||||
|
||||
// When
|
||||
if consErr := <-pc.Errors(); consErr.Err != ErrOutOfBrokers {
|
||||
t.Errorf("Unexpected error: %v", consErr.Err)
|
||||
}
|
||||
|
||||
// Then: the partition consumer can be closed without any problem.
|
||||
safeClose(t, pc)
|
||||
safeClose(t, c)
|
||||
broker0.Close()
|
||||
}
|
||||
|
||||
// If the initial offset passed on partition consumer creation is out of the
|
||||
// actual offset range for the partition, then the partition consumer stops
|
||||
// immediately, closing its output channels.
|
||||
func TestConsumerShutsDownOutOfRange(t *testing.T) {
|
||||
// Given
|
||||
broker0 := NewMockBroker(t, 0)
|
||||
fetchResponse := new(FetchResponse)
|
||||
fetchResponse.AddError("my_topic", 0, ErrOffsetOutOfRange)
|
||||
broker0.SetHandlerByMap(map[string]MockResponse{
|
||||
"MetadataRequest": NewMockMetadataResponse(t).
|
||||
SetBroker(broker0.Addr(), broker0.BrokerID()).
|
||||
SetLeader("my_topic", 0, broker0.BrokerID()),
|
||||
"OffsetRequest": NewMockOffsetResponse(t).
|
||||
SetOffset("my_topic", 0, OffsetNewest, 1234).
|
||||
SetOffset("my_topic", 0, OffsetOldest, 7),
|
||||
"FetchRequest": NewMockWrapper(fetchResponse),
|
||||
})
|
||||
|
||||
master, err := NewConsumer([]string{broker0.Addr()}, nil)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// When
|
||||
consumer, err := master.ConsumePartition("my_topic", 0, 101)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Then: consumer should shut down closing its messages and errors channels.
|
||||
if _, ok := <-consumer.Messages(); ok {
|
||||
t.Error("Expected the consumer to shut down")
|
||||
}
|
||||
safeClose(t, consumer)
|
||||
|
||||
safeClose(t, master)
|
||||
broker0.Close()
|
||||
}
|
||||
|
||||
// If a fetch response contains messages with offsets that are smaller than
|
||||
// requested, then such messages are ignored.
|
||||
func TestConsumerExtraOffsets(t *testing.T) {
|
||||
// Given
|
||||
broker0 := NewMockBroker(t, 0)
|
||||
fetchResponse1 := &FetchResponse{}
|
||||
fetchResponse1.AddMessage("my_topic", 0, nil, testMsg, 1)
|
||||
fetchResponse1.AddMessage("my_topic", 0, nil, testMsg, 2)
|
||||
fetchResponse1.AddMessage("my_topic", 0, nil, testMsg, 3)
|
||||
fetchResponse1.AddMessage("my_topic", 0, nil, testMsg, 4)
|
||||
fetchResponse2 := &FetchResponse{}
|
||||
fetchResponse2.AddError("my_topic", 0, ErrNoError)
|
||||
broker0.SetHandlerByMap(map[string]MockResponse{
|
||||
"MetadataRequest": NewMockMetadataResponse(t).
|
||||
SetBroker(broker0.Addr(), broker0.BrokerID()).
|
||||
SetLeader("my_topic", 0, broker0.BrokerID()),
|
||||
"OffsetRequest": NewMockOffsetResponse(t).
|
||||
SetOffset("my_topic", 0, OffsetNewest, 1234).
|
||||
SetOffset("my_topic", 0, OffsetOldest, 0),
|
||||
"FetchRequest": NewMockSequence(fetchResponse1, fetchResponse2),
|
||||
})
|
||||
|
||||
master, err := NewConsumer([]string{broker0.Addr()}, nil)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// When
|
||||
consumer, err := master.ConsumePartition("my_topic", 0, 3)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Then: messages with offsets 1 and 2 are not returned even though they
|
||||
// are present in the response.
|
||||
assertMessageOffset(t, <-consumer.Messages(), 3)
|
||||
assertMessageOffset(t, <-consumer.Messages(), 4)
|
||||
|
||||
safeClose(t, consumer)
|
||||
safeClose(t, master)
|
||||
broker0.Close()
|
||||
}
|
||||
|
||||
// It is fine if offsets of fetched messages are not sequential (although
|
||||
// still strictly increasing).
|
||||
func TestConsumerNonSequentialOffsets(t *testing.T) {
|
||||
// Given
|
||||
broker0 := NewMockBroker(t, 0)
|
||||
fetchResponse1 := &FetchResponse{}
|
||||
fetchResponse1.AddMessage("my_topic", 0, nil, testMsg, 5)
|
||||
fetchResponse1.AddMessage("my_topic", 0, nil, testMsg, 7)
|
||||
fetchResponse1.AddMessage("my_topic", 0, nil, testMsg, 11)
|
||||
fetchResponse2 := &FetchResponse{}
|
||||
fetchResponse2.AddError("my_topic", 0, ErrNoError)
|
||||
broker0.SetHandlerByMap(map[string]MockResponse{
|
||||
"MetadataRequest": NewMockMetadataResponse(t).
|
||||
SetBroker(broker0.Addr(), broker0.BrokerID()).
|
||||
SetLeader("my_topic", 0, broker0.BrokerID()),
|
||||
"OffsetRequest": NewMockOffsetResponse(t).
|
||||
SetOffset("my_topic", 0, OffsetNewest, 1234).
|
||||
SetOffset("my_topic", 0, OffsetOldest, 0),
|
||||
"FetchRequest": NewMockSequence(fetchResponse1, fetchResponse2),
|
||||
})
|
||||
|
||||
master, err := NewConsumer([]string{broker0.Addr()}, nil)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// When
|
||||
consumer, err := master.ConsumePartition("my_topic", 0, 3)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Then: the messages with non-sequential offsets 5, 7 and 11 are all
// returned, in order, even though the gaps between them are skipped.
|
||||
assertMessageOffset(t, <-consumer.Messages(), 5)
|
||||
assertMessageOffset(t, <-consumer.Messages(), 7)
|
||||
assertMessageOffset(t, <-consumer.Messages(), 11)
|
||||
|
||||
safeClose(t, consumer)
|
||||
safeClose(t, master)
|
||||
broker0.Close()
|
||||
}
|
||||
|
||||
// If leadership for a partition is changing then the consumer resolves the new
|
||||
// leader and switches to it.
|
||||
func TestConsumerRebalancingMultiplePartitions(t *testing.T) {
|
||||
// initial setup
|
||||
seedBroker := NewMockBroker(t, 10)
|
||||
leader0 := NewMockBroker(t, 0)
|
||||
leader1 := NewMockBroker(t, 1)
|
||||
|
||||
seedBroker.SetHandlerByMap(map[string]MockResponse{
|
||||
"MetadataRequest": NewMockMetadataResponse(t).
|
||||
SetBroker(leader0.Addr(), leader0.BrokerID()).
|
||||
SetBroker(leader1.Addr(), leader1.BrokerID()).
|
||||
SetLeader("my_topic", 0, leader0.BrokerID()).
|
||||
SetLeader("my_topic", 1, leader1.BrokerID()),
|
||||
})
|
||||
|
||||
mockOffsetResponse1 := NewMockOffsetResponse(t).
|
||||
SetOffset("my_topic", 0, OffsetOldest, 0).
|
||||
SetOffset("my_topic", 0, OffsetNewest, 1000).
|
||||
SetOffset("my_topic", 1, OffsetOldest, 0).
|
||||
SetOffset("my_topic", 1, OffsetNewest, 1000)
|
||||
leader0.SetHandlerByMap(map[string]MockResponse{
|
||||
"OffsetRequest": mockOffsetResponse1,
|
||||
"FetchRequest": NewMockFetchResponse(t, 1),
|
||||
})
|
||||
leader1.SetHandlerByMap(map[string]MockResponse{
|
||||
"OffsetRequest": mockOffsetResponse1,
|
||||
"FetchRequest": NewMockFetchResponse(t, 1),
|
||||
})
|
||||
|
||||
// launch test goroutines
|
||||
config := NewConfig()
|
||||
config.Consumer.Retry.Backoff = 50 * time.Millisecond
|
||||
master, err := NewConsumer([]string{seedBroker.Addr()}, config)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// we expect to end up (eventually) consuming exactly ten messages on each partition
|
||||
var wg sync.WaitGroup
|
||||
for i := int32(0); i < 2; i++ {
|
||||
consumer, err := master.ConsumePartition("my_topic", i, 0)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
go func(c PartitionConsumer) {
|
||||
for err := range c.Errors() {
|
||||
t.Error(err)
|
||||
}
|
||||
}(consumer)
|
||||
|
||||
wg.Add(1)
|
||||
go func(partition int32, c PartitionConsumer) {
|
||||
for i := 0; i < 10; i++ {
|
||||
message := <-c.Messages()
|
||||
if message.Offset != int64(i) {
|
||||
t.Error("Incorrect message offset!", i, partition, message.Offset)
|
||||
}
|
||||
if message.Partition != partition {
|
||||
t.Error("Incorrect message partition!")
|
||||
}
|
||||
}
|
||||
safeClose(t, c)
|
||||
wg.Done()
|
||||
}(i, consumer)
|
||||
}
|
||||
|
||||
time.Sleep(50 * time.Millisecond)
|
||||
Logger.Printf(" STAGE 1")
|
||||
// Stage 1:
|
||||
// * my_topic/0 -> leader0 serves 4 messages
|
||||
// * my_topic/1 -> leader1 serves 0 messages
|
||||
|
||||
mockFetchResponse := NewMockFetchResponse(t, 1)
|
||||
for i := 0; i < 4; i++ {
|
||||
mockFetchResponse.SetMessage("my_topic", 0, int64(i), testMsg)
|
||||
}
|
||||
leader0.SetHandlerByMap(map[string]MockResponse{
|
||||
"FetchRequest": mockFetchResponse,
|
||||
})
|
||||
|
||||
time.Sleep(50 * time.Millisecond)
|
||||
Logger.Printf(" STAGE 2")
|
||||
// Stage 2:
|
||||
// * leader0 says that it is no longer serving my_topic/0
|
||||
// * seedBroker tells that leader1 is serving my_topic/0 now
|
||||
|
||||
// seed broker tells that the new partition 0 leader is leader1
|
||||
seedBroker.SetHandlerByMap(map[string]MockResponse{
|
||||
"MetadataRequest": NewMockMetadataResponse(t).
|
||||
SetLeader("my_topic", 0, leader1.BrokerID()).
|
||||
SetLeader("my_topic", 1, leader1.BrokerID()),
|
||||
})
|
||||
|
||||
// leader0 says no longer leader of partition 0
|
||||
fetchResponse := new(FetchResponse)
|
||||
fetchResponse.AddError("my_topic", 0, ErrNotLeaderForPartition)
|
||||
leader0.SetHandlerByMap(map[string]MockResponse{
|
||||
"FetchRequest": NewMockWrapper(fetchResponse),
|
||||
})
|
||||
|
||||
time.Sleep(50 * time.Millisecond)
|
||||
Logger.Printf(" STAGE 3")
|
||||
// Stage 3:
|
||||
// * my_topic/0 -> leader1 serves 3 messages
|
||||
// * my_topic/1 -> leader1 serves 8 messages
|
||||
|
||||
// leader1 provides 3 messages on partition 0, and 8 messages on partition 1
|
||||
mockFetchResponse2 := NewMockFetchResponse(t, 2)
|
||||
for i := 4; i < 7; i++ {
|
||||
mockFetchResponse2.SetMessage("my_topic", 0, int64(i), testMsg)
|
||||
}
|
||||
for i := 0; i < 8; i++ {
|
||||
mockFetchResponse2.SetMessage("my_topic", 1, int64(i), testMsg)
|
||||
}
|
||||
leader1.SetHandlerByMap(map[string]MockResponse{
|
||||
"FetchRequest": mockFetchResponse2,
|
||||
})
|
||||
|
||||
time.Sleep(50 * time.Millisecond)
|
||||
Logger.Printf(" STAGE 4")
|
||||
// Stage 4:
|
||||
// * my_topic/0 -> leader1 serves 3 messages
|
||||
// * my_topic/1 -> leader1 tells that it is no longer the leader
|
||||
// * seedBroker tells that leader0 is a new leader for my_topic/1
|
||||
|
||||
// metadata assigns 0 to leader1 and 1 to leader0
|
||||
seedBroker.SetHandlerByMap(map[string]MockResponse{
|
||||
"MetadataRequest": NewMockMetadataResponse(t).
|
||||
SetLeader("my_topic", 0, leader1.BrokerID()).
|
||||
SetLeader("my_topic", 1, leader0.BrokerID()),
|
||||
})
|
||||
|
||||
// leader1 provides three more messages on partition0, says no longer leader of partition1
|
||||
mockFetchResponse3 := NewMockFetchResponse(t, 3).
|
||||
SetMessage("my_topic", 0, int64(7), testMsg).
|
||||
SetMessage("my_topic", 0, int64(8), testMsg).
|
||||
SetMessage("my_topic", 0, int64(9), testMsg)
|
||||
fetchResponse4 := new(FetchResponse)
|
||||
fetchResponse4.AddError("my_topic", 1, ErrNotLeaderForPartition)
|
||||
leader1.SetHandlerByMap(map[string]MockResponse{
|
||||
"FetchRequest": NewMockSequence(mockFetchResponse3, fetchResponse4),
|
||||
})
|
||||
|
||||
// leader0 provides two messages on partition 1
|
||||
mockFetchResponse4 := NewMockFetchResponse(t, 2)
|
||||
for i := 8; i < 10; i++ {
|
||||
mockFetchResponse4.SetMessage("my_topic", 1, int64(i), testMsg)
|
||||
}
|
||||
leader0.SetHandlerByMap(map[string]MockResponse{
|
||||
"FetchRequest": mockFetchResponse4,
|
||||
})
|
||||
|
||||
wg.Wait()
|
||||
safeClose(t, master)
|
||||
leader1.Close()
|
||||
leader0.Close()
|
||||
seedBroker.Close()
|
||||
}
|
||||
|
||||
// When two partitions have the same broker as the leader, if one partition
|
||||
// consumer channel buffer is full then that does not affect the ability to
|
||||
// read messages by the other consumer.
|
||||
func TestConsumerInterleavedClose(t *testing.T) {
|
||||
// Given
|
||||
broker0 := NewMockBroker(t, 0)
|
||||
broker0.SetHandlerByMap(map[string]MockResponse{
|
||||
"MetadataRequest": NewMockMetadataResponse(t).
|
||||
SetBroker(broker0.Addr(), broker0.BrokerID()).
|
||||
SetLeader("my_topic", 0, broker0.BrokerID()).
|
||||
SetLeader("my_topic", 1, broker0.BrokerID()),
|
||||
"OffsetRequest": NewMockOffsetResponse(t).
|
||||
SetOffset("my_topic", 0, OffsetOldest, 1000).
|
||||
SetOffset("my_topic", 0, OffsetNewest, 1100).
|
||||
SetOffset("my_topic", 1, OffsetOldest, 2000).
|
||||
SetOffset("my_topic", 1, OffsetNewest, 2100),
|
||||
"FetchRequest": NewMockFetchResponse(t, 1).
|
||||
SetMessage("my_topic", 0, 1000, testMsg).
|
||||
SetMessage("my_topic", 0, 1001, testMsg).
|
||||
SetMessage("my_topic", 0, 1002, testMsg).
|
||||
SetMessage("my_topic", 1, 2000, testMsg),
|
||||
})
|
||||
|
||||
config := NewConfig()
|
||||
config.ChannelBufferSize = 0
|
||||
master, err := NewConsumer([]string{broker0.Addr()}, config)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
c0, err := master.ConsumePartition("my_topic", 0, 1000)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
c1, err := master.ConsumePartition("my_topic", 1, 2000)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// When/Then: we can read from partition 0 even if nobody reads from partition 1
|
||||
assertMessageOffset(t, <-c0.Messages(), 1000)
|
||||
assertMessageOffset(t, <-c0.Messages(), 1001)
|
||||
assertMessageOffset(t, <-c0.Messages(), 1002)
|
||||
|
||||
safeClose(t, c1)
|
||||
safeClose(t, c0)
|
||||
safeClose(t, master)
|
||||
broker0.Close()
|
||||
}
|
||||
|
||||
func TestConsumerBounceWithReferenceOpen(t *testing.T) {
|
||||
broker0 := NewMockBroker(t, 0)
|
||||
broker0Addr := broker0.Addr()
|
||||
broker1 := NewMockBroker(t, 1)
|
||||
|
||||
mockMetadataResponse := NewMockMetadataResponse(t).
|
||||
SetBroker(broker0.Addr(), broker0.BrokerID()).
|
||||
SetBroker(broker1.Addr(), broker1.BrokerID()).
|
||||
SetLeader("my_topic", 0, broker0.BrokerID()).
|
||||
SetLeader("my_topic", 1, broker1.BrokerID())
|
||||
|
||||
mockOffsetResponse := NewMockOffsetResponse(t).
|
||||
SetOffset("my_topic", 0, OffsetOldest, 1000).
|
||||
SetOffset("my_topic", 0, OffsetNewest, 1100).
|
||||
SetOffset("my_topic", 1, OffsetOldest, 2000).
|
||||
SetOffset("my_topic", 1, OffsetNewest, 2100)
|
||||
|
||||
mockFetchResponse := NewMockFetchResponse(t, 1)
|
||||
for i := 0; i < 10; i++ {
|
||||
mockFetchResponse.SetMessage("my_topic", 0, int64(1000+i), testMsg)
|
||||
mockFetchResponse.SetMessage("my_topic", 1, int64(2000+i), testMsg)
|
||||
}
|
||||
|
||||
broker0.SetHandlerByMap(map[string]MockResponse{
|
||||
"OffsetRequest": mockOffsetResponse,
|
||||
"FetchRequest": mockFetchResponse,
|
||||
})
|
||||
broker1.SetHandlerByMap(map[string]MockResponse{
|
||||
"MetadataRequest": mockMetadataResponse,
|
||||
"OffsetRequest": mockOffsetResponse,
|
||||
"FetchRequest": mockFetchResponse,
|
||||
})
|
||||
|
||||
config := NewConfig()
|
||||
config.Consumer.Return.Errors = true
|
||||
config.Consumer.Retry.Backoff = 100 * time.Millisecond
|
||||
config.ChannelBufferSize = 1
|
||||
master, err := NewConsumer([]string{broker1.Addr()}, config)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
c0, err := master.ConsumePartition("my_topic", 0, 1000)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
c1, err := master.ConsumePartition("my_topic", 1, 2000)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// read messages from both partitions to make sure that both brokers operate
|
||||
// normally.
|
||||
assertMessageOffset(t, <-c0.Messages(), 1000)
|
||||
assertMessageOffset(t, <-c1.Messages(), 2000)
|
||||
|
||||
// Simulate broker shutdown. Note that metadata response does not change,
|
||||
// that is the leadership does not move to another broker. So partition
|
||||
// consumer will keep retrying to restore the connection with the broker.
|
||||
broker0.Close()
|
||||
|
||||
// Make sure that while the partition/0 leader is down, consumer/partition/1
|
||||
// is capable of pulling messages from broker1.
|
||||
for i := 1; i < 7; i++ {
|
||||
offset := (<-c1.Messages()).Offset
|
||||
if offset != int64(2000+i) {
|
||||
t.Errorf("Expected offset %d from consumer/partition/1", int64(2000+i))
|
||||
}
|
||||
}
|
||||
|
||||
// Bring broker0 back to service.
|
||||
broker0 = NewMockBrokerAddr(t, 0, broker0Addr)
|
||||
broker0.SetHandlerByMap(map[string]MockResponse{
|
||||
"FetchRequest": mockFetchResponse,
|
||||
})
|
||||
|
||||
// Read the rest of messages from both partitions.
|
||||
for i := 7; i < 10; i++ {
|
||||
assertMessageOffset(t, <-c1.Messages(), int64(2000+i))
|
||||
}
|
||||
for i := 1; i < 10; i++ {
|
||||
assertMessageOffset(t, <-c0.Messages(), int64(1000+i))
|
||||
}
|
||||
|
||||
select {
|
||||
case <-c0.Errors():
|
||||
default:
|
||||
t.Errorf("Partition consumer should have detected broker restart")
|
||||
}
|
||||
|
||||
safeClose(t, c1)
|
||||
safeClose(t, c0)
|
||||
safeClose(t, master)
|
||||
broker0.Close()
|
||||
broker1.Close()
|
||||
}
|
||||
|
||||
func TestConsumerOffsetOutOfRange(t *testing.T) {
|
||||
// Given
|
||||
broker0 := NewMockBroker(t, 2)
|
||||
broker0.SetHandlerByMap(map[string]MockResponse{
|
||||
"MetadataRequest": NewMockMetadataResponse(t).
|
||||
SetBroker(broker0.Addr(), broker0.BrokerID()).
|
||||
SetLeader("my_topic", 0, broker0.BrokerID()),
|
||||
"OffsetRequest": NewMockOffsetResponse(t).
|
||||
SetOffset("my_topic", 0, OffsetNewest, 1234).
|
||||
SetOffset("my_topic", 0, OffsetOldest, 2345),
|
||||
})
|
||||
|
||||
master, err := NewConsumer([]string{broker0.Addr()}, nil)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// When/Then
|
||||
if _, err := master.ConsumePartition("my_topic", 0, 0); err != ErrOffsetOutOfRange {
|
||||
t.Fatal("Should return ErrOffsetOutOfRange, got:", err)
|
||||
}
|
||||
if _, err := master.ConsumePartition("my_topic", 0, 3456); err != ErrOffsetOutOfRange {
|
||||
t.Fatal("Should return ErrOffsetOutOfRange, got:", err)
|
||||
}
|
||||
if _, err := master.ConsumePartition("my_topic", 0, -3); err != ErrOffsetOutOfRange {
|
||||
t.Fatal("Should return ErrOffsetOutOfRange, got:", err)
|
||||
}
|
||||
|
||||
safeClose(t, master)
|
||||
broker0.Close()
|
||||
}
|
||||
|
||||
func TestConsumerExpiryTicker(t *testing.T) {
|
||||
// Given
|
||||
broker0 := NewMockBroker(t, 0)
|
||||
fetchResponse1 := &FetchResponse{}
|
||||
for i := 1; i <= 8; i++ {
|
||||
fetchResponse1.AddMessage("my_topic", 0, nil, testMsg, int64(i))
|
||||
}
|
||||
broker0.SetHandlerByMap(map[string]MockResponse{
|
||||
"MetadataRequest": NewMockMetadataResponse(t).
|
||||
SetBroker(broker0.Addr(), broker0.BrokerID()).
|
||||
SetLeader("my_topic", 0, broker0.BrokerID()),
|
||||
"OffsetRequest": NewMockOffsetResponse(t).
|
||||
SetOffset("my_topic", 0, OffsetNewest, 1234).
|
||||
SetOffset("my_topic", 0, OffsetOldest, 1),
|
||||
"FetchRequest": NewMockSequence(fetchResponse1),
|
||||
})
|
||||
|
||||
config := NewConfig()
|
||||
config.ChannelBufferSize = 0
|
||||
config.Consumer.MaxProcessingTime = 10 * time.Millisecond
|
||||
master, err := NewConsumer([]string{broker0.Addr()}, config)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// When
|
||||
consumer, err := master.ConsumePartition("my_topic", 0, 1)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Then: messages with offsets 1 through 8 are read
|
||||
for i := 1; i <= 8; i++ {
|
||||
assertMessageOffset(t, <-consumer.Messages(), int64(i))
|
||||
time.Sleep(2 * time.Millisecond)
|
||||
}
|
||||
|
||||
safeClose(t, consumer)
|
||||
safeClose(t, master)
|
||||
broker0.Close()
|
||||
}
|
||||
|
||||
func assertMessageOffset(t *testing.T, msg *ConsumerMessage, expectedOffset int64) {
|
||||
if msg.Offset != expectedOffset {
|
||||
t.Errorf("Incorrect message offset: expected=%d, actual=%d", expectedOffset, msg.Offset)
|
||||
}
|
||||
}
|
||||
|
||||
// This example shows how to use the consumer to read messages
|
||||
// from a single partition.
|
||||
func ExampleConsumer() {
|
||||
consumer, err := NewConsumer([]string{"localhost:9092"}, nil)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
defer func() {
|
||||
if err := consumer.Close(); err != nil {
|
||||
log.Fatalln(err)
|
||||
}
|
||||
}()
|
||||
|
||||
partitionConsumer, err := consumer.ConsumePartition("my_topic", 0, OffsetNewest)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
defer func() {
|
||||
if err := partitionConsumer.Close(); err != nil {
|
||||
log.Fatalln(err)
|
||||
}
|
||||
}()
|
||||
|
||||
// Trap SIGINT to trigger a shutdown.
|
||||
signals := make(chan os.Signal, 1)
|
||||
signal.Notify(signals, os.Interrupt)
|
||||
|
||||
consumed := 0
|
||||
ConsumerLoop:
|
||||
for {
|
||||
select {
|
||||
case msg := <-partitionConsumer.Messages():
|
||||
log.Printf("Consumed message offset %d\n", msg.Offset)
|
||||
consumed++
|
||||
case <-signals:
|
||||
break ConsumerLoop
|
||||
}
|
||||
}
|
||||
|
||||
log.Printf("Consumed: %d\n", consumed)
|
||||
}
|
34
vendor/github.com/Shopify/sarama/describe_groups_request_test.go
generated
vendored
@ -1,34 +0,0 @@
package sarama

import "testing"

var (
	emptyDescribeGroupsRequest = []byte{0, 0, 0, 0}

	singleDescribeGroupsRequest = []byte{
		0, 0, 0, 1, // 1 group
		0, 3, 'f', 'o', 'o', // group name: foo
	}

	doubleDescribeGroupsRequest = []byte{
		0, 0, 0, 2, // 2 groups
		0, 3, 'f', 'o', 'o', // group name: foo
		0, 3, 'b', 'a', 'r', // group name: bar
	}
)

func TestDescribeGroupsRequest(t *testing.T) {
	var request *DescribeGroupsRequest

	request = new(DescribeGroupsRequest)
	testRequest(t, "no groups", request, emptyDescribeGroupsRequest)

	request = new(DescribeGroupsRequest)
	request.AddGroup("foo")
	testRequest(t, "one group", request, singleDescribeGroupsRequest)

	request = new(DescribeGroupsRequest)
	request.AddGroup("foo")
	request.AddGroup("bar")
	testRequest(t, "two groups", request, doubleDescribeGroupsRequest)
}
91
vendor/github.com/Shopify/sarama/describe_groups_response_test.go
generated
vendored
@ -1,91 +0,0 @@
|
||||
package sarama
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"testing"
|
||||
)
|
||||
|
||||
var (
|
||||
describeGroupsResponseEmpty = []byte{
|
||||
0, 0, 0, 0, // no groups
|
||||
}
|
||||
|
||||
describeGroupsResponsePopulated = []byte{
|
||||
0, 0, 0, 2, // 2 groups
|
||||
|
||||
0, 0, // no error
|
||||
0, 3, 'f', 'o', 'o', // Group ID
|
||||
0, 3, 'b', 'a', 'r', // State
|
||||
0, 8, 'c', 'o', 'n', 's', 'u', 'm', 'e', 'r', // ProtocolType: consumer
|
||||
0, 3, 'b', 'a', 'z', // Protocol name
|
||||
0, 0, 0, 1, // 1 member
|
||||
0, 2, 'i', 'd', // Member ID
|
||||
0, 6, 's', 'a', 'r', 'a', 'm', 'a', // Client ID
|
||||
0, 9, 'l', 'o', 'c', 'a', 'l', 'h', 'o', 's', 't', // Client Host
|
||||
0, 0, 0, 3, 0x01, 0x02, 0x03, // MemberMetadata
|
||||
0, 0, 0, 3, 0x04, 0x05, 0x06, // MemberAssignment
|
||||
|
||||
0, 30, // ErrGroupAuthorizationFailed
|
||||
0, 0,
|
||||
0, 0,
|
||||
0, 0,
|
||||
0, 0,
|
||||
0, 0, 0, 0,
|
||||
}
|
||||
)
|
||||
|
||||
func TestDescribeGroupsResponse(t *testing.T) {
|
||||
var response *DescribeGroupsResponse
|
||||
|
||||
response = new(DescribeGroupsResponse)
|
||||
testVersionDecodable(t, "empty", response, describeGroupsResponseEmpty, 0)
|
||||
if len(response.Groups) != 0 {
|
||||
t.Error("Expected no groups")
|
||||
}
|
||||
|
||||
response = new(DescribeGroupsResponse)
|
||||
testVersionDecodable(t, "populated", response, describeGroupsResponsePopulated, 0)
|
||||
if len(response.Groups) != 2 {
|
||||
t.Error("Expected two groups")
|
||||
}
|
||||
|
||||
group0 := response.Groups[0]
|
||||
if group0.Err != ErrNoError {
|
||||
t.Error("Unxpected groups[0].Err, found", group0.Err)
|
||||
}
|
||||
if group0.GroupId != "foo" {
|
||||
t.Error("Unxpected groups[0].GroupId, found", group0.GroupId)
|
||||
}
|
||||
if group0.State != "bar" {
|
||||
t.Error("Unxpected groups[0].State, found", group0.State)
|
||||
}
|
||||
if group0.ProtocolType != "consumer" {
|
||||
t.Error("Unxpected groups[0].ProtocolType, found", group0.ProtocolType)
|
||||
}
|
||||
if group0.Protocol != "baz" {
|
||||
t.Error("Unxpected groups[0].Protocol, found", group0.Protocol)
|
||||
}
|
||||
if len(group0.Members) != 1 {
|
||||
t.Error("Unxpected groups[0].Members, found", group0.Members)
|
||||
}
|
||||
if group0.Members["id"].ClientId != "sarama" {
|
||||
t.Error("Unxpected groups[0].Members[id].ClientId, found", group0.Members["id"].ClientId)
|
||||
}
|
||||
if group0.Members["id"].ClientHost != "localhost" {
|
||||
t.Error("Unxpected groups[0].Members[id].ClientHost, found", group0.Members["id"].ClientHost)
|
||||
}
|
||||
if !reflect.DeepEqual(group0.Members["id"].MemberMetadata, []byte{0x01, 0x02, 0x03}) {
|
||||
t.Error("Unxpected groups[0].Members[id].MemberMetadata, found", group0.Members["id"].MemberMetadata)
|
||||
}
|
||||
if !reflect.DeepEqual(group0.Members["id"].MemberAssignment, []byte{0x04, 0x05, 0x06}) {
|
||||
t.Error("Unxpected groups[0].Members[id].MemberAssignment, found", group0.Members["id"].MemberAssignment)
|
||||
}
|
||||
|
||||
group1 := response.Groups[1]
|
||||
if group1.Err != ErrGroupAuthorizationFailed {
|
||||
t.Error("Unxpected groups[1].Err, found", group0.Err)
|
||||
}
|
||||
if len(group1.Members) != 0 {
|
||||
t.Error("Unxpected groups[1].Members, found", group0.Members)
|
||||
}
|
||||
}
|
9
vendor/github.com/Shopify/sarama/examples/README.md
generated
vendored
@ -1,9 +0,0 @@
# Sarama examples

This folder contains example applications to demonstrate the use of Sarama. For code snippet examples on how to use the different types in Sarama, see [Sarama's API documentation on godoc.org](https://godoc.org/github.com/Shopify/sarama).

In these examples, we use `github.com/Shopify/sarama` as the import path. We do this to ensure all the examples are up to date with the latest changes in Sarama. For your own applications, you may want to use `gopkg.in/Shopify/sarama.v1` to lock into a stable API version.

#### HTTP server

[http_server](./http_server) is a simple HTTP server that uses the sync producer to produce data as part of the request handling cycle, and the async producer to maintain an access log. It also uses the [mocks subpackage](https://godoc.org/github.com/Shopify/sarama/mocks) to test both.
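For the testing side mentioned above, here is a minimal sketch of how the mocks subpackage can stand in for a real `SyncProducer` in a unit test. This is an illustration only, not a file from the examples folder; the test name is made up, and the mock fails the test on `Close()` if an expectation was never satisfied.

```go
package main

import (
	"testing"

	"github.com/Shopify/sarama"
	"github.com/Shopify/sarama/mocks"
)

// TestProduceWithMock is a hypothetical test: the mock producer records the
// expectation and reports a failure on Close() if it was not met.
func TestProduceWithMock(t *testing.T) {
	mockProducer := mocks.NewSyncProducer(t, nil)
	mockProducer.ExpectSendMessageAndSucceed()

	// Any code written against the sarama.SyncProducer interface can take the
	// mock instead of a producer connected to a real cluster.
	var p sarama.SyncProducer = mockProducer
	if _, _, err := p.SendMessage(&sarama.ProducerMessage{
		Topic: "important", // topic name taken from the http_server example
		Value: sarama.StringEncoder("data"),
	}); err != nil {
		t.Fatal(err)
	}

	if err := mockProducer.Close(); err != nil {
		t.Error(err)
	}
}
```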
2
vendor/github.com/Shopify/sarama/examples/http_server/.gitignore
generated
vendored
@ -1,2 +0,0 @@
http_server
http_server.test
7
vendor/github.com/Shopify/sarama/examples/http_server/README.md
generated
vendored
@ -1,7 +0,0 @@
# HTTP server example

This HTTP server example shows you how to use the AsyncProducer and SyncProducer, and how to test them using mocks. The server simply sends the data of the HTTP request's query string to Kafka, and sends a 200 result if that succeeds. For every request, it will also send an access log entry to Kafka in the background.

If you need to know whether a message was successfully sent to the Kafka cluster before you can send your HTTP response, using the `SyncProducer` is probably the simplest way to achieve this. If you don't care, e.g. for the access log, the `AsyncProducer` lets you fire and forget: you can send the HTTP response while the message is still being produced in the background.

One important thing to note is that both the `SyncProducer` and `AsyncProducer` are **thread-safe**. Go's `http.Server` handles requests concurrently in different goroutines, but you can use a single producer safely. This will actually achieve efficiency gains, as the producer will be able to batch messages from concurrent requests together.
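To make the trade-off above concrete, here is a minimal sketch of the two call styles. It is not part of the original example; the broker address is a placeholder and error handling is reduced to logging. The sync path blocks until the cluster acknowledges the message, while the async path only enqueues it and surfaces failures later on the `Errors()` channel.

```go
package main

import (
	"log"

	"github.com/Shopify/sarama"
)

func main() {
	brokers := []string{"localhost:9092"} // placeholder broker address

	// Synchronous path: SendMessage blocks until the cluster acks (or fails),
	// so the caller knows the outcome before writing the HTTP response.
	syncConfig := sarama.NewConfig()
	syncConfig.Producer.RequiredAcks = sarama.WaitForAll
	syncConfig.Producer.Return.Successes = true // required for SyncProducer
	sp, err := sarama.NewSyncProducer(brokers, syncConfig)
	if err != nil {
		log.Fatalln(err)
	}
	defer sp.Close()

	partition, offset, err := sp.SendMessage(&sarama.ProducerMessage{
		Topic: "important",
		Value: sarama.StringEncoder("payload"),
	})
	if err != nil {
		log.Println("sync produce failed:", err)
	} else {
		log.Printf("stored at important/%d/%d", partition, offset)
	}

	// Asynchronous path: Input() only enqueues; failures show up later on the
	// Errors() channel, which must be drained so the producer never blocks.
	ap, err := sarama.NewAsyncProducer(brokers, sarama.NewConfig())
	if err != nil {
		log.Fatalln(err)
	}
	defer ap.Close()

	go func() {
		for err := range ap.Errors() {
			log.Println("async produce failed:", err)
		}
	}()
	ap.Input() <- &sarama.ProducerMessage{
		Topic: "access_log",
		Value: sarama.StringEncoder("entry"),
	}
}
```

Either producer can be shared freely across request-handling goroutines, which is what lets the example keep a single instance of each for the whole server.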
247
vendor/github.com/Shopify/sarama/examples/http_server/http_server.go
generated
vendored
@ -1,247 +0,0 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"github.com/Shopify/sarama"
|
||||
|
||||
"crypto/tls"
|
||||
"crypto/x509"
|
||||
"encoding/json"
|
||||
"flag"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"net/http"
|
||||
"os"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
var (
|
||||
addr = flag.String("addr", ":8080", "The address to bind to")
|
||||
brokers = flag.String("brokers", os.Getenv("KAFKA_PEERS"), "The Kafka brokers to connect to, as a comma separated list")
|
||||
verbose = flag.Bool("verbose", false, "Turn on Sarama logging")
|
||||
certFile = flag.String("certificate", "", "The optional certificate file for client authentication")
|
||||
keyFile = flag.String("key", "", "The optional key file for client authentication")
|
||||
caFile = flag.String("ca", "", "The optional certificate authority file for TLS client authentication")
|
||||
verifySsl = flag.Bool("verify", false, "Optional verify ssl certificates chain")
|
||||
)
|
||||
|
||||
func main() {
|
||||
flag.Parse()
|
||||
|
||||
if *verbose {
|
||||
sarama.Logger = log.New(os.Stdout, "[sarama] ", log.LstdFlags)
|
||||
}
|
||||
|
||||
if *brokers == "" {
|
||||
flag.PrintDefaults()
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
brokerList := strings.Split(*brokers, ",")
|
||||
log.Printf("Kafka brokers: %s", strings.Join(brokerList, ", "))
|
||||
|
||||
server := &Server{
|
||||
DataCollector: newDataCollector(brokerList),
|
||||
AccessLogProducer: newAccessLogProducer(brokerList),
|
||||
}
|
||||
defer func() {
|
||||
if err := server.Close(); err != nil {
|
||||
log.Println("Failed to close server", err)
|
||||
}
|
||||
}()
|
||||
|
||||
log.Fatal(server.Run(*addr))
|
||||
}
|
||||
|
||||
func createTlsConfiguration() (t *tls.Config) {
|
||||
if *certFile != "" && *keyFile != "" && *caFile != "" {
|
||||
cert, err := tls.LoadX509KeyPair(*certFile, *keyFile)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
caCert, err := ioutil.ReadFile(*caFile)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
caCertPool := x509.NewCertPool()
|
||||
caCertPool.AppendCertsFromPEM(caCert)
|
||||
|
||||
t = &tls.Config{
|
||||
Certificates: []tls.Certificate{cert},
|
||||
RootCAs: caCertPool,
|
||||
InsecureSkipVerify: *verifySsl,
|
||||
}
|
||||
}
|
||||
// will be nil by default if nothing is provided
|
||||
return t
|
||||
}
|
||||
|
||||
type Server struct {
|
||||
DataCollector sarama.SyncProducer
|
||||
AccessLogProducer sarama.AsyncProducer
|
||||
}
|
||||
|
||||
func (s *Server) Close() error {
|
||||
if err := s.DataCollector.Close(); err != nil {
|
||||
log.Println("Failed to shut down data collector cleanly", err)
|
||||
}
|
||||
|
||||
if err := s.AccessLogProducer.Close(); err != nil {
|
||||
log.Println("Failed to shut down access log producer cleanly", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *Server) Handler() http.Handler {
|
||||
return s.withAccessLog(s.collectQueryStringData())
|
||||
}
|
||||
|
||||
func (s *Server) Run(addr string) error {
|
||||
httpServer := &http.Server{
|
||||
Addr: addr,
|
||||
Handler: s.Handler(),
|
||||
}
|
||||
|
||||
log.Printf("Listening for requests on %s...\n", addr)
|
||||
return httpServer.ListenAndServe()
|
||||
}
|
||||
|
||||
func (s *Server) collectQueryStringData() http.Handler {
|
||||
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
if r.URL.Path != "/" {
|
||||
http.NotFound(w, r)
|
||||
return
|
||||
}
|
||||
|
||||
// We are not setting a message key, which means that all messages will
|
||||
// be distributed randomly over the different partitions.
|
||||
partition, offset, err := s.DataCollector.SendMessage(&sarama.ProducerMessage{
|
||||
Topic: "important",
|
||||
Value: sarama.StringEncoder(r.URL.RawQuery),
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
w.WriteHeader(http.StatusInternalServerError)
|
||||
fmt.Fprintf(w, "Failed to store your data:, %s", err)
|
||||
} else {
|
||||
// The tuple (topic, partition, offset) can be used as a unique identifier
|
||||
// for a message in a Kafka cluster.
|
||||
fmt.Fprintf(w, "Your data is stored with unique identifier important/%d/%d", partition, offset)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
type accessLogEntry struct {
|
||||
Method string `json:"method"`
|
||||
Host string `json:"host"`
|
||||
Path string `json:"path"`
|
||||
IP string `json:"ip"`
|
||||
ResponseTime float64 `json:"response_time"`
|
||||
|
||||
encoded []byte
|
||||
err error
|
||||
}
|
||||
|
||||
func (ale *accessLogEntry) ensureEncoded() {
|
||||
if ale.encoded == nil && ale.err == nil {
|
||||
ale.encoded, ale.err = json.Marshal(ale)
|
||||
}
|
||||
}
|
||||
|
||||
func (ale *accessLogEntry) Length() int {
|
||||
ale.ensureEncoded()
|
||||
return len(ale.encoded)
|
||||
}
|
||||
|
||||
func (ale *accessLogEntry) Encode() ([]byte, error) {
|
||||
ale.ensureEncoded()
|
||||
return ale.encoded, ale.err
|
||||
}
|
||||
|
||||
func (s *Server) withAccessLog(next http.Handler) http.Handler {
|
||||
|
||||
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
started := time.Now()
|
||||
|
||||
next.ServeHTTP(w, r)
|
||||
|
||||
entry := &accessLogEntry{
|
||||
Method: r.Method,
|
||||
Host: r.Host,
|
||||
Path: r.RequestURI,
|
||||
IP: r.RemoteAddr,
|
||||
ResponseTime: float64(time.Since(started)) / float64(time.Second),
|
||||
}
|
||||
|
||||
// We will use the client's IP address as key. This will cause
|
||||
// all the access log entries of the same IP address to end up
|
||||
// on the same partition.
|
||||
s.AccessLogProducer.Input() <- &sarama.ProducerMessage{
|
||||
Topic: "access_log",
|
||||
Key: sarama.StringEncoder(r.RemoteAddr),
|
||||
Value: entry,
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func newDataCollector(brokerList []string) sarama.SyncProducer {
|
||||
|
||||
// For the data collector, we are looking for strong consistency semantics.
|
||||
// Because we don't change the flush settings, sarama will try to produce messages
|
||||
// as fast as possible to keep latency low.
|
||||
config := sarama.NewConfig()
|
||||
config.Producer.RequiredAcks = sarama.WaitForAll // Wait for all in-sync replicas to ack the message
|
||||
config.Producer.Retry.Max = 10 // Retry up to 10 times to produce the message
|
||||
config.Producer.Return.Successes = true
|
||||
tlsConfig := createTlsConfiguration()
|
||||
if tlsConfig != nil {
|
||||
config.Net.TLS.Config = tlsConfig
|
||||
config.Net.TLS.Enable = true
|
||||
}
|
||||
|
||||
// On the broker side, you may want to change the following settings to get
|
||||
// stronger consistency guarantees:
|
||||
// - For your broker, set `unclean.leader.election.enable` to false
|
||||
// - For the topic, you could increase `min.insync.replicas`.
|
||||
|
||||
producer, err := sarama.NewSyncProducer(brokerList, config)
|
||||
if err != nil {
|
||||
log.Fatalln("Failed to start Sarama producer:", err)
|
||||
}
|
||||
|
||||
return producer
|
||||
}
|
||||
|
||||
func newAccessLogProducer(brokerList []string) sarama.AsyncProducer {
|
||||
|
||||
// For the access log, we are looking for AP semantics, with high throughput.
|
||||
// By creating batches of compressed messages, we reduce network I/O at a cost of more latency.
|
||||
config := sarama.NewConfig()
|
||||
tlsConfig := createTlsConfiguration()
|
||||
if tlsConfig != nil {
|
||||
config.Net.TLS.Enable = true
|
||||
config.Net.TLS.Config = tlsConfig
|
||||
}
|
||||
config.Producer.RequiredAcks = sarama.WaitForLocal // Only wait for the leader to ack
|
||||
config.Producer.Compression = sarama.CompressionSnappy // Compress messages
|
||||
config.Producer.Flush.Frequency = 500 * time.Millisecond // Flush batches every 500ms
|
||||
|
||||
producer, err := sarama.NewAsyncProducer(brokerList, config)
|
||||
if err != nil {
|
||||
log.Fatalln("Failed to start Sarama producer:", err)
|
||||
}
|
||||
|
||||
// We will just log to STDOUT if we're not able to produce messages.
|
||||
// Note: messages will only be returned here after all retry attempts are exhausted.
|
||||
go func() {
|
||||
for err := range producer.Errors() {
|
||||
log.Println("Failed to write access log entry:", err)
|
||||
}
|
||||
}()
|
||||
|
||||
return producer
|
||||
}
|
109
vendor/github.com/Shopify/sarama/examples/http_server/http_server_test.go
generated
vendored
@ -1,109 +0,0 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"io"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"testing"
|
||||
|
||||
"github.com/Shopify/sarama"
|
||||
"github.com/Shopify/sarama/mocks"
|
||||
)
|
||||
|
||||
// In normal operation, we expect one access log entry,
|
||||
// and one data collector entry. Let's assume both will succeed.
|
||||
// We should return an HTTP 200 status.
|
||||
func TestCollectSuccessfully(t *testing.T) {
|
||||
dataCollectorMock := mocks.NewSyncProducer(t, nil)
|
||||
dataCollectorMock.ExpectSendMessageAndSucceed()
|
||||
|
||||
accessLogProducerMock := mocks.NewAsyncProducer(t, nil)
|
||||
accessLogProducerMock.ExpectInputAndSucceed()
|
||||
|
||||
// Now, use dependency injection to use the mocks.
|
||||
s := &Server{
|
||||
DataCollector: dataCollectorMock,
|
||||
AccessLogProducer: accessLogProducerMock,
|
||||
}
|
||||
|
||||
// The Server's Close call is important; it will call Close on
|
||||
// the two mock producers, which will then validate whether all
|
||||
// expectations are resolved.
|
||||
defer safeClose(t, s)
|
||||
|
||||
req, err := http.NewRequest("GET", "http://example.com/?data", nil)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
res := httptest.NewRecorder()
|
||||
s.Handler().ServeHTTP(res, req)
|
||||
|
||||
if res.Code != 200 {
|
||||
t.Errorf("Expected HTTP status 200, found %d", res.Code)
|
||||
}
|
||||
|
||||
if string(res.Body.Bytes()) != "Your data is stored with unique identifier important/0/1" {
|
||||
t.Error("Unexpected response body", res.Body)
|
||||
}
|
||||
}
|
||||
|
||||
// Now, let's see if we handle the case of not being able to produce
|
||||
// to the data collector properly. In this case we should return a 500 status.
|
||||
func TestCollectionFailure(t *testing.T) {
|
||||
dataCollectorMock := mocks.NewSyncProducer(t, nil)
|
||||
dataCollectorMock.ExpectSendMessageAndFail(sarama.ErrRequestTimedOut)
|
||||
|
||||
accessLogProducerMock := mocks.NewAsyncProducer(t, nil)
|
||||
accessLogProducerMock.ExpectInputAndSucceed()
|
||||
|
||||
s := &Server{
|
||||
DataCollector: dataCollectorMock,
|
||||
AccessLogProducer: accessLogProducerMock,
|
||||
}
|
||||
defer safeClose(t, s)
|
||||
|
||||
req, err := http.NewRequest("GET", "http://example.com/?data", nil)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
res := httptest.NewRecorder()
|
||||
s.Handler().ServeHTTP(res, req)
|
||||
|
||||
if res.Code != 500 {
|
||||
t.Errorf("Expected HTTP status 500, found %d", res.Code)
|
||||
}
|
||||
}
|
||||
|
||||
// We don't expect any data collector calls because the path is wrong,
|
||||
// so we are not setting any expectations on the dataCollectorMock. It
|
||||
// will still generate an access log entry though.
|
||||
func TestWrongPath(t *testing.T) {
|
||||
dataCollectorMock := mocks.NewSyncProducer(t, nil)
|
||||
|
||||
accessLogProducerMock := mocks.NewAsyncProducer(t, nil)
|
||||
accessLogProducerMock.ExpectInputAndSucceed()
|
||||
|
||||
s := &Server{
|
||||
DataCollector: dataCollectorMock,
|
||||
AccessLogProducer: accessLogProducerMock,
|
||||
}
|
||||
defer safeClose(t, s)
|
||||
|
||||
req, err := http.NewRequest("GET", "http://example.com/wrong?data", nil)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
res := httptest.NewRecorder()
|
||||
|
||||
s.Handler().ServeHTTP(res, req)
|
||||
|
||||
if res.Code != 404 {
|
||||
t.Errorf("Expected HTTP status 404, found %d", res.Code)
|
||||
}
|
||||
}
|
||||
|
||||
func safeClose(t *testing.T, o io.Closer) {
|
||||
if err := o.Close(); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
}
|
34
vendor/github.com/Shopify/sarama/fetch_request_test.go
generated
vendored
@ -1,34 +0,0 @@
|
||||
package sarama
|
||||
|
||||
import "testing"
|
||||
|
||||
var (
|
||||
fetchRequestNoBlocks = []byte{
|
||||
0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
|
||||
0x00, 0x00, 0x00, 0x00}
|
||||
|
||||
fetchRequestWithProperties = []byte{
|
||||
0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, 0xEF,
|
||||
0x00, 0x00, 0x00, 0x00}
|
||||
|
||||
fetchRequestOneBlock = []byte{
|
||||
0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
|
||||
0x00, 0x00, 0x00, 0x01,
|
||||
0x00, 0x05, 't', 'o', 'p', 'i', 'c',
|
||||
0x00, 0x00, 0x00, 0x01,
|
||||
0x00, 0x00, 0x00, 0x12, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x34, 0x00, 0x00, 0x00, 0x56}
|
||||
)
|
||||
|
||||
func TestFetchRequest(t *testing.T) {
|
||||
request := new(FetchRequest)
|
||||
testRequest(t, "no blocks", request, fetchRequestNoBlocks)
|
||||
|
||||
request.MaxWaitTime = 0x20
|
||||
request.MinBytes = 0xEF
|
||||
testRequest(t, "with properties", request, fetchRequestWithProperties)
|
||||
|
||||
request.MaxWaitTime = 0
|
||||
request.MinBytes = 0
|
||||
request.AddBlock("topic", 0x12, 0x34, 0x56)
|
||||
testRequest(t, "one block", request, fetchRequestOneBlock)
|
||||
}
|
84
vendor/github.com/Shopify/sarama/fetch_response_test.go
generated
vendored
@ -1,84 +0,0 @@
|
||||
package sarama
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"testing"
|
||||
)
|
||||
|
||||
var (
|
||||
emptyFetchResponse = []byte{
|
||||
0x00, 0x00, 0x00, 0x00}
|
||||
|
||||
oneMessageFetchResponse = []byte{
|
||||
0x00, 0x00, 0x00, 0x01,
|
||||
0x00, 0x05, 't', 'o', 'p', 'i', 'c',
|
||||
0x00, 0x00, 0x00, 0x01,
|
||||
0x00, 0x00, 0x00, 0x05,
|
||||
0x00, 0x01,
|
||||
0x00, 0x00, 0x00, 0x00, 0x10, 0x10, 0x10, 0x10,
|
||||
0x00, 0x00, 0x00, 0x1C,
|
||||
// messageSet
|
||||
0x00, 0x00, 0x00, 0x00, 0x00, 0x55, 0x00, 0x00,
|
||||
0x00, 0x00, 0x00, 0x10,
|
||||
// message
|
||||
0x23, 0x96, 0x4a, 0xf7, // CRC
|
||||
0x00,
|
||||
0x00,
|
||||
0xFF, 0xFF, 0xFF, 0xFF,
|
||||
0x00, 0x00, 0x00, 0x02, 0x00, 0xEE}
|
||||
)
|
||||
|
||||
func TestEmptyFetchResponse(t *testing.T) {
|
||||
response := FetchResponse{}
|
||||
testVersionDecodable(t, "empty", &response, emptyFetchResponse, 0)
|
||||
|
||||
if len(response.Blocks) != 0 {
|
||||
t.Error("Decoding produced topic blocks where there were none.")
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func TestOneMessageFetchResponse(t *testing.T) {
|
||||
response := FetchResponse{}
|
||||
testVersionDecodable(t, "one message", &response, oneMessageFetchResponse, 0)
|
||||
|
||||
if len(response.Blocks) != 1 {
|
||||
t.Fatal("Decoding produced incorrect number of topic blocks.")
|
||||
}
|
||||
|
||||
if len(response.Blocks["topic"]) != 1 {
|
||||
t.Fatal("Decoding produced incorrect number of partition blocks for topic.")
|
||||
}
|
||||
|
||||
block := response.GetBlock("topic", 5)
|
||||
if block == nil {
|
||||
t.Fatal("GetBlock didn't return block.")
|
||||
}
|
||||
if block.Err != ErrOffsetOutOfRange {
|
||||
t.Error("Decoding didn't produce correct error code.")
|
||||
}
|
||||
if block.HighWaterMarkOffset != 0x10101010 {
|
||||
t.Error("Decoding didn't produce correct high water mark offset.")
|
||||
}
|
||||
if block.MsgSet.PartialTrailingMessage {
|
||||
t.Error("Decoding detected a partial trailing message where there wasn't one.")
|
||||
}
|
||||
|
||||
if len(block.MsgSet.Messages) != 1 {
|
||||
t.Fatal("Decoding produced incorrect number of messages.")
|
||||
}
|
||||
msgBlock := block.MsgSet.Messages[0]
|
||||
if msgBlock.Offset != 0x550000 {
|
||||
t.Error("Decoding produced incorrect message offset.")
|
||||
}
|
||||
msg := msgBlock.Msg
|
||||
if msg.Codec != CompressionNone {
|
||||
t.Error("Decoding produced incorrect message compression.")
|
||||
}
|
||||
if msg.Key != nil {
|
||||
t.Error("Decoding produced message key where there was none.")
|
||||
}
|
||||
if !bytes.Equal(msg.Value, []byte{0x00, 0xEE}) {
|
||||
t.Error("Decoding produced incorrect message value.")
|
||||
}
|
||||
}
|
90
vendor/github.com/Shopify/sarama/functional_client_test.go
generated
vendored
@ -1,90 +0,0 @@
|
||||
package sarama
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
func TestFuncConnectionFailure(t *testing.T) {
|
||||
setupFunctionalTest(t)
|
||||
defer teardownFunctionalTest(t)
|
||||
|
||||
Proxies["kafka1"].Enabled = false
|
||||
SaveProxy(t, "kafka1")
|
||||
|
||||
config := NewConfig()
|
||||
config.Metadata.Retry.Max = 1
|
||||
|
||||
_, err := NewClient([]string{kafkaBrokers[0]}, config)
|
||||
if err != ErrOutOfBrokers {
|
||||
t.Fatal("Expected returned error to be ErrOutOfBrokers, but was: ", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestFuncClientMetadata(t *testing.T) {
|
||||
setupFunctionalTest(t)
|
||||
defer teardownFunctionalTest(t)
|
||||
|
||||
config := NewConfig()
|
||||
config.Metadata.Retry.Max = 1
|
||||
config.Metadata.Retry.Backoff = 10 * time.Millisecond
|
||||
client, err := NewClient(kafkaBrokers, config)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if err := client.RefreshMetadata("unknown_topic"); err != ErrUnknownTopicOrPartition {
|
||||
t.Error("Expected ErrUnknownTopicOrPartition, got", err)
|
||||
}
|
||||
|
||||
if _, err := client.Leader("unknown_topic", 0); err != ErrUnknownTopicOrPartition {
|
||||
t.Error("Expected ErrUnknownTopicOrPartition, got", err)
|
||||
}
|
||||
|
||||
if _, err := client.Replicas("invalid/topic", 0); err != ErrUnknownTopicOrPartition {
|
||||
t.Error("Expected ErrUnknownTopicOrPartition, got", err)
|
||||
}
|
||||
|
||||
partitions, err := client.Partitions("test.4")
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
if len(partitions) != 4 {
|
||||
t.Errorf("Expected test.4 topic to have 4 partitions, found %v", partitions)
|
||||
}
|
||||
|
||||
partitions, err = client.Partitions("test.1")
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
if len(partitions) != 1 {
|
||||
t.Errorf("Expected test.1 topic to have 1 partitions, found %v", partitions)
|
||||
}
|
||||
|
||||
safeClose(t, client)
|
||||
}
|
||||
|
||||
func TestFuncClientCoordinator(t *testing.T) {
|
||||
checkKafkaVersion(t, "0.8.2")
|
||||
setupFunctionalTest(t)
|
||||
defer teardownFunctionalTest(t)
|
||||
|
||||
client, err := NewClient(kafkaBrokers, nil)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
for i := 0; i < 10; i++ {
|
||||
broker, err := client.Coordinator(fmt.Sprintf("another_new_consumer_group_%d", i))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if connected, err := broker.Connected(); !connected || err != nil {
|
||||
t.Errorf("Expected to coordinator %s broker to be properly connected.", broker.Addr())
|
||||
}
|
||||
}
|
||||
|
||||
safeClose(t, client)
|
||||
}
|
61
vendor/github.com/Shopify/sarama/functional_consumer_test.go
generated
vendored
@ -1,61 +0,0 @@
|
||||
package sarama
|
||||
|
||||
import (
|
||||
"math"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestFuncConsumerOffsetOutOfRange(t *testing.T) {
|
||||
setupFunctionalTest(t)
|
||||
defer teardownFunctionalTest(t)
|
||||
|
||||
consumer, err := NewConsumer(kafkaBrokers, nil)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if _, err := consumer.ConsumePartition("test.1", 0, -10); err != ErrOffsetOutOfRange {
|
||||
t.Error("Expected ErrOffsetOutOfRange, got:", err)
|
||||
}
|
||||
|
||||
if _, err := consumer.ConsumePartition("test.1", 0, math.MaxInt64); err != ErrOffsetOutOfRange {
|
||||
t.Error("Expected ErrOffsetOutOfRange, got:", err)
|
||||
}
|
||||
|
||||
safeClose(t, consumer)
|
||||
}
|
||||
|
||||
func TestConsumerHighWaterMarkOffset(t *testing.T) {
|
||||
setupFunctionalTest(t)
|
||||
defer teardownFunctionalTest(t)
|
||||
|
||||
p, err := NewSyncProducer(kafkaBrokers, nil)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer safeClose(t, p)
|
||||
|
||||
_, offset, err := p.SendMessage(&ProducerMessage{Topic: "test.1", Value: StringEncoder("Test")})
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
c, err := NewConsumer(kafkaBrokers, nil)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer safeClose(t, c)
|
||||
|
||||
pc, err := c.ConsumePartition("test.1", 0, OffsetOldest)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
<-pc.Messages()
|
||||
|
||||
if hwmo := pc.HighWaterMarkOffset(); hwmo != offset+1 {
|
||||
t.Logf("Last produced offset %d; high water mark should be one higher but found %d.", offset, hwmo)
|
||||
}
|
||||
|
||||
safeClose(t, pc)
|
||||
}
|
47
vendor/github.com/Shopify/sarama/functional_offset_manager_test.go
generated
vendored
@ -1,47 +0,0 @@
|
||||
package sarama
|
||||
|
||||
import (
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestFuncOffsetManager(t *testing.T) {
|
||||
checkKafkaVersion(t, "0.8.2")
|
||||
setupFunctionalTest(t)
|
||||
defer teardownFunctionalTest(t)
|
||||
|
||||
client, err := NewClient(kafkaBrokers, nil)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
offsetManager, err := NewOffsetManagerFromClient("sarama.TestFuncOffsetManager", client)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
pom1, err := offsetManager.ManagePartition("test.1", 0)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
pom1.MarkOffset(10, "test metadata")
|
||||
safeClose(t, pom1)
|
||||
|
||||
pom2, err := offsetManager.ManagePartition("test.1", 0)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
offset, metadata := pom2.NextOffset()
|
||||
|
||||
if offset != 10 {
|
||||
t.Errorf("Expected the next offset to be 10, found %d.", offset)
|
||||
}
|
||||
if metadata != "test metadata" {
|
||||
t.Errorf("Expected metadata to be 'test metadata', found %s.", metadata)
|
||||
}
|
||||
|
||||
safeClose(t, pom2)
|
||||
safeClose(t, offsetManager)
|
||||
safeClose(t, client)
|
||||
}
|
323
vendor/github.com/Shopify/sarama/functional_producer_test.go
generated
vendored
@ -1,323 +0,0 @@
|
||||
package sarama
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"sync"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
toxiproxy "github.com/Shopify/toxiproxy/client"
|
||||
"github.com/rcrowley/go-metrics"
|
||||
)
|
||||
|
||||
const TestBatchSize = 1000
|
||||
|
||||
func TestFuncProducing(t *testing.T) {
|
||||
config := NewConfig()
|
||||
testProducingMessages(t, config)
|
||||
}
|
||||
|
||||
func TestFuncProducingGzip(t *testing.T) {
|
||||
config := NewConfig()
|
||||
config.Producer.Compression = CompressionGZIP
|
||||
testProducingMessages(t, config)
|
||||
}
|
||||
|
||||
func TestFuncProducingSnappy(t *testing.T) {
|
||||
config := NewConfig()
|
||||
config.Producer.Compression = CompressionSnappy
|
||||
testProducingMessages(t, config)
|
||||
}
|
||||
|
||||
func TestFuncProducingNoResponse(t *testing.T) {
|
||||
config := NewConfig()
|
||||
config.Producer.RequiredAcks = NoResponse
|
||||
testProducingMessages(t, config)
|
||||
}
|
||||
|
||||
func TestFuncProducingFlushing(t *testing.T) {
|
||||
config := NewConfig()
|
||||
config.Producer.Flush.Messages = TestBatchSize / 8
|
||||
config.Producer.Flush.Frequency = 250 * time.Millisecond
|
||||
testProducingMessages(t, config)
|
||||
}
|
||||
|
||||
func TestFuncMultiPartitionProduce(t *testing.T) {
|
||||
setupFunctionalTest(t)
|
||||
defer teardownFunctionalTest(t)
|
||||
|
||||
config := NewConfig()
|
||||
config.ChannelBufferSize = 20
|
||||
config.Producer.Flush.Frequency = 50 * time.Millisecond
|
||||
config.Producer.Flush.Messages = 200
|
||||
config.Producer.Return.Successes = true
|
||||
producer, err := NewSyncProducer(kafkaBrokers, config)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
var wg sync.WaitGroup
|
||||
wg.Add(TestBatchSize)
|
||||
|
||||
for i := 1; i <= TestBatchSize; i++ {
|
||||
go func(i int) {
|
||||
defer wg.Done()
|
||||
msg := &ProducerMessage{Topic: "test.64", Key: nil, Value: StringEncoder(fmt.Sprintf("hur %d", i))}
|
||||
if _, _, err := producer.SendMessage(msg); err != nil {
|
||||
t.Error(i, err)
|
||||
}
|
||||
}(i)
|
||||
}
|
||||
|
||||
wg.Wait()
|
||||
if err := producer.Close(); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestFuncProducingToInvalidTopic(t *testing.T) {
|
||||
setupFunctionalTest(t)
|
||||
defer teardownFunctionalTest(t)
|
||||
|
||||
producer, err := NewSyncProducer(kafkaBrokers, nil)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if _, _, err := producer.SendMessage(&ProducerMessage{Topic: "in/valid"}); err != ErrUnknownTopicOrPartition {
|
||||
t.Error("Expected ErrUnknownTopicOrPartition, found", err)
|
||||
}
|
||||
|
||||
if _, _, err := producer.SendMessage(&ProducerMessage{Topic: "in/valid"}); err != ErrUnknownTopicOrPartition {
|
||||
t.Error("Expected ErrUnknownTopicOrPartition, found", err)
|
||||
}
|
||||
|
||||
safeClose(t, producer)
|
||||
}
|
||||
|
||||
func testProducingMessages(t *testing.T, config *Config) {
|
||||
setupFunctionalTest(t)
|
||||
defer teardownFunctionalTest(t)
|
||||
|
||||
// Configure some latency in order to properly validate the request latency metric
|
||||
for _, proxy := range Proxies {
|
||||
if _, err := proxy.AddToxic("", "latency", "", 1, toxiproxy.Attributes{"latency": 10}); err != nil {
|
||||
t.Fatal("Unable to configure latency toxicity", err)
|
||||
}
|
||||
}
|
||||
|
||||
config.Producer.Return.Successes = true
|
||||
config.Consumer.Return.Errors = true
|
||||
|
||||
client, err := NewClient(kafkaBrokers, config)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Keep in mind the current offset
|
||||
initialOffset, err := client.GetOffset("test.1", 0, OffsetNewest)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
producer, err := NewAsyncProducerFromClient(client)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
expectedResponses := TestBatchSize
|
||||
for i := 1; i <= TestBatchSize; {
|
||||
msg := &ProducerMessage{Topic: "test.1", Key: nil, Value: StringEncoder(fmt.Sprintf("testing %d", i))}
|
||||
select {
|
||||
case producer.Input() <- msg:
|
||||
i++
|
||||
case ret := <-producer.Errors():
|
||||
t.Fatal(ret.Err)
|
||||
case <-producer.Successes():
|
||||
expectedResponses--
|
||||
}
|
||||
}
|
||||
for expectedResponses > 0 {
|
||||
select {
|
||||
case ret := <-producer.Errors():
|
||||
t.Fatal(ret.Err)
|
||||
case <-producer.Successes():
|
||||
expectedResponses--
|
||||
}
|
||||
}
|
||||
safeClose(t, producer)
|
||||
|
||||
// Validate producer metrics before using the consumer minus the offset request
|
||||
validateMetrics(t, client)
|
||||
|
||||
master, err := NewConsumerFromClient(client)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
consumer, err := master.ConsumePartition("test.1", 0, initialOffset)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
for i := 1; i <= TestBatchSize; i++ {
|
||||
select {
|
||||
case <-time.After(10 * time.Second):
|
||||
t.Fatal("Not received any more events in the last 10 seconds.")
|
||||
|
||||
case err := <-consumer.Errors():
|
||||
t.Error(err)
|
||||
|
||||
case message := <-consumer.Messages():
|
||||
if string(message.Value) != fmt.Sprintf("testing %d", i) {
|
||||
t.Fatalf("Unexpected message with index %d: %s", i, message.Value)
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
safeClose(t, consumer)
|
||||
safeClose(t, client)
|
||||
}
|
||||
|
||||
func validateMetrics(t *testing.T, client Client) {
|
||||
// Get the broker used by test1 topic
|
||||
var broker *Broker
|
||||
if partitions, err := client.Partitions("test.1"); err != nil {
|
||||
t.Error(err)
|
||||
} else {
|
||||
for _, partition := range partitions {
|
||||
if b, err := client.Leader("test.1", partition); err != nil {
|
||||
t.Error(err)
|
||||
} else {
|
||||
if broker != nil && b != broker {
|
||||
t.Fatal("Expected only one broker, got at least 2")
|
||||
}
|
||||
broker = b
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
metricValidators := newMetricValidators()
|
||||
noResponse := client.Config().Producer.RequiredAcks == NoResponse
|
||||
compressionEnabled := client.Config().Producer.Compression != CompressionNone
|
||||
|
||||
// We are adding 10ms of latency to all requests with toxiproxy
|
||||
minRequestLatencyInMs := 10
|
||||
if noResponse {
|
||||
// but when we do not wait for a response it can be less than 1ms
|
||||
minRequestLatencyInMs = 0
|
||||
}
|
||||
|
||||
// We read at least 1 byte from the broker
|
||||
metricValidators.registerForAllBrokers(broker, minCountMeterValidator("incoming-byte-rate", 1))
|
||||
// in at least 3 global requests (1 for metadata request, 1 for offset request and N for produce request)
|
||||
metricValidators.register(minCountMeterValidator("request-rate", 3))
|
||||
metricValidators.register(minCountHistogramValidator("request-size", 3))
|
||||
metricValidators.register(minValHistogramValidator("request-size", 1))
|
||||
metricValidators.register(minValHistogramValidator("request-latency-in-ms", minRequestLatencyInMs))
|
||||
// and at least 2 requests to the registered broker (offset + produces)
|
||||
metricValidators.registerForBroker(broker, minCountMeterValidator("request-rate", 2))
|
||||
metricValidators.registerForBroker(broker, minCountHistogramValidator("request-size", 2))
|
||||
metricValidators.registerForBroker(broker, minValHistogramValidator("request-size", 1))
|
||||
metricValidators.registerForBroker(broker, minValHistogramValidator("request-latency-in-ms", minRequestLatencyInMs))
|
||||
|
||||
// We send at least 1 batch
|
||||
metricValidators.registerForGlobalAndTopic("test_1", minCountHistogramValidator("batch-size", 1))
|
||||
metricValidators.registerForGlobalAndTopic("test_1", minValHistogramValidator("batch-size", 1))
|
||||
if compressionEnabled {
|
||||
// We record compression ratios between [0.50, 10.00] (50-1000 with a histogram) for at least one "fake" record
|
||||
metricValidators.registerForGlobalAndTopic("test_1", minCountHistogramValidator("compression-ratio", 1))
|
||||
metricValidators.registerForGlobalAndTopic("test_1", minValHistogramValidator("compression-ratio", 50))
|
||||
metricValidators.registerForGlobalAndTopic("test_1", maxValHistogramValidator("compression-ratio", 1000))
|
||||
} else {
|
||||
// We record compression ratios of 1.00 (100 with a histogram) for every TestBatchSize record
|
||||
metricValidators.registerForGlobalAndTopic("test_1", countHistogramValidator("compression-ratio", TestBatchSize))
|
||||
metricValidators.registerForGlobalAndTopic("test_1", minValHistogramValidator("compression-ratio", 100))
|
||||
metricValidators.registerForGlobalAndTopic("test_1", maxValHistogramValidator("compression-ratio", 100))
|
||||
}
|
||||
|
||||
// We send exactly TestBatchSize messages
|
||||
metricValidators.registerForGlobalAndTopic("test_1", countMeterValidator("record-send-rate", TestBatchSize))
|
||||
// We send at least one record per request
|
||||
metricValidators.registerForGlobalAndTopic("test_1", minCountHistogramValidator("records-per-request", 1))
|
||||
metricValidators.registerForGlobalAndTopic("test_1", minValHistogramValidator("records-per-request", 1))
|
||||
|
||||
// We send at least 1 byte to the broker
|
||||
metricValidators.registerForAllBrokers(broker, minCountMeterValidator("outgoing-byte-rate", 1))
|
||||
if noResponse {
|
||||
// in exactly 2 global responses (metadata + offset)
|
||||
metricValidators.register(countMeterValidator("response-rate", 2))
|
||||
metricValidators.register(minCountHistogramValidator("response-size", 2))
|
||||
metricValidators.register(minValHistogramValidator("response-size", 1))
|
||||
// and exactly 1 offset response for the registered broker
|
||||
metricValidators.registerForBroker(broker, countMeterValidator("response-rate", 1))
|
||||
metricValidators.registerForBroker(broker, minCountHistogramValidator("response-size", 1))
|
||||
metricValidators.registerForBroker(broker, minValHistogramValidator("response-size", 1))
|
||||
} else {
|
||||
// in at least 3 global responses (metadata + offset + produces)
|
||||
metricValidators.register(minCountMeterValidator("response-rate", 3))
|
||||
metricValidators.register(minCountHistogramValidator("response-size", 3))
|
||||
metricValidators.register(minValHistogramValidator("response-size", 1))
|
||||
// and at least 2 for the registered broker
|
||||
metricValidators.registerForBroker(broker, minCountMeterValidator("response-rate", 2))
|
||||
metricValidators.registerForBroker(broker, minCountHistogramValidator("response-size", 2))
|
||||
metricValidators.registerForBroker(broker, minValHistogramValidator("response-size", 1))
|
||||
}
|
||||
|
||||
// Run the validators
|
||||
metricValidators.run(t, client.Config().MetricRegistry)
|
||||
}
|
||||
|
||||
// Benchmarks
|
||||
|
||||
func BenchmarkProducerSmall(b *testing.B) {
|
||||
benchmarkProducer(b, nil, "test.64", ByteEncoder(make([]byte, 128)))
|
||||
}
|
||||
func BenchmarkProducerMedium(b *testing.B) {
|
||||
benchmarkProducer(b, nil, "test.64", ByteEncoder(make([]byte, 1024)))
|
||||
}
|
||||
func BenchmarkProducerLarge(b *testing.B) {
|
||||
benchmarkProducer(b, nil, "test.64", ByteEncoder(make([]byte, 8192)))
|
||||
}
|
||||
func BenchmarkProducerSmallSinglePartition(b *testing.B) {
|
||||
benchmarkProducer(b, nil, "test.1", ByteEncoder(make([]byte, 128)))
|
||||
}
|
||||
func BenchmarkProducerMediumSnappy(b *testing.B) {
|
||||
conf := NewConfig()
|
||||
conf.Producer.Compression = CompressionSnappy
|
||||
benchmarkProducer(b, conf, "test.1", ByteEncoder(make([]byte, 1024)))
|
||||
}
|
||||
|
||||
func benchmarkProducer(b *testing.B, conf *Config, topic string, value Encoder) {
|
||||
setupFunctionalTest(b)
|
||||
defer teardownFunctionalTest(b)
|
||||
|
||||
metricsDisable := os.Getenv("METRICS_DISABLE")
|
||||
if metricsDisable != "" {
|
||||
previousUseNilMetrics := metrics.UseNilMetrics
|
||||
Logger.Println("Disabling metrics using no-op implementation")
|
||||
metrics.UseNilMetrics = true
|
||||
// Restore previous setting
|
||||
defer func() {
|
||||
metrics.UseNilMetrics = previousUseNilMetrics
|
||||
}()
|
||||
}
|
||||
|
||||
producer, err := NewAsyncProducer(kafkaBrokers, conf)
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
|
||||
b.ResetTimer()
|
||||
|
||||
for i := 1; i <= b.N; {
|
||||
msg := &ProducerMessage{Topic: topic, Key: StringEncoder(fmt.Sprintf("%d", i)), Value: value}
|
||||
select {
|
||||
case producer.Input() <- msg:
|
||||
i++
|
||||
case ret := <-producer.Errors():
|
||||
b.Fatal(ret.Err)
|
||||
}
|
||||
}
|
||||
safeClose(b, producer)
|
||||
}
|
148
vendor/github.com/Shopify/sarama/functional_test.go
generated
vendored
@ -1,148 +0,0 @@
|
||||
package sarama
|
||||
|
||||
import (
|
||||
"log"
|
||||
"math/rand"
|
||||
"net"
|
||||
"os"
|
||||
"strconv"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
toxiproxy "github.com/Shopify/toxiproxy/client"
|
||||
)
|
||||
|
||||
const (
|
||||
VagrantToxiproxy = "http://192.168.100.67:8474"
|
||||
VagrantKafkaPeers = "192.168.100.67:9091,192.168.100.67:9092,192.168.100.67:9093,192.168.100.67:9094,192.168.100.67:9095"
|
||||
VagrantZookeeperPeers = "192.168.100.67:2181,192.168.100.67:2182,192.168.100.67:2183,192.168.100.67:2184,192.168.100.67:2185"
|
||||
)
|
||||
|
||||
var (
|
||||
kafkaAvailable, kafkaRequired bool
|
||||
kafkaBrokers []string
|
||||
|
||||
proxyClient *toxiproxy.Client
|
||||
Proxies map[string]*toxiproxy.Proxy
|
||||
ZKProxies = []string{"zk1", "zk2", "zk3", "zk4", "zk5"}
|
||||
KafkaProxies = []string{"kafka1", "kafka2", "kafka3", "kafka4", "kafka5"}
|
||||
)
|
||||
|
||||
func init() {
|
||||
if os.Getenv("DEBUG") == "true" {
|
||||
Logger = log.New(os.Stdout, "[sarama] ", log.LstdFlags)
|
||||
}
|
||||
|
||||
seed := time.Now().UTC().UnixNano()
|
||||
if tmp := os.Getenv("TEST_SEED"); tmp != "" {
|
||||
seed, _ = strconv.ParseInt(tmp, 0, 64)
|
||||
}
|
||||
Logger.Println("Using random seed:", seed)
|
||||
rand.Seed(seed)
|
||||
|
||||
proxyAddr := os.Getenv("TOXIPROXY_ADDR")
|
||||
if proxyAddr == "" {
|
||||
proxyAddr = VagrantToxiproxy
|
||||
}
|
||||
proxyClient = toxiproxy.NewClient(proxyAddr)
|
||||
|
||||
kafkaPeers := os.Getenv("KAFKA_PEERS")
|
||||
if kafkaPeers == "" {
|
||||
kafkaPeers = VagrantKafkaPeers
|
||||
}
|
||||
kafkaBrokers = strings.Split(kafkaPeers, ",")
|
||||
|
||||
if c, err := net.DialTimeout("tcp", kafkaBrokers[0], 5*time.Second); err == nil {
|
||||
if err = c.Close(); err == nil {
|
||||
kafkaAvailable = true
|
||||
}
|
||||
}
|
||||
|
||||
kafkaRequired = os.Getenv("CI") != ""
|
||||
}
|
||||
|
||||
func checkKafkaAvailability(t testing.TB) {
|
||||
if !kafkaAvailable {
|
||||
if kafkaRequired {
|
||||
t.Fatalf("Kafka broker is not available on %s. Set KAFKA_PEERS to connect to Kafka on a different location.", kafkaBrokers[0])
|
||||
} else {
|
||||
t.Skipf("Kafka broker is not available on %s. Set KAFKA_PEERS to connect to Kafka on a different location.", kafkaBrokers[0])
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func checkKafkaVersion(t testing.TB, requiredVersion string) {
|
||||
kafkaVersion := os.Getenv("KAFKA_VERSION")
|
||||
if kafkaVersion == "" {
|
||||
t.Logf("No KAFKA_VERSION set. This test requires Kafka version %s or higher. Continuing...", requiredVersion)
|
||||
} else {
|
||||
available := parseKafkaVersion(kafkaVersion)
|
||||
required := parseKafkaVersion(requiredVersion)
|
||||
if !available.satisfies(required) {
|
||||
t.Skipf("Kafka version %s is required for this test; you have %s. Skipping...", requiredVersion, kafkaVersion)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func resetProxies(t testing.TB) {
|
||||
if err := proxyClient.ResetState(); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
Proxies = nil
|
||||
}
|
||||
|
||||
func fetchProxies(t testing.TB) {
|
||||
var err error
|
||||
Proxies, err = proxyClient.Proxies()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
func SaveProxy(t *testing.T, px string) {
|
||||
if err := Proxies[px].Save(); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
func setupFunctionalTest(t testing.TB) {
|
||||
checkKafkaAvailability(t)
|
||||
resetProxies(t)
|
||||
fetchProxies(t)
|
||||
}
|
||||
|
||||
func teardownFunctionalTest(t testing.TB) {
|
||||
resetProxies(t)
|
||||
}
|
||||
|
||||
type kafkaVersion []int
|
||||
|
||||
func (kv kafkaVersion) satisfies(other kafkaVersion) bool {
|
||||
var ov int
|
||||
for index, v := range kv {
|
||||
if len(other) <= index {
|
||||
ov = 0
|
||||
} else {
|
||||
ov = other[index]
|
||||
}
|
||||
|
||||
if v < ov {
|
||||
return false
|
||||
} else if v > ov {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func parseKafkaVersion(version string) kafkaVersion {
|
||||
numbers := strings.Split(version, ".")
|
||||
result := make(kafkaVersion, 0, len(numbers))
|
||||
for _, number := range numbers {
|
||||
nr, _ := strconv.Atoi(number)
|
||||
result = append(result, nr)
|
||||
}
|
||||
|
||||
return result
|
||||
}
|
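For reference, a minimal sketch of how the version comparison above behaves; it assumes it sits in the same package as the unexported parseKafkaVersion and kafkaVersion helpers shown above, and the version strings are only illustrative:

```go
import "testing"

// Hypothetical in-package test exercising kafkaVersion.satisfies.
func TestKafkaVersionSatisfiesSketch(t *testing.T) {
	// 0.10.2 is at least 0.8.2, so a test requiring 0.8.2 would run.
	if !parseKafkaVersion("0.10.2").satisfies(parseKafkaVersion("0.8.2")) {
		t.Error("expected 0.10.2 to satisfy 0.8.2")
	}
	// 0.8.2 is older than 0.10.2, so checkKafkaVersion would skip such a test.
	if parseKafkaVersion("0.8.2").satisfies(parseKafkaVersion("0.10.2")) {
		t.Error("expected 0.8.2 not to satisfy 0.10.2")
	}
}
```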
21
vendor/github.com/Shopify/sarama/heartbeat_request_test.go
generated
vendored
@ -1,21 +0,0 @@
|
||||
package sarama
|
||||
|
||||
import "testing"
|
||||
|
||||
var (
|
||||
basicHeartbeatRequest = []byte{
|
||||
0, 3, 'f', 'o', 'o', // Group ID
|
||||
0x00, 0x01, 0x02, 0x03, // Generation ID
|
||||
0, 3, 'b', 'a', 'z', // Member ID
|
||||
}
|
||||
)
|
||||
|
||||
func TestHeartbeatRequest(t *testing.T) {
|
||||
var request *HeartbeatRequest
|
||||
|
||||
request = new(HeartbeatRequest)
|
||||
request.GroupId = "foo"
|
||||
request.GenerationId = 66051
|
||||
request.MemberId = "baz"
|
||||
testRequest(t, "basic", request, basicHeartbeatRequest)
|
||||
}
|
18
vendor/github.com/Shopify/sarama/heartbeat_response_test.go
generated
vendored
@ -1,18 +0,0 @@
|
||||
package sarama
|
||||
|
||||
import "testing"
|
||||
|
||||
var (
|
||||
heartbeatResponseNoError = []byte{
|
||||
0x00, 0x00}
|
||||
)
|
||||
|
||||
func TestHeartbeatResponse(t *testing.T) {
|
||||
var response *HeartbeatResponse
|
||||
|
||||
response = new(HeartbeatResponse)
|
||||
testVersionDecodable(t, "no error", response, heartbeatResponseNoError, 0)
|
||||
if response.Err != ErrNoError {
|
||||
t.Error("Decoding error failed: no error expected but found", response.Err)
|
||||
}
|
||||
}
|
57
vendor/github.com/Shopify/sarama/join_group_request_test.go
generated
vendored
@ -1,57 +0,0 @@
|
||||
package sarama
|
||||
|
||||
import "testing"
|
||||
|
||||
var (
|
||||
joinGroupRequestNoProtocols = []byte{
|
||||
0, 9, 'T', 'e', 's', 't', 'G', 'r', 'o', 'u', 'p', // Group ID
|
||||
0, 0, 0, 100, // Session timeout
|
||||
0, 0, // Member ID
|
||||
0, 8, 'c', 'o', 'n', 's', 'u', 'm', 'e', 'r', // Protocol Type
|
||||
0, 0, 0, 0, // 0 protocol groups
|
||||
}
|
||||
|
||||
joinGroupRequestOneProtocol = []byte{
|
||||
0, 9, 'T', 'e', 's', 't', 'G', 'r', 'o', 'u', 'p', // Group ID
|
||||
0, 0, 0, 100, // Session timeout
|
||||
0, 11, 'O', 'n', 'e', 'P', 'r', 'o', 't', 'o', 'c', 'o', 'l', // Member ID
|
||||
0, 8, 'c', 'o', 'n', 's', 'u', 'm', 'e', 'r', // Protocol Type
|
||||
0, 0, 0, 1, // 1 group protocol
|
||||
0, 3, 'o', 'n', 'e', // Protocol name
|
||||
0, 0, 0, 3, 0x01, 0x02, 0x03, // protocol metadata
|
||||
}
|
||||
)
|
||||
|
||||
func TestJoinGroupRequest(t *testing.T) {
|
||||
request := new(JoinGroupRequest)
|
||||
request.GroupId = "TestGroup"
|
||||
request.SessionTimeout = 100
|
||||
request.ProtocolType = "consumer"
|
||||
testRequest(t, "no protocols", request, joinGroupRequestNoProtocols)
|
||||
}
|
||||
|
||||
func TestJoinGroupRequestOneProtocol(t *testing.T) {
|
||||
request := new(JoinGroupRequest)
|
||||
request.GroupId = "TestGroup"
|
||||
request.SessionTimeout = 100
|
||||
request.MemberId = "OneProtocol"
|
||||
request.ProtocolType = "consumer"
|
||||
request.AddGroupProtocol("one", []byte{0x01, 0x02, 0x03})
|
||||
packet := testRequestEncode(t, "one protocol", request, joinGroupRequestOneProtocol)
|
||||
request.GroupProtocols = make(map[string][]byte)
|
||||
request.GroupProtocols["one"] = []byte{0x01, 0x02, 0x03}
|
||||
testRequestDecode(t, "one protocol", request, packet)
|
||||
}
|
||||
|
||||
func TestJoinGroupRequestDeprecatedEncode(t *testing.T) {
|
||||
request := new(JoinGroupRequest)
|
||||
request.GroupId = "TestGroup"
|
||||
request.SessionTimeout = 100
|
||||
request.MemberId = "OneProtocol"
|
||||
request.ProtocolType = "consumer"
|
||||
request.GroupProtocols = make(map[string][]byte)
|
||||
request.GroupProtocols["one"] = []byte{0x01, 0x02, 0x03}
|
||||
packet := testRequestEncode(t, "one protocol", request, joinGroupRequestOneProtocol)
|
||||
request.AddGroupProtocol("one", []byte{0x01, 0x02, 0x03})
|
||||
testRequestDecode(t, "one protocol", request, packet)
|
||||
}
|
98
vendor/github.com/Shopify/sarama/join_group_response_test.go
generated
vendored
@ -1,98 +0,0 @@
|
||||
package sarama
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"testing"
|
||||
)
|
||||
|
||||
var (
|
||||
joinGroupResponseNoError = []byte{
|
||||
0x00, 0x00, // No error
|
||||
0x00, 0x01, 0x02, 0x03, // Generation ID
|
||||
0, 8, 'p', 'r', 'o', 't', 'o', 'c', 'o', 'l', // Protocol name chosen
|
||||
0, 3, 'f', 'o', 'o', // Leader ID
|
||||
0, 3, 'b', 'a', 'r', // Member ID
|
||||
0, 0, 0, 0, // No member info
|
||||
}
|
||||
|
||||
joinGroupResponseWithError = []byte{
|
||||
0, 23, // Error: inconsistent group protocol
|
||||
0x00, 0x00, 0x00, 0x00, // Generation ID
|
||||
0, 0, // Protocol name chosen
|
||||
0, 0, // Leader ID
|
||||
0, 0, // Member ID
|
||||
0, 0, 0, 0, // No member info
|
||||
}
|
||||
|
||||
joinGroupResponseLeader = []byte{
|
||||
0x00, 0x00, // No error
|
||||
0x00, 0x01, 0x02, 0x03, // Generation ID
|
||||
0, 8, 'p', 'r', 'o', 't', 'o', 'c', 'o', 'l', // Protocol name chosen
|
||||
0, 3, 'f', 'o', 'o', // Leader ID
|
||||
0, 3, 'f', 'o', 'o', // Member ID == Leader ID
|
||||
0, 0, 0, 1, // 1 member
|
||||
0, 3, 'f', 'o', 'o', // Member ID
|
||||
0, 0, 0, 3, 0x01, 0x02, 0x03, // Member metadata
|
||||
}
|
||||
)
|
||||
|
||||
func TestJoinGroupResponse(t *testing.T) {
|
||||
var response *JoinGroupResponse
|
||||
|
||||
response = new(JoinGroupResponse)
|
||||
testVersionDecodable(t, "no error", response, joinGroupResponseNoError, 0)
|
||||
if response.Err != ErrNoError {
|
||||
t.Error("Decoding Err failed: no error expected but found", response.Err)
|
||||
}
|
||||
if response.GenerationId != 66051 {
|
||||
t.Error("Decoding GenerationId failed, found:", response.GenerationId)
|
||||
}
|
||||
if response.LeaderId != "foo" {
|
||||
t.Error("Decoding LeaderId failed, found:", response.LeaderId)
|
||||
}
|
||||
if response.MemberId != "bar" {
|
||||
t.Error("Decoding MemberId failed, found:", response.MemberId)
|
||||
}
|
||||
if len(response.Members) != 0 {
|
||||
t.Error("Decoding Members failed, found:", response.Members)
|
||||
}
|
||||
|
||||
response = new(JoinGroupResponse)
|
||||
testVersionDecodable(t, "with error", response, joinGroupResponseWithError, 0)
|
||||
if response.Err != ErrInconsistentGroupProtocol {
|
||||
t.Error("Decoding Err failed: ErrInconsistentGroupProtocol expected but found", response.Err)
|
||||
}
|
||||
if response.GenerationId != 0 {
|
||||
t.Error("Decoding GenerationId failed, found:", response.GenerationId)
|
||||
}
|
||||
if response.LeaderId != "" {
|
||||
t.Error("Decoding LeaderId failed, found:", response.LeaderId)
|
||||
}
|
||||
if response.MemberId != "" {
|
||||
t.Error("Decoding MemberId failed, found:", response.MemberId)
|
||||
}
|
||||
if len(response.Members) != 0 {
|
||||
t.Error("Decoding Members failed, found:", response.Members)
|
||||
}
|
||||
|
||||
response = new(JoinGroupResponse)
|
||||
testVersionDecodable(t, "with error", response, joinGroupResponseLeader, 0)
|
||||
if response.Err != ErrNoError {
|
||||
t.Error("Decoding Err failed: ErrNoError expected but found", response.Err)
|
||||
}
|
||||
if response.GenerationId != 66051 {
|
||||
t.Error("Decoding GenerationId failed, found:", response.GenerationId)
|
||||
}
|
||||
if response.LeaderId != "foo" {
|
||||
t.Error("Decoding LeaderId failed, found:", response.LeaderId)
|
||||
}
|
||||
if response.MemberId != "foo" {
|
||||
t.Error("Decoding MemberId failed, found:", response.MemberId)
|
||||
}
|
||||
if len(response.Members) != 1 {
|
||||
t.Error("Decoding Members failed, found:", response.Members)
|
||||
}
|
||||
if !reflect.DeepEqual(response.Members["foo"], []byte{0x01, 0x02, 0x03}) {
|
||||
t.Error("Decoding foo member failed, found:", response.Members["foo"])
|
||||
}
|
||||
}
|
19
vendor/github.com/Shopify/sarama/leave_group_request_test.go
generated
vendored
@ -1,19 +0,0 @@
|
||||
package sarama
|
||||
|
||||
import "testing"
|
||||
|
||||
var (
|
||||
basicLeaveGroupRequest = []byte{
|
||||
0, 3, 'f', 'o', 'o',
|
||||
0, 3, 'b', 'a', 'r',
|
||||
}
|
||||
)
|
||||
|
||||
func TestLeaveGroupRequest(t *testing.T) {
|
||||
var request *LeaveGroupRequest
|
||||
|
||||
request = new(LeaveGroupRequest)
|
||||
request.GroupId = "foo"
|
||||
request.MemberId = "bar"
|
||||
testRequest(t, "basic", request, basicLeaveGroupRequest)
|
||||
}
|
24
vendor/github.com/Shopify/sarama/leave_group_response_test.go
generated
vendored
@ -1,24 +0,0 @@
|
||||
package sarama
|
||||
|
||||
import "testing"
|
||||
|
||||
var (
|
||||
leaveGroupResponseNoError = []byte{0x00, 0x00}
|
||||
leaveGroupResponseWithError = []byte{0, 25}
|
||||
)
|
||||
|
||||
func TestLeaveGroupResponse(t *testing.T) {
|
||||
var response *LeaveGroupResponse
|
||||
|
||||
response = new(LeaveGroupResponse)
|
||||
testVersionDecodable(t, "no error", response, leaveGroupResponseNoError, 0)
|
||||
if response.Err != ErrNoError {
|
||||
t.Error("Decoding error failed: no error expected but found", response.Err)
|
||||
}
|
||||
|
||||
response = new(LeaveGroupResponse)
|
||||
testVersionDecodable(t, "with error", response, leaveGroupResponseWithError, 0)
|
||||
if response.Err != ErrUnknownMemberId {
|
||||
t.Error("Decoding error failed: ErrUnknownMemberId expected but found", response.Err)
|
||||
}
|
||||
}
|
7
vendor/github.com/Shopify/sarama/list_groups_request_test.go
generated
vendored
@ -1,7 +0,0 @@
|
||||
package sarama
|
||||
|
||||
import "testing"
|
||||
|
||||
func TestListGroupsRequest(t *testing.T) {
|
||||
testRequest(t, "ListGroupsRequest", &ListGroupsRequest{}, []byte{})
|
||||
}
|
58
vendor/github.com/Shopify/sarama/list_groups_response_test.go
generated
vendored
@ -1,58 +0,0 @@
|
||||
package sarama
|
||||
|
||||
import (
|
||||
"testing"
|
||||
)
|
||||
|
||||
var (
|
||||
listGroupsResponseEmpty = []byte{
|
||||
0, 0, // no error
|
||||
0, 0, 0, 0, // no groups
|
||||
}
|
||||
|
||||
listGroupsResponseError = []byte{
|
||||
0, 31, // ErrClusterAuthorizationFailed
|
||||
0, 0, 0, 0, // no groups
|
||||
}
|
||||
|
||||
listGroupsResponseWithConsumer = []byte{
|
||||
0, 0, // no error
|
||||
0, 0, 0, 1, // 1 group
|
||||
0, 3, 'f', 'o', 'o', // group name
|
||||
0, 8, 'c', 'o', 'n', 's', 'u', 'm', 'e', 'r', // protocol type
|
||||
}
|
||||
)
|
||||
|
||||
func TestListGroupsResponse(t *testing.T) {
|
||||
var response *ListGroupsResponse
|
||||
|
||||
response = new(ListGroupsResponse)
|
||||
testVersionDecodable(t, "no error", response, listGroupsResponseEmpty, 0)
|
||||
if response.Err != ErrNoError {
|
||||
t.Error("Expected no gerror, found:", response.Err)
|
||||
}
|
||||
if len(response.Groups) != 0 {
|
||||
t.Error("Expected no groups")
|
||||
}
|
||||
|
||||
response = new(ListGroupsResponse)
|
||||
testVersionDecodable(t, "no error", response, listGroupsResponseError, 0)
|
||||
if response.Err != ErrClusterAuthorizationFailed {
|
||||
t.Error("Expected no gerror, found:", response.Err)
|
||||
}
|
||||
if len(response.Groups) != 0 {
|
||||
t.Error("Expected no groups")
|
||||
}
|
||||
|
||||
response = new(ListGroupsResponse)
|
||||
testVersionDecodable(t, "no error", response, listGroupsResponseWithConsumer, 0)
|
||||
if response.Err != ErrNoError {
|
||||
t.Error("Expected no gerror, found:", response.Err)
|
||||
}
|
||||
if len(response.Groups) != 1 {
|
||||
t.Error("Expected one group")
|
||||
}
|
||||
if response.Groups["foo"] != "consumer" {
|
||||
t.Error("Expected foo group to use consumer protocol")
|
||||
}
|
||||
}
|
213
vendor/github.com/Shopify/sarama/message_test.go
generated
vendored
@ -1,213 +0,0 @@
|
||||
package sarama
|
||||
|
||||
import (
|
||||
"runtime"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
var (
|
||||
emptyMessage = []byte{
|
||||
167, 236, 104, 3, // CRC
|
||||
0x00, // magic version byte
|
||||
0x00, // attribute flags
|
||||
0xFF, 0xFF, 0xFF, 0xFF, // key
|
||||
0xFF, 0xFF, 0xFF, 0xFF} // value
|
||||
|
||||
emptyV1Message = []byte{
|
||||
204, 47, 121, 217, // CRC
|
||||
0x01, // magic version byte
|
||||
0x00, // attribute flags
|
||||
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // timestamp
|
||||
0xFF, 0xFF, 0xFF, 0xFF, // key
|
||||
0xFF, 0xFF, 0xFF, 0xFF} // value
|
||||
|
||||
emptyV2Message = []byte{
|
||||
167, 236, 104, 3, // CRC
|
||||
0x02, // magic version byte
|
||||
0x00, // attribute flags
|
||||
0xFF, 0xFF, 0xFF, 0xFF, // key
|
||||
0xFF, 0xFF, 0xFF, 0xFF} // value
|
||||
|
||||
emptyGzipMessage = []byte{
|
||||
97, 79, 149, 90, //CRC
|
||||
0x00, // magic version byte
|
||||
0x01, // attribute flags
|
||||
0xFF, 0xFF, 0xFF, 0xFF, // key
|
||||
// value
|
||||
0x00, 0x00, 0x00, 0x17,
|
||||
0x1f, 0x8b,
|
||||
0x08,
|
||||
0, 0, 9, 110, 136, 0, 255, 1, 0, 0, 255, 255, 0, 0, 0, 0, 0, 0, 0, 0}
|
||||
|
||||
emptyGzipMessage18 = []byte{
|
||||
132, 99, 80, 148, //CRC
|
||||
0x00, // magic version byte
|
||||
0x01, // attribute flags
|
||||
0xFF, 0xFF, 0xFF, 0xFF, // key
|
||||
// value
|
||||
0x00, 0x00, 0x00, 0x17,
|
||||
0x1f, 0x8b,
|
||||
0x08,
|
||||
0, 0, 0, 0, 0, 0, 255, 1, 0, 0, 255, 255, 0, 0, 0, 0, 0, 0, 0, 0}
|
||||
|
||||
emptyLZ4Message = []byte{
|
||||
132, 219, 238, 101, // CRC
|
||||
0x01, // version byte
|
||||
0x03, // attribute flags: lz4
|
||||
0, 0, 1, 88, 141, 205, 89, 56, // timestamp
|
||||
0xFF, 0xFF, 0xFF, 0xFF, // key
|
||||
0x00, 0x00, 0x00, 0x0f, // len
|
||||
0x04, 0x22, 0x4D, 0x18, // LZ4 magic number
|
||||
100, // LZ4 flags: version 01, block independent, content checksum
|
||||
112, 185, 0, 0, 0, 0, // LZ4 data
|
||||
5, 93, 204, 2, // LZ4 checksum
|
||||
}
|
||||
|
||||
emptyBulkSnappyMessage = []byte{
|
||||
180, 47, 53, 209, //CRC
|
||||
0x00, // magic version byte
|
||||
0x02, // attribute flags
|
||||
0xFF, 0xFF, 0xFF, 0xFF, // key
|
||||
0, 0, 0, 42,
|
||||
130, 83, 78, 65, 80, 80, 89, 0, // SNAPPY magic
|
||||
0, 0, 0, 1, // min version
|
||||
0, 0, 0, 1, // default version
|
||||
0, 0, 0, 22, 52, 0, 0, 25, 1, 16, 14, 227, 138, 104, 118, 25, 15, 13, 1, 8, 1, 0, 0, 62, 26, 0}
|
||||
|
||||
emptyBulkGzipMessage = []byte{
|
||||
139, 160, 63, 141, //CRC
|
||||
0x00, // magic version byte
|
||||
0x01, // attribute flags
|
||||
0xFF, 0xFF, 0xFF, 0xFF, // key
|
||||
0x00, 0x00, 0x00, 0x27, // len
|
||||
0x1f, 0x8b, // Gzip Magic
|
||||
0x08, // deflate compressed
|
||||
0, 0, 0, 0, 0, 0, 0, 99, 96, 128, 3, 190, 202, 112, 143, 7, 12, 12, 255, 129, 0, 33, 200, 192, 136, 41, 3, 0, 199, 226, 155, 70, 52, 0, 0, 0}
|
||||
|
||||
emptyBulkLZ4Message = []byte{
|
||||
246, 12, 188, 129, // CRC
|
||||
0x01, // Version
|
||||
0x03, // attribute flags (LZ4)
|
||||
255, 255, 249, 209, 212, 181, 73, 201, // timestamp
|
||||
0xFF, 0xFF, 0xFF, 0xFF, // key
|
||||
0x00, 0x00, 0x00, 0x47, // len
|
||||
0x04, 0x22, 0x4D, 0x18, // magic number lz4
|
||||
100, // lz4 flags 01100100
|
||||
// version: 01, block indep: 1, block checksum: 0, content size: 0, content checksum: 1, reserved: 00
|
||||
112, 185, 52, 0, 0, 128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 14, 121, 87, 72, 224, 0, 0, 255, 255, 255, 255, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 14, 121, 87, 72, 224, 0, 0, 255, 255, 255, 255, 0, 0, 0, 0, 0, 0, 0, 0,
|
||||
71, 129, 23, 111, // LZ4 checksum
|
||||
}
|
||||
)
|
||||
|
||||
func TestMessageEncoding(t *testing.T) {
|
||||
message := Message{}
|
||||
testEncodable(t, "empty", &message, emptyMessage)
|
||||
|
||||
message.Value = []byte{}
|
||||
message.Codec = CompressionGZIP
|
||||
if strings.HasPrefix(runtime.Version(), "go1.8") || strings.HasPrefix(runtime.Version(), "go1.9") {
|
||||
testEncodable(t, "empty gzip", &message, emptyGzipMessage18)
|
||||
} else {
|
||||
testEncodable(t, "empty gzip", &message, emptyGzipMessage)
|
||||
}
|
||||
|
||||
message.Value = []byte{}
|
||||
message.Codec = CompressionLZ4
|
||||
message.Timestamp = time.Unix(1479847795, 0)
|
||||
message.Version = 1
|
||||
testEncodable(t, "empty lz4", &message, emptyLZ4Message)
|
||||
}
|
||||
|
||||
func TestMessageDecoding(t *testing.T) {
|
||||
message := Message{}
|
||||
testDecodable(t, "empty", &message, emptyMessage)
|
||||
if message.Codec != CompressionNone {
|
||||
t.Error("Decoding produced compression codec where there was none.")
|
||||
}
|
||||
if message.Key != nil {
|
||||
t.Error("Decoding produced key where there was none.")
|
||||
}
|
||||
if message.Value != nil {
|
||||
t.Error("Decoding produced value where there was none.")
|
||||
}
|
||||
if message.Set != nil {
|
||||
t.Error("Decoding produced set where there was none.")
|
||||
}
|
||||
|
||||
testDecodable(t, "empty gzip", &message, emptyGzipMessage)
|
||||
if message.Codec != CompressionGZIP {
|
||||
t.Error("Decoding produced incorrect compression codec (was gzip).")
|
||||
}
|
||||
if message.Key != nil {
|
||||
t.Error("Decoding produced key where there was none.")
|
||||
}
|
||||
if message.Value == nil || len(message.Value) != 0 {
|
||||
t.Error("Decoding produced nil or content-ful value where there was an empty array.")
|
||||
}
|
||||
}
|
||||
|
||||
func TestMessageDecodingBulkSnappy(t *testing.T) {
|
||||
message := Message{}
|
||||
testDecodable(t, "bulk snappy", &message, emptyBulkSnappyMessage)
|
||||
if message.Codec != CompressionSnappy {
|
||||
t.Errorf("Decoding produced codec %d, but expected %d.", message.Codec, CompressionSnappy)
|
||||
}
|
||||
if message.Key != nil {
|
||||
t.Errorf("Decoding produced key %+v, but none was expected.", message.Key)
|
||||
}
|
||||
if message.Set == nil {
|
||||
t.Error("Decoding produced no set, but one was expected.")
|
||||
} else if len(message.Set.Messages) != 2 {
|
||||
t.Errorf("Decoding produced a set with %d messages, but 2 were expected.", len(message.Set.Messages))
|
||||
}
|
||||
}
|
||||
|
||||
func TestMessageDecodingBulkGzip(t *testing.T) {
|
||||
message := Message{}
|
||||
testDecodable(t, "bulk gzip", &message, emptyBulkGzipMessage)
|
||||
if message.Codec != CompressionGZIP {
|
||||
t.Errorf("Decoding produced codec %d, but expected %d.", message.Codec, CompressionGZIP)
|
||||
}
|
||||
if message.Key != nil {
|
||||
t.Errorf("Decoding produced key %+v, but none was expected.", message.Key)
|
||||
}
|
||||
if message.Set == nil {
|
||||
t.Error("Decoding produced no set, but one was expected.")
|
||||
} else if len(message.Set.Messages) != 2 {
|
||||
t.Errorf("Decoding produced a set with %d messages, but 2 were expected.", len(message.Set.Messages))
|
||||
}
|
||||
}
|
||||
|
||||
func TestMessageDecodingBulkLZ4(t *testing.T) {
|
||||
message := Message{}
|
||||
testDecodable(t, "bulk lz4", &message, emptyBulkLZ4Message)
|
||||
if message.Codec != CompressionLZ4 {
|
||||
t.Errorf("Decoding produced codec %d, but expected %d.", message.Codec, CompressionLZ4)
|
||||
}
|
||||
if message.Key != nil {
|
||||
t.Errorf("Decoding produced key %+v, but none was expected.", message.Key)
|
||||
}
|
||||
if message.Set == nil {
|
||||
t.Error("Decoding produced no set, but one was expected.")
|
||||
} else if len(message.Set.Messages) != 2 {
|
||||
t.Errorf("Decoding produced a set with %d messages, but 2 were expected.", len(message.Set.Messages))
|
||||
}
|
||||
}
|
||||
|
||||
func TestMessageDecodingVersion1(t *testing.T) {
|
||||
message := Message{Version: 1}
|
||||
testDecodable(t, "decoding empty v1 message", &message, emptyV1Message)
|
||||
}
|
||||
|
||||
func TestMessageDecodingUnknownVersions(t *testing.T) {
|
||||
message := Message{Version: 2}
|
||||
err := decode(emptyV2Message, &message)
|
||||
if err == nil {
|
||||
t.Error("Decoding did not produce an error for an unknown magic byte")
|
||||
}
|
||||
if err.Error() != "kafka: error decoding packet: unknown magic byte (2)" {
|
||||
t.Error("Decoding an unknown magic byte produced an unknown error ", err)
|
||||
}
|
||||
}
|
29
vendor/github.com/Shopify/sarama/metadata_request_test.go
generated
vendored
@ -1,29 +0,0 @@
|
||||
package sarama
|
||||
|
||||
import "testing"
|
||||
|
||||
var (
|
||||
metadataRequestNoTopics = []byte{
|
||||
0x00, 0x00, 0x00, 0x00}
|
||||
|
||||
metadataRequestOneTopic = []byte{
|
||||
0x00, 0x00, 0x00, 0x01,
|
||||
0x00, 0x06, 't', 'o', 'p', 'i', 'c', '1'}
|
||||
|
||||
metadataRequestThreeTopics = []byte{
|
||||
0x00, 0x00, 0x00, 0x03,
|
||||
0x00, 0x03, 'f', 'o', 'o',
|
||||
0x00, 0x03, 'b', 'a', 'r',
|
||||
0x00, 0x03, 'b', 'a', 'z'}
|
||||
)
|
||||
|
||||
func TestMetadataRequest(t *testing.T) {
|
||||
request := new(MetadataRequest)
|
||||
testRequest(t, "no topics", request, metadataRequestNoTopics)
|
||||
|
||||
request.Topics = []string{"topic1"}
|
||||
testRequest(t, "one topic", request, metadataRequestOneTopic)
|
||||
|
||||
request.Topics = []string{"foo", "bar", "baz"}
|
||||
testRequest(t, "three topics", request, metadataRequestThreeTopics)
|
||||
}
|
139
vendor/github.com/Shopify/sarama/metadata_response_test.go
generated
vendored
@ -1,139 +0,0 @@
|
||||
package sarama
|
||||
|
||||
import "testing"
|
||||
|
||||
var (
|
||||
emptyMetadataResponse = []byte{
|
||||
0x00, 0x00, 0x00, 0x00,
|
||||
0x00, 0x00, 0x00, 0x00}
|
||||
|
||||
brokersNoTopicsMetadataResponse = []byte{
|
||||
0x00, 0x00, 0x00, 0x02,
|
||||
|
||||
0x00, 0x00, 0xab, 0xff,
|
||||
0x00, 0x09, 'l', 'o', 'c', 'a', 'l', 'h', 'o', 's', 't',
|
||||
0x00, 0x00, 0x00, 0x33,
|
||||
|
||||
0x00, 0x01, 0x02, 0x03,
|
||||
0x00, 0x0a, 'g', 'o', 'o', 'g', 'l', 'e', '.', 'c', 'o', 'm',
|
||||
0x00, 0x00, 0x01, 0x11,
|
||||
|
||||
0x00, 0x00, 0x00, 0x00}
|
||||
|
||||
topicsNoBrokersMetadataResponse = []byte{
|
||||
0x00, 0x00, 0x00, 0x00,
|
||||
0x00, 0x00, 0x00, 0x02,
|
||||
|
||||
0x00, 0x00,
|
||||
0x00, 0x03, 'f', 'o', 'o',
|
||||
0x00, 0x00, 0x00, 0x01,
|
||||
0x00, 0x04,
|
||||
0x00, 0x00, 0x00, 0x01,
|
||||
0x00, 0x00, 0x00, 0x07,
|
||||
0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x03,
|
||||
0x00, 0x00, 0x00, 0x00,
|
||||
|
||||
0x00, 0x00,
|
||||
0x00, 0x03, 'b', 'a', 'r',
|
||||
0x00, 0x00, 0x00, 0x00}
|
||||
)
|
||||
|
||||
func TestEmptyMetadataResponse(t *testing.T) {
|
||||
response := MetadataResponse{}
|
||||
|
||||
testVersionDecodable(t, "empty", &response, emptyMetadataResponse, 0)
|
||||
if len(response.Brokers) != 0 {
|
||||
t.Error("Decoding produced", len(response.Brokers), "brokers where there were none!")
|
||||
}
|
||||
if len(response.Topics) != 0 {
|
||||
t.Error("Decoding produced", len(response.Topics), "topics where there were none!")
|
||||
}
|
||||
}
|
||||
|
||||
func TestMetadataResponseWithBrokers(t *testing.T) {
|
||||
response := MetadataResponse{}
|
||||
|
||||
testVersionDecodable(t, "brokers, no topics", &response, brokersNoTopicsMetadataResponse, 0)
|
||||
if len(response.Brokers) != 2 {
|
||||
t.Fatal("Decoding produced", len(response.Brokers), "brokers where there were two!")
|
||||
}
|
||||
|
||||
if response.Brokers[0].id != 0xabff {
|
||||
t.Error("Decoding produced invalid broker 0 id.")
|
||||
}
|
||||
if response.Brokers[0].addr != "localhost:51" {
|
||||
t.Error("Decoding produced invalid broker 0 address.")
|
||||
}
|
||||
if response.Brokers[1].id != 0x010203 {
|
||||
t.Error("Decoding produced invalid broker 1 id.")
|
||||
}
|
||||
if response.Brokers[1].addr != "google.com:273" {
|
||||
t.Error("Decoding produced invalid broker 1 address.")
|
||||
}
|
||||
|
||||
if len(response.Topics) != 0 {
|
||||
t.Error("Decoding produced", len(response.Topics), "topics where there were none!")
|
||||
}
|
||||
}
|
||||
|
||||
func TestMetadataResponseWithTopics(t *testing.T) {
|
||||
response := MetadataResponse{}
|
||||
|
||||
testVersionDecodable(t, "topics, no brokers", &response, topicsNoBrokersMetadataResponse, 0)
|
||||
if len(response.Brokers) != 0 {
|
||||
t.Error("Decoding produced", len(response.Brokers), "brokers where there were none!")
|
||||
}
|
||||
|
||||
if len(response.Topics) != 2 {
|
||||
t.Fatal("Decoding produced", len(response.Topics), "topics where there were two!")
|
||||
}
|
||||
|
||||
if response.Topics[0].Err != ErrNoError {
|
||||
t.Error("Decoding produced invalid topic 0 error.")
|
||||
}
|
||||
|
||||
if response.Topics[0].Name != "foo" {
|
||||
t.Error("Decoding produced invalid topic 0 name.")
|
||||
}
|
||||
|
||||
if len(response.Topics[0].Partitions) != 1 {
|
||||
t.Fatal("Decoding produced invalid partition count for topic 0.")
|
||||
}
|
||||
|
||||
if response.Topics[0].Partitions[0].Err != ErrInvalidMessageSize {
|
||||
t.Error("Decoding produced invalid topic 0 partition 0 error.")
|
||||
}
|
||||
|
||||
if response.Topics[0].Partitions[0].ID != 0x01 {
|
||||
t.Error("Decoding produced invalid topic 0 partition 0 id.")
|
||||
}
|
||||
|
||||
if response.Topics[0].Partitions[0].Leader != 0x07 {
|
||||
t.Error("Decoding produced invalid topic 0 partition 0 leader.")
|
||||
}
|
||||
|
||||
if len(response.Topics[0].Partitions[0].Replicas) != 3 {
|
||||
t.Fatal("Decoding produced invalid topic 0 partition 0 replicas.")
|
||||
}
|
||||
for i := 0; i < 3; i++ {
|
||||
if response.Topics[0].Partitions[0].Replicas[i] != int32(i+1) {
|
||||
t.Error("Decoding produced invalid topic 0 partition 0 replica", i)
|
||||
}
|
||||
}
|
||||
|
||||
if len(response.Topics[0].Partitions[0].Isr) != 0 {
|
||||
t.Error("Decoding produced invalid topic 0 partition 0 isr length.")
|
||||
}
|
||||
|
||||
if response.Topics[1].Err != ErrNoError {
|
||||
t.Error("Decoding produced invalid topic 1 error.")
|
||||
}
|
||||
|
||||
if response.Topics[1].Name != "bar" {
|
||||
t.Error("Decoding produced invalid topic 0 name.")
|
||||
}
|
||||
|
||||
if len(response.Topics[1].Partitions) != 0 {
|
||||
t.Error("Decoding produced invalid partition count for topic 1.")
|
||||
}
|
||||
}
|
172
vendor/github.com/Shopify/sarama/metrics_test.go
generated
vendored
@ -1,172 +0,0 @@
|
||||
package sarama
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/rcrowley/go-metrics"
|
||||
)
|
||||
|
||||
func TestGetOrRegisterHistogram(t *testing.T) {
|
||||
metricRegistry := metrics.NewRegistry()
|
||||
histogram := getOrRegisterHistogram("name", metricRegistry)
|
||||
|
||||
if histogram == nil {
|
||||
t.Error("Unexpected nil histogram")
|
||||
}
|
||||
|
||||
// Fetch the metric
|
||||
foundHistogram := metricRegistry.Get("name")
|
||||
|
||||
if foundHistogram != histogram {
|
||||
t.Error("Unexpected different histogram", foundHistogram, histogram)
|
||||
}
|
||||
|
||||
// Try to register the metric again
|
||||
sameHistogram := getOrRegisterHistogram("name", metricRegistry)
|
||||
|
||||
if sameHistogram != histogram {
|
||||
t.Error("Unexpected different histogram", sameHistogram, histogram)
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetMetricNameForBroker(t *testing.T) {
|
||||
metricName := getMetricNameForBroker("name", &Broker{id: 1})
|
||||
|
||||
if metricName != "name-for-broker-1" {
|
||||
t.Error("Unexpected metric name", metricName)
|
||||
}
|
||||
}
|
||||
|
||||
// Common type and functions for metric validation
|
||||
type metricValidator struct {
|
||||
name string
|
||||
validator func(*testing.T, interface{})
|
||||
}
|
||||
|
||||
type metricValidators []*metricValidator
|
||||
|
||||
func newMetricValidators() metricValidators {
|
||||
return make([]*metricValidator, 0, 32)
|
||||
}
|
||||
|
||||
func (m *metricValidators) register(validator *metricValidator) {
|
||||
*m = append(*m, validator)
|
||||
}
|
||||
|
||||
func (m *metricValidators) registerForBroker(broker *Broker, validator *metricValidator) {
|
||||
m.register(&metricValidator{getMetricNameForBroker(validator.name, broker), validator.validator})
|
||||
}
|
||||
|
||||
func (m *metricValidators) registerForGlobalAndTopic(topic string, validator *metricValidator) {
|
||||
m.register(&metricValidator{validator.name, validator.validator})
|
||||
m.register(&metricValidator{getMetricNameForTopic(validator.name, topic), validator.validator})
|
||||
}
|
||||
|
||||
func (m *metricValidators) registerForAllBrokers(broker *Broker, validator *metricValidator) {
|
||||
m.register(validator)
|
||||
m.registerForBroker(broker, validator)
|
||||
}
|
||||
|
||||
func (m metricValidators) run(t *testing.T, r metrics.Registry) {
|
||||
for _, metricValidator := range m {
|
||||
metric := r.Get(metricValidator.name)
|
||||
if metric == nil {
|
||||
t.Error("No metric named", metricValidator.name)
|
||||
} else {
|
||||
metricValidator.validator(t, metric)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func meterValidator(name string, extraValidator func(*testing.T, metrics.Meter)) *metricValidator {
|
||||
return &metricValidator{
|
||||
name: name,
|
||||
validator: func(t *testing.T, metric interface{}) {
|
||||
if meter, ok := metric.(metrics.Meter); !ok {
|
||||
t.Errorf("Expected meter metric for '%s', got %T", name, metric)
|
||||
} else {
|
||||
extraValidator(t, meter)
|
||||
}
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func countMeterValidator(name string, expectedCount int) *metricValidator {
|
||||
return meterValidator(name, func(t *testing.T, meter metrics.Meter) {
|
||||
count := meter.Count()
|
||||
if count != int64(expectedCount) {
|
||||
t.Errorf("Expected meter metric '%s' count = %d, got %d", name, expectedCount, count)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func minCountMeterValidator(name string, minCount int) *metricValidator {
|
||||
return meterValidator(name, func(t *testing.T, meter metrics.Meter) {
|
||||
count := meter.Count()
|
||||
if count < int64(minCount) {
|
||||
t.Errorf("Expected meter metric '%s' count >= %d, got %d", name, minCount, count)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func histogramValidator(name string, extraValidator func(*testing.T, metrics.Histogram)) *metricValidator {
|
||||
return &metricValidator{
|
||||
name: name,
|
||||
validator: func(t *testing.T, metric interface{}) {
|
||||
if histogram, ok := metric.(metrics.Histogram); !ok {
|
||||
t.Errorf("Expected histogram metric for '%s', got %T", name, metric)
|
||||
} else {
|
||||
extraValidator(t, histogram)
|
||||
}
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func countHistogramValidator(name string, expectedCount int) *metricValidator {
|
||||
return histogramValidator(name, func(t *testing.T, histogram metrics.Histogram) {
|
||||
count := histogram.Count()
|
||||
if count != int64(expectedCount) {
|
||||
t.Errorf("Expected histogram metric '%s' count = %d, got %d", name, expectedCount, count)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func minCountHistogramValidator(name string, minCount int) *metricValidator {
|
||||
return histogramValidator(name, func(t *testing.T, histogram metrics.Histogram) {
|
||||
count := histogram.Count()
|
||||
if count < int64(minCount) {
|
||||
t.Errorf("Expected histogram metric '%s' count >= %d, got %d", name, minCount, count)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func minMaxHistogramValidator(name string, expectedMin int, expectedMax int) *metricValidator {
|
||||
return histogramValidator(name, func(t *testing.T, histogram metrics.Histogram) {
|
||||
min := int(histogram.Min())
|
||||
if min != expectedMin {
|
||||
t.Errorf("Expected histogram metric '%s' min = %d, got %d", name, expectedMin, min)
|
||||
}
|
||||
max := int(histogram.Max())
|
||||
if max != expectedMax {
|
||||
t.Errorf("Expected histogram metric '%s' max = %d, got %d", name, expectedMax, max)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func minValHistogramValidator(name string, minMin int) *metricValidator {
|
||||
return histogramValidator(name, func(t *testing.T, histogram metrics.Histogram) {
|
||||
min := int(histogram.Min())
|
||||
if min < minMin {
|
||||
t.Errorf("Expected histogram metric '%s' min >= %d, got %d", name, minMin, min)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func maxValHistogramValidator(name string, maxMax int) *metricValidator {
|
||||
return histogramValidator(name, func(t *testing.T, histogram metrics.Histogram) {
|
||||
max := int(histogram.Max())
|
||||
if max > maxMax {
|
||||
t.Errorf("Expected histogram metric '%s' max <= %d, got %d", name, maxMax, max)
|
||||
}
|
||||
})
|
||||
}
|
13
vendor/github.com/Shopify/sarama/mocks/README.md
generated
vendored
@ -1,13 +0,0 @@
|
||||
# sarama/mocks
|
||||
|
||||
The `mocks` subpackage includes mock implementations that implement the interfaces of the major sarama types.
|
||||
You can use them to test your sarama applications using dependency injection.
|
||||
|
||||
The following mock objects are available:
|
||||
|
||||
- [Consumer](https://godoc.org/github.com/Shopify/sarama/mocks#Consumer), which will create [PartitionConsumer](https://godoc.org/github.com/Shopify/sarama/mocks#PartitionConsumer) mocks.
|
||||
- [AsyncProducer](https://godoc.org/github.com/Shopify/sarama/mocks#AsyncProducer)
|
||||
- [SyncProducer](https://godoc.org/github.com/Shopify/sarama/mocks#SyncProducer)
|
||||
|
||||
The mocks allow you to set expectations on them. When you close the mocks, the expectations will be verified,
|
||||
and the results will be reported to the `*testing.T` object you provided when creating the mock.
|
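To make the workflow concrete, here is a minimal sketch of a test that uses the AsyncProducer mock; the package name, topic, and payload are hypothetical, but the calls mirror the expectation API shown in async_producer.go below:

```go
package myapp

import (
	"testing"

	"github.com/Shopify/sarama"
	"github.com/Shopify/sarama/mocks"
)

func TestPublish(t *testing.T) {
	config := sarama.NewConfig()
	config.Producer.Return.Successes = true

	// The mock reports violated expectations to t.
	mp := mocks.NewAsyncProducer(t, config)
	mp.ExpectInputAndSucceed()

	// Code under test would normally receive mp via dependency injection
	// as a sarama.AsyncProducer.
	mp.Input() <- &sarama.ProducerMessage{Topic: "events", Value: sarama.StringEncoder("hello")}
	<-mp.Successes()

	// Closing verifies that every expectation was consumed.
	if err := mp.Close(); err != nil {
		t.Error(err)
	}
}
```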
174
vendor/github.com/Shopify/sarama/mocks/async_producer.go
generated
vendored
@ -1,174 +0,0 @@
|
||||
package mocks
|
||||
|
||||
import (
|
||||
"sync"
|
||||
|
||||
"github.com/Shopify/sarama"
|
||||
)
|
||||
|
||||
// AsyncProducer implements sarama's Producer interface for testing purposes.
|
||||
// Before you can send messages to its Input channel, you have to set expectations
|
||||
// so it knows how to handle the input; it returns an error if the number of messages
|
||||
// received is bigger than the number of expectations set. You can also set a
|
||||
// function in each expectation so that the message value is checked by this function
|
||||
// and an error is returned if the match fails.
|
||||
type AsyncProducer struct {
|
||||
l sync.Mutex
|
||||
t ErrorReporter
|
||||
expectations []*producerExpectation
|
||||
closed chan struct{}
|
||||
input chan *sarama.ProducerMessage
|
||||
successes chan *sarama.ProducerMessage
|
||||
errors chan *sarama.ProducerError
|
||||
lastOffset int64
|
||||
}
|
||||
|
||||
// NewAsyncProducer instantiates a new Producer mock. The t argument should
|
||||
// be the *testing.T instance of your test method. An error will be written to it if
|
||||
// an expectation is violated. The config argument is used to determine whether it
|
||||
// should ack successes on the Successes channel.
|
||||
func NewAsyncProducer(t ErrorReporter, config *sarama.Config) *AsyncProducer {
|
||||
if config == nil {
|
||||
config = sarama.NewConfig()
|
||||
}
|
||||
mp := &AsyncProducer{
|
||||
t: t,
|
||||
closed: make(chan struct{}, 0),
|
||||
expectations: make([]*producerExpectation, 0),
|
||||
input: make(chan *sarama.ProducerMessage, config.ChannelBufferSize),
|
||||
successes: make(chan *sarama.ProducerMessage, config.ChannelBufferSize),
|
||||
errors: make(chan *sarama.ProducerError, config.ChannelBufferSize),
|
||||
}
|
||||
|
||||
go func() {
|
||||
defer func() {
|
||||
close(mp.successes)
|
||||
close(mp.errors)
|
||||
}()
|
||||
|
||||
for msg := range mp.input {
|
||||
mp.l.Lock()
|
||||
if mp.expectations == nil || len(mp.expectations) == 0 {
|
||||
mp.expectations = nil
|
||||
mp.t.Errorf("No more expectation set on this mock producer to handle the input message.")
|
||||
} else {
|
||||
expectation := mp.expectations[0]
|
||||
mp.expectations = mp.expectations[1:]
|
||||
if expectation.CheckFunction != nil {
|
||||
if val, err := msg.Value.Encode(); err != nil {
|
||||
mp.t.Errorf("Input message encoding failed: %s", err.Error())
|
||||
mp.errors <- &sarama.ProducerError{Err: err, Msg: msg}
|
||||
} else {
|
||||
err = expectation.CheckFunction(val)
|
||||
if err != nil {
|
||||
mp.t.Errorf("Check function returned an error: %s", err.Error())
|
||||
mp.errors <- &sarama.ProducerError{Err: err, Msg: msg}
|
||||
}
|
||||
}
|
||||
}
|
||||
if expectation.Result == errProduceSuccess {
|
||||
mp.lastOffset++
|
||||
if config.Producer.Return.Successes {
|
||||
msg.Offset = mp.lastOffset
|
||||
mp.successes <- msg
|
||||
}
|
||||
} else {
|
||||
if config.Producer.Return.Errors {
|
||||
mp.errors <- &sarama.ProducerError{Err: expectation.Result, Msg: msg}
|
||||
}
|
||||
}
|
||||
}
|
||||
mp.l.Unlock()
|
||||
}
|
||||
|
||||
mp.l.Lock()
|
||||
if len(mp.expectations) > 0 {
|
||||
mp.t.Errorf("Expected to exhaust all expectations, but %d are left.", len(mp.expectations))
|
||||
}
|
||||
mp.l.Unlock()
|
||||
|
||||
close(mp.closed)
|
||||
}()
|
||||
|
||||
return mp
|
||||
}
|
||||
|
||||
////////////////////////////////////////////////
|
||||
// Implement Producer interface
|
||||
////////////////////////////////////////////////
|
||||
|
||||
// AsyncClose corresponds with the AsyncClose method of sarama's Producer implementation.
|
||||
// By closing a mock producer, you also tell it that no more input will be provided, so it will
|
||||
// write an error to the test state if there are any remaining expectations.
|
||||
func (mp *AsyncProducer) AsyncClose() {
|
||||
close(mp.input)
|
||||
}
|
||||
|
||||
// Close corresponds with the Close method of sarama's Producer implementation.
|
||||
// By closing a mock producer, you also tell it that no more input will be provided, so it will
|
||||
// write an error to the test state if there's any remaining expectations.
|
||||
func (mp *AsyncProducer) Close() error {
|
||||
mp.AsyncClose()
|
||||
<-mp.closed
|
||||
return nil
|
||||
}
|
||||
|
||||
// Input corresponds with the Input method of sarama's Producer implementation.
|
||||
// You have to set expectations on the mock producer before writing messages to the Input
|
||||
// channel, so it knows how to handle them. If there are no remaining expectations and
|
||||
// a message is written to the Input channel, the mock producer will write an error to the test
|
||||
// state object.
|
||||
func (mp *AsyncProducer) Input() chan<- *sarama.ProducerMessage {
|
||||
return mp.input
|
||||
}
|
||||
|
||||
// Successes corresponds with the Successes method of sarama's Producer implementation.
|
||||
func (mp *AsyncProducer) Successes() <-chan *sarama.ProducerMessage {
|
||||
return mp.successes
|
||||
}
|
||||
|
||||
// Errors corresponds with the Errors method of sarama's Producer implementation.
|
||||
func (mp *AsyncProducer) Errors() <-chan *sarama.ProducerError {
|
||||
return mp.errors
|
||||
}
|
||||
|
||||
////////////////////////////////////////////////
|
||||
// Setting expectations
|
||||
////////////////////////////////////////////////
|
||||
|
||||
// ExpectInputWithCheckerFunctionAndSucceed sets an expectation on the mock producer that a message
|
||||
// will be provided on the input channel. The mock producer will call the given function to check
|
||||
// the message value. If an error is returned it will be made available on the Errors channel
|
||||
// otherwise the mock will handle the message as if it produced successfully, i.e. it will make
|
||||
// it available on the Successes channel if the Producer.Return.Successes setting is set to true.
|
||||
func (mp *AsyncProducer) ExpectInputWithCheckerFunctionAndSucceed(cf ValueChecker) {
|
||||
mp.l.Lock()
|
||||
defer mp.l.Unlock()
|
||||
mp.expectations = append(mp.expectations, &producerExpectation{Result: errProduceSuccess, CheckFunction: cf})
|
||||
}
|
||||
|
||||
// ExpectInputWithCheckerFunctionAndFail sets an expectation on the mock producer that a message
|
||||
// will be provided on the input channel. The mock producer will first call the given function to
|
||||
// check the message value. If an error is returned it will be made available on the Errors channel
|
||||
// otherwise the mock will handle the message as if it failed to produce successfully. This means
|
||||
// it will make a ProducerError available on the Errors channel.
|
||||
func (mp *AsyncProducer) ExpectInputWithCheckerFunctionAndFail(cf ValueChecker, err error) {
|
||||
mp.l.Lock()
|
||||
defer mp.l.Unlock()
|
||||
mp.expectations = append(mp.expectations, &producerExpectation{Result: err, CheckFunction: cf})
|
||||
}
|
||||
|
||||
// ExpectInputAndSucceed sets an expectation on the mock producer that a message will be provided
|
||||
// on the input channel. The mock producer will handle the message as if it is produced successfully,
|
||||
// i.e. it will make it available on the Successes channel if the Producer.Return.Successes setting
|
||||
// is set to true.
|
||||
func (mp *AsyncProducer) ExpectInputAndSucceed() {
|
||||
mp.ExpectInputWithCheckerFunctionAndSucceed(nil)
|
||||
}
|
||||
|
||||
// ExpectInputAndFail sets an expectation on the mock producer that a message will be provided
|
||||
// on the input channel. The mock producer will handle the message as if it failed to produce
|
||||
// successfully. This means it will make a ProducerError available on the Errors channel.
|
||||
func (mp *AsyncProducer) ExpectInputAndFail(err error) {
|
||||
mp.ExpectInputWithCheckerFunctionAndFail(nil, err)
|
||||
}
|
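Because the mock satisfies the sarama.AsyncProducer interface, application code written against that interface can be exercised against it directly. A minimal sketch, assuming the usual "testing", "github.com/Shopify/sarama" and "github.com/Shopify/sarama/mocks" imports; the publishEvent helper, topic name, and payload are illustrative only and not part of this package:

// publishEvent is a hypothetical application helper; it depends only on the interface.
func publishEvent(p sarama.AsyncProducer, topic string, payload []byte) {
	p.Input() <- &sarama.ProducerMessage{Topic: topic, Value: sarama.ByteEncoder(payload)}
}

func TestPublishEvent(t *testing.T) {
	mp := mocks.NewAsyncProducer(t, nil)
	mp.ExpectInputAndSucceed()

	publishEvent(mp, "events", []byte("hello"))

	// Close verifies that every expectation set above was consumed.
	if err := mp.Close(); err != nil {
		t.Error(err)
	}
}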
vendor/github.com/Shopify/sarama/mocks/async_producer_test.go (generated, vendored, 132 lines removed)
@@ -1,132 +0,0 @@
package mocks

import (
	"errors"
	"fmt"
	"regexp"
	"strings"
	"testing"

	"github.com/Shopify/sarama"
)

func generateRegexpChecker(re string) func([]byte) error {
	return func(val []byte) error {
		matched, err := regexp.MatchString(re, string(val))
		if err != nil {
			return errors.New("Error while trying to match the input message with the expected pattern: " + err.Error())
		}
		if !matched {
			return fmt.Errorf("No match between input value \"%s\" and expected pattern \"%s\"", val, re)
		}
		return nil
	}
}

type testReporterMock struct {
	errors []string
}

func newTestReporterMock() *testReporterMock {
	return &testReporterMock{errors: make([]string, 0)}
}

func (trm *testReporterMock) Errorf(format string, args ...interface{}) {
	trm.errors = append(trm.errors, fmt.Sprintf(format, args...))
}

func TestMockAsyncProducerImplementsAsyncProducerInterface(t *testing.T) {
	var mp interface{} = &AsyncProducer{}
	if _, ok := mp.(sarama.AsyncProducer); !ok {
		t.Error("The mock producer should implement the sarama.Producer interface.")
	}
}

func TestProducerReturnsExpectationsToChannels(t *testing.T) {
	config := sarama.NewConfig()
	config.Producer.Return.Successes = true
	mp := NewAsyncProducer(t, config)

	mp.ExpectInputAndSucceed()
	mp.ExpectInputAndSucceed()
	mp.ExpectInputAndFail(sarama.ErrOutOfBrokers)

	mp.Input() <- &sarama.ProducerMessage{Topic: "test 1"}
	mp.Input() <- &sarama.ProducerMessage{Topic: "test 2"}
	mp.Input() <- &sarama.ProducerMessage{Topic: "test 3"}

	msg1 := <-mp.Successes()
	msg2 := <-mp.Successes()
	err1 := <-mp.Errors()

	if msg1.Topic != "test 1" {
		t.Error("Expected message 1 to be returned first")
	}

	if msg2.Topic != "test 2" {
		t.Error("Expected message 2 to be returned second")
	}

	if err1.Msg.Topic != "test 3" || err1.Err != sarama.ErrOutOfBrokers {
		t.Error("Expected message 3 to be returned as error")
	}

	if err := mp.Close(); err != nil {
		t.Error(err)
	}
}

func TestProducerWithTooFewExpectations(t *testing.T) {
	trm := newTestReporterMock()
	mp := NewAsyncProducer(trm, nil)
	mp.ExpectInputAndSucceed()

	mp.Input() <- &sarama.ProducerMessage{Topic: "test"}
	mp.Input() <- &sarama.ProducerMessage{Topic: "test"}

	if err := mp.Close(); err != nil {
		t.Error(err)
	}

	if len(trm.errors) != 1 {
		t.Error("Expected to report an error")
	}
}

func TestProducerWithTooManyExpectations(t *testing.T) {
	trm := newTestReporterMock()
	mp := NewAsyncProducer(trm, nil)
	mp.ExpectInputAndSucceed()
	mp.ExpectInputAndFail(sarama.ErrOutOfBrokers)

	mp.Input() <- &sarama.ProducerMessage{Topic: "test"}
	if err := mp.Close(); err != nil {
		t.Error(err)
	}

	if len(trm.errors) != 1 {
		t.Error("Expected to report an error")
	}
}

func TestProducerWithCheckerFunction(t *testing.T) {
	trm := newTestReporterMock()
	mp := NewAsyncProducer(trm, nil)
	mp.ExpectInputWithCheckerFunctionAndSucceed(generateRegexpChecker("^tes"))
	mp.ExpectInputWithCheckerFunctionAndSucceed(generateRegexpChecker("^tes$"))

	mp.Input() <- &sarama.ProducerMessage{Topic: "test", Value: sarama.StringEncoder("test")}
	mp.Input() <- &sarama.ProducerMessage{Topic: "test", Value: sarama.StringEncoder("test")}
	if err := mp.Close(); err != nil {
		t.Error(err)
	}

	if len(mp.Errors()) != 1 {
		t.Error("Expected to report an error")
	}

	err1 := <-mp.Errors()
	if !strings.HasPrefix(err1.Err.Error(), "No match") {
		t.Error("Expected to report a value check error, found: ", err1.Err)
	}
}
vendor/github.com/Shopify/sarama/mocks/consumer.go (generated, vendored, 315 lines removed)
@@ -1,315 +0,0 @@
package mocks

import (
	"sync"
	"sync/atomic"

	"github.com/Shopify/sarama"
)

// Consumer implements sarama's Consumer interface for testing purposes.
// Before you can start consuming from this consumer, you have to register
// topic/partitions using ExpectConsumePartition, and set expectations on them.
type Consumer struct {
	l                  sync.Mutex
	t                  ErrorReporter
	config             *sarama.Config
	partitionConsumers map[string]map[int32]*PartitionConsumer
	metadata           map[string][]int32
}

// NewConsumer returns a new mock Consumer instance. The t argument should
// be the *testing.T instance of your test method. An error will be written to it if
// an expectation is violated. The config argument can be set to nil.
func NewConsumer(t ErrorReporter, config *sarama.Config) *Consumer {
	if config == nil {
		config = sarama.NewConfig()
	}

	c := &Consumer{
		t:                  t,
		config:             config,
		partitionConsumers: make(map[string]map[int32]*PartitionConsumer),
	}
	return c
}

///////////////////////////////////////////////////
// Consumer interface implementation
///////////////////////////////////////////////////

// ConsumePartition implements the ConsumePartition method from the sarama.Consumer interface.
// Before you can start consuming a partition, you have to set expectations on it using
// ExpectConsumePartition. You can only consume a partition once per consumer.
func (c *Consumer) ConsumePartition(topic string, partition int32, offset int64) (sarama.PartitionConsumer, error) {
	c.l.Lock()
	defer c.l.Unlock()

	if c.partitionConsumers[topic] == nil || c.partitionConsumers[topic][partition] == nil {
		c.t.Errorf("No expectations set for %s/%d", topic, partition)
		return nil, errOutOfExpectations
	}

	pc := c.partitionConsumers[topic][partition]
	if pc.consumed {
		return nil, sarama.ConfigurationError("The topic/partition is already being consumed")
	}

	if pc.offset != AnyOffset && pc.offset != offset {
		c.t.Errorf("Unexpected offset when calling ConsumePartition for %s/%d. Expected %d, got %d.", topic, partition, pc.offset, offset)
	}

	pc.consumed = true
	return pc, nil
}

// Topics returns the list of topics, as registered with SetTopicMetadata.
func (c *Consumer) Topics() ([]string, error) {
	c.l.Lock()
	defer c.l.Unlock()

	if c.metadata == nil {
		c.t.Errorf("Unexpected call to Topics. Initialize the mock's topic metadata with SetTopicMetadata.")
		return nil, sarama.ErrOutOfBrokers
	}

	var result []string
	for topic := range c.metadata {
		result = append(result, topic)
	}
	return result, nil
}

// Partitions returns the list of partitions for the given topic, as registered with SetTopicMetadata.
func (c *Consumer) Partitions(topic string) ([]int32, error) {
	c.l.Lock()
	defer c.l.Unlock()

	if c.metadata == nil {
		c.t.Errorf("Unexpected call to Partitions. Initialize the mock's topic metadata with SetTopicMetadata.")
		return nil, sarama.ErrOutOfBrokers
	}
	if c.metadata[topic] == nil {
		return nil, sarama.ErrUnknownTopicOrPartition
	}

	return c.metadata[topic], nil
}

// HighWaterMarks returns the current high water mark offset for every registered topic/partition.
func (c *Consumer) HighWaterMarks() map[string]map[int32]int64 {
	c.l.Lock()
	defer c.l.Unlock()

	hwms := make(map[string]map[int32]int64, len(c.partitionConsumers))
	for topic, partitionConsumers := range c.partitionConsumers {
		hwm := make(map[int32]int64, len(partitionConsumers))
		for partition, pc := range partitionConsumers {
			hwm[partition] = pc.HighWaterMarkOffset()
		}
		hwms[topic] = hwm
	}

	return hwms
}

// Close implements the Close method from the sarama.Consumer interface. It will close
// all registered PartitionConsumer instances.
func (c *Consumer) Close() error {
	c.l.Lock()
	defer c.l.Unlock()

	for _, partitions := range c.partitionConsumers {
		for _, partitionConsumer := range partitions {
			_ = partitionConsumer.Close()
		}
	}

	return nil
}

///////////////////////////////////////////////////
// Expectation API
///////////////////////////////////////////////////

// SetTopicMetadata sets the cluster's topic/partition metadata,
// which will be returned by Topics() and Partitions().
func (c *Consumer) SetTopicMetadata(metadata map[string][]int32) {
	c.l.Lock()
	defer c.l.Unlock()

	c.metadata = metadata
}

// ExpectConsumePartition will register a topic/partition, so you can set expectations on it.
// The registered PartitionConsumer will be returned, so you can set expectations
// on it using method chaining. Once a topic/partition is registered, you are
// expected to start consuming it using ConsumePartition. If that doesn't happen,
// an error will be written to the error reporter once the mock consumer is closed.
func (c *Consumer) ExpectConsumePartition(topic string, partition int32, offset int64) *PartitionConsumer {
	c.l.Lock()
	defer c.l.Unlock()

	if c.partitionConsumers[topic] == nil {
		c.partitionConsumers[topic] = make(map[int32]*PartitionConsumer)
	}

	if c.partitionConsumers[topic][partition] == nil {
		c.partitionConsumers[topic][partition] = &PartitionConsumer{
			t:         c.t,
			topic:     topic,
			partition: partition,
			offset:    offset,
			messages:  make(chan *sarama.ConsumerMessage, c.config.ChannelBufferSize),
			errors:    make(chan *sarama.ConsumerError, c.config.ChannelBufferSize),
		}
	}

	return c.partitionConsumers[topic][partition]
}

///////////////////////////////////////////////////
// PartitionConsumer mock type
///////////////////////////////////////////////////

// PartitionConsumer implements sarama's PartitionConsumer interface for testing purposes.
// It is returned by the mock Consumer's ConsumePartition method, but only if it is
// registered first using the Consumer's ExpectConsumePartition method. Before consuming the
// Errors and Messages channels, you should specify what values will be provided on these
// channels using YieldMessage and YieldError.
type PartitionConsumer struct {
	highWaterMarkOffset     int64 // must be at the top of the struct because https://golang.org/pkg/sync/atomic/#pkg-note-BUG
	l                       sync.Mutex
	t                       ErrorReporter
	topic                   string
	partition               int32
	offset                  int64
	messages                chan *sarama.ConsumerMessage
	errors                  chan *sarama.ConsumerError
	singleClose             sync.Once
	consumed                bool
	errorsShouldBeDrained   bool
	messagesShouldBeDrained bool
}

///////////////////////////////////////////////////
// PartitionConsumer interface implementation
///////////////////////////////////////////////////

// AsyncClose implements the AsyncClose method from the sarama.PartitionConsumer interface.
func (pc *PartitionConsumer) AsyncClose() {
	pc.singleClose.Do(func() {
		close(pc.messages)
		close(pc.errors)
	})
}

// Close implements the Close method from the sarama.PartitionConsumer interface. It will
// verify whether the partition consumer was actually started.
func (pc *PartitionConsumer) Close() error {
	if !pc.consumed {
		pc.t.Errorf("Expectations set on %s/%d, but no partition consumer was started.", pc.topic, pc.partition)
		return errPartitionConsumerNotStarted
	}

	if pc.errorsShouldBeDrained && len(pc.errors) > 0 {
		pc.t.Errorf("Expected the errors channel for %s/%d to be drained on close, but found %d errors.", pc.topic, pc.partition, len(pc.errors))
	}

	if pc.messagesShouldBeDrained && len(pc.messages) > 0 {
		pc.t.Errorf("Expected the messages channel for %s/%d to be drained on close, but found %d messages.", pc.topic, pc.partition, len(pc.messages))
	}

	pc.AsyncClose()

	var (
		closeErr error
		wg       sync.WaitGroup
	)

	wg.Add(1)
	go func() {
		defer wg.Done()

		var errs = make(sarama.ConsumerErrors, 0)
		for err := range pc.errors {
			errs = append(errs, err)
		}

		if len(errs) > 0 {
			closeErr = errs
		}
	}()

	wg.Add(1)
	go func() {
		defer wg.Done()
		for range pc.messages {
			// drain
		}
	}()

	wg.Wait()
	return closeErr
}

// Errors implements the Errors method from the sarama.PartitionConsumer interface.
func (pc *PartitionConsumer) Errors() <-chan *sarama.ConsumerError {
	return pc.errors
}

// Messages implements the Messages method from the sarama.PartitionConsumer interface.
func (pc *PartitionConsumer) Messages() <-chan *sarama.ConsumerMessage {
	return pc.messages
}

// HighWaterMarkOffset implements the HighWaterMarkOffset method from the sarama.PartitionConsumer interface.
func (pc *PartitionConsumer) HighWaterMarkOffset() int64 {
	return atomic.LoadInt64(&pc.highWaterMarkOffset) + 1
}

///////////////////////////////////////////////////
// Expectation API
///////////////////////////////////////////////////

// YieldMessage will yield a message on the Messages channel of this partition consumer
// when it is consumed. By default, the mock consumer will not verify whether this
// message was consumed from the Messages channel, because there are legitimate
// reasons for this not to happen. You can call ExpectMessagesDrainedOnClose so it will
// verify that the channel is empty on close.
func (pc *PartitionConsumer) YieldMessage(msg *sarama.ConsumerMessage) {
	pc.l.Lock()
	defer pc.l.Unlock()

	msg.Topic = pc.topic
	msg.Partition = pc.partition
	msg.Offset = atomic.AddInt64(&pc.highWaterMarkOffset, 1)

	pc.messages <- msg
}

// YieldError will yield an error on the Errors channel of this partition consumer
// when it is consumed. By default, the mock consumer will not verify whether this error was
// consumed from the Errors channel, because there are legitimate reasons for this
// not to happen. You can call ExpectErrorsDrainedOnClose so it will verify that
// the channel is empty on close.
func (pc *PartitionConsumer) YieldError(err error) {
	pc.errors <- &sarama.ConsumerError{
		Topic:     pc.topic,
		Partition: pc.partition,
		Err:       err,
	}
}

// ExpectMessagesDrainedOnClose sets an expectation on the partition consumer
// that the messages channel will be fully drained when Close is called. If this
// expectation is not met, an error is reported to the error reporter.
func (pc *PartitionConsumer) ExpectMessagesDrainedOnClose() {
	pc.messagesShouldBeDrained = true
}

// ExpectErrorsDrainedOnClose sets an expectation on the partition consumer
// that the errors channel will be fully drained when Close is called. If this
// expectation is not met, an error is reported to the error reporter.
func (pc *PartitionConsumer) ExpectErrorsDrainedOnClose() {
	pc.errorsShouldBeDrained = true
}
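Since the mock Consumer and PartitionConsumer satisfy the corresponding sarama interfaces, they can be injected into code under test that only depends on those interfaces. A minimal sketch, assuming the usual "testing", "github.com/Shopify/sarama" and "github.com/Shopify/sarama/mocks" imports; the countMessages helper and the "events" topic are illustrative only:

// countMessages is a hypothetical helper under test; it drains an already-started
// partition consumer and returns how many messages it saw.
func countMessages(pc sarama.PartitionConsumer) int {
	n := 0
	for range pc.Messages() {
		n++
	}
	return n
}

func TestCountMessages(t *testing.T) {
	consumer := mocks.NewConsumer(t, nil)
	pcmock := consumer.ExpectConsumePartition("events", 0, sarama.OffsetOldest)
	pcmock.YieldMessage(&sarama.ConsumerMessage{Value: []byte("a")})
	pcmock.YieldMessage(&sarama.ConsumerMessage{Value: []byte("b")})

	pc, err := consumer.ConsumePartition("events", 0, sarama.OffsetOldest)
	if err != nil {
		t.Fatal(err)
	}
	pc.AsyncClose() // close the channels so the range loop in countMessages terminates

	if got := countMessages(pc); got != 2 {
		t.Errorf("expected 2 messages, got %d", got)
	}
}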
vendor/github.com/Shopify/sarama/mocks/consumer_test.go (generated, vendored, 249 lines removed)
@@ -1,249 +0,0 @@
package mocks

import (
	"sort"
	"testing"

	"github.com/Shopify/sarama"
)

func TestMockConsumerImplementsConsumerInterface(t *testing.T) {
	var c interface{} = &Consumer{}
	if _, ok := c.(sarama.Consumer); !ok {
		t.Error("The mock consumer should implement the sarama.Consumer interface.")
	}

	var pc interface{} = &PartitionConsumer{}
	if _, ok := pc.(sarama.PartitionConsumer); !ok {
		t.Error("The mock partitionconsumer should implement the sarama.PartitionConsumer interface.")
	}
}

func TestConsumerHandlesExpectations(t *testing.T) {
	consumer := NewConsumer(t, nil)
	defer func() {
		if err := consumer.Close(); err != nil {
			t.Error(err)
		}
	}()

	consumer.ExpectConsumePartition("test", 0, sarama.OffsetOldest).YieldMessage(&sarama.ConsumerMessage{Value: []byte("hello world")})
	consumer.ExpectConsumePartition("test", 0, sarama.OffsetOldest).YieldError(sarama.ErrOutOfBrokers)
	consumer.ExpectConsumePartition("test", 1, sarama.OffsetOldest).YieldMessage(&sarama.ConsumerMessage{Value: []byte("hello world again")})
	consumer.ExpectConsumePartition("other", 0, AnyOffset).YieldMessage(&sarama.ConsumerMessage{Value: []byte("hello other")})

	pc_test0, err := consumer.ConsumePartition("test", 0, sarama.OffsetOldest)
	if err != nil {
		t.Fatal(err)
	}
	test0_msg := <-pc_test0.Messages()
	if test0_msg.Topic != "test" || test0_msg.Partition != 0 || string(test0_msg.Value) != "hello world" {
		t.Error("Message was not as expected:", test0_msg)
	}
	test0_err := <-pc_test0.Errors()
	if test0_err.Err != sarama.ErrOutOfBrokers {
		t.Error("Expected sarama.ErrOutOfBrokers, found:", test0_err.Err)
	}

	pc_test1, err := consumer.ConsumePartition("test", 1, sarama.OffsetOldest)
	if err != nil {
		t.Fatal(err)
	}
	test1_msg := <-pc_test1.Messages()
	if test1_msg.Topic != "test" || test1_msg.Partition != 1 || string(test1_msg.Value) != "hello world again" {
		t.Error("Message was not as expected:", test1_msg)
	}

	pc_other0, err := consumer.ConsumePartition("other", 0, sarama.OffsetNewest)
	if err != nil {
		t.Fatal(err)
	}
	other0_msg := <-pc_other0.Messages()
	if other0_msg.Topic != "other" || other0_msg.Partition != 0 || string(other0_msg.Value) != "hello other" {
		t.Error("Message was not as expected:", other0_msg)
	}
}

func TestConsumerReturnsNonconsumedErrorsOnClose(t *testing.T) {
	consumer := NewConsumer(t, nil)
	consumer.ExpectConsumePartition("test", 0, sarama.OffsetOldest).YieldError(sarama.ErrOutOfBrokers)
	consumer.ExpectConsumePartition("test", 0, sarama.OffsetOldest).YieldError(sarama.ErrOutOfBrokers)

	pc, err := consumer.ConsumePartition("test", 0, sarama.OffsetOldest)
	if err != nil {
		t.Fatal(err)
	}

	select {
	case <-pc.Messages():
		t.Error("Did not expect a message on the messages channel.")
	case err := <-pc.Errors():
		if err.Err != sarama.ErrOutOfBrokers {
			t.Error("Expected sarama.ErrOutOfBrokers, found", err)
		}
	}

	errs := pc.Close().(sarama.ConsumerErrors)
	if len(errs) != 1 || errs[0].Err != sarama.ErrOutOfBrokers {
		t.Error("Expected Close to return the remaining sarama.ErrOutOfBrokers")
	}
}

func TestConsumerWithoutExpectationsOnPartition(t *testing.T) {
	trm := newTestReporterMock()
	consumer := NewConsumer(trm, nil)

	_, err := consumer.ConsumePartition("test", 1, sarama.OffsetOldest)
	if err != errOutOfExpectations {
		t.Error("Expected ConsumePartition to return errOutOfExpectations")
	}

	if err := consumer.Close(); err != nil {
		t.Error("No error expected on close, but found:", err)
	}

	if len(trm.errors) != 1 {
		t.Errorf("Expected an expectation failure to be set on the error reporter.")
	}
}

func TestConsumerWithExpectationsOnUnconsumedPartition(t *testing.T) {
	trm := newTestReporterMock()
	consumer := NewConsumer(trm, nil)
	consumer.ExpectConsumePartition("test", 0, sarama.OffsetOldest).YieldMessage(&sarama.ConsumerMessage{Value: []byte("hello world")})

	if err := consumer.Close(); err != nil {
		t.Error("No error expected on close, but found:", err)
	}

	if len(trm.errors) != 1 {
		t.Errorf("Expected an expectation failure to be set on the error reporter.")
	}
}

func TestConsumerWithWrongOffsetExpectation(t *testing.T) {
	trm := newTestReporterMock()
	consumer := NewConsumer(trm, nil)
	consumer.ExpectConsumePartition("test", 0, sarama.OffsetOldest)

	_, err := consumer.ConsumePartition("test", 0, sarama.OffsetNewest)
	if err != nil {
		t.Error("Did not expect error, found:", err)
	}

	if len(trm.errors) != 1 {
		t.Errorf("Expected an expectation failure to be set on the error reporter.")
	}

	if err := consumer.Close(); err != nil {
		t.Error(err)
	}
}

func TestConsumerViolatesMessagesDrainedExpectation(t *testing.T) {
	trm := newTestReporterMock()
	consumer := NewConsumer(trm, nil)
	pcmock := consumer.ExpectConsumePartition("test", 0, sarama.OffsetOldest)
	pcmock.YieldMessage(&sarama.ConsumerMessage{Value: []byte("hello")})
	pcmock.YieldMessage(&sarama.ConsumerMessage{Value: []byte("hello")})
	pcmock.ExpectMessagesDrainedOnClose()

	pc, err := consumer.ConsumePartition("test", 0, sarama.OffsetOldest)
	if err != nil {
		t.Error(err)
	}

	// consume first message, not second one
	<-pc.Messages()

	if err := consumer.Close(); err != nil {
		t.Error(err)
	}

	if len(trm.errors) != 1 {
		t.Errorf("Expected an expectation failure to be set on the error reporter.")
	}
}

func TestConsumerMeetsErrorsDrainedExpectation(t *testing.T) {
	trm := newTestReporterMock()
	consumer := NewConsumer(trm, nil)

	pcmock := consumer.ExpectConsumePartition("test", 0, sarama.OffsetOldest)
	pcmock.YieldError(sarama.ErrInvalidMessage)
	pcmock.YieldError(sarama.ErrInvalidMessage)
	pcmock.ExpectErrorsDrainedOnClose()

	pc, err := consumer.ConsumePartition("test", 0, sarama.OffsetOldest)
	if err != nil {
		t.Error(err)
	}

	// consume first and second error
	<-pc.Errors()
	<-pc.Errors()

	if err := consumer.Close(); err != nil {
		t.Error(err)
	}

	if len(trm.errors) != 0 {
		t.Errorf("Expected no expectation failures to be set on the error reporter.")
	}
}

func TestConsumerTopicMetadata(t *testing.T) {
	trm := newTestReporterMock()
	consumer := NewConsumer(trm, nil)

	consumer.SetTopicMetadata(map[string][]int32{
		"test1": {0, 1, 2, 3},
		"test2": {0, 1, 2, 3, 4, 5, 6, 7},
	})

	topics, err := consumer.Topics()
	if err != nil {
		t.Error(err)
	}

	sortedTopics := sort.StringSlice(topics)
	sortedTopics.Sort()
	if len(sortedTopics) != 2 || sortedTopics[0] != "test1" || sortedTopics[1] != "test2" {
		t.Error("Unexpected topics returned:", sortedTopics)
	}

	partitions1, err := consumer.Partitions("test1")
	if err != nil {
		t.Error(err)
	}

	if len(partitions1) != 4 {
		t.Error("Unexpected partitions returned:", len(partitions1))
	}

	partitions2, err := consumer.Partitions("test2")
	if err != nil {
		t.Error(err)
	}

	if len(partitions2) != 8 {
		t.Error("Unexpected partitions returned:", len(partitions2))
	}

	if len(trm.errors) != 0 {
		t.Errorf("Expected no expectation failures to be set on the error reporter.")
	}
}

func TestConsumerUnexpectedTopicMetadata(t *testing.T) {
	trm := newTestReporterMock()
	consumer := NewConsumer(trm, nil)

	if _, err := consumer.Topics(); err != sarama.ErrOutOfBrokers {
		t.Error("Expected sarama.ErrOutOfBrokers, found", err)
	}

	if len(trm.errors) != 1 {
		t.Errorf("Expected an expectation failure to be set on the error reporter.")
	}
}
vendor/github.com/Shopify/sarama/mocks/mocks.go (generated, vendored, 48 lines removed)
@@ -1,48 +0,0 @@
/*
Package mocks provides mocks that can be used for testing applications
that use Sarama. The mock types provided by this package implement the
interfaces Sarama exports, so you can use them for dependency injection
in your tests.

All mock instances require you to set expectations on them before you
can use them. These expectations determine how the mock will behave. If an
expectation is not met, it will make your test fail.

NOTE: this package currently does not fall under the API stability
guarantee of Sarama as it is still considered experimental.
*/
package mocks

import (
	"errors"

	"github.com/Shopify/sarama"
)

// ErrorReporter is a simple interface that includes the testing.T methods we use to report
// expectation violations when using the mock objects.
type ErrorReporter interface {
	Errorf(string, ...interface{})
}

// ValueChecker is a function type to be set in each expectation of the producer mocks
// to check the value passed.
type ValueChecker func(val []byte) error

var (
	errProduceSuccess              error = nil
	errOutOfExpectations                 = errors.New("No more expectations set on mock")
	errPartitionConsumerNotStarted       = errors.New("The partition consumer was never started")
)

// AnyOffset can be passed to ExpectConsumePartition to accept any offset in the
// matching ConsumePartition call.
const AnyOffset int64 = -1000

type producerExpectation struct {
	Result        error
	CheckFunction ValueChecker
}

type consumerExpectation struct {
	Err error
	Msg *sarama.ConsumerMessage
}
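A ValueChecker can encode any domain-specific validation of the produced value. A minimal sketch of a custom checker, assuming the messages under test are expected to carry JSON payloads; the jsonChecker name is illustrative only:

import (
	"encoding/json"
	"fmt"
)

// jsonChecker is a hypothetical ValueChecker that accepts any syntactically
// valid JSON payload and rejects everything else.
func jsonChecker(val []byte) error {
	if !json.Valid(val) {
		return fmt.Errorf("expected a JSON payload, got %q", val)
	}
	return nil
}

Such a checker could then be passed to, for example, ExpectInputWithCheckerFunctionAndSucceed(jsonChecker) on the async mock or ExpectSendMessageWithCheckerFunctionAndSucceed(jsonChecker) on the sync mock.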
vendor/github.com/Shopify/sarama/mocks/sync_producer.go (generated, vendored, 146 lines removed)
@@ -1,146 +0,0 @@
package mocks

import (
	"sync"

	"github.com/Shopify/sarama"
)

// SyncProducer implements sarama's SyncProducer interface for testing purposes.
// Before you can use it, you have to set expectations on the mock SyncProducer
// to tell it how to handle calls to SendMessage, so you can easily test success
// and failure scenarios.
type SyncProducer struct {
	l            sync.Mutex
	t            ErrorReporter
	expectations []*producerExpectation
	lastOffset   int64
}

// NewSyncProducer instantiates a new SyncProducer mock. The t argument should
// be the *testing.T instance of your test method. An error will be written to it if
// an expectation is violated. The config argument is currently unused, but is
// maintained to be compatible with the async Producer.
func NewSyncProducer(t ErrorReporter, config *sarama.Config) *SyncProducer {
	return &SyncProducer{
		t:            t,
		expectations: make([]*producerExpectation, 0),
	}
}

////////////////////////////////////////////////
// Implement SyncProducer interface
////////////////////////////////////////////////

// SendMessage corresponds with the SendMessage method of sarama's SyncProducer implementation.
// You have to set expectations on the mock producer before calling SendMessage, so it knows
// how to handle them. You can set a function in each expectation so that the message value
// is checked by this function; an error is returned if the check fails.
// If there are no remaining expectations when SendMessage is called,
// the mock producer will write an error to the test state object.
func (sp *SyncProducer) SendMessage(msg *sarama.ProducerMessage) (partition int32, offset int64, err error) {
	sp.l.Lock()
	defer sp.l.Unlock()

	if len(sp.expectations) > 0 {
		expectation := sp.expectations[0]
		sp.expectations = sp.expectations[1:]
		if expectation.CheckFunction != nil {
			val, err := msg.Value.Encode()
			if err != nil {
				sp.t.Errorf("Input message encoding failed: %s", err.Error())
				return -1, -1, err
			}

			errCheck := expectation.CheckFunction(val)
			if errCheck != nil {
				sp.t.Errorf("Check function returned an error: %s", errCheck.Error())
				return -1, -1, errCheck
			}
		}
		if expectation.Result == errProduceSuccess {
			sp.lastOffset++
			msg.Offset = sp.lastOffset
			return 0, msg.Offset, nil
		}
		return -1, -1, expectation.Result
	}
	sp.t.Errorf("No more expectations set on this mock producer to handle the input message.")
	return -1, -1, errOutOfExpectations
}

// SendMessages corresponds with the SendMessages method of sarama's SyncProducer implementation.
// You have to set expectations on the mock producer before calling SendMessages, so it knows
// how to handle them. If there are not enough remaining expectations when SendMessages is called,
// the mock producer will write an error to the test state object.
func (sp *SyncProducer) SendMessages(msgs []*sarama.ProducerMessage) error {
	sp.l.Lock()
	defer sp.l.Unlock()

	if len(sp.expectations) >= len(msgs) {
		expectations := sp.expectations[:len(msgs)]
		sp.expectations = sp.expectations[len(msgs):]

		for _, expectation := range expectations {
			if expectation.Result != errProduceSuccess {
				return expectation.Result
			}
		}
		return nil
	}
	sp.t.Errorf("Insufficient expectations set on this mock producer to handle the input messages.")
	return errOutOfExpectations
}

// Close corresponds with the Close method of sarama's SyncProducer implementation.
// By closing a mock syncproducer, you also tell it that no more SendMessage calls will follow,
// so it will write an error to the test state if there are any remaining expectations.
func (sp *SyncProducer) Close() error {
	sp.l.Lock()
	defer sp.l.Unlock()

	if len(sp.expectations) > 0 {
		sp.t.Errorf("Expected to exhaust all expectations, but %d are left.", len(sp.expectations))
	}

	return nil
}

////////////////////////////////////////////////
// Setting expectations
////////////////////////////////////////////////

// ExpectSendMessageWithCheckerFunctionAndSucceed sets an expectation on the mock producer that SendMessage
// will be called. The mock producer will first call the given function to check the message value.
// It will cascade the error of the function, if any, or handle the message as if it produced
// successfully, i.e. by returning a valid partition and offset, and a nil error.
func (sp *SyncProducer) ExpectSendMessageWithCheckerFunctionAndSucceed(cf ValueChecker) {
	sp.l.Lock()
	defer sp.l.Unlock()
	sp.expectations = append(sp.expectations, &producerExpectation{Result: errProduceSuccess, CheckFunction: cf})
}

// ExpectSendMessageWithCheckerFunctionAndFail sets an expectation on the mock producer that SendMessage will be
// called. The mock producer will first call the given function to check the message value.
// It will cascade the error of the function, if any, or handle the message as if it failed
// to produce successfully, i.e. by returning the provided error.
func (sp *SyncProducer) ExpectSendMessageWithCheckerFunctionAndFail(cf ValueChecker, err error) {
	sp.l.Lock()
	defer sp.l.Unlock()
	sp.expectations = append(sp.expectations, &producerExpectation{Result: err, CheckFunction: cf})
}

// ExpectSendMessageAndSucceed sets an expectation on the mock producer that SendMessage will be
// called. The mock producer will handle the message as if it produced successfully, i.e. by
// returning a valid partition and offset, and a nil error.
func (sp *SyncProducer) ExpectSendMessageAndSucceed() {
	sp.ExpectSendMessageWithCheckerFunctionAndSucceed(nil)
}

// ExpectSendMessageAndFail sets an expectation on the mock producer that SendMessage will be
// called. The mock producer will handle the message as if it failed to produce
// successfully, i.e. by returning the provided error.
func (sp *SyncProducer) ExpectSendMessageAndFail(err error) {
	sp.ExpectSendMessageWithCheckerFunctionAndFail(nil, err)
}
vendor/github.com/Shopify/sarama/mocks/sync_producer_test.go (generated, vendored, 124 lines removed)
@@ -1,124 +0,0 @@
package mocks

import (
	"strings"
	"testing"

	"github.com/Shopify/sarama"
)

func TestMockSyncProducerImplementsSyncProducerInterface(t *testing.T) {
	var mp interface{} = &SyncProducer{}
	if _, ok := mp.(sarama.SyncProducer); !ok {
		t.Error("The mock sync producer should implement the sarama.SyncProducer interface.")
	}
}

func TestSyncProducerReturnsExpectationsToSendMessage(t *testing.T) {
	sp := NewSyncProducer(t, nil)
	defer func() {
		if err := sp.Close(); err != nil {
			t.Error(err)
		}
	}()

	sp.ExpectSendMessageAndSucceed()
	sp.ExpectSendMessageAndSucceed()
	sp.ExpectSendMessageAndFail(sarama.ErrOutOfBrokers)

	msg := &sarama.ProducerMessage{Topic: "test", Value: sarama.StringEncoder("test")}

	_, offset, err := sp.SendMessage(msg)
	if err != nil {
		t.Errorf("The first message should have been produced successfully, but got %s", err)
	}
	if offset != 1 || offset != msg.Offset {
		t.Errorf("The first message should have been assigned offset 1, but got %d", msg.Offset)
	}

	_, offset, err = sp.SendMessage(msg)
	if err != nil {
		t.Errorf("The second message should have been produced successfully, but got %s", err)
	}
	if offset != 2 || offset != msg.Offset {
		t.Errorf("The second message should have been assigned offset 2, but got %d", offset)
	}

	_, _, err = sp.SendMessage(msg)
	if err != sarama.ErrOutOfBrokers {
		t.Errorf("The third message should not have been produced successfully")
	}

	if err := sp.Close(); err != nil {
		t.Error(err)
	}
}

func TestSyncProducerWithTooManyExpectations(t *testing.T) {
	trm := newTestReporterMock()

	sp := NewSyncProducer(trm, nil)
	sp.ExpectSendMessageAndSucceed()
	sp.ExpectSendMessageAndFail(sarama.ErrOutOfBrokers)

	msg := &sarama.ProducerMessage{Topic: "test", Value: sarama.StringEncoder("test")}
	if _, _, err := sp.SendMessage(msg); err != nil {
		t.Error("No error expected on first SendMessage call", err)
	}

	if err := sp.Close(); err != nil {
		t.Error(err)
	}

	if len(trm.errors) != 1 {
		t.Error("Expected to report an error")
	}
}

func TestSyncProducerWithTooFewExpectations(t *testing.T) {
	trm := newTestReporterMock()

	sp := NewSyncProducer(trm, nil)
	sp.ExpectSendMessageAndSucceed()

	msg := &sarama.ProducerMessage{Topic: "test", Value: sarama.StringEncoder("test")}
	if _, _, err := sp.SendMessage(msg); err != nil {
		t.Error("No error expected on first SendMessage call", err)
	}
	if _, _, err := sp.SendMessage(msg); err != errOutOfExpectations {
		t.Error("errOutOfExpectations expected on second SendMessage call, found:", err)
	}

	if err := sp.Close(); err != nil {
		t.Error(err)
	}

	if len(trm.errors) != 1 {
		t.Error("Expected to report an error")
	}
}

func TestSyncProducerWithCheckerFunction(t *testing.T) {
	trm := newTestReporterMock()

	sp := NewSyncProducer(trm, nil)
	sp.ExpectSendMessageWithCheckerFunctionAndSucceed(generateRegexpChecker("^tes"))
	sp.ExpectSendMessageWithCheckerFunctionAndSucceed(generateRegexpChecker("^tes$"))

	msg := &sarama.ProducerMessage{Topic: "test", Value: sarama.StringEncoder("test")}
	if _, _, err := sp.SendMessage(msg); err != nil {
		t.Error("No error expected on first SendMessage call, found: ", err)
	}
	msg = &sarama.ProducerMessage{Topic: "test", Value: sarama.StringEncoder("test")}
	if _, _, err := sp.SendMessage(msg); err == nil || !strings.HasPrefix(err.Error(), "No match") {
		t.Error("Error during value check expected on second SendMessage call, found:", err)
	}

	if err := sp.Close(); err != nil {
		t.Error(err)
	}

	if len(trm.errors) != 1 {
		t.Error("Expected to report an error")
	}
}
vendor/github.com/Shopify/sarama/offset_commit_request_test.go (generated, vendored, 90 lines removed)
@@ -1,90 +0,0 @@
package sarama

import "testing"

var (
	offsetCommitRequestNoBlocksV0 = []byte{
		0x00, 0x06, 'f', 'o', 'o', 'b', 'a', 'r',
		0x00, 0x00, 0x00, 0x00}

	offsetCommitRequestNoBlocksV1 = []byte{
		0x00, 0x06, 'f', 'o', 'o', 'b', 'a', 'r',
		0x00, 0x00, 0x11, 0x22,
		0x00, 0x04, 'c', 'o', 'n', 's',
		0x00, 0x00, 0x00, 0x00}

	offsetCommitRequestNoBlocksV2 = []byte{
		0x00, 0x06, 'f', 'o', 'o', 'b', 'a', 'r',
		0x00, 0x00, 0x11, 0x22,
		0x00, 0x04, 'c', 'o', 'n', 's',
		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x44, 0x33,
		0x00, 0x00, 0x00, 0x00}

	offsetCommitRequestOneBlockV0 = []byte{
		0x00, 0x06, 'f', 'o', 'o', 'b', 'a', 'r',
		0x00, 0x00, 0x00, 0x01,
		0x00, 0x05, 't', 'o', 'p', 'i', 'c',
		0x00, 0x00, 0x00, 0x01,
		0x00, 0x00, 0x52, 0x21,
		0x00, 0x00, 0x00, 0x00, 0xDE, 0xAD, 0xBE, 0xEF,
		0x00, 0x08, 'm', 'e', 't', 'a', 'd', 'a', 't', 'a'}

	offsetCommitRequestOneBlockV1 = []byte{
		0x00, 0x06, 'f', 'o', 'o', 'b', 'a', 'r',
		0x00, 0x00, 0x11, 0x22,
		0x00, 0x04, 'c', 'o', 'n', 's',
		0x00, 0x00, 0x00, 0x01,
		0x00, 0x05, 't', 'o', 'p', 'i', 'c',
		0x00, 0x00, 0x00, 0x01,
		0x00, 0x00, 0x52, 0x21,
		0x00, 0x00, 0x00, 0x00, 0xDE, 0xAD, 0xBE, 0xEF,
		0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
		0x00, 0x08, 'm', 'e', 't', 'a', 'd', 'a', 't', 'a'}

	offsetCommitRequestOneBlockV2 = []byte{
		0x00, 0x06, 'f', 'o', 'o', 'b', 'a', 'r',
		0x00, 0x00, 0x11, 0x22,
		0x00, 0x04, 'c', 'o', 'n', 's',
		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x44, 0x33,
		0x00, 0x00, 0x00, 0x01,
		0x00, 0x05, 't', 'o', 'p', 'i', 'c',
		0x00, 0x00, 0x00, 0x01,
		0x00, 0x00, 0x52, 0x21,
		0x00, 0x00, 0x00, 0x00, 0xDE, 0xAD, 0xBE, 0xEF,
		0x00, 0x08, 'm', 'e', 't', 'a', 'd', 'a', 't', 'a'}
)

func TestOffsetCommitRequestV0(t *testing.T) {
	request := new(OffsetCommitRequest)
	request.Version = 0
	request.ConsumerGroup = "foobar"
	testRequest(t, "no blocks v0", request, offsetCommitRequestNoBlocksV0)

	request.AddBlock("topic", 0x5221, 0xDEADBEEF, 0, "metadata")
	testRequest(t, "one block v0", request, offsetCommitRequestOneBlockV0)
}

func TestOffsetCommitRequestV1(t *testing.T) {
	request := new(OffsetCommitRequest)
	request.ConsumerGroup = "foobar"
	request.ConsumerID = "cons"
	request.ConsumerGroupGeneration = 0x1122
	request.Version = 1
	testRequest(t, "no blocks v1", request, offsetCommitRequestNoBlocksV1)

	request.AddBlock("topic", 0x5221, 0xDEADBEEF, ReceiveTime, "metadata")
	testRequest(t, "one block v1", request, offsetCommitRequestOneBlockV1)
}

func TestOffsetCommitRequestV2(t *testing.T) {
	request := new(OffsetCommitRequest)
	request.ConsumerGroup = "foobar"
	request.ConsumerID = "cons"
	request.ConsumerGroupGeneration = 0x1122
	request.RetentionTime = 0x4433
	request.Version = 2
	testRequest(t, "no blocks v2", request, offsetCommitRequestNoBlocksV2)

	request.AddBlock("topic", 0x5221, 0xDEADBEEF, 0, "metadata")
	testRequest(t, "one block v2", request, offsetCommitRequestOneBlockV2)
}
vendor/github.com/Shopify/sarama/offset_commit_response_test.go (generated, vendored, 24 lines removed)
@@ -1,24 +0,0 @@
package sarama

import (
	"testing"
)

var (
	emptyOffsetCommitResponse = []byte{
		0x00, 0x00, 0x00, 0x00}
)

func TestEmptyOffsetCommitResponse(t *testing.T) {
	response := OffsetCommitResponse{}
	testResponse(t, "empty", &response, emptyOffsetCommitResponse)
}

func TestNormalOffsetCommitResponse(t *testing.T) {
	response := OffsetCommitResponse{}
	response.AddError("t", 0, ErrNotLeaderForPartition)
	response.Errors["m"] = make(map[int32]KError)
	// The response encoded form cannot be checked for it varies due to
	// unpredictable map traversal order.
	testResponse(t, "normal", &response, nil)
}
vendor/github.com/Shopify/sarama/offset_fetch_request_test.go (generated, vendored, 31 lines removed)
@@ -1,31 +0,0 @@
package sarama

import "testing"

var (
	offsetFetchRequestNoGroupNoPartitions = []byte{
		0x00, 0x00,
		0x00, 0x00, 0x00, 0x00}

	offsetFetchRequestNoPartitions = []byte{
		0x00, 0x04, 'b', 'l', 'a', 'h',
		0x00, 0x00, 0x00, 0x00}

	offsetFetchRequestOnePartition = []byte{
		0x00, 0x04, 'b', 'l', 'a', 'h',
		0x00, 0x00, 0x00, 0x01,
		0x00, 0x0D, 't', 'o', 'p', 'i', 'c', 'T', 'h', 'e', 'F', 'i', 'r', 's', 't',
		0x00, 0x00, 0x00, 0x01,
		0x4F, 0x4F, 0x4F, 0x4F}
)

func TestOffsetFetchRequest(t *testing.T) {
	request := new(OffsetFetchRequest)
	testRequest(t, "no group, no partitions", request, offsetFetchRequestNoGroupNoPartitions)

	request.ConsumerGroup = "blah"
	testRequest(t, "no partitions", request, offsetFetchRequestNoPartitions)

	request.AddPartition("topicTheFirst", 0x4F4F4F4F)
	testRequest(t, "one partition", request, offsetFetchRequestOnePartition)
}
vendor/github.com/Shopify/sarama/offset_fetch_response_test.go (generated, vendored, 22 lines removed)
@@ -1,22 +0,0 @@
package sarama

import "testing"

var (
	emptyOffsetFetchResponse = []byte{
		0x00, 0x00, 0x00, 0x00}
)

func TestEmptyOffsetFetchResponse(t *testing.T) {
	response := OffsetFetchResponse{}
	testResponse(t, "empty", &response, emptyOffsetFetchResponse)
}

func TestNormalOffsetFetchResponse(t *testing.T) {
	response := OffsetFetchResponse{}
	response.AddBlock("t", 0, &OffsetFetchResponseBlock{0, "md", ErrRequestTimedOut})
	response.Blocks["m"] = nil
	// The response encoded form cannot be checked for it varies due to
	// unpredictable map traversal order.
	testResponse(t, "normal", &response, nil)
}
vendor/github.com/Shopify/sarama/offset_manager_test.go (generated, vendored, 433 lines removed)
@@ -1,433 +0,0 @@
package sarama

import (
	"testing"
	"time"
)

func initOffsetManager(t *testing.T) (om OffsetManager,
	testClient Client, broker, coordinator *MockBroker) {

	config := NewConfig()
	config.Metadata.Retry.Max = 1
	config.Consumer.Offsets.CommitInterval = 1 * time.Millisecond
	config.Version = V0_9_0_0

	broker = NewMockBroker(t, 1)
	coordinator = NewMockBroker(t, 2)

	seedMeta := new(MetadataResponse)
	seedMeta.AddBroker(coordinator.Addr(), coordinator.BrokerID())
	seedMeta.AddTopicPartition("my_topic", 0, 1, []int32{}, []int32{}, ErrNoError)
	seedMeta.AddTopicPartition("my_topic", 1, 1, []int32{}, []int32{}, ErrNoError)
	broker.Returns(seedMeta)

	var err error
	testClient, err = NewClient([]string{broker.Addr()}, config)
	if err != nil {
		t.Fatal(err)
	}

	broker.Returns(&ConsumerMetadataResponse{
		CoordinatorID:   coordinator.BrokerID(),
		CoordinatorHost: "127.0.0.1",
		CoordinatorPort: coordinator.Port(),
	})

	om, err = NewOffsetManagerFromClient("group", testClient)
	if err != nil {
		t.Fatal(err)
	}

	return om, testClient, broker, coordinator
}

func initPartitionOffsetManager(t *testing.T, om OffsetManager,
	coordinator *MockBroker, initialOffset int64, metadata string) PartitionOffsetManager {

	fetchResponse := new(OffsetFetchResponse)
	fetchResponse.AddBlock("my_topic", 0, &OffsetFetchResponseBlock{
		Err:      ErrNoError,
		Offset:   initialOffset,
		Metadata: metadata,
	})
	coordinator.Returns(fetchResponse)

	pom, err := om.ManagePartition("my_topic", 0)
	if err != nil {
		t.Fatal(err)
	}

	return pom
}

func TestNewOffsetManager(t *testing.T) {
	seedBroker := NewMockBroker(t, 1)
	seedBroker.Returns(new(MetadataResponse))

	testClient, err := NewClient([]string{seedBroker.Addr()}, nil)
	if err != nil {
		t.Fatal(err)
	}

	_, err = NewOffsetManagerFromClient("group", testClient)
	if err != nil {
		t.Error(err)
	}

	safeClose(t, testClient)

	_, err = NewOffsetManagerFromClient("group", testClient)
	if err != ErrClosedClient {
		t.Errorf("Error expected for closed client; actual value: %v", err)
	}

	seedBroker.Close()
}

// Test recovery from ErrNotCoordinatorForConsumer
// on first fetchInitialOffset call
func TestOffsetManagerFetchInitialFail(t *testing.T) {
	om, testClient, broker, coordinator := initOffsetManager(t)

	// Error on first fetchInitialOffset call
	responseBlock := OffsetFetchResponseBlock{
		Err:      ErrNotCoordinatorForConsumer,
		Offset:   5,
		Metadata: "test_meta",
	}

	fetchResponse := new(OffsetFetchResponse)
	fetchResponse.AddBlock("my_topic", 0, &responseBlock)
	coordinator.Returns(fetchResponse)

	// Refresh coordinator
	newCoordinator := NewMockBroker(t, 3)
	broker.Returns(&ConsumerMetadataResponse{
		CoordinatorID:   newCoordinator.BrokerID(),
		CoordinatorHost: "127.0.0.1",
		CoordinatorPort: newCoordinator.Port(),
	})

	// Second fetchInitialOffset call is fine
	fetchResponse2 := new(OffsetFetchResponse)
	responseBlock2 := responseBlock
	responseBlock2.Err = ErrNoError
	fetchResponse2.AddBlock("my_topic", 0, &responseBlock2)
	newCoordinator.Returns(fetchResponse2)

	pom, err := om.ManagePartition("my_topic", 0)
	if err != nil {
		t.Error(err)
	}

	broker.Close()
	coordinator.Close()
	newCoordinator.Close()
	safeClose(t, pom)
	safeClose(t, om)
	safeClose(t, testClient)
}

// Test fetchInitialOffset retry on ErrOffsetsLoadInProgress
func TestOffsetManagerFetchInitialLoadInProgress(t *testing.T) {
	om, testClient, broker, coordinator := initOffsetManager(t)

	// Error on first fetchInitialOffset call
	responseBlock := OffsetFetchResponseBlock{
		Err:      ErrOffsetsLoadInProgress,
		Offset:   5,
		Metadata: "test_meta",
	}

	fetchResponse := new(OffsetFetchResponse)
	fetchResponse.AddBlock("my_topic", 0, &responseBlock)
	coordinator.Returns(fetchResponse)

	// Second fetchInitialOffset call is fine
	fetchResponse2 := new(OffsetFetchResponse)
	responseBlock2 := responseBlock
	responseBlock2.Err = ErrNoError
	fetchResponse2.AddBlock("my_topic", 0, &responseBlock2)
	coordinator.Returns(fetchResponse2)

	pom, err := om.ManagePartition("my_topic", 0)
	if err != nil {
		t.Error(err)
	}

	broker.Close()
	coordinator.Close()
	safeClose(t, pom)
	safeClose(t, om)
	safeClose(t, testClient)
}

func TestPartitionOffsetManagerInitialOffset(t *testing.T) {
	om, testClient, broker, coordinator := initOffsetManager(t)
	testClient.Config().Consumer.Offsets.Initial = OffsetOldest

	// Kafka returns -1 if no offset has been stored for this partition yet.
	pom := initPartitionOffsetManager(t, om, coordinator, -1, "")

	offset, meta := pom.NextOffset()
	if offset != OffsetOldest {
		t.Errorf("Expected offset %v. Actual: %v", OffsetOldest, offset)
	}
	if meta != "" {
		t.Errorf("Expected metadata to be empty. Actual: %q", meta)
	}

	safeClose(t, pom)
	safeClose(t, om)
	broker.Close()
	coordinator.Close()
	safeClose(t, testClient)
}

func TestPartitionOffsetManagerNextOffset(t *testing.T) {
	om, testClient, broker, coordinator := initOffsetManager(t)
	pom := initPartitionOffsetManager(t, om, coordinator, 5, "test_meta")

	offset, meta := pom.NextOffset()
	if offset != 5 {
		t.Errorf("Expected offset 5. Actual: %v", offset)
	}
	if meta != "test_meta" {
		t.Errorf("Expected metadata \"test_meta\". Actual: %q", meta)
	}

	safeClose(t, pom)
	safeClose(t, om)
	broker.Close()
	coordinator.Close()
	safeClose(t, testClient)
}

func TestPartitionOffsetManagerResetOffset(t *testing.T) {
	om, testClient, broker, coordinator := initOffsetManager(t)
	pom := initPartitionOffsetManager(t, om, coordinator, 5, "original_meta")

	ocResponse := new(OffsetCommitResponse)
	ocResponse.AddError("my_topic", 0, ErrNoError)
	coordinator.Returns(ocResponse)

	expected := int64(1)
	pom.ResetOffset(expected, "modified_meta")
	actual, meta := pom.NextOffset()

	if actual != expected {
		t.Errorf("Expected offset %v. Actual: %v", expected, actual)
	}
	if meta != "modified_meta" {
		t.Errorf("Expected metadata \"modified_meta\". Actual: %q", meta)
	}

	safeClose(t, pom)
	safeClose(t, om)
	safeClose(t, testClient)
	broker.Close()
	coordinator.Close()
}

func TestPartitionOffsetManagerResetOffsetWithRetention(t *testing.T) {
	om, testClient, broker, coordinator := initOffsetManager(t)
	testClient.Config().Consumer.Offsets.Retention = time.Hour

	pom := initPartitionOffsetManager(t, om, coordinator, 5, "original_meta")

	ocResponse := new(OffsetCommitResponse)
	ocResponse.AddError("my_topic", 0, ErrNoError)
	handler := func(req *request) (res encoder) {
		if req.body.version() != 2 {
			t.Errorf("Expected to be using version 2. Actual: %v", req.body.version())
		}
		offsetCommitRequest := req.body.(*OffsetCommitRequest)
		if offsetCommitRequest.RetentionTime != (60 * 60 * 1000) {
			t.Errorf("Expected an hour retention time. Actual: %v", offsetCommitRequest.RetentionTime)
		}
		return ocResponse
	}
	coordinator.setHandler(handler)

	expected := int64(1)
	pom.ResetOffset(expected, "modified_meta")
	actual, meta := pom.NextOffset()

	if actual != expected {
		t.Errorf("Expected offset %v. Actual: %v", expected, actual)
	}
	if meta != "modified_meta" {
		t.Errorf("Expected metadata \"modified_meta\". Actual: %q", meta)
	}

	safeClose(t, pom)
	safeClose(t, om)
	safeClose(t, testClient)
	broker.Close()
	coordinator.Close()
}

func TestPartitionOffsetManagerMarkOffset(t *testing.T) {
	om, testClient, broker, coordinator := initOffsetManager(t)
	pom := initPartitionOffsetManager(t, om, coordinator, 5, "original_meta")

	ocResponse := new(OffsetCommitResponse)
	ocResponse.AddError("my_topic", 0, ErrNoError)
	coordinator.Returns(ocResponse)

	pom.MarkOffset(100, "modified_meta")
	offset, meta := pom.NextOffset()

	if offset != 100 {
		t.Errorf("Expected offset 100. Actual: %v", offset)
	}
	if meta != "modified_meta" {
		t.Errorf("Expected metadata \"modified_meta\". Actual: %q", meta)
	}

	safeClose(t, pom)
	safeClose(t, om)
	safeClose(t, testClient)
	broker.Close()
	coordinator.Close()
}

func TestPartitionOffsetManagerMarkOffsetWithRetention(t *testing.T) {
	om, testClient, broker, coordinator := initOffsetManager(t)
	testClient.Config().Consumer.Offsets.Retention = time.Hour

	pom := initPartitionOffsetManager(t, om, coordinator, 5, "original_meta")

	ocResponse := new(OffsetCommitResponse)
	ocResponse.AddError("my_topic", 0, ErrNoError)
	handler := func(req *request) (res encoder) {
		if req.body.version() != 2 {
			t.Errorf("Expected to be using version 2. Actual: %v", req.body.version())
		}
		offsetCommitRequest := req.body.(*OffsetCommitRequest)
		if offsetCommitRequest.RetentionTime != (60 * 60 * 1000) {
			t.Errorf("Expected an hour retention time. Actual: %v", offsetCommitRequest.RetentionTime)
		}
		return ocResponse
	}
	coordinator.setHandler(handler)

	pom.MarkOffset(100, "modified_meta")
	offset, meta := pom.NextOffset()

	if offset != 100 {
		t.Errorf("Expected offset 100. Actual: %v", offset)
	}
	if meta != "modified_meta" {
		t.Errorf("Expected metadata \"modified_meta\". Actual: %q", meta)
	}

	safeClose(t, pom)
	safeClose(t, om)
	safeClose(t, testClient)
	broker.Close()
	coordinator.Close()
}

func TestPartitionOffsetManagerCommitErr(t *testing.T) {
	om, testClient, broker, coordinator := initOffsetManager(t)
	pom := initPartitionOffsetManager(t, om, coordinator, 5, "meta")

	// Error on one partition
	ocResponse := new(OffsetCommitResponse)
	ocResponse.AddError("my_topic", 0, ErrOffsetOutOfRange)
	ocResponse.AddError("my_topic", 1, ErrNoError)
	coordinator.Returns(ocResponse)

	newCoordinator := NewMockBroker(t, 3)

	// For RefreshCoordinator()
	broker.Returns(&ConsumerMetadataResponse{
		CoordinatorID:   newCoordinator.BrokerID(),
		CoordinatorHost: "127.0.0.1",
		CoordinatorPort: newCoordinator.Port(),
	})

	// Nothing in response.Errors at all
	ocResponse2 := new(OffsetCommitResponse)
	newCoordinator.Returns(ocResponse2)

	// For RefreshCoordinator()
	broker.Returns(&ConsumerMetadataResponse{
		CoordinatorID:   newCoordinator.BrokerID(),
		CoordinatorHost: "127.0.0.1",
		CoordinatorPort: newCoordinator.Port(),
	})

	// Error on the wrong partition for this pom
	ocResponse3 := new(OffsetCommitResponse)
	ocResponse3.AddError("my_topic", 1, ErrNoError)
	newCoordinator.Returns(ocResponse3)

	// For RefreshCoordinator()
	broker.Returns(&ConsumerMetadataResponse{
		CoordinatorID:   newCoordinator.BrokerID(),
		CoordinatorHost: "127.0.0.1",
		CoordinatorPort: newCoordinator.Port(),
	})

	// ErrUnknownTopicOrPartition/ErrNotLeaderForPartition/ErrLeaderNotAvailable block
	ocResponse4 := new(OffsetCommitResponse)
	ocResponse4.AddError("my_topic", 0, ErrUnknownTopicOrPartition)
	newCoordinator.Returns(ocResponse4)

	// For RefreshCoordinator()
	broker.Returns(&ConsumerMetadataResponse{
		CoordinatorID:   newCoordinator.BrokerID(),
		CoordinatorHost: "127.0.0.1",
		CoordinatorPort: newCoordinator.Port(),
	})

	// Normal error response
	ocResponse5 := new(OffsetCommitResponse)
	ocResponse5.AddError("my_topic", 0, ErrNoError)
	newCoordinator.Returns(ocResponse5)

	pom.MarkOffset(100, "modified_meta")

	err := pom.Close()
	if err != nil {
		t.Error(err)
	}

	broker.Close()
	coordinator.Close()
	newCoordinator.Close()
	safeClose(t, om)
	safeClose(t, testClient)
}

// Test of recovery from abort
func TestAbortPartitionOffsetManager(t *testing.T) {
|
||||
om, testClient, broker, coordinator := initOffsetManager(t)
|
||||
pom := initPartitionOffsetManager(t, om, coordinator, 5, "meta")
|
||||
|
||||
// this triggers an error in the CommitOffset request,
|
||||
// which leads to the abort call
|
||||
coordinator.Close()
|
||||
|
||||
// Response to refresh coordinator request
|
||||
newCoordinator := NewMockBroker(t, 3)
|
||||
broker.Returns(&ConsumerMetadataResponse{
|
||||
CoordinatorID: newCoordinator.BrokerID(),
|
||||
CoordinatorHost: "127.0.0.1",
|
||||
CoordinatorPort: newCoordinator.Port(),
|
||||
})
|
||||
|
||||
ocResponse := new(OffsetCommitResponse)
|
||||
ocResponse.AddError("my_topic", 0, ErrNoError)
|
||||
newCoordinator.Returns(ocResponse)
|
||||
|
||||
pom.MarkOffset(100, "modified_meta")
|
||||
|
||||
safeClose(t, pom)
|
||||
safeClose(t, om)
|
||||
broker.Close()
|
||||
safeClose(t, testClient)
|
||||
}
|
43 vendor/github.com/Shopify/sarama/offset_request_test.go (generated, vendored)
@ -1,43 +0,0 @@
|
||||
package sarama
|
||||
|
||||
import "testing"
|
||||
|
||||
var (
|
||||
offsetRequestNoBlocks = []byte{
|
||||
0xFF, 0xFF, 0xFF, 0xFF,
|
||||
0x00, 0x00, 0x00, 0x00}
|
||||
|
||||
offsetRequestOneBlock = []byte{
|
||||
0xFF, 0xFF, 0xFF, 0xFF,
|
||||
0x00, 0x00, 0x00, 0x01,
|
||||
0x00, 0x03, 'f', 'o', 'o',
|
||||
0x00, 0x00, 0x00, 0x01,
|
||||
0x00, 0x00, 0x00, 0x04,
|
||||
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01,
|
||||
0x00, 0x00, 0x00, 0x02}
|
||||
|
||||
offsetRequestOneBlockV1 = []byte{
|
||||
0xFF, 0xFF, 0xFF, 0xFF,
|
||||
0x00, 0x00, 0x00, 0x01,
|
||||
0x00, 0x03, 'b', 'a', 'r',
|
||||
0x00, 0x00, 0x00, 0x01,
|
||||
0x00, 0x00, 0x00, 0x04,
|
||||
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01}
|
||||
)
|
||||
|
||||
func TestOffsetRequest(t *testing.T) {
|
||||
request := new(OffsetRequest)
|
||||
testRequest(t, "no blocks", request, offsetRequestNoBlocks)
|
||||
|
||||
request.AddBlock("foo", 4, 1, 2)
|
||||
testRequest(t, "one block", request, offsetRequestOneBlock)
|
||||
}
|
||||
|
||||
func TestOffsetRequestV1(t *testing.T) {
|
||||
request := new(OffsetRequest)
|
||||
request.Version = 1
|
||||
testRequest(t, "no blocks", request, offsetRequestNoBlocks)
|
||||
|
||||
request.AddBlock("bar", 4, 1, 2) // Last argument is ignored for V1
|
||||
testRequest(t, "one block", request, offsetRequestOneBlockV1)
|
||||
}
|
111 vendor/github.com/Shopify/sarama/offset_response_test.go (generated, vendored)
@ -1,111 +0,0 @@
|
||||
package sarama
|
||||
|
||||
import "testing"
|
||||
|
||||
var (
|
||||
emptyOffsetResponse = []byte{
|
||||
0x00, 0x00, 0x00, 0x00}
|
||||
|
||||
normalOffsetResponse = []byte{
|
||||
0x00, 0x00, 0x00, 0x02,
|
||||
|
||||
0x00, 0x01, 'a',
|
||||
0x00, 0x00, 0x00, 0x00,
|
||||
|
||||
0x00, 0x01, 'z',
|
||||
0x00, 0x00, 0x00, 0x01,
|
||||
0x00, 0x00, 0x00, 0x02,
|
||||
0x00, 0x00,
|
||||
0x00, 0x00, 0x00, 0x02,
|
||||
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05,
|
||||
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06}
|
||||
|
||||
normalOffsetResponseV1 = []byte{
|
||||
0x00, 0x00, 0x00, 0x02,
|
||||
|
||||
0x00, 0x01, 'a',
|
||||
0x00, 0x00, 0x00, 0x00,
|
||||
|
||||
0x00, 0x01, 'z',
|
||||
0x00, 0x00, 0x00, 0x01,
|
||||
0x00, 0x00, 0x00, 0x02,
|
||||
0x00, 0x00,
|
||||
0x00, 0x00, 0x01, 0x58, 0x1A, 0xE6, 0x48, 0x86,
|
||||
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06}
|
||||
)
|
||||
|
||||
func TestEmptyOffsetResponse(t *testing.T) {
|
||||
response := OffsetResponse{}
|
||||
|
||||
testVersionDecodable(t, "empty", &response, emptyOffsetResponse, 0)
|
||||
if len(response.Blocks) != 0 {
|
||||
t.Error("Decoding produced", len(response.Blocks), "topics where there were none.")
|
||||
}
|
||||
|
||||
response = OffsetResponse{}
|
||||
|
||||
testVersionDecodable(t, "empty", &response, emptyOffsetResponse, 1)
|
||||
if len(response.Blocks) != 0 {
|
||||
t.Error("Decoding produced", len(response.Blocks), "topics where there were none.")
|
||||
}
|
||||
}
|
||||
|
||||
func TestNormalOffsetResponse(t *testing.T) {
|
||||
response := OffsetResponse{}
|
||||
|
||||
testVersionDecodable(t, "normal", &response, normalOffsetResponse, 0)
|
||||
|
||||
if len(response.Blocks) != 2 {
|
||||
t.Fatal("Decoding produced", len(response.Blocks), "topics where there were two.")
|
||||
}
|
||||
|
||||
if len(response.Blocks["a"]) != 0 {
|
||||
t.Fatal("Decoding produced", len(response.Blocks["a"]), "partitions for topic 'a' where there were none.")
|
||||
}
|
||||
|
||||
if len(response.Blocks["z"]) != 1 {
|
||||
t.Fatal("Decoding produced", len(response.Blocks["z"]), "partitions for topic 'z' where there was one.")
|
||||
}
|
||||
|
||||
if response.Blocks["z"][2].Err != ErrNoError {
|
||||
t.Fatal("Decoding produced invalid error for topic z partition 2.")
|
||||
}
|
||||
|
||||
if len(response.Blocks["z"][2].Offsets) != 2 {
|
||||
t.Fatal("Decoding produced invalid number of offsets for topic z partition 2.")
|
||||
}
|
||||
|
||||
if response.Blocks["z"][2].Offsets[0] != 5 || response.Blocks["z"][2].Offsets[1] != 6 {
|
||||
t.Fatal("Decoding produced invalid offsets for topic z partition 2.")
|
||||
}
|
||||
}
|
||||
|
||||
func TestNormalOffsetResponseV1(t *testing.T) {
|
||||
response := OffsetResponse{}
|
||||
|
||||
testVersionDecodable(t, "normal", &response, normalOffsetResponseV1, 1)
|
||||
|
||||
if len(response.Blocks) != 2 {
|
||||
t.Fatal("Decoding produced", len(response.Blocks), "topics where there were two.")
|
||||
}
|
||||
|
||||
if len(response.Blocks["a"]) != 0 {
|
||||
t.Fatal("Decoding produced", len(response.Blocks["a"]), "partitions for topic 'a' where there were none.")
|
||||
}
|
||||
|
||||
if len(response.Blocks["z"]) != 1 {
|
||||
t.Fatal("Decoding produced", len(response.Blocks["z"]), "partitions for topic 'z' where there was one.")
|
||||
}
|
||||
|
||||
if response.Blocks["z"][2].Err != ErrNoError {
|
||||
t.Fatal("Decoding produced invalid error for topic z partition 2.")
|
||||
}
|
||||
|
||||
if response.Blocks["z"][2].Timestamp != 1477920049286 {
|
||||
t.Fatal("Decoding produced invalid timestamp for topic z partition 2.", response.Blocks["z"][2].Timestamp)
|
||||
}
|
||||
|
||||
if response.Blocks["z"][2].Offset != 6 {
|
||||
t.Fatal("Decoding produced invalid offsets for topic z partition 2.")
|
||||
}
|
||||
}
|
265 vendor/github.com/Shopify/sarama/partitioner_test.go (generated, vendored)
@ -1,265 +0,0 @@
|
||||
package sarama
|
||||
|
||||
import (
|
||||
"crypto/rand"
|
||||
"hash/fnv"
|
||||
"log"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func assertPartitioningConsistent(t *testing.T, partitioner Partitioner, message *ProducerMessage, numPartitions int32) {
|
||||
choice, err := partitioner.Partition(message, numPartitions)
|
||||
if err != nil {
|
||||
t.Error(partitioner, err)
|
||||
}
|
||||
if choice < 0 || choice >= numPartitions {
|
||||
t.Error(partitioner, "returned partition", choice, "outside of range for", message)
|
||||
}
|
||||
for i := 1; i < 50; i++ {
|
||||
newChoice, err := partitioner.Partition(message, numPartitions)
|
||||
if err != nil {
|
||||
t.Error(partitioner, err)
|
||||
}
|
||||
if newChoice != choice {
|
||||
t.Error(partitioner, "returned partition", newChoice, "inconsistent with", choice, ".")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestRandomPartitioner(t *testing.T) {
|
||||
partitioner := NewRandomPartitioner("mytopic")
|
||||
|
||||
choice, err := partitioner.Partition(nil, 1)
|
||||
if err != nil {
|
||||
t.Error(partitioner, err)
|
||||
}
|
||||
if choice != 0 {
|
||||
t.Error("Returned non-zero partition when only one available.")
|
||||
}
|
||||
|
||||
for i := 1; i < 50; i++ {
|
||||
choice, err := partitioner.Partition(nil, 50)
|
||||
if err != nil {
|
||||
t.Error(partitioner, err)
|
||||
}
|
||||
if choice < 0 || choice >= 50 {
|
||||
t.Error("Returned partition", choice, "outside of range.")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestRoundRobinPartitioner(t *testing.T) {
|
||||
partitioner := NewRoundRobinPartitioner("mytopic")
|
||||
|
||||
choice, err := partitioner.Partition(nil, 1)
|
||||
if err != nil {
|
||||
t.Error(partitioner, err)
|
||||
}
|
||||
if choice != 0 {
|
||||
t.Error("Returned non-zero partition when only one available.")
|
||||
}
|
||||
|
||||
var i int32
|
||||
for i = 1; i < 50; i++ {
|
||||
choice, err := partitioner.Partition(nil, 7)
|
||||
if err != nil {
|
||||
t.Error(partitioner, err)
|
||||
}
|
||||
if choice != i%7 {
|
||||
t.Error("Returned partition", choice, "expecting", i%7)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestNewHashPartitionerWithHasher(t *testing.T) {
|
||||
// use the current default hasher fnv.New32a()
|
||||
partitioner := NewCustomHashPartitioner(fnv.New32a)("mytopic")
|
||||
|
||||
choice, err := partitioner.Partition(&ProducerMessage{}, 1)
|
||||
if err != nil {
|
||||
t.Error(partitioner, err)
|
||||
}
|
||||
if choice != 0 {
|
||||
t.Error("Returned non-zero partition when only one available.")
|
||||
}
|
||||
|
||||
for i := 1; i < 50; i++ {
|
||||
choice, err := partitioner.Partition(&ProducerMessage{}, 50)
|
||||
if err != nil {
|
||||
t.Error(partitioner, err)
|
||||
}
|
||||
if choice < 0 || choice >= 50 {
|
||||
t.Error("Returned partition", choice, "outside of range for nil key.")
|
||||
}
|
||||
}
|
||||
|
||||
buf := make([]byte, 256)
|
||||
for i := 1; i < 50; i++ {
|
||||
if _, err := rand.Read(buf); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
assertPartitioningConsistent(t, partitioner, &ProducerMessage{Key: ByteEncoder(buf)}, 50)
|
||||
}
|
||||
}
|
||||
|
||||
func TestHashPartitionerWithHasherMinInt32(t *testing.T) {
|
||||
// use the current default hasher fnv.New32a()
|
||||
partitioner := NewCustomHashPartitioner(fnv.New32a)("mytopic")
|
||||
|
||||
msg := ProducerMessage{}
|
||||
// "1468509572224" generates 2147483648 (uint32) result from Sum32 function
|
||||
// which is -2147483648 or int32's min value
|
||||
msg.Key = StringEncoder("1468509572224")
|
||||
|
||||
choice, err := partitioner.Partition(&msg, 50)
|
||||
if err != nil {
|
||||
t.Error(partitioner, err)
|
||||
}
|
||||
if choice < 0 || choice >= 50 {
|
||||
t.Error("Returned partition", choice, "outside of range for nil key.")
|
||||
}
|
||||
}
|
||||
|
||||
func TestHashPartitioner(t *testing.T) {
|
||||
partitioner := NewHashPartitioner("mytopic")
|
||||
|
||||
choice, err := partitioner.Partition(&ProducerMessage{}, 1)
|
||||
if err != nil {
|
||||
t.Error(partitioner, err)
|
||||
}
|
||||
if choice != 0 {
|
||||
t.Error("Returned non-zero partition when only one available.")
|
||||
}
|
||||
|
||||
for i := 1; i < 50; i++ {
|
||||
choice, err := partitioner.Partition(&ProducerMessage{}, 50)
|
||||
if err != nil {
|
||||
t.Error(partitioner, err)
|
||||
}
|
||||
if choice < 0 || choice >= 50 {
|
||||
t.Error("Returned partition", choice, "outside of range for nil key.")
|
||||
}
|
||||
}
|
||||
|
||||
buf := make([]byte, 256)
|
||||
for i := 1; i < 50; i++ {
|
||||
if _, err := rand.Read(buf); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
assertPartitioningConsistent(t, partitioner, &ProducerMessage{Key: ByteEncoder(buf)}, 50)
|
||||
}
|
||||
}
|
||||
|
||||
func TestHashPartitionerMinInt32(t *testing.T) {
|
||||
partitioner := NewHashPartitioner("mytopic")
|
||||
|
||||
msg := ProducerMessage{}
|
||||
// "1468509572224" generates 2147483648 (uint32) result from Sum32 function
|
||||
// which is -2147483648 or int32's min value
|
||||
msg.Key = StringEncoder("1468509572224")
|
||||
|
||||
choice, err := partitioner.Partition(&msg, 50)
|
||||
if err != nil {
|
||||
t.Error(partitioner, err)
|
||||
}
|
||||
if choice < 0 || choice >= 50 {
|
||||
t.Error("Returned partition", choice, "outside of range for nil key.")
|
||||
}
|
||||
}
|
||||
|
||||
func TestManualPartitioner(t *testing.T) {
|
||||
partitioner := NewManualPartitioner("mytopic")
|
||||
|
||||
choice, err := partitioner.Partition(&ProducerMessage{}, 1)
|
||||
if err != nil {
|
||||
t.Error(partitioner, err)
|
||||
}
|
||||
if choice != 0 {
|
||||
t.Error("Returned non-zero partition when only one available.")
|
||||
}
|
||||
|
||||
for i := int32(1); i < 50; i++ {
|
||||
choice, err := partitioner.Partition(&ProducerMessage{Partition: i}, 50)
|
||||
if err != nil {
|
||||
t.Error(partitioner, err)
|
||||
}
|
||||
if choice != i {
|
||||
t.Error("Returned partition not the same as the input partition")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// By default, Sarama uses the message's key to consistently assign a partition to
|
||||
// a message using hashing. If no key is set, a random partition will be chosen.
|
||||
// This example shows how you can partition messages randomly, even when a key is set,
|
||||
// by overriding Config.Producer.Partitioner.
|
||||
func ExamplePartitioner_random() {
|
||||
config := NewConfig()
|
||||
config.Producer.Partitioner = NewRandomPartitioner
|
||||
|
||||
producer, err := NewSyncProducer([]string{"localhost:9092"}, config)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
defer func() {
|
||||
if err := producer.Close(); err != nil {
|
||||
log.Println("Failed to close producer:", err)
|
||||
}
|
||||
}()
|
||||
|
||||
msg := &ProducerMessage{Topic: "test", Key: StringEncoder("key is set"), Value: StringEncoder("test")}
|
||||
partition, offset, err := producer.SendMessage(msg)
|
||||
if err != nil {
|
||||
log.Fatalln("Failed to produce message to kafka cluster.")
|
||||
}
|
||||
|
||||
log.Printf("Produced message to partition %d with offset %d", partition, offset)
|
||||
}
|
||||
|
||||
// This example shows how to assign partitions to your messages manually.
|
||||
func ExamplePartitioner_manual() {
|
||||
config := NewConfig()
|
||||
|
||||
// First, we tell the producer that we are going to partition ourselves.
|
||||
config.Producer.Partitioner = NewManualPartitioner
|
||||
|
||||
producer, err := NewSyncProducer([]string{"localhost:9092"}, config)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
defer func() {
|
||||
if err := producer.Close(); err != nil {
|
||||
log.Println("Failed to close producer:", err)
|
||||
}
|
||||
}()
|
||||
|
||||
// Now, we set the Partition field of the ProducerMessage struct.
|
||||
msg := &ProducerMessage{Topic: "test", Partition: 6, Value: StringEncoder("test")}
|
||||
|
||||
partition, offset, err := producer.SendMessage(msg)
|
||||
if err != nil {
|
||||
log.Fatalln("Failed to produce message to kafka cluster.")
|
||||
}
|
||||
|
||||
if partition != 6 {
|
||||
log.Fatal("Message should have been produced to partition 6!")
|
||||
}
|
||||
|
||||
log.Printf("Produced message to partition %d with offset %d", partition, offset)
|
||||
}
|
||||
|
||||
// This example shows how to set a different partitioner depending on the topic.
|
||||
func ExamplePartitioner_per_topic() {
|
||||
config := NewConfig()
|
||||
config.Producer.Partitioner = func(topic string) Partitioner {
|
||||
switch topic {
|
||||
case "access_log", "error_log":
|
||||
return NewRandomPartitioner(topic)
|
||||
|
||||
default:
|
||||
return NewHashPartitioner(topic)
|
||||
}
|
||||
}
|
||||
|
||||
// ...
|
||||
}
|
47 vendor/github.com/Shopify/sarama/produce_request_test.go (generated, vendored)
@ -1,47 +0,0 @@
|
||||
package sarama
|
||||
|
||||
import (
|
||||
"testing"
|
||||
)
|
||||
|
||||
var (
|
||||
produceRequestEmpty = []byte{
|
||||
0x00, 0x00,
|
||||
0x00, 0x00, 0x00, 0x00,
|
||||
0x00, 0x00, 0x00, 0x00}
|
||||
|
||||
produceRequestHeader = []byte{
|
||||
0x01, 0x23,
|
||||
0x00, 0x00, 0x04, 0x44,
|
||||
0x00, 0x00, 0x00, 0x00}
|
||||
|
||||
produceRequestOneMessage = []byte{
|
||||
0x01, 0x23,
|
||||
0x00, 0x00, 0x04, 0x44,
|
||||
0x00, 0x00, 0x00, 0x01,
|
||||
0x00, 0x05, 't', 'o', 'p', 'i', 'c',
|
||||
0x00, 0x00, 0x00, 0x01,
|
||||
0x00, 0x00, 0x00, 0xAD,
|
||||
0x00, 0x00, 0x00, 0x1C,
|
||||
// messageSet
|
||||
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
|
||||
0x00, 0x00, 0x00, 0x10,
|
||||
// message
|
||||
0x23, 0x96, 0x4a, 0xf7, // CRC
|
||||
0x00,
|
||||
0x00,
|
||||
0xFF, 0xFF, 0xFF, 0xFF,
|
||||
0x00, 0x00, 0x00, 0x02, 0x00, 0xEE}
|
||||
)
|
||||
|
||||
func TestProduceRequest(t *testing.T) {
|
||||
request := new(ProduceRequest)
|
||||
testRequest(t, "empty", request, produceRequestEmpty)
|
||||
|
||||
request.RequiredAcks = 0x123
|
||||
request.Timeout = 0x444
|
||||
testRequest(t, "header", request, produceRequestHeader)
|
||||
|
||||
request.AddMessage("topic", 0xAD, &Message{Codec: CompressionNone, Key: nil, Value: []byte{0x00, 0xEE}})
|
||||
testRequest(t, "one message", request, produceRequestOneMessage)
|
||||
}
|
67 vendor/github.com/Shopify/sarama/produce_response_test.go (generated, vendored)
@ -1,67 +0,0 @@
|
||||
package sarama
|
||||
|
||||
import "testing"
|
||||
|
||||
var (
|
||||
produceResponseNoBlocks = []byte{
|
||||
0x00, 0x00, 0x00, 0x00}
|
||||
|
||||
produceResponseManyBlocks = []byte{
|
||||
0x00, 0x00, 0x00, 0x02,
|
||||
|
||||
0x00, 0x03, 'f', 'o', 'o',
|
||||
0x00, 0x00, 0x00, 0x00,
|
||||
|
||||
0x00, 0x03, 'b', 'a', 'r',
|
||||
0x00, 0x00, 0x00, 0x02,
|
||||
|
||||
0x00, 0x00, 0x00, 0x01,
|
||||
0x00, 0x00,
|
||||
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFF,
|
||||
|
||||
0x00, 0x00, 0x00, 0x02,
|
||||
0x00, 0x02,
|
||||
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}
|
||||
)
|
||||
|
||||
func TestProduceResponse(t *testing.T) {
|
||||
response := ProduceResponse{}
|
||||
|
||||
testVersionDecodable(t, "no blocks", &response, produceResponseNoBlocks, 0)
|
||||
if len(response.Blocks) != 0 {
|
||||
t.Error("Decoding produced", len(response.Blocks), "topics where there were none")
|
||||
}
|
||||
|
||||
testVersionDecodable(t, "many blocks", &response, produceResponseManyBlocks, 0)
|
||||
if len(response.Blocks) != 2 {
|
||||
t.Error("Decoding produced", len(response.Blocks), "topics where there were 2")
|
||||
}
|
||||
if len(response.Blocks["foo"]) != 0 {
|
||||
t.Error("Decoding produced", len(response.Blocks["foo"]), "partitions for 'foo' where there were none")
|
||||
}
|
||||
if len(response.Blocks["bar"]) != 2 {
|
||||
t.Error("Decoding produced", len(response.Blocks["bar"]), "partitions for 'bar' where there were two")
|
||||
}
|
||||
block := response.GetBlock("bar", 1)
|
||||
if block == nil {
|
||||
t.Error("Decoding did not produce a block for bar/1")
|
||||
} else {
|
||||
if block.Err != ErrNoError {
|
||||
t.Error("Decoding failed for bar/1/Err, got:", int16(block.Err))
|
||||
}
|
||||
if block.Offset != 0xFF {
|
||||
t.Error("Decoding failed for bar/1/Offset, got:", block.Offset)
|
||||
}
|
||||
}
|
||||
block = response.GetBlock("bar", 2)
|
||||
if block == nil {
|
||||
t.Error("Decoding did not produce a block for bar/2")
|
||||
} else {
|
||||
if block.Err != ErrInvalidMessage {
|
||||
t.Error("Decoding failed for bar/2/Err, got:", int16(block.Err))
|
||||
}
|
||||
if block.Offset != 0 {
|
||||
t.Error("Decoding failed for bar/2/Offset, got:", block.Offset)
|
||||
}
|
||||
}
|
||||
}
|
185 vendor/github.com/Shopify/sarama/produce_set_test.go (generated, vendored)
@ -1,185 +0,0 @@
|
||||
package sarama
|
||||
|
||||
import (
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
func makeProduceSet() (*asyncProducer, *produceSet) {
|
||||
parent := &asyncProducer{
|
||||
conf: NewConfig(),
|
||||
}
|
||||
return parent, newProduceSet(parent)
|
||||
}
|
||||
|
||||
func safeAddMessage(t *testing.T, ps *produceSet, msg *ProducerMessage) {
|
||||
if err := ps.add(msg); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestProduceSetInitial(t *testing.T) {
|
||||
_, ps := makeProduceSet()
|
||||
|
||||
if !ps.empty() {
|
||||
t.Error("New produceSet should be empty")
|
||||
}
|
||||
|
||||
if ps.readyToFlush() {
|
||||
t.Error("Empty produceSet must never be ready to flush")
|
||||
}
|
||||
}
|
||||
|
||||
func TestProduceSetAddingMessages(t *testing.T) {
|
||||
parent, ps := makeProduceSet()
|
||||
parent.conf.Producer.Flush.MaxMessages = 1000
|
||||
|
||||
msg := &ProducerMessage{Key: StringEncoder(TestMessage), Value: StringEncoder(TestMessage)}
|
||||
safeAddMessage(t, ps, msg)
|
||||
|
||||
if ps.empty() {
|
||||
t.Error("set shouldn't be empty when a message is added")
|
||||
}
|
||||
|
||||
if !ps.readyToFlush() {
|
||||
t.Error("by default set should be ready to flush when any message is in place")
|
||||
}
|
||||
|
||||
for i := 0; i < 999; i++ {
|
||||
if ps.wouldOverflow(msg) {
|
||||
t.Error("set shouldn't fill up after only", i+1, "messages")
|
||||
}
|
||||
safeAddMessage(t, ps, msg)
|
||||
}
|
||||
|
||||
if !ps.wouldOverflow(msg) {
|
||||
t.Error("set should be full after 1000 messages")
|
||||
}
|
||||
}
|
||||
|
||||
func TestProduceSetPartitionTracking(t *testing.T) {
|
||||
_, ps := makeProduceSet()
|
||||
|
||||
m1 := &ProducerMessage{Topic: "t1", Partition: 0}
|
||||
m2 := &ProducerMessage{Topic: "t1", Partition: 1}
|
||||
m3 := &ProducerMessage{Topic: "t2", Partition: 0}
|
||||
safeAddMessage(t, ps, m1)
|
||||
safeAddMessage(t, ps, m2)
|
||||
safeAddMessage(t, ps, m3)
|
||||
|
||||
seenT1P0 := false
|
||||
seenT1P1 := false
|
||||
seenT2P0 := false
|
||||
|
||||
ps.eachPartition(func(topic string, partition int32, msgs []*ProducerMessage) {
|
||||
if len(msgs) != 1 {
|
||||
t.Error("Wrong message count")
|
||||
}
|
||||
|
||||
if topic == "t1" && partition == 0 {
|
||||
seenT1P0 = true
|
||||
} else if topic == "t1" && partition == 1 {
|
||||
seenT1P1 = true
|
||||
} else if topic == "t2" && partition == 0 {
|
||||
seenT2P0 = true
|
||||
}
|
||||
})
|
||||
|
||||
if !seenT1P0 {
|
||||
t.Error("Didn't see t1p0")
|
||||
}
|
||||
if !seenT1P1 {
|
||||
t.Error("Didn't see t1p1")
|
||||
}
|
||||
if !seenT2P0 {
|
||||
t.Error("Didn't see t2p0")
|
||||
}
|
||||
|
||||
if len(ps.dropPartition("t1", 1)) != 1 {
|
||||
t.Error("Got wrong messages back from dropping partition")
|
||||
}
|
||||
|
||||
if ps.bufferCount != 2 {
|
||||
t.Error("Incorrect buffer count after dropping partition")
|
||||
}
|
||||
}
|
||||
|
||||
func TestProduceSetRequestBuilding(t *testing.T) {
|
||||
parent, ps := makeProduceSet()
|
||||
parent.conf.Producer.RequiredAcks = WaitForAll
|
||||
parent.conf.Producer.Timeout = 10 * time.Second
|
||||
|
||||
msg := &ProducerMessage{
|
||||
Topic: "t1",
|
||||
Partition: 0,
|
||||
Key: StringEncoder(TestMessage),
|
||||
Value: StringEncoder(TestMessage),
|
||||
}
|
||||
for i := 0; i < 10; i++ {
|
||||
safeAddMessage(t, ps, msg)
|
||||
}
|
||||
msg.Partition = 1
|
||||
for i := 0; i < 10; i++ {
|
||||
safeAddMessage(t, ps, msg)
|
||||
}
|
||||
msg.Topic = "t2"
|
||||
for i := 0; i < 10; i++ {
|
||||
safeAddMessage(t, ps, msg)
|
||||
}
|
||||
|
||||
req := ps.buildRequest()
|
||||
|
||||
if req.RequiredAcks != WaitForAll {
|
||||
t.Error("RequiredAcks not set properly")
|
||||
}
|
||||
|
||||
if req.Timeout != 10000 {
|
||||
t.Error("Timeout not set properly")
|
||||
}
|
||||
|
||||
if len(req.msgSets) != 2 {
|
||||
t.Error("Wrong number of topics in request")
|
||||
}
|
||||
}
|
||||
|
||||
func TestProduceSetCompressedRequestBuilding(t *testing.T) {
|
||||
parent, ps := makeProduceSet()
|
||||
parent.conf.Producer.RequiredAcks = WaitForAll
|
||||
parent.conf.Producer.Timeout = 10 * time.Second
|
||||
parent.conf.Producer.Compression = CompressionGZIP
|
||||
parent.conf.Version = V0_10_0_0
|
||||
|
||||
msg := &ProducerMessage{
|
||||
Topic: "t1",
|
||||
Partition: 0,
|
||||
Key: StringEncoder(TestMessage),
|
||||
Value: StringEncoder(TestMessage),
|
||||
Timestamp: time.Now(),
|
||||
}
|
||||
for i := 0; i < 10; i++ {
|
||||
safeAddMessage(t, ps, msg)
|
||||
}
|
||||
|
||||
req := ps.buildRequest()
|
||||
|
||||
if req.Version != 2 {
|
||||
t.Error("Wrong request version")
|
||||
}
|
||||
|
||||
for _, msgBlock := range req.msgSets["t1"][0].Messages {
|
||||
msg := msgBlock.Msg
|
||||
err := msg.decodeSet()
|
||||
if err != nil {
|
||||
t.Error("Failed to decode set from payload")
|
||||
}
|
||||
for _, compMsgBlock := range msg.Set.Messages {
|
||||
compMsg := compMsgBlock.Msg
|
||||
if compMsg.Version != 1 {
|
||||
t.Error("Wrong compressed message version")
|
||||
}
|
||||
}
|
||||
if msg.Version != 1 {
|
||||
t.Error("Wrong compressed parent message version")
|
||||
}
|
||||
}
|
||||
}
|
98 vendor/github.com/Shopify/sarama/request_test.go (generated, vendored)
@ -1,98 +0,0 @@
|
||||
package sarama
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"reflect"
|
||||
"testing"
|
||||
|
||||
"github.com/davecgh/go-spew/spew"
|
||||
)
|
||||
|
||||
type testRequestBody struct {
|
||||
}
|
||||
|
||||
func (s *testRequestBody) key() int16 {
|
||||
return 0x666
|
||||
}
|
||||
|
||||
func (s *testRequestBody) version() int16 {
|
||||
return 0xD2
|
||||
}
|
||||
|
||||
func (s *testRequestBody) encode(pe packetEncoder) error {
|
||||
return pe.putString("abc")
|
||||
}
|
||||
|
||||
// not specific to request tests, just helper functions for testing structures that
|
||||
// implement the encoder or decoder interfaces that needed somewhere to live
|
||||
|
||||
func testEncodable(t *testing.T, name string, in encoder, expect []byte) {
|
||||
packet, err := encode(in, nil)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
} else if !bytes.Equal(packet, expect) {
|
||||
t.Error("Encoding", name, "failed\ngot ", packet, "\nwant", expect)
|
||||
}
|
||||
}
|
||||
|
||||
func testDecodable(t *testing.T, name string, out decoder, in []byte) {
|
||||
err := decode(in, out)
|
||||
if err != nil {
|
||||
t.Error("Decoding", name, "failed:", err)
|
||||
}
|
||||
}
|
||||
|
||||
func testVersionDecodable(t *testing.T, name string, out versionedDecoder, in []byte, version int16) {
|
||||
err := versionedDecode(in, out, version)
|
||||
if err != nil {
|
||||
t.Error("Decoding", name, "version", version, "failed:", err)
|
||||
}
|
||||
}
|
||||
|
||||
func testRequest(t *testing.T, name string, rb protocolBody, expected []byte) {
|
||||
packet := testRequestEncode(t, name, rb, expected)
|
||||
testRequestDecode(t, name, rb, packet)
|
||||
}
|
||||
|
||||
func testRequestEncode(t *testing.T, name string, rb protocolBody, expected []byte) []byte {
|
||||
req := &request{correlationID: 123, clientID: "foo", body: rb}
|
||||
packet, err := encode(req, nil)
|
||||
headerSize := 14 + len("foo")
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
} else if !bytes.Equal(packet[headerSize:], expected) {
|
||||
t.Error("Encoding", name, "failed\ngot ", packet[headerSize:], "\nwant", expected)
|
||||
}
|
||||
return packet
|
||||
}
|
||||
|
||||
func testRequestDecode(t *testing.T, name string, rb protocolBody, packet []byte) {
|
||||
decoded, n, err := decodeRequest(bytes.NewReader(packet))
|
||||
if err != nil {
|
||||
t.Error("Failed to decode request", err)
|
||||
} else if decoded.correlationID != 123 || decoded.clientID != "foo" {
|
||||
t.Errorf("Decoded header %q is not valid: %+v", name, decoded)
|
||||
} else if !reflect.DeepEqual(rb, decoded.body) {
|
||||
t.Error(spew.Sprintf("Decoded request %q does not match the encoded one\nencoded: %+v\ndecoded: %+v", name, rb, decoded.body))
|
||||
} else if n != len(packet) {
|
||||
t.Errorf("Decoded request %q bytes: %d does not match the encoded one: %d\n", name, n, len(packet))
|
||||
}
|
||||
}
|
||||
|
||||
func testResponse(t *testing.T, name string, res protocolBody, expected []byte) {
|
||||
encoded, err := encode(res, nil)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
} else if expected != nil && !bytes.Equal(encoded, expected) {
|
||||
t.Error("Encoding", name, "failed\ngot ", encoded, "\nwant", expected)
|
||||
}
|
||||
|
||||
decoded := reflect.New(reflect.TypeOf(res).Elem()).Interface().(versionedDecoder)
|
||||
if err := versionedDecode(encoded, decoded, res.version()); err != nil {
|
||||
t.Error("Decoding", name, "failed:", err)
|
||||
}
|
||||
|
||||
if !reflect.DeepEqual(decoded, res) {
|
||||
t.Errorf("Decoded response does not match the encoded one\nencoded: %#v\ndecoded: %#v", res, decoded)
|
||||
}
|
||||
}
|
21 vendor/github.com/Shopify/sarama/response_header_test.go (generated, vendored)
@ -1,21 +0,0 @@
|
||||
package sarama
|
||||
|
||||
import "testing"
|
||||
|
||||
var (
|
||||
responseHeaderBytes = []byte{
|
||||
0x00, 0x00, 0x0f, 0x00,
|
||||
0x0a, 0xbb, 0xcc, 0xff}
|
||||
)
|
||||
|
||||
func TestResponseHeader(t *testing.T) {
|
||||
header := responseHeader{}
|
||||
|
||||
testDecodable(t, "response header", &header, responseHeaderBytes)
|
||||
if header.length != 0xf00 {
|
||||
t.Error("Decoding header length failed, got", header.length)
|
||||
}
|
||||
if header.correlationID != 0x0abbccff {
|
||||
t.Error("Decoding header correlation id failed, got", header.correlationID)
|
||||
}
|
||||
}
|
17 vendor/github.com/Shopify/sarama/sasl_handshake_request_test.go (generated, vendored)
@ -1,17 +0,0 @@
|
||||
package sarama
|
||||
|
||||
import "testing"
|
||||
|
||||
var (
|
||||
baseSaslRequest = []byte{
|
||||
0, 3, 'f', 'o', 'o', // Mechanism
|
||||
}
|
||||
)
|
||||
|
||||
func TestSaslHandshakeRequest(t *testing.T) {
|
||||
var request *SaslHandshakeRequest
|
||||
|
||||
request = new(SaslHandshakeRequest)
|
||||
request.Mechanism = "foo"
|
||||
testRequest(t, "basic", request, baseSaslRequest)
|
||||
}
|
24 vendor/github.com/Shopify/sarama/sasl_handshake_response_test.go (generated, vendored)
@ -1,24 +0,0 @@
|
||||
package sarama
|
||||
|
||||
import "testing"
|
||||
|
||||
var (
|
||||
saslHandshakeResponse = []byte{
|
||||
0x00, 0x00,
|
||||
0x00, 0x00, 0x00, 0x01,
|
||||
0x00, 0x03, 'f', 'o', 'o',
|
||||
}
|
||||
)
|
||||
|
||||
func TestSaslHandshakeResponse(t *testing.T) {
|
||||
var response *SaslHandshakeResponse
|
||||
|
||||
response = new(SaslHandshakeResponse)
|
||||
testVersionDecodable(t, "no error", response, saslHandshakeResponse, 0)
|
||||
if response.Err != ErrNoError {
|
||||
t.Error("Decoding error failed: no error expected but found", response.Err)
|
||||
}
|
||||
if response.EnabledMechanisms[0] != "foo" {
|
||||
t.Error("Decoding error failed: expected 'foo' but found", response.EnabledMechanisms)
|
||||
}
|
||||
}
|
38 vendor/github.com/Shopify/sarama/sync_group_request_test.go (generated, vendored)
@ -1,38 +0,0 @@
|
||||
package sarama
|
||||
|
||||
import "testing"
|
||||
|
||||
var (
|
||||
emptySyncGroupRequest = []byte{
|
||||
0, 3, 'f', 'o', 'o', // Group ID
|
||||
0x00, 0x01, 0x02, 0x03, // Generation ID
|
||||
0, 3, 'b', 'a', 'z', // Member ID
|
||||
0, 0, 0, 0, // no assignments
|
||||
}
|
||||
|
||||
populatedSyncGroupRequest = []byte{
|
||||
0, 3, 'f', 'o', 'o', // Group ID
|
||||
0x00, 0x01, 0x02, 0x03, // Generation ID
|
||||
0, 3, 'b', 'a', 'z', // Member ID
|
||||
0, 0, 0, 1, // one assignment
|
||||
0, 3, 'b', 'a', 'z', // Member ID
|
||||
0, 0, 0, 3, 'f', 'o', 'o', // Member assignment
|
||||
}
|
||||
)
|
||||
|
||||
func TestSyncGroupRequest(t *testing.T) {
|
||||
var request *SyncGroupRequest
|
||||
|
||||
request = new(SyncGroupRequest)
|
||||
request.GroupId = "foo"
|
||||
request.GenerationId = 66051
|
||||
request.MemberId = "baz"
|
||||
testRequest(t, "empty", request, emptySyncGroupRequest)
|
||||
|
||||
request = new(SyncGroupRequest)
|
||||
request.GroupId = "foo"
|
||||
request.GenerationId = 66051
|
||||
request.MemberId = "baz"
|
||||
request.AddGroupAssignment("baz", []byte("foo"))
|
||||
testRequest(t, "populated", request, populatedSyncGroupRequest)
|
||||
}
|
40 vendor/github.com/Shopify/sarama/sync_group_response_test.go (generated, vendored)
@ -1,40 +0,0 @@
|
||||
package sarama
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"testing"
|
||||
)
|
||||
|
||||
var (
|
||||
syncGroupResponseNoError = []byte{
|
||||
0x00, 0x00, // No error
|
||||
0, 0, 0, 3, 0x01, 0x02, 0x03, // Member assignment data
|
||||
}
|
||||
|
||||
syncGroupResponseWithError = []byte{
|
||||
0, 27, // ErrRebalanceInProgress
|
||||
0, 0, 0, 0, // No member assignment data
|
||||
}
|
||||
)
|
||||
|
||||
func TestSyncGroupResponse(t *testing.T) {
|
||||
var response *SyncGroupResponse
|
||||
|
||||
response = new(SyncGroupResponse)
|
||||
testVersionDecodable(t, "no error", response, syncGroupResponseNoError, 0)
|
||||
if response.Err != ErrNoError {
|
||||
t.Error("Decoding Err failed: no error expected but found", response.Err)
|
||||
}
|
||||
if !reflect.DeepEqual(response.MemberAssignment, []byte{0x01, 0x02, 0x03}) {
|
||||
t.Error("Decoding MemberAssignment failed, found:", response.MemberAssignment)
|
||||
}
|
||||
|
||||
response = new(SyncGroupResponse)
|
||||
testVersionDecodable(t, "no error", response, syncGroupResponseWithError, 0)
|
||||
if response.Err != ErrRebalanceInProgress {
|
||||
t.Error("Decoding Err failed: ErrRebalanceInProgress expected but found", response.Err)
|
||||
}
|
||||
if !reflect.DeepEqual(response.MemberAssignment, []byte{}) {
|
||||
t.Error("Decoding MemberAssignment failed, found:", response.MemberAssignment)
|
||||
}
|
||||
}
|
199 vendor/github.com/Shopify/sarama/sync_producer_test.go (generated, vendored)
@ -1,199 +0,0 @@
|
||||
package sarama
|
||||
|
||||
import (
|
||||
"log"
|
||||
"sync"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestSyncProducer(t *testing.T) {
|
||||
seedBroker := NewMockBroker(t, 1)
|
||||
leader := NewMockBroker(t, 2)
|
||||
|
||||
metadataResponse := new(MetadataResponse)
|
||||
metadataResponse.AddBroker(leader.Addr(), leader.BrokerID())
|
||||
metadataResponse.AddTopicPartition("my_topic", 0, leader.BrokerID(), nil, nil, ErrNoError)
|
||||
seedBroker.Returns(metadataResponse)
|
||||
|
||||
prodSuccess := new(ProduceResponse)
|
||||
prodSuccess.AddTopicPartition("my_topic", 0, ErrNoError)
|
||||
for i := 0; i < 10; i++ {
|
||||
leader.Returns(prodSuccess)
|
||||
}
|
||||
|
||||
producer, err := NewSyncProducer([]string{seedBroker.Addr()}, nil)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
for i := 0; i < 10; i++ {
|
||||
msg := &ProducerMessage{
|
||||
Topic: "my_topic",
|
||||
Value: StringEncoder(TestMessage),
|
||||
Metadata: "test",
|
||||
}
|
||||
|
||||
partition, offset, err := producer.SendMessage(msg)
|
||||
|
||||
if partition != 0 || msg.Partition != partition {
|
||||
t.Error("Unexpected partition")
|
||||
}
|
||||
if offset != 0 || msg.Offset != offset {
|
||||
t.Error("Unexpected offset")
|
||||
}
|
||||
if str, ok := msg.Metadata.(string); !ok || str != "test" {
|
||||
t.Error("Unexpected metadata")
|
||||
}
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
}
|
||||
|
||||
safeClose(t, producer)
|
||||
leader.Close()
|
||||
seedBroker.Close()
|
||||
}
|
||||
|
||||
func TestSyncProducerBatch(t *testing.T) {
|
||||
seedBroker := NewMockBroker(t, 1)
|
||||
leader := NewMockBroker(t, 2)
|
||||
|
||||
metadataResponse := new(MetadataResponse)
|
||||
metadataResponse.AddBroker(leader.Addr(), leader.BrokerID())
|
||||
metadataResponse.AddTopicPartition("my_topic", 0, leader.BrokerID(), nil, nil, ErrNoError)
|
||||
seedBroker.Returns(metadataResponse)
|
||||
|
||||
prodSuccess := new(ProduceResponse)
|
||||
prodSuccess.AddTopicPartition("my_topic", 0, ErrNoError)
|
||||
leader.Returns(prodSuccess)
|
||||
|
||||
config := NewConfig()
|
||||
config.Producer.Flush.Messages = 3
|
||||
config.Producer.Return.Successes = true
|
||||
producer, err := NewSyncProducer([]string{seedBroker.Addr()}, config)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
err = producer.SendMessages([]*ProducerMessage{
|
||||
{
|
||||
Topic: "my_topic",
|
||||
Value: StringEncoder(TestMessage),
|
||||
Metadata: "test",
|
||||
},
|
||||
{
|
||||
Topic: "my_topic",
|
||||
Value: StringEncoder(TestMessage),
|
||||
Metadata: "test",
|
||||
},
|
||||
{
|
||||
Topic: "my_topic",
|
||||
Value: StringEncoder(TestMessage),
|
||||
Metadata: "test",
|
||||
},
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
safeClose(t, producer)
|
||||
leader.Close()
|
||||
seedBroker.Close()
|
||||
}
|
||||
|
||||
func TestConcurrentSyncProducer(t *testing.T) {
|
||||
seedBroker := NewMockBroker(t, 1)
|
||||
leader := NewMockBroker(t, 2)
|
||||
|
||||
metadataResponse := new(MetadataResponse)
|
||||
metadataResponse.AddBroker(leader.Addr(), leader.BrokerID())
|
||||
metadataResponse.AddTopicPartition("my_topic", 0, leader.BrokerID(), nil, nil, ErrNoError)
|
||||
seedBroker.Returns(metadataResponse)
|
||||
|
||||
prodSuccess := new(ProduceResponse)
|
||||
prodSuccess.AddTopicPartition("my_topic", 0, ErrNoError)
|
||||
leader.Returns(prodSuccess)
|
||||
|
||||
config := NewConfig()
|
||||
config.Producer.Flush.Messages = 100
|
||||
config.Producer.Return.Successes = true
|
||||
producer, err := NewSyncProducer([]string{seedBroker.Addr()}, config)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
wg := sync.WaitGroup{}
|
||||
|
||||
for i := 0; i < 100; i++ {
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
msg := &ProducerMessage{Topic: "my_topic", Value: StringEncoder(TestMessage)}
|
||||
partition, _, err := producer.SendMessage(msg)
|
||||
if partition != 0 {
|
||||
t.Error("Unexpected partition")
|
||||
}
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
wg.Done()
|
||||
}()
|
||||
}
|
||||
wg.Wait()
|
||||
|
||||
safeClose(t, producer)
|
||||
leader.Close()
|
||||
seedBroker.Close()
|
||||
}
|
||||
|
||||
func TestSyncProducerToNonExistingTopic(t *testing.T) {
|
||||
broker := NewMockBroker(t, 1)
|
||||
|
||||
metadataResponse := new(MetadataResponse)
|
||||
metadataResponse.AddBroker(broker.Addr(), broker.BrokerID())
|
||||
metadataResponse.AddTopicPartition("my_topic", 0, broker.BrokerID(), nil, nil, ErrNoError)
|
||||
broker.Returns(metadataResponse)
|
||||
|
||||
config := NewConfig()
|
||||
config.Metadata.Retry.Max = 0
|
||||
config.Producer.Retry.Max = 0
|
||||
config.Producer.Return.Successes = true
|
||||
|
||||
producer, err := NewSyncProducer([]string{broker.Addr()}, config)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
metadataResponse = new(MetadataResponse)
|
||||
metadataResponse.AddTopic("unknown", ErrUnknownTopicOrPartition)
|
||||
broker.Returns(metadataResponse)
|
||||
|
||||
_, _, err = producer.SendMessage(&ProducerMessage{Topic: "unknown"})
|
||||
if err != ErrUnknownTopicOrPartition {
|
||||
t.Error("Uxpected ErrUnknownTopicOrPartition, found:", err)
|
||||
}
|
||||
|
||||
safeClose(t, producer)
|
||||
broker.Close()
|
||||
}
|
||||
|
||||
// This example shows the basic usage pattern of the SyncProducer.
|
||||
func ExampleSyncProducer() {
|
||||
producer, err := NewSyncProducer([]string{"localhost:9092"}, nil)
|
||||
if err != nil {
|
||||
log.Fatalln(err)
|
||||
}
|
||||
defer func() {
|
||||
if err := producer.Close(); err != nil {
|
||||
log.Fatalln(err)
|
||||
}
|
||||
}()
|
||||
|
||||
msg := &ProducerMessage{Topic: "my_topic", Value: StringEncoder("testing 123")}
|
||||
partition, offset, err := producer.SendMessage(msg)
|
||||
if err != nil {
|
||||
log.Printf("FAILED to send message: %s\n", err)
|
||||
} else {
|
||||
log.Printf("> message sent to partition %d at offset %d\n", partition, offset)
|
||||
}
|
||||
}
|
10 vendor/github.com/Shopify/sarama/tools/README.md (generated, vendored)
@ -1,10 +0,0 @@
|
||||
# Sarama tools
|
||||
|
||||
This folder contains applications that are useful for exploring and instrumenting your Kafka cluster.
|
||||
Some of these tools mirror tools that ship with Kafka, but they do not require installing the JVM to function.
|
||||
|
||||
- [kafka-console-producer](./kafka-console-producer): a command line tool to produce a single message to your Kafka cluster.
|
||||
- [kafka-console-partitionconsumer](./kafka-console-partitionconsumer): (deprecated) a command line tool to consume a single partition of a topic on your Kafka cluster.
|
||||
- [kafka-console-consumer](./kafka-console-consumer): a command line tool to consume arbitrary partitions of a topic on your Kafka cluster.
|
||||
|
||||
To install all tools, run `go get github.com/Shopify/sarama/tools/...`
|
2 vendor/github.com/Shopify/sarama/tools/kafka-console-consumer/.gitignore (generated, vendored)
@ -1,2 +0,0 @@
|
||||
kafka-console-consumer
|
||||
kafka-console-consumer.test
|
29 vendor/github.com/Shopify/sarama/tools/kafka-console-consumer/README.md (generated, vendored)
@ -1,29 +0,0 @@
|
||||
# kafka-console-consumer
|
||||
|
||||
A simple command line tool to consume partitions of a topic and print the
|
||||
messages on the standard output.
|
||||
|
||||
### Installation
|
||||
|
||||
go get github.com/Shopify/sarama/tools/kafka-console-consumer
|
||||
|
||||
### Usage
|
||||
|
||||
# Minimum invocation
|
||||
kafka-console-consumer -topic=test -brokers=kafka1:9092
|
||||
|
||||
# It will pick up a KAFKA_PEERS environment variable
|
||||
export KAFKA_PEERS=kafka1:9092,kafka2:9092,kafka3:9092
|
||||
kafka-console-consumer -topic=test
|
||||
|
||||
# You can specify the offset you want to start at. It can be either
|
||||
# `oldest` or `newest`. The default is `newest`.
|
||||
kafka-console-consumer -topic=test -offset=oldest
|
||||
kafka-console-consumer -topic=test -offset=newest
|
||||
|
||||
# You can specify the partition(s) you want to consume as a comma-separated
|
||||
# list. The default is `all`.
|
||||
kafka-console-consumer -topic=test -partitions=1,2,3
|
||||
|
||||
# Display all command line options
|
||||
kafka-console-consumer -help
|
145 vendor/github.com/Shopify/sarama/tools/kafka-console-consumer/kafka-console-consumer.go (generated, vendored)
@ -1,145 +0,0 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"flag"
|
||||
"fmt"
|
||||
"log"
|
||||
"os"
|
||||
"os/signal"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
"github.com/Shopify/sarama"
|
||||
)
|
||||
|
||||
var (
|
||||
brokerList = flag.String("brokers", os.Getenv("KAFKA_PEERS"), "The comma separated list of brokers in the Kafka cluster")
|
||||
topic = flag.String("topic", "", "REQUIRED: the topic to consume")
|
||||
partitions = flag.String("partitions", "all", "The partitions to consume, can be 'all' or comma-separated numbers")
|
||||
offset = flag.String("offset", "newest", "The offset to start with. Can be `oldest`, `newest`")
|
||||
verbose = flag.Bool("verbose", false, "Whether to turn on sarama logging")
|
||||
bufferSize = flag.Int("buffer-size", 256, "The buffer size of the message channel.")
|
||||
|
||||
logger = log.New(os.Stderr, "", log.LstdFlags)
|
||||
)
|
||||
|
||||
func main() {
|
||||
flag.Parse()
|
||||
|
||||
if *brokerList == "" {
|
||||
printUsageErrorAndExit("You have to provide -brokers as a comma-separated list, or set the KAFKA_PEERS environment variable.")
|
||||
}
|
||||
|
||||
if *topic == "" {
|
||||
printUsageErrorAndExit("-topic is required")
|
||||
}
|
||||
|
||||
if *verbose {
|
||||
sarama.Logger = logger
|
||||
}
|
||||
|
||||
var initialOffset int64
|
||||
switch *offset {
|
||||
case "oldest":
|
||||
initialOffset = sarama.OffsetOldest
|
||||
case "newest":
|
||||
initialOffset = sarama.OffsetNewest
|
||||
default:
|
||||
printUsageErrorAndExit("-offset should be `oldest` or `newest`")
|
||||
}
|
||||
|
||||
c, err := sarama.NewConsumer(strings.Split(*brokerList, ","), nil)
|
||||
if err != nil {
|
||||
printErrorAndExit(69, "Failed to start consumer: %s", err)
|
||||
}
|
||||
|
||||
partitionList, err := getPartitions(c)
|
||||
if err != nil {
|
||||
printErrorAndExit(69, "Failed to get the list of partitions: %s", err)
|
||||
}
|
||||
|
||||
var (
|
||||
messages = make(chan *sarama.ConsumerMessage, *bufferSize)
|
||||
closing = make(chan struct{})
|
||||
wg sync.WaitGroup
|
||||
)
|
||||
|
||||
go func() {
|
||||
signals := make(chan os.Signal, 1)
|
||||
signal.Notify(signals, os.Kill, os.Interrupt)
|
||||
<-signals
|
||||
logger.Println("Initiating shutdown of consumer...")
|
||||
close(closing)
|
||||
}()
|
||||
|
||||
for _, partition := range partitionList {
|
||||
pc, err := c.ConsumePartition(*topic, partition, initialOffset)
|
||||
if err != nil {
|
||||
printErrorAndExit(69, "Failed to start consumer for partition %d: %s", partition, err)
|
||||
}
|
||||
|
||||
go func(pc sarama.PartitionConsumer) {
|
||||
<-closing
|
||||
pc.AsyncClose()
|
||||
}(pc)
|
||||
|
||||
wg.Add(1)
|
||||
go func(pc sarama.PartitionConsumer) {
|
||||
defer wg.Done()
|
||||
for message := range pc.Messages() {
|
||||
messages <- message
|
||||
}
|
||||
}(pc)
|
||||
}
|
||||
|
||||
go func() {
|
||||
for msg := range messages {
|
||||
fmt.Printf("Partition:\t%d\n", msg.Partition)
|
||||
fmt.Printf("Offset:\t%d\n", msg.Offset)
|
||||
fmt.Printf("Key:\t%s\n", string(msg.Key))
|
||||
fmt.Printf("Value:\t%s\n", string(msg.Value))
|
||||
fmt.Println()
|
||||
}
|
||||
}()
|
||||
|
||||
wg.Wait()
|
||||
logger.Println("Done consuming topic", *topic)
|
||||
close(messages)
|
||||
|
||||
if err := c.Close(); err != nil {
|
||||
logger.Println("Failed to close consumer: ", err)
|
||||
}
|
||||
}
|
||||
|
||||
func getPartitions(c sarama.Consumer) ([]int32, error) {
|
||||
if *partitions == "all" {
|
||||
return c.Partitions(*topic)
|
||||
}
|
||||
|
||||
tmp := strings.Split(*partitions, ",")
|
||||
var pList []int32
|
||||
for i := range tmp {
|
||||
val, err := strconv.ParseInt(tmp[i], 10, 32)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
pList = append(pList, int32(val))
|
||||
}
|
||||
|
||||
return pList, nil
|
||||
}
|
||||
|
||||
func printErrorAndExit(code int, format string, values ...interface{}) {
|
||||
fmt.Fprintf(os.Stderr, "ERROR: %s\n", fmt.Sprintf(format, values...))
|
||||
fmt.Fprintln(os.Stderr)
|
||||
os.Exit(code)
|
||||
}
|
||||
|
||||
func printUsageErrorAndExit(format string, values ...interface{}) {
|
||||
fmt.Fprintf(os.Stderr, "ERROR: %s\n", fmt.Sprintf(format, values...))
|
||||
fmt.Fprintln(os.Stderr)
|
||||
fmt.Fprintln(os.Stderr, "Available command line options:")
|
||||
flag.PrintDefaults()
|
||||
os.Exit(64)
|
||||
}
|
2 vendor/github.com/Shopify/sarama/tools/kafka-console-partitionconsumer/.gitignore (generated, vendored)
@ -1,2 +0,0 @@
|
||||
kafka-console-partitionconsumer
|
||||
kafka-console-partitionconsumer.test
|
28 vendor/github.com/Shopify/sarama/tools/kafka-console-partitionconsumer/README.md (generated, vendored)
@ -1,28 +0,0 @@
|
||||
# kafka-console-partitionconsumer
|
||||
|
||||
NOTE: this tool is deprecated in favour of the more general and more powerful
|
||||
`kafka-console-consumer`.
|
||||
|
||||
A simple command line tool to consume a partition of a topic and print the messages
|
||||
on the standard output.
|
||||
|
||||
### Installation
|
||||
|
||||
go get github.com/Shopify/sarama/tools/kafka-console-partitionconsumer
|
||||
|
||||
### Usage
|
||||
|
||||
# Minimum invocation
|
||||
kafka-console-partitionconsumer -topic=test -partition=4 -brokers=kafka1:9092
|
||||
|
||||
# It will pick up a KAFKA_PEERS environment variable
|
||||
export KAFKA_PEERS=kafka1:9092,kafka2:9092,kafka3:9092
|
||||
kafka-console-partitionconsumer -topic=test -partition=4
|
||||
|
||||
# You can specify the offset you want to start at. It can be either
|
||||
# `oldest`, `newest`, or a specific offset number
|
||||
kafka-console-partitionconsumer -topic=test -partition=3 -offset=oldest
|
||||
kafka-console-partitionconsumer -topic=test -partition=2 -offset=1337
|
||||
|
||||
# Display all command line options
|
||||
kafka-console-partitionconsumer -help
|
@ -1,102 +0,0 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"flag"
|
||||
"fmt"
|
||||
"log"
|
||||
"os"
|
||||
"os/signal"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/Shopify/sarama"
|
||||
)
|
||||
|
||||
var (
|
||||
brokerList = flag.String("brokers", os.Getenv("KAFKA_PEERS"), "The comma separated list of brokers in the Kafka cluster")
|
||||
topic = flag.String("topic", "", "REQUIRED: the topic to consume")
|
||||
partition = flag.Int("partition", -1, "REQUIRED: the partition to consume")
|
||||
offset = flag.String("offset", "newest", "The offset to start with. Can be `oldest`, `newest`, or an actual offset")
|
||||
verbose = flag.Bool("verbose", false, "Whether to turn on sarama logging")
|
||||
|
||||
logger = log.New(os.Stderr, "", log.LstdFlags)
|
||||
)
|
||||
|
||||
func main() {
|
||||
flag.Parse()
|
||||
|
||||
if *brokerList == "" {
|
||||
printUsageErrorAndExit("You have to provide -brokers as a comma-separated list, or set the KAFKA_PEERS environment variable.")
|
||||
}
|
||||
|
||||
if *topic == "" {
|
||||
printUsageErrorAndExit("-topic is required")
|
||||
}
|
||||
|
||||
if *partition == -1 {
|
||||
printUsageErrorAndExit("-partition is required")
|
||||
}
|
||||
|
||||
if *verbose {
|
||||
sarama.Logger = logger
|
||||
}
|
||||
|
||||
var (
|
||||
initialOffset int64
|
||||
offsetError error
|
||||
)
|
||||
switch *offset {
|
||||
case "oldest":
|
||||
initialOffset = sarama.OffsetOldest
|
||||
case "newest":
|
||||
initialOffset = sarama.OffsetNewest
|
||||
default:
|
||||
initialOffset, offsetError = strconv.ParseInt(*offset, 10, 64)
|
||||
}
|
||||
|
||||
if offsetError != nil {
|
||||
printUsageErrorAndExit("Invalid initial offset: %s", *offset)
|
||||
}
|
||||
|
||||
c, err := sarama.NewConsumer(strings.Split(*brokerList, ","), nil)
|
||||
if err != nil {
|
||||
printErrorAndExit(69, "Failed to start consumer: %s", err)
|
||||
}
|
||||
|
||||
pc, err := c.ConsumePartition(*topic, int32(*partition), initialOffset)
|
||||
if err != nil {
|
||||
printErrorAndExit(69, "Failed to start partition consumer: %s", err)
|
||||
}
|
||||
|
||||
go func() {
|
||||
signals := make(chan os.Signal, 1)
|
||||
signal.Notify(signals, os.Kill, os.Interrupt)
|
||||
<-signals
|
||||
pc.AsyncClose()
|
||||
}()
|
||||
|
||||
for msg := range pc.Messages() {
|
||||
fmt.Printf("Offset:\t%d\n", msg.Offset)
|
||||
fmt.Printf("Key:\t%s\n", string(msg.Key))
|
||||
fmt.Printf("Value:\t%s\n", string(msg.Value))
|
||||
fmt.Println()
|
||||
}
|
||||
|
||||
if err := c.Close(); err != nil {
|
||||
logger.Println("Failed to close consumer: ", err)
|
||||
}
|
||||
}
|
||||
|
||||
func printErrorAndExit(code int, format string, values ...interface{}) {
|
||||
fmt.Fprintf(os.Stderr, "ERROR: %s\n", fmt.Sprintf(format, values...))
|
||||
fmt.Fprintln(os.Stderr)
|
||||
os.Exit(code)
|
||||
}
|
||||
|
||||
func printUsageErrorAndExit(format string, values ...interface{}) {
|
||||
fmt.Fprintf(os.Stderr, "ERROR: %s\n", fmt.Sprintf(format, values...))
|
||||
fmt.Fprintln(os.Stderr)
|
||||
fmt.Fprintln(os.Stderr, "Available command line options:")
|
||||
flag.PrintDefaults()
|
||||
os.Exit(64)
|
||||
}
|
2 vendor/github.com/Shopify/sarama/tools/kafka-console-producer/.gitignore (generated, vendored)
@ -1,2 +0,0 @@
|
||||
kafka-console-producer
|
||||
kafka-console-producer.test
|
34 vendor/github.com/Shopify/sarama/tools/kafka-console-producer/README.md (generated, vendored)
@ -1,34 +0,0 @@
|
||||
# kafka-console-producer
|
||||
|
||||
A simple command line tool to produce a single message to Kafka.
|
||||
|
||||
### Installation
|
||||
|
||||
go get github.com/Shopify/sarama/tools/kafka-console-producer
|
||||
|
||||
|
||||
### Usage
|
||||
|
||||
# Minimum invocation
|
||||
kafka-console-producer -topic=test -value=value -brokers=kafka1:9092
|
||||
|
||||
# It will pick up a KAFKA_PEERS environment variable
|
||||
export KAFKA_PEERS=kafka1:9092,kafka2:9092,kafka3:9092
|
||||
kafka-console-producer -topic=test -value=value
|
||||
|
||||
# It will read the value from stdin by using pipes
|
||||
echo "hello world" | kafka-console-producer -topic=test
|
||||
|
||||
# Specify a key:
|
||||
echo "hello world" | kafka-console-producer -topic=test -key=key
|
||||
|
||||
# Partitioning: by default, kafka-console-producer will partition as follows:
|
||||
# - manual partitioning if a -partition is provided
|
||||
# - hash partitioning by key if a -key is provided
|
||||
# - random partioning otherwise.
|
||||
#
|
||||
# You can override this using the -partitioner argument:
|
||||
echo "hello world" | kafka-console-producer -topic=test -key=key -partitioner=random
|
||||
|
||||
# Display all command line options
|
||||
kafka-console-producer -help
|
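As a side note on the partitioning rules listed in the README above: the following is a minimal sketch, not part of the vendored tool, of how those -partitioner values map onto sarama's partitioner constructors (the helper name partitionerFor is hypothetical); the tool's own source, shown in the next file, applies the same mapping inline.

```
package main

import (
	"fmt"

	"github.com/Shopify/sarama"
)

// partitionerFor mirrors the README rules: manual when a partition is given,
// hash by key when one is expected, random on request, error otherwise.
func partitionerFor(name string, partition int) (sarama.PartitionerConstructor, error) {
	switch name {
	case "manual":
		return sarama.NewManualPartitioner, nil
	case "hash":
		return sarama.NewHashPartitioner, nil
	case "random":
		return sarama.NewRandomPartitioner, nil
	case "":
		if partition >= 0 {
			return sarama.NewManualPartitioner, nil
		}
		return sarama.NewHashPartitioner, nil
	default:
		return nil, fmt.Errorf("partitioner %q not supported", name)
	}
}

func main() {
	p, err := partitionerFor("hash", -1)
	if err != nil {
		panic(err)
	}
	cfg := sarama.NewConfig()
	cfg.Producer.Partitioner = p
	fmt.Println("partitioner configured")
}
```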
124 vendor/github.com/Shopify/sarama/tools/kafka-console-producer/kafka-console-producer.go generated vendored
@ -1,124 +0,0 @@
package main

import (
	"flag"
	"fmt"
	"io/ioutil"
	"log"
	"os"
	"strings"

	"github.com/Shopify/sarama"
	"github.com/rcrowley/go-metrics"
)

var (
	brokerList  = flag.String("brokers", os.Getenv("KAFKA_PEERS"), "The comma separated list of brokers in the Kafka cluster. You can also set the KAFKA_PEERS environment variable")
	topic       = flag.String("topic", "", "REQUIRED: the topic to produce to")
	key         = flag.String("key", "", "The key of the message to produce. Can be empty.")
	value       = flag.String("value", "", "REQUIRED: the value of the message to produce. You can also provide the value on stdin.")
	partitioner = flag.String("partitioner", "", "The partitioning scheme to use. Can be `hash`, `manual`, or `random`")
	partition   = flag.Int("partition", -1, "The partition to produce to.")
	verbose     = flag.Bool("verbose", false, "Turn on sarama logging to stderr")
	showMetrics = flag.Bool("metrics", false, "Output metrics on successful publish to stderr")
	silent      = flag.Bool("silent", false, "Turn off printing the message's topic, partition, and offset to stdout")

	logger = log.New(os.Stderr, "", log.LstdFlags)
)

func main() {
	flag.Parse()

	if *brokerList == "" {
		printUsageErrorAndExit("no -brokers specified. Alternatively, set the KAFKA_PEERS environment variable")
	}

	if *topic == "" {
		printUsageErrorAndExit("no -topic specified")
	}

	if *verbose {
		sarama.Logger = logger
	}

	config := sarama.NewConfig()
	config.Producer.RequiredAcks = sarama.WaitForAll
	config.Producer.Return.Successes = true

	switch *partitioner {
	case "":
		if *partition >= 0 {
			config.Producer.Partitioner = sarama.NewManualPartitioner
		} else {
			config.Producer.Partitioner = sarama.NewHashPartitioner
		}
	case "hash":
		config.Producer.Partitioner = sarama.NewHashPartitioner
	case "random":
		config.Producer.Partitioner = sarama.NewRandomPartitioner
	case "manual":
		config.Producer.Partitioner = sarama.NewManualPartitioner
		if *partition == -1 {
			printUsageErrorAndExit("-partition is required when partitioning manually")
		}
	default:
		printUsageErrorAndExit(fmt.Sprintf("Partitioner %s not supported.", *partitioner))
	}

	message := &sarama.ProducerMessage{Topic: *topic, Partition: int32(*partition)}

	if *key != "" {
		message.Key = sarama.StringEncoder(*key)
	}

	if *value != "" {
		message.Value = sarama.StringEncoder(*value)
	} else if stdinAvailable() {
		bytes, err := ioutil.ReadAll(os.Stdin)
		if err != nil {
			printErrorAndExit(66, "Failed to read data from the standard input: %s", err)
		}
		message.Value = sarama.ByteEncoder(bytes)
	} else {
		printUsageErrorAndExit("-value is required, or you have to provide the value on stdin")
	}

	producer, err := sarama.NewSyncProducer(strings.Split(*brokerList, ","), config)
	if err != nil {
		printErrorAndExit(69, "Failed to open Kafka producer: %s", err)
	}
	defer func() {
		if err := producer.Close(); err != nil {
			logger.Println("Failed to close Kafka producer cleanly:", err)
		}
	}()

	partition, offset, err := producer.SendMessage(message)
	if err != nil {
		printErrorAndExit(69, "Failed to produce message: %s", err)
	} else if !*silent {
		fmt.Printf("topic=%s\tpartition=%d\toffset=%d\n", *topic, partition, offset)
	}
	if *showMetrics {
		metrics.WriteOnce(config.MetricRegistry, os.Stderr)
	}
}

func printErrorAndExit(code int, format string, values ...interface{}) {
	fmt.Fprintf(os.Stderr, "ERROR: %s\n", fmt.Sprintf(format, values...))
	fmt.Fprintln(os.Stderr)
	os.Exit(code)
}

func printUsageErrorAndExit(message string) {
	fmt.Fprintln(os.Stderr, "ERROR:", message)
	fmt.Fprintln(os.Stderr)
	fmt.Fprintln(os.Stderr, "Available command line options:")
	flag.PrintDefaults()
	os.Exit(64)
}

func stdinAvailable() bool {
	stat, _ := os.Stdin.Stat()
	return (stat.Mode() & os.ModeCharDevice) == 0
}
21 vendor/github.com/Shopify/sarama/utils_test.go generated vendored
@ -1,21 +0,0 @@
package sarama

import "testing"

func TestVersionCompare(t *testing.T) {
	if V0_8_2_0.IsAtLeast(V0_8_2_1) {
		t.Error("0.8.2.0 >= 0.8.2.1")
	}
	if !V0_8_2_1.IsAtLeast(V0_8_2_0) {
		t.Error("! 0.8.2.1 >= 0.8.2.0")
	}
	if !V0_8_2_0.IsAtLeast(V0_8_2_0) {
		t.Error("! 0.8.2.0 >= 0.8.2.0")
	}
	if !V0_9_0_0.IsAtLeast(V0_8_2_1) {
		t.Error("! 0.9.0.0 >= 0.8.2.1")
	}
	if V0_8_2_1.IsAtLeast(V0_10_0_0) {
		t.Error("0.8.2.1 >= 0.10.0.0")
	}
}
22 vendor/github.com/Shopify/sarama/vagrant/boot_cluster.sh generated vendored
@ -1,22 +0,0 @@
#!/bin/sh

set -ex

# Launch and wait for toxiproxy
${REPOSITORY_ROOT}/vagrant/run_toxiproxy.sh &
while ! nc -q 1 localhost 2181 </dev/null; do echo "Waiting"; sleep 1; done
while ! nc -q 1 localhost 9092 </dev/null; do echo "Waiting"; sleep 1; done

# Launch and wait for Zookeeper
for i in 1 2 3 4 5; do
    KAFKA_PORT=`expr $i + 9090`
    cd ${KAFKA_INSTALL_ROOT}/kafka-${KAFKA_PORT} && bin/zookeeper-server-start.sh -daemon config/zookeeper.properties
done
while ! nc -q 1 localhost 21805 </dev/null; do echo "Waiting"; sleep 1; done

# Launch and wait for Kafka
for i in 1 2 3 4 5; do
    KAFKA_PORT=`expr $i + 9090`
    cd ${KAFKA_INSTALL_ROOT}/kafka-${KAFKA_PORT} && bin/kafka-server-start.sh -daemon config/server.properties
done
while ! nc -q 1 localhost 29095 </dev/null; do echo "Waiting"; sleep 1; done
8 vendor/github.com/Shopify/sarama/vagrant/create_topics.sh generated vendored
@ -1,8 +0,0 @@
#!/bin/sh

set -ex

cd ${KAFKA_INSTALL_ROOT}/kafka-9092
bin/kafka-topics.sh --create --partitions 1 --replication-factor 3 --topic test.1 --zookeeper localhost:2181
bin/kafka-topics.sh --create --partitions 4 --replication-factor 3 --topic test.4 --zookeeper localhost:2181
bin/kafka-topics.sh --create --partitions 64 --replication-factor 3 --topic test.64 --zookeeper localhost:2181
49 vendor/github.com/Shopify/sarama/vagrant/install_cluster.sh generated vendored
@ -1,49 +0,0 @@
#!/bin/sh

set -ex

TOXIPROXY_VERSION=2.0.0

mkdir -p ${KAFKA_INSTALL_ROOT}
if [ ! -f ${KAFKA_INSTALL_ROOT}/kafka-${KAFKA_VERSION}.tgz ]; then
    wget --quiet http://apache.mirror.gtcomm.net/kafka/${KAFKA_VERSION}/kafka_2.11-${KAFKA_VERSION}.tgz -O ${KAFKA_INSTALL_ROOT}/kafka-${KAFKA_VERSION}.tgz
fi
if [ ! -f ${KAFKA_INSTALL_ROOT}/toxiproxy-${TOXIPROXY_VERSION} ]; then
    wget --quiet https://github.com/Shopify/toxiproxy/releases/download/v${TOXIPROXY_VERSION}/toxiproxy-server-linux-amd64 -O ${KAFKA_INSTALL_ROOT}/toxiproxy-${TOXIPROXY_VERSION}
    chmod +x ${KAFKA_INSTALL_ROOT}/toxiproxy-${TOXIPROXY_VERSION}
fi
rm -f ${KAFKA_INSTALL_ROOT}/toxiproxy
ln -s ${KAFKA_INSTALL_ROOT}/toxiproxy-${TOXIPROXY_VERSION} ${KAFKA_INSTALL_ROOT}/toxiproxy

for i in 1 2 3 4 5; do
    ZK_PORT=`expr $i + 2180`
    ZK_PORT_REAL=`expr $i + 21800`
    KAFKA_PORT=`expr $i + 9090`
    KAFKA_PORT_REAL=`expr $i + 29090`

    # unpack kafka
    mkdir -p ${KAFKA_INSTALL_ROOT}/kafka-${KAFKA_PORT}
    tar xzf ${KAFKA_INSTALL_ROOT}/kafka-${KAFKA_VERSION}.tgz -C ${KAFKA_INSTALL_ROOT}/kafka-${KAFKA_PORT} --strip-components 1

    # broker configuration
    cp ${REPOSITORY_ROOT}/vagrant/server.properties ${KAFKA_INSTALL_ROOT}/kafka-${KAFKA_PORT}/config/
    sed -i s/KAFKAID/${KAFKA_PORT}/g ${KAFKA_INSTALL_ROOT}/kafka-${KAFKA_PORT}/config/server.properties
    sed -i s/KAFKAPORT/${KAFKA_PORT_REAL}/g ${KAFKA_INSTALL_ROOT}/kafka-${KAFKA_PORT}/config/server.properties
    sed -i s/KAFKA_HOSTNAME/${KAFKA_HOSTNAME}/g ${KAFKA_INSTALL_ROOT}/kafka-${KAFKA_PORT}/config/server.properties
    sed -i s/ZK_PORT/${ZK_PORT}/g ${KAFKA_INSTALL_ROOT}/kafka-${KAFKA_PORT}/config/server.properties

    KAFKA_DATADIR="${KAFKA_INSTALL_ROOT}/kafka-${KAFKA_PORT}/data"
    mkdir -p ${KAFKA_DATADIR}
    sed -i s#KAFKA_DATADIR#${KAFKA_DATADIR}#g ${KAFKA_INSTALL_ROOT}/kafka-${KAFKA_PORT}/config/server.properties

    # zookeeper configuration
    cp ${REPOSITORY_ROOT}/vagrant/zookeeper.properties ${KAFKA_INSTALL_ROOT}/kafka-${KAFKA_PORT}/config/
    sed -i s/KAFKAID/${KAFKA_PORT}/g ${KAFKA_INSTALL_ROOT}/kafka-${KAFKA_PORT}/config/zookeeper.properties
    sed -i s/ZK_PORT/${ZK_PORT_REAL}/g ${KAFKA_INSTALL_ROOT}/kafka-${KAFKA_PORT}/config/zookeeper.properties

    ZK_DATADIR="${KAFKA_INSTALL_ROOT}/zookeeper-${ZK_PORT}"
    mkdir -p ${ZK_DATADIR}
    sed -i s#ZK_DATADIR#${ZK_DATADIR}#g ${KAFKA_INSTALL_ROOT}/kafka-${KAFKA_PORT}/config/zookeeper.properties

    echo $i > ${KAFKA_INSTALL_ROOT}/zookeeper-${ZK_PORT}/myid
done
9 vendor/github.com/Shopify/sarama/vagrant/kafka.conf generated vendored
@ -1,9 +0,0 @@
start on started zookeeper-ZK_PORT
stop on stopping zookeeper-ZK_PORT

# Use a script instead of exec (using env stanza leaks KAFKA_HEAP_OPTS from zookeeper)
script
    sleep 2
    export KAFKA_HEAP_OPTS="-Xmx320m"
    exec /opt/kafka-KAFKAID/bin/kafka-server-start.sh /opt/kafka-KAFKAID/config/server.properties
end script
15 vendor/github.com/Shopify/sarama/vagrant/provision.sh generated vendored
@ -1,15 +0,0 @@
#!/bin/sh

set -ex

apt-get update
yes | apt-get install default-jre

export KAFKA_INSTALL_ROOT=/opt
export KAFKA_HOSTNAME=192.168.100.67
export KAFKA_VERSION=0.9.0.1
export REPOSITORY_ROOT=/vagrant

sh /vagrant/vagrant/install_cluster.sh
sh /vagrant/vagrant/setup_services.sh
sh /vagrant/vagrant/create_topics.sh
22 vendor/github.com/Shopify/sarama/vagrant/run_toxiproxy.sh generated vendored
@ -1,22 +0,0 @@
#!/bin/sh

set -ex

${KAFKA_INSTALL_ROOT}/toxiproxy -port 8474 -host 0.0.0.0 &
PID=$!

while ! nc -q 1 localhost 8474 </dev/null; do echo "Waiting"; sleep 1; done

wget -O/dev/null -S --post-data='{"name":"zk1", "upstream":"localhost:21801", "listen":"0.0.0.0:2181"}' localhost:8474/proxies
wget -O/dev/null -S --post-data='{"name":"zk2", "upstream":"localhost:21802", "listen":"0.0.0.0:2182"}' localhost:8474/proxies
wget -O/dev/null -S --post-data='{"name":"zk3", "upstream":"localhost:21803", "listen":"0.0.0.0:2183"}' localhost:8474/proxies
wget -O/dev/null -S --post-data='{"name":"zk4", "upstream":"localhost:21804", "listen":"0.0.0.0:2184"}' localhost:8474/proxies
wget -O/dev/null -S --post-data='{"name":"zk5", "upstream":"localhost:21805", "listen":"0.0.0.0:2185"}' localhost:8474/proxies

wget -O/dev/null -S --post-data='{"name":"kafka1", "upstream":"localhost:29091", "listen":"0.0.0.0:9091"}' localhost:8474/proxies
wget -O/dev/null -S --post-data='{"name":"kafka2", "upstream":"localhost:29092", "listen":"0.0.0.0:9092"}' localhost:8474/proxies
wget -O/dev/null -S --post-data='{"name":"kafka3", "upstream":"localhost:29093", "listen":"0.0.0.0:9093"}' localhost:8474/proxies
wget -O/dev/null -S --post-data='{"name":"kafka4", "upstream":"localhost:29094", "listen":"0.0.0.0:9094"}' localhost:8474/proxies
wget -O/dev/null -S --post-data='{"name":"kafka5", "upstream":"localhost:29095", "listen":"0.0.0.0:9095"}' localhost:8474/proxies

wait $PID
127 vendor/github.com/Shopify/sarama/vagrant/server.properties generated vendored
@ -1,127 +0,0 @@
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# see kafka.server.KafkaConfig for additional details and defaults

############################# Server Basics #############################

# The id of the broker. This must be set to a unique integer for each broker.
broker.id=KAFKAID
reserved.broker.max.id=10000

############################# Socket Server Settings #############################

# The port the socket server listens on
port=KAFKAPORT

# Hostname the broker will bind to. If not set, the server will bind to all interfaces
host.name=localhost

# Hostname the broker will advertise to producers and consumers. If not set, it uses the
# value for "host.name" if configured. Otherwise, it will use the value returned from
# java.net.InetAddress.getCanonicalHostName().
advertised.host.name=KAFKA_HOSTNAME
advertised.port=KAFKAID

# The port to publish to ZooKeeper for clients to use. If this is not set,
# it will publish the same port that the broker binds to.
# advertised.port=<port accessible by clients>

# The number of threads handling network requests
num.network.threads=2

# The number of threads doing disk I/O
num.io.threads=8

# The send buffer (SO_SNDBUF) used by the socket server
socket.send.buffer.bytes=1048576

# The receive buffer (SO_RCVBUF) used by the socket server
socket.receive.buffer.bytes=1048576

# The maximum size of a request that the socket server will accept (protection against OOM)
socket.request.max.bytes=104857600


############################# Log Basics #############################

# A comma separated list of directories under which to store log files
log.dirs=KAFKA_DATADIR

# The default number of log partitions per topic. More partitions allow greater
# parallelism for consumption, but this will also result in more files across
# the brokers.
num.partitions=2

# Create new topics with a replication factor of 2 so failover can be tested
# more easily.
default.replication.factor=2

auto.create.topics.enable=false
delete.topic.enable=true

############################# Log Flush Policy #############################

# Messages are immediately written to the filesystem but by default we only fsync() to sync
# the OS cache lazily. The following configurations control the flush of data to disk.
# There are a few important trade-offs here:
#    1. Durability: Unflushed data may be lost if you are not using replication.
#    2. Latency: Very large flush intervals may lead to latency spikes when the flush does occur as there will be a lot of data to flush.
#    3. Throughput: The flush is generally the most expensive operation, and a small flush interval may lead to excessive seeks.
# The settings below allow one to configure the flush policy to flush data after a period of time or
# every N messages (or both). This can be done globally and overridden on a per-topic basis.

# The number of messages to accept before forcing a flush of data to disk
#log.flush.interval.messages=10000

# The maximum amount of time a message can sit in a log before we force a flush
#log.flush.interval.ms=1000

############################# Log Retention Policy #############################

# The following configurations control the disposal of log segments. The policy can
# be set to delete segments after a period of time, or after a given size has accumulated.
# A segment will be deleted whenever *either* of these criteria are met. Deletion always happens
# from the end of the log.

# The minimum age of a log file to be eligible for deletion
log.retention.hours=168

# A size-based retention policy for logs. Segments are pruned from the log as long as the remaining
# segments don't drop below log.retention.bytes.
log.retention.bytes=268435456

# The maximum size of a log segment file. When this size is reached a new log segment will be created.
log.segment.bytes=268435456

# The interval at which log segments are checked to see if they can be deleted according
# to the retention policies
log.retention.check.interval.ms=60000

# By default the log cleaner is disabled and the log retention policy will default to just delete segments after their retention expires.
# If log.cleaner.enable=true is set the cleaner will be enabled and individual logs can then be marked for log compaction.
log.cleaner.enable=false

############################# Zookeeper #############################

# Zookeeper connection string (see zookeeper docs for details).
# This is a comma separated host:port pairs, each corresponding to a zk
# server. e.g. "127.0.0.1:3000,127.0.0.1:3001,127.0.0.1:3002".
# You can also append an optional chroot string to the urls to specify the
# root directory for all kafka znodes.
zookeeper.connect=localhost:ZK_PORT

# Timeout in ms for connecting to zookeeper
zookeeper.session.timeout.ms=3000
zookeeper.connection.timeout.ms=3000
29 vendor/github.com/Shopify/sarama/vagrant/setup_services.sh generated vendored
@ -1,29 +0,0 @@
#!/bin/sh

set -ex

stop toxiproxy || true
cp ${REPOSITORY_ROOT}/vagrant/toxiproxy.conf /etc/init/toxiproxy.conf
cp ${REPOSITORY_ROOT}/vagrant/run_toxiproxy.sh ${KAFKA_INSTALL_ROOT}/
start toxiproxy

for i in 1 2 3 4 5; do
    ZK_PORT=`expr $i + 2180`
    KAFKA_PORT=`expr $i + 9090`

    stop zookeeper-${ZK_PORT} || true

    # set up zk service
    cp ${REPOSITORY_ROOT}/vagrant/zookeeper.conf /etc/init/zookeeper-${ZK_PORT}.conf
    sed -i s/KAFKAID/${KAFKA_PORT}/g /etc/init/zookeeper-${ZK_PORT}.conf

    # set up kafka service
    cp ${REPOSITORY_ROOT}/vagrant/kafka.conf /etc/init/kafka-${KAFKA_PORT}.conf
    sed -i s/KAFKAID/${KAFKA_PORT}/g /etc/init/kafka-${KAFKA_PORT}.conf
    sed -i s/ZK_PORT/${ZK_PORT}/g /etc/init/kafka-${KAFKA_PORT}.conf

    start zookeeper-${ZK_PORT}
done

# Wait for the last kafka node to finish booting
while ! nc -q 1 localhost 29095 </dev/null; do echo "Waiting"; sleep 1; done
6 vendor/github.com/Shopify/sarama/vagrant/toxiproxy.conf generated vendored
@ -1,6 +0,0 @@
start on started networking
stop on shutdown

env KAFKA_INSTALL_ROOT=/opt

exec /opt/run_toxiproxy.sh
7 vendor/github.com/Shopify/sarama/vagrant/zookeeper.conf generated vendored
@ -1,7 +0,0 @@
start on started toxiproxy
stop on stopping toxiproxy

script
    export KAFKA_HEAP_OPTS="-Xmx192m"
    exec /opt/kafka-KAFKAID/bin/zookeeper-server-start.sh /opt/kafka-KAFKAID/config/zookeeper.properties
end script
36 vendor/github.com/Shopify/sarama/vagrant/zookeeper.properties generated vendored
@ -1,36 +0,0 @@
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# the directory where the snapshot is stored.
dataDir=ZK_DATADIR
# the port at which the clients will connect
clientPort=ZK_PORT
# disable the per-ip limit on the number of connections since this is a non-production config
maxClientCnxns=0

# The number of milliseconds of each tick
tickTime=2000

# The number of ticks that the initial synchronization phase can take
initLimit=10

# The number of ticks that can pass between
# sending a request and getting an acknowledgement
syncLimit=5

server.1=localhost:2281:2381
server.2=localhost:2282:2382
server.3=localhost:2283:2383
server.4=localhost:2284:2384
server.5=localhost:2285:2385
14 vendor/github.com/aws/aws-sdk-go/.github/ISSUE_TEMPLATE.md generated vendored
@ -1,14 +0,0 @@
Please fill out the sections below to help us address your issue.

### Version of AWS SDK for Go?


### Version of Go (`go version`)?


### What issue did you see?

### Steps to reproduce

If you have a runnable example, please include it.
3 vendor/github.com/aws/aws-sdk-go/.github/PULL_REQUEST_TEMPLATE.md generated vendored
@ -1,3 +0,0 @@
For changes to files under the `/model/` folder, and manual edits to autogenerated code (e.g. `/service/s3/api.go`), please create an Issue instead of a PR for those types of changes.

If there is an existing bug or feature this PR answers, please reference it here.
11 vendor/github.com/aws/aws-sdk-go/.gitignore generated vendored
@ -1,11 +0,0 @@
dist
/doc
/doc-staging
.yardoc
Gemfile.lock
awstesting/integration/smoke/**/importmarker__.go
awstesting/integration/smoke/_test/
/vendor/bin/
/vendor/pkg/
/vendor/src/
/private/model/cli/gen-api/gen-api
14 vendor/github.com/aws/aws-sdk-go/.godoc_config generated vendored
@ -1,14 +0,0 @@
{
  "PkgHandler": {
    "Pattern": "/sdk-for-go/api/",
    "StripPrefix": "/sdk-for-go/api",
    "Include": ["/src/github.com/aws/aws-sdk-go/aws", "/src/github.com/aws/aws-sdk-go/service"],
    "Exclude": ["/src/cmd", "/src/github.com/aws/aws-sdk-go/awstesting", "/src/github.com/aws/aws-sdk-go/awsmigrate", "/src/github.com/aws/aws-sdk-go/private"],
    "IgnoredSuffixes": ["iface"]
  },
  "Github": {
    "Tag": "master",
    "Repo": "/aws/aws-sdk-go",
    "UseGithub": true
  }
}
42 vendor/github.com/aws/aws-sdk-go/.travis.yml generated vendored
@ -1,42 +0,0 @@
language: go

sudo: required

os:
- linux
- osx
go:
- 1.6.x
- 1.7.x
- 1.8.x
- 1.9.x
- 1.10.x
- 1.11.x
- tip

matrix:
  allow_failures:
    - go: tip
  exclude:
    # OSX 1.6.4 is not present in travis.
    # https://github.com/travis-ci/travis-ci/issues/10309
    - go: 1.6.x
      os: osx
  include:
    - os: linux
      go: 1.5.x
      # Use Go 1.5's vendoring experiment for 1.5 tests.
      env: GO15VENDOREXPERIMENT=1

script:
- if [ $TRAVIS_GO_VERSION == "tip" ] ||
     [ $TRAVIS_GO_VERSION == "1.11.x" ] ||
     [ $TRAVIS_GO_VERSION == "1.10.x" ]; then
    make ci-test;
  else
    make unit-old-go-race-cover;
  fi

branches:
  only:
    - master
5603 vendor/github.com/aws/aws-sdk-go/CHANGELOG.md generated vendored
File diff suppressed because it is too large
5 vendor/github.com/aws/aws-sdk-go/CHANGELOG_PENDING.md generated vendored
@ -1,5 +0,0 @@
### SDK Features

### SDK Enhancements

### SDK Bugs
4 vendor/github.com/aws/aws-sdk-go/CODE_OF_CONDUCT.md generated vendored
@ -1,4 +0,0 @@
## Code of Conduct
This project has adopted the [Amazon Open Source Code of Conduct](https://aws.github.io/code-of-conduct).
For more information see the [Code of Conduct FAQ](https://aws.github.io/code-of-conduct-faq) or contact
opensource-codeofconduct@amazon.com with any additional questions or comments.
127 vendor/github.com/aws/aws-sdk-go/CONTRIBUTING.md generated vendored
@ -1,127 +0,0 @@
Contributing to the AWS SDK for Go

We work hard to provide a high-quality and useful SDK, and we greatly value
feedback and contributions from our community. Whether it's a bug report,
new feature, correction, or additional documentation, we welcome your issues
and pull requests. Please read through this document before submitting any
issues or pull requests to ensure we have all the necessary information to
effectively respond to your bug report or contribution.


## Filing Bug Reports

You can file bug reports against the SDK on the [GitHub issues][issues] page.

If you are filing a report for a bug or regression in the SDK, it's extremely
helpful to provide as much information as possible when opening the original
issue. This helps us reproduce and investigate the possible bug without having
to wait for this extra information to be provided. Please read the following
guidelines prior to filing a bug report.

1. Search through existing [issues][] to ensure that your specific issue has
   not yet been reported. If it is a common issue, it is likely there is
   already a bug report for your problem.

2. Ensure that you have tested the latest version of the SDK. Although you
   may have an issue against an older version of the SDK, we cannot provide
   bug fixes for old versions. It's also possible that the bug may have been
   fixed in the latest release.

3. Provide as much information about your environment, SDK version, and
   relevant dependencies as possible. For example, let us know what version
   of Go you are using, which operating system and version, and the
   environment your code is running in, e.g. a container.

4. Provide a minimal test case that reproduces your issue or any error
   information related to your problem. We can provide feedback much
   more quickly if we know what operations you are calling in the SDK. If
   you cannot provide a full test case, provide as much code as you can
   to help us diagnose the problem. Any relevant information should be provided
   as well, like whether this is a persistent issue, or if it only occurs
   some of the time.


## Submitting Pull Requests

We are always happy to receive code and documentation contributions to the SDK.
Please be aware of the following notes prior to opening a pull request:

1. The SDK is released under the [Apache license][license]. Any code you submit
   will be released under that license. For substantial contributions, we may
   ask you to sign a [Contributor License Agreement (CLA)][cla].

2. If you would like to implement support for a significant feature that is not
   yet available in the SDK, please talk to us beforehand to avoid any
   duplication of effort.

3. Wherever possible, pull requests should contain tests as appropriate.
   Bugfixes should contain tests that exercise the corrected behavior (i.e., the
   test should fail without the bugfix and pass with it), and new features
   should be accompanied by tests exercising the feature.

4. Pull requests that contain failing tests will not be merged until the test
   failures are addressed. Pull requests that cause a significant drop in the
   SDK's test coverage percentage are unlikely to be merged until tests have
   been added.

5. The JSON files under the SDK's `models` folder are sourced from outside the SDK,
   such as `models/apis/ec2/2016-11-15/api.json`. We will not accept pull requests
   directly on these models. If you discover an issue with the models, please
   create a [GitHub issue][issues] describing the issue.

### Testing

To run the tests locally, running the `make unit` command will `go get` the
SDK's testing dependencies, and run vet, lint and unit tests for the SDK.

```
make unit
```

Standard go testing functionality is supported as well. To test SDK code that
is tagged with `codegen` you'll need to set the build tag in the go test
command. The `make unit` command will do this automatically.

```
go test -tags codegen ./private/...
```

See the `Makefile` for additional testing tags that can be used in testing.

To test on multiple platforms, the SDK includes several DockerFiles under the
`awstesting/sandbox` folder, and associated make recipes to execute
unit testing within environments configured for specific Go versions.

```
make sandbox-test-go18
```

To run all sandbox environments use the following make recipe:

```
# Optionally update the Go tip that will be used during the batch testing
make update-aws-golang-tip

# Run all SDK tests for supported Go versions in sandboxes
make sandbox-test
```

In addition, the sandbox environments include make recipes for interactive modes
so you can run commands within the Docker container in the context of the SDK.

```
make sandbox-go18
```

### Changelog

You can see all release changes in the `CHANGELOG.md` file at the root of the
repository. The release notes added to this file will contain service client
updates, and major SDK changes.

[issues]: https://github.com/aws/aws-sdk-go/issues
[pr]: https://github.com/aws/aws-sdk-go/pulls
[license]: http://aws.amazon.com/apache2.0/
[cla]: http://en.wikipedia.org/wiki/Contributor_License_Agreement
[releasenotes]: https://github.com/aws/aws-sdk-go/releases
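To make the `codegen` build-tag point from the Testing section above concrete: a minimal, hypothetical sketch (not a file from the SDK; the package and test names are made up) of a test file guarded by that tag, so it is only compiled and run when `go test -tags codegen` (or `make unit`) supplies the tag.

```
//go:build codegen
// +build codegen

package example

import "testing"

// TestOnlyWithCodegenTag is not even compiled unless the codegen build tag is
// supplied, e.g. `go test -tags codegen ./...`.
func TestOnlyWithCodegenTag(t *testing.T) {
	t.Log("codegen-tagged tests are running")
}
```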
Some files were not shown because too many files have changed in this diff.