Updated Kafka client

tidwall 2020-06-24 14:20:22 -07:00
parent 6629eba8bf
commit 712da8aefa
704 changed files with 185982 additions and 24298 deletions

go.mod

@@ -3,25 +3,19 @@ module github.com/tidwall/tile38
go 1.13
require (
github.com/Shopify/sarama v1.13.0
github.com/Shopify/sarama v1.26.4
github.com/Shopify/toxiproxy v2.1.4+incompatible // indirect
github.com/aws/aws-sdk-go v1.17.14
github.com/cespare/xxhash v1.1.0 // indirect
github.com/eapache/go-resiliency v1.0.0 // indirect
github.com/eapache/go-xerial-snappy v0.0.0-20160609142408-bb955e01b934 // indirect
github.com/eapache/queue v1.0.2 // indirect
github.com/eclipse/paho.mqtt.golang v1.1.0
github.com/golang/protobuf v0.0.0-20170920220647-130e6b02ab05
github.com/golang/snappy v0.0.0-20170215233205-553a64147049 // indirect
github.com/gomodule/redigo v2.0.1-0.20181026001555-e8fc0692a7e2+incompatible
github.com/mmcloughlin/geohash v0.0.0-20181009053802-f7f2bcae3294
github.com/nats-io/gnatsd v1.4.1 // indirect
github.com/nats-io/go-nats v1.6.0
github.com/nats-io/nuid v1.0.0 // indirect
github.com/peterh/liner v1.0.1-0.20170902204657-a37ad3984311
github.com/pierrec/lz4 v1.0.1 // indirect
github.com/pierrec/xxHash v0.1.1 // indirect
github.com/rcrowley/go-metrics v0.0.0-20161128210544-1f30fe9094a5 // indirect
github.com/streadway/amqp v0.0.0-20170926065634-cefed15a0bd8
github.com/stretchr/testify v1.4.0 // indirect
github.com/tidwall/btree v0.0.0-20170113224114-9876f1454cf0
@@ -44,11 +38,9 @@ require (
github.com/tidwall/tinybtree v0.0.0-20181217131827-de5932d649b5
github.com/tidwall/tinyqueue v0.0.0-20180302190814-1e39f5511563 // indirect
github.com/yuin/gopher-lua v0.0.0-20170915035107-eb1c7299435c
golang.org/x/crypto v0.0.0-20170930174604-9419663f5a44
golang.org/x/net v0.0.0-20171004034648-a04bdaca5b32
golang.org/x/crypto v0.0.0-20200204104054-c9f3fb736b72
golang.org/x/net v0.0.0-20200202094626-16171245cfb2
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e // indirect
golang.org/x/sys v0.0.0-20170927054621-314a259e304f // indirect
golang.org/x/text v0.1.1-0.20171005092100-d82c1812e304 // indirect
google.golang.org/genproto v0.0.0-20171002232614-f676e0f3ac63 // indirect
google.golang.org/grpc v1.6.0
layeh.com/gopher-json v0.0.0-20161224164157-c128cc74278b
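The bump above takes sarama from v1.13.0 to v1.26.4, which pulls in the large vendored diff below plus new transitive dependencies (klauspost/compress, gokrb5, xdg/scram, and friends). A minimal smoke test for the upgraded client might look like the following sketch; the broker address and topic name are placeholders, and pinning Config.Version to the brokers' Kafka version is the main upgrade-sensitive knob.

```go
package main

import (
	"log"

	"github.com/Shopify/sarama"
)

func main() {
	cfg := sarama.NewConfig()
	// sarama defaults to a very old protocol version; pin it to the
	// brokers' version (Kafka 2.4 assumed here).
	cfg.Version = sarama.V2_4_0_0
	// SyncProducer requires Return.Successes to be enabled.
	cfg.Producer.Return.Successes = true

	// "localhost:9092" and "smoke-test" are placeholder values.
	producer, err := sarama.NewSyncProducer([]string{"localhost:9092"}, cfg)
	if err != nil {
		log.Fatal(err)
	}
	defer producer.Close()

	partition, offset, err := producer.SendMessage(&sarama.ProducerMessage{
		Topic: "smoke-test",
		Value: sarama.StringEncoder("hello"),
	})
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("delivered to partition %d at offset %d", partition, offset)
}
```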

go.sum

@@ -2,6 +2,8 @@ github.com/OneOfOne/xxhash v1.2.2 h1:KMrpdQIwFcEqXDklaen+P1axHaj9BSKzvpUUfnHldSE
github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
github.com/Shopify/sarama v1.13.0 h1:R+4WFsmMzUxN2uiGzWXoY9apBAQnARC+B+wYvy/kC3k=
github.com/Shopify/sarama v1.13.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo=
github.com/Shopify/sarama v1.26.4 h1:+17TxUq/PJEAfZAll0T7XJjSgQWCpaQSoki/x5yN8o8=
github.com/Shopify/sarama v1.26.4/go.mod h1:NbSGBSSndYaIhRcBtY9V0U7AyH+x71bG668AuWys/yU=
github.com/Shopify/toxiproxy v2.1.4+incompatible h1:TKdv8HiTLgE5wdJuEML90aBgNWsokNbMijUGhmcoBJc=
github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI=
github.com/aws/aws-sdk-go v1.17.14 h1:IjqZDIQoLyZ48A93BxVrZOaIGgZPRi4nXt6WQUMJplY=
@@ -10,24 +12,48 @@ github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko=
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/eapache/go-resiliency v1.0.0 h1:XPZo5qMI0LGzIqT9wRq6dPv2vEuo9MWCar1wHY8Kuf4=
github.com/eapache/go-resiliency v1.0.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs=
github.com/eapache/go-resiliency v1.2.0 h1:v7g92e/KSN71Rq7vSThKaWIq68fL4YHvWyiUKorFR1Q=
github.com/eapache/go-resiliency v1.2.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs=
github.com/eapache/go-xerial-snappy v0.0.0-20160609142408-bb955e01b934 h1:oGLoaVIefp3tiOgi7+KInR/nNPvEpPM6GFo+El7fd14=
github.com/eapache/go-xerial-snappy v0.0.0-20160609142408-bb955e01b934/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU=
github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21 h1:YEetp8/yCZMuEPMUDHG0CW/brkkEp8mzqk2+ODEitlw=
github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU=
github.com/eapache/queue v1.0.2 h1:jRJXCx6uciOfN69MfZCC9EZlGRqqHhwlyb6GBeNow+c=
github.com/eapache/queue v1.0.2/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I=
github.com/eapache/queue v1.1.0 h1:YOEu7KNc61ntiQlcEeUIoDTJ2o8mQznoNvUhiigpIqc=
github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I=
github.com/eclipse/paho.mqtt.golang v1.1.0 h1:Em29HD1CwLHdRFnX7yfg+kBjHHw6DSDok9I+ia4znT4=
github.com/eclipse/paho.mqtt.golang v1.1.0/go.mod h1:H9keYFcgq3Qr5OUJm/JZI/i6U7joQ8SYLhZwfeOo6Ts=
github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g=
github.com/frankban/quicktest v1.7.2/go.mod h1:jaStnuzAqU1AJdCO0l53JDCJrVDKcS03DbaAcR7Ks/o=
github.com/golang/protobuf v0.0.0-20170920220647-130e6b02ab05 h1:Kesru7U6Mhpf/x7rthxAKnr586VFmoE2NdEvkOKvfjg=
github.com/golang/protobuf v0.0.0-20170920220647-130e6b02ab05/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/snappy v0.0.0-20170215233205-553a64147049 h1:K9KHZbXKpGydfDN0aZrsoHpLJlZsBrGMFWbgLDGnPZk=
github.com/golang/snappy v0.0.0-20170215233205-553a64147049/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/golang/snappy v0.0.1 h1:Qgr9rKW7uDUkrbSmQeiDsGa8SjGyCOGtuasMWwvp2P4=
github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/gomodule/redigo v1.7.0 h1:ZKld1VOtsGhAe37E7wMxEDgAlGM5dvFY+DiOhSkhP9Y=
github.com/gomodule/redigo v2.0.0+incompatible h1:K/R+8tc58AaqLkqG2Ol3Qk+DR/TlNuhuh457pBFPtt0=
github.com/gomodule/redigo v2.0.1-0.20181026001555-e8fc0692a7e2+incompatible h1:H4S5GVLXZxCnS6q3+HrRBu/ObgobnAHg92tWG8cLfX8=
github.com/gomodule/redigo v2.0.1-0.20181026001555-e8fc0692a7e2+incompatible/go.mod h1:B4C85qUVwatsJoIUNIfCRsp7qO0iAmpGFZ4EELWSbC4=
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/hashicorp/go-uuid v1.0.2 h1:cfejS+Tpcp13yd5nYHWDI6qVCny6wyX2Mt5SGur2IGE=
github.com/hashicorp/go-uuid v1.0.2/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
github.com/jcmturner/gofork v1.0.0 h1:J7uCkflzTEhUZ64xqKnkDxq3kzc96ajM1Gli5ktUem8=
github.com/jcmturner/gofork v1.0.0/go.mod h1:MK8+TM0La+2rjBD4jE12Kj1pCCxK7d2LK/UM3ncEo0o=
github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af h1:pmfjZENx5imkbgOkpRUYLnmbU7UEFbjtDA2hxJ1ichM=
github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
github.com/klauspost/compress v1.9.8 h1:VMAMUUOh+gaxKTMk+zqbjsSjsIcUcL/LF4o63i82QyA=
github.com/klauspost/compress v1.9.8/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/mmcloughlin/geohash v0.0.0-20181009053802-f7f2bcae3294 h1:QlTAK00UrY80KK9Da+foE04AjxhXFrgp87aZB6yfU5c=
github.com/mmcloughlin/geohash v0.0.0-20181009053802-f7f2bcae3294/go.mod h1:oNZxQo5yWJh0eMQEP/8hwQuVx9Z9tjwFUqcTB1SmG0c=
github.com/nats-io/gnatsd v1.4.1 h1:RconcfDeWpKCD6QIIwiVFcvForlXpWeJP7i5/lDLy44=
@@ -40,12 +66,16 @@ github.com/peterh/liner v1.0.1-0.20170902204657-a37ad3984311 h1:IQrJrnseUVEdTXQp
github.com/peterh/liner v1.0.1-0.20170902204657-a37ad3984311/go.mod h1:xIteQHvHuaLYG9IFj6mSxM0fCKrs34IrEQUhOYuGPHc=
github.com/pierrec/lz4 v1.0.1 h1:w6GMGWSsCI04fTM8wQRdnW74MuJISakuUU0onU0TYB4=
github.com/pierrec/lz4 v1.0.1/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
github.com/pierrec/lz4 v2.4.1+incompatible h1:mFe7ttWaflA46Mhqh+jUfjp2qTbPYxLB2/OyBppH9dg=
github.com/pierrec/lz4 v2.4.1+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
github.com/pierrec/xxHash v0.1.1 h1:KP4NrV9023xp3M4FkTYfcXqWigsOCImL1ANJ7sh5vg4=
github.com/pierrec/xxHash v0.1.1/go.mod h1:w2waW5Zoa/Wc4Yqe0wgrIYAGKqRMf7czn2HNKXmuL+I=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/rcrowley/go-metrics v0.0.0-20161128210544-1f30fe9094a5 h1:gwcdIpH6NU2iF8CmcqD+CP6+1CkRBOhHaPR+iu6raBY=
github.com/rcrowley/go-metrics v0.0.0-20161128210544-1f30fe9094a5/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
github.com/rcrowley/go-metrics v0.0.0-20190826022208-cac0b30c2563 h1:dY6ETXrvDG7Sa4vE8ZQG4yqWg6UnOcbqTAahkV813vQ=
github.com/rcrowley/go-metrics v0.0.0-20190826022208-cac0b30c2563/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72 h1:qLC7fQah7D6K1B0ujays3HV9gkFtllcxhzImRR7ArPQ=
github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
github.com/streadway/amqp v0.0.0-20170926065634-cefed15a0bd8 h1:q2L3Zhh0RscQeFIJRFshSq3DtZPE0ts8q3F+oyUxw/c=
@@ -97,25 +127,52 @@ github.com/tidwall/tinybtree v0.0.0-20181217131827-de5932d649b5 h1:NaGfypx6656w6
github.com/tidwall/tinybtree v0.0.0-20181217131827-de5932d649b5/go.mod h1:0aFQG6KLQz3j57CeVgXlmKO3RSQ3myhJn2H+r84IgSY=
github.com/tidwall/tinyqueue v0.0.0-20180302190814-1e39f5511563 h1:Otn9S136ELckZ3KKDyCkxapfufrqDqwmGjcHfAyXRrE=
github.com/tidwall/tinyqueue v0.0.0-20180302190814-1e39f5511563/go.mod h1:mLqSmt7Dv/CNneF2wfcChfN1rvapyQr01LGKnKex0DQ=
github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c/go.mod h1:lB8K/P019DLNhemzwFU4jHLhdvlE6uDZjXFejJXr49I=
github.com/xdg/stringprep v1.0.0/go.mod h1:Jhud4/sHMO4oL310DaZAKk9ZaJ08SJfe+sJh0HrGL1Y=
github.com/yuin/gopher-lua v0.0.0-20170915035107-eb1c7299435c h1:BXhbeVl63cTUicr+Q/D0/BNPw59IsIcyv2cB1/xHRps=
github.com/yuin/gopher-lua v0.0.0-20170915035107-eb1c7299435c/go.mod h1:aEV29XrmTYFr3CiRxZeGHpkvbwq+prZduBqMaascyCU=
golang.org/x/crypto v0.0.0-20170930174604-9419663f5a44 h1:9lP3x0pW80sDI6t1UMSLA4to18W7R7imwAI/sWS9S8Q=
golang.org/x/crypto v0.0.0-20170930174604-9419663f5a44/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20200204104054-c9f3fb736b72 h1:+ELyKg6m8UBf0nPFSqD0mi7zUfwPyXo23HNjMnXPz7w=
golang.org/x/crypto v0.0.0-20200204104054-c9f3fb736b72/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/net v0.0.0-20171004034648-a04bdaca5b32 h1:NjAulLPqFTaOxQu5S4qUMqscSu+mQdu+wMY0nfqSkuk=
golang.org/x/net v0.0.0-20171004034648-a04bdaca5b32/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20200202094626-16171245cfb2 h1:CCH4IOTTfewWjGOlSp+zGcjutRKlBEZQ6wTn8ozI/nI=
golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e h1:vcxGaoTs7kV8m5Np9uUNQin4BrLOthgV7252N8V+FwY=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20170927054621-314a259e304f h1:iUy6hSM2lPBGm2d9HgXq1GqYPwcJvA8ihnWauXggYMs=
golang.org/x/sys v0.0.0-20170927054621-314a259e304f/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d h1:+R4KGOnez64A81RvjARKc4UT5/tI9ujCIVX+P5KiHuI=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/text v0.1.1-0.20171005092100-d82c1812e304 h1:O2dKpvCsgtI9C6I1Byy3L6t4dfkwGmLFeXPT6NMySx4=
golang.org/x/text v0.1.1-0.20171005092100-d82c1812e304/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
google.golang.org/genproto v0.0.0-20171002232614-f676e0f3ac63 h1:yNBw5bwywOTguAu+h6SkCUaWdEZ7ZXgfiwb2YTN1eQw=
google.golang.org/genproto v0.0.0-20171002232614-f676e0f3ac63/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/grpc v1.6.0 h1:vaySXtNtPrLJFCiET8QXtfBrqq16ynklmFGaZwLcd1M=
google.golang.org/grpc v1.6.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/jcmturner/aescts.v1 v1.0.1 h1:cVVZBK2b1zY26haWB4vbBiZrfFQnfbTVrE3xZq6hrEw=
gopkg.in/jcmturner/aescts.v1 v1.0.1/go.mod h1:nsR8qBOg+OucoIW+WMhB3GspUQXq9XorLnQb9XtvcOo=
gopkg.in/jcmturner/dnsutils.v1 v1.0.1 h1:cIuC1OLRGZrld+16ZJvvZxVJeKPsvd5eUIvxfoN5hSM=
gopkg.in/jcmturner/dnsutils.v1 v1.0.1/go.mod h1:m3v+5svpVOhtFAP/wSz+yzh4Mc0Fg7eRhxkJMWSIz9Q=
gopkg.in/jcmturner/goidentity.v3 v3.0.0/go.mod h1:oG2kH0IvSYNIu80dVAyu/yoefjq1mNfM5bm88whjWx4=
gopkg.in/jcmturner/gokrb5.v7 v7.5.0 h1:a9tsXlIDD9SKxotJMK3niV7rPZAJeX2aD/0yg3qlIrg=
gopkg.in/jcmturner/gokrb5.v7 v7.5.0/go.mod h1:l8VISx+WGYp+Fp7KRbsiUuXTTOnxIc3Tuvyavf11/WM=
gopkg.in/jcmturner/rpc.v1 v1.1.0 h1:QHIUxTX1ISuAv9dD2wJ9HWQVuWDX/Zc0PfeC2tjc4rU=
gopkg.in/jcmturner/rpc.v1 v1.1.0/go.mod h1:YIdkC4XfD6GXbzje11McwsDuOlZQSb9W4vfLvuNnlv8=
gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
layeh.com/gopher-json v0.0.0-20161224164157-c128cc74278b h1:ZfaWvT/nGlYq3Id9DDvsgmRzAfPotitRJmD7ymWZPK0=
layeh.com/gopher-json v0.0.0-20161224164157-c128cc74278b/go.mod h1:ivKkcY8Zxw5ba0jldhZCYYQfGdb2K6u9tbYK1AwMIBc=

vendor/github.com/Shopify/sarama/.gitignore vendored

@@ -22,3 +22,6 @@ _cgo_export.*
_testmain.go
*.exe
coverage.txt
profile.out

vendor/github.com/Shopify/sarama/.golangci.yml generated vendored Normal file

@@ -0,0 +1,74 @@
run:
timeout: 5m
deadline: 10m
linters-settings:
govet:
check-shadowing: false
golint:
min-confidence: 0
gocyclo:
min-complexity: 99
maligned:
suggest-new: true
dupl:
threshold: 100
goconst:
min-len: 2
min-occurrences: 3
misspell:
locale: US
goimports:
local-prefixes: github.com/Shopify/sarama
gocritic:
enabled-tags:
- diagnostic
- experimental
- opinionated
- performance
- style
disabled-checks:
- wrapperFunc
- ifElseChain
funlen:
lines: 300
statements: 300
linters:
disable-all: true
enable:
- bodyclose
- deadcode
- depguard
- dogsled
# - dupl
- errcheck
- funlen
# - gocritic
- gocyclo
- gofmt
- goimports
# - golint
- gosec
# - gosimple
- govet
# - ineffassign
- interfacer
# - misspell
# - nakedret
# - scopelint
# - staticcheck
- structcheck
# - stylecheck
- typecheck
- unconvert
- unused
- varcheck
- whitespace
# - goconst
# - gochecknoinits
issues:
exclude:
- consider giving a name to these results
- include an explanation for nolint directive

vendor/github.com/Shopify/sarama/.travis.yml vendored (deleted)

@@ -1,34 +0,0 @@
language: go
go:
- 1.7.x
- 1.8.x
- 1.9.x
env:
global:
- KAFKA_PEERS=localhost:9091,localhost:9092,localhost:9093,localhost:9094,localhost:9095
- TOXIPROXY_ADDR=http://localhost:8474
- KAFKA_INSTALL_ROOT=/home/travis/kafka
- KAFKA_HOSTNAME=localhost
- DEBUG=true
matrix:
- KAFKA_VERSION=0.9.0.1
- KAFKA_VERSION=0.10.2.1
- KAFKA_VERSION=0.11.0.1
before_install:
- export REPOSITORY_ROOT=${TRAVIS_BUILD_DIR}
- vagrant/install_cluster.sh
- vagrant/boot_cluster.sh
- vagrant/create_topics.sh
install:
- make install_dependencies
script:
- make test
- make vet
- make errcheck
- make fmt
sudo: false

vendor/github.com/Shopify/sarama/CHANGELOG.md vendored

@@ -1,5 +1,499 @@
# Changelog
#### Unreleased
#### Version 1.26.1 (2020-02-04)
Improvements:
- Add requests-in-flight metric ([1539](https://github.com/Shopify/sarama/pull/1539))
- Fix misleading example for cluster admin ([1595](https://github.com/Shopify/sarama/pull/1595))
- Replace Travis with GitHub Actions, linters housekeeping ([1573](https://github.com/Shopify/sarama/pull/1573))
- Allow BalanceStrategy to provide custom assignment data ([1592](https://github.com/Shopify/sarama/pull/1592))
Bug Fixes:
- Adds back Consumer.Offsets.CommitInterval to fix API ([1590](https://github.com/Shopify/sarama/pull/1590))
- Fix error message s/CommitInterval/AutoCommit.Interval ([1589](https://github.com/Shopify/sarama/pull/1589))
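Both 1.26.1 fixes above touch the consumer auto-commit settings: in v1.26.x the deprecated `Consumer.Offsets.CommitInterval` is superseded by the `AutoCommit` sub-struct. A minimal sketch of the new knobs (the interval value here is illustrative):

```go
package example

import (
	"time"

	"github.com/Shopify/sarama"
)

func consumerConfig() *sarama.Config {
	cfg := sarama.NewConfig()
	// AutoCommit.Interval supersedes the deprecated Consumer.Offsets.CommitInterval.
	cfg.Consumer.Offsets.AutoCommit.Enable = true
	cfg.Consumer.Offsets.AutoCommit.Interval = 2 * time.Second
	// Set Enable to false to take over offset commits manually.
	return cfg
}
```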
#### Version 1.26.0 (2020-01-24)
New Features:
- Enable zstd compression
([1574](https://github.com/Shopify/sarama/pull/1574),
[1582](https://github.com/Shopify/sarama/pull/1582))
- Support headers in tools kafka-console-producer
([1549](https://github.com/Shopify/sarama/pull/1549))
Improvements:
- Add SASL AuthIdentity to SASL frames (authzid)
([1585](https://github.com/Shopify/sarama/pull/1585)).
Bug Fixes:
- Sending messages with ZStd compression enabled fails in multiple ways
([1252](https://github.com/Shopify/sarama/issues/1252)).
- Use the broker for any admin on BrokerConfig
([1571](https://github.com/Shopify/sarama/pull/1571)).
- Set DescribeConfigRequest Version field
([1576](https://github.com/Shopify/sarama/pull/1576)).
- ConsumerGroup flooding logs with client/metadata update req
([1578](https://github.com/Shopify/sarama/pull/1578)).
- MetadataRequest version in DescribeCluster
([1580](https://github.com/Shopify/sarama/pull/1580)).
- Fix deadlock in consumer group handleError
([1581](https://github.com/Shopify/sarama/pull/1581))
- Fill in the Fetch{Request,Response} protocol
([1582](https://github.com/Shopify/sarama/pull/1582)).
- Retry topic request on ControllerNotAvailable
([1586](https://github.com/Shopify/sarama/pull/1586)).
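Using the zstd support listed above requires opting in on the producer config; sarama validates that the configured protocol version is at least Kafka 2.1.0, the first broker release that accepts zstd. A minimal sketch:

```go
package example

import "github.com/Shopify/sarama"

func zstdProducerConfig() *sarama.Config {
	cfg := sarama.NewConfig()
	// Brokers older than Kafka 2.1.0 reject zstd-compressed batches.
	cfg.Version = sarama.V2_1_0_0
	cfg.Producer.Compression = sarama.CompressionZSTD
	return cfg
}
```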
#### Version 1.25.0 (2020-01-13)
New Features:
- Support TLS protocol in kafka-producer-performance
([1538](https://github.com/Shopify/sarama/pull/1538)).
- Add support for kafka 2.4.0
([1552](https://github.com/Shopify/sarama/pull/1552)).
Improvements:
- Allow the Consumer to disable auto-commit offsets
([1164](https://github.com/Shopify/sarama/pull/1164)).
- Produce records with consistent timestamps
([1455](https://github.com/Shopify/sarama/pull/1455)).
Bug Fixes:
- Fix incorrect SetTopicMetadata name mentions
([1534](https://github.com/Shopify/sarama/pull/1534)).
- Fix client.tryRefreshMetadata Println
([1535](https://github.com/Shopify/sarama/pull/1535)).
- Fix panic on calling updateMetadata on closed client
([1531](https://github.com/Shopify/sarama/pull/1531)).
- Fix possible faulty metrics in TestFuncProducing
([1545](https://github.com/Shopify/sarama/pull/1545)).
#### Version 1.24.1 (2019-10-31)
New Features:
- Add DescribeLogDirs Request/Response pair
([1520](https://github.com/Shopify/sarama/pull/1520)).
Bug Fixes:
- Fix ClusterAdmin returning invalid controller ID on DescribeCluster
([1518](https://github.com/Shopify/sarama/pull/1518)).
- Fix issue with consumergroup not rebalancing when new partition is added
([1525](https://github.com/Shopify/sarama/pull/1525)).
- Ensure consistent use of read/write deadlines
([1529](https://github.com/Shopify/sarama/pull/1529)).
#### Version 1.24.0 (2019-10-09)
New Features:
- Add sticky partition assignor
([1416](https://github.com/Shopify/sarama/pull/1416)).
- Switch from cgo zstd package to pure Go implementation
([1477](https://github.com/Shopify/sarama/pull/1477)).
Improvements:
- Allow creating ClusterAdmin from client
([1415](https://github.com/Shopify/sarama/pull/1415)).
- Set KafkaVersion in ListAcls method
([1452](https://github.com/Shopify/sarama/pull/1452)).
- Set request version in CreateACL ClusterAdmin method
([1458](https://github.com/Shopify/sarama/pull/1458)).
- Set request version in DeleteACL ClusterAdmin method
([1461](https://github.com/Shopify/sarama/pull/1461)).
- Handle missed error codes on TopicMetaDataRequest and GroupCoordinatorRequest
([1464](https://github.com/Shopify/sarama/pull/1464)).
- Remove direct usage of gofork
([1465](https://github.com/Shopify/sarama/pull/1465)).
- Add support for Go 1.13
([1478](https://github.com/Shopify/sarama/pull/1478)).
- Improve behavior of NewMockListAclsResponse
([1481](https://github.com/Shopify/sarama/pull/1481)).
Bug Fixes:
- Fix race condition in consumergroup example
([1434](https://github.com/Shopify/sarama/pull/1434)).
- Fix brokerProducer goroutine leak
([1442](https://github.com/Shopify/sarama/pull/1442)).
- Use released version of lz4 library
([1469](https://github.com/Shopify/sarama/pull/1469)).
- Set correct version in MockDeleteTopicsResponse
([1484](https://github.com/Shopify/sarama/pull/1484)).
- Fix CLI help message typo
([1494](https://github.com/Shopify/sarama/pull/1494)).
Known Issues:
- Please **don't** use Zstd, as it doesn't work right now.
See https://github.com/Shopify/sarama/issues/1252
#### Version 1.23.1 (2019-07-22)
Bug Fixes:
- Fix fetch delete bug record
([1425](https://github.com/Shopify/sarama/pull/1425)).
- Handle SASL/OAUTHBEARER token rejection
([1428](https://github.com/Shopify/sarama/pull/1428)).
#### Version 1.23.0 (2019-07-02)
New Features:
- Add support for Kafka 2.3.0
([1418](https://github.com/Shopify/sarama/pull/1418)).
- Add support for ListConsumerGroupOffsets v2
([1374](https://github.com/Shopify/sarama/pull/1374)).
- Add support for DeleteConsumerGroup
([1417](https://github.com/Shopify/sarama/pull/1417)).
- Add support for SASLVersion configuration
([1410](https://github.com/Shopify/sarama/pull/1410)).
- Add kerberos support
([1366](https://github.com/Shopify/sarama/pull/1366)).
Improvements:
- Improve sasl_scram_client example
([1406](https://github.com/Shopify/sarama/pull/1406)).
- Fix shutdown and race-condition in consumer-group example
([1404](https://github.com/Shopify/sarama/pull/1404)).
- Add support for error codes 77—81
([1397](https://github.com/Shopify/sarama/pull/1397)).
- Pool internal objects allocated per message
([1385](https://github.com/Shopify/sarama/pull/1385)).
- Reduce packet decoder allocations
([1373](https://github.com/Shopify/sarama/pull/1373)).
- Support timeout when fetching metadata
([1359](https://github.com/Shopify/sarama/pull/1359)).
Bug Fixes:
- Fix fetch size integer overflow
([1376](https://github.com/Shopify/sarama/pull/1376)).
- Handle and log throttled FetchResponses
([1383](https://github.com/Shopify/sarama/pull/1383)).
- Refactor misspelled word Resouce to Resource
([1368](https://github.com/Shopify/sarama/pull/1368)).
#### Version 1.22.1 (2019-04-29)
Improvements:
- Use zstd 1.3.8
([1350](https://github.com/Shopify/sarama/pull/1350)).
- Add support for SaslHandshakeRequest v1
([1354](https://github.com/Shopify/sarama/pull/1354)).
Bug Fixes:
- Fix V5 MetadataRequest nullable topics array
([1353](https://github.com/Shopify/sarama/pull/1353)).
- Use a different SCRAM client for each broker connection
([1349](https://github.com/Shopify/sarama/pull/1349)).
- Fix AllowAutoTopicCreation for MetadataRequest greater than v3
([1344](https://github.com/Shopify/sarama/pull/1344)).
#### Version 1.22.0 (2019-04-09)
New Features:
- Add Offline Replicas Operation to Client
([1318](https://github.com/Shopify/sarama/pull/1318)).
- Allow using proxy when connecting to broker
([1326](https://github.com/Shopify/sarama/pull/1326)).
- Implement ReadCommitted
([1307](https://github.com/Shopify/sarama/pull/1307)).
- Add support for Kafka 2.2.0
([1331](https://github.com/Shopify/sarama/pull/1331)).
- Add SASL SCRAM-SHA-512 and SCRAM-SHA-256 mechanisms
([1295](https://github.com/Shopify/sarama/pull/1295)).
Improvements:
- Unregister all broker metrics on broker stop
([1232](https://github.com/Shopify/sarama/pull/1232)).
- Add SCRAM authentication example
([1303](https://github.com/Shopify/sarama/pull/1303)).
- Add consumergroup examples
([1304](https://github.com/Shopify/sarama/pull/1304)).
- Expose consumer batch size metric
([1296](https://github.com/Shopify/sarama/pull/1296)).
- Add TLS options to console producer and consumer
([1300](https://github.com/Shopify/sarama/pull/1300)).
- Reduce client close bookkeeping
([1297](https://github.com/Shopify/sarama/pull/1297)).
- Satisfy error interface in create responses
([1154](https://github.com/Shopify/sarama/pull/1154)).
- Please lint gods
([1346](https://github.com/Shopify/sarama/pull/1346)).
Bug Fixes:
- Fix multi consumer group instance crash
([1338](https://github.com/Shopify/sarama/pull/1338)).
- Update lz4 to latest version
([1347](https://github.com/Shopify/sarama/pull/1347)).
- Retry ErrNotCoordinatorForConsumer in new consumergroup session
([1231](https://github.com/Shopify/sarama/pull/1231)).
- Fix cleanup error handler
([1332](https://github.com/Shopify/sarama/pull/1332)).
- Fix race condition in PartitionConsumer
([1156](https://github.com/Shopify/sarama/pull/1156)).
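For the SCRAM mechanisms added in 1.22.0, sarama delegates the SCRAM conversation to a caller-supplied `SCRAMClient`. The sketch below mirrors sarama's own sasl_scram_client example, adapting `github.com/xdg/scram` (which this commit also adds to go.sum); the credentials are placeholders.

```go
package example

import (
	"github.com/Shopify/sarama"
	"github.com/xdg/scram"
)

// XDGSCRAMClient adapts github.com/xdg/scram to sarama's SCRAMClient interface.
type XDGSCRAMClient struct {
	*scram.Client
	*scram.ClientConversation
	scram.HashGeneratorFcn
}

func (x *XDGSCRAMClient) Begin(userName, password, authzID string) (err error) {
	x.Client, err = x.HashGeneratorFcn.NewClient(userName, password, authzID)
	if err != nil {
		return err
	}
	x.ClientConversation = x.Client.NewConversation()
	return nil
}

func (x *XDGSCRAMClient) Step(challenge string) (string, error) {
	return x.ClientConversation.Step(challenge)
}

func (x *XDGSCRAMClient) Done() bool {
	return x.ClientConversation.Done()
}

func scramConfig(user, pass string) *sarama.Config {
	cfg := sarama.NewConfig()
	cfg.Net.SASL.Enable = true
	cfg.Net.SASL.User = user
	cfg.Net.SASL.Password = pass
	cfg.Net.SASL.Mechanism = sarama.SASLTypeSCRAMSHA512
	cfg.Net.SASL.SCRAMClientGeneratorFunc = func() sarama.SCRAMClient {
		return &XDGSCRAMClient{HashGeneratorFcn: scram.SHA512}
	}
	return cfg
}
```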
#### Version 1.21.0 (2019-02-24)
New Features:
- Add CreateAclRequest, DescribeAclRequest, DeleteAclRequest
([1236](https://github.com/Shopify/sarama/pull/1236)).
- Add DescribeTopic, DescribeConsumerGroup, ListConsumerGroups, ListConsumerGroupOffsets admin requests
([1178](https://github.com/Shopify/sarama/pull/1178)).
- Implement SASL/OAUTHBEARER
([1240](https://github.com/Shopify/sarama/pull/1240)).
Improvements:
- Add Go mod support
([1282](https://github.com/Shopify/sarama/pull/1282)).
- Add error codes 73—76
([1239](https://github.com/Shopify/sarama/pull/1239)).
- Add retry backoff function
([1160](https://github.com/Shopify/sarama/pull/1160)).
- Maintain metadata in the producer even when retries are disabled
([1189](https://github.com/Shopify/sarama/pull/1189)).
- Include ReplicaAssignment in ListTopics
([1274](https://github.com/Shopify/sarama/pull/1274)).
- Add producer performance tool
([1222](https://github.com/Shopify/sarama/pull/1222)).
- Add support LogAppend timestamps
([1258](https://github.com/Shopify/sarama/pull/1258)).
Bug Fixes:
- Fix potential deadlock when a heartbeat request fails
([1286](https://github.com/Shopify/sarama/pull/1286)).
- Fix consuming compacted topic
([1227](https://github.com/Shopify/sarama/pull/1227)).
- Set correct Kafka version for DescribeConfigsRequest v1
([1277](https://github.com/Shopify/sarama/pull/1277)).
- Update kafka test version
([1273](https://github.com/Shopify/sarama/pull/1273)).
#### Version 1.20.1 (2019-01-10)
New Features:
- Add optional replica id in offset request
([1100](https://github.com/Shopify/sarama/pull/1100)).
Improvements:
- Implement DescribeConfigs Request + Response v1 & v2
([1230](https://github.com/Shopify/sarama/pull/1230)).
- Reuse compression objects
([1185](https://github.com/Shopify/sarama/pull/1185)).
- Switch from png to svg for GoDoc link in README
([1243](https://github.com/Shopify/sarama/pull/1243)).
- Fix typo in deprecation notice for FetchResponseBlock.Records
([1242](https://github.com/Shopify/sarama/pull/1242)).
- Fix typos in consumer metadata response file
([1244](https://github.com/Shopify/sarama/pull/1244)).
Bug Fixes:
- Revert to individual msg retries for non-idempotent
([1203](https://github.com/Shopify/sarama/pull/1203)).
- Respect MaxMessageBytes limit for uncompressed messages
([1141](https://github.com/Shopify/sarama/pull/1141)).
#### Version 1.20.0 (2018-12-10)
New Features:
- Add support for zstd compression
([#1170](https://github.com/Shopify/sarama/pull/1170)).
- Add support for Idempotent Producer
([#1152](https://github.com/Shopify/sarama/pull/1152)).
- Add support for Kafka 2.1.0
([#1229](https://github.com/Shopify/sarama/pull/1229)).
- Add support for OffsetCommit request/response pairs versions v1 to v5
([#1201](https://github.com/Shopify/sarama/pull/1201)).
- Add support for OffsetFetch request/response pair up to version v5
([#1198](https://github.com/Shopify/sarama/pull/1198)).
Improvements:
- Export broker's Rack setting
([#1173](https://github.com/Shopify/sarama/pull/1173)).
- Always use latest patch version of Go on CI
([#1202](https://github.com/Shopify/sarama/pull/1202)).
- Add error codes 61 to 72
([#1195](https://github.com/Shopify/sarama/pull/1195)).
Bug Fixes:
- Fix build without cgo
([#1182](https://github.com/Shopify/sarama/pull/1182)).
- Fix go vet suggestion in consumer group file
([#1209](https://github.com/Shopify/sarama/pull/1209)).
- Fix typos in code and comments
([#1228](https://github.com/Shopify/sarama/pull/1228)).
#### Version 1.19.0 (2018-09-27)
New Features:
- Implement a higher-level consumer group
([#1099](https://github.com/Shopify/sarama/pull/1099)).
Improvements:
- Add support for Go 1.11
([#1176](https://github.com/Shopify/sarama/pull/1176)).
Bug Fixes:
- Fix encoding of `MetadataResponse` with version 2 and higher
([#1174](https://github.com/Shopify/sarama/pull/1174)).
- Fix race condition in mock async producer
([#1174](https://github.com/Shopify/sarama/pull/1174)).
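The higher-level consumer group added in 1.19.0 is driven through a `ConsumerGroupHandler`; a minimal sketch (broker, group, and topic names are placeholders):

```go
package example

import (
	"context"
	"log"

	"github.com/Shopify/sarama"
)

// handler implements sarama.ConsumerGroupHandler.
type handler struct{}

func (handler) Setup(sarama.ConsumerGroupSession) error   { return nil }
func (handler) Cleanup(sarama.ConsumerGroupSession) error { return nil }

func (handler) ConsumeClaim(sess sarama.ConsumerGroupSession, claim sarama.ConsumerGroupClaim) error {
	for msg := range claim.Messages() {
		log.Printf("topic=%q partition=%d offset=%d", msg.Topic, msg.Partition, msg.Offset)
		sess.MarkMessage(msg, "") // mark as processed for the next offset commit
	}
	return nil
}

func consume(ctx context.Context) error {
	cfg := sarama.NewConfig()
	cfg.Version = sarama.V2_1_0_0 // consumer groups need at least V0_10_2_0
	group, err := sarama.NewConsumerGroup([]string{"localhost:9092"}, "example-group", cfg)
	if err != nil {
		return err
	}
	defer group.Close()
	for {
		// Consume blocks for the lifetime of one session; loop to rejoin
		// the group after each rebalance.
		if err := group.Consume(ctx, []string{"example-topic"}, handler{}); err != nil {
			return err
		}
	}
}
```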
#### Version 1.18.0 (2018-09-07)
New Features:
- Make `Partitioner.RequiresConsistency` vary per-message
([#1112](https://github.com/Shopify/sarama/pull/1112)).
- Add customizable partitioner
([#1118](https://github.com/Shopify/sarama/pull/1118)).
- Add `ClusterAdmin` support for `CreateTopic`, `DeleteTopic`, `CreatePartitions`,
`DeleteRecords`, `DescribeConfig`, `AlterConfig`, `CreateACL`, `ListAcls`, `DeleteACL`
([#1055](https://github.com/Shopify/sarama/pull/1055)).
Improvements:
- Add support for Kafka 2.0.0
([#1149](https://github.com/Shopify/sarama/pull/1149)).
- Allow setting `LocalAddr` when dialing an address to support multi-homed hosts
([#1123](https://github.com/Shopify/sarama/pull/1123)).
- Simpler offset management
([#1127](https://github.com/Shopify/sarama/pull/1127)).
Bug Fixes:
- Fix mutation of `ProducerMessage.MetaData` when producing to Kafka
([#1110](https://github.com/Shopify/sarama/pull/1110)).
- Fix consumer block when response did not contain all the
expected topic/partition blocks
([#1086](https://github.com/Shopify/sarama/pull/1086)).
- Fix consumer block when response contains only control messages
([#1115](https://github.com/Shopify/sarama/pull/1115)).
- Add timeout config for ClusterAdmin requests
([#1142](https://github.com/Shopify/sarama/pull/1142)).
- Add version check when producing message with headers
([#1117](https://github.com/Shopify/sarama/pull/1117)).
- Fix `MetadataRequest` for empty list of topics
([#1132](https://github.com/Shopify/sarama/pull/1132)).
- Fix producer topic metadata on-demand fetch when topic error happens in metadata response
([#1125](https://github.com/Shopify/sarama/pull/1125)).
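The `ClusterAdmin` introduced in 1.18.0 wraps these admin request/response pairs; a hedged sketch of creating a topic (name and sizing are placeholders):

```go
package example

import "github.com/Shopify/sarama"

func createTopic(brokers []string) error {
	cfg := sarama.NewConfig()
	cfg.Version = sarama.V2_0_0_0 // admin requests need a reasonably recent protocol version
	admin, err := sarama.NewClusterAdmin(brokers, cfg)
	if err != nil {
		return err
	}
	defer admin.Close()
	return admin.CreateTopic("example-topic", &sarama.TopicDetail{
		NumPartitions:     3,
		ReplicationFactor: 1,
	}, false) // validateOnly=false actually creates the topic
}
```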
#### Version 1.17.0 (2018-05-30)
New Features:
- Add support for gzip compression levels
([#1044](https://github.com/Shopify/sarama/pull/1044)).
- Add support for Metadata request/response pairs versions v1 to v5
([#1047](https://github.com/Shopify/sarama/pull/1047),
[#1069](https://github.com/Shopify/sarama/pull/1069)).
- Add versioning to JoinGroup request/response pairs
([#1098](https://github.com/Shopify/sarama/pull/1098))
- Add support for CreatePartitions, DeleteGroups, DeleteRecords request/response pairs
([#1065](https://github.com/Shopify/sarama/pull/1065),
[#1096](https://github.com/Shopify/sarama/pull/1096),
[#1027](https://github.com/Shopify/sarama/pull/1027)).
- Add `Controller()` method to Client interface
([#1063](https://github.com/Shopify/sarama/pull/1063)).
Improvements:
- ConsumerMetadataReq/Resp has been migrated to FindCoordinatorReq/Resp
([#1010](https://github.com/Shopify/sarama/pull/1010)).
- Expose missing protocol parts: `msgSet` and `recordBatch`
([#1049](https://github.com/Shopify/sarama/pull/1049)).
- Add support for v1 DeleteTopics Request
([#1052](https://github.com/Shopify/sarama/pull/1052)).
- Add support for Go 1.10
([#1064](https://github.com/Shopify/sarama/pull/1064)).
- Claim support for Kafka 1.1.0
([#1073](https://github.com/Shopify/sarama/pull/1073)).
Bug Fixes:
- Fix FindCoordinatorResponse.encode to allow nil Coordinator
([#1050](https://github.com/Shopify/sarama/pull/1050),
[#1051](https://github.com/Shopify/sarama/pull/1051)).
- Clear all metadata when we have the latest topic info
([#1033](https://github.com/Shopify/sarama/pull/1033)).
- Make `PartitionConsumer.Close` idempotent
([#1092](https://github.com/Shopify/sarama/pull/1092)).
#### Version 1.16.0 (2018-02-12)
New Features:
- Add support for the Create/Delete Topics request/response pairs
([#1007](https://github.com/Shopify/sarama/pull/1007),
[#1008](https://github.com/Shopify/sarama/pull/1008)).
- Add support for the Describe/Create/Delete ACL request/response pairs
([#1009](https://github.com/Shopify/sarama/pull/1009)).
- Add support for the five transaction-related request/response pairs
([#1016](https://github.com/Shopify/sarama/pull/1016)).
Improvements:
- Permit setting version on mock producer responses
([#999](https://github.com/Shopify/sarama/pull/999)).
- Add `NewMockBrokerListener` helper for testing TLS connections
([#1019](https://github.com/Shopify/sarama/pull/1019)).
- Changed the default value for `Consumer.Fetch.Default` from 32KiB to 1MiB
which results in much higher throughput in most cases
([#1024](https://github.com/Shopify/sarama/pull/1024)).
- Reuse the `time.Ticker` across fetch requests in the PartitionConsumer to
reduce CPU and memory usage when processing many partitions
([#1028](https://github.com/Shopify/sarama/pull/1028)).
- Assign relative offsets to messages in the producer to save the brokers a
recompression pass
([#1002](https://github.com/Shopify/sarama/pull/1002),
[#1015](https://github.com/Shopify/sarama/pull/1015)).
Bug Fixes:
- Fix producing uncompressed batches with the new protocol format
([#1032](https://github.com/Shopify/sarama/issues/1032)).
- Fix consuming compacted topics with the new protocol format
([#1005](https://github.com/Shopify/sarama/issues/1005)).
- Fix consuming topics with a mix of protocol formats
([#1021](https://github.com/Shopify/sarama/issues/1021)).
- Fix consuming when the broker includes multiple batches in a single response
([#1022](https://github.com/Shopify/sarama/issues/1022)).
- Fix detection of `PartialTrailingMessage` when the partial message was
truncated before the magic value indicating its version
([#1030](https://github.com/Shopify/sarama/pull/1030)).
- Fix expectation-checking in the mock of `SyncProducer.SendMessages`
([#1035](https://github.com/Shopify/sarama/pull/1035)).
#### Version 1.15.0 (2017-12-08)
New Features:
- Claim official support for Kafka 1.0, though it did already work
([#984](https://github.com/Shopify/sarama/pull/984)).
- Helper methods for Kafka version numbers to/from strings
([#989](https://github.com/Shopify/sarama/pull/989)).
- Implement CreatePartitions request/response
([#985](https://github.com/Shopify/sarama/pull/985)).
Improvements:
- Add error codes 45-60
([#986](https://github.com/Shopify/sarama/issues/986)).
Bug Fixes:
- Fix slow consuming for certain Kafka 0.11/1.0 configurations
([#982](https://github.com/Shopify/sarama/pull/982)).
- Correctly determine when a FetchResponse contains the new message format
([#990](https://github.com/Shopify/sarama/pull/990)).
- Fix producing with multiple headers
([#996](https://github.com/Shopify/sarama/pull/996)).
- Fix handling of truncated record batches
([#998](https://github.com/Shopify/sarama/pull/998)).
- Fix leaking metrics when closing brokers
([#991](https://github.com/Shopify/sarama/pull/991)).
#### Version 1.14.0 (2017-11-13)
New Features:
- Add support for the new Kafka 0.11 record-batch format, including the wire
protocol and the necessary behavioural changes in the producer and consumer.
Transactions and idempotency are not yet supported, but producing and
consuming should work with all the existing bells and whistles (batching,
compression, etc) as well as the new custom headers. Thanks to Vlad Hanciuta
of Arista Networks for this work. Part of
([#901](https://github.com/Shopify/sarama/issues/901)).
Bug Fixes:
- Fix encoding of ProduceResponse versions in test
([#970](https://github.com/Shopify/sarama/pull/970)).
- Return partial replicas list when we have it
([#975](https://github.com/Shopify/sarama/pull/975)).
#### Version 1.13.0 (2017-10-04)
New Features:

vendor/github.com/Shopify/sarama/LICENSE vendored

@@ -1,4 +1,4 @@
Copyright (c) 2013 Evan Huus
Copyright (c) 2013 Shopify
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the

vendor/github.com/Shopify/sarama/Makefile vendored

@@ -1,21 +1,27 @@
default: fmt vet errcheck test
default: fmt get update test lint
test:
go test -v -timeout 60s -race ./...
GO := GO111MODULE=on GOPRIVATE=github.com/linkedin GOSUMDB=off go
GOBUILD := CGO_ENABLED=0 $(GO) build $(BUILD_FLAG)
GOTEST := $(GO) test -gcflags='-l' -p 3 -v -race -timeout 6m -coverprofile=profile.out -covermode=atomic
vet:
go vet ./...
errcheck:
errcheck github.com/Shopify/sarama/...
fmt:
@if [ -n "$$(go fmt ./...)" ]; then echo 'Please run go fmt on your code.' && exit 1; fi
install_dependencies: install_errcheck get
install_errcheck:
go get github.com/kisielk/errcheck
FILES := $(shell find . -name '*.go' -type f -not -name '*.pb.go' -not -name '*_generated.go' -not -name '*_test.go')
TESTS := $(shell find . -name '*.go' -type f -not -name '*.pb.go' -not -name '*_generated.go' -name '*_test.go')
get:
go get -t
$(GO) get ./...
$(GO) mod verify
$(GO) mod tidy
update:
$(GO) get -u -v all
$(GO) mod verify
$(GO) mod tidy
fmt:
gofmt -s -l -w $(FILES) $(TESTS)
lint:
golangci-lint run
test:
$(GOTEST) ./...

vendor/github.com/Shopify/sarama/README.md vendored

@@ -1,12 +1,12 @@
sarama
======
# sarama
[![GoDoc](https://godoc.org/github.com/Shopify/sarama?status.png)](https://godoc.org/github.com/Shopify/sarama)
[![GoDoc](https://godoc.org/github.com/Shopify/sarama?status.svg)](https://godoc.org/github.com/Shopify/sarama)
[![Build Status](https://travis-ci.org/Shopify/sarama.svg?branch=master)](https://travis-ci.org/Shopify/sarama)
[![Coverage](https://codecov.io/gh/Shopify/sarama/branch/master/graph/badge.svg)](https://codecov.io/gh/Shopify/sarama)
Sarama is an MIT-licensed Go client library for [Apache Kafka](https://kafka.apache.org/) version 0.8 (and later).
### Getting started
## Getting started
- API documentation and examples are available via [godoc](https://godoc.org/github.com/Shopify/sarama).
- Mocks for testing are available in the [mocks](./mocks) subpackage.
@@ -15,24 +15,22 @@ Sarama is an MIT-licensed Go client library for [Apache Kafka](https://kafka.apa
You might also want to look at the [Frequently Asked Questions](https://github.com/Shopify/sarama/wiki/Frequently-Asked-Questions).
### Compatibility and API stability
## Compatibility and API stability
Sarama provides a "2 releases + 2 months" compatibility guarantee: we support
the two latest stable releases of Kafka and Go, and we provide a two month
grace period for older releases. This means we currently officially support
Go 1.9 through 1.7, and Kafka 0.11 through 0.9, although older releases are
Go 1.12 through 1.14, and Kafka 2.1 through 2.4, although older releases are
still likely to work.
Sarama follows semantic versioning and provides API stability via the gopkg.in service.
You can import a version with a guaranteed stable API via http://gopkg.in/Shopify/sarama.v1.
A changelog is available [here](CHANGELOG.md).
### Contributing
## Contributing
* Get started by checking our [contribution guidelines](https://github.com/Shopify/sarama/blob/master/.github/CONTRIBUTING.md).
* Read the [Sarama wiki](https://github.com/Shopify/sarama/wiki) for more
technical and design details.
* The [Kafka Protocol Specification](https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol)
contains a wealth of useful information.
* For more general issues, there is [a google group](https://groups.google.com/forum/#!forum/kafka-clients) for Kafka client developers.
* If you have any questions, just ask!
- Get started by checking our [contribution guidelines](https://github.com/Shopify/sarama/blob/master/.github/CONTRIBUTING.md).
- Read the [Sarama wiki](https://github.com/Shopify/sarama/wiki) for more technical and design details.
- The [Kafka Protocol Specification](https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol) contains a wealth of useful information.
- For more general issues, there is [a google group](https://groups.google.com/forum/#!forum/kafka-clients) for Kafka client developers.
- If you have any questions, just ask!

vendor/github.com/Shopify/sarama/Vagrantfile vendored

@@ -1,14 +1,8 @@
# -*- mode: ruby -*-
# vi: set ft=ruby :
# Vagrantfile API/syntax version. Don't touch unless you know what you're doing!
VAGRANTFILE_API_VERSION = "2"
# We have 5 * 192MB ZK processes and 5 * 320MB Kafka processes => 2560MB
MEMORY = 3072
Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
config.vm.box = "ubuntu/trusty64"
Vagrant.configure("2") do |config|
config.vm.box = "ubuntu/bionic64"
config.vm.provision :shell, path: "vagrant/provision.sh"

vendor/github.com/Shopify/sarama/acl_bindings.go generated vendored Normal file

@@ -0,0 +1,138 @@
package sarama
//Resource holds information about acl resource type
type Resource struct {
ResourceType AclResourceType
ResourceName string
ResourcePatternType AclResourcePatternType
}
func (r *Resource) encode(pe packetEncoder, version int16) error {
pe.putInt8(int8(r.ResourceType))
if err := pe.putString(r.ResourceName); err != nil {
return err
}
if version == 1 {
if r.ResourcePatternType == AclPatternUnknown {
Logger.Print("Cannot encode an unknown resource pattern type, using Literal instead")
r.ResourcePatternType = AclPatternLiteral
}
pe.putInt8(int8(r.ResourcePatternType))
}
return nil
}
func (r *Resource) decode(pd packetDecoder, version int16) (err error) {
resourceType, err := pd.getInt8()
if err != nil {
return err
}
r.ResourceType = AclResourceType(resourceType)
if r.ResourceName, err = pd.getString(); err != nil {
return err
}
if version == 1 {
pattern, err := pd.getInt8()
if err != nil {
return err
}
r.ResourcePatternType = AclResourcePatternType(pattern)
}
return nil
}
//Acl holds information about acl type
type Acl struct {
Principal string
Host string
Operation AclOperation
PermissionType AclPermissionType
}
func (a *Acl) encode(pe packetEncoder) error {
if err := pe.putString(a.Principal); err != nil {
return err
}
if err := pe.putString(a.Host); err != nil {
return err
}
pe.putInt8(int8(a.Operation))
pe.putInt8(int8(a.PermissionType))
return nil
}
func (a *Acl) decode(pd packetDecoder, version int16) (err error) {
if a.Principal, err = pd.getString(); err != nil {
return err
}
if a.Host, err = pd.getString(); err != nil {
return err
}
operation, err := pd.getInt8()
if err != nil {
return err
}
a.Operation = AclOperation(operation)
permissionType, err := pd.getInt8()
if err != nil {
return err
}
a.PermissionType = AclPermissionType(permissionType)
return nil
}
//ResourceAcls is an acl resource type
type ResourceAcls struct {
Resource
Acls []*Acl
}
func (r *ResourceAcls) encode(pe packetEncoder, version int16) error {
if err := r.Resource.encode(pe, version); err != nil {
return err
}
if err := pe.putArrayLength(len(r.Acls)); err != nil {
return err
}
for _, acl := range r.Acls {
if err := acl.encode(pe); err != nil {
return err
}
}
return nil
}
func (r *ResourceAcls) decode(pd packetDecoder, version int16) error {
if err := r.Resource.decode(pd, version); err != nil {
return err
}
n, err := pd.getArrayLength()
if err != nil {
return err
}
r.Acls = make([]*Acl, n)
for i := 0; i < n; i++ {
r.Acls[i] = new(Acl)
if err := r.Acls[i].decode(pd, version); err != nil {
return err
}
}
return nil
}

vendor/github.com/Shopify/sarama/acl_create_request.go generated vendored Normal file

@@ -0,0 +1,89 @@
package sarama
//CreateAclsRequest is an acl creation request
type CreateAclsRequest struct {
Version int16
AclCreations []*AclCreation
}
func (c *CreateAclsRequest) encode(pe packetEncoder) error {
if err := pe.putArrayLength(len(c.AclCreations)); err != nil {
return err
}
for _, aclCreation := range c.AclCreations {
if err := aclCreation.encode(pe, c.Version); err != nil {
return err
}
}
return nil
}
func (c *CreateAclsRequest) decode(pd packetDecoder, version int16) (err error) {
c.Version = version
n, err := pd.getArrayLength()
if err != nil {
return err
}
c.AclCreations = make([]*AclCreation, n)
for i := 0; i < n; i++ {
c.AclCreations[i] = new(AclCreation)
if err := c.AclCreations[i].decode(pd, version); err != nil {
return err
}
}
return nil
}
func (c *CreateAclsRequest) key() int16 {
return 30
}
func (c *CreateAclsRequest) version() int16 {
return c.Version
}
func (c *CreateAclsRequest) headerVersion() int16 {
return 1
}
func (c *CreateAclsRequest) requiredVersion() KafkaVersion {
switch c.Version {
case 1:
return V2_0_0_0
default:
return V0_11_0_0
}
}
//AclCreation is a wrapper around Resource and Acl type
type AclCreation struct {
Resource
Acl
}
func (a *AclCreation) encode(pe packetEncoder, version int16) error {
if err := a.Resource.encode(pe, version); err != nil {
return err
}
if err := a.Acl.encode(pe); err != nil {
return err
}
return nil
}
func (a *AclCreation) decode(pd packetDecoder, version int16) (err error) {
if err := a.Resource.decode(pd, version); err != nil {
return err
}
if err := a.Acl.decode(pd, version); err != nil {
return err
}
return nil
}

vendor/github.com/Shopify/sarama/acl_create_response.go generated vendored Normal file

@@ -0,0 +1,94 @@
package sarama
import "time"
// CreateAclsResponse is an acl creation response type
type CreateAclsResponse struct {
ThrottleTime time.Duration
AclCreationResponses []*AclCreationResponse
}
func (c *CreateAclsResponse) encode(pe packetEncoder) error {
pe.putInt32(int32(c.ThrottleTime / time.Millisecond))
if err := pe.putArrayLength(len(c.AclCreationResponses)); err != nil {
return err
}
for _, aclCreationResponse := range c.AclCreationResponses {
if err := aclCreationResponse.encode(pe); err != nil {
return err
}
}
return nil
}
func (c *CreateAclsResponse) decode(pd packetDecoder, version int16) (err error) {
throttleTime, err := pd.getInt32()
if err != nil {
return err
}
c.ThrottleTime = time.Duration(throttleTime) * time.Millisecond
n, err := pd.getArrayLength()
if err != nil {
return err
}
c.AclCreationResponses = make([]*AclCreationResponse, n)
for i := 0; i < n; i++ {
c.AclCreationResponses[i] = new(AclCreationResponse)
if err := c.AclCreationResponses[i].decode(pd, version); err != nil {
return err
}
}
return nil
}
func (c *CreateAclsResponse) key() int16 {
return 30
}
func (c *CreateAclsResponse) version() int16 {
return 0
}
func (c *CreateAclsResponse) headerVersion() int16 {
return 0
}
func (c *CreateAclsResponse) requiredVersion() KafkaVersion {
return V0_11_0_0
}
//AclCreationResponse is an acl creation response type
type AclCreationResponse struct {
Err KError
ErrMsg *string
}
func (a *AclCreationResponse) encode(pe packetEncoder) error {
pe.putInt16(int16(a.Err))
if err := pe.putNullableString(a.ErrMsg); err != nil {
return err
}
return nil
}
func (a *AclCreationResponse) decode(pd packetDecoder, version int16) (err error) {
kerr, err := pd.getInt16()
if err != nil {
return err
}
a.Err = KError(kerr)
if a.ErrMsg, err = pd.getNullableString(); err != nil {
return err
}
return nil
}

vendor/github.com/Shopify/sarama/acl_delete_request.go generated vendored Normal file

@@ -0,0 +1,62 @@
package sarama
//DeleteAclsRequest is a delete acl request
type DeleteAclsRequest struct {
Version int
Filters []*AclFilter
}
func (d *DeleteAclsRequest) encode(pe packetEncoder) error {
if err := pe.putArrayLength(len(d.Filters)); err != nil {
return err
}
for _, filter := range d.Filters {
filter.Version = d.Version
if err := filter.encode(pe); err != nil {
return err
}
}
return nil
}
func (d *DeleteAclsRequest) decode(pd packetDecoder, version int16) (err error) {
d.Version = int(version)
n, err := pd.getArrayLength()
if err != nil {
return err
}
d.Filters = make([]*AclFilter, n)
for i := 0; i < n; i++ {
d.Filters[i] = new(AclFilter)
d.Filters[i].Version = int(version)
if err := d.Filters[i].decode(pd, version); err != nil {
return err
}
}
return nil
}
func (d *DeleteAclsRequest) key() int16 {
return 31
}
func (d *DeleteAclsRequest) version() int16 {
return int16(d.Version)
}
func (d *DeleteAclsRequest) headerVersion() int16 {
return 1
}
func (d *DeleteAclsRequest) requiredVersion() KafkaVersion {
switch d.Version {
case 1:
return V2_0_0_0
default:
return V0_11_0_0
}
}

vendor/github.com/Shopify/sarama/acl_delete_response.go generated vendored Normal file

@@ -0,0 +1,163 @@
package sarama
import "time"
//DeleteAclsResponse is a delete acl response
type DeleteAclsResponse struct {
Version int16
ThrottleTime time.Duration
FilterResponses []*FilterResponse
}
func (d *DeleteAclsResponse) encode(pe packetEncoder) error {
pe.putInt32(int32(d.ThrottleTime / time.Millisecond))
if err := pe.putArrayLength(len(d.FilterResponses)); err != nil {
return err
}
for _, filterResponse := range d.FilterResponses {
if err := filterResponse.encode(pe, d.Version); err != nil {
return err
}
}
return nil
}
func (d *DeleteAclsResponse) decode(pd packetDecoder, version int16) (err error) {
throttleTime, err := pd.getInt32()
if err != nil {
return err
}
d.ThrottleTime = time.Duration(throttleTime) * time.Millisecond
n, err := pd.getArrayLength()
if err != nil {
return err
}
d.FilterResponses = make([]*FilterResponse, n)
for i := 0; i < n; i++ {
d.FilterResponses[i] = new(FilterResponse)
if err := d.FilterResponses[i].decode(pd, version); err != nil {
return err
}
}
return nil
}
func (d *DeleteAclsResponse) key() int16 {
return 31
}
func (d *DeleteAclsResponse) version() int16 {
return d.Version
}
func (d *DeleteAclsResponse) headerVersion() int16 {
return 0
}
func (d *DeleteAclsResponse) requiredVersion() KafkaVersion {
return V0_11_0_0
}
//FilterResponse is a filter response type
type FilterResponse struct {
Err KError
ErrMsg *string
MatchingAcls []*MatchingAcl
}
func (f *FilterResponse) encode(pe packetEncoder, version int16) error {
pe.putInt16(int16(f.Err))
if err := pe.putNullableString(f.ErrMsg); err != nil {
return err
}
if err := pe.putArrayLength(len(f.MatchingAcls)); err != nil {
return err
}
for _, matchingAcl := range f.MatchingAcls {
if err := matchingAcl.encode(pe, version); err != nil {
return err
}
}
return nil
}
func (f *FilterResponse) decode(pd packetDecoder, version int16) (err error) {
kerr, err := pd.getInt16()
if err != nil {
return err
}
f.Err = KError(kerr)
if f.ErrMsg, err = pd.getNullableString(); err != nil {
return err
}
n, err := pd.getArrayLength()
if err != nil {
return err
}
f.MatchingAcls = make([]*MatchingAcl, n)
for i := 0; i < n; i++ {
f.MatchingAcls[i] = new(MatchingAcl)
if err := f.MatchingAcls[i].decode(pd, version); err != nil {
return err
}
}
return nil
}
//MatchingAcl is a matching acl type
type MatchingAcl struct {
Err KError
ErrMsg *string
Resource
Acl
}
func (m *MatchingAcl) encode(pe packetEncoder, version int16) error {
pe.putInt16(int16(m.Err))
if err := pe.putNullableString(m.ErrMsg); err != nil {
return err
}
if err := m.Resource.encode(pe, version); err != nil {
return err
}
if err := m.Acl.encode(pe); err != nil {
return err
}
return nil
}
func (m *MatchingAcl) decode(pd packetDecoder, version int16) (err error) {
kerr, err := pd.getInt16()
if err != nil {
return err
}
m.Err = KError(kerr)
if m.ErrMsg, err = pd.getNullableString(); err != nil {
return err
}
if err := m.Resource.decode(pd, version); err != nil {
return err
}
if err := m.Acl.decode(pd, version); err != nil {
return err
}
return nil
}

vendor/github.com/Shopify/sarama/acl_describe_request.go generated vendored Normal file

@@ -0,0 +1,39 @@
package sarama
// DescribeAclsRequest is a describe acl request type
type DescribeAclsRequest struct {
Version int
AclFilter
}
func (d *DescribeAclsRequest) encode(pe packetEncoder) error {
d.AclFilter.Version = d.Version
return d.AclFilter.encode(pe)
}
func (d *DescribeAclsRequest) decode(pd packetDecoder, version int16) (err error) {
d.Version = int(version)
d.AclFilter.Version = int(version)
return d.AclFilter.decode(pd, version)
}
func (d *DescribeAclsRequest) key() int16 {
return 29
}
func (d *DescribeAclsRequest) version() int16 {
return int16(d.Version)
}
func (d *DescribeAclsRequest) headerVersion() int16 {
return 1
}
func (d *DescribeAclsRequest) requiredVersion() KafkaVersion {
switch d.Version {
case 1:
return V2_0_0_0
default:
return V0_11_0_0
}
}

vendor/github.com/Shopify/sarama/acl_describe_response.go generated vendored Normal file

@@ -0,0 +1,91 @@
package sarama
import "time"
//DescribeAclsResponse is a describe acl response type
type DescribeAclsResponse struct {
Version int16
ThrottleTime time.Duration
Err KError
ErrMsg *string
ResourceAcls []*ResourceAcls
}
func (d *DescribeAclsResponse) encode(pe packetEncoder) error {
pe.putInt32(int32(d.ThrottleTime / time.Millisecond))
pe.putInt16(int16(d.Err))
if err := pe.putNullableString(d.ErrMsg); err != nil {
return err
}
if err := pe.putArrayLength(len(d.ResourceAcls)); err != nil {
return err
}
for _, resourceAcl := range d.ResourceAcls {
if err := resourceAcl.encode(pe, d.Version); err != nil {
return err
}
}
return nil
}
func (d *DescribeAclsResponse) decode(pd packetDecoder, version int16) (err error) {
throttleTime, err := pd.getInt32()
if err != nil {
return err
}
d.ThrottleTime = time.Duration(throttleTime) * time.Millisecond
kerr, err := pd.getInt16()
if err != nil {
return err
}
d.Err = KError(kerr)
errmsg, err := pd.getString()
if err != nil {
return err
}
if errmsg != "" {
d.ErrMsg = &errmsg
}
n, err := pd.getArrayLength()
if err != nil {
return err
}
d.ResourceAcls = make([]*ResourceAcls, n)
for i := 0; i < n; i++ {
d.ResourceAcls[i] = new(ResourceAcls)
if err := d.ResourceAcls[i].decode(pd, version); err != nil {
return err
}
}
return nil
}
func (d *DescribeAclsResponse) key() int16 {
return 29
}
func (d *DescribeAclsResponse) version() int16 {
return d.Version
}
func (d *DescribeAclsResponse) headerVersion() int16 {
return 0
}
func (d *DescribeAclsResponse) requiredVersion() KafkaVersion {
switch d.Version {
case 1:
return V2_0_0_0
default:
return V0_11_0_0
}
}

vendor/github.com/Shopify/sarama/acl_filter.go generated vendored Normal file

@@ -0,0 +1,78 @@
package sarama
type AclFilter struct {
Version int
ResourceType AclResourceType
ResourceName *string
ResourcePatternTypeFilter AclResourcePatternType
Principal *string
Host *string
Operation AclOperation
PermissionType AclPermissionType
}
func (a *AclFilter) encode(pe packetEncoder) error {
pe.putInt8(int8(a.ResourceType))
if err := pe.putNullableString(a.ResourceName); err != nil {
return err
}
if a.Version == 1 {
pe.putInt8(int8(a.ResourcePatternTypeFilter))
}
if err := pe.putNullableString(a.Principal); err != nil {
return err
}
if err := pe.putNullableString(a.Host); err != nil {
return err
}
pe.putInt8(int8(a.Operation))
pe.putInt8(int8(a.PermissionType))
return nil
}
func (a *AclFilter) decode(pd packetDecoder, version int16) (err error) {
resourceType, err := pd.getInt8()
if err != nil {
return err
}
a.ResourceType = AclResourceType(resourceType)
if a.ResourceName, err = pd.getNullableString(); err != nil {
return err
}
if a.Version == 1 {
pattern, err := pd.getInt8()
if err != nil {
return err
}
a.ResourcePatternTypeFilter = AclResourcePatternType(pattern)
}
if a.Principal, err = pd.getNullableString(); err != nil {
return err
}
if a.Host, err = pd.getNullableString(); err != nil {
return err
}
operation, err := pd.getInt8()
if err != nil {
return err
}
a.Operation = AclOperation(operation)
permissionType, err := pd.getInt8()
if err != nil {
return err
}
a.PermissionType = AclPermissionType(permissionType)
return nil
}

vendor/github.com/Shopify/sarama/acl_types.go generated vendored Normal file

@@ -0,0 +1,55 @@
package sarama
type (
AclOperation int
AclPermissionType int
AclResourceType int
AclResourcePatternType int
)
// ref: https://github.com/apache/kafka/blob/trunk/clients/src/main/java/org/apache/kafka/common/acl/AclOperation.java
const (
AclOperationUnknown AclOperation = iota
AclOperationAny
AclOperationAll
AclOperationRead
AclOperationWrite
AclOperationCreate
AclOperationDelete
AclOperationAlter
AclOperationDescribe
AclOperationClusterAction
AclOperationDescribeConfigs
AclOperationAlterConfigs
AclOperationIdempotentWrite
)
// ref: https://github.com/apache/kafka/blob/trunk/clients/src/main/java/org/apache/kafka/common/acl/AclPermissionType.java
const (
AclPermissionUnknown AclPermissionType = iota
AclPermissionAny
AclPermissionDeny
AclPermissionAllow
)
// ref: https://github.com/apache/kafka/blob/trunk/clients/src/main/java/org/apache/kafka/common/resource/ResourceType.java
const (
AclResourceUnknown AclResourceType = iota
AclResourceAny
AclResourceTopic
AclResourceGroup
AclResourceCluster
AclResourceTransactionalID
)
// ref: https://github.com/apache/kafka/blob/trunk/clients/src/main/java/org/apache/kafka/common/resource/PatternType.java
const (
AclPatternUnknown AclResourcePatternType = iota
AclPatternAny
AclPatternMatch
AclPatternLiteral
AclPatternPrefixed
)

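Taken together, the AclFilter type above and these enum constants drive the ACL methods this commit adds to ClusterAdmin further down. A minimal usage sketch, assuming a broker reachable at localhost:9092 and a topic named "events" (both illustrative, not part of this diff):
package main
import (
"log"
"github.com/Shopify/sarama"
)
func main() {
cfg := sarama.NewConfig()
cfg.Version = sarama.V2_0_0_0 // enables v1 of the request, which carries the pattern-type filter
admin, err := sarama.NewClusterAdmin([]string{"localhost:9092"}, cfg)
if err != nil {
log.Fatal(err)
}
defer admin.Close()
topic := "events"
// Nil Principal/Host mean "match any"; the Any operation/permission match everything.
acls, err := admin.ListAcls(sarama.AclFilter{
ResourceType: sarama.AclResourceTopic,
ResourceName: &topic,
ResourcePatternTypeFilter: sarama.AclPatternLiteral,
Operation: sarama.AclOperationAny,
PermissionType: sarama.AclPermissionAny,
})
if err != nil {
log.Fatal(err)
}
for _, r := range acls {
log.Printf("%s: %d ACL entries", r.ResourceName, len(r.Acls))
}
}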

@ -0,0 +1,57 @@
package sarama
//AddOffsetsToTxnRequest is a request to add offsets to a transaction
type AddOffsetsToTxnRequest struct {
TransactionalID string
ProducerID int64
ProducerEpoch int16
GroupID string
}
func (a *AddOffsetsToTxnRequest) encode(pe packetEncoder) error {
if err := pe.putString(a.TransactionalID); err != nil {
return err
}
pe.putInt64(a.ProducerID)
pe.putInt16(a.ProducerEpoch)
if err := pe.putString(a.GroupID); err != nil {
return err
}
return nil
}
func (a *AddOffsetsToTxnRequest) decode(pd packetDecoder, version int16) (err error) {
if a.TransactionalID, err = pd.getString(); err != nil {
return err
}
if a.ProducerID, err = pd.getInt64(); err != nil {
return err
}
if a.ProducerEpoch, err = pd.getInt16(); err != nil {
return err
}
if a.GroupID, err = pd.getString(); err != nil {
return err
}
return nil
}
func (a *AddOffsetsToTxnRequest) key() int16 {
return 25
}
func (a *AddOffsetsToTxnRequest) version() int16 {
return 0
}
func (a *AddOffsetsToTxnRequest) headerVersion() int16 {
return 1
}
func (a *AddOffsetsToTxnRequest) requiredVersion() KafkaVersion {
return V0_11_0_0
}


@ -0,0 +1,49 @@
package sarama
import (
"time"
)
//AddOffsetsToTxnResponse is a response type for adding offsets to txns
type AddOffsetsToTxnResponse struct {
ThrottleTime time.Duration
Err KError
}
func (a *AddOffsetsToTxnResponse) encode(pe packetEncoder) error {
pe.putInt32(int32(a.ThrottleTime / time.Millisecond))
pe.putInt16(int16(a.Err))
return nil
}
func (a *AddOffsetsToTxnResponse) decode(pd packetDecoder, version int16) (err error) {
throttleTime, err := pd.getInt32()
if err != nil {
return err
}
a.ThrottleTime = time.Duration(throttleTime) * time.Millisecond
kerr, err := pd.getInt16()
if err != nil {
return err
}
a.Err = KError(kerr)
return nil
}
func (a *AddOffsetsToTxnResponse) key() int16 {
return 25
}
func (a *AddOffsetsToTxnResponse) version() int16 {
return 0
}
func (a *AddOffsetsToTxnResponse) headerVersion() int16 {
return 0
}
func (a *AddOffsetsToTxnResponse) requiredVersion() KafkaVersion {
return V0_11_0_0
}


@ -0,0 +1,81 @@
package sarama
//AddPartitionsToTxnRequest is a request to add partitions to a transaction
type AddPartitionsToTxnRequest struct {
TransactionalID string
ProducerID int64
ProducerEpoch int16
TopicPartitions map[string][]int32
}
func (a *AddPartitionsToTxnRequest) encode(pe packetEncoder) error {
if err := pe.putString(a.TransactionalID); err != nil {
return err
}
pe.putInt64(a.ProducerID)
pe.putInt16(a.ProducerEpoch)
if err := pe.putArrayLength(len(a.TopicPartitions)); err != nil {
return err
}
for topic, partitions := range a.TopicPartitions {
if err := pe.putString(topic); err != nil {
return err
}
if err := pe.putInt32Array(partitions); err != nil {
return err
}
}
return nil
}
func (a *AddPartitionsToTxnRequest) decode(pd packetDecoder, version int16) (err error) {
if a.TransactionalID, err = pd.getString(); err != nil {
return err
}
if a.ProducerID, err = pd.getInt64(); err != nil {
return err
}
if a.ProducerEpoch, err = pd.getInt16(); err != nil {
return err
}
n, err := pd.getArrayLength()
if err != nil {
return err
}
a.TopicPartitions = make(map[string][]int32)
for i := 0; i < n; i++ {
topic, err := pd.getString()
if err != nil {
return err
}
partitions, err := pd.getInt32Array()
if err != nil {
return err
}
a.TopicPartitions[topic] = partitions
}
return nil
}
func (a *AddPartitionsToTxnRequest) key() int16 {
return 24
}
func (a *AddPartitionsToTxnRequest) version() int16 {
return 0
}
func (a *AddPartitionsToTxnRequest) headerVersion() int16 {
return 1
}
func (a *AddPartitionsToTxnRequest) requiredVersion() KafkaVersion {
return V0_11_0_0
}


@ -0,0 +1,114 @@
package sarama
import (
"time"
)
//AddPartitionsToTxnResponse is a response to an AddPartitionsToTxnRequest, carrying per-partition errors
type AddPartitionsToTxnResponse struct {
ThrottleTime time.Duration
Errors map[string][]*PartitionError
}
func (a *AddPartitionsToTxnResponse) encode(pe packetEncoder) error {
pe.putInt32(int32(a.ThrottleTime / time.Millisecond))
if err := pe.putArrayLength(len(a.Errors)); err != nil {
return err
}
for topic, e := range a.Errors {
if err := pe.putString(topic); err != nil {
return err
}
if err := pe.putArrayLength(len(e)); err != nil {
return err
}
for _, partitionError := range e {
if err := partitionError.encode(pe); err != nil {
return err
}
}
}
return nil
}
func (a *AddPartitionsToTxnResponse) decode(pd packetDecoder, version int16) (err error) {
throttleTime, err := pd.getInt32()
if err != nil {
return err
}
a.ThrottleTime = time.Duration(throttleTime) * time.Millisecond
n, err := pd.getArrayLength()
if err != nil {
return err
}
a.Errors = make(map[string][]*PartitionError)
for i := 0; i < n; i++ {
topic, err := pd.getString()
if err != nil {
return err
}
m, err := pd.getArrayLength()
if err != nil {
return err
}
a.Errors[topic] = make([]*PartitionError, m)
for j := 0; j < m; j++ {
a.Errors[topic][j] = new(PartitionError)
if err := a.Errors[topic][j].decode(pd, version); err != nil {
return err
}
}
}
return nil
}
func (a *AddPartitionsToTxnResponse) key() int16 {
return 24
}
func (a *AddPartitionsToTxnResponse) version() int16 {
return 0
}
func (a *AddPartitionsToTxnResponse) headerVersion() int16 {
return 0
}
func (a *AddPartitionsToTxnResponse) requiredVersion() KafkaVersion {
return V0_11_0_0
}
//PartitionError is a partition error type
type PartitionError struct {
Partition int32
Err KError
}
func (p *PartitionError) encode(pe packetEncoder) error {
pe.putInt32(p.Partition)
pe.putInt16(int16(p.Err))
return nil
}
func (p *PartitionError) decode(pd packetDecoder, version int16) (err error) {
if p.Partition, err = pd.getInt32(); err != nil {
return err
}
kerr, err := pd.getInt16()
if err != nil {
return err
}
p.Err = KError(kerr)
return nil
}

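sarama has no high-level transactions API at this version, so these request/response types are exercised by sending them over a Broker directly. A hedged sketch, assuming coordinator is an already-connected transaction coordinator *Broker and that txnID, producerID, and producerEpoch came from an earlier InitProducerID exchange (all assumptions, not shown in this diff):
package main
import (
"fmt"
"github.com/Shopify/sarama"
)
// addPartitionsToTxn registers the partitions a transactional producer is about
// to write to; the topic and partition values here are illustrative.
func addPartitionsToTxn(coordinator *sarama.Broker, txnID string, producerID int64, producerEpoch int16) error {
resp, err := coordinator.AddPartitionsToTxn(&sarama.AddPartitionsToTxnRequest{
TransactionalID: txnID,
ProducerID: producerID,
ProducerEpoch: producerEpoch,
TopicPartitions: map[string][]int32{"events": {0, 1}},
})
if err != nil {
return err
}
// Surface the first per-partition error, if any.
for topic, perrs := range resp.Errors {
for _, pe := range perrs {
if pe.Err != sarama.ErrNoError {
return fmt.Errorf("%s/%d: %v", topic, pe.Partition, pe.Err)
}
}
}
return nil
}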
934
vendor/github.com/Shopify/sarama/admin.go generated vendored Normal file

@ -0,0 +1,934 @@
package sarama
import (
"errors"
"fmt"
"math/rand"
"strconv"
"sync"
"time"
)
// ClusterAdmin is the administrative client for Kafka, which supports managing and inspecting topics,
// brokers, configurations and ACLs. The minimum broker version required is 0.10.0.0.
// Methods with stricter requirements will specify the minimum broker version required.
// You MUST call Close() on a client to avoid leaks
type ClusterAdmin interface {
// Creates a new topic. This operation is supported by brokers with version 0.10.1.0 or higher.
// It may take several seconds after CreateTopic returns success for all the brokers
// to become aware that the topic has been created. During this time, listTopics
may not return information about the new topic. The validateOnly option is supported from version 0.10.2.0.
CreateTopic(topic string, detail *TopicDetail, validateOnly bool) error
// List the topics available in the cluster with the default options.
ListTopics() (map[string]TopicDetail, error)
// Describe some topics in the cluster.
DescribeTopics(topics []string) (metadata []*TopicMetadata, err error)
// Delete a topic. It may take several seconds after DeleteTopic returns success
// for all the brokers to become aware that the topic is gone.
// During this time, listTopics may continue to return information about the deleted topic.
// If delete.topic.enable is false on the brokers, deleteTopic will mark
// the topic for deletion, but not actually delete it.
// This operation is supported by brokers with version 0.10.1.0 or higher.
DeleteTopic(topic string) error
// Increase the number of partitions of the topics according to the corresponding values.
// If partitions are increased for a topic that has a key, the partition logic or ordering of
// the messages will be affected. It may take several seconds after this method returns
// success for all the brokers to become aware that the partitions have been created.
// During this time, ClusterAdmin#describeTopics may not return information about the
// new partitions. This operation is supported by brokers with version 1.0.0 or higher.
CreatePartitions(topic string, count int32, assignment [][]int32, validateOnly bool) error
// Alter the replica assignment for partitions.
// This operation is supported by brokers with version 2.4.0.0 or higher.
AlterPartitionReassignments(topic string, assignment [][]int32) error
// Provides info on ongoing partition replica reassignments.
// This operation is supported by brokers with version 2.4.0.0 or higher.
ListPartitionReassignments(topic string, partitions []int32) (topicStatus map[string]map[int32]*PartitionReplicaReassignmentsStatus, err error)
// Delete records whose offset is smaller than the given offset of the corresponding partition.
// This operation is supported by brokers with version 0.11.0.0 or higher.
DeleteRecords(topic string, partitionOffsets map[int32]int64) error
// Get the configuration for the specified resources.
// The returned configuration includes default values; entries where Default
// is true can be used to distinguish them from user-supplied values.
// Config entries where ReadOnly is true cannot be updated.
// The value of config entries where Sensitive is true is always nil so
// sensitive information is not disclosed.
// This operation is supported by brokers with version 0.11.0.0 or higher.
DescribeConfig(resource ConfigResource) ([]ConfigEntry, error)
// Update the configuration for the specified resources with the default options.
// This operation is supported by brokers with version 0.11.0.0 or higher.
// Topic is currently the only resource type whose configs can be updated.
// Updates are not transactional, so they may succeed for some resources while
// failing for others. The configs for a particular resource are updated atomically.
AlterConfig(resourceType ConfigResourceType, name string, entries map[string]*string, validateOnly bool) error
// Creates access control lists (ACLs) which are bound to specific resources.
// This operation is not transactional so it may succeed for some ACLs while fail for others.
// If you attempt to add an ACL that duplicates an existing ACL, no error will be raised, but
// no changes will be made. This operation is supported by brokers with version 0.11.0.0 or higher.
CreateACL(resource Resource, acl Acl) error
// Lists access control lists (ACLs) according to the supplied filter.
// It may take some time for changes made by createAcls or deleteAcls to be reflected in the output of ListAcls.
// This operation is supported by brokers with version 0.11.0.0 or higher.
ListAcls(filter AclFilter) ([]ResourceAcls, error)
// Deletes access control lists (ACLs) according to the supplied filters.
// This operation is not transactional so it may succeed for some ACLs while fail for others.
// This operation is supported by brokers with version 0.11.0.0 or higher.
DeleteACL(filter AclFilter, validateOnly bool) ([]MatchingAcl, error)
// List the consumer groups available in the cluster.
ListConsumerGroups() (map[string]string, error)
// Describe the given consumer groups.
DescribeConsumerGroups(groups []string) ([]*GroupDescription, error)
// List the consumer group offsets available in the cluster.
ListConsumerGroupOffsets(group string, topicPartitions map[string][]int32) (*OffsetFetchResponse, error)
// Delete a consumer group.
DeleteConsumerGroup(group string) error
// Get information about the nodes in the cluster
DescribeCluster() (brokers []*Broker, controllerID int32, err error)
// Get information about all log directories on the given set of brokers
DescribeLogDirs(brokers []int32) (map[int32][]DescribeLogDirsResponseDirMetadata, error)
// Close shuts down the admin and closes underlying client.
Close() error
}
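// Usage sketch (illustrative, not part of this file): create an admin client
// against an assumed broker at localhost:9092 and create a topic with it.
//
// cfg := sarama.NewConfig()
// cfg.Version = sarama.V1_0_0_0
// admin, err := sarama.NewClusterAdmin([]string{"localhost:9092"}, cfg)
// if err != nil {
// log.Fatal(err)
// }
// defer admin.Close()
// err = admin.CreateTopic("events", &sarama.TopicDetail{
// NumPartitions: 3,
// ReplicationFactor: 1,
// }, false)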
type clusterAdmin struct {
client Client
conf *Config
}
// NewClusterAdmin creates a new ClusterAdmin using the given broker addresses and configuration.
func NewClusterAdmin(addrs []string, conf *Config) (ClusterAdmin, error) {
client, err := NewClient(addrs, conf)
if err != nil {
return nil, err
}
return NewClusterAdminFromClient(client)
}
// NewClusterAdminFromClient creates a new ClusterAdmin using the given client.
// Note that underlying client will also be closed on admin's Close() call.
func NewClusterAdminFromClient(client Client) (ClusterAdmin, error) {
//make sure we can retrieve the controller
_, err := client.Controller()
if err != nil {
return nil, err
}
ca := &clusterAdmin{
client: client,
conf: client.Config(),
}
return ca, nil
}
func (ca *clusterAdmin) Close() error {
return ca.client.Close()
}
func (ca *clusterAdmin) Controller() (*Broker, error) {
return ca.client.Controller()
}
func (ca *clusterAdmin) refreshController() (*Broker, error) {
return ca.client.RefreshController()
}
// isErrNoController returns `true` if the given error type unwraps to an
// `ErrNotController` response from Kafka
func isErrNoController(err error) bool {
switch e := err.(type) {
case *TopicError:
return e.Err == ErrNotController
case *TopicPartitionError:
return e.Err == ErrNotController
case KError:
return e == ErrNotController
}
return false
}
// retryOnError will repeatedly call the given (error-returning) func in the
// case that its response is non-nil and retriable (as determined by the
// provided retriable func) up to the maximum number of tries permitted by
// the admin client configuration
func (ca *clusterAdmin) retryOnError(retriable func(error) bool, fn func() error) error {
var err error
for attempt := 0; attempt < ca.conf.Admin.Retry.Max; attempt++ {
err = fn()
if err == nil || !retriable(err) {
return err
}
Logger.Printf(
"admin/request retrying after %dms... (%d attempts remaining)\n",
ca.conf.Admin.Retry.Backoff/time.Millisecond, ca.conf.Admin.Retry.Max-attempt)
time.Sleep(ca.conf.Admin.Retry.Backoff)
continue
}
return err
}
func (ca *clusterAdmin) CreateTopic(topic string, detail *TopicDetail, validateOnly bool) error {
if topic == "" {
return ErrInvalidTopic
}
if detail == nil {
return errors.New("you must specify topic details")
}
topicDetails := make(map[string]*TopicDetail)
topicDetails[topic] = detail
request := &CreateTopicsRequest{
TopicDetails: topicDetails,
ValidateOnly: validateOnly,
Timeout: ca.conf.Admin.Timeout,
}
if ca.conf.Version.IsAtLeast(V0_11_0_0) {
request.Version = 1
}
if ca.conf.Version.IsAtLeast(V1_0_0_0) {
request.Version = 2
}
return ca.retryOnError(isErrNoController, func() error {
b, err := ca.Controller()
if err != nil {
return err
}
rsp, err := b.CreateTopics(request)
if err != nil {
return err
}
topicErr, ok := rsp.TopicErrors[topic]
if !ok {
return ErrIncompleteResponse
}
if topicErr.Err != ErrNoError {
if topicErr.Err == ErrNotController {
_, _ = ca.refreshController()
}
return topicErr
}
return nil
})
}
func (ca *clusterAdmin) DescribeTopics(topics []string) (metadata []*TopicMetadata, err error) {
controller, err := ca.Controller()
if err != nil {
return nil, err
}
request := &MetadataRequest{
Topics: topics,
AllowAutoTopicCreation: false,
}
if ca.conf.Version.IsAtLeast(V1_0_0_0) {
request.Version = 5
} else if ca.conf.Version.IsAtLeast(V0_11_0_0) {
request.Version = 4
}
response, err := controller.GetMetadata(request)
if err != nil {
return nil, err
}
return response.Topics, nil
}
func (ca *clusterAdmin) DescribeCluster() (brokers []*Broker, controllerID int32, err error) {
controller, err := ca.Controller()
if err != nil {
return nil, int32(0), err
}
request := &MetadataRequest{
Topics: []string{},
}
if ca.conf.Version.IsAtLeast(V0_10_0_0) {
request.Version = 1
}
response, err := controller.GetMetadata(request)
if err != nil {
return nil, int32(0), err
}
return response.Brokers, response.ControllerID, nil
}
func (ca *clusterAdmin) findBroker(id int32) (*Broker, error) {
brokers := ca.client.Brokers()
for _, b := range brokers {
if b.ID() == id {
return b, nil
}
}
return nil, fmt.Errorf("could not find broker id %d", id)
}
func (ca *clusterAdmin) findAnyBroker() (*Broker, error) {
brokers := ca.client.Brokers()
if len(brokers) > 0 {
index := rand.Intn(len(brokers))
return brokers[index], nil
}
return nil, errors.New("no available broker")
}
func (ca *clusterAdmin) ListTopics() (map[string]TopicDetail, error) {
// In order to build TopicDetails we need to first get the list of all
// topics using a MetadataRequest and then get their configs using a
// DescribeConfigsRequest request. To avoid sending many requests to the
// broker, we use a single DescribeConfigsRequest.
// Send the all-topic MetadataRequest
b, err := ca.findAnyBroker()
if err != nil {
return nil, err
}
_ = b.Open(ca.client.Config())
metadataReq := &MetadataRequest{}
metadataResp, err := b.GetMetadata(metadataReq)
if err != nil {
return nil, err
}
topicsDetailsMap := make(map[string]TopicDetail)
var describeConfigsResources []*ConfigResource
for _, topic := range metadataResp.Topics {
topicDetails := TopicDetail{
NumPartitions: int32(len(topic.Partitions)),
}
if len(topic.Partitions) > 0 {
topicDetails.ReplicaAssignment = map[int32][]int32{}
for _, partition := range topic.Partitions {
topicDetails.ReplicaAssignment[partition.ID] = partition.Replicas
}
topicDetails.ReplicationFactor = int16(len(topic.Partitions[0].Replicas))
}
topicsDetailsMap[topic.Name] = topicDetails
// we populate the resources we want to describe from the MetadataResponse
topicResource := ConfigResource{
Type: TopicResource,
Name: topic.Name,
}
describeConfigsResources = append(describeConfigsResources, &topicResource)
}
// Send the DescribeConfigsRequest
describeConfigsReq := &DescribeConfigsRequest{
Resources: describeConfigsResources,
}
if ca.conf.Version.IsAtLeast(V1_1_0_0) {
describeConfigsReq.Version = 1
}
if ca.conf.Version.IsAtLeast(V2_0_0_0) {
describeConfigsReq.Version = 2
}
describeConfigsResp, err := b.DescribeConfigs(describeConfigsReq)
if err != nil {
return nil, err
}
for _, resource := range describeConfigsResp.Resources {
topicDetails := topicsDetailsMap[resource.Name]
topicDetails.ConfigEntries = make(map[string]*string)
for _, entry := range resource.Configs {
// only include non-default non-sensitive config
// (don't actually think topic config will ever be sensitive)
if entry.Default || entry.Sensitive {
continue
}
topicDetails.ConfigEntries[entry.Name] = &entry.Value
}
topicsDetailsMap[resource.Name] = topicDetails
}
return topicsDetailsMap, nil
}
func (ca *clusterAdmin) DeleteTopic(topic string) error {
if topic == "" {
return ErrInvalidTopic
}
request := &DeleteTopicsRequest{
Topics: []string{topic},
Timeout: ca.conf.Admin.Timeout,
}
if ca.conf.Version.IsAtLeast(V0_11_0_0) {
request.Version = 1
}
return ca.retryOnError(isErrNoController, func() error {
b, err := ca.Controller()
if err != nil {
return err
}
rsp, err := b.DeleteTopics(request)
if err != nil {
return err
}
topicErr, ok := rsp.TopicErrorCodes[topic]
if !ok {
return ErrIncompleteResponse
}
if topicErr != ErrNoError {
if topicErr == ErrNotController {
_, _ = ca.refreshController()
}
return topicErr
}
return nil
})
}
func (ca *clusterAdmin) CreatePartitions(topic string, count int32, assignment [][]int32, validateOnly bool) error {
if topic == "" {
return ErrInvalidTopic
}
topicPartitions := make(map[string]*TopicPartition)
topicPartitions[topic] = &TopicPartition{Count: count, Assignment: assignment}
request := &CreatePartitionsRequest{
TopicPartitions: topicPartitions,
Timeout: ca.conf.Admin.Timeout,
}
return ca.retryOnError(isErrNoController, func() error {
b, err := ca.Controller()
if err != nil {
return err
}
rsp, err := b.CreatePartitions(request)
if err != nil {
return err
}
topicErr, ok := rsp.TopicPartitionErrors[topic]
if !ok {
return ErrIncompleteResponse
}
if topicErr.Err != ErrNoError {
if topicErr.Err == ErrNotController {
_, _ = ca.refreshController()
}
return topicErr
}
return nil
})
}
func (ca *clusterAdmin) AlterPartitionReassignments(topic string, assignment [][]int32) error {
if topic == "" {
return ErrInvalidTopic
}
request := &AlterPartitionReassignmentsRequest{
TimeoutMs: int32(60000),
Version: int16(0),
}
for i := 0; i < len(assignment); i++ {
request.AddBlock(topic, int32(i), assignment[i])
}
return ca.retryOnError(isErrNoController, func() error {
b, err := ca.Controller()
if err != nil {
return err
}
errs := make([]error, 0)
rsp, err := b.AlterPartitionReassignments(request)
if err != nil {
errs = append(errs, err)
} else {
if rsp.ErrorCode > 0 {
errs = append(errs, errors.New(rsp.ErrorCode.Error()))
}
for topic, topicErrors := range rsp.Errors {
for partition, partitionError := range topicErrors {
if partitionError.errorCode != ErrNoError {
errStr := fmt.Sprintf("[%s-%d]: %s", topic, partition, partitionError.errorCode.Error())
errs = append(errs, errors.New(errStr))
}
}
}
}
if len(errs) > 0 {
return ErrReassignPartitions{MultiError{&errs}}
}
return nil
})
}
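// Usage sketch (illustrative): reassign the replicas of the first two
// partitions of an assumed topic "events"; the outer slice index is the
// partition ID, matching the AddBlock loop above.
//
// err := admin.AlterPartitionReassignments("events", [][]int32{
// {1, 2, 3}, // partition 0 -> brokers 1, 2, 3
// {2, 3, 4}, // partition 1 -> brokers 2, 3, 4
// })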
func (ca *clusterAdmin) ListPartitionReassignments(topic string, partitions []int32) (topicStatus map[string]map[int32]*PartitionReplicaReassignmentsStatus, err error) {
if topic == "" {
return nil, ErrInvalidTopic
}
request := &ListPartitionReassignmentsRequest{
TimeoutMs: int32(60000),
Version: int16(0),
}
request.AddBlock(topic, partitions)
b, err := ca.Controller()
if err != nil {
return nil, err
}
_ = b.Open(ca.client.Config())
rsp, err := b.ListPartitionReassignments(request)
if err != nil || rsp == nil {
return nil, err
}
return rsp.TopicStatus, nil
}
func (ca *clusterAdmin) DeleteRecords(topic string, partitionOffsets map[int32]int64) error {
if topic == "" {
return ErrInvalidTopic
}
partitionPerBroker := make(map[*Broker][]int32)
for partition := range partitionOffsets {
broker, err := ca.client.Leader(topic, partition)
if err != nil {
return err
}
if _, ok := partitionPerBroker[broker]; ok {
partitionPerBroker[broker] = append(partitionPerBroker[broker], partition)
} else {
partitionPerBroker[broker] = []int32{partition}
}
}
errs := make([]error, 0)
for broker, partitions := range partitionPerBroker {
topics := make(map[string]*DeleteRecordsRequestTopic)
recordsToDelete := make(map[int32]int64)
for _, p := range partitions {
recordsToDelete[p] = partitionOffsets[p]
}
topics[topic] = &DeleteRecordsRequestTopic{PartitionOffsets: recordsToDelete}
request := &DeleteRecordsRequest{
Topics: topics,
Timeout: ca.conf.Admin.Timeout,
}
rsp, err := broker.DeleteRecords(request)
if err != nil {
errs = append(errs, err)
} else {
deleteRecordsResponseTopic, ok := rsp.Topics[topic]
if !ok {
errs = append(errs, ErrIncompleteResponse)
} else {
for _, deleteRecordsResponsePartition := range deleteRecordsResponseTopic.Partitions {
if deleteRecordsResponsePartition.Err != ErrNoError {
errs = append(errs, errors.New(deleteRecordsResponsePartition.Err.Error()))
}
}
}
}
}
if len(errs) > 0 {
return ErrDeleteRecords{MultiError{&errs}}
}
//TODO: since we are dealing with multiple partitions it would be good to
//return a slice of errors, one per partition, instead of a single error
return nil
}
// Returns a bool indicating whether the resource request needs to go to a
// specific broker
func dependsOnSpecificNode(resource ConfigResource) bool {
return (resource.Type == BrokerResource && resource.Name != "") ||
resource.Type == BrokerLoggerResource
}
func (ca *clusterAdmin) DescribeConfig(resource ConfigResource) ([]ConfigEntry, error) {
var entries []ConfigEntry
var resources []*ConfigResource
resources = append(resources, &resource)
request := &DescribeConfigsRequest{
Resources: resources,
}
if ca.conf.Version.IsAtLeast(V1_1_0_0) {
request.Version = 1
}
if ca.conf.Version.IsAtLeast(V2_0_0_0) {
request.Version = 2
}
var (
b *Broker
err error
)
// DescribeConfig of broker/broker logger must be sent to the broker in question
if dependsOnSpecificNode(resource) {
id, _ := strconv.Atoi(resource.Name)
b, err = ca.findBroker(int32(id))
} else {
b, err = ca.findAnyBroker()
}
if err != nil {
return nil, err
}
_ = b.Open(ca.client.Config())
rsp, err := b.DescribeConfigs(request)
if err != nil {
return nil, err
}
for _, rspResource := range rsp.Resources {
if rspResource.Name == resource.Name {
if rspResource.ErrorMsg != "" {
return nil, errors.New(rspResource.ErrorMsg)
}
if rspResource.ErrorCode != 0 {
return nil, KError(rspResource.ErrorCode)
}
for _, cfgEntry := range rspResource.Configs {
entries = append(entries, *cfgEntry)
}
}
}
return entries, nil
}
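// Usage sketch (illustrative): read one config entry of an assumed topic.
// Leaving ConfigNames empty returns every entry; naming keys narrows the reply.
//
// entries, err := admin.DescribeConfig(sarama.ConfigResource{
// Type: sarama.TopicResource,
// Name: "events",
// ConfigNames: []string{"retention.ms"},
// })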
func (ca *clusterAdmin) AlterConfig(resourceType ConfigResourceType, name string, entries map[string]*string, validateOnly bool) error {
var resources []*AlterConfigsResource
resources = append(resources, &AlterConfigsResource{
Type: resourceType,
Name: name,
ConfigEntries: entries,
})
request := &AlterConfigsRequest{
Resources: resources,
ValidateOnly: validateOnly,
}
var (
b *Broker
err error
)
// AlterConfig of broker/broker logger must be sent to the broker in question
if dependsOnSpecificNode(ConfigResource{Name: name, Type: resourceType}) {
id, _ := strconv.Atoi(name)
b, err = ca.findBroker(int32(id))
} else {
b, err = ca.findAnyBroker()
}
if err != nil {
return err
}
_ = b.Open(ca.client.Config())
rsp, err := b.AlterConfigs(request)
if err != nil {
return err
}
for _, rspResource := range rsp.Resources {
if rspResource.Name == name {
if rspResource.ErrorMsg != "" {
return errors.New(rspResource.ErrorMsg)
}
if rspResource.ErrorCode != 0 {
return KError(rspResource.ErrorCode)
}
}
}
return nil
}
func (ca *clusterAdmin) CreateACL(resource Resource, acl Acl) error {
var acls []*AclCreation
acls = append(acls, &AclCreation{resource, acl})
request := &CreateAclsRequest{AclCreations: acls}
if ca.conf.Version.IsAtLeast(V2_0_0_0) {
request.Version = 1
}
b, err := ca.Controller()
if err != nil {
return err
}
_, err = b.CreateAcls(request)
return err
}
func (ca *clusterAdmin) ListAcls(filter AclFilter) ([]ResourceAcls, error) {
request := &DescribeAclsRequest{AclFilter: filter}
if ca.conf.Version.IsAtLeast(V2_0_0_0) {
request.Version = 1
}
b, err := ca.Controller()
if err != nil {
return nil, err
}
rsp, err := b.DescribeAcls(request)
if err != nil {
return nil, err
}
var lAcls []ResourceAcls
for _, rAcl := range rsp.ResourceAcls {
lAcls = append(lAcls, *rAcl)
}
return lAcls, nil
}
func (ca *clusterAdmin) DeleteACL(filter AclFilter, validateOnly bool) ([]MatchingAcl, error) {
var filters []*AclFilter
filters = append(filters, &filter)
request := &DeleteAclsRequest{Filters: filters}
if ca.conf.Version.IsAtLeast(V2_0_0_0) {
request.Version = 1
}
b, err := ca.Controller()
if err != nil {
return nil, err
}
rsp, err := b.DeleteAcls(request)
if err != nil {
return nil, err
}
var mAcls []MatchingAcl
for _, fr := range rsp.FilterResponses {
for _, mACL := range fr.MatchingAcls {
mAcls = append(mAcls, *mACL)
}
}
return mAcls, nil
}
func (ca *clusterAdmin) DescribeConsumerGroups(groups []string) (result []*GroupDescription, err error) {
groupsPerBroker := make(map[*Broker][]string)
for _, group := range groups {
controller, err := ca.client.Coordinator(group)
if err != nil {
return nil, err
}
groupsPerBroker[controller] = append(groupsPerBroker[controller], group)
}
for broker, brokerGroups := range groupsPerBroker {
response, err := broker.DescribeGroups(&DescribeGroupsRequest{
Groups: brokerGroups,
})
if err != nil {
return nil, err
}
result = append(result, response.Groups...)
}
return result, nil
}
func (ca *clusterAdmin) ListConsumerGroups() (allGroups map[string]string, err error) {
allGroups = make(map[string]string)
// Query brokers in parallel, since we have to query *all* brokers
brokers := ca.client.Brokers()
groupMaps := make(chan map[string]string, len(brokers))
errors := make(chan error, len(brokers))
wg := sync.WaitGroup{}
for _, b := range brokers {
wg.Add(1)
go func(b *Broker, conf *Config) {
defer wg.Done()
_ = b.Open(conf) // Ensure that broker is opened
response, err := b.ListGroups(&ListGroupsRequest{})
if err != nil {
errors <- err
return
}
groups := make(map[string]string)
for group, typ := range response.Groups {
groups[group] = typ
}
groupMaps <- groups
}(b, ca.conf)
}
wg.Wait()
close(groupMaps)
close(errors)
for groupMap := range groupMaps {
for group, protocolType := range groupMap {
allGroups[group] = protocolType
}
}
// Intentionally return only the first error for simplicity
err = <-errors
return
}
func (ca *clusterAdmin) ListConsumerGroupOffsets(group string, topicPartitions map[string][]int32) (*OffsetFetchResponse, error) {
coordinator, err := ca.client.Coordinator(group)
if err != nil {
return nil, err
}
request := &OffsetFetchRequest{
ConsumerGroup: group,
partitions: topicPartitions,
}
if ca.conf.Version.IsAtLeast(V0_10_2_0) {
request.Version = 2
} else if ca.conf.Version.IsAtLeast(V0_8_2_2) {
request.Version = 1
}
return coordinator.FetchOffset(request)
}
func (ca *clusterAdmin) DeleteConsumerGroup(group string) error {
coordinator, err := ca.client.Coordinator(group)
if err != nil {
return err
}
request := &DeleteGroupsRequest{
Groups: []string{group},
}
resp, err := coordinator.DeleteGroups(request)
if err != nil {
return err
}
groupErr, ok := resp.GroupErrorCodes[group]
if !ok {
return ErrIncompleteResponse
}
if groupErr != ErrNoError {
return groupErr
}
return nil
}
func (ca *clusterAdmin) DescribeLogDirs(brokerIds []int32) (allLogDirs map[int32][]DescribeLogDirsResponseDirMetadata, err error) {
allLogDirs = make(map[int32][]DescribeLogDirsResponseDirMetadata)
// Query brokers in parallel, since we may have to query multiple brokers
logDirsMaps := make(chan map[int32][]DescribeLogDirsResponseDirMetadata, len(brokerIds))
errors := make(chan error, len(brokerIds))
wg := sync.WaitGroup{}
for _, b := range brokerIds {
broker, err := ca.findBroker(b)
if err != nil {
Logger.Printf("Unable to find broker with ID = %v\n", b)
continue
}
// Only count the broker once we know the goroutine below will run;
// adding before the continue above would leave wg.Wait() blocked forever.
wg.Add(1)
go func(b *Broker, conf *Config) {
defer wg.Done()
_ = b.Open(conf) // Ensure that broker is opened
response, err := b.DescribeLogDirs(&DescribeLogDirsRequest{})
if err != nil {
errors <- err
return
}
logDirs := make(map[int32][]DescribeLogDirsResponseDirMetadata)
logDirs[b.ID()] = response.LogDirs
logDirsMaps <- logDirs
}(broker, ca.conf)
}
wg.Wait()
close(logDirsMaps)
close(errors)
for logDirsMap := range logDirsMaps {
for id, logDirs := range logDirsMap {
allLogDirs[id] = logDirs
}
}
// Intentionally return only the first error for simplicity
err = <-errors
return
}


@ -0,0 +1,126 @@
package sarama
//AlterConfigsRequest is an alter config request type
type AlterConfigsRequest struct {
Resources []*AlterConfigsResource
ValidateOnly bool
}
//AlterConfigsResource is an alter config resource type
type AlterConfigsResource struct {
Type ConfigResourceType
Name string
ConfigEntries map[string]*string
}
func (a *AlterConfigsRequest) encode(pe packetEncoder) error {
if err := pe.putArrayLength(len(a.Resources)); err != nil {
return err
}
for _, r := range a.Resources {
if err := r.encode(pe); err != nil {
return err
}
}
pe.putBool(a.ValidateOnly)
return nil
}
func (a *AlterConfigsRequest) decode(pd packetDecoder, version int16) error {
resourceCount, err := pd.getArrayLength()
if err != nil {
return err
}
a.Resources = make([]*AlterConfigsResource, resourceCount)
for i := range a.Resources {
r := &AlterConfigsResource{}
err = r.decode(pd, version)
if err != nil {
return err
}
a.Resources[i] = r
}
validateOnly, err := pd.getBool()
if err != nil {
return err
}
a.ValidateOnly = validateOnly
return nil
}
func (a *AlterConfigsResource) encode(pe packetEncoder) error {
pe.putInt8(int8(a.Type))
if err := pe.putString(a.Name); err != nil {
return err
}
if err := pe.putArrayLength(len(a.ConfigEntries)); err != nil {
return err
}
for configKey, configValue := range a.ConfigEntries {
if err := pe.putString(configKey); err != nil {
return err
}
if err := pe.putNullableString(configValue); err != nil {
return err
}
}
return nil
}
func (a *AlterConfigsResource) decode(pd packetDecoder, version int16) error {
t, err := pd.getInt8()
if err != nil {
return err
}
a.Type = ConfigResourceType(t)
name, err := pd.getString()
if err != nil {
return err
}
a.Name = name
n, err := pd.getArrayLength()
if err != nil {
return err
}
if n > 0 {
a.ConfigEntries = make(map[string]*string, n)
for i := 0; i < n; i++ {
configKey, err := pd.getString()
if err != nil {
return err
}
if a.ConfigEntries[configKey], err = pd.getNullableString(); err != nil {
return err
}
}
}
return err
}
func (a *AlterConfigsRequest) key() int16 {
return 33
}
func (a *AlterConfigsRequest) version() int16 {
return 0
}
func (a *AlterConfigsRequest) headerVersion() int16 {
return 1
}
func (a *AlterConfigsRequest) requiredVersion() KafkaVersion {
return V0_11_0_0
}


@ -0,0 +1,101 @@
package sarama
import "time"
//AlterConfigsResponse is a response type for alter config
type AlterConfigsResponse struct {
ThrottleTime time.Duration
Resources []*AlterConfigsResourceResponse
}
//AlterConfigsResourceResponse is a response type for alter config resource
type AlterConfigsResourceResponse struct {
ErrorCode int16
ErrorMsg string
Type ConfigResourceType
Name string
}
func (a *AlterConfigsResponse) encode(pe packetEncoder) error {
pe.putInt32(int32(a.ThrottleTime / time.Millisecond))
if err := pe.putArrayLength(len(a.Resources)); err != nil {
return err
}
for i := range a.Resources {
pe.putInt16(a.Resources[i].ErrorCode)
err := pe.putString(a.Resources[i].ErrorMsg)
if err != nil {
return err
}
pe.putInt8(int8(a.Resources[i].Type))
err = pe.putString(a.Resources[i].Name)
if err != nil {
return err
}
}
return nil
}
func (a *AlterConfigsResponse) decode(pd packetDecoder, version int16) error {
throttleTime, err := pd.getInt32()
if err != nil {
return err
}
a.ThrottleTime = time.Duration(throttleTime) * time.Millisecond
responseCount, err := pd.getArrayLength()
if err != nil {
return err
}
a.Resources = make([]*AlterConfigsResourceResponse, responseCount)
for i := range a.Resources {
a.Resources[i] = new(AlterConfigsResourceResponse)
errCode, err := pd.getInt16()
if err != nil {
return err
}
a.Resources[i].ErrorCode = errCode
e, err := pd.getString()
if err != nil {
return err
}
a.Resources[i].ErrorMsg = e
t, err := pd.getInt8()
if err != nil {
return err
}
a.Resources[i].Type = ConfigResourceType(t)
name, err := pd.getString()
if err != nil {
return err
}
a.Resources[i].Name = name
}
return nil
}
func (a *AlterConfigsResponse) key() int16 {
return 32
}
func (a *AlterConfigsResponse) version() int16 {
return 0
}
func (a *AlterConfigsResponse) headerVersion() int16 {
return 0
}
func (a *AlterConfigsResponse) requiredVersion() KafkaVersion {
return V0_11_0_0
}


@ -0,0 +1,130 @@
package sarama
type alterPartitionReassignmentsBlock struct {
replicas []int32
}
func (b *alterPartitionReassignmentsBlock) encode(pe packetEncoder) error {
if err := pe.putNullableCompactInt32Array(b.replicas); err != nil {
return err
}
pe.putEmptyTaggedFieldArray()
return nil
}
func (b *alterPartitionReassignmentsBlock) decode(pd packetDecoder) (err error) {
if b.replicas, err = pd.getCompactInt32Array(); err != nil {
return err
}
return nil
}
type AlterPartitionReassignmentsRequest struct {
TimeoutMs int32
blocks map[string]map[int32]*alterPartitionReassignmentsBlock
Version int16
}
func (r *AlterPartitionReassignmentsRequest) encode(pe packetEncoder) error {
pe.putInt32(r.TimeoutMs)
pe.putCompactArrayLength(len(r.blocks))
for topic, partitions := range r.blocks {
if err := pe.putCompactString(topic); err != nil {
return err
}
pe.putCompactArrayLength(len(partitions))
for partition, block := range partitions {
pe.putInt32(partition)
if err := block.encode(pe); err != nil {
return err
}
}
pe.putEmptyTaggedFieldArray()
}
pe.putEmptyTaggedFieldArray()
return nil
}
func (r *AlterPartitionReassignmentsRequest) decode(pd packetDecoder, version int16) (err error) {
r.Version = version
if r.TimeoutMs, err = pd.getInt32(); err != nil {
return err
}
topicCount, err := pd.getCompactArrayLength()
if err != nil {
return err
}
if topicCount > 0 {
r.blocks = make(map[string]map[int32]*alterPartitionReassignmentsBlock)
for i := 0; i < topicCount; i++ {
topic, err := pd.getCompactString()
if err != nil {
return err
}
partitionCount, err := pd.getCompactArrayLength()
if err != nil {
return err
}
r.blocks[topic] = make(map[int32]*alterPartitionReassignmentsBlock)
for j := 0; j < partitionCount; j++ {
partition, err := pd.getInt32()
if err != nil {
return err
}
block := &alterPartitionReassignmentsBlock{}
if err := block.decode(pd); err != nil {
return err
}
r.blocks[topic][partition] = block
if _, err := pd.getEmptyTaggedFieldArray(); err != nil {
return err
}
}
if _, err := pd.getEmptyTaggedFieldArray(); err != nil {
return err
}
}
}
if _, err := pd.getEmptyTaggedFieldArray(); err != nil {
return err
}
return
}
func (r *AlterPartitionReassignmentsRequest) key() int16 {
return 45
}
func (r *AlterPartitionReassignmentsRequest) version() int16 {
return r.Version
}
func (r *AlterPartitionReassignmentsRequest) headerVersion() int16 {
return 2
}
func (r *AlterPartitionReassignmentsRequest) requiredVersion() KafkaVersion {
return V2_4_0_0
}
func (r *AlterPartitionReassignmentsRequest) AddBlock(topic string, partitionID int32, replicas []int32) {
if r.blocks == nil {
r.blocks = make(map[string]map[int32]*alterPartitionReassignmentsBlock)
}
if r.blocks[topic] == nil {
r.blocks[topic] = make(map[int32]*alterPartitionReassignmentsBlock)
}
r.blocks[topic][partitionID] = &alterPartitionReassignmentsBlock{replicas}
}


@ -0,0 +1,157 @@
package sarama
type alterPartitionReassignmentsErrorBlock struct {
errorCode KError
errorMessage *string
}
func (b *alterPartitionReassignmentsErrorBlock) encode(pe packetEncoder) error {
pe.putInt16(int16(b.errorCode))
if err := pe.putNullableCompactString(b.errorMessage); err != nil {
return err
}
pe.putEmptyTaggedFieldArray()
return nil
}
func (b *alterPartitionReassignmentsErrorBlock) decode(pd packetDecoder) (err error) {
errorCode, err := pd.getInt16()
if err != nil {
return err
}
b.errorCode = KError(errorCode)
b.errorMessage, err = pd.getCompactNullableString()
if _, err := pd.getEmptyTaggedFieldArray(); err != nil {
return err
}
return err
}
type AlterPartitionReassignmentsResponse struct {
Version int16
ThrottleTimeMs int32
ErrorCode KError
ErrorMessage *string
Errors map[string]map[int32]*alterPartitionReassignmentsErrorBlock
}
func (r *AlterPartitionReassignmentsResponse) AddError(topic string, partition int32, kerror KError, message *string) {
if r.Errors == nil {
r.Errors = make(map[string]map[int32]*alterPartitionReassignmentsErrorBlock)
}
partitions := r.Errors[topic]
if partitions == nil {
partitions = make(map[int32]*alterPartitionReassignmentsErrorBlock)
r.Errors[topic] = partitions
}
partitions[partition] = &alterPartitionReassignmentsErrorBlock{errorCode: kerror, errorMessage: message}
}
func (r *AlterPartitionReassignmentsResponse) encode(pe packetEncoder) error {
pe.putInt32(r.ThrottleTimeMs)
pe.putInt16(int16(r.ErrorCode))
if err := pe.putNullableCompactString(r.ErrorMessage); err != nil {
return err
}
pe.putCompactArrayLength(len(r.Errors))
for topic, partitions := range r.Errors {
if err := pe.putCompactString(topic); err != nil {
return err
}
pe.putCompactArrayLength(len(partitions))
for partition, block := range partitions {
pe.putInt32(partition)
if err := block.encode(pe); err != nil {
return err
}
}
pe.putEmptyTaggedFieldArray()
}
pe.putEmptyTaggedFieldArray()
return nil
}
func (r *AlterPartitionReassignmentsResponse) decode(pd packetDecoder, version int16) (err error) {
r.Version = version
if r.ThrottleTimeMs, err = pd.getInt32(); err != nil {
return err
}
kerr, err := pd.getInt16()
if err != nil {
return err
}
r.ErrorCode = KError(kerr)
if r.ErrorMessage, err = pd.getCompactNullableString(); err != nil {
return err
}
numTopics, err := pd.getCompactArrayLength()
if err != nil {
return err
}
if numTopics > 0 {
r.Errors = make(map[string]map[int32]*alterPartitionReassignmentsErrorBlock, numTopics)
for i := 0; i < numTopics; i++ {
topic, err := pd.getCompactString()
if err != nil {
return err
}
ongoingPartitionReassignments, err := pd.getCompactArrayLength()
if err != nil {
return err
}
r.Errors[topic] = make(map[int32]*alterPartitionReassignmentsErrorBlock, ongoingPartitionReassignments)
for j := 0; j < ongoingPartitionReassignments; j++ {
partition, err := pd.getInt32()
if err != nil {
return err
}
block := &alterPartitionReassignmentsErrorBlock{}
if err := block.decode(pd); err != nil {
return err
}
r.Errors[topic][partition] = block
}
if _, err = pd.getEmptyTaggedFieldArray(); err != nil {
return err
}
}
}
if _, err = pd.getEmptyTaggedFieldArray(); err != nil {
return err
}
return nil
}
func (r *AlterPartitionReassignmentsResponse) key() int16 {
return 45
}
func (r *AlterPartitionReassignmentsResponse) version() int16 {
return r.Version
}
func (r *AlterPartitionReassignmentsResponse) headerVersion() int16 {
return 1
}
func (r *AlterPartitionReassignmentsResponse) requiredVersion() KafkaVersion {
return V2_4_0_0
}


@ -1,24 +1,29 @@
package sarama
//ApiVersionsRequest is a request to get the range of API versions the broker supports
type ApiVersionsRequest struct {
}
func (r *ApiVersionsRequest) encode(pe packetEncoder) error {
func (a *ApiVersionsRequest) encode(pe packetEncoder) error {
return nil
}
func (r *ApiVersionsRequest) decode(pd packetDecoder, version int16) (err error) {
func (a *ApiVersionsRequest) decode(pd packetDecoder, version int16) (err error) {
return nil
}
func (r *ApiVersionsRequest) key() int16 {
func (a *ApiVersionsRequest) key() int16 {
return 18
}
func (r *ApiVersionsRequest) version() int16 {
func (a *ApiVersionsRequest) version() int16 {
return 0
}
func (r *ApiVersionsRequest) requiredVersion() KafkaVersion {
func (a *ApiVersionsRequest) headerVersion() int16 {
return 1
}
func (a *ApiVersionsRequest) requiredVersion() KafkaVersion {
return V0_10_0_0
}


@ -1,5 +1,6 @@
package sarama
//ApiVersionsResponseBlock is an api version response block type
type ApiVersionsResponseBlock struct {
ApiKey int16
MinVersion int16
@ -31,6 +32,7 @@ func (b *ApiVersionsResponseBlock) decode(pd packetDecoder) error {
return nil
}
//ApiVersionsResponse is an api version response type
type ApiVersionsResponse struct {
Err KError
ApiVersions []*ApiVersionsResponseBlock
@ -82,6 +84,10 @@ func (r *ApiVersionsResponse) version() int16 {
return 0
}
func (a *ApiVersionsResponse) headerVersion() int16 {
return 0
}
func (r *ApiVersionsResponse) requiredVersion() KafkaVersion {
return V0_10_0_0
}


@ -1,6 +1,7 @@
package sarama
import (
"encoding/binary"
"fmt"
"sync"
"time"
@ -46,18 +47,78 @@ type AsyncProducer interface {
Errors() <-chan *ProducerError
}
// transactionManager keeps the state necessary to ensure idempotent production
type transactionManager struct {
producerID int64
producerEpoch int16
sequenceNumbers map[string]int32
mutex sync.Mutex
}
const (
noProducerID = -1
noProducerEpoch = -1
)
func (t *transactionManager) getAndIncrementSequenceNumber(topic string, partition int32) (int32, int16) {
key := fmt.Sprintf("%s-%d", topic, partition)
t.mutex.Lock()
defer t.mutex.Unlock()
sequence := t.sequenceNumbers[key]
t.sequenceNumbers[key] = sequence + 1
return sequence, t.producerEpoch
}
func (t *transactionManager) bumpEpoch() {
t.mutex.Lock()
defer t.mutex.Unlock()
t.producerEpoch++
for k := range t.sequenceNumbers {
t.sequenceNumbers[k] = 0
}
}
func (t *transactionManager) getProducerID() (int64, int16) {
t.mutex.Lock()
defer t.mutex.Unlock()
return t.producerID, t.producerEpoch
}
func newTransactionManager(conf *Config, client Client) (*transactionManager, error) {
txnmgr := &transactionManager{
producerID: noProducerID,
producerEpoch: noProducerEpoch,
}
if conf.Producer.Idempotent {
initProducerIDResponse, err := client.InitProducerID()
if err != nil {
return nil, err
}
txnmgr.producerID = initProducerIDResponse.ProducerID
txnmgr.producerEpoch = initProducerIDResponse.ProducerEpoch
txnmgr.sequenceNumbers = make(map[string]int32)
txnmgr.mutex = sync.Mutex{}
Logger.Printf("Obtained a ProducerId: %d and ProducerEpoch: %d\n", txnmgr.producerID, txnmgr.producerEpoch)
}
return txnmgr, nil
}
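// Configuration sketch (illustrative): the settings that activate this
// transactionManager. sarama's config validation requires the paired values
// below whenever Producer.Idempotent is set.
//
// cfg := sarama.NewConfig()
// cfg.Version = sarama.V0_11_0_0
// cfg.Producer.Idempotent = true
// cfg.Producer.RequiredAcks = sarama.WaitForAll
// cfg.Producer.Retry.Max = 5
// cfg.Net.MaxOpenRequests = 1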
type asyncProducer struct {
client Client
conf *Config
ownClient bool
client Client
conf *Config
errors chan *ProducerError
input, successes, retries chan *ProducerMessage
inFlight sync.WaitGroup
brokers map[*Broker]chan<- *ProducerMessage
brokerRefs map[chan<- *ProducerMessage]int
brokers map[*Broker]*brokerProducer
brokerRefs map[*brokerProducer]int
brokerLock sync.Mutex
txnmgr *transactionManager
}
// NewAsyncProducer creates a new AsyncProducer using the given broker addresses and configuration.
@ -66,23 +127,29 @@ func NewAsyncProducer(addrs []string, conf *Config) (AsyncProducer, error) {
if err != nil {
return nil, err
}
p, err := NewAsyncProducerFromClient(client)
if err != nil {
return nil, err
}
p.(*asyncProducer).ownClient = true
return p, nil
return newAsyncProducer(client)
}
// NewAsyncProducerFromClient creates a new Producer using the given client. It is still
// necessary to call Close() on the underlying client when shutting down this producer.
func NewAsyncProducerFromClient(client Client) (AsyncProducer, error) {
// For clients passed in by the caller, ensure we don't
// call Close() on it.
cli := &nopCloserClient{client}
return newAsyncProducer(cli)
}
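// Usage sketch (illustrative): asynchronous production against an assumed
// broker at localhost:9092, draining Errors() in a separate goroutine so the
// producer never blocks on an unread channel.
//
// producer, err := sarama.NewAsyncProducer([]string{"localhost:9092"}, nil)
// if err != nil {
// log.Fatal(err)
// }
// defer producer.AsyncClose()
// go func() {
// for perr := range producer.Errors() {
// log.Println("produce failed:", perr)
// }
// }()
// producer.Input() <- &sarama.ProducerMessage{Topic: "events", Value: sarama.StringEncoder("hello")}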
func newAsyncProducer(client Client) (AsyncProducer, error) {
// Check that we are not dealing with a closed Client before processing any other arguments
if client.Closed() {
return nil, ErrClosedClient
}
txnmgr, err := newTransactionManager(client.Config(), client)
if err != nil {
return nil, err
}
p := &asyncProducer{
client: client,
conf: client.Config(),
@ -90,8 +157,9 @@ func NewAsyncProducerFromClient(client Client) (AsyncProducer, error) {
input: make(chan *ProducerMessage),
successes: make(chan *ProducerMessage),
retries: make(chan *ProducerMessage),
brokers: make(map[*Broker]chan<- *ProducerMessage),
brokerRefs: make(map[chan<- *ProducerMessage]int),
brokers: make(map[*Broker]*brokerProducer),
brokerRefs: make(map[*brokerProducer]int),
txnmgr: txnmgr,
}
// launch our singleton dispatchers
@ -119,6 +187,10 @@ type ProducerMessage struct {
// StringEncoder and ByteEncoder.
Value Encoder
// The headers are key-value pairs that are transparently passed
// by Kafka between producers and consumers.
Headers []RecordHeader
// This field is used to hold arbitrary data you wish to include so it
// will be available when receiving on the Successes and Errors channels.
// Sarama completely ignores this field; it is only to be used for
@ -134,20 +206,39 @@ type ProducerMessage struct {
// Partition is the partition that the message was sent to. This is only
// guaranteed to be defined if the message was successfully delivered.
Partition int32
// Timestamp is the timestamp assigned to the message by the broker. This
// is only guaranteed to be defined if the message was successfully
// delivered, RequiredAcks is not NoResponse, and the Kafka broker is at
// least version 0.10.0.
// Timestamp can vary in behaviour depending on broker configuration, being
// in either one of the CreateTime or LogAppendTime modes (default CreateTime),
// and requiring version at least 0.10.0.
//
// When configured to CreateTime, the timestamp is specified by the producer
// either by explicitly setting this field, or when the message is added
// to a produce set.
//
// When configured to LogAppendTime, the timestamp assigned to the message
// by the broker. This is only guaranteed to be defined if the message was
// successfully delivered and RequiredAcks is not NoResponse.
Timestamp time.Time
retries int
flags flagSet
retries int
flags flagSet
expectation chan *ProducerError
sequenceNumber int32
producerEpoch int16
hasSequence bool
}
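// Construction sketch (illustrative): a keyed message carrying the record
// headers introduced above. Headers need conf.Version >= V0_11_0_0; the
// payload variable is assumed.
//
// msg := &sarama.ProducerMessage{
// Topic: "events",
// Key: sarama.StringEncoder("user-42"),
// Value: sarama.ByteEncoder(payload),
// Headers: []sarama.RecordHeader{
// {Key: []byte("trace-id"), Value: []byte("abc123")},
// },
// }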
const producerMessageOverhead = 26 // the metadata overhead of CRC, flags, etc.
func (m *ProducerMessage) byteSize() int {
size := producerMessageOverhead
func (m *ProducerMessage) byteSize(version int) int {
var size int
if version >= 2 {
size = maximumRecordOverhead
for _, h := range m.Headers {
size += len(h.Key) + len(h.Value) + 2*binary.MaxVarintLen32
}
} else {
size = producerMessageOverhead
}
if m.Key != nil {
size += m.Key.Length()
}
@ -160,6 +251,9 @@ func (m *ProducerMessage) byteSize() int {
func (m *ProducerMessage) clear() {
m.flags = 0
m.retries = 0
m.sequenceNumber = 0
m.producerEpoch = 0
m.hasSequence = false
}
// ProducerError is the type of error generated when the producer fails to deliver a message.
@ -254,7 +348,14 @@ func (p *asyncProducer) dispatcher() {
p.inFlight.Add(1)
}
if msg.byteSize() > p.conf.Producer.MaxMessageBytes {
version := 1
if p.conf.Version.IsAtLeast(V0_11_0_0) {
version = 2
} else if msg.Headers != nil {
p.returnError(msg, ConfigurationError("Producing headers requires Kafka at least v0.11"))
continue
}
if msg.byteSize(version) > p.conf.Producer.MaxMessageBytes {
p.returnError(msg, ErrMessageSizeTooLarge)
continue
}
@ -326,7 +427,14 @@ func (tp *topicProducer) partitionMessage(msg *ProducerMessage) error {
var partitions []int32
err := tp.breaker.Run(func() (err error) {
if tp.partitioner.RequiresConsistency() {
requiresConsistency := false
if ep, ok := tp.partitioner.(DynamicConsistencyPartitioner); ok {
requiresConsistency = ep.MessageRequiresConsistency(msg)
} else {
requiresConsistency = tp.partitioner.RequiresConsistency()
}
if requiresConsistency {
partitions, err = tp.parent.client.Partitions(msg.Topic)
} else {
partitions, err = tp.parent.client.WritablePartitions(msg.Topic)
@ -366,9 +474,9 @@ type partitionProducer struct {
partition int32
input <-chan *ProducerMessage
leader *Broker
breaker *breaker.Breaker
output chan<- *ProducerMessage
leader *Broker
breaker *breaker.Breaker
brokerProducer *brokerProducer
// highWatermark tracks the "current" retry level, which is the only one where we actually let messages through,
// all other messages get buffered in retryState[msg.retries].buf to preserve ordering
@ -398,21 +506,53 @@ func (p *asyncProducer) newPartitionProducer(topic string, partition int32) chan
return input
}
func (pp *partitionProducer) backoff(retries int) {
var backoff time.Duration
if pp.parent.conf.Producer.Retry.BackoffFunc != nil {
maxRetries := pp.parent.conf.Producer.Retry.Max
backoff = pp.parent.conf.Producer.Retry.BackoffFunc(retries, maxRetries)
} else {
backoff = pp.parent.conf.Producer.Retry.Backoff
}
if backoff > 0 {
time.Sleep(backoff)
}
}
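// Configuration sketch (illustrative): a capped exponential backoff supplied
// through the optional BackoffFunc hook consulted above; when the hook is nil
// the flat Producer.Retry.Backoff duration is used instead.
//
// cfg.Producer.Retry.BackoffFunc = func(retries, maxRetries int) time.Duration {
// d := 100 * time.Millisecond << uint(retries)
// if max := 10 * time.Second; d > max {
// d = max
// }
// return d
// }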
func (pp *partitionProducer) dispatch() {
// try to prefetch the leader; if this doesn't work, we'll do a proper call to `updateLeader`
// on the first message
pp.leader, _ = pp.parent.client.Leader(pp.topic, pp.partition)
if pp.leader != nil {
pp.output = pp.parent.getBrokerProducer(pp.leader)
pp.brokerProducer = pp.parent.getBrokerProducer(pp.leader)
pp.parent.inFlight.Add(1) // we're generating a syn message; track it so we don't shut down while it's still inflight
pp.output <- &ProducerMessage{Topic: pp.topic, Partition: pp.partition, flags: syn}
pp.brokerProducer.input <- &ProducerMessage{Topic: pp.topic, Partition: pp.partition, flags: syn}
}
defer func() {
if pp.brokerProducer != nil {
pp.parent.unrefBrokerProducer(pp.leader, pp.brokerProducer)
}
}()
for msg := range pp.input {
if pp.brokerProducer != nil && pp.brokerProducer.abandoned != nil {
select {
case <-pp.brokerProducer.abandoned:
// a message on the abandoned channel means that our current broker selection is out of date
Logger.Printf("producer/leader/%s/%d abandoning broker %d\n", pp.topic, pp.partition, pp.leader.ID())
pp.parent.unrefBrokerProducer(pp.leader, pp.brokerProducer)
pp.brokerProducer = nil
time.Sleep(pp.parent.conf.Producer.Retry.Backoff)
default:
// producer connection is still open.
}
}
if msg.retries > pp.highWatermark {
// a new, higher, retry level; handle it and then back off
pp.newHighWatermark(msg.retries)
time.Sleep(pp.parent.conf.Producer.Retry.Backoff)
pp.backoff(msg.retries)
} else if pp.highWatermark > 0 {
// we are retrying something (else highWatermark would be 0) but this message is not a *new* retry level
if msg.retries < pp.highWatermark {
@ -437,20 +577,25 @@ func (pp *partitionProducer) dispatch() {
// if we made it this far then the current msg contains real data, and can be sent to the next goroutine
// without breaking any of our ordering guarantees
if pp.output == nil {
if pp.brokerProducer == nil {
if err := pp.updateLeader(); err != nil {
pp.parent.returnError(msg, err)
time.Sleep(pp.parent.conf.Producer.Retry.Backoff)
pp.backoff(msg.retries)
continue
}
Logger.Printf("producer/leader/%s/%d selected broker %d\n", pp.topic, pp.partition, pp.leader.ID())
}
pp.output <- msg
}
// Now that we know we have a broker to actually try and send this message to, generate the sequence
// number for it.
// All messages being retried (sent or not) have already had their retry count updated
// Also, ignore "special" syn/fin messages used to sync the brokerProducer and the topicProducer.
if pp.parent.conf.Producer.Idempotent && msg.retries == 0 && msg.flags == 0 {
msg.sequenceNumber, msg.producerEpoch = pp.parent.txnmgr.getAndIncrementSequenceNumber(msg.Topic, msg.Partition)
msg.hasSequence = true
}
if pp.output != nil {
pp.parent.unrefBrokerProducer(pp.leader, pp.output)
pp.brokerProducer.input <- msg
}
}
@ -462,12 +607,12 @@ func (pp *partitionProducer) newHighWatermark(hwm int) {
// back to us and we can safely flush the backlog (otherwise we risk re-ordering messages)
pp.retryState[pp.highWatermark].expectChaser = true
pp.parent.inFlight.Add(1) // we're generating a fin message; track it so we don't shut down while it's still inflight
pp.output <- &ProducerMessage{Topic: pp.topic, Partition: pp.partition, flags: fin, retries: pp.highWatermark - 1}
pp.brokerProducer.input <- &ProducerMessage{Topic: pp.topic, Partition: pp.partition, flags: fin, retries: pp.highWatermark - 1}
// a new HWM means that our current broker selection is out of date
Logger.Printf("producer/leader/%s/%d abandoning broker %d\n", pp.topic, pp.partition, pp.leader.ID())
pp.parent.unrefBrokerProducer(pp.leader, pp.output)
pp.output = nil
pp.parent.unrefBrokerProducer(pp.leader, pp.brokerProducer)
pp.brokerProducer = nil
}
func (pp *partitionProducer) flushRetryBuffers() {
@ -475,7 +620,7 @@ func (pp *partitionProducer) flushRetryBuffers() {
for {
pp.highWatermark--
if pp.output == nil {
if pp.brokerProducer == nil {
if err := pp.updateLeader(); err != nil {
pp.parent.returnErrors(pp.retryState[pp.highWatermark].buf, err)
goto flushDone
@ -484,7 +629,7 @@ func (pp *partitionProducer) flushRetryBuffers() {
}
for _, msg := range pp.retryState[pp.highWatermark].buf {
pp.output <- msg
pp.brokerProducer.input <- msg
}
flushDone:
@ -509,16 +654,16 @@ func (pp *partitionProducer) updateLeader() error {
return err
}
pp.output = pp.parent.getBrokerProducer(pp.leader)
pp.brokerProducer = pp.parent.getBrokerProducer(pp.leader)
pp.parent.inFlight.Add(1) // we're generating a syn message; track it so we don't shut down while it's still inflight
pp.output <- &ProducerMessage{Topic: pp.topic, Partition: pp.partition, flags: syn}
pp.brokerProducer.input <- &ProducerMessage{Topic: pp.topic, Partition: pp.partition, flags: syn}
return nil
})
}
// one per broker; also constructs an associated flusher
func (p *asyncProducer) newBrokerProducer(broker *Broker) chan<- *ProducerMessage {
func (p *asyncProducer) newBrokerProducer(broker *Broker) *brokerProducer {
var (
input = make(chan *ProducerMessage)
bridge = make(chan *produceSet)
@ -531,6 +676,7 @@ func (p *asyncProducer) newBrokerProducer(broker *Broker) chan<- *ProducerMessag
input: input,
output: bridge,
responses: responses,
stopchan: make(chan struct{}),
buffer: newProduceSet(p),
currentRetries: make(map[string]map[int32]error),
}
@ -552,7 +698,11 @@ func (p *asyncProducer) newBrokerProducer(broker *Broker) chan<- *ProducerMessag
close(responses)
})
return input
if p.conf.Producer.Retry.Max <= 0 {
bp.abandoned = make(chan struct{})
}
return bp
}
type brokerProducerResponse struct {
@ -567,9 +717,11 @@ type brokerProducer struct {
parent *asyncProducer
broker *Broker
input <-chan *ProducerMessage
input chan *ProducerMessage
output chan<- *produceSet
responses <-chan *brokerProducerResponse
abandoned chan struct{}
stopchan chan struct{}
buffer *produceSet
timer <-chan time.Time
@ -585,12 +737,17 @@ func (bp *brokerProducer) run() {
for {
select {
case msg := <-bp.input:
if msg == nil {
case msg, ok := <-bp.input:
if !ok {
Logger.Printf("producer/broker/%d input chan closed\n", bp.broker.ID())
bp.shutdown()
return
}
if msg == nil {
continue
}
if msg.flags&syn == syn {
Logger.Printf("producer/broker/%d state change to [open] on %s/%d\n",
bp.broker.ID(), msg.Topic, msg.Partition)
@ -616,12 +773,21 @@ func (bp *brokerProducer) run() {
}
if bp.buffer.wouldOverflow(msg) {
if err := bp.waitForSpace(msg); err != nil {
Logger.Printf("producer/broker/%d maximum request accumulated, waiting for space\n", bp.broker.ID())
if err := bp.waitForSpace(msg, false); err != nil {
bp.parent.retryMessage(msg, err)
continue
}
}
if bp.parent.txnmgr.producerID != noProducerID && bp.buffer.producerEpoch != msg.producerEpoch {
// The epoch was reset, need to roll the buffer over
Logger.Printf("producer/broker/%d detected epoch rollover, waiting for new buffer\n", bp.broker.ID())
if err := bp.waitForSpace(msg, true); err != nil {
bp.parent.retryMessage(msg, err)
continue
}
}
if err := bp.buffer.add(msg); err != nil {
bp.parent.returnError(msg, err)
continue
@ -634,8 +800,14 @@ func (bp *brokerProducer) run() {
bp.timerFired = true
case output <- bp.buffer:
bp.rollOver()
case response := <-bp.responses:
bp.handleResponse(response)
case response, ok := <-bp.responses:
if ok {
bp.handleResponse(response)
}
case <-bp.stopchan:
Logger.Printf(
"producer/broker/%d run loop asked to stop\n", bp.broker.ID())
return
}
if bp.timerFired || bp.buffer.readyToFlush() {
@ -659,7 +831,7 @@ func (bp *brokerProducer) shutdown() {
for response := range bp.responses {
bp.handleResponse(response)
}
close(bp.stopchan)
Logger.Printf("producer/broker/%d shut down\n", bp.broker.ID())
}
@ -671,9 +843,7 @@ func (bp *brokerProducer) needsRetry(msg *ProducerMessage) error {
return bp.currentRetries[msg.Topic][msg.Partition]
}
func (bp *brokerProducer) waitForSpace(msg *ProducerMessage) error {
Logger.Printf("producer/broker/%d maximum request accumulated, waiting for space\n", bp.broker.ID())
func (bp *brokerProducer) waitForSpace(msg *ProducerMessage, forceRollover bool) error {
for {
select {
case response := <-bp.responses:
@ -681,7 +851,7 @@ func (bp *brokerProducer) waitForSpace(msg *ProducerMessage) error {
// handling a response can change our state, so re-check some things
if reason := bp.needsRetry(msg); reason != nil {
return reason
} else if !bp.buffer.wouldOverflow(msg) {
} else if !bp.buffer.wouldOverflow(msg) && !forceRollover {
return nil
}
case bp.output <- bp.buffer:
@ -712,16 +882,17 @@ func (bp *brokerProducer) handleResponse(response *brokerProducerResponse) {
func (bp *brokerProducer) handleSuccess(sent *produceSet, response *ProduceResponse) {
// we iterate through the blocks in the request set, not the response, so that we notice
// if the response is missing a block completely
sent.eachPartition(func(topic string, partition int32, msgs []*ProducerMessage) {
var retryTopics []string
sent.eachPartition(func(topic string, partition int32, pSet *partitionSet) {
if response == nil {
// this only happens when RequiredAcks is NoResponse, so we have to assume success
bp.parent.returnSuccesses(msgs)
bp.parent.returnSuccesses(pSet.msgs)
return
}
block := response.GetBlock(topic, partition)
if block == nil {
bp.parent.returnErrors(msgs, ErrIncompleteResponse)
bp.parent.returnErrors(pSet.msgs, ErrIncompleteResponse)
return
}
@ -729,45 +900,115 @@ func (bp *brokerProducer) handleSuccess(sent *produceSet, response *ProduceRespo
// Success
case ErrNoError:
if bp.parent.conf.Version.IsAtLeast(V0_10_0_0) && !block.Timestamp.IsZero() {
for _, msg := range msgs {
for _, msg := range pSet.msgs {
msg.Timestamp = block.Timestamp
}
}
for i, msg := range msgs {
for i, msg := range pSet.msgs {
msg.Offset = block.Offset + int64(i)
}
bp.parent.returnSuccesses(msgs)
bp.parent.returnSuccesses(pSet.msgs)
// Duplicate
case ErrDuplicateSequenceNumber:
bp.parent.returnSuccesses(pSet.msgs)
// Retriable errors
case ErrInvalidMessage, ErrUnknownTopicOrPartition, ErrLeaderNotAvailable, ErrNotLeaderForPartition,
ErrRequestTimedOut, ErrNotEnoughReplicas, ErrNotEnoughReplicasAfterAppend:
Logger.Printf("producer/broker/%d state change to [retrying] on %s/%d because %v\n",
bp.broker.ID(), topic, partition, block.Err)
bp.currentRetries[topic][partition] = block.Err
bp.parent.retryMessages(msgs, block.Err)
bp.parent.retryMessages(bp.buffer.dropPartition(topic, partition), block.Err)
if bp.parent.conf.Producer.Retry.Max <= 0 {
bp.parent.abandonBrokerConnection(bp.broker)
bp.parent.returnErrors(pSet.msgs, block.Err)
} else {
retryTopics = append(retryTopics, topic)
}
// Other non-retriable errors
default:
bp.parent.returnErrors(msgs, block.Err)
if bp.parent.conf.Producer.Retry.Max <= 0 {
bp.parent.abandonBrokerConnection(bp.broker)
}
bp.parent.returnErrors(pSet.msgs, block.Err)
}
})
if len(retryTopics) > 0 {
if bp.parent.conf.Producer.Idempotent {
err := bp.parent.client.RefreshMetadata(retryTopics...)
if err != nil {
Logger.Printf("Failed refreshing metadata because of %v\n", err)
}
}
sent.eachPartition(func(topic string, partition int32, pSet *partitionSet) {
block := response.GetBlock(topic, partition)
if block == nil {
// handled in the previous "eachPartition" loop
return
}
switch block.Err {
case ErrInvalidMessage, ErrUnknownTopicOrPartition, ErrLeaderNotAvailable, ErrNotLeaderForPartition,
ErrRequestTimedOut, ErrNotEnoughReplicas, ErrNotEnoughReplicasAfterAppend:
Logger.Printf("producer/broker/%d state change to [retrying] on %s/%d because %v\n",
bp.broker.ID(), topic, partition, block.Err)
if bp.currentRetries[topic] == nil {
bp.currentRetries[topic] = make(map[int32]error)
}
bp.currentRetries[topic][partition] = block.Err
if bp.parent.conf.Producer.Idempotent {
go bp.parent.retryBatch(topic, partition, pSet, block.Err)
} else {
bp.parent.retryMessages(pSet.msgs, block.Err)
}
// dropping the following messages has the side effect of incrementing their retry count
bp.parent.retryMessages(bp.buffer.dropPartition(topic, partition), block.Err)
}
})
}
}
func (p *asyncProducer) retryBatch(topic string, partition int32, pSet *partitionSet, kerr KError) {
Logger.Printf("Retrying batch for %v-%d because of %s\n", topic, partition, kerr)
produceSet := newProduceSet(p)
produceSet.msgs[topic] = make(map[int32]*partitionSet)
produceSet.msgs[topic][partition] = pSet
produceSet.bufferBytes += pSet.bufferBytes
produceSet.bufferCount += len(pSet.msgs)
for _, msg := range pSet.msgs {
if msg.retries >= p.conf.Producer.Retry.Max {
p.returnError(msg, kerr)
return
}
msg.retries++
}
// it's expected that a metadata refresh has been requested prior to calling retryBatch
leader, err := p.client.Leader(topic, partition)
if err != nil {
Logger.Printf("Failed retrying batch for %v-%d because of %v while looking up for new leader\n", topic, partition, err)
for _, msg := range pSet.msgs {
p.returnError(msg, kerr)
}
return
}
bp := p.getBrokerProducer(leader)
bp.output <- produceSet
}
func (bp *brokerProducer) handleError(sent *produceSet, err error) {
switch err.(type) {
case PacketEncodingError:
sent.eachPartition(func(topic string, partition int32, msgs []*ProducerMessage) {
bp.parent.returnErrors(msgs, err)
sent.eachPartition(func(topic string, partition int32, pSet *partitionSet) {
bp.parent.returnErrors(pSet.msgs, err)
})
default:
Logger.Printf("producer/broker/%d state change to [closing] because %s\n", bp.broker.ID(), err)
bp.parent.abandonBrokerConnection(bp.broker)
_ = bp.broker.Close()
bp.closing = err
sent.eachPartition(func(topic string, partition int32, msgs []*ProducerMessage) {
bp.parent.retryMessages(msgs, err)
sent.eachPartition(func(topic string, partition int32, pSet *partitionSet) {
bp.parent.retryMessages(pSet.msgs, err)
})
bp.buffer.eachPartition(func(topic string, partition int32, msgs []*ProducerMessage) {
bp.parent.retryMessages(msgs, err)
bp.buffer.eachPartition(func(topic string, partition int32, pSet *partitionSet) {
bp.parent.retryMessages(pSet.msgs, err)
})
bp.rollOver()
}
@ -809,11 +1050,9 @@ func (p *asyncProducer) shutdown() {
p.inFlight.Wait()
if p.ownClient {
err := p.client.Close()
if err != nil {
Logger.Println("producer/shutdown failed to close the embedded client:", err)
}
err := p.client.Close()
if err != nil {
Logger.Println("producer/shutdown failed to close the embedded client:", err)
}
close(p.input)
@ -823,6 +1062,12 @@ func (p *asyncProducer) shutdown() {
}
func (p *asyncProducer) returnError(msg *ProducerMessage, err error) {
// We need to reset the producer ID epoch if we set a sequence number on it, because the broker
// will never see a message with this number, so we can never continue the sequence.
if msg.hasSequence {
Logger.Printf("producer/txnmanager rolling over epoch due to publish failure on %s/%d", msg.Topic, msg.Partition)
p.txnmgr.bumpEpoch()
}
msg.clear()
pErr := &ProducerError{Msg: msg, Err: err}
if p.conf.Producer.Return.Errors {
@ -864,7 +1109,7 @@ func (p *asyncProducer) retryMessages(batch []*ProducerMessage, err error) {
}
}
func (p *asyncProducer) getBrokerProducer(broker *Broker) chan<- *ProducerMessage {
func (p *asyncProducer) getBrokerProducer(broker *Broker) *brokerProducer {
p.brokerLock.Lock()
defer p.brokerLock.Unlock()
@ -881,13 +1126,13 @@ func (p *asyncProducer) getBrokerProducer(broker *Broker) chan<- *ProducerMessag
return bp
}
func (p *asyncProducer) unrefBrokerProducer(broker *Broker, bp chan<- *ProducerMessage) {
func (p *asyncProducer) unrefBrokerProducer(broker *Broker, bp *brokerProducer) {
p.brokerLock.Lock()
defer p.brokerLock.Unlock()
p.brokerRefs[bp]--
if p.brokerRefs[bp] == 0 {
close(bp)
close(bp.input)
delete(p.brokerRefs, bp)
if p.brokers[broker] == bp {
@ -900,5 +1145,10 @@ func (p *asyncProducer) abandonBrokerConnection(broker *Broker) {
p.brokerLock.Lock()
defer p.brokerLock.Unlock()
bc, ok := p.brokers[broker]
if ok && bc.abandoned != nil {
close(bc.abandoned)
}
delete(p.brokers, broker)
}
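The async-producer changes above thread idempotence through the partition and broker producers: sequence numbers are stamped per message, failed publishes bump the producer epoch, and retriable batches are replayed via retryBatch. A minimal sketch of enabling this from application code, assuming a reachable broker at localhost:9092; the constraints mirror what config.Validate() enforces further down.

// Sketch: enabling the idempotent async producer.
package main

import (
    "log"

    "github.com/Shopify/sarama"
)

func main() {
    cfg := sarama.NewConfig()
    cfg.Version = sarama.V0_11_0_0                // idempotence requires >= 0.11
    cfg.Producer.Idempotent = true
    cfg.Producer.RequiredAcks = sarama.WaitForAll // Validate() requires WaitForAll
    cfg.Producer.Retry.Max = 5                    // must be >= 1
    cfg.Net.MaxOpenRequests = 1                   // required to preserve ordering
    cfg.Producer.Return.Successes = true

    p, err := sarama.NewAsyncProducer([]string{"localhost:9092"}, cfg)
    if err != nil {
        log.Fatal(err)
    }
    defer p.AsyncClose()

    p.Input() <- &sarama.ProducerMessage{Topic: "events", Value: sarama.StringEncoder("hello")}
    <-p.Successes() // the message was assigned a per-partition sequence number
}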

vendor/github.com/Shopify/sarama/balance_strategy.go generated vendored Normal file

File diff suppressed because it is too large


vendor/github.com/Shopify/sarama/client.go

@ -17,6 +17,15 @@ type Client interface {
// altered after it has been created.
Config() *Config
// Controller returns the cluster controller broker. It will return a
// locally cached value if it's available. You can call RefreshController
// to update the cached value. Requires Kafka 0.10 or higher.
Controller() (*Broker, error)
// RefreshController retrieves the cluster controller from fresh metadata
// and stores it in the local cache. Requires Kafka 0.10 or higher.
RefreshController() (*Broker, error)
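A minimal sketch of using the new Controller/RefreshController accessors, assuming a reachable broker at localhost:9092:

cfg := sarama.NewConfig()
cfg.Version = sarama.V0_10_0_0 // Controller requires Kafka 0.10+
client, err := sarama.NewClient([]string{"localhost:9092"}, cfg)
if err != nil {
    log.Fatal(err)
}
defer client.Close()

controller, err := client.Controller() // served from the local cache when possible
if err == sarama.ErrControllerNotAvailable {
    controller, err = client.RefreshController() // force fresh metadata
}
if err != nil {
    log.Fatal(err)
}
log.Printf("controller is broker %d at %s", controller.ID(), controller.Addr())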
// Brokers returns the current set of active brokers as retrieved from cluster metadata.
Brokers() []*Broker
@ -43,15 +52,19 @@ type Client interface {
// the partition leader.
InSyncReplicas(topic string, partitionID int32) ([]int32, error)
// OfflineReplicas returns the set of all offline replica IDs for the given
// partition. Offline replicas are replicas whose broker is currently offline.
OfflineReplicas(topic string, partitionID int32) ([]int32, error)
// RefreshMetadata takes a list of topics and queries the cluster to refresh the
// available metadata for those topics. If no topics are provided, it will refresh
// metadata for all topics.
RefreshMetadata(topics ...string) error
// GetOffset queries the cluster to get the most recent available offset at the
// given time on the topic/partition combination. Time should be OffsetOldest for
// the earliest available offset, OffsetNewest for the offset of the message that
// will be produced next, or a time.
// given time (in milliseconds) on the topic/partition combination.
// Time should be OffsetOldest for the earliest available offset,
// OffsetNewest for the offset of the message that will be produced next, or a time.
GetOffset(topic string, partitionID int32, time int64) (int64, error)
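A short sketch of GetOffset with a sentinel offset versus a wall-clock time in milliseconds, reusing the client from the sketch above; the topic name is an assumption:

newest, err := client.GetOffset("events", 0, sarama.OffsetNewest)
if err != nil {
    log.Fatal(err)
}
tsMillis := time.Now().Add(-time.Hour).UnixNano() / int64(time.Millisecond)
hourAgo, err := client.GetOffset("events", 0, tsMillis)
if err != nil {
    log.Fatal(err)
}
log.Printf("newest=%d, one hour ago=%d", newest, hourAgo)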
// Coordinator returns the coordinating broker for a consumer group. It will
@ -64,6 +77,9 @@ type Client interface {
// in local cache. This function only works on Kafka 0.8.2 and higher.
RefreshCoordinator(consumerGroup string) error
// InitProducerID retrieves information required for Idempotent Producer
InitProducerID() (*InitProducerIDResponse, error)
// Close shuts down all broker connections managed by this client. It is required
// to call this function before a client object passes out of scope, as it will
// otherwise leak memory. You must close any Producers or Consumers using a client
@ -97,9 +113,11 @@ type client struct {
seedBrokers []*Broker
deadSeeds []*Broker
brokers map[int32]*Broker // maps broker ids to brokers
metadata map[string]map[int32]*PartitionMetadata // maps topics to partition ids to metadata
coordinators map[string]int32 // Maps consumer group names to coordinating broker IDs
controllerID int32 // cluster controller broker id
brokers map[int32]*Broker // maps broker ids to brokers
metadata map[string]map[int32]*PartitionMetadata // maps topics to partition ids to metadata
metadataTopics map[string]none // topics that need to collect metadata
coordinators map[string]int32 // Maps consumer group names to coordinating broker IDs
// If the number of partitions is large, we can get some churn calling cachedPartitions,
// so the result is cached. It is important to update this value whenever metadata is changed
@ -132,6 +150,7 @@ func NewClient(addrs []string, conf *Config) (Client, error) {
closed: make(chan none),
brokers: make(map[int32]*Broker),
metadata: make(map[string]map[int32]*PartitionMetadata),
metadataTopics: make(map[string]none),
cachedPartitionsResults: make(map[string][maxPartitionIndex][]int32),
coordinators: make(map[string]int32),
}
@ -170,13 +189,32 @@ func (client *client) Config() *Config {
func (client *client) Brokers() []*Broker {
client.lock.RLock()
defer client.lock.RUnlock()
brokers := make([]*Broker, 0)
brokers := make([]*Broker, 0, len(client.brokers))
for _, broker := range client.brokers {
brokers = append(brokers, broker)
}
return brokers
}
func (client *client) InitProducerID() (*InitProducerIDResponse, error) {
var err error
for broker := client.any(); broker != nil; broker = client.any() {
req := &InitProducerIDRequest{}
response, err := broker.InitProducerID(req)
switch err.(type) {
case nil:
return response, nil
default:
// some error, remove that broker and try again
Logger.Printf("Client got error from broker %d when issuing InitProducerID : %v\n", broker.ID(), err)
_ = broker.Close()
client.deregisterBroker(broker)
}
}
return nil, err
}
func (client *client) Close() error {
if client.Closed() {
// Chances are this is being called from a defer() and the error will go unobserved
@ -203,11 +241,15 @@ func (client *client) Close() error {
client.brokers = nil
client.metadata = nil
client.metadataTopics = nil
return nil
}
func (client *client) Closed() bool {
client.lock.RLock()
defer client.lock.RUnlock()
return client.brokers == nil
}
@ -227,6 +269,22 @@ func (client *client) Topics() ([]string, error) {
return ret, nil
}
func (client *client) MetadataTopics() ([]string, error) {
if client.Closed() {
return nil, ErrClosedClient
}
client.lock.RLock()
defer client.lock.RUnlock()
ret := make([]string, 0, len(client.metadataTopics))
for topic := range client.metadataTopics {
ret = append(ret, topic)
}
return ret, nil
}
func (client *client) Partitions(topic string) ([]int32, error) {
if client.Closed() {
return nil, ErrClosedClient
@ -242,7 +300,8 @@ func (client *client) Partitions(topic string) ([]int32, error) {
partitions = client.cachedPartitions(topic, allPartitions)
}
if partitions == nil {
// no partitions found after refreshing metadata
if len(partitions) == 0 {
return nil, ErrUnknownTopicOrPartition
}
@ -297,7 +356,7 @@ func (client *client) Replicas(topic string, partitionID int32) ([]int32, error)
}
if metadata.Err == ErrReplicaNotAvailable {
return nil, metadata.Err
return dupInt32Slice(metadata.Replicas), metadata.Err
}
return dupInt32Slice(metadata.Replicas), nil
}
@ -322,11 +381,36 @@ func (client *client) InSyncReplicas(topic string, partitionID int32) ([]int32,
}
if metadata.Err == ErrReplicaNotAvailable {
return nil, metadata.Err
return dupInt32Slice(metadata.Isr), metadata.Err
}
return dupInt32Slice(metadata.Isr), nil
}
func (client *client) OfflineReplicas(topic string, partitionID int32) ([]int32, error) {
if client.Closed() {
return nil, ErrClosedClient
}
metadata := client.cachedMetadata(topic, partitionID)
if metadata == nil {
err := client.RefreshMetadata(topic)
if err != nil {
return nil, err
}
metadata = client.cachedMetadata(topic, partitionID)
}
if metadata == nil {
return nil, ErrUnknownTopicOrPartition
}
if metadata.Err == ErrReplicaNotAvailable {
return dupInt32Slice(metadata.OfflineReplicas), metadata.Err
}
return dupInt32Slice(metadata.OfflineReplicas), nil
}
func (client *client) Leader(topic string, partitionID int32) (*Broker, error) {
if client.Closed() {
return nil, ErrClosedClient
@ -359,7 +443,11 @@ func (client *client) RefreshMetadata(topics ...string) error {
}
}
return client.tryRefreshMetadata(topics, client.conf.Metadata.Retry.Max)
deadline := time.Time{}
if client.conf.Metadata.Timeout > 0 {
deadline = time.Now().Add(client.conf.Metadata.Timeout)
}
return client.tryRefreshMetadata(topics, client.conf.Metadata.Retry.Max, deadline)
}
func (client *client) GetOffset(topic string, partitionID int32, time int64) (int64, error) {
@ -379,6 +467,60 @@ func (client *client) GetOffset(topic string, partitionID int32, time int64) (in
return offset, err
}
func (client *client) Controller() (*Broker, error) {
if client.Closed() {
return nil, ErrClosedClient
}
if !client.conf.Version.IsAtLeast(V0_10_0_0) {
return nil, ErrUnsupportedVersion
}
controller := client.cachedController()
if controller == nil {
if err := client.refreshMetadata(); err != nil {
return nil, err
}
controller = client.cachedController()
}
if controller == nil {
return nil, ErrControllerNotAvailable
}
_ = controller.Open(client.conf)
return controller, nil
}
// deregisterController removes the cached controllerID
func (client *client) deregisterController() {
client.lock.Lock()
defer client.lock.Unlock()
delete(client.brokers, client.controllerID)
}
// RefreshController retrieves the cluster controller from fresh metadata
// and stores it in the local cache. Requires Kafka 0.10 or higher.
func (client *client) RefreshController() (*Broker, error) {
if client.Closed() {
return nil, ErrClosedClient
}
client.deregisterController()
if err := client.refreshMetadata(); err != nil {
return nil, err
}
controller := client.cachedController()
if controller == nil {
return nil, ErrControllerNotAvailable
}
_ = controller.Open(client.conf)
return controller, nil
}
func (client *client) Coordinator(consumerGroup string) (*Broker, error) {
if client.Closed() {
return nil, ErrClosedClient
@ -420,10 +562,39 @@ func (client *client) RefreshCoordinator(consumerGroup string) error {
// private broker management helpers
func (client *client) updateBroker(brokers []*Broker) {
var currentBroker = make(map[int32]*Broker, len(brokers))
for _, broker := range brokers {
currentBroker[broker.ID()] = broker
if client.brokers[broker.ID()] == nil { // add new broker
client.brokers[broker.ID()] = broker
Logger.Printf("client/brokers registered new broker #%d at %s", broker.ID(), broker.Addr())
} else if broker.Addr() != client.brokers[broker.ID()].Addr() { // replace broker with new address
safeAsyncClose(client.brokers[broker.ID()])
client.brokers[broker.ID()] = broker
Logger.Printf("client/brokers replaced registered broker #%d with %s", broker.ID(), broker.Addr())
}
}
for id, broker := range client.brokers {
if _, exist := currentBroker[id]; !exist { // remove old broker
safeAsyncClose(broker)
delete(client.brokers, id)
Logger.Printf("client/broker remove invalid broker #%d with %s", broker.ID(), broker.Addr())
}
}
}
// registerBroker makes sure a broker received by a Metadata or Coordinator request is registered
// in the brokers map. You must hold the write lock before calling this function.
func (client *client) registerBroker(broker *Broker) {
if client.brokers == nil {
Logger.Printf("cannot register broker #%d at %s, client already closed", broker.ID(), broker.Addr())
return
}
if client.brokers[broker.ID()] == nil {
client.brokers[broker.ID()] = broker
Logger.Printf("client/brokers registered new broker #%d at %s", broker.ID(), broker.Addr())
@ -607,20 +778,7 @@ func (client *client) backgroundMetadataUpdater() {
for {
select {
case <-ticker.C:
topics := []string{}
if !client.conf.Metadata.Full {
if specificTopics, err := client.Topics(); err != nil {
Logger.Println("Client background metadata topic load:", err)
break
} else if len(specificTopics) == 0 {
Logger.Println("Client background metadata update: no specific topics to update")
break
} else {
topics = specificTopics
}
}
if err := client.RefreshMetadata(topics...); err != nil {
if err := client.refreshMetadata(); err != nil {
Logger.Println("Client background metadata update:", err)
}
case <-client.closer:
@ -629,28 +787,72 @@ func (client *client) backgroundMetadataUpdater() {
}
}
func (client *client) tryRefreshMetadata(topics []string, attemptsRemaining int) error {
func (client *client) refreshMetadata() error {
topics := []string{}
if !client.conf.Metadata.Full {
if specificTopics, err := client.MetadataTopics(); err != nil {
return err
} else if len(specificTopics) == 0 {
return ErrNoTopicsToUpdateMetadata
} else {
topics = specificTopics
}
}
if err := client.RefreshMetadata(topics...); err != nil {
return err
}
return nil
}
func (client *client) tryRefreshMetadata(topics []string, attemptsRemaining int, deadline time.Time) error {
pastDeadline := func(backoff time.Duration) bool {
if !deadline.IsZero() && time.Now().Add(backoff).After(deadline) {
// we are past the deadline
return true
}
return false
}
retry := func(err error) error {
if attemptsRemaining > 0 {
Logger.Printf("client/metadata retrying after %dms... (%d attempts remaining)\n", client.conf.Metadata.Retry.Backoff/time.Millisecond, attemptsRemaining)
time.Sleep(client.conf.Metadata.Retry.Backoff)
return client.tryRefreshMetadata(topics, attemptsRemaining-1)
backoff := client.computeBackoff(attemptsRemaining)
if pastDeadline(backoff) {
Logger.Println("client/metadata skipping last retries as we would go past the metadata timeout")
return err
}
Logger.Printf("client/metadata retrying after %dms... (%d attempts remaining)\n", backoff/time.Millisecond, attemptsRemaining)
if backoff > 0 {
time.Sleep(backoff)
}
return client.tryRefreshMetadata(topics, attemptsRemaining-1, deadline)
}
return err
}
for broker := client.any(); broker != nil; broker = client.any() {
broker := client.any()
for ; broker != nil && !pastDeadline(0); broker = client.any() {
allowAutoTopicCreation := true
if len(topics) > 0 {
Logger.Printf("client/metadata fetching metadata for %v from broker %s\n", topics, broker.addr)
} else {
allowAutoTopicCreation = false
Logger.Printf("client/metadata fetching metadata for all topics from broker %s\n", broker.addr)
}
response, err := broker.GetMetadata(&MetadataRequest{Topics: topics})
req := &MetadataRequest{Topics: topics, AllowAutoTopicCreation: allowAutoTopicCreation}
if client.conf.Version.IsAtLeast(V1_0_0_0) {
req.Version = 5
} else if client.conf.Version.IsAtLeast(V0_10_0_0) {
req.Version = 1
}
response, err := broker.GetMetadata(req)
switch err.(type) {
case nil:
allKnownMetaData := len(topics) == 0
// valid response, use it
shouldRetry, err := client.updateMetadata(response)
shouldRetry, err := client.updateMetadata(response, allKnownMetaData)
if shouldRetry {
Logger.Println("client/metadata found some partitions to be leaderless")
return retry(err) // note: err can be nil
@ -660,39 +862,77 @@ func (client *client) tryRefreshMetadata(topics []string, attemptsRemaining int)
case PacketEncodingError:
// didn't even send, return the error
return err
case KError:
// if this is a SASL auth error, return it, as it _should_ be non-retryable for all brokers
if err.(KError) == ErrSASLAuthenticationFailed {
Logger.Println("client/metadata failed SASL authentication")
return err
}
if err.(KError) == ErrTopicAuthorizationFailed {
Logger.Println("client is not authorized to access this topic. The topics were: ", topics)
return err
}
// else remove that broker and try again
Logger.Printf("client/metadata got error from broker %d while fetching metadata: %v\n", broker.ID(), err)
_ = broker.Close()
client.deregisterBroker(broker)
default:
// some other error, remove that broker and try again
Logger.Println("client/metadata got error from broker while fetching metadata:", err)
Logger.Printf("client/metadata got error from broker %d while fetching metadata: %v\n", broker.ID(), err)
_ = broker.Close()
client.deregisterBroker(broker)
}
}
if broker != nil {
Logger.Printf("client/metadata not fetching metadata from broker %s as we would go past the metadata timeout\n", broker.addr)
return retry(ErrOutOfBrokers)
}
Logger.Println("client/metadata no available broker to send metadata request to")
client.resurrectDeadBrokers()
return retry(ErrOutOfBrokers)
}
// if no fatal error, returns a list of topics that need retrying due to ErrLeaderNotAvailable
func (client *client) updateMetadata(data *MetadataResponse) (retry bool, err error) {
func (client *client) updateMetadata(data *MetadataResponse, allKnownMetaData bool) (retry bool, err error) {
if client.Closed() {
return
}
client.lock.Lock()
defer client.lock.Unlock()
// For all the brokers we received:
// - if it is a new ID, save it
// - if it is an existing ID, but the address we have is stale, discard the old one and save it
// - if a broker is missing from the response, remove the old broker
// - otherwise ignore it, replacing our existing one would just bounce the connection
for _, broker := range data.Brokers {
client.registerBroker(broker)
}
client.updateBroker(data.Brokers)
client.controllerID = data.ControllerID
if allKnownMetaData {
client.metadata = make(map[string]map[int32]*PartitionMetadata)
client.metadataTopics = make(map[string]none)
client.cachedPartitionsResults = make(map[string][maxPartitionIndex][]int32)
}
for _, topic := range data.Topics {
// topics must be added to `metadataTopics` first, to guarantee that all
// requested topics are recorded and remain trackable for the periodic
// metadata refresh.
if _, exists := client.metadataTopics[topic.Name]; !exists {
client.metadataTopics[topic.Name] = none{}
}
delete(client.metadata, topic.Name)
delete(client.cachedPartitionsResults, topic.Name)
switch topic.Err {
case ErrNoError:
break
// no-op
case ErrInvalidTopic, ErrTopicAuthorizationFailed: // don't retry, don't store partial results
err = topic.Err
continue
@ -702,7 +942,6 @@ func (client *client) updateMetadata(data *MetadataResponse) (retry bool, err er
continue
case ErrLeaderNotAvailable: // retry, but store partial partition results
retry = true
break
default: // don't retry, don't store partial results
Logger.Printf("Unexpected topic-level metadata error: %s", topic.Err)
err = topic.Err
@ -735,11 +974,28 @@ func (client *client) cachedCoordinator(consumerGroup string) *Broker {
return nil
}
func (client *client) getConsumerMetadata(consumerGroup string, attemptsRemaining int) (*ConsumerMetadataResponse, error) {
retry := func(err error) (*ConsumerMetadataResponse, error) {
func (client *client) cachedController() *Broker {
client.lock.RLock()
defer client.lock.RUnlock()
return client.brokers[client.controllerID]
}
func (client *client) computeBackoff(attemptsRemaining int) time.Duration {
if client.conf.Metadata.Retry.BackoffFunc != nil {
maxRetries := client.conf.Metadata.Retry.Max
retries := maxRetries - attemptsRemaining
return client.conf.Metadata.Retry.BackoffFunc(retries, maxRetries)
}
return client.conf.Metadata.Retry.Backoff
}
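computeBackoff prefers a user-supplied BackoffFunc over the fixed Backoff, so callers can plug in their own strategy. A sketch of capped exponential backoff for metadata retries; the 2s cap is an assumption:

cfg := sarama.NewConfig()
cfg.Metadata.Retry.Max = 5
cfg.Metadata.Retry.BackoffFunc = func(retries, maxRetries int) time.Duration {
    b := 100 * time.Millisecond << uint(retries) // 100ms, 200ms, 400ms, ...
    if b > 2*time.Second {
        b = 2 * time.Second // cap each sleep at 2s
    }
    return b
}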
func (client *client) getConsumerMetadata(consumerGroup string, attemptsRemaining int) (*FindCoordinatorResponse, error) {
retry := func(err error) (*FindCoordinatorResponse, error) {
if attemptsRemaining > 0 {
Logger.Printf("client/coordinator retrying after %dms... (%d attempts remaining)\n", client.conf.Metadata.Retry.Backoff/time.Millisecond, attemptsRemaining)
time.Sleep(client.conf.Metadata.Retry.Backoff)
backoff := client.computeBackoff(attemptsRemaining)
Logger.Printf("client/coordinator retrying after %dms... (%d attempts remaining)\n", backoff/time.Millisecond, attemptsRemaining)
time.Sleep(backoff)
return client.getConsumerMetadata(consumerGroup, attemptsRemaining-1)
}
return nil, err
@ -748,10 +1004,11 @@ func (client *client) getConsumerMetadata(consumerGroup string, attemptsRemainin
for broker := client.any(); broker != nil; broker = client.any() {
Logger.Printf("client/coordinator requesting coordinator for consumergroup %s from %s\n", consumerGroup, broker.Addr())
request := new(ConsumerMetadataRequest)
request.ConsumerGroup = consumerGroup
request := new(FindCoordinatorRequest)
request.CoordinatorKey = consumerGroup
request.CoordinatorType = CoordinatorGroup
response, err := broker.GetConsumerMetadata(request)
response, err := broker.FindCoordinator(request)
if err != nil {
Logger.Printf("client/coordinator request to broker %s failed: %s\n", broker.Addr(), err)
@ -783,6 +1040,10 @@ func (client *client) getConsumerMetadata(consumerGroup string, attemptsRemainin
}
return retry(ErrConsumerCoordinatorNotAvailable)
case ErrGroupAuthorizationFailed:
Logger.Printf("client was not authorized to access group %s while attempting to find coordinator", consumerGroup)
return retry(ErrGroupAuthorizationFailed)
default:
return nil, response.Err
}
@ -792,3 +1053,18 @@ func (client *client) getConsumerMetadata(consumerGroup string, attemptsRemainin
client.resurrectDeadBrokers()
return retry(ErrOutOfBrokers)
}
// nopCloserClient embeds an existing Client, but disables
// the Close method (yet all other methods pass
// through unchanged). This is for use in larger structs
// where it is undesirable to close the client that was
// passed in by the caller.
type nopCloserClient struct {
Client
}
// Close intercepts and purposely does not call the underlying
// client's Close() method.
func (ncc *nopCloserClient) Close() error {
return nil
}
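A sketch of the embed-and-override pattern nopCloserClient relies on; within this package, given an existing Client value, wrapping it makes internal Close() calls harmless:

safe := &nopCloserClient{Client: client}
_ = safe.Close() // no-op; the embedded client stays open for the caller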

vendor/github.com/Shopify/sarama/compress.go generated vendored Normal file

@ -0,0 +1,194 @@
package sarama
import (
"bytes"
"compress/gzip"
"fmt"
"sync"
snappy "github.com/eapache/go-xerial-snappy"
"github.com/pierrec/lz4"
)
var (
lz4WriterPool = sync.Pool{
New: func() interface{} {
return lz4.NewWriter(nil)
},
}
gzipWriterPool = sync.Pool{
New: func() interface{} {
return gzip.NewWriter(nil)
},
}
gzipWriterPoolForCompressionLevel1 = sync.Pool{
New: func() interface{} {
gz, err := gzip.NewWriterLevel(nil, 1)
if err != nil {
panic(err)
}
return gz
},
}
gzipWriterPoolForCompressionLevel2 = sync.Pool{
New: func() interface{} {
gz, err := gzip.NewWriterLevel(nil, 2)
if err != nil {
panic(err)
}
return gz
},
}
gzipWriterPoolForCompressionLevel3 = sync.Pool{
New: func() interface{} {
gz, err := gzip.NewWriterLevel(nil, 3)
if err != nil {
panic(err)
}
return gz
},
}
gzipWriterPoolForCompressionLevel4 = sync.Pool{
New: func() interface{} {
gz, err := gzip.NewWriterLevel(nil, 4)
if err != nil {
panic(err)
}
return gz
},
}
gzipWriterPoolForCompressionLevel5 = sync.Pool{
New: func() interface{} {
gz, err := gzip.NewWriterLevel(nil, 5)
if err != nil {
panic(err)
}
return gz
},
}
gzipWriterPoolForCompressionLevel6 = sync.Pool{
New: func() interface{} {
gz, err := gzip.NewWriterLevel(nil, 6)
if err != nil {
panic(err)
}
return gz
},
}
gzipWriterPoolForCompressionLevel7 = sync.Pool{
New: func() interface{} {
gz, err := gzip.NewWriterLevel(nil, 7)
if err != nil {
panic(err)
}
return gz
},
}
gzipWriterPoolForCompressionLevel8 = sync.Pool{
New: func() interface{} {
gz, err := gzip.NewWriterLevel(nil, 8)
if err != nil {
panic(err)
}
return gz
},
}
gzipWriterPoolForCompressionLevel9 = sync.Pool{
New: func() interface{} {
gz, err := gzip.NewWriterLevel(nil, 9)
if err != nil {
panic(err)
}
return gz
},
}
)
func compress(cc CompressionCodec, level int, data []byte) ([]byte, error) {
switch cc {
case CompressionNone:
return data, nil
case CompressionGZIP:
var (
err error
buf bytes.Buffer
writer *gzip.Writer
)
switch level {
case CompressionLevelDefault:
writer = gzipWriterPool.Get().(*gzip.Writer)
defer gzipWriterPool.Put(writer)
writer.Reset(&buf)
case 1:
writer = gzipWriterPoolForCompressionLevel1.Get().(*gzip.Writer)
defer gzipWriterPoolForCompressionLevel1.Put(writer)
writer.Reset(&buf)
case 2:
writer = gzipWriterPoolForCompressionLevel2.Get().(*gzip.Writer)
defer gzipWriterPoolForCompressionLevel2.Put(writer)
writer.Reset(&buf)
case 3:
writer = gzipWriterPoolForCompressionLevel3.Get().(*gzip.Writer)
defer gzipWriterPoolForCompressionLevel3.Put(writer)
writer.Reset(&buf)
case 4:
writer = gzipWriterPoolForCompressionLevel4.Get().(*gzip.Writer)
defer gzipWriterPoolForCompressionLevel4.Put(writer)
writer.Reset(&buf)
case 5:
writer = gzipWriterPoolForCompressionLevel5.Get().(*gzip.Writer)
defer gzipWriterPoolForCompressionLevel5.Put(writer)
writer.Reset(&buf)
case 6:
writer = gzipWriterPoolForCompressionLevel6.Get().(*gzip.Writer)
defer gzipWriterPoolForCompressionLevel6.Put(writer)
writer.Reset(&buf)
case 7:
writer = gzipWriterPoolForCompressionLevel7.Get().(*gzip.Writer)
defer gzipWriterPoolForCompressionLevel7.Put(writer)
writer.Reset(&buf)
case 8:
writer = gzipWriterPoolForCompressionLevel8.Get().(*gzip.Writer)
defer gzipWriterPoolForCompressionLevel8.Put(writer)
writer.Reset(&buf)
case 9:
writer = gzipWriterPoolForCompressionLevel9.Get().(*gzip.Writer)
defer gzipWriterPoolForCompressionLevel9.Put(writer)
writer.Reset(&buf)
default:
writer, err = gzip.NewWriterLevel(&buf, level)
if err != nil {
return nil, err
}
}
if _, err := writer.Write(data); err != nil {
return nil, err
}
if err := writer.Close(); err != nil {
return nil, err
}
return buf.Bytes(), nil
case CompressionSnappy:
return snappy.Encode(data), nil
case CompressionLZ4:
writer := lz4WriterPool.Get().(*lz4.Writer)
defer lz4WriterPool.Put(writer)
var buf bytes.Buffer
writer.Reset(&buf)
if _, err := writer.Write(data); err != nil {
return nil, err
}
if err := writer.Close(); err != nil {
return nil, err
}
return buf.Bytes(), nil
case CompressionZSTD:
return zstdCompress(nil, data)
default:
return nil, PacketEncodingError{fmt.Sprintf("unsupported compression codec (%d)", cc)}
}
}
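compress dispatches on the codec and, for gzip, reuses a pooled writer for the requested level rather than allocating one per batch. On the producer side the codec and level come straight from the config; the level chosen here is an assumption:

cfg := sarama.NewConfig()
cfg.Producer.Compression = sarama.CompressionGZIP
cfg.Producer.CompressionLevel = 6 // 1..9; CompressionLevelDefault uses the stdlib default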

vendor/github.com/Shopify/sarama/config.go

@ -1,11 +1,16 @@
package sarama
import (
"compress/gzip"
"crypto/tls"
"fmt"
"io/ioutil"
"net"
"regexp"
"time"
"github.com/rcrowley/go-metrics"
"golang.org/x/net/proxy"
)
const defaultClientID = "sarama"
@ -14,6 +19,20 @@ var validID = regexp.MustCompile(`\A[A-Za-z0-9._-]+\z`)
// Config is used to pass multiple configuration options to Sarama's constructors.
type Config struct {
// Admin is the namespace for ClusterAdmin properties used by the administrative Kafka client.
Admin struct {
Retry struct {
// The total number of times to retry sending (retriable) admin requests (default 5).
// Similar to the `retries` setting of the JVM AdminClientConfig.
Max int
// Backoff time between retries of a failed request (default 100ms)
Backoff time.Duration
}
// The maximum duration the administrative Kafka client will wait for ClusterAdmin operations,
// including topics, brokers, configurations and ACLs (defaults to 3 seconds).
Timeout time.Duration
}
// Net is the namespace for network-level properties used by the Broker, and
// shared by the Client/Producer/Consumer.
Net struct {
@ -43,18 +62,58 @@ type Config struct {
// Whether or not to use SASL authentication when connecting to the broker
// (defaults to false).
Enable bool
// SASLMechanism is the name of the enabled SASL mechanism.
// Possible values: OAUTHBEARER, PLAIN (defaults to PLAIN).
Mechanism SASLMechanism
// Version is the SASL Protocol Version to use
// Kafka > 1.x should use V1, except on Azure EventHub which use V0
Version int16
// Whether or not to send the Kafka SASL handshake first if enabled
// (defaults to true). You should only set this to false if you're using
// a non-Kafka SASL proxy.
Handshake bool
//username and password for SASL/PLAIN authentication
User string
// AuthIdentity is an (optional) authorization identity (authzid) to
// use for SASL/PLAIN authentication (if different from User) when
// an authenticated user is permitted to act as the presented
// alternative user. See RFC4616 for details.
AuthIdentity string
// User is the authentication identity (authcid) to present for
// SASL/PLAIN or SASL/SCRAM authentication
User string
// Password for SASL/PLAIN authentication
Password string
// authz id used for SASL/SCRAM authentication
SCRAMAuthzID string
// SCRAMClientGeneratorFunc is a generator of a user provided implementation of a SCRAM
// client used to perform the SCRAM exchange with the server.
SCRAMClientGeneratorFunc func() SCRAMClient
// TokenProvider is a user-defined callback for generating
// access tokens for SASL/OAUTHBEARER auth. See the
// AccessTokenProvider interface docs for proper implementation
// guidelines.
TokenProvider AccessTokenProvider
GSSAPI GSSAPIConfig
}
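The expanded SASL block selects a mechanism explicitly. A minimal SASL/PLAIN sketch; the credentials are placeholders, and enabling TLS alongside is a recommendation rather than a requirement:

cfg := sarama.NewConfig()
cfg.Net.SASL.Enable = true
cfg.Net.SASL.Mechanism = sarama.SASLTypePlaintext
cfg.Net.SASL.User = "svc-tile38" // placeholder credentials
cfg.Net.SASL.Password = "secret"
cfg.Net.TLS.Enable = true // PLAIN sends the password in the clear without TLS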
// KeepAlive specifies the keep-alive period for an active network connection.
// If zero, keep-alives are disabled. (default is 0: disabled).
// KeepAlive specifies the keep-alive period for an active network connection (defaults to 0).
// If zero or positive, keep-alives are enabled.
// If negative, keep-alives are disabled.
KeepAlive time.Duration
// LocalAddr is the local address to use when dialing an
// address. The address must be of a compatible type for the
// network being dialed.
// If nil, a local address is automatically chosen.
LocalAddr net.Addr
Proxy struct {
// Whether or not to use proxy when connecting to the broker
// (defaults to false).
Enable bool
// The proxy dialer to use when enabled (defaults to nil).
Dialer proxy.Dialer
}
}
// Metadata is the namespace for metadata management properties used by the
@ -67,6 +126,10 @@ type Config struct {
// How long to wait for leader election to occur before retrying
// (default 250ms). Similar to the JVM's `retry.backoff.ms`.
Backoff time.Duration
// Called to compute backoff time dynamically. Useful for implementing
// more sophisticated backoff strategies. This takes precedence over
// `Backoff` if set.
BackoffFunc func(retries, maxRetries int) time.Duration
}
// How frequently to refresh the cluster metadata in the background.
// Defaults to 10 minutes. Set to 0 to disable. Similar to
@ -78,6 +141,13 @@ type Config struct {
// and usually more convenient, but can take up a substantial amount of
// memory if you have many topics and partitions. Defaults to true.
Full bool
// How long to wait for a successful metadata response.
// Disabled by default which means a metadata request against an unreachable
// cluster (all brokers are unreachable or unresponsive) can take up to
// `Net.[Dial|Read]Timeout * BrokerCount * (Metadata.Retry.Max + 1) + Metadata.Retry.Backoff * Metadata.Retry.Max`
// to fail.
Timeout time.Duration
}
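To make that worst case concrete under the defaults set below (30s dial/read timeouts, Metadata.Retry.Max = 3, Metadata.Retry.Backoff = 250ms) and an assumed three-broker cluster: 30s * 3 * (3 + 1) + 250ms * 3 = 360.75s before the request can fail; setting Metadata.Timeout caps that wait.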
// Producer is the namespace for configuration related to producing messages,
@ -99,10 +169,17 @@ type Config struct {
// The type of compression to use on messages (defaults to no compression).
// Similar to `compression.codec` setting of the JVM producer.
Compression CompressionCodec
// The level of compression to use on messages. The meaning depends
// on the actual compression type used and defaults to default compression
// level for the codec.
CompressionLevel int
// Generates partitioners for choosing the partition to send messages to
// (defaults to hashing the message key). Similar to the `partitioner.class`
// setting for the JVM producer.
Partitioner PartitionerConstructor
// If enabled, the producer will ensure that exactly one copy of each message is
// written.
Idempotent bool
// Return specifies what channels will be populated. If they are set to true,
// you must read from the respective channels to prevent deadlock. If,
@ -147,23 +224,72 @@ type Config struct {
// (default 100ms). Similar to the `retry.backoff.ms` setting of the
// JVM producer.
Backoff time.Duration
// Called to compute backoff time dynamically. Useful for implementing
// more sophisticated backoff strategies. This takes precedence over
// `Backoff` if set.
BackoffFunc func(retries, maxRetries int) time.Duration
}
}
// Consumer is the namespace for configuration related to consuming messages,
// used by the Consumer.
//
// Note that Sarama's Consumer type does not currently support automatic
// consumer-group rebalancing and offset tracking. For Zookeeper-based
// tracking (Kafka 0.8.2 and earlier), the https://github.com/wvanbergen/kafka
// library builds on Sarama to add this support. For Kafka-based tracking
// (Kafka 0.9 and later), the https://github.com/bsm/sarama-cluster library
// builds on Sarama to add this support.
Consumer struct {
// Group is the namespace for configuring consumer group.
Group struct {
Session struct {
// The timeout used to detect consumer failures when using Kafka's group management facility.
// The consumer sends periodic heartbeats to indicate its liveness to the broker.
// If no heartbeats are received by the broker before the expiration of this session timeout,
// then the broker will remove this consumer from the group and initiate a rebalance.
// Note that the value must be in the allowable range as configured in the broker configuration
// by `group.min.session.timeout.ms` and `group.max.session.timeout.ms` (default 10s)
Timeout time.Duration
}
Heartbeat struct {
// The expected time between heartbeats to the consumer coordinator when using Kafka's group
// management facilities. Heartbeats are used to ensure that the consumer's session stays active and
// to facilitate rebalancing when new consumers join or leave the group.
// The value must be set lower than Consumer.Group.Session.Timeout, but typically should be set no
// higher than 1/3 of that value.
// It can be adjusted even lower to control the expected time for normal rebalances (default 3s)
Interval time.Duration
}
Rebalance struct {
// Strategy for allocating topic partitions to members (default BalanceStrategyRange)
Strategy BalanceStrategy
// The maximum allowed time for each worker to join the group once a rebalance has begun.
// This is basically a limit on the amount of time needed for all tasks to flush any pending
// data and commit offsets. If the timeout is exceeded, then the worker will be removed from
// the group, which will cause offset commit failures (default 60s).
Timeout time.Duration
Retry struct {
// When a new consumer joins a consumer group the set of consumers attempt to "rebalance"
// the load to assign partitions to each consumer. If the set of consumers changes while
// this assignment is taking place the rebalance will fail and retry. This setting controls
// the maximum number of attempts before giving up (default 4).
Max int
// Backoff time between retries during rebalance (default 2s)
Backoff time.Duration
}
}
Member struct {
// Custom metadata to include when joining the group. The user data for all joined members
// can be retrieved by sending a DescribeGroupRequest to the broker that is the
// coordinator for the group.
UserData []byte
}
}
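A sketch tying the group settings together, keeping Heartbeat.Interval at no more than a third of Session.Timeout as the comments above advise; the concrete values are assumptions:

cfg := sarama.NewConfig()
cfg.Version = sarama.V0_10_2_0 // consumer groups need a group-aware broker version
cfg.Consumer.Group.Session.Timeout = 15 * time.Second
cfg.Consumer.Group.Heartbeat.Interval = 5 * time.Second // Session.Timeout / 3
cfg.Consumer.Group.Rebalance.Strategy = sarama.BalanceStrategyRoundRobin
cfg.Consumer.Group.Rebalance.Timeout = 60 * time.Second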
Retry struct {
// How long to wait after a failing to read from a partition before
// trying again (default 2s).
Backoff time.Duration
// Called to compute backoff time dynamically. Useful for implementing
// more sophisticated backoff strategies. This takes precedence over
// `Backoff` if set.
BackoffFunc func(retries int) time.Duration
}
// Fetch is the namespace for controlling how many bytes are retrieved by any
@ -175,7 +301,7 @@ type Config struct {
// Equivalent to the JVM's `fetch.min.bytes`.
Min int32
// The default number of message bytes to fetch from the broker in each
// request (default 32768). This should be larger than the majority of
// request (default 1MB). This should be larger than the majority of
// your messages, or else the consumer will spend a lot of time
// negotiating sizes and not actually consuming. Similar to the JVM's
// `fetch.message.max.bytes`.
@ -201,7 +327,7 @@ type Config struct {
// than this, that partition will stop fetching more messages until it
// can proceed again.
// Note that, since the Messages channel is buffered, the actual grace time is
// (MaxProcessingTime * ChanneBufferSize). Defaults to 100ms.
// (MaxProcessingTime * ChannelBufferSize). Defaults to 100ms.
// If a message is not written to the Messages channel between two ticks
// of the expiryTicker then a timeout is detected.
// Using a ticker instead of a timer to detect timeouts should typically
@ -227,9 +353,21 @@ type Config struct {
// offsets. This currently requires the manual use of an OffsetManager
// but will eventually be automated.
Offsets struct {
// How frequently to commit updated offsets. Defaults to 1s.
// Deprecated: CommitInterval exists for historical compatibility
// and should not be used. Please use Consumer.Offsets.AutoCommit
CommitInterval time.Duration
// AutoCommit specifies configuration for committing updated offsets automatically.
AutoCommit struct {
// Whether or not to auto-commit updated offsets back to the broker.
// (default enabled).
Enable bool
// How frequently to commit updated offsets. Ineffective unless
// auto-commit is enabled (default 1s)
Interval time.Duration
}
// The initial offset to use if no offset was previously committed.
// Should be OffsetNewest or OffsetOldest. Defaults to OffsetNewest.
Initial int64
@ -241,13 +379,28 @@ type Config struct {
// broker version 0.9.0 or later.
// (default is 0: disabled).
Retention time.Duration
Retry struct {
// The total number of times to retry failing commit
// requests during OffsetManager shutdown (default 3).
Max int
}
}
// IsolationLevel supports two modes:
// - use `ReadUncommitted` (default) to consume and return all messages in the message channel
// - use `ReadCommitted` to hide messages that are part of an aborted transaction
IsolationLevel IsolationLevel
}
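A sketch of the offsets and isolation settings together; ReadCommitted needs Version >= V0_11_0_0, as Validate() enforces below:

cfg := sarama.NewConfig()
cfg.Version = sarama.V0_11_0_0
cfg.Consumer.Offsets.Initial = sarama.OffsetOldest
cfg.Consumer.Offsets.AutoCommit.Enable = false     // commit explicitly instead
cfg.Consumer.IsolationLevel = sarama.ReadCommitted // hide aborted transactions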
// A user-provided string sent with every request to the brokers for logging,
// debugging, and auditing purposes. Defaults to "sarama", but you should
// probably set it to something specific to your application.
ClientID string
// A rack identifier for this client. This can be any string value which
// indicates where this client is physically located.
// It corresponds with the broker config 'broker.rack'
RackID string
// The number of events to buffer in internal and external channels. This
// permits the producer and consumer to continue processing some messages
// in the background while user code is working, greatly improving throughput.
@ -272,11 +425,16 @@ type Config struct {
func NewConfig() *Config {
c := &Config{}
c.Admin.Retry.Max = 5
c.Admin.Retry.Backoff = 100 * time.Millisecond
c.Admin.Timeout = 3 * time.Second
c.Net.MaxOpenRequests = 5
c.Net.DialTimeout = 30 * time.Second
c.Net.ReadTimeout = 30 * time.Second
c.Net.WriteTimeout = 30 * time.Second
c.Net.SASL.Handshake = true
c.Net.SASL.Version = SASLHandshakeV0
c.Metadata.Retry.Max = 3
c.Metadata.Retry.Backoff = 250 * time.Millisecond
@ -290,19 +448,29 @@ func NewConfig() *Config {
c.Producer.Retry.Max = 3
c.Producer.Retry.Backoff = 100 * time.Millisecond
c.Producer.Return.Errors = true
c.Producer.CompressionLevel = CompressionLevelDefault
c.Consumer.Fetch.Min = 1
c.Consumer.Fetch.Default = 32768
c.Consumer.Fetch.Default = 1024 * 1024
c.Consumer.Retry.Backoff = 2 * time.Second
c.Consumer.MaxWaitTime = 250 * time.Millisecond
c.Consumer.MaxProcessingTime = 100 * time.Millisecond
c.Consumer.Return.Errors = false
c.Consumer.Offsets.CommitInterval = 1 * time.Second
c.Consumer.Offsets.AutoCommit.Enable = true
c.Consumer.Offsets.AutoCommit.Interval = 1 * time.Second
c.Consumer.Offsets.Initial = OffsetNewest
c.Consumer.Offsets.Retry.Max = 3
c.Consumer.Group.Session.Timeout = 10 * time.Second
c.Consumer.Group.Heartbeat.Interval = 3 * time.Second
c.Consumer.Group.Rebalance.Strategy = BalanceStrategyRange
c.Consumer.Group.Rebalance.Timeout = 60 * time.Second
c.Consumer.Group.Rebalance.Retry.Max = 4
c.Consumer.Group.Rebalance.Retry.Backoff = 2 * time.Second
c.ClientID = defaultClientID
c.ChannelBufferSize = 256
c.Version = minVersion
c.Version = MinVersion
c.MetricRegistry = metrics.NewRegistry()
return c
@ -312,10 +480,10 @@ func NewConfig() *Config {
// ConfigurationError if the specified values don't make sense.
func (c *Config) Validate() error {
// some configuration values should be warned on but not fail completely, do those first
if c.Net.TLS.Enable == false && c.Net.TLS.Config != nil {
if !c.Net.TLS.Enable && c.Net.TLS.Config != nil {
Logger.Println("Net.TLS is disabled but a non-nil configuration was provided.")
}
if c.Net.SASL.Enable == false {
if !c.Net.SASL.Enable {
if c.Net.SASL.User != "" {
Logger.Println("Net.SASL is disabled but a non-empty username was provided.")
}
@ -347,6 +515,15 @@ func (c *Config) Validate() error {
if c.Consumer.Offsets.Retention%time.Millisecond != 0 {
Logger.Println("Consumer.Offsets.Retention only supports millisecond precision; nanoseconds will be truncated.")
}
if c.Consumer.Group.Session.Timeout%time.Millisecond != 0 {
Logger.Println("Consumer.Group.Session.Timeout only supports millisecond precision; nanoseconds will be truncated.")
}
if c.Consumer.Group.Heartbeat.Interval%time.Millisecond != 0 {
Logger.Println("Consumer.Group.Heartbeat.Interval only supports millisecond precision; nanoseconds will be truncated.")
}
if c.Consumer.Group.Rebalance.Timeout%time.Millisecond != 0 {
Logger.Println("Consumer.Group.Rebalance.Timeout only supports millisecond precision; nanoseconds will be truncated.")
}
if c.ClientID == defaultClientID {
Logger.Println("ClientID is the default of 'sarama', you should consider setting it to something application-specific.")
}
@ -361,12 +538,71 @@ func (c *Config) Validate() error {
return ConfigurationError("Net.ReadTimeout must be > 0")
case c.Net.WriteTimeout <= 0:
return ConfigurationError("Net.WriteTimeout must be > 0")
case c.Net.KeepAlive < 0:
return ConfigurationError("Net.KeepAlive must be >= 0")
case c.Net.SASL.Enable == true && c.Net.SASL.User == "":
return ConfigurationError("Net.SASL.User must not be empty when SASL is enabled")
case c.Net.SASL.Enable == true && c.Net.SASL.Password == "":
return ConfigurationError("Net.SASL.Password must not be empty when SASL is enabled")
case c.Net.SASL.Enable:
if c.Net.SASL.Mechanism == "" {
c.Net.SASL.Mechanism = SASLTypePlaintext
}
switch c.Net.SASL.Mechanism {
case SASLTypePlaintext:
if c.Net.SASL.User == "" {
return ConfigurationError("Net.SASL.User must not be empty when SASL is enabled")
}
if c.Net.SASL.Password == "" {
return ConfigurationError("Net.SASL.Password must not be empty when SASL is enabled")
}
case SASLTypeOAuth:
if c.Net.SASL.TokenProvider == nil {
return ConfigurationError("An AccessTokenProvider instance must be provided to Net.SASL.TokenProvider")
}
case SASLTypeSCRAMSHA256, SASLTypeSCRAMSHA512:
if c.Net.SASL.User == "" {
return ConfigurationError("Net.SASL.User must not be empty when SASL is enabled")
}
if c.Net.SASL.Password == "" {
return ConfigurationError("Net.SASL.Password must not be empty when SASL is enabled")
}
if c.Net.SASL.SCRAMClientGeneratorFunc == nil {
return ConfigurationError("A SCRAMClientGeneratorFunc function must be provided to Net.SASL.SCRAMClientGeneratorFunc")
}
case SASLTypeGSSAPI:
if c.Net.SASL.GSSAPI.ServiceName == "" {
return ConfigurationError("Net.SASL.GSSAPI.ServiceName must not be empty when GSS-API mechanism is used")
}
if c.Net.SASL.GSSAPI.AuthType == KRB5_USER_AUTH {
if c.Net.SASL.GSSAPI.Password == "" {
return ConfigurationError("Net.SASL.GSSAPI.Password must not be empty when GSS-API " +
"mechanism is used and Net.SASL.GSSAPI.AuthType = KRB5_USER_AUTH")
}
} else if c.Net.SASL.GSSAPI.AuthType == KRB5_KEYTAB_AUTH {
if c.Net.SASL.GSSAPI.KeyTabPath == "" {
return ConfigurationError("Net.SASL.GSSAPI.KeyTabPath must not be empty when GSS-API mechanism is used" +
" and Net.SASL.GSSAPI.AuthType = KRB5_KEYTAB_AUTH")
}
} else {
return ConfigurationError("Net.SASL.GSSAPI.AuthType is invalid. Possible values are KRB5_USER_AUTH and KRB5_KEYTAB_AUTH")
}
if c.Net.SASL.GSSAPI.KerberosConfigPath == "" {
return ConfigurationError("Net.SASL.GSSAPI.KerberosConfigPath must not be empty when GSS-API mechanism is used")
}
if c.Net.SASL.GSSAPI.Username == "" {
return ConfigurationError("Net.SASL.GSSAPI.Username must not be empty when GSS-API mechanism is used")
}
if c.Net.SASL.GSSAPI.Realm == "" {
return ConfigurationError("Net.SASL.GSSAPI.Realm must not be empty when GSS-API mechanism is used")
}
default:
msg := fmt.Sprintf("The SASL mechanism configuration is invalid. Possible values are `%s`, `%s`, `%s`, `%s` and `%s`",
SASLTypeOAuth, SASLTypePlaintext, SASLTypeSCRAMSHA256, SASLTypeSCRAMSHA512, SASLTypeGSSAPI)
return ConfigurationError(msg)
}
}
// validate the Admin values
switch {
case c.Admin.Timeout <= 0:
return ConfigurationError("Admin.Timeout must be > 0")
}
// validate the Metadata values
@ -409,6 +645,33 @@ func (c *Config) Validate() error {
return ConfigurationError("lz4 compression requires Version >= V0_10_0_0")
}
if c.Producer.Compression == CompressionGZIP {
if c.Producer.CompressionLevel != CompressionLevelDefault {
if _, err := gzip.NewWriterLevel(ioutil.Discard, c.Producer.CompressionLevel); err != nil {
return ConfigurationError(fmt.Sprintf("gzip compression does not work with level %d: %v", c.Producer.CompressionLevel, err))
}
}
}
if c.Producer.Compression == CompressionZSTD && !c.Version.IsAtLeast(V2_1_0_0) {
return ConfigurationError("zstd compression requires Version >= V2_1_0_0")
}
if c.Producer.Idempotent {
if !c.Version.IsAtLeast(V0_11_0_0) {
return ConfigurationError("Idempotent producer requires Version >= V0_11_0_0")
}
if c.Producer.Retry.Max == 0 {
return ConfigurationError("Idempotent producer requires Producer.Retry.Max >= 1")
}
if c.Producer.RequiredAcks != WaitForAll {
return ConfigurationError("Idempotent producer requires Producer.RequiredAcks to be WaitForAll")
}
if c.Net.MaxOpenRequests > 1 {
return ConfigurationError("Idempotent producer requires Net.MaxOpenRequests to be 1")
}
}
// validate the Consumer values
switch {
case c.Consumer.Fetch.Min <= 0:
@ -423,11 +686,42 @@ func (c *Config) Validate() error {
return ConfigurationError("Consumer.MaxProcessingTime must be > 0")
case c.Consumer.Retry.Backoff < 0:
return ConfigurationError("Consumer.Retry.Backoff must be >= 0")
case c.Consumer.Offsets.CommitInterval <= 0:
return ConfigurationError("Consumer.Offsets.CommitInterval must be > 0")
case c.Consumer.Offsets.AutoCommit.Interval <= 0:
return ConfigurationError("Consumer.Offsets.AutoCommit.Interval must be > 0")
case c.Consumer.Offsets.Initial != OffsetOldest && c.Consumer.Offsets.Initial != OffsetNewest:
return ConfigurationError("Consumer.Offsets.Initial must be OffsetOldest or OffsetNewest")
case c.Consumer.Offsets.Retry.Max < 0:
return ConfigurationError("Consumer.Offsets.Retry.Max must be >= 0")
case c.Consumer.IsolationLevel != ReadUncommitted && c.Consumer.IsolationLevel != ReadCommitted:
return ConfigurationError("Consumer.IsolationLevel must be ReadUncommitted or ReadCommitted")
}
if c.Consumer.Offsets.CommitInterval != 0 {
Logger.Println("Deprecation warning: Consumer.Offsets.CommitInterval exists for historical compatibility" +
" and should not be used. Please use Consumer.Offsets.AutoCommit, the current value will be ignored")
}
// validate IsolationLevel
if c.Consumer.IsolationLevel == ReadCommitted && !c.Version.IsAtLeast(V0_11_0_0) {
return ConfigurationError("ReadCommitted requires Version >= V0_11_0_0")
}
// validate the Consumer Group values
switch {
case c.Consumer.Group.Session.Timeout <= 2*time.Millisecond:
return ConfigurationError("Consumer.Group.Session.Timeout must be >= 2ms")
case c.Consumer.Group.Heartbeat.Interval < 1*time.Millisecond:
return ConfigurationError("Consumer.Group.Heartbeat.Interval must be >= 1ms")
case c.Consumer.Group.Heartbeat.Interval >= c.Consumer.Group.Session.Timeout:
return ConfigurationError("Consumer.Group.Heartbeat.Interval must be < Consumer.Group.Session.Timeout")
case c.Consumer.Group.Rebalance.Strategy == nil:
return ConfigurationError("Consumer.Group.Rebalance.Strategy must not be empty")
case c.Consumer.Group.Rebalance.Timeout <= time.Millisecond:
return ConfigurationError("Consumer.Group.Rebalance.Timeout must be >= 1ms")
case c.Consumer.Group.Rebalance.Retry.Max < 0:
return ConfigurationError("Consumer.Group.Rebalance.Retry.Max must be >= 0")
case c.Consumer.Group.Rebalance.Retry.Backoff < 0:
return ConfigurationError("Consumer.Group.Rebalance.Retry.Backoff must be >= 0")
}
// validate misc shared values
@ -440,3 +734,16 @@ func (c *Config) Validate() error {
return nil
}
func (c *Config) getDialer() proxy.Dialer {
if c.Net.Proxy.Enable {
Logger.Printf("using proxy %s", c.Net.Proxy.Dialer)
return c.Net.Proxy.Dialer
} else {
return &net.Dialer{
Timeout: c.Net.DialTimeout,
KeepAlive: c.Net.KeepAlive,
LocalAddr: c.Net.LocalAddr,
}
}
}
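Taken together, the validation rules above pin down what a working Config looks like. A minimal sketch, assuming sarama's public API as vendored here (all concrete values are illustrative, not from this diff):

package example

import (
	"time"

	"github.com/Shopify/sarama"
)

func newValidatedConfig() (*sarama.Config, error) {
	cfg := sarama.NewConfig()
	cfg.Version = sarama.V2_1_0_0 // also high enough for zstd, were it enabled

	// Idempotent producer: Validate requires all four of these together.
	cfg.Producer.Idempotent = true
	cfg.Producer.Retry.Max = 5
	cfg.Producer.RequiredAcks = sarama.WaitForAll
	cfg.Net.MaxOpenRequests = 1

	// Gzip levels are checked eagerly via gzip.NewWriterLevel above.
	cfg.Producer.Compression = sarama.CompressionGZIP
	cfg.Producer.CompressionLevel = 6

	// Prefer AutoCommit over the deprecated CommitInterval.
	cfg.Consumer.Offsets.AutoCommit.Enable = true
	cfg.Consumer.Offsets.AutoCommit.Interval = time.Second

	if err := cfg.Validate(); err != nil {
		return nil, err
	}
	return cfg, nil
}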

View File

@ -0,0 +1,18 @@
package sarama
// ConfigResourceType is a type for resources that have configs.
type ConfigResourceType int8
// Taken from:
// https://github.com/apache/kafka/blob/ed7c071e07f1f90e4c2895582f61ca090ced3c42/clients/src/main/java/org/apache/kafka/common/config/ConfigResource.java#L32-L55
const (
// UnknownResource constant type
UnknownResource ConfigResourceType = 0
// TopicResource constant type
TopicResource ConfigResourceType = 2
// BrokerResource constant type
BrokerResource ConfigResourceType = 4
// BrokerLoggerResource constant type
BrokerLoggerResource ConfigResourceType = 8
)

View File

@ -3,19 +3,24 @@ package sarama
import (
"errors"
"fmt"
"math"
"sync"
"sync/atomic"
"time"
"github.com/rcrowley/go-metrics"
)
// ConsumerMessage encapsulates a Kafka message returned by the consumer.
type ConsumerMessage struct {
Key, Value []byte
Topic string
Partition int32
Offset int64
Timestamp time.Time // only set if kafka is version 0.10+, inner message timestamp
BlockTimestamp time.Time // only set if kafka is version 0.10+, outer (compressed) block timestamp
Headers []*RecordHeader // only set if kafka is version 0.11+
Timestamp time.Time // only set if kafka is version 0.10+, inner message timestamp
BlockTimestamp time.Time // only set if kafka is version 0.10+, outer (compressed) block timestamp
Key, Value []byte
Topic string
Partition int32
Offset int64
}
// ConsumerError is what is provided to the user when an error occurs.
@ -42,13 +47,7 @@ func (ce ConsumerErrors) Error() string {
// Consumer manages PartitionConsumers which process Kafka messages from brokers. You MUST call Close()
// on a consumer to avoid leaks, it will not be garbage-collected automatically when it passes out of
// scope.
//
// Sarama's Consumer type does not currently support automatic consumer-group rebalancing and offset tracking.
// For Zookeeper-based tracking (Kafka 0.8.2 and earlier), the https://github.com/wvanbergen/kafka library
// builds on Sarama to add this support. For Kafka-based tracking (Kafka 0.9 and later), the
// https://github.com/bsm/sarama-cluster library builds on Sarama to add this support.
type Consumer interface {
// Topics returns the set of available topics as retrieved from the cluster
// metadata. This method is the same as Client.Topics(), and is provided for
// convenience.
@ -74,13 +73,11 @@ type Consumer interface {
}
type consumer struct {
client Client
conf *Config
ownClient bool
lock sync.Mutex
conf *Config
children map[string]map[int32]*partitionConsumer
brokerConsumers map[*Broker]*brokerConsumer
client Client
lock sync.Mutex
}
// NewConsumer creates a new consumer using the given broker addresses and configuration.
@ -89,18 +86,19 @@ func NewConsumer(addrs []string, config *Config) (Consumer, error) {
if err != nil {
return nil, err
}
c, err := NewConsumerFromClient(client)
if err != nil {
return nil, err
}
c.(*consumer).ownClient = true
return c, nil
return newConsumer(client)
}
// NewConsumerFromClient creates a new consumer using the given client. It is still
// necessary to call Close() on the underlying client when shutting down this consumer.
func NewConsumerFromClient(client Client) (Consumer, error) {
// For clients passed in by the caller, ensure we don't
// call Close() on them.
cli := &nopCloserClient{client}
return newConsumer(cli)
}
func newConsumer(client Client) (Consumer, error) {
// Check that we are not dealing with a closed Client before processing any other arguments
if client.Closed() {
return nil, ErrClosedClient
@ -117,10 +115,7 @@ func NewConsumerFromClient(client Client) (Consumer, error) {
}
func (c *consumer) Close() error {
if c.ownClient {
return c.client.Close()
}
return nil
return c.client.Close()
}
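With the nopCloserClient wrapper above, Close on a consumer built from a caller-supplied client no longer closes that client, so the caller owns both lifetimes. A usage sketch under that assumption (broker address illustrative):

package example

import "github.com/Shopify/sarama"

func withSharedClient() error {
	client, err := sarama.NewClient([]string{"localhost:9092"}, sarama.NewConfig())
	if err != nil {
		return err
	}
	defer client.Close() // still the caller's responsibility

	consumer, err := sarama.NewConsumerFromClient(client)
	if err != nil {
		return err
	}
	defer consumer.Close() // safe: does not close the shared client

	_, err = consumer.Topics()
	return err
}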
func (c *consumer) Topics() ([]string, error) {
@ -260,12 +255,11 @@ func (c *consumer) abandonBrokerConsumer(brokerWorker *brokerConsumer) {
// or a separate goroutine. Check out the Consumer examples to see implementations of these different approaches.
//
// To terminate such a for/range loop while the loop is executing, call AsyncClose. This will kick off the process of
// consumer tear-down & return imediately. Continue to loop, servicing the Messages channel until the teardown process
// consumer tear-down & return immediately. Continue to loop, servicing the Messages channel until the teardown process
// AsyncClose initiated closes it (thus terminating the for/range loop). If you've already ceased reading Messages, call
// Close; this will signal the PartitionConsumer's goroutines to begin shutting down (just like AsyncClose), but will
// also drain the Messages channel, harvest all errors & return them once cleanup has completed.
type PartitionConsumer interface {
// AsyncClose initiates a shutdown of the PartitionConsumer. This method will return immediately, after which you
// should continue to service the 'Messages' and 'Errors' channels until they are empty. It is required to call this
// function, or Close, before a consumer object passes out of scope, as it will otherwise leak memory. You must call
@ -297,21 +291,22 @@ type PartitionConsumer interface {
type partitionConsumer struct {
highWaterMarkOffset int64 // must be at the top of the struct because https://golang.org/pkg/sync/atomic/#pkg-note-BUG
consumer *consumer
conf *Config
topic string
partition int32
consumer *consumer
conf *Config
broker *brokerConsumer
messages chan *ConsumerMessage
errors chan *ConsumerError
feeder chan *FetchResponse
trigger, dying chan none
closeOnce sync.Once
topic string
partition int32
responseResult error
fetchSize int32
offset int64
fetchSize int32
offset int64
retries int32
}
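A consumption sketch for the PartitionConsumer machinery above, using only the public API (the topic name "events" and the message count are hypothetical):

package example

import (
	"fmt"

	"github.com/Shopify/sarama"
)

func consumeN(consumer sarama.Consumer, n int) error {
	pc, err := consumer.ConsumePartition("events", 0, sarama.OffsetOldest)
	if err != nil {
		return err
	}
	for i := 0; i < n; i++ {
		msg := <-pc.Messages()
		fmt.Printf("partition %d offset %d: %s\n", msg.Partition, msg.Offset, msg.Value)
	}
	// Close drains Messages and collects any outstanding errors.
	return pc.Close()
}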
var errTimedOut = errors.New("timed out feeding messages to the user") // not user-facing
@ -330,12 +325,20 @@ func (child *partitionConsumer) sendError(err error) {
}
}
func (child *partitionConsumer) computeBackoff() time.Duration {
if child.conf.Consumer.Retry.BackoffFunc != nil {
retries := atomic.AddInt32(&child.retries, 1)
return child.conf.Consumer.Retry.BackoffFunc(int(retries))
}
return child.conf.Consumer.Retry.Backoff
}
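computeBackoff lets callers opt into custom retry pacing via Consumer.Retry.BackoffFunc. A sketch of a capped exponential backoff (the base and cap are arbitrary choices, not defaults):

package example

import (
	"time"

	"github.com/Shopify/sarama"
)

func configWithBackoff() *sarama.Config {
	cfg := sarama.NewConfig()
	// retries starts at 1 here and resets to 0 after a clean response.
	cfg.Consumer.Retry.BackoffFunc = func(retries int) time.Duration {
		if retries > 6 {
			retries = 6 // cap at 250ms << 5 = 8s
		}
		return time.Duration(250<<uint(retries-1)) * time.Millisecond
	}
	return cfg
}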
func (child *partitionConsumer) dispatcher() {
for range child.trigger {
select {
case <-child.dying:
close(child.trigger)
case <-time.After(child.conf.Consumer.Retry.Backoff):
case <-time.After(child.computeBackoff()):
if child.broker != nil {
child.consumer.unrefBrokerConsumer(child.broker)
child.broker = nil
@ -411,18 +414,14 @@ func (child *partitionConsumer) AsyncClose() {
// the dispatcher to exit its loop, which removes it from the consumer then closes its 'messages' and
// 'errors' channel (alternatively, if the child is already at the dispatcher for some reason, that will
// also just close itself)
close(child.dying)
child.closeOnce.Do(func() {
close(child.dying)
})
}
func (child *partitionConsumer) Close() error {
child.AsyncClose()
go withRecover(func() {
for range child.messages {
// drain
}
})
var errors ConsumerErrors
for err := range child.errors {
errors = append(errors, err)
@ -440,45 +439,137 @@ func (child *partitionConsumer) HighWaterMarkOffset() int64 {
func (child *partitionConsumer) responseFeeder() {
var msgs []*ConsumerMessage
msgSent := false
expiryTicker := time.NewTicker(child.conf.Consumer.MaxProcessingTime)
firstAttempt := true
feederLoop:
for response := range child.feeder {
msgs, child.responseResult = child.parseResponse(response)
expiryTicker := time.NewTicker(child.conf.Consumer.MaxProcessingTime)
if child.responseResult == nil {
atomic.StoreInt32(&child.retries, 0)
}
for i, msg := range msgs {
messageSelect:
select {
case <-child.dying:
child.broker.acks.Done()
continue feederLoop
case child.messages <- msg:
msgSent = true
firstAttempt = true
case <-expiryTicker.C:
if !msgSent {
if !firstAttempt {
child.responseResult = errTimedOut
child.broker.acks.Done()
remainingLoop:
for _, msg = range msgs[i:] {
child.messages <- msg
select {
case child.messages <- msg:
case <-child.dying:
break remainingLoop
}
}
child.broker.input <- child
continue feederLoop
} else {
// current message has not been sent, return to select
// statement
msgSent = false
firstAttempt = false
goto messageSelect
}
}
}
expiryTicker.Stop()
child.broker.acks.Done()
}
expiryTicker.Stop()
close(child.messages)
close(child.errors)
}
func (child *partitionConsumer) parseMessages(msgSet *MessageSet) ([]*ConsumerMessage, error) {
var messages []*ConsumerMessage
for _, msgBlock := range msgSet.Messages {
for _, msg := range msgBlock.Messages() {
offset := msg.Offset
timestamp := msg.Msg.Timestamp
if msg.Msg.Version >= 1 {
baseOffset := msgBlock.Offset - msgBlock.Messages()[len(msgBlock.Messages())-1].Offset
offset += baseOffset
if msg.Msg.LogAppendTime {
timestamp = msgBlock.Msg.Timestamp
}
}
if offset < child.offset {
continue
}
messages = append(messages, &ConsumerMessage{
Topic: child.topic,
Partition: child.partition,
Key: msg.Msg.Key,
Value: msg.Msg.Value,
Offset: offset,
Timestamp: timestamp,
BlockTimestamp: msgBlock.Msg.Timestamp,
})
child.offset = offset + 1
}
}
if len(messages) == 0 {
child.offset++
}
return messages, nil
}
func (child *partitionConsumer) parseRecords(batch *RecordBatch) ([]*ConsumerMessage, error) {
messages := make([]*ConsumerMessage, 0, len(batch.Records))
for _, rec := range batch.Records {
offset := batch.FirstOffset + rec.OffsetDelta
if offset < child.offset {
continue
}
timestamp := batch.FirstTimestamp.Add(rec.TimestampDelta)
if batch.LogAppendTime {
timestamp = batch.MaxTimestamp
}
messages = append(messages, &ConsumerMessage{
Topic: child.topic,
Partition: child.partition,
Key: rec.Key,
Value: rec.Value,
Offset: offset,
Timestamp: timestamp,
Headers: rec.Headers,
})
child.offset = offset + 1
}
if len(messages) == 0 {
child.offset++
}
return messages, nil
}
func (child *partitionConsumer) parseResponse(response *FetchResponse) ([]*ConsumerMessage, error) {
var (
metricRegistry = child.conf.MetricRegistry
consumerBatchSizeMetric metrics.Histogram
)
if metricRegistry != nil {
consumerBatchSizeMetric = getOrRegisterHistogram("consumer-batch-size", metricRegistry)
}
// If the request was throttled and the response is empty, we log and return without error
if response.ThrottleTime != time.Duration(0) && len(response.Blocks) == 0 {
Logger.Printf(
"consumer/broker/%d FetchResponse throttled %v\n",
child.broker.broker.ID(), response.ThrottleTime)
return nil, nil
}
block := response.GetBlock(child.topic, child.partition)
if block == nil {
return nil, ErrIncompleteResponse
@ -488,16 +579,31 @@ func (child *partitionConsumer) parseResponse(response *FetchResponse) ([]*Consu
return nil, block.Err
}
if len(block.MsgSet.Messages) == 0 {
nRecs, err := block.numRecords()
if err != nil {
return nil, err
}
consumerBatchSizeMetric.Update(int64(nRecs))
if nRecs == 0 {
partialTrailingMessage, err := block.isPartial()
if err != nil {
return nil, err
}
// We got no messages. If we got a trailing one then we need to ask for more data.
// Otherwise we just poll again and wait for one to be produced...
if block.MsgSet.PartialTrailingMessage {
if partialTrailingMessage {
if child.conf.Consumer.Fetch.Max > 0 && child.fetchSize == child.conf.Consumer.Fetch.Max {
// we can't ask for more data, we've hit the configured limit
child.sendError(ErrMessageTooLarge)
child.offset++ // skip this one so we can keep processing future messages
} else {
child.fetchSize *= 2
// check int32 overflow
if child.fetchSize < 0 {
child.fetchSize = math.MaxInt32
}
if child.conf.Consumer.Fetch.Max > 0 && child.fetchSize > child.conf.Consumer.Fetch.Max {
child.fetchSize = child.conf.Consumer.Fetch.Max
}
@ -511,55 +617,89 @@ func (child *partitionConsumer) parseResponse(response *FetchResponse) ([]*Consu
child.fetchSize = child.conf.Consumer.Fetch.Default
atomic.StoreInt64(&child.highWaterMarkOffset, block.HighWaterMarkOffset)
incomplete := false
prelude := true
var messages []*ConsumerMessage
for _, msgBlock := range block.MsgSet.Messages {
// abortedProducerIDs contains producer IDs whose messages should be ignored as uncommitted
// - producer IDs are added when the partitionConsumer iterates over the offset at which an aborted transaction begins (abortedTransaction.FirstOffset)
// - producer IDs are removed when the partitionConsumer iterates over an abort controlRecord, meaning the aborted transaction for this producer is over
abortedProducerIDs := make(map[int64]struct{}, len(block.AbortedTransactions))
abortedTransactions := block.getAbortedTransactions()
for _, msg := range msgBlock.Messages() {
offset := msg.Offset
if msg.Msg.Version >= 1 {
baseOffset := msgBlock.Offset - msgBlock.Messages()[len(msgBlock.Messages())-1].Offset
offset += baseOffset
messages := []*ConsumerMessage{}
for _, records := range block.RecordsSet {
switch records.recordsType {
case legacyRecords:
messageSetMessages, err := child.parseMessages(records.MsgSet)
if err != nil {
return nil, err
}
if prelude && offset < child.offset {
messages = append(messages, messageSetMessages...)
case defaultRecords:
// Consume remaining abortedTransactions up to the last offset of the current batch
for _, txn := range abortedTransactions {
if txn.FirstOffset > records.RecordBatch.LastOffset() {
break
}
abortedProducerIDs[txn.ProducerID] = struct{}{}
// Pop abortedTransactions so that we never add them again
abortedTransactions = abortedTransactions[1:]
}
recordBatchMessages, err := child.parseRecords(records.RecordBatch)
if err != nil {
return nil, err
}
// Parse and commit offset but do not expose messages that are:
// - control records
// - part of an aborted transaction when set to `ReadCommitted`
// control record
isControl, err := records.isControl()
if err != nil {
// I don't know why there is this continue in case of error to begin with
// Safe bet is to ignore control messages if ReadUncommitted
// and block on them in case of error and ReadCommitted
if child.conf.Consumer.IsolationLevel == ReadCommitted {
return nil, err
}
continue
}
prelude = false
if isControl {
controlRecord, err := records.getControlRecord()
if err != nil {
return nil, err
}
if offset >= child.offset {
messages = append(messages, &ConsumerMessage{
Topic: child.topic,
Partition: child.partition,
Key: msg.Msg.Key,
Value: msg.Msg.Value,
Offset: offset,
Timestamp: msg.Msg.Timestamp,
BlockTimestamp: msgBlock.Msg.Timestamp,
})
child.offset = offset + 1
} else {
incomplete = true
if controlRecord.Type == ControlRecordAbort {
delete(abortedProducerIDs, records.RecordBatch.ProducerID)
}
continue
}
// filter aborted transactions
if child.conf.Consumer.IsolationLevel == ReadCommitted {
_, isAborted := abortedProducerIDs[records.RecordBatch.ProducerID]
if records.RecordBatch.IsTransactional && isAborted {
continue
}
}
messages = append(messages, recordBatchMessages...)
default:
return nil, fmt.Errorf("unknown records type: %v", records.recordsType)
}
}
if incomplete || len(messages) == 0 {
return nil, ErrIncompleteResponse
}
return messages, nil
}
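The aborted-transaction filtering above only engages when the consumer runs read-committed. A minimal config sketch for that mode, matching the version check in Validate:

package example

import "github.com/Shopify/sarama"

func readCommittedConfig() *sarama.Config {
	cfg := sarama.NewConfig()
	cfg.Version = sarama.V0_11_0_0 // transactions and control records need >= 0.11
	cfg.Consumer.IsolationLevel = sarama.ReadCommitted
	return cfg
}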
// brokerConsumer
type brokerConsumer struct {
consumer *consumer
broker *Broker
input chan *partitionConsumer
newSubscriptions chan []*partitionConsumer
wait chan none
subscriptions map[*partitionConsumer]none
wait chan none
acks sync.WaitGroup
refs int
}
@ -581,14 +721,14 @@ func (c *consumer) newBrokerConsumer(broker *Broker) *brokerConsumer {
return bc
}
// The subscriptionManager constantly accepts new subscriptions on `input` (even when the main subscriptionConsumer
// goroutine is in the middle of a network request) and batches it up. The main worker goroutine picks
// up a batch of new subscriptions between every network request by reading from `newSubscriptions`, so we give
// it nil if no new subscriptions are available. We also write to `wait` only when new subscriptions is available,
// so the main goroutine can block waiting for work if it has none.
func (bc *brokerConsumer) subscriptionManager() {
var buffer []*partitionConsumer
// The subscriptionManager constantly accepts new subscriptions on `input` (even when the main subscriptionConsumer
// goroutine is in the middle of a network request) and batches them up. The main worker goroutine picks
// up a batch of new subscriptions between every network request by reading from `newSubscriptions`, so we give
// it nil if no new subscriptions are available. We also write to `wait` only when new subscriptions are available,
// so the main goroutine can block waiting for work if it has none.
for {
if len(buffer) > 0 {
select {
@ -621,10 +761,10 @@ done:
close(bc.newSubscriptions)
}
//subscriptionConsumer ensures we will get nil right away if no new subscriptions is available
func (bc *brokerConsumer) subscriptionConsumer() {
<-bc.wait // wait for our first piece of work
// the subscriptionConsumer ensures we will get nil right away if no new subscriptions are available
for newSubscriptions := range bc.newSubscriptions {
bc.updateSubscriptions(newSubscriptions)
@ -665,20 +805,20 @@ func (bc *brokerConsumer) updateSubscriptions(newSubscriptions []*partitionConsu
close(child.trigger)
delete(bc.subscriptions, child)
default:
break
// no-op
}
}
}
//handleResponses handles the response codes left for us by our subscriptions, and abandons ones that have been closed
func (bc *brokerConsumer) handleResponses() {
// handles the response codes left for us by our subscriptions, and abandons ones that have been closed
for child := range bc.subscriptions {
result := child.responseResult
child.responseResult = nil
switch result {
case nil:
break
// no-op
case errTimedOut:
Logger.Printf("consumer/broker/%d abandoned subscription to %s/%d because consuming was taking too long\n",
bc.broker.ID(), child.topic, child.partition)
@ -733,6 +873,9 @@ func (bc *brokerConsumer) fetchNewMessages() (*FetchResponse, error) {
MinBytes: bc.consumer.conf.Consumer.Fetch.Min,
MaxWaitTime: int32(bc.consumer.conf.Consumer.MaxWaitTime / time.Millisecond),
}
if bc.consumer.conf.Version.IsAtLeast(V0_9_0_0) {
request.Version = 1
}
if bc.consumer.conf.Version.IsAtLeast(V0_10_0_0) {
request.Version = 2
}
@ -740,6 +883,25 @@ func (bc *brokerConsumer) fetchNewMessages() (*FetchResponse, error) {
request.Version = 3
request.MaxBytes = MaxResponseSize
}
if bc.consumer.conf.Version.IsAtLeast(V0_11_0_0) {
request.Version = 4
request.Isolation = bc.consumer.conf.Consumer.IsolationLevel
}
if bc.consumer.conf.Version.IsAtLeast(V1_1_0_0) {
request.Version = 7
// We do not currently implement KIP-227 FetchSessions. Setting the id to 0
// and the epoch to -1 tells the broker not to generate a session ID that
// we're just going to ignore anyway.
request.SessionID = 0
request.SessionEpoch = -1
}
if bc.consumer.conf.Version.IsAtLeast(V2_1_0_0) {
request.Version = 10
}
if bc.consumer.conf.Version.IsAtLeast(V2_3_0_0) {
request.Version = 11
request.RackID = bc.consumer.conf.RackID
}
for child := range bc.subscriptions {
request.AddBlock(child.topic, child.partition, child.offset, child.fetchSize)

867
vendor/github.com/Shopify/sarama/consumer_group.go generated vendored Normal file
View File

@ -0,0 +1,867 @@
package sarama
import (
"context"
"errors"
"fmt"
"sort"
"sync"
"time"
)
// ErrClosedConsumerGroup is the error returned when a method is called on a consumer group that has been closed.
var ErrClosedConsumerGroup = errors.New("kafka: tried to use a consumer group that was closed")
// ConsumerGroup is responsible for dividing up processing of topics and partitions
// over a collection of processes (the members of the consumer group).
type ConsumerGroup interface {
// Consume joins a cluster of consumers for a given list of topics and
// starts a blocking ConsumerGroupSession through the ConsumerGroupHandler.
//
// The life-cycle of a session is represented by the following steps:
//
// 1. The consumers join the group (as explained in https://kafka.apache.org/documentation/#intro_consumers)
// and are assigned their "fair share" of partitions, aka 'claims'.
// 2. Before processing starts, the handler's Setup() hook is called to notify the user
// of the claims and allow any necessary preparation or alteration of state.
// 3. For each of the assigned claims the handler's ConsumeClaim() function is then called
// in a separate goroutine which requires it to be thread-safe. Any state must be carefully protected
// from concurrent reads/writes.
// 4. The session will persist until one of the ConsumeClaim() functions exits. This can be either when the
// parent context is cancelled or when a server-side rebalance cycle is initiated.
// 5. Once all the ConsumeClaim() loops have exited, the handler's Cleanup() hook is called
// to allow the user to perform any final tasks before a rebalance.
// 6. Finally, marked offsets are committed one last time before claims are released.
//
// Please note that once a rebalance is triggered, sessions must be completed within
// Config.Consumer.Group.Rebalance.Timeout. This means that ConsumeClaim() functions must exit
// as quickly as possible to allow time for Cleanup() and the final offset commit. If the timeout
// is exceeded, the consumer will be removed from the group by Kafka, which will cause offset
// commit failures.
// This method should be called inside an infinite loop; when a
// server-side rebalance happens, the consumer session will need to be
// recreated to get the new claims.
Consume(ctx context.Context, topics []string, handler ConsumerGroupHandler) error
// Errors returns a read channel of errors that occurred during the consumer life-cycle.
// By default, errors are logged and not returned over this channel.
// If you want to implement any custom error handling, set your config's
// Consumer.Return.Errors setting to true, and read from this channel.
Errors() <-chan error
// Close stops the ConsumerGroup and detaches any running sessions. It is required to call
// this function before the object passes out of scope, as it will otherwise leak memory.
Close() error
}
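A driver sketch for the interface above (the group ID and topic are hypothetical, and exampleHandler is a hypothetical handler type; a matching sketch follows the ConsumerGroupHandler interface further down):

package example

import (
	"context"

	"github.com/Shopify/sarama"
)

func runGroup(ctx context.Context, addrs []string) error {
	cfg := sarama.NewConfig()
	cfg.Version = sarama.V0_10_2_0 // consumer groups require >= 0.10.2
	group, err := sarama.NewConsumerGroup(addrs, "example-group", cfg)
	if err != nil {
		return err
	}
	defer group.Close()

	for {
		// Consume blocks for one session; after a server-side rebalance it
		// returns and must be called again to rejoin with fresh claims.
		if err := group.Consume(ctx, []string{"events"}, exampleHandler{}); err != nil {
			return err
		}
		if ctx.Err() != nil {
			return ctx.Err()
		}
	}
}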
type consumerGroup struct {
client Client
config *Config
consumer Consumer
groupID string
memberID string
errors chan error
lock sync.Mutex
closed chan none
closeOnce sync.Once
userData []byte
}
// NewConsumerGroup creates a new consumer group with the given broker addresses and configuration.
func NewConsumerGroup(addrs []string, groupID string, config *Config) (ConsumerGroup, error) {
client, err := NewClient(addrs, config)
if err != nil {
return nil, err
}
c, err := newConsumerGroup(groupID, client)
if err != nil {
_ = client.Close()
}
return c, err
}
// NewConsumerGroupFromClient creates a new consumer group using the given client. It is still
// necessary to call Close() on the underlying client when shutting down this consumer.
// PLEASE NOTE: consumer groups can only re-use but not share clients.
func NewConsumerGroupFromClient(groupID string, client Client) (ConsumerGroup, error) {
// For clients passed in by the caller, ensure we don't
// call Close() on them.
cli := &nopCloserClient{client}
return newConsumerGroup(groupID, cli)
}
func newConsumerGroup(groupID string, client Client) (ConsumerGroup, error) {
config := client.Config()
if !config.Version.IsAtLeast(V0_10_2_0) {
return nil, ConfigurationError("consumer groups require Version to be >= V0_10_2_0")
}
consumer, err := NewConsumerFromClient(client)
if err != nil {
return nil, err
}
return &consumerGroup{
client: client,
consumer: consumer,
config: config,
groupID: groupID,
errors: make(chan error, config.ChannelBufferSize),
closed: make(chan none),
}, nil
}
// Errors implements ConsumerGroup.
func (c *consumerGroup) Errors() <-chan error { return c.errors }
// Close implements ConsumerGroup.
func (c *consumerGroup) Close() (err error) {
c.closeOnce.Do(func() {
close(c.closed)
// leave group
if e := c.leave(); e != nil {
err = e
}
// drain errors
go func() {
close(c.errors)
}()
for e := range c.errors {
err = e
}
if e := c.client.Close(); e != nil {
err = e
}
})
return
}
// Consume implements ConsumerGroup.
func (c *consumerGroup) Consume(ctx context.Context, topics []string, handler ConsumerGroupHandler) error {
// Ensure group is not closed
select {
case <-c.closed:
return ErrClosedConsumerGroup
default:
}
c.lock.Lock()
defer c.lock.Unlock()
// Quick exit when no topics are provided
if len(topics) == 0 {
return fmt.Errorf("no topics provided")
}
// Refresh metadata for requested topics
if err := c.client.RefreshMetadata(topics...); err != nil {
return err
}
// Init session
sess, err := c.newSession(ctx, topics, handler, c.config.Consumer.Group.Rebalance.Retry.Max)
if err == ErrClosedClient {
return ErrClosedConsumerGroup
} else if err != nil {
return err
}
// Loop to check whether any topic's partition count has changed; if so, end the
// session so a rebalance can pick up the new partitions. Tying the goroutine to
// the session keeps repeated Consume calls from spawning more than one
// loopCheckPartitionNumbers goroutine.
go c.loopCheckPartitionNumbers(topics, sess)
// Wait for session exit signal
<-sess.ctx.Done()
// Gracefully release session claims
return sess.release(true)
}
func (c *consumerGroup) retryNewSession(ctx context.Context, topics []string, handler ConsumerGroupHandler, retries int, refreshCoordinator bool) (*consumerGroupSession, error) {
select {
case <-c.closed:
return nil, ErrClosedConsumerGroup
case <-time.After(c.config.Consumer.Group.Rebalance.Retry.Backoff):
}
if refreshCoordinator {
err := c.client.RefreshCoordinator(c.groupID)
if err != nil {
return c.retryNewSession(ctx, topics, handler, retries, true)
}
}
return c.newSession(ctx, topics, handler, retries-1)
}
func (c *consumerGroup) newSession(ctx context.Context, topics []string, handler ConsumerGroupHandler, retries int) (*consumerGroupSession, error) {
coordinator, err := c.client.Coordinator(c.groupID)
if err != nil {
if retries <= 0 {
return nil, err
}
return c.retryNewSession(ctx, topics, handler, retries, true)
}
// Join consumer group
join, err := c.joinGroupRequest(coordinator, topics)
if err != nil {
_ = coordinator.Close()
return nil, err
}
switch join.Err {
case ErrNoError:
c.memberID = join.MemberId
case ErrUnknownMemberId, ErrIllegalGeneration: // reset member ID and retry immediately
c.memberID = ""
return c.newSession(ctx, topics, handler, retries)
case ErrNotCoordinatorForConsumer: // retry after backoff with coordinator refresh
if retries <= 0 {
return nil, join.Err
}
return c.retryNewSession(ctx, topics, handler, retries, true)
case ErrRebalanceInProgress: // retry after backoff
if retries <= 0 {
return nil, join.Err
}
return c.retryNewSession(ctx, topics, handler, retries, false)
default:
return nil, join.Err
}
// Prepare distribution plan if we joined as the leader
var plan BalanceStrategyPlan
if join.LeaderId == join.MemberId {
members, err := join.GetMembers()
if err != nil {
return nil, err
}
plan, err = c.balance(members)
if err != nil {
return nil, err
}
}
// Sync consumer group
sync, err := c.syncGroupRequest(coordinator, plan, join.GenerationId)
if err != nil {
_ = coordinator.Close()
return nil, err
}
switch sync.Err {
case ErrNoError:
case ErrUnknownMemberId, ErrIllegalGeneration: // reset member ID and retry immediately
c.memberID = ""
return c.newSession(ctx, topics, handler, retries)
case ErrNotCoordinatorForConsumer: // retry after backoff with coordinator refresh
if retries <= 0 {
return nil, sync.Err
}
return c.retryNewSession(ctx, topics, handler, retries, true)
case ErrRebalanceInProgress: // retry after backoff
if retries <= 0 {
return nil, sync.Err
}
return c.retryNewSession(ctx, topics, handler, retries, false)
default:
return nil, sync.Err
}
// Retrieve and sort claims
var claims map[string][]int32
if len(sync.MemberAssignment) > 0 {
members, err := sync.GetMemberAssignment()
if err != nil {
return nil, err
}
claims = members.Topics
c.userData = members.UserData
for _, partitions := range claims {
sort.Sort(int32Slice(partitions))
}
}
return newConsumerGroupSession(ctx, c, claims, join.MemberId, join.GenerationId, handler)
}
func (c *consumerGroup) joinGroupRequest(coordinator *Broker, topics []string) (*JoinGroupResponse, error) {
req := &JoinGroupRequest{
GroupId: c.groupID,
MemberId: c.memberID,
SessionTimeout: int32(c.config.Consumer.Group.Session.Timeout / time.Millisecond),
ProtocolType: "consumer",
}
if c.config.Version.IsAtLeast(V0_10_1_0) {
req.Version = 1
req.RebalanceTimeout = int32(c.config.Consumer.Group.Rebalance.Timeout / time.Millisecond)
}
// use static user-data if configured, otherwise use consumer-group userdata from the last sync
userData := c.config.Consumer.Group.Member.UserData
if len(userData) == 0 {
userData = c.userData
}
meta := &ConsumerGroupMemberMetadata{
Topics: topics,
UserData: userData,
}
strategy := c.config.Consumer.Group.Rebalance.Strategy
if err := req.AddGroupProtocolMetadata(strategy.Name(), meta); err != nil {
return nil, err
}
return coordinator.JoinGroup(req)
}
func (c *consumerGroup) syncGroupRequest(coordinator *Broker, plan BalanceStrategyPlan, generationID int32) (*SyncGroupResponse, error) {
req := &SyncGroupRequest{
GroupId: c.groupID,
MemberId: c.memberID,
GenerationId: generationID,
}
strategy := c.config.Consumer.Group.Rebalance.Strategy
for memberID, topics := range plan {
assignment := &ConsumerGroupMemberAssignment{Topics: topics}
userDataBytes, err := strategy.AssignmentData(memberID, topics, generationID)
if err != nil {
return nil, err
}
assignment.UserData = userDataBytes
if err := req.AddGroupAssignmentMember(memberID, assignment); err != nil {
return nil, err
}
}
return coordinator.SyncGroup(req)
}
func (c *consumerGroup) heartbeatRequest(coordinator *Broker, memberID string, generationID int32) (*HeartbeatResponse, error) {
req := &HeartbeatRequest{
GroupId: c.groupID,
MemberId: memberID,
GenerationId: generationID,
}
return coordinator.Heartbeat(req)
}
func (c *consumerGroup) balance(members map[string]ConsumerGroupMemberMetadata) (BalanceStrategyPlan, error) {
topics := make(map[string][]int32)
for _, meta := range members {
for _, topic := range meta.Topics {
topics[topic] = nil
}
}
for topic := range topics {
partitions, err := c.client.Partitions(topic)
if err != nil {
return nil, err
}
topics[topic] = partitions
}
strategy := c.config.Consumer.Group.Rebalance.Strategy
return strategy.Plan(members, topics)
}
// Leaves the cluster, called by Close.
func (c *consumerGroup) leave() error {
c.lock.Lock()
defer c.lock.Unlock()
if c.memberID == "" {
return nil
}
coordinator, err := c.client.Coordinator(c.groupID)
if err != nil {
return err
}
resp, err := coordinator.LeaveGroup(&LeaveGroupRequest{
GroupId: c.groupID,
MemberId: c.memberID,
})
if err != nil {
_ = coordinator.Close()
return err
}
// Unset memberID
c.memberID = ""
// Check response
switch resp.Err {
case ErrRebalanceInProgress, ErrUnknownMemberId, ErrNoError:
return nil
default:
return resp.Err
}
}
func (c *consumerGroup) handleError(err error, topic string, partition int32) {
if _, ok := err.(*ConsumerError); !ok && topic != "" && partition > -1 {
err = &ConsumerError{
Topic: topic,
Partition: partition,
Err: err,
}
}
if !c.config.Consumer.Return.Errors {
Logger.Println(err)
return
}
select {
case <-c.closed:
//consumer is closed
return
default:
}
select {
case c.errors <- err:
default:
// no error listener
}
}
func (c *consumerGroup) loopCheckPartitionNumbers(topics []string, session *consumerGroupSession) {
pause := time.NewTicker(c.config.Metadata.RefreshFrequency)
defer session.cancel()
defer pause.Stop()
var oldTopicToPartitionNum map[string]int
var err error
if oldTopicToPartitionNum, err = c.topicToPartitionNumbers(topics); err != nil {
return
}
for {
if newTopicToPartitionNum, err := c.topicToPartitionNumbers(topics); err != nil {
return
} else {
for topic, num := range oldTopicToPartitionNum {
if newTopicToPartitionNum[topic] != num {
return // trigger the end of the session on exit
}
}
}
select {
case <-pause.C:
case <-session.ctx.Done():
Logger.Printf("loop check partition number coroutine will exit, topics %s", topics)
// if session closed by other, should be exited
return
case <-c.closed:
return
}
}
}
func (c *consumerGroup) topicToPartitionNumbers(topics []string) (map[string]int, error) {
topicToPartitionNum := make(map[string]int, len(topics))
for _, topic := range topics {
if partitionNum, err := c.client.Partitions(topic); err != nil {
Logger.Printf("Consumer Group topic %s get partition number failed %v", topic, err)
return nil, err
} else {
topicToPartitionNum[topic] = len(partitionNum)
}
}
return topicToPartitionNum, nil
}
// --------------------------------------------------------------------
// ConsumerGroupSession represents a consumer group member session.
type ConsumerGroupSession interface {
// Claims returns information about the claimed partitions by topic.
Claims() map[string][]int32
// MemberID returns the cluster member ID.
MemberID() string
// GenerationID returns the current generation ID.
GenerationID() int32
// MarkOffset marks the provided offset, alongside a metadata string
// that represents the state of the partition consumer at that point in time. The
// metadata string can be used by another consumer to restore that state, so it
// can resume consumption.
//
// To follow upstream conventions, you are expected to mark the offset of the
// next message to read, not the last message read. Thus, when calling `MarkOffset`
// you should typically add one to the offset of the last consumed message.
//
// Note: calling MarkOffset does not necessarily commit the offset to the backend
// store immediately for efficiency reasons, and it may never be committed if
// your application crashes. This means that you may end up processing the same
// message twice, and your processing should ideally be idempotent.
MarkOffset(topic string, partition int32, offset int64, metadata string)
// ResetOffset resets to the provided offset, alongside a metadata string that
// represents the state of the partition consumer at that point in time. Reset
// acts as a counterpart to MarkOffset, the difference being that it allows
// resetting an offset to an earlier or smaller value, whereas MarkOffset only
// allows incrementing the offset. See MarkOffset for more details.
ResetOffset(topic string, partition int32, offset int64, metadata string)
// MarkMessage marks a message as consumed.
MarkMessage(msg *ConsumerMessage, metadata string)
// Context returns the session context.
Context() context.Context
}
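The next-offset convention documented for MarkOffset above trips people up, so here is a tiny sketch of the +1 rule (helper name is hypothetical):

package example

import "github.com/Shopify/sarama"

func markConsumed(sess sarama.ConsumerGroupSession, msg *sarama.ConsumerMessage) {
	// Upstream convention: mark the offset of the NEXT message to read,
	// hence the +1. MarkMessage(msg, "") is the equivalent shorthand.
	sess.MarkOffset(msg.Topic, msg.Partition, msg.Offset+1, "")
}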
type consumerGroupSession struct {
parent *consumerGroup
memberID string
generationID int32
handler ConsumerGroupHandler
claims map[string][]int32
offsets *offsetManager
ctx context.Context
cancel func()
waitGroup sync.WaitGroup
releaseOnce sync.Once
hbDying, hbDead chan none
}
func newConsumerGroupSession(ctx context.Context, parent *consumerGroup, claims map[string][]int32, memberID string, generationID int32, handler ConsumerGroupHandler) (*consumerGroupSession, error) {
// init offset manager
offsets, err := newOffsetManagerFromClient(parent.groupID, memberID, generationID, parent.client)
if err != nil {
return nil, err
}
// init context
ctx, cancel := context.WithCancel(ctx)
// init session
sess := &consumerGroupSession{
parent: parent,
memberID: memberID,
generationID: generationID,
handler: handler,
offsets: offsets,
claims: claims,
ctx: ctx,
cancel: cancel,
hbDying: make(chan none),
hbDead: make(chan none),
}
// start heartbeat loop
go sess.heartbeatLoop()
// create a POM for each claim
for topic, partitions := range claims {
for _, partition := range partitions {
pom, err := offsets.ManagePartition(topic, partition)
if err != nil {
_ = sess.release(false)
return nil, err
}
// handle POM errors
go func(topic string, partition int32) {
for err := range pom.Errors() {
sess.parent.handleError(err, topic, partition)
}
}(topic, partition)
}
}
// perform setup
if err := handler.Setup(sess); err != nil {
_ = sess.release(true)
return nil, err
}
// start consuming
for topic, partitions := range claims {
for _, partition := range partitions {
sess.waitGroup.Add(1)
go func(topic string, partition int32) {
defer sess.waitGroup.Done()
// cancel the session as soon as the first
// goroutine exits
defer sess.cancel()
// consume a single topic/partition, blocking
sess.consume(topic, partition)
}(topic, partition)
}
}
return sess, nil
}
func (s *consumerGroupSession) Claims() map[string][]int32 { return s.claims }
func (s *consumerGroupSession) MemberID() string { return s.memberID }
func (s *consumerGroupSession) GenerationID() int32 { return s.generationID }
func (s *consumerGroupSession) MarkOffset(topic string, partition int32, offset int64, metadata string) {
if pom := s.offsets.findPOM(topic, partition); pom != nil {
pom.MarkOffset(offset, metadata)
}
}
func (s *consumerGroupSession) ResetOffset(topic string, partition int32, offset int64, metadata string) {
if pom := s.offsets.findPOM(topic, partition); pom != nil {
pom.ResetOffset(offset, metadata)
}
}
func (s *consumerGroupSession) MarkMessage(msg *ConsumerMessage, metadata string) {
s.MarkOffset(msg.Topic, msg.Partition, msg.Offset+1, metadata)
}
func (s *consumerGroupSession) Context() context.Context {
return s.ctx
}
func (s *consumerGroupSession) consume(topic string, partition int32) {
// quick exit if rebalance is due
select {
case <-s.ctx.Done():
return
case <-s.parent.closed:
return
default:
}
// get next offset
offset := s.parent.config.Consumer.Offsets.Initial
if pom := s.offsets.findPOM(topic, partition); pom != nil {
offset, _ = pom.NextOffset()
}
// create new claim
claim, err := newConsumerGroupClaim(s, topic, partition, offset)
if err != nil {
s.parent.handleError(err, topic, partition)
return
}
// handle errors
go func() {
for err := range claim.Errors() {
s.parent.handleError(err, topic, partition)
}
}()
// trigger close when session is done
go func() {
select {
case <-s.ctx.Done():
case <-s.parent.closed:
}
claim.AsyncClose()
}()
// start processing
if err := s.handler.ConsumeClaim(s, claim); err != nil {
s.parent.handleError(err, topic, partition)
}
// ensure consumer is closed & drained
claim.AsyncClose()
for _, err := range claim.waitClosed() {
s.parent.handleError(err, topic, partition)
}
}
func (s *consumerGroupSession) release(withCleanup bool) (err error) {
// signal release, stop heartbeat
s.cancel()
// wait for consumers to exit
s.waitGroup.Wait()
// perform release
s.releaseOnce.Do(func() {
if withCleanup {
if e := s.handler.Cleanup(s); e != nil {
s.parent.handleError(e, "", -1)
err = e
}
}
if e := s.offsets.Close(); e != nil {
err = e
}
close(s.hbDying)
<-s.hbDead
})
return
}
func (s *consumerGroupSession) heartbeatLoop() {
defer close(s.hbDead)
defer s.cancel() // trigger the end of the session on exit
pause := time.NewTicker(s.parent.config.Consumer.Group.Heartbeat.Interval)
defer pause.Stop()
retries := s.parent.config.Metadata.Retry.Max
for {
coordinator, err := s.parent.client.Coordinator(s.parent.groupID)
if err != nil {
if retries <= 0 {
s.parent.handleError(err, "", -1)
return
}
select {
case <-s.hbDying:
return
case <-time.After(s.parent.config.Metadata.Retry.Backoff):
retries--
}
continue
}
resp, err := s.parent.heartbeatRequest(coordinator, s.memberID, s.generationID)
if err != nil {
_ = coordinator.Close()
if retries <= 0 {
s.parent.handleError(err, "", -1)
return
}
retries--
continue
}
switch resp.Err {
case ErrNoError:
retries = s.parent.config.Metadata.Retry.Max
case ErrRebalanceInProgress, ErrUnknownMemberId, ErrIllegalGeneration:
return
default:
s.parent.handleError(resp.Err, "", -1)
return
}
select {
case <-pause.C:
case <-s.hbDying:
return
}
}
}
// --------------------------------------------------------------------
// ConsumerGroupHandler instances are used to handle individual topic/partition claims.
// It also provides hooks for your consumer group session life-cycle and allows you to
// trigger logic before or after the consume loop(s).
//
// PLEASE NOTE that handlers are likely to be called from several goroutines concurrently;
// ensure that all state is safely protected against race conditions.
type ConsumerGroupHandler interface {
// Setup is run at the beginning of a new session, before ConsumeClaim.
Setup(ConsumerGroupSession) error
// Cleanup is run at the end of a session, once all ConsumeClaim goroutines have exited
// but before the offsets are committed for the very last time.
Cleanup(ConsumerGroupSession) error
// ConsumeClaim must start a consumer loop of ConsumerGroupClaim's Messages().
// Once the Messages() channel is closed, the Handler must finish its processing
// loop and exit.
ConsumeClaim(ConsumerGroupSession, ConsumerGroupClaim) error
}
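A minimal handler sketch satisfying the interface above; exampleHandler is the hypothetical type used in the earlier driver sketch, and "processing" here is just a print:

package example

import (
	"fmt"

	"github.com/Shopify/sarama"
)

type exampleHandler struct{}

func (exampleHandler) Setup(sarama.ConsumerGroupSession) error   { return nil }
func (exampleHandler) Cleanup(sarama.ConsumerGroupSession) error { return nil }

func (exampleHandler) ConsumeClaim(sess sarama.ConsumerGroupSession, claim sarama.ConsumerGroupClaim) error {
	// Messages() closes when a rebalance is due, so returning promptly here
	// leaves time for Cleanup and the final offset commit.
	for msg := range claim.Messages() {
		fmt.Printf("%s/%d@%d\n", msg.Topic, msg.Partition, msg.Offset)
		sess.MarkMessage(msg, "")
	}
	return nil
}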
// ConsumerGroupClaim processes Kafka messages from a given topic and partition within a consumer group.
type ConsumerGroupClaim interface {
// Topic returns the consumed topic name.
Topic() string
// Partition returns the consumed partition.
Partition() int32
// InitialOffset returns the initial offset that was used as a starting point for this claim.
InitialOffset() int64
// HighWaterMarkOffset returns the high water mark offset of the partition,
// i.e. the offset that will be used for the next message that will be produced.
// You can use this to determine how far behind the processing is.
HighWaterMarkOffset() int64
// Messages returns the read channel for the messages that are returned by
// the broker. The messages channel will be closed when a new rebalance cycle
// is due. You must finish processing and mark offsets within
// Config.Consumer.Group.Session.Timeout before the topic/partition is eventually
// re-assigned to another group member.
Messages() <-chan *ConsumerMessage
}
type consumerGroupClaim struct {
topic string
partition int32
offset int64
PartitionConsumer
}
func newConsumerGroupClaim(sess *consumerGroupSession, topic string, partition int32, offset int64) (*consumerGroupClaim, error) {
pcm, err := sess.parent.consumer.ConsumePartition(topic, partition, offset)
if err == ErrOffsetOutOfRange {
offset = sess.parent.config.Consumer.Offsets.Initial
pcm, err = sess.parent.consumer.ConsumePartition(topic, partition, offset)
}
if err != nil {
return nil, err
}
go func() {
for err := range pcm.Errors() {
sess.parent.handleError(err, topic, partition)
}
}()
return &consumerGroupClaim{
topic: topic,
partition: partition,
offset: offset,
PartitionConsumer: pcm,
}, nil
}
func (c *consumerGroupClaim) Topic() string { return c.topic }
func (c *consumerGroupClaim) Partition() int32 { return c.partition }
func (c *consumerGroupClaim) InitialOffset() int64 { return c.offset }
// Drains messages and errors, ensures the claim is fully closed.
func (c *consumerGroupClaim) waitClosed() (errs ConsumerErrors) {
go func() {
for range c.Messages() {
}
}()
for err := range c.Errors() {
errs = append(errs, err)
}
return
}

View File

@ -1,5 +1,6 @@
package sarama
// ConsumerGroupMemberMetadata holds the metadata for a consumer group member
type ConsumerGroupMemberMetadata struct {
Version int16
Topics []string
@ -36,6 +37,7 @@ func (m *ConsumerGroupMemberMetadata) decode(pd packetDecoder) (err error) {
return nil
}
// ConsumerGroupMemberAssignment holds the member assignment for a consumer group
type ConsumerGroupMemberAssignment struct {
Version int16
Topics map[string][]int32

View File

@ -1,16 +1,24 @@
package sarama
// ConsumerMetadataRequest is used for consumer metadata requests
type ConsumerMetadataRequest struct {
ConsumerGroup string
}
func (r *ConsumerMetadataRequest) encode(pe packetEncoder) error {
return pe.putString(r.ConsumerGroup)
tmp := new(FindCoordinatorRequest)
tmp.CoordinatorKey = r.ConsumerGroup
tmp.CoordinatorType = CoordinatorGroup
return tmp.encode(pe)
}
func (r *ConsumerMetadataRequest) decode(pd packetDecoder, version int16) (err error) {
r.ConsumerGroup, err = pd.getString()
return err
tmp := new(FindCoordinatorRequest)
if err := tmp.decode(pd, version); err != nil {
return err
}
r.ConsumerGroup = tmp.CoordinatorKey
return nil
}
func (r *ConsumerMetadataRequest) key() int16 {
@ -21,6 +29,10 @@ func (r *ConsumerMetadataRequest) version() int16 {
return 0
}
func (r *ConsumerMetadataRequest) headerVersion() int16 {
return 1
}
func (r *ConsumerMetadataRequest) requiredVersion() KafkaVersion {
return V0_8_2_0
}

View File

@ -5,6 +5,7 @@ import (
"strconv"
)
// ConsumerMetadataResponse holds the response for a consumer group metadata request
type ConsumerMetadataResponse struct {
Err KError
Coordinator *Broker
@ -14,20 +15,18 @@ type ConsumerMetadataResponse struct {
}
func (r *ConsumerMetadataResponse) decode(pd packetDecoder, version int16) (err error) {
tmp, err := pd.getInt16()
if err != nil {
return err
}
r.Err = KError(tmp)
tmp := new(FindCoordinatorResponse)
coordinator := new(Broker)
if err := coordinator.decode(pd); err != nil {
if err := tmp.decode(pd, version); err != nil {
return err
}
if coordinator.addr == ":0" {
r.Err = tmp.Err
r.Coordinator = tmp.Coordinator
if tmp.Coordinator == nil {
return nil
}
r.Coordinator = coordinator
// this can all go away in 2.0, but we have to fill in deprecated fields to maintain
// backwards compatibility
@ -47,28 +46,22 @@ func (r *ConsumerMetadataResponse) decode(pd packetDecoder, version int16) (err
}
func (r *ConsumerMetadataResponse) encode(pe packetEncoder) error {
pe.putInt16(int16(r.Err))
if r.Coordinator != nil {
host, portstr, err := net.SplitHostPort(r.Coordinator.Addr())
if err != nil {
return err
}
port, err := strconv.ParseInt(portstr, 10, 32)
if err != nil {
return err
}
pe.putInt32(r.Coordinator.ID())
if err := pe.putString(host); err != nil {
return err
}
pe.putInt32(int32(port))
return nil
if r.Coordinator == nil {
r.Coordinator = new(Broker)
r.Coordinator.id = r.CoordinatorID
r.Coordinator.addr = net.JoinHostPort(r.CoordinatorHost, strconv.Itoa(int(r.CoordinatorPort)))
}
pe.putInt32(r.CoordinatorID)
if err := pe.putString(r.CoordinatorHost); err != nil {
tmp := &FindCoordinatorResponse{
Version: 0,
Err: r.Err,
Coordinator: r.Coordinator,
}
if err := tmp.encode(pe); err != nil {
return err
}
pe.putInt32(r.CoordinatorPort)
return nil
}
@ -80,6 +73,10 @@ func (r *ConsumerMetadataResponse) version() int16 {
return 0
}
func (r *ConsumerMetadataResponse) headerVersion() int16 {
return 0
}
func (r *ConsumerMetadataResponse) requiredVersion() KafkaVersion {
return V0_8_2_0
}

72
vendor/github.com/Shopify/sarama/control_record.go generated vendored Normal file
View File

@ -0,0 +1,72 @@
package sarama
// ControlRecordType is the type of a control record.
type ControlRecordType int
const (
//ControlRecordAbort is a control record for abort
ControlRecordAbort ControlRecordType = iota
//ControlRecordCommit is a control record for commit
ControlRecordCommit
//ControlRecordUnknown is a control record of unknown type
ControlRecordUnknown
)
// Control records are returned as records by a fetch request.
// However, unlike "normal" records, they mean nothing application-wise;
// they only serve internal logic for supporting transactions.
type ControlRecord struct {
Version int16
CoordinatorEpoch int32
Type ControlRecordType
}
func (cr *ControlRecord) decode(key, value packetDecoder) error {
var err error
cr.Version, err = value.getInt16()
if err != nil {
return err
}
cr.CoordinatorEpoch, err = value.getInt32()
if err != nil {
return err
}
// There is a version for the value part AND the key part. And I have no idea if they are supposed to match or not
// Either way, all these versions can only be 0 for now
cr.Version, err = key.getInt16()
if err != nil {
return err
}
recordType, err := key.getInt16()
if err != nil {
return err
}
switch recordType {
case 0:
cr.Type = ControlRecordAbort
case 1:
cr.Type = ControlRecordCommit
default:
// from the Java implementation:
// UNKNOWN is used to indicate a control type which the client is not aware of and should be ignored
cr.Type = ControlRecordUnknown
}
return nil
}
func (cr *ControlRecord) encode(key, value packetEncoder) {
value.putInt16(cr.Version)
value.putInt32(cr.CoordinatorEpoch)
key.putInt16(cr.Version)
switch cr.Type {
case ControlRecordAbort:
key.putInt16(0)
case ControlRecordCommit:
key.putInt16(1)
}
}

View File

@ -4,11 +4,38 @@ import (
"encoding/binary"
"fmt"
"hash/crc32"
"sync"
)
type crcPolynomial int8
const (
crcIEEE crcPolynomial = iota
crcCastagnoli
)
var crc32FieldPool = sync.Pool{}
func acquireCrc32Field(polynomial crcPolynomial) *crc32Field {
val := crc32FieldPool.Get()
if val != nil {
c := val.(*crc32Field)
c.polynomial = polynomial
return c
}
return newCRC32Field(polynomial)
}
func releaseCrc32Field(c *crc32Field) {
crc32FieldPool.Put(c)
}
var castagnoliTable = crc32.MakeTable(crc32.Castagnoli)
// crc32Field implements the pushEncoder and pushDecoder interfaces for calculating CRC32s.
type crc32Field struct {
startOffset int
polynomial crcPolynomial
}
func (c *crc32Field) saveOffset(in int) {
@ -19,14 +46,24 @@ func (c *crc32Field) reserveLength() int {
return 4
}
func newCRC32Field(polynomial crcPolynomial) *crc32Field {
return &crc32Field{polynomial: polynomial}
}
func (c *crc32Field) run(curOffset int, buf []byte) error {
crc := crc32.ChecksumIEEE(buf[c.startOffset+4 : curOffset])
crc, err := c.crc(curOffset, buf)
if err != nil {
return err
}
binary.BigEndian.PutUint32(buf[c.startOffset:], crc)
return nil
}
func (c *crc32Field) check(curOffset int, buf []byte) error {
crc := crc32.ChecksumIEEE(buf[c.startOffset+4 : curOffset])
crc, err := c.crc(curOffset, buf)
if err != nil {
return err
}
expected := binary.BigEndian.Uint32(buf[c.startOffset:])
if crc != expected {
@ -35,3 +72,15 @@ func (c *crc32Field) check(curOffset int, buf []byte) error {
return nil
}
func (c *crc32Field) crc(curOffset int, buf []byte) (uint32, error) {
var tab *crc32.Table
switch c.polynomial {
case crcIEEE:
tab = crc32.IEEETable
case crcCastagnoli:
tab = castagnoliTable
default:
return 0, PacketDecodingError{"invalid CRC type"}
}
return crc32.Checksum(buf[c.startOffset+4:curOffset], tab), nil
}
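Both polynomials come straight from the standard library: legacy MessageSets are checksummed with the IEEE polynomial, while v2 RecordBatches use Castagnoli (CRC-32C). A stdlib-only sketch:

package example

import "hash/crc32"

func checksums(payload []byte) (ieee, castagnoli uint32) {
	ieee = crc32.ChecksumIEEE(payload)
	castagnoli = crc32.Checksum(payload, crc32.MakeTable(crc32.Castagnoli))
	return
}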

View File

@ -0,0 +1,125 @@
package sarama
import "time"
type CreatePartitionsRequest struct {
TopicPartitions map[string]*TopicPartition
Timeout time.Duration
ValidateOnly bool
}
func (c *CreatePartitionsRequest) encode(pe packetEncoder) error {
if err := pe.putArrayLength(len(c.TopicPartitions)); err != nil {
return err
}
for topic, partition := range c.TopicPartitions {
if err := pe.putString(topic); err != nil {
return err
}
if err := partition.encode(pe); err != nil {
return err
}
}
pe.putInt32(int32(c.Timeout / time.Millisecond))
pe.putBool(c.ValidateOnly)
return nil
}
func (c *CreatePartitionsRequest) decode(pd packetDecoder, version int16) (err error) {
n, err := pd.getArrayLength()
if err != nil {
return err
}
c.TopicPartitions = make(map[string]*TopicPartition, n)
for i := 0; i < n; i++ {
topic, err := pd.getString()
if err != nil {
return err
}
c.TopicPartitions[topic] = new(TopicPartition)
if err := c.TopicPartitions[topic].decode(pd, version); err != nil {
return err
}
}
timeout, err := pd.getInt32()
if err != nil {
return err
}
c.Timeout = time.Duration(timeout) * time.Millisecond
if c.ValidateOnly, err = pd.getBool(); err != nil {
return err
}
return nil
}
func (r *CreatePartitionsRequest) key() int16 {
return 37
}
func (r *CreatePartitionsRequest) version() int16 {
return 0
}
func (r *CreatePartitionsRequest) headerVersion() int16 {
return 1
}
func (r *CreatePartitionsRequest) requiredVersion() KafkaVersion {
return V1_0_0_0
}
type TopicPartition struct {
Count int32
Assignment [][]int32
}
func (t *TopicPartition) encode(pe packetEncoder) error {
pe.putInt32(t.Count)
if len(t.Assignment) == 0 {
pe.putInt32(-1)
return nil
}
if err := pe.putArrayLength(len(t.Assignment)); err != nil {
return err
}
for _, assign := range t.Assignment {
if err := pe.putInt32Array(assign); err != nil {
return err
}
}
return nil
}
func (t *TopicPartition) decode(pd packetDecoder, version int16) (err error) {
if t.Count, err = pd.getInt32(); err != nil {
return err
}
n, err := pd.getInt32()
if err != nil {
return err
}
if n <= 0 {
return nil
}
t.Assignment = make([][]int32, n)
for i := 0; i < int(n); i++ {
if t.Assignment[i], err = pd.getInt32Array(); err != nil {
return err
}
}
return nil
}
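Callers normally reach this request through the admin client rather than encoding it by hand. A sketch assuming sarama's ClusterAdmin API (topic name and partition count are illustrative):

package example

import "github.com/Shopify/sarama"

func growTopic(addrs []string) error {
	cfg := sarama.NewConfig()
	cfg.Version = sarama.V1_0_0_0 // CreatePartitions requires >= 1.0
	admin, err := sarama.NewClusterAdmin(addrs, cfg)
	if err != nil {
		return err
	}
	defer admin.Close()

	// Raise "events" to 12 partitions; nil lets the broker assign replicas.
	return admin.CreatePartitions("events", 12, nil, false)
}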

View File

@ -0,0 +1,109 @@
package sarama
import (
"fmt"
"time"
)
type CreatePartitionsResponse struct {
ThrottleTime time.Duration
TopicPartitionErrors map[string]*TopicPartitionError
}
func (c *CreatePartitionsResponse) encode(pe packetEncoder) error {
pe.putInt32(int32(c.ThrottleTime / time.Millisecond))
if err := pe.putArrayLength(len(c.TopicPartitionErrors)); err != nil {
return err
}
for topic, partitionError := range c.TopicPartitionErrors {
if err := pe.putString(topic); err != nil {
return err
}
if err := partitionError.encode(pe); err != nil {
return err
}
}
return nil
}
func (c *CreatePartitionsResponse) decode(pd packetDecoder, version int16) (err error) {
throttleTime, err := pd.getInt32()
if err != nil {
return err
}
c.ThrottleTime = time.Duration(throttleTime) * time.Millisecond
n, err := pd.getArrayLength()
if err != nil {
return err
}
c.TopicPartitionErrors = make(map[string]*TopicPartitionError, n)
for i := 0; i < n; i++ {
topic, err := pd.getString()
if err != nil {
return err
}
c.TopicPartitionErrors[topic] = new(TopicPartitionError)
if err := c.TopicPartitionErrors[topic].decode(pd, version); err != nil {
return err
}
}
return nil
}
func (r *CreatePartitionsResponse) key() int16 {
return 37
}
func (r *CreatePartitionsResponse) version() int16 {
return 0
}
func (r *CreatePartitionsResponse) headerVersion() int16 {
return 0
}
func (r *CreatePartitionsResponse) requiredVersion() KafkaVersion {
return V1_0_0_0
}
type TopicPartitionError struct {
Err KError
ErrMsg *string
}
func (t *TopicPartitionError) Error() string {
text := t.Err.Error()
if t.ErrMsg != nil {
text = fmt.Sprintf("%s - %s", text, *t.ErrMsg)
}
return text
}
func (t *TopicPartitionError) encode(pe packetEncoder) error {
pe.putInt16(int16(t.Err))
if err := pe.putNullableString(t.ErrMsg); err != nil {
return err
}
return nil
}
func (t *TopicPartitionError) decode(pd packetDecoder, version int16) (err error) {
kerr, err := pd.getInt16()
if err != nil {
return err
}
t.Err = KError(kerr)
if t.ErrMsg, err = pd.getNullableString(); err != nil {
return err
}
return nil
}

View File

@ -0,0 +1,178 @@
package sarama
import (
"time"
)
type CreateTopicsRequest struct {
Version int16
TopicDetails map[string]*TopicDetail
Timeout time.Duration
ValidateOnly bool
}
func (c *CreateTopicsRequest) encode(pe packetEncoder) error {
if err := pe.putArrayLength(len(c.TopicDetails)); err != nil {
return err
}
for topic, detail := range c.TopicDetails {
if err := pe.putString(topic); err != nil {
return err
}
if err := detail.encode(pe); err != nil {
return err
}
}
pe.putInt32(int32(c.Timeout / time.Millisecond))
if c.Version >= 1 {
pe.putBool(c.ValidateOnly)
}
return nil
}
func (c *CreateTopicsRequest) decode(pd packetDecoder, version int16) (err error) {
n, err := pd.getArrayLength()
if err != nil {
return err
}
c.TopicDetails = make(map[string]*TopicDetail, n)
for i := 0; i < n; i++ {
topic, err := pd.getString()
if err != nil {
return err
}
c.TopicDetails[topic] = new(TopicDetail)
if err = c.TopicDetails[topic].decode(pd, version); err != nil {
return err
}
}
timeout, err := pd.getInt32()
if err != nil {
return err
}
c.Timeout = time.Duration(timeout) * time.Millisecond
if version >= 1 {
c.ValidateOnly, err = pd.getBool()
if err != nil {
return err
}
c.Version = version
}
return nil
}
func (c *CreateTopicsRequest) key() int16 {
return 19
}
func (c *CreateTopicsRequest) version() int16 {
return c.Version
}
func (r *CreateTopicsRequest) headerVersion() int16 {
return 1
}
func (c *CreateTopicsRequest) requiredVersion() KafkaVersion {
switch c.Version {
case 2:
return V1_0_0_0
case 1:
return V0_11_0_0
default:
return V0_10_1_0
}
}
type TopicDetail struct {
NumPartitions int32
ReplicationFactor int16
ReplicaAssignment map[int32][]int32
ConfigEntries map[string]*string
}
func (t *TopicDetail) encode(pe packetEncoder) error {
pe.putInt32(t.NumPartitions)
pe.putInt16(t.ReplicationFactor)
if err := pe.putArrayLength(len(t.ReplicaAssignment)); err != nil {
return err
}
for partition, assignment := range t.ReplicaAssignment {
pe.putInt32(partition)
if err := pe.putInt32Array(assignment); err != nil {
return err
}
}
if err := pe.putArrayLength(len(t.ConfigEntries)); err != nil {
return err
}
for configKey, configValue := range t.ConfigEntries {
if err := pe.putString(configKey); err != nil {
return err
}
if err := pe.putNullableString(configValue); err != nil {
return err
}
}
return nil
}
func (t *TopicDetail) decode(pd packetDecoder, version int16) (err error) {
if t.NumPartitions, err = pd.getInt32(); err != nil {
return err
}
if t.ReplicationFactor, err = pd.getInt16(); err != nil {
return err
}
n, err := pd.getArrayLength()
if err != nil {
return err
}
if n > 0 {
t.ReplicaAssignment = make(map[int32][]int32, n)
for i := 0; i < n; i++ {
replica, err := pd.getInt32()
if err != nil {
return err
}
if t.ReplicaAssignment[replica], err = pd.getInt32Array(); err != nil {
return err
}
}
}
n, err = pd.getArrayLength()
if err != nil {
return err
}
if n > 0 {
t.ConfigEntries = make(map[string]*string, n)
for i := 0; i < n; i++ {
configKey, err := pd.getString()
if err != nil {
return err
}
if t.ConfigEntries[configKey], err = pd.getNullableString(); err != nil {
return err
}
}
}
return nil
}
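A sketch of issuing this request through the exported Broker.CreateTopics helper from upstream sarama; the address, topic name, and retention value are illustrative:

package main

import (
	"log"
	"time"

	"github.com/Shopify/sarama"
)

func main() {
	conf := sarama.NewConfig()
	conf.Version = sarama.V0_11_0_0 // request version 1 adds ValidateOnly

	broker := sarama.NewBroker("localhost:9092") // illustrative address
	if err := broker.Open(conf); err != nil {
		log.Fatal(err)
	}
	defer broker.Close()

	retention := "86400000" // topic config values travel as *string
	req := &sarama.CreateTopicsRequest{
		Version:      1,
		Timeout:      15 * time.Second,
		ValidateOnly: true, // dry-run: validate the request without creating the topic
		TopicDetails: map[string]*sarama.TopicDetail{
			"events": {
				NumPartitions:     3,
				ReplicationFactor: 2,
				ConfigEntries:     map[string]*string{"retention.ms": &retention},
			},
		},
	}
	resp, err := broker.CreateTopics(req)
	if err != nil {
		log.Fatal(err)
	}
	for topic, tErr := range resp.TopicErrors {
		log.Printf("%s: %v", topic, tErr.Err)
	}
}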

View File

@ -0,0 +1,127 @@
package sarama
import (
"fmt"
"time"
)
type CreateTopicsResponse struct {
Version int16
ThrottleTime time.Duration
TopicErrors map[string]*TopicError
}
func (c *CreateTopicsResponse) encode(pe packetEncoder) error {
if c.Version >= 2 {
pe.putInt32(int32(c.ThrottleTime / time.Millisecond))
}
if err := pe.putArrayLength(len(c.TopicErrors)); err != nil {
return err
}
for topic, topicError := range c.TopicErrors {
if err := pe.putString(topic); err != nil {
return err
}
if err := topicError.encode(pe, c.Version); err != nil {
return err
}
}
return nil
}
func (c *CreateTopicsResponse) decode(pd packetDecoder, version int16) (err error) {
c.Version = version
if version >= 2 {
throttleTime, err := pd.getInt32()
if err != nil {
return err
}
c.ThrottleTime = time.Duration(throttleTime) * time.Millisecond
}
n, err := pd.getArrayLength()
if err != nil {
return err
}
c.TopicErrors = make(map[string]*TopicError, n)
for i := 0; i < n; i++ {
topic, err := pd.getString()
if err != nil {
return err
}
c.TopicErrors[topic] = new(TopicError)
if err := c.TopicErrors[topic].decode(pd, version); err != nil {
return err
}
}
return nil
}
func (c *CreateTopicsResponse) key() int16 {
return 19
}
func (c *CreateTopicsResponse) version() int16 {
return c.Version
}
func (c *CreateTopicsResponse) headerVersion() int16 {
return 0
}
func (c *CreateTopicsResponse) requiredVersion() KafkaVersion {
switch c.Version {
case 2:
return V1_0_0_0
case 1:
return V0_11_0_0
default:
return V0_10_1_0
}
}
type TopicError struct {
Err KError
ErrMsg *string
}
func (t *TopicError) Error() string {
text := t.Err.Error()
if t.ErrMsg != nil {
text = fmt.Sprintf("%s - %s", text, *t.ErrMsg)
}
return text
}
func (t *TopicError) encode(pe packetEncoder, version int16) error {
pe.putInt16(int16(t.Err))
if version >= 1 {
if err := pe.putNullableString(t.ErrMsg); err != nil {
return err
}
}
return nil
}
func (t *TopicError) decode(pd packetDecoder, version int16) (err error) {
kErr, err := pd.getInt16()
if err != nil {
return err
}
t.Err = KError(kErr)
if version >= 1 {
if t.ErrMsg, err = pd.getNullableString(); err != nil {
return err
}
}
return nil
}

63
vendor/github.com/Shopify/sarama/decompress.go generated vendored Normal file
View File

@ -0,0 +1,63 @@
package sarama
import (
"bytes"
"compress/gzip"
"fmt"
"io/ioutil"
"sync"
snappy "github.com/eapache/go-xerial-snappy"
"github.com/pierrec/lz4"
)
var (
lz4ReaderPool = sync.Pool{
New: func() interface{} {
return lz4.NewReader(nil)
},
}
gzipReaderPool sync.Pool
)
func decompress(cc CompressionCodec, data []byte) ([]byte, error) {
switch cc {
case CompressionNone:
return data, nil
case CompressionGZIP:
var (
err error
reader *gzip.Reader
readerIntf = gzipReaderPool.Get()
)
if readerIntf != nil {
reader = readerIntf.(*gzip.Reader)
} else {
reader, err = gzip.NewReader(bytes.NewReader(data))
if err != nil {
return nil, err
}
}
defer gzipReaderPool.Put(reader)
if err := reader.Reset(bytes.NewReader(data)); err != nil {
return nil, err
}
return ioutil.ReadAll(reader)
case CompressionSnappy:
return snappy.Decode(data)
case CompressionLZ4:
reader := lz4ReaderPool.Get().(*lz4.Reader)
defer lz4ReaderPool.Put(reader)
reader.Reset(bytes.NewReader(data))
return ioutil.ReadAll(reader)
case CompressionZSTD:
return zstdDecompress(nil, data)
default:
return nil, PacketDecodingError{fmt.Sprintf("invalid compression specified (%d)", cc)}
}
}
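The pools above exist so a hot fetch path does not allocate a fresh decompressor per message set. A standalone sketch of the same Reset-and-reuse pattern for gzip, using only the standard library:

package main

import (
	"bytes"
	"compress/gzip"
	"fmt"
	"io/ioutil"
	"sync"
)

var gzipReaderPool sync.Pool // holds *gzip.Reader values between calls

func gunzip(data []byte) ([]byte, error) {
	var (
		reader *gzip.Reader
		err    error
	)
	if v := gzipReaderPool.Get(); v != nil {
		// Reuse a pooled reader; Reset rebinds it to the new input.
		reader = v.(*gzip.Reader)
		if err = reader.Reset(bytes.NewReader(data)); err != nil {
			return nil, err
		}
	} else {
		if reader, err = gzip.NewReader(bytes.NewReader(data)); err != nil {
			return nil, err
		}
	}
	defer gzipReaderPool.Put(reader)
	return ioutil.ReadAll(reader)
}

func main() {
	var buf bytes.Buffer
	w := gzip.NewWriter(&buf)
	w.Write([]byte("hello, kafka"))
	w.Close()

	out, err := gunzip(buf.Bytes())
	fmt.Println(string(out), err)
}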

View File

@ -0,0 +1,34 @@
package sarama
type DeleteGroupsRequest struct {
Groups []string
}
func (r *DeleteGroupsRequest) encode(pe packetEncoder) error {
return pe.putStringArray(r.Groups)
}
func (r *DeleteGroupsRequest) decode(pd packetDecoder, version int16) (err error) {
r.Groups, err = pd.getStringArray()
return
}
func (r *DeleteGroupsRequest) key() int16 {
return 42
}
func (r *DeleteGroupsRequest) version() int16 {
return 0
}
func (r *DeleteGroupsRequest) headerVersion() int16 {
return 1
}
func (r *DeleteGroupsRequest) requiredVersion() KafkaVersion {
return V1_1_0_0
}
func (r *DeleteGroupsRequest) AddGroup(group string) {
r.Groups = append(r.Groups, group)
}
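A usage sketch, assuming the exported Broker.DeleteGroups helper from upstream sarama; the address and group id are illustrative:

package main

import (
	"log"

	"github.com/Shopify/sarama"
)

func main() {
	conf := sarama.NewConfig()
	conf.Version = sarama.V1_1_0_0 // DeleteGroups needs a 1.1+ broker

	broker := sarama.NewBroker("localhost:9092") // illustrative address
	if err := broker.Open(conf); err != nil {
		log.Fatal(err)
	}
	defer broker.Close()

	req := &sarama.DeleteGroupsRequest{}
	req.AddGroup("orphaned-group") // illustrative group id

	resp, err := broker.DeleteGroups(req)
	if err != nil {
		log.Fatal(err)
	}
	for group, kerr := range resp.GroupErrorCodes {
		log.Printf("%s: %v", group, kerr)
	}
}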

View File

@ -0,0 +1,74 @@
package sarama
import (
"time"
)
type DeleteGroupsResponse struct {
ThrottleTime time.Duration
GroupErrorCodes map[string]KError
}
func (r *DeleteGroupsResponse) encode(pe packetEncoder) error {
pe.putInt32(int32(r.ThrottleTime / time.Millisecond))
if err := pe.putArrayLength(len(r.GroupErrorCodes)); err != nil {
return err
}
for groupID, errorCode := range r.GroupErrorCodes {
if err := pe.putString(groupID); err != nil {
return err
}
pe.putInt16(int16(errorCode))
}
return nil
}
func (r *DeleteGroupsResponse) decode(pd packetDecoder, version int16) error {
throttleTime, err := pd.getInt32()
if err != nil {
return err
}
r.ThrottleTime = time.Duration(throttleTime) * time.Millisecond
n, err := pd.getArrayLength()
if err != nil {
return err
}
if n == 0 {
return nil
}
r.GroupErrorCodes = make(map[string]KError, n)
for i := 0; i < n; i++ {
groupID, err := pd.getString()
if err != nil {
return err
}
errorCode, err := pd.getInt16()
if err != nil {
return err
}
r.GroupErrorCodes[groupID] = KError(errorCode)
}
return nil
}
func (r *DeleteGroupsResponse) key() int16 {
return 42
}
func (r *DeleteGroupsResponse) version() int16 {
return 0
}
func (r *DeleteGroupsResponse) headerVersion() int16 {
return 0
}
func (r *DeleteGroupsResponse) requiredVersion() KafkaVersion {
return V1_1_0_0
}

View File

@ -0,0 +1,130 @@
package sarama
import (
"sort"
"time"
)
// request message format is:
// [topic] timeout(int32)
// where topic is:
// name(string) [partition]
// where partition is:
// id(int32) offset(int64)
type DeleteRecordsRequest struct {
Topics map[string]*DeleteRecordsRequestTopic
Timeout time.Duration
}
func (d *DeleteRecordsRequest) encode(pe packetEncoder) error {
if err := pe.putArrayLength(len(d.Topics)); err != nil {
return err
}
keys := make([]string, 0, len(d.Topics))
for topic := range d.Topics {
keys = append(keys, topic)
}
sort.Strings(keys)
for _, topic := range keys {
if err := pe.putString(topic); err != nil {
return err
}
if err := d.Topics[topic].encode(pe); err != nil {
return err
}
}
pe.putInt32(int32(d.Timeout / time.Millisecond))
return nil
}
func (d *DeleteRecordsRequest) decode(pd packetDecoder, version int16) error {
n, err := pd.getArrayLength()
if err != nil {
return err
}
if n > 0 {
d.Topics = make(map[string]*DeleteRecordsRequestTopic, n)
for i := 0; i < n; i++ {
topic, err := pd.getString()
if err != nil {
return err
}
details := new(DeleteRecordsRequestTopic)
if err = details.decode(pd, version); err != nil {
return err
}
d.Topics[topic] = details
}
}
timeout, err := pd.getInt32()
if err != nil {
return err
}
d.Timeout = time.Duration(timeout) * time.Millisecond
return nil
}
func (d *DeleteRecordsRequest) key() int16 {
return 21
}
func (d *DeleteRecordsRequest) version() int16 {
return 0
}
func (d *DeleteRecordsRequest) headerVersion() int16 {
return 1
}
func (d *DeleteRecordsRequest) requiredVersion() KafkaVersion {
return V0_11_0_0
}
type DeleteRecordsRequestTopic struct {
PartitionOffsets map[int32]int64 // partition => offset
}
func (t *DeleteRecordsRequestTopic) encode(pe packetEncoder) error {
if err := pe.putArrayLength(len(t.PartitionOffsets)); err != nil {
return err
}
keys := make([]int32, 0, len(t.PartitionOffsets))
for partition := range t.PartitionOffsets {
keys = append(keys, partition)
}
sort.Slice(keys, func(i, j int) bool { return keys[i] < keys[j] })
for _, partition := range keys {
pe.putInt32(partition)
pe.putInt64(t.PartitionOffsets[partition])
}
return nil
}
func (t *DeleteRecordsRequestTopic) decode(pd packetDecoder, version int16) error {
n, err := pd.getArrayLength()
if err != nil {
return err
}
if n > 0 {
t.PartitionOffsets = make(map[int32]int64, n)
for i := 0; i < n; i++ {
partition, err := pd.getInt32()
if err != nil {
return err
}
offset, err := pd.getInt64()
if err != nil {
return err
}
t.PartitionOffsets[partition] = offset
}
}
return nil
}
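The encoder collects and sorts the map keys before writing because Go randomizes map iteration order; sorting makes the wire bytes deterministic, which matters for request comparison in tests. A standalone sketch of the pattern:

package main

import (
	"fmt"
	"sort"
)

// encodeOffsets walks a partition=>offset map in a fixed order so that
// repeated encodings of the same map produce identical output.
func encodeOffsets(offsets map[int32]int64) []string {
	keys := make([]int32, 0, len(offsets))
	for p := range offsets {
		keys = append(keys, p)
	}
	// Go randomizes map iteration; sorting restores determinism.
	sort.Slice(keys, func(i, j int) bool { return keys[i] < keys[j] })

	out := make([]string, 0, len(keys))
	for _, p := range keys {
		out = append(out, fmt.Sprintf("%d=>%d", p, offsets[p]))
	}
	return out
}

func main() {
	fmt.Println(encodeOffsets(map[int32]int64{2: 100, 0: 42, 1: 7}))
	// Always prints [0=>42 1=>7 2=>100]
}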

View File

@ -0,0 +1,162 @@
package sarama
import (
"sort"
"time"
)
// response message format is:
// throttleMs(int32) [topic]
// where topic is:
// name(string) [partition]
// where partition is:
// id(int32) low_watermark(int64) error_code(int16)
type DeleteRecordsResponse struct {
Version int16
ThrottleTime time.Duration
Topics map[string]*DeleteRecordsResponseTopic
}
func (d *DeleteRecordsResponse) encode(pe packetEncoder) error {
pe.putInt32(int32(d.ThrottleTime / time.Millisecond))
if err := pe.putArrayLength(len(d.Topics)); err != nil {
return err
}
keys := make([]string, 0, len(d.Topics))
for topic := range d.Topics {
keys = append(keys, topic)
}
sort.Strings(keys)
for _, topic := range keys {
if err := pe.putString(topic); err != nil {
return err
}
if err := d.Topics[topic].encode(pe); err != nil {
return err
}
}
return nil
}
func (d *DeleteRecordsResponse) decode(pd packetDecoder, version int16) error {
d.Version = version
throttleTime, err := pd.getInt32()
if err != nil {
return err
}
d.ThrottleTime = time.Duration(throttleTime) * time.Millisecond
n, err := pd.getArrayLength()
if err != nil {
return err
}
if n > 0 {
d.Topics = make(map[string]*DeleteRecordsResponseTopic, n)
for i := 0; i < n; i++ {
topic, err := pd.getString()
if err != nil {
return err
}
details := new(DeleteRecordsResponseTopic)
if err = details.decode(pd, version); err != nil {
return err
}
d.Topics[topic] = details
}
}
return nil
}
func (d *DeleteRecordsResponse) key() int16 {
return 21
}
func (d *DeleteRecordsResponse) version() int16 {
return 0
}
func (d *DeleteRecordsResponse) headerVersion() int16 {
return 0
}
func (d *DeleteRecordsResponse) requiredVersion() KafkaVersion {
return V0_11_0_0
}
type DeleteRecordsResponseTopic struct {
Partitions map[int32]*DeleteRecordsResponsePartition
}
func (t *DeleteRecordsResponseTopic) encode(pe packetEncoder) error {
if err := pe.putArrayLength(len(t.Partitions)); err != nil {
return err
}
keys := make([]int32, 0, len(t.Partitions))
for partition := range t.Partitions {
keys = append(keys, partition)
}
sort.Slice(keys, func(i, j int) bool { return keys[i] < keys[j] })
for _, partition := range keys {
pe.putInt32(partition)
if err := t.Partitions[partition].encode(pe); err != nil {
return err
}
}
return nil
}
func (t *DeleteRecordsResponseTopic) decode(pd packetDecoder, version int16) error {
n, err := pd.getArrayLength()
if err != nil {
return err
}
if n > 0 {
t.Partitions = make(map[int32]*DeleteRecordsResponsePartition, n)
for i := 0; i < n; i++ {
partition, err := pd.getInt32()
if err != nil {
return err
}
details := new(DeleteRecordsResponsePartition)
if err = details.decode(pd, version); err != nil {
return err
}
t.Partitions[partition] = details
}
}
return nil
}
type DeleteRecordsResponsePartition struct {
LowWatermark int64
Err KError
}
func (t *DeleteRecordsResponsePartition) encode(pe packetEncoder) error {
pe.putInt64(t.LowWatermark)
pe.putInt16(int16(t.Err))
return nil
}
func (t *DeleteRecordsResponsePartition) decode(pd packetDecoder, version int16) error {
lowWatermark, err := pd.getInt64()
if err != nil {
return err
}
t.LowWatermark = lowWatermark
kErr, err := pd.getInt16()
if err != nil {
return err
}
t.Err = KError(kErr)
return nil
}
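A sketch of the full round trip, assuming the exported Broker.DeleteRecords helper from upstream sarama and that the broker addressed is the leader for the partitions involved; names and offsets are illustrative:

package main

import (
	"log"
	"time"

	"github.com/Shopify/sarama"
)

func main() {
	conf := sarama.NewConfig()
	conf.Version = sarama.V0_11_0_0 // DeleteRecords needs a 0.11+ broker

	broker := sarama.NewBroker("localhost:9092") // illustrative address
	if err := broker.Open(conf); err != nil {
		log.Fatal(err)
	}
	defer broker.Close()

	resp, err := broker.DeleteRecords(&sarama.DeleteRecordsRequest{
		Timeout: 15 * time.Second,
		Topics: map[string]*sarama.DeleteRecordsRequestTopic{
			// Truncate partition 0 of "events" below offset 100.
			"events": {PartitionOffsets: map[int32]int64{0: 100}},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	for topic, t := range resp.Topics {
		for p, d := range t.Partitions {
			log.Printf("%s[%d]: low watermark %d, err %v", topic, p, d.LowWatermark, d.Err)
		}
	}
}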

View File

@ -0,0 +1,52 @@
package sarama
import "time"
type DeleteTopicsRequest struct {
Version int16
Topics []string
Timeout time.Duration
}
func (d *DeleteTopicsRequest) encode(pe packetEncoder) error {
if err := pe.putStringArray(d.Topics); err != nil {
return err
}
pe.putInt32(int32(d.Timeout / time.Millisecond))
return nil
}
func (d *DeleteTopicsRequest) decode(pd packetDecoder, version int16) (err error) {
if d.Topics, err = pd.getStringArray(); err != nil {
return err
}
timeout, err := pd.getInt32()
if err != nil {
return err
}
d.Timeout = time.Duration(timeout) * time.Millisecond
d.Version = version
return nil
}
func (d *DeleteTopicsRequest) key() int16 {
return 20
}
func (d *DeleteTopicsRequest) version() int16 {
return d.Version
}
func (d *DeleteTopicsRequest) headerVersion() int16 {
return 1
}
func (d *DeleteTopicsRequest) requiredVersion() KafkaVersion {
switch d.Version {
case 1:
return V0_11_0_0
default:
return V0_10_1_0
}
}
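A usage sketch via the exported Broker.DeleteTopics helper from upstream sarama; the address and topic are illustrative:

package main

import (
	"log"
	"time"

	"github.com/Shopify/sarama"
)

func main() {
	conf := sarama.NewConfig()
	conf.Version = sarama.V0_10_1_0 // minimum broker version for DeleteTopics v0

	broker := sarama.NewBroker("localhost:9092") // illustrative address
	if err := broker.Open(conf); err != nil {
		log.Fatal(err)
	}
	defer broker.Close()

	resp, err := broker.DeleteTopics(&sarama.DeleteTopicsRequest{
		Topics:  []string{"events"},
		Timeout: 15 * time.Second,
	})
	if err != nil {
		log.Fatal(err)
	}
	for topic, kerr := range resp.TopicErrorCodes {
		log.Printf("%s: %v", topic, kerr)
	}
}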

View File

@ -0,0 +1,82 @@
package sarama
import "time"
type DeleteTopicsResponse struct {
Version int16
ThrottleTime time.Duration
TopicErrorCodes map[string]KError
}
func (d *DeleteTopicsResponse) encode(pe packetEncoder) error {
if d.Version >= 1 {
pe.putInt32(int32(d.ThrottleTime / time.Millisecond))
}
if err := pe.putArrayLength(len(d.TopicErrorCodes)); err != nil {
return err
}
for topic, errorCode := range d.TopicErrorCodes {
if err := pe.putString(topic); err != nil {
return err
}
pe.putInt16(int16(errorCode))
}
return nil
}
func (d *DeleteTopicsResponse) decode(pd packetDecoder, version int16) (err error) {
if version >= 1 {
throttleTime, err := pd.getInt32()
if err != nil {
return err
}
d.ThrottleTime = time.Duration(throttleTime) * time.Millisecond
d.Version = version
}
n, err := pd.getArrayLength()
if err != nil {
return err
}
d.TopicErrorCodes = make(map[string]KError, n)
for i := 0; i < n; i++ {
topic, err := pd.getString()
if err != nil {
return err
}
errorCode, err := pd.getInt16()
if err != nil {
return err
}
d.TopicErrorCodes[topic] = KError(errorCode)
}
return nil
}
func (d *DeleteTopicsResponse) key() int16 {
return 20
}
func (d *DeleteTopicsResponse) version() int16 {
return d.Version
}
func (d *DeleteTopicsResponse) headerVersion() int16 {
return 0
}
func (d *DeleteTopicsResponse) requiredVersion() KafkaVersion {
switch d.Version {
case 1:
return V0_11_0_0
default:
return V0_10_1_0
}
}

View File

@ -0,0 +1,116 @@
package sarama
type DescribeConfigsRequest struct {
Version int16
Resources []*ConfigResource
IncludeSynonyms bool
}
type ConfigResource struct {
Type ConfigResourceType
Name string
ConfigNames []string
}
func (r *DescribeConfigsRequest) encode(pe packetEncoder) error {
if err := pe.putArrayLength(len(r.Resources)); err != nil {
return err
}
for _, c := range r.Resources {
pe.putInt8(int8(c.Type))
if err := pe.putString(c.Name); err != nil {
return err
}
if len(c.ConfigNames) == 0 {
pe.putInt32(-1)
continue
}
if err := pe.putStringArray(c.ConfigNames); err != nil {
return err
}
}
if r.Version >= 1 {
pe.putBool(r.IncludeSynonyms)
}
return nil
}
func (r *DescribeConfigsRequest) decode(pd packetDecoder, version int16) (err error) {
n, err := pd.getArrayLength()
if err != nil {
return err
}
r.Resources = make([]*ConfigResource, n)
for i := 0; i < n; i++ {
r.Resources[i] = &ConfigResource{}
t, err := pd.getInt8()
if err != nil {
return err
}
r.Resources[i].Type = ConfigResourceType(t)
name, err := pd.getString()
if err != nil {
return err
}
r.Resources[i].Name = name
confLength, err := pd.getArrayLength()
if err != nil {
return err
}
if confLength == -1 {
continue
}
cfnames := make([]string, confLength)
for i := 0; i < confLength; i++ {
s, err := pd.getString()
if err != nil {
return err
}
cfnames[i] = s
}
r.Resources[i].ConfigNames = cfnames
}
r.Version = version
if r.Version >= 1 {
b, err := pd.getBool()
if err != nil {
return err
}
r.IncludeSynonyms = b
}
return nil
}
func (r *DescribeConfigsRequest) key() int16 {
return 32
}
func (r *DescribeConfigsRequest) version() int16 {
return r.Version
}
func (r *DescribeConfigsRequest) headerVersion() int16 {
return 1
}
func (r *DescribeConfigsRequest) requiredVersion() KafkaVersion {
switch r.Version {
case 1:
return V1_1_0_0
case 2:
return V2_0_0_0
default:
return V0_11_0_0
}
}
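A sketch of querying a topic's config through the exported Broker.DescribeConfigs helper from upstream sarama; the address and names are illustrative:

package main

import (
	"fmt"
	"log"

	"github.com/Shopify/sarama"
)

func main() {
	conf := sarama.NewConfig()
	conf.Version = sarama.V1_1_0_0 // request version 1 adds IncludeSynonyms

	broker := sarama.NewBroker("localhost:9092") // illustrative address
	if err := broker.Open(conf); err != nil {
		log.Fatal(err)
	}
	defer broker.Close()

	req := &sarama.DescribeConfigsRequest{
		Version:         1,
		IncludeSynonyms: true,
		Resources: []*sarama.ConfigResource{{
			Type: sarama.TopicResource,
			Name: "events", // illustrative topic
			// An empty ConfigNames slice is encoded as -1, i.e. "all configs".
			ConfigNames: []string{"retention.ms"},
		}},
	}
	resp, err := broker.DescribeConfigs(req)
	if err != nil {
		log.Fatal(err)
	}
	for _, res := range resp.Resources {
		for _, entry := range res.Configs {
			fmt.Printf("%s = %s (source: %s)\n", entry.Name, entry.Value, entry.Source)
		}
	}
}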

View File

@ -0,0 +1,327 @@
package sarama
import (
"fmt"
"time"
)
type ConfigSource int8
func (s ConfigSource) String() string {
switch s {
case SourceUnknown:
return "Unknown"
case SourceTopic:
return "Topic"
case SourceDynamicBroker:
return "DynamicBroker"
case SourceDynamicDefaultBroker:
return "DynamicDefaultBroker"
case SourceStaticBroker:
return "StaticBroker"
case SourceDefault:
return "Default"
}
return fmt.Sprintf("Source Invalid: %d", int(s))
}
const (
SourceUnknown ConfigSource = iota
SourceTopic
SourceDynamicBroker
SourceDynamicDefaultBroker
SourceStaticBroker
SourceDefault
)
type DescribeConfigsResponse struct {
Version int16
ThrottleTime time.Duration
Resources []*ResourceResponse
}
type ResourceResponse struct {
ErrorCode int16
ErrorMsg string
Type ConfigResourceType
Name string
Configs []*ConfigEntry
}
type ConfigEntry struct {
Name string
Value string
ReadOnly bool
Default bool
Source ConfigSource
Sensitive bool
Synonyms []*ConfigSynonym
}
type ConfigSynonym struct {
ConfigName string
ConfigValue string
Source ConfigSource
}
func (r *DescribeConfigsResponse) encode(pe packetEncoder) (err error) {
pe.putInt32(int32(r.ThrottleTime / time.Millisecond))
if err = pe.putArrayLength(len(r.Resources)); err != nil {
return err
}
for _, c := range r.Resources {
if err = c.encode(pe, r.Version); err != nil {
return err
}
}
return nil
}
func (r *DescribeConfigsResponse) decode(pd packetDecoder, version int16) (err error) {
r.Version = version
throttleTime, err := pd.getInt32()
if err != nil {
return err
}
r.ThrottleTime = time.Duration(throttleTime) * time.Millisecond
n, err := pd.getArrayLength()
if err != nil {
return err
}
r.Resources = make([]*ResourceResponse, n)
for i := 0; i < n; i++ {
rr := &ResourceResponse{}
if err := rr.decode(pd, version); err != nil {
return err
}
r.Resources[i] = rr
}
return nil
}
func (r *DescribeConfigsResponse) key() int16 {
return 32
}
func (r *DescribeConfigsResponse) version() int16 {
return r.Version
}
func (r *DescribeConfigsResponse) headerVersion() int16 {
return 0
}
func (r *DescribeConfigsResponse) requiredVersion() KafkaVersion {
switch r.Version {
case 1:
return V1_0_0_0
case 2:
return V2_0_0_0
default:
return V0_11_0_0
}
}
func (r *ResourceResponse) encode(pe packetEncoder, version int16) (err error) {
pe.putInt16(r.ErrorCode)
if err = pe.putString(r.ErrorMsg); err != nil {
return err
}
pe.putInt8(int8(r.Type))
if err = pe.putString(r.Name); err != nil {
return err
}
if err = pe.putArrayLength(len(r.Configs)); err != nil {
return err
}
for _, c := range r.Configs {
if err = c.encode(pe, version); err != nil {
return err
}
}
return nil
}
func (r *ResourceResponse) decode(pd packetDecoder, version int16) (err error) {
ec, err := pd.getInt16()
if err != nil {
return err
}
r.ErrorCode = ec
em, err := pd.getString()
if err != nil {
return err
}
r.ErrorMsg = em
t, err := pd.getInt8()
if err != nil {
return err
}
r.Type = ConfigResourceType(t)
name, err := pd.getString()
if err != nil {
return err
}
r.Name = name
n, err := pd.getArrayLength()
if err != nil {
return err
}
r.Configs = make([]*ConfigEntry, n)
for i := 0; i < n; i++ {
c := &ConfigEntry{}
if err := c.decode(pd, version); err != nil {
return err
}
r.Configs[i] = c
}
return nil
}
func (r *ConfigEntry) encode(pe packetEncoder, version int16) (err error) {
if err = pe.putString(r.Name); err != nil {
return err
}
if err = pe.putString(r.Value); err != nil {
return err
}
pe.putBool(r.ReadOnly)
if version <= 0 {
pe.putBool(r.Default)
pe.putBool(r.Sensitive)
} else {
pe.putInt8(int8(r.Source))
pe.putBool(r.Sensitive)
if err := pe.putArrayLength(len(r.Synonyms)); err != nil {
return err
}
for _, c := range r.Synonyms {
if err = c.encode(pe, version); err != nil {
return err
}
}
}
return nil
}
// https://cwiki.apache.org/confluence/display/KAFKA/KIP-226+-+Dynamic+Broker+Configuration
func (r *ConfigEntry) decode(pd packetDecoder, version int16) (err error) {
if version == 0 {
r.Source = SourceUnknown
}
name, err := pd.getString()
if err != nil {
return err
}
r.Name = name
value, err := pd.getString()
if err != nil {
return err
}
r.Value = value
read, err := pd.getBool()
if err != nil {
return err
}
r.ReadOnly = read
if version == 0 {
defaultB, err := pd.getBool()
if err != nil {
return err
}
r.Default = defaultB
if defaultB {
r.Source = SourceDefault
}
} else {
source, err := pd.getInt8()
if err != nil {
return err
}
r.Source = ConfigSource(source)
r.Default = r.Source == SourceDefault
}
sensitive, err := pd.getBool()
if err != nil {
return err
}
r.Sensitive = sensitive
if version > 0 {
n, err := pd.getArrayLength()
if err != nil {
return err
}
r.Synonyms = make([]*ConfigSynonym, n)
for i := 0; i < n; i++ {
s := &ConfigSynonym{}
if err := s.decode(pd, version); err != nil {
return err
}
r.Synonyms[i] = s
}
}
return nil
}
func (c *ConfigSynonym) encode(pe packetEncoder, version int16) (err error) {
err = pe.putString(c.ConfigName)
if err != nil {
return err
}
err = pe.putString(c.ConfigValue)
if err != nil {
return err
}
pe.putInt8(int8(c.Source))
return nil
}
func (c *ConfigSynonym) decode(pd packetDecoder, version int16) error {
name, err := pd.getString()
if err != nil {
return err
}
c.ConfigName = name
value, err := pd.getString()
if err != nil {
return err
}
c.ConfigValue = value
source, err := pd.getInt8()
if err != nil {
return err
}
c.Source = ConfigSource(source)
return nil
}

View File

@ -21,6 +21,10 @@ func (r *DescribeGroupsRequest) version() int16 {
return 0
}
func (r *DescribeGroupsRequest) headerVersion() int16 {
return 1
}
func (r *DescribeGroupsRequest) requiredVersion() KafkaVersion {
return V0_9_0_0
}

View File

@ -43,6 +43,10 @@ func (r *DescribeGroupsResponse) version() int16 {
return 0
}
func (r *DescribeGroupsResponse) headerVersion() int16 {
return 0
}
func (r *DescribeGroupsResponse) requiredVersion() KafkaVersion {
return V0_9_0_0
}

View File

@ -0,0 +1,87 @@
package sarama
// DescribeLogDirsRequest is a describe request to get partitions' log size
type DescribeLogDirsRequest struct {
// Version 0 and 1 are equal
// The version number is bumped to indicate that on quota violation brokers send out responses before throttling.
Version int16
// If this is an empty array, all topics will be queried
DescribeTopics []DescribeLogDirsRequestTopic
}
// DescribeLogDirsRequestTopic is a describe request about the log dir of one or more partitions within a Topic
type DescribeLogDirsRequestTopic struct {
Topic string
PartitionIDs []int32
}
func (r *DescribeLogDirsRequest) encode(pe packetEncoder) error {
length := len(r.DescribeTopics)
if length == 0 {
// In order to query all topics we must send null
length = -1
}
if err := pe.putArrayLength(length); err != nil {
return err
}
for _, d := range r.DescribeTopics {
if err := pe.putString(d.Topic); err != nil {
return err
}
if err := pe.putInt32Array(d.PartitionIDs); err != nil {
return err
}
}
return nil
}
func (r *DescribeLogDirsRequest) decode(pd packetDecoder, version int16) error {
n, err := pd.getArrayLength()
if err != nil {
return err
}
if n == -1 {
n = 0
}
topics := make([]DescribeLogDirsRequestTopic, n)
for i := 0; i < n; i++ {
topics[i] = DescribeLogDirsRequestTopic{}
topic, err := pd.getString()
if err != nil {
return err
}
topics[i].Topic = topic
pIDs, err := pd.getInt32Array()
if err != nil {
return err
}
topics[i].PartitionIDs = pIDs
}
r.DescribeTopics = topics
return nil
}
func (r *DescribeLogDirsRequest) key() int16 {
return 35
}
func (r *DescribeLogDirsRequest) version() int16 {
return r.Version
}
func (r *DescribeLogDirsRequest) headerVersion() int16 {
return 1
}
func (r *DescribeLogDirsRequest) requiredVersion() KafkaVersion {
return V1_0_0_0
}

View File

@ -0,0 +1,229 @@
package sarama
import "time"
type DescribeLogDirsResponse struct {
ThrottleTime time.Duration
// Version 0 and 1 are equal
// The version number is bumped to indicate that on quota violation brokers send out responses before throttling.
Version int16
LogDirs []DescribeLogDirsResponseDirMetadata
}
func (r *DescribeLogDirsResponse) encode(pe packetEncoder) error {
pe.putInt32(int32(r.ThrottleTime / time.Millisecond))
if err := pe.putArrayLength(len(r.LogDirs)); err != nil {
return err
}
for _, dir := range r.LogDirs {
if err := dir.encode(pe); err != nil {
return err
}
}
return nil
}
func (r *DescribeLogDirsResponse) decode(pd packetDecoder, version int16) error {
throttleTime, err := pd.getInt32()
if err != nil {
return err
}
r.ThrottleTime = time.Duration(throttleTime) * time.Millisecond
// Decode array of DescribeLogDirsResponseDirMetadata
n, err := pd.getArrayLength()
if err != nil {
return err
}
r.LogDirs = make([]DescribeLogDirsResponseDirMetadata, n)
for i := 0; i < n; i++ {
dir := DescribeLogDirsResponseDirMetadata{}
if err := dir.decode(pd, version); err != nil {
return err
}
r.LogDirs[i] = dir
}
return nil
}
func (r *DescribeLogDirsResponse) key() int16 {
return 35
}
func (r *DescribeLogDirsResponse) version() int16 {
return r.Version
}
func (r *DescribeLogDirsResponse) headerVersion() int16 {
return 0
}
func (r *DescribeLogDirsResponse) requiredVersion() KafkaVersion {
return V1_0_0_0
}
type DescribeLogDirsResponseDirMetadata struct {
ErrorCode KError
// The absolute log directory path
Path string
Topics []DescribeLogDirsResponseTopic
}
func (r *DescribeLogDirsResponseDirMetadata) encode(pe packetEncoder) error {
pe.putInt16(int16(r.ErrorCode))
if err := pe.putString(r.Path); err != nil {
return err
}
if err := pe.putArrayLength(len(r.Topics)); err != nil {
return err
}
for _, topic := range r.Topics {
if err := topic.encode(pe); err != nil {
return err
}
}
return nil
}
func (r *DescribeLogDirsResponseDirMetadata) decode(pd packetDecoder, version int16) error {
errCode, err := pd.getInt16()
if err != nil {
return err
}
r.ErrorCode = KError(errCode)
path, err := pd.getString()
if err != nil {
return err
}
r.Path = path
// Decode array of DescribeLogDirsResponseTopic
n, err := pd.getArrayLength()
if err != nil {
return err
}
r.Topics = make([]DescribeLogDirsResponseTopic, n)
for i := 0; i < n; i++ {
t := DescribeLogDirsResponseTopic{}
if err := t.decode(pd, version); err != nil {
return err
}
r.Topics[i] = t
}
return nil
}
// DescribeLogDirsResponseTopic contains a topic's partitions descriptions
type DescribeLogDirsResponseTopic struct {
Topic string
Partitions []DescribeLogDirsResponsePartition
}
func (r *DescribeLogDirsResponseTopic) encode(pe packetEncoder) error {
if err := pe.putString(r.Topic); err != nil {
return err
}
if err := pe.putArrayLength(len(r.Partitions)); err != nil {
return err
}
for _, partition := range r.Partitions {
if err := partition.encode(pe); err != nil {
return err
}
}
return nil
}
func (r *DescribeLogDirsResponseTopic) decode(pd packetDecoder, version int16) error {
t, err := pd.getString()
if err != nil {
return err
}
r.Topic = t
n, err := pd.getArrayLength()
if err != nil {
return err
}
r.Partitions = make([]DescribeLogDirsResponsePartition, n)
for i := 0; i < n; i++ {
p := DescribeLogDirsResponsePartition{}
if err := p.decode(pd, version); err != nil {
return err
}
r.Partitions[i] = p
}
return nil
}
// DescribeLogDirsResponsePartition describes a partition's log directory
type DescribeLogDirsResponsePartition struct {
PartitionID int32
// The size of the log segments of the partition in bytes.
Size int64
// The lag of the log's LEO w.r.t. partition's HW (if it is the current log for the partition) or
// current replica's LEO (if it is the future log for the partition)
OffsetLag int64
// True if this log is created by AlterReplicaLogDirsRequest and will replace the current log of
// the replica in the future.
IsTemporary bool
}
func (r *DescribeLogDirsResponsePartition) encode(pe packetEncoder) error {
pe.putInt32(r.PartitionID)
pe.putInt64(r.Size)
pe.putInt64(r.OffsetLag)
pe.putBool(r.IsTemporary)
return nil
}
func (r *DescribeLogDirsResponsePartition) decode(pd packetDecoder, version int16) error {
pID, err := pd.getInt32()
if err != nil {
return err
}
r.PartitionID = pID
size, err := pd.getInt64()
if err != nil {
return err
}
r.Size = size
lag, err := pd.getInt64()
if err != nil {
return err
}
r.OffsetLag = lag
isTemp, err := pd.getBool()
if err != nil {
return err
}
r.IsTemporary = isTemp
return nil
}
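A round-trip sketch, assuming upstream sarama pairs these types with an exported Broker.DescribeLogDirs helper; the address is illustrative:

package main

import (
	"fmt"
	"log"

	"github.com/Shopify/sarama"
)

func main() {
	conf := sarama.NewConfig()
	conf.Version = sarama.V1_0_0_0 // DescribeLogDirs needs a 1.0+ broker

	broker := sarama.NewBroker("localhost:9092") // illustrative address
	if err := broker.Open(conf); err != nil {
		log.Fatal(err)
	}
	defer broker.Close()

	// An empty DescribeTopics slice queries every topic on the broker.
	resp, err := broker.DescribeLogDirs(&sarama.DescribeLogDirsRequest{})
	if err != nil {
		log.Fatal(err)
	}
	for _, dir := range resp.LogDirs {
		for _, topic := range dir.Topics {
			for _, p := range topic.Partitions {
				fmt.Printf("%s %s[%d]: %d bytes (lag %d)\n",
					dir.Path, topic.Topic, p.PartitionID, p.Size, p.OffsetLag)
			}
		}
	}
}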

View File

@ -2,13 +2,9 @@ name: sarama
up:
- go:
version: '1.14'
commands:
test:
run: make test
desc: 'run unit tests'

View File

@ -12,6 +12,11 @@ type encoder interface {
encode(pe packetEncoder) error
}
type encoderWithHeader interface {
encoder
headerVersion() int16
}
// Encode takes an Encoder and turns it into bytes while potentially recording metrics.
func encode(e encoder, metricRegistry metrics.Registry) ([]byte, error) {
if e == nil {

54
vendor/github.com/Shopify/sarama/end_txn_request.go generated vendored Normal file
View File

@ -0,0 +1,54 @@
package sarama
type EndTxnRequest struct {
TransactionalID string
ProducerID int64
ProducerEpoch int16
TransactionResult bool
}
func (a *EndTxnRequest) encode(pe packetEncoder) error {
if err := pe.putString(a.TransactionalID); err != nil {
return err
}
pe.putInt64(a.ProducerID)
pe.putInt16(a.ProducerEpoch)
pe.putBool(a.TransactionResult)
return nil
}
func (a *EndTxnRequest) decode(pd packetDecoder, version int16) (err error) {
if a.TransactionalID, err = pd.getString(); err != nil {
return err
}
if a.ProducerID, err = pd.getInt64(); err != nil {
return err
}
if a.ProducerEpoch, err = pd.getInt16(); err != nil {
return err
}
if a.TransactionResult, err = pd.getBool(); err != nil {
return err
}
return nil
}
func (a *EndTxnRequest) key() int16 {
return 26
}
func (a *EndTxnRequest) version() int16 {
return 0
}
func (r *EndTxnRequest) headerVersion() int16 {
return 1
}
func (a *EndTxnRequest) requiredVersion() KafkaVersion {
return V0_11_0_0
}

48
vendor/github.com/Shopify/sarama/end_txn_response.go generated vendored Normal file
View File

@ -0,0 +1,48 @@
package sarama
import (
"time"
)
type EndTxnResponse struct {
ThrottleTime time.Duration
Err KError
}
func (e *EndTxnResponse) encode(pe packetEncoder) error {
pe.putInt32(int32(e.ThrottleTime / time.Millisecond))
pe.putInt16(int16(e.Err))
return nil
}
func (e *EndTxnResponse) decode(pd packetDecoder, version int16) (err error) {
throttleTime, err := pd.getInt32()
if err != nil {
return err
}
e.ThrottleTime = time.Duration(throttleTime) * time.Millisecond
kerr, err := pd.getInt16()
if err != nil {
return err
}
e.Err = KError(kerr)
return nil
}
func (e *EndTxnResponse) key() int16 {
return 25
}
func (e *EndTxnResponse) version() int16 {
return 0
}
func (r *EndTxnResponse) headerVersion() int16 {
return 0
}
func (e *EndTxnResponse) requiredVersion() KafkaVersion {
return V0_11_0_0
}
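A hedged sketch of committing a transaction with this message pair, assuming the exported Broker.EndTxn helper from upstream sarama; the transactional id is illustrative, and the producer id/epoch would come from a prior InitProducerID exchange (placeholder values here):

package main

import (
	"log"

	"github.com/Shopify/sarama"
)

func main() {
	conf := sarama.NewConfig()
	conf.Version = sarama.V0_11_0_0 // transactions arrived with Kafka 0.11

	// Must be addressed to the transaction coordinator for the transactional id.
	broker := sarama.NewBroker("localhost:9092") // illustrative address
	if err := broker.Open(conf); err != nil {
		log.Fatal(err)
	}
	defer broker.Close()

	resp, err := broker.EndTxn(&sarama.EndTxnRequest{
		TransactionalID:   "example-txn",
		ProducerID:        1000, // placeholder: from InitProducerID
		ProducerEpoch:     0,    // placeholder: from InitProducerID
		TransactionResult: true, // true commits the transaction, false aborts it
	})
	if err != nil {
		log.Fatal(err)
	}
	if resp.Err != sarama.ErrNoError {
		log.Printf("EndTxn failed: %v", resp.Err)
	}
}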

View File

@ -37,6 +37,18 @@ var ErrShuttingDown = errors.New("kafka: message received by producer in process
// ErrMessageTooLarge is returned when the next message to consume is larger than the configured Consumer.Fetch.Max
var ErrMessageTooLarge = errors.New("kafka: message is larger than Consumer.Fetch.Max")
// ErrConsumerOffsetNotAdvanced is returned when a partition consumer didn't advance its offset after parsing
// a RecordBatch.
var ErrConsumerOffsetNotAdvanced = errors.New("kafka: consumer offset was not advanced after a RecordBatch")
// ErrControllerNotAvailable is returned when the server didn't give a correct controller id. It may be that the
// kafka server's version is lower than 0.10.0.0.
var ErrControllerNotAvailable = errors.New("kafka: controller is not available")
// ErrNoTopicsToUpdateMetadata is returned when Meta.Full is set to false but no specific topics were found to update
// the metadata.
var ErrNoTopicsToUpdateMetadata = errors.New("kafka: no specific topics to update metadata")
// PacketEncodingError is returned from a failure while encoding a Kafka packet. This can happen, for example,
// if you try to encode a string over 2^15 characters in length, since Kafka's encoding rules do not permit that.
type PacketEncodingError struct {
@ -69,54 +81,130 @@ func (err ConfigurationError) Error() string {
// See https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol#AGuideToTheKafkaProtocol-ErrorCodes
type KError int16
// MultiError is used to contain multiple errors.
type MultiError struct {
Errors *[]error
}
func (mErr MultiError) Error() string {
var errString = ""
for _, err := range *mErr.Errors {
errString += err.Error() + ","
}
return errString
}
func (mErr MultiError) PrettyError() string {
var errString = ""
for _, err := range *mErr.Errors {
errString += err.Error() + "\n"
}
return errString
}
// ErrDeleteRecords is the type of error returned when we fail to delete the required records.
type ErrDeleteRecords struct {
MultiError
}
func (err ErrDeleteRecords) Error() string {
return "kafka server: failed to delete records " + err.MultiError.Error()
}
type ErrReassignPartitions struct {
MultiError
}
func (err ErrReassignPartitions) Error() string {
return fmt.Sprintf("failed to reassign partitions for topic: \n%s", err.MultiError.PrettyError())
}
// Numeric error codes returned by the Kafka server.
const (
ErrNoError KError = 0
ErrUnknown KError = -1
ErrOffsetOutOfRange KError = 1
ErrInvalidMessage KError = 2
ErrUnknownTopicOrPartition KError = 3
ErrInvalidMessageSize KError = 4
ErrLeaderNotAvailable KError = 5
ErrNotLeaderForPartition KError = 6
ErrRequestTimedOut KError = 7
ErrBrokerNotAvailable KError = 8
ErrReplicaNotAvailable KError = 9
ErrMessageSizeTooLarge KError = 10
ErrStaleControllerEpochCode KError = 11
ErrOffsetMetadataTooLarge KError = 12
ErrNetworkException KError = 13
ErrOffsetsLoadInProgress KError = 14
ErrConsumerCoordinatorNotAvailable KError = 15
ErrNotCoordinatorForConsumer KError = 16
ErrInvalidTopic KError = 17
ErrMessageSetSizeTooLarge KError = 18
ErrNotEnoughReplicas KError = 19
ErrNotEnoughReplicasAfterAppend KError = 20
ErrInvalidRequiredAcks KError = 21
ErrIllegalGeneration KError = 22
ErrInconsistentGroupProtocol KError = 23
ErrInvalidGroupId KError = 24
ErrUnknownMemberId KError = 25
ErrInvalidSessionTimeout KError = 26
ErrRebalanceInProgress KError = 27
ErrInvalidCommitOffsetSize KError = 28
ErrTopicAuthorizationFailed KError = 29
ErrGroupAuthorizationFailed KError = 30
ErrClusterAuthorizationFailed KError = 31
ErrInvalidTimestamp KError = 32
ErrUnsupportedSASLMechanism KError = 33
ErrIllegalSASLState KError = 34
ErrUnsupportedVersion KError = 35
ErrTopicAlreadyExists KError = 36
ErrInvalidPartitions KError = 37
ErrInvalidReplicationFactor KError = 38
ErrInvalidReplicaAssignment KError = 39
ErrInvalidConfig KError = 40
ErrNotController KError = 41
ErrInvalidRequest KError = 42
ErrUnsupportedForMessageFormat KError = 43
ErrPolicyViolation KError = 44
ErrOutOfOrderSequenceNumber KError = 45
ErrDuplicateSequenceNumber KError = 46
ErrInvalidProducerEpoch KError = 47
ErrInvalidTxnState KError = 48
ErrInvalidProducerIDMapping KError = 49
ErrInvalidTransactionTimeout KError = 50
ErrConcurrentTransactions KError = 51
ErrTransactionCoordinatorFenced KError = 52
ErrTransactionalIDAuthorizationFailed KError = 53
ErrSecurityDisabled KError = 54
ErrOperationNotAttempted KError = 55
ErrKafkaStorageError KError = 56
ErrLogDirNotFound KError = 57
ErrSASLAuthenticationFailed KError = 58
ErrUnknownProducerID KError = 59
ErrReassignmentInProgress KError = 60
ErrDelegationTokenAuthDisabled KError = 61
ErrDelegationTokenNotFound KError = 62
ErrDelegationTokenOwnerMismatch KError = 63
ErrDelegationTokenRequestNotAllowed KError = 64
ErrDelegationTokenAuthorizationFailed KError = 65
ErrDelegationTokenExpired KError = 66
ErrInvalidPrincipalType KError = 67
ErrNonEmptyGroup KError = 68
ErrGroupIDNotFound KError = 69
ErrFetchSessionIDNotFound KError = 70
ErrInvalidFetchSessionEpoch KError = 71
ErrListenerNotFound KError = 72
ErrTopicDeletionDisabled KError = 73
ErrFencedLeaderEpoch KError = 74
ErrUnknownLeaderEpoch KError = 75
ErrUnsupportedCompressionType KError = 76
ErrStaleBrokerEpoch KError = 77
ErrOffsetNotAvailable KError = 78
ErrMemberIdRequired KError = 79
ErrPreferredLeaderNotAvailable KError = 80
ErrGroupMaxSizeReached KError = 81
ErrFencedInstancedId KError = 82
)
func (err KError) Error() string {
@ -215,6 +303,82 @@ func (err KError) Error() string {
return "kafka server: The requested operation is not supported by the message format version."
case ErrPolicyViolation:
return "kafka server: Request parameters do not satisfy the configured policy."
case ErrOutOfOrderSequenceNumber:
return "kafka server: The broker received an out of order sequence number."
case ErrDuplicateSequenceNumber:
return "kafka server: The broker received a duplicate sequence number."
case ErrInvalidProducerEpoch:
return "kafka server: Producer attempted an operation with an old epoch."
case ErrInvalidTxnState:
return "kafka server: The producer attempted a transactional operation in an invalid state."
case ErrInvalidProducerIDMapping:
return "kafka server: The producer attempted to use a producer id which is not currently assigned to its transactional id."
case ErrInvalidTransactionTimeout:
return "kafka server: The transaction timeout is larger than the maximum value allowed by the broker (as configured by max.transaction.timeout.ms)."
case ErrConcurrentTransactions:
return "kafka server: The producer attempted to update a transaction while another concurrent operation on the same transaction was ongoing."
case ErrTransactionCoordinatorFenced:
return "kafka server: The transaction coordinator sending a WriteTxnMarker is no longer the current coordinator for a given producer."
case ErrTransactionalIDAuthorizationFailed:
return "kafka server: Transactional ID authorization failed."
case ErrSecurityDisabled:
return "kafka server: Security features are disabled."
case ErrOperationNotAttempted:
return "kafka server: The broker did not attempt to execute this operation."
case ErrKafkaStorageError:
return "kafka server: Disk error when trying to access log file on the disk."
case ErrLogDirNotFound:
return "kafka server: The specified log directory is not found in the broker config."
case ErrSASLAuthenticationFailed:
return "kafka server: SASL Authentication failed."
case ErrUnknownProducerID:
return "kafka server: The broker could not locate the producer metadata associated with the Producer ID."
case ErrReassignmentInProgress:
return "kafka server: A partition reassignment is in progress."
case ErrDelegationTokenAuthDisabled:
return "kafka server: Delegation Token feature is not enabled."
case ErrDelegationTokenNotFound:
return "kafka server: Delegation Token is not found on server."
case ErrDelegationTokenOwnerMismatch:
return "kafka server: Specified Principal is not valid Owner/Renewer."
case ErrDelegationTokenRequestNotAllowed:
return "kafka server: Delegation Token requests are not allowed on PLAINTEXT/1-way SSL channels and on delegation token authenticated channels."
case ErrDelegationTokenAuthorizationFailed:
return "kafka server: Delegation Token authorization failed."
case ErrDelegationTokenExpired:
return "kafka server: Delegation Token is expired."
case ErrInvalidPrincipalType:
return "kafka server: Supplied principalType is not supported."
case ErrNonEmptyGroup:
return "kafka server: The group is not empty."
case ErrGroupIDNotFound:
return "kafka server: The group id does not exist."
case ErrFetchSessionIDNotFound:
return "kafka server: The fetch session ID was not found."
case ErrInvalidFetchSessionEpoch:
return "kafka server: The fetch session epoch is invalid."
case ErrListenerNotFound:
return "kafka server: There is no listener on the leader broker that matches the listener on which metadata request was processed."
case ErrTopicDeletionDisabled:
return "kafka server: Topic deletion is disabled."
case ErrFencedLeaderEpoch:
return "kafka server: The leader epoch in the request is older than the epoch on the broker."
case ErrUnknownLeaderEpoch:
return "kafka server: The leader epoch in the request is newer than the epoch on the broker."
case ErrUnsupportedCompressionType:
return "kafka server: The requesting client does not support the compression type of given partition."
case ErrStaleBrokerEpoch:
return "kafka server: Broker epoch has changed"
case ErrOffsetNotAvailable:
return "kafka server: The leader high watermark has not caught up from a recent leader election so the offsets cannot be guaranteed to be monotonically increasing"
case ErrMemberIdRequired:
return "kafka server: The group member needs to have a valid member id before actually entering a consumer group"
case ErrPreferredLeaderNotAvailable:
return "kafka server: The preferred leader was not available"
case ErrGroupMaxSizeReached:
return "kafka server: Consumer group The consumer group has reached its max size. already has the configured maximum number of members."
case ErrFencedInstancedId:
return "kafka server: The broker rejected this static consumer since another consumer with the same group.instance.id has registered with a different member.id."
}
return fmt.Sprintf("Unknown error, how did this happen? Error code = %d", err)
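Two usage notes in sketch form: KError values compare directly against the exported constants, and MultiError is how partially failed admin calls (such as DeleteRecords above) aggregate per-partition failures. The wrapped error below is fabricated for illustration:

package main

import (
	"errors"
	"fmt"

	"github.com/Shopify/sarama"
)

func main() {
	// KError implements error; compare against the exported constants.
	var kerr sarama.KError = sarama.ErrNotLeaderForPartition
	if kerr != sarama.ErrNoError {
		fmt.Println("broker returned:", kerr) // prints the long description via Error()
	}

	// MultiError aggregates several failures behind one error value.
	errs := []error{
		errors.New("events/0: " + sarama.ErrOffsetOutOfRange.Error()),
	}
	wrapped := sarama.ErrDeleteRecords{MultiError: sarama.MultiError{Errors: &errs}}
	fmt.Println(wrapped.Error())
}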

View File

@ -1,20 +1,41 @@
package sarama
type fetchRequestBlock struct {
Version int16
currentLeaderEpoch int32
fetchOffset int64
logStartOffset int64
maxBytes int32
}
func (b *fetchRequestBlock) encode(pe packetEncoder, version int16) error {
b.Version = version
if b.Version >= 9 {
pe.putInt32(b.currentLeaderEpoch)
}
pe.putInt64(b.fetchOffset)
if b.Version >= 5 {
pe.putInt64(b.logStartOffset)
}
pe.putInt32(b.maxBytes)
return nil
}
func (b *fetchRequestBlock) decode(pd packetDecoder, version int16) (err error) {
b.Version = version
if b.Version >= 9 {
if b.currentLeaderEpoch, err = pd.getInt32(); err != nil {
return err
}
}
if b.fetchOffset, err = pd.getInt64(); err != nil {
return err
}
if b.Version >= 5 {
if b.logStartOffset, err = pd.getInt64(); err != nil {
return err
}
}
if b.maxBytes, err = pd.getInt32(); err != nil {
return err
}
@ -25,20 +46,39 @@ func (b *fetchRequestBlock) decode(pd packetDecoder) (err error) {
// https://issues.apache.org/jira/browse/KAFKA-2063 for a discussion of the issues leading up to that. The KIP is at
// https://cwiki.apache.org/confluence/display/KAFKA/KIP-74%3A+Add+Fetch+Response+Size+Limit+in+Bytes
type FetchRequest struct {
MaxWaitTime int32
MinBytes int32
MaxBytes int32
Version int16
Isolation IsolationLevel
SessionID int32
SessionEpoch int32
blocks map[string]map[int32]*fetchRequestBlock
forgotten map[string][]int32
RackID string
}
type IsolationLevel int8
const (
ReadUncommitted IsolationLevel = iota
ReadCommitted
)
func (r *FetchRequest) encode(pe packetEncoder) (err error) {
pe.putInt32(-1) // replica ID is always -1 for clients
pe.putInt32(r.MaxWaitTime)
pe.putInt32(r.MinBytes)
if r.Version >= 3 {
pe.putInt32(r.MaxBytes)
}
if r.Version >= 4 {
pe.putInt8(int8(r.Isolation))
}
if r.Version >= 7 {
pe.putInt32(r.SessionID)
pe.putInt32(r.SessionEpoch)
}
err = pe.putArrayLength(len(r.blocks))
if err != nil {
return err
@ -54,17 +94,44 @@ func (r *FetchRequest) encode(pe packetEncoder) (err error) {
}
for partition, block := range blocks {
pe.putInt32(partition)
err = block.encode(pe)
err = block.encode(pe, r.Version)
if err != nil {
return err
}
}
}
if r.Version >= 7 {
err = pe.putArrayLength(len(r.forgotten))
if err != nil {
return err
}
for topic, partitions := range r.forgotten {
err = pe.putString(topic)
if err != nil {
return err
}
err = pe.putArrayLength(len(partitions))
if err != nil {
return err
}
for _, partition := range partitions {
pe.putInt32(partition)
}
}
}
if r.Version >= 11 {
err = pe.putString(r.RackID)
if err != nil {
return err
}
}
return nil
}
func (r *FetchRequest) decode(pd packetDecoder, version int16) (err error) {
r.Version = version
if _, err = pd.getInt32(); err != nil {
return err
}
@ -74,11 +141,28 @@ func (r *FetchRequest) decode(pd packetDecoder, version int16) (err error) {
if r.MinBytes, err = pd.getInt32(); err != nil {
return err
}
if r.Version >= 3 {
if r.MaxBytes, err = pd.getInt32(); err != nil {
return err
}
}
if r.Version >= 4 {
isolation, err := pd.getInt8()
if err != nil {
return err
}
r.Isolation = IsolationLevel(isolation)
}
if r.Version >= 7 {
r.SessionID, err = pd.getInt32()
if err != nil {
return err
}
r.SessionEpoch, err = pd.getInt32()
if err != nil {
return err
}
}
topicCount, err := pd.getArrayLength()
if err != nil {
return err
@ -103,12 +187,47 @@ func (r *FetchRequest) decode(pd packetDecoder, version int16) (err error) {
return err
}
fetchBlock := &fetchRequestBlock{}
if err = fetchBlock.decode(pd); err != nil {
if err = fetchBlock.decode(pd, r.Version); err != nil {
return err
}
r.blocks[topic][partition] = fetchBlock
}
}
if r.Version >= 7 {
forgottenCount, err := pd.getArrayLength()
if err != nil {
return err
}
r.forgotten = make(map[string][]int32)
for i := 0; i < forgottenCount; i++ {
topic, err := pd.getString()
if err != nil {
return err
}
partitionCount, err := pd.getArrayLength()
if err != nil {
return err
}
r.forgotten[topic] = make([]int32, partitionCount)
for j := 0; j < partitionCount; j++ {
partition, err := pd.getInt32()
if err != nil {
return err
}
r.forgotten[topic][j] = partition
}
}
}
if r.Version >= 11 {
r.RackID, err = pd.getString()
if err != nil {
return err
}
}
return nil
}
@ -120,16 +239,34 @@ func (r *FetchRequest) version() int16 {
return r.Version
}
func (r *FetchRequest) headerVersion() int16 {
return 1
}
func (r *FetchRequest) requiredVersion() KafkaVersion {
switch r.Version {
case 0:
return MinVersion
case 1:
return V0_9_0_0
case 2:
return V0_10_0_0
case 3:
return V0_10_1_0
case 4, 5:
return V0_11_0_0
case 6:
return V1_0_0_0
case 7:
return V1_1_0_0
case 8:
return V2_0_0_0
case 9, 10:
return V2_1_0_0
case 11:
return V2_3_0_0
default:
return MaxVersion
}
}
@ -138,13 +275,21 @@ func (r *FetchRequest) AddBlock(topic string, partitionID int32, fetchOffset int
r.blocks = make(map[string]map[int32]*fetchRequestBlock)
}
if r.Version >= 7 && r.forgotten == nil {
r.forgotten = make(map[string][]int32)
}
if r.blocks[topic] == nil {
r.blocks[topic] = make(map[int32]*fetchRequestBlock)
}
tmp := new(fetchRequestBlock)
tmp.Version = r.Version
tmp.maxBytes = maxBytes
tmp.fetchOffset = fetchOffset
if r.Version >= 9 {
tmp.currentLeaderEpoch = int32(-1)
}
r.blocks[topic][partitionID] = tmp
}
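A sketch that builds a modern fetch and sends it with the exported Broker.Fetch helper from upstream sarama, exercising the version-gated fields added above; the address, topic, and offsets are illustrative:

package main

import (
	"log"

	"github.com/Shopify/sarama"
)

func main() {
	conf := sarama.NewConfig()
	conf.Version = sarama.V2_3_0_0 // fetch version 11 per the table above

	// Fetches must be addressed to the partition leader; illustrative address.
	broker := sarama.NewBroker("localhost:9092")
	if err := broker.Open(conf); err != nil {
		log.Fatal(err)
	}
	defer broker.Close()

	req := &sarama.FetchRequest{
		Version:     11,
		MaxWaitTime: 500,      // ms to wait before answering with fewer than MinBytes
		MinBytes:    1,
		MaxBytes:    10 << 20, // overall response cap, honored for version >= 3
		Isolation:   sarama.ReadCommitted, // hide records from aborted transactions
		RackID:      "rack-a", // version >= 11: enables follower fetching
	}
	// Fetch partition 0 of "events" from offset 42, at most 1 MiB for this partition.
	req.AddBlock("events", 0, 42, 1<<20)

	resp, err := broker.Fetch(req)
	if err != nil {
		log.Fatal(err)
	}
	if block := resp.GetBlock("events", 0); block != nil {
		log.Printf("err=%v hwm=%d batches=%d", block.Err, block.HighWaterMarkOffset, len(block.RecordsSet))
	}
}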

View File

@ -1,14 +1,47 @@
package sarama
import "time"
import (
"sort"
"time"
)
type AbortedTransaction struct {
ProducerID int64
FirstOffset int64
}
func (t *AbortedTransaction) decode(pd packetDecoder) (err error) {
if t.ProducerID, err = pd.getInt64(); err != nil {
return err
}
if t.FirstOffset, err = pd.getInt64(); err != nil {
return err
}
return nil
}
func (t *AbortedTransaction) encode(pe packetEncoder) (err error) {
pe.putInt64(t.ProducerID)
pe.putInt64(t.FirstOffset)
return nil
}
type FetchResponseBlock struct {
Err KError
HighWaterMarkOffset int64
LastStableOffset int64
LogStartOffset int64
AbortedTransactions []*AbortedTransaction
PreferredReadReplica int32
Records *Records // deprecated: use FetchResponseBlock.RecordsSet
RecordsSet []*Records
Partial bool
}
func (b *FetchResponseBlock) decode(pd packetDecoder, version int16) (err error) {
tmp, err := pd.getInt16()
if err != nil {
return err
@ -20,37 +53,182 @@ func (b *FetchResponseBlock) decode(pd packetDecoder) (err error) {
return err
}
if version >= 4 {
b.LastStableOffset, err = pd.getInt64()
if err != nil {
return err
}
if version >= 5 {
b.LogStartOffset, err = pd.getInt64()
if err != nil {
return err
}
}
numTransact, err := pd.getArrayLength()
if err != nil {
return err
}
if numTransact >= 0 {
b.AbortedTransactions = make([]*AbortedTransaction, numTransact)
}
for i := 0; i < numTransact; i++ {
transact := new(AbortedTransaction)
if err = transact.decode(pd); err != nil {
return err
}
b.AbortedTransactions[i] = transact
}
}
if version >= 11 {
b.PreferredReadReplica, err = pd.getInt32()
if err != nil {
return err
}
}
recordsSize, err := pd.getInt32()
if err != nil {
return err
}
recordsDecoder, err := pd.getSubset(int(recordsSize))
if err != nil {
return err
}
err = (&b.MsgSet).decode(msgSetDecoder)
return err
b.RecordsSet = []*Records{}
for recordsDecoder.remaining() > 0 {
records := &Records{}
if err := records.decode(recordsDecoder); err != nil {
// If we have decoded at least one record set already, this is not an error
if err == ErrInsufficientData {
if len(b.RecordsSet) == 0 {
b.Partial = true
}
break
}
return err
}
partial, err := records.isPartial()
if err != nil {
return err
}
n, err := records.numRecords()
if err != nil {
return err
}
if n > 0 || (partial && len(b.RecordsSet) == 0) {
b.RecordsSet = append(b.RecordsSet, records)
if b.Records == nil {
b.Records = records
}
}
overflow, err := records.isOverflow()
if err != nil {
return err
}
if partial || overflow {
break
}
}
return nil
}
func (b *FetchResponseBlock) numRecords() (int, error) {
sum := 0
for _, records := range b.RecordsSet {
count, err := records.numRecords()
if err != nil {
return 0, err
}
sum += count
}
return sum, nil
}
func (b *FetchResponseBlock) isPartial() (bool, error) {
if b.Partial {
return true, nil
}
if len(b.RecordsSet) == 1 {
return b.RecordsSet[0].isPartial()
}
return false, nil
}
func (b *FetchResponseBlock) encode(pe packetEncoder, version int16) (err error) {
pe.putInt16(int16(b.Err))
pe.putInt64(b.HighWaterMarkOffset)
if version >= 4 {
pe.putInt64(b.LastStableOffset)
if version >= 5 {
pe.putInt64(b.LogStartOffset)
}
if err = pe.putArrayLength(len(b.AbortedTransactions)); err != nil {
return err
}
for _, transact := range b.AbortedTransactions {
if err = transact.encode(pe); err != nil {
return err
}
}
}
if version >= 11 {
pe.putInt32(b.PreferredReadReplica)
}
pe.push(&lengthField{})
for _, records := range b.RecordsSet {
err = records.encode(pe)
if err != nil {
return err
}
}
return pe.pop()
}
func (b *FetchResponseBlock) getAbortedTransactions() []*AbortedTransaction {
// I can't find any doc that guarantees the field `fetchResponse.AbortedTransactions` is ordered,
// and the Java implementation uses a PriorityQueue based on `FirstOffset`, so we have to order it ourselves
at := b.AbortedTransactions
sort.Slice(
at,
func(i, j int) bool { return at[i].FirstOffset < at[j].FirstOffset },
)
return at
}
type FetchResponse struct {
Blocks map[string]map[int32]*FetchResponseBlock
ThrottleTime time.Duration
ErrorCode int16
SessionID int32
Version int16
LogAppendTime bool
Timestamp time.Time
}
func (r *FetchResponse) decode(pd packetDecoder, version int16) (err error) {
@ -64,6 +242,17 @@ func (r *FetchResponse) decode(pd packetDecoder, version int16) (err error) {
r.ThrottleTime = time.Duration(throttle) * time.Millisecond
}
if r.Version >= 7 {
r.ErrorCode, err = pd.getInt16()
if err != nil {
return err
}
r.SessionID, err = pd.getInt32()
if err != nil {
return err
}
}
numTopics, err := pd.getArrayLength()
if err != nil {
return err
@ -90,7 +279,7 @@ func (r *FetchResponse) decode(pd packetDecoder, version int16) (err error) {
}
block := new(FetchResponseBlock)
err = block.decode(pd, version)
if err != nil {
return err
}
@ -106,6 +295,11 @@ func (r *FetchResponse) encode(pe packetEncoder) (err error) {
pe.putInt32(int32(r.ThrottleTime / time.Millisecond))
}
if r.Version >= 7 {
pe.putInt16(r.ErrorCode)
pe.putInt32(r.SessionID)
}
err = pe.putArrayLength(len(r.Blocks))
if err != nil {
return err
@ -124,12 +318,11 @@ func (r *FetchResponse) encode(pe packetEncoder) (err error) {
for id, block := range partitions {
pe.putInt32(id)
err = block.encode(pe, r.Version)
if err != nil {
return err
}
}
}
return nil
}
@ -142,14 +335,34 @@ func (r *FetchResponse) version() int16 {
return r.Version
}
func (r *FetchResponse) headerVersion() int16 {
return 0
}
func (r *FetchResponse) requiredVersion() KafkaVersion {
switch r.Version {
case 0:
return MinVersion
case 1:
return V0_9_0_0
case 2:
return V0_10_0_0
case 3:
return V0_10_1_0
case 4, 5:
return V0_11_0_0
case 6:
return V1_0_0_0
case 7:
return V1_1_0_0
case 8:
return V2_0_0_0
case 9, 10:
return V2_1_0_0
case 11:
return V2_3_0_0
default:
return MaxVersion
}
}
@ -182,7 +395,7 @@ func (r *FetchResponse) AddError(topic string, partition int32, err KError) {
frb.Err = err
}
func (r *FetchResponse) getOrCreateBlock(topic string, partition int32) *FetchResponseBlock {
if r.Blocks == nil {
r.Blocks = make(map[string]map[int32]*FetchResponseBlock)
}
@ -196,6 +409,11 @@ func (r *FetchResponse) AddMessage(topic string, partition int32, key, value Enc
frb = new(FetchResponseBlock)
partitions[partition] = frb
}
return frb
}
func encodeKV(key, value Encoder) ([]byte, []byte) {
var kb []byte
var vb []byte
if key != nil {
@ -204,7 +422,125 @@ func (r *FetchResponse) AddMessage(topic string, partition int32, key, value Enc
if value != nil {
vb, _ = value.Encode()
}
msg := &Message{Key: kb, Value: vb}
msgBlock := &MessageBlock{Msg: msg, Offset: offset}
frb.MsgSet.Messages = append(frb.MsgSet.Messages, msgBlock)
return kb, vb
}
func (r *FetchResponse) AddMessageWithTimestamp(topic string, partition int32, key, value Encoder, offset int64, timestamp time.Time, version int8) {
frb := r.getOrCreateBlock(topic, partition)
kb, vb := encodeKV(key, value)
if r.LogAppendTime {
timestamp = r.Timestamp
}
msg := &Message{Key: kb, Value: vb, LogAppendTime: r.LogAppendTime, Timestamp: timestamp, Version: version}
msgBlock := &MessageBlock{Msg: msg, Offset: offset}
if len(frb.RecordsSet) == 0 {
records := newLegacyRecords(&MessageSet{})
frb.RecordsSet = []*Records{&records}
}
set := frb.RecordsSet[0].MsgSet
set.Messages = append(set.Messages, msgBlock)
}
func (r *FetchResponse) AddRecordWithTimestamp(topic string, partition int32, key, value Encoder, offset int64, timestamp time.Time) {
frb := r.getOrCreateBlock(topic, partition)
kb, vb := encodeKV(key, value)
if len(frb.RecordsSet) == 0 {
records := newDefaultRecords(&RecordBatch{Version: 2, LogAppendTime: r.LogAppendTime, FirstTimestamp: timestamp, MaxTimestamp: r.Timestamp})
frb.RecordsSet = []*Records{&records}
}
batch := frb.RecordsSet[0].RecordBatch
rec := &Record{Key: kb, Value: vb, OffsetDelta: offset, TimestampDelta: timestamp.Sub(batch.FirstTimestamp)}
batch.addRecord(rec)
}
// AddRecordBatchWithTimestamp is similar to AddRecordWithTimestamp,
// but instead of appending 1 record to an existing batch, it appends a new batch containing 1 record to the fetchResponse.
// Since transactions are handled on the batch level (the whole batch is either committed or aborted), use this to test transactions.
func (r *FetchResponse) AddRecordBatchWithTimestamp(topic string, partition int32, key, value Encoder, offset int64, producerID int64, isTransactional bool, timestamp time.Time) {
frb := r.getOrCreateBlock(topic, partition)
kb, vb := encodeKV(key, value)
records := newDefaultRecords(&RecordBatch{Version: 2, LogAppendTime: r.LogAppendTime, FirstTimestamp: timestamp, MaxTimestamp: r.Timestamp})
batch := &RecordBatch{
Version: 2,
LogAppendTime: r.LogAppendTime,
FirstTimestamp: timestamp,
MaxTimestamp: r.Timestamp,
FirstOffset: offset,
LastOffsetDelta: 0,
ProducerID: producerID,
IsTransactional: isTransactional,
}
rec := &Record{Key: kb, Value: vb, OffsetDelta: 0, TimestampDelta: timestamp.Sub(batch.FirstTimestamp)}
batch.addRecord(rec)
records.RecordBatch = batch
frb.RecordsSet = append(frb.RecordsSet, &records)
}
func (r *FetchResponse) AddControlRecordWithTimestamp(topic string, partition int32, offset int64, producerID int64, recordType ControlRecordType, timestamp time.Time) {
frb := r.getOrCreateBlock(topic, partition)
// batch
batch := &RecordBatch{
Version: 2,
LogAppendTime: r.LogAppendTime,
FirstTimestamp: timestamp,
MaxTimestamp: r.Timestamp,
FirstOffset: offset,
LastOffsetDelta: 0,
ProducerID: producerID,
IsTransactional: true,
Control: true,
}
// records
records := newDefaultRecords(nil)
records.RecordBatch = batch
// record
crAbort := ControlRecord{
Version: 0,
Type: recordType,
}
crKey := &realEncoder{raw: make([]byte, 4)}
crValue := &realEncoder{raw: make([]byte, 6)}
crAbort.encode(crKey, crValue)
rec := &Record{Key: ByteEncoder(crKey.raw), Value: ByteEncoder(crValue.raw), OffsetDelta: 0, TimestampDelta: timestamp.Sub(batch.FirstTimestamp)}
batch.addRecord(rec)
frb.RecordsSet = append(frb.RecordsSet, &records)
}
func (r *FetchResponse) AddMessage(topic string, partition int32, key, value Encoder, offset int64) {
r.AddMessageWithTimestamp(topic, partition, key, value, offset, time.Time{}, 0)
}
func (r *FetchResponse) AddRecord(topic string, partition int32, key, value Encoder, offset int64) {
r.AddRecordWithTimestamp(topic, partition, key, value, offset, time.Time{})
}
func (r *FetchResponse) AddRecordBatch(topic string, partition int32, key, value Encoder, offset int64, producerID int64, isTransactional bool) {
r.AddRecordBatchWithTimestamp(topic, partition, key, value, offset, producerID, isTransactional, time.Time{})
}
func (r *FetchResponse) AddControlRecord(topic string, partition int32, offset int64, producerID int64, recordType ControlRecordType) {
// define controlRecord key and value
r.AddControlRecordWithTimestamp(topic, partition, offset, producerID, recordType, time.Time{})
}
func (r *FetchResponse) SetLastOffsetDelta(topic string, partition int32, offset int32) {
frb := r.getOrCreateBlock(topic, partition)
if len(frb.RecordsSet) == 0 {
records := newDefaultRecords(&RecordBatch{Version: 2})
frb.RecordsSet = []*Records{&records}
}
batch := frb.RecordsSet[0].RecordBatch
batch.LastOffsetDelta = offset
}
func (r *FetchResponse) SetLastStableOffset(topic string, partition int32, offset int64) {
frb := r.getOrCreateBlock(topic, partition)
frb.LastStableOffset = offset
}
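For orientation, a hedged sketch of how a test might drive these helpers; the topic name, key/value, and producer id are illustrative, and StringEncoder is assumed to be sarama's string implementation of Encoder:
res := &FetchResponse{Version: 4}
// Append one transactional batch holding a single record at offset 0.
res.AddRecordBatch("test-topic", 0, StringEncoder("k"), StringEncoder("v"), 0, 1000, true)
res.SetLastStableOffset("test-topic", 0, 0)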


@ -0,0 +1,65 @@
package sarama
type CoordinatorType int8
const (
CoordinatorGroup CoordinatorType = iota
CoordinatorTransaction
)
type FindCoordinatorRequest struct {
Version int16
CoordinatorKey string
CoordinatorType CoordinatorType
}
func (f *FindCoordinatorRequest) encode(pe packetEncoder) error {
if err := pe.putString(f.CoordinatorKey); err != nil {
return err
}
if f.Version >= 1 {
pe.putInt8(int8(f.CoordinatorType))
}
return nil
}
func (f *FindCoordinatorRequest) decode(pd packetDecoder, version int16) (err error) {
if f.CoordinatorKey, err = pd.getString(); err != nil {
return err
}
if version >= 1 {
f.Version = version
coordinatorType, err := pd.getInt8()
if err != nil {
return err
}
f.CoordinatorType = CoordinatorType(coordinatorType)
}
return nil
}
func (f *FindCoordinatorRequest) key() int16 {
return 10
}
func (f *FindCoordinatorRequest) version() int16 {
return f.Version
}
func (r *FindCoordinatorRequest) headerVersion() int16 {
return 1
}
func (f *FindCoordinatorRequest) requiredVersion() KafkaVersion {
switch f.Version {
case 1:
return V0_11_0_0
default:
return V0_8_2_0
}
}
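A brief usage sketch; the transactional id is illustrative. Note that CoordinatorType is only encoded when Version >= 1:
req := &FindCoordinatorRequest{
	Version:         1,
	CoordinatorKey:  "my-transactional-id", // illustrative id
	CoordinatorType: CoordinatorTransaction,
}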


@ -0,0 +1,96 @@
package sarama
import (
"time"
)
var NoNode = &Broker{id: -1, addr: ":-1"}
type FindCoordinatorResponse struct {
Version int16
ThrottleTime time.Duration
Err KError
ErrMsg *string
Coordinator *Broker
}
func (f *FindCoordinatorResponse) decode(pd packetDecoder, version int16) (err error) {
if version >= 1 {
f.Version = version
throttleTime, err := pd.getInt32()
if err != nil {
return err
}
f.ThrottleTime = time.Duration(throttleTime) * time.Millisecond
}
tmp, err := pd.getInt16()
if err != nil {
return err
}
f.Err = KError(tmp)
if version >= 1 {
if f.ErrMsg, err = pd.getNullableString(); err != nil {
return err
}
}
coordinator := new(Broker)
// The version is hardcoded to 0, as version 1 of the broker decode
// contains the rack field, which is not present in the FindCoordinatorResponse.
if err := coordinator.decode(pd, 0); err != nil {
return err
}
if coordinator.addr == ":0" {
return nil
}
f.Coordinator = coordinator
return nil
}
func (f *FindCoordinatorResponse) encode(pe packetEncoder) error {
if f.Version >= 1 {
pe.putInt32(int32(f.ThrottleTime / time.Millisecond))
}
pe.putInt16(int16(f.Err))
if f.Version >= 1 {
if err := pe.putNullableString(f.ErrMsg); err != nil {
return err
}
}
coordinator := f.Coordinator
if coordinator == nil {
coordinator = NoNode
}
if err := coordinator.encode(pe, 0); err != nil {
return err
}
return nil
}
func (f *FindCoordinatorResponse) key() int16 {
return 10
}
func (f *FindCoordinatorResponse) version() int16 {
return f.Version
}
func (r *FindCoordinatorResponse) headerVersion() int16 {
return 0
}
func (f *FindCoordinatorResponse) requiredVersion() KafkaVersion {
switch f.Version {
case 1:
return V0_11_0_0
default:
return V0_8_2_0
}
}

vendor/github.com/Shopify/sarama/go.mod generated vendored Normal file

@ -0,0 +1,34 @@
module github.com/Shopify/sarama
go 1.13
require (
github.com/Shopify/toxiproxy v2.1.4+incompatible
github.com/davecgh/go-spew v1.1.1
github.com/eapache/go-resiliency v1.2.0
github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21
github.com/eapache/queue v1.1.0
github.com/fortytw2/leaktest v1.3.0
github.com/frankban/quicktest v1.7.2 // indirect
github.com/golang/snappy v0.0.1 // indirect
github.com/google/go-cmp v0.4.0 // indirect
github.com/hashicorp/go-uuid v1.0.2 // indirect
github.com/jcmturner/gofork v1.0.0 // indirect
github.com/klauspost/compress v1.9.8
github.com/kr/pretty v0.2.0 // indirect
github.com/pierrec/lz4 v2.4.1+incompatible
github.com/rcrowley/go-metrics v0.0.0-20190826022208-cac0b30c2563
github.com/stretchr/testify v1.4.0
github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c
github.com/xdg/stringprep v1.0.0 // indirect
golang.org/x/crypto v0.0.0-20200204104054-c9f3fb736b72 // indirect
golang.org/x/net v0.0.0-20200202094626-16171245cfb2
golang.org/x/text v0.3.2 // indirect
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 // indirect
gopkg.in/jcmturner/aescts.v1 v1.0.1 // indirect
gopkg.in/jcmturner/dnsutils.v1 v1.0.1 // indirect
gopkg.in/jcmturner/goidentity.v3 v3.0.0 // indirect
gopkg.in/jcmturner/gokrb5.v7 v7.5.0
gopkg.in/jcmturner/rpc.v1 v1.1.0 // indirect
gopkg.in/yaml.v2 v2.2.8 // indirect
)

vendor/github.com/Shopify/sarama/go.sum generated vendored Normal file

@ -0,0 +1,81 @@
github.com/Shopify/toxiproxy v2.1.4+incompatible h1:TKdv8HiTLgE5wdJuEML90aBgNWsokNbMijUGhmcoBJc=
github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/eapache/go-resiliency v1.2.0 h1:v7g92e/KSN71Rq7vSThKaWIq68fL4YHvWyiUKorFR1Q=
github.com/eapache/go-resiliency v1.2.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs=
github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21 h1:YEetp8/yCZMuEPMUDHG0CW/brkkEp8mzqk2+ODEitlw=
github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU=
github.com/eapache/queue v1.1.0 h1:YOEu7KNc61ntiQlcEeUIoDTJ2o8mQznoNvUhiigpIqc=
github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I=
github.com/fortytw2/leaktest v1.3.0 h1:u8491cBMTQ8ft8aeV+adlcytMZylmA5nnwwkRZjI8vw=
github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g=
github.com/frankban/quicktest v1.7.2 h1:2QxQoC1TS09S7fhCPsrvqYdvP1H5M1P1ih5ABm3BTYk=
github.com/frankban/quicktest v1.7.2/go.mod h1:jaStnuzAqU1AJdCO0l53JDCJrVDKcS03DbaAcR7Ks/o=
github.com/golang/snappy v0.0.1 h1:Qgr9rKW7uDUkrbSmQeiDsGa8SjGyCOGtuasMWwvp2P4=
github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4=
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/hashicorp/go-uuid v1.0.2 h1:cfejS+Tpcp13yd5nYHWDI6qVCny6wyX2Mt5SGur2IGE=
github.com/hashicorp/go-uuid v1.0.2/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
github.com/jcmturner/gofork v1.0.0 h1:J7uCkflzTEhUZ64xqKnkDxq3kzc96ajM1Gli5ktUem8=
github.com/jcmturner/gofork v1.0.0/go.mod h1:MK8+TM0La+2rjBD4jE12Kj1pCCxK7d2LK/UM3ncEo0o=
github.com/klauspost/compress v1.9.8 h1:VMAMUUOh+gaxKTMk+zqbjsSjsIcUcL/LF4o63i82QyA=
github.com/klauspost/compress v1.9.8/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pretty v0.2.0 h1:s5hAObm+yFO5uHYt5dYjxi2rXrsnmRpJx4OYvIWUaQs=
github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/pierrec/lz4 v2.4.1+incompatible h1:mFe7ttWaflA46Mhqh+jUfjp2qTbPYxLB2/OyBppH9dg=
github.com/pierrec/lz4 v2.4.1+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/rcrowley/go-metrics v0.0.0-20190826022208-cac0b30c2563 h1:dY6ETXrvDG7Sa4vE8ZQG4yqWg6UnOcbqTAahkV813vQ=
github.com/rcrowley/go-metrics v0.0.0-20190826022208-cac0b30c2563/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c h1:u40Z8hqBAAQyv+vATcGgV0YCnDjqSL7/q/JyPhhJSPk=
github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c/go.mod h1:lB8K/P019DLNhemzwFU4jHLhdvlE6uDZjXFejJXr49I=
github.com/xdg/stringprep v1.0.0 h1:d9X0esnoa3dFsV0FG35rAT0RIhYFlPq7MiP+DW89La0=
github.com/xdg/stringprep v1.0.0/go.mod h1:Jhud4/sHMO4oL310DaZAKk9ZaJ08SJfe+sJh0HrGL1Y=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2 h1:VklqNMn3ovrHsnt90PveolxSbWFaJdECFbxSq0Mqo2M=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20200204104054-c9f3fb736b72 h1:+ELyKg6m8UBf0nPFSqD0mi7zUfwPyXo23HNjMnXPz7w=
golang.org/x/crypto v0.0.0-20200204104054-c9f3fb736b72/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3 h1:0GoQqolDA55aaLxZyTzK/Y2ePZzZTUrRacwib7cNsYQ=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20200202094626-16171245cfb2 h1:CCH4IOTTfewWjGOlSp+zGcjutRKlBEZQ6wTn8ozI/nI=
golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/jcmturner/aescts.v1 v1.0.1 h1:cVVZBK2b1zY26haWB4vbBiZrfFQnfbTVrE3xZq6hrEw=
gopkg.in/jcmturner/aescts.v1 v1.0.1/go.mod h1:nsR8qBOg+OucoIW+WMhB3GspUQXq9XorLnQb9XtvcOo=
gopkg.in/jcmturner/dnsutils.v1 v1.0.1 h1:cIuC1OLRGZrld+16ZJvvZxVJeKPsvd5eUIvxfoN5hSM=
gopkg.in/jcmturner/dnsutils.v1 v1.0.1/go.mod h1:m3v+5svpVOhtFAP/wSz+yzh4Mc0Fg7eRhxkJMWSIz9Q=
gopkg.in/jcmturner/goidentity.v3 v3.0.0 h1:1duIyWiTaYvVx3YX2CYtpJbUFd7/UuPYCfgXtQ3VTbI=
gopkg.in/jcmturner/goidentity.v3 v3.0.0/go.mod h1:oG2kH0IvSYNIu80dVAyu/yoefjq1mNfM5bm88whjWx4=
gopkg.in/jcmturner/gokrb5.v7 v7.5.0 h1:a9tsXlIDD9SKxotJMK3niV7rPZAJeX2aD/0yg3qlIrg=
gopkg.in/jcmturner/gokrb5.v7 v7.5.0/go.mod h1:l8VISx+WGYp+Fp7KRbsiUuXTTOnxIc3Tuvyavf11/WM=
gopkg.in/jcmturner/rpc.v1 v1.1.0 h1:QHIUxTX1ISuAv9dD2wJ9HWQVuWDX/Zc0PfeC2tjc4rU=
gopkg.in/jcmturner/rpc.v1 v1.1.0/go.mod h1:YIdkC4XfD6GXbzje11McwsDuOlZQSb9W4vfLvuNnlv8=
gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10=
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=

vendor/github.com/Shopify/sarama/gssapi_kerberos.go generated vendored Normal file

@ -0,0 +1,257 @@
package sarama
import (
"encoding/asn1"
"encoding/binary"
"fmt"
"io"
"strings"
"time"
"gopkg.in/jcmturner/gokrb5.v7/asn1tools"
"gopkg.in/jcmturner/gokrb5.v7/gssapi"
"gopkg.in/jcmturner/gokrb5.v7/iana/chksumtype"
"gopkg.in/jcmturner/gokrb5.v7/iana/keyusage"
"gopkg.in/jcmturner/gokrb5.v7/messages"
"gopkg.in/jcmturner/gokrb5.v7/types"
)
const (
TOK_ID_KRB_AP_REQ = 256
GSS_API_GENERIC_TAG = 0x60
KRB5_USER_AUTH = 1
KRB5_KEYTAB_AUTH = 2
GSS_API_INITIAL = 1
GSS_API_VERIFY = 2
GSS_API_FINISH = 3
)
type GSSAPIConfig struct {
AuthType int
KeyTabPath string
KerberosConfigPath string
ServiceName string
Username string
Password string
Realm string
}
type GSSAPIKerberosAuth struct {
Config *GSSAPIConfig
ticket messages.Ticket
encKey types.EncryptionKey
NewKerberosClientFunc func(config *GSSAPIConfig) (KerberosClient, error)
step int
}
type KerberosClient interface {
Login() error
GetServiceTicket(spn string) (messages.Ticket, types.EncryptionKey, error)
Domain() string
CName() types.PrincipalName
Destroy()
}
/*
*
* Appends the length in big endian before the payload, and sends it to Kafka
*
*/
func (krbAuth *GSSAPIKerberosAuth) writePackage(broker *Broker, payload []byte) (int, error) {
length := len(payload)
finalPackage := make([]byte, length+4) //4 byte length header + payload
copy(finalPackage[4:], payload)
binary.BigEndian.PutUint32(finalPackage, uint32(length))
bytes, err := broker.conn.Write(finalPackage)
if err != nil {
return bytes, err
}
return bytes, nil
}
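A minimal standalone sketch of the framing writePackage performs (not part of sarama itself):
// frame prepends a 4-byte big-endian length header to the payload.
func frame(payload []byte) []byte {
	out := make([]byte, 4+len(payload))
	binary.BigEndian.PutUint32(out, uint32(len(payload)))
	copy(out[4:], payload)
	return out
}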
/*
*
* Read length (4 bytes) and then read the payload
*
*/
func (krbAuth *GSSAPIKerberosAuth) readPackage(broker *Broker) ([]byte, int, error) {
bytesRead := 0
lengthInBytes := make([]byte, 4)
bytes, err := io.ReadFull(broker.conn, lengthInBytes)
if err != nil {
return nil, bytesRead, err
}
bytesRead += bytes
payloadLength := binary.BigEndian.Uint32(lengthInBytes)
payloadBytes := make([]byte, payloadLength)         // buffer for the payload
bytes, err = io.ReadFull(broker.conn, payloadBytes) // read the payload
if err != nil {
return payloadBytes, bytesRead, err
}
bytesRead += bytes
return payloadBytes, bytesRead, nil
}
func (krbAuth *GSSAPIKerberosAuth) newAuthenticatorChecksum() []byte {
a := make([]byte, 24)
flags := []int{gssapi.ContextFlagInteg, gssapi.ContextFlagConf}
binary.LittleEndian.PutUint32(a[:4], 16)
for _, i := range flags {
f := binary.LittleEndian.Uint32(a[20:24])
f |= uint32(i)
binary.LittleEndian.PutUint32(a[20:24], f)
}
return a
}
/*
*
* Construct Kerberos AP_REQ package, conforming to RFC-4120
* https://tools.ietf.org/html/rfc4120#page-84
*
*/
func (krbAuth *GSSAPIKerberosAuth) createKrb5Token(
domain string, cname types.PrincipalName,
ticket messages.Ticket,
sessionKey types.EncryptionKey) ([]byte, error) {
auth, err := types.NewAuthenticator(domain, cname)
if err != nil {
return nil, err
}
auth.Cksum = types.Checksum{
CksumType: chksumtype.GSSAPI,
Checksum: krbAuth.newAuthenticatorChecksum(),
}
APReq, err := messages.NewAPReq(
ticket,
sessionKey,
auth,
)
if err != nil {
return nil, err
}
aprBytes := make([]byte, 2)
binary.BigEndian.PutUint16(aprBytes, TOK_ID_KRB_AP_REQ)
tb, err := APReq.Marshal()
if err != nil {
return nil, err
}
aprBytes = append(aprBytes, tb...)
return aprBytes, nil
}
/*
*
* Append the GSS-API header to the payload, conforming to RFC-2743
* Section 3.1, Mechanism-Independent Token Format
*
* https://tools.ietf.org/html/rfc2743#page-81
*
* GSSAPIHeader + <specific mechanism payload>
*
*/
func (krbAuth *GSSAPIKerberosAuth) appendGSSAPIHeader(payload []byte) ([]byte, error) {
oidBytes, err := asn1.Marshal(gssapi.OID(gssapi.OIDKRB5))
if err != nil {
return nil, err
}
tkoLengthBytes := asn1tools.MarshalLengthBytes(len(oidBytes) + len(payload))
GSSHeader := append([]byte{GSS_API_GENERIC_TAG}, tkoLengthBytes...)
GSSHeader = append(GSSHeader, oidBytes...)
GSSPackage := append(GSSHeader, payload...)
return GSSPackage, nil
}
func (krbAuth *GSSAPIKerberosAuth) initSecContext(bytes []byte, kerberosClient KerberosClient) ([]byte, error) {
switch krbAuth.step {
case GSS_API_INITIAL:
aprBytes, err := krbAuth.createKrb5Token(
kerberosClient.Domain(),
kerberosClient.CName(),
krbAuth.ticket,
krbAuth.encKey)
if err != nil {
return nil, err
}
krbAuth.step = GSS_API_VERIFY
return krbAuth.appendGSSAPIHeader(aprBytes)
case GSS_API_VERIFY:
wrapTokenReq := gssapi.WrapToken{}
if err := wrapTokenReq.Unmarshal(bytes, true); err != nil {
return nil, err
}
// Validate response.
isValid, err := wrapTokenReq.Verify(krbAuth.encKey, keyusage.GSSAPI_ACCEPTOR_SEAL)
if !isValid {
return nil, err
}
wrapTokenResponse, err := gssapi.NewInitiatorWrapToken(wrapTokenReq.Payload, krbAuth.encKey)
if err != nil {
return nil, err
}
krbAuth.step = GSS_API_FINISH
return wrapTokenResponse.Marshal()
}
return nil, nil
}
/* This does the handshake for authorization */
func (krbAuth *GSSAPIKerberosAuth) Authorize(broker *Broker) error {
kerberosClient, err := krbAuth.NewKerberosClientFunc(krbAuth.Config)
if err != nil {
Logger.Printf("Kerberos client error: %s", err)
return err
}
err = kerberosClient.Login()
if err != nil {
Logger.Printf("Kerberos client error: %s", err)
return err
}
// Construct SPN using serviceName and host
// SPN format: <SERVICE>/<FQDN>
host := strings.SplitN(broker.addr, ":", 2)[0] // Strip port part
spn := fmt.Sprintf("%s/%s", broker.conf.Net.SASL.GSSAPI.ServiceName, host)
ticket, encKey, err := kerberosClient.GetServiceTicket(spn)
if err != nil {
Logger.Printf("Error getting Kerberos service ticket : %s", err)
return err
}
krbAuth.ticket = ticket
krbAuth.encKey = encKey
krbAuth.step = GSS_API_INITIAL
var receivedBytes []byte = nil
defer kerberosClient.Destroy()
for {
packBytes, err := krbAuth.initSecContext(receivedBytes, kerberosClient)
if err != nil {
Logger.Printf("Error while performing GSSAPI Kerberos Authentication: %s\n", err)
return err
}
requestTime := time.Now()
bytesWritten, err := krbAuth.writePackage(broker, packBytes)
if err != nil {
Logger.Printf("Error while performing GSSAPI Kerberos Authentication: %s\n", err)
return err
}
broker.updateOutgoingCommunicationMetrics(bytesWritten)
if krbAuth.step == GSS_API_VERIFY {
bytesRead := 0
receivedBytes, bytesRead, err = krbAuth.readPackage(broker)
requestLatency := time.Since(requestTime)
broker.updateIncomingCommunicationMetrics(bytesRead, requestLatency)
if err != nil {
Logger.Printf("Error while performing GSSAPI Kerberos Authentication: %s\n", err)
return err
}
} else if krbAuth.step == GSS_API_FINISH {
return nil
}
}
}


@ -42,6 +42,10 @@ func (r *HeartbeatRequest) version() int16 {
return 0
}
func (r *HeartbeatRequest) headerVersion() int16 {
return 1
}
func (r *HeartbeatRequest) requiredVersion() KafkaVersion {
return V0_9_0_0
}


@ -27,6 +27,10 @@ func (r *HeartbeatResponse) version() int16 {
return 0
}
func (r *HeartbeatResponse) headerVersion() int16 {
return 0
}
func (r *HeartbeatResponse) requiredVersion() KafkaVersion {
return V0_9_0_0
}


@ -0,0 +1,47 @@
package sarama
import "time"
type InitProducerIDRequest struct {
TransactionalID *string
TransactionTimeout time.Duration
}
func (i *InitProducerIDRequest) encode(pe packetEncoder) error {
if err := pe.putNullableString(i.TransactionalID); err != nil {
return err
}
pe.putInt32(int32(i.TransactionTimeout / time.Millisecond))
return nil
}
func (i *InitProducerIDRequest) decode(pd packetDecoder, version int16) (err error) {
if i.TransactionalID, err = pd.getNullableString(); err != nil {
return err
}
timeout, err := pd.getInt32()
if err != nil {
return err
}
i.TransactionTimeout = time.Duration(timeout) * time.Millisecond
return nil
}
func (i *InitProducerIDRequest) key() int16 {
return 22
}
func (i *InitProducerIDRequest) version() int16 {
return 0
}
func (i *InitProducerIDRequest) headerVersion() int16 {
return 1
}
func (i *InitProducerIDRequest) requiredVersion() KafkaVersion {
return V0_11_0_0
}


@ -0,0 +1,59 @@
package sarama
import "time"
type InitProducerIDResponse struct {
ThrottleTime time.Duration
Err KError
ProducerID int64
ProducerEpoch int16
}
func (i *InitProducerIDResponse) encode(pe packetEncoder) error {
pe.putInt32(int32(i.ThrottleTime / time.Millisecond))
pe.putInt16(int16(i.Err))
pe.putInt64(i.ProducerID)
pe.putInt16(i.ProducerEpoch)
return nil
}
func (i *InitProducerIDResponse) decode(pd packetDecoder, version int16) (err error) {
throttleTime, err := pd.getInt32()
if err != nil {
return err
}
i.ThrottleTime = time.Duration(throttleTime) * time.Millisecond
kerr, err := pd.getInt16()
if err != nil {
return err
}
i.Err = KError(kerr)
if i.ProducerID, err = pd.getInt64(); err != nil {
return err
}
if i.ProducerEpoch, err = pd.getInt16(); err != nil {
return err
}
return nil
}
func (i *InitProducerIDResponse) key() int16 {
return 22
}
func (i *InitProducerIDResponse) version() int16 {
return 0
}
func (i *InitProducerIDResponse) headerVersion() int16 {
return 0
}
func (i *InitProducerIDResponse) requiredVersion() KafkaVersion {
return V0_11_0_0
}


@ -25,8 +25,10 @@ func (p *GroupProtocol) encode(pe packetEncoder) (err error) {
}
type JoinGroupRequest struct {
Version int16
GroupId string
SessionTimeout int32
RebalanceTimeout int32
MemberId string
ProtocolType string
GroupProtocols map[string][]byte // deprecated; use OrderedGroupProtocols
@ -38,6 +40,9 @@ func (r *JoinGroupRequest) encode(pe packetEncoder) error {
return err
}
pe.putInt32(r.SessionTimeout)
if r.Version >= 1 {
pe.putInt32(r.RebalanceTimeout)
}
if err := pe.putString(r.MemberId); err != nil {
return err
}
@ -76,6 +81,8 @@ func (r *JoinGroupRequest) encode(pe packetEncoder) error {
}
func (r *JoinGroupRequest) decode(pd packetDecoder, version int16) (err error) {
r.Version = version
if r.GroupId, err = pd.getString(); err != nil {
return
}
@ -84,6 +91,12 @@ func (r *JoinGroupRequest) decode(pd packetDecoder, version int16) (err error) {
return
}
if version >= 1 {
if r.RebalanceTimeout, err = pd.getInt32(); err != nil {
return err
}
}
if r.MemberId, err = pd.getString(); err != nil {
return
}
@ -118,11 +131,22 @@ func (r *JoinGroupRequest) key() int16 {
}
func (r *JoinGroupRequest) version() int16 {
return 0
return r.Version
}
func (r *JoinGroupRequest) headerVersion() int16 {
return 1
}
func (r *JoinGroupRequest) requiredVersion() KafkaVersion {
return V0_9_0_0
switch r.Version {
case 2:
return V0_11_0_0
case 1:
return V0_10_1_0
default:
return V0_9_0_0
}
}
func (r *JoinGroupRequest) AddGroupProtocol(name string, metadata []byte) {


@ -1,6 +1,8 @@
package sarama
type JoinGroupResponse struct {
Version int16
ThrottleTime int32
Err KError
GenerationId int32
GroupProtocol string
@ -22,6 +24,9 @@ func (r *JoinGroupResponse) GetMembers() (map[string]ConsumerGroupMemberMetadata
}
func (r *JoinGroupResponse) encode(pe packetEncoder) error {
if r.Version >= 2 {
pe.putInt32(r.ThrottleTime)
}
pe.putInt16(int16(r.Err))
pe.putInt32(r.GenerationId)
@ -53,6 +58,14 @@ func (r *JoinGroupResponse) encode(pe packetEncoder) error {
}
func (r *JoinGroupResponse) decode(pd packetDecoder, version int16) (err error) {
r.Version = version
if version >= 2 {
if r.ThrottleTime, err = pd.getInt32(); err != nil {
return
}
}
kerr, err := pd.getInt16()
if err != nil {
return err
@ -107,9 +120,20 @@ func (r *JoinGroupResponse) key() int16 {
}
func (r *JoinGroupResponse) version() int16 {
return r.Version
}
func (r *JoinGroupResponse) headerVersion() int16 {
return 0
}
func (r *JoinGroupResponse) requiredVersion() KafkaVersion {
return V0_9_0_0
switch r.Version {
case 2:
return V0_11_0_0
case 1:
return V0_10_1_0
default:
return V0_9_0_0
}
}

vendor/github.com/Shopify/sarama/kerberos_client.go generated vendored Normal file

@ -0,0 +1,51 @@
package sarama
import (
krb5client "gopkg.in/jcmturner/gokrb5.v7/client"
krb5config "gopkg.in/jcmturner/gokrb5.v7/config"
"gopkg.in/jcmturner/gokrb5.v7/keytab"
"gopkg.in/jcmturner/gokrb5.v7/types"
)
type KerberosGoKrb5Client struct {
krb5client.Client
}
func (c *KerberosGoKrb5Client) Domain() string {
return c.Credentials.Domain()
}
func (c *KerberosGoKrb5Client) CName() types.PrincipalName {
return c.Credentials.CName()
}
/*
*
* Creates a Kerberos client used to obtain TGT and TGS tokens.
* It uses the gokrb5 library, a pure Go Kerberos client with
* some GSS-API capabilities and SPNEGO support. Kafka does not use SPNEGO;
* it uses a pure Kerberos 5 solution (RFC-4121 and RFC-4120).
*
*/
func NewKerberosClient(config *GSSAPIConfig) (KerberosClient, error) {
cfg, err := krb5config.Load(config.KerberosConfigPath)
if err != nil {
return nil, err
}
return createClient(config, cfg)
}
func createClient(config *GSSAPIConfig, cfg *krb5config.Config) (KerberosClient, error) {
var client *krb5client.Client
if config.AuthType == KRB5_KEYTAB_AUTH {
kt, err := keytab.Load(config.KeyTabPath)
if err != nil {
return nil, err
}
client = krb5client.NewClientWithKeytab(config.Username, config.Realm, kt, cfg)
} else {
client = krb5client.NewClientWithPassword(config.Username,
config.Realm, config.Password, cfg)
}
return &KerberosGoKrb5Client{*client}, nil
}


@ -35,6 +35,10 @@ func (r *LeaveGroupRequest) version() int16 {
return 0
}
func (r *LeaveGroupRequest) headerVersion() int16 {
return 1
}
func (r *LeaveGroupRequest) requiredVersion() KafkaVersion {
return V0_9_0_0
}


@ -27,6 +27,10 @@ func (r *LeaveGroupResponse) version() int16 {
return 0
}
func (r *LeaveGroupResponse) headerVersion() int16 {
return 0
}
func (r *LeaveGroupResponse) requiredVersion() KafkaVersion {
return V0_9_0_0
}


@ -1,10 +1,40 @@
package sarama
import "encoding/binary"
import (
"encoding/binary"
"sync"
)
// LengthField implements the PushEncoder and PushDecoder interfaces for calculating 4-byte lengths.
type lengthField struct {
startOffset int
length int32
}
var lengthFieldPool = sync.Pool{}
func acquireLengthField() *lengthField {
val := lengthFieldPool.Get()
if val != nil {
return val.(*lengthField)
}
return &lengthField{}
}
func releaseLengthField(m *lengthField) {
lengthFieldPool.Put(m)
}
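The intended acquire/release pattern, mirroring how MessageBlock.decode uses the pool later in this change:
lengthDecoder := acquireLengthField()
defer releaseLengthField(lengthDecoder)
if err := pd.push(lengthDecoder); err != nil {
	return err
}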
func (l *lengthField) decode(pd packetDecoder) error {
var err error
l.length, err = pd.getInt32()
if err != nil {
return err
}
if l.length > int32(pd.remaining()) {
return ErrInsufficientData
}
return nil
}
func (l *lengthField) saveOffset(in int) {
@ -21,7 +51,47 @@ func (l *lengthField) run(curOffset int, buf []byte) error {
}
func (l *lengthField) check(curOffset int, buf []byte) error {
if uint32(curOffset-l.startOffset-4) != binary.BigEndian.Uint32(buf[l.startOffset:]) {
if int32(curOffset-l.startOffset-4) != l.length {
return PacketDecodingError{"length field invalid"}
}
return nil
}
type varintLengthField struct {
startOffset int
length int64
}
func (l *varintLengthField) decode(pd packetDecoder) error {
var err error
l.length, err = pd.getVarint()
return err
}
func (l *varintLengthField) saveOffset(in int) {
l.startOffset = in
}
func (l *varintLengthField) adjustLength(currOffset int) int {
oldFieldSize := l.reserveLength()
l.length = int64(currOffset - l.startOffset - oldFieldSize)
return l.reserveLength() - oldFieldSize
}
func (l *varintLengthField) reserveLength() int {
var tmp [binary.MaxVarintLen64]byte
return binary.PutVarint(tmp[:], l.length)
}
func (l *varintLengthField) run(curOffset int, buf []byte) error {
binary.PutVarint(buf[l.startOffset:], l.length)
return nil
}
func (l *varintLengthField) check(curOffset int, buf []byte) error {
if int64(curOffset-l.startOffset-l.reserveLength()) != l.length {
return PacketDecodingError{"length field invalid"}
}


@ -19,6 +19,10 @@ func (r *ListGroupsRequest) version() int16 {
return 0
}
func (r *ListGroupsRequest) headerVersion() int16 {
return 1
}
func (r *ListGroupsRequest) requiredVersion() KafkaVersion {
return V0_9_0_0
}


@ -64,6 +64,10 @@ func (r *ListGroupsResponse) version() int16 {
return 0
}
func (r *ListGroupsResponse) headerVersion() int16 {
return 0
}
func (r *ListGroupsResponse) requiredVersion() KafkaVersion {
return V0_9_0_0
}


@ -0,0 +1,98 @@
package sarama
type ListPartitionReassignmentsRequest struct {
TimeoutMs int32
blocks map[string][]int32
Version int16
}
func (r *ListPartitionReassignmentsRequest) encode(pe packetEncoder) error {
pe.putInt32(r.TimeoutMs)
pe.putCompactArrayLength(len(r.blocks))
for topic, partitions := range r.blocks {
if err := pe.putCompactString(topic); err != nil {
return err
}
if err := pe.putCompactInt32Array(partitions); err != nil {
return err
}
pe.putEmptyTaggedFieldArray()
}
pe.putEmptyTaggedFieldArray()
return nil
}
func (r *ListPartitionReassignmentsRequest) decode(pd packetDecoder, version int16) (err error) {
r.Version = version
if r.TimeoutMs, err = pd.getInt32(); err != nil {
return err
}
topicCount, err := pd.getCompactArrayLength()
if err != nil {
return err
}
if topicCount > 0 {
r.blocks = make(map[string][]int32)
for i := 0; i < topicCount; i++ {
topic, err := pd.getCompactString()
if err != nil {
return err
}
partitionCount, err := pd.getCompactArrayLength()
if err != nil {
return err
}
r.blocks[topic] = make([]int32, partitionCount)
for j := 0; j < partitionCount; j++ {
partition, err := pd.getInt32()
if err != nil {
return err
}
r.blocks[topic][j] = partition
}
if _, err := pd.getEmptyTaggedFieldArray(); err != nil {
return err
}
}
}
if _, err := pd.getEmptyTaggedFieldArray(); err != nil {
return err
}
return
}
func (r *ListPartitionReassignmentsRequest) key() int16 {
return 46
}
func (r *ListPartitionReassignmentsRequest) version() int16 {
return r.Version
}
func (r *ListPartitionReassignmentsRequest) headerVersion() int16 {
return 2
}
func (r *ListPartitionReassignmentsRequest) requiredVersion() KafkaVersion {
return V2_4_0_0
}
func (r *ListPartitionReassignmentsRequest) AddBlock(topic string, partitionIDs []int32) {
if r.blocks == nil {
r.blocks = make(map[string][]int32)
}
if r.blocks[topic] == nil {
r.blocks[topic] = partitionIDs
}
}
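A usage sketch; the topic and partition ids are illustrative:
req := &ListPartitionReassignmentsRequest{TimeoutMs: 10000}
req.AddBlock("test-topic", []int32{0, 1, 2}) // query reassignments for three partitions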


@ -0,0 +1,169 @@
package sarama
type PartitionReplicaReassignmentsStatus struct {
Replicas []int32
AddingReplicas []int32
RemovingReplicas []int32
}
func (b *PartitionReplicaReassignmentsStatus) encode(pe packetEncoder) error {
if err := pe.putCompactInt32Array(b.Replicas); err != nil {
return err
}
if err := pe.putCompactInt32Array(b.AddingReplicas); err != nil {
return err
}
if err := pe.putCompactInt32Array(b.RemovingReplicas); err != nil {
return err
}
pe.putEmptyTaggedFieldArray()
return nil
}
func (b *PartitionReplicaReassignmentsStatus) decode(pd packetDecoder) (err error) {
if b.Replicas, err = pd.getCompactInt32Array(); err != nil {
return err
}
if b.AddingReplicas, err = pd.getCompactInt32Array(); err != nil {
return err
}
if b.RemovingReplicas, err = pd.getCompactInt32Array(); err != nil {
return err
}
if _, err := pd.getEmptyTaggedFieldArray(); err != nil {
return err
}
return err
}
type ListPartitionReassignmentsResponse struct {
Version int16
ThrottleTimeMs int32
ErrorCode KError
ErrorMessage *string
TopicStatus map[string]map[int32]*PartitionReplicaReassignmentsStatus
}
func (r *ListPartitionReassignmentsResponse) AddBlock(topic string, partition int32, replicas, addingReplicas, removingReplicas []int32) {
if r.TopicStatus == nil {
r.TopicStatus = make(map[string]map[int32]*PartitionReplicaReassignmentsStatus)
}
partitions := r.TopicStatus[topic]
if partitions == nil {
partitions = make(map[int32]*PartitionReplicaReassignmentsStatus)
r.TopicStatus[topic] = partitions
}
partitions[partition] = &PartitionReplicaReassignmentsStatus{Replicas: replicas, AddingReplicas: addingReplicas, RemovingReplicas: removingReplicas}
}
func (r *ListPartitionReassignmentsResponse) encode(pe packetEncoder) error {
pe.putInt32(r.ThrottleTimeMs)
pe.putInt16(int16(r.ErrorCode))
if err := pe.putNullableCompactString(r.ErrorMessage); err != nil {
return err
}
pe.putCompactArrayLength(len(r.TopicStatus))
for topic, partitions := range r.TopicStatus {
if err := pe.putCompactString(topic); err != nil {
return err
}
pe.putCompactArrayLength(len(partitions))
for partition, block := range partitions {
pe.putInt32(partition)
if err := block.encode(pe); err != nil {
return err
}
}
pe.putEmptyTaggedFieldArray()
}
pe.putEmptyTaggedFieldArray()
return nil
}
func (r *ListPartitionReassignmentsResponse) decode(pd packetDecoder, version int16) (err error) {
r.Version = version
if r.ThrottleTimeMs, err = pd.getInt32(); err != nil {
return err
}
kerr, err := pd.getInt16()
if err != nil {
return err
}
r.ErrorCode = KError(kerr)
if r.ErrorMessage, err = pd.getCompactNullableString(); err != nil {
return err
}
numTopics, err := pd.getCompactArrayLength()
if err != nil {
return err
}
r.TopicStatus = make(map[string]map[int32]*PartitionReplicaReassignmentsStatus, numTopics)
for i := 0; i < numTopics; i++ {
topic, err := pd.getCompactString()
if err != nil {
return err
}
ongoingPartitionReassignments, err := pd.getCompactArrayLength()
if err != nil {
return err
}
r.TopicStatus[topic] = make(map[int32]*PartitionReplicaReassignmentsStatus, ongoingPartitionReassignments)
for j := 0; j < ongoingPartitionReassignments; j++ {
partition, err := pd.getInt32()
if err != nil {
return err
}
block := &PartitionReplicaReassignmentsStatus{}
if err := block.decode(pd); err != nil {
return err
}
r.TopicStatus[topic][partition] = block
}
if _, err := pd.getEmptyTaggedFieldArray(); err != nil {
return err
}
}
if _, err := pd.getEmptyTaggedFieldArray(); err != nil {
return err
}
return nil
}
func (r *ListPartitionReassignmentsResponse) key() int16 {
return 46
}
func (r *ListPartitionReassignmentsResponse) version() int16 {
return r.Version
}
func (r *ListPartitionReassignmentsResponse) headerVersion() int16 {
return 1
}
func (r *ListPartitionReassignmentsResponse) requiredVersion() KafkaVersion {
return V2_4_0_0
}


@ -1,59 +1,77 @@
package sarama
import (
"bytes"
"compress/gzip"
"fmt"
"io/ioutil"
"time"
)
"github.com/eapache/go-xerial-snappy"
"github.com/pierrec/lz4"
const (
//CompressionNone no compression
CompressionNone CompressionCodec = iota
//CompressionGZIP compression using GZIP
CompressionGZIP
//CompressionSnappy compression using snappy
CompressionSnappy
//CompressionLZ4 compression using LZ4
CompressionLZ4
//CompressionZSTD compression using ZSTD
CompressionZSTD
// The lowest 3 bits contain the compression codec used for the message
compressionCodecMask int8 = 0x07
// Bit 3 set for "LogAppend" timestamps
timestampTypeMask = 0x08
// CompressionLevelDefault is the constant to use in CompressionLevel
// to have the default compression level for any codec. The value is picked
// so that it doesn't collide with any existing compression level.
CompressionLevelDefault = -1000
)
// CompressionCodec represents the various compression codecs recognized by Kafka in messages.
type CompressionCodec int8
// only the last two bits are really used
const compressionCodecMask int8 = 0x03
const (
CompressionNone CompressionCodec = 0
CompressionGZIP CompressionCodec = 1
CompressionSnappy CompressionCodec = 2
CompressionLZ4 CompressionCodec = 3
)
func (cc CompressionCodec) String() string {
return []string{
"none",
"gzip",
"snappy",
"lz4",
"zstd",
}[int(cc)]
}
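As a worked example of the attribute byte assembled in encode below, a snappy-compressed message with a log-append timestamp carries:
attributes := int8(CompressionSnappy)&compressionCodecMask | timestampTypeMask // 0x02 | 0x08 = 0x0A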
//Message is a kafka message type
type Message struct {
Codec CompressionCodec // codec used to compress the message contents
Key []byte // the message key, may be nil
Value []byte // the message contents
Set *MessageSet // the message set a message might wrap
Version int8 // v1 requires Kafka 0.10
Timestamp time.Time // the timestamp of the message (version 1+ only)
Codec CompressionCodec // codec used to compress the message contents
CompressionLevel int // compression level
LogAppendTime bool // the used timestamp is LogAppendTime
Key []byte // the message key, may be nil
Value []byte // the message contents
Set *MessageSet // the message set a message might wrap
Version int8 // v1 requires Kafka 0.10
Timestamp time.Time // the timestamp of the message (version 1+ only)
compressedCache []byte
compressedSize int // used for computing the compression ratio metrics
}
func (m *Message) encode(pe packetEncoder) error {
pe.push(&crc32Field{})
pe.push(newCRC32Field(crcIEEE))
pe.putInt8(m.Version)
attributes := int8(m.Codec) & compressionCodecMask
if m.LogAppendTime {
attributes |= timestampTypeMask
}
pe.putInt8(attributes)
if m.Version >= 1 {
timestamp := int64(-1)
if !m.Timestamp.Before(time.Unix(0, 0)) {
timestamp = m.Timestamp.UnixNano() / int64(time.Millisecond)
} else if !m.Timestamp.IsZero() {
return PacketEncodingError{fmt.Sprintf("invalid timestamp (%v)", m.Timestamp)}
if err := (Timestamp{&m.Timestamp}).encode(pe); err != nil {
return err
}
pe.putInt64(timestamp)
}
err := pe.putBytes(m.Key)
@ -67,39 +85,11 @@ func (m *Message) encode(pe packetEncoder) error {
payload = m.compressedCache
m.compressedCache = nil
} else if m.Value != nil {
switch m.Codec {
case CompressionNone:
payload = m.Value
case CompressionGZIP:
var buf bytes.Buffer
writer := gzip.NewWriter(&buf)
if _, err = writer.Write(m.Value); err != nil {
return err
}
if err = writer.Close(); err != nil {
return err
}
m.compressedCache = buf.Bytes()
payload = m.compressedCache
case CompressionSnappy:
tmp := snappy.Encode(m.Value)
m.compressedCache = tmp
payload = m.compressedCache
case CompressionLZ4:
var buf bytes.Buffer
writer := lz4.NewWriter(&buf)
if _, err = writer.Write(m.Value); err != nil {
return err
}
if err = writer.Close(); err != nil {
return err
}
m.compressedCache = buf.Bytes()
payload = m.compressedCache
default:
return PacketEncodingError{fmt.Sprintf("unsupported compression codec (%d)", m.Codec)}
payload, err = compress(m.Codec, m.CompressionLevel, m.Value)
if err != nil {
return err
}
m.compressedCache = payload
// Keep in mind the compressed payload size for metric gathering
m.compressedSize = len(payload)
}
@ -112,7 +102,10 @@ func (m *Message) encode(pe packetEncoder) error {
}
func (m *Message) decode(pd packetDecoder) (err error) {
err = pd.push(&crc32Field{})
crc32Decoder := acquireCrc32Field(crcIEEE)
defer releaseCrc32Field(crc32Decoder)
err = pd.push(crc32Decoder)
if err != nil {
return err
}
@ -131,21 +124,12 @@ func (m *Message) decode(pd packetDecoder) (err error) {
return err
}
m.Codec = CompressionCodec(attribute & compressionCodecMask)
m.LogAppendTime = attribute&timestampTypeMask == timestampTypeMask
if m.Version == 1 {
millis, err := pd.getInt64()
if err != nil {
if err := (Timestamp{&m.Timestamp}).decode(pd); err != nil {
return err
}
// negative timestamps are invalid, in these cases we should return
// a zero time
timestamp := time.Time{}
if millis >= 0 {
timestamp = time.Unix(millis/1000, (millis%1000)*int64(time.Millisecond))
}
m.Timestamp = timestamp
}
m.Key, err = pd.getBytes()
@ -165,50 +149,24 @@ func (m *Message) decode(pd packetDecoder) (err error) {
switch m.Codec {
case CompressionNone:
// nothing to do
case CompressionGZIP:
default:
if m.Value == nil {
break
}
reader, err := gzip.NewReader(bytes.NewReader(m.Value))
m.Value, err = decompress(m.Codec, m.Value)
if err != nil {
return err
}
if m.Value, err = ioutil.ReadAll(reader); err != nil {
return err
}
if err := m.decodeSet(); err != nil {
return err
}
case CompressionSnappy:
if m.Value == nil {
break
}
if m.Value, err = snappy.Decode(m.Value); err != nil {
return err
}
if err := m.decodeSet(); err != nil {
return err
}
case CompressionLZ4:
if m.Value == nil {
break
}
reader := lz4.NewReader(bytes.NewReader(m.Value))
if m.Value, err = ioutil.ReadAll(reader); err != nil {
return err
}
if err := m.decodeSet(); err != nil {
return err
}
default:
return PacketDecodingError{fmt.Sprintf("invalid compression specified (%d)", m.Codec)}
}
return pd.pop()
}
// decodes a message set from a previousy encoded bulk-message
// decodes a message set from a previously encoded bulk-message
func (m *Message) decodeSet() (err error) {
pd := realDecoder{raw: m.Value}
m.Set = &MessageSet{}


@ -29,7 +29,10 @@ func (msb *MessageBlock) decode(pd packetDecoder) (err error) {
return err
}
if err = pd.push(&lengthField{}); err != nil {
lengthDecoder := acquireLengthField()
defer releaseLengthField(lengthDecoder)
if err = pd.push(lengthDecoder); err != nil {
return err
}
@ -47,6 +50,7 @@ func (msb *MessageBlock) decode(pd packetDecoder) (err error) {
type MessageSet struct {
PartialTrailingMessage bool // whether the set on the wire contained an incomplete trailing MessageBlock
OverflowMessage bool // whether the set on the wire contained an overflow message
Messages []*MessageBlock
}
@ -64,6 +68,19 @@ func (ms *MessageSet) decode(pd packetDecoder) (err error) {
ms.Messages = nil
for pd.remaining() > 0 {
magic, err := magicValue(pd)
if err != nil {
if err == ErrInsufficientData {
ms.PartialTrailingMessage = true
return nil
}
return err
}
if magic > 1 {
return nil
}
msb := new(MessageBlock)
err = msb.decode(pd)
switch err {
@ -72,7 +89,12 @@ func (ms *MessageSet) decode(pd packetDecoder) (err error) {
case ErrInsufficientData:
// As an optimization the server is allowed to return a partial message at the
// end of the message set. Clients should handle this case. So we just ignore such things.
ms.PartialTrailingMessage = true
if msb.Offset == -1 {
// This is an overflow message caused by chunked down conversion
ms.OverflowMessage = true
} else {
ms.PartialTrailingMessage = true
}
return nil
default:
return err


@ -1,40 +1,58 @@
package sarama
type MetadataRequest struct {
Topics []string
Version int16
Topics []string
AllowAutoTopicCreation bool
}
func (r *MetadataRequest) encode(pe packetEncoder) error {
err := pe.putArrayLength(len(r.Topics))
if err != nil {
return err
if r.Version < 0 || r.Version > 5 {
return PacketEncodingError{"invalid or unsupported MetadataRequest version field"}
}
for i := range r.Topics {
err = pe.putString(r.Topics[i])
if r.Version == 0 || len(r.Topics) > 0 {
err := pe.putArrayLength(len(r.Topics))
if err != nil {
return err
}
for i := range r.Topics {
err = pe.putString(r.Topics[i])
if err != nil {
return err
}
}
} else {
pe.putInt32(-1)
}
if r.Version > 3 {
pe.putBool(r.AllowAutoTopicCreation)
}
return nil
}
func (r *MetadataRequest) decode(pd packetDecoder, version int16) error {
topicCount, err := pd.getArrayLength()
r.Version = version
size, err := pd.getInt32()
if err != nil {
return err
}
if topicCount == 0 {
return nil
if size > 0 {
r.Topics = make([]string, size)
for i := range r.Topics {
topic, err := pd.getString()
if err != nil {
return err
}
r.Topics[i] = topic
}
}
r.Topics = make([]string, topicCount)
for i := range r.Topics {
topic, err := pd.getString()
if r.Version > 3 {
autoCreation, err := pd.getBool()
if err != nil {
return err
}
r.Topics[i] = topic
r.AllowAutoTopicCreation = autoCreation
}
return nil
}
@ -44,9 +62,24 @@ func (r *MetadataRequest) key() int16 {
}
func (r *MetadataRequest) version() int16 {
return 0
return r.Version
}
func (r *MetadataRequest) headerVersion() int16 {
return 1
}
func (r *MetadataRequest) requiredVersion() KafkaVersion {
return minVersion
switch r.Version {
case 1:
return V0_10_0_0
case 2:
return V0_10_1_0
case 3, 4:
return V0_11_0_0
case 5:
return V1_0_0_0
default:
return MinVersion
}
}


@ -1,14 +1,15 @@
package sarama
type PartitionMetadata struct {
Err KError
ID int32
Leader int32
Replicas []int32
Isr []int32
Err KError
ID int32
Leader int32
Replicas []int32
Isr []int32
OfflineReplicas []int32
}
func (pm *PartitionMetadata) decode(pd packetDecoder) (err error) {
func (pm *PartitionMetadata) decode(pd packetDecoder, version int16) (err error) {
tmp, err := pd.getInt16()
if err != nil {
return err
@ -35,10 +36,17 @@ func (pm *PartitionMetadata) decode(pd packetDecoder) (err error) {
return err
}
if version >= 5 {
pm.OfflineReplicas, err = pd.getInt32Array()
if err != nil {
return err
}
}
return nil
}
func (pm *PartitionMetadata) encode(pe packetEncoder) (err error) {
func (pm *PartitionMetadata) encode(pe packetEncoder, version int16) (err error) {
pe.putInt16(int16(pm.Err))
pe.putInt32(pm.ID)
pe.putInt32(pm.Leader)
@ -53,16 +61,24 @@ func (pm *PartitionMetadata) encode(pe packetEncoder) (err error) {
return err
}
if version >= 5 {
err = pe.putInt32Array(pm.OfflineReplicas)
if err != nil {
return err
}
}
return nil
}
type TopicMetadata struct {
Err KError
Name string
IsInternal bool // Only valid for Version >= 1
Partitions []*PartitionMetadata
}
func (tm *TopicMetadata) decode(pd packetDecoder) (err error) {
func (tm *TopicMetadata) decode(pd packetDecoder, version int16) (err error) {
tmp, err := pd.getInt16()
if err != nil {
return err
@ -74,6 +90,13 @@ func (tm *TopicMetadata) decode(pd packetDecoder) (err error) {
return err
}
if version >= 1 {
tm.IsInternal, err = pd.getBool()
if err != nil {
return err
}
}
n, err := pd.getArrayLength()
if err != nil {
return err
@ -81,7 +104,7 @@ func (tm *TopicMetadata) decode(pd packetDecoder) (err error) {
tm.Partitions = make([]*PartitionMetadata, n)
for i := 0; i < n; i++ {
tm.Partitions[i] = new(PartitionMetadata)
err = tm.Partitions[i].decode(pd)
err = tm.Partitions[i].decode(pd, version)
if err != nil {
return err
}
@ -90,7 +113,7 @@ func (tm *TopicMetadata) decode(pd packetDecoder) (err error) {
return nil
}
func (tm *TopicMetadata) encode(pe packetEncoder) (err error) {
func (tm *TopicMetadata) encode(pe packetEncoder, version int16) (err error) {
pe.putInt16(int16(tm.Err))
err = pe.putString(tm.Name)
@ -98,13 +121,17 @@ func (tm *TopicMetadata) encode(pe packetEncoder) (err error) {
return err
}
if version >= 1 {
pe.putBool(tm.IsInternal)
}
err = pe.putArrayLength(len(tm.Partitions))
if err != nil {
return err
}
for _, pm := range tm.Partitions {
err = pm.encode(pe)
err = pm.encode(pe, version)
if err != nil {
return err
}
@ -114,11 +141,24 @@ func (tm *TopicMetadata) encode(pe packetEncoder) (err error) {
}
type MetadataResponse struct {
Brokers []*Broker
Topics []*TopicMetadata
Version int16
ThrottleTimeMs int32
Brokers []*Broker
ClusterID *string
ControllerID int32
Topics []*TopicMetadata
}
func (r *MetadataResponse) decode(pd packetDecoder, version int16) (err error) {
r.Version = version
if version >= 3 {
r.ThrottleTimeMs, err = pd.getInt32()
if err != nil {
return err
}
}
n, err := pd.getArrayLength()
if err != nil {
return err
@ -127,12 +167,28 @@ func (r *MetadataResponse) decode(pd packetDecoder, version int16) (err error) {
r.Brokers = make([]*Broker, n)
for i := 0; i < n; i++ {
r.Brokers[i] = new(Broker)
err = r.Brokers[i].decode(pd)
err = r.Brokers[i].decode(pd, version)
if err != nil {
return err
}
}
if version >= 2 {
r.ClusterID, err = pd.getNullableString()
if err != nil {
return err
}
}
if version >= 1 {
r.ControllerID, err = pd.getInt32()
if err != nil {
return err
}
} else {
r.ControllerID = -1
}
n, err = pd.getArrayLength()
if err != nil {
return err
@ -141,7 +197,7 @@ func (r *MetadataResponse) decode(pd packetDecoder, version int16) (err error) {
r.Topics = make([]*TopicMetadata, n)
for i := 0; i < n; i++ {
r.Topics[i] = new(TopicMetadata)
err = r.Topics[i].decode(pd)
err = r.Topics[i].decode(pd, version)
if err != nil {
return err
}
@ -151,23 +207,38 @@ func (r *MetadataResponse) decode(pd packetDecoder, version int16) (err error) {
}
func (r *MetadataResponse) encode(pe packetEncoder) error {
if r.Version >= 3 {
pe.putInt32(r.ThrottleTimeMs)
}
err := pe.putArrayLength(len(r.Brokers))
if err != nil {
return err
}
for _, broker := range r.Brokers {
err = broker.encode(pe)
err = broker.encode(pe, r.Version)
if err != nil {
return err
}
}
if r.Version >= 2 {
err := pe.putNullableString(r.ClusterID)
if err != nil {
return err
}
}
if r.Version >= 1 {
pe.putInt32(r.ControllerID)
}
err = pe.putArrayLength(len(r.Topics))
if err != nil {
return err
}
for _, tm := range r.Topics {
err = tm.encode(pe)
err = tm.encode(pe, r.Version)
if err != nil {
return err
}
@ -181,11 +252,26 @@ func (r *MetadataResponse) key() int16 {
}
func (r *MetadataResponse) version() int16 {
return r.Version
}
func (r *MetadataResponse) headerVersion() int16 {
return 0
}
func (r *MetadataResponse) requiredVersion() KafkaVersion {
return minVersion
switch r.Version {
case 1:
return V0_10_0_0
case 2:
return V0_10_1_0
case 3, 4:
return V0_11_0_0
case 5:
return V1_0_0_0
default:
return MinVersion
}
}
// testing API
@ -214,7 +300,7 @@ foundTopic:
return tmatch
}
func (r *MetadataResponse) AddTopicPartition(topic string, partition, brokerID int32, replicas, isr []int32, err KError) {
func (r *MetadataResponse) AddTopicPartition(topic string, partition, brokerID int32, replicas, isr []int32, offline []int32, err KError) {
tmatch := r.AddTopic(topic, ErrNoError)
var pmatch *PartitionMetadata
@ -234,6 +320,6 @@ foundPartition:
pmatch.Leader = brokerID
pmatch.Replicas = replicas
pmatch.Isr = isr
pmatch.OfflineReplicas = offline
pmatch.Err = err
}


@ -28,14 +28,6 @@ func getMetricNameForBroker(name string, broker *Broker) string {
return fmt.Sprintf(name+"-for-broker-%d", broker.ID())
}
func getOrRegisterBrokerMeter(name string, broker *Broker, r metrics.Registry) metrics.Meter {
return metrics.GetOrRegisterMeter(getMetricNameForBroker(name, broker), r)
}
func getOrRegisterBrokerHistogram(name string, broker *Broker, r metrics.Registry) metrics.Histogram {
return getOrRegisterHistogram(getMetricNameForBroker(name, broker), r)
}
func getMetricNameForTopic(name string, topic string) string {
// Convert dot to _ since reporters like Graphite typically use dot to represent hierarchy
// cf. KAFKA-1902 and KAFKA-2337


@ -18,7 +18,9 @@ const (
expectationTimeout = 500 * time.Millisecond
)
type requestHandlerFunc func(req *request) (res encoder)
type GSSApiHandlerFunc func([]byte) []byte
type requestHandlerFunc func(req *request) (res encoderWithHeader)
// RequestNotifierFunc is invoked when a mock broker processes a request successfully
// and provides the number of bytes read and written.
@ -49,18 +51,19 @@ type RequestNotifierFunc func(bytesRead, bytesWritten int)
// It is not necessary to prefix message length or correlation ID to your
// response bytes, the server does that automatically as a convenience.
type MockBroker struct {
brokerID int32
port int32
closing chan none
stopper chan none
expectations chan encoder
listener net.Listener
t TestReporter
latency time.Duration
handler requestHandlerFunc
notifier RequestNotifierFunc
history []RequestResponse
lock sync.Mutex
brokerID int32
port int32
closing chan none
stopper chan none
expectations chan encoderWithHeader
listener net.Listener
t TestReporter
latency time.Duration
handler requestHandlerFunc
notifier RequestNotifierFunc
history []RequestResponse
lock sync.Mutex
gssApiHandler GSSApiHandlerFunc
}
// RequestResponse represents a Request/Response pair processed by MockBroker.
@ -80,7 +83,7 @@ func (b *MockBroker) SetLatency(latency time.Duration) {
// and uses the found MockResponse instance to generate an appropriate reply.
// If the request type is not found in the map then nothing is sent.
func (b *MockBroker) SetHandlerByMap(handlerMap map[string]MockResponse) {
b.setHandler(func(req *request) (res encoder) {
b.setHandler(func(req *request) (res encoderWithHeader) {
reqTypeName := reflect.TypeOf(req.body).Elem().Name()
mockResponse := handlerMap[reqTypeName]
if mockResponse == nil {
@ -173,7 +176,44 @@ func (b *MockBroker) serverLoop() {
Logger.Printf("*** mockbroker/%d: listener closed, err=%v", b.BrokerID(), err)
}
func (b *MockBroker) handleRequests(conn net.Conn, idx int, wg *sync.WaitGroup) {
func (b *MockBroker) SetGSSAPIHandler(handler GSSApiHandlerFunc) {
b.gssApiHandler = handler
}
func (b *MockBroker) readToBytes(r io.Reader) ([]byte, error) {
var (
bytesRead int
lengthBytes = make([]byte, 4)
)
if _, err := io.ReadFull(r, lengthBytes); err != nil {
return nil, err
}
bytesRead += len(lengthBytes)
length := int32(binary.BigEndian.Uint32(lengthBytes))
if length <= 4 || length > MaxRequestSize {
return nil, PacketDecodingError{fmt.Sprintf("message of length %d too large or too small", length)}
}
encodedReq := make([]byte, length)
if _, err := io.ReadFull(r, encodedReq); err != nil {
return nil, err
}
bytesRead += len(encodedReq)
fullBytes := append(lengthBytes, encodedReq...)
return fullBytes, nil
}
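// isGSSAPI sniffs the byte just past the 4-byte length prefix: 0x60 is the
// GSS-API generic token tag (RFC 2743), and 0x05 0x04 is the RFC 4121
// wrap-token identifier.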
func (b *MockBroker) isGSSAPI(buffer []byte) bool {
return buffer[4] == 0x60 || bytes.Equal(buffer[4:6], []byte{0x05, 0x04})
}
func (b *MockBroker) handleRequests(conn io.ReadWriteCloser, idx int, wg *sync.WaitGroup) {
defer wg.Done()
defer func() {
_ = conn.Close()
@ -191,65 +231,110 @@ func (b *MockBroker) handleRequests(conn net.Conn, idx int, wg *sync.WaitGroup)
}
}()
resHeader := make([]byte, 8)
var bytesWritten int
var bytesRead int
for {
req, bytesRead, err := decodeRequest(conn)
buffer, err := b.readToBytes(conn)
if err != nil {
Logger.Printf("*** mockbroker/%d/%d: invalid request: err=%+v, %+v", b.brokerID, idx, err, spew.Sdump(req))
Logger.Printf("*** mockbroker/%d/%d: invalid request: err=%+v, %+v", b.brokerID, idx, err, spew.Sdump(buffer))
b.serverError(err)
break
}
if b.latency > 0 {
time.Sleep(b.latency)
}
b.lock.Lock()
res := b.handler(req)
b.history = append(b.history, RequestResponse{req.body, res})
b.lock.Unlock()
if res == nil {
Logger.Printf("*** mockbroker/%d/%d: ignored %v", b.brokerID, idx, spew.Sdump(req))
continue
}
Logger.Printf("*** mockbroker/%d/%d: served %v -> %v", b.brokerID, idx, req, res)
encodedRes, err := encode(res, nil)
if err != nil {
b.serverError(err)
break
}
if len(encodedRes) == 0 {
b.lock.Lock()
if b.notifier != nil {
b.notifier(bytesRead, 0)
bytesWritten = 0
if !b.isGSSAPI(buffer) {
req, br, err := decodeRequest(bytes.NewReader(buffer))
bytesRead = br
if err != nil {
Logger.Printf("*** mockbroker/%d/%d: invalid request: err=%+v, %+v", b.brokerID, idx, err, spew.Sdump(req))
b.serverError(err)
break
}
b.lock.Unlock()
continue
}
binary.BigEndian.PutUint32(resHeader, uint32(len(encodedRes)+4))
binary.BigEndian.PutUint32(resHeader[4:], uint32(req.correlationID))
if _, err = conn.Write(resHeader); err != nil {
b.serverError(err)
break
}
if _, err = conn.Write(encodedRes); err != nil {
b.serverError(err)
break
if b.latency > 0 {
time.Sleep(b.latency)
}
b.lock.Lock()
res := b.handler(req)
b.history = append(b.history, RequestResponse{req.body, res})
b.lock.Unlock()
if res == nil {
Logger.Printf("*** mockbroker/%d/%d: ignored %v", b.brokerID, idx, spew.Sdump(req))
continue
}
Logger.Printf("*** mockbroker/%d/%d: served %v -> %v", b.brokerID, idx, req, res)
encodedRes, err := encode(res, nil)
if err != nil {
b.serverError(err)
break
}
if len(encodedRes) == 0 {
b.lock.Lock()
if b.notifier != nil {
b.notifier(bytesRead, 0)
}
b.lock.Unlock()
continue
}
resHeader := b.encodeHeader(res.headerVersion(), req.correlationID, uint32(len(encodedRes)))
if _, err = conn.Write(resHeader); err != nil {
b.serverError(err)
break
}
if _, err = conn.Write(encodedRes); err != nil {
b.serverError(err)
break
}
bytesWritten = len(resHeader) + len(encodedRes)
} else {
// GSSAPI is not part of the Kafka protocol, but is supported for authentication purposes.
// Don't record history for this kind of request, as it is only used to test the GSSAPI authentication mechanism.
b.lock.Lock()
res := b.gssApiHandler(buffer)
b.lock.Unlock()
if res == nil {
Logger.Printf("*** mockbroker/%d/%d: ignored %v", b.brokerID, idx, spew.Sdump(buffer))
continue
}
if _, err = conn.Write(res); err != nil {
b.serverError(err)
break
}
bytesWritten = len(res)
}
b.lock.Lock()
if b.notifier != nil {
b.notifier(bytesRead, len(resHeader)+len(encodedRes))
b.notifier(bytesRead, bytesWritten)
}
b.lock.Unlock()
}
Logger.Printf("*** mockbroker/%d/%d: connection closed, err=%v", b.BrokerID(), idx, err)
}
func (b *MockBroker) defaultRequestHandler(req *request) (res encoder) {
func (b *MockBroker) encodeHeader(headerVersion int16, correlationId int32, payloadLength uint32) []byte {
headerLength := uint32(8)
if headerVersion >= 1 {
headerLength = 9
}
resHeader := make([]byte, headerLength)
binary.BigEndian.PutUint32(resHeader, payloadLength+headerLength-4)
binary.BigEndian.PutUint32(resHeader[4:], uint32(correlationId))
if headerVersion >= 1 {
binary.PutUvarint(resHeader[8:], 0)
}
return resHeader
}
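For reference, the same framing as a standalone sketch: a v0 response header is the 4-byte length plus a 4-byte correlation ID, while headerVersion >= 1 appends a single zero varint for the empty tagged-fields section; the length field counts everything after itself, hence the headerLength-4.

package main

import (
	"encoding/binary"
	"fmt"
)

// encodeHeader mirrors the helper above, outside the package, for experimentation.
func encodeHeader(headerVersion int16, correlationID int32, payloadLength uint32) []byte {
	headerLength := uint32(8)
	if headerVersion >= 1 {
		headerLength = 9 // one extra byte: the empty tagged-fields varint
	}
	h := make([]byte, headerLength)
	binary.BigEndian.PutUint32(h, payloadLength+headerLength-4) // length excludes itself
	binary.BigEndian.PutUint32(h[4:], uint32(correlationID))
	if headerVersion >= 1 {
		binary.PutUvarint(h[8:], 0)
	}
	return h
}

func main() {
	fmt.Printf("v0: % x\n", encodeHeader(0, 42, 10)) // 00 00 00 0e 00 00 00 2a
	fmt.Printf("v1: % x\n", encodeHeader(1, 42, 10)) // 00 00 00 0f 00 00 00 2a 00
}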
func (b *MockBroker) defaultRequestHandler(req *request) (res encoderWithHeader) {
select {
case res, ok := <-b.expectations:
if !ok {
@ -288,6 +373,15 @@ func NewMockBroker(t TestReporter, brokerID int32) *MockBroker {
// NewMockBrokerAddr behaves like NewMockBroker but listens on the address you give
// it rather than just some ephemeral port.
func NewMockBrokerAddr(t TestReporter, brokerID int32, addr string) *MockBroker {
listener, err := net.Listen("tcp", addr)
if err != nil {
t.Fatal(err)
}
return NewMockBrokerListener(t, brokerID, listener)
}
// NewMockBrokerListener behaves like NewMockBrokerAddr but accepts connections on the specified listener.
func NewMockBrokerListener(t TestReporter, brokerID int32, listener net.Listener) *MockBroker {
var err error
broker := &MockBroker{
@ -295,14 +389,11 @@ func NewMockBrokerAddr(t TestReporter, brokerID int32, addr string) *MockBroker
stopper: make(chan none),
t: t,
brokerID: brokerID,
expectations: make(chan encoder, 512),
expectations: make(chan encoderWithHeader, 512),
listener: listener,
}
broker.handler = broker.defaultRequestHandler
broker.listener, err = net.Listen("tcp", addr)
if err != nil {
t.Fatal(err)
}
Logger.Printf("*** mockbroker/%d listening on %s\n", brokerID, broker.listener.Addr().String())
_, portStr, err := net.SplitHostPort(broker.listener.Addr().String())
if err != nil {
@ -319,6 +410,6 @@ func NewMockBrokerAddr(t TestReporter, brokerID int32, addr string) *MockBroker
return broker
}
func (b *MockBroker) Returns(e encoder) {
func (b *MockBroker) Returns(e encoderWithHeader) {
b.expectations <- e
}
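A hedged sketch of how a downstream test might drive this mock broker through the exported API; the topic name and assertions are illustrative, not part of this diff:

package kafka_test

import (
	"testing"

	"github.com/Shopify/sarama"
)

func TestAgainstMockBroker(t *testing.T) {
	broker := sarama.NewMockBroker(t, 1)
	defer broker.Close()

	// Program canned responses per request type; unmapped types are ignored.
	broker.SetHandlerByMap(map[string]sarama.MockResponse{
		"MetadataRequest": sarama.NewMockMetadataResponse(t).
			SetController(broker.BrokerID()).
			SetBroker(broker.Addr(), broker.BrokerID()).
			SetLeader("my-topic", 0, broker.BrokerID()),
	})

	client, err := sarama.NewClient([]string{broker.Addr()}, sarama.NewConfig())
	if err != nil {
		t.Fatal(err)
	}
	defer client.Close()

	leader, err := client.Leader("my-topic", 0)
	if err != nil {
		t.Fatal(err)
	}
	if leader.Addr() != broker.Addr() {
		t.Fatalf("unexpected leader %s", leader.Addr())
	}
}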

vendor/github.com/Shopify/sarama/mockkerberos.go generated vendored Normal file

@ -0,0 +1,123 @@
package sarama
import (
"encoding/binary"
"encoding/hex"
"gopkg.in/jcmturner/gokrb5.v7/credentials"
"gopkg.in/jcmturner/gokrb5.v7/gssapi"
"gopkg.in/jcmturner/gokrb5.v7/iana/keyusage"
"gopkg.in/jcmturner/gokrb5.v7/messages"
"gopkg.in/jcmturner/gokrb5.v7/types"
)
type KafkaGSSAPIHandler struct {
client *MockKerberosClient
badResponse bool
badKeyChecksum bool
}
func (h *KafkaGSSAPIHandler) MockKafkaGSSAPI(buffer []byte) []byte {
// Default payload used for verification
err := h.client.Login() // the mock client constructs its keys on login
if err != nil {
return nil
}
if h.badResponse { // return deliberately malformed bytes
return []byte{0x00, 0x00, 0x00, 0x01, 0xAD}
}
var pack = gssapi.WrapToken{
Flags: KRB5_USER_AUTH,
EC: 12,
RRC: 0,
SndSeqNum: 3398292281,
Payload: []byte{0x11, 0x00}, // 1100
}
// Compute checksum
if h.badKeyChecksum {
pack.CheckSum = []byte{0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF}
} else {
err = pack.SetCheckSum(h.client.ASRep.DecryptedEncPart.Key, keyusage.GSSAPI_ACCEPTOR_SEAL)
if err != nil {
return nil
}
}
packBytes, err := pack.Marshal()
if err != nil {
return nil
}
lenBytes := len(packBytes)
response := make([]byte, lenBytes+4)
copy(response[4:], packBytes)
binary.BigEndian.PutUint32(response, uint32(lenBytes))
return response
}
type MockKerberosClient struct {
asRepBytes string
ASRep messages.ASRep
credentials *credentials.Credentials
mockError error
errorStage string
}
func (c *MockKerberosClient) Login() error {
if c.errorStage == "login" && c.mockError != nil {
return c.mockError
}
c.asRepBytes = "6b8202e9308202e5a003020105a10302010ba22b30293027a103020113a220041e301c301aa003020112a1131b114" +
"558414d504c452e434f4d636c69656e74a30d1b0b4558414d504c452e434f4da4133011a003020101a10a30081b06636c69656e7" +
"4a5820156618201523082014ea003020105a10d1b0b4558414d504c452e434f4da220301ea003020102a11730151b066b7262746" +
"7741b0b4558414d504c452e434f4da382011430820110a003020112a103020101a28201020481ffdb9891175d106818e61008c51" +
"d0b3462bca92f3bf9d4cfa82de4c4d7aff9994ec87c573e3a3d54dcb2bb79618c76f2bf4a3d006f90d5bdbd049bc18f48be39203" +
"549ca02acaf63f292b12404f9b74c34b83687119d8f56552ccc0c50ebee2a53bb114c1b4619bb1d5d31f0f49b4d40a08a9b4c046" +
"2e1398d0b648be1c0e50c552ad16e1d8d8e74263dd0bf0ec591e4797dfd40a9a1be4ae830d03a306e053fd7586fef84ffc5e4a83" +
"7c3122bf3e6a40fe87e84019f6283634461b955712b44a5f7386c278bff94ec2c2dc0403247e29c2450e853471ceababf9b8911f" +
"997f2e3010b046d2c49eb438afb0f4c210821e80d4ffa4c9521eb895dcd68610b3feaa682012c30820128a003020112a282011f0" +
"482011bce73cbce3f1dd17661c412005f0f2257c756fe8e98ff97e6ec24b7bab66e5fd3a3827aeeae4757af0c6e892948122d8b2" +
"03c8df48df0ef5d142d0e416d688f11daa0fcd63d96bdd431d02b8e951c664eeff286a2be62383d274a04016d5f0e141da58cb86" +
"331de64063062f4f885e8e9ce5b181ca2fdc67897c5995e0ae1ae0c171a64493ff7bd91bc6d89cd4fce1e2b3ea0a10e34b0d5eda" +
"aa38ee727b50c5632ed1d2f2b457908e616178d0d80b72af209fb8ac9dbaa1768fa45931392b36b6d8c12400f8ded2efaa0654d0" +
"da1db966e8b5aab4706c800f95d559664646041fdb38b411c62fc0fbe0d25083a28562b0e1c8df16e62e9d5626b0addee489835f" +
"eedb0f26c05baa596b69b17f47920aa64b29dc77cfcc97ba47885"
apRepBytes, err := hex.DecodeString(c.asRepBytes)
if err != nil {
return err
}
err = c.ASRep.Unmarshal(apRepBytes)
if err != nil {
return err
}
c.credentials = credentials.New("client", "EXAMPLE.COM").WithPassword("qwerty")
_, err = c.ASRep.DecryptEncPart(c.credentials)
if err != nil {
return err
}
return nil
}
func (c *MockKerberosClient) GetServiceTicket(spn string) (messages.Ticket, types.EncryptionKey, error) {
if c.errorStage == "service_ticket" && c.mockError != nil {
return messages.Ticket{}, types.EncryptionKey{}, c.mockError
}
return c.ASRep.Ticket, c.ASRep.DecryptedEncPart.Key, nil
}
func (c *MockKerberosClient) Domain() string {
return "EXAMPLE.COM"
}
func (c *MockKerberosClient) CName() types.PrincipalName {
var p = types.PrincipalName{
NameType: KRB5_USER_AUTH,
NameString: []string{
"kafka",
"kafka",
},
}
return p
}
func (c *MockKerberosClient) Destroy() {
// Do nothing.
}
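Since SetGSSAPIHandler takes any GSSApiHandlerFunc, a test that does not need this Kerberos mock can register a plain closure instead; returning nil makes the broker ignore the token. A minimal sketch:

package kafka_test

import (
	"testing"

	"github.com/Shopify/sarama"
)

func TestIgnoreGSSAPI(t *testing.T) {
	broker := sarama.NewMockBroker(t, 1)
	defer broker.Close()

	// Swallow every GSSAPI token; a real test would answer with mock Kerberos bytes.
	broker.SetGSSAPIHandler(func(token []byte) []byte {
		return nil
	})
}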


@ -2,6 +2,7 @@ package sarama
import (
"fmt"
"strings"
)
// TestReporter has methods matching go's testing.T to avoid importing
@ -17,20 +18,20 @@ type TestReporter interface {
// allows generating a response based on a request body. MockResponses are used
// to program behavior of MockBroker in tests.
type MockResponse interface {
For(reqBody versionedDecoder) (res encoder)
For(reqBody versionedDecoder) (res encoderWithHeader)
}
// MockWrapper is a mock response builder that returns a particular concrete
// response regardless of the actual request passed to the `For` method.
type MockWrapper struct {
res encoder
res encoderWithHeader
}
func (mw *MockWrapper) For(reqBody versionedDecoder) (res encoder) {
func (mw *MockWrapper) For(reqBody versionedDecoder) (res encoderWithHeader) {
return mw.res
}
func NewMockWrapper(res encoder) *MockWrapper {
func NewMockWrapper(res encoderWithHeader) *MockWrapper {
return &MockWrapper{res: res}
}
@ -49,7 +50,7 @@ func NewMockSequence(responses ...interface{}) *MockSequence {
switch res := res.(type) {
case MockResponse:
ms.responses[i] = res
case encoder:
case encoderWithHeader:
ms.responses[i] = NewMockWrapper(res)
default:
panic(fmt.Sprintf("Unexpected response type: %T", res))
@ -58,7 +59,7 @@ func NewMockSequence(responses ...interface{}) *MockSequence {
return ms
}
func (mc *MockSequence) For(reqBody versionedDecoder) (res encoder) {
func (mc *MockSequence) For(reqBody versionedDecoder) (res encoderWithHeader) {
res = mc.responses[0].For(reqBody)
if len(mc.responses) > 1 {
mc.responses = mc.responses[1:]
@ -66,11 +67,75 @@ func (mc *MockSequence) For(reqBody versionedDecoder) (res encoder) {
return res
}
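A sketch of programming a sequence (topic and offsets illustrative): each request pops the next response, and once a single response remains it is repeated for all further requests.

package kafka_test

import (
	"testing"

	"github.com/Shopify/sarama"
)

func TestOffsetSequence(t *testing.T) {
	broker := sarama.NewMockBroker(t, 1)
	defer broker.Close()

	broker.SetHandlerByMap(map[string]sarama.MockResponse{
		"OffsetRequest": sarama.NewMockSequence(
			sarama.NewMockOffsetResponse(t).SetOffset("my-topic", 0, sarama.OffsetNewest, 10),
			sarama.NewMockOffsetResponse(t).SetOffset("my-topic", 0, sarama.OffsetNewest, 20), // repeats from here on
		),
	})
}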
type MockListGroupsResponse struct {
groups map[string]string
t TestReporter
}
func NewMockListGroupsResponse(t TestReporter) *MockListGroupsResponse {
return &MockListGroupsResponse{
groups: make(map[string]string),
t: t,
}
}
func (m *MockListGroupsResponse) For(reqBody versionedDecoder) encoderWithHeader {
request := reqBody.(*ListGroupsRequest)
_ = request
response := &ListGroupsResponse{
Groups: m.groups,
}
return response
}
func (m *MockListGroupsResponse) AddGroup(groupID, protocolType string) *MockListGroupsResponse {
m.groups[groupID] = protocolType
return m
}
type MockDescribeGroupsResponse struct {
groups map[string]*GroupDescription
t TestReporter
}
func NewMockDescribeGroupsResponse(t TestReporter) *MockDescribeGroupsResponse {
return &MockDescribeGroupsResponse{
t: t,
groups: make(map[string]*GroupDescription),
}
}
func (m *MockDescribeGroupsResponse) AddGroupDescription(groupID string, description *GroupDescription) *MockDescribeGroupsResponse {
m.groups[groupID] = description
return m
}
func (m *MockDescribeGroupsResponse) For(reqBody versionedDecoder) encoderWithHeader {
request := reqBody.(*DescribeGroupsRequest)
response := &DescribeGroupsResponse{}
for _, requestedGroup := range request.Groups {
if group, ok := m.groups[requestedGroup]; ok {
response.Groups = append(response.Groups, group)
} else {
// Mimic real kafka - if a group doesn't exist, return
// an entry with state "Dead"
response.Groups = append(response.Groups, &GroupDescription{
GroupId: requestedGroup,
State: "Dead",
})
}
}
return response
}
// MockMetadataResponse is a `MetadataResponse` builder.
type MockMetadataResponse struct {
leaders map[string]map[int32]int32
brokers map[string]int32
t TestReporter
controllerID int32
leaders map[string]map[int32]int32
brokers map[string]int32
t TestReporter
}
func NewMockMetadataResponse(t TestReporter) *MockMetadataResponse {
@ -96,23 +161,39 @@ func (mmr *MockMetadataResponse) SetBroker(addr string, brokerID int32) *MockMet
return mmr
}
func (mmr *MockMetadataResponse) For(reqBody versionedDecoder) encoder {
func (mmr *MockMetadataResponse) SetController(brokerID int32) *MockMetadataResponse {
mmr.controllerID = brokerID
return mmr
}
func (mmr *MockMetadataResponse) For(reqBody versionedDecoder) encoderWithHeader {
metadataRequest := reqBody.(*MetadataRequest)
metadataResponse := &MetadataResponse{}
metadataResponse := &MetadataResponse{
Version: metadataRequest.version(),
ControllerID: mmr.controllerID,
}
for addr, brokerID := range mmr.brokers {
metadataResponse.AddBroker(addr, brokerID)
}
// Generate set of replicas
replicas := []int32{}
offlineReplicas := []int32{}
for _, brokerID := range mmr.brokers {
replicas = append(replicas, brokerID)
}
if len(metadataRequest.Topics) == 0 {
for topic, partitions := range mmr.leaders {
for partition, brokerID := range partitions {
metadataResponse.AddTopicPartition(topic, partition, brokerID, nil, nil, ErrNoError)
metadataResponse.AddTopicPartition(topic, partition, brokerID, replicas, replicas, offlineReplicas, ErrNoError)
}
}
return metadataResponse
}
for _, topic := range metadataRequest.Topics {
for partition, brokerID := range mmr.leaders[topic] {
metadataResponse.AddTopicPartition(topic, partition, brokerID, nil, nil, ErrNoError)
metadataResponse.AddTopicPartition(topic, partition, brokerID, replicas, replicas, offlineReplicas, ErrNoError)
}
}
return metadataResponse
@ -122,6 +203,7 @@ func (mmr *MockMetadataResponse) For(reqBody versionedDecoder) encoder {
type MockOffsetResponse struct {
offsets map[string]map[int32]map[int64]int64
t TestReporter
version int16
}
func NewMockOffsetResponse(t TestReporter) *MockOffsetResponse {
@ -131,6 +213,11 @@ func NewMockOffsetResponse(t TestReporter) *MockOffsetResponse {
}
}
func (mor *MockOffsetResponse) SetVersion(version int16) *MockOffsetResponse {
mor.version = version
return mor
}
func (mor *MockOffsetResponse) SetOffset(topic string, partition int32, time, offset int64) *MockOffsetResponse {
partitions := mor.offsets[topic]
if partitions == nil {
@ -146,9 +233,9 @@ func (mor *MockOffsetResponse) SetOffset(topic string, partition int32, time, of
return mor
}
func (mor *MockOffsetResponse) For(reqBody versionedDecoder) encoder {
func (mor *MockOffsetResponse) For(reqBody versionedDecoder) encoderWithHeader {
offsetRequest := reqBody.(*OffsetRequest)
offsetResponse := &OffsetResponse{}
offsetResponse := &OffsetResponse{Version: mor.version}
for topic, partitions := range offsetRequest.blocks {
for partition, block := range partitions {
offset := mor.getOffset(topic, partition, block.time)
@ -222,7 +309,7 @@ func (mfr *MockFetchResponse) SetHighWaterMark(topic string, partition int32, of
return mfr
}
func (mfr *MockFetchResponse) For(reqBody versionedDecoder) encoder {
func (mfr *MockFetchResponse) For(reqBody versionedDecoder) encoderWithHeader {
fetchRequest := reqBody.(*FetchRequest)
res := &FetchResponse{
Version: mfr.version,
@ -306,7 +393,7 @@ func (mr *MockConsumerMetadataResponse) SetError(group string, kerror KError) *M
return mr
}
func (mr *MockConsumerMetadataResponse) For(reqBody versionedDecoder) encoder {
func (mr *MockConsumerMetadataResponse) For(reqBody versionedDecoder) encoderWithHeader {
req := reqBody.(*ConsumerMetadataRequest)
group := req.ConsumerGroup
res := &ConsumerMetadataResponse{}
@ -320,6 +407,60 @@ func (mr *MockConsumerMetadataResponse) For(reqBody versionedDecoder) encoder {
return res
}
// MockFindCoordinatorResponse is a `FindCoordinatorResponse` builder.
type MockFindCoordinatorResponse struct {
groupCoordinators map[string]interface{}
transCoordinators map[string]interface{}
t TestReporter
}
func NewMockFindCoordinatorResponse(t TestReporter) *MockFindCoordinatorResponse {
return &MockFindCoordinatorResponse{
groupCoordinators: make(map[string]interface{}),
transCoordinators: make(map[string]interface{}),
t: t,
}
}
func (mr *MockFindCoordinatorResponse) SetCoordinator(coordinatorType CoordinatorType, group string, broker *MockBroker) *MockFindCoordinatorResponse {
switch coordinatorType {
case CoordinatorGroup:
mr.groupCoordinators[group] = broker
case CoordinatorTransaction:
mr.transCoordinators[group] = broker
}
return mr
}
func (mr *MockFindCoordinatorResponse) SetError(coordinatorType CoordinatorType, group string, kerror KError) *MockFindCoordinatorResponse {
switch coordinatorType {
case CoordinatorGroup:
mr.groupCoordinators[group] = kerror
case CoordinatorTransaction:
mr.transCoordinators[group] = kerror
}
return mr
}
func (mr *MockFindCoordinatorResponse) For(reqBody versionedDecoder) encoderWithHeader {
req := reqBody.(*FindCoordinatorRequest)
res := &FindCoordinatorResponse{}
var v interface{}
switch req.CoordinatorType {
case CoordinatorGroup:
v = mr.groupCoordinators[req.CoordinatorKey]
case CoordinatorTransaction:
v = mr.transCoordinators[req.CoordinatorKey]
}
switch v := v.(type) {
case *MockBroker:
res.Coordinator = &Broker{id: v.BrokerID(), addr: v.Addr()}
case KError:
res.Err = v
}
return res
}
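A sketch of pointing consumer-group traffic at a second mock broker via this builder (group name illustrative):

package kafka_test

import (
	"testing"

	"github.com/Shopify/sarama"
)

func TestFindCoordinator(t *testing.T) {
	broker := sarama.NewMockBroker(t, 1)
	coordinator := sarama.NewMockBroker(t, 2)
	defer broker.Close()
	defer coordinator.Close()

	broker.SetHandlerByMap(map[string]sarama.MockResponse{
		"FindCoordinatorRequest": sarama.NewMockFindCoordinatorResponse(t).
			SetCoordinator(sarama.CoordinatorGroup, "my-group", coordinator),
	})
}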
// MockOffsetCommitResponse is a `OffsetCommitResponse` builder.
type MockOffsetCommitResponse struct {
errors map[string]map[string]map[int32]KError
@ -348,7 +489,7 @@ func (mr *MockOffsetCommitResponse) SetError(group, topic string, partition int3
return mr
}
func (mr *MockOffsetCommitResponse) For(reqBody versionedDecoder) encoder {
func (mr *MockOffsetCommitResponse) For(reqBody versionedDecoder) encoderWithHeader {
req := reqBody.(*OffsetCommitRequest)
group := req.ConsumerGroup
res := &OffsetCommitResponse{}
@ -378,14 +519,20 @@ func (mr *MockOffsetCommitResponse) getError(group, topic string, partition int3
// MockProduceResponse is a `ProduceResponse` builder.
type MockProduceResponse struct {
errors map[string]map[int32]KError
t TestReporter
version int16
errors map[string]map[int32]KError
t TestReporter
}
func NewMockProduceResponse(t TestReporter) *MockProduceResponse {
return &MockProduceResponse{t: t}
}
func (mr *MockProduceResponse) SetVersion(version int16) *MockProduceResponse {
mr.version = version
return mr
}
func (mr *MockProduceResponse) SetError(topic string, partition int32, kerror KError) *MockProduceResponse {
if mr.errors == nil {
mr.errors = make(map[string]map[int32]KError)
@ -399,10 +546,12 @@ func (mr *MockProduceResponse) SetError(topic string, partition int32, kerror KE
return mr
}
func (mr *MockProduceResponse) For(reqBody versionedDecoder) encoder {
func (mr *MockProduceResponse) For(reqBody versionedDecoder) encoderWithHeader {
req := reqBody.(*ProduceRequest)
res := &ProduceResponse{}
for topic, partitions := range req.msgSets {
res := &ProduceResponse{
Version: mr.version,
}
for topic, partitions := range req.records {
for partition := range partitions {
res.AddTopicPartition(topic, partition, mr.getError(topic, partition))
}
@ -425,6 +574,7 @@ func (mr *MockProduceResponse) getError(topic string, partition int32) KError {
// MockOffsetFetchResponse is a `OffsetFetchResponse` builder.
type MockOffsetFetchResponse struct {
offsets map[string]map[string]map[int32]*OffsetFetchResponseBlock
error KError
t TestReporter
}
@ -446,18 +596,526 @@ func (mr *MockOffsetFetchResponse) SetOffset(group, topic string, partition int3
partitions = make(map[int32]*OffsetFetchResponseBlock)
topics[topic] = partitions
}
partitions[partition] = &OffsetFetchResponseBlock{offset, metadata, kerror}
partitions[partition] = &OffsetFetchResponseBlock{offset, 0, metadata, kerror}
return mr
}
func (mr *MockOffsetFetchResponse) For(reqBody versionedDecoder) encoder {
func (mr *MockOffsetFetchResponse) SetError(kerror KError) *MockOffsetFetchResponse {
mr.error = kerror
return mr
}
func (mr *MockOffsetFetchResponse) For(reqBody versionedDecoder) encoderWithHeader {
req := reqBody.(*OffsetFetchRequest)
group := req.ConsumerGroup
res := &OffsetFetchResponse{}
res := &OffsetFetchResponse{Version: req.Version}
for topic, partitions := range mr.offsets[group] {
for partition, block := range partitions {
res.AddBlock(topic, partition, block)
}
}
if res.Version >= 2 {
res.Err = mr.error
}
return res
}
type MockCreateTopicsResponse struct {
t TestReporter
}
func NewMockCreateTopicsResponse(t TestReporter) *MockCreateTopicsResponse {
return &MockCreateTopicsResponse{t: t}
}
func (mr *MockCreateTopicsResponse) For(reqBody versionedDecoder) encoderWithHeader {
req := reqBody.(*CreateTopicsRequest)
res := &CreateTopicsResponse{
Version: req.Version,
}
res.TopicErrors = make(map[string]*TopicError)
for topic := range req.TopicDetails {
if res.Version >= 1 && strings.HasPrefix(topic, "_") {
msg := "insufficient permissions to create topic with reserved prefix"
res.TopicErrors[topic] = &TopicError{
Err: ErrTopicAuthorizationFailed,
ErrMsg: &msg,
}
continue
}
res.TopicErrors[topic] = &TopicError{Err: ErrNoError}
}
return res
}
type MockDeleteTopicsResponse struct {
t TestReporter
}
func NewMockDeleteTopicsResponse(t TestReporter) *MockDeleteTopicsResponse {
return &MockDeleteTopicsResponse{t: t}
}
func (mr *MockDeleteTopicsResponse) For(reqBody versionedDecoder) encoderWithHeader {
req := reqBody.(*DeleteTopicsRequest)
res := &DeleteTopicsResponse{}
res.TopicErrorCodes = make(map[string]KError)
for _, topic := range req.Topics {
res.TopicErrorCodes[topic] = ErrNoError
}
res.Version = req.Version
return res
}
type MockCreatePartitionsResponse struct {
t TestReporter
}
func NewMockCreatePartitionsResponse(t TestReporter) *MockCreatePartitionsResponse {
return &MockCreatePartitionsResponse{t: t}
}
func (mr *MockCreatePartitionsResponse) For(reqBody versionedDecoder) encoderWithHeader {
req := reqBody.(*CreatePartitionsRequest)
res := &CreatePartitionsResponse{}
res.TopicPartitionErrors = make(map[string]*TopicPartitionError)
for topic := range req.TopicPartitions {
if strings.HasPrefix(topic, "_") {
msg := "insufficient permissions to create partition on topic with reserved prefix"
res.TopicPartitionErrors[topic] = &TopicPartitionError{
Err: ErrTopicAuthorizationFailed,
ErrMsg: &msg,
}
continue
}
res.TopicPartitionErrors[topic] = &TopicPartitionError{Err: ErrNoError}
}
return res
}
type MockAlterPartitionReassignmentsResponse struct {
t TestReporter
}
func NewMockAlterPartitionReassignmentsResponse(t TestReporter) *MockAlterPartitionReassignmentsResponse {
return &MockAlterPartitionReassignmentsResponse{t: t}
}
func (mr *MockAlterPartitionReassignmentsResponse) For(reqBody versionedDecoder) encoderWithHeader {
req := reqBody.(*AlterPartitionReassignmentsRequest)
_ = req
res := &AlterPartitionReassignmentsResponse{}
return res
}
type MockListPartitionReassignmentsResponse struct {
t TestReporter
}
func NewMockListPartitionReassignmentsResponse(t TestReporter) *MockListPartitionReassignmentsResponse {
return &MockListPartitionReassignmentsResponse{t: t}
}
func (mr *MockListPartitionReassignmentsResponse) For(reqBody versionedDecoder) encoderWithHeader {
req := reqBody.(*ListPartitionReassignmentsRequest)
_ = req
res := &ListPartitionReassignmentsResponse{}
for topic, partitions := range req.blocks {
for _, partition := range partitions {
res.AddBlock(topic, partition, []int32{0}, []int32{1}, []int32{2})
}
}
return res
}
type MockDeleteRecordsResponse struct {
t TestReporter
}
func NewMockDeleteRecordsResponse(t TestReporter) *MockDeleteRecordsResponse {
return &MockDeleteRecordsResponse{t: t}
}
func (mr *MockDeleteRecordsResponse) For(reqBody versionedDecoder) encoderWithHeader {
req := reqBody.(*DeleteRecordsRequest)
res := &DeleteRecordsResponse{}
res.Topics = make(map[string]*DeleteRecordsResponseTopic)
for topic, deleteRecordRequestTopic := range req.Topics {
partitions := make(map[int32]*DeleteRecordsResponsePartition)
for partition := range deleteRecordRequestTopic.PartitionOffsets {
partitions[partition] = &DeleteRecordsResponsePartition{Err: ErrNoError}
}
res.Topics[topic] = &DeleteRecordsResponseTopic{Partitions: partitions}
}
return res
}
type MockDescribeConfigsResponse struct {
t TestReporter
}
func NewMockDescribeConfigsResponse(t TestReporter) *MockDescribeConfigsResponse {
return &MockDescribeConfigsResponse{t: t}
}
func (mr *MockDescribeConfigsResponse) For(reqBody versionedDecoder) encoderWithHeader {
req := reqBody.(*DescribeConfigsRequest)
res := &DescribeConfigsResponse{
Version: req.Version,
}
includeSynonyms := (req.Version > 0)
includeSource := (req.Version > 0)
for _, r := range req.Resources {
var configEntries []*ConfigEntry
switch r.Type {
case BrokerResource:
configEntries = append(configEntries,
&ConfigEntry{
Name: "min.insync.replicas",
Value: "2",
ReadOnly: false,
Default: false,
},
)
res.Resources = append(res.Resources, &ResourceResponse{
Name: r.Name,
Configs: configEntries,
})
case BrokerLoggerResource:
configEntries = append(configEntries,
&ConfigEntry{
Name: "kafka.controller.KafkaController",
Value: "DEBUG",
ReadOnly: false,
Default: false,
},
)
res.Resources = append(res.Resources, &ResourceResponse{
Name: r.Name,
Configs: configEntries,
})
case TopicResource:
maxMessageBytes := &ConfigEntry{Name: "max.message.bytes",
Value: "1000000",
ReadOnly: false,
Default: !includeSource,
Sensitive: false,
}
if includeSource {
maxMessageBytes.Source = SourceDefault
}
if includeSynonyms {
maxMessageBytes.Synonyms = []*ConfigSynonym{
{
ConfigName: "max.message.bytes",
ConfigValue: "500000",
},
}
}
retentionMs := &ConfigEntry{Name: "retention.ms",
Value: "5000",
ReadOnly: false,
Default: false,
Sensitive: false,
}
if includeSynonyms {
retentionMs.Synonyms = []*ConfigSynonym{
{
ConfigName: "log.retention.ms",
ConfigValue: "2500",
},
}
}
password := &ConfigEntry{Name: "password",
Value: "12345",
ReadOnly: false,
Default: false,
Sensitive: true,
}
configEntries = append(
configEntries, maxMessageBytes, retentionMs, password)
res.Resources = append(res.Resources, &ResourceResponse{
Name: r.Name,
Configs: configEntries,
})
}
}
return res
}
type MockDescribeConfigsResponseWithErrorCode struct {
t TestReporter
}
func NewMockDescribeConfigsResponseWithErrorCode(t TestReporter) *MockDescribeConfigsResponseWithErrorCode {
return &MockDescribeConfigsResponseWithErrorCode{t: t}
}
func (mr *MockDescribeConfigsResponseWithErrorCode) For(reqBody versionedDecoder) encoderWithHeader {
req := reqBody.(*DescribeConfigsRequest)
res := &DescribeConfigsResponse{
Version: req.Version,
}
for _, r := range req.Resources {
res.Resources = append(res.Resources, &ResourceResponse{
Name: r.Name,
Type: r.Type,
ErrorCode: 83,
ErrorMsg: "",
})
}
return res
}
type MockAlterConfigsResponse struct {
t TestReporter
}
func NewMockAlterConfigsResponse(t TestReporter) *MockAlterConfigsResponse {
return &MockAlterConfigsResponse{t: t}
}
func (mr *MockAlterConfigsResponse) For(reqBody versionedDecoder) encoderWithHeader {
req := reqBody.(*AlterConfigsRequest)
res := &AlterConfigsResponse{}
for _, r := range req.Resources {
res.Resources = append(res.Resources, &AlterConfigsResourceResponse{Name: r.Name,
Type: r.Type,
ErrorMsg: "",
})
}
return res
}
type MockAlterConfigsResponseWithErrorCode struct {
t TestReporter
}
func NewMockAlterConfigsResponseWithErrorCode(t TestReporter) *MockAlterConfigsResponseWithErrorCode {
return &MockAlterConfigsResponseWithErrorCode{t: t}
}
func (mr *MockAlterConfigsResponseWithErrorCode) For(reqBody versionedDecoder) encoderWithHeader {
req := reqBody.(*AlterConfigsRequest)
res := &AlterConfigsResponse{}
for _, r := range req.Resources {
res.Resources = append(res.Resources, &AlterConfigsResourceResponse{
Name: r.Name,
Type: r.Type,
ErrorCode: 83,
ErrorMsg: "",
})
}
return res
}
type MockCreateAclsResponse struct {
t TestReporter
}
func NewMockCreateAclsResponse(t TestReporter) *MockCreateAclsResponse {
return &MockCreateAclsResponse{t: t}
}
func (mr *MockCreateAclsResponse) For(reqBody versionedDecoder) encoderWithHeader {
req := reqBody.(*CreateAclsRequest)
res := &CreateAclsResponse{}
for range req.AclCreations {
res.AclCreationResponses = append(res.AclCreationResponses, &AclCreationResponse{Err: ErrNoError})
}
return res
}
type MockListAclsResponse struct {
t TestReporter
}
func NewMockListAclsResponse(t TestReporter) *MockListAclsResponse {
return &MockListAclsResponse{t: t}
}
func (mr *MockListAclsResponse) For(reqBody versionedDecoder) encoderWithHeader {
req := reqBody.(*DescribeAclsRequest)
res := &DescribeAclsResponse{}
res.Err = ErrNoError
acl := &ResourceAcls{}
if req.ResourceName != nil {
acl.Resource.ResourceName = *req.ResourceName
}
acl.Resource.ResourcePatternType = req.ResourcePatternTypeFilter
acl.Resource.ResourceType = req.ResourceType
host := "*"
if req.Host != nil {
host = *req.Host
}
principal := "User:test"
if req.Principal != nil {
principal = *req.Principal
}
permissionType := req.PermissionType
if permissionType == AclPermissionAny {
permissionType = AclPermissionAllow
}
acl.Acls = append(acl.Acls, &Acl{Operation: req.Operation, PermissionType: permissionType, Host: host, Principal: principal})
res.ResourceAcls = append(res.ResourceAcls, acl)
res.Version = int16(req.Version)
return res
}
type MockSaslAuthenticateResponse struct {
t TestReporter
kerror KError
saslAuthBytes []byte
}
func NewMockSaslAuthenticateResponse(t TestReporter) *MockSaslAuthenticateResponse {
return &MockSaslAuthenticateResponse{t: t}
}
func (msar *MockSaslAuthenticateResponse) For(reqBody versionedDecoder) encoderWithHeader {
res := &SaslAuthenticateResponse{}
res.Err = msar.kerror
res.SaslAuthBytes = msar.saslAuthBytes
return res
}
func (msar *MockSaslAuthenticateResponse) SetError(kerror KError) *MockSaslAuthenticateResponse {
msar.kerror = kerror
return msar
}
func (msar *MockSaslAuthenticateResponse) SetAuthBytes(saslAuthBytes []byte) *MockSaslAuthenticateResponse {
msar.saslAuthBytes = saslAuthBytes
return msar
}
type MockDeleteAclsResponse struct {
t TestReporter
}
type MockSaslHandshakeResponse struct {
enabledMechanisms []string
kerror KError
t TestReporter
}
func NewMockSaslHandshakeResponse(t TestReporter) *MockSaslHandshakeResponse {
return &MockSaslHandshakeResponse{t: t}
}
func (mshr *MockSaslHandshakeResponse) For(reqBody versionedDecoder) encoderWithHeader {
res := &SaslHandshakeResponse{}
res.Err = mshr.kerror
res.EnabledMechanisms = mshr.enabledMechanisms
return res
}
func (mshr *MockSaslHandshakeResponse) SetError(kerror KError) *MockSaslHandshakeResponse {
mshr.kerror = kerror
return mshr
}
func (mshr *MockSaslHandshakeResponse) SetEnabledMechanisms(enabledMechanisms []string) *MockSaslHandshakeResponse {
mshr.enabledMechanisms = enabledMechanisms
return mshr
}
func NewMockDeleteAclsResponse(t TestReporter) *MockDeleteAclsResponse {
return &MockDeleteAclsResponse{t: t}
}
func (mr *MockDeleteAclsResponse) For(reqBody versionedDecoder) encoderWithHeader {
req := reqBody.(*DeleteAclsRequest)
res := &DeleteAclsResponse{}
for range req.Filters {
response := &FilterResponse{Err: ErrNoError}
response.MatchingAcls = append(response.MatchingAcls, &MatchingAcl{Err: ErrNoError})
res.FilterResponses = append(res.FilterResponses, response)
}
res.Version = int16(req.Version)
return res
}
type MockDeleteGroupsResponse struct {
deletedGroups []string
}
func NewMockDeleteGroupsRequest(t TestReporter) *MockDeleteGroupsResponse {
return &MockDeleteGroupsResponse{}
}
func (m *MockDeleteGroupsResponse) SetDeletedGroups(groups []string) *MockDeleteGroupsResponse {
m.deletedGroups = groups
return m
}
func (m *MockDeleteGroupsResponse) For(reqBody versionedDecoder) encoderWithHeader {
resp := &DeleteGroupsResponse{
GroupErrorCodes: map[string]KError{},
}
for _, group := range m.deletedGroups {
resp.GroupErrorCodes[group] = ErrNoError
}
return resp
}
type MockDescribeLogDirsResponse struct {
t TestReporter
logDirs []DescribeLogDirsResponseDirMetadata
}
func NewMockDescribeLogDirsResponse(t TestReporter) *MockDescribeLogDirsResponse {
return &MockDescribeLogDirsResponse{t: t}
}
func (m *MockDescribeLogDirsResponse) SetLogDirs(logDirPath string, topicPartitions map[string]int) *MockDescribeLogDirsResponse {
topics := []DescribeLogDirsResponseTopic{}
for topic := range topicPartitions {
partitions := []DescribeLogDirsResponsePartition{}
for i := 0; i < topicPartitions[topic]; i++ {
partitions = append(partitions, DescribeLogDirsResponsePartition{
PartitionID: int32(i),
IsTemporary: false,
OffsetLag: int64(0),
Size: int64(1234),
})
}
topics = append(topics, DescribeLogDirsResponseTopic{
Topic: topic,
Partitions: partitions,
})
}
logDir := DescribeLogDirsResponseDirMetadata{
ErrorCode: ErrNoError,
Path: logDirPath,
Topics: topics,
}
m.logDirs = []DescribeLogDirsResponseDirMetadata{logDir}
return m
}
func (m *MockDescribeLogDirsResponse) For(reqBody versionedDecoder) encoderWithHeader {
resp := &DescribeLogDirsResponse{
LogDirs: m.logDirs,
}
return resp
}


@ -1,5 +1,7 @@
package sarama
import "errors"
// ReceiveTime is a special value for the timestamp field of Offset Commit Requests which
// tells the broker to set the timestamp to the time at which the request was received.
// The timestamp is only used if message version 1 is used, which requires kafka 0.8.2.
@ -50,12 +52,14 @@ type OffsetCommitRequest struct {
// - 0 (kafka 0.8.1 and later)
// - 1 (kafka 0.8.2 and later)
// - 2 (kafka 0.9.0 and later)
// - 3 (kafka 0.11.0 and later)
// - 4 (kafka 2.0.0 and later)
Version int16
blocks map[string]map[int32]*offsetCommitRequestBlock
}
func (r *OffsetCommitRequest) encode(pe packetEncoder) error {
if r.Version < 0 || r.Version > 2 {
if r.Version < 0 || r.Version > 4 {
return PacketEncodingError{"invalid or unsupported OffsetCommitRequest version field"}
}
@ -166,14 +170,22 @@ func (r *OffsetCommitRequest) version() int16 {
return r.Version
}
func (r *OffsetCommitRequest) headerVersion() int16 {
return 1
}
func (r *OffsetCommitRequest) requiredVersion() KafkaVersion {
switch r.Version {
case 1:
return V0_8_2_0
case 2:
return V0_9_0_0
case 3:
return V0_11_0_0
case 4:
return V2_0_0_0
default:
return minVersion
return MinVersion
}
}
@ -188,3 +200,15 @@ func (r *OffsetCommitRequest) AddBlock(topic string, partitionID int32, offset i
r.blocks[topic][partitionID] = &offsetCommitRequestBlock{offset, timestamp, metadata}
}
func (r *OffsetCommitRequest) Offset(topic string, partitionID int32) (int64, string, error) {
partitions := r.blocks[topic]
if partitions == nil {
return 0, "", errors.New("no such offset")
}
block := partitions[partitionID]
if block == nil {
return 0, "", errors.New("no such offset")
}
return block.offset, block.metadata, nil
}
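A sketch of the round trip the new Offset accessor enables (group, topic, and offsets illustrative):

package main

import (
	"fmt"

	"github.com/Shopify/sarama"
)

func main() {
	req := &sarama.OffsetCommitRequest{
		Version:       2,
		ConsumerGroup: "my-group",
		RetentionTime: 3600000, // v2+ only: retention in milliseconds
	}
	req.AddBlock("my-topic", 0, 42, sarama.ReceiveTime, "metadata")

	offset, metadata, err := req.Offset("my-topic", 0)
	fmt.Println(offset, metadata, err) // 42 metadata <nil>
}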


@ -1,7 +1,9 @@
package sarama
type OffsetCommitResponse struct {
Errors map[string]map[int32]KError
Version int16
ThrottleTimeMs int32
Errors map[string]map[int32]KError
}
func (r *OffsetCommitResponse) AddError(topic string, partition int32, kerror KError) {
@ -17,6 +19,9 @@ func (r *OffsetCommitResponse) AddError(topic string, partition int32, kerror KE
}
func (r *OffsetCommitResponse) encode(pe packetEncoder) error {
if r.Version >= 3 {
pe.putInt32(r.ThrottleTimeMs)
}
if err := pe.putArrayLength(len(r.Errors)); err != nil {
return err
}
@ -36,6 +41,15 @@ func (r *OffsetCommitResponse) encode(pe packetEncoder) error {
}
func (r *OffsetCommitResponse) decode(pd packetDecoder, version int16) (err error) {
r.Version = version
if version >= 3 {
r.ThrottleTimeMs, err = pd.getInt32()
if err != nil {
return err
}
}
numTopics, err := pd.getArrayLength()
if err != nil || numTopics == 0 {
return err
@ -77,9 +91,24 @@ func (r *OffsetCommitResponse) key() int16 {
}
func (r *OffsetCommitResponse) version() int16 {
return r.Version
}
func (r *OffsetCommitResponse) headerVersion() int16 {
return 0
}
func (r *OffsetCommitResponse) requiredVersion() KafkaVersion {
return minVersion
switch r.Version {
case 1:
return V0_8_2_0
case 2:
return V0_9_0_0
case 3:
return V0_11_0_0
case 4:
return V2_0_0_0
default:
return MinVersion
}
}


@ -1,28 +1,33 @@
package sarama
type OffsetFetchRequest struct {
ConsumerGroup string
Version int16
ConsumerGroup string
partitions map[string][]int32
}
func (r *OffsetFetchRequest) encode(pe packetEncoder) (err error) {
if r.Version < 0 || r.Version > 1 {
if r.Version < 0 || r.Version > 5 {
return PacketEncodingError{"invalid or unsupported OffsetFetchRequest version field"}
}
if err = pe.putString(r.ConsumerGroup); err != nil {
return err
}
if err = pe.putArrayLength(len(r.partitions)); err != nil {
return err
}
for topic, partitions := range r.partitions {
if err = pe.putString(topic); err != nil {
if r.Version >= 2 && r.partitions == nil {
pe.putInt32(-1)
} else {
if err = pe.putArrayLength(len(r.partitions)); err != nil {
return err
}
if err = pe.putInt32Array(partitions); err != nil {
return err
for topic, partitions := range r.partitions {
if err = pe.putString(topic); err != nil {
return err
}
if err = pe.putInt32Array(partitions); err != nil {
return err
}
}
}
return nil
@ -37,7 +42,7 @@ func (r *OffsetFetchRequest) decode(pd packetDecoder, version int16) (err error)
if err != nil {
return err
}
if partitionCount == 0 {
if (partitionCount == 0 && version < 2) || partitionCount < 0 {
return nil
}
r.partitions = make(map[string][]int32)
@ -63,12 +68,30 @@ func (r *OffsetFetchRequest) version() int16 {
return r.Version
}
func (r *OffsetFetchRequest) headerVersion() int16 {
return 1
}
func (r *OffsetFetchRequest) requiredVersion() KafkaVersion {
switch r.Version {
case 1:
return V0_8_2_0
case 2:
return V0_10_2_0
case 3:
return V0_11_0_0
case 4:
return V2_0_0_0
case 5:
return V2_1_0_0
default:
return minVersion
return MinVersion
}
}
func (r *OffsetFetchRequest) ZeroPartitions() {
if r.partitions == nil && r.Version >= 2 {
r.partitions = make(map[string][]int32)
}
}
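The distinction ZeroPartitions guards, as a sketch (names illustrative): on v2+ a nil partition map encodes as length -1, meaning all partitions for the group, while an empty non-nil map encodes as an empty list, meaning none.

package main

import "github.com/Shopify/sarama"

func main() {
	req := &sarama.OffsetFetchRequest{Version: 2, ConsumerGroup: "my-group"}
	// Leaving partitions nil would encode -1: fetch offsets for every partition.

	req.ZeroPartitions()
	// Now an empty list is encoded: explicitly fetch none.

	req.AddPartition("my-topic", 0) // or name specific partitions as usual
}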


@ -1,17 +1,25 @@
package sarama
type OffsetFetchResponseBlock struct {
Offset int64
Metadata string
Err KError
Offset int64
LeaderEpoch int32
Metadata string
Err KError
}
func (b *OffsetFetchResponseBlock) decode(pd packetDecoder) (err error) {
func (b *OffsetFetchResponseBlock) decode(pd packetDecoder, version int16) (err error) {
b.Offset, err = pd.getInt64()
if err != nil {
return err
}
if version >= 5 {
b.LeaderEpoch, err = pd.getInt32()
if err != nil {
return err
}
}
b.Metadata, err = pd.getString()
if err != nil {
return err
@ -26,9 +34,13 @@ func (b *OffsetFetchResponseBlock) decode(pd packetDecoder) (err error) {
return nil
}
func (b *OffsetFetchResponseBlock) encode(pe packetEncoder) (err error) {
func (b *OffsetFetchResponseBlock) encode(pe packetEncoder, version int16) (err error) {
pe.putInt64(b.Offset)
if version >= 5 {
pe.putInt32(b.LeaderEpoch)
}
err = pe.putString(b.Metadata)
if err != nil {
return err
@ -40,10 +52,17 @@ func (b *OffsetFetchResponseBlock) encode(pe packetEncoder) (err error) {
}
type OffsetFetchResponse struct {
Blocks map[string]map[int32]*OffsetFetchResponseBlock
Version int16
ThrottleTimeMs int32
Blocks map[string]map[int32]*OffsetFetchResponseBlock
Err KError
}
func (r *OffsetFetchResponse) encode(pe packetEncoder) error {
if r.Version >= 3 {
pe.putInt32(r.ThrottleTimeMs)
}
if err := pe.putArrayLength(len(r.Blocks)); err != nil {
return err
}
@ -56,53 +75,75 @@ func (r *OffsetFetchResponse) encode(pe packetEncoder) error {
}
for partition, block := range partitions {
pe.putInt32(partition)
if err := block.encode(pe); err != nil {
if err := block.encode(pe, r.Version); err != nil {
return err
}
}
}
if r.Version >= 2 {
pe.putInt16(int16(r.Err))
}
return nil
}
func (r *OffsetFetchResponse) decode(pd packetDecoder, version int16) (err error) {
r.Version = version
if version >= 3 {
r.ThrottleTimeMs, err = pd.getInt32()
if err != nil {
return err
}
}
numTopics, err := pd.getArrayLength()
if err != nil || numTopics == 0 {
if err != nil {
return err
}
r.Blocks = make(map[string]map[int32]*OffsetFetchResponseBlock, numTopics)
for i := 0; i < numTopics; i++ {
name, err := pd.getString()
if err != nil {
return err
}
numBlocks, err := pd.getArrayLength()
if err != nil {
return err
}
if numBlocks == 0 {
r.Blocks[name] = nil
continue
}
r.Blocks[name] = make(map[int32]*OffsetFetchResponseBlock, numBlocks)
for j := 0; j < numBlocks; j++ {
id, err := pd.getInt32()
if numTopics > 0 {
r.Blocks = make(map[string]map[int32]*OffsetFetchResponseBlock, numTopics)
for i := 0; i < numTopics; i++ {
name, err := pd.getString()
if err != nil {
return err
}
block := new(OffsetFetchResponseBlock)
err = block.decode(pd)
numBlocks, err := pd.getArrayLength()
if err != nil {
return err
}
r.Blocks[name][id] = block
if numBlocks == 0 {
r.Blocks[name] = nil
continue
}
r.Blocks[name] = make(map[int32]*OffsetFetchResponseBlock, numBlocks)
for j := 0; j < numBlocks; j++ {
id, err := pd.getInt32()
if err != nil {
return err
}
block := new(OffsetFetchResponseBlock)
err = block.decode(pd, version)
if err != nil {
return err
}
r.Blocks[name][id] = block
}
}
}
if version >= 2 {
kerr, err := pd.getInt16()
if err != nil {
return err
}
r.Err = KError(kerr)
}
return nil
}
@ -111,11 +152,28 @@ func (r *OffsetFetchResponse) key() int16 {
}
func (r *OffsetFetchResponse) version() int16 {
return r.Version
}
func (r *OffsetFetchResponse) headerVersion() int16 {
return 0
}
func (r *OffsetFetchResponse) requiredVersion() KafkaVersion {
return minVersion
switch r.Version {
case 1:
return V0_8_2_0
case 2:
return V0_10_2_0
case 3:
return V0_11_0_0
case 4:
return V2_0_0_0
case 5:
return V2_1_0_0
default:
return MinVersion
}
}
func (r *OffsetFetchResponse) GetBlock(topic string, partition int32) *OffsetFetchResponseBlock {


@ -25,27 +25,49 @@ type offsetManager struct {
client Client
conf *Config
group string
ticker *time.Ticker
lock sync.Mutex
poms map[string]map[int32]*partitionOffsetManager
boms map[*Broker]*brokerOffsetManager
memberID string
generation int32
broker *Broker
brokerLock sync.RWMutex
poms map[string]map[int32]*partitionOffsetManager
pomsLock sync.RWMutex
closeOnce sync.Once
closing chan none
closed chan none
}
// NewOffsetManagerFromClient creates a new OffsetManager from the given client.
// It is still necessary to call Close() on the underlying client when finished with the partition manager.
func NewOffsetManagerFromClient(group string, client Client) (OffsetManager, error) {
return newOffsetManagerFromClient(group, "", GroupGenerationUndefined, client)
}
func newOffsetManagerFromClient(group, memberID string, generation int32, client Client) (*offsetManager, error) {
// Check that we are not dealing with a closed Client before processing any other arguments
if client.Closed() {
return nil, ErrClosedClient
}
conf := client.Config()
om := &offsetManager{
client: client,
conf: client.Config(),
conf: conf,
group: group,
ticker: time.NewTicker(conf.Consumer.Offsets.AutoCommit.Interval),
poms: make(map[string]map[int32]*partitionOffsetManager),
boms: make(map[*Broker]*brokerOffsetManager),
memberID: memberID,
generation: generation,
closing: make(chan none),
closed: make(chan none),
}
go withRecover(om.mainLoop)
return om, nil
}
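A hedged end-to-end sketch of the public API this rewrite serves; the broker address, group, and topic are illustrative:

package main

import (
	"log"

	"github.com/Shopify/sarama"
)

func main() {
	client, err := sarama.NewClient([]string{"localhost:9092"}, sarama.NewConfig())
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	om, err := sarama.NewOffsetManagerFromClient("my-group", client)
	if err != nil {
		log.Fatal(err)
	}
	defer om.Close()

	pom, err := om.ManagePartition("my-topic", 0)
	if err != nil {
		log.Fatal(err)
	}
	defer pom.Close()

	offset, metadata := pom.NextOffset()
	// ...consume starting at offset...
	pom.MarkOffset(offset+1, metadata) // flushed by the auto-commit ticker above
}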
@ -56,8 +78,8 @@ func (om *offsetManager) ManagePartition(topic string, partition int32) (Partiti
return nil, err
}
om.lock.Lock()
defer om.lock.Unlock()
om.pomsLock.Lock()
defer om.pomsLock.Unlock()
topicManagers := om.poms[topic]
if topicManagers == nil {
@ -74,53 +96,319 @@ func (om *offsetManager) ManagePartition(topic string, partition int32) (Partiti
}
func (om *offsetManager) Close() error {
om.closeOnce.Do(func() {
// exit the mainLoop
close(om.closing)
<-om.closed
// mark all POMs as closed
om.asyncClosePOMs()
// flush one last time
for attempt := 0; attempt <= om.conf.Consumer.Offsets.Retry.Max; attempt++ {
om.flushToBroker()
if om.releasePOMs(false) == 0 {
break
}
}
om.releasePOMs(true)
om.brokerLock.Lock()
om.broker = nil
om.brokerLock.Unlock()
})
return nil
}
func (om *offsetManager) refBrokerOffsetManager(broker *Broker) *brokerOffsetManager {
om.lock.Lock()
defer om.lock.Unlock()
bom := om.boms[broker]
if bom == nil {
bom = om.newBrokerOffsetManager(broker)
om.boms[broker] = bom
func (om *offsetManager) computeBackoff(retries int) time.Duration {
if om.conf.Metadata.Retry.BackoffFunc != nil {
return om.conf.Metadata.Retry.BackoffFunc(retries, om.conf.Metadata.Retry.Max)
} else {
return om.conf.Metadata.Retry.Backoff
}
bom.refs++
return bom
}
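computeBackoff defers to Metadata.Retry.BackoffFunc when one is set; note the retries argument counts down toward zero here. One reasonable shape, as a sketch (values illustrative):

package main

import (
	"time"

	"github.com/Shopify/sarama"
)

func main() {
	config := sarama.NewConfig()
	config.Metadata.Retry.Max = 5
	// Because retries counts down, the delay grows per attempt: 100ms, 200ms, 400ms, ...
	config.Metadata.Retry.BackoffFunc = func(retries, maxRetries int) time.Duration {
		return time.Duration(100<<uint(maxRetries-retries)) * time.Millisecond
	}
	_ = config
}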
func (om *offsetManager) unrefBrokerOffsetManager(bom *brokerOffsetManager) {
om.lock.Lock()
defer om.lock.Unlock()
func (om *offsetManager) fetchInitialOffset(topic string, partition int32, retries int) (int64, string, error) {
broker, err := om.coordinator()
if err != nil {
if retries <= 0 {
return 0, "", err
}
return om.fetchInitialOffset(topic, partition, retries-1)
}
bom.refs--
req := new(OffsetFetchRequest)
req.Version = 1
req.ConsumerGroup = om.group
req.AddPartition(topic, partition)
if bom.refs == 0 {
close(bom.updateSubscriptions)
if om.boms[bom.broker] == bom {
delete(om.boms, bom.broker)
resp, err := broker.FetchOffset(req)
if err != nil {
if retries <= 0 {
return 0, "", err
}
om.releaseCoordinator(broker)
return om.fetchInitialOffset(topic, partition, retries-1)
}
block := resp.GetBlock(topic, partition)
if block == nil {
return 0, "", ErrIncompleteResponse
}
switch block.Err {
case ErrNoError:
return block.Offset, block.Metadata, nil
case ErrNotCoordinatorForConsumer:
if retries <= 0 {
return 0, "", block.Err
}
om.releaseCoordinator(broker)
return om.fetchInitialOffset(topic, partition, retries-1)
case ErrOffsetsLoadInProgress:
if retries <= 0 {
return 0, "", block.Err
}
backoff := om.computeBackoff(retries)
select {
case <-om.closing:
return 0, "", block.Err
case <-time.After(backoff):
}
return om.fetchInitialOffset(topic, partition, retries-1)
default:
return 0, "", block.Err
}
}
func (om *offsetManager) coordinator() (*Broker, error) {
om.brokerLock.RLock()
broker := om.broker
om.brokerLock.RUnlock()
if broker != nil {
return broker, nil
}
om.brokerLock.Lock()
defer om.brokerLock.Unlock()
if broker := om.broker; broker != nil {
return broker, nil
}
if err := om.client.RefreshCoordinator(om.group); err != nil {
return nil, err
}
broker, err := om.client.Coordinator(om.group)
if err != nil {
return nil, err
}
om.broker = broker
return broker, nil
}
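coordinator uses classic double-checked locking: a read-locked fast path, then a write lock with a re-check before the expensive lookup, so concurrent callers cannot refresh the coordinator twice. The pattern in isolation (Thing and expensiveLookup are hypothetical stand-ins):

package main

import "sync"

type Thing struct{ addr string }

// expensiveLookup stands in for the RefreshCoordinator/Coordinator calls above.
func expensiveLookup() (*Thing, error) { return &Thing{addr: "example:9092"}, nil }

type cached struct {
	mu  sync.RWMutex
	val *Thing
}

func (c *cached) get() (*Thing, error) {
	c.mu.RLock()
	v := c.val
	c.mu.RUnlock()
	if v != nil {
		return v, nil // fast path: read lock only
	}

	c.mu.Lock()
	defer c.mu.Unlock()
	if c.val != nil {
		return c.val, nil // another goroutine won the race
	}
	v, err := expensiveLookup()
	if err != nil {
		return nil, err
	}
	c.val = v
	return v, nil
}

func main() {
	var c cached
	if t, err := c.get(); err == nil {
		_ = t
	}
}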
func (om *offsetManager) releaseCoordinator(b *Broker) {
om.brokerLock.Lock()
if om.broker == b {
om.broker = nil
}
om.brokerLock.Unlock()
}
func (om *offsetManager) mainLoop() {
defer om.ticker.Stop()
defer close(om.closed)
for {
select {
case <-om.ticker.C:
om.flushToBroker()
om.releasePOMs(false)
case <-om.closing:
return
}
}
}
func (om *offsetManager) abandonBroker(bom *brokerOffsetManager) {
om.lock.Lock()
defer om.lock.Unlock()
// flushToBroker is a no-op when auto-commit of offsets is disabled
func (om *offsetManager) flushToBroker() {
if !om.conf.Consumer.Offsets.AutoCommit.Enable {
return
}
delete(om.boms, bom.broker)
req := om.constructRequest()
if req == nil {
return
}
broker, err := om.coordinator()
if err != nil {
om.handleError(err)
return
}
resp, err := broker.CommitOffset(req)
if err != nil {
om.handleError(err)
om.releaseCoordinator(broker)
_ = broker.Close()
return
}
om.handleResponse(broker, req, resp)
}
func (om *offsetManager) abandonPartitionOffsetManager(pom *partitionOffsetManager) {
om.lock.Lock()
defer om.lock.Unlock()
delete(om.poms[pom.topic], pom.partition)
if len(om.poms[pom.topic]) == 0 {
delete(om.poms, pom.topic)
func (om *offsetManager) constructRequest() *OffsetCommitRequest {
var r *OffsetCommitRequest
var perPartitionTimestamp int64
if om.conf.Consumer.Offsets.Retention == 0 {
perPartitionTimestamp = ReceiveTime
r = &OffsetCommitRequest{
Version: 1,
ConsumerGroup: om.group,
ConsumerID: om.memberID,
ConsumerGroupGeneration: om.generation,
}
} else {
r = &OffsetCommitRequest{
Version: 2,
RetentionTime: int64(om.conf.Consumer.Offsets.Retention / time.Millisecond),
ConsumerGroup: om.group,
ConsumerID: om.memberID,
ConsumerGroupGeneration: om.generation,
}
}
om.pomsLock.RLock()
defer om.pomsLock.RUnlock()
for _, topicManagers := range om.poms {
for _, pom := range topicManagers {
pom.lock.Lock()
if pom.dirty {
r.AddBlock(pom.topic, pom.partition, pom.offset, perPartitionTimestamp, pom.metadata)
}
pom.lock.Unlock()
}
}
if len(r.blocks) > 0 {
return r
}
return nil
}
func (om *offsetManager) handleResponse(broker *Broker, req *OffsetCommitRequest, resp *OffsetCommitResponse) {
om.pomsLock.RLock()
defer om.pomsLock.RUnlock()
for _, topicManagers := range om.poms {
for _, pom := range topicManagers {
if req.blocks[pom.topic] == nil || req.blocks[pom.topic][pom.partition] == nil {
continue
}
var err KError
var ok bool
if resp.Errors[pom.topic] == nil {
pom.handleError(ErrIncompleteResponse)
continue
}
if err, ok = resp.Errors[pom.topic][pom.partition]; !ok {
pom.handleError(ErrIncompleteResponse)
continue
}
switch err {
case ErrNoError:
block := req.blocks[pom.topic][pom.partition]
pom.updateCommitted(block.offset, block.metadata)
case ErrNotLeaderForPartition, ErrLeaderNotAvailable,
ErrConsumerCoordinatorNotAvailable, ErrNotCoordinatorForConsumer:
// not a critical error, we just need to redispatch
om.releaseCoordinator(broker)
case ErrOffsetMetadataTooLarge, ErrInvalidCommitOffsetSize:
// nothing we can do about this, just tell the user and carry on
pom.handleError(err)
case ErrOffsetsLoadInProgress:
// nothing wrong but we didn't commit, we'll get it next time round
case ErrUnknownTopicOrPartition:
// let the user know *and* try redispatching - if topic-auto-create is
// enabled, redispatching should trigger a metadata request and create the
// topic; if not then re-dispatching won't help, but we've let the user
// know and it shouldn't hurt either (see https://github.com/Shopify/sarama/issues/706)
fallthrough
default:
// dunno, tell the user and try redispatching
pom.handleError(err)
om.releaseCoordinator(broker)
}
}
}
}
func (om *offsetManager) handleError(err error) {
om.pomsLock.RLock()
defer om.pomsLock.RUnlock()
for _, topicManagers := range om.poms {
for _, pom := range topicManagers {
pom.handleError(err)
}
}
}
func (om *offsetManager) asyncClosePOMs() {
om.pomsLock.RLock()
defer om.pomsLock.RUnlock()
for _, topicManagers := range om.poms {
for _, pom := range topicManagers {
pom.AsyncClose()
}
}
}
// Releases/removes closed POMs once they are clean (or when forced)
func (om *offsetManager) releasePOMs(force bool) (remaining int) {
om.pomsLock.Lock()
defer om.pomsLock.Unlock()
for topic, topicManagers := range om.poms {
for partition, pom := range topicManagers {
pom.lock.Lock()
releaseDue := pom.done && (force || !pom.dirty)
pom.lock.Unlock()
if releaseDue {
pom.release()
delete(om.poms[topic], partition)
if len(om.poms[topic]) == 0 {
delete(om.poms, topic)
}
}
}
remaining += len(om.poms[topic])
}
return
}
func (om *offsetManager) findPOM(topic string, partition int32) *partitionOffsetManager {
om.pomsLock.RLock()
defer om.pomsLock.RUnlock()
if partitions, ok := om.poms[topic]; ok {
if pom, ok := partitions[partition]; ok {
return pom
}
}
return nil
}
// Partition Offset Manager
@ -187,138 +475,26 @@ type partitionOffsetManager struct {
offset int64
metadata string
dirty bool
clean sync.Cond
broker *brokerOffsetManager
done bool
errors chan *ConsumerError
rebalance chan none
dying chan none
releaseOnce sync.Once
errors chan *ConsumerError
}
func (om *offsetManager) newPartitionOffsetManager(topic string, partition int32) (*partitionOffsetManager, error) {
pom := &partitionOffsetManager{
offset, metadata, err := om.fetchInitialOffset(topic, partition, om.conf.Metadata.Retry.Max)
if err != nil {
return nil, err
}
return &partitionOffsetManager{
parent: om,
topic: topic,
partition: partition,
errors: make(chan *ConsumerError, om.conf.ChannelBufferSize),
rebalance: make(chan none, 1),
dying: make(chan none),
}
pom.clean.L = &pom.lock
if err := pom.selectBroker(); err != nil {
return nil, err
}
if err := pom.fetchInitialOffset(om.conf.Metadata.Retry.Max); err != nil {
return nil, err
}
pom.broker.updateSubscriptions <- pom
go withRecover(pom.mainLoop)
return pom, nil
}
func (pom *partitionOffsetManager) mainLoop() {
for {
select {
case <-pom.rebalance:
if err := pom.selectBroker(); err != nil {
pom.handleError(err)
pom.rebalance <- none{}
} else {
pom.broker.updateSubscriptions <- pom
}
case <-pom.dying:
if pom.broker != nil {
select {
case <-pom.rebalance:
case pom.broker.updateSubscriptions <- pom:
}
pom.parent.unrefBrokerOffsetManager(pom.broker)
}
pom.parent.abandonPartitionOffsetManager(pom)
close(pom.errors)
return
}
}
}
func (pom *partitionOffsetManager) selectBroker() error {
if pom.broker != nil {
pom.parent.unrefBrokerOffsetManager(pom.broker)
pom.broker = nil
}
var broker *Broker
var err error
if err = pom.parent.client.RefreshCoordinator(pom.parent.group); err != nil {
return err
}
if broker, err = pom.parent.client.Coordinator(pom.parent.group); err != nil {
return err
}
pom.broker = pom.parent.refBrokerOffsetManager(broker)
return nil
}
func (pom *partitionOffsetManager) fetchInitialOffset(retries int) error {
request := new(OffsetFetchRequest)
request.Version = 1
request.ConsumerGroup = pom.parent.group
request.AddPartition(pom.topic, pom.partition)
response, err := pom.broker.broker.FetchOffset(request)
if err != nil {
return err
}
block := response.GetBlock(pom.topic, pom.partition)
if block == nil {
return ErrIncompleteResponse
}
switch block.Err {
case ErrNoError:
pom.offset = block.Offset
pom.metadata = block.Metadata
return nil
case ErrNotCoordinatorForConsumer:
if retries <= 0 {
return block.Err
}
if err := pom.selectBroker(); err != nil {
return err
}
return pom.fetchInitialOffset(retries - 1)
case ErrOffsetsLoadInProgress:
if retries <= 0 {
return block.Err
}
time.Sleep(pom.parent.conf.Metadata.Retry.Backoff)
return pom.fetchInitialOffset(retries - 1)
default:
return block.Err
}
}
func (pom *partitionOffsetManager) handleError(err error) {
cErr := &ConsumerError{
Topic: pom.topic,
Partition: pom.partition,
Err: err,
}
if pom.parent.conf.Consumer.Return.Errors {
pom.errors <- cErr
} else {
Logger.Println(cErr)
}
offset: offset,
metadata: metadata,
}, nil
}
func (pom *partitionOffsetManager) Errors() <-chan *ConsumerError {
@ -353,7 +529,6 @@ func (pom *partitionOffsetManager) updateCommitted(offset int64, metadata string
if pom.offset == offset && pom.metadata == metadata {
pom.dirty = false
pom.clean.Signal()
}
}
@ -369,16 +544,9 @@ func (pom *partitionOffsetManager) NextOffset() (int64, string) {
}
func (pom *partitionOffsetManager) AsyncClose() {
go func() {
pom.lock.Lock()
defer pom.lock.Unlock()
for pom.dirty {
pom.clean.Wait()
}
close(pom.dying)
}()
pom.lock.Lock()
pom.done = true
pom.lock.Unlock()
}
func (pom *partitionOffsetManager) Close() error {
@ -395,166 +563,22 @@ func (pom *partitionOffsetManager) Close() error {
return nil
}
// Broker Offset Manager
type brokerOffsetManager struct {
parent *offsetManager
broker *Broker
timer *time.Ticker
updateSubscriptions chan *partitionOffsetManager
subscriptions map[*partitionOffsetManager]none
refs int
}
func (om *offsetManager) newBrokerOffsetManager(broker *Broker) *brokerOffsetManager {
bom := &brokerOffsetManager{
parent: om,
broker: broker,
timer: time.NewTicker(om.conf.Consumer.Offsets.CommitInterval),
updateSubscriptions: make(chan *partitionOffsetManager),
subscriptions: make(map[*partitionOffsetManager]none),
func (pom *partitionOffsetManager) handleError(err error) {
cErr := &ConsumerError{
Topic: pom.topic,
Partition: pom.partition,
Err: err,
}
go withRecover(bom.mainLoop)
return bom
}
func (bom *brokerOffsetManager) mainLoop() {
for {
select {
case <-bom.timer.C:
if len(bom.subscriptions) > 0 {
bom.flushToBroker()
}
case s, ok := <-bom.updateSubscriptions:
if !ok {
bom.timer.Stop()
return
}
if _, ok := bom.subscriptions[s]; ok {
delete(bom.subscriptions, s)
} else {
bom.subscriptions[s] = none{}
}
}
}
}
func (bom *brokerOffsetManager) flushToBroker() {
request := bom.constructRequest()
if request == nil {
return
}
response, err := bom.broker.CommitOffset(request)
if err != nil {
bom.abort(err)
return
}
for s := range bom.subscriptions {
if request.blocks[s.topic] == nil || request.blocks[s.topic][s.partition] == nil {
continue
}
var err KError
var ok bool
if response.Errors[s.topic] == nil {
s.handleError(ErrIncompleteResponse)
delete(bom.subscriptions, s)
s.rebalance <- none{}
continue
}
if err, ok = response.Errors[s.topic][s.partition]; !ok {
s.handleError(ErrIncompleteResponse)
delete(bom.subscriptions, s)
s.rebalance <- none{}
continue
}
switch err {
case ErrNoError:
block := request.blocks[s.topic][s.partition]
s.updateCommitted(block.offset, block.metadata)
case ErrNotLeaderForPartition, ErrLeaderNotAvailable,
ErrConsumerCoordinatorNotAvailable, ErrNotCoordinatorForConsumer:
// not a critical error, we just need to redispatch
delete(bom.subscriptions, s)
s.rebalance <- none{}
case ErrOffsetMetadataTooLarge, ErrInvalidCommitOffsetSize:
// nothing we can do about this, just tell the user and carry on
s.handleError(err)
case ErrOffsetsLoadInProgress:
// nothing wrong but we didn't commit, we'll get it next time round
break
case ErrUnknownTopicOrPartition:
// let the user know *and* try redispatching - if topic-auto-create is
// enabled, redispatching should trigger a metadata request and create the
// topic; if not then re-dispatching won't help, but we've let the user
// know and it shouldn't hurt either (see https://github.com/Shopify/sarama/issues/706)
fallthrough
default:
// dunno, tell the user and try redispatching
s.handleError(err)
delete(bom.subscriptions, s)
s.rebalance <- none{}
}
}
}
func (bom *brokerOffsetManager) constructRequest() *OffsetCommitRequest {
var r *OffsetCommitRequest
var perPartitionTimestamp int64
if bom.parent.conf.Consumer.Offsets.Retention == 0 {
perPartitionTimestamp = ReceiveTime
r = &OffsetCommitRequest{
Version: 1,
ConsumerGroup: bom.parent.group,
ConsumerGroupGeneration: GroupGenerationUndefined,
}
if pom.parent.conf.Consumer.Return.Errors {
pom.errors <- cErr
} else {
r = &OffsetCommitRequest{
Version: 2,
RetentionTime: int64(bom.parent.conf.Consumer.Offsets.Retention / time.Millisecond),
ConsumerGroup: bom.parent.group,
ConsumerGroupGeneration: GroupGenerationUndefined,
}
Logger.Println(cErr)
}
for s := range bom.subscriptions {
s.lock.Lock()
if s.dirty {
r.AddBlock(s.topic, s.partition, s.offset, perPartitionTimestamp, s.metadata)
}
s.lock.Unlock()
}
if len(r.blocks) > 0 {
return r
}
return nil
}
func (bom *brokerOffsetManager) abort(err error) {
_ = bom.broker.Close() // we don't care about the error this might return, we already have one
bom.parent.abandonBroker(bom)
for pom := range bom.subscriptions {
pom.handleError(err)
pom.rebalance <- none{}
}
for s := range bom.updateSubscriptions {
if _, ok := bom.subscriptions[s]; !ok {
s.handleError(err)
s.rebalance <- none{}
}
}
bom.subscriptions = make(map[*partitionOffsetManager]none)
func (pom *partitionOffsetManager) release() {
pom.releaseOnce.Do(func() {
close(pom.errors)
})
}
