Mirror of https://github.com/ManyakRus/starter.git

Commit: new
vendor/github.com/segmentio/kafka-go/.gitattributes (generated, vendored, normal file, 1 line)
@@ -0,0 +1 @@
fixtures/*.hex binary
vendor/github.com/segmentio/kafka-go/.gitignore (generated, vendored, normal file, 40 lines)
@@ -0,0 +1,40 @@
# Compiled Object files, Static and Dynamic libs (Shared Objects)
*.o
*.a
*.so

# Folders
_obj
_test

# Architecture specific extensions/prefixes
*.[568vq]
[568vq].out

*.cgo1.go
*.cgo2.c
_cgo_defun.c
_cgo_gotypes.go
_cgo_export.*

_testmain.go

*.exe
*.test
*.prof
/kafkacli

# Emacs
*~

# VIM
*.swp

# Goland
.idea

#IntelliJ
*.iml

# govendor
/vendor/*/
vendor/github.com/segmentio/kafka-go/.golangci.yml (generated, vendored, normal file, 18 lines)
@@ -0,0 +1,18 @@
linters:
  enable:
    - bodyclose
    - errorlint
    - goconst
    - godot
    - gofmt
    - goimports
    - prealloc

  disable:
    # Temporarily disabling so it can be addressed in a dedicated PR.
    - errcheck
    - goerr113

linters-settings:
  goconst:
    ignore-tests: true
vendor/github.com/segmentio/kafka-go/CODE_OF_CONDUCT.md (generated, vendored, normal file, 75 lines)
@@ -0,0 +1,75 @@
# Contributor Covenant Code of Conduct

## Our Pledge

In the interest of fostering an open and welcoming environment, we as
contributors and maintainers pledge to making participation in our project and
our community a harassment-free experience for everyone, regardless of age, body
size, disability, ethnicity, sex characteristics, gender identity and expression,
level of experience, education, socio-economic status, nationality, personal
appearance, race, religion, or sexual identity and orientation.

## Our Standards

Examples of behavior that contributes to creating a positive environment
include:

- Using welcoming and inclusive language
- Being respectful of differing viewpoints and experiences
- Gracefully accepting constructive criticism
- Focusing on what is best for the community
- Showing empathy towards other community members

Examples of unacceptable behavior by participants include:

- The use of sexualized language or imagery and unwelcome sexual attention or
  advances
- Trolling, insulting/derogatory comments, and personal or political attacks
- Public or private harassment
- Publishing others' private information, such as a physical or electronic
  address, without explicit permission
- Other conduct which could reasonably be considered inappropriate in a
  professional setting

## Our Responsibilities

Project maintainers are responsible for clarifying the standards of acceptable
behavior and are expected to take appropriate and fair corrective action in
response to any instances of unacceptable behavior.

Project maintainers have the right and responsibility to remove, edit, or
reject comments, commits, code, wiki edits, issues, and other contributions
that are not aligned to this Code of Conduct, or to ban temporarily or
permanently any contributor for other behaviors that they deem inappropriate,
threatening, offensive, or harmful.

Project maintainers are available in the [#kafka-go](https://gophers.slack.com/archives/CG4H0N9PX) channel inside the [Gophers Slack](https://gophers.slack.com).

## Scope

This Code of Conduct applies both within project spaces and in public spaces
when an individual is representing the project or its community. Examples of
representing a project or community include using an official project e-mail
address, posting via an official social media account, or acting as an appointed
representative at an online or offline event. Representation of a project may be
further defined and clarified by project maintainers.

## Enforcement

Instances of abusive, harassing, or otherwise unacceptable behavior may be
reported by contacting the project team at open-source@twilio.com. All
complaints will be reviewed and investigated and will result in a response that
is deemed necessary and appropriate to the circumstances. The project team is
obligated to maintain confidentiality with regard to the reporter of an incident.
Further details of specific enforcement policies may be posted separately.

Project maintainers who do not follow or enforce the Code of Conduct in good
faith may face temporary or permanent repercussions as determined by other
members of the project's leadership.

## Attribution

This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4,
available at https://www.contributor-covenant.org/version/1/4/code-of-conduct.html

[homepage]: https://www.contributor-covenant.org
vendor/github.com/segmentio/kafka-go/CONTRIBUTING.md (generated, vendored, normal file, 139 lines)
@@ -0,0 +1,139 @@
# Contributing to kafka-go

kafka-go is an open source project. We welcome contributions to kafka-go of any kind including documentation,
organization, tutorials, bug reports, issues, feature requests, feature implementations, pull requests, etc.

## Table of Contents

* [Reporting Issues](#reporting-issues)
* [Submitting Patches](#submitting-patches)
* [Code Contribution Guidelines](#code-contribution-guidelines)
* [Git Commit Message Guidelines](#git-commit-message-guidelines)
* [Fetching the Sources From GitHub](#fetching-the-sources-from-github)
* [Building kafka-go with Your Changes](#building-kafka-go-with-your-changes)

## Reporting Issues

If you believe you have found a defect in kafka-go, use the GitHub issue tracker to report
the problem to the maintainers.
When reporting the issue, please provide the version of kafka-go, which version(s) of Kafka
you are testing against, and your operating system.

- [kafka-go Issues segmentio/kafka-go](https://github.com/segmentio/kafka-go/issues)

## Submitting Patches

The kafka-go project welcomes all contributors and contributions regardless of skill or experience level. If you are
interested in helping with the project, we will help you with your contribution.

### Code Contribution

To make contributions as seamless as possible, we ask the following:

* Go ahead and fork the project and make your changes. We encourage pull requests to allow for review and discussion of code changes.
* When you're ready to create a pull request, be sure to:
  * Have test cases for the new code. If you have questions about how to do this, please ask in your pull request.
  * Run `go fmt`.
  * Squash your commits into a single commit with `git rebase -i`. It's okay to force update your pull request with `git push -f`.
  * Follow the **Git Commit Message Guidelines** below.

### Git Commit Message Guidelines

This [blog article](http://chris.beams.io/posts/git-commit/) is a good resource for learning how to write good commit messages,
the most important part being that each commit message should have a title/subject in imperative mood starting with a capital letter and no trailing period:
*"Return error on wrong use of the Reader"*, **NOT** *"returning some error."*

Also, if your commit references one or more GitHub issues, always end your commit message body with *See #1234* or *Fixes #1234*.
Replace *1234* with the GitHub issue ID. The last example will close the issue when the commit is merged into *master*.

Please use a short and descriptive branch name, e.g. NOT "patch-1". Such names are very common and create a naming
conflict each time a submission is pulled for review.

An example:

```text
Add Code of Conduct and Code Contribution Guidelines

Add a full Code of Conduct and Code Contribution Guidelines document.
Provide description on how best to retrieve code, fork, checkout, and commit changes.

Fixes #688
```

### Fetching the Sources From GitHub

We use Go Modules support built into Go 1.11 to build. The easiest way is to clone kafka-go into a directory outside of
`GOPATH`, as in the following example:

```bash
mkdir $HOME/src
cd $HOME/src
git clone https://github.com/segmentio/kafka-go.git
cd kafka-go
go build ./...
```

To make changes to kafka-go's source:

1. Create a new branch for your changes (the branch name is arbitrary):

    ```bash
    git checkout -b branch1234
    ```

1. After making your changes, commit them to your new branch:

    ```bash
    git commit -a -v
    ```

1. Fork kafka-go in GitHub

1. Add your fork as a new remote (the remote name, "upstream" in this example, is arbitrary):

    ```bash
    git remote add upstream git@github.com:USERNAME/kafka-go.git
    ```

1. Push your branch (the remote name, "upstream" in this example, is arbitrary):

    ```bash
    git push upstream
    ```

1. You are now ready to submit a PR based upon the new branch in your forked repository.

### Using the forked library

To replace the original kafka-go library with your forked version, do the following.

1. Make sure your application already has a go.mod entry depending on kafka-go:

    ```bash
    module github.com/myusername/myapp

    require (
        ...
        github.com/segmentio/kafka-go v1.2.3
        ...
    )
    ```

1. Add a `replace` directive near the beginning of the module file:

    ```bash
    module github.com/myusername/myapp

    replace github.com/segmentio/kafka-go v1.2.3 => ../local/directory

    require (
        ...
        github.com/segmentio/kafka-go v1.2.3
        ...
    )
    ```

1. Depending on whether you are vendoring, you might need to run the following command to pull in the new bits:

    ```bash
    go mod vendor
    ```
vendor/github.com/segmentio/kafka-go/LICENSE (generated, vendored, normal file, 21 lines)
@@ -0,0 +1,21 @@
MIT License

Copyright (c) 2017 Segment

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
vendor/github.com/segmentio/kafka-go/Makefile (generated, vendored, normal file, 7 lines)
@@ -0,0 +1,7 @@
test:
	KAFKA_SKIP_NETTEST=1 \
	KAFKA_VERSION=2.3.1 \
	go test -race -cover ./...

docker:
	docker-compose up -d
vendor/github.com/segmentio/kafka-go/README.md (generated, vendored, normal file, 802 lines)
@@ -0,0 +1,802 @@
# kafka-go [CircleCI](https://circleci.com/gh/segmentio/kafka-go) [Go Report Card](https://goreportcard.com/report/github.com/segmentio/kafka-go) [GoDoc](https://godoc.org/github.com/segmentio/kafka-go)

## Motivations

We rely on both Go and Kafka a lot at Segment. Unfortunately, the state of the Go
client libraries for Kafka at the time of this writing was not ideal. The available
options were:

- [sarama](https://github.com/Shopify/sarama), which is by far the most popular
but is quite difficult to work with. It is poorly documented, the API exposes
low level concepts of the Kafka protocol, and it doesn't support recent Go features
like [contexts](https://golang.org/pkg/context/). It also passes all values as
pointers which causes large numbers of dynamic memory allocations, more frequent
garbage collections, and higher memory usage.

- [confluent-kafka-go](https://github.com/confluentinc/confluent-kafka-go) is a
cgo based wrapper around [librdkafka](https://github.com/edenhill/librdkafka),
which means it introduces a dependency to a C library on all Go code that uses
the package. It has much better documentation than sarama but still lacks support
for Go contexts.

- [goka](https://github.com/lovoo/goka) is a more recent Kafka client for Go
which focuses on a specific usage pattern. It provides abstractions for using Kafka
as a message passing bus between services rather than an ordered log of events, but
this is not the typical use case of Kafka for us at Segment. The package also
depends on sarama for all interactions with Kafka.

This is where `kafka-go` comes into play. It provides both low and high level
APIs for interacting with Kafka, mirroring concepts and implementing interfaces of
the Go standard library to make it easy to use and integrate with existing
software.

#### Note:

In order to better align with our newly adopted Code of Conduct, the kafka-go
project has renamed our default branch to `main`. For the full details of our
Code Of Conduct see [this](./CODE_OF_CONDUCT.md) document.

## Kafka versions

`kafka-go` is currently tested with Kafka versions 0.10.1.0 to 2.7.1.
While it should also be compatible with later versions, newer features available
in the Kafka API may not yet be implemented in the client.

## Go versions

`kafka-go` requires Go version 1.15 or later.

## Connection [GoDoc](https://godoc.org/github.com/segmentio/kafka-go#Conn)

The `Conn` type is the core of the `kafka-go` package. It wraps around a raw
network connection to expose a low-level API to a Kafka server.

Here are some examples showing typical use of a connection object:

```go
// to produce messages
topic := "my-topic"
partition := 0

conn, err := kafka.DialLeader(context.Background(), "tcp", "localhost:9092", topic, partition)
if err != nil {
    log.Fatal("failed to dial leader:", err)
}

conn.SetWriteDeadline(time.Now().Add(10*time.Second))
_, err = conn.WriteMessages(
    kafka.Message{Value: []byte("one!")},
    kafka.Message{Value: []byte("two!")},
    kafka.Message{Value: []byte("three!")},
)
if err != nil {
    log.Fatal("failed to write messages:", err)
}

if err := conn.Close(); err != nil {
    log.Fatal("failed to close writer:", err)
}
```

```go
// to consume messages
topic := "my-topic"
partition := 0

conn, err := kafka.DialLeader(context.Background(), "tcp", "localhost:9092", topic, partition)
if err != nil {
    log.Fatal("failed to dial leader:", err)
}

conn.SetReadDeadline(time.Now().Add(10*time.Second))
batch := conn.ReadBatch(10e3, 1e6) // fetch 10KB min, 1MB max

b := make([]byte, 10e3) // 10KB max per message
for {
    n, err := batch.Read(b)
    if err != nil {
        break
    }
    fmt.Println(string(b[:n]))
}

if err := batch.Close(); err != nil {
    log.Fatal("failed to close batch:", err)
}

if err := conn.Close(); err != nil {
    log.Fatal("failed to close connection:", err)
}
```

### To Create Topics

By default, Kafka has `auto.create.topics.enable='true'` (`KAFKA_AUTO_CREATE_TOPICS_ENABLE='true'` in the wurstmeister/kafka docker image). If this value is set to `'true'`, topics will be created as a side effect of `kafka.DialLeader` like so:

```go
// to create topics when auto.create.topics.enable='true'
conn, err := kafka.DialLeader(context.Background(), "tcp", "localhost:9092", "my-topic", 0)
if err != nil {
    panic(err.Error())
}
```

If `auto.create.topics.enable='false'` then you will need to create topics explicitly like so:

```go
// to create topics when auto.create.topics.enable='false'
topic := "my-topic"

conn, err := kafka.Dial("tcp", "localhost:9092")
if err != nil {
    panic(err.Error())
}
defer conn.Close()

controller, err := conn.Controller()
if err != nil {
    panic(err.Error())
}
var controllerConn *kafka.Conn
controllerConn, err = kafka.Dial("tcp", net.JoinHostPort(controller.Host, strconv.Itoa(controller.Port)))
if err != nil {
    panic(err.Error())
}
defer controllerConn.Close()

topicConfigs := []kafka.TopicConfig{
    {
        Topic:             topic,
        NumPartitions:     1,
        ReplicationFactor: 1,
    },
}

err = controllerConn.CreateTopics(topicConfigs...)
if err != nil {
    panic(err.Error())
}
```

### To Connect To Leader Via a Non-leader Connection

```go
// to connect to the kafka leader via an existing non-leader connection rather than using DialLeader
conn, err := kafka.Dial("tcp", "localhost:9092")
if err != nil {
    panic(err.Error())
}
defer conn.Close()
controller, err := conn.Controller()
if err != nil {
    panic(err.Error())
}
var connLeader *kafka.Conn
connLeader, err = kafka.Dial("tcp", net.JoinHostPort(controller.Host, strconv.Itoa(controller.Port)))
if err != nil {
    panic(err.Error())
}
defer connLeader.Close()
```

### To list topics

```go
conn, err := kafka.Dial("tcp", "localhost:9092")
if err != nil {
    panic(err.Error())
}
defer conn.Close()

partitions, err := conn.ReadPartitions()
if err != nil {
    panic(err.Error())
}

m := map[string]struct{}{}

for _, p := range partitions {
    m[p.Topic] = struct{}{}
}
for k := range m {
    fmt.Println(k)
}
```

Because it is low level, the `Conn` type turns out to be a great building block
for higher level abstractions, like the `Reader` for example.

## Reader [GoDoc](https://godoc.org/github.com/segmentio/kafka-go#Reader)

A `Reader` is another concept exposed by the `kafka-go` package, which intends
to make it simpler to implement the typical use case of consuming from a single
topic-partition pair.
A `Reader` also automatically handles reconnections and offset management, and
exposes an API that supports asynchronous cancellations and timeouts using Go
contexts.

Note that it is important to call `Close()` on a `Reader` when a process exits.
The kafka server needs a graceful disconnect to stop it from continuing to
attempt to send messages to the connected clients. The example below will not
call `Close()` if the process is terminated with SIGINT (ctrl-c at the shell) or
SIGTERM (as docker stop or a kubernetes restart does). This can result in a
delay when a new reader on the same topic connects (e.g. new process started
or new container running). Use a `signal.Notify` handler to close the reader on
process shutdown.
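A minimal sketch of such a shutdown handler (illustrative only; it assumes the reader `r` from the example below, plus imports of `os`, `os/signal`, `syscall`, and `log`):

```go
sigs := make(chan os.Signal, 1)
signal.Notify(sigs, os.Interrupt, syscall.SIGTERM)

go func() {
    <-sigs // SIGINT (ctrl-c) or SIGTERM (docker stop, kubernetes restart)
    if err := r.Close(); err != nil {
        log.Println("failed to close reader:", err)
    }
}()
```

Closing the reader causes any pending `ReadMessage` call to return an error, so the consume loop can exit cleanly.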
```go
// make a new reader that consumes from topic-A, partition 0, at offset 42
r := kafka.NewReader(kafka.ReaderConfig{
    Brokers:   []string{"localhost:9092", "localhost:9093", "localhost:9094"},
    Topic:     "topic-A",
    Partition: 0,
    MinBytes:  10e3, // 10KB
    MaxBytes:  10e6, // 10MB
})
r.SetOffset(42)

for {
    m, err := r.ReadMessage(context.Background())
    if err != nil {
        break
    }
    fmt.Printf("message at offset %d: %s = %s\n", m.Offset, string(m.Key), string(m.Value))
}

if err := r.Close(); err != nil {
    log.Fatal("failed to close reader:", err)
}
```

### Consumer Groups

`kafka-go` also supports Kafka consumer groups including broker managed offsets.
To enable consumer groups, simply specify the GroupID in the ReaderConfig.

ReadMessage automatically commits offsets when using consumer groups.

```go
// make a new reader that consumes from topic-A
r := kafka.NewReader(kafka.ReaderConfig{
    Brokers:  []string{"localhost:9092", "localhost:9093", "localhost:9094"},
    GroupID:  "consumer-group-id",
    Topic:    "topic-A",
    MinBytes: 10e3, // 10KB
    MaxBytes: 10e6, // 10MB
})

for {
    m, err := r.ReadMessage(context.Background())
    if err != nil {
        break
    }
    fmt.Printf("message at topic/partition/offset %v/%v/%v: %s = %s\n", m.Topic, m.Partition, m.Offset, string(m.Key), string(m.Value))
}

if err := r.Close(); err != nil {
    log.Fatal("failed to close reader:", err)
}
```

There are a number of limitations when using consumer groups:

* `(*Reader).SetOffset` will return an error when GroupID is set
* `(*Reader).Offset` will always return `-1` when GroupID is set
* `(*Reader).Lag` will always return `-1` when GroupID is set
* `(*Reader).ReadLag` will return an error when GroupID is set
* `(*Reader).Stats` will return a partition of `-1` when GroupID is set

### Explicit Commits

`kafka-go` also supports explicit commits. Instead of calling `ReadMessage`,
call `FetchMessage` followed by `CommitMessages`.

```go
ctx := context.Background()
for {
    m, err := r.FetchMessage(ctx)
    if err != nil {
        break
    }
    fmt.Printf("message at topic/partition/offset %v/%v/%v: %s = %s\n", m.Topic, m.Partition, m.Offset, string(m.Key), string(m.Value))
    if err := r.CommitMessages(ctx, m); err != nil {
        log.Fatal("failed to commit messages:", err)
    }
}
```

When committing messages in consumer groups, the message with the highest offset
for a given topic/partition determines the value of the committed offset for
that partition. For example, if messages at offsets 1, 2, and 3 of a single
partition were retrieved by calls to `FetchMessage`, calling `CommitMessages`
with the message at offset 3 will also result in committing the messages at offsets 1
and 2 for that partition.
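To illustrate that batching behavior, the following sketch (illustrative only, reusing the reader `r` from above) fetches a batch of messages and commits only the last one, which implicitly commits everything before it:

```go
ctx := context.Background()
batch := make([]kafka.Message, 0, 100)

// fetch up to 100 messages before committing
for len(batch) < cap(batch) {
    m, err := r.FetchMessage(ctx)
    if err != nil {
        break
    }
    batch = append(batch, m)
}

// committing the highest-offset message also commits all earlier
// offsets of the same partition
if len(batch) > 0 {
    if err := r.CommitMessages(ctx, batch[len(batch)-1]); err != nil {
        log.Fatal("failed to commit messages:", err)
    }
}
```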
### Managing Commits

By default, CommitMessages will synchronously commit offsets to Kafka. For
improved performance, you can instead periodically commit offsets to Kafka
by setting CommitInterval on the ReaderConfig.

```go
// make a new reader that consumes from topic-A
r := kafka.NewReader(kafka.ReaderConfig{
    Brokers:        []string{"localhost:9092", "localhost:9093", "localhost:9094"},
    GroupID:        "consumer-group-id",
    Topic:          "topic-A",
    MinBytes:       10e3, // 10KB
    MaxBytes:       10e6, // 10MB
    CommitInterval: time.Second, // flushes commits to Kafka every second
})
```

## Writer [GoDoc](https://godoc.org/github.com/segmentio/kafka-go#Writer)

To produce messages to Kafka, a program may use the low-level `Conn` API, but
the package also provides a higher level `Writer` type which is more appropriate
to use in most cases as it provides additional features:

- Automatic retries and reconnections on errors.
- Configurable distribution of messages across available partitions.
- Synchronous or asynchronous writes of messages to Kafka.
- Asynchronous cancellation using contexts.
- Flushing of pending messages on close to support graceful shutdowns.
- Creation of a missing topic before publishing a message. *Note:* this was the default behaviour up to version `v0.4.30`.

```go
// make a writer that produces to topic-A, using the least-bytes distribution
w := &kafka.Writer{
    Addr:     kafka.TCP("localhost:9092", "localhost:9093", "localhost:9094"),
    Topic:    "topic-A",
    Balancer: &kafka.LeastBytes{},
}

err := w.WriteMessages(context.Background(),
    kafka.Message{
        Key:   []byte("Key-A"),
        Value: []byte("Hello World!"),
    },
    kafka.Message{
        Key:   []byte("Key-B"),
        Value: []byte("One!"),
    },
    kafka.Message{
        Key:   []byte("Key-C"),
        Value: []byte("Two!"),
    },
)
if err != nil {
    log.Fatal("failed to write messages:", err)
}

if err := w.Close(); err != nil {
    log.Fatal("failed to close writer:", err)
}
```

### Missing topic creation before publication

```go
// Make a writer that publishes messages to topic-A.
// The topic will be created if it is missing.
w := &kafka.Writer{
    Addr:                   kafka.TCP("localhost:9092", "localhost:9093", "localhost:9094"),
    Topic:                  "topic-A",
    AllowAutoTopicCreation: true,
}

messages := []kafka.Message{
    {
        Key:   []byte("Key-A"),
        Value: []byte("Hello World!"),
    },
    {
        Key:   []byte("Key-B"),
        Value: []byte("One!"),
    },
    {
        Key:   []byte("Key-C"),
        Value: []byte("Two!"),
    },
}

var err error
const retries = 3
for i := 0; i < retries; i++ {
    ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
    defer cancel()

    // attempt to create topic prior to publishing the message
    err = w.WriteMessages(ctx, messages...)
    if errors.Is(err, kafka.LeaderNotAvailable) || errors.Is(err, context.DeadlineExceeded) {
        time.Sleep(time.Millisecond * 250)
        continue
    }

    if err != nil {
        log.Fatalf("unexpected error %v", err)
    }
    break // success
}

if err := w.Close(); err != nil {
    log.Fatal("failed to close writer:", err)
}
```

### Writing to multiple topics

Normally, the `WriterConfig.Topic` is used to initialize a single-topic writer.
By excluding that particular configuration, you are given the ability to define
the topic on a per-message basis by setting `Message.Topic`.

```go
w := &kafka.Writer{
    Addr: kafka.TCP("localhost:9092", "localhost:9093", "localhost:9094"),
    // NOTE: When Topic is not defined here, each Message must define it instead.
    Balancer: &kafka.LeastBytes{},
}

err := w.WriteMessages(context.Background(),
    // NOTE: Each Message has Topic defined, otherwise an error is returned.
    kafka.Message{
        Topic: "topic-A",
        Key:   []byte("Key-A"),
        Value: []byte("Hello World!"),
    },
    kafka.Message{
        Topic: "topic-B",
        Key:   []byte("Key-B"),
        Value: []byte("One!"),
    },
    kafka.Message{
        Topic: "topic-C",
        Key:   []byte("Key-C"),
        Value: []byte("Two!"),
    },
)
if err != nil {
    log.Fatal("failed to write messages:", err)
}

if err := w.Close(); err != nil {
    log.Fatal("failed to close writer:", err)
}
```

**NOTE:** These 2 patterns are mutually exclusive: if you set `Writer.Topic`,
you must not also explicitly define `Message.Topic` on the messages you are
writing. The opposite applies when you do not define a topic for the writer.
The `Writer` will return an error if it detects this ambiguity.

### Compatibility with other clients

#### Sarama

If you're switching from Sarama and need/want to use the same algorithm for message partitioning, you can either use
the `kafka.Hash` balancer or the `kafka.ReferenceHash` balancer:
* `kafka.Hash` = `sarama.NewHashPartitioner`
* `kafka.ReferenceHash` = `sarama.NewReferenceHashPartitioner`

The `kafka.Hash` and `kafka.ReferenceHash` balancers would route messages to the same partitions that the two
aforementioned Sarama partitioners would route them to.

```go
w := &kafka.Writer{
    Addr:     kafka.TCP("localhost:9092", "localhost:9093", "localhost:9094"),
    Topic:    "topic-A",
    Balancer: &kafka.Hash{},
}
```

#### librdkafka and confluent-kafka-go

Use the `kafka.CRC32Balancer` balancer to get the same behaviour as librdkafka's
default `consistent_random` partition strategy.

```go
w := &kafka.Writer{
    Addr:     kafka.TCP("localhost:9092", "localhost:9093", "localhost:9094"),
    Topic:    "topic-A",
    Balancer: kafka.CRC32Balancer{},
}
```

#### Java

Use the `kafka.Murmur2Balancer` balancer to get the same behaviour as the canonical
Java client's default partitioner. Note: the Java class allows you to directly specify
the partition, which is not permitted here.

```go
w := &kafka.Writer{
    Addr:     kafka.TCP("localhost:9092", "localhost:9093", "localhost:9094"),
    Topic:    "topic-A",
    Balancer: kafka.Murmur2Balancer{},
}
```

### Compression

Compression can be enabled on the `Writer` by setting the `Compression` field:

```go
w := &kafka.Writer{
    Addr:        kafka.TCP("localhost:9092", "localhost:9093", "localhost:9094"),
    Topic:       "topic-A",
    Compression: kafka.Snappy,
}
```

The `Reader` will determine if the consumed messages are compressed by
examining the message attributes. However, the package(s) for all expected
codecs must be imported so that they get loaded correctly.

_Note: in versions prior to 0.4 programs had to import compression packages to
install codecs and support reading compressed messages from kafka. This is no
longer the case and imports of the compression packages are now no-ops._

## TLS Support

For a bare-bones Conn type or in the Reader/Writer configs you can specify a dialer option for TLS support. If the TLS field is nil, it will not connect with TLS.
*Note:* Connecting to a Kafka cluster with TLS enabled without configuring TLS on the Conn/Reader/Writer can manifest in opaque io.ErrUnexpectedEOF errors.

### Connection

```go
dialer := &kafka.Dialer{
    Timeout:   10 * time.Second,
    DualStack: true,
    TLS:       &tls.Config{...tls config...},
}

conn, err := dialer.DialContext(ctx, "tcp", "localhost:9093")
```

### Reader

```go
dialer := &kafka.Dialer{
    Timeout:   10 * time.Second,
    DualStack: true,
    TLS:       &tls.Config{...tls config...},
}

r := kafka.NewReader(kafka.ReaderConfig{
    Brokers: []string{"localhost:9092", "localhost:9093", "localhost:9094"},
    GroupID: "consumer-group-id",
    Topic:   "topic-A",
    Dialer:  dialer,
})
```

### Writer

Direct Writer creation

```go
w := kafka.Writer{
    Addr:     kafka.TCP("localhost:9092", "localhost:9093", "localhost:9094"),
    Topic:    "topic-A",
    Balancer: &kafka.Hash{},
    Transport: &kafka.Transport{
        TLS: &tls.Config{},
    },
}
```

Using `kafka.NewWriter`

```go
dialer := &kafka.Dialer{
    Timeout:   10 * time.Second,
    DualStack: true,
    TLS:       &tls.Config{...tls config...},
}

w := kafka.NewWriter(kafka.WriterConfig{
    Brokers:  []string{"localhost:9092", "localhost:9093", "localhost:9094"},
    Topic:    "topic-A",
    Balancer: &kafka.Hash{},
    Dialer:   dialer,
})
```

Note that `kafka.NewWriter` and `kafka.WriterConfig` are deprecated and will be removed in a future release.

## SASL Support

You can specify an option on the `Dialer` to use SASL authentication. The `Dialer` can be used directly to open a `Conn` or it can be passed to a `Reader` or `Writer` via their respective configs. If the `SASLMechanism` field is `nil`, it will not authenticate with SASL.

### SASL Authentication Types

#### [Plain](https://godoc.org/github.com/segmentio/kafka-go/sasl/plain#Mechanism)

```go
mechanism := plain.Mechanism{
    Username: "username",
    Password: "password",
}
```

#### [SCRAM](https://godoc.org/github.com/segmentio/kafka-go/sasl/scram#Mechanism)

```go
mechanism, err := scram.Mechanism(scram.SHA512, "username", "password")
if err != nil {
    panic(err)
}
```

### Connection

```go
mechanism, err := scram.Mechanism(scram.SHA512, "username", "password")
if err != nil {
    panic(err)
}

dialer := &kafka.Dialer{
    Timeout:       10 * time.Second,
    DualStack:     true,
    SASLMechanism: mechanism,
}

conn, err := dialer.DialContext(ctx, "tcp", "localhost:9093")
```

### Reader

```go
mechanism, err := scram.Mechanism(scram.SHA512, "username", "password")
if err != nil {
    panic(err)
}

dialer := &kafka.Dialer{
    Timeout:       10 * time.Second,
    DualStack:     true,
    SASLMechanism: mechanism,
}

r := kafka.NewReader(kafka.ReaderConfig{
    Brokers: []string{"localhost:9092", "localhost:9093", "localhost:9094"},
    GroupID: "consumer-group-id",
    Topic:   "topic-A",
    Dialer:  dialer,
})
```

### Writer

```go
mechanism, err := scram.Mechanism(scram.SHA512, "username", "password")
if err != nil {
    panic(err)
}

// Transports are responsible for managing connection pools and other resources,
// it's generally best to create a few of these and share them across your
// application.
sharedTransport := &kafka.Transport{
    SASL: mechanism,
}

w := kafka.Writer{
    Addr:      kafka.TCP("localhost:9092", "localhost:9093", "localhost:9094"),
    Topic:     "topic-A",
    Balancer:  &kafka.Hash{},
    Transport: sharedTransport,
}
```

### Client

```go
mechanism, err := scram.Mechanism(scram.SHA512, "username", "password")
if err != nil {
    panic(err)
}

// Transports are responsible for managing connection pools and other resources,
// it's generally best to create a few of these and share them across your
// application.
sharedTransport := &kafka.Transport{
    SASL: mechanism,
}

client := &kafka.Client{
    Addr:      kafka.TCP("localhost:9092", "localhost:9093", "localhost:9094"),
    Timeout:   10 * time.Second,
    Transport: sharedTransport,
}
```

#### Reading all messages within a time range

```go
startTime := time.Now().Add(-time.Hour)
endTime := time.Now()
batchSize := int(10e6) // 10MB

r := kafka.NewReader(kafka.ReaderConfig{
    Brokers:   []string{"localhost:9092", "localhost:9093", "localhost:9094"},
    Topic:     "my-topic1",
    Partition: 0,
    MinBytes:  batchSize,
    MaxBytes:  batchSize,
})

r.SetOffsetAt(context.Background(), startTime)

for {
    m, err := r.ReadMessage(context.Background())

    if err != nil {
        break
    }
    if m.Time.After(endTime) {
        break
    }
    // TODO: process message
    fmt.Printf("message at offset %d: %s = %s\n", m.Offset, string(m.Key), string(m.Value))
}

if err := r.Close(); err != nil {
    log.Fatal("failed to close reader:", err)
}
```

## Logging

For visibility into the operations of the Reader/Writer types, configure a logger on creation.

### Reader

```go
func logf(msg string, a ...interface{}) {
    fmt.Printf(msg, a...)
    fmt.Println()
}

r := kafka.NewReader(kafka.ReaderConfig{
    Brokers:     []string{"localhost:9092", "localhost:9093", "localhost:9094"},
    Topic:       "my-topic1",
    Partition:   0,
    Logger:      kafka.LoggerFunc(logf),
    ErrorLogger: kafka.LoggerFunc(logf),
})
```

### Writer

```go
func logf(msg string, a ...interface{}) {
    fmt.Printf(msg, a...)
    fmt.Println()
}

w := &kafka.Writer{
    Addr:        kafka.TCP("localhost:9092"),
    Topic:       "topic",
    Logger:      kafka.LoggerFunc(logf),
    ErrorLogger: kafka.LoggerFunc(logf),
}
```

## Testing

Subtle behavior changes in later Kafka versions have caused some historical tests to break. If you are running against Kafka 2.3.1 or later, exporting the `KAFKA_SKIP_NETTEST=1` environment variable will skip those tests.

Run Kafka locally in docker

```bash
docker-compose up -d
```

Run tests

```bash
KAFKA_VERSION=2.3.1 \
  KAFKA_SKIP_NETTEST=1 \
  go test -race ./...
```
vendor/github.com/segmentio/kafka-go/addoffsetstotxn.go (generated, vendored, normal file, 67 lines)
@@ -0,0 +1,67 @@
package kafka

import (
	"context"
	"fmt"
	"net"
	"time"

	"github.com/segmentio/kafka-go/protocol/addoffsetstotxn"
)

// AddOffsetsToTxnRequest is the request structure for the AddOffsetsToTxn function.
type AddOffsetsToTxnRequest struct {
	// Address of the kafka broker to send the request to.
	Addr net.Addr

	// The transactional id key
	TransactionalID string

	// The Producer ID (PID) for the current producer session;
	// received from an InitProducerID request.
	ProducerID int

	// The epoch associated with the current producer session for the given PID
	ProducerEpoch int

	// The unique group identifier.
	GroupID string
}

// AddOffsetsToTxnResponse is the response structure for the AddOffsetsToTxn function.
type AddOffsetsToTxnResponse struct {
	// The amount of time that the broker throttled the request.
	Throttle time.Duration

	// An error that may have occurred when attempting to add the offsets
	// to a transaction.
	//
	// The errors contain the kafka error code. Programs may use the standard
	// errors.Is function to test the error against kafka error codes.
	Error error
}

// AddOffsetsToTxn sends an add offsets to txn request to a kafka broker and returns the response.
func (c *Client) AddOffsetsToTxn(
	ctx context.Context,
	req *AddOffsetsToTxnRequest,
) (*AddOffsetsToTxnResponse, error) {
	m, err := c.roundTrip(ctx, req.Addr, &addoffsetstotxn.Request{
		TransactionalID: req.TransactionalID,
		ProducerID:      int64(req.ProducerID),
		ProducerEpoch:   int16(req.ProducerEpoch),
		GroupID:         req.GroupID,
	})
	if err != nil {
		return nil, fmt.Errorf("kafka.(*Client).AddOffsetsToTxn: %w", err)
	}

	r := m.(*addoffsetstotxn.Response)

	res := &AddOffsetsToTxnResponse{
		Throttle: makeDuration(r.ThrottleTimeMs),
		Error:    makeError(r.ErrorCode, ""),
	}

	return res, nil
}
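A rough usage sketch of the API above (not part of the vendored file; `client` is a configured `kafka.Client`, and `producerID`/`producerEpoch` are hypothetical values obtained from a prior InitProducerID request):

```go
// Hypothetical illustration: add consumer-group offsets to an open transaction.
resp, err := client.AddOffsetsToTxn(context.Background(), &kafka.AddOffsetsToTxnRequest{
	Addr:            client.Addr,
	TransactionalID: "my-transactional-id",
	ProducerID:      producerID,    // from InitProducerID
	ProducerEpoch:   producerEpoch, // from InitProducerID
	GroupID:         "my-group",
})
if err != nil {
	log.Fatal(err)
}
if resp.Error != nil {
	log.Fatal("broker returned error:", resp.Error)
}
```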
vendor/github.com/segmentio/kafka-go/addpartitionstotxn.go (generated, vendored, normal file, 108 lines)
@@ -0,0 +1,108 @@
package kafka

import (
	"context"
	"fmt"
	"net"
	"time"

	"github.com/segmentio/kafka-go/protocol/addpartitionstotxn"
)

// AddPartitionToTxn represents a partition to be added
// to a transaction.
type AddPartitionToTxn struct {
	// Partition is the ID of a partition to add to the transaction.
	Partition int
}

// AddPartitionsToTxnRequest is the request structure for the AddPartitionsToTxn function.
type AddPartitionsToTxnRequest struct {
	// Address of the kafka broker to send the request to.
	Addr net.Addr

	// The transactional id key
	TransactionalID string

	// The Producer ID (PID) for the current producer session;
	// received from an InitProducerID request.
	ProducerID int

	// The epoch associated with the current producer session for the given PID
	ProducerEpoch int

	// Mappings of topic names to lists of partitions.
	Topics map[string][]AddPartitionToTxn
}

// AddPartitionsToTxnResponse is the response structure for the AddPartitionsToTxn function.
type AddPartitionsToTxnResponse struct {
	// The amount of time that the broker throttled the request.
	Throttle time.Duration

	// Mappings of topic names to partitions being added to a transaction.
	Topics map[string][]AddPartitionToTxnPartition
}

// AddPartitionToTxnPartition represents the state of a single partition
// in response to adding to a transaction.
type AddPartitionToTxnPartition struct {
	// The ID of the partition.
	Partition int

	// An error that may have occurred when attempting to add the partition
	// to a transaction.
	//
	// The errors contain the kafka error code. Programs may use the standard
	// errors.Is function to test the error against kafka error codes.
	Error error
}

// AddPartitionsToTxn sends an add partitions to txn request to a kafka broker and returns the response.
func (c *Client) AddPartitionsToTxn(
	ctx context.Context,
	req *AddPartitionsToTxnRequest,
) (*AddPartitionsToTxnResponse, error) {
	protoReq := &addpartitionstotxn.Request{
		TransactionalID: req.TransactionalID,
		ProducerID:      int64(req.ProducerID),
		ProducerEpoch:   int16(req.ProducerEpoch),
	}
	protoReq.Topics = make([]addpartitionstotxn.RequestTopic, 0, len(req.Topics))

	for topic, partitions := range req.Topics {
		reqTopic := addpartitionstotxn.RequestTopic{
			Name:       topic,
			Partitions: make([]int32, len(partitions)),
		}
		for i, partition := range partitions {
			reqTopic.Partitions[i] = int32(partition.Partition)
		}
		protoReq.Topics = append(protoReq.Topics, reqTopic)
	}

	m, err := c.roundTrip(ctx, req.Addr, protoReq)
	if err != nil {
		return nil, fmt.Errorf("kafka.(*Client).AddPartitionsToTxn: %w", err)
	}

	r := m.(*addpartitionstotxn.Response)

	res := &AddPartitionsToTxnResponse{
		Throttle: makeDuration(r.ThrottleTimeMs),
		Topics:   make(map[string][]AddPartitionToTxnPartition, len(r.Results)),
	}

	for _, result := range r.Results {
		partitions := make([]AddPartitionToTxnPartition, 0, len(result.Results))
		for _, rp := range result.Results {
			partitions = append(partitions, AddPartitionToTxnPartition{
				Partition: int(rp.PartitionIndex),
				Error:     makeError(rp.ErrorCode, ""),
			})
		}
		res.Topics[result.Name] = partitions
	}

	return res, nil
}
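A rough usage sketch (not part of the vendored file; `client`, `producerID`, and `producerEpoch` are hypothetical, as above):

```go
// Hypothetical illustration: add partition 0 of "topic-A" to an open transaction.
resp, err := client.AddPartitionsToTxn(context.Background(), &kafka.AddPartitionsToTxnRequest{
	Addr:            client.Addr,
	TransactionalID: "my-transactional-id",
	ProducerID:      producerID,
	ProducerEpoch:   producerEpoch,
	Topics: map[string][]kafka.AddPartitionToTxn{
		"topic-A": {{Partition: 0}},
	},
})
if err != nil {
	log.Fatal(err)
}
// per-partition errors come back keyed by topic name
for topic, parts := range resp.Topics {
	for _, p := range parts {
		if p.Error != nil {
			log.Printf("%s/%d: %v", topic, p.Partition, p.Error)
		}
	}
}
```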
vendor/github.com/segmentio/kafka-go/address.go (generated, vendored, normal file, 64 lines)
@@ -0,0 +1,64 @@
package kafka

import (
	"net"
	"strings"
)

// TCP constructs an address with the network set to "tcp".
func TCP(address ...string) net.Addr { return makeNetAddr("tcp", address) }

func makeNetAddr(network string, addresses []string) net.Addr {
	switch len(addresses) {
	case 0:
		return nil // maybe panic instead?
	case 1:
		return makeAddr(network, addresses[0])
	default:
		return makeMultiAddr(network, addresses)
	}
}

func makeAddr(network, address string) net.Addr {
	return &networkAddress{
		network: network,
		address: canonicalAddress(address),
	}
}

func makeMultiAddr(network string, addresses []string) net.Addr {
	multi := make(multiAddr, len(addresses))
	for i, address := range addresses {
		multi[i] = makeAddr(network, address)
	}
	return multi
}

type networkAddress struct {
	network string
	address string
}

func (a *networkAddress) Network() string { return a.network }

func (a *networkAddress) String() string { return a.address }

type multiAddr []net.Addr

func (m multiAddr) Network() string { return m.join(net.Addr.Network) }

func (m multiAddr) String() string { return m.join(net.Addr.String) }

func (m multiAddr) join(f func(net.Addr) string) string {
	switch len(m) {
	case 0:
		return ""
	case 1:
		return f(m[0])
	}
	s := make([]string, len(m))
	for i, a := range m {
		s[i] = f(a)
	}
	return strings.Join(s, ",")
}
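A small illustration of the behavior above (editor's sketch, not part of the vendored file): with more than one address, `TCP` returns a `multiAddr` whose `Network` and `String` forms join the per-element values with commas.

```go
// Illustration only.
addr := kafka.TCP("localhost:9092", "localhost:9093")
fmt.Println(addr.Network()) // "tcp,tcp"
fmt.Println(addr.String())  // "localhost:9092,localhost:9093"
```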
vendor/github.com/segmentio/kafka-go/alterconfigs.go (generated, vendored, normal file, 107 lines)
@@ -0,0 +1,107 @@
package kafka

import (
	"context"
	"fmt"
	"net"
	"time"

	"github.com/segmentio/kafka-go/protocol/alterconfigs"
)

// AlterConfigsRequest represents a request sent to a kafka broker to alter configs.
type AlterConfigsRequest struct {
	// Address of the kafka broker to send the request to.
	Addr net.Addr

	// List of resources to update.
	Resources []AlterConfigRequestResource

	// When set to true, the configuration changes are not applied, but are
	// validated as if they were.
	ValidateOnly bool
}

type AlterConfigRequestResource struct {
	// Resource Type
	ResourceType ResourceType

	// Resource Name
	ResourceName string

	// Configs is a list of configuration updates.
	Configs []AlterConfigRequestConfig
}

type AlterConfigRequestConfig struct {
	// Configuration key name
	Name string

	// The value to set for the configuration key.
	Value string
}

// AlterConfigsResponse represents a response from a kafka broker to an alter config request.
type AlterConfigsResponse struct {
	// Duration for which the request was throttled due to a quota violation.
	Throttle time.Duration

	// Mapping of resources to errors that occurred while attempting to alter
	// their configs.
	//
	// The errors contain the kafka error code. Programs may use the standard
	// errors.Is function to test the error against kafka error codes.
	Errors map[AlterConfigsResponseResource]error
}

// AlterConfigsResponseResource helps map errors to specific resources in an
// alter config response.
type AlterConfigsResponseResource struct {
	Type int8
	Name string
}

// AlterConfigs sends a config altering request to a kafka broker and returns the
// response.
func (c *Client) AlterConfigs(ctx context.Context, req *AlterConfigsRequest) (*AlterConfigsResponse, error) {
	resources := make([]alterconfigs.RequestResources, len(req.Resources))

	for i, t := range req.Resources {
		configs := make([]alterconfigs.RequestConfig, len(t.Configs))
		for j, v := range t.Configs {
			configs[j] = alterconfigs.RequestConfig{
				Name:  v.Name,
				Value: v.Value,
			}
		}
		resources[i] = alterconfigs.RequestResources{
			ResourceType: int8(t.ResourceType),
			ResourceName: t.ResourceName,
			Configs:      configs,
		}
	}

	m, err := c.roundTrip(ctx, req.Addr, &alterconfigs.Request{
		Resources:    resources,
		ValidateOnly: req.ValidateOnly,
	})

	if err != nil {
		return nil, fmt.Errorf("kafka.(*Client).AlterConfigs: %w", err)
	}

	res := m.(*alterconfigs.Response)
	ret := &AlterConfigsResponse{
		Throttle: makeDuration(res.ThrottleTimeMs),
		Errors:   make(map[AlterConfigsResponseResource]error, len(res.Responses)),
	}

	for _, t := range res.Responses {
		ret.Errors[AlterConfigsResponseResource{
			Type: t.ResourceType,
			Name: t.ResourceName,
		}] = makeError(t.ErrorCode, t.ErrorMessage)
	}

	return ret, nil
}
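A rough usage sketch (editor's illustration, not part of the vendored file; it assumes a configured `kafka.Client` named `client` and the `ResourceTypeTopic` constant defined elsewhere in the package):

```go
// Illustration only: update retention.ms for "topic-A".
resp, err := client.AlterConfigs(context.Background(), &kafka.AlterConfigsRequest{
	Addr: client.Addr,
	Resources: []kafka.AlterConfigRequestResource{{
		ResourceType: kafka.ResourceTypeTopic,
		ResourceName: "topic-A",
		Configs: []kafka.AlterConfigRequestConfig{
			{Name: "retention.ms", Value: "86400000"}, // 1 day
		},
	}},
})
if err != nil {
	log.Fatal(err)
}
for resource, rerr := range resp.Errors {
	if rerr != nil {
		log.Printf("%s: %v", resource.Name, rerr)
	}
}
```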
vendor/github.com/segmentio/kafka-go/alterpartitionreassignments.go (generated, vendored, normal file, 115 lines)
@@ -0,0 +1,115 @@
package kafka

import (
	"context"
	"net"
	"time"

	"github.com/segmentio/kafka-go/protocol/alterpartitionreassignments"
)

// AlterPartitionReassignmentsRequest is a request to the AlterPartitionReassignments API.
type AlterPartitionReassignmentsRequest struct {
	// Address of the kafka broker to send the request to.
	Addr net.Addr

	// Topic is the name of the topic to alter partitions in.
	Topic string

	// Assignments is the list of partition reassignments to submit to the API.
	Assignments []AlterPartitionReassignmentsRequestAssignment

	// Timeout is the amount of time to wait for the request to complete.
	Timeout time.Duration
}

// AlterPartitionReassignmentsRequestAssignment contains the requested reassignments for a single
// partition.
type AlterPartitionReassignmentsRequestAssignment struct {
	// PartitionID is the ID of the partition to make the reassignments in.
	PartitionID int

	// BrokerIDs is a slice of brokers to set the partition replicas to.
	BrokerIDs []int
}

// AlterPartitionReassignmentsResponse is a response from the AlterPartitionReassignments API.
type AlterPartitionReassignmentsResponse struct {
	// Error is set to a non-nil value including the code and message if a top-level
	// error was encountered when doing the update.
	Error error

	// PartitionResults contains the specific results for each partition.
	PartitionResults []AlterPartitionReassignmentsResponsePartitionResult
}

// AlterPartitionReassignmentsResponsePartitionResult contains the detailed result of
// doing reassignments for a single partition.
type AlterPartitionReassignmentsResponsePartitionResult struct {
	// PartitionID is the ID of the partition that was altered.
	PartitionID int

	// Error is set to a non-nil value including the code and message if an error was encountered
	// during the update for this partition.
	Error error
}

func (c *Client) AlterPartitionReassignments(
	ctx context.Context,
	req *AlterPartitionReassignmentsRequest,
) (*AlterPartitionReassignmentsResponse, error) {
	apiPartitions := []alterpartitionreassignments.RequestPartition{}

	for _, assignment := range req.Assignments {
		replicas := []int32{}
		for _, brokerID := range assignment.BrokerIDs {
			replicas = append(replicas, int32(brokerID))
		}

		apiPartitions = append(
			apiPartitions,
			alterpartitionreassignments.RequestPartition{
				PartitionIndex: int32(assignment.PartitionID),
				Replicas:       replicas,
			},
		)
	}

	apiReq := &alterpartitionreassignments.Request{
		TimeoutMs: int32(req.Timeout.Milliseconds()),
		Topics: []alterpartitionreassignments.RequestTopic{
			{
				Name:       req.Topic,
				Partitions: apiPartitions,
			},
		},
	}

	protoResp, err := c.roundTrip(
		ctx,
		req.Addr,
		apiReq,
	)
	if err != nil {
		return nil, err
	}
	apiResp := protoResp.(*alterpartitionreassignments.Response)

	resp := &AlterPartitionReassignmentsResponse{
		Error: makeError(apiResp.ErrorCode, apiResp.ErrorMessage),
	}

	for _, topicResult := range apiResp.Results {
		for _, partitionResult := range topicResult.Partitions {
			resp.PartitionResults = append(
				resp.PartitionResults,
				AlterPartitionReassignmentsResponsePartitionResult{
					PartitionID: int(partitionResult.PartitionIndex),
					Error:       makeError(partitionResult.ErrorCode, partitionResult.ErrorMessage),
				},
			)
		}
	}

	return resp, nil
}
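A rough usage sketch (editor's illustration, not part of the vendored file; topic name and broker IDs are made up):

```go
// Illustration only: move partition 0 of "topic-A" onto brokers 1, 2 and 3.
resp, err := client.AlterPartitionReassignments(context.Background(),
	&kafka.AlterPartitionReassignmentsRequest{
		Addr:  client.Addr,
		Topic: "topic-A",
		Assignments: []kafka.AlterPartitionReassignmentsRequestAssignment{
			{PartitionID: 0, BrokerIDs: []int{1, 2, 3}},
		},
		Timeout: 30 * time.Second,
	})
if err != nil {
	log.Fatal(err)
}
if resp.Error != nil {
	log.Fatal("top-level error:", resp.Error)
}
for _, pr := range resp.PartitionResults {
	if pr.Error != nil {
		log.Printf("partition %d: %v", pr.PartitionID, pr.Error)
	}
}
```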
|
||||
72
vendor/github.com/segmentio/kafka-go/apiversions.go
generated
vendored
Normal file
@@ -0,0 +1,72 @@
package kafka

import (
	"context"
	"net"

	"github.com/segmentio/kafka-go/protocol"
	"github.com/segmentio/kafka-go/protocol/apiversions"
)

// ApiVersionsRequest is a request to the ApiVersions API.
type ApiVersionsRequest struct {
	// Address of the kafka broker to send the request to.
	Addr net.Addr
}

// ApiVersionsResponse is a response from the ApiVersions API.
type ApiVersionsResponse struct {
	// Error is set to a non-nil value if an error was encountered.
	Error error

	// ApiKeys contains the specific details of each supported API.
	ApiKeys []ApiVersionsResponseApiKey
}

// ApiVersionsResponseApiKey includes the details of which versions are supported for a single API.
type ApiVersionsResponseApiKey struct {
	// ApiKey is the ID of the API.
	ApiKey int

	// ApiName is a human-friendly description of the API.
	ApiName string

	// MinVersion is the minimum API version supported by the broker.
	MinVersion int

	// MaxVersion is the maximum API version supported by the broker.
	MaxVersion int
}

func (c *Client) ApiVersions(
	ctx context.Context,
	req *ApiVersionsRequest,
) (*ApiVersionsResponse, error) {
	apiReq := &apiversions.Request{}
	protoResp, err := c.roundTrip(
		ctx,
		req.Addr,
		apiReq,
	)
	if err != nil {
		return nil, err
	}
	apiResp := protoResp.(*apiversions.Response)

	resp := &ApiVersionsResponse{
		Error: makeError(apiResp.ErrorCode, ""),
	}
	for _, apiKey := range apiResp.ApiKeys {
		resp.ApiKeys = append(
			resp.ApiKeys,
			ApiVersionsResponseApiKey{
				ApiKey:     int(apiKey.ApiKey),
				ApiName:    protocol.ApiKey(apiKey.ApiKey).String(),
				MinVersion: int(apiKey.MinVersion),
				MaxVersion: int(apiKey.MaxVersion),
			},
		)
	}

	return resp, err
}
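A short usage sketch for this API: listing the version ranges a broker supports, which is useful when debugging protocol negotiation. The broker address is a placeholder; leaving the request's Addr nil falls back to the client's Addr, per the roundTrip logic shown later in client.go.

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/segmentio/kafka-go"
)

func main() {
	client := &kafka.Client{Addr: kafka.TCP("localhost:9092")}

	resp, err := client.ApiVersions(context.Background(), &kafka.ApiVersionsRequest{})
	if err != nil {
		log.Fatal(err)
	}
	if resp.Error != nil {
		log.Fatal(resp.Error)
	}
	for _, api := range resp.ApiKeys {
		// e.g. "Fetch: v0..v11"
		fmt.Printf("%s: v%d..v%d\n", api.ApiName, api.MinVersion, api.MaxVersion)
	}
}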
340
vendor/github.com/segmentio/kafka-go/balancer.go
generated
vendored
Normal file
@@ -0,0 +1,340 @@
package kafka

import (
	"hash"
	"hash/crc32"
	"hash/fnv"
	"math/rand"
	"sort"
	"sync"
	"sync/atomic"
)

// The Balancer interface provides an abstraction of the message distribution
// logic used by Writer instances to route messages to the partitions available
// on a kafka cluster.
//
// Balancers must be safe to use concurrently from multiple goroutines.
type Balancer interface {
	// Balance receives a message and a set of available partitions and
	// returns the partition number that the message should be routed to.
	//
	// An application should refrain from using a balancer to manage multiple
	// sets of partitions (from different topics, for example); use one balancer
	// instance for each partition set, so the balancer can detect when the
	// partitions change and assume that the kafka topic has been rebalanced.
	Balance(msg Message, partitions ...int) (partition int)
}

// BalancerFunc is an implementation of the Balancer interface that makes it
// possible to use regular functions to distribute messages across partitions.
type BalancerFunc func(Message, ...int) int

// Balance calls f, satisfying the Balancer interface.
func (f BalancerFunc) Balance(msg Message, partitions ...int) int {
	return f(msg, partitions...)
}
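Because BalancerFunc is just a function type, any stateless routing rule can be dropped in without defining a new struct. A minimal hypothetical example: send keyless messages to the first available partition and spread keyed messages by key length.

package main

import "github.com/segmentio/kafka-go"

// lengthBalancer is a hypothetical BalancerFunc: messages with no key go to
// the first available partition, keyed messages are spread by key length.
var lengthBalancer kafka.BalancerFunc = func(msg kafka.Message, partitions ...int) int {
	if len(msg.Key) == 0 {
		return partitions[0]
	}
	return partitions[len(msg.Key)%len(partitions)]
}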
// RoundRobin is a Balancer implementation that equally distributes messages
// across all available partitions.
type RoundRobin struct {
	// Use a 32 bits integer so RoundRobin values don't need to be aligned to
	// apply atomic increments.
	offset uint32
}

// Balance satisfies the Balancer interface.
func (rr *RoundRobin) Balance(msg Message, partitions ...int) int {
	return rr.balance(partitions)
}

func (rr *RoundRobin) balance(partitions []int) int {
	length := uint32(len(partitions))
	offset := atomic.AddUint32(&rr.offset, 1) - 1
	return partitions[offset%length]
}

// LeastBytes is a Balancer implementation that routes messages to the partition
// that has received the least amount of data.
//
// Note that no coordination is done between multiple producers; good balancing
// relies on the fact that each producer using a LeastBytes balancer should
// produce well balanced messages.
type LeastBytes struct {
	mutex    sync.Mutex
	counters []leastBytesCounter
}

type leastBytesCounter struct {
	partition int
	bytes     uint64
}

// Balance satisfies the Balancer interface.
func (lb *LeastBytes) Balance(msg Message, partitions ...int) int {
	lb.mutex.Lock()
	defer lb.mutex.Unlock()

	// partitions change
	if len(partitions) != len(lb.counters) {
		lb.counters = lb.makeCounters(partitions...)
	}

	minBytes := lb.counters[0].bytes
	minIndex := 0

	for i, c := range lb.counters[1:] {
		if c.bytes < minBytes {
			minIndex = i + 1
			minBytes = c.bytes
		}
	}

	c := &lb.counters[minIndex]
	c.bytes += uint64(len(msg.Key)) + uint64(len(msg.Value))
	return c.partition
}

func (lb *LeastBytes) makeCounters(partitions ...int) (counters []leastBytesCounter) {
	counters = make([]leastBytesCounter, len(partitions))

	for i, p := range partitions {
		counters[i].partition = p
	}

	sort.Slice(counters, func(i int, j int) bool {
		return counters[i].partition < counters[j].partition
	})
	return
}

var (
	fnv1aPool = &sync.Pool{
		New: func() interface{} {
			return fnv.New32a()
		},
	}
)

// Hash is a Balancer that uses the provided hash function to determine which
// partition to route messages to. This ensures that messages with the same key
// are routed to the same partition.
//
// The logic to calculate the partition is:
//
//	hasher.Sum32() % len(partitions) => partition
//
// By default, Hash uses the FNV-1a algorithm. This is the same algorithm used
// by the Sarama Producer and ensures that messages produced by kafka-go will
// be delivered to the same partitions that the Sarama producer would deliver
// them to.
type Hash struct {
	rr     RoundRobin
	Hasher hash.Hash32

	// lock protects Hasher while calculating the hash code. It is assumed that
	// the Hasher field is read-only once the Balancer is created, so as a
	// performance optimization, reads of the field are not protected.
	lock sync.Mutex
}

func (h *Hash) Balance(msg Message, partitions ...int) int {
	if msg.Key == nil {
		return h.rr.Balance(msg, partitions...)
	}

	hasher := h.Hasher
	if hasher != nil {
		h.lock.Lock()
		defer h.lock.Unlock()
	} else {
		hasher = fnv1aPool.Get().(hash.Hash32)
		defer fnv1aPool.Put(hasher)
	}

	hasher.Reset()
	if _, err := hasher.Write(msg.Key); err != nil {
		panic(err)
	}

	// uses same algorithm that Sarama's hashPartitioner uses
	// note the type conversions here. if the uint32 hash code is not cast to
	// an int32, we do not get the same result as sarama.
	partition := int32(hasher.Sum32()) % int32(len(partitions))
	if partition < 0 {
		partition = -partition
	}

	return int(partition)
}
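A balancer is typically wired into a producer rather than called directly. A minimal sketch, assuming the Writer type from the same package (not part of this hunk) and placeholder broker address and topic:

package main

import "github.com/segmentio/kafka-go"

func newWriter() *kafka.Writer {
	// Route keyed messages with FNV-1a, matching Sarama's default hash
	// partitioner; messages with a nil key fall back to round-robin, as
	// implemented in Hash.Balance above.
	return &kafka.Writer{
		Addr:     kafka.TCP("localhost:9092"), // placeholder broker address
		Topic:    "example-topic",             // placeholder topic
		Balancer: &kafka.Hash{},
	}
}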
// ReferenceHash is a Balancer that uses the provided hash function to determine which
// partition to route messages to. This ensures that messages with the same key
// are routed to the same partition.
//
// The logic to calculate the partition is:
//
//	(int32(hasher.Sum32()) & 0x7fffffff) % len(partitions) => partition
//
// By default, ReferenceHash uses the FNV-1a algorithm. This is the same algorithm as
// the Sarama NewReferenceHashPartitioner and ensures that messages produced by kafka-go will
// be delivered to the same partitions that the Sarama producer would deliver them to.
type ReferenceHash struct {
	rr     randomBalancer
	Hasher hash.Hash32

	// lock protects Hasher while calculating the hash code. It is assumed that
	// the Hasher field is read-only once the Balancer is created, so as a
	// performance optimization, reads of the field are not protected.
	lock sync.Mutex
}

func (h *ReferenceHash) Balance(msg Message, partitions ...int) int {
	if msg.Key == nil {
		return h.rr.Balance(msg, partitions...)
	}

	hasher := h.Hasher
	if hasher != nil {
		h.lock.Lock()
		defer h.lock.Unlock()
	} else {
		hasher = fnv1aPool.Get().(hash.Hash32)
		defer fnv1aPool.Put(hasher)
	}

	hasher.Reset()
	if _, err := hasher.Write(msg.Key); err != nil {
		panic(err)
	}

	// uses the same algorithm as Sarama's referenceHashPartitioner.
	// note the type conversions here. if the uint32 hash code is not cast to
	// an int32, we do not get the same result as sarama.
	partition := (int32(hasher.Sum32()) & 0x7fffffff) % int32(len(partitions))
	return int(partition)
}

type randomBalancer struct {
	mock int // mocked return value, used for testing
}

func (b randomBalancer) Balance(msg Message, partitions ...int) (partition int) {
	if b.mock != 0 {
		return b.mock
	}
	return partitions[rand.Int()%len(partitions)]
}

// CRC32Balancer is a Balancer that uses the CRC32 hash function to determine
// which partition to route messages to. This ensures that messages with the
// same key are routed to the same partition. This balancer is compatible with
// the built-in hash partitioners in librdkafka and the language bindings that
// are built on top of it, including the
// github.com/confluentinc/confluent-kafka-go Go package.
//
// With the Consistent field false (default), this partitioner is equivalent to
// the "consistent_random" setting in librdkafka. When Consistent is true, this
// partitioner is equivalent to the "consistent" setting. The latter will hash
// empty or nil keys into the same partition.
//
// Unless you are absolutely certain that all your messages will have keys, it's
// best to leave the Consistent flag off. Otherwise, you run the risk of
// creating a very hot partition.
type CRC32Balancer struct {
	Consistent bool
	random     randomBalancer
}

func (b CRC32Balancer) Balance(msg Message, partitions ...int) (partition int) {
	// NOTE: the crc32 balancers in librdkafka don't differentiate between nil
	// and empty keys. both cases are treated as unset.
	if len(msg.Key) == 0 && !b.Consistent {
		return b.random.Balance(msg, partitions...)
	}

	idx := crc32.ChecksumIEEE(msg.Key) % uint32(len(partitions))
	return partitions[idx]
}
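A sketch of the interoperability case the comment above describes, with placeholder address and topic, again assuming the package's Writer type:

package main

import "github.com/segmentio/kafka-go"

func newCompatWriter() *kafka.Writer {
	// Match librdkafka's "consistent_random" partitioning: keyed messages
	// hash with CRC32, unkeyed messages go to a random partition. Setting
	// Consistent: true would instead pin unkeyed messages to one partition,
	// at the risk of making it very hot.
	return &kafka.Writer{
		Addr:     kafka.TCP("localhost:9092"), // placeholder
		Topic:    "example-topic",             // placeholder
		Balancer: kafka.CRC32Balancer{},
	}
}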
// Murmur2Balancer is a Balancer that uses the Murmur2 hash function to
// determine which partition to route messages to. This ensures that messages
// with the same key are routed to the same partition. This balancer is
// compatible with the partitioner used by the Java library and by librdkafka's
// "murmur2" and "murmur2_random" partitioners.
//
// With the Consistent field false (default), this partitioner is equivalent to
// the "murmur2_random" setting in librdkafka. When Consistent is true, this
// partitioner is equivalent to the "murmur2" setting. The latter will hash
// nil keys into the same partition. Empty, non-nil keys are always hashed to
// the same partition regardless of configuration.
//
// Unless you are absolutely certain that all your messages will have keys, it's
// best to leave the Consistent flag off. Otherwise, you run the risk of
// creating a very hot partition.
//
// Note that the librdkafka documentation states that "murmur2_random" is
// functionally equivalent to the default Java partitioner. That's because the
// Java partitioner will use a round robin balancer instead of random on nil
// keys. We choose librdkafka's implementation because it arguably has a larger
// install base.
type Murmur2Balancer struct {
	Consistent bool
	random     randomBalancer
}

func (b Murmur2Balancer) Balance(msg Message, partitions ...int) (partition int) {
	// NOTE: the murmur2 balancers in java and librdkafka treat a nil key as
	// non-existent while treating an empty slice as a defined value.
	if msg.Key == nil && !b.Consistent {
		return b.random.Balance(msg, partitions...)
	}

	idx := (murmur2(msg.Key) & 0x7fffffff) % uint32(len(partitions))
	return partitions[idx]
}

// Go port of the Java library's murmur2 function.
// https://github.com/apache/kafka/blob/1.0/clients/src/main/java/org/apache/kafka/common/utils/Utils.java#L353
func murmur2(data []byte) uint32 {
	length := len(data)
	const (
		seed uint32 = 0x9747b28c
		// 'm' and 'r' are mixing constants generated offline.
		// They're not really 'magic', they just happen to work well.
		m = 0x5bd1e995
		r = 24
	)

	// Initialize the hash to a random value
	h := seed ^ uint32(length)
	length4 := length / 4

	for i := 0; i < length4; i++ {
		i4 := i * 4
		k := (uint32(data[i4+0]) & 0xff) + ((uint32(data[i4+1]) & 0xff) << 8) + ((uint32(data[i4+2]) & 0xff) << 16) + ((uint32(data[i4+3]) & 0xff) << 24)
		k *= m
		k ^= k >> r
		k *= m
		h *= m
		h ^= k
	}

	// Handle the last few bytes of the input array
	extra := length % 4
	if extra >= 3 {
		h ^= (uint32(data[(length & ^3)+2]) & 0xff) << 16
	}
	if extra >= 2 {
		h ^= (uint32(data[(length & ^3)+1]) & 0xff) << 8
	}
	if extra >= 1 {
		h ^= uint32(data[length & ^3]) & 0xff
		h *= m
	}

	h ^= h >> 13
	h *= m
	h ^= h >> 15

	return h
}
313
vendor/github.com/segmentio/kafka-go/batch.go
generated
vendored
Normal file
@@ -0,0 +1,313 @@
package kafka

import (
	"bufio"
	"errors"
	"io"
	"sync"
	"time"
)

// A Batch is an iterator over a sequence of messages fetched from a kafka
// server.
//
// Batches are created by calling (*Conn).ReadBatch. They hold an internal lock
// on the connection, which is released when the batch is closed. Failing to
// call a batch's Close method will likely result in a dead-lock when trying to
// use the connection.
//
// Batches are safe to use concurrently from multiple goroutines.
type Batch struct {
	mutex         sync.Mutex
	conn          *Conn
	lock          *sync.Mutex
	msgs          *messageSetReader
	deadline      time.Time
	throttle      time.Duration
	topic         string
	partition     int
	offset        int64
	highWaterMark int64
	err           error
	// The last offset in the batch.
	//
	// We use lastOffset to skip offsets that have been compacted away.
	//
	// We store lastOffset because we get lastOffset when we read a new message
	// but only try to handle compaction when we receive an EOF. However, when
	// we get an EOF we do not get the lastOffset. So there is a mismatch
	// between when we receive it and need to use it.
	lastOffset int64
}

// Throttle gives the throttling duration applied by the kafka server on the
// connection.
func (batch *Batch) Throttle() time.Duration {
	return batch.throttle
}

// HighWaterMark returns the current highest watermark in a partition.
func (batch *Batch) HighWaterMark() int64 {
	return batch.highWaterMark
}

// Partition returns the batch partition.
func (batch *Batch) Partition() int {
	return batch.partition
}

// Offset returns the offset of the next message in the batch.
func (batch *Batch) Offset() int64 {
	batch.mutex.Lock()
	offset := batch.offset
	batch.mutex.Unlock()
	return offset
}

// Close closes the batch, releasing the connection lock and returning an error
// if reading the batch failed for any reason.
func (batch *Batch) Close() error {
	batch.mutex.Lock()
	err := batch.close()
	batch.mutex.Unlock()
	return err
}

func (batch *Batch) close() (err error) {
	conn := batch.conn
	lock := batch.lock

	batch.conn = nil
	batch.lock = nil

	if batch.msgs != nil {
		batch.msgs.discard()
	}

	if batch.msgs != nil && batch.msgs.decompressed != nil {
		releaseBuffer(batch.msgs.decompressed)
		batch.msgs.decompressed = nil
	}

	if err = batch.err; errors.Is(batch.err, io.EOF) {
		err = nil
	}

	if conn != nil {
		conn.rdeadline.unsetConnReadDeadline()
		conn.mutex.Lock()
		conn.offset = batch.offset
		conn.mutex.Unlock()

		if err != nil {
			var kafkaError Error
			if !errors.As(err, &kafkaError) && !errors.Is(err, io.ErrShortBuffer) {
				conn.Close()
			}
		}
	}

	if lock != nil {
		lock.Unlock()
	}

	return
}

// Err returns a non-nil error if the batch is broken. This is the same error
// that would be returned by Read, ReadMessage or Close (except in the case of
// io.EOF which is never returned by Close).
//
// This method is useful when building retry mechanisms for (*Conn).ReadBatch;
// the program can check whether the batch carried an error before attempting to
// read the first message.
//
// Note that checking errors on a batch is optional, calling Read or ReadMessage
// is always valid and can be used to either read a message or an error in cases
// where that's convenient.
func (batch *Batch) Err() error { return batch.err }

// Read reads the value of the next message from the batch into b, returning the
// number of bytes read, or an error if the next message couldn't be read.
//
// If an error is returned the batch cannot be used anymore and calling Read
// again will keep returning that error. All errors except io.EOF (indicating
// that the program consumed all messages from the batch) are also returned by
// Close.
//
// The method fails with io.ErrShortBuffer if the buffer passed as argument is
// too small to hold the message value.
func (batch *Batch) Read(b []byte) (int, error) {
	n := 0

	batch.mutex.Lock()
	offset := batch.offset

	_, _, _, err := batch.readMessage(
		func(r *bufio.Reader, size int, nbytes int) (int, error) {
			if nbytes < 0 {
				return size, nil
			}
			return discardN(r, size, nbytes)
		},
		func(r *bufio.Reader, size int, nbytes int) (int, error) {
			if nbytes < 0 {
				return size, nil
			}
			// make sure there are enough bytes for the message value. return
			// errShortRead if the message is truncated.
			if nbytes > size {
				return size, errShortRead
			}
			n = nbytes // return value
			if nbytes > cap(b) {
				nbytes = cap(b)
			}
			if nbytes > len(b) {
				b = b[:nbytes]
			}
			nbytes, err := io.ReadFull(r, b[:nbytes])
			if err != nil {
				return size - nbytes, err
			}
			return discardN(r, size-nbytes, n-nbytes)
		},
	)

	if err == nil && n > len(b) {
		n, err = len(b), io.ErrShortBuffer
		batch.err = io.ErrShortBuffer
		batch.offset = offset // rollback
	}

	batch.mutex.Unlock()
	return n, err
}

// ReadMessage reads and returns the next message from the batch.
//
// Because this method allocates memory buffers for the message key and value
// it is less memory-efficient than Read, but has the advantage of never
// failing with io.ErrShortBuffer.
func (batch *Batch) ReadMessage() (Message, error) {
	msg := Message{}
	batch.mutex.Lock()

	var offset, timestamp int64
	var headers []Header
	var err error

	offset, timestamp, headers, err = batch.readMessage(
		func(r *bufio.Reader, size int, nbytes int) (remain int, err error) {
			msg.Key, remain, err = readNewBytes(r, size, nbytes)
			return
		},
		func(r *bufio.Reader, size int, nbytes int) (remain int, err error) {
			msg.Value, remain, err = readNewBytes(r, size, nbytes)
			return
		},
	)
	// A batch may start before the requested offset so skip messages
	// until the requested offset is reached.
	for batch.conn != nil && offset < batch.conn.offset {
		if err != nil {
			break
		}
		offset, timestamp, headers, err = batch.readMessage(
			func(r *bufio.Reader, size int, nbytes int) (remain int, err error) {
				msg.Key, remain, err = readNewBytes(r, size, nbytes)
				return
			},
			func(r *bufio.Reader, size int, nbytes int) (remain int, err error) {
				msg.Value, remain, err = readNewBytes(r, size, nbytes)
				return
			},
		)
	}

	batch.mutex.Unlock()
	msg.Topic = batch.topic
	msg.Partition = batch.partition
	msg.Offset = offset
	msg.HighWaterMark = batch.highWaterMark
	msg.Time = makeTime(timestamp)
	msg.Headers = headers

	return msg, err
}
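The canonical consumption loop built on these methods: read messages until io.EOF ends the batch, then Close to release the connection lock. A minimal sketch with placeholder broker address, topic, and partition, assuming DialLeader from the same package (not part of this hunk):

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/segmentio/kafka-go"
)

func main() {
	conn, err := kafka.DialLeader(context.Background(), "tcp", "localhost:9092", "example-topic", 0)
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	batch := conn.ReadBatch(10e3, 1e6) // fetch 10KB min, 1MB max
	for {
		msg, err := batch.ReadMessage()
		if err != nil {
			break // io.EOF means the batch was fully consumed
		}
		fmt.Printf("offset %d: %s\n", msg.Offset, msg.Value)
	}
	// Close reports any error other than io.EOF encountered while reading.
	if err := batch.Close(); err != nil {
		log.Fatal(err)
	}
}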
func (batch *Batch) readMessage(
	key func(*bufio.Reader, int, int) (int, error),
	val func(*bufio.Reader, int, int) (int, error),
) (offset int64, timestamp int64, headers []Header, err error) {
	if err = batch.err; err != nil {
		return
	}

	var lastOffset int64
	offset, lastOffset, timestamp, headers, err = batch.msgs.readMessage(batch.offset, key, val)
	switch {
	case err == nil:
		batch.offset = offset + 1
		batch.lastOffset = lastOffset
	case errors.Is(err, errShortRead):
		// As an "optimization" kafka truncates the returned response after
		// producing MaxBytes, which could then cause the code to return
		// errShortRead.
		err = batch.msgs.discard()
		switch {
		case err != nil:
			// Since io.EOF is used by the batch to indicate that there are
			// no more messages to consume, it is crucial that any io.EOF errors
			// on the underlying connection are repackaged. Otherwise, the
			// caller can't tell the difference between a batch that was fully
			// consumed or a batch whose connection is in an error state.
			batch.err = dontExpectEOF(err)
		case batch.msgs.remaining() == 0:
			// Because we use the adjusted deadline we could end up returning
			// before the actual deadline occurred. This is necessary otherwise
			// timing out the connection for real could end up leaving it in an
			// unpredictable state, which would require closing it.
			// This design decision was made to maximize the chances of keeping
			// the connection open, the trade off being to lose precision on the
			// read deadline management.
			err = checkTimeoutErr(batch.deadline)
			batch.err = err

			// Checks the following:
			// - `batch.err` for a "success" from the previous timeout check
			// - `batch.msgs.lengthRemain` to ensure that this EOF is not due
			//   to MaxBytes truncation
			// - `batch.lastOffset` to ensure that the message format contains
			//   `lastOffset`
			if errors.Is(batch.err, io.EOF) && batch.msgs.lengthRemain == 0 && batch.lastOffset != -1 {
				// Log compaction can create batches that end with compacted
				// records so the normal strategy that increments the "next"
				// offset as records are read doesn't work as the compacted
				// records are "missing" and never get "read".
				//
				// In order to reliably reach the next non-compacted offset we
				// jump past the saved lastOffset.
				batch.offset = batch.lastOffset + 1
			}
		}
	default:
		// Since io.EOF is used by the batch to indicate that there are
		// no more messages to consume, it is crucial that any io.EOF errors
		// on the underlying connection are repackaged. Otherwise, the
		// caller can't tell the difference between a batch that was fully
		// consumed or a batch whose connection is in an error state.
		batch.err = dontExpectEOF(err)
	}

	return
}

func checkTimeoutErr(deadline time.Time) (err error) {
	if !deadline.IsZero() && time.Now().After(deadline) {
		err = RequestTimedOut
	} else {
		err = io.EOF
	}
	return
}
27
vendor/github.com/segmentio/kafka-go/buffer.go
generated
vendored
Normal file
@@ -0,0 +1,27 @@
package kafka

import (
	"bytes"
	"sync"
)

var bufferPool = sync.Pool{
	New: func() interface{} { return newBuffer() },
}

func newBuffer() *bytes.Buffer {
	b := new(bytes.Buffer)
	b.Grow(65536)
	return b
}

func acquireBuffer() *bytes.Buffer {
	return bufferPool.Get().(*bytes.Buffer)
}

func releaseBuffer(b *bytes.Buffer) {
	if b != nil {
		b.Reset()
		bufferPool.Put(b)
	}
}
146
vendor/github.com/segmentio/kafka-go/client.go
generated
vendored
Normal file
@@ -0,0 +1,146 @@
package kafka

import (
	"context"
	"errors"
	"fmt"
	"net"
	"time"

	"github.com/segmentio/kafka-go/protocol"
)

const (
	defaultCreateTopicsTimeout     = 2 * time.Second
	defaultDeleteTopicsTimeout     = 2 * time.Second
	defaultCreatePartitionsTimeout = 2 * time.Second
	defaultProduceTimeout          = 500 * time.Millisecond
	defaultMaxWait                 = 500 * time.Millisecond
)

// Client is a high-level API to interact with kafka brokers.
//
// All methods of the Client type accept a context as first argument, which may
// be used to asynchronously cancel the requests.
//
// Clients are safe to use concurrently from multiple goroutines, as long as
// their configuration is not changed after first use.
type Client struct {
	// Address of the kafka cluster (or specific broker) that the client will be
	// sending requests to.
	//
	// This field is optional, the address may be provided in each request
	// instead. The request address takes precedence if both were specified.
	Addr net.Addr

	// Time limit for requests sent by this client.
	//
	// If zero, no timeout is applied.
	Timeout time.Duration

	// A transport used to communicate with the kafka brokers.
	//
	// If nil, DefaultTransport is used.
	Transport RoundTripper
}

// TopicAndGroup holds a consumer group id and a topic name. Since both are
// plain strings, we define a type for clarity when passing them to the Client
// as a function argument.
//
// N.B. TopicAndGroup is currently experimental! Therefore, it is subject to
// change, including breaking changes between MINOR and PATCH releases.
//
// DEPRECATED: this type will be removed in version 1.0, programs should
// migrate to use kafka.(*Client).OffsetFetch instead.
type TopicAndGroup struct {
	Topic   string
	GroupId string
}

// ConsumerOffsets returns a map[int]int64 of partition to committed offset for
// a consumer group id and topic.
//
// DEPRECATED: this method will be removed in version 1.0, programs should
// migrate to use kafka.(*Client).OffsetFetch instead.
func (c *Client) ConsumerOffsets(ctx context.Context, tg TopicAndGroup) (map[int]int64, error) {
	metadata, err := c.Metadata(ctx, &MetadataRequest{
		Topics: []string{tg.Topic},
	})

	if err != nil {
		return nil, fmt.Errorf("failed to get topic metadata: %w", err)
	}

	topic := metadata.Topics[0]
	partitions := make([]int, len(topic.Partitions))

	for i := range topic.Partitions {
		partitions[i] = topic.Partitions[i].ID
	}

	offsets, err := c.OffsetFetch(ctx, &OffsetFetchRequest{
		GroupID: tg.GroupId,
		Topics: map[string][]int{
			tg.Topic: partitions,
		},
	})

	if err != nil {
		return nil, fmt.Errorf("failed to get offsets: %w", err)
	}

	topicOffsets := offsets.Topics[topic.Name]
	partitionOffsets := make(map[int]int64, len(topicOffsets))

	for _, off := range topicOffsets {
		partitionOffsets[off.Partition] = off.CommittedOffset
	}

	return partitionOffsets, nil
}
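A quick sketch of the deprecated helper above, useful for inspecting consumer lag from scripts; broker address, topic, and group id are placeholders. New programs should prefer OffsetFetch, as the comment says.

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/segmentio/kafka-go"
)

func main() {
	client := &kafka.Client{Addr: kafka.TCP("localhost:9092")}

	offsets, err := client.ConsumerOffsets(context.Background(), kafka.TopicAndGroup{
		Topic:   "example-topic", // placeholder
		GroupId: "example-group", // placeholder
	})
	if err != nil {
		log.Fatal(err)
	}
	for partition, offset := range offsets {
		fmt.Printf("partition %d committed at offset %d\n", partition, offset)
	}
}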
func (c *Client) roundTrip(ctx context.Context, addr net.Addr, msg protocol.Message) (protocol.Message, error) {
	if c.Timeout > 0 {
		var cancel context.CancelFunc
		ctx, cancel = context.WithTimeout(ctx, c.Timeout)
		defer cancel()
	}

	if addr == nil {
		if addr = c.Addr; addr == nil {
			return nil, errors.New("no address was given for the kafka cluster in the request or on the client")
		}
	}

	return c.transport().RoundTrip(ctx, addr, msg)
}

func (c *Client) transport() RoundTripper {
	if c.Transport != nil {
		return c.Transport
	}
	return DefaultTransport
}

func (c *Client) timeout(ctx context.Context, defaultTimeout time.Duration) time.Duration {
	timeout := c.Timeout

	if deadline, ok := ctx.Deadline(); ok {
		if remain := time.Until(deadline); remain < timeout {
			timeout = remain
		}
	}

	if timeout > 0 {
		// Halve the timeout because it is communicated to kafka in multiple
		// requests (e.g. Fetch, Produce, etc...), this adds buffer to account
		// for network latency when waiting for the response from kafka.
		return timeout / 2
	}

	return defaultTimeout
}

func (c *Client) timeoutMs(ctx context.Context, defaultTimeout time.Duration) int32 {
	return milliseconds(c.timeout(ctx, defaultTimeout))
}
39
vendor/github.com/segmentio/kafka-go/commit.go
generated
vendored
Normal file
@@ -0,0 +1,39 @@
package kafka

// A commit represents the instruction of publishing an update of the last
// offset read by a program for a topic and partition.
type commit struct {
	topic     string
	partition int
	offset    int64
}

// makeCommit builds a commit value from a message, the resulting commit takes
// its topic, partition, and offset from the message.
func makeCommit(msg Message) commit {
	return commit{
		topic:     msg.Topic,
		partition: msg.Partition,
		offset:    msg.Offset + 1,
	}
}

// makeCommits generates a slice of commits from a list of messages, it extracts
// the topic, partition, and offset of each message and builds the corresponding
// commit slice.
func makeCommits(msgs ...Message) []commit {
	commits := make([]commit, len(msgs))

	for i, m := range msgs {
		commits[i] = makeCommit(m)
	}

	return commits
}

// commitRequest is the data type exchanged between the CommitMessages method
// and internals of the reader's implementation.
type commitRequest struct {
	commits []commit
	errch   chan<- error
}
124
vendor/github.com/segmentio/kafka-go/compress/compress.go
generated
vendored
Normal file
@@ -0,0 +1,124 @@
package compress

import (
	"encoding"
	"fmt"
	"io"
	"strconv"
	"strings"

	"github.com/segmentio/kafka-go/compress/gzip"
	"github.com/segmentio/kafka-go/compress/lz4"
	"github.com/segmentio/kafka-go/compress/snappy"
	"github.com/segmentio/kafka-go/compress/zstd"
)

// Compression represents the compression applied to a record set.
type Compression int8

const (
	None   Compression = 0
	Gzip   Compression = 1
	Snappy Compression = 2
	Lz4    Compression = 3
	Zstd   Compression = 4
)

func (c Compression) Codec() Codec {
	if i := int(c); i >= 0 && i < len(Codecs) {
		return Codecs[i]
	}
	return nil
}

func (c Compression) String() string {
	if codec := c.Codec(); codec != nil {
		return codec.Name()
	}
	return "uncompressed"
}

func (c Compression) MarshalText() ([]byte, error) {
	return []byte(c.String()), nil
}

func (c *Compression) UnmarshalText(b []byte) error {
	switch string(b) {
	case "none", "uncompressed":
		*c = None
		return nil
	}

	for _, codec := range Codecs[None+1:] {
		if codec.Name() == string(b) {
			*c = Compression(codec.Code())
			return nil
		}
	}

	i, err := strconv.ParseInt(string(b), 10, 64)
	if err == nil && i >= 0 && i < int64(len(Codecs)) {
		*c = Compression(i)
		return nil
	}

	s := &strings.Builder{}
	s.WriteString("none, uncompressed")

	for i, codec := range Codecs[None+1:] {
		if i < (len(Codecs) - 1) {
			s.WriteString(", ")
		} else {
			s.WriteString(", or ")
		}
		s.WriteString(codec.Name())
	}

	return fmt.Errorf("compression format must be one of %s, not %q", s, b)
}
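Because Compression implements encoding.TextUnmarshaler, it parses directly from config files and flags. A minimal sketch of the accepted inputs:

package main

import (
	"fmt"
	"log"

	"github.com/segmentio/kafka-go/compress"
)

func main() {
	var c compress.Compression
	// Accepts codec names ("gzip", "snappy", "lz4", "zstd"), the aliases
	// "none"/"uncompressed", or the numeric codes 0 through 4.
	if err := c.UnmarshalText([]byte("zstd")); err != nil {
		log.Fatal(err)
	}
	fmt.Println(c, int8(c)) // zstd 4
}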
var (
	_ encoding.TextMarshaler   = Compression(0)
	_ encoding.TextUnmarshaler = (*Compression)(nil)
)

// Codec represents a compression codec to encode and decode the messages.
// See: https://cwiki.apache.org/confluence/display/KAFKA/Compression
//
// A Codec must be safe for concurrent access by multiple go routines.
type Codec interface {
	// Code returns the compression codec code.
	Code() int8

	// Name returns a human-readable name for the codec.
	Name() string

	// NewReader constructs a new reader which decompresses data from r.
	NewReader(r io.Reader) io.ReadCloser

	// NewWriter constructs a new writer which writes compressed data to w.
	NewWriter(w io.Writer) io.WriteCloser
}

var (
	// The global gzip codec installed on the Codecs table.
	GzipCodec gzip.Codec

	// The global snappy codec installed on the Codecs table.
	SnappyCodec snappy.Codec

	// The global lz4 codec installed on the Codecs table.
	Lz4Codec lz4.Codec

	// The global zstd codec installed on the Codecs table.
	ZstdCodec zstd.Codec

	// The global table of compression codecs supported by the kafka protocol.
	Codecs = [...]Codec{
		None:   nil,
		Gzip:   &GzipCodec,
		Snappy: &SnappyCodec,
		Lz4:    &Lz4Codec,
		Zstd:   &ZstdCodec,
	}
)
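The Codec interface is a plain reader/writer factory, so a round trip through any codec in the table looks the same. A minimal sketch using the gzip entry:

package main

import (
	"bytes"
	"fmt"
	"io"
	"log"

	"github.com/segmentio/kafka-go/compress"
)

func main() {
	codec := compress.Gzip.Codec()

	// Compress: Close flushes the compressed frame to the buffer.
	buf := &bytes.Buffer{}
	w := codec.NewWriter(buf)
	if _, err := w.Write([]byte("hello, kafka")); err != nil {
		log.Fatal(err)
	}
	if err := w.Close(); err != nil {
		log.Fatal(err)
	}

	// Decompress.
	r := codec.NewReader(buf)
	data, err := io.ReadAll(r)
	if err != nil {
		log.Fatal(err)
	}
	r.Close()
	fmt.Printf("%s\n", data) // hello, kafka
}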
123
vendor/github.com/segmentio/kafka-go/compress/gzip/gzip.go
generated
vendored
Normal file
@@ -0,0 +1,123 @@
package gzip

import (
	"io"
	"sync"

	"github.com/klauspost/compress/gzip"
)

var (
	readerPool sync.Pool
)

// Codec is the implementation of a compress.Codec which supports creating
// readers and writers for kafka messages compressed with gzip.
type Codec struct {
	// The compression level to configure on writers created by this codec.
	// Acceptable values are defined in the standard gzip package.
	//
	// Defaults to gzip.DefaultCompression.
	Level int

	writerPool sync.Pool
}

// Code implements the compress.Codec interface.
func (c *Codec) Code() int8 { return 1 }

// Name implements the compress.Codec interface.
func (c *Codec) Name() string { return "gzip" }

// NewReader implements the compress.Codec interface.
func (c *Codec) NewReader(r io.Reader) io.ReadCloser {
	var err error
	z, _ := readerPool.Get().(*gzip.Reader)
	if z != nil {
		err = z.Reset(r)
	} else {
		z, err = gzip.NewReader(r)
	}
	if err != nil {
		if z != nil {
			readerPool.Put(z)
		}
		return &errorReader{err: err}
	}
	return &reader{Reader: z}
}

// NewWriter implements the compress.Codec interface.
func (c *Codec) NewWriter(w io.Writer) io.WriteCloser {
	x := c.writerPool.Get()
	z, _ := x.(*gzip.Writer)
	if z == nil {
		x, err := gzip.NewWriterLevel(w, c.level())
		if err != nil {
			return &errorWriter{err: err}
		}
		z = x
	} else {
		z.Reset(w)
	}
	return &writer{codec: c, Writer: z}
}

func (c *Codec) level() int {
	if c.Level != 0 {
		return c.Level
	}
	return gzip.DefaultCompression
}

type reader struct{ *gzip.Reader }

func (r *reader) Close() (err error) {
	if z := r.Reader; z != nil {
		r.Reader = nil
		err = z.Close()
		// Pass it an empty reader, which is a zero-size value implementing the
		// flate.Reader interface to avoid the construction of a bufio.Reader in
		// the call to Reset.
		//
		// Note: we could also not reset the reader at all, but that would cause
		// the underlying reader to be retained until the gzip.Reader is freed,
		// which may not be desirable.
		z.Reset(emptyReader{})
		readerPool.Put(z)
	}
	return
}

type writer struct {
	codec *Codec
	*gzip.Writer
}

func (w *writer) Close() (err error) {
	if z := w.Writer; z != nil {
		w.Writer = nil
		err = z.Close()
		z.Reset(nil)
		w.codec.writerPool.Put(z)
	}
	return
}

type emptyReader struct{}

func (emptyReader) ReadByte() (byte, error) { return 0, io.EOF }

func (emptyReader) Read([]byte) (int, error) { return 0, io.EOF }

type errorReader struct{ err error }

func (r *errorReader) Close() error { return r.err }

func (r *errorReader) Read([]byte) (int, error) { return 0, r.err }

type errorWriter struct{ err error }

func (w *errorWriter) Close() error { return w.err }

func (w *errorWriter) Write([]byte) (int, error) { return 0, w.err }
68
vendor/github.com/segmentio/kafka-go/compress/lz4/lz4.go
generated
vendored
Normal file
@@ -0,0 +1,68 @@
package lz4

import (
	"io"
	"sync"

	"github.com/pierrec/lz4/v4"
)

var (
	readerPool sync.Pool
	writerPool sync.Pool
)

// Codec is the implementation of a compress.Codec which supports creating
// readers and writers for kafka messages compressed with lz4.
type Codec struct{}

// Code implements the compress.Codec interface.
func (c *Codec) Code() int8 { return 3 }

// Name implements the compress.Codec interface.
func (c *Codec) Name() string { return "lz4" }

// NewReader implements the compress.Codec interface.
func (c *Codec) NewReader(r io.Reader) io.ReadCloser {
	z, _ := readerPool.Get().(*lz4.Reader)
	if z != nil {
		z.Reset(r)
	} else {
		z = lz4.NewReader(r)
	}
	return &reader{Reader: z}
}

// NewWriter implements the compress.Codec interface.
func (c *Codec) NewWriter(w io.Writer) io.WriteCloser {
	z, _ := writerPool.Get().(*lz4.Writer)
	if z != nil {
		z.Reset(w)
	} else {
		z = lz4.NewWriter(w)
	}
	return &writer{Writer: z}
}

type reader struct{ *lz4.Reader }

func (r *reader) Close() (err error) {
	if z := r.Reader; z != nil {
		r.Reader = nil
		z.Reset(nil)
		readerPool.Put(z)
	}
	return
}

type writer struct{ *lz4.Writer }

func (w *writer) Close() (err error) {
	if z := w.Writer; z != nil {
		w.Writer = nil
		err = z.Close()
		z.Reset(nil)
		writerPool.Put(z)
	}
	return
}
110
vendor/github.com/segmentio/kafka-go/compress/snappy/snappy.go
generated
vendored
Normal file
@@ -0,0 +1,110 @@
package snappy

import (
	"io"
	"sync"

	"github.com/klauspost/compress/s2"
	"github.com/klauspost/compress/snappy"
)

// Framing is an enumeration type used to enable or disable xerial framing of
// snappy messages.
type Framing int

const (
	Framed Framing = iota
	Unframed
)

// Compression level.
type Compression int

const (
	DefaultCompression Compression = iota
	FasterCompression
	BetterCompression
	BestCompression
)

var (
	readerPool sync.Pool
	writerPool sync.Pool
)

// Codec is the implementation of a compress.Codec which supports creating
// readers and writers for kafka messages compressed with snappy.
type Codec struct {
	// An optional framing to apply to snappy compression.
	//
	// Defaults to Framed.
	Framing Framing

	// Compression level.
	Compression Compression
}

// Code implements the compress.Codec interface.
func (c *Codec) Code() int8 { return 2 }

// Name implements the compress.Codec interface.
func (c *Codec) Name() string { return "snappy" }

// NewReader implements the compress.Codec interface.
func (c *Codec) NewReader(r io.Reader) io.ReadCloser {
	x, _ := readerPool.Get().(*xerialReader)
	if x != nil {
		x.Reset(r)
	} else {
		x = &xerialReader{
			reader: r,
			decode: snappy.Decode,
		}
	}
	return &reader{xerialReader: x}
}

// NewWriter implements the compress.Codec interface.
func (c *Codec) NewWriter(w io.Writer) io.WriteCloser {
	x, _ := writerPool.Get().(*xerialWriter)
	if x != nil {
		x.Reset(w)
	} else {
		x = &xerialWriter{writer: w}
	}
	x.framed = c.Framing == Framed
	switch c.Compression {
	case FasterCompression:
		x.encode = s2.EncodeSnappy
	case BetterCompression:
		x.encode = s2.EncodeSnappyBetter
	case BestCompression:
		x.encode = s2.EncodeSnappyBest
	default:
		x.encode = snappy.Encode // aka. s2.EncodeSnappyBetter
	}
	return &writer{xerialWriter: x}
}

type reader struct{ *xerialReader }

func (r *reader) Close() (err error) {
	if x := r.xerialReader; x != nil {
		r.xerialReader = nil
		x.Reset(nil)
		readerPool.Put(x)
	}
	return
}

type writer struct{ *xerialWriter }

func (w *writer) Close() (err error) {
	if x := w.xerialWriter; x != nil {
		w.xerialWriter = nil
		err = x.Flush()
		x.Reset(nil)
		writerPool.Put(x)
	}
	return
}
330
vendor/github.com/segmentio/kafka-go/compress/snappy/xerial.go
generated
vendored
Normal file
@@ -0,0 +1,330 @@
package snappy

import (
	"bytes"
	"encoding/binary"
	"errors"
	"io"

	"github.com/klauspost/compress/snappy"
)

const defaultBufferSize = 32 * 1024

// An implementation of io.Reader which consumes a stream of xerial-framed
// snappy-encoded data. The framing is optional, if no framing is detected
// the reader will simply forward the bytes from its underlying stream.
type xerialReader struct {
	reader io.Reader
	header [16]byte
	input  []byte
	output []byte
	offset int64
	nbytes int64
	decode func([]byte, []byte) ([]byte, error)
}

func (x *xerialReader) Reset(r io.Reader) {
	x.reader = r
	x.input = x.input[:0]
	x.output = x.output[:0]
	x.header = [16]byte{}
	x.offset = 0
	x.nbytes = 0
}

func (x *xerialReader) Read(b []byte) (int, error) {
	for {
		if x.offset < int64(len(x.output)) {
			n := copy(b, x.output[x.offset:])
			x.offset += int64(n)
			return n, nil
		}

		n, err := x.readChunk(b)
		if err != nil {
			return 0, err
		}
		if n > 0 {
			return n, nil
		}
	}
}

func (x *xerialReader) WriteTo(w io.Writer) (int64, error) {
	wn := int64(0)

	for {
		for x.offset < int64(len(x.output)) {
			n, err := w.Write(x.output[x.offset:])
			wn += int64(n)
			x.offset += int64(n)
			if err != nil {
				return wn, err
			}
		}

		if _, err := x.readChunk(nil); err != nil {
			if errors.Is(err, io.EOF) {
				err = nil
			}
			return wn, err
		}
	}
}

func (x *xerialReader) readChunk(dst []byte) (int, error) {
	x.output = x.output[:0]
	x.offset = 0
	prefix := 0

	if x.nbytes == 0 {
		n, err := x.readFull(x.header[:])
		if err != nil && n == 0 {
			return 0, err
		}
		prefix = n
	}

	if isXerialHeader(x.header[:]) {
		if cap(x.input) < 4 {
			x.input = make([]byte, 4, defaultBufferSize)
		} else {
			x.input = x.input[:4]
		}

		_, err := x.readFull(x.input)
		if err != nil {
			return 0, err
		}

		frame := int(binary.BigEndian.Uint32(x.input))
		if cap(x.input) < frame {
			x.input = make([]byte, frame, align(frame, defaultBufferSize))
		} else {
			x.input = x.input[:frame]
		}

		if _, err := x.readFull(x.input); err != nil {
			return 0, err
		}
	} else {
		if cap(x.input) == 0 {
			x.input = make([]byte, 0, defaultBufferSize)
		} else {
			x.input = x.input[:0]
		}

		if prefix > 0 {
			x.input = append(x.input, x.header[:prefix]...)
		}

		for {
			if len(x.input) == cap(x.input) {
				b := make([]byte, len(x.input), 2*cap(x.input))
				copy(b, x.input)
				x.input = b
			}

			n, err := x.read(x.input[len(x.input):cap(x.input)])
			x.input = x.input[:len(x.input)+n]
			if err != nil {
				if errors.Is(err, io.EOF) && len(x.input) > 0 {
					break
				}
				return 0, err
			}
		}
	}

	var n int
	var err error

	if x.decode == nil {
		x.output, x.input, err = x.input, x.output, nil
	} else if n, err = snappy.DecodedLen(x.input); n <= len(dst) && err == nil {
		// If the output buffer is large enough to hold the decoded value,
		// write it there directly instead of using the intermediary output
		// buffer.
		_, err = x.decode(dst, x.input)
	} else {
		var b []byte
		n = 0
		b, err = x.decode(x.output[:cap(x.output)], x.input)
		if err == nil {
			x.output = b
		}
	}

	return n, err
}

func (x *xerialReader) read(b []byte) (int, error) {
	n, err := x.reader.Read(b)
	x.nbytes += int64(n)
	return n, err
}

func (x *xerialReader) readFull(b []byte) (int, error) {
	n, err := io.ReadFull(x.reader, b)
	x.nbytes += int64(n)
	return n, err
}

// An implementation of a xerial-framed snappy-encoded output stream.
// Each Write made to the writer is framed with a xerial header.
type xerialWriter struct {
	writer io.Writer
	header [16]byte
	input  []byte
	output []byte
	nbytes int64
	framed bool
	encode func([]byte, []byte) []byte
}

func (x *xerialWriter) Reset(w io.Writer) {
	x.writer = w
	x.input = x.input[:0]
	x.output = x.output[:0]
	x.nbytes = 0
}

func (x *xerialWriter) ReadFrom(r io.Reader) (int64, error) {
	wn := int64(0)

	if cap(x.input) == 0 {
		x.input = make([]byte, 0, defaultBufferSize)
	}

	for {
		if x.full() {
			x.grow()
		}

		n, err := r.Read(x.input[len(x.input):cap(x.input)])
		wn += int64(n)
		x.input = x.input[:len(x.input)+n]

		if x.fullEnough() {
			if err := x.Flush(); err != nil {
				return wn, err
			}
		}

		if err != nil {
			if errors.Is(err, io.EOF) {
				err = nil
			}
			return wn, err
		}
	}
}

func (x *xerialWriter) Write(b []byte) (int, error) {
	wn := 0

	if cap(x.input) == 0 {
		x.input = make([]byte, 0, defaultBufferSize)
	}

	for len(b) > 0 {
		if x.full() {
			x.grow()
		}

		n := copy(x.input[len(x.input):cap(x.input)], b)
		b = b[n:]
		wn += n
		x.input = x.input[:len(x.input)+n]

		if x.fullEnough() {
			if err := x.Flush(); err != nil {
				return wn, err
			}
		}
	}

	return wn, nil
}

func (x *xerialWriter) Flush() error {
	if len(x.input) == 0 {
		return nil
	}

	var b []byte
	if x.encode == nil {
		b = x.input
	} else {
		x.output = x.encode(x.output[:cap(x.output)], x.input)
		b = x.output
	}

	x.input = x.input[:0]
	x.output = x.output[:0]

	if x.framed && x.nbytes == 0 {
		writeXerialHeader(x.header[:])
		_, err := x.write(x.header[:])
		if err != nil {
			return err
		}
	}

	if x.framed {
		writeXerialFrame(x.header[:4], len(b))
		_, err := x.write(x.header[:4])
		if err != nil {
			return err
		}
	}

	_, err := x.write(b)
	return err
}

func (x *xerialWriter) write(b []byte) (int, error) {
	n, err := x.writer.Write(b)
	x.nbytes += int64(n)
	return n, err
}

func (x *xerialWriter) full() bool {
	return len(x.input) == cap(x.input)
}

func (x *xerialWriter) fullEnough() bool {
	return x.framed && (cap(x.input)-len(x.input)) < 1024
}

func (x *xerialWriter) grow() {
	tmp := make([]byte, len(x.input), 2*cap(x.input))
	copy(tmp, x.input)
	x.input = tmp
}

func align(n, a int) int {
	if (n % a) == 0 {
		return n
	}
	return ((n / a) + 1) * a
}

var (
	xerialHeader      = [...]byte{130, 83, 78, 65, 80, 80, 89, 0}
	xerialVersionInfo = [...]byte{0, 0, 0, 1, 0, 0, 0, 1}
)

func isXerialHeader(src []byte) bool {
	return len(src) >= 16 && bytes.Equal(src[:8], xerialHeader[:])
}

func writeXerialHeader(b []byte) {
	copy(b[:8], xerialHeader[:])
	copy(b[8:], xerialVersionInfo[:])
}

func writeXerialFrame(b []byte, n int) {
	binary.BigEndian.PutUint32(b, uint32(n))
}
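To make the framing implemented above concrete: a xerial stream begins with a 16-byte header (8 magic bytes, 0x82 "SNAPPY" 0x00, followed by 8 version/compatibility bytes), and each subsequent chunk is a 4-byte big-endian length prefix followed by a raw snappy block. A small standalone sketch that builds these byte sequences the same way writeXerialHeader and writeXerialFrame do:

package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	header := make([]byte, 16)
	copy(header[:8], []byte{130, 83, 78, 65, 80, 80, 89, 0}) // magic: 0x82 "SNAPPY" 0x00
	copy(header[8:], []byte{0, 0, 0, 1, 0, 0, 0, 1})         // version 1, min compatible version 1

	frame := make([]byte, 4)
	binary.BigEndian.PutUint32(frame, 42) // hypothetical: next snappy block is 42 bytes

	fmt.Printf("header: % x\nframe:  % x\n", header, frame)
}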
168
vendor/github.com/segmentio/kafka-go/compress/zstd/zstd.go
generated
vendored
Normal file
@@ -0,0 +1,168 @@
// Package zstd implements Zstandard compression.
package zstd

import (
	"io"
	"sync"

	"github.com/klauspost/compress/zstd"
)

// Codec is the implementation of a compress.Codec which supports creating
// readers and writers for kafka messages compressed with zstd.
type Codec struct {
	// The compression level configured on writers created by the codec.
	//
	// Defaults to 3.
	Level int

	encoderPool sync.Pool // *zstd.Encoder
}

// Code implements the compress.Codec interface.
func (c *Codec) Code() int8 { return 4 }

// Name implements the compress.Codec interface.
func (c *Codec) Name() string { return "zstd" }

// NewReader implements the compress.Codec interface.
func (c *Codec) NewReader(r io.Reader) io.ReadCloser {
	p := new(reader)
	if p.dec, _ = decoderPool.Get().(*zstd.Decoder); p.dec != nil {
		p.dec.Reset(r)
	} else {
		z, err := zstd.NewReader(r,
			zstd.WithDecoderConcurrency(1),
		)
		if err != nil {
			p.err = err
		} else {
			p.dec = z
		}
	}
	return p
}

func (c *Codec) level() int {
	if c.Level != 0 {
		return c.Level
	}
	return 3
}

func (c *Codec) zstdLevel() zstd.EncoderLevel {
	return zstd.EncoderLevelFromZstd(c.level())
}

var decoderPool sync.Pool // *zstd.Decoder

type reader struct {
	dec *zstd.Decoder
	err error
}

// Close implements the io.Closer interface.
func (r *reader) Close() error {
	if r.dec != nil {
		r.dec.Reset(devNull{}) // don't retain the underlying reader
		decoderPool.Put(r.dec)
		r.dec = nil
		r.err = io.ErrClosedPipe
	}
	return nil
}

// Read implements the io.Reader interface.
func (r *reader) Read(p []byte) (int, error) {
	if r.err != nil {
		return 0, r.err
	}
	if r.dec == nil {
		return 0, io.EOF
	}
	return r.dec.Read(p)
}

// WriteTo implements the io.WriterTo interface.
func (r *reader) WriteTo(w io.Writer) (int64, error) {
	if r.err != nil {
		return 0, r.err
	}
	if r.dec == nil {
		return 0, io.ErrClosedPipe
	}
	return r.dec.WriteTo(w)
}

// NewWriter implements the compress.Codec interface.
func (c *Codec) NewWriter(w io.Writer) io.WriteCloser {
	p := new(writer)
	if enc, _ := c.encoderPool.Get().(*zstd.Encoder); enc == nil {
		z, err := zstd.NewWriter(w,
			zstd.WithEncoderLevel(c.zstdLevel()),
			zstd.WithEncoderConcurrency(1),
			zstd.WithZeroFrames(true),
		)
		if err != nil {
			p.err = err
		} else {
			p.enc = z
		}
	} else {
		p.enc = enc
		p.enc.Reset(w)
	}
	p.c = c
	return p
}

type writer struct {
	c   *Codec
	enc *zstd.Encoder
	err error
}

// Close implements the io.Closer interface.
func (w *writer) Close() error {
	if w.enc != nil {
		// Close needs to be called to write the end of stream marker and flush
		// the buffers. The zstd package documents that the encoder is re-usable
		// after being closed.
		err := w.enc.Close()
		if err != nil {
			w.err = err
		}
		w.enc.Reset(devNull{}) // don't retain the underlying writer
		w.c.encoderPool.Put(w.enc)
		w.enc = nil
		return err
	}
	return w.err
}

// Write implements the io.Writer interface.
func (w *writer) Write(p []byte) (int, error) {
	if w.err != nil {
		return 0, w.err
	}
	if w.enc == nil {
		return 0, io.ErrClosedPipe
	}
	return w.enc.Write(p)
}

// ReadFrom implements the io.ReaderFrom interface.
func (w *writer) ReadFrom(r io.Reader) (int64, error) {
	if w.err != nil {
		return 0, w.err
	}
	if w.enc == nil {
		return 0, io.ErrClosedPipe
	}
	return w.enc.ReadFrom(r)
}

type devNull struct{}

func (devNull) Read([]byte) (int, error)  { return 0, io.EOF }
func (devNull) Write([]byte) (int, error) { return 0, nil }
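As an illustrative aside, the Codec above can be exercised directly as an in-memory round trip; this sketch assumes the vendored import path and elides some error checks for brevity:

package main

import (
	"bytes"
	"fmt"
	"io"

	"github.com/segmentio/kafka-go/compress/zstd"
)

func main() {
	codec := &zstd.Codec{Level: 3}

	var buf bytes.Buffer
	w := codec.NewWriter(&buf)
	w.Write([]byte("hello, kafka")) // error handling elided for brevity
	w.Close()                       // flushes and writes the end-of-stream marker

	r := codec.NewReader(&buf)
	defer r.Close()
	out, _ := io.ReadAll(r)
	fmt.Printf("%s\n", out)
}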
31 vendor/github.com/segmentio/kafka-go/compression.go generated vendored Normal file
@@ -0,0 +1,31 @@
package kafka

import (
	"errors"

	"github.com/segmentio/kafka-go/compress"
)

type Compression = compress.Compression

const (
	Gzip   Compression = compress.Gzip
	Snappy Compression = compress.Snappy
	Lz4    Compression = compress.Lz4
	Zstd   Compression = compress.Zstd
)

type CompressionCodec = compress.Codec

var (
	errUnknownCodec = errors.New("the compression code is invalid or its codec has not been imported")
)

// resolveCodec looks up a codec by Code().
func resolveCodec(code int8) (CompressionCodec, error) {
	codec := compress.Compression(code).Codec()
	if codec == nil {
		return nil, errUnknownCodec
	}
	return codec, nil
}
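A short sketch of how these aliases resolve, using the Compression.Codec() lookup that resolveCodec relies on (illustrative only; it assumes the compress package registers the built-in codecs):

package main

import (
	"fmt"

	"github.com/segmentio/kafka-go/compress"
)

func main() {
	for _, c := range []compress.Compression{
		compress.Gzip, compress.Snappy, compress.Lz4, compress.Zstd,
	} {
		// Codec() returns nil when the codec for a code is not available.
		if codec := c.Codec(); codec != nil {
			fmt.Printf("code=%d name=%s\n", codec.Code(), codec.Name())
		}
	}
}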
1647 vendor/github.com/segmentio/kafka-go/conn.go generated vendored Normal file
File diff suppressed because it is too large
1252 vendor/github.com/segmentio/kafka-go/consumergroup.go generated vendored Normal file
File diff suppressed because it is too large
55 vendor/github.com/segmentio/kafka-go/crc32.go generated vendored Normal file
@@ -0,0 +1,55 @@
package kafka

import (
	"encoding/binary"
	"hash/crc32"
)

type crc32Writer struct {
	table  *crc32.Table
	buffer [8]byte
	crc32  uint32
}

func (w *crc32Writer) update(b []byte) {
	w.crc32 = crc32.Update(w.crc32, w.table, b)
}

func (w *crc32Writer) writeInt8(i int8) {
	w.buffer[0] = byte(i)
	w.update(w.buffer[:1])
}

func (w *crc32Writer) writeInt16(i int16) {
	binary.BigEndian.PutUint16(w.buffer[:2], uint16(i))
	w.update(w.buffer[:2])
}

func (w *crc32Writer) writeInt32(i int32) {
	binary.BigEndian.PutUint32(w.buffer[:4], uint32(i))
	w.update(w.buffer[:4])
}

func (w *crc32Writer) writeInt64(i int64) {
	binary.BigEndian.PutUint64(w.buffer[:8], uint64(i))
	w.update(w.buffer[:8])
}

func (w *crc32Writer) writeBytes(b []byte) {
	n := len(b)
	if b == nil {
		n = -1
	}
	w.writeInt32(int32(n))
	w.update(b)
}

func (w *crc32Writer) Write(b []byte) (int, error) {
	w.update(b)
	return len(b), nil
}

func (w *crc32Writer) WriteString(s string) (int, error) {
	w.update([]byte(s))
	return len(s), nil
}
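Since crc32Writer is unexported, the same field-by-field checksum pattern can be reproduced with the standard library alone. A sketch (the table choice depends on the message format: IEEE for the legacy message sets, Castagnoli for v2 record batches; the values checksummed here are hypothetical):

package main

import (
	"encoding/binary"
	"fmt"
	"hash/crc32"
)

func main() {
	table := crc32.MakeTable(crc32.Castagnoli)
	var sum uint32
	var buf [8]byte

	// writeInt32-style update: big-endian encode, then fold into the CRC.
	binary.BigEndian.PutUint32(buf[:4], 1234)
	sum = crc32.Update(sum, table, buf[:4])

	// writeBytes-style update: a length prefix (-1 for nil), then the payload.
	payload := []byte("key")
	binary.BigEndian.PutUint32(buf[:4], uint32(int32(len(payload))))
	sum = crc32.Update(sum, table, buf[:4])
	sum = crc32.Update(sum, table, payload)

	fmt.Printf("crc32=%08x\n", sum)
}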
108 vendor/github.com/segmentio/kafka-go/createacls.go generated vendored Normal file
@@ -0,0 +1,108 @@
package kafka

import (
	"context"
	"fmt"
	"net"
	"time"

	"github.com/segmentio/kafka-go/protocol/createacls"
)

// CreateACLsRequest represents a request sent to a kafka broker to add
// new ACLs.
type CreateACLsRequest struct {
	// Address of the kafka broker to send the request to.
	Addr net.Addr

	// List of ACLs to create.
	ACLs []ACLEntry
}

// CreateACLsResponse represents a response from a kafka broker to an ACL
// creation request.
type CreateACLsResponse struct {
	// The amount of time that the broker throttled the request.
	Throttle time.Duration

	// List of errors that occurred while attempting to create
	// the ACLs.
	//
	// The errors contain the kafka error code. Programs may use the standard
	// errors.Is function to test the error against kafka error codes.
	Errors []error
}

type ACLPermissionType int8

const (
	ACLPermissionTypeUnknown ACLPermissionType = 0
	ACLPermissionTypeAny     ACLPermissionType = 1
	ACLPermissionTypeDeny    ACLPermissionType = 2
	ACLPermissionTypeAllow   ACLPermissionType = 3
)

type ACLOperationType int8

const (
	ACLOperationTypeUnknown         ACLOperationType = 0
	ACLOperationTypeAny             ACLOperationType = 1
	ACLOperationTypeAll             ACLOperationType = 2
	ACLOperationTypeRead            ACLOperationType = 3
	ACLOperationTypeWrite           ACLOperationType = 4
	ACLOperationTypeCreate          ACLOperationType = 5
	ACLOperationTypeDelete          ACLOperationType = 6
	ACLOperationTypeAlter           ACLOperationType = 7
	ACLOperationTypeDescribe        ACLOperationType = 8
	ACLOperationTypeClusterAction   ACLOperationType = 9
	ACLOperationTypeDescribeConfigs ACLOperationType = 10
	ACLOperationTypeAlterConfigs    ACLOperationType = 11
	ACLOperationTypeIdempotentWrite ACLOperationType = 12
)

type ACLEntry struct {
	ResourceType        ResourceType
	ResourceName        string
	ResourcePatternType PatternType
	Principal           string
	Host                string
	Operation           ACLOperationType
	PermissionType      ACLPermissionType
}

// CreateACLs sends an ACL creation request to a kafka broker and returns the
// response.
func (c *Client) CreateACLs(ctx context.Context, req *CreateACLsRequest) (*CreateACLsResponse, error) {
	acls := make([]createacls.RequestACLs, 0, len(req.ACLs))

	for _, acl := range req.ACLs {
		acls = append(acls, createacls.RequestACLs{
			ResourceType:        int8(acl.ResourceType),
			ResourceName:        acl.ResourceName,
			ResourcePatternType: int8(acl.ResourcePatternType),
			Principal:           acl.Principal,
			Host:                acl.Host,
			Operation:           int8(acl.Operation),
			PermissionType:      int8(acl.PermissionType),
		})
	}

	m, err := c.roundTrip(ctx, req.Addr, &createacls.Request{
		Creations: acls,
	})
	if err != nil {
		return nil, fmt.Errorf("kafka.(*Client).CreateACLs: %w", err)
	}

	res := m.(*createacls.Response)
	ret := &CreateACLsResponse{
		Throttle: makeDuration(res.ThrottleTimeMs),
		Errors:   make([]error, 0, len(res.Results)),
	}

	for _, t := range res.Results {
		ret.Errors = append(ret.Errors, makeError(t.ErrorCode, t.ErrorMessage))
	}

	return ret, nil
}
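An illustrative call of the API above (not part of the diff): the broker address, topic, and principal are hypothetical, and kafka.TCP, ResourceTypeTopic, and PatternTypeLiteral are assumed from elsewhere in the package:

package main

import (
	"context"
	"fmt"
	"log"

	kafka "github.com/segmentio/kafka-go"
)

func main() {
	client := &kafka.Client{Addr: kafka.TCP("localhost:9092")} // hypothetical broker

	resp, err := client.CreateACLs(context.Background(), &kafka.CreateACLsRequest{
		Addr: client.Addr,
		ACLs: []kafka.ACLEntry{{
			ResourceType:        kafka.ResourceTypeTopic,
			ResourceName:        "my-topic",
			ResourcePatternType: kafka.PatternTypeLiteral,
			Principal:           "User:alice",
			Host:                "*",
			Operation:           kafka.ACLOperationTypeRead,
			PermissionType:      kafka.ACLPermissionTypeAllow,
		}},
	})
	if err != nil {
		log.Fatal(err)
	}
	// One entry per requested ACL; nil means that ACL was created.
	for i, e := range resp.Errors {
		fmt.Printf("acl %d: %v\n", i, e)
	}
}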
103 vendor/github.com/segmentio/kafka-go/createpartitions.go generated vendored Normal file
@@ -0,0 +1,103 @@
package kafka

import (
	"context"
	"fmt"
	"net"
	"time"

	"github.com/segmentio/kafka-go/protocol/createpartitions"
)

// CreatePartitionsRequest represents a request sent to a kafka broker to create
// and update topic partitions.
type CreatePartitionsRequest struct {
	// Address of the kafka broker to send the request to.
	Addr net.Addr

	// List of topics to create and their configuration.
	Topics []TopicPartitionsConfig

	// When set to true, topics are not created but the configuration is
	// validated as if they were.
	ValidateOnly bool
}

// CreatePartitionsResponse represents a response from a kafka broker to a partition
// creation request.
type CreatePartitionsResponse struct {
	// The amount of time that the broker throttled the request.
	Throttle time.Duration

	// Mapping of topic names to errors that occurred while attempting to create
	// the topics.
	//
	// The errors contain the kafka error code. Programs may use the standard
	// errors.Is function to test the error against kafka error codes.
	Errors map[string]error
}

// CreatePartitions sends a partitions creation request to a kafka broker and returns the
// response.
func (c *Client) CreatePartitions(ctx context.Context, req *CreatePartitionsRequest) (*CreatePartitionsResponse, error) {
	topics := make([]createpartitions.RequestTopic, len(req.Topics))

	for i, t := range req.Topics {
		topics[i] = createpartitions.RequestTopic{
			Name:        t.Name,
			Count:       t.Count,
			Assignments: t.assignments(),
		}
	}

	m, err := c.roundTrip(ctx, req.Addr, &createpartitions.Request{
		Topics:       topics,
		TimeoutMs:    c.timeoutMs(ctx, defaultCreatePartitionsTimeout),
		ValidateOnly: req.ValidateOnly,
	})
	if err != nil {
		return nil, fmt.Errorf("kafka.(*Client).CreatePartitions: %w", err)
	}

	res := m.(*createpartitions.Response)
	ret := &CreatePartitionsResponse{
		Throttle: makeDuration(res.ThrottleTimeMs),
		Errors:   make(map[string]error, len(res.Results)),
	}

	for _, t := range res.Results {
		ret.Errors[t.Name] = makeError(t.ErrorCode, t.ErrorMessage)
	}

	return ret, nil
}

type TopicPartitionsConfig struct {
	// Topic name.
	Name string

	// Count of partitions for the topic.
	Count int32

	// TopicPartitionAssignments among kafka brokers for the topic's partitions.
	TopicPartitionAssignments []TopicPartitionAssignment
}

func (t *TopicPartitionsConfig) assignments() []createpartitions.RequestAssignment {
	if len(t.TopicPartitionAssignments) == 0 {
		return nil
	}
	assignments := make([]createpartitions.RequestAssignment, len(t.TopicPartitionAssignments))
	for i, a := range t.TopicPartitionAssignments {
		assignments[i] = createpartitions.RequestAssignment{
			BrokerIDs: a.BrokerIDs,
		}
	}
	return assignments
}

type TopicPartitionAssignment struct {
	// Broker IDs.
	BrokerIDs []int32
}
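A hypothetical dry-run of the API above, assuming a broker at localhost:9092 and a topic named my-topic:

package main

import (
	"context"
	"log"

	kafka "github.com/segmentio/kafka-go"
)

func main() {
	client := &kafka.Client{Addr: kafka.TCP("localhost:9092")} // hypothetical broker

	resp, err := client.CreatePartitions(context.Background(), &kafka.CreatePartitionsRequest{
		Addr: client.Addr,
		Topics: []kafka.TopicPartitionsConfig{{
			Name:  "my-topic", // hypothetical topic
			Count: 6,          // desired total partition count
		}},
		ValidateOnly: true, // validate the change without applying it
	})
	if err != nil {
		log.Fatal(err)
	}
	if topicErr := resp.Errors["my-topic"]; topicErr != nil {
		log.Fatal(topicErr)
	}
}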
398 vendor/github.com/segmentio/kafka-go/createtopics.go generated vendored Normal file
@@ -0,0 +1,398 @@
package kafka

import (
	"bufio"
	"context"
	"errors"
	"fmt"
	"net"
	"time"

	"github.com/segmentio/kafka-go/protocol/createtopics"
)

// CreateTopicsRequest represents a request sent to a kafka broker to create
// new topics.
type CreateTopicsRequest struct {
	// Address of the kafka broker to send the request to.
	Addr net.Addr

	// List of topics to create and their configuration.
	Topics []TopicConfig

	// When set to true, topics are not created but the configuration is
	// validated as if they were.
	//
	// This field will be ignored if the kafka broker did not support the
	// CreateTopics API in version 1 or above.
	ValidateOnly bool
}

// CreateTopicsResponse represents a response from a kafka broker to a topic
// creation request.
type CreateTopicsResponse struct {
	// The amount of time that the broker throttled the request.
	//
	// This field will be zero if the kafka broker did not support the
	// CreateTopics API in version 2 or above.
	Throttle time.Duration

	// Mapping of topic names to errors that occurred while attempting to create
	// the topics.
	//
	// The errors contain the kafka error code. Programs may use the standard
	// errors.Is function to test the error against kafka error codes.
	Errors map[string]error
}

// CreateTopics sends a topic creation request to a kafka broker and returns the
// response.
func (c *Client) CreateTopics(ctx context.Context, req *CreateTopicsRequest) (*CreateTopicsResponse, error) {
	topics := make([]createtopics.RequestTopic, len(req.Topics))

	for i, t := range req.Topics {
		topics[i] = createtopics.RequestTopic{
			Name:              t.Topic,
			NumPartitions:     int32(t.NumPartitions),
			ReplicationFactor: int16(t.ReplicationFactor),
			Assignments:       t.assignments(),
			Configs:           t.configs(),
		}
	}

	m, err := c.roundTrip(ctx, req.Addr, &createtopics.Request{
		Topics:       topics,
		TimeoutMs:    c.timeoutMs(ctx, defaultCreateTopicsTimeout),
		ValidateOnly: req.ValidateOnly,
	})
	if err != nil {
		return nil, fmt.Errorf("kafka.(*Client).CreateTopics: %w", err)
	}

	res := m.(*createtopics.Response)
	ret := &CreateTopicsResponse{
		Throttle: makeDuration(res.ThrottleTimeMs),
		Errors:   make(map[string]error, len(res.Topics)),
	}

	for _, t := range res.Topics {
		ret.Errors[t.Name] = makeError(t.ErrorCode, t.ErrorMessage)
	}

	return ret, nil
}

type ConfigEntry struct {
	ConfigName  string
	ConfigValue string
}

func (c ConfigEntry) toCreateTopicsRequestV0ConfigEntry() createTopicsRequestV0ConfigEntry {
	return createTopicsRequestV0ConfigEntry(c)
}

type createTopicsRequestV0ConfigEntry struct {
	ConfigName  string
	ConfigValue string
}

func (t createTopicsRequestV0ConfigEntry) size() int32 {
	return sizeofString(t.ConfigName) +
		sizeofString(t.ConfigValue)
}

func (t createTopicsRequestV0ConfigEntry) writeTo(wb *writeBuffer) {
	wb.writeString(t.ConfigName)
	wb.writeString(t.ConfigValue)
}

type ReplicaAssignment struct {
	Partition int
	// The list of brokers where the partition should be allocated. There must
	// be as many entries in this list as there are replicas of the partition.
	// The first entry represents the broker that will be the preferred leader
	// for the partition.
	//
	// This field changed in 0.4 from `int` to `[]int`. It was invalid to pass
	// a single integer as this is supposed to be a list. While this introduces
	// a breaking change, it probably never worked before.
	Replicas []int
}

func (a *ReplicaAssignment) partitionIndex() int32 {
	return int32(a.Partition)
}

func (a *ReplicaAssignment) brokerIDs() []int32 {
	if len(a.Replicas) == 0 {
		return nil
	}
	replicas := make([]int32, len(a.Replicas))
	for i, r := range a.Replicas {
		replicas[i] = int32(r)
	}
	return replicas
}

func (a ReplicaAssignment) toCreateTopicsRequestV0ReplicaAssignment() createTopicsRequestV0ReplicaAssignment {
	return createTopicsRequestV0ReplicaAssignment{
		Partition: int32(a.Partition),
		Replicas:  a.brokerIDs(),
	}
}

type createTopicsRequestV0ReplicaAssignment struct {
	Partition int32
	Replicas  []int32
}

func (t createTopicsRequestV0ReplicaAssignment) size() int32 {
	return sizeofInt32(t.Partition) +
		(int32(len(t.Replicas)+1) * sizeofInt32(0)) // N+1 because the array length is an int32
}

func (t createTopicsRequestV0ReplicaAssignment) writeTo(wb *writeBuffer) {
	wb.writeInt32(t.Partition)
	wb.writeInt32(int32(len(t.Replicas)))
	for _, r := range t.Replicas {
		wb.writeInt32(int32(r))
	}
}

type TopicConfig struct {
	// Topic name.
	Topic string

	// NumPartitions created. -1 indicates unset.
	NumPartitions int

	// ReplicationFactor for the topic. -1 indicates unset.
	ReplicationFactor int

	// ReplicaAssignments among kafka brokers for this topic's partitions. If this
	// is set num_partitions and replication_factor must be unset.
	ReplicaAssignments []ReplicaAssignment

	// ConfigEntries holds topic level configuration to be set for the topic.
	ConfigEntries []ConfigEntry
}

func (t *TopicConfig) assignments() []createtopics.RequestAssignment {
	if len(t.ReplicaAssignments) == 0 {
		return nil
	}
	assignments := make([]createtopics.RequestAssignment, len(t.ReplicaAssignments))
	for i, a := range t.ReplicaAssignments {
		assignments[i] = createtopics.RequestAssignment{
			PartitionIndex: a.partitionIndex(),
			BrokerIDs:      a.brokerIDs(),
		}
	}
	return assignments
}

func (t *TopicConfig) configs() []createtopics.RequestConfig {
	if len(t.ConfigEntries) == 0 {
		return nil
	}
	configs := make([]createtopics.RequestConfig, len(t.ConfigEntries))
	for i, c := range t.ConfigEntries {
		configs[i] = createtopics.RequestConfig{
			Name:  c.ConfigName,
			Value: c.ConfigValue,
		}
	}
	return configs
}

func (t TopicConfig) toCreateTopicsRequestV0Topic() createTopicsRequestV0Topic {
	requestV0ReplicaAssignments := make([]createTopicsRequestV0ReplicaAssignment, 0, len(t.ReplicaAssignments))
	for _, a := range t.ReplicaAssignments {
		requestV0ReplicaAssignments = append(
			requestV0ReplicaAssignments,
			a.toCreateTopicsRequestV0ReplicaAssignment())
	}
	requestV0ConfigEntries := make([]createTopicsRequestV0ConfigEntry, 0, len(t.ConfigEntries))
	for _, c := range t.ConfigEntries {
		requestV0ConfigEntries = append(
			requestV0ConfigEntries,
			c.toCreateTopicsRequestV0ConfigEntry())
	}

	return createTopicsRequestV0Topic{
		Topic:              t.Topic,
		NumPartitions:      int32(t.NumPartitions),
		ReplicationFactor:  int16(t.ReplicationFactor),
		ReplicaAssignments: requestV0ReplicaAssignments,
		ConfigEntries:      requestV0ConfigEntries,
	}
}

type createTopicsRequestV0Topic struct {
	// Topic name.
	Topic string

	// NumPartitions created. -1 indicates unset.
	NumPartitions int32

	// ReplicationFactor for the topic. -1 indicates unset.
	ReplicationFactor int16

	// ReplicaAssignments among kafka brokers for this topic's partitions. If this
	// is set num_partitions and replication_factor must be unset.
	ReplicaAssignments []createTopicsRequestV0ReplicaAssignment

	// ConfigEntries holds topic level configuration to be set for the topic.
	ConfigEntries []createTopicsRequestV0ConfigEntry
}

func (t createTopicsRequestV0Topic) size() int32 {
	return sizeofString(t.Topic) +
		sizeofInt32(t.NumPartitions) +
		sizeofInt16(t.ReplicationFactor) +
		sizeofArray(len(t.ReplicaAssignments), func(i int) int32 { return t.ReplicaAssignments[i].size() }) +
		sizeofArray(len(t.ConfigEntries), func(i int) int32 { return t.ConfigEntries[i].size() })
}

func (t createTopicsRequestV0Topic) writeTo(wb *writeBuffer) {
	wb.writeString(t.Topic)
	wb.writeInt32(t.NumPartitions)
	wb.writeInt16(t.ReplicationFactor)
	wb.writeArray(len(t.ReplicaAssignments), func(i int) { t.ReplicaAssignments[i].writeTo(wb) })
	wb.writeArray(len(t.ConfigEntries), func(i int) { t.ConfigEntries[i].writeTo(wb) })
}

// See http://kafka.apache.org/protocol.html#The_Messages_CreateTopics
type createTopicsRequestV0 struct {
	// Topics contains an array of single topic creation requests. Cannot
	// have multiple entries for the same topic.
	Topics []createTopicsRequestV0Topic

	// Timeout ms to wait for a topic to be completely created on the
	// controller node. Values <= 0 will trigger topic creation and return
	// immediately.
	Timeout int32
}

func (t createTopicsRequestV0) size() int32 {
	return sizeofArray(len(t.Topics), func(i int) int32 { return t.Topics[i].size() }) +
		sizeofInt32(t.Timeout)
}

func (t createTopicsRequestV0) writeTo(wb *writeBuffer) {
	wb.writeArray(len(t.Topics), func(i int) { t.Topics[i].writeTo(wb) })
	wb.writeInt32(t.Timeout)
}

type createTopicsResponseV0TopicError struct {
	// Topic name.
	Topic string

	// ErrorCode holds the response error code.
	ErrorCode int16
}

func (t createTopicsResponseV0TopicError) size() int32 {
	return sizeofString(t.Topic) +
		sizeofInt16(t.ErrorCode)
}

func (t createTopicsResponseV0TopicError) writeTo(wb *writeBuffer) {
	wb.writeString(t.Topic)
	wb.writeInt16(t.ErrorCode)
}

func (t *createTopicsResponseV0TopicError) readFrom(r *bufio.Reader, size int) (remain int, err error) {
	if remain, err = readString(r, size, &t.Topic); err != nil {
		return
	}
	if remain, err = readInt16(r, remain, &t.ErrorCode); err != nil {
		return
	}
	return
}

// See http://kafka.apache.org/protocol.html#The_Messages_CreateTopics
type createTopicsResponseV0 struct {
	TopicErrors []createTopicsResponseV0TopicError
}

func (t createTopicsResponseV0) size() int32 {
	return sizeofArray(len(t.TopicErrors), func(i int) int32 { return t.TopicErrors[i].size() })
}

func (t createTopicsResponseV0) writeTo(wb *writeBuffer) {
	wb.writeArray(len(t.TopicErrors), func(i int) { t.TopicErrors[i].writeTo(wb) })
}

func (t *createTopicsResponseV0) readFrom(r *bufio.Reader, size int) (remain int, err error) {
	fn := func(r *bufio.Reader, size int) (fnRemain int, fnErr error) {
		var topic createTopicsResponseV0TopicError
		// Check fnErr here, not the outer err: the original checked the wrong
		// variable and silently ignored per-topic decode errors.
		if fnRemain, fnErr = (&topic).readFrom(r, size); fnErr != nil {
			return
		}
		t.TopicErrors = append(t.TopicErrors, topic)
		return
	}
	if remain, err = readArrayWith(r, size, fn); err != nil {
		return
	}

	return
}

func (c *Conn) createTopics(request createTopicsRequestV0) (createTopicsResponseV0, error) {
	var response createTopicsResponseV0

	err := c.writeOperation(
		func(deadline time.Time, id int32) error {
			if request.Timeout == 0 {
				now := time.Now()
				deadline = adjustDeadlineForRTT(deadline, now, defaultRTT)
				request.Timeout = milliseconds(deadlineToTimeout(deadline, now))
			}
			return c.writeRequest(createTopics, v0, id, request)
		},
		func(deadline time.Time, size int) error {
			return expectZeroSize(func() (remain int, err error) {
				return (&response).readFrom(&c.rbuf, size)
			}())
		},
	)
	if err != nil {
		return response, err
	}
	for _, tr := range response.TopicErrors {
		if tr.ErrorCode != 0 {
			return response, Error(tr.ErrorCode)
		}
	}

	return response, nil
}

// CreateTopics creates one topic per provided configuration with idempotent
// operational semantics. In other words, if CreateTopics is invoked with a
// configuration for an existing topic, it will have no effect.
func (c *Conn) CreateTopics(topics ...TopicConfig) error {
	requestV0Topics := make([]createTopicsRequestV0Topic, 0, len(topics))
	for _, t := range topics {
		requestV0Topics = append(
			requestV0Topics,
			t.toCreateTopicsRequestV0Topic())
	}

	_, err := c.createTopics(createTopicsRequestV0{
		Topics: requestV0Topics,
	})
	if err != nil {
		if errors.Is(err, TopicAlreadyExists) {
			// ok
			return nil
		}

		return err
	}

	return nil
}
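A sketch of the idempotent Conn-level API above, using the Dialer defined later in this diff; the broker address and topic are hypothetical, and the dialed broker generally needs to be the cluster controller for creation to succeed:

package main

import (
	"log"
	"time"

	kafka "github.com/segmentio/kafka-go"
)

func main() {
	d := &kafka.Dialer{Timeout: 10 * time.Second}
	conn, err := d.Dial("tcp", "localhost:9092") // hypothetical broker
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	// Idempotent: a TopicAlreadyExists error is swallowed by CreateTopics.
	err = conn.CreateTopics(kafka.TopicConfig{
		Topic:             "my-topic",
		NumPartitions:     3,
		ReplicationFactor: 1,
	})
	if err != nil {
		log.Fatal(err)
	}
}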
175 vendor/github.com/segmentio/kafka-go/deletetopics.go generated vendored Normal file
@@ -0,0 +1,175 @@
package kafka

import (
	"bufio"
	"context"
	"fmt"
	"net"
	"time"

	"github.com/segmentio/kafka-go/protocol/deletetopics"
)

// DeleteTopicsRequest represents a request sent to a kafka broker to delete
// topics.
type DeleteTopicsRequest struct {
	// Address of the kafka broker to send the request to.
	Addr net.Addr

	// Names of topics to delete.
	Topics []string
}

// DeleteTopicsResponse represents a response from a kafka broker to a topic
// deletion request.
type DeleteTopicsResponse struct {
	// The amount of time that the broker throttled the request.
	//
	// This field will be zero if the kafka broker did not support the
	// DeleteTopics API in version 1 or above.
	Throttle time.Duration

	// Mapping of topic names to errors that occurred while attempting to delete
	// the topics.
	//
	// The errors contain the kafka error code. Programs may use the standard
	// errors.Is function to test the error against kafka error codes.
	Errors map[string]error
}

// DeleteTopics sends a topic deletion request to a kafka broker and returns the
// response.
func (c *Client) DeleteTopics(ctx context.Context, req *DeleteTopicsRequest) (*DeleteTopicsResponse, error) {
	m, err := c.roundTrip(ctx, req.Addr, &deletetopics.Request{
		TopicNames: req.Topics,
		TimeoutMs:  c.timeoutMs(ctx, defaultDeleteTopicsTimeout),
	})
	if err != nil {
		return nil, fmt.Errorf("kafka.(*Client).DeleteTopics: %w", err)
	}

	res := m.(*deletetopics.Response)
	ret := &DeleteTopicsResponse{
		Throttle: makeDuration(res.ThrottleTimeMs),
		Errors:   make(map[string]error, len(res.Responses)),
	}

	for _, t := range res.Responses {
		if t.ErrorCode == 0 {
			ret.Errors[t.Name] = nil
		} else {
			ret.Errors[t.Name] = Error(t.ErrorCode)
		}
	}

	return ret, nil
}

// See http://kafka.apache.org/protocol.html#The_Messages_DeleteTopics
type deleteTopicsRequestV0 struct {
	// Topics holds the topic names.
	Topics []string

	// Timeout holds the time in ms to wait for a topic to be completely deleted
	// on the controller node. Values <= 0 will trigger topic deletion and return
	// immediately.
	Timeout int32
}

func (t deleteTopicsRequestV0) size() int32 {
	return sizeofStringArray(t.Topics) +
		sizeofInt32(t.Timeout)
}

func (t deleteTopicsRequestV0) writeTo(wb *writeBuffer) {
	wb.writeStringArray(t.Topics)
	wb.writeInt32(t.Timeout)
}

type deleteTopicsResponseV0 struct {
	// TopicErrorCodes holds per topic error codes.
	TopicErrorCodes []deleteTopicsResponseV0TopicErrorCode
}

func (t deleteTopicsResponseV0) size() int32 {
	return sizeofArray(len(t.TopicErrorCodes), func(i int) int32 { return t.TopicErrorCodes[i].size() })
}

func (t *deleteTopicsResponseV0) readFrom(r *bufio.Reader, size int) (remain int, err error) {
	fn := func(withReader *bufio.Reader, withSize int) (fnRemain int, fnErr error) {
		var item deleteTopicsResponseV0TopicErrorCode
		// Check fnErr here, not the outer err: the original checked the wrong
		// variable and silently ignored per-topic decode errors.
		if fnRemain, fnErr = (&item).readFrom(withReader, withSize); fnErr != nil {
			return
		}
		t.TopicErrorCodes = append(t.TopicErrorCodes, item)
		return
	}
	if remain, err = readArrayWith(r, size, fn); err != nil {
		return
	}
	return
}

func (t deleteTopicsResponseV0) writeTo(wb *writeBuffer) {
	wb.writeArray(len(t.TopicErrorCodes), func(i int) { t.TopicErrorCodes[i].writeTo(wb) })
}

type deleteTopicsResponseV0TopicErrorCode struct {
	// Topic holds the topic name.
	Topic string

	// ErrorCode holds the error code.
	ErrorCode int16
}

func (t deleteTopicsResponseV0TopicErrorCode) size() int32 {
	return sizeofString(t.Topic) +
		sizeofInt16(t.ErrorCode)
}

func (t *deleteTopicsResponseV0TopicErrorCode) readFrom(r *bufio.Reader, size int) (remain int, err error) {
	if remain, err = readString(r, size, &t.Topic); err != nil {
		return
	}
	if remain, err = readInt16(r, remain, &t.ErrorCode); err != nil {
		return
	}
	return
}

func (t deleteTopicsResponseV0TopicErrorCode) writeTo(wb *writeBuffer) {
	wb.writeString(t.Topic)
	wb.writeInt16(t.ErrorCode)
}

// deleteTopics deletes the specified topics.
//
// See http://kafka.apache.org/protocol.html#The_Messages_DeleteTopics
func (c *Conn) deleteTopics(request deleteTopicsRequestV0) (deleteTopicsResponseV0, error) {
	var response deleteTopicsResponseV0
	err := c.writeOperation(
		func(deadline time.Time, id int32) error {
			if request.Timeout == 0 {
				now := time.Now()
				deadline = adjustDeadlineForRTT(deadline, now, defaultRTT)
				request.Timeout = milliseconds(deadlineToTimeout(deadline, now))
			}
			return c.writeRequest(deleteTopics, v0, id, request)
		},
		func(deadline time.Time, size int) error {
			return expectZeroSize(func() (remain int, err error) {
				return (&response).readFrom(&c.rbuf, size)
			}())
		},
	)
	if err != nil {
		return deleteTopicsResponseV0{}, err
	}
	for _, c := range response.TopicErrorCodes {
		if c.ErrorCode != 0 {
			return response, Error(c.ErrorCode)
		}
	}
	return response, nil
}
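An illustrative deletion call against the API above (broker address and topic hypothetical):

package main

import (
	"context"
	"log"

	kafka "github.com/segmentio/kafka-go"
)

func main() {
	client := &kafka.Client{Addr: kafka.TCP("localhost:9092")} // hypothetical broker

	resp, err := client.DeleteTopics(context.Background(), &kafka.DeleteTopicsRequest{
		Addr:   client.Addr,
		Topics: []string{"my-topic"}, // hypothetical topic
	})
	if err != nil {
		log.Fatal(err)
	}
	for topic, topicErr := range resp.Errors {
		if topicErr != nil {
			log.Printf("delete %s: %v", topic, topicErr)
		}
	}
}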
162 vendor/github.com/segmentio/kafka-go/describeconfigs.go generated vendored Normal file
@@ -0,0 +1,162 @@
package kafka

import (
	"context"
	"fmt"
	"net"
	"time"

	"github.com/segmentio/kafka-go/protocol/describeconfigs"
)

// DescribeConfigsRequest represents a request sent to a kafka broker to describe configs.
type DescribeConfigsRequest struct {
	// Address of the kafka broker to send the request to.
	Addr net.Addr

	// List of resources to get details for.
	Resources []DescribeConfigRequestResource

	// Ignored if API version is less than v1.
	IncludeSynonyms bool

	// Ignored if API version is less than v3.
	IncludeDocumentation bool
}

type DescribeConfigRequestResource struct {
	// Resource Type
	ResourceType ResourceType

	// Resource Name
	ResourceName string

	// ConfigNames is a list of configurations to describe.
	ConfigNames []string
}

// DescribeConfigsResponse represents a response from a kafka broker to a describe config request.
type DescribeConfigsResponse struct {
	// The amount of time that the broker throttled the request.
	Throttle time.Duration

	// Resources
	Resources []DescribeConfigResponseResource
}

// DescribeConfigResponseResource.
type DescribeConfigResponseResource struct {
	// Resource Type
	ResourceType int8

	// Resource Name
	ResourceName string

	// Error
	Error error

	// ConfigEntries
	ConfigEntries []DescribeConfigResponseConfigEntry
}

// DescribeConfigResponseConfigEntry.
type DescribeConfigResponseConfigEntry struct {
	ConfigName  string
	ConfigValue string
	ReadOnly    bool

	// Ignored if API version is greater than v0.
	IsDefault bool

	// Ignored if API version is less than v1.
	ConfigSource int8

	IsSensitive bool

	// Ignored if API version is less than v1.
	ConfigSynonyms []DescribeConfigResponseConfigSynonym

	// Ignored if API version is less than v3.
	ConfigType int8

	// Ignored if API version is less than v3.
	ConfigDocumentation string
}

// DescribeConfigResponseConfigSynonym.
type DescribeConfigResponseConfigSynonym struct {
	// Ignored if API version is less than v1.
	ConfigName string

	// Ignored if API version is less than v1.
	ConfigValue string

	// Ignored if API version is less than v1.
	ConfigSource int8
}

// DescribeConfigs sends a config description request to a kafka broker and returns the
// response.
func (c *Client) DescribeConfigs(ctx context.Context, req *DescribeConfigsRequest) (*DescribeConfigsResponse, error) {
	resources := make([]describeconfigs.RequestResource, len(req.Resources))

	for i, t := range req.Resources {
		resources[i] = describeconfigs.RequestResource{
			ResourceType: int8(t.ResourceType),
			ResourceName: t.ResourceName,
			ConfigNames:  t.ConfigNames,
		}
	}

	m, err := c.roundTrip(ctx, req.Addr, &describeconfigs.Request{
		Resources:            resources,
		IncludeSynonyms:      req.IncludeSynonyms,
		IncludeDocumentation: req.IncludeDocumentation,
	})
	if err != nil {
		return nil, fmt.Errorf("kafka.(*Client).DescribeConfigs: %w", err)
	}

	res := m.(*describeconfigs.Response)
	ret := &DescribeConfigsResponse{
		Throttle:  makeDuration(res.ThrottleTimeMs),
		Resources: make([]DescribeConfigResponseResource, len(res.Resources)),
	}

	for i, t := range res.Resources {
		configEntries := make([]DescribeConfigResponseConfigEntry, len(t.ConfigEntries))
		for j, v := range t.ConfigEntries {
			configSynonyms := make([]DescribeConfigResponseConfigSynonym, len(v.ConfigSynonyms))
			for k, cs := range v.ConfigSynonyms {
				configSynonyms[k] = DescribeConfigResponseConfigSynonym{
					ConfigName:   cs.ConfigName,
					ConfigValue:  cs.ConfigValue,
					ConfigSource: cs.ConfigSource,
				}
			}

			configEntries[j] = DescribeConfigResponseConfigEntry{
				ConfigName:          v.ConfigName,
				ConfigValue:         v.ConfigValue,
				ReadOnly:            v.ReadOnly,
				ConfigSource:        v.ConfigSource,
				IsDefault:           v.IsDefault,
				IsSensitive:         v.IsSensitive,
				ConfigSynonyms:      configSynonyms,
				ConfigType:          v.ConfigType,
				ConfigDocumentation: v.ConfigDocumentation,
			}
		}

		ret.Resources[i] = DescribeConfigResponseResource{
			ResourceType:  t.ResourceType,
			ResourceName:  t.ResourceName,
			Error:         makeError(t.ErrorCode, t.ErrorMessage),
			ConfigEntries: configEntries,
		}
	}

	return ret, nil
}
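An illustrative query against the API above; the broker, topic, and config key are hypothetical, and ResourceType constants are assumed from elsewhere in the package:

package main

import (
	"context"
	"fmt"
	"log"

	kafka "github.com/segmentio/kafka-go"
)

func main() {
	client := &kafka.Client{Addr: kafka.TCP("localhost:9092")} // hypothetical broker

	resp, err := client.DescribeConfigs(context.Background(), &kafka.DescribeConfigsRequest{
		Addr: client.Addr,
		Resources: []kafka.DescribeConfigRequestResource{{
			ResourceType: kafka.ResourceTypeTopic,
			ResourceName: "my-topic",
			ConfigNames:  []string{"retention.ms"}, // leave nil to fetch all entries
		}},
	})
	if err != nil {
		log.Fatal(err)
	}
	for _, res := range resp.Resources {
		for _, entry := range res.ConfigEntries {
			fmt.Printf("%s=%s\n", entry.ConfigName, entry.ConfigValue)
		}
	}
}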
298 vendor/github.com/segmentio/kafka-go/describegroups.go generated vendored Normal file
@@ -0,0 +1,298 @@
package kafka

import (
	"bufio"
	"bytes"
	"context"
	"fmt"
	"net"

	"github.com/segmentio/kafka-go/protocol/describegroups"
)

// DescribeGroupsRequest is a request to the DescribeGroups API.
type DescribeGroupsRequest struct {
	// Addr is the address of the kafka broker to send the request to.
	Addr net.Addr

	// GroupIDs is a slice of groups to get details for.
	GroupIDs []string
}

// DescribeGroupsResponse is a response from the DescribeGroups API.
type DescribeGroupsResponse struct {
	// Groups is a slice of details for the requested groups.
	Groups []DescribeGroupsResponseGroup
}

// DescribeGroupsResponseGroup contains the response details for a single group.
type DescribeGroupsResponseGroup struct {
	// Error is set to a non-nil value if there was an error fetching the details
	// for this group.
	Error error

	// GroupID is the ID of the group.
	GroupID string

	// GroupState is a description of the group state.
	GroupState string

	// Members contains details about each member of the group.
	Members []DescribeGroupsResponseMember
}

// DescribeGroupsResponseMember represents the membership information for a single group member.
type DescribeGroupsResponseMember struct {
	// MemberID is the ID of the group member.
	MemberID string

	// ClientID is the ID of the client that the group member is using.
	ClientID string

	// ClientHost is the host of the client that the group member is connecting from.
	ClientHost string

	// MemberMetadata contains metadata about this group member.
	MemberMetadata DescribeGroupsResponseMemberMetadata

	// MemberAssignments contains the topic partitions that this member is assigned to.
	MemberAssignments DescribeGroupsResponseAssignments
}

// DescribeGroupsResponseMemberMetadata stores metadata associated with a group member.
type DescribeGroupsResponseMemberMetadata struct {
	// Version is the version of the metadata.
	Version int

	// Topics is the list of topics that the member is assigned to.
	Topics []string

	// UserData is the user data for the member.
	UserData []byte

	// OwnedPartitions contains the partitions owned by this group member; only set if
	// consumers are using a cooperative rebalancing assignor protocol.
	OwnedPartitions []DescribeGroupsResponseMemberMetadataOwnedPartition
}

type DescribeGroupsResponseMemberMetadataOwnedPartition struct {
	// Topic is the name of the topic.
	Topic string

	// Partitions is the partitions that are owned by the group in the topic.
	Partitions []int
}

// DescribeGroupsResponseAssignments stores the topic partition assignment data for a group member.
type DescribeGroupsResponseAssignments struct {
	// Version is the version of the assignments data.
	Version int

	// Topics contains the details of the partition assignments for each topic.
	Topics []GroupMemberTopic

	// UserData is the user data for the member.
	UserData []byte
}

// GroupMemberTopic is a mapping from a topic to a list of partitions in the topic. It is used
// to represent the topic partitions that have been assigned to a group member.
type GroupMemberTopic struct {
	// Topic is the name of the topic.
	Topic string

	// Partitions is a slice of partition IDs that this member is assigned to in the topic.
	Partitions []int
}

// DescribeGroups calls the Kafka DescribeGroups API to get information about one or more
// consumer groups. See https://kafka.apache.org/protocol#The_Messages_DescribeGroups for
// more information.
func (c *Client) DescribeGroups(
	ctx context.Context,
	req *DescribeGroupsRequest,
) (*DescribeGroupsResponse, error) {
	protoResp, err := c.roundTrip(
		ctx,
		req.Addr,
		&describegroups.Request{
			Groups: req.GroupIDs,
		},
	)
	if err != nil {
		return nil, err
	}
	apiResp := protoResp.(*describegroups.Response)
	resp := &DescribeGroupsResponse{}

	for _, apiGroup := range apiResp.Groups {
		group := DescribeGroupsResponseGroup{
			Error:      makeError(apiGroup.ErrorCode, ""),
			GroupID:    apiGroup.GroupID,
			GroupState: apiGroup.GroupState,
		}

		for _, member := range apiGroup.Members {
			decodedMetadata, err := decodeMemberMetadata(member.MemberMetadata)
			if err != nil {
				return nil, err
			}
			decodedAssignments, err := decodeMemberAssignments(member.MemberAssignment)
			if err != nil {
				return nil, err
			}

			group.Members = append(group.Members, DescribeGroupsResponseMember{
				MemberID:          member.MemberID,
				ClientID:          member.ClientID,
				ClientHost:        member.ClientHost,
				MemberAssignments: decodedAssignments,
				MemberMetadata:    decodedMetadata,
			})
		}
		resp.Groups = append(resp.Groups, group)
	}

	return resp, nil
}

// decodeMemberMetadata converts raw metadata bytes to a
// DescribeGroupsResponseMemberMetadata struct.
//
// See https://github.com/apache/kafka/blob/2.4/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ConsumerProtocol.java#L49
// for protocol details.
func decodeMemberMetadata(rawMetadata []byte) (DescribeGroupsResponseMemberMetadata, error) {
	mm := DescribeGroupsResponseMemberMetadata{}

	if len(rawMetadata) == 0 {
		return mm, nil
	}

	buf := bytes.NewBuffer(rawMetadata)
	bufReader := bufio.NewReader(buf)
	remain := len(rawMetadata)

	var err error
	var version16 int16

	if remain, err = readInt16(bufReader, remain, &version16); err != nil {
		return mm, err
	}
	mm.Version = int(version16)

	if remain, err = readStringArray(bufReader, remain, &mm.Topics); err != nil {
		return mm, err
	}
	if remain, err = readBytes(bufReader, remain, &mm.UserData); err != nil {
		return mm, err
	}

	if mm.Version == 1 && remain > 0 {
		fn := func(r *bufio.Reader, size int) (fnRemain int, fnErr error) {
			op := DescribeGroupsResponseMemberMetadataOwnedPartition{}
			if fnRemain, fnErr = readString(r, size, &op.Topic); fnErr != nil {
				return
			}

			ps := []int32{}
			if fnRemain, fnErr = readInt32Array(r, fnRemain, &ps); fnErr != nil {
				return
			}

			for _, p := range ps {
				op.Partitions = append(op.Partitions, int(p))
			}

			mm.OwnedPartitions = append(mm.OwnedPartitions, op)
			return
		}

		if remain, err = readArrayWith(bufReader, remain, fn); err != nil {
			return mm, err
		}
	}

	if remain != 0 {
		return mm, fmt.Errorf("got non-zero number of bytes remaining: %d", remain)
	}

	return mm, nil
}

// decodeMemberAssignments converts raw assignment bytes to a DescribeGroupsResponseAssignments
// struct.
//
// See https://github.com/apache/kafka/blob/2.4/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ConsumerProtocol.java#L49
// for protocol details.
func decodeMemberAssignments(rawAssignments []byte) (DescribeGroupsResponseAssignments, error) {
	ma := DescribeGroupsResponseAssignments{}

	if len(rawAssignments) == 0 {
		return ma, nil
	}

	buf := bytes.NewBuffer(rawAssignments)
	bufReader := bufio.NewReader(buf)
	remain := len(rawAssignments)

	var err error
	var version16 int16

	if remain, err = readInt16(bufReader, remain, &version16); err != nil {
		return ma, err
	}
	ma.Version = int(version16)

	fn := func(r *bufio.Reader, size int) (fnRemain int, fnErr error) {
		item := GroupMemberTopic{}

		if fnRemain, fnErr = readString(r, size, &item.Topic); fnErr != nil {
			return
		}

		partitions := []int32{}

		if fnRemain, fnErr = readInt32Array(r, fnRemain, &partitions); fnErr != nil {
			return
		}
		for _, partition := range partitions {
			item.Partitions = append(item.Partitions, int(partition))
		}

		ma.Topics = append(ma.Topics, item)
		return
	}
	if remain, err = readArrayWith(bufReader, remain, fn); err != nil {
		return ma, err
	}

	if remain, err = readBytes(bufReader, remain, &ma.UserData); err != nil {
		return ma, err
	}

	if remain != 0 {
		return ma, fmt.Errorf("got non-zero number of bytes remaining: %d", remain)
	}

	return ma, nil
}

// readInt32Array reads an array of int32s. It's adapted from the implementation of
// readStringArray.
func readInt32Array(r *bufio.Reader, sz int, v *[]int32) (remain int, err error) {
	var content []int32
	fn := func(r *bufio.Reader, size int) (fnRemain int, fnErr error) {
		var value int32
		if fnRemain, fnErr = readInt32(r, size, &value); fnErr != nil {
			return
		}
		content = append(content, value)
		return
	}
	if remain, err = readArrayWith(r, sz, fn); err != nil {
		return
	}

	*v = content
	return
}
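An illustrative lookup using the API above (broker address and group ID hypothetical):

package main

import (
	"context"
	"fmt"
	"log"

	kafka "github.com/segmentio/kafka-go"
)

func main() {
	client := &kafka.Client{Addr: kafka.TCP("localhost:9092")} // hypothetical broker

	resp, err := client.DescribeGroups(context.Background(), &kafka.DescribeGroupsRequest{
		Addr:     client.Addr,
		GroupIDs: []string{"my-consumer-group"}, // hypothetical group
	})
	if err != nil {
		log.Fatal(err)
	}
	for _, g := range resp.Groups {
		fmt.Printf("group=%s state=%s members=%d\n", g.GroupID, g.GroupState, len(g.Members))
	}
}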
493 vendor/github.com/segmentio/kafka-go/dialer.go generated vendored Normal file
@@ -0,0 +1,493 @@
|
||||
package kafka
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/tls"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"net"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/segmentio/kafka-go/sasl"
|
||||
)
|
||||
|
||||
// The Dialer type mirrors the net.Dialer API but is designed to open kafka
|
||||
// connections instead of raw network connections.
|
||||
type Dialer struct {
|
||||
// Unique identifier for client connections established by this Dialer.
|
||||
ClientID string
|
||||
|
||||
// Optionally specifies the function that the dialer uses to establish
|
||||
// network connections. If nil, net.(*Dialer).DialContext is used instead.
|
||||
//
|
||||
// When DialFunc is set, LocalAddr, DualStack, FallbackDelay, and KeepAlive
|
||||
// are ignored.
|
||||
DialFunc func(ctx context.Context, network string, address string) (net.Conn, error)
|
||||
|
||||
// Timeout is the maximum amount of time a dial will wait for a connect to
|
||||
// complete. If Deadline is also set, it may fail earlier.
|
||||
//
|
||||
// The default is no timeout.
|
||||
//
|
||||
// When dialing a name with multiple IP addresses, the timeout may be
|
||||
// divided between them.
|
||||
//
|
||||
// With or without a timeout, the operating system may impose its own
|
||||
// earlier timeout. For instance, TCP timeouts are often around 3 minutes.
|
||||
Timeout time.Duration
|
||||
|
||||
// Deadline is the absolute point in time after which dials will fail.
|
||||
// If Timeout is set, it may fail earlier.
|
||||
// Zero means no deadline, or dependent on the operating system as with the
|
||||
// Timeout option.
|
||||
Deadline time.Time
|
||||
|
||||
// LocalAddr is the local address to use when dialing an address.
|
||||
// The address must be of a compatible type for the network being dialed.
|
||||
// If nil, a local address is automatically chosen.
|
||||
LocalAddr net.Addr
|
||||
|
||||
// DualStack enables RFC 6555-compliant "Happy Eyeballs" dialing when the
|
||||
// network is "tcp" and the destination is a host name with both IPv4 and
|
||||
// IPv6 addresses. This allows a client to tolerate networks where one
|
||||
// address family is silently broken.
|
||||
DualStack bool
|
||||
|
||||
// FallbackDelay specifies the length of time to wait before spawning a
|
||||
// fallback connection, when DualStack is enabled.
|
||||
// If zero, a default delay of 300ms is used.
|
||||
FallbackDelay time.Duration
|
||||
|
||||
// KeepAlive specifies the keep-alive period for an active network
|
||||
// connection.
|
||||
// If zero, keep-alives are not enabled. Network protocols that do not
|
||||
// support keep-alives ignore this field.
|
||||
KeepAlive time.Duration
|
||||
|
||||
// Resolver optionally gives a hook to convert the broker address into an
|
||||
// alternate host or IP address which is useful for custom service discovery.
|
||||
// If a custom resolver returns any possible hosts, the first one will be
|
||||
// used and the original discarded. If a port number is included with the
|
||||
// resolved host, it will only be used if a port number was not previously
|
||||
// specified. If no port is specified or resolved, the default of 9092 will be
|
||||
// used.
|
||||
Resolver Resolver
|
||||
|
||||
// TLS enables Dialer to open secure connections. If nil, standard net.Conn
|
||||
// will be used.
|
||||
TLS *tls.Config
|
||||
|
||||
// SASLMechanism configures the Dialer to use SASL authentication. If nil,
|
||||
// no authentication will be performed.
|
||||
SASLMechanism sasl.Mechanism
|
||||
|
||||
// The transactional id to use for transactional delivery. Idempotent
|
||||
// deliver should be enabled if transactional id is configured.
|
||||
// For more details look at transactional.id description here: http://kafka.apache.org/documentation.html#producerconfigs
|
||||
// Empty string means that the connection will be non-transactional.
|
||||
TransactionalID string
|
||||
}
|
||||
|
||||
// Dial connects to the address on the named network.
|
||||
func (d *Dialer) Dial(network string, address string) (*Conn, error) {
|
||||
return d.DialContext(context.Background(), network, address)
|
||||
}
|
||||
|
||||
// DialContext connects to the address on the named network using the provided
|
||||
// context.
|
||||
//
|
||||
// The provided Context must be non-nil. If the context expires before the
|
||||
// connection is complete, an error is returned. Once successfully connected,
|
||||
// any expiration of the context will not affect the connection.
|
||||
//
|
||||
// When using TCP, and the host in the address parameter resolves to multiple
|
||||
// network addresses, any dial timeout (from d.Timeout or ctx) is spread over
|
||||
// each consecutive dial, such that each is given an appropriate fraction of the
|
||||
// time to connect. For example, if a host has 4 IP addresses and the timeout is
|
||||
// 1 minute, the connect to each single address will be given 15 seconds to
|
||||
// complete before trying the next one.
|
||||
func (d *Dialer) DialContext(ctx context.Context, network string, address string) (*Conn, error) {
|
||||
return d.connect(
|
||||
ctx,
|
||||
network,
|
||||
address,
|
||||
ConnConfig{
|
||||
ClientID: d.ClientID,
|
||||
TransactionalID: d.TransactionalID,
|
||||
},
|
||||
)
|
||||
}
|
||||
|
||||
// DialLeader opens a connection to the leader of the partition for a given
|
||||
// topic.
|
||||
//
|
||||
// The address given to the DialContext method may not be the one that the
|
||||
// connection will end up being established to, because the dialer will lookup
|
||||
// the partition leader for the topic and return a connection to that server.
|
||||
// The original address is only used as a mechanism to discover the
|
||||
// configuration of the kafka cluster that we're connecting to.
|
||||
func (d *Dialer) DialLeader(ctx context.Context, network string, address string, topic string, partition int) (*Conn, error) {
|
||||
p, err := d.LookupPartition(ctx, network, address, topic, partition)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return d.DialPartition(ctx, network, address, p)
|
||||
}
|
||||
|
||||
// DialPartition opens a connection to the leader of the partition specified by partition
|
||||
// descriptor. It's strongly advised to use descriptor of the partition that comes out of
|
||||
// functions LookupPartition or LookupPartitions.
|
||||
func (d *Dialer) DialPartition(ctx context.Context, network string, address string, partition Partition) (*Conn, error) {
|
||||
return d.connect(ctx, network, net.JoinHostPort(partition.Leader.Host, strconv.Itoa(partition.Leader.Port)), ConnConfig{
|
||||
ClientID: d.ClientID,
|
||||
Topic: partition.Topic,
|
||||
Partition: partition.ID,
|
||||
Broker: partition.Leader.ID,
|
||||
Rack: partition.Leader.Rack,
|
||||
TransactionalID: d.TransactionalID,
|
||||
})
|
||||
}
|
||||
|
||||
// LookupLeader searches for the kafka broker that is the leader of the
|
||||
// partition for a given topic, returning a Broker value representing it.
|
||||
func (d *Dialer) LookupLeader(ctx context.Context, network string, address string, topic string, partition int) (Broker, error) {
|
||||
p, err := d.LookupPartition(ctx, network, address, topic, partition)
|
||||
return p.Leader, err
|
||||
}
|
||||
|
||||
// LookupPartition searches for the description of the specified partition ID.
func (d *Dialer) LookupPartition(ctx context.Context, network string, address string, topic string, partition int) (Partition, error) {
	c, err := d.DialContext(ctx, network, address)
	if err != nil {
		return Partition{}, err
	}
	defer c.Close()

	brkch := make(chan Partition, 1)
	errch := make(chan error, 1)

	go func() {
		for attempt := 0; true; attempt++ {
			if attempt != 0 {
				if !sleep(ctx, backoff(attempt, 100*time.Millisecond, 10*time.Second)) {
					errch <- ctx.Err()
					return
				}
			}

			partitions, err := c.ReadPartitions(topic)
			if err != nil {
				if isTemporary(err) {
					continue
				}
				errch <- err
				return
			}

			for _, p := range partitions {
				if p.ID == partition {
					brkch <- p
					return
				}
			}
		}

		errch <- UnknownTopicOrPartition
	}()

	var prt Partition
	select {
	case prt = <-brkch:
	case err = <-errch:
	case <-ctx.Done():
		err = ctx.Err()
	}
	return prt, err
}

// LookupPartitions returns the list of partitions that exist for the given topic.
func (d *Dialer) LookupPartitions(ctx context.Context, network string, address string, topic string) ([]Partition, error) {
	conn, err := d.DialContext(ctx, network, address)
	if err != nil {
		return nil, err
	}
	defer conn.Close()

	prtch := make(chan []Partition, 1)
	errch := make(chan error, 1)

	go func() {
		if prt, err := conn.ReadPartitions(topic); err != nil {
			errch <- err
		} else {
			prtch <- prt
		}
	}()

	var prt []Partition
	select {
	case prt = <-prtch:
	case err = <-errch:
	case <-ctx.Done():
		err = ctx.Err()
	}
	return prt, err
}

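// Example usage (a minimal sketch, not part of the library; assumes a broker
// reachable at localhost:9092):
//
//	partitions, err := kafka.LookupPartitions(context.Background(), "tcp", "localhost:9092", "example-topic")
//	if err != nil {
//		log.Fatal(err)
//	}
//	for _, p := range partitions {
//		fmt.Printf("partition %d: leader %s:%d\n", p.ID, p.Leader.Host, p.Leader.Port)
//	}
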
// connectTLS returns a tls.Conn that has already completed the Handshake.
func (d *Dialer) connectTLS(ctx context.Context, conn net.Conn, config *tls.Config) (tlsConn *tls.Conn, err error) {
	tlsConn = tls.Client(conn, config)
	errch := make(chan error)

	go func() {
		defer close(errch)
		errch <- tlsConn.Handshake()
	}()

	select {
	case <-ctx.Done():
		conn.Close()
		tlsConn.Close()
		<-errch // ignore possible error from Handshake
		err = ctx.Err()

	case err = <-errch:
	}

	return
}

// connect opens a socket connection to the broker, wraps it to create a
// kafka connection, and performs SASL authentication if configured to do so.
func (d *Dialer) connect(ctx context.Context, network, address string, connCfg ConnConfig) (*Conn, error) {
	if d.Timeout != 0 {
		var cancel context.CancelFunc
		ctx, cancel = context.WithTimeout(ctx, d.Timeout)
		defer cancel()
	}

	if !d.Deadline.IsZero() {
		var cancel context.CancelFunc
		ctx, cancel = context.WithDeadline(ctx, d.Deadline)
		defer cancel()
	}

	c, err := d.dialContext(ctx, network, address)
	if err != nil {
		return nil, fmt.Errorf("failed to dial: %w", err)
	}

	conn := NewConnWith(c, connCfg)

	if d.SASLMechanism != nil {
		host, port, err := splitHostPortNumber(address)
		if err != nil {
			return nil, fmt.Errorf("could not determine host/port for SASL authentication: %w", err)
		}
		metadata := &sasl.Metadata{
			Host: host,
			Port: port,
		}
		if err := d.authenticateSASL(sasl.WithMetadata(ctx, metadata), conn); err != nil {
			_ = conn.Close()
			return nil, fmt.Errorf("could not successfully authenticate to %s:%d with SASL: %w", host, port, err)
		}
	}

	return conn, nil
}

// authenticateSASL performs all of the required requests to authenticate this
// connection. If any step fails, this function returns with an error. A nil
// error indicates successful authentication.
//
// In case of error, this function *does not* close the connection. That is the
// responsibility of the caller.
func (d *Dialer) authenticateSASL(ctx context.Context, conn *Conn) error {
	if err := conn.saslHandshake(d.SASLMechanism.Name()); err != nil {
		return fmt.Errorf("SASL handshake failed: %w", err)
	}

	sess, state, err := d.SASLMechanism.Start(ctx)
	if err != nil {
		return fmt.Errorf("SASL authentication process could not be started: %w", err)
	}

	for completed := false; !completed; {
		challenge, err := conn.saslAuthenticate(state)
		switch {
		case err == nil:
		case errors.Is(err, io.EOF):
			// The broker may communicate a failed exchange by closing the
			// connection (esp. in the case where we're passing opaque SASL
			// data over the wire since there's no protocol info).
			return SASLAuthenticationFailed
		default:
			return err
		}

		completed, state, err = sess.Next(ctx, challenge)
		if err != nil {
			return fmt.Errorf("SASL authentication process has failed: %w", err)
		}
	}

	return nil
}

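// Example configuration (a minimal sketch, not part of the library; assumes
// the PLAIN mechanism from github.com/segmentio/kafka-go/sasl/plain and a
// SASL listener on localhost:9093, matching the docker-compose files further
// below):
//
//	dialer := &kafka.Dialer{
//		Timeout:       10 * time.Second,
//		DualStack:     true,
//		SASLMechanism: plain.Mechanism{Username: "adminplain", Password: "admin-secret"},
//	}
//	conn, err := dialer.DialContext(context.Background(), "tcp", "localhost:9093")
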
func (d *Dialer) dialContext(ctx context.Context, network string, addr string) (net.Conn, error) {
	address, err := lookupHost(ctx, addr, d.Resolver)
	if err != nil {
		return nil, fmt.Errorf("failed to resolve host: %w", err)
	}

	dial := d.DialFunc
	if dial == nil {
		dial = (&net.Dialer{
			LocalAddr:     d.LocalAddr,
			DualStack:     d.DualStack,
			FallbackDelay: d.FallbackDelay,
			KeepAlive:     d.KeepAlive,
		}).DialContext
	}

	conn, err := dial(ctx, network, address)
	if err != nil {
		return nil, fmt.Errorf("failed to open connection to %s: %w", address, err)
	}

	if d.TLS != nil {
		c := d.TLS
		// If no ServerName is set, infer the ServerName
		// from the hostname we're connecting to.
		if c.ServerName == "" {
			c = d.TLS.Clone()
			// Copied from tls.go in the standard library.
			colonPos := strings.LastIndex(address, ":")
			if colonPos == -1 {
				colonPos = len(address)
			}
			hostname := address[:colonPos]
			c.ServerName = hostname
		}
		return d.connectTLS(ctx, conn, c)
	}

	return conn, nil
}

// DefaultDialer is the default dialer used when none is specified.
var DefaultDialer = &Dialer{
	Timeout:   10 * time.Second,
	DualStack: true,
}

// Dial is a convenience wrapper for DefaultDialer.Dial.
func Dial(network string, address string) (*Conn, error) {
	return DefaultDialer.Dial(network, address)
}

// DialContext is a convenience wrapper for DefaultDialer.DialContext.
func DialContext(ctx context.Context, network string, address string) (*Conn, error) {
	return DefaultDialer.DialContext(ctx, network, address)
}

// DialLeader is a convenience wrapper for DefaultDialer.DialLeader.
func DialLeader(ctx context.Context, network string, address string, topic string, partition int) (*Conn, error) {
	return DefaultDialer.DialLeader(ctx, network, address, topic, partition)
}

// DialPartition is a convenience wrapper for DefaultDialer.DialPartition.
func DialPartition(ctx context.Context, network string, address string, partition Partition) (*Conn, error) {
	return DefaultDialer.DialPartition(ctx, network, address, partition)
}

// LookupPartition is a convenience wrapper for DefaultDialer.LookupPartition.
func LookupPartition(ctx context.Context, network string, address string, topic string, partition int) (Partition, error) {
	return DefaultDialer.LookupPartition(ctx, network, address, topic, partition)
}

// LookupPartitions is a convenience wrapper for DefaultDialer.LookupPartitions.
func LookupPartitions(ctx context.Context, network string, address string, topic string) ([]Partition, error) {
	return DefaultDialer.LookupPartitions(ctx, network, address, topic)
}

func sleep(ctx context.Context, duration time.Duration) bool {
	if duration == 0 {
		select {
		default:
			return true
		case <-ctx.Done():
			return false
		}
	}
	timer := time.NewTimer(duration)
	defer timer.Stop()
	select {
	case <-timer.C:
		return true
	case <-ctx.Done():
		return false
	}
}

func backoff(attempt int, min time.Duration, max time.Duration) time.Duration {
	d := time.Duration(attempt*attempt) * min
	if d > max {
		d = max
	}
	return d
}

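// For illustration (not part of the library): backoff grows quadratically with
// the attempt number. With min = 100ms and max = 10s, as LookupPartition uses
// above, attempts 1, 2, 3, 4, ... sleep 100ms, 400ms, 900ms, 1.6s, ... and cap
// at 10s from attempt 10 onward.
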
func canonicalAddress(s string) string {
	return net.JoinHostPort(splitHostPort(s))
}

func splitHostPort(s string) (host string, port string) {
	host, port, _ = net.SplitHostPort(s)
	if len(host) == 0 && len(port) == 0 {
		host = s
		port = "9092"
	}
	return
}

func splitHostPortNumber(s string) (host string, portNumber int, err error) {
	host, port := splitHostPort(s)
	portNumber, err = strconv.Atoi(port)
	if err != nil {
		return host, 0, fmt.Errorf("%s: %w", s, err)
	}
	return host, portNumber, nil
}

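// For illustration (hypothetical inputs, not part of the library):
// splitHostPort("broker-1:9093") returns ("broker-1", "9093"), while
// splitHostPort("broker-1") falls back to the default Kafka port and returns
// ("broker-1", "9092"); splitHostPortNumber additionally converts the port
// to an int.
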
func lookupHost(ctx context.Context, address string, resolver Resolver) (string, error) {
	host, port := splitHostPort(address)

	if resolver != nil {
		resolved, err := resolver.LookupHost(ctx, host)
		if err != nil {
			return "", fmt.Errorf("failed to resolve host %s: %w", host, err)
		}

		// If the resolver doesn't return anything, we'll fall back on the
		// provided address instead.
		if len(resolved) > 0 {
			resolvedHost, resolvedPort := splitHostPort(resolved[0])

			// We'll always prefer the resolved host.
			host = resolvedHost

			// In the case of the port though, the provided address takes
			// priority, and we only use the resolved address to set the port
			// when not specified.
			if port == "" {
				port = resolvedPort
			}
		}
	}

	return net.JoinHostPort(host, port), nil
}
38
vendor/github.com/segmentio/kafka-go/discard.go
generated
vendored
Normal file
@@ -0,0 +1,38 @@
package kafka

import "bufio"

// discardN skips n bytes from r, bounded by the sz bytes remaining in the
// current message; it returns the remaining size, and errShortRead when n
// exceeds sz.
func discardN(r *bufio.Reader, sz int, n int) (int, error) {
	var err error
	if n <= sz {
		n, err = r.Discard(n)
	} else {
		n, err = r.Discard(sz)
		if err == nil {
			err = errShortRead
		}
	}
	return sz - n, err
}

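// For illustration (hypothetical values, not part of the library): with
// sz = 10 bytes remaining and n = 4 bytes to skip, discardN returns (6, nil);
// with n = 12 it can only discard the 10 remaining bytes and returns
// (0, errShortRead).
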
// discardInt32 skips a 4-byte integer.
func discardInt32(r *bufio.Reader, sz int) (int, error) {
	return discardN(r, sz, 4)
}

// discardString skips a length-prefixed string, treating a negative length
// as a null string.
func discardString(r *bufio.Reader, sz int) (int, error) {
	return readStringWith(r, sz, func(r *bufio.Reader, sz int, n int) (int, error) {
		if n < 0 {
			return sz, nil
		}
		return discardN(r, sz, n)
	})
}

// discardBytes skips a length-prefixed byte sequence, treating a negative
// length as null.
func discardBytes(r *bufio.Reader, sz int) (int, error) {
	return readBytesWith(r, sz, func(r *bufio.Reader, sz int, n int) (int, error) {
		if n < 0 {
			return sz, nil
		}
		return discardN(r, sz, n)
	})
}
32
vendor/github.com/segmentio/kafka-go/docker-compose-241.yml
generated
vendored
Normal file
@@ -0,0 +1,32 @@
version: "3"
services:
  kafka:
    image: wurstmeister/kafka:2.12-2.4.1
    restart: on-failure:3
    links:
    - zookeeper
    ports:
    - 9092:9092
    - 9093:9093
    environment:
      KAFKA_VERSION: '2.4.1'
      KAFKA_BROKER_ID: '1'
      KAFKA_CREATE_TOPICS: 'test-writer-0:3:1,test-writer-1:3:1'
      KAFKA_DELETE_TOPIC_ENABLE: 'true'
      KAFKA_ADVERTISED_HOST_NAME: 'localhost'
      KAFKA_ADVERTISED_PORT: '9092'
      KAFKA_ZOOKEEPER_CONNECT: 'zookeeper:2181'
      KAFKA_AUTO_CREATE_TOPICS_ENABLE: 'true'
      KAFKA_MESSAGE_MAX_BYTES: '200000000'
      KAFKA_LISTENERS: 'PLAINTEXT://:9092,SASL_PLAINTEXT://:9093'
      KAFKA_ADVERTISED_LISTENERS: 'PLAINTEXT://localhost:9092,SASL_PLAINTEXT://localhost:9093'
      KAFKA_SASL_ENABLED_MECHANISMS: 'PLAIN,SCRAM-SHA-256,SCRAM-SHA-512'
      KAFKA_OPTS: "-Djava.security.auth.login.config=/opt/kafka/config/kafka_server_jaas.conf"
      CUSTOM_INIT_SCRIPT: |-
        echo -e 'KafkaServer {\norg.apache.kafka.common.security.scram.ScramLoginModule required\n username="adminscram"\n password="admin-secret";\n org.apache.kafka.common.security.plain.PlainLoginModule required\n username="adminplain"\n password="admin-secret"\n user_adminplain="admin-secret";\n };' > /opt/kafka/config/kafka_server_jaas.conf;
        /opt/kafka/bin/kafka-configs.sh --zookeeper zookeeper:2181 --alter --add-config 'SCRAM-SHA-256=[password=admin-secret-256],SCRAM-SHA-512=[password=admin-secret-512]' --entity-type users --entity-name adminscram

  zookeeper:
    image: wurstmeister/zookeeper
    ports:
    - 2181:2181
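# Note: the CUSTOM_INIT_SCRIPT above provisions the SASL users exposed on the
# 9093 listener: the PLAIN user "adminplain" (password "admin-secret") and the
# SCRAM user "adminscram", whose passwords are "admin-secret-256" for
# SCRAM-SHA-256 and "admin-secret-512" for SCRAM-SHA-512.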
29
vendor/github.com/segmentio/kafka-go/docker-compose.010.yml
generated
vendored
Normal file
@@ -0,0 +1,29 @@
version: "3"
services:
  kafka:
    image: wurstmeister/kafka:0.10.1.1
    links:
    - zookeeper
    ports:
    - 9092:9092
    - 9093:9093
    environment:
      KAFKA_BROKER_ID: '1'
      KAFKA_CREATE_TOPICS: 'test-writer-0:3:1,test-writer-1:3:1'
      KAFKA_DELETE_TOPIC_ENABLE: 'true'
      KAFKA_ADVERTISED_HOST_NAME: 'localhost'
      KAFKA_ADVERTISED_PORT: '9092'
      KAFKA_ZOOKEEPER_CONNECT: 'zookeeper:2181'
      KAFKA_AUTO_CREATE_TOPICS_ENABLE: 'true'
      KAFKA_MESSAGE_MAX_BYTES: '200000000'
      KAFKA_LISTENERS: 'PLAINTEXT://:9092,SASL_PLAINTEXT://:9093'
      KAFKA_ADVERTISED_LISTENERS: 'PLAINTEXT://localhost:9092,SASL_PLAINTEXT://localhost:9093'
      KAFKA_SASL_ENABLED_MECHANISMS: 'PLAIN'
      KAFKA_OPTS: "-Djava.security.auth.login.config=/opt/kafka/config/kafka_server_jaas.conf"
      CUSTOM_INIT_SCRIPT: |-
        echo -e 'KafkaServer {\norg.apache.kafka.common.security.plain.PlainLoginModule required\n username="adminplain"\n password="admin-secret"\n user_adminplain="admin-secret";\n };' > /opt/kafka/config/kafka_server_jaas.conf;

  zookeeper:
    image: wurstmeister/zookeeper
    ports:
    - 2181:2181
34
vendor/github.com/segmentio/kafka-go/docker-compose.yml
generated
vendored
Normal file
@@ -0,0 +1,34 @@
version: "3"
services:
  kafka:
    image: wurstmeister/kafka:2.12-2.3.1
    restart: on-failure:3
    links:
    - zookeeper
    ports:
    - 9092:9092
    - 9093:9093
    environment:
      KAFKA_VERSION: '2.3.1'
      KAFKA_BROKER_ID: '1'
      KAFKA_CREATE_TOPICS: 'test-writer-0:3:1,test-writer-1:3:1'
      KAFKA_DELETE_TOPIC_ENABLE: 'true'
      KAFKA_ADVERTISED_HOST_NAME: 'localhost'
      KAFKA_ADVERTISED_PORT: '9092'
      KAFKA_ZOOKEEPER_CONNECT: 'zookeeper:2181'
      KAFKA_AUTO_CREATE_TOPICS_ENABLE: 'true'
      KAFKA_MESSAGE_MAX_BYTES: '200000000'
      KAFKA_LISTENERS: 'PLAINTEXT://:9092,SASL_PLAINTEXT://:9093'
      KAFKA_ADVERTISED_LISTENERS: 'PLAINTEXT://localhost:9092,SASL_PLAINTEXT://localhost:9093'
      KAFKA_SASL_ENABLED_MECHANISMS: 'PLAIN,SCRAM-SHA-256,SCRAM-SHA-512'
      KAFKA_AUTHORIZER_CLASS_NAME: 'kafka.security.auth.SimpleAclAuthorizer'
      KAFKA_ALLOW_EVERYONE_IF_NO_ACL_FOUND: 'true'
      KAFKA_OPTS: "-Djava.security.auth.login.config=/opt/kafka/config/kafka_server_jaas.conf"
      CUSTOM_INIT_SCRIPT: |-
        echo -e 'KafkaServer {\norg.apache.kafka.common.security.scram.ScramLoginModule required\n username="adminscram"\n password="admin-secret";\n org.apache.kafka.common.security.plain.PlainLoginModule required\n username="adminplain"\n password="admin-secret"\n user_adminplain="admin-secret";\n };' > /opt/kafka/config/kafka_server_jaas.conf;
        /opt/kafka/bin/kafka-configs.sh --zookeeper zookeeper:2181 --alter --add-config 'SCRAM-SHA-256=[password=admin-secret-256],SCRAM-SHA-512=[password=admin-secret-512]' --entity-type users --entity-name adminscram

  zookeeper:
    image: wurstmeister/zookeeper
    ports:
    - 2181:2181
89
vendor/github.com/segmentio/kafka-go/electleaders.go
generated
vendored
Normal file
@@ -0,0 +1,89 @@
package kafka

import (
	"context"
	"net"
	"time"

	"github.com/segmentio/kafka-go/protocol/electleaders"
)

// ElectLeadersRequest is a request to the ElectLeaders API.
type ElectLeadersRequest struct {
	// Addr is the address of the kafka broker to send the request to.
	Addr net.Addr

	// Topic is the name of the topic to do the leader elections in.
	Topic string

	// Partitions is the list of partitions to run leader elections for.
	Partitions []int

	// Timeout is the amount of time to wait for the election to run.
	Timeout time.Duration
}

// ElectLeadersResponse is a response from the ElectLeaders API.
type ElectLeadersResponse struct {
	// Error is set to a non-nil value if a top-level error occurred.
	Error error

	// PartitionResults contains the results for each partition leader election.
	PartitionResults []ElectLeadersResponsePartitionResult
}

// ElectLeadersResponsePartitionResult contains the response details for a single partition.
type ElectLeadersResponsePartitionResult struct {
	// Partition is the ID of the partition.
	Partition int

	// Error is set to a non-nil value if an error occurred electing leaders
	// for this partition.
	Error error
}

// ElectLeaders sends the request to the broker and maps the protocol-level
// response onto the exported types above.
func (c *Client) ElectLeaders(
	ctx context.Context,
	req *ElectLeadersRequest,
) (*ElectLeadersResponse, error) {
	partitions32 := make([]int32, 0, len(req.Partitions))
	for _, partition := range req.Partitions {
		partitions32 = append(partitions32, int32(partition))
	}

	protoResp, err := c.roundTrip(
		ctx,
		req.Addr,
		&electleaders.Request{
			TopicPartitions: []electleaders.RequestTopicPartitions{
				{
					Topic:        req.Topic,
					PartitionIDs: partitions32,
				},
			},
			TimeoutMs: int32(req.Timeout.Milliseconds()),
		},
	)
	if err != nil {
		return nil, err
	}
	apiResp := protoResp.(*electleaders.Response)

	resp := &ElectLeadersResponse{
		Error: makeError(apiResp.ErrorCode, ""),
	}

	for _, topicResult := range apiResp.ReplicaElectionResults {
		for _, partitionResult := range topicResult.PartitionResults {
			resp.PartitionResults = append(
				resp.PartitionResults,
				ElectLeadersResponsePartitionResult{
					Partition: int(partitionResult.PartitionID),
					Error:     makeError(partitionResult.ErrorCode, partitionResult.ErrorMessage),
				},
			)
		}
	}

	return resp, nil
}
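// Example usage (a minimal sketch, not part of the library; assumes a
// *kafka.Client named client configured for a reachable cluster and a topic
// named "example-topic"):
//
//	resp, err := client.ElectLeaders(context.Background(), &kafka.ElectLeadersRequest{
//		Topic:      "example-topic",
//		Partitions: []int{0, 1, 2},
//		Timeout:    10 * time.Second,
//	})
//	if err != nil {
//		log.Fatal(err)
//	}
//	for _, r := range resp.PartitionResults {
//		if r.Error != nil {
//			log.Printf("partition %d: %v", r.Partition, r.Error)
//		}
//	}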
61
vendor/github.com/segmentio/kafka-go/endtxn.go
generated
vendored
Normal file
@@ -0,0 +1,61 @@
package kafka

import (
	"context"
	"fmt"
	"net"
	"time"

	"github.com/segmentio/kafka-go/protocol/endtxn"
)

// EndTxnRequest represents a request sent to a kafka broker to end a transaction.
type EndTxnRequest struct {
	// Address of the kafka broker to send the request to.
	Addr net.Addr

	// The transactional id key.
	TransactionalID string

	// The Producer ID (PID) for the current producer session.
	ProducerID int

	// The epoch associated with the current producer session for the given PID.
	ProducerEpoch int

	// Committed should be set to true if the transaction was committed, false otherwise.
	Committed bool
}

// EndTxnResponse represents a response from a kafka broker to an end transaction request.
type EndTxnResponse struct {
	// The amount of time that the broker throttled the request.
	Throttle time.Duration

	// Error is non-nil if an error occurred and contains the kafka error code.
	// Programs may use the standard errors.Is function to test the error
	// against kafka error codes.
	Error error
}

// EndTxn sends an EndTxn request to a kafka broker and returns its response.
func (c *Client) EndTxn(ctx context.Context, req *EndTxnRequest) (*EndTxnResponse, error) {
	m, err := c.roundTrip(ctx, req.Addr, &endtxn.Request{
		TransactionalID: req.TransactionalID,
		ProducerID:      int64(req.ProducerID),
		ProducerEpoch:   int16(req.ProducerEpoch),
		Committed:       req.Committed,
	})
	if err != nil {
		return nil, fmt.Errorf("kafka.(*Client).EndTxn: %w", err)
	}

	r := m.(*endtxn.Response)

	res := &EndTxnResponse{
		Throttle: makeDuration(r.ThrottleTimeMs),
		Error:    makeError(r.ErrorCode, ""),
	}

	return res, nil
}
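// Example usage (a minimal sketch, not part of the library; assumes a
// *kafka.Client named client and producerID/producerEpoch values obtained
// from an earlier producer-session handshake):
//
//	resp, err := client.EndTxn(context.Background(), &kafka.EndTxnRequest{
//		TransactionalID: "example-transactional-id",
//		ProducerID:      producerID,
//		ProducerEpoch:   producerEpoch,
//		Committed:       true, // commit rather than abort the transaction
//	})
//	if err != nil {
//		log.Fatal(err)
//	}
//	if resp.Error != nil {
//		log.Fatal(resp.Error) // kafka error code, test with errors.Is
//	}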
712
vendor/github.com/segmentio/kafka-go/error.go
generated
vendored
Normal file
@@ -0,0 +1,712 @@
package kafka

import (
	"errors"
	"fmt"
	"io"
	"syscall"
)

// Error represents the different error codes that may be returned by kafka.
// https://kafka.apache.org/protocol#protocol_error_codes
type Error int

const (
	Unknown                            Error = -1
	OffsetOutOfRange                   Error = 1
	InvalidMessage                     Error = 2
	UnknownTopicOrPartition            Error = 3
	InvalidMessageSize                 Error = 4
	LeaderNotAvailable                 Error = 5
	NotLeaderForPartition              Error = 6
	RequestTimedOut                    Error = 7
	BrokerNotAvailable                 Error = 8
	ReplicaNotAvailable                Error = 9
	MessageSizeTooLarge                Error = 10
	StaleControllerEpoch               Error = 11
	OffsetMetadataTooLarge             Error = 12
	NetworkException                   Error = 13
	GroupLoadInProgress                Error = 14
	GroupCoordinatorNotAvailable       Error = 15
	NotCoordinatorForGroup             Error = 16
	InvalidTopic                       Error = 17
	RecordListTooLarge                 Error = 18
	NotEnoughReplicas                  Error = 19
	NotEnoughReplicasAfterAppend       Error = 20
	InvalidRequiredAcks                Error = 21
	IllegalGeneration                  Error = 22
	InconsistentGroupProtocol          Error = 23
	InvalidGroupId                     Error = 24
	UnknownMemberId                    Error = 25
	InvalidSessionTimeout              Error = 26
	RebalanceInProgress                Error = 27
	InvalidCommitOffsetSize            Error = 28
	TopicAuthorizationFailed           Error = 29
	GroupAuthorizationFailed           Error = 30
	ClusterAuthorizationFailed         Error = 31
	InvalidTimestamp                   Error = 32
	UnsupportedSASLMechanism           Error = 33
	IllegalSASLState                   Error = 34
	UnsupportedVersion                 Error = 35
	TopicAlreadyExists                 Error = 36
	InvalidPartitionNumber             Error = 37
	InvalidReplicationFactor           Error = 38
	InvalidReplicaAssignment           Error = 39
	InvalidConfiguration               Error = 40
	NotController                      Error = 41
	InvalidRequest                     Error = 42
	UnsupportedForMessageFormat        Error = 43
	PolicyViolation                    Error = 44
	OutOfOrderSequenceNumber           Error = 45
	DuplicateSequenceNumber            Error = 46
	InvalidProducerEpoch               Error = 47
	InvalidTransactionState            Error = 48
	InvalidProducerIDMapping           Error = 49
	InvalidTransactionTimeout          Error = 50
	ConcurrentTransactions             Error = 51
	TransactionCoordinatorFenced       Error = 52
	TransactionalIDAuthorizationFailed Error = 53
	SecurityDisabled                   Error = 54
	BrokerAuthorizationFailed          Error = 55
	KafkaStorageError                  Error = 56
	LogDirNotFound                     Error = 57
	SASLAuthenticationFailed           Error = 58
	UnknownProducerId                  Error = 59
	ReassignmentInProgress             Error = 60
	DelegationTokenAuthDisabled        Error = 61
	DelegationTokenNotFound            Error = 62
	DelegationTokenOwnerMismatch       Error = 63
	DelegationTokenRequestNotAllowed   Error = 64
	DelegationTokenAuthorizationFailed Error = 65
	DelegationTokenExpired             Error = 66
	InvalidPrincipalType               Error = 67
	NonEmptyGroup                      Error = 68
	GroupIdNotFound                    Error = 69
	FetchSessionIDNotFound             Error = 70
	InvalidFetchSessionEpoch           Error = 71
	ListenerNotFound                   Error = 72
	TopicDeletionDisabled              Error = 73
	FencedLeaderEpoch                  Error = 74
	UnknownLeaderEpoch                 Error = 75
	UnsupportedCompressionType         Error = 76
	StaleBrokerEpoch                   Error = 77
	OffsetNotAvailable                 Error = 78
	MemberIDRequired                   Error = 79
	PreferredLeaderNotAvailable        Error = 80
	GroupMaxSizeReached                Error = 81
	FencedInstanceID                   Error = 82
	EligibleLeadersNotAvailable        Error = 83
	ElectionNotNeeded                  Error = 84
	NoReassignmentInProgress           Error = 85
	GroupSubscribedToTopic             Error = 86
	InvalidRecord                      Error = 87
	UnstableOffsetCommit               Error = 88
	ThrottlingQuotaExceeded            Error = 89
	ProducerFenced                     Error = 90
	ResourceNotFound                   Error = 91
	DuplicateResource                  Error = 92
	UnacceptableCredential             Error = 93
	InconsistentVoterSet               Error = 94
	InvalidUpdateVersion               Error = 95
	FeatureUpdateFailed                Error = 96
	PrincipalDeserializationFailure    Error = 97
	SnapshotNotFound                   Error = 98
	PositionOutOfRange                 Error = 99
	UnknownTopicID                     Error = 100
	DuplicateBrokerRegistration        Error = 101
	BrokerIDNotRegistered              Error = 102
	InconsistentTopicID                Error = 103
	InconsistentClusterID              Error = 104
	TransactionalIDNotFound            Error = 105
	FetchSessionTopicIDError           Error = 106
)

// Error satisfies the error interface.
func (e Error) Error() string {
	return fmt.Sprintf("[%d] %s: %s", e, e.Title(), e.Description())
}

// Timeout returns true if the error was due to a timeout.
func (e Error) Timeout() bool {
	return e == RequestTimedOut
}

// Temporary returns true if the operation that generated the error may succeed
// if retried at a later time.
// Kafka error documentation specifies these as "retriable".
// https://kafka.apache.org/protocol#protocol_error_codes
func (e Error) Temporary() bool {
	switch e {
	case InvalidMessage,
		UnknownTopicOrPartition,
		LeaderNotAvailable,
		NotLeaderForPartition,
		RequestTimedOut,
		NetworkException,
		GroupLoadInProgress,
		GroupCoordinatorNotAvailable,
		NotCoordinatorForGroup,
		NotEnoughReplicas,
		NotEnoughReplicasAfterAppend,
		NotController,
		KafkaStorageError,
		FetchSessionIDNotFound,
		InvalidFetchSessionEpoch,
		ListenerNotFound,
		FencedLeaderEpoch,
		UnknownLeaderEpoch,
		OffsetNotAvailable,
		PreferredLeaderNotAvailable,
		EligibleLeadersNotAvailable,
		ElectionNotNeeded,
		NoReassignmentInProgress,
		GroupSubscribedToTopic,
		UnstableOffsetCommit,
		ThrottlingQuotaExceeded,
		UnknownTopicID,
		InconsistentTopicID,
		FetchSessionTopicIDError:
		return true
	default:
		return false
	}
}

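// Example usage (a minimal sketch, not part of the library): callers can
// unwrap any error returned by this package and retry only the retriable
// codes.
//
//	var kerr kafka.Error
//	if errors.As(err, &kerr) && kerr.Temporary() {
//		// back off and retry the operation
//	}
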
// Title returns a human readable title for the error.
func (e Error) Title() string {
	switch e {
	case Unknown:
		return "Unknown"
	case OffsetOutOfRange:
		return "Offset Out Of Range"
	case InvalidMessage:
		return "Invalid Message"
	case UnknownTopicOrPartition:
		return "Unknown Topic Or Partition"
	case InvalidMessageSize:
		return "Invalid Message Size"
	case LeaderNotAvailable:
		return "Leader Not Available"
	case NotLeaderForPartition:
		return "Not Leader For Partition"
	case RequestTimedOut:
		return "Request Timed Out"
	case BrokerNotAvailable:
		return "Broker Not Available"
	case ReplicaNotAvailable:
		return "Replica Not Available"
	case MessageSizeTooLarge:
		return "Message Size Too Large"
	case StaleControllerEpoch:
		return "Stale Controller Epoch"
	case OffsetMetadataTooLarge:
		return "Offset Metadata Too Large"
	case GroupLoadInProgress:
		return "Group Load In Progress"
	case GroupCoordinatorNotAvailable:
		return "Group Coordinator Not Available"
	case NotCoordinatorForGroup:
		return "Not Coordinator For Group"
	case InvalidTopic:
		return "Invalid Topic"
	case RecordListTooLarge:
		return "Record List Too Large"
	case NotEnoughReplicas:
		return "Not Enough Replicas"
	case NotEnoughReplicasAfterAppend:
		return "Not Enough Replicas After Append"
	case InvalidRequiredAcks:
		return "Invalid Required Acks"
	case IllegalGeneration:
		return "Illegal Generation"
	case InconsistentGroupProtocol:
		return "Inconsistent Group Protocol"
	case InvalidGroupId:
		return "Invalid Group ID"
	case UnknownMemberId:
		return "Unknown Member ID"
	case InvalidSessionTimeout:
		return "Invalid Session Timeout"
	case RebalanceInProgress:
		return "Rebalance In Progress"
	case InvalidCommitOffsetSize:
		return "Invalid Commit Offset Size"
	case TopicAuthorizationFailed:
		return "Topic Authorization Failed"
	case GroupAuthorizationFailed:
		return "Group Authorization Failed"
	case ClusterAuthorizationFailed:
		return "Cluster Authorization Failed"
	case InvalidTimestamp:
		return "Invalid Timestamp"
	case UnsupportedSASLMechanism:
		return "Unsupported SASL Mechanism"
	case IllegalSASLState:
		return "Illegal SASL State"
	case UnsupportedVersion:
		return "Unsupported Version"
	case TopicAlreadyExists:
		return "Topic Already Exists"
	case InvalidPartitionNumber:
		return "Invalid Partition Number"
	case InvalidReplicationFactor:
		return "Invalid Replication Factor"
	case InvalidReplicaAssignment:
		return "Invalid Replica Assignment"
	case InvalidConfiguration:
		return "Invalid Configuration"
	case NotController:
		return "Not Controller"
	case InvalidRequest:
		return "Invalid Request"
	case UnsupportedForMessageFormat:
		return "Unsupported For Message Format"
	case PolicyViolation:
		return "Policy Violation"
	case OutOfOrderSequenceNumber:
		return "Out Of Order Sequence Number"
	case DuplicateSequenceNumber:
		return "Duplicate Sequence Number"
	case InvalidProducerEpoch:
		return "Invalid Producer Epoch"
	case InvalidTransactionState:
		return "Invalid Transaction State"
	case InvalidProducerIDMapping:
		return "Invalid Producer ID Mapping"
	case InvalidTransactionTimeout:
		return "Invalid Transaction Timeout"
	case ConcurrentTransactions:
		return "Concurrent Transactions"
	case TransactionCoordinatorFenced:
		return "Transaction Coordinator Fenced"
	case TransactionalIDAuthorizationFailed:
		return "Transactional ID Authorization Failed"
	case SecurityDisabled:
		return "Security Disabled"
	case BrokerAuthorizationFailed:
		return "Broker Authorization Failed"
	case KafkaStorageError:
		return "Kafka Storage Error"
	case LogDirNotFound:
		return "Log Dir Not Found"
	case SASLAuthenticationFailed:
		return "SASL Authentication Failed"
	case UnknownProducerId:
		return "Unknown Producer ID"
	case ReassignmentInProgress:
		return "Reassignment In Progress"
	case DelegationTokenAuthDisabled:
		return "Delegation Token Auth Disabled"
	case DelegationTokenNotFound:
		return "Delegation Token Not Found"
	case DelegationTokenOwnerMismatch:
		return "Delegation Token Owner Mismatch"
	case DelegationTokenRequestNotAllowed:
		return "Delegation Token Request Not Allowed"
	case DelegationTokenAuthorizationFailed:
		return "Delegation Token Authorization Failed"
	case DelegationTokenExpired:
		return "Delegation Token Expired"
	case InvalidPrincipalType:
		return "Invalid Principal Type"
	case NonEmptyGroup:
		return "Non Empty Group"
	case GroupIdNotFound:
		return "Group ID Not Found"
	case FetchSessionIDNotFound:
		return "Fetch Session ID Not Found"
	case InvalidFetchSessionEpoch:
		return "Invalid Fetch Session Epoch"
	case ListenerNotFound:
		return "Listener Not Found"
	case TopicDeletionDisabled:
		return "Topic Deletion Disabled"
	case FencedLeaderEpoch:
		return "Fenced Leader Epoch"
	case UnknownLeaderEpoch:
		return "Unknown Leader Epoch"
	case UnsupportedCompressionType:
		return "Unsupported Compression Type"
	case MemberIDRequired:
		return "Member ID Required"
	case EligibleLeadersNotAvailable:
		return "Eligible Leaders Not Available"
	case ElectionNotNeeded:
		return "Election Not Needed"
	case NoReassignmentInProgress:
		return "No Reassignment In Progress"
	case GroupSubscribedToTopic:
		return "Group Subscribed To Topic"
	case InvalidRecord:
		return "Invalid Record"
	case UnstableOffsetCommit:
		return "Unstable Offset Commit"
	case ThrottlingQuotaExceeded:
		return "Throttling Quota Exceeded"
	case ProducerFenced:
		return "Producer Fenced"
	case ResourceNotFound:
		return "Resource Not Found"
	case DuplicateResource:
		return "Duplicate Resource"
	case UnacceptableCredential:
		return "Unacceptable Credential"
	case InconsistentVoterSet:
		return "Inconsistent Voter Set"
	case InvalidUpdateVersion:
		return "Invalid Update Version"
	case FeatureUpdateFailed:
		return "Feature Update Failed"
	case PrincipalDeserializationFailure:
		return "Principal Deserialization Failure"
	case SnapshotNotFound:
		return "Snapshot Not Found"
	case PositionOutOfRange:
		return "Position Out Of Range"
	case UnknownTopicID:
		return "Unknown Topic ID"
	case DuplicateBrokerRegistration:
		return "Duplicate Broker Registration"
	case BrokerIDNotRegistered:
		return "Broker ID Not Registered"
	case InconsistentTopicID:
		return "Inconsistent Topic ID"
	case InconsistentClusterID:
		return "Inconsistent Cluster ID"
	case TransactionalIDNotFound:
		return "Transactional ID Not Found"
	case FetchSessionTopicIDError:
		return "Fetch Session Topic ID Error"
	}
	return ""
}

// Description returns a human readable description of the cause of the error.
func (e Error) Description() string {
	switch e {
	case Unknown:
		return "an unexpected server error occurred"
	case OffsetOutOfRange:
		return "the requested offset is outside the range of offsets maintained by the server for the given topic/partition"
	case InvalidMessage:
		return "the message contents do not match the CRC"
	case UnknownTopicOrPartition:
		return "the request is for a topic or partition that does not exist on this broker"
	case InvalidMessageSize:
		return "the message has a negative size"
	case LeaderNotAvailable:
		return "the cluster is in the middle of a leadership election and there is currently no leader for this partition and hence it is unavailable for writes"
	case NotLeaderForPartition:
		return "the client attempted to send messages to a replica that is not the leader for some partition, the client's metadata are likely out of date"
	case RequestTimedOut:
		return "the request exceeded the user-specified time limit in the request"
	case BrokerNotAvailable:
		return "not a client facing error and is used mostly by tools when a broker is not alive"
	case ReplicaNotAvailable:
		return "a replica is expected on a broker, but is not (this can be safely ignored)"
	case MessageSizeTooLarge:
		return "the server has a configurable maximum message size to avoid unbounded memory allocation and the client attempted to produce a message larger than this maximum"
	case StaleControllerEpoch:
		return "internal error code for broker-to-broker communication"
	case OffsetMetadataTooLarge:
		return "the client specified a string larger than configured maximum for offset metadata"
	case GroupLoadInProgress:
		return "the broker returns this error code for an offset fetch request if it is still loading offsets (after a leader change for that offsets topic partition), or in response to group membership requests (such as heartbeats) when group metadata is being loaded by the coordinator"
	case GroupCoordinatorNotAvailable:
		return "the broker returns this error code for group coordinator requests, offset commits, and most group management requests if the offsets topic has not yet been created, or if the group coordinator is not active"
	case NotCoordinatorForGroup:
		return "the broker returns this error code if it receives an offset fetch or commit request for a group that it is not a coordinator for"
	case InvalidTopic:
		return "a request which attempted to access an invalid topic (e.g. one which has an illegal name), or if an attempt was made to write to an internal topic (such as the consumer offsets topic)"
	case RecordListTooLarge:
		return "a message batch in a produce request exceeds the maximum configured segment size"
	case NotEnoughReplicas:
		return "the number of in-sync replicas is lower than the configured minimum and requiredAcks is -1"
	case NotEnoughReplicasAfterAppend:
		return "the message was written to the log, but with fewer in-sync replicas than required"
	case InvalidRequiredAcks:
		return "the requested requiredAcks is invalid (anything other than -1, 1, or 0)"
	case IllegalGeneration:
		return "the generation id provided in the request is not the current generation"
	case InconsistentGroupProtocol:
		return "the member provided a protocol type or set of protocols which is not compatible with the current group"
	case InvalidGroupId:
		return "the group id is empty or null"
	case UnknownMemberId:
		return "the member id is not in the current generation"
	case InvalidSessionTimeout:
		return "the requested session timeout is outside of the allowed range on the broker"
	case RebalanceInProgress:
		return "the coordinator has begun rebalancing the group, the client should rejoin the group"
	case InvalidCommitOffsetSize:
		return "an offset commit was rejected because of oversize metadata"
	case TopicAuthorizationFailed:
		return "the client is not authorized to access the requested topic"
	case GroupAuthorizationFailed:
		return "the client is not authorized to access a particular group id"
	case ClusterAuthorizationFailed:
		return "the client is not authorized to use an inter-broker or administrative API"
	case InvalidTimestamp:
		return "the timestamp of the message is out of acceptable range"
	case UnsupportedSASLMechanism:
		return "the broker does not support the requested SASL mechanism"
	case IllegalSASLState:
		return "the request is not valid given the current SASL state"
	case UnsupportedVersion:
		return "the version of API is not supported"
	case TopicAlreadyExists:
		return "a topic with this name already exists"
	case InvalidPartitionNumber:
		return "the number of partitions is invalid"
	case InvalidReplicationFactor:
		return "the replication-factor is invalid"
	case InvalidReplicaAssignment:
		return "the replica assignment is invalid"
	case InvalidConfiguration:
		return "the configuration is invalid"
	case NotController:
		return "this is not the correct controller for this cluster"
	case InvalidRequest:
		return "this most likely occurs because of a request being malformed by the client library or the message was sent to an incompatible broker, see the broker logs for more details"
	case UnsupportedForMessageFormat:
		return "the message format version on the broker does not support the request"
	case PolicyViolation:
		return "the request parameters do not satisfy the configured policy"
	case OutOfOrderSequenceNumber:
		return "the broker received an out of order sequence number"
	case DuplicateSequenceNumber:
		return "the broker received a duplicate sequence number"
	case InvalidProducerEpoch:
		return "the producer attempted an operation with an old epoch, either there is a newer producer with the same transactional ID, or the producer's transaction has been expired by the broker"
	case InvalidTransactionState:
		return "the producer attempted a transactional operation in an invalid state"
	case InvalidProducerIDMapping:
		return "the producer attempted to use a producer id which is not currently assigned to its transactional ID"
	case InvalidTransactionTimeout:
		return "the transaction timeout is larger than the maximum value allowed by the broker (as configured by max.transaction.timeout.ms)"
	case ConcurrentTransactions:
		return "the producer attempted to update a transaction while another concurrent operation on the same transaction was ongoing"
	case TransactionCoordinatorFenced:
		return "the transaction coordinator sending a WriteTxnMarker is no longer the current coordinator for a given producer"
	case TransactionalIDAuthorizationFailed:
		return "the transactional ID authorization failed"
	case SecurityDisabled:
		return "the security features are disabled"
	case BrokerAuthorizationFailed:
		return "the broker authorization failed"
	case KafkaStorageError:
		return "disk error when trying to access log file on the disk"
	case LogDirNotFound:
		return "the user-specified log directory is not found in the broker config"
	case SASLAuthenticationFailed:
		return "SASL Authentication failed"
	case UnknownProducerId:
		return "the broker could not locate the producer metadata associated with the producer ID"
	case ReassignmentInProgress:
		return "a partition reassignment is in progress"
	case DelegationTokenAuthDisabled:
		return "delegation token feature is not enabled"
	case DelegationTokenNotFound:
		return "delegation token is not found on server"
	case DelegationTokenOwnerMismatch:
		return "specified principal is not valid owner/renewer"
	case DelegationTokenRequestNotAllowed:
		return "delegation token requests are not allowed on plaintext/1-way ssl channels and on delegation token authenticated channels"
	case DelegationTokenAuthorizationFailed:
		return "delegation token authorization failed"
	case DelegationTokenExpired:
		return "delegation token is expired"
	case InvalidPrincipalType:
		return "the supplied principal type is not supported"
	case NonEmptyGroup:
		return "the group is not empty"
	case GroupIdNotFound:
		return "the group ID does not exist"
	case FetchSessionIDNotFound:
		return "the fetch session ID was not found"
	case InvalidFetchSessionEpoch:
		return "the fetch session epoch is invalid"
	case ListenerNotFound:
		return "there is no listener on the leader broker that matches the listener on which metadata request was processed"
	case TopicDeletionDisabled:
		return "topic deletion is disabled"
	case FencedLeaderEpoch:
		return "the leader epoch in the request is older than the epoch on the broker"
	case UnknownLeaderEpoch:
		return "the leader epoch in the request is newer than the epoch on the broker"
	case UnsupportedCompressionType:
		return "the requesting client does not support the compression type of given partition"
	case MemberIDRequired:
		return "the group member needs to have a valid member id before actually entering a consumer group"
	case EligibleLeadersNotAvailable:
		return "eligible topic partition leaders are not available"
	case ElectionNotNeeded:
		return "leader election not needed for topic partition"
	case NoReassignmentInProgress:
		return "no partition reassignment is in progress"
	case GroupSubscribedToTopic:
		return "deleting offsets of a topic is forbidden while the consumer group is actively subscribed to it"
	case InvalidRecord:
		return "this record has failed the validation on the broker and has hence been rejected"
	case UnstableOffsetCommit:
		return "there are unstable offsets that need to be cleared"
	case ThrottlingQuotaExceeded:
		return "the throttling quota has been exceeded"
	case ProducerFenced:
		return "there is a newer producer with the same transactionalId which fences the current one"
	case ResourceNotFound:
		return "a request illegally referred to a resource that does not exist"
	case DuplicateResource:
		return "a request illegally referred to the same resource twice"
	case UnacceptableCredential:
		return "the requested credential would not meet criteria for acceptability"
	case InconsistentVoterSet:
		return "either the sender or recipient of a voter-only request is not one of the expected voters"
	case InvalidUpdateVersion:
		return "the given update version was invalid"
	case FeatureUpdateFailed:
		return "unable to update finalized features due to an unexpected server error"
	case PrincipalDeserializationFailure:
		return "request principal deserialization failed during forwarding, this indicates an internal error on the broker cluster security setup"
	case SnapshotNotFound:
		return "the requested snapshot was not found"
	case PositionOutOfRange:
		return "the requested position is not greater than or equal to zero, and less than the size of the snapshot"
	case UnknownTopicID:
		return "this server does not host this topic ID"
	case DuplicateBrokerRegistration:
		return "this broker ID is already in use"
	case BrokerIDNotRegistered:
		return "the given broker ID was not registered"
	case InconsistentTopicID:
		return "the log's topic ID did not match the topic ID in the request"
	case InconsistentClusterID:
		return "the clusterId in the request does not match that found on the server"
	case TransactionalIDNotFound:
		return "the transactionalId could not be found"
	case FetchSessionTopicIDError:
		return "the fetch session encountered inconsistent topic ID usage"
	}
	return ""
}

func isTimeout(err error) bool {
	var timeoutError interface{ Timeout() bool }
	if errors.As(err, &timeoutError) {
		return timeoutError.Timeout()
	}
	return false
}

func isTemporary(err error) bool {
	var tempError interface{ Temporary() bool }
	if errors.As(err, &tempError) {
		return tempError.Temporary()
	}
	return false
}

func isTransientNetworkError(err error) bool {
	return errors.Is(err, io.ErrUnexpectedEOF) ||
		errors.Is(err, syscall.ECONNREFUSED) ||
		errors.Is(err, syscall.ECONNRESET) ||
		errors.Is(err, syscall.EPIPE)
}

func silentEOF(err error) error {
	if errors.Is(err, io.EOF) {
		err = nil
	}
	return err
}

func dontExpectEOF(err error) error {
	if errors.Is(err, io.EOF) {
		return io.ErrUnexpectedEOF
	}
	return err
}

func coalesceErrors(errs ...error) error {
	for _, err := range errs {
		if err != nil {
			return err
		}
	}
	return nil
}

// MessageTooLargeError is returned when a message exceeds the broker's
// configured maximum size; Remaining holds the other messages from the same
// call, which may still be written.
type MessageTooLargeError struct {
	Message   Message
	Remaining []Message
}

func messageTooLarge(msgs []Message, i int) MessageTooLargeError {
	remain := make([]Message, 0, len(msgs)-1)
	remain = append(remain, msgs[:i]...)
	remain = append(remain, msgs[i+1:]...)
	return MessageTooLargeError{
		Message:   msgs[i],
		Remaining: remain,
	}
}

func (e MessageTooLargeError) Error() string {
	return MessageSizeTooLarge.Error()
}

func makeError(code int16, message string) error {
	if code == 0 {
		return nil
	}
	if message == "" {
		return Error(code)
	}
	return fmt.Errorf("%w: %s", Error(code), message)
}

// WriteErrors is returned by kafka.(*Writer).WriteMessages when the writer is
// not configured to write messages asynchronously. WriteErrors values contain
// a list of errors where each entry matches the position of a message in the
// WriteMessages call. The program can determine the status of each message by
// looping over the errors:
//
//	switch err := w.WriteMessages(ctx, msgs...).(type) {
//	case nil:
//	case kafka.WriteErrors:
//		for i := range msgs {
//			if err[i] != nil {
//				// handle the error writing msgs[i]
//				...
//			}
//		}
//	default:
//		// handle other errors
//		...
//	}
type WriteErrors []error

// Count counts the number of non-nil errors in err.
func (err WriteErrors) Count() int {
	n := 0

	for _, e := range err {
		if e != nil {
			n++
		}
	}

	return n
}

func (err WriteErrors) Error() string {
	errCount := err.Count()
	errors := make([]string, 0, errCount)
	for _, writeError := range err {
		if writeError == nil {
			continue
		}
		errors = append(errors, writeError.Error())
	}
	return fmt.Sprintf("Kafka write errors (%d/%d), errors: %v", errCount, len(err), errors)
}
289
vendor/github.com/segmentio/kafka-go/fetch.go
generated
vendored
Normal file
@@ -0,0 +1,289 @@
package kafka

import (
	"context"
	"fmt"
	"math"
	"net"
	"time"

	"github.com/segmentio/kafka-go/protocol"
	fetchAPI "github.com/segmentio/kafka-go/protocol/fetch"
)

// FetchRequest represents a request sent to a kafka broker to retrieve records
// from a topic partition.
type FetchRequest struct {
	// Address of the kafka broker to send the request to.
	Addr net.Addr

	// Topic, partition, and offset to retrieve records from. The offset may be
	// one of the special FirstOffset or LastOffset constants, in which case the
	// request will automatically discover the first or last offset of the
	// partition and submit the request for these.
	Topic     string
	Partition int
	Offset    int64

	// Size and time limits of the response returned by the broker.
	MinBytes int64
	MaxBytes int64
	MaxWait  time.Duration

	// The isolation level for the request.
	//
	// Defaults to ReadUncommitted.
	//
	// This field requires the kafka broker to support the Fetch API in version
	// 4 or above (otherwise the value is ignored).
	IsolationLevel IsolationLevel
}

// FetchResponse represents a response from a kafka broker to a fetch request.
type FetchResponse struct {
	// The amount of time that the broker throttled the request.
	Throttle time.Duration

	// The topic and partition that the response came for (will match the values
	// in the request).
	Topic     string
	Partition int

	// Information about the topic partition layout returned from the broker.
	//
	// LastStableOffset requires the kafka broker to support the Fetch API in
	// version 4 or above (otherwise the value is zero).
	//
	// LogStartOffset requires the kafka broker to support the Fetch API in
	// version 5 or above (otherwise the value is zero).
	HighWatermark    int64
	LastStableOffset int64
	LogStartOffset   int64

	// An error that may have occurred while attempting to fetch the records.
	//
	// The error contains both the kafka error code, and an error message
	// returned by the kafka broker. Programs may use the standard errors.Is
	// function to test the error against kafka error codes.
	Error error

	// The set of records returned in the response.
	//
	// The program is expected to call the RecordSet's Close method when it
	// has finished reading the records.
	//
	// Note that kafka may return record batches that start at an offset before
	// the one that was requested. It is the program's responsibility to skip
	// the offsets that it is not interested in.
	Records RecordReader
}

// Fetch sends a fetch request to a kafka broker and returns the response.
//
// If the broker returned an invalid response with no topics, an error wrapping
// protocol.ErrNoTopic is returned.
//
// If the broker returned an invalid response with no partitions, an error
// wrapping ErrNoPartitions is returned.
func (c *Client) Fetch(ctx context.Context, req *FetchRequest) (*FetchResponse, error) {
	timeout := c.timeout(ctx, math.MaxInt64)
	maxWait := req.maxWait()

	if maxWait < timeout {
		timeout = maxWait
	}

	offset := req.Offset
	switch offset {
	case FirstOffset, LastOffset:
		topic, partition := req.Topic, req.Partition

		r, err := c.ListOffsets(ctx, &ListOffsetsRequest{
			Addr: req.Addr,
			Topics: map[string][]OffsetRequest{
				topic: {{
					Partition: partition,
					Timestamp: offset,
				}},
			},
		})
		if err != nil {
			return nil, fmt.Errorf("kafka.(*Client).Fetch: %w", err)
		}

		for _, p := range r.Topics[topic] {
			if p.Partition == partition {
				if p.Error != nil {
					return nil, fmt.Errorf("kafka.(*Client).Fetch: %w", p.Error)
				}
				switch offset {
				case FirstOffset:
					offset = p.FirstOffset
				case LastOffset:
					offset = p.LastOffset
				}
				break
			}
		}
	}

	m, err := c.roundTrip(ctx, req.Addr, &fetchAPI.Request{
		ReplicaID:      -1,
		MaxWaitTime:    milliseconds(timeout),
		MinBytes:       int32(req.MinBytes),
		MaxBytes:       int32(req.MaxBytes),
		IsolationLevel: int8(req.IsolationLevel),
		SessionID:      -1,
		SessionEpoch:   -1,
		Topics: []fetchAPI.RequestTopic{{
			Topic: req.Topic,
			Partitions: []fetchAPI.RequestPartition{{
				Partition:          int32(req.Partition),
				CurrentLeaderEpoch: -1,
				FetchOffset:        offset,
				LogStartOffset:     -1,
				PartitionMaxBytes:  int32(req.MaxBytes),
			}},
		}},
	})

	if err != nil {
		return nil, fmt.Errorf("kafka.(*Client).Fetch: %w", err)
	}

	res := m.(*fetchAPI.Response)
	if len(res.Topics) == 0 {
		return nil, fmt.Errorf("kafka.(*Client).Fetch: %w", protocol.ErrNoTopic)
	}
	topic := &res.Topics[0]
	if len(topic.Partitions) == 0 {
		return nil, fmt.Errorf("kafka.(*Client).Fetch: %w", protocol.ErrNoPartition)
	}
	partition := &topic.Partitions[0]

	ret := &FetchResponse{
		Throttle:         makeDuration(res.ThrottleTimeMs),
		Topic:            topic.Topic,
		Partition:        int(partition.Partition),
		Error:            makeError(res.ErrorCode, ""),
		HighWatermark:    partition.HighWatermark,
		LastStableOffset: partition.LastStableOffset,
		LogStartOffset:   partition.LogStartOffset,
		Records:          partition.RecordSet.Records,
	}

	if partition.ErrorCode != 0 {
		ret.Error = makeError(partition.ErrorCode, "")
	}

	if ret.Records == nil {
		ret.Records = NewRecordReader()
	}

	return ret, nil
}

func (req *FetchRequest) maxWait() time.Duration {
	if req.MaxWait > 0 {
		return req.MaxWait
	}
	return defaultMaxWait
}

type fetchRequestV2 struct {
|
||||
ReplicaID int32
|
||||
MaxWaitTime int32
|
||||
MinBytes int32
|
||||
Topics []fetchRequestTopicV2
|
||||
}
|
||||
|
||||
func (r fetchRequestV2) size() int32 {
|
||||
return 4 + 4 + 4 + sizeofArray(len(r.Topics), func(i int) int32 { return r.Topics[i].size() })
|
||||
}
|
||||
|
||||
func (r fetchRequestV2) writeTo(wb *writeBuffer) {
|
||||
wb.writeInt32(r.ReplicaID)
|
||||
wb.writeInt32(r.MaxWaitTime)
|
||||
wb.writeInt32(r.MinBytes)
|
||||
wb.writeArray(len(r.Topics), func(i int) { r.Topics[i].writeTo(wb) })
|
||||
}
|
||||
|
||||
type fetchRequestTopicV2 struct {
|
||||
TopicName string
|
||||
Partitions []fetchRequestPartitionV2
|
||||
}
|
||||
|
||||
func (t fetchRequestTopicV2) size() int32 {
|
||||
return sizeofString(t.TopicName) +
|
||||
sizeofArray(len(t.Partitions), func(i int) int32 { return t.Partitions[i].size() })
|
||||
}
|
||||
|
||||
func (t fetchRequestTopicV2) writeTo(wb *writeBuffer) {
|
||||
wb.writeString(t.TopicName)
|
||||
wb.writeArray(len(t.Partitions), func(i int) { t.Partitions[i].writeTo(wb) })
|
||||
}
|
||||
|
||||
type fetchRequestPartitionV2 struct {
|
||||
Partition int32
|
||||
FetchOffset int64
|
||||
MaxBytes int32
|
||||
}
|
||||
|
||||
func (p fetchRequestPartitionV2) size() int32 {
|
||||
return 4 + 8 + 4
|
||||
}
|
||||
|
||||
func (p fetchRequestPartitionV2) writeTo(wb *writeBuffer) {
|
||||
wb.writeInt32(p.Partition)
|
||||
wb.writeInt64(p.FetchOffset)
|
||||
wb.writeInt32(p.MaxBytes)
|
||||
}
|
||||
|
||||
type fetchResponseV2 struct {
|
||||
ThrottleTime int32
|
||||
Topics []fetchResponseTopicV2
|
||||
}
|
||||
|
||||
func (r fetchResponseV2) size() int32 {
|
||||
return 4 + sizeofArray(len(r.Topics), func(i int) int32 { return r.Topics[i].size() })
|
||||
}
|
||||
|
||||
func (r fetchResponseV2) writeTo(wb *writeBuffer) {
|
||||
wb.writeInt32(r.ThrottleTime)
|
||||
wb.writeArray(len(r.Topics), func(i int) { r.Topics[i].writeTo(wb) })
|
||||
}
|
||||
|
||||
type fetchResponseTopicV2 struct {
|
||||
TopicName string
|
||||
Partitions []fetchResponsePartitionV2
|
||||
}
|
||||
|
||||
func (t fetchResponseTopicV2) size() int32 {
|
||||
return sizeofString(t.TopicName) +
|
||||
sizeofArray(len(t.Partitions), func(i int) int32 { return t.Partitions[i].size() })
|
||||
}
|
||||
|
||||
func (t fetchResponseTopicV2) writeTo(wb *writeBuffer) {
|
||||
wb.writeString(t.TopicName)
|
||||
wb.writeArray(len(t.Partitions), func(i int) { t.Partitions[i].writeTo(wb) })
|
||||
}
|
||||
|
||||
type fetchResponsePartitionV2 struct {
|
||||
Partition int32
|
||||
ErrorCode int16
|
||||
HighwaterMarkOffset int64
|
||||
MessageSetSize int32
|
||||
MessageSet messageSet
|
||||
}
|
||||
|
||||
func (p fetchResponsePartitionV2) size() int32 {
|
||||
return 4 + 2 + 8 + 4 + p.MessageSet.size()
|
||||
}
|
||||
|
||||
func (p fetchResponsePartitionV2) writeTo(wb *writeBuffer) {
|
||||
wb.writeInt32(p.Partition)
|
||||
wb.writeInt16(p.ErrorCode)
|
||||
wb.writeInt64(p.HighwaterMarkOffset)
|
||||
wb.writeInt32(p.MessageSetSize)
|
||||
p.MessageSet.writeTo(wb)
|
||||
}
|
||||
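The following standalone sketch illustrates how the Fetch API above can be used; it is not part of the vendored source. The broker address (localhost:9092) and the topic name "my-topic" are placeholder assumptions.

package main

import (
	"context"
	"fmt"
	"io"
	"log"

	"github.com/segmentio/kafka-go"
)

func main() {
	client := &kafka.Client{Addr: kafka.TCP("localhost:9092")}

	// Passing FirstOffset makes Fetch resolve the partition's first offset
	// through a ListOffsets round trip before fetching, as shown above.
	res, err := client.Fetch(context.Background(), &kafka.FetchRequest{
		Addr:      client.Addr,
		Topic:     "my-topic", // placeholder topic
		Partition: 0,
		Offset:    kafka.FirstOffset,
		MinBytes:  1,
		MaxBytes:  1 << 20,
	})
	if err != nil {
		log.Fatal(err)
	}
	if res.Error != nil {
		log.Fatal(res.Error)
	}

	// Iterate the RecordReader until io.EOF. A real consumer would also skip
	// records below the requested offset, per the doc comment above.
	for {
		rec, err := res.Records.ReadRecord()
		if err != nil {
			if err == io.EOF {
				break
			}
			log.Fatal(err)
		}
		fmt.Println("offset:", rec.Offset)
	}
}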
170
vendor/github.com/segmentio/kafka-go/findcoordinator.go
generated
vendored
Normal file
@@ -0,0 +1,170 @@
package kafka

import (
	"bufio"
	"context"
	"fmt"
	"net"
	"time"

	"github.com/segmentio/kafka-go/protocol/findcoordinator"
)

// CoordinatorKeyType is used to specify the type of coordinator to look for.
type CoordinatorKeyType int8

const (
	// CoordinatorKeyTypeConsumer type is used when looking for a Group coordinator.
	CoordinatorKeyTypeConsumer CoordinatorKeyType = 0

	// CoordinatorKeyTypeTransaction type is used when looking for a Transaction coordinator.
	CoordinatorKeyTypeTransaction CoordinatorKeyType = 1
)

// FindCoordinatorRequest is the request structure for the FindCoordinator function.
type FindCoordinatorRequest struct {
	// Address of the kafka broker to send the request to.
	Addr net.Addr

	// The coordinator key.
	Key string

	// The coordinator key type. (Group, transaction, etc.)
	KeyType CoordinatorKeyType
}

// FindCoordinatorResponseCoordinator contains details about the found coordinator.
type FindCoordinatorResponseCoordinator struct {
	// NodeID holds the broker id.
	NodeID int

	// Host of the broker.
	Host string

	// Port on which the broker accepts requests.
	Port int
}

// FindCoordinatorResponse is the response structure for the FindCoordinator function.
type FindCoordinatorResponse struct {
	// The Transaction/Group Coordinator details.
	Coordinator *FindCoordinatorResponseCoordinator

	// The amount of time that the broker throttled the request.
	Throttle time.Duration

	// An error that may have occurred while attempting to retrieve the
	// coordinator.
	//
	// The error contains both the kafka error code, and an error message
	// returned by the kafka broker.
	Error error
}

// FindCoordinator sends a findCoordinator request to a kafka broker and returns the
// response.
func (c *Client) FindCoordinator(ctx context.Context, req *FindCoordinatorRequest) (*FindCoordinatorResponse, error) {
	m, err := c.roundTrip(ctx, req.Addr, &findcoordinator.Request{
		Key:     req.Key,
		KeyType: int8(req.KeyType),
	})

	if err != nil {
		return nil, fmt.Errorf("kafka.(*Client).FindCoordinator: %w", err)
	}

	res := m.(*findcoordinator.Response)
	coordinator := &FindCoordinatorResponseCoordinator{
		NodeID: int(res.NodeID),
		Host:   res.Host,
		Port:   int(res.Port),
	}
	ret := &FindCoordinatorResponse{
		Throttle:    makeDuration(res.ThrottleTimeMs),
		Error:       makeError(res.ErrorCode, res.ErrorMessage),
		Coordinator: coordinator,
	}

	return ret, nil
}

// findCoordinatorRequestV0 requests the coordinator for the specified group or
// transaction.
//
// See http://kafka.apache.org/protocol.html#The_Messages_FindCoordinator
type findCoordinatorRequestV0 struct {
	// CoordinatorKey holds the id to use for finding the coordinator (for groups,
	// this is the groupId, for transactional producers, this is the transactional id)
	CoordinatorKey string
}

func (t findCoordinatorRequestV0) size() int32 {
	return sizeofString(t.CoordinatorKey)
}

func (t findCoordinatorRequestV0) writeTo(wb *writeBuffer) {
	wb.writeString(t.CoordinatorKey)
}

type findCoordinatorResponseCoordinatorV0 struct {
	// NodeID holds the broker id.
	NodeID int32

	// Host of the broker.
	Host string

	// Port on which the broker accepts requests.
	Port int32
}

func (t findCoordinatorResponseCoordinatorV0) size() int32 {
	return sizeofInt32(t.NodeID) +
		sizeofString(t.Host) +
		sizeofInt32(t.Port)
}

func (t findCoordinatorResponseCoordinatorV0) writeTo(wb *writeBuffer) {
	wb.writeInt32(t.NodeID)
	wb.writeString(t.Host)
	wb.writeInt32(t.Port)
}

func (t *findCoordinatorResponseCoordinatorV0) readFrom(r *bufio.Reader, size int) (remain int, err error) {
	if remain, err = readInt32(r, size, &t.NodeID); err != nil {
		return
	}
	if remain, err = readString(r, remain, &t.Host); err != nil {
		return
	}
	if remain, err = readInt32(r, remain, &t.Port); err != nil {
		return
	}
	return
}

type findCoordinatorResponseV0 struct {
	// ErrorCode holds the response error code.
	ErrorCode int16

	// Coordinator holds host and port information for the coordinator.
	Coordinator findCoordinatorResponseCoordinatorV0
}

func (t findCoordinatorResponseV0) size() int32 {
	return sizeofInt16(t.ErrorCode) +
		t.Coordinator.size()
}

func (t findCoordinatorResponseV0) writeTo(wb *writeBuffer) {
	wb.writeInt16(t.ErrorCode)
	t.Coordinator.writeTo(wb)
}

func (t *findCoordinatorResponseV0) readFrom(r *bufio.Reader, size int) (remain int, err error) {
	if remain, err = readInt16(r, size, &t.ErrorCode); err != nil {
		return
	}
	if remain, err = (&t.Coordinator).readFrom(r, remain); err != nil {
		return
	}
	return
}
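A minimal usage sketch of FindCoordinator (not part of the vendored source); the broker address and the consumer group id are placeholders.

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/segmentio/kafka-go"
)

func main() {
	client := &kafka.Client{Addr: kafka.TCP("localhost:9092")}

	res, err := client.FindCoordinator(context.Background(), &kafka.FindCoordinatorRequest{
		Addr:    client.Addr,
		Key:     "my-group", // placeholder consumer group id
		KeyType: kafka.CoordinatorKeyTypeConsumer,
	})
	if err != nil {
		log.Fatal(err)
	}
	if res.Error != nil {
		log.Fatal(res.Error)
	}
	fmt.Printf("coordinator: node %d at %s:%d\n",
		res.Coordinator.NodeID, res.Coordinator.Host, res.Coordinator.Port)
}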
339
vendor/github.com/segmentio/kafka-go/groupbalancer.go
generated
vendored
Normal file
@@ -0,0 +1,339 @@
package kafka

import (
	"sort"
)

// GroupMember describes a single participant in a consumer group.
type GroupMember struct {
	// ID is the unique ID for this member as taken from the JoinGroup response.
	ID string

	// Topics is a list of topics that this member is consuming.
	Topics []string

	// UserData contains any information that the GroupBalancer sent to the
	// consumer group coordinator.
	UserData []byte
}

// GroupMemberAssignments holds MemberID => topic => partitions.
type GroupMemberAssignments map[string]map[string][]int

// GroupBalancer encapsulates the client side rebalancing logic.
type GroupBalancer interface {
	// ProtocolName of the GroupBalancer.
	ProtocolName() string

	// UserData provides the GroupBalancer an opportunity to embed custom
	// UserData into the metadata.
	//
	// Will be used by JoinGroup to begin the consumer group handshake.
	//
	// See https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol#AGuideToTheKafkaProtocol-JoinGroupRequest
	UserData() ([]byte, error)

	// AssignGroups returns which members will be consuming which topic
	// partitions.
	AssignGroups(members []GroupMember, partitions []Partition) GroupMemberAssignments
}

// RangeGroupBalancer groups consumers by partition
//
// Example: 5 partitions, 2 consumers
//   C0: [0, 1]
//   C1: [2, 3, 4]
//
// Example: 6 partitions, 3 consumers
//   C0: [0, 1]
//   C1: [2, 3]
//   C2: [4, 5]
type RangeGroupBalancer struct{}

func (r RangeGroupBalancer) ProtocolName() string {
	return "range"
}

func (r RangeGroupBalancer) UserData() ([]byte, error) {
	return nil, nil
}

func (r RangeGroupBalancer) AssignGroups(members []GroupMember, topicPartitions []Partition) GroupMemberAssignments {
	groupAssignments := GroupMemberAssignments{}
	membersByTopic := findMembersByTopic(members)

	for topic, members := range membersByTopic {
		partitions := findPartitions(topic, topicPartitions)
		partitionCount := len(partitions)
		memberCount := len(members)

		for memberIndex, member := range members {
			assignmentsByTopic, ok := groupAssignments[member.ID]
			if !ok {
				assignmentsByTopic = map[string][]int{}
				groupAssignments[member.ID] = assignmentsByTopic
			}

			minIndex := memberIndex * partitionCount / memberCount
			maxIndex := (memberIndex + 1) * partitionCount / memberCount

			for partitionIndex, partition := range partitions {
				if partitionIndex >= minIndex && partitionIndex < maxIndex {
					assignmentsByTopic[topic] = append(assignmentsByTopic[topic], partition)
				}
			}
		}
	}

	return groupAssignments
}

// RoundRobinGroupBalancer divides partitions evenly among consumers
//
// Example: 5 partitions, 2 consumers
//   C0: [0, 2, 4]
//   C1: [1, 3]
//
// Example: 6 partitions, 3 consumers
//   C0: [0, 3]
//   C1: [1, 4]
//   C2: [2, 5]
type RoundRobinGroupBalancer struct{}

func (r RoundRobinGroupBalancer) ProtocolName() string {
	return "roundrobin"
}

func (r RoundRobinGroupBalancer) UserData() ([]byte, error) {
	return nil, nil
}

func (r RoundRobinGroupBalancer) AssignGroups(members []GroupMember, topicPartitions []Partition) GroupMemberAssignments {
	groupAssignments := GroupMemberAssignments{}
	membersByTopic := findMembersByTopic(members)
	for topic, members := range membersByTopic {
		partitionIDs := findPartitions(topic, topicPartitions)
		memberCount := len(members)

		for memberIndex, member := range members {
			assignmentsByTopic, ok := groupAssignments[member.ID]
			if !ok {
				assignmentsByTopic = map[string][]int{}
				groupAssignments[member.ID] = assignmentsByTopic
			}

			for partitionIndex, partition := range partitionIDs {
				if (partitionIndex % memberCount) == memberIndex {
					assignmentsByTopic[topic] = append(assignmentsByTopic[topic], partition)
				}
			}
		}
	}

	return groupAssignments
}

// RackAffinityGroupBalancer makes a best effort to pair up consumers with
// partitions whose leader is in the same rack. This strategy can have
// performance benefits by minimizing round trip latency between the consumer
// and the broker. In environments where network traffic across racks incurs
// charges (such as cross AZ data transfer in AWS), this strategy is also a cost
// optimization measure because it keeps network traffic within the local rack
// where possible.
//
// The primary objective is to spread partitions evenly across consumers with a
// secondary focus on maximizing the number of partitions where the leader and
// the consumer are in the same rack. For best affinity, it's recommended to
// have a balanced spread of consumers and partition leaders across racks.
//
// This balancer requires Kafka version 0.10.0.0 or later. Earlier versions do
// not return the brokers' racks in the metadata request.
type RackAffinityGroupBalancer struct {
	// Rack is the name of the rack where this consumer is running. It will be
	// communicated to the consumer group leader via the UserData so that
	// assignments can be made with affinity to the partition leader.
	Rack string
}

func (r RackAffinityGroupBalancer) ProtocolName() string {
	return "rack-affinity"
}

func (r RackAffinityGroupBalancer) AssignGroups(members []GroupMember, partitions []Partition) GroupMemberAssignments {
	membersByTopic := make(map[string][]GroupMember)
	for _, m := range members {
		for _, t := range m.Topics {
			membersByTopic[t] = append(membersByTopic[t], m)
		}
	}

	partitionsByTopic := make(map[string][]Partition)
	for _, p := range partitions {
		partitionsByTopic[p.Topic] = append(partitionsByTopic[p.Topic], p)
	}

	assignments := GroupMemberAssignments{}
	for topic := range membersByTopic {
		topicAssignments := r.assignTopic(membersByTopic[topic], partitionsByTopic[topic])
		for member, parts := range topicAssignments {
			memberAssignments, ok := assignments[member]
			if !ok {
				memberAssignments = make(map[string][]int)
				assignments[member] = memberAssignments
			}
			memberAssignments[topic] = parts
		}
	}
	return assignments
}

func (r RackAffinityGroupBalancer) UserData() ([]byte, error) {
	return []byte(r.Rack), nil
}

func (r *RackAffinityGroupBalancer) assignTopic(members []GroupMember, partitions []Partition) map[string][]int {
	zonedPartitions := make(map[string][]int)
	for _, part := range partitions {
		zone := part.Leader.Rack
		zonedPartitions[zone] = append(zonedPartitions[zone], part.ID)
	}

	zonedConsumers := make(map[string][]string)
	for _, member := range members {
		zone := string(member.UserData)
		zonedConsumers[zone] = append(zonedConsumers[zone], member.ID)
	}

	targetPerMember := len(partitions) / len(members)
	remainder := len(partitions) % len(members)
	assignments := make(map[string][]int)

	// assign as many as possible in zone. this will assign up to partsPerMember
	// to each consumer. it will also prefer to allocate remainder partitions
	// in zone if possible.
	for zone, parts := range zonedPartitions {
		consumers := zonedConsumers[zone]
		if len(consumers) == 0 {
			continue
		}

		// don't over-allocate. cap partition assignments at the calculated
		// target.
		partsPerMember := len(parts) / len(consumers)
		if partsPerMember > targetPerMember {
			partsPerMember = targetPerMember
		}

		for _, consumer := range consumers {
			assignments[consumer] = append(assignments[consumer], parts[:partsPerMember]...)
			parts = parts[partsPerMember:]
		}

		// if we had enough partitions for each consumer in this zone to hit its
		// target, attempt to use any leftover partitions to satisfy the total
		// remainder by adding at most 1 partition per consumer.
		leftover := len(parts)
		if partsPerMember == targetPerMember {
			if leftover > remainder {
				leftover = remainder
			}
			if leftover > len(consumers) {
				leftover = len(consumers)
			}
			remainder -= leftover
		}

		// this loop covers the case where we're assigning extra partitions or
		// if there weren't enough to satisfy the targetPerMember and the zoned
		// partitions didn't divide evenly.
		for i := 0; i < leftover; i++ {
			assignments[consumers[i]] = append(assignments[consumers[i]], parts[i])
		}
		parts = parts[leftover:]

		if len(parts) == 0 {
			delete(zonedPartitions, zone)
		} else {
			zonedPartitions[zone] = parts
		}
	}

	// assign out remainders regardless of zone.
	var remaining []int
	for _, partitions := range zonedPartitions {
		remaining = append(remaining, partitions...)
	}

	for _, member := range members {
		assigned := assignments[member.ID]
		delta := targetPerMember - len(assigned)
		// if it were possible to assign the remainder in zone, it's been taken
		// care of already. now we will portion out any remainder to a member
		// that can take it.
		if delta >= 0 && remainder > 0 {
			delta++
			remainder--
		}
		if delta > 0 {
			assignments[member.ID] = append(assigned, remaining[:delta]...)
			remaining = remaining[delta:]
		}
	}

	return assignments
}

// findPartitions extracts the partition ids associated with the topic from the
// list of Partitions provided.
func findPartitions(topic string, partitions []Partition) []int {
	var ids []int
	for _, partition := range partitions {
		if partition.Topic == topic {
			ids = append(ids, partition.ID)
		}
	}
	return ids
}

// findMembersByTopic groups the memberGroupMetadata by topic.
func findMembersByTopic(members []GroupMember) map[string][]GroupMember {
	membersByTopic := map[string][]GroupMember{}
	for _, member := range members {
		for _, topic := range member.Topics {
			membersByTopic[topic] = append(membersByTopic[topic], member)
		}
	}

	// normalize ordering of members to enable grouping across topics by partitions
	//
	// Want:
	//   C0 [T0/P0, T1/P0]
	//   C1 [T0/P1, T1/P1]
	//
	// Not:
	//   C0 [T0/P0, T1/P1]
	//   C1 [T0/P1, T1/P0]
	//
	// Even though the latter is still round robin, the partitions are crossed.
	for _, members := range membersByTopic {
		sort.Slice(members, func(i, j int) bool {
			return members[i].ID < members[j].ID
		})
	}

	return membersByTopic
}

// findGroupBalancer returns the GroupBalancer with the specified protocolName
// from the slice provided.
func findGroupBalancer(protocolName string, balancers []GroupBalancer) (GroupBalancer, bool) {
	for _, balancer := range balancers {
		if balancer.ProtocolName() == protocolName {
			return balancer, true
		}
	}
	return nil, false
}
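Because the balancers are pure functions over members and partitions, they can be exercised without a broker. A small sketch of RangeGroupBalancer, reproducing the 6 partitions / 3 consumers example from the doc comment ("my-topic" is a placeholder name):

package main

import (
	"fmt"

	"github.com/segmentio/kafka-go"
)

func main() {
	// Three consumers subscribed to the same topic.
	members := []kafka.GroupMember{
		{ID: "C0", Topics: []string{"my-topic"}},
		{ID: "C1", Topics: []string{"my-topic"}},
		{ID: "C2", Topics: []string{"my-topic"}},
	}

	// Six partitions of that topic.
	partitions := make([]kafka.Partition, 6)
	for i := range partitions {
		partitions[i] = kafka.Partition{Topic: "my-topic", ID: i}
	}

	// Matches the documented example: C0: [0 1], C1: [2 3], C2: [4 5].
	assignments := kafka.RangeGroupBalancer{}.AssignGroups(members, partitions)
	for _, id := range []string{"C0", "C1", "C2"} {
		fmt.Println(id, assignments[id]["my-topic"])
	}
}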
109
vendor/github.com/segmentio/kafka-go/heartbeat.go
generated
vendored
Normal file
@@ -0,0 +1,109 @@
package kafka

import (
	"bufio"
	"context"
	"fmt"
	"net"
	"time"

	heartbeatAPI "github.com/segmentio/kafka-go/protocol/heartbeat"
)

// HeartbeatRequest represents a heartbeat sent to kafka to indicate consumer liveness.
type HeartbeatRequest struct {
	// Address of the kafka broker to send the request to.
	Addr net.Addr

	// GroupID is the ID of the group.
	GroupID string

	// GenerationID is the current generation for the group.
	GenerationID int32

	// MemberID is the ID of the group member.
	MemberID string

	// GroupInstanceID is a unique identifier for the consumer.
	GroupInstanceID string
}

// HeartbeatResponse represents a response from a heartbeat request.
type HeartbeatResponse struct {
	// Error is set to non-nil if an error occurred.
	Error error

	// The amount of time that the broker throttled the request.
	//
	// This field will be zero if the kafka broker did not support the
	// Heartbeat API in version 1 or above.
	Throttle time.Duration
}

type heartbeatRequestV0 struct {
	// GroupID holds the unique group identifier.
	GroupID string

	// GenerationID holds the generation of the group.
	GenerationID int32

	// MemberID assigned by the group coordinator.
	MemberID string
}

// Heartbeat sends a heartbeat request to a kafka broker and returns the response.
func (c *Client) Heartbeat(ctx context.Context, req *HeartbeatRequest) (*HeartbeatResponse, error) {
	m, err := c.roundTrip(ctx, req.Addr, &heartbeatAPI.Request{
		GroupID:         req.GroupID,
		GenerationID:    req.GenerationID,
		MemberID:        req.MemberID,
		GroupInstanceID: req.GroupInstanceID,
	})
	if err != nil {
		return nil, fmt.Errorf("kafka.(*Client).Heartbeat: %w", err)
	}

	res := m.(*heartbeatAPI.Response)

	ret := &HeartbeatResponse{
		Throttle: makeDuration(res.ThrottleTimeMs),
	}

	if res.ErrorCode != 0 {
		ret.Error = Error(res.ErrorCode)
	}

	return ret, nil
}

func (t heartbeatRequestV0) size() int32 {
	return sizeofString(t.GroupID) +
		sizeofInt32(t.GenerationID) +
		sizeofString(t.MemberID)
}

func (t heartbeatRequestV0) writeTo(wb *writeBuffer) {
	wb.writeString(t.GroupID)
	wb.writeInt32(t.GenerationID)
	wb.writeString(t.MemberID)
}

type heartbeatResponseV0 struct {
	// ErrorCode holds the response error code.
	ErrorCode int16
}

func (t heartbeatResponseV0) size() int32 {
	return sizeofInt16(t.ErrorCode)
}

func (t heartbeatResponseV0) writeTo(wb *writeBuffer) {
	wb.writeInt16(t.ErrorCode)
}

func (t *heartbeatResponseV0) readFrom(r *bufio.Reader, sz int) (remain int, err error) {
	if remain, err = readInt16(r, sz, &t.ErrorCode); err != nil {
		return
	}
	return
}
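An illustrative sketch of sending a heartbeat (not part of the vendored source). The GroupID, GenerationID, and MemberID would normally come from a prior JoinGroup exchange; the literals here, like the broker address, are placeholders.

package main

import (
	"context"
	"log"

	"github.com/segmentio/kafka-go"
)

func main() {
	client := &kafka.Client{Addr: kafka.TCP("localhost:9092")}

	res, err := client.Heartbeat(context.Background(), &kafka.HeartbeatRequest{
		Addr:         client.Addr,
		GroupID:      "my-group",  // placeholder group id
		GenerationID: 1,           // placeholder, from JoinGroup
		MemberID:     "member-1",  // placeholder, from JoinGroup
	})
	if err != nil {
		log.Fatal(err)
	}
	if res.Error != nil {
		log.Fatal(res.Error) // e.g. rebalance in progress
	}
}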
133
vendor/github.com/segmentio/kafka-go/incrementalalterconfigs.go
generated
vendored
Normal file
@@ -0,0 +1,133 @@
package kafka

import (
	"context"
	"net"

	"github.com/segmentio/kafka-go/protocol/incrementalalterconfigs"
)

type ConfigOperation int8

const (
	ConfigOperationSet      ConfigOperation = 0
	ConfigOperationDelete   ConfigOperation = 1
	ConfigOperationAppend   ConfigOperation = 2
	ConfigOperationSubtract ConfigOperation = 3
)

// IncrementalAlterConfigsRequest is a request to the IncrementalAlterConfigs API.
type IncrementalAlterConfigsRequest struct {
	// Addr is the address of the kafka broker to send the request to.
	Addr net.Addr

	// Resources contains the list of resources to update configs for.
	Resources []IncrementalAlterConfigsRequestResource

	// ValidateOnly indicates whether Kafka should validate the changes without actually
	// applying them.
	ValidateOnly bool
}

// IncrementalAlterConfigsRequestResource contains the details of a single resource type whose
// configs should be altered.
type IncrementalAlterConfigsRequestResource struct {
	// ResourceType is the type of resource to update.
	ResourceType ResourceType

	// ResourceName is the name of the resource to update (i.e., topic name or broker ID).
	ResourceName string

	// Configs contains the list of config key/values to update.
	Configs []IncrementalAlterConfigsRequestConfig
}

// IncrementalAlterConfigsRequestConfig describes a single config key/value pair that should
// be altered.
type IncrementalAlterConfigsRequestConfig struct {
	// Name is the name of the config.
	Name string

	// Value is the value to set for this config.
	Value string

	// ConfigOperation indicates how this config should be updated (e.g., add, delete, etc.).
	ConfigOperation ConfigOperation
}

// IncrementalAlterConfigsResponse is a response from the IncrementalAlterConfigs API.
type IncrementalAlterConfigsResponse struct {
	// Resources contains details of each resource config that was updated.
	Resources []IncrementalAlterConfigsResponseResource
}

// IncrementalAlterConfigsResponseResource contains the response details for a single resource
// whose configs were updated.
type IncrementalAlterConfigsResponseResource struct {
	// Error is set to a non-nil value if an error occurred while updating this specific
	// config.
	Error error

	// ResourceType is the type of resource that was updated.
	ResourceType ResourceType

	// ResourceName is the name of the resource that was updated.
	ResourceName string
}

func (c *Client) IncrementalAlterConfigs(
	ctx context.Context,
	req *IncrementalAlterConfigsRequest,
) (*IncrementalAlterConfigsResponse, error) {
	apiReq := &incrementalalterconfigs.Request{
		ValidateOnly: req.ValidateOnly,
	}

	for _, res := range req.Resources {
		apiRes := incrementalalterconfigs.RequestResource{
			ResourceType: int8(res.ResourceType),
			ResourceName: res.ResourceName,
		}

		for _, config := range res.Configs {
			apiRes.Configs = append(
				apiRes.Configs,
				incrementalalterconfigs.RequestConfig{
					Name:            config.Name,
					Value:           config.Value,
					ConfigOperation: int8(config.ConfigOperation),
				},
			)
		}

		apiReq.Resources = append(
			apiReq.Resources,
			apiRes,
		)
	}

	protoResp, err := c.roundTrip(
		ctx,
		req.Addr,
		apiReq,
	)
	if err != nil {
		return nil, err
	}

	resp := &IncrementalAlterConfigsResponse{}

	apiResp := protoResp.(*incrementalalterconfigs.Response)
	for _, res := range apiResp.Responses {
		resp.Resources = append(
			resp.Resources,
			IncrementalAlterConfigsResponseResource{
				Error:        makeError(res.ErrorCode, res.ErrorMessage),
				ResourceType: ResourceType(res.ResourceType),
				ResourceName: res.ResourceName,
			},
		)
	}

	return resp, nil
}
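A sketch of altering one topic config with the API above (not part of the vendored source). ResourceTypeTopic is defined elsewhere in the package; the broker address, topic name, and retention value are placeholders, and ValidateOnly keeps this a dry run.

package main

import (
	"context"
	"log"

	"github.com/segmentio/kafka-go"
)

func main() {
	client := &kafka.Client{Addr: kafka.TCP("localhost:9092")}

	res, err := client.IncrementalAlterConfigs(context.Background(), &kafka.IncrementalAlterConfigsRequest{
		Addr: client.Addr,
		Resources: []kafka.IncrementalAlterConfigsRequestResource{{
			ResourceType: kafka.ResourceTypeTopic,
			ResourceName: "my-topic", // placeholder topic
			Configs: []kafka.IncrementalAlterConfigsRequestConfig{{
				Name:            "retention.ms",
				Value:           "86400000", // placeholder: 1 day
				ConfigOperation: kafka.ConfigOperationSet,
			}},
		}},
		ValidateOnly: true, // validate without applying
	})
	if err != nil {
		log.Fatal(err)
	}
	for _, r := range res.Resources {
		if r.Error != nil {
			log.Printf("resource %q: %v", r.ResourceName, r.Error)
		}
	}
}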
82
vendor/github.com/segmentio/kafka-go/initproducerid.go
generated
vendored
Normal file
@@ -0,0 +1,82 @@
package kafka

import (
	"context"
	"fmt"
	"net"
	"time"

	"github.com/segmentio/kafka-go/protocol/initproducerid"
)

// InitProducerIDRequest is the request structure for the InitProducerId function.
type InitProducerIDRequest struct {
	// Address of the kafka broker to send the request to.
	Addr net.Addr

	// The transactional id key.
	TransactionalID string

	// Time after which a transaction should time out.
	TransactionTimeoutMs int

	// The Producer ID (PID).
	// This is used to disambiguate requests if a transactional id is reused following its expiration.
	// Only supported in version >=3 of the request, will be ignored otherwise.
	ProducerID int

	// The producer's current epoch.
	// This will be checked against the producer epoch on the broker,
	// and the request will return an error if they do not match.
	// Only supported in version >=3 of the request, will be ignored otherwise.
	ProducerEpoch int
}

// ProducerSession contains useful information about the producer session from the broker's response.
type ProducerSession struct {
	// The Producer ID (PID) for the current producer session.
	ProducerID int

	// The epoch associated with the current producer session for the given PID.
	ProducerEpoch int
}

// InitProducerIDResponse is the response structure for the InitProducerId function.
type InitProducerIDResponse struct {
	// The details of the producer session established by the request.
	Producer *ProducerSession

	// The amount of time that the broker throttled the request.
	Throttle time.Duration

	// An error that may have occurred while attempting to retrieve initProducerId.
	//
	// The error contains both the kafka error code, and an error message
	// returned by the kafka broker.
	Error error
}

// InitProducerID sends an initProducerId request to a kafka broker and returns the
// response.
func (c *Client) InitProducerID(ctx context.Context, req *InitProducerIDRequest) (*InitProducerIDResponse, error) {
	m, err := c.roundTrip(ctx, req.Addr, &initproducerid.Request{
		TransactionalID:      req.TransactionalID,
		TransactionTimeoutMs: int32(req.TransactionTimeoutMs),
		ProducerID:           int64(req.ProducerID),
		ProducerEpoch:        int16(req.ProducerEpoch),
	})
	if err != nil {
		return nil, fmt.Errorf("kafka.(*Client).InitProducerId: %w", err)
	}

	res := m.(*initproducerid.Response)

	return &InitProducerIDResponse{
		Producer: &ProducerSession{
			ProducerID:    int(res.ProducerID),
			ProducerEpoch: int(res.ProducerEpoch),
		},
		Throttle: makeDuration(res.ThrottleTimeMs),
		Error:    makeError(res.ErrorCode, ""),
	}, nil
}
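An illustrative sketch of requesting a producer session (not part of the vendored source); broker address and timeout are placeholders, and the transactional id is left empty on the assumption that an idempotent, non-transactional producer is being initialized.

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/segmentio/kafka-go"
)

func main() {
	client := &kafka.Client{Addr: kafka.TCP("localhost:9092")}

	res, err := client.InitProducerID(context.Background(), &kafka.InitProducerIDRequest{
		Addr:                 client.Addr,
		TransactionalID:      "",    // assumption: no transactions
		TransactionTimeoutMs: 60000, // placeholder timeout
	})
	if err != nil {
		log.Fatal(err)
	}
	if res.Error != nil {
		log.Fatal(res.Error)
	}
	fmt.Println("producer id:", res.Producer.ProducerID, "epoch:", res.Producer.ProducerEpoch)
}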
377
vendor/github.com/segmentio/kafka-go/joingroup.go
generated
vendored
Normal file
@@ -0,0 +1,377 @@
package kafka

import (
	"bufio"
	"bytes"
	"context"
	"fmt"
	"net"
	"time"

	"github.com/segmentio/kafka-go/protocol"
	"github.com/segmentio/kafka-go/protocol/consumer"
	"github.com/segmentio/kafka-go/protocol/joingroup"
)

// JoinGroupRequest is the request structure for the JoinGroup function.
type JoinGroupRequest struct {
	// Address of the kafka broker to send the request to.
	Addr net.Addr

	// GroupID of the group to join.
	GroupID string

	// The duration after which the coordinator considers the consumer dead
	// if it has not received a heartbeat.
	SessionTimeout time.Duration

	// The duration the coordinator will wait for each member to rejoin when
	// rebalancing the group.
	RebalanceTimeout time.Duration

	// The ID assigned by the group coordinator.
	MemberID string

	// The unique identifier for the consumer instance.
	GroupInstanceID string

	// The name for the class of protocols implemented by the group being joined.
	ProtocolType string

	// The list of protocols the member supports.
	Protocols []GroupProtocol
}

// GroupProtocol represents a consumer group protocol.
type GroupProtocol struct {
	// The protocol name.
	Name string

	// The protocol metadata.
	Metadata GroupProtocolSubscription
}

// GroupProtocolSubscription carries the subscription metadata for a group protocol.
type GroupProtocolSubscription struct {
	// The Topics to subscribe to.
	Topics []string

	// UserData associated with the subscription for the given protocol.
	UserData []byte

	// Partitions owned by this consumer.
	OwnedPartitions map[string][]int
}

// JoinGroupResponse is the response structure for the JoinGroup function.
type JoinGroupResponse struct {
	// An error that may have occurred when attempting to join the group.
	//
	// The errors contain the kafka error code. Programs may use the standard
	// errors.Is function to test the error against kafka error codes.
	Error error

	// The amount of time that the broker throttled the request.
	Throttle time.Duration

	// The generation ID of the group.
	GenerationID int

	// The group protocol selected by the coordinator.
	ProtocolName string

	// The group protocol name.
	ProtocolType string

	// The leader of the group.
	LeaderID string

	// The group member ID.
	MemberID string

	// The members of the group.
	Members []JoinGroupResponseMember
}

// JoinGroupResponseMember represents a group member in a response to a JoinGroup request.
type JoinGroupResponseMember struct {
	// The group member ID.
	ID string

	// The unique identifier of the consumer instance.
	GroupInstanceID string

	// The group member metadata.
	Metadata GroupProtocolSubscription
}

// JoinGroup sends a join group request to the coordinator and returns the response.
func (c *Client) JoinGroup(ctx context.Context, req *JoinGroupRequest) (*JoinGroupResponse, error) {
	joinGroup := joingroup.Request{
		GroupID:            req.GroupID,
		SessionTimeoutMS:   int32(req.SessionTimeout.Milliseconds()),
		RebalanceTimeoutMS: int32(req.RebalanceTimeout.Milliseconds()),
		MemberID:           req.MemberID,
		GroupInstanceID:    req.GroupInstanceID,
		ProtocolType:       req.ProtocolType,
		Protocols:          make([]joingroup.RequestProtocol, 0, len(req.Protocols)),
	}

	for _, proto := range req.Protocols {
		protoMeta := consumer.Subscription{
			Version:         consumer.MaxVersionSupported,
			Topics:          proto.Metadata.Topics,
			UserData:        proto.Metadata.UserData,
			OwnedPartitions: make([]consumer.TopicPartition, 0, len(proto.Metadata.OwnedPartitions)),
		}
		for topic, partitions := range proto.Metadata.OwnedPartitions {
			tp := consumer.TopicPartition{
				Topic:      topic,
				Partitions: make([]int32, 0, len(partitions)),
			}
			for _, partition := range partitions {
				tp.Partitions = append(tp.Partitions, int32(partition))
			}
			protoMeta.OwnedPartitions = append(protoMeta.OwnedPartitions, tp)
		}

		metaBytes, err := protocol.Marshal(consumer.MaxVersionSupported, protoMeta)
		if err != nil {
			return nil, fmt.Errorf("kafka.(*Client).JoinGroup: %w", err)
		}

		joinGroup.Protocols = append(joinGroup.Protocols, joingroup.RequestProtocol{
			Name:     proto.Name,
			Metadata: metaBytes,
		})
	}

	m, err := c.roundTrip(ctx, req.Addr, &joinGroup)
	if err != nil {
		return nil, fmt.Errorf("kafka.(*Client).JoinGroup: %w", err)
	}

	r := m.(*joingroup.Response)

	res := &JoinGroupResponse{
		Error:        makeError(r.ErrorCode, ""),
		Throttle:     makeDuration(r.ThrottleTimeMS),
		GenerationID: int(r.GenerationID),
		ProtocolName: r.ProtocolName,
		ProtocolType: r.ProtocolType,
		LeaderID:     r.LeaderID,
		MemberID:     r.MemberID,
		Members:      make([]JoinGroupResponseMember, 0, len(r.Members)),
	}

	for _, member := range r.Members {
		var meta consumer.Subscription
		err = protocol.Unmarshal(member.Metadata, consumer.MaxVersionSupported, &meta)
		if err != nil {
			return nil, fmt.Errorf("kafka.(*Client).JoinGroup: %w", err)
		}
		subscription := GroupProtocolSubscription{
			Topics:          meta.Topics,
			UserData:        meta.UserData,
			OwnedPartitions: make(map[string][]int, len(meta.OwnedPartitions)),
		}
		for _, owned := range meta.OwnedPartitions {
			subscription.OwnedPartitions[owned.Topic] = make([]int, 0, len(owned.Partitions))
			for _, partition := range owned.Partitions {
				subscription.OwnedPartitions[owned.Topic] = append(subscription.OwnedPartitions[owned.Topic], int(partition))
			}
		}
		res.Members = append(res.Members, JoinGroupResponseMember{
			ID:              member.MemberID,
			GroupInstanceID: member.GroupInstanceID,
			Metadata:        subscription,
		})
	}

	return res, nil
}

type groupMetadata struct {
	Version  int16
	Topics   []string
	UserData []byte
}

func (t groupMetadata) size() int32 {
	return sizeofInt16(t.Version) +
		sizeofStringArray(t.Topics) +
		sizeofBytes(t.UserData)
}

func (t groupMetadata) writeTo(wb *writeBuffer) {
	wb.writeInt16(t.Version)
	wb.writeStringArray(t.Topics)
	wb.writeBytes(t.UserData)
}

func (t groupMetadata) bytes() []byte {
	buf := bytes.NewBuffer(nil)
	t.writeTo(&writeBuffer{w: buf})
	return buf.Bytes()
}

func (t *groupMetadata) readFrom(r *bufio.Reader, size int) (remain int, err error) {
	if remain, err = readInt16(r, size, &t.Version); err != nil {
		return
	}
	if remain, err = readStringArray(r, remain, &t.Topics); err != nil {
		return
	}
	if remain, err = readBytes(r, remain, &t.UserData); err != nil {
		return
	}
	return
}

type joinGroupRequestGroupProtocolV1 struct {
	ProtocolName     string
	ProtocolMetadata []byte
}

func (t joinGroupRequestGroupProtocolV1) size() int32 {
	return sizeofString(t.ProtocolName) +
		sizeofBytes(t.ProtocolMetadata)
}

func (t joinGroupRequestGroupProtocolV1) writeTo(wb *writeBuffer) {
	wb.writeString(t.ProtocolName)
	wb.writeBytes(t.ProtocolMetadata)
}

type joinGroupRequestV1 struct {
	// GroupID holds the unique group identifier.
	GroupID string

	// SessionTimeout holds the timeout in ms after which the coordinator
	// considers the consumer dead if it has received no heartbeat.
	SessionTimeout int32

	// RebalanceTimeout holds the maximum time in ms that the coordinator will
	// wait for each member to rejoin when rebalancing the group.
	RebalanceTimeout int32

	// MemberID assigned by the group coordinator or the zero string if joining
	// for the first time.
	MemberID string

	// ProtocolType holds the unique name for the class of protocols implemented
	// by the group.
	ProtocolType string

	// GroupProtocols holds the list of protocols that the member supports.
	GroupProtocols []joinGroupRequestGroupProtocolV1
}

func (t joinGroupRequestV1) size() int32 {
	return sizeofString(t.GroupID) +
		sizeofInt32(t.SessionTimeout) +
		sizeofInt32(t.RebalanceTimeout) +
		sizeofString(t.MemberID) +
		sizeofString(t.ProtocolType) +
		sizeofArray(len(t.GroupProtocols), func(i int) int32 { return t.GroupProtocols[i].size() })
}

func (t joinGroupRequestV1) writeTo(wb *writeBuffer) {
	wb.writeString(t.GroupID)
	wb.writeInt32(t.SessionTimeout)
	wb.writeInt32(t.RebalanceTimeout)
	wb.writeString(t.MemberID)
	wb.writeString(t.ProtocolType)
	wb.writeArray(len(t.GroupProtocols), func(i int) { t.GroupProtocols[i].writeTo(wb) })
}

type joinGroupResponseMemberV1 struct {
	// MemberID assigned by the group coordinator.
	MemberID       string
	MemberMetadata []byte
}

func (t joinGroupResponseMemberV1) size() int32 {
	return sizeofString(t.MemberID) +
		sizeofBytes(t.MemberMetadata)
}

func (t joinGroupResponseMemberV1) writeTo(wb *writeBuffer) {
	wb.writeString(t.MemberID)
	wb.writeBytes(t.MemberMetadata)
}

func (t *joinGroupResponseMemberV1) readFrom(r *bufio.Reader, size int) (remain int, err error) {
	if remain, err = readString(r, size, &t.MemberID); err != nil {
		return
	}
	if remain, err = readBytes(r, remain, &t.MemberMetadata); err != nil {
		return
	}
	return
}

type joinGroupResponseV1 struct {
	// ErrorCode holds the response error code.
	ErrorCode int16

	// GenerationID holds the generation of the group.
	GenerationID int32

	// GroupProtocol holds the group protocol selected by the coordinator.
	GroupProtocol string

	// LeaderID holds the leader of the group.
	LeaderID string

	// MemberID assigned by the group coordinator.
	MemberID string
	Members  []joinGroupResponseMemberV1
}

func (t joinGroupResponseV1) size() int32 {
	return sizeofInt16(t.ErrorCode) +
		sizeofInt32(t.GenerationID) +
		sizeofString(t.GroupProtocol) +
		sizeofString(t.LeaderID) +
		sizeofString(t.MemberID) +
		sizeofArray(len(t.Members), func(i int) int32 { return t.Members[i].size() })
}

func (t joinGroupResponseV1) writeTo(wb *writeBuffer) {
	wb.writeInt16(t.ErrorCode)
	wb.writeInt32(t.GenerationID)
	wb.writeString(t.GroupProtocol)
	wb.writeString(t.LeaderID)
	wb.writeString(t.MemberID)
	wb.writeArray(len(t.Members), func(i int) { t.Members[i].writeTo(wb) })
}

func (t *joinGroupResponseV1) readFrom(r *bufio.Reader, size int) (remain int, err error) {
	if remain, err = readInt16(r, size, &t.ErrorCode); err != nil {
		return
	}
	if remain, err = readInt32(r, remain, &t.GenerationID); err != nil {
		return
	}
	if remain, err = readString(r, remain, &t.GroupProtocol); err != nil {
		return
	}
	if remain, err = readString(r, remain, &t.LeaderID); err != nil {
		return
	}
	if remain, err = readString(r, remain, &t.MemberID); err != nil {
		return
	}

	fn := func(r *bufio.Reader, size int) (fnRemain int, fnErr error) {
		var item joinGroupResponseMemberV1
		if fnRemain, fnErr = (&item).readFrom(r, size); fnErr != nil {
			return
		}
		t.Members = append(t.Members, item)
		return
	}
	if remain, err = readArrayWith(r, remain, fn); err != nil {
		return
	}

	return
}
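A sketch of starting the group handshake with JoinGroup (not part of the vendored source). In practice the request should be addressed to the group's coordinator, for example as returned by FindCoordinator, and newer brokers may reject a first join with a member-id error that the caller retries with; the broker address, group id, and topic below are placeholders.

package main

import (
	"context"
	"fmt"
	"log"
	"time"

	"github.com/segmentio/kafka-go"
)

func main() {
	client := &kafka.Client{Addr: kafka.TCP("localhost:9092")}

	res, err := client.JoinGroup(context.Background(), &kafka.JoinGroupRequest{
		Addr:             client.Addr, // assumption: this broker is the coordinator
		GroupID:          "my-group",  // placeholder group id
		SessionTimeout:   30 * time.Second,
		RebalanceTimeout: 30 * time.Second,
		ProtocolType:     "consumer",
		Protocols: []kafka.GroupProtocol{{
			Name:     kafka.RangeGroupBalancer{}.ProtocolName(),
			Metadata: kafka.GroupProtocolSubscription{Topics: []string{"my-topic"}},
		}},
	})
	if err != nil {
		log.Fatal(err)
	}
	if res.Error != nil {
		log.Fatal(res.Error)
	}
	fmt.Println("member:", res.MemberID, "generation:", res.GenerationID, "leader:", res.LeaderID)
}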
100
vendor/github.com/segmentio/kafka-go/kafka.go
generated
vendored
Normal file
@@ -0,0 +1,100 @@
package kafka

import "github.com/segmentio/kafka-go/protocol"

// Broker represents a kafka broker in a kafka cluster.
type Broker struct {
	Host string
	Port int
	ID   int
	Rack string
}

// Topic represents a topic in a kafka cluster.
type Topic struct {
	// Name of the topic.
	Name string

	// True if the topic is internal.
	Internal bool

	// The list of partitions currently available on this topic.
	Partitions []Partition

	// An error that may have occurred while attempting to read the topic
	// metadata.
	//
	// The error contains both the kafka error code, and an error message
	// returned by the kafka broker. Programs may use the standard errors.Is
	// function to test the error against kafka error codes.
	Error error
}

// Partition carries the metadata associated with a kafka partition.
type Partition struct {
	// Name of the topic that the partition belongs to, and its index in the
	// topic.
	Topic string
	ID    int

	// Leader, replicas, and ISR for the partition.
	//
	// When no physical host is known to be running a broker, the Host and Port
	// fields will be set to the zero values. The logical broker ID is always
	// set to the value known to the kafka cluster, even if the broker is not
	// currently backed by a physical host.
	Leader   Broker
	Replicas []Broker
	Isr      []Broker

	// Available only with metadata API level >= 6:
	OfflineReplicas []Broker

	// An error that may have occurred while attempting to read the partition
	// metadata.
	//
	// The error contains both the kafka error code, and an error message
	// returned by the kafka broker. Programs may use the standard errors.Is
	// function to test the error against kafka error codes.
	Error error
}

// Marshal encodes v into a binary representation of the value in the kafka data
// format.
//
// If v is, or contains, struct types, their kafka struct tags are interpreted
// and may contain one of these values:
//
//   nullable  valid on bytes and strings, encodes as a nullable value
//   compact   valid on strings, encodes as a compact string
//
// The kafka struct tags should not contain min and max versions. If you need to
// encode types based on specific versions of kafka APIs, use the Version type
// instead.
func Marshal(v interface{}) ([]byte, error) {
	return protocol.Marshal(-1, v)
}

// Unmarshal decodes a binary representation from b into v.
//
// See Marshal for details.
func Unmarshal(b []byte, v interface{}) error {
	return protocol.Unmarshal(b, -1, v)
}

// Version represents a version number for kafka APIs.
type Version int16

// Marshal is like the top-level Marshal function, but will only encode struct
// fields for which n falls within the min and max versions specified on the
// struct tag.
func (n Version) Marshal(v interface{}) ([]byte, error) {
	return protocol.Marshal(int16(n), v)
}

// Unmarshal is like the top-level Unmarshal function, but will only decode
// struct fields for which n falls within the min and max versions specified on
// the struct tag.
func (n Version) Unmarshal(b []byte, v interface{}) error {
	return protocol.Unmarshal(b, int16(n), v)
}
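A small sketch of the Marshal/Unmarshal round trip described above (not part of the vendored source). The record struct and its field names are hypothetical; the kafka:"nullable" tag is the one documented in the Marshal comment.

package main

import (
	"fmt"
	"log"

	"github.com/segmentio/kafka-go"
)

// record is a hypothetical struct; the nullable tag encodes each field as a
// nullable value in the kafka data format, per the Marshal doc comment.
type record struct {
	Name  string `kafka:"nullable"`
	Value []byte `kafka:"nullable"`
}

func main() {
	b, err := kafka.Marshal(record{Name: "config", Value: []byte("on")})
	if err != nil {
		log.Fatal(err)
	}

	var out record
	if err := kafka.Unmarshal(b, &out); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%+v\n", out)
}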
147
vendor/github.com/segmentio/kafka-go/leavegroup.go
generated
vendored
Normal file
@@ -0,0 +1,147 @@
package kafka

import (
	"bufio"
	"context"
	"fmt"
	"net"
	"time"

	"github.com/segmentio/kafka-go/protocol/leavegroup"
)

// LeaveGroupRequest is the request structure for the LeaveGroup function.
type LeaveGroupRequest struct {
	// Address of the kafka broker to send the request to.
	Addr net.Addr

	// GroupID of the group to leave.
	GroupID string

	// List of leaving member identities.
	Members []LeaveGroupRequestMember
}

// LeaveGroupRequestMember represents the identity of a member leaving a group.
type LeaveGroupRequestMember struct {
	// The member ID to remove from the group.
	ID string

	// The group instance ID to remove from the group.
	GroupInstanceID string
}

// LeaveGroupResponse is the response structure for the LeaveGroup function.
type LeaveGroupResponse struct {
	// An error that may have occurred when attempting to leave the group.
	//
	// The errors contain the kafka error code. Programs may use the standard
	// errors.Is function to test the error against kafka error codes.
	Error error

	// The amount of time that the broker throttled the request.
	Throttle time.Duration

	// List of leaving member responses.
	Members []LeaveGroupResponseMember
}

// LeaveGroupResponseMember represents a member leaving the group.
type LeaveGroupResponseMember struct {
	// The member ID of the member leaving the group.
	ID string

	// The group instance ID to remove from the group.
	GroupInstanceID string

	// An error that may have occurred when attempting to remove the member from the group.
	//
	// The errors contain the kafka error code. Programs may use the standard
	// errors.Is function to test the error against kafka error codes.
	Error error
}

func (c *Client) LeaveGroup(ctx context.Context, req *LeaveGroupRequest) (*LeaveGroupResponse, error) {
	leaveGroup := leavegroup.Request{
		GroupID: req.GroupID,
		Members: make([]leavegroup.RequestMember, 0, len(req.Members)),
	}

	for _, member := range req.Members {
		leaveGroup.Members = append(leaveGroup.Members, leavegroup.RequestMember{
			MemberID:        member.ID,
			GroupInstanceID: member.GroupInstanceID,
		})
	}

	m, err := c.roundTrip(ctx, req.Addr, &leaveGroup)
	if err != nil {
		return nil, fmt.Errorf("kafka.(*Client).LeaveGroup: %w", err)
	}

	r := m.(*leavegroup.Response)

	res := &LeaveGroupResponse{
		Error:    makeError(r.ErrorCode, ""),
		Throttle: makeDuration(r.ThrottleTimeMS),
	}

	if len(r.Members) == 0 {
		// If we're using a version of the api without the
		// members array in the response, just add a member
		// so the api is consistent across versions.
		r.Members = []leavegroup.ResponseMember{
			{
				MemberID:        req.Members[0].ID,
				GroupInstanceID: req.Members[0].GroupInstanceID,
			},
		}
	}

	res.Members = make([]LeaveGroupResponseMember, 0, len(r.Members))
	for _, member := range r.Members {
		res.Members = append(res.Members, LeaveGroupResponseMember{
			ID:              member.MemberID,
			GroupInstanceID: member.GroupInstanceID,
			Error:           makeError(member.ErrorCode, ""),
		})
	}

	return res, nil
}

type leaveGroupRequestV0 struct {
	// GroupID holds the unique group identifier.
	GroupID string

	// MemberID assigned by the group coordinator or the zero string if joining
	// for the first time.
	MemberID string
}

func (t leaveGroupRequestV0) size() int32 {
	return sizeofString(t.GroupID) + sizeofString(t.MemberID)
}

func (t leaveGroupRequestV0) writeTo(wb *writeBuffer) {
	wb.writeString(t.GroupID)
	wb.writeString(t.MemberID)
}

type leaveGroupResponseV0 struct {
	// ErrorCode holds the response error code.
	ErrorCode int16
}

func (t leaveGroupResponseV0) size() int32 {
	return sizeofInt16(t.ErrorCode)
}

func (t leaveGroupResponseV0) writeTo(wb *writeBuffer) {
	wb.writeInt16(t.ErrorCode)
}

func (t *leaveGroupResponseV0) readFrom(r *bufio.Reader, size int) (remain int, err error) {
	remain, err = readInt16(r, size, &t.ErrorCode)
	return
}
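An illustrative sketch of leaving a group (not part of the vendored source); the group id and member id are placeholders that would normally come from an earlier JoinGroup exchange.

package main

import (
	"context"
	"log"

	"github.com/segmentio/kafka-go"
)

func main() {
	client := &kafka.Client{Addr: kafka.TCP("localhost:9092")}

	res, err := client.LeaveGroup(context.Background(), &kafka.LeaveGroupRequest{
		Addr:    client.Addr,
		GroupID: "my-group", // placeholder group id
		Members: []kafka.LeaveGroupRequestMember{{
			ID: "member-1", // placeholder, from JoinGroup
		}},
	})
	if err != nil {
		log.Fatal(err)
	}
	if res.Error != nil {
		log.Fatal(res.Error)
	}
	for _, m := range res.Members {
		if m.Error != nil {
			log.Printf("member %q: %v", m.ID, m.Error)
		}
	}
}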
139
vendor/github.com/segmentio/kafka-go/listgroups.go
generated
vendored
Normal file
@@ -0,0 +1,139 @@
package kafka

import (
	"bufio"
	"context"
	"net"

	"github.com/segmentio/kafka-go/protocol/listgroups"
)

// ListGroupsRequest is a request to the ListGroups API.
type ListGroupsRequest struct {
	// Addr is the address of the kafka broker to send the request to.
	Addr net.Addr
}

// ListGroupsResponse is a response from the ListGroups API.
type ListGroupsResponse struct {
	// Error is set to a non-nil value if a top-level error occurred while fetching
	// groups.
	Error error

	// Groups contains the list of groups.
	Groups []ListGroupsResponseGroup
}

// ListGroupsResponseGroup contains the response details for a single group.
type ListGroupsResponseGroup struct {
	// GroupID is the ID of the group.
	GroupID string

	// Coordinator is the ID of the coordinator broker for the group.
	Coordinator int
}

func (c *Client) ListGroups(
	ctx context.Context,
	req *ListGroupsRequest,
) (*ListGroupsResponse, error) {
	protoResp, err := c.roundTrip(ctx, req.Addr, &listgroups.Request{})
	if err != nil {
		return nil, err
	}
	apiResp := protoResp.(*listgroups.Response)
	resp := &ListGroupsResponse{
		Error: makeError(apiResp.ErrorCode, ""),
	}

	for _, apiGroupInfo := range apiResp.Groups {
		resp.Groups = append(resp.Groups, ListGroupsResponseGroup{
			GroupID:     apiGroupInfo.GroupID,
			Coordinator: int(apiGroupInfo.BrokerID),
		})
	}

	return resp, nil
}
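
// Example (illustrative sketch, not part of the vendored source): listing the
// consumer groups known to a cluster; the broker address is an assumption.
//
//	client := &kafka.Client{Addr: kafka.TCP("localhost:9092")}
//	resp, err := client.ListGroups(context.Background(), &kafka.ListGroupsRequest{
//		Addr: client.Addr,
//	})
//	if err != nil {
//		log.Fatal(err)
//	}
//	for _, g := range resp.Groups {
//		log.Printf("group %s is coordinated by broker %d", g.GroupID, g.Coordinator)
//	}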
// TODO: Remove everything below and use protocol-based version above everywhere.
type listGroupsRequestV1 struct {
}

func (t listGroupsRequestV1) size() int32 {
	return 0
}

func (t listGroupsRequestV1) writeTo(wb *writeBuffer) {
}

type listGroupsResponseGroupV1 struct {
	// GroupID holds the unique group identifier
	GroupID      string
	ProtocolType string
}

func (t listGroupsResponseGroupV1) size() int32 {
	return sizeofString(t.GroupID) + sizeofString(t.ProtocolType)
}

func (t listGroupsResponseGroupV1) writeTo(wb *writeBuffer) {
	wb.writeString(t.GroupID)
	wb.writeString(t.ProtocolType)
}

func (t *listGroupsResponseGroupV1) readFrom(r *bufio.Reader, size int) (remain int, err error) {
	if remain, err = readString(r, size, &t.GroupID); err != nil {
		return
	}
	if remain, err = readString(r, remain, &t.ProtocolType); err != nil {
		return
	}
	return
}

type listGroupsResponseV1 struct {
	// ThrottleTimeMS holds the duration in milliseconds for which the request
	// was throttled due to quota violation (zero if the request did not violate
	// any quota)
	ThrottleTimeMS int32

	// ErrorCode holds response error code
	ErrorCode int16
	Groups    []listGroupsResponseGroupV1
}

func (t listGroupsResponseV1) size() int32 {
	return sizeofInt32(t.ThrottleTimeMS) +
		sizeofInt16(t.ErrorCode) +
		sizeofArray(len(t.Groups), func(i int) int32 { return t.Groups[i].size() })
}

func (t listGroupsResponseV1) writeTo(wb *writeBuffer) {
	wb.writeInt32(t.ThrottleTimeMS)
	wb.writeInt16(t.ErrorCode)
	wb.writeArray(len(t.Groups), func(i int) { t.Groups[i].writeTo(wb) })
}

func (t *listGroupsResponseV1) readFrom(r *bufio.Reader, size int) (remain int, err error) {
	if remain, err = readInt32(r, size, &t.ThrottleTimeMS); err != nil {
		return
	}
	if remain, err = readInt16(r, remain, &t.ErrorCode); err != nil {
		return
	}

	fn := func(withReader *bufio.Reader, withSize int) (fnRemain int, fnErr error) {
		var item listGroupsResponseGroupV1
		// note: the original checked the outer err here, which could never be
		// non-nil at this point; checking fnErr is the intended behavior.
		if fnRemain, fnErr = (&item).readFrom(withReader, withSize); fnErr != nil {
			return
		}
		t.Groups = append(t.Groups, item)
		return
	}
	if remain, err = readArrayWith(r, remain, fn); err != nil {
		return
	}

	return
}
286
vendor/github.com/segmentio/kafka-go/listoffset.go
generated
vendored
Normal file
@@ -0,0 +1,286 @@
package kafka

import (
	"bufio"
	"context"
	"fmt"
	"net"
	"time"

	"github.com/segmentio/kafka-go/protocol/listoffsets"
)

// OffsetRequest represents a request to retrieve a single partition offset.
type OffsetRequest struct {
	Partition int
	Timestamp int64
}

// FirstOffsetOf constructs an OffsetRequest which asks for the first offset of
// the partition given as argument.
func FirstOffsetOf(partition int) OffsetRequest {
	return OffsetRequest{Partition: partition, Timestamp: FirstOffset}
}

// LastOffsetOf constructs an OffsetRequest which asks for the last offset of
// the partition given as argument.
func LastOffsetOf(partition int) OffsetRequest {
	return OffsetRequest{Partition: partition, Timestamp: LastOffset}
}

// TimeOffsetOf constructs an OffsetRequest which asks for a partition offset
// at a given time.
func TimeOffsetOf(partition int, at time.Time) OffsetRequest {
	return OffsetRequest{Partition: partition, Timestamp: timestamp(at)}
}

// PartitionOffsets carries information about offsets available in a topic
// partition.
type PartitionOffsets struct {
	Partition   int
	FirstOffset int64
	LastOffset  int64
	Offsets     map[int64]time.Time
	Error       error
}

// ListOffsetsRequest represents a request sent to a kafka broker to list the
// offsets of topic partitions.
type ListOffsetsRequest struct {
	// Address of the kafka broker to send the request to.
	Addr net.Addr

	// A mapping of topic names to list of partitions that the program wishes to
	// get the offsets for.
	Topics map[string][]OffsetRequest

	// The isolation level for the request.
	//
	// Defaults to ReadUncommitted.
	//
	// This field requires the kafka broker to support the ListOffsets API in
	// version 2 or above (otherwise the value is ignored).
	IsolationLevel IsolationLevel
}

// ListOffsetsResponse represents a response from a kafka broker to an offset
// listing request.
type ListOffsetsResponse struct {
	// The amount of time that the broker throttled the request.
	Throttle time.Duration

	// Mappings of topic names to partition offsets; there will be one entry
	// for each topic in the request.
	Topics map[string][]PartitionOffsets
}

// ListOffsets sends an offset request to a kafka broker and returns the
// response.
func (c *Client) ListOffsets(ctx context.Context, req *ListOffsetsRequest) (*ListOffsetsResponse, error) {
	type topicPartition struct {
		topic     string
		partition int
	}

	partitionOffsets := make(map[topicPartition]PartitionOffsets)

	for topicName, requests := range req.Topics {
		for _, r := range requests {
			key := topicPartition{
				topic:     topicName,
				partition: r.Partition,
			}

			partition, ok := partitionOffsets[key]
			if !ok {
				partition = PartitionOffsets{
					Partition:   r.Partition,
					FirstOffset: -1,
					LastOffset:  -1,
					Offsets:     make(map[int64]time.Time),
				}
			}

			switch r.Timestamp {
			case FirstOffset:
				partition.FirstOffset = 0
			case LastOffset:
				partition.LastOffset = 0
			}

			partitionOffsets[topicPartition{
				topic:     topicName,
				partition: r.Partition,
			}] = partition
		}
	}

	topics := make([]listoffsets.RequestTopic, 0, len(req.Topics))

	for topicName, requests := range req.Topics {
		partitions := make([]listoffsets.RequestPartition, len(requests))

		for i, r := range requests {
			partitions[i] = listoffsets.RequestPartition{
				Partition:          int32(r.Partition),
				CurrentLeaderEpoch: -1,
				Timestamp:          r.Timestamp,
			}
		}

		topics = append(topics, listoffsets.RequestTopic{
			Topic:      topicName,
			Partitions: partitions,
		})
	}

	m, err := c.roundTrip(ctx, req.Addr, &listoffsets.Request{
		ReplicaID:      -1,
		IsolationLevel: int8(req.IsolationLevel),
		Topics:         topics,
	})

	if err != nil {
		return nil, fmt.Errorf("kafka.(*Client).ListOffsets: %w", err)
	}

	res := m.(*listoffsets.Response)
	ret := &ListOffsetsResponse{
		Throttle: makeDuration(res.ThrottleTimeMs),
		Topics:   make(map[string][]PartitionOffsets, len(res.Topics)),
	}

	for _, t := range res.Topics {
		for _, p := range t.Partitions {
			key := topicPartition{
				topic:     t.Topic,
				partition: int(p.Partition),
			}

			partition := partitionOffsets[key]

			switch p.Timestamp {
			case FirstOffset:
				partition.FirstOffset = p.Offset
			case LastOffset:
				partition.LastOffset = p.Offset
			default:
				partition.Offsets[p.Offset] = makeTime(p.Timestamp)
			}

			if p.ErrorCode != 0 {
				partition.Error = Error(p.ErrorCode)
			}

			partitionOffsets[key] = partition
		}
	}

	for key, partition := range partitionOffsets {
		ret.Topics[key.topic] = append(ret.Topics[key.topic], partition)
	}

	return ret, nil
}
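
// Example (illustrative sketch, not part of the vendored source): asking a
// broker for the first and last offsets of partition 0 of a topic; the broker
// address and topic name are assumptions.
//
//	client := &kafka.Client{Addr: kafka.TCP("localhost:9092")}
//	resp, err := client.ListOffsets(context.Background(), &kafka.ListOffsetsRequest{
//		Addr: client.Addr,
//		Topics: map[string][]kafka.OffsetRequest{
//			"my-topic": {kafka.FirstOffsetOf(0), kafka.LastOffsetOf(0)},
//		},
//	})
//	if err != nil {
//		log.Fatal(err)
//	}
//	for _, p := range resp.Topics["my-topic"] {
//		log.Printf("partition %d: first=%d last=%d", p.Partition, p.FirstOffset, p.LastOffset)
//	}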
type listOffsetRequestV1 struct {
	ReplicaID int32
	Topics    []listOffsetRequestTopicV1
}

func (r listOffsetRequestV1) size() int32 {
	return 4 + sizeofArray(len(r.Topics), func(i int) int32 { return r.Topics[i].size() })
}

func (r listOffsetRequestV1) writeTo(wb *writeBuffer) {
	wb.writeInt32(r.ReplicaID)
	wb.writeArray(len(r.Topics), func(i int) { r.Topics[i].writeTo(wb) })
}

type listOffsetRequestTopicV1 struct {
	TopicName  string
	Partitions []listOffsetRequestPartitionV1
}

func (t listOffsetRequestTopicV1) size() int32 {
	return sizeofString(t.TopicName) +
		sizeofArray(len(t.Partitions), func(i int) int32 { return t.Partitions[i].size() })
}

func (t listOffsetRequestTopicV1) writeTo(wb *writeBuffer) {
	wb.writeString(t.TopicName)
	wb.writeArray(len(t.Partitions), func(i int) { t.Partitions[i].writeTo(wb) })
}

type listOffsetRequestPartitionV1 struct {
	Partition int32
	Time      int64
}

func (p listOffsetRequestPartitionV1) size() int32 {
	return 4 + 8
}

func (p listOffsetRequestPartitionV1) writeTo(wb *writeBuffer) {
	wb.writeInt32(p.Partition)
	wb.writeInt64(p.Time)
}

type listOffsetResponseV1 []listOffsetResponseTopicV1

func (r listOffsetResponseV1) size() int32 {
	return sizeofArray(len(r), func(i int) int32 { return r[i].size() })
}

func (r listOffsetResponseV1) writeTo(wb *writeBuffer) {
	wb.writeArray(len(r), func(i int) { r[i].writeTo(wb) })
}

type listOffsetResponseTopicV1 struct {
	TopicName        string
	PartitionOffsets []partitionOffsetV1
}

func (t listOffsetResponseTopicV1) size() int32 {
	return sizeofString(t.TopicName) +
		sizeofArray(len(t.PartitionOffsets), func(i int) int32 { return t.PartitionOffsets[i].size() })
}

func (t listOffsetResponseTopicV1) writeTo(wb *writeBuffer) {
	wb.writeString(t.TopicName)
	wb.writeArray(len(t.PartitionOffsets), func(i int) { t.PartitionOffsets[i].writeTo(wb) })
}

type partitionOffsetV1 struct {
	Partition int32
	ErrorCode int16
	Timestamp int64
	Offset    int64
}

func (p partitionOffsetV1) size() int32 {
	return 4 + 2 + 8 + 8
}

func (p partitionOffsetV1) writeTo(wb *writeBuffer) {
	wb.writeInt32(p.Partition)
	wb.writeInt16(p.ErrorCode)
	wb.writeInt64(p.Timestamp)
	wb.writeInt64(p.Offset)
}

func (p *partitionOffsetV1) readFrom(r *bufio.Reader, sz int) (remain int, err error) {
	if remain, err = readInt32(r, sz, &p.Partition); err != nil {
		return
	}
	if remain, err = readInt16(r, remain, &p.ErrorCode); err != nil {
		return
	}
	if remain, err = readInt64(r, remain, &p.Timestamp); err != nil {
		return
	}
	if remain, err = readInt64(r, remain, &p.Offset); err != nil {
		return
	}
	return
}
17
vendor/github.com/segmentio/kafka-go/logger.go
generated
vendored
Normal file
@@ -0,0 +1,17 @@
package kafka

// Logger interface API for log.Logger.
type Logger interface {
	Printf(string, ...interface{})
}

// LoggerFunc is a bridge between Logger and any third party logger.
// Usage:
//
//	l := NewLogger() // some logger
//	r := kafka.NewReader(kafka.ReaderConfig{
//		Logger:      kafka.LoggerFunc(l.Infof),
//		ErrorLogger: kafka.LoggerFunc(l.Errorf),
//	})
type LoggerFunc func(string, ...interface{})

func (f LoggerFunc) Printf(msg string, args ...interface{}) { f(msg, args...) }
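
// Example (illustrative, not part of the vendored source): the standard
// library *log.Logger already provides Printf and therefore satisfies the
// Logger interface directly, without the LoggerFunc bridge; the broker
// address and topic are assumptions.
//
//	r := kafka.NewReader(kafka.ReaderConfig{
//		Brokers: []string{"localhost:9092"},
//		Topic:   "my-topic",
//		Logger:  log.New(os.Stderr, "kafka ", log.LstdFlags),
//	})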
121
vendor/github.com/segmentio/kafka-go/message.go
generated
vendored
Normal file
@@ -0,0 +1,121 @@
package kafka

import (
	"time"
)

// Message is a data structure representing kafka messages.
type Message struct {
	// Topic indicates which topic this message was consumed from via Reader.
	//
	// When being used with Writer, this can be used to configure the topic if
	// not already specified on the writer itself.
	Topic string

	// Partition is read-only and MUST NOT be set when writing messages.
	Partition     int
	Offset        int64
	HighWaterMark int64
	Key           []byte
	Value         []byte
	Headers       []Header

	// This field is used to hold arbitrary data you wish to include, so it
	// will be available when handling the message in the Writer's `Completion`
	// method; this lets the application run post operations on each message.
	WriterData interface{}

	// If not set at the creation, Time will be automatically set when
	// writing the message.
	Time time.Time
}
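
// Example (illustrative, not part of the vendored source): constructing a
// message with a key, value and headers for use with a Writer; the topic and
// header names are assumptions.
//
//	msg := kafka.Message{
//		Topic: "my-topic",
//		Key:   []byte("user-42"),
//		Value: []byte(`{"action":"login"}`),
//		Headers: []kafka.Header{
//			{Key: "source", Value: []byte("auth-service")},
//		},
//	}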
func (msg Message) message(cw *crc32Writer) message {
	m := message{
		MagicByte: 1,
		Key:       msg.Key,
		Value:     msg.Value,
		Timestamp: timestamp(msg.Time),
	}
	if cw != nil {
		m.CRC = m.crc32(cw)
	}
	return m
}

const timestampSize = 8

func (msg *Message) size() int32 {
	return 4 + 1 + 1 + sizeofBytes(msg.Key) + sizeofBytes(msg.Value) + timestampSize
}

type message struct {
	CRC        int32
	MagicByte  int8
	Attributes int8
	Timestamp  int64
	Key        []byte
	Value      []byte
}

func (m message) crc32(cw *crc32Writer) int32 {
	cw.crc32 = 0
	cw.writeInt8(m.MagicByte)
	cw.writeInt8(m.Attributes)
	if m.MagicByte != 0 {
		cw.writeInt64(m.Timestamp)
	}
	cw.writeBytes(m.Key)
	cw.writeBytes(m.Value)
	return int32(cw.crc32)
}

func (m message) size() int32 {
	size := 4 + 1 + 1 + sizeofBytes(m.Key) + sizeofBytes(m.Value)
	if m.MagicByte != 0 {
		size += timestampSize
	}
	return size
}

func (m message) writeTo(wb *writeBuffer) {
	wb.writeInt32(m.CRC)
	wb.writeInt8(m.MagicByte)
	wb.writeInt8(m.Attributes)
	if m.MagicByte != 0 {
		wb.writeInt64(m.Timestamp)
	}
	wb.writeBytes(m.Key)
	wb.writeBytes(m.Value)
}

type messageSetItem struct {
	Offset      int64
	MessageSize int32
	Message     message
}

func (m messageSetItem) size() int32 {
	return 8 + 4 + m.Message.size()
}

func (m messageSetItem) writeTo(wb *writeBuffer) {
	wb.writeInt64(m.Offset)
	wb.writeInt32(m.MessageSize)
	m.Message.writeTo(wb)
}

type messageSet []messageSetItem

func (s messageSet) size() (size int32) {
	for _, m := range s {
		size += m.size()
	}
	return
}

func (s messageSet) writeTo(wb *writeBuffer) {
	for _, m := range s {
		m.writeTo(wb)
	}
}
545
vendor/github.com/segmentio/kafka-go/message_reader.go
generated
vendored
Normal file
@@ -0,0 +1,545 @@
package kafka

import (
	"bufio"
	"bytes"
	"fmt"
	"io"
	"log"
)

type readBytesFunc func(*bufio.Reader, int, int) (int, error)

// messageSetReader processes the messages encoded into a fetch response.
// The response may contain a mix of Record Batches (newer format) and Messages
// (older format).
type messageSetReader struct {
	*readerStack // used for decompressing compressed messages and record batches
	empty        bool // if true, short circuits messageSetReader methods
	debug        bool // enable debug log messages
	// How many bytes are expected to remain in the response.
	//
	// This is used to detect truncation of the response.
	lengthRemain int

	decompressed *bytes.Buffer
}

type readerStack struct {
	reader *bufio.Reader
	remain int
	base   int64
	parent *readerStack
	count  int            // how many messages left in the current message set
	header messagesHeader // the current header for a subset of messages within the set.
}

// messagesHeader describes a set of records. There may be many messagesHeader
// values in a message set.
type messagesHeader struct {
	firstOffset int64
	length      int32
	crc         int32
	magic       int8
	// v1 composes attributes specific to v0 and v1 message headers
	v1 struct {
		attributes int8
		timestamp  int64
	}
	// v2 composes attributes specific to v2 message headers
	v2 struct {
		leaderEpoch     int32
		attributes      int16
		lastOffsetDelta int32
		firstTimestamp  int64
		lastTimestamp   int64
		producerID      int64
		producerEpoch   int16
		baseSequence    int32
		count           int32
	}
}

func (h messagesHeader) compression() (codec CompressionCodec, err error) {
	const compressionCodecMask = 0x07
	var code int8
	switch h.magic {
	case 0, 1:
		code = h.v1.attributes & compressionCodecMask
	case 2:
		code = int8(h.v2.attributes & compressionCodecMask)
	default:
		err = h.badMagic()
		return
	}
	if code != 0 {
		codec, err = resolveCodec(code)
	}
	return
}

func (h messagesHeader) badMagic() error {
	return fmt.Errorf("unsupported magic byte %d in header", h.magic)
}
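
// For reference (an addition, not part of the upstream source): the 3-bit
// codes carried in the attributes mask above are the standard kafka
// compression codes (0=none, 1=gzip, 2=snappy, 3=lz4, 4=zstd), which
// resolveCodec maps to a registered CompressionCodec implementation.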
func newMessageSetReader(reader *bufio.Reader, remain int) (*messageSetReader, error) {
	res := &messageSetReader{
		readerStack: &readerStack{
			reader: reader,
			remain: remain,
		},
		decompressed: acquireBuffer(),
	}
	err := res.readHeader()
	return res, err
}

func (r *messageSetReader) remaining() (remain int) {
	if r.empty {
		return 0
	}
	for s := r.readerStack; s != nil; s = s.parent {
		remain += s.remain
	}
	return
}

func (r *messageSetReader) discard() (err error) {
	switch {
	case r.empty:
	case r.readerStack == nil:
	default:
		// rewind up to the top-most reader b/c it's the only one that's doing
		// actual i/o. the rest are byte buffers that have been pushed on the stack
		// while reading compressed message sets.
		for r.parent != nil {
			r.readerStack = r.parent
		}
		err = r.discardN(r.remain)
	}
	return
}

func (r *messageSetReader) readMessage(min int64, key readBytesFunc, val readBytesFunc) (
	offset int64, lastOffset int64, timestamp int64, headers []Header, err error) {

	if r.empty {
		err = RequestTimedOut
		return
	}
	if err = r.readHeader(); err != nil {
		return
	}
	switch r.header.magic {
	case 0, 1:
		offset, timestamp, headers, err = r.readMessageV1(min, key, val)
		// Set an invalid value so that it can be ignored
		lastOffset = -1
	case 2:
		offset, lastOffset, timestamp, headers, err = r.readMessageV2(min, key, val)
	default:
		err = r.header.badMagic()
	}
	return
}

func (r *messageSetReader) readMessageV1(min int64, key readBytesFunc, val readBytesFunc) (
	offset int64, timestamp int64, headers []Header, err error) {

	for r.readerStack != nil {
		if r.remain == 0 {
			r.readerStack = r.parent
			continue
		}
		if err = r.readHeader(); err != nil {
			return
		}
		offset = r.header.firstOffset
		timestamp = r.header.v1.timestamp
		var codec CompressionCodec
		if codec, err = r.header.compression(); err != nil {
			return
		}
		r.log("Reading with codec=%T", codec)
		if codec != nil {
			// discard next four bytes...will be -1 to indicate null key
			if err = r.discardN(4); err != nil {
				return
			}

			// read and decompress the contained message set.
			r.decompressed.Reset()
			if err = r.readBytesWith(func(br *bufio.Reader, sz int, n int) (remain int, err error) {
				// x4 as a guess that the average compression ratio is near 75%
				r.decompressed.Grow(4 * n)
				limitReader := io.LimitedReader{R: br, N: int64(n)}
				codecReader := codec.NewReader(&limitReader)
				_, err = r.decompressed.ReadFrom(codecReader)
				remain = sz - (n - int(limitReader.N))
				codecReader.Close()
				return
			}); err != nil {
				return
			}

			// the compressed message's offset will be equal to the offset of
			// the last message in the set. within the compressed set, the
			// offsets will be relative, so we have to scan through them to
			// get the base offset. for example, if there are four compressed
			// messages at offsets 10-13, then the container message will have
			// offset 13 and the contained messages will be 0,1,2,3. the base
			// offset for the container, then, is 13-3=10.
			if offset, err = extractOffset(offset, r.decompressed.Bytes()); err != nil {
				return
			}

			// mark the outer message as being read
			r.markRead()

			// then push the decompressed bytes onto the stack.
			r.readerStack = &readerStack{
				// Allocate a buffer of size 0, which gets capped at 16 bytes
				// by the bufio package. We are already reading buffered data
				// here, no need to reserve another 4KB buffer.
				reader: bufio.NewReaderSize(r.decompressed, 0),
				remain: r.decompressed.Len(),
				base:   offset,
				parent: r.readerStack,
			}
			continue
		}

		// adjust the offset in case we're reading compressed messages. the
		// base will be zero otherwise.
		offset += r.base

		// When the messages are compressed kafka may return messages at an
		// earlier offset than the one that was requested, it's the client's
		// responsibility to ignore those.
		//
		// At this point, the message header has been read, so discarding
		// the rest of the message means we have to discard the key, and then
		// the value. Each of those are preceded by a 4-byte length. Discarding
		// them is then reading that length variable and then discarding that
		// amount.
		if offset < min {
			// discard the key
			if err = r.discardBytes(); err != nil {
				return
			}
			// discard the value
			if err = r.discardBytes(); err != nil {
				return
			}
			// since we have fully consumed the message, mark as read
			r.markRead()
			continue
		}
		if err = r.readBytesWith(key); err != nil {
			return
		}
		if err = r.readBytesWith(val); err != nil {
			return
		}
		r.markRead()
		return
	}
	err = errShortRead
	return
}
func (r *messageSetReader) readMessageV2(_ int64, key readBytesFunc, val readBytesFunc) (
	offset int64, lastOffset int64, timestamp int64, headers []Header, err error) {
	if err = r.readHeader(); err != nil {
		return
	}
	if r.count == int(r.header.v2.count) { // first time reading this set, so check for compression headers.
		var codec CompressionCodec
		if codec, err = r.header.compression(); err != nil {
			return
		}
		if codec != nil {
			// the batch length counts the bytes that follow it in the record
			// batch header; the fixed-size header fields (partition leader
			// epoch, magic, crc, attributes, last offset delta, first and max
			// timestamps, producer id, producer epoch, base sequence and
			// record count) account for 49 of them.
			batchRemain := int(r.header.length - 49)
			if batchRemain > r.remain {
				err = errShortRead
				return
			}
			if batchRemain < 0 {
				err = fmt.Errorf("batch remain < 0 (%d)", batchRemain)
				return
			}
			r.decompressed.Reset()
			// x4 as a guess that the average compression ratio is near 75%
			r.decompressed.Grow(4 * batchRemain)
			limitReader := io.LimitedReader{R: r.reader, N: int64(batchRemain)}
			codecReader := codec.NewReader(&limitReader)
			_, err = r.decompressed.ReadFrom(codecReader)
			codecReader.Close()
			if err != nil {
				return
			}
			r.remain -= batchRemain - int(limitReader.N)
			r.readerStack = &readerStack{
				reader: bufio.NewReaderSize(r.decompressed, 0), // the new stack reads from the decompressed buffer
				remain: r.decompressed.Len(),
				base:   -1, // base is unused here
				parent: r.readerStack,
				header: r.header,
				count:  r.count,
			}
			// all of the messages in this set are in the decompressed set just pushed onto the reader
			// stack. here we set the parent count to 0 so that when the child set is exhausted, the
			// reader will then try to read the header of the next message set
			r.readerStack.parent.count = 0
		}
	}
	remainBefore := r.remain
	var length int64
	if err = r.readVarInt(&length); err != nil {
		return
	}
	lengthOfLength := remainBefore - r.remain
	var attrs int8
	if err = r.readInt8(&attrs); err != nil {
		return
	}
	var timestampDelta int64
	if err = r.readVarInt(&timestampDelta); err != nil {
		return
	}
	timestamp = r.header.v2.firstTimestamp + timestampDelta
	var offsetDelta int64
	if err = r.readVarInt(&offsetDelta); err != nil {
		return
	}
	offset = r.header.firstOffset + offsetDelta
	if err = r.runFunc(key); err != nil {
		return
	}
	if err = r.runFunc(val); err != nil {
		return
	}
	var headerCount int64
	if err = r.readVarInt(&headerCount); err != nil {
		return
	}
	if headerCount > 0 {
		headers = make([]Header, headerCount)
		for i := range headers {
			if err = r.readMessageHeader(&headers[i]); err != nil {
				return
			}
		}
	}
	lastOffset = r.header.firstOffset + int64(r.header.v2.lastOffsetDelta)
	r.lengthRemain -= int(length) + lengthOfLength
	r.markRead()
	return
}
func (r *messageSetReader) discardBytes() (err error) {
	r.remain, err = discardBytes(r.reader, r.remain)
	return
}

func (r *messageSetReader) discardN(sz int) (err error) {
	r.remain, err = discardN(r.reader, r.remain, sz)
	return
}

func (r *messageSetReader) markRead() {
	if r.count == 0 {
		panic("markRead: negative count")
	}
	r.count--
	r.unwindStack()
	r.log("Mark read remain=%d", r.remain)
}

func (r *messageSetReader) unwindStack() {
	for r.count == 0 {
		if r.remain == 0 {
			if r.parent != nil {
				r.log("Popped reader stack")
				r.readerStack = r.parent
				continue
			}
		}
		break
	}
}

func (r *messageSetReader) readMessageHeader(header *Header) (err error) {
	var keyLen int64
	if err = r.readVarInt(&keyLen); err != nil {
		return
	}
	if header.Key, err = r.readNewString(int(keyLen)); err != nil {
		return
	}
	var valLen int64
	if err = r.readVarInt(&valLen); err != nil {
		return
	}
	if header.Value, err = r.readNewBytes(int(valLen)); err != nil {
		return
	}
	return nil
}

func (r *messageSetReader) runFunc(rbFunc readBytesFunc) (err error) {
	var length int64
	if err = r.readVarInt(&length); err != nil {
		return
	}
	if r.remain, err = rbFunc(r.reader, r.remain, int(length)); err != nil {
		return
	}
	return
}

func (r *messageSetReader) readHeader() (err error) {
	if r.count > 0 {
		// currently reading a set of messages, no need to read a header until they are exhausted.
		return
	}
	r.header = messagesHeader{}
	if err = r.readInt64(&r.header.firstOffset); err != nil {
		return
	}
	if err = r.readInt32(&r.header.length); err != nil {
		return
	}
	var crcOrLeaderEpoch int32
	if err = r.readInt32(&crcOrLeaderEpoch); err != nil {
		return
	}
	if err = r.readInt8(&r.header.magic); err != nil {
		return
	}
	switch r.header.magic {
	case 0:
		r.header.crc = crcOrLeaderEpoch
		if err = r.readInt8(&r.header.v1.attributes); err != nil {
			return
		}
		r.count = 1
		// Set arbitrary non-zero length so that we always assume the
		// message is truncated since bytes remain.
		r.lengthRemain = 1
		r.log("Read v0 header with offset=%d len=%d magic=%d attributes=%d", r.header.firstOffset, r.header.length, r.header.magic, r.header.v1.attributes)
	case 1:
		r.header.crc = crcOrLeaderEpoch
		if err = r.readInt8(&r.header.v1.attributes); err != nil {
			return
		}
		if err = r.readInt64(&r.header.v1.timestamp); err != nil {
			return
		}
		r.count = 1
		// Set arbitrary non-zero length so that we always assume the
		// message is truncated since bytes remain.
		r.lengthRemain = 1
		r.log("Read v1 header with remain=%d offset=%d magic=%d and attributes=%d", r.remain, r.header.firstOffset, r.header.magic, r.header.v1.attributes)
	case 2:
		r.header.v2.leaderEpoch = crcOrLeaderEpoch
		if err = r.readInt32(&r.header.crc); err != nil {
			return
		}
		if err = r.readInt16(&r.header.v2.attributes); err != nil {
			return
		}
		if err = r.readInt32(&r.header.v2.lastOffsetDelta); err != nil {
			return
		}
		if err = r.readInt64(&r.header.v2.firstTimestamp); err != nil {
			return
		}
		if err = r.readInt64(&r.header.v2.lastTimestamp); err != nil {
			return
		}
		if err = r.readInt64(&r.header.v2.producerID); err != nil {
			return
		}
		if err = r.readInt16(&r.header.v2.producerEpoch); err != nil {
			return
		}
		if err = r.readInt32(&r.header.v2.baseSequence); err != nil {
			return
		}
		if err = r.readInt32(&r.header.v2.count); err != nil {
			return
		}
		r.count = int(r.header.v2.count)
		// Subtract the 49 bytes of fixed-size batch header fields that
		// follow the length field (see readMessageV2) from the length.
		r.lengthRemain = int(r.header.length) - 49
		r.log("Read v2 header with count=%d offset=%d len=%d magic=%d attributes=%d", r.count, r.header.firstOffset, r.header.length, r.header.magic, r.header.v2.attributes)
	default:
		err = r.header.badMagic()
		return
	}
	return
}

func (r *messageSetReader) readNewBytes(len int) (res []byte, err error) {
	res, r.remain, err = readNewBytes(r.reader, r.remain, len)
	return
}

func (r *messageSetReader) readNewString(len int) (res string, err error) {
	res, r.remain, err = readNewString(r.reader, r.remain, len)
	return
}

func (r *messageSetReader) readInt8(val *int8) (err error) {
	r.remain, err = readInt8(r.reader, r.remain, val)
	return
}

func (r *messageSetReader) readInt16(val *int16) (err error) {
	r.remain, err = readInt16(r.reader, r.remain, val)
	return
}

func (r *messageSetReader) readInt32(val *int32) (err error) {
	r.remain, err = readInt32(r.reader, r.remain, val)
	return
}

func (r *messageSetReader) readInt64(val *int64) (err error) {
	r.remain, err = readInt64(r.reader, r.remain, val)
	return
}

func (r *messageSetReader) readVarInt(val *int64) (err error) {
	r.remain, err = readVarInt(r.reader, r.remain, val)
	return
}

func (r *messageSetReader) readBytesWith(fn readBytesFunc) (err error) {
	r.remain, err = readBytesWith(r.reader, r.remain, fn)
	return
}

func (r *messageSetReader) log(msg string, args ...interface{}) {
	if r.debug {
		log.Printf("[DEBUG] "+msg, args...)
	}
}

func extractOffset(base int64, msgSet []byte) (offset int64, err error) {
	r, remain := bufio.NewReader(bytes.NewReader(msgSet)), len(msgSet)
	for remain > 0 {
		if remain, err = readInt64(r, remain, &offset); err != nil {
			return
		}
		var sz int32
		if remain, err = readInt32(r, remain, &sz); err != nil {
			return
		}
		if remain, err = discardN(r, remain, int(sz)); err != nil {
			return
		}
	}
	offset = base - offset
	return
}
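
// Worked example (illustrative, not part of the upstream source): for a
// compressed wrapper message at offset 13 containing four inner messages with
// relative offsets 0, 1, 2 and 3, the loop in extractOffset finishes with
// offset=3, so the base resolves to 13-3=10, matching the comment in
// readMessageV1.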
287
vendor/github.com/segmentio/kafka-go/metadata.go
generated
vendored
Normal file
@@ -0,0 +1,287 @@
package kafka

import (
	"context"
	"fmt"
	"net"
	"time"

	metadataAPI "github.com/segmentio/kafka-go/protocol/metadata"
)

// MetadataRequest represents a request sent to a kafka broker to retrieve its
// cluster metadata.
type MetadataRequest struct {
	// Address of the kafka broker to send the request to.
	Addr net.Addr

	// The list of topics to retrieve metadata for.
	Topics []string
}

// MetadataResponse represents a response from a kafka broker to a metadata
// request.
type MetadataResponse struct {
	// The amount of time that the broker throttled the request.
	Throttle time.Duration

	// Name of the kafka cluster that the client retrieved metadata from.
	ClusterID string

	// The broker which is currently the controller for the cluster.
	Controller Broker

	// The list of brokers registered to the cluster.
	Brokers []Broker

	// The list of topics available on the cluster.
	Topics []Topic
}

// Metadata sends a metadata request to a kafka broker and returns the response.
func (c *Client) Metadata(ctx context.Context, req *MetadataRequest) (*MetadataResponse, error) {
	m, err := c.roundTrip(ctx, req.Addr, &metadataAPI.Request{
		TopicNames: req.Topics,
	})

	if err != nil {
		return nil, fmt.Errorf("kafka.(*Client).Metadata: %w", err)
	}

	res := m.(*metadataAPI.Response)
	ret := &MetadataResponse{
		Throttle:  makeDuration(res.ThrottleTimeMs),
		Brokers:   make([]Broker, len(res.Brokers)),
		Topics:    make([]Topic, len(res.Topics)),
		ClusterID: res.ClusterID,
	}

	brokers := make(map[int32]Broker, len(res.Brokers))

	for i, b := range res.Brokers {
		broker := Broker{
			Host: b.Host,
			Port: int(b.Port),
			ID:   int(b.NodeID),
			Rack: b.Rack,
		}

		ret.Brokers[i] = broker
		brokers[b.NodeID] = broker

		if b.NodeID == res.ControllerID {
			ret.Controller = broker
		}
	}

	for i, t := range res.Topics {
		ret.Topics[i] = Topic{
			Name:       t.Name,
			Internal:   t.IsInternal,
			Partitions: make([]Partition, len(t.Partitions)),
			Error:      makeError(t.ErrorCode, ""),
		}

		for j, p := range t.Partitions {
			partition := Partition{
				Topic:    t.Name,
				ID:       int(p.PartitionIndex),
				Leader:   brokers[p.LeaderID],
				Replicas: make([]Broker, len(p.ReplicaNodes)),
				Isr:      make([]Broker, len(p.IsrNodes)),
				Error:    makeError(p.ErrorCode, ""),
			}

			for i, id := range p.ReplicaNodes {
				partition.Replicas[i] = brokers[id]
			}

			for i, id := range p.IsrNodes {
				partition.Isr[i] = brokers[id]
			}

			ret.Topics[i].Partitions[j] = partition
		}
	}

	return ret, nil
}
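
// Example (illustrative sketch, not part of the vendored source): retrieving
// cluster metadata for a single topic; the broker address and topic name are
// assumptions.
//
//	client := &kafka.Client{Addr: kafka.TCP("localhost:9092")}
//	meta, err := client.Metadata(context.Background(), &kafka.MetadataRequest{
//		Addr:   client.Addr,
//		Topics: []string{"my-topic"},
//	})
//	if err != nil {
//		log.Fatal(err)
//	}
//	log.Printf("cluster %s: %d brokers, controller is broker %d",
//		meta.ClusterID, len(meta.Brokers), meta.Controller.ID)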
type topicMetadataRequestV1 []string

func (r topicMetadataRequestV1) size() int32 {
	return sizeofStringArray([]string(r))
}

func (r topicMetadataRequestV1) writeTo(wb *writeBuffer) {
	// communicate nil-ness to the broker by passing -1 as the array length.
	// for this particular request, the broker interprets a zero length array
	// as a request for no topics whereas a nil array is for all topics.
	if r == nil {
		wb.writeArrayLen(-1)
	} else {
		wb.writeStringArray([]string(r))
	}
}

type metadataResponseV1 struct {
	Brokers      []brokerMetadataV1
	ControllerID int32
	Topics       []topicMetadataV1
}

func (r metadataResponseV1) size() int32 {
	n1 := sizeofArray(len(r.Brokers), func(i int) int32 { return r.Brokers[i].size() })
	n2 := sizeofArray(len(r.Topics), func(i int) int32 { return r.Topics[i].size() })
	return 4 + n1 + n2
}

func (r metadataResponseV1) writeTo(wb *writeBuffer) {
	wb.writeArray(len(r.Brokers), func(i int) { r.Brokers[i].writeTo(wb) })
	wb.writeInt32(r.ControllerID)
	wb.writeArray(len(r.Topics), func(i int) { r.Topics[i].writeTo(wb) })
}

type brokerMetadataV1 struct {
	NodeID int32
	Host   string
	Port   int32
	Rack   string
}

func (b brokerMetadataV1) size() int32 {
	return 4 + 4 + sizeofString(b.Host) + sizeofString(b.Rack)
}

func (b brokerMetadataV1) writeTo(wb *writeBuffer) {
	wb.writeInt32(b.NodeID)
	wb.writeString(b.Host)
	wb.writeInt32(b.Port)
	wb.writeString(b.Rack)
}

type topicMetadataV1 struct {
	TopicErrorCode int16
	TopicName      string
	Internal       bool
	Partitions     []partitionMetadataV1
}

func (t topicMetadataV1) size() int32 {
	return 2 + 1 +
		sizeofString(t.TopicName) +
		sizeofArray(len(t.Partitions), func(i int) int32 { return t.Partitions[i].size() })
}

func (t topicMetadataV1) writeTo(wb *writeBuffer) {
	wb.writeInt16(t.TopicErrorCode)
	wb.writeString(t.TopicName)
	wb.writeBool(t.Internal)
	wb.writeArray(len(t.Partitions), func(i int) { t.Partitions[i].writeTo(wb) })
}

type partitionMetadataV1 struct {
	PartitionErrorCode int16
	PartitionID        int32
	Leader             int32
	Replicas           []int32
	Isr                []int32
}

func (p partitionMetadataV1) size() int32 {
	return 2 + 4 + 4 + sizeofInt32Array(p.Replicas) + sizeofInt32Array(p.Isr)
}

func (p partitionMetadataV1) writeTo(wb *writeBuffer) {
	wb.writeInt16(p.PartitionErrorCode)
	wb.writeInt32(p.PartitionID)
	wb.writeInt32(p.Leader)
	wb.writeInt32Array(p.Replicas)
	wb.writeInt32Array(p.Isr)
}

type topicMetadataRequestV6 struct {
	Topics                 []string
	AllowAutoTopicCreation bool
}

func (r topicMetadataRequestV6) size() int32 {
	return sizeofStringArray([]string(r.Topics)) + 1
}

func (r topicMetadataRequestV6) writeTo(wb *writeBuffer) {
	// communicate nil-ness to the broker by passing -1 as the array length.
	// for this particular request, the broker interprets a zero length array
	// as a request for no topics whereas a nil array is for all topics.
	if r.Topics == nil {
		wb.writeArrayLen(-1)
	} else {
		wb.writeStringArray([]string(r.Topics))
	}
	wb.writeBool(r.AllowAutoTopicCreation)
}

type metadataResponseV6 struct {
	ThrottleTimeMs int32
	Brokers        []brokerMetadataV1
	ClusterId      string
	ControllerID   int32
	Topics         []topicMetadataV6
}

func (r metadataResponseV6) size() int32 {
	n1 := sizeofArray(len(r.Brokers), func(i int) int32 { return r.Brokers[i].size() })
	n2 := sizeofNullableString(&r.ClusterId)
	n3 := sizeofArray(len(r.Topics), func(i int) int32 { return r.Topics[i].size() })
	return 4 + 4 + n1 + n2 + n3
}

func (r metadataResponseV6) writeTo(wb *writeBuffer) {
	wb.writeInt32(r.ThrottleTimeMs)
	wb.writeArray(len(r.Brokers), func(i int) { r.Brokers[i].writeTo(wb) })
	wb.writeString(r.ClusterId)
	wb.writeInt32(r.ControllerID)
	wb.writeArray(len(r.Topics), func(i int) { r.Topics[i].writeTo(wb) })
}

type topicMetadataV6 struct {
	TopicErrorCode int16
	TopicName      string
	Internal       bool
	Partitions     []partitionMetadataV6
}

func (t topicMetadataV6) size() int32 {
	return 2 + 1 +
		sizeofString(t.TopicName) +
		sizeofArray(len(t.Partitions), func(i int) int32 { return t.Partitions[i].size() })
}

func (t topicMetadataV6) writeTo(wb *writeBuffer) {
	wb.writeInt16(t.TopicErrorCode)
	wb.writeString(t.TopicName)
	wb.writeBool(t.Internal)
	wb.writeArray(len(t.Partitions), func(i int) { t.Partitions[i].writeTo(wb) })
}

type partitionMetadataV6 struct {
	PartitionErrorCode int16
	PartitionID        int32
	Leader             int32
	Replicas           []int32
	Isr                []int32
	OfflineReplicas    []int32
}

func (p partitionMetadataV6) size() int32 {
	return 2 + 4 + 4 + sizeofInt32Array(p.Replicas) + sizeofInt32Array(p.Isr) + sizeofInt32Array(p.OfflineReplicas)
}

func (p partitionMetadataV6) writeTo(wb *writeBuffer) {
	wb.writeInt16(p.PartitionErrorCode)
	wb.writeInt32(p.PartitionID)
	wb.writeInt32(p.Leader)
	wb.writeInt32Array(p.Replicas)
	wb.writeInt32Array(p.Isr)
	wb.writeInt32Array(p.OfflineReplicas)
}
302
vendor/github.com/segmentio/kafka-go/offsetcommit.go
generated
vendored
Normal file
@@ -0,0 +1,302 @@
package kafka

import (
	"bufio"
	"context"
	"fmt"
	"net"
	"time"

	"github.com/segmentio/kafka-go/protocol/offsetcommit"
)

// OffsetCommit represents the commit of an offset to a partition.
//
// The extra metadata is opaque to the kafka protocol; it is intended to hold
// information like an identifier for the process that committed the offset,
// or the time at which the commit was made.
type OffsetCommit struct {
	Partition int
	Offset    int64
	Metadata  string
}

// OffsetCommitRequest represents a request sent to a kafka broker to commit
// offsets for a partition.
type OffsetCommitRequest struct {
	// Address of the kafka broker to send the request to.
	Addr net.Addr

	// ID of the consumer group to publish the offsets for.
	GroupID string

	// ID of the consumer group generation.
	GenerationID int

	// ID of the group member submitting the offsets.
	MemberID string

	// ID of the group instance.
	InstanceID string

	// Set of topic partitions to publish the offsets for.
	//
	// Note that offset commits need to be submitted to the broker acting as the
	// group coordinator. This will be automatically resolved by the transport.
	Topics map[string][]OffsetCommit
}

// OffsetCommitResponse represents a response from a kafka broker to an offset
// commit request.
type OffsetCommitResponse struct {
	// The amount of time that the broker throttled the request.
	Throttle time.Duration

	// Set of topic partitions that the kafka broker has accepted offset commits
	// for.
	Topics map[string][]OffsetCommitPartition
}

// OffsetCommitPartition represents the state of a single partition in responses
// to committing offsets.
type OffsetCommitPartition struct {
	// ID of the partition.
	Partition int

	// An error that may have occurred while attempting to publish consumer
	// group offsets for this partition.
	//
	// The error contains both the kafka error code, and an error message
	// returned by the kafka broker. Programs may use the standard errors.Is
	// function to test the error against kafka error codes.
	Error error
}

// OffsetCommit sends an offset commit request to a kafka broker and returns the
// response.
func (c *Client) OffsetCommit(ctx context.Context, req *OffsetCommitRequest) (*OffsetCommitResponse, error) {
	now := time.Now().UnixNano() / int64(time.Millisecond)
	topics := make([]offsetcommit.RequestTopic, 0, len(req.Topics))

	for topicName, commits := range req.Topics {
		partitions := make([]offsetcommit.RequestPartition, len(commits))

		for i, c := range commits {
			partitions[i] = offsetcommit.RequestPartition{
				PartitionIndex:    int32(c.Partition),
				CommittedOffset:   c.Offset,
				CommittedMetadata: c.Metadata,
				// This field existed in v1 of the OffsetCommit API, setting it
				// to the current timestamp is probably a safe thing to do, but
				// it is hard to tell.
				CommitTimestamp: now,
			}
		}

		topics = append(topics, offsetcommit.RequestTopic{
			Name:       topicName,
			Partitions: partitions,
		})
	}

	m, err := c.roundTrip(ctx, req.Addr, &offsetcommit.Request{
		GroupID:         req.GroupID,
		GenerationID:    int32(req.GenerationID),
		MemberID:        req.MemberID,
		GroupInstanceID: req.InstanceID,
		Topics:          topics,
		// Hardcoded retention; this field existed between v2 and v4 of the
		// OffsetCommit API, we would have to figure out a way to give the
		// client control over the API version being used to support configuring
		// it in the request object.
		RetentionTimeMs: int64((24 * time.Hour) / time.Millisecond),
	})
	if err != nil {
		return nil, fmt.Errorf("kafka.(*Client).OffsetCommit: %w", err)
	}
	r := m.(*offsetcommit.Response)

	res := &OffsetCommitResponse{
		Throttle: makeDuration(r.ThrottleTimeMs),
		Topics:   make(map[string][]OffsetCommitPartition, len(r.Topics)),
	}

	for _, topic := range r.Topics {
		partitions := make([]OffsetCommitPartition, len(topic.Partitions))

		for i, p := range topic.Partitions {
			partitions[i] = OffsetCommitPartition{
				Partition: int(p.PartitionIndex),
				Error:     makeError(p.ErrorCode, ""),
			}
		}

		res.Topics[topic.Name] = partitions
	}

	return res, nil
}
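
// Example (illustrative sketch, not part of the vendored source): committing
// an offset for one partition on behalf of a consumer group; the group,
// member and topic identifiers are assumptions.
//
//	client := &kafka.Client{Addr: kafka.TCP("localhost:9092")}
//	resp, err := client.OffsetCommit(context.Background(), &kafka.OffsetCommitRequest{
//		Addr:         client.Addr,
//		GroupID:      "my-group",
//		GenerationID: 1,
//		MemberID:     "member-1",
//		Topics: map[string][]kafka.OffsetCommit{
//			"my-topic": {{Partition: 0, Offset: 42}},
//		},
//	})
//	if err != nil {
//		log.Fatal(err)
//	}
//	for _, p := range resp.Topics["my-topic"] {
//		if p.Error != nil {
//			log.Printf("partition %d: %v", p.Partition, p.Error)
//		}
//	}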
type offsetCommitRequestV2Partition struct {
	// Partition ID
	Partition int32

	// Offset to be committed
	Offset int64

	// Metadata holds any associated metadata the client wants to keep
	Metadata string
}

func (t offsetCommitRequestV2Partition) size() int32 {
	return sizeofInt32(t.Partition) +
		sizeofInt64(t.Offset) +
		sizeofString(t.Metadata)
}

func (t offsetCommitRequestV2Partition) writeTo(wb *writeBuffer) {
	wb.writeInt32(t.Partition)
	wb.writeInt64(t.Offset)
	wb.writeString(t.Metadata)
}

type offsetCommitRequestV2Topic struct {
	// Topic name
	Topic string

	// Partitions to commit offsets
	Partitions []offsetCommitRequestV2Partition
}

func (t offsetCommitRequestV2Topic) size() int32 {
	return sizeofString(t.Topic) +
		sizeofArray(len(t.Partitions), func(i int) int32 { return t.Partitions[i].size() })
}

func (t offsetCommitRequestV2Topic) writeTo(wb *writeBuffer) {
	wb.writeString(t.Topic)
	wb.writeArray(len(t.Partitions), func(i int) { t.Partitions[i].writeTo(wb) })
}

type offsetCommitRequestV2 struct {
	// GroupID holds the unique group identifier
	GroupID string

	// GenerationID holds the generation of the group.
	GenerationID int32

	// MemberID assigned by the group coordinator
	MemberID string

	// RetentionTime holds the time period in ms to retain the offset.
	RetentionTime int64

	// Topics to commit offsets
	Topics []offsetCommitRequestV2Topic
}

func (t offsetCommitRequestV2) size() int32 {
	return sizeofString(t.GroupID) +
		sizeofInt32(t.GenerationID) +
		sizeofString(t.MemberID) +
		sizeofInt64(t.RetentionTime) +
		sizeofArray(len(t.Topics), func(i int) int32 { return t.Topics[i].size() })
}

func (t offsetCommitRequestV2) writeTo(wb *writeBuffer) {
	wb.writeString(t.GroupID)
	wb.writeInt32(t.GenerationID)
	wb.writeString(t.MemberID)
	wb.writeInt64(t.RetentionTime)
	wb.writeArray(len(t.Topics), func(i int) { t.Topics[i].writeTo(wb) })
}

type offsetCommitResponseV2PartitionResponse struct {
	Partition int32

	// ErrorCode holds response error code
	ErrorCode int16
}

func (t offsetCommitResponseV2PartitionResponse) size() int32 {
	return sizeofInt32(t.Partition) +
		sizeofInt16(t.ErrorCode)
}

func (t offsetCommitResponseV2PartitionResponse) writeTo(wb *writeBuffer) {
	wb.writeInt32(t.Partition)
	wb.writeInt16(t.ErrorCode)
}

func (t *offsetCommitResponseV2PartitionResponse) readFrom(r *bufio.Reader, size int) (remain int, err error) {
	if remain, err = readInt32(r, size, &t.Partition); err != nil {
		return
	}
	if remain, err = readInt16(r, remain, &t.ErrorCode); err != nil {
		return
	}
	return
}

type offsetCommitResponseV2Response struct {
	Topic              string
	PartitionResponses []offsetCommitResponseV2PartitionResponse
}

func (t offsetCommitResponseV2Response) size() int32 {
	return sizeofString(t.Topic) +
		sizeofArray(len(t.PartitionResponses), func(i int) int32 { return t.PartitionResponses[i].size() })
}

func (t offsetCommitResponseV2Response) writeTo(wb *writeBuffer) {
	wb.writeString(t.Topic)
	wb.writeArray(len(t.PartitionResponses), func(i int) { t.PartitionResponses[i].writeTo(wb) })
}

func (t *offsetCommitResponseV2Response) readFrom(r *bufio.Reader, size int) (remain int, err error) {
	if remain, err = readString(r, size, &t.Topic); err != nil {
		return
	}

	fn := func(r *bufio.Reader, withSize int) (fnRemain int, fnErr error) {
		item := offsetCommitResponseV2PartitionResponse{}
		if fnRemain, fnErr = (&item).readFrom(r, withSize); fnErr != nil {
			return
		}
		t.PartitionResponses = append(t.PartitionResponses, item)
		return
	}
	if remain, err = readArrayWith(r, remain, fn); err != nil {
		return
	}

	return
}

type offsetCommitResponseV2 struct {
	Responses []offsetCommitResponseV2Response
}

func (t offsetCommitResponseV2) size() int32 {
	return sizeofArray(len(t.Responses), func(i int) int32 { return t.Responses[i].size() })
}

func (t offsetCommitResponseV2) writeTo(wb *writeBuffer) {
	wb.writeArray(len(t.Responses), func(i int) { t.Responses[i].writeTo(wb) })
}

func (t *offsetCommitResponseV2) readFrom(r *bufio.Reader, size int) (remain int, err error) {
	fn := func(r *bufio.Reader, withSize int) (fnRemain int, fnErr error) {
		item := offsetCommitResponseV2Response{}
		if fnRemain, fnErr = (&item).readFrom(r, withSize); fnErr != nil {
			return
		}
		t.Responses = append(t.Responses, item)
		return
	}
	if remain, err = readArrayWith(r, size, fn); err != nil {
		return
	}

	return
}
106
vendor/github.com/segmentio/kafka-go/offsetdelete.go
generated
vendored
Normal file
@@ -0,0 +1,106 @@
package kafka

import (
	"context"
	"fmt"
	"net"
	"time"

	"github.com/segmentio/kafka-go/protocol/offsetdelete"
)

// OffsetDelete deletes the offset for a consumer group on a particular topic
// for a particular partition.
type OffsetDelete struct {
	Topic     string
	Partition int
}

// OffsetDeleteRequest represents a request sent to a kafka broker to delete
// the offsets for a partition on a given topic associated with a consumer group.
type OffsetDeleteRequest struct {
	// Address of the kafka broker to send the request to.
	Addr net.Addr

	// ID of the consumer group to delete the offsets for.
	GroupID string

	// Set of topic partitions to delete offsets for.
	Topics map[string][]int
}

// OffsetDeleteResponse represents a response from a kafka broker to a delete
// offset request.
type OffsetDeleteResponse struct {
	// An error that may have occurred while attempting to delete an offset.
	Error error

	// The amount of time that the broker throttled the request.
	Throttle time.Duration

	// Set of topic partitions for which the kafka broker returned additional
	// information, such as per-partition errors.
	Topics map[string][]OffsetDeletePartition
}

// OffsetDeletePartition represents the status of a partition in response
// to deleting offsets.
type OffsetDeletePartition struct {
	// ID of the partition.
	Partition int

	// An error that may have occurred while attempting to delete an offset for
	// this partition.
	Error error
}

// OffsetDelete sends a delete offset request to a kafka broker and returns the
// response.
func (c *Client) OffsetDelete(ctx context.Context, req *OffsetDeleteRequest) (*OffsetDeleteResponse, error) {
	topics := make([]offsetdelete.RequestTopic, 0, len(req.Topics))

	for topicName, partitionIndexes := range req.Topics {
		partitions := make([]offsetdelete.RequestPartition, len(partitionIndexes))

		for i, c := range partitionIndexes {
			partitions[i] = offsetdelete.RequestPartition{
				PartitionIndex: int32(c),
			}
		}

		topics = append(topics, offsetdelete.RequestTopic{
			Name:       topicName,
			Partitions: partitions,
		})
	}

	m, err := c.roundTrip(ctx, req.Addr, &offsetdelete.Request{
		GroupID: req.GroupID,
		Topics:  topics,
	})
	if err != nil {
		return nil, fmt.Errorf("kafka.(*Client).OffsetDelete: %w", err)
	}
	r := m.(*offsetdelete.Response)

	res := &OffsetDeleteResponse{
		Error:    makeError(r.ErrorCode, ""),
		Throttle: makeDuration(r.ThrottleTimeMs),
		Topics:   make(map[string][]OffsetDeletePartition, len(r.Topics)),
	}

	for _, topic := range r.Topics {
		partitions := make([]OffsetDeletePartition, len(topic.Partitions))

		for i, p := range topic.Partitions {
			partitions[i] = OffsetDeletePartition{
				Partition: int(p.PartitionIndex),
				Error:     makeError(p.ErrorCode, ""),
			}
		}

		res.Topics[topic.Name] = partitions
	}

	return res, nil
}
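
// A minimal usage sketch for Client.OffsetDelete (not part of the vendored
// source). The broker address "localhost:9092", the group "my-group", and
// the topic "my-topic" are illustrative assumptions.
//
//	client := &kafka.Client{Addr: kafka.TCP("localhost:9092")}
//	res, err := client.OffsetDelete(context.Background(), &kafka.OffsetDeleteRequest{
//		GroupID: "my-group",
//		Topics:  map[string][]int{"my-topic": {0, 1, 2}},
//	})
//	if err != nil {
//		log.Fatal(err)
//	}
//	for topic, partitions := range res.Topics {
//		for _, p := range partitions {
//			if p.Error != nil {
//				log.Printf("%s[%d]: %v", topic, p.Partition, p.Error)
//			}
//		}
//	}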
263
vendor/github.com/segmentio/kafka-go/offsetfetch.go
generated
vendored
Normal file
@@ -0,0 +1,263 @@
package kafka

import (
	"bufio"
	"context"
	"fmt"
	"net"
	"time"

	"github.com/segmentio/kafka-go/protocol/offsetfetch"
)

// OffsetFetchRequest represents a request sent to a kafka broker to read the
// currently committed offsets of topic partitions.
type OffsetFetchRequest struct {
	// Address of the kafka broker to send the request to.
	Addr net.Addr

	// ID of the consumer group to retrieve the offsets for.
	GroupID string

	// Set of topic partitions to retrieve the offsets for.
	Topics map[string][]int
}

// OffsetFetchResponse represents a response from a kafka broker to an offset
// fetch request.
type OffsetFetchResponse struct {
	// The amount of time that the broker throttled the request.
	Throttle time.Duration

	// Set of topic partitions that the kafka broker has returned offsets for.
	Topics map[string][]OffsetFetchPartition

	// An error that may have occurred while attempting to retrieve consumer
	// group offsets.
	//
	// The error contains both the kafka error code, and an error message
	// returned by the kafka broker. Programs may use the standard errors.Is
	// function to test the error against kafka error codes.
	Error error
}

// OffsetFetchPartition represents the state of a single partition in a consumer
// group.
type OffsetFetchPartition struct {
	// ID of the partition.
	Partition int

	// Last committed offsets on the partition when the request was served by
	// the kafka broker.
	CommittedOffset int64

	// Consumer group metadata for this partition.
	Metadata string

	// An error that may have occurred while attempting to retrieve consumer
	// group offsets for this partition.
	//
	// The error contains both the kafka error code, and an error message
	// returned by the kafka broker. Programs may use the standard errors.Is
	// function to test the error against kafka error codes.
	Error error
}

// OffsetFetch sends an offset fetch request to a kafka broker and returns the
// response.
func (c *Client) OffsetFetch(ctx context.Context, req *OffsetFetchRequest) (*OffsetFetchResponse, error) {
	topics := make([]offsetfetch.RequestTopic, 0, len(req.Topics))

	for topicName, partitions := range req.Topics {
		indexes := make([]int32, len(partitions))

		for i, p := range partitions {
			indexes[i] = int32(p)
		}

		topics = append(topics, offsetfetch.RequestTopic{
			Name:             topicName,
			PartitionIndexes: indexes,
		})
	}

	m, err := c.roundTrip(ctx, req.Addr, &offsetfetch.Request{
		GroupID: req.GroupID,
		Topics:  topics,
	})

	if err != nil {
		return nil, fmt.Errorf("kafka.(*Client).OffsetFetch: %w", err)
	}

	res := m.(*offsetfetch.Response)
	ret := &OffsetFetchResponse{
		Throttle: makeDuration(res.ThrottleTimeMs),
		Topics:   make(map[string][]OffsetFetchPartition, len(res.Topics)),
		Error:    makeError(res.ErrorCode, ""),
	}

	for _, t := range res.Topics {
		partitions := make([]OffsetFetchPartition, len(t.Partitions))

		for i, p := range t.Partitions {
			partitions[i] = OffsetFetchPartition{
				Partition:       int(p.PartitionIndex),
				CommittedOffset: p.CommittedOffset,
				Metadata:        p.Metadata,
				Error:           makeError(p.ErrorCode, ""),
			}
		}

		ret.Topics[t.Name] = partitions
	}

	return ret, nil
}
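
// A minimal usage sketch for Client.OffsetFetch (not part of the vendored
// source); the broker address, group, and topic names are illustrative
// assumptions.
//
//	client := &kafka.Client{Addr: kafka.TCP("localhost:9092")}
//	res, err := client.OffsetFetch(context.Background(), &kafka.OffsetFetchRequest{
//		GroupID: "my-group",
//		Topics:  map[string][]int{"my-topic": {0}},
//	})
//	if err != nil {
//		log.Fatal(err)
//	}
//	for _, p := range res.Topics["my-topic"] {
//		fmt.Printf("partition %d: committed offset %d\n", p.Partition, p.CommittedOffset)
//	}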

type offsetFetchRequestV1Topic struct {
	// Topic name
	Topic string

	// Partitions to fetch offsets for
	Partitions []int32
}

func (t offsetFetchRequestV1Topic) size() int32 {
	return sizeofString(t.Topic) +
		sizeofInt32Array(t.Partitions)
}

func (t offsetFetchRequestV1Topic) writeTo(wb *writeBuffer) {
	wb.writeString(t.Topic)
	wb.writeInt32Array(t.Partitions)
}

type offsetFetchRequestV1 struct {
	// GroupID holds the unique group identifier
	GroupID string

	// Topics to fetch offsets for.
	Topics []offsetFetchRequestV1Topic
}

func (t offsetFetchRequestV1) size() int32 {
	return sizeofString(t.GroupID) +
		sizeofArray(len(t.Topics), func(i int) int32 { return t.Topics[i].size() })
}

func (t offsetFetchRequestV1) writeTo(wb *writeBuffer) {
	wb.writeString(t.GroupID)
	wb.writeArray(len(t.Topics), func(i int) { t.Topics[i].writeTo(wb) })
}

type offsetFetchResponseV1PartitionResponse struct {
	// Partition ID
	Partition int32

	// Offset of last committed message
	Offset int64

	// Metadata client wants to keep
	Metadata string

	// ErrorCode holds response error code
	ErrorCode int16
}

func (t offsetFetchResponseV1PartitionResponse) size() int32 {
	return sizeofInt32(t.Partition) +
		sizeofInt64(t.Offset) +
		sizeofString(t.Metadata) +
		sizeofInt16(t.ErrorCode)
}

func (t offsetFetchResponseV1PartitionResponse) writeTo(wb *writeBuffer) {
	wb.writeInt32(t.Partition)
	wb.writeInt64(t.Offset)
	wb.writeString(t.Metadata)
	wb.writeInt16(t.ErrorCode)
}

func (t *offsetFetchResponseV1PartitionResponse) readFrom(r *bufio.Reader, size int) (remain int, err error) {
	if remain, err = readInt32(r, size, &t.Partition); err != nil {
		return
	}
	if remain, err = readInt64(r, remain, &t.Offset); err != nil {
		return
	}
	if remain, err = readString(r, remain, &t.Metadata); err != nil {
		return
	}
	if remain, err = readInt16(r, remain, &t.ErrorCode); err != nil {
		return
	}
	return
}

type offsetFetchResponseV1Response struct {
	// Topic name
	Topic string

	// PartitionResponses holds offsets by partition
	PartitionResponses []offsetFetchResponseV1PartitionResponse
}

func (t offsetFetchResponseV1Response) size() int32 {
	return sizeofString(t.Topic) +
		sizeofArray(len(t.PartitionResponses), func(i int) int32 { return t.PartitionResponses[i].size() })
}

func (t offsetFetchResponseV1Response) writeTo(wb *writeBuffer) {
	wb.writeString(t.Topic)
	wb.writeArray(len(t.PartitionResponses), func(i int) { t.PartitionResponses[i].writeTo(wb) })
}

func (t *offsetFetchResponseV1Response) readFrom(r *bufio.Reader, size int) (remain int, err error) {
	if remain, err = readString(r, size, &t.Topic); err != nil {
		return
	}

	fn := func(r *bufio.Reader, size int) (fnRemain int, fnErr error) {
		item := offsetFetchResponseV1PartitionResponse{}
		if fnRemain, fnErr = (&item).readFrom(r, size); fnErr != nil {
			return
		}
		t.PartitionResponses = append(t.PartitionResponses, item)
		return
	}
	if remain, err = readArrayWith(r, remain, fn); err != nil {
		return
	}

	return
}

type offsetFetchResponseV1 struct {
	// Responses holds topic partition offsets
	Responses []offsetFetchResponseV1Response
}

func (t offsetFetchResponseV1) size() int32 {
	return sizeofArray(len(t.Responses), func(i int) int32 { return t.Responses[i].size() })
}

func (t offsetFetchResponseV1) writeTo(wb *writeBuffer) {
	wb.writeArray(len(t.Responses), func(i int) { t.Responses[i].writeTo(wb) })
}

func (t *offsetFetchResponseV1) readFrom(r *bufio.Reader, size int) (remain int, err error) {
	fn := func(r *bufio.Reader, withSize int) (fnRemain int, fnErr error) {
		item := offsetFetchResponseV1Response{}
		if fnRemain, fnErr = (&item).readFrom(r, withSize); fnErr != nil {
			return
		}
		t.Responses = append(t.Responses, item)
		return
	}
	if remain, err = readArrayWith(r, size, fn); err != nil {
		return
	}

	return
}
323
vendor/github.com/segmentio/kafka-go/produce.go
generated
vendored
Normal file
@@ -0,0 +1,323 @@
package kafka

import (
	"bufio"
	"context"
	"encoding"
	"errors"
	"fmt"
	"net"
	"strconv"
	"time"

	"github.com/segmentio/kafka-go/protocol"
	produceAPI "github.com/segmentio/kafka-go/protocol/produce"
)

type RequiredAcks int

const (
	RequireNone RequiredAcks = 0
	RequireOne  RequiredAcks = 1
	RequireAll  RequiredAcks = -1
)

func (acks RequiredAcks) String() string {
	switch acks {
	case RequireNone:
		return "none"
	case RequireOne:
		return "one"
	case RequireAll:
		return "all"
	default:
		return "unknown"
	}
}

func (acks RequiredAcks) MarshalText() ([]byte, error) {
	return []byte(acks.String()), nil
}

func (acks *RequiredAcks) UnmarshalText(b []byte) error {
	switch string(b) {
	case "none":
		*acks = RequireNone
	case "one":
		*acks = RequireOne
	case "all":
		*acks = RequireAll
	default:
		x, err := strconv.ParseInt(string(b), 10, 64)
		parsed := RequiredAcks(x)
		if err != nil || (parsed != RequireNone && parsed != RequireOne && parsed != RequireAll) {
			return fmt.Errorf("required acks must be one of none, one, or all, not %q", b)
		}
		*acks = parsed
	}
	return nil
}

var (
	_ encoding.TextMarshaler   = RequiredAcks(0)
	_ encoding.TextUnmarshaler = (*RequiredAcks)(nil)
)
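
// A short sketch (not part of the vendored source) of how the text forms
// round-trip: "none", "one", "all" and their numeric equivalents "0", "1",
// "-1" all parse; any other input is rejected.
//
//	var acks kafka.RequiredAcks
//	_ = acks.UnmarshalText([]byte("all"))  // acks == kafka.RequireAll
//	_ = acks.UnmarshalText([]byte("-1"))   // also kafka.RequireAll
//	text, _ := acks.MarshalText()          // text == []byte("all")
//	err := acks.UnmarshalText([]byte("2")) // err != nil: not a valid level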

// ProduceRequest represents a request sent to a kafka broker to produce records
// to a topic partition.
type ProduceRequest struct {
	// Address of the kafka broker to send the request to.
	Addr net.Addr

	// The topic to produce the records to.
	Topic string

	// The partition to produce the records to.
	Partition int

	// The level of required acknowledgements to ask the kafka broker for.
	RequiredAcks RequiredAcks

	// The message format version used when encoding the records.
	//
	// By default, the client automatically determines which version should be
	// used based on the version of the Produce API supported by the server.
	MessageVersion int

	// An optional transaction id when producing to the kafka broker is part of
	// a transaction.
	TransactionalID string

	// The sequence of records to produce to the topic partition.
	Records RecordReader

	// An optional compression algorithm to apply to the batch of records sent
	// to the kafka broker.
	Compression Compression
}

// ProduceResponse represents a response from a kafka broker to a produce
// request.
type ProduceResponse struct {
	// The amount of time that the broker throttled the request.
	Throttle time.Duration

	// An error that may have occurred while attempting to produce the records.
	//
	// The error contains both the kafka error code, and an error message
	// returned by the kafka broker. Programs may use the standard errors.Is
	// function to test the error against kafka error codes.
	Error error

	// Offset of the first record that was written to the topic partition.
	//
	// This field will be zero if the kafka broker did not support the Produce
	// API in version 3 or above.
	BaseOffset int64

	// Time at which the broker wrote the records to the topic partition.
	//
	// This field will be zero if the kafka broker did not support the Produce
	// API in version 2 or above.
	LogAppendTime time.Time

	// First offset in the topic partition that the records were written to.
	//
	// This field will be zero if the kafka broker did not support the Produce
	// API in version 5 or above (or if the first offset is zero).
	LogStartOffset int64

	// If errors occurred writing specific records, they will be reported in
	// this map.
	//
	// This field will always be empty if the kafka broker did not support the
	// Produce API in version 8 or above.
	RecordErrors map[int]error
}

// Produce sends a produce request to a kafka broker and returns the response.
//
// If the request contained no records, an error wrapping protocol.ErrNoRecord
// is returned.
//
// When the request is configured with RequiredAcks=none, both the response and
// the error will be nil on success.
func (c *Client) Produce(ctx context.Context, req *ProduceRequest) (*ProduceResponse, error) {
	attributes := protocol.Attributes(req.Compression) & 0x7

	m, err := c.roundTrip(ctx, req.Addr, &produceAPI.Request{
		TransactionalID: req.TransactionalID,
		Acks:            int16(req.RequiredAcks),
		Timeout:         c.timeoutMs(ctx, defaultProduceTimeout),
		Topics: []produceAPI.RequestTopic{{
			Topic: req.Topic,
			Partitions: []produceAPI.RequestPartition{{
				Partition: int32(req.Partition),
				RecordSet: protocol.RecordSet{
					Attributes: attributes,
					Records:    req.Records,
				},
			}},
		}},
	})

	switch {
	case err == nil:
	case errors.Is(err, protocol.ErrNoRecord):
		return new(ProduceResponse), nil
	default:
		return nil, fmt.Errorf("kafka.(*Client).Produce: %w", err)
	}

	if req.RequiredAcks == RequireNone {
		return nil, nil
	}

	res := m.(*produceAPI.Response)
	if len(res.Topics) == 0 {
		return nil, fmt.Errorf("kafka.(*Client).Produce: %w", protocol.ErrNoTopic)
	}
	topic := &res.Topics[0]
	if len(topic.Partitions) == 0 {
		return nil, fmt.Errorf("kafka.(*Client).Produce: %w", protocol.ErrNoPartition)
	}
	partition := &topic.Partitions[0]

	ret := &ProduceResponse{
		Throttle:       makeDuration(res.ThrottleTimeMs),
		Error:          makeError(partition.ErrorCode, partition.ErrorMessage),
		BaseOffset:     partition.BaseOffset,
		LogAppendTime:  makeTime(partition.LogAppendTime),
		LogStartOffset: partition.LogStartOffset,
	}

	if len(partition.RecordErrors) != 0 {
		ret.RecordErrors = make(map[int]error, len(partition.RecordErrors))

		for _, recErr := range partition.RecordErrors {
			ret.RecordErrors[int(recErr.BatchIndex)] = errors.New(recErr.BatchIndexErrorMessage)
		}
	}

	return ret, nil
}
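
// A minimal usage sketch for Client.Produce (not part of the vendored
// source); the broker address and topic name are illustrative assumptions,
// and the sketch relies on the record helpers NewRecordReader and NewBytes
// exported elsewhere in this package.
//
//	client := &kafka.Client{Addr: kafka.TCP("localhost:9092")}
//	res, err := client.Produce(context.Background(), &kafka.ProduceRequest{
//		Topic:        "my-topic",
//		Partition:    0,
//		RequiredAcks: kafka.RequireAll,
//		Records: kafka.NewRecordReader(
//			kafka.Record{Value: kafka.NewBytes([]byte("hello"))},
//		),
//	})
//	if err != nil {
//		log.Fatal(err)
//	}
//	fmt.Println("first offset:", res.BaseOffset)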

type produceRequestV2 struct {
	RequiredAcks int16
	Timeout      int32
	Topics       []produceRequestTopicV2
}

func (r produceRequestV2) size() int32 {
	return 2 + 4 + sizeofArray(len(r.Topics), func(i int) int32 { return r.Topics[i].size() })
}

func (r produceRequestV2) writeTo(wb *writeBuffer) {
	wb.writeInt16(r.RequiredAcks)
	wb.writeInt32(r.Timeout)
	wb.writeArray(len(r.Topics), func(i int) { r.Topics[i].writeTo(wb) })
}

type produceRequestTopicV2 struct {
	TopicName  string
	Partitions []produceRequestPartitionV2
}

func (t produceRequestTopicV2) size() int32 {
	return sizeofString(t.TopicName) +
		sizeofArray(len(t.Partitions), func(i int) int32 { return t.Partitions[i].size() })
}

func (t produceRequestTopicV2) writeTo(wb *writeBuffer) {
	wb.writeString(t.TopicName)
	wb.writeArray(len(t.Partitions), func(i int) { t.Partitions[i].writeTo(wb) })
}

type produceRequestPartitionV2 struct {
	Partition      int32
	MessageSetSize int32
	MessageSet     messageSet
}

func (p produceRequestPartitionV2) size() int32 {
	return 4 + 4 + p.MessageSet.size()
}

func (p produceRequestPartitionV2) writeTo(wb *writeBuffer) {
	wb.writeInt32(p.Partition)
	wb.writeInt32(p.MessageSetSize)
	p.MessageSet.writeTo(wb)
}

type produceResponsePartitionV2 struct {
	Partition int32
	ErrorCode int16
	Offset    int64
	Timestamp int64
}

func (p produceResponsePartitionV2) size() int32 {
	return 4 + 2 + 8 + 8
}

func (p produceResponsePartitionV2) writeTo(wb *writeBuffer) {
	wb.writeInt32(p.Partition)
	wb.writeInt16(p.ErrorCode)
	wb.writeInt64(p.Offset)
	wb.writeInt64(p.Timestamp)
}

func (p *produceResponsePartitionV2) readFrom(r *bufio.Reader, sz int) (remain int, err error) {
	if remain, err = readInt32(r, sz, &p.Partition); err != nil {
		return
	}
	if remain, err = readInt16(r, remain, &p.ErrorCode); err != nil {
		return
	}
	if remain, err = readInt64(r, remain, &p.Offset); err != nil {
		return
	}
	if remain, err = readInt64(r, remain, &p.Timestamp); err != nil {
		return
	}
	return
}

type produceResponsePartitionV7 struct {
	Partition   int32
	ErrorCode   int16
	Offset      int64
	Timestamp   int64
	StartOffset int64
}

func (p produceResponsePartitionV7) size() int32 {
	return 4 + 2 + 8 + 8 + 8
}

func (p produceResponsePartitionV7) writeTo(wb *writeBuffer) {
	wb.writeInt32(p.Partition)
	wb.writeInt16(p.ErrorCode)
	wb.writeInt64(p.Offset)
	wb.writeInt64(p.Timestamp)
	wb.writeInt64(p.StartOffset)
}

func (p *produceResponsePartitionV7) readFrom(r *bufio.Reader, sz int) (remain int, err error) {
	if remain, err = readInt32(r, sz, &p.Partition); err != nil {
		return
	}
	if remain, err = readInt16(r, remain, &p.ErrorCode); err != nil {
		return
	}
	if remain, err = readInt64(r, remain, &p.Offset); err != nil {
		return
	}
	if remain, err = readInt64(r, remain, &p.Timestamp); err != nil {
		return
	}
	if remain, err = readInt64(r, remain, &p.StartOffset); err != nil {
		return
	}
	return
}
214
vendor/github.com/segmentio/kafka-go/protocol.go
generated
vendored
Normal file
@@ -0,0 +1,214 @@
package kafka

import (
	"encoding/binary"
	"fmt"
	"strconv"
)

type ApiVersion struct {
	ApiKey     int16
	MinVersion int16
	MaxVersion int16
}

func (v ApiVersion) Format(w fmt.State, r rune) {
	switch r {
	case 's':
		fmt.Fprint(w, apiKey(v.ApiKey))
	case 'd':
		switch {
		case w.Flag('-'):
			fmt.Fprint(w, v.MinVersion)
		case w.Flag('+'):
			fmt.Fprint(w, v.MaxVersion)
		default:
			fmt.Fprint(w, v.ApiKey)
		}
	case 'v':
		switch {
		case w.Flag('-'):
			fmt.Fprintf(w, "v%d", v.MinVersion)
		case w.Flag('+'):
			fmt.Fprintf(w, "v%d", v.MaxVersion)
		case w.Flag('#'):
			fmt.Fprintf(w, "kafka.ApiVersion{ApiKey:%d MinVersion:%d MaxVersion:%d}", v.ApiKey, v.MinVersion, v.MaxVersion)
		default:
			fmt.Fprintf(w, "%s[v%d:v%d]", apiKey(v.ApiKey), v.MinVersion, v.MaxVersion)
		}
	}
}
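
// A short sketch (not part of the vendored source) of the custom verbs
// handled above, for ApiVersion{ApiKey: 0, MinVersion: 1, MaxVersion: 8}:
//
//	v := ApiVersion{ApiKey: 0, MinVersion: 1, MaxVersion: 8}
//	fmt.Sprintf("%s", v)  // "Produce"
//	fmt.Sprintf("%d", v)  // "0"
//	fmt.Sprintf("%-d", v) // "1" (MinVersion)
//	fmt.Sprintf("%+d", v) // "8" (MaxVersion)
//	fmt.Sprintf("%v", v)  // "Produce[v1:v8]"
//	fmt.Sprintf("%-v", v) // "v1"
//	fmt.Sprintf("%+v", v) // "v8"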

type apiKey int16

const (
	produce                     apiKey = 0
	fetch                       apiKey = 1
	listOffsets                 apiKey = 2
	metadata                    apiKey = 3
	leaderAndIsr                apiKey = 4
	stopReplica                 apiKey = 5
	updateMetadata              apiKey = 6
	controlledShutdown          apiKey = 7
	offsetCommit                apiKey = 8
	offsetFetch                 apiKey = 9
	findCoordinator             apiKey = 10
	joinGroup                   apiKey = 11
	heartbeat                   apiKey = 12
	leaveGroup                  apiKey = 13
	syncGroup                   apiKey = 14
	describeGroups              apiKey = 15
	listGroups                  apiKey = 16
	saslHandshake               apiKey = 17
	apiVersions                 apiKey = 18
	createTopics                apiKey = 19
	deleteTopics                apiKey = 20
	deleteRecords               apiKey = 21
	initProducerId              apiKey = 22
	offsetForLeaderEpoch        apiKey = 23
	addPartitionsToTxn          apiKey = 24
	addOffsetsToTxn             apiKey = 25
	endTxn                      apiKey = 26
	writeTxnMarkers             apiKey = 27
	txnOffsetCommit             apiKey = 28
	describeAcls                apiKey = 29
	createAcls                  apiKey = 30
	deleteAcls                  apiKey = 31
	describeConfigs             apiKey = 32
	alterConfigs                apiKey = 33
	alterReplicaLogDirs         apiKey = 34
	describeLogDirs             apiKey = 35
	saslAuthenticate            apiKey = 36
	createPartitions            apiKey = 37
	createDelegationToken       apiKey = 38
	renewDelegationToken        apiKey = 39
	expireDelegationToken       apiKey = 40
	describeDelegationToken     apiKey = 41
	deleteGroups                apiKey = 42
	electLeaders                apiKey = 43
	incrementalAlterConfigs     apiKey = 44
	alterPartitionReassignments apiKey = 45
	listPartitionReassignments  apiKey = 46
	offsetDelete                apiKey = 47
)

func (k apiKey) String() string {
	if i := int(k); i >= 0 && i < len(apiKeyStrings) {
		return apiKeyStrings[i]
	}
	return strconv.Itoa(int(k))
}

type apiVersion int16

const (
	v0  = 0
	v1  = 1
	v2  = 2
	v3  = 3
	v5  = 5
	v6  = 6
	v7  = 7
	v10 = 10

	// Unused protocol versions: v4, v8, v9.
)

var apiKeyStrings = [...]string{
	produce:                     "Produce",
	fetch:                       "Fetch",
	listOffsets:                 "ListOffsets",
	metadata:                    "Metadata",
	leaderAndIsr:                "LeaderAndIsr",
	stopReplica:                 "StopReplica",
	updateMetadata:              "UpdateMetadata",
	controlledShutdown:          "ControlledShutdown",
	offsetCommit:                "OffsetCommit",
	offsetFetch:                 "OffsetFetch",
	findCoordinator:             "FindCoordinator",
	joinGroup:                   "JoinGroup",
	heartbeat:                   "Heartbeat",
	leaveGroup:                  "LeaveGroup",
	syncGroup:                   "SyncGroup",
	describeGroups:              "DescribeGroups",
	listGroups:                  "ListGroups",
	saslHandshake:               "SaslHandshake",
	apiVersions:                 "ApiVersions",
	createTopics:                "CreateTopics",
	deleteTopics:                "DeleteTopics",
	deleteRecords:               "DeleteRecords",
	initProducerId:              "InitProducerId",
	offsetForLeaderEpoch:        "OffsetForLeaderEpoch",
	addPartitionsToTxn:          "AddPartitionsToTxn",
	addOffsetsToTxn:             "AddOffsetsToTxn",
	endTxn:                      "EndTxn",
	writeTxnMarkers:             "WriteTxnMarkers",
	txnOffsetCommit:             "TxnOffsetCommit",
	describeAcls:                "DescribeAcls",
	createAcls:                  "CreateAcls",
	deleteAcls:                  "DeleteAcls",
	describeConfigs:             "DescribeConfigs",
	alterConfigs:                "AlterConfigs",
	alterReplicaLogDirs:         "AlterReplicaLogDirs",
	describeLogDirs:             "DescribeLogDirs",
	saslAuthenticate:            "SaslAuthenticate",
	createPartitions:            "CreatePartitions",
	createDelegationToken:       "CreateDelegationToken",
	renewDelegationToken:        "RenewDelegationToken",
	expireDelegationToken:       "ExpireDelegationToken",
	describeDelegationToken:     "DescribeDelegationToken",
	deleteGroups:                "DeleteGroups",
	electLeaders:                "ElectLeaders",
	incrementalAlterConfigs:     "IncrementalAlterConfigs",
	alterPartitionReassignments: "AlterPartitionReassignments",
	listPartitionReassignments:  "ListPartitionReassignments",
	offsetDelete:                "OffsetDelete",
}

type requestHeader struct {
	Size          int32
	ApiKey        int16
	ApiVersion    int16
	CorrelationID int32
	ClientID      string
}

func (h requestHeader) size() int32 {
	return 4 + 2 + 2 + 4 + sizeofString(h.ClientID)
}

func (h requestHeader) writeTo(wb *writeBuffer) {
	wb.writeInt32(h.Size)
	wb.writeInt16(h.ApiKey)
	wb.writeInt16(h.ApiVersion)
	wb.writeInt32(h.CorrelationID)
	wb.writeString(h.ClientID)
}

type request interface {
	size() int32
	writable
}

func makeInt8(b []byte) int8 {
	return int8(b[0])
}

func makeInt16(b []byte) int16 {
	return int16(binary.BigEndian.Uint16(b))
}

func makeInt32(b []byte) int32 {
	return int32(binary.BigEndian.Uint32(b))
}

func makeInt64(b []byte) int64 {
	return int64(binary.BigEndian.Uint64(b))
}

func expectZeroSize(sz int, err error) error {
	if err == nil && sz != 0 {
		err = fmt.Errorf("reading a response left %d unread bytes", sz)
	}
	return err
}
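
// A short sketch (not part of the vendored source): the makeIntN helpers
// decode Kafka's big-endian wire integers straight out of a byte slice, and
// expectZeroSize is how response parsers assert a payload was fully consumed.
//
//	makeInt16([]byte{0x00, 0x2a})             // == 42
//	makeInt32([]byte{0x00, 0x00, 0x00, 0x2a}) // == 42
//	err := expectZeroSize(3, nil)             // "reading a response left 3 unread bytes"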
35
vendor/github.com/segmentio/kafka-go/protocol/addoffsetstotxn/addoffsetstotxn.go
generated
vendored
Normal file
@@ -0,0 +1,35 @@
package addoffsetstotxn

import "github.com/segmentio/kafka-go/protocol"

func init() {
	protocol.Register(&Request{}, &Response{})
}

type Request struct {
	// We need at least one tagged field to indicate that this is a "flexible" message
	// type.
	_ struct{} `kafka:"min=v3,max=v3,tag"`

	TransactionalID string `kafka:"min=v0,max=v2|min=v3,max=v3,compact"`
	ProducerID      int64  `kafka:"min=v0,max=v3"`
	ProducerEpoch   int16  `kafka:"min=v0,max=v3"`
	GroupID         string `kafka:"min=v0,max=v2|min=v3,max=v3,compact"`
}

func (r *Request) ApiKey() protocol.ApiKey { return protocol.AddOffsetsToTxn }

func (r *Request) Transaction() string { return r.TransactionalID }

var _ protocol.TransactionalMessage = (*Request)(nil)

type Response struct {
	// We need at least one tagged field to indicate that this is a "flexible" message
	// type.
	_ struct{} `kafka:"min=v3,max=v3,tag"`

	ThrottleTimeMs int32 `kafka:"min=v0,max=v3"`
	ErrorCode      int16 `kafka:"min=v0,max=v3"`
}

func (r *Response) ApiKey() protocol.ApiKey { return protocol.AddOffsetsToTxn }
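
// A short note (not part of the vendored source) on the struct tags used by
// these protocol types, under the assumption that "|" separates alternative
// encodings by version range: a field like
//
//	GroupID string `kafka:"min=v0,max=v2|min=v3,max=v3,compact"`
//
// is encoded as a regular string for request versions 0 through 2, and as a
// compact string for version 3, where the message is "flexible".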
62
vendor/github.com/segmentio/kafka-go/protocol/addpartitionstotxn/addpartitionstotxn.go
generated
vendored
Normal file
@@ -0,0 +1,62 @@
package addpartitionstotxn

import "github.com/segmentio/kafka-go/protocol"

func init() {
	protocol.Register(&Request{}, &Response{})
}

type Request struct {
	// We need at least one tagged field to indicate that this is a "flexible" message
	// type.
	_ struct{} `kafka:"min=v3,max=v3,tag"`

	TransactionalID string         `kafka:"min=v0,max=v2|min=v3,max=v3,compact"`
	ProducerID      int64          `kafka:"min=v0,max=v3"`
	ProducerEpoch   int16          `kafka:"min=v0,max=v3"`
	Topics          []RequestTopic `kafka:"min=v0,max=v3"`
}

type RequestTopic struct {
	// We need at least one tagged field to indicate that this is a "flexible" message
	// type.
	_ struct{} `kafka:"min=v3,max=v3,tag"`

	Name       string  `kafka:"min=v0,max=v2|min=v3,max=v3,compact"`
	Partitions []int32 `kafka:"min=v0,max=v3"`
}

func (r *Request) ApiKey() protocol.ApiKey { return protocol.AddPartitionsToTxn }

func (r *Request) Transaction() string { return r.TransactionalID }

var _ protocol.TransactionalMessage = (*Request)(nil)

type Response struct {
	// We need at least one tagged field to indicate that this is a "flexible" message
	// type.
	_ struct{} `kafka:"min=v3,max=v3,tag"`

	ThrottleTimeMs int32            `kafka:"min=v0,max=v3"`
	Results        []ResponseResult `kafka:"min=v0,max=v3"`
}

type ResponseResult struct {
	// We need at least one tagged field to indicate that this is a "flexible" message
	// type.
	_ struct{} `kafka:"min=v3,max=v3,tag"`

	Name    string              `kafka:"min=v0,max=v2|min=v3,max=v3,compact"`
	Results []ResponsePartition `kafka:"min=v0,max=v3"`
}

type ResponsePartition struct {
	// We need at least one tagged field to indicate that this is a "flexible" message
	// type.
	_ struct{} `kafka:"min=v3,max=v3,tag"`

	PartitionIndex int32 `kafka:"min=v0,max=v3"`
	ErrorCode      int16 `kafka:"min=v0,max=v3"`
}

func (r *Response) ApiKey() protocol.ApiKey { return protocol.AddPartitionsToTxn }
48
vendor/github.com/segmentio/kafka-go/protocol/alterconfigs/alterconfigs.go
generated
vendored
Normal file
@@ -0,0 +1,48 @@
package alterconfigs

import "github.com/segmentio/kafka-go/protocol"

func init() {
	protocol.Register(&Request{}, &Response{})
}

// Detailed API definition: https://kafka.apache.org/protocol#The_Messages_AlterConfigs
type Request struct {
	Resources    []RequestResources `kafka:"min=v0,max=v1"`
	ValidateOnly bool               `kafka:"min=v0,max=v1"`
}

func (r *Request) ApiKey() protocol.ApiKey { return protocol.AlterConfigs }

func (r *Request) Broker(cluster protocol.Cluster) (protocol.Broker, error) {
	return cluster.Brokers[cluster.Controller], nil
}

type RequestResources struct {
	ResourceType int8            `kafka:"min=v0,max=v1"`
	ResourceName string          `kafka:"min=v0,max=v1"`
	Configs      []RequestConfig `kafka:"min=v0,max=v1"`
}

type RequestConfig struct {
	Name  string `kafka:"min=v0,max=v1"`
	Value string `kafka:"min=v0,max=v1,nullable"`
}

type Response struct {
	ThrottleTimeMs int32               `kafka:"min=v0,max=v1"`
	Responses      []ResponseResponses `kafka:"min=v0,max=v1"`
}

func (r *Response) ApiKey() protocol.ApiKey { return protocol.AlterConfigs }

type ResponseResponses struct {
	ErrorCode    int16  `kafka:"min=v0,max=v1"`
	ErrorMessage string `kafka:"min=v0,max=v1,nullable"`
	ResourceType int8   `kafka:"min=v0,max=v1"`
	ResourceName string `kafka:"min=v0,max=v1"`
}

var (
	_ protocol.BrokerMessage = (*Request)(nil)
)
61
vendor/github.com/segmentio/kafka-go/protocol/alterpartitionreassignments/alterpartitionreassignments.go
generated
vendored
Normal file
@@ -0,0 +1,61 @@
package alterpartitionreassignments

import "github.com/segmentio/kafka-go/protocol"

func init() {
	protocol.Register(&Request{}, &Response{})
}

// Detailed API definition: https://kafka.apache.org/protocol#The_Messages_AlterPartitionReassignments
type Request struct {
	// We need at least one tagged field to indicate that this is a "flexible" message
	// type.
	_ struct{} `kafka:"min=v0,max=v0,tag"`

	TimeoutMs int32          `kafka:"min=v0,max=v0"`
	Topics    []RequestTopic `kafka:"min=v0,max=v0"`
}

type RequestTopic struct {
	Name       string             `kafka:"min=v0,max=v0"`
	Partitions []RequestPartition `kafka:"min=v0,max=v0"`
}

type RequestPartition struct {
	PartitionIndex int32   `kafka:"min=v0,max=v0"`
	Replicas       []int32 `kafka:"min=v0,max=v0"`
}

func (r *Request) ApiKey() protocol.ApiKey {
	return protocol.AlterPartitionReassignments
}

func (r *Request) Broker(cluster protocol.Cluster) (protocol.Broker, error) {
	return cluster.Brokers[cluster.Controller], nil
}

type Response struct {
	// We need at least one tagged field to indicate that this is a "flexible" message
	// type.
	_ struct{} `kafka:"min=v0,max=v0,tag"`

	ThrottleTimeMs int32            `kafka:"min=v0,max=v0"`
	ErrorCode      int16            `kafka:"min=v0,max=v0"`
	ErrorMessage   string           `kafka:"min=v0,max=v0,nullable"`
	Results        []ResponseResult `kafka:"min=v0,max=v0"`
}

type ResponseResult struct {
	Name       string              `kafka:"min=v0,max=v0"`
	Partitions []ResponsePartition `kafka:"min=v0,max=v0"`
}

type ResponsePartition struct {
	PartitionIndex int32  `kafka:"min=v0,max=v0"`
	ErrorCode      int16  `kafka:"min=v0,max=v0"`
	ErrorMessage   string `kafka:"min=v0,max=v0,nullable"`
}

func (r *Response) ApiKey() protocol.ApiKey {
	return protocol.AlterPartitionReassignments
}
27
vendor/github.com/segmentio/kafka-go/protocol/apiversions/apiversions.go
generated
vendored
Normal file
@@ -0,0 +1,27 @@
package apiversions

import "github.com/segmentio/kafka-go/protocol"

func init() {
	protocol.Register(&Request{}, &Response{})
}

type Request struct {
	_ struct{} `kafka:"min=v0,max=v2"`
}

func (r *Request) ApiKey() protocol.ApiKey { return protocol.ApiVersions }

type Response struct {
	ErrorCode      int16            `kafka:"min=v0,max=v2"`
	ApiKeys        []ApiKeyResponse `kafka:"min=v0,max=v2"`
	ThrottleTimeMs int32            `kafka:"min=v1,max=v2"`
}

func (r *Response) ApiKey() protocol.ApiKey { return protocol.ApiVersions }

type ApiKeyResponse struct {
	ApiKey     int16 `kafka:"min=v0,max=v2"`
	MinVersion int16 `kafka:"min=v0,max=v2"`
	MaxVersion int16 `kafka:"min=v0,max=v2"`
}
634
vendor/github.com/segmentio/kafka-go/protocol/buffer.go
generated
vendored
Normal file
@@ -0,0 +1,634 @@
package protocol

import (
	"bytes"
	"errors"
	"fmt"
	"io"
	"math"
	"sync"
	"sync/atomic"
)

// Bytes is an interface implemented by types that represent immutable
// sequences of bytes.
//
// Bytes values are used to abstract the location where record keys and
// values are read from (e.g. in-memory buffers, network sockets, files).
//
// The Close method should be called to release resources held by the object
// when the program is done with it.
//
// Bytes values are generally not safe to use concurrently from multiple
// goroutines.
type Bytes interface {
	io.ReadCloser
	// Returns the number of bytes remaining to be read from the payload.
	Len() int
}

// NewBytes constructs a Bytes value from b.
//
// The returned value references b, it does not make a copy of the backing
// array.
//
// If b is nil, nil is returned to represent a null BYTES value in the kafka
// protocol.
func NewBytes(b []byte) Bytes {
	if b == nil {
		return nil
	}
	r := new(bytesReader)
	r.Reset(b)
	return r
}

// ReadAll is similar to ioutil.ReadAll, but it takes advantage of knowing the
// length of b to minimize the memory footprint.
//
// The function returns a nil slice if b is nil.
func ReadAll(b Bytes) ([]byte, error) {
	if b == nil {
		return nil, nil
	}
	s := make([]byte, b.Len())
	_, err := io.ReadFull(b, s)
	return s, err
}
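
// A short sketch (not part of the vendored source) of the Bytes round trip:
// NewBytes wraps an in-memory slice without copying, and ReadAll drains a
// Bytes value into a slice sized from Len.
//
//	b := NewBytes([]byte("key"))
//	data, err := ReadAll(b) // data == []byte("key"), a single allocation
//	if err != nil {
//		log.Fatal(err)
//	}
//	b.Close() // release any resources held by the value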

type bytesReader struct{ bytes.Reader }

func (*bytesReader) Close() error { return nil }

type refCount uintptr

func (rc *refCount) ref() { atomic.AddUintptr((*uintptr)(rc), 1) }

func (rc *refCount) unref(onZero func()) {
	if atomic.AddUintptr((*uintptr)(rc), ^uintptr(0)) == 0 {
		onZero()
	}
}

const (
	// Size of the memory buffer for a single page. We use a fairly
	// large size here (64 KiB) because batches exchanged with kafka
	// tend to be multiple kilobytes in size, sometimes hundreds.
	// Using large pages amortizes the overhead of the page metadata
	// and algorithms to manage the pages.
	pageSize = 65536
)

type page struct {
	refc   refCount
	offset int64
	length int
	buffer *[pageSize]byte
}

func newPage(offset int64) *page {
	p, _ := pagePool.Get().(*page)
	if p != nil {
		p.offset = offset
		p.length = 0
		p.ref()
	} else {
		p = &page{
			refc:   1,
			offset: offset,
			buffer: &[pageSize]byte{},
		}
	}
	return p
}

func (p *page) ref() { p.refc.ref() }

func (p *page) unref() { p.refc.unref(func() { pagePool.Put(p) }) }

func (p *page) slice(begin, end int64) []byte {
	i, j := begin-p.offset, end-p.offset

	if i < 0 {
		i = 0
	} else if i > pageSize {
		i = pageSize
	}

	if j < 0 {
		j = 0
	} else if j > pageSize {
		j = pageSize
	}

	if i < j {
		return p.buffer[i:j]
	}

	return nil
}

func (p *page) Cap() int { return pageSize }

func (p *page) Len() int { return p.length }

func (p *page) Size() int64 { return int64(p.length) }

func (p *page) Truncate(n int) {
	if n < p.length {
		p.length = n
	}
}

func (p *page) ReadAt(b []byte, off int64) (int, error) {
	if off -= p.offset; off < 0 || off > pageSize {
		panic("offset out of range")
	}
	if off > int64(p.length) {
		return 0, nil
	}
	return copy(b, p.buffer[off:p.length]), nil
}

func (p *page) ReadFrom(r io.Reader) (int64, error) {
	n, err := io.ReadFull(r, p.buffer[p.length:])
	if errors.Is(err, io.EOF) || errors.Is(err, io.ErrUnexpectedEOF) {
		err = nil
	}
	p.length += n
	return int64(n), err
}

func (p *page) WriteAt(b []byte, off int64) (int, error) {
	if off -= p.offset; off < 0 || off > pageSize {
		panic("offset out of range")
	}
	n := copy(p.buffer[off:], b)
	if end := int(off) + n; end > p.length {
		p.length = end
	}
	return n, nil
}

func (p *page) Write(b []byte) (int, error) {
	return p.WriteAt(b, p.offset+int64(p.length))
}

var (
	_ io.ReaderAt   = (*page)(nil)
	_ io.ReaderFrom = (*page)(nil)
	_ io.Writer     = (*page)(nil)
	_ io.WriterAt   = (*page)(nil)
)

type pageBuffer struct {
	refc   refCount
	pages  contiguousPages
	length int
	cursor int
}

func newPageBuffer() *pageBuffer {
	b, _ := pageBufferPool.Get().(*pageBuffer)
	if b != nil {
		b.cursor = 0
		b.refc.ref()
	} else {
		b = &pageBuffer{
			refc:  1,
			pages: make(contiguousPages, 0, 16),
		}
	}
	return b
}

func (pb *pageBuffer) refTo(ref *pageRef, begin, end int64) {
	length := end - begin

	if length > math.MaxUint32 {
		panic("reference to contiguous buffer pages exceeds the maximum size of 4 GB")
	}

	ref.pages = append(ref.buffer[:0], pb.pages.slice(begin, end)...)
	ref.pages.ref()
	ref.offset = begin
	ref.length = uint32(length)
}

func (pb *pageBuffer) ref(begin, end int64) *pageRef {
	ref := new(pageRef)
	pb.refTo(ref, begin, end)
	return ref
}

func (pb *pageBuffer) unref() {
	pb.refc.unref(func() {
		pb.pages.unref()
		pb.pages.clear()
		pb.pages = pb.pages[:0]
		pb.length = 0
		pageBufferPool.Put(pb)
	})
}

func (pb *pageBuffer) newPage() *page {
	return newPage(int64(pb.length))
}

func (pb *pageBuffer) Close() error {
	return nil
}

func (pb *pageBuffer) Len() int {
	return pb.length - pb.cursor
}

func (pb *pageBuffer) Size() int64 {
	return int64(pb.length)
}

func (pb *pageBuffer) Discard(n int) (int, error) {
	remain := pb.length - pb.cursor
	if remain < n {
		n = remain
	}
	pb.cursor += n
	return n, nil
}

func (pb *pageBuffer) Truncate(n int) {
	if n < pb.length {
		pb.length = n

		if n < pb.cursor {
			pb.cursor = n
		}

		for i := range pb.pages {
			if p := pb.pages[i]; p.length <= n {
				n -= p.length
			} else {
				if n > 0 {
					pb.pages[i].Truncate(n)
					i++
				}
				pb.pages[i:].unref()
				pb.pages[i:].clear()
				pb.pages = pb.pages[:i]
				break
			}
		}
	}
}

func (pb *pageBuffer) Seek(offset int64, whence int) (int64, error) {
	c, err := seek(int64(pb.cursor), int64(pb.length), offset, whence)
	if err != nil {
		return -1, err
	}
	pb.cursor = int(c)
	return c, nil
}

func (pb *pageBuffer) ReadByte() (byte, error) {
	b := [1]byte{}
	_, err := pb.Read(b[:])
	return b[0], err
}

func (pb *pageBuffer) Read(b []byte) (int, error) {
	if pb.cursor >= pb.length {
		return 0, io.EOF
	}
	n, err := pb.ReadAt(b, int64(pb.cursor))
	pb.cursor += n
	return n, err
}

func (pb *pageBuffer) ReadAt(b []byte, off int64) (int, error) {
	return pb.pages.ReadAt(b, off)
}

func (pb *pageBuffer) ReadFrom(r io.Reader) (int64, error) {
	if len(pb.pages) == 0 {
		pb.pages = append(pb.pages, pb.newPage())
	}

	rn := int64(0)

	for {
		tail := pb.pages[len(pb.pages)-1]
		free := tail.Cap() - tail.Len()

		if free == 0 {
			tail = pb.newPage()
			free = pageSize
			pb.pages = append(pb.pages, tail)
		}

		n, err := tail.ReadFrom(r)
		pb.length += int(n)
		rn += n
		if n < int64(free) {
			return rn, err
		}
	}
}

func (pb *pageBuffer) WriteString(s string) (int, error) {
	return pb.Write([]byte(s))
}

func (pb *pageBuffer) Write(b []byte) (int, error) {
	wn := len(b)
	if wn == 0 {
		return 0, nil
	}

	if len(pb.pages) == 0 {
		pb.pages = append(pb.pages, pb.newPage())
	}

	for len(b) != 0 {
		tail := pb.pages[len(pb.pages)-1]
		free := tail.Cap() - tail.Len()

		if len(b) <= free {
			tail.Write(b)
			pb.length += len(b)
			break
		}

		tail.Write(b[:free])
		b = b[free:]

		pb.length += free
		pb.pages = append(pb.pages, pb.newPage())
	}

	return wn, nil
}

func (pb *pageBuffer) WriteAt(b []byte, off int64) (int, error) {
	n, err := pb.pages.WriteAt(b, off)
	if err != nil {
		return n, err
	}
	if n < len(b) {
		pb.Write(b[n:])
	}
	return len(b), nil
}

func (pb *pageBuffer) WriteTo(w io.Writer) (int64, error) {
	var wn int
	var err error
	pb.pages.scan(int64(pb.cursor), int64(pb.length), func(b []byte) bool {
		var n int
		n, err = w.Write(b)
		wn += n
		return err == nil
	})
	pb.cursor += wn
	return int64(wn), err
}

var (
	_ io.ReaderAt     = (*pageBuffer)(nil)
	_ io.ReaderFrom   = (*pageBuffer)(nil)
	_ io.StringWriter = (*pageBuffer)(nil)
	_ io.Writer       = (*pageBuffer)(nil)
	_ io.WriterAt     = (*pageBuffer)(nil)
	_ io.WriterTo     = (*pageBuffer)(nil)

	pagePool       sync.Pool
	pageBufferPool sync.Pool
)

type contiguousPages []*page

func (pages contiguousPages) ref() {
	for _, p := range pages {
		p.ref()
	}
}

func (pages contiguousPages) unref() {
	for _, p := range pages {
		p.unref()
	}
}

func (pages contiguousPages) clear() {
	for i := range pages {
		pages[i] = nil
	}
}

func (pages contiguousPages) ReadAt(b []byte, off int64) (int, error) {
	rn := 0

	for _, p := range pages.slice(off, off+int64(len(b))) {
		n, _ := p.ReadAt(b, off)
		b = b[n:]
		rn += n
		off += int64(n)
	}

	return rn, nil
}

func (pages contiguousPages) WriteAt(b []byte, off int64) (int, error) {
	wn := 0

	for _, p := range pages.slice(off, off+int64(len(b))) {
		n, _ := p.WriteAt(b, off)
		b = b[n:]
		wn += n
		off += int64(n)
	}

	return wn, nil
}

func (pages contiguousPages) slice(begin, end int64) contiguousPages {
	i := pages.indexOf(begin)
	j := pages.indexOf(end)
	if j < len(pages) {
		j++
	}
	return pages[i:j]
}

func (pages contiguousPages) indexOf(offset int64) int {
	if len(pages) == 0 {
		return 0
	}
	return int((offset - pages[0].offset) / pageSize)
}

func (pages contiguousPages) scan(begin, end int64, f func([]byte) bool) {
	for _, p := range pages.slice(begin, end) {
		if !f(p.slice(begin, end)) {
			break
		}
	}
}

var (
	_ io.ReaderAt = contiguousPages{}
	_ io.WriterAt = contiguousPages{}
)

type pageRef struct {
	buffer [2]*page
	pages  contiguousPages
	offset int64
	cursor int64
	length uint32
	once   uint32
}

func (ref *pageRef) unref() {
	if atomic.CompareAndSwapUint32(&ref.once, 0, 1) {
		ref.pages.unref()
		ref.pages.clear()
		ref.pages = nil
		ref.offset = 0
		ref.cursor = 0
		ref.length = 0
	}
}

func (ref *pageRef) Len() int { return int(ref.Size() - ref.cursor) }

func (ref *pageRef) Size() int64 { return int64(ref.length) }

func (ref *pageRef) Close() error { ref.unref(); return nil }

func (ref *pageRef) String() string {
	return fmt.Sprintf("[offset=%d cursor=%d length=%d]", ref.offset, ref.cursor, ref.length)
}

func (ref *pageRef) Seek(offset int64, whence int) (int64, error) {
	c, err := seek(ref.cursor, int64(ref.length), offset, whence)
	if err != nil {
		return -1, err
	}
	ref.cursor = c
	return c, nil
}

func (ref *pageRef) ReadByte() (byte, error) {
	var c byte
	var ok bool
	ref.scan(ref.cursor, func(b []byte) bool {
		c, ok = b[0], true
		return false
	})
	if ok {
		ref.cursor++
	} else {
		return 0, io.EOF
	}
	return c, nil
}

func (ref *pageRef) Read(b []byte) (int, error) {
	if ref.cursor >= int64(ref.length) {
		return 0, io.EOF
	}
	n, err := ref.ReadAt(b, ref.cursor)
	ref.cursor += int64(n)
	return n, err
}

func (ref *pageRef) ReadAt(b []byte, off int64) (int, error) {
	limit := ref.offset + int64(ref.length)
	off += ref.offset

	if off >= limit {
		return 0, io.EOF
	}

	if off+int64(len(b)) > limit {
		b = b[:limit-off]
	}

	if len(b) == 0 {
		return 0, nil
	}

	n, err := ref.pages.ReadAt(b, off)
	if n == 0 && err == nil {
		err = io.EOF
	}
	return n, err
}

func (ref *pageRef) WriteTo(w io.Writer) (wn int64, err error) {
	ref.scan(ref.cursor, func(b []byte) bool {
		var n int
		n, err = w.Write(b)
		wn += int64(n)
		return err == nil
	})
	ref.cursor += wn
	return
}

func (ref *pageRef) scan(off int64, f func([]byte) bool) {
	begin := ref.offset + off
	end := ref.offset + int64(ref.length)
	ref.pages.scan(begin, end, f)
}

var (
	_ io.Closer   = (*pageRef)(nil)
	_ io.Seeker   = (*pageRef)(nil)
	_ io.Reader   = (*pageRef)(nil)
	_ io.ReaderAt = (*pageRef)(nil)
	_ io.WriterTo = (*pageRef)(nil)
)

type pageRefAllocator struct {
	refs []pageRef
	head int
	size int
}

func (a *pageRefAllocator) newPageRef() *pageRef {
	if a.head == len(a.refs) {
		a.refs = make([]pageRef, a.size)
		a.head = 0
	}
	ref := &a.refs[a.head]
	a.head++
	return ref
}

func seek(cursor, limit, offset int64, whence int) (int64, error) {
	switch whence {
	case io.SeekStart:
		// absolute offset
	case io.SeekCurrent:
		offset = cursor + offset
	case io.SeekEnd:
		offset = limit - offset
	default:
		return -1, fmt.Errorf("seek: invalid whence value: %d", whence)
	}
	if offset < 0 {
		offset = 0
	}
	if offset > limit {
		offset = limit
	}
	return offset, nil
}

func closeBytes(b Bytes) {
	if b != nil {
		b.Close()
	}
}
143
vendor/github.com/segmentio/kafka-go/protocol/cluster.go
generated
vendored
Normal file
@@ -0,0 +1,143 @@
package protocol

import (
	"fmt"
	"sort"
	"strings"
	"text/tabwriter"
)

type Cluster struct {
	ClusterID  string
	Controller int32
	Brokers    map[int32]Broker
	Topics     map[string]Topic
}

func (c Cluster) BrokerIDs() []int32 {
	brokerIDs := make([]int32, 0, len(c.Brokers))
	for id := range c.Brokers {
		brokerIDs = append(brokerIDs, id)
	}
	sort.Slice(brokerIDs, func(i, j int) bool {
		return brokerIDs[i] < brokerIDs[j]
	})
	return brokerIDs
}

func (c Cluster) TopicNames() []string {
	topicNames := make([]string, 0, len(c.Topics))
	for name := range c.Topics {
		topicNames = append(topicNames, name)
	}
	sort.Strings(topicNames)
	return topicNames
}

func (c Cluster) IsZero() bool {
	return c.ClusterID == "" && c.Controller == 0 && len(c.Brokers) == 0 && len(c.Topics) == 0
}

func (c Cluster) Format(w fmt.State, _ rune) {
	tw := new(tabwriter.Writer)
	fmt.Fprintf(w, "CLUSTER: %q\n\n", c.ClusterID)

	tw.Init(w, 0, 8, 2, ' ', 0)
	fmt.Fprint(tw, "  BROKER\tHOST\tPORT\tRACK\tCONTROLLER\n")

	for _, id := range c.BrokerIDs() {
		broker := c.Brokers[id]
		fmt.Fprintf(tw, "  %d\t%s\t%d\t%s\t%t\n", broker.ID, broker.Host, broker.Port, broker.Rack, broker.ID == c.Controller)
	}

	tw.Flush()
	fmt.Fprintln(w)

	tw.Init(w, 0, 8, 2, ' ', 0)
	fmt.Fprint(tw, "  TOPIC\tPARTITIONS\tBROKERS\n")
	topicNames := c.TopicNames()
	brokers := make(map[int32]struct{}, len(c.Brokers))
	brokerIDs := make([]int32, 0, len(c.Brokers))

	for _, name := range topicNames {
		topic := c.Topics[name]

		for _, p := range topic.Partitions {
			for _, id := range p.Replicas {
				brokers[id] = struct{}{}
			}
		}

		for id := range brokers {
			brokerIDs = append(brokerIDs, id)
		}

		fmt.Fprintf(tw, "  %s\t%d\t%s\n", topic.Name, len(topic.Partitions), formatBrokerIDs(brokerIDs, -1))

		for id := range brokers {
			delete(brokers, id)
		}

		brokerIDs = brokerIDs[:0]
	}

	tw.Flush()
	fmt.Fprintln(w)

	if w.Flag('+') {
		for _, name := range topicNames {
			fmt.Fprintf(w, "  TOPIC: %q\n\n", name)

			tw.Init(w, 0, 8, 2, ' ', 0)
			fmt.Fprint(tw, "  PARTITION\tREPLICAS\tISR\tOFFLINE\n")

			for _, p := range c.Topics[name].Partitions {
				fmt.Fprintf(tw, "  %d\t%s\t%s\t%s\n", p.ID,
					formatBrokerIDs(p.Replicas, -1),
					formatBrokerIDs(p.ISR, p.Leader),
					formatBrokerIDs(p.Offline, -1),
				)
			}

			tw.Flush()
			fmt.Fprintln(w)
		}
	}
}

func formatBrokerIDs(brokerIDs []int32, leader int32) string {
	if len(brokerIDs) == 0 {
		return ""
	}

	if len(brokerIDs) == 1 {
		return itoa(brokerIDs[0])
	}

	sort.Slice(brokerIDs, func(i, j int) bool {
		id1 := brokerIDs[i]
		id2 := brokerIDs[j]

		if id1 == leader {
			return true
		}

		if id2 == leader {
			return false
		}

		return id1 < id2
	})

	brokerNames := make([]string, len(brokerIDs))

	for i, id := range brokerIDs {
		brokerNames[i] = itoa(id)
	}

	return strings.Join(brokerNames, ",")
}

var (
	_ fmt.Formatter = Cluster{}
)
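
// A short sketch (not part of the vendored source): Cluster implements
// fmt.Formatter, so a metadata snapshot can be printed directly. The plain
// verb prints the broker and topic tables; the '+' flag also prints a
// PARTITION/REPLICAS/ISR/OFFLINE table for every topic.
//
//	fmt.Printf("%v", cluster)  // brokers and topics only
//	fmt.Printf("%+v", cluster) // adds per-topic partition details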
100
vendor/github.com/segmentio/kafka-go/protocol/conn.go
generated
vendored
Normal file
@@ -0,0 +1,100 @@
package protocol

import (
	"bufio"
	"fmt"
	"net"
	"sync/atomic"
	"time"
)

type Conn struct {
	buffer   *bufio.Reader
	conn     net.Conn
	clientID string
	idgen    int32
	versions atomic.Value // map[ApiKey]int16
}

func NewConn(conn net.Conn, clientID string) *Conn {
	return &Conn{
		buffer:   bufio.NewReader(conn),
		conn:     conn,
		clientID: clientID,
	}
}

func (c *Conn) String() string {
	return fmt.Sprintf("kafka://%s@%s->%s", c.clientID, c.LocalAddr(), c.RemoteAddr())
}

func (c *Conn) Close() error {
	return c.conn.Close()
}

func (c *Conn) Discard(n int) (int, error) {
	return c.buffer.Discard(n)
}

func (c *Conn) Peek(n int) ([]byte, error) {
	return c.buffer.Peek(n)
}

func (c *Conn) Read(b []byte) (int, error) {
	return c.buffer.Read(b)
}

func (c *Conn) Write(b []byte) (int, error) {
	return c.conn.Write(b)
}

func (c *Conn) LocalAddr() net.Addr {
	return c.conn.LocalAddr()
}

func (c *Conn) RemoteAddr() net.Addr {
	return c.conn.RemoteAddr()
}

func (c *Conn) SetDeadline(t time.Time) error {
	return c.conn.SetDeadline(t)
}

func (c *Conn) SetReadDeadline(t time.Time) error {
	return c.conn.SetReadDeadline(t)
}

func (c *Conn) SetWriteDeadline(t time.Time) error {
	return c.conn.SetWriteDeadline(t)
}

func (c *Conn) SetVersions(versions map[ApiKey]int16) {
	connVersions := make(map[ApiKey]int16, len(versions))

	for k, v := range versions {
		connVersions[k] = v
	}

	c.versions.Store(connVersions)
}

func (c *Conn) RoundTrip(msg Message) (Message, error) {
	correlationID := atomic.AddInt32(&c.idgen, +1)
	versions, _ := c.versions.Load().(map[ApiKey]int16)
	apiVersion := versions[msg.ApiKey()]

	if p, _ := msg.(PreparedMessage); p != nil {
		p.Prepare(apiVersion)
	}

	if raw, ok := msg.(RawExchanger); ok && raw.Required(versions) {
		return raw.RawExchange(c)
	}

	return RoundTrip(c, apiVersion, correlationID, c.clientID, msg)
}

var (
	_ net.Conn       = (*Conn)(nil)
	_ bufferedReader = (*Conn)(nil)
)
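As a usage sketch (not part of the vendored source): the connection above composes with any registered message type, and the FindCoordinator request defined later in this commit makes a compact example. The broker address and the pinned API version are assumptions; a real client would learn supported versions with an ApiVersions exchange first.

package main

import (
	"fmt"
	"net"
	"time"

	"github.com/segmentio/kafka-go/protocol"
	"github.com/segmentio/kafka-go/protocol/findcoordinator"
)

func main() {
	// Dial a broker (address is a placeholder) and wrap the socket.
	raw, err := net.Dial("tcp", "localhost:9092")
	if err != nil {
		panic(err)
	}
	conn := protocol.NewConn(raw, "example-client")
	defer conn.Close()
	conn.SetDeadline(time.Now().Add(10 * time.Second))

	// Version negotiation is elided; pin FindCoordinator v1 by hand.
	conn.SetVersions(map[protocol.ApiKey]int16{protocol.FindCoordinator: 1})

	// RoundTrip assigns a correlation ID and performs one exchange.
	res, err := conn.RoundTrip(&findcoordinator.Request{Key: "example-group"})
	if err != nil {
		panic(err)
	}
	fmt.Printf("coordinator: %+v\n", res.(*findcoordinator.Response))
}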
21 vendor/github.com/segmentio/kafka-go/protocol/consumer/consumer.go generated vendored Normal file
@@ -0,0 +1,21 @@
package consumer

const MaxVersionSupported = 1

type Subscription struct {
	Version         int16            `kafka:"min=v0,max=v1"`
	Topics          []string         `kafka:"min=v0,max=v1"`
	UserData        []byte           `kafka:"min=v0,max=v1,nullable"`
	OwnedPartitions []TopicPartition `kafka:"min=v1,max=v1"`
}

type Assignment struct {
	Version            int16            `kafka:"min=v0,max=v1"`
	AssignedPartitions []TopicPartition `kafka:"min=v0,max=v1"`
	UserData           []byte           `kafka:"min=v0,max=v1,nullable"`
}

type TopicPartition struct {
	Topic      string  `kafka:"min=v0,max=v1"`
	Partitions []int32 `kafka:"min=v0,max=v1"`
}
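These structs carry only `kafka:` struct tags, so they serialize through the generic codec added elsewhere in this commit (see encode.go below). A minimal sketch of encoding a v1 subscription, with placeholder topic names:

	sub := consumer.Subscription{
		Version: 1,
		Topics:  []string{"events"},
		OwnedPartitions: []consumer.TopicPartition{
			{Topic: "events", Partitions: []int32{0, 1}},
		},
	}
	// protocol.Marshal selects the fields whose tag range covers version 1.
	b, err := protocol.Marshal(1, sub)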
49 vendor/github.com/segmentio/kafka-go/protocol/createacls/createacls.go generated vendored Normal file
@@ -0,0 +1,49 @@
package createacls

import "github.com/segmentio/kafka-go/protocol"

func init() {
	protocol.Register(&Request{}, &Response{})
}

type Request struct {
	// We need at least one tagged field to indicate that v2+ uses "flexible"
	// messages.
	_ struct{} `kafka:"min=v2,max=v2,tag"`

	Creations []RequestACLs `kafka:"min=v0,max=v2"`
}

func (r *Request) ApiKey() protocol.ApiKey { return protocol.CreateAcls }

func (r *Request) Broker(cluster protocol.Cluster) (protocol.Broker, error) {
	return cluster.Brokers[cluster.Controller], nil
}

type RequestACLs struct {
	ResourceType        int8   `kafka:"min=v0,max=v2"`
	ResourceName        string `kafka:"min=v0,max=v2"`
	ResourcePatternType int8   `kafka:"min=v0,max=v2"`
	Principal           string `kafka:"min=v0,max=v2"`
	Host                string `kafka:"min=v0,max=v2"`
	Operation           int8   `kafka:"min=v0,max=v2"`
	PermissionType      int8   `kafka:"min=v0,max=v2"`
}

type Response struct {
	// We need at least one tagged field to indicate that v2+ uses "flexible"
	// messages.
	_ struct{} `kafka:"min=v2,max=v2,tag"`

	ThrottleTimeMs int32          `kafka:"min=v0,max=v2"`
	Results        []ResponseACLs `kafka:"min=v0,max=v2"`
}

func (r *Response) ApiKey() protocol.ApiKey { return protocol.CreateAcls }

type ResponseACLs struct {
	ErrorCode    int16  `kafka:"min=v0,max=v2"`
	ErrorMessage string `kafka:"min=v0,max=v2,nullable"`
}

var _ protocol.BrokerMessage = (*Request)(nil)
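The int8 fields mirror the numeric enums of Kafka's ACL API; the values in the sketch below are the standard upstream encodings (topic resource = 2, literal pattern = 3, read operation = 3, allow = 3), stated as an illustration rather than defined by this file:

	req := &createacls.Request{
		Creations: []createacls.RequestACLs{{
			ResourceType:        2, // topic (standard Kafka enum value)
			ResourceName:        "events",
			ResourcePatternType: 3, // literal
			Principal:           "User:alice",
			Host:                "*",
			Operation:           3, // read
			PermissionType:      3, // allow
		}},
	}
	// Request.Broker routes this to the controller; send with conn.RoundTrip(req).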
46 vendor/github.com/segmentio/kafka-go/protocol/createpartitions/createpartitions.go generated vendored Normal file
@@ -0,0 +1,46 @@
package createpartitions

import "github.com/segmentio/kafka-go/protocol"

func init() {
	protocol.Register(&Request{}, &Response{})
}

// Detailed API definition: https://kafka.apache.org/protocol#The_Messages_CreatePartitions.
// TODO: Support version 2.
type Request struct {
	Topics       []RequestTopic `kafka:"min=v0,max=v1"`
	TimeoutMs    int32          `kafka:"min=v0,max=v1"`
	ValidateOnly bool           `kafka:"min=v0,max=v1"`
}

func (r *Request) ApiKey() protocol.ApiKey { return protocol.CreatePartitions }

func (r *Request) Broker(cluster protocol.Cluster) (protocol.Broker, error) {
	return cluster.Brokers[cluster.Controller], nil
}

type RequestTopic struct {
	Name        string              `kafka:"min=v0,max=v1"`
	Count       int32               `kafka:"min=v0,max=v1"`
	Assignments []RequestAssignment `kafka:"min=v0,max=v1,nullable"`
}

type RequestAssignment struct {
	BrokerIDs []int32 `kafka:"min=v0,max=v1"`
}

type Response struct {
	ThrottleTimeMs int32            `kafka:"min=v0,max=v1"`
	Results        []ResponseResult `kafka:"min=v0,max=v1"`
}

func (r *Response) ApiKey() protocol.ApiKey { return protocol.CreatePartitions }

type ResponseResult struct {
	Name         string `kafka:"min=v0,max=v1"`
	ErrorCode    int16  `kafka:"min=v0,max=v1"`
	ErrorMessage string `kafka:"min=v0,max=v1,nullable"`
}

var _ protocol.BrokerMessage = (*Request)(nil)
74 vendor/github.com/segmentio/kafka-go/protocol/createtopics/createtopics.go generated vendored Normal file
@@ -0,0 +1,74 @@
package createtopics

import "github.com/segmentio/kafka-go/protocol"

func init() {
	protocol.Register(&Request{}, &Response{})
}

type Request struct {
	// We need at least one tagged field to indicate that v5+ uses "flexible"
	// messages.
	_ struct{} `kafka:"min=v5,max=v5,tag"`

	Topics       []RequestTopic `kafka:"min=v0,max=v5"`
	TimeoutMs    int32          `kafka:"min=v0,max=v5"`
	ValidateOnly bool           `kafka:"min=v1,max=v5"`
}

func (r *Request) ApiKey() protocol.ApiKey { return protocol.CreateTopics }

func (r *Request) Broker(cluster protocol.Cluster) (protocol.Broker, error) {
	return cluster.Brokers[cluster.Controller], nil
}

type RequestTopic struct {
	Name              string              `kafka:"min=v0,max=v5"`
	NumPartitions     int32               `kafka:"min=v0,max=v5"`
	ReplicationFactor int16               `kafka:"min=v0,max=v5"`
	Assignments       []RequestAssignment `kafka:"min=v0,max=v5"`
	Configs           []RequestConfig     `kafka:"min=v0,max=v5"`
}

type RequestAssignment struct {
	PartitionIndex int32   `kafka:"min=v0,max=v5"`
	BrokerIDs      []int32 `kafka:"min=v0,max=v5"`
}

type RequestConfig struct {
	Name  string `kafka:"min=v0,max=v5"`
	Value string `kafka:"min=v0,max=v5,nullable"`
}

type Response struct {
	// We need at least one tagged field to indicate that v5+ uses "flexible"
	// messages.
	_ struct{} `kafka:"min=v5,max=v5,tag"`

	ThrottleTimeMs int32           `kafka:"min=v2,max=v5"`
	Topics         []ResponseTopic `kafka:"min=v0,max=v5"`
}

func (r *Response) ApiKey() protocol.ApiKey { return protocol.CreateTopics }

type ResponseTopic struct {
	Name              string `kafka:"min=v0,max=v5"`
	ErrorCode         int16  `kafka:"min=v0,max=v5"`
	ErrorMessage      string `kafka:"min=v1,max=v5,nullable"`
	NumPartitions     int32  `kafka:"min=v5,max=v5"`
	ReplicationFactor int16  `kafka:"min=v5,max=v5"`

	Configs []ResponseTopicConfig `kafka:"min=v5,max=v5"`
}

type ResponseTopicConfig struct {
	Name         string `kafka:"min=v5,max=v5"`
	Value        string `kafka:"min=v5,max=v5,nullable"`
	ReadOnly     bool   `kafka:"min=v5,max=v5"`
	ConfigSource int8   `kafka:"min=v5,max=v5"`
	IsSensitive  bool   `kafka:"min=v5,max=v5"`
}

var (
	_ protocol.BrokerMessage = (*Request)(nil)
)
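A sketch of a create-topics call built from these types; the topic name and sizing are placeholders, and the send is the same controller-routed RoundTrip shown earlier:

	req := &createtopics.Request{
		Topics: []createtopics.RequestTopic{{
			Name:              "events",
			NumPartitions:     3,
			ReplicationFactor: 1,
		}},
		TimeoutMs: 5000,
	}
	// Setting ValidateOnly (v1+) asks the broker to check the request
	// without actually creating the topics.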
519 vendor/github.com/segmentio/kafka-go/protocol/decode.go generated vendored Normal file
@@ -0,0 +1,519 @@
package protocol

import (
	"bytes"
	"encoding/binary"
	"fmt"
	"hash/crc32"
	"io"
	"io/ioutil"
	"reflect"
	"sync"
	"sync/atomic"
)

type discarder interface {
	Discard(int) (int, error)
}

type decoder struct {
	reader io.Reader
	remain int
	buffer [8]byte
	err    error
	table  *crc32.Table
	crc32  uint32
}

func (d *decoder) Reset(r io.Reader, n int) {
	d.reader = r
	d.remain = n
	d.buffer = [8]byte{}
	d.err = nil
	d.table = nil
	d.crc32 = 0
}

func (d *decoder) Read(b []byte) (int, error) {
	if d.err != nil {
		return 0, d.err
	}
	if d.remain == 0 {
		return 0, io.EOF
	}
	if len(b) > d.remain {
		b = b[:d.remain]
	}
	n, err := d.reader.Read(b)
	if n > 0 && d.table != nil {
		d.crc32 = crc32.Update(d.crc32, d.table, b[:n])
	}
	d.remain -= n
	return n, err
}

func (d *decoder) ReadByte() (byte, error) {
	c := d.readByte()
	return c, d.err
}

func (d *decoder) done() bool {
	return d.remain == 0 || d.err != nil
}

func (d *decoder) setCRC(table *crc32.Table) {
	d.table, d.crc32 = table, 0
}

func (d *decoder) decodeBool(v value) {
	v.setBool(d.readBool())
}

func (d *decoder) decodeInt8(v value) {
	v.setInt8(d.readInt8())
}

func (d *decoder) decodeInt16(v value) {
	v.setInt16(d.readInt16())
}

func (d *decoder) decodeInt32(v value) {
	v.setInt32(d.readInt32())
}

func (d *decoder) decodeInt64(v value) {
	v.setInt64(d.readInt64())
}

func (d *decoder) decodeString(v value) {
	v.setString(d.readString())
}

func (d *decoder) decodeCompactString(v value) {
	v.setString(d.readCompactString())
}

func (d *decoder) decodeBytes(v value) {
	v.setBytes(d.readBytes())
}

func (d *decoder) decodeCompactBytes(v value) {
	v.setBytes(d.readCompactBytes())
}

func (d *decoder) decodeArray(v value, elemType reflect.Type, decodeElem decodeFunc) {
	if n := d.readInt32(); n < 0 {
		v.setArray(array{})
	} else {
		a := makeArray(elemType, int(n))
		for i := 0; i < int(n) && d.remain > 0; i++ {
			decodeElem(d, a.index(i))
		}
		v.setArray(a)
	}
}

func (d *decoder) decodeCompactArray(v value, elemType reflect.Type, decodeElem decodeFunc) {
	if n := d.readUnsignedVarInt(); n < 1 {
		v.setArray(array{})
	} else {
		a := makeArray(elemType, int(n-1))
		for i := 0; i < int(n-1) && d.remain > 0; i++ {
			decodeElem(d, a.index(i))
		}
		v.setArray(a)
	}
}

func (d *decoder) discardAll() {
	d.discard(d.remain)
}

func (d *decoder) discard(n int) {
	if n > d.remain {
		n = d.remain
	}
	var err error
	if r, _ := d.reader.(discarder); r != nil {
		n, err = r.Discard(n)
		d.remain -= n
	} else {
		_, err = io.Copy(ioutil.Discard, d)
	}
	d.setError(err)
}

func (d *decoder) read(n int) []byte {
	b := make([]byte, n)
	n, err := io.ReadFull(d, b)
	b = b[:n]
	d.setError(err)
	return b
}

func (d *decoder) writeTo(w io.Writer, n int) {
	limit := d.remain
	if n < limit {
		d.remain = n
	}
	c, err := io.Copy(w, d)
	if int(c) < n && err == nil {
		err = io.ErrUnexpectedEOF
	}
	d.remain = limit - int(c)
	d.setError(err)
}

func (d *decoder) setError(err error) {
	if d.err == nil && err != nil {
		d.err = err
		d.discardAll()
	}
}

func (d *decoder) readFull(b []byte) bool {
	n, err := io.ReadFull(d, b)
	d.setError(err)
	return n == len(b)
}

func (d *decoder) readByte() byte {
	if d.readFull(d.buffer[:1]) {
		return d.buffer[0]
	}
	return 0
}

func (d *decoder) readBool() bool {
	return d.readByte() != 0
}

func (d *decoder) readInt8() int8 {
	if d.readFull(d.buffer[:1]) {
		return readInt8(d.buffer[:1])
	}
	return 0
}

func (d *decoder) readInt16() int16 {
	if d.readFull(d.buffer[:2]) {
		return readInt16(d.buffer[:2])
	}
	return 0
}

func (d *decoder) readInt32() int32 {
	if d.readFull(d.buffer[:4]) {
		return readInt32(d.buffer[:4])
	}
	return 0
}

func (d *decoder) readInt64() int64 {
	if d.readFull(d.buffer[:8]) {
		return readInt64(d.buffer[:8])
	}
	return 0
}

func (d *decoder) readString() string {
	if n := d.readInt16(); n < 0 {
		return ""
	} else {
		return bytesToString(d.read(int(n)))
	}
}

func (d *decoder) readVarString() string {
	if n := d.readVarInt(); n < 0 {
		return ""
	} else {
		return bytesToString(d.read(int(n)))
	}
}

func (d *decoder) readCompactString() string {
	if n := d.readUnsignedVarInt(); n < 1 {
		return ""
	} else {
		return bytesToString(d.read(int(n - 1)))
	}
}

func (d *decoder) readBytes() []byte {
	if n := d.readInt32(); n < 0 {
		return nil
	} else {
		return d.read(int(n))
	}
}

func (d *decoder) readVarBytes() []byte {
	if n := d.readVarInt(); n < 0 {
		return nil
	} else {
		return d.read(int(n))
	}
}

func (d *decoder) readCompactBytes() []byte {
	if n := d.readUnsignedVarInt(); n < 1 {
		return nil
	} else {
		return d.read(int(n - 1))
	}
}

func (d *decoder) readVarInt() int64 {
	n := 11 // varints are at most 11 bytes

	if n > d.remain {
		n = d.remain
	}

	x := uint64(0)
	s := uint(0)

	for n > 0 {
		b := d.readByte()

		if (b & 0x80) == 0 {
			x |= uint64(b) << s
			return int64(x>>1) ^ -(int64(x) & 1)
		}

		x |= uint64(b&0x7f) << s
		s += 7
		n--
	}

	d.setError(fmt.Errorf("cannot decode varint from input stream"))
	return 0
}

func (d *decoder) readUnsignedVarInt() uint64 {
	n := 11 // varints are at most 11 bytes

	if n > d.remain {
		n = d.remain
	}

	x := uint64(0)
	s := uint(0)

	for n > 0 {
		b := d.readByte()

		if (b & 0x80) == 0 {
			x |= uint64(b) << s
			return x
		}

		x |= uint64(b&0x7f) << s
		s += 7
		n--
	}

	d.setError(fmt.Errorf("cannot decode unsigned varint from input stream"))
	return 0
}

type decodeFunc func(*decoder, value)

var (
	_ io.Reader     = (*decoder)(nil)
	_ io.ByteReader = (*decoder)(nil)

	readerFrom = reflect.TypeOf((*io.ReaderFrom)(nil)).Elem()
)

func decodeFuncOf(typ reflect.Type, version int16, flexible bool, tag structTag) decodeFunc {
	if reflect.PtrTo(typ).Implements(readerFrom) {
		return readerDecodeFuncOf(typ)
	}
	switch typ.Kind() {
	case reflect.Bool:
		return (*decoder).decodeBool
	case reflect.Int8:
		return (*decoder).decodeInt8
	case reflect.Int16:
		return (*decoder).decodeInt16
	case reflect.Int32:
		return (*decoder).decodeInt32
	case reflect.Int64:
		return (*decoder).decodeInt64
	case reflect.String:
		return stringDecodeFuncOf(flexible, tag)
	case reflect.Struct:
		return structDecodeFuncOf(typ, version, flexible)
	case reflect.Slice:
		if typ.Elem().Kind() == reflect.Uint8 { // []byte
			return bytesDecodeFuncOf(flexible, tag)
		}
		return arrayDecodeFuncOf(typ, version, flexible, tag)
	default:
		panic("unsupported type: " + typ.String())
	}
}

func stringDecodeFuncOf(flexible bool, tag structTag) decodeFunc {
	if flexible {
		// In flexible messages, all strings are compact
		return (*decoder).decodeCompactString
	}
	return (*decoder).decodeString
}

func bytesDecodeFuncOf(flexible bool, tag structTag) decodeFunc {
	if flexible {
		// In flexible messages, all arrays are compact
		return (*decoder).decodeCompactBytes
	}
	return (*decoder).decodeBytes
}

func structDecodeFuncOf(typ reflect.Type, version int16, flexible bool) decodeFunc {
	type field struct {
		decode decodeFunc
		index  index
		tagID  int
	}

	var fields []field
	taggedFields := map[int]*field{}

	forEachStructField(typ, func(typ reflect.Type, index index, tag string) {
		forEachStructTag(tag, func(tag structTag) bool {
			if tag.MinVersion <= version && version <= tag.MaxVersion {
				f := field{
					decode: decodeFuncOf(typ, version, flexible, tag),
					index:  index,
					tagID:  tag.TagID,
				}

				if tag.TagID < -1 {
					// Normal required field
					fields = append(fields, f)
				} else {
					// Optional tagged field (flexible messages only)
					taggedFields[tag.TagID] = &f
				}
				return false
			}
			return true
		})
	})

	return func(d *decoder, v value) {
		for i := range fields {
			f := &fields[i]
			f.decode(d, v.fieldByIndex(f.index))
		}

		if flexible {
			// See https://cwiki.apache.org/confluence/display/KAFKA/KIP-482%3A+The+Kafka+Protocol+should+Support+Optional+Tagged+Fields
			// for details of tag buffers in "flexible" messages.
			n := int(d.readUnsignedVarInt())

			for i := 0; i < n; i++ {
				tagID := int(d.readUnsignedVarInt())
				size := int(d.readUnsignedVarInt())

				f, ok := taggedFields[tagID]
				if ok {
					f.decode(d, v.fieldByIndex(f.index))
				} else {
					d.read(size)
				}
			}
		}
	}
}

func arrayDecodeFuncOf(typ reflect.Type, version int16, flexible bool, tag structTag) decodeFunc {
	elemType := typ.Elem()
	elemFunc := decodeFuncOf(elemType, version, flexible, tag)
	if flexible {
		// In flexible messages, all arrays are compact
		return func(d *decoder, v value) { d.decodeCompactArray(v, elemType, elemFunc) }
	}

	return func(d *decoder, v value) { d.decodeArray(v, elemType, elemFunc) }
}

func readerDecodeFuncOf(typ reflect.Type) decodeFunc {
	typ = reflect.PtrTo(typ)
	return func(d *decoder, v value) {
		if d.err == nil {
			_, err := v.iface(typ).(io.ReaderFrom).ReadFrom(d)
			if err != nil {
				d.setError(err)
			}
		}
	}
}

func readInt8(b []byte) int8 {
	return int8(b[0])
}

func readInt16(b []byte) int16 {
	return int16(binary.BigEndian.Uint16(b))
}

func readInt32(b []byte) int32 {
	return int32(binary.BigEndian.Uint32(b))
}

func readInt64(b []byte) int64 {
	return int64(binary.BigEndian.Uint64(b))
}

func Unmarshal(data []byte, version int16, value interface{}) error {
	typ := elemTypeOf(value)
	cache, _ := unmarshalers.Load().(map[versionedType]decodeFunc)
	key := versionedType{typ: typ, version: version}
	decode := cache[key]

	if decode == nil {
		decode = decodeFuncOf(reflect.TypeOf(value).Elem(), version, false, structTag{
			MinVersion: -1,
			MaxVersion: -1,
			TagID:      -2,
			Compact:    true,
			Nullable:   true,
		})

		newCache := make(map[versionedType]decodeFunc, len(cache)+1)
		newCache[key] = decode

		for typ, fun := range cache {
			newCache[typ] = fun
		}

		unmarshalers.Store(newCache)
	}

	d, _ := decoders.Get().(*decoder)
	if d == nil {
		d = &decoder{reader: bytes.NewReader(nil)}
	}

	d.remain = len(data)
	r, _ := d.reader.(*bytes.Reader)
	r.Reset(data)

	defer func() {
		r.Reset(nil)
		d.Reset(r, 0)
		decoders.Put(d)
	}()

	decode(d, valueOf(value))
	return dontExpectEOF(d.err)
}

var (
	decoders     sync.Pool    // *decoder
	unmarshalers atomic.Value // map[versionedType]decodeFunc
)
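The expression `int64(x>>1) ^ -(int64(x) & 1)` in readVarInt undoes zig-zag encoding, which maps small negative numbers to small unsigned values so they stay short on the wire. A worked sketch of just that step, pulled out of the decoder for illustration:

	// Equivalent of the final step of readVarInt, shown in isolation.
	func zigzagDecode(x uint64) int64 {
		return int64(x>>1) ^ -(int64(x) & 1)
	}

	// zigzagDecode(0) == 0
	// zigzagDecode(1) == -1
	// zigzagDecode(2) == 1
	// zigzagDecode(3) == -2  (low bit set -> negative; remaining bits -> magnitude)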
34 vendor/github.com/segmentio/kafka-go/protocol/deletetopics/deletetopics.go generated vendored Normal file
@@ -0,0 +1,34 @@
package deletetopics

import "github.com/segmentio/kafka-go/protocol"

func init() {
	protocol.Register(&Request{}, &Response{})
}

type Request struct {
	TopicNames []string `kafka:"min=v0,max=v3"`
	TimeoutMs  int32    `kafka:"min=v0,max=v3"`
}

func (r *Request) ApiKey() protocol.ApiKey { return protocol.DeleteTopics }

func (r *Request) Broker(cluster protocol.Cluster) (protocol.Broker, error) {
	return cluster.Brokers[cluster.Controller], nil
}

type Response struct {
	ThrottleTimeMs int32           `kafka:"min=v1,max=v3"`
	Responses      []ResponseTopic `kafka:"min=v0,max=v3"`
}

func (r *Response) ApiKey() protocol.ApiKey { return protocol.DeleteTopics }

type ResponseTopic struct {
	Name      string `kafka:"min=v0,max=v3"`
	ErrorCode int16  `kafka:"min=v0,max=v3"`
}

var (
	_ protocol.BrokerMessage = (*Request)(nil)
)
129 vendor/github.com/segmentio/kafka-go/protocol/describeconfigs/describeconfigs.go generated vendored Normal file
@@ -0,0 +1,129 @@
package describeconfigs

import (
	"strconv"

	"github.com/segmentio/kafka-go/protocol"
)

const (
	resourceTypeBroker int8 = 4
)

func init() {
	protocol.Register(&Request{}, &Response{})
}

// Detailed API definition: https://kafka.apache.org/protocol#The_Messages_DescribeConfigs
type Request struct {
	Resources            []RequestResource `kafka:"min=v0,max=v3"`
	IncludeSynonyms      bool              `kafka:"min=v1,max=v3"`
	IncludeDocumentation bool              `kafka:"min=v3,max=v3"`
}

func (r *Request) ApiKey() protocol.ApiKey { return protocol.DescribeConfigs }

func (r *Request) Broker(cluster protocol.Cluster) (protocol.Broker, error) {
	// Broker metadata requests must be sent to the associated broker
	for _, resource := range r.Resources {
		if resource.ResourceType == resourceTypeBroker {
			brokerID, err := strconv.Atoi(resource.ResourceName)
			if err != nil {
				return protocol.Broker{}, err
			}

			return cluster.Brokers[int32(brokerID)], nil
		}
	}

	return cluster.Brokers[cluster.Controller], nil
}

func (r *Request) Split(cluster protocol.Cluster) (
	[]protocol.Message,
	protocol.Merger,
	error,
) {
	messages := []protocol.Message{}
	topicsMessage := Request{}

	for _, resource := range r.Resources {
		// Split out broker requests to separate brokers
		if resource.ResourceType == resourceTypeBroker {
			messages = append(messages, &Request{
				Resources: []RequestResource{resource},
			})
		} else {
			topicsMessage.Resources = append(
				topicsMessage.Resources, resource,
			)
		}
	}

	if len(topicsMessage.Resources) > 0 {
		messages = append(messages, &topicsMessage)
	}

	return messages, new(Response), nil
}

type RequestResource struct {
	ResourceType int8     `kafka:"min=v0,max=v3"`
	ResourceName string   `kafka:"min=v0,max=v3"`
	ConfigNames  []string `kafka:"min=v0,max=v3,nullable"`
}

type Response struct {
	ThrottleTimeMs int32              `kafka:"min=v0,max=v3"`
	Resources      []ResponseResource `kafka:"min=v0,max=v3"`
}

func (r *Response) ApiKey() protocol.ApiKey { return protocol.DescribeConfigs }

func (r *Response) Merge(requests []protocol.Message, results []interface{}) (
	protocol.Message,
	error,
) {
	response := &Response{}

	for _, result := range results {
		m, err := protocol.Result(result)
		if err != nil {
			return nil, err
		}
		response.Resources = append(
			response.Resources,
			m.(*Response).Resources...,
		)
	}

	return response, nil
}

type ResponseResource struct {
	ErrorCode     int16                 `kafka:"min=v0,max=v3"`
	ErrorMessage  string                `kafka:"min=v0,max=v3,nullable"`
	ResourceType  int8                  `kafka:"min=v0,max=v3"`
	ResourceName  string                `kafka:"min=v0,max=v3"`
	ConfigEntries []ResponseConfigEntry `kafka:"min=v0,max=v3"`
}

type ResponseConfigEntry struct {
	ConfigName          string                  `kafka:"min=v0,max=v3"`
	ConfigValue         string                  `kafka:"min=v0,max=v3,nullable"`
	ReadOnly            bool                    `kafka:"min=v0,max=v3"`
	IsDefault           bool                    `kafka:"min=v0,max=v0"`
	ConfigSource        int8                    `kafka:"min=v1,max=v3"`
	IsSensitive         bool                    `kafka:"min=v0,max=v3"`
	ConfigSynonyms      []ResponseConfigSynonym `kafka:"min=v1,max=v3"`
	ConfigType          int8                    `kafka:"min=v3,max=v3"`
	ConfigDocumentation string                  `kafka:"min=v3,max=v3,nullable"`
}

type ResponseConfigSynonym struct {
	ConfigName   string `kafka:"min=v1,max=v3"`
	ConfigValue  string `kafka:"min=v1,max=v3,nullable"`
	ConfigSource int8   `kafka:"min=v1,max=v3"`
}

var _ protocol.BrokerMessage = (*Request)(nil)
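Broker configs live on individual brokers while topic configs can be served by the controller, which is why Split fans the request out. A sketch of the resulting shape, assuming a `cluster` value of type protocol.Cluster is in scope (broker type 4 comes from resourceTypeBroker above; topic type 2 is the standard Kafka enum value):

	req := &describeconfigs.Request{
		Resources: []describeconfigs.RequestResource{
			{ResourceType: 4, ResourceName: "1"},        // broker 1: its own sub-request
			{ResourceType: 4, ResourceName: "2"},        // broker 2: its own sub-request
			{ResourceType: 2, ResourceName: "my-topic"}, // topic resources: batched together
		},
	}
	msgs, merger, _ := req.Split(cluster) // len(msgs) == 3 for the resources above
	_ = merger                            // Response.Merge later recombines the per-broker results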
85 vendor/github.com/segmentio/kafka-go/protocol/describegroups/describegroups.go generated vendored Normal file
@@ -0,0 +1,85 @@
package describegroups

import (
	"github.com/segmentio/kafka-go/protocol"
)

func init() {
	protocol.Register(&Request{}, &Response{})
}

// Detailed API definition: https://kafka.apache.org/protocol#The_Messages_DescribeGroups
type Request struct {
	Groups                      []string `kafka:"min=v0,max=v4"`
	IncludeAuthorizedOperations bool     `kafka:"min=v3,max=v4"`
}

func (r *Request) ApiKey() protocol.ApiKey { return protocol.DescribeGroups }

func (r *Request) Group() string {
	return r.Groups[0]
}

func (r *Request) Split(cluster protocol.Cluster) (
	[]protocol.Message,
	protocol.Merger,
	error,
) {
	messages := []protocol.Message{}

	// Split requests by group since they'll need to go to different coordinators.
	for _, group := range r.Groups {
		messages = append(
			messages,
			&Request{
				Groups:                      []string{group},
				IncludeAuthorizedOperations: r.IncludeAuthorizedOperations,
			},
		)
	}

	return messages, new(Response), nil
}

type Response struct {
	ThrottleTimeMs int32           `kafka:"min=v1,max=v4"`
	Groups         []ResponseGroup `kafka:"min=v0,max=v4"`
}

type ResponseGroup struct {
	ErrorCode            int16                 `kafka:"min=v0,max=v4"`
	GroupID              string                `kafka:"min=v0,max=v4"`
	GroupState           string                `kafka:"min=v0,max=v4"`
	ProtocolType         string                `kafka:"min=v0,max=v4"`
	ProtocolData         string                `kafka:"min=v0,max=v4"`
	Members              []ResponseGroupMember `kafka:"min=v0,max=v4"`
	AuthorizedOperations int32                 `kafka:"min=v3,max=v4"`
}

type ResponseGroupMember struct {
	MemberID         string `kafka:"min=v0,max=v4"`
	GroupInstanceID  string `kafka:"min=v4,max=v4,nullable"`
	ClientID         string `kafka:"min=v0,max=v4"`
	ClientHost       string `kafka:"min=v0,max=v4"`
	MemberMetadata   []byte `kafka:"min=v0,max=v4"`
	MemberAssignment []byte `kafka:"min=v0,max=v4"`
}

func (r *Response) ApiKey() protocol.ApiKey { return protocol.DescribeGroups }

func (r *Response) Merge(requests []protocol.Message, results []interface{}) (
	protocol.Message,
	error,
) {
	response := &Response{}

	for _, result := range results {
		m, err := protocol.Result(result)
		if err != nil {
			return nil, err
		}
		response.Groups = append(response.Groups, m.(*Response).Groups...)
	}

	return response, nil
}
44 vendor/github.com/segmentio/kafka-go/protocol/electleaders/electleaders.go generated vendored Normal file
@@ -0,0 +1,44 @@
package electleaders

import "github.com/segmentio/kafka-go/protocol"

func init() {
	protocol.Register(&Request{}, &Response{})
}

// Detailed API definition: https://kafka.apache.org/protocol#The_Messages_ElectLeaders
type Request struct {
	ElectionType    int8                     `kafka:"min=v1,max=v1"`
	TopicPartitions []RequestTopicPartitions `kafka:"min=v0,max=v1"`
	TimeoutMs       int32                    `kafka:"min=v0,max=v1"`
}

type RequestTopicPartitions struct {
	Topic        string  `kafka:"min=v0,max=v1"`
	PartitionIDs []int32 `kafka:"min=v0,max=v1"`
}

func (r *Request) ApiKey() protocol.ApiKey { return protocol.ElectLeaders }

func (r *Request) Broker(cluster protocol.Cluster) (protocol.Broker, error) {
	return cluster.Brokers[cluster.Controller], nil
}

type Response struct {
	ThrottleTime           int32                           `kafka:"min=v0,max=v1"`
	ErrorCode              int16                           `kafka:"min=v1,max=v1"`
	ReplicaElectionResults []ResponseReplicaElectionResult `kafka:"min=v0,max=v1"`
}

type ResponseReplicaElectionResult struct {
	Topic            string                    `kafka:"min=v0,max=v1"`
	PartitionResults []ResponsePartitionResult `kafka:"min=v0,max=v1"`
}

type ResponsePartitionResult struct {
	PartitionID  int32  `kafka:"min=v0,max=v1"`
	ErrorCode    int16  `kafka:"min=v0,max=v1"`
	ErrorMessage string `kafka:"min=v0,max=v1,nullable"`
}

func (r *Response) ApiKey() protocol.ApiKey { return protocol.ElectLeaders }
590 vendor/github.com/segmentio/kafka-go/protocol/encode.go generated vendored Normal file
@@ -0,0 +1,590 @@
package protocol

import (
	"bytes"
	"encoding/binary"
	"fmt"
	"hash/crc32"
	"io"
	"reflect"
	"sync"
	"sync/atomic"
)

type encoder struct {
	writer io.Writer
	err    error
	table  *crc32.Table
	crc32  uint32
	buffer [32]byte
}

type encoderChecksum struct {
	reader  io.Reader
	encoder *encoder
}

func (e *encoderChecksum) Read(b []byte) (int, error) {
	n, err := e.reader.Read(b)
	if n > 0 {
		e.encoder.update(b[:n])
	}
	return n, err
}

func (e *encoder) Reset(w io.Writer) {
	e.writer = w
	e.err = nil
	e.table = nil
	e.crc32 = 0
	e.buffer = [32]byte{}
}

func (e *encoder) ReadFrom(r io.Reader) (int64, error) {
	if e.table != nil {
		r = &encoderChecksum{
			reader:  r,
			encoder: e,
		}
	}
	return io.Copy(e.writer, r)
}

func (e *encoder) Write(b []byte) (int, error) {
	if e.err != nil {
		return 0, e.err
	}
	n, err := e.writer.Write(b)
	if n > 0 {
		e.update(b[:n])
	}
	if err != nil {
		e.err = err
	}
	return n, err
}

func (e *encoder) WriteByte(b byte) error {
	e.buffer[0] = b
	_, err := e.Write(e.buffer[:1])
	return err
}

func (e *encoder) WriteString(s string) (int, error) {
	// This implementation is an optimization to avoid the heap allocation that
	// would occur when converting the string to a []byte to call crc32.Update.
	//
	// Strings are rarely long in the kafka protocol, so the use of a 32 byte
	// buffer is a good compromise between keeping the encoder value small and
	// limiting the number of calls to Write.
	//
	// We introduced this optimization because memory profiles on the benchmarks
	// showed that most heap allocations were caused by this code path.
	n := 0

	for len(s) != 0 {
		c := copy(e.buffer[:], s)
		w, err := e.Write(e.buffer[:c])
		n += w
		if err != nil {
			return n, err
		}
		s = s[c:]
	}

	return n, nil
}

func (e *encoder) setCRC(table *crc32.Table) {
	e.table, e.crc32 = table, 0
}

func (e *encoder) update(b []byte) {
	if e.table != nil {
		e.crc32 = crc32.Update(e.crc32, e.table, b)
	}
}

func (e *encoder) encodeBool(v value) {
	b := int8(0)
	if v.bool() {
		b = 1
	}
	e.writeInt8(b)
}

func (e *encoder) encodeInt8(v value) {
	e.writeInt8(v.int8())
}

func (e *encoder) encodeInt16(v value) {
	e.writeInt16(v.int16())
}

func (e *encoder) encodeInt32(v value) {
	e.writeInt32(v.int32())
}

func (e *encoder) encodeInt64(v value) {
	e.writeInt64(v.int64())
}

func (e *encoder) encodeString(v value) {
	e.writeString(v.string())
}

func (e *encoder) encodeCompactString(v value) {
	e.writeCompactString(v.string())
}

func (e *encoder) encodeNullString(v value) {
	e.writeNullString(v.string())
}

func (e *encoder) encodeCompactNullString(v value) {
	e.writeCompactNullString(v.string())
}

func (e *encoder) encodeBytes(v value) {
	e.writeBytes(v.bytes())
}

func (e *encoder) encodeCompactBytes(v value) {
	e.writeCompactBytes(v.bytes())
}

func (e *encoder) encodeNullBytes(v value) {
	e.writeNullBytes(v.bytes())
}

func (e *encoder) encodeCompactNullBytes(v value) {
	e.writeCompactNullBytes(v.bytes())
}

func (e *encoder) encodeArray(v value, elemType reflect.Type, encodeElem encodeFunc) {
	a := v.array(elemType)
	n := a.length()
	e.writeInt32(int32(n))

	for i := 0; i < n; i++ {
		encodeElem(e, a.index(i))
	}
}

func (e *encoder) encodeCompactArray(v value, elemType reflect.Type, encodeElem encodeFunc) {
	a := v.array(elemType)
	n := a.length()
	e.writeUnsignedVarInt(uint64(n + 1))

	for i := 0; i < n; i++ {
		encodeElem(e, a.index(i))
	}
}

func (e *encoder) encodeNullArray(v value, elemType reflect.Type, encodeElem encodeFunc) {
	a := v.array(elemType)
	if a.isNil() {
		e.writeInt32(-1)
		return
	}

	n := a.length()
	e.writeInt32(int32(n))

	for i := 0; i < n; i++ {
		encodeElem(e, a.index(i))
	}
}

func (e *encoder) encodeCompactNullArray(v value, elemType reflect.Type, encodeElem encodeFunc) {
	a := v.array(elemType)
	if a.isNil() {
		e.writeUnsignedVarInt(0)
		return
	}

	n := a.length()
	e.writeUnsignedVarInt(uint64(n + 1))
	for i := 0; i < n; i++ {
		encodeElem(e, a.index(i))
	}
}

func (e *encoder) writeInt8(i int8) {
	writeInt8(e.buffer[:1], i)
	e.Write(e.buffer[:1])
}

func (e *encoder) writeInt16(i int16) {
	writeInt16(e.buffer[:2], i)
	e.Write(e.buffer[:2])
}

func (e *encoder) writeInt32(i int32) {
	writeInt32(e.buffer[:4], i)
	e.Write(e.buffer[:4])
}

func (e *encoder) writeInt64(i int64) {
	writeInt64(e.buffer[:8], i)
	e.Write(e.buffer[:8])
}

func (e *encoder) writeString(s string) {
	e.writeInt16(int16(len(s)))
	e.WriteString(s)
}

func (e *encoder) writeVarString(s string) {
	e.writeVarInt(int64(len(s)))
	e.WriteString(s)
}

func (e *encoder) writeCompactString(s string) {
	e.writeUnsignedVarInt(uint64(len(s)) + 1)
	e.WriteString(s)
}

func (e *encoder) writeNullString(s string) {
	if s == "" {
		e.writeInt16(-1)
	} else {
		e.writeInt16(int16(len(s)))
		e.WriteString(s)
	}
}

func (e *encoder) writeCompactNullString(s string) {
	if s == "" {
		e.writeUnsignedVarInt(0)
	} else {
		e.writeUnsignedVarInt(uint64(len(s)) + 1)
		e.WriteString(s)
	}
}

func (e *encoder) writeBytes(b []byte) {
	e.writeInt32(int32(len(b)))
	e.Write(b)
}

func (e *encoder) writeCompactBytes(b []byte) {
	e.writeUnsignedVarInt(uint64(len(b)) + 1)
	e.Write(b)
}

func (e *encoder) writeNullBytes(b []byte) {
	if b == nil {
		e.writeInt32(-1)
	} else {
		e.writeInt32(int32(len(b)))
		e.Write(b)
	}
}

func (e *encoder) writeVarNullBytes(b []byte) {
	if b == nil {
		e.writeVarInt(-1)
	} else {
		e.writeVarInt(int64(len(b)))
		e.Write(b)
	}
}

func (e *encoder) writeCompactNullBytes(b []byte) {
	if b == nil {
		e.writeUnsignedVarInt(0)
	} else {
		e.writeUnsignedVarInt(uint64(len(b)) + 1)
		e.Write(b)
	}
}

func (e *encoder) writeNullBytesFrom(b Bytes) error {
	if b == nil {
		e.writeInt32(-1)
		return nil
	} else {
		size := int64(b.Len())
		e.writeInt32(int32(size))
		n, err := io.Copy(e, b)
		if err == nil && n != size {
			err = fmt.Errorf("size of nullable bytes does not match the number of bytes that were written (size=%d, written=%d): %w", size, n, io.ErrUnexpectedEOF)
		}
		return err
	}
}

func (e *encoder) writeVarNullBytesFrom(b Bytes) error {
	if b == nil {
		e.writeVarInt(-1)
		return nil
	} else {
		size := int64(b.Len())
		e.writeVarInt(size)
		n, err := io.Copy(e, b)
		if err == nil && n != size {
			err = fmt.Errorf("size of nullable bytes does not match the number of bytes that were written (size=%d, written=%d): %w", size, n, io.ErrUnexpectedEOF)
		}
		return err
	}
}

func (e *encoder) writeVarInt(i int64) {
	e.writeUnsignedVarInt(uint64((i << 1) ^ (i >> 63)))
}

func (e *encoder) writeUnsignedVarInt(i uint64) {
	b := e.buffer[:]
	n := 0

	for i >= 0x80 && n < len(b) {
		b[n] = byte(i) | 0x80
		i >>= 7
		n++
	}

	if n < len(b) {
		b[n] = byte(i)
		n++
	}

	e.Write(b[:n])
}

type encodeFunc func(*encoder, value)

var (
	_ io.ReaderFrom   = (*encoder)(nil)
	_ io.Writer       = (*encoder)(nil)
	_ io.ByteWriter   = (*encoder)(nil)
	_ io.StringWriter = (*encoder)(nil)

	writerTo = reflect.TypeOf((*io.WriterTo)(nil)).Elem()
)

func encodeFuncOf(typ reflect.Type, version int16, flexible bool, tag structTag) encodeFunc {
	if reflect.PtrTo(typ).Implements(writerTo) {
		return writerEncodeFuncOf(typ)
	}
	switch typ.Kind() {
	case reflect.Bool:
		return (*encoder).encodeBool
	case reflect.Int8:
		return (*encoder).encodeInt8
	case reflect.Int16:
		return (*encoder).encodeInt16
	case reflect.Int32:
		return (*encoder).encodeInt32
	case reflect.Int64:
		return (*encoder).encodeInt64
	case reflect.String:
		return stringEncodeFuncOf(flexible, tag)
	case reflect.Struct:
		return structEncodeFuncOf(typ, version, flexible)
	case reflect.Slice:
		if typ.Elem().Kind() == reflect.Uint8 { // []byte
			return bytesEncodeFuncOf(flexible, tag)
		}
		return arrayEncodeFuncOf(typ, version, flexible, tag)
	default:
		panic("unsupported type: " + typ.String())
	}
}

func stringEncodeFuncOf(flexible bool, tag structTag) encodeFunc {
	switch {
	case flexible && tag.Nullable:
		// In flexible messages, all strings are compact
		return (*encoder).encodeCompactNullString
	case flexible:
		// In flexible messages, all strings are compact
		return (*encoder).encodeCompactString
	case tag.Nullable:
		return (*encoder).encodeNullString
	default:
		return (*encoder).encodeString
	}
}

func bytesEncodeFuncOf(flexible bool, tag structTag) encodeFunc {
	switch {
	case flexible && tag.Nullable:
		// In flexible messages, all arrays are compact
		return (*encoder).encodeCompactNullBytes
	case flexible:
		// In flexible messages, all arrays are compact
		return (*encoder).encodeCompactBytes
	case tag.Nullable:
		return (*encoder).encodeNullBytes
	default:
		return (*encoder).encodeBytes
	}
}

func structEncodeFuncOf(typ reflect.Type, version int16, flexible bool) encodeFunc {
	type field struct {
		encode encodeFunc
		index  index
		tagID  int
	}

	var fields []field
	var taggedFields []field

	forEachStructField(typ, func(typ reflect.Type, index index, tag string) {
		if typ.Size() != 0 { // skip struct{}
			forEachStructTag(tag, func(tag structTag) bool {
				if tag.MinVersion <= version && version <= tag.MaxVersion {
					f := field{
						encode: encodeFuncOf(typ, version, flexible, tag),
						index:  index,
						tagID:  tag.TagID,
					}

					if tag.TagID < -1 {
						// Normal required field
						fields = append(fields, f)
					} else {
						// Optional tagged field (flexible messages only)
						taggedFields = append(taggedFields, f)
					}
					return false
				}
				return true
			})
		}
	})

	return func(e *encoder, v value) {
		for i := range fields {
			f := &fields[i]
			f.encode(e, v.fieldByIndex(f.index))
		}

		if flexible {
			// See https://cwiki.apache.org/confluence/display/KAFKA/KIP-482%3A+The+Kafka+Protocol+should+Support+Optional+Tagged+Fields
			// for details of tag buffers in "flexible" messages.
			e.writeUnsignedVarInt(uint64(len(taggedFields)))

			for i := range taggedFields {
				f := &taggedFields[i]
				e.writeUnsignedVarInt(uint64(f.tagID))

				buf := &bytes.Buffer{}
				se := &encoder{writer: buf}
				f.encode(se, v.fieldByIndex(f.index))
				e.writeUnsignedVarInt(uint64(buf.Len()))
				e.Write(buf.Bytes())
			}
		}
	}
}

func arrayEncodeFuncOf(typ reflect.Type, version int16, flexible bool, tag structTag) encodeFunc {
	elemType := typ.Elem()
	elemFunc := encodeFuncOf(elemType, version, flexible, tag)
	switch {
	case flexible && tag.Nullable:
		// In flexible messages, all arrays are compact
		return func(e *encoder, v value) { e.encodeCompactNullArray(v, elemType, elemFunc) }
	case flexible:
		// In flexible messages, all arrays are compact
		return func(e *encoder, v value) { e.encodeCompactArray(v, elemType, elemFunc) }
	case tag.Nullable:
		return func(e *encoder, v value) { e.encodeNullArray(v, elemType, elemFunc) }
	default:
		return func(e *encoder, v value) { e.encodeArray(v, elemType, elemFunc) }
	}
}

func writerEncodeFuncOf(typ reflect.Type) encodeFunc {
	typ = reflect.PtrTo(typ)
	return func(e *encoder, v value) {
		// Optimization to write directly into the buffer when the encoder
		// does not need to compute a crc32 checksum.
		w := io.Writer(e)
		if e.table == nil {
			w = e.writer
		}
		_, err := v.iface(typ).(io.WriterTo).WriteTo(w)
		if err != nil {
			e.err = err
		}
	}
}

func writeInt8(b []byte, i int8) {
	b[0] = byte(i)
}

func writeInt16(b []byte, i int16) {
	binary.BigEndian.PutUint16(b, uint16(i))
}

func writeInt32(b []byte, i int32) {
	binary.BigEndian.PutUint32(b, uint32(i))
}

func writeInt64(b []byte, i int64) {
	binary.BigEndian.PutUint64(b, uint64(i))
}

func Marshal(version int16, value interface{}) ([]byte, error) {
	typ := typeOf(value)
	cache, _ := marshalers.Load().(map[versionedType]encodeFunc)
	key := versionedType{typ: typ, version: version}
	encode := cache[key]

	if encode == nil {
		encode = encodeFuncOf(reflect.TypeOf(value), version, false, structTag{
			MinVersion: -1,
			MaxVersion: -1,
			TagID:      -2,
			Compact:    true,
			Nullable:   true,
		})

		newCache := make(map[versionedType]encodeFunc, len(cache)+1)
		newCache[key] = encode

		for typ, fun := range cache {
			newCache[typ] = fun
		}

		marshalers.Store(newCache)
	}

	e, _ := encoders.Get().(*encoder)
	if e == nil {
		e = &encoder{writer: new(bytes.Buffer)}
	}

	b, _ := e.writer.(*bytes.Buffer)
	defer func() {
		b.Reset()
		e.Reset(b)
		encoders.Put(e)
	}()

	encode(e, nonAddressableValueOf(value))

	if e.err != nil {
		return nil, e.err
	}

	buf := b.Bytes()
	out := make([]byte, len(buf))
	copy(out, buf)
	return out, nil
}

type versionedType struct {
	typ     _type
	version int16
}

var (
	encoders   sync.Pool    // *encoder
	marshalers atomic.Value // map[versionedType]encodeFunc
)
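writeVarInt applies the zig-zag transform `(i << 1) ^ (i >> 63)` before writeUnsignedVarInt lays the result out base-128, seven payload bits per byte with the high bit as a continuation flag. A worked sketch of both steps, pulled out for illustration:

	// Mirror of writeVarInt's transform: 0 -> 0, -1 -> 1, 1 -> 2, -2 -> 3.
	func zigzagEncode(i int64) uint64 {
		return uint64((i << 1) ^ (i >> 63))
	}

	// Base-128 layout produced by writeUnsignedVarInt for 300 (0b100101100):
	//   byte 0: 0xAC (low 7 bits 0101100, continuation bit set)
	//   byte 1: 0x02 (remaining bits 10, continuation bit clear)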
35 vendor/github.com/segmentio/kafka-go/protocol/endtxn/endtxn.go generated vendored Normal file
@@ -0,0 +1,35 @@
package endtxn

import "github.com/segmentio/kafka-go/protocol"

func init() {
	protocol.Register(&Request{}, &Response{})
}

type Request struct {
	// We need at least one tagged field to indicate that this is a "flexible" message
	// type.
	_ struct{} `kafka:"min=v3,max=v3,tag"`

	TransactionalID string `kafka:"min=v0,max=v2|min=v3,max=v3,compact"`
	ProducerID      int64  `kafka:"min=v0,max=v3"`
	ProducerEpoch   int16  `kafka:"min=v0,max=v3"`
	Committed       bool   `kafka:"min=v0,max=v3"`
}

func (r *Request) ApiKey() protocol.ApiKey { return protocol.EndTxn }

func (r *Request) Transaction() string { return r.TransactionalID }

var _ protocol.TransactionalMessage = (*Request)(nil)

type Response struct {
	// We need at least one tagged field to indicate that this is a "flexible" message
	// type.
	_ struct{} `kafka:"min=v3,max=v3,tag"`

	ThrottleTimeMs int32 `kafka:"min=v0,max=v3"`
	ErrorCode      int16 `kafka:"min=v0,max=v3"`
}

func (r *Response) ApiKey() protocol.ApiKey { return protocol.EndTxn }
91 vendor/github.com/segmentio/kafka-go/protocol/error.go generated vendored Normal file
@@ -0,0 +1,91 @@
package protocol

import (
	"fmt"
)

// Error represents client-side protocol errors.
type Error string

func (e Error) Error() string { return string(e) }

func Errorf(msg string, args ...interface{}) Error {
	return Error(fmt.Sprintf(msg, args...))
}

const (
	// ErrNoTopic is returned when a request needs to be sent to a specific
	// topic, but the client did not find it in the cluster metadata.
	ErrNoTopic Error = "topic not found"

	// ErrNoPartition is returned when a request needs to be sent to a specific
	// partition, but the client did not find it in the cluster metadata.
	ErrNoPartition Error = "topic partition not found"

	// ErrNoLeader is returned when a request needs to be sent to a partition
	// leader, but the client could not determine what the leader was at this
	// time.
	ErrNoLeader Error = "topic partition has no leader"

	// ErrNoRecord is returned when attempting to write a message containing an
	// empty record set (which kafka forbids).
	//
	// We handle this case client-side because kafka will close the connection
	// that it received an empty produce request on, causing all concurrent
	// requests to be aborted.
	ErrNoRecord Error = "record set contains no records"

	// ErrNoReset is returned by ResetRecordReader when the record reader does
	// not support being reset.
	ErrNoReset Error = "record sequence does not support reset"
)

type TopicError struct {
	Topic string
	Err   error
}

func NewTopicError(topic string, err error) *TopicError {
	return &TopicError{Topic: topic, Err: err}
}

func NewErrNoTopic(topic string) *TopicError {
	return NewTopicError(topic, ErrNoTopic)
}

func (e *TopicError) Error() string {
	return fmt.Sprintf("%v (topic=%q)", e.Err, e.Topic)
}

func (e *TopicError) Unwrap() error {
	return e.Err
}

type TopicPartitionError struct {
	Topic     string
	Partition int32
	Err       error
}

func NewTopicPartitionError(topic string, partition int32, err error) *TopicPartitionError {
	return &TopicPartitionError{
		Topic:     topic,
		Partition: partition,
		Err:       err,
	}
}

func NewErrNoPartition(topic string, partition int32) *TopicPartitionError {
	return NewTopicPartitionError(topic, partition, ErrNoPartition)
}

func NewErrNoLeader(topic string, partition int32) *TopicPartitionError {
	return NewTopicPartitionError(topic, partition, ErrNoLeader)
}

func (e *TopicPartitionError) Error() string {
	return fmt.Sprintf("%v (topic=%q partition=%d)", e.Err, e.Topic, e.Partition)
}

func (e *TopicPartitionError) Unwrap() error {
	return e.Err
}
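Because both wrapper types implement Unwrap, the sentinel values compose with the standard errors package. A small sketch:

	err := protocol.NewErrNoLeader("events", 3)

	errors.Is(err, protocol.ErrNoLeader) // true, via (*TopicPartitionError).Unwrap

	var tpe *protocol.TopicPartitionError
	if errors.As(err, &tpe) {
		fmt.Println(tpe.Topic, tpe.Partition) // events 3
	}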
126 vendor/github.com/segmentio/kafka-go/protocol/fetch/fetch.go generated vendored Normal file
@@ -0,0 +1,126 @@
package fetch

import (
	"fmt"

	"github.com/segmentio/kafka-go/protocol"
)

func init() {
	protocol.Register(&Request{}, &Response{})
}

type Request struct {
	ReplicaID       int32                   `kafka:"min=v0,max=v11"`
	MaxWaitTime     int32                   `kafka:"min=v0,max=v11"`
	MinBytes        int32                   `kafka:"min=v0,max=v11"`
	MaxBytes        int32                   `kafka:"min=v3,max=v11"`
	IsolationLevel  int8                    `kafka:"min=v4,max=v11"`
	SessionID       int32                   `kafka:"min=v7,max=v11"`
	SessionEpoch    int32                   `kafka:"min=v7,max=v11"`
	Topics          []RequestTopic          `kafka:"min=v0,max=v11"`
	ForgottenTopics []RequestForgottenTopic `kafka:"min=v7,max=v11"`
	RackID          string                  `kafka:"min=v11,max=v11"`
}

func (r *Request) ApiKey() protocol.ApiKey { return protocol.Fetch }

func (r *Request) Broker(cluster protocol.Cluster) (protocol.Broker, error) {
	broker := protocol.Broker{ID: -1}

	for i := range r.Topics {
		t := &r.Topics[i]

		topic, ok := cluster.Topics[t.Topic]
		if !ok {
			return broker, NewError(protocol.NewErrNoTopic(t.Topic))
		}

		for j := range t.Partitions {
			p := &t.Partitions[j]

			partition, ok := topic.Partitions[p.Partition]
			if !ok {
				return broker, NewError(protocol.NewErrNoPartition(t.Topic, p.Partition))
			}

			if b, ok := cluster.Brokers[partition.Leader]; !ok {
				return broker, NewError(protocol.NewErrNoLeader(t.Topic, p.Partition))
			} else if broker.ID < 0 {
				broker = b
			} else if b.ID != broker.ID {
				return broker, NewError(fmt.Errorf("mismatching leaders (%d!=%d)", b.ID, broker.ID))
			}
		}
	}

	return broker, nil
}

type RequestTopic struct {
	Topic      string             `kafka:"min=v0,max=v11"`
	Partitions []RequestPartition `kafka:"min=v0,max=v11"`
}

type RequestPartition struct {
	Partition          int32 `kafka:"min=v0,max=v11"`
	CurrentLeaderEpoch int32 `kafka:"min=v9,max=v11"`
	FetchOffset        int64 `kafka:"min=v0,max=v11"`
	LogStartOffset     int64 `kafka:"min=v5,max=v11"`
	PartitionMaxBytes  int32 `kafka:"min=v0,max=v11"`
}

type RequestForgottenTopic struct {
	Topic      string  `kafka:"min=v7,max=v11"`
	Partitions []int32 `kafka:"min=v7,max=v11"`
}

type Response struct {
	ThrottleTimeMs int32           `kafka:"min=v1,max=v11"`
	ErrorCode      int16           `kafka:"min=v7,max=v11"`
	SessionID      int32           `kafka:"min=v7,max=v11"`
	Topics         []ResponseTopic `kafka:"min=v0,max=v11"`
}

func (r *Response) ApiKey() protocol.ApiKey { return protocol.Fetch }

type ResponseTopic struct {
	Topic      string              `kafka:"min=v0,max=v11"`
	Partitions []ResponsePartition `kafka:"min=v0,max=v11"`
}

type ResponsePartition struct {
	Partition            int32                 `kafka:"min=v0,max=v11"`
	ErrorCode            int16                 `kafka:"min=v0,max=v11"`
	HighWatermark        int64                 `kafka:"min=v0,max=v11"`
	LastStableOffset     int64                 `kafka:"min=v4,max=v11"`
	LogStartOffset       int64                 `kafka:"min=v5,max=v11"`
	AbortedTransactions  []ResponseTransaction `kafka:"min=v4,max=v11"`
	PreferredReadReplica int32                 `kafka:"min=v11,max=v11"`
	RecordSet            protocol.RecordSet    `kafka:"min=v0,max=v11"`
}

type ResponseTransaction struct {
	ProducerID  int64 `kafka:"min=v4,max=v11"`
	FirstOffset int64 `kafka:"min=v4,max=v11"`
}

var (
	_ protocol.BrokerMessage = (*Request)(nil)
)

type Error struct {
	Err error
}

func NewError(err error) *Error {
	return &Error{Err: err}
}

func (e *Error) Error() string {
	return fmt.Sprintf("fetch request error: %v", e.Err)
}

func (e *Error) Unwrap() error {
	return e.Err
}
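The Broker method above folds the per-partition leaders into a single broker assignment: it adopts the first leader it sees and fails fast when a later partition names a different one, since one Fetch request can only be served by one broker. A minimal stand-alone sketch of that fold (commonLeader and the sample leader IDs are hypothetical, for illustration only):

package main

import "fmt"

// commonLeader adopts the first leader seen and errors on any mismatch,
// mirroring the fold in Broker above.
func commonLeader(leaders []int32) (int32, error) {
	leader := int32(-1)
	for _, l := range leaders {
		switch {
		case leader < 0:
			leader = l
		case l != leader:
			return leader, fmt.Errorf("mismatching leaders (%d!=%d)", l, leader)
		}
	}
	return leader, nil
}

func main() {
	fmt.Println(commonLeader([]int32{3, 3, 3})) // 3 <nil>
	fmt.Println(commonLeader([]int32{3, 5}))    // 3 mismatching leaders (5!=3)
}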
25
vendor/github.com/segmentio/kafka-go/protocol/findcoordinator/findcoordinator.go
generated
vendored
Normal file
@@ -0,0 +1,25 @@
package findcoordinator

import "github.com/segmentio/kafka-go/protocol"

func init() {
	protocol.Register(&Request{}, &Response{})
}

type Request struct {
	Key     string `kafka:"min=v0,max=v2"`
	KeyType int8   `kafka:"min=v1,max=v2"`
}

func (r *Request) ApiKey() protocol.ApiKey { return protocol.FindCoordinator }

type Response struct {
	ThrottleTimeMs int32  `kafka:"min=v1,max=v2"`
	ErrorCode      int16  `kafka:"min=v0,max=v2"`
	ErrorMessage   string `kafka:"min=v1,max=v2,nullable"`
	NodeID         int32  `kafka:"min=v0,max=v2"`
	Host           string `kafka:"min=v0,max=v2"`
	Port           int32  `kafka:"min=v0,max=v2"`
}

func (r *Response) ApiKey() protocol.ApiKey { return protocol.FindCoordinator }
36
vendor/github.com/segmentio/kafka-go/protocol/heartbeat/heartbeat.go
generated
vendored
Normal file
@@ -0,0 +1,36 @@
package heartbeat

import "github.com/segmentio/kafka-go/protocol"

func init() {
	protocol.Register(&Request{}, &Response{})
}

// Detailed API definition: https://kafka.apache.org/protocol#The_Messages_Heartbeat
type Request struct {
	// We need at least one tagged field to indicate that this is a "flexible" message
	// type.
	_ struct{} `kafka:"min=v4,max=v4,tag"`

	GroupID         string `kafka:"min=v0,max=v4"`
	GenerationID    int32  `kafka:"min=v0,max=v4"`
	MemberID        string `kafka:"min=v0,max=v4"`
	GroupInstanceID string `kafka:"min=v3,max=v4,nullable"`
}

func (r *Request) ApiKey() protocol.ApiKey {
	return protocol.Heartbeat
}

type Response struct {
	// We need at least one tagged field to indicate that this is a "flexible" message
	// type.
	_ struct{} `kafka:"min=v4,max=v4,tag"`

	ErrorCode      int16 `kafka:"min=v0,max=v4"`
	ThrottleTimeMs int32 `kafka:"min=v1,max=v4"`
}

func (r *Response) ApiKey() protocol.ApiKey {
	return protocol.Heartbeat
}
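The blank `_ struct{}` field above carries no data; it exists only so the struct has a field whose tag's `tag` option marks v4 as the first flexible version. Go's reflection still surfaces blank fields and their struct tags, which is what the protocol package relies on. A small self-contained demonstration (the local Request type here is a stand-in, not the one above):

package main

import (
	"fmt"
	"reflect"
)

// Stand-in mirroring the pattern above: a zero-width blank field whose
// only purpose is to carry the "tag" option in its struct tag.
type Request struct {
	_       struct{} `kafka:"min=v4,max=v4,tag"`
	GroupID string   `kafka:"min=v0,max=v4"`
}

func main() {
	t := reflect.TypeOf(Request{})
	for i := 0; i < t.NumField(); i++ {
		f := t.Field(i)
		fmt.Printf("%s -> %q\n", f.Name, f.Tag.Get("kafka"))
	}
	// Output:
	// _ -> "min=v4,max=v4,tag"
	// GroupID -> "min=v0,max=v4"
}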
79
vendor/github.com/segmentio/kafka-go/protocol/incrementalalterconfigs/incrementalalterconfigs.go
generated
vendored
Normal file
@@ -0,0 +1,79 @@
package incrementalalterconfigs

import (
	"errors"
	"strconv"

	"github.com/segmentio/kafka-go/protocol"
)

const (
	resourceTypeBroker int8 = 4
)

func init() {
	protocol.Register(&Request{}, &Response{})
}

// Detailed API definition: https://kafka.apache.org/protocol#The_Messages_IncrementalAlterConfigs
type Request struct {
	Resources    []RequestResource `kafka:"min=v0,max=v0"`
	ValidateOnly bool              `kafka:"min=v0,max=v0"`
}

type RequestResource struct {
	ResourceType int8            `kafka:"min=v0,max=v0"`
	ResourceName string          `kafka:"min=v0,max=v0"`
	Configs      []RequestConfig `kafka:"min=v0,max=v0"`
}

type RequestConfig struct {
	Name            string `kafka:"min=v0,max=v0"`
	ConfigOperation int8   `kafka:"min=v0,max=v0"`
	Value           string `kafka:"min=v0,max=v0,nullable"`
}

func (r *Request) ApiKey() protocol.ApiKey { return protocol.IncrementalAlterConfigs }

func (r *Request) Broker(cluster protocol.Cluster) (protocol.Broker, error) {
	// Check that at most one broker is being updated.
	//
	// TODO: Support updating multiple brokers in a single request.
	brokers := map[string]struct{}{}
	for _, resource := range r.Resources {
		if resource.ResourceType == resourceTypeBroker {
			brokers[resource.ResourceName] = struct{}{}
		}
	}
	if len(brokers) > 1 {
		return protocol.Broker{},
			errors.New("Updating more than one broker in a single request is not supported yet")
	}

	for _, resource := range r.Resources {
		if resource.ResourceType == resourceTypeBroker {
			brokerID, err := strconv.Atoi(resource.ResourceName)
			if err != nil {
				return protocol.Broker{}, err
			}

			return cluster.Brokers[int32(brokerID)], nil
		}
	}

	return cluster.Brokers[cluster.Controller], nil
}

type Response struct {
	ThrottleTimeMs int32                   `kafka:"min=v0,max=v0"`
	Responses      []ResponseAlterResponse `kafka:"min=v0,max=v0"`
}

type ResponseAlterResponse struct {
	ErrorCode    int16  `kafka:"min=v0,max=v0"`
	ErrorMessage string `kafka:"min=v0,max=v0,nullable"`
	ResourceType int8   `kafka:"min=v0,max=v0"`
	ResourceName string `kafka:"min=v0,max=v0"`
}

func (r *Response) ApiKey() protocol.ApiKey { return protocol.IncrementalAlterConfigs }
37
vendor/github.com/segmentio/kafka-go/protocol/initproducerid/initproducerid.go
generated
vendored
Normal file
@@ -0,0 +1,37 @@
package initproducerid

import "github.com/segmentio/kafka-go/protocol"

func init() {
	protocol.Register(&Request{}, &Response{})
}

type Request struct {
	// We need at least one tagged field to indicate that this is a "flexible" message
	// type.
	_ struct{} `kafka:"min=v2,max=v4,tag"`

	TransactionalID      string `kafka:"min=v0,max=v4,nullable"`
	TransactionTimeoutMs int32  `kafka:"min=v0,max=v4"`
	ProducerID           int64  `kafka:"min=v3,max=v4"`
	ProducerEpoch        int16  `kafka:"min=v3,max=v4"`
}

func (r *Request) ApiKey() protocol.ApiKey { return protocol.InitProducerId }

func (r *Request) Transaction() string { return r.TransactionalID }

var _ protocol.TransactionalMessage = (*Request)(nil)

type Response struct {
	// We need at least one tagged field to indicate that this is a "flexible" message
	// type.
	_ struct{} `kafka:"min=v2,max=v4,tag"`

	ThrottleTimeMs int32 `kafka:"min=v0,max=v4"`
	ErrorCode      int16 `kafka:"min=v0,max=v4"`
	ProducerID     int64 `kafka:"min=v0,max=v4"`
	ProducerEpoch  int16 `kafka:"min=v0,max=v4"`
}

func (r *Response) ApiKey() protocol.ApiKey { return protocol.InitProducerId }
67
vendor/github.com/segmentio/kafka-go/protocol/joingroup/joingroup.go
generated
vendored
Normal file
@@ -0,0 +1,67 @@
package joingroup

import "github.com/segmentio/kafka-go/protocol"

func init() {
	protocol.Register(&Request{}, &Response{})
}

type Request struct {
	// We need at least one tagged field to indicate that this is a "flexible" message
	// type.
	_ struct{} `kafka:"min=v6,max=v7,tag"`

	GroupID            string            `kafka:"min=v0,max=v5|min=v6,max=v7,compact"`
	SessionTimeoutMS   int32             `kafka:"min=v0,max=v7"`
	RebalanceTimeoutMS int32             `kafka:"min=v1,max=v7"`
	MemberID           string            `kafka:"min=v0,max=v5|min=v6,max=v7,compact"`
	GroupInstanceID    string            `kafka:"min=v5,max=v5,nullable|min=v6,max=v7,compact,nullable"`
	ProtocolType       string            `kafka:"min=v0,max=v5|min=v6,max=v7,compact"`
	Protocols          []RequestProtocol `kafka:"min=v0,max=v7"`
}

type RequestProtocol struct {
	// We need at least one tagged field to indicate that this is a "flexible" message
	// type.
	_ struct{} `kafka:"min=v6,max=v7,tag"`

	Name     string `kafka:"min=v0,max=v5|min=v6,max=v7,compact"`
	Metadata []byte `kafka:"min=v0,max=v5|min=v6,max=v7,compact"`
}

func (r *Request) ApiKey() protocol.ApiKey {
	return protocol.JoinGroup
}

func (r *Request) Group() string { return r.GroupID }

var _ protocol.GroupMessage = (*Request)(nil)

type Response struct {
	// We need at least one tagged field to indicate that this is a "flexible" message
	// type.
	_ struct{} `kafka:"min=v6,max=v7,tag"`

	ThrottleTimeMS int32            `kafka:"min=v2,max=v7"`
	ErrorCode      int16            `kafka:"min=v0,max=v7"`
	GenerationID   int32            `kafka:"min=v0,max=v7"`
	ProtocolType   string           `kafka:"min=v7,max=v7,compact,nullable"`
	ProtocolName   string           `kafka:"min=v0,max=v5|min=v6,max=v6,compact|min=v7,max=v7,compact,nullable"`
	LeaderID       string           `kafka:"min=v0,max=v5|min=v6,max=v7,compact"`
	MemberID       string           `kafka:"min=v0,max=v5|min=v6,max=v7,compact"`
	Members        []ResponseMember `kafka:"min=v0,max=v7"`
}

type ResponseMember struct {
	// We need at least one tagged field to indicate that this is a "flexible" message
	// type.
	_ struct{} `kafka:"min=v6,max=v7,tag"`

	MemberID        string `kafka:"min=v0,max=v5|min=v6,max=v7,compact"`
	GroupInstanceID string `kafka:"min=v5,max=v5,nullable|min=v6,max=v7,nullable,compact"`
	Metadata        []byte `kafka:"min=v0,max=v5|min=v6,max=v7,compact"`
}

type ResponseMemberMetadata struct{}

func (r *Response) ApiKey() protocol.ApiKey { return protocol.JoinGroup }
65
vendor/github.com/segmentio/kafka-go/protocol/leavegroup/leavegroup.go
generated
vendored
Normal file
@@ -0,0 +1,65 @@
package leavegroup

import "github.com/segmentio/kafka-go/protocol"

func init() {
	protocol.Register(&Request{}, &Response{})
}

type Request struct {
	// We need at least one tagged field to indicate that this is a "flexible" message
	// type.
	_ struct{} `kafka:"min=v4,max=v4,tag"`

	GroupID  string          `kafka:"min=v0,max=v2|min=v3,max=v4,compact"`
	MemberID string          `kafka:"min=v0,max=v2"`
	Members  []RequestMember `kafka:"min=v3,max=v4"`
}

func (r *Request) Prepare(apiVersion int16) {
	if apiVersion < 3 {
		if len(r.Members) > 0 {
			r.MemberID = r.Members[0].MemberID
		}
	}
}

type RequestMember struct {
	// We need at least one tagged field to indicate that this is a "flexible" message
	// type.
	_ struct{} `kafka:"min=v4,max=v4,tag"`

	MemberID        string `kafka:"min=v3,max=v3|min=v4,max=v4,compact"`
	GroupInstanceID string `kafka:"min=v3,max=v3,nullable|min=v4,max=v4,nullable,compact"`
}

func (r *Request) ApiKey() protocol.ApiKey { return protocol.LeaveGroup }

func (r *Request) Group() string { return r.GroupID }

var (
	_ protocol.GroupMessage    = (*Request)(nil)
	_ protocol.PreparedMessage = (*Request)(nil)
)

type Response struct {
	// We need at least one tagged field to indicate that this is a "flexible" message
	// type.
	_ struct{} `kafka:"min=v4,max=v4,tag"`

	ErrorCode      int16            `kafka:"min=v0,max=v4"`
	ThrottleTimeMS int32            `kafka:"min=v1,max=v4"`
	Members        []ResponseMember `kafka:"min=v3,max=v4"`
}

type ResponseMember struct {
	// We need at least one tagged field to indicate that this is a "flexible" message
	// type.
	_ struct{} `kafka:"min=v4,max=v4,tag"`

	MemberID        string `kafka:"min=v3,max=v3|min=v4,max=v4,compact"`
	GroupInstanceID string `kafka:"min=v3,max=v3,nullable|min=v4,max=v4,nullable,compact"`
	ErrorCode       int16  `kafka:"min=v3,max=v4"`
}

func (r *Response) ApiKey() protocol.ApiKey { return protocol.LeaveGroup }
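Prepare above is a backward-compatibility shim: LeaveGroup v0-v2 carry a single MemberID while v3+ carry a Members list, so on old API versions the first list entry is folded back into the scalar field. A stand-alone sketch of that downgrade (member and prepare are hypothetical stand-ins):

package main

import "fmt"

type member struct{ MemberID string }

// prepare mirrors the fold in Request.Prepare above: below v3, only a
// single member id can be carried on the wire.
func prepare(apiVersion int16, memberID string, members []member) string {
	if apiVersion < 3 && len(members) > 0 {
		return members[0].MemberID
	}
	return memberID
}

func main() {
	fmt.Println(prepare(2, "", []member{{MemberID: "m-1"}, {MemberID: "m-2"}})) // m-1
	fmt.Println(prepare(4, "", []member{{MemberID: "m-1"}}))                    // "" (Members list is sent as-is)
}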
82
vendor/github.com/segmentio/kafka-go/protocol/listgroups/listgroups.go
generated
vendored
Normal file
@@ -0,0 +1,82 @@
package listgroups

import (
	"github.com/segmentio/kafka-go/protocol"
)

func init() {
	protocol.Register(&Request{}, &Response{})
}

// Detailed API definition: https://kafka.apache.org/protocol#The_Messages_ListGroups
type Request struct {
	_        struct{} `kafka:"min=v0,max=v2"`
	brokerID int32
}

func (r *Request) ApiKey() protocol.ApiKey { return protocol.ListGroups }

func (r *Request) Broker(cluster protocol.Cluster) (protocol.Broker, error) {
	return cluster.Brokers[r.brokerID], nil
}

func (r *Request) Split(cluster protocol.Cluster) ([]protocol.Message, protocol.Merger, error) {
	messages := []protocol.Message{}

	for _, broker := range cluster.Brokers {
		messages = append(messages, &Request{brokerID: broker.ID})
	}

	return messages, new(Response), nil
}

type Response struct {
	ThrottleTimeMs int32           `kafka:"min=v1,max=v2"`
	ErrorCode      int16           `kafka:"min=v0,max=v2"`
	Groups         []ResponseGroup `kafka:"min=v0,max=v2"`
}

type ResponseGroup struct {
	GroupID      string `kafka:"min=v0,max=v2"`
	ProtocolType string `kafka:"min=v0,max=v2"`

	// Use this to store which broker returned the response.
	BrokerID int32 `kafka:"-"`
}

func (r *Response) ApiKey() protocol.ApiKey { return protocol.ListGroups }

func (r *Response) Merge(requests []protocol.Message, results []interface{}) (protocol.Message, error) {
	response := &Response{}

	for i, result := range results {
		m, err := protocol.Result(result)
		if err != nil {
			return nil, err
		}
		brokerResp := m.(*Response)
		respGroups := []ResponseGroup{}

		for _, group := range brokerResp.Groups {
			respGroups = append(
				respGroups,
				ResponseGroup{
					GroupID:      group.GroupID,
					ProtocolType: group.ProtocolType,
					BrokerID:     requests[i].(*Request).brokerID,
				},
			)
		}

		response.Groups = append(response.Groups, respGroups...)
	}

	return response, nil
}
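ListGroups has no natural routing key, so Split above fans the request out to every broker and Merge stitches the per-broker group lists back together, tagging each group with the broker that reported it. A stand-alone sketch of that fan-out/merge shape (the request/response types here are hypothetical stand-ins, not the protocol types above):

package main

import "fmt"

type request struct{ brokerID int32 }
type response struct{ groups []string }

// split creates one request per broker, like Request.Split above.
func split(brokerIDs []int32) []request {
	reqs := make([]request, 0, len(brokerIDs))
	for _, id := range brokerIDs {
		reqs = append(reqs, request{brokerID: id})
	}
	return reqs
}

// merge concatenates per-broker results, recording which broker answered,
// like Response.Merge above.
func merge(reqs []request, results []response) response {
	var merged response
	for i, res := range results {
		for _, g := range res.groups {
			merged.groups = append(merged.groups,
				fmt.Sprintf("%s@broker-%d", g, reqs[i].brokerID))
		}
	}
	return merged
}

func main() {
	reqs := split([]int32{0, 1})
	res := merge(reqs, []response{{groups: []string{"g1"}}, {groups: []string{"g2"}}})
	fmt.Println(res.groups) // [g1@broker-0 g2@broker-1]
}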
230
vendor/github.com/segmentio/kafka-go/protocol/listoffsets/listoffsets.go
generated
vendored
Normal file
@@ -0,0 +1,230 @@
package listoffsets

import (
	"sort"

	"github.com/segmentio/kafka-go/protocol"
)

func init() {
	protocol.Register(&Request{}, &Response{})
}

type Request struct {
	ReplicaID      int32          `kafka:"min=v1,max=v5"`
	IsolationLevel int8           `kafka:"min=v2,max=v5"`
	Topics         []RequestTopic `kafka:"min=v1,max=v5"`
}

type RequestTopic struct {
	Topic      string             `kafka:"min=v1,max=v5"`
	Partitions []RequestPartition `kafka:"min=v1,max=v5"`
}

type RequestPartition struct {
	Partition          int32 `kafka:"min=v1,max=v5"`
	CurrentLeaderEpoch int32 `kafka:"min=v4,max=v5"`
	Timestamp          int64 `kafka:"min=v1,max=v5"`
	// v0 of the API predates kafka 0.10 and doesn't make much sense to
	// use, so we chose not to support it. It had this extra field to limit
	// the number of offsets returned, which has been removed in v1.
	//
	// MaxNumOffsets int32 `kafka:"min=v0,max=v0"`
}

func (r *Request) ApiKey() protocol.ApiKey { return protocol.ListOffsets }

func (r *Request) Broker(cluster protocol.Cluster) (protocol.Broker, error) {
	// Expects r to be a request that was returned by Map; this will likely
	// panic or produce the wrong result if that's not the case.
	partition := r.Topics[0].Partitions[0].Partition
	topic := r.Topics[0].Topic

	for _, p := range cluster.Topics[topic].Partitions {
		if p.ID == partition {
			return cluster.Brokers[p.Leader], nil
		}
	}

	return protocol.Broker{ID: -1}, nil
}

func (r *Request) Split(cluster protocol.Cluster) ([]protocol.Message, protocol.Merger, error) {
	// Because kafka refuses to answer ListOffsets requests containing multiple
	// entries of unique topic/partition pairs, we submit multiple requests on
	// the wire and merge their results back.
	//
	// ListOffsets requests also need to be sent to partition leaders; to keep
	// the logic simple we split each offset request into a single message.
	// This may cause a few more requests to be sent on the wire, but it keeps
	// the code sane, and we can still optimize the aggregation mechanism
	// later if it becomes a problem.
	//
	// Really the idea here is to shield applications from having to deal with
	// the limitation of the kafka server, so they can request any combination
	// of topic/partition/offsets.
	requests := make([]Request, 0, 2*len(r.Topics))

	for _, t := range r.Topics {
		for _, p := range t.Partitions {
			requests = append(requests, Request{
				ReplicaID:      r.ReplicaID,
				IsolationLevel: r.IsolationLevel,
				Topics: []RequestTopic{{
					Topic: t.Topic,
					Partitions: []RequestPartition{{
						Partition:          p.Partition,
						CurrentLeaderEpoch: p.CurrentLeaderEpoch,
						Timestamp:          p.Timestamp,
					}},
				}},
			})
		}
	}

	messages := make([]protocol.Message, len(requests))

	for i := range requests {
		messages[i] = &requests[i]
	}

	return messages, new(Response), nil
}

type Response struct {
	ThrottleTimeMs int32           `kafka:"min=v2,max=v5"`
	Topics         []ResponseTopic `kafka:"min=v1,max=v5"`
}

type ResponseTopic struct {
	Topic      string              `kafka:"min=v1,max=v5"`
	Partitions []ResponsePartition `kafka:"min=v1,max=v5"`
}

type ResponsePartition struct {
	Partition   int32 `kafka:"min=v1,max=v5"`
	ErrorCode   int16 `kafka:"min=v1,max=v5"`
	Timestamp   int64 `kafka:"min=v1,max=v5"`
	Offset      int64 `kafka:"min=v1,max=v5"`
	LeaderEpoch int32 `kafka:"min=v4,max=v5"`
}

func (r *Response) ApiKey() protocol.ApiKey { return protocol.ListOffsets }

func (r *Response) Merge(requests []protocol.Message, results []interface{}) (protocol.Message, error) {
	type topicPartition struct {
		topic     string
		partition int32
	}

	// Kafka doesn't always return the timestamp in the response; for example,
	// when the request sends -2 (for the first offset) it always returns -1,
	// probably to indicate that the timestamp is unknown. This means that we
	// can't correlate the requests and responses based on their timestamps;
	// the primary key is the topic/partition pair.
	//
	// To make the API a bit friendlier, we reconstruct an index of topic
	// partitions to the timestamps that were requested, and override the
	// timestamp value in the response.
	timestamps := make([]map[topicPartition]int64, len(requests))

	for i, m := range requests {
		req := m.(*Request)
		ts := make(map[topicPartition]int64, len(req.Topics))

		for _, t := range req.Topics {
			for _, p := range t.Partitions {
				ts[topicPartition{
					topic:     t.Topic,
					partition: p.Partition,
				}] = p.Timestamp
			}
		}

		timestamps[i] = ts
	}

	topics := make(map[string][]ResponsePartition)
	errors := 0

	for i, res := range results {
		m, err := protocol.Result(res)
		if err != nil {
			for _, t := range requests[i].(*Request).Topics {
				partitions := topics[t.Topic]

				for _, p := range t.Partitions {
					partitions = append(partitions, ResponsePartition{
						Partition:   p.Partition,
						ErrorCode:   -1, // UNKNOWN, can we do better?
						Timestamp:   -1,
						Offset:      -1,
						LeaderEpoch: -1,
					})
				}

				topics[t.Topic] = partitions
			}
			errors++
			continue
		}

		response := m.(*Response)

		if r.ThrottleTimeMs < response.ThrottleTimeMs {
			r.ThrottleTimeMs = response.ThrottleTimeMs
		}

		for _, t := range response.Topics {
			for _, p := range t.Partitions {
				if timestamp, ok := timestamps[i][topicPartition{
					topic:     t.Topic,
					partition: p.Partition,
				}]; ok {
					p.Timestamp = timestamp
				}
				topics[t.Topic] = append(topics[t.Topic], p)
			}
		}
	}

	if errors > 0 && errors == len(results) {
		_, err := protocol.Result(results[0])
		return nil, err
	}

	r.Topics = make([]ResponseTopic, 0, len(topics))

	for topicName, partitions := range topics {
		r.Topics = append(r.Topics, ResponseTopic{
			Topic:      topicName,
			Partitions: partitions,
		})
	}

	sort.Slice(r.Topics, func(i, j int) bool {
		return r.Topics[i].Topic < r.Topics[j].Topic
	})

	for _, t := range r.Topics {
		sort.Slice(t.Partitions, func(i, j int) bool {
			p1 := &t.Partitions[i]
			p2 := &t.Partitions[j]

			if p1.Partition != p2.Partition {
				return p1.Partition < p2.Partition
			}

			return p1.Offset < p2.Offset
		})
	}

	return r, nil
}

var (
	_ protocol.BrokerMessage = (*Request)(nil)
	_ protocol.Splitter      = (*Request)(nil)
	_ protocol.Merger        = (*Response)(nil)
)
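Merge above cannot correlate requests with responses by timestamp because kafka echoes -1 back for sentinel timestamps such as -2 (earliest offset); the topic/partition pair is the only reliable key. A minimal illustration of that re-keying (the topic name and values are hypothetical):

package main

import "fmt"

type topicPartition struct {
	topic     string
	partition int32
}

func main() {
	// Index the requested timestamps by topic/partition, as Merge does.
	requested := map[topicPartition]int64{
		{topic: "events", partition: 0}: -2, // -2 asks for the earliest offset
	}

	// The broker echoes -1; restore the timestamp that was actually asked for.
	respTimestamp := int64(-1)
	if ts, ok := requested[topicPartition{topic: "events", partition: 0}]; ok {
		respTimestamp = ts
	}
	fmt.Println(respTimestamp) // -2
}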
52
vendor/github.com/segmentio/kafka-go/protocol/metadata/metadata.go
generated
vendored
Normal file
@@ -0,0 +1,52 @@
package metadata

import "github.com/segmentio/kafka-go/protocol"

func init() {
	protocol.Register(&Request{}, &Response{})
}

type Request struct {
	TopicNames                         []string `kafka:"min=v0,max=v8,nullable"`
	AllowAutoTopicCreation             bool     `kafka:"min=v4,max=v8"`
	IncludeClusterAuthorizedOperations bool     `kafka:"min=v8,max=v8"`
	IncludeTopicAuthorizedOperations   bool     `kafka:"min=v8,max=v8"`
}

func (r *Request) ApiKey() protocol.ApiKey { return protocol.Metadata }

type Response struct {
	ThrottleTimeMs              int32            `kafka:"min=v3,max=v8"`
	Brokers                     []ResponseBroker `kafka:"min=v0,max=v8"`
	ClusterID                   string           `kafka:"min=v2,max=v8,nullable"`
	ControllerID                int32            `kafka:"min=v1,max=v8"`
	Topics                      []ResponseTopic  `kafka:"min=v0,max=v8"`
	ClusterAuthorizedOperations int32            `kafka:"min=v8,max=v8"`
}

func (r *Response) ApiKey() protocol.ApiKey { return protocol.Metadata }

type ResponseBroker struct {
	NodeID int32  `kafka:"min=v0,max=v8"`
	Host   string `kafka:"min=v0,max=v8"`
	Port   int32  `kafka:"min=v0,max=v8"`
	Rack   string `kafka:"min=v1,max=v8,nullable"`
}

type ResponseTopic struct {
	ErrorCode                 int16               `kafka:"min=v0,max=v8"`
	Name                      string              `kafka:"min=v0,max=v8"`
	IsInternal                bool                `kafka:"min=v1,max=v8"`
	Partitions                []ResponsePartition `kafka:"min=v0,max=v8"`
	TopicAuthorizedOperations int32               `kafka:"min=v8,max=v8"`
}

type ResponsePartition struct {
	ErrorCode       int16   `kafka:"min=v0,max=v8"`
	PartitionIndex  int32   `kafka:"min=v0,max=v8"`
	LeaderID        int32   `kafka:"min=v0,max=v8"`
	LeaderEpoch     int32   `kafka:"min=v7,max=v8"`
	ReplicaNodes    []int32 `kafka:"min=v0,max=v8"`
	IsrNodes        []int32 `kafka:"min=v0,max=v8"`
	OfflineReplicas []int32 `kafka:"min=v5,max=v8"`
}
54
vendor/github.com/segmentio/kafka-go/protocol/offsetcommit/offsetcommit.go
generated
vendored
Normal file
@@ -0,0 +1,54 @@
package offsetcommit

import "github.com/segmentio/kafka-go/protocol"

func init() {
	protocol.Register(&Request{}, &Response{})
}

type Request struct {
	GroupID         string         `kafka:"min=v0,max=v7"`
	GenerationID    int32          `kafka:"min=v1,max=v7"`
	MemberID        string         `kafka:"min=v1,max=v7"`
	RetentionTimeMs int64          `kafka:"min=v2,max=v4"`
	GroupInstanceID string         `kafka:"min=v7,max=v7,nullable"`
	Topics          []RequestTopic `kafka:"min=v0,max=v7"`
}

func (r *Request) ApiKey() protocol.ApiKey { return protocol.OffsetCommit }

func (r *Request) Group() string { return r.GroupID }

type RequestTopic struct {
	Name       string             `kafka:"min=v0,max=v7"`
	Partitions []RequestPartition `kafka:"min=v0,max=v7"`
}

type RequestPartition struct {
	PartitionIndex       int32  `kafka:"min=v0,max=v7"`
	CommittedOffset      int64  `kafka:"min=v0,max=v7"`
	CommitTimestamp      int64  `kafka:"min=v1,max=v1"`
	CommittedLeaderEpoch int32  `kafka:"min=v5,max=v7"`
	CommittedMetadata    string `kafka:"min=v0,max=v7,nullable"`
}

var (
	_ protocol.GroupMessage = (*Request)(nil)
)

type Response struct {
	ThrottleTimeMs int32           `kafka:"min=v3,max=v7"`
	Topics         []ResponseTopic `kafka:"min=v0,max=v7"`
}

func (r *Response) ApiKey() protocol.ApiKey { return protocol.OffsetCommit }

type ResponseTopic struct {
	Name       string              `kafka:"min=v0,max=v7"`
	Partitions []ResponsePartition `kafka:"min=v0,max=v7"`
}

type ResponsePartition struct {
	PartitionIndex int32 `kafka:"min=v0,max=v7"`
	ErrorCode      int16 `kafka:"min=v0,max=v7"`
}
47
vendor/github.com/segmentio/kafka-go/protocol/offsetdelete/offsetdelete.go
generated
vendored
Normal file
@@ -0,0 +1,47 @@
package offsetdelete

import "github.com/segmentio/kafka-go/protocol"

func init() {
	protocol.Register(&Request{}, &Response{})
}

type Request struct {
	GroupID string         `kafka:"min=v0,max=v0"`
	Topics  []RequestTopic `kafka:"min=v0,max=v0"`
}

func (r *Request) ApiKey() protocol.ApiKey { return protocol.OffsetDelete }

func (r *Request) Group() string { return r.GroupID }

type RequestTopic struct {
	Name       string             `kafka:"min=v0,max=v0"`
	Partitions []RequestPartition `kafka:"min=v0,max=v0"`
}

type RequestPartition struct {
	PartitionIndex int32 `kafka:"min=v0,max=v0"`
}

var (
	_ protocol.GroupMessage = (*Request)(nil)
)

type Response struct {
	ErrorCode      int16           `kafka:"min=v0,max=v0"`
	ThrottleTimeMs int32           `kafka:"min=v0,max=v0"`
	Topics         []ResponseTopic `kafka:"min=v0,max=v0"`
}

func (r *Response) ApiKey() protocol.ApiKey { return protocol.OffsetDelete }

type ResponseTopic struct {
	Name       string              `kafka:"min=v0,max=v0"`
	Partitions []ResponsePartition `kafka:"min=v0,max=v0"`
}

type ResponsePartition struct {
	PartitionIndex int32 `kafka:"min=v0,max=v0"`
	ErrorCode      int16 `kafka:"min=v0,max=v0"`
}
46
vendor/github.com/segmentio/kafka-go/protocol/offsetfetch/offsetfetch.go
generated
vendored
Normal file
@@ -0,0 +1,46 @@
package offsetfetch

import "github.com/segmentio/kafka-go/protocol"

func init() {
	protocol.Register(&Request{}, &Response{})
}

type Request struct {
	GroupID string         `kafka:"min=v0,max=v5"`
	Topics  []RequestTopic `kafka:"min=v0,max=v5"`
}

func (r *Request) ApiKey() protocol.ApiKey { return protocol.OffsetFetch }

func (r *Request) Group() string { return r.GroupID }

type RequestTopic struct {
	Name             string  `kafka:"min=v0,max=v5"`
	PartitionIndexes []int32 `kafka:"min=v0,max=v5"`
}

var (
	_ protocol.GroupMessage = (*Request)(nil)
)

type Response struct {
	ThrottleTimeMs int32           `kafka:"min=v3,max=v5"`
	Topics         []ResponseTopic `kafka:"min=v0,max=v5"`
	ErrorCode      int16           `kafka:"min=v2,max=v5"`
}

func (r *Response) ApiKey() protocol.ApiKey { return protocol.OffsetFetch }

type ResponseTopic struct {
	Name       string              `kafka:"min=v0,max=v5"`
	Partitions []ResponsePartition `kafka:"min=v0,max=v5"`
}

type ResponsePartition struct {
	PartitionIndex      int32  `kafka:"min=v0,max=v5"`
	CommittedOffset     int64  `kafka:"min=v0,max=v5"`
	ComittedLeaderEpoch int32  `kafka:"min=v5,max=v5"`
	Metadata            string `kafka:"min=v0,max=v5,nullable"`
	ErrorCode           int16  `kafka:"min=v0,max=v5"`
}
147
vendor/github.com/segmentio/kafka-go/protocol/produce/produce.go
generated
vendored
Normal file
@@ -0,0 +1,147 @@
package produce

import (
	"fmt"

	"github.com/segmentio/kafka-go/protocol"
)

func init() {
	protocol.Register(&Request{}, &Response{})
}

type Request struct {
	TransactionalID string         `kafka:"min=v3,max=v8,nullable"`
	Acks            int16          `kafka:"min=v0,max=v8"`
	Timeout         int32          `kafka:"min=v0,max=v8"`
	Topics          []RequestTopic `kafka:"min=v0,max=v8"`
}

func (r *Request) ApiKey() protocol.ApiKey { return protocol.Produce }

func (r *Request) Broker(cluster protocol.Cluster) (protocol.Broker, error) {
	broker := protocol.Broker{ID: -1}

	for i := range r.Topics {
		t := &r.Topics[i]

		topic, ok := cluster.Topics[t.Topic]
		if !ok {
			return broker, NewError(protocol.NewErrNoTopic(t.Topic))
		}

		for j := range t.Partitions {
			p := &t.Partitions[j]

			partition, ok := topic.Partitions[p.Partition]
			if !ok {
				return broker, NewError(protocol.NewErrNoPartition(t.Topic, p.Partition))
			}

			if b, ok := cluster.Brokers[partition.Leader]; !ok {
				return broker, NewError(protocol.NewErrNoLeader(t.Topic, p.Partition))
			} else if broker.ID < 0 {
				broker = b
			} else if b.ID != broker.ID {
				return broker, NewError(fmt.Errorf("mismatching leaders (%d!=%d)", b.ID, broker.ID))
			}
		}
	}

	return broker, nil
}

func (r *Request) Prepare(apiVersion int16) {
	// Determine which version of the message should be used, based on which
	// version of the Produce API is supported by the server.
	//
	// In version 0.11, kafka gives this error:
	//
	//   org.apache.kafka.common.record.InvalidRecordException
	//   Produce requests with version 3 are only allowed to contain record batches with magic version 2.
	//
	// In version 2.x, kafka refuses the message claiming that the CRC32
	// checksum is invalid.
	var recordVersion int8

	if apiVersion < 3 {
		recordVersion = 1
	} else {
		recordVersion = 2
	}

	for i := range r.Topics {
		t := &r.Topics[i]

		for j := range t.Partitions {
			p := &t.Partitions[j]

			// Allow the program to overload the version if really needed.
			if p.RecordSet.Version == 0 {
				p.RecordSet.Version = recordVersion
			}
		}
	}
}

func (r *Request) HasResponse() bool {
	return r.Acks != 0
}

type RequestTopic struct {
	Topic      string             `kafka:"min=v0,max=v8"`
	Partitions []RequestPartition `kafka:"min=v0,max=v8"`
}

type RequestPartition struct {
	Partition int32              `kafka:"min=v0,max=v8"`
	RecordSet protocol.RecordSet `kafka:"min=v0,max=v8"`
}

type Response struct {
	Topics         []ResponseTopic `kafka:"min=v0,max=v8"`
	ThrottleTimeMs int32           `kafka:"min=v1,max=v8"`
}

func (r *Response) ApiKey() protocol.ApiKey { return protocol.Produce }

type ResponseTopic struct {
	Topic      string              `kafka:"min=v0,max=v8"`
	Partitions []ResponsePartition `kafka:"min=v0,max=v8"`
}

type ResponsePartition struct {
	Partition      int32           `kafka:"min=v0,max=v8"`
	ErrorCode      int16           `kafka:"min=v0,max=v8"`
	BaseOffset     int64           `kafka:"min=v0,max=v8"`
	LogAppendTime  int64           `kafka:"min=v2,max=v8"`
	LogStartOffset int64           `kafka:"min=v5,max=v8"`
	RecordErrors   []ResponseError `kafka:"min=v8,max=v8"`
	ErrorMessage   string          `kafka:"min=v8,max=v8,nullable"`
}

type ResponseError struct {
	BatchIndex             int32  `kafka:"min=v8,max=v8"`
	BatchIndexErrorMessage string `kafka:"min=v8,max=v8,nullable"`
}

var (
	_ protocol.BrokerMessage   = (*Request)(nil)
	_ protocol.PreparedMessage = (*Request)(nil)
)

type Error struct {
	Err error
}

func NewError(err error) *Error {
	return &Error{Err: err}
}

func (e *Error) Error() string {
	return fmt.Sprintf("produce request error: %v", e.Err)
}

func (e *Error) Unwrap() error {
	return e.Err
}
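Prepare above pins the record set format to the negotiated Produce version: the old message format (v1) below Produce v3, record batches (v2) from v3 onward, unless the caller already forced a version. A stand-alone mirror of that mapping (recordVersionFor is hypothetical):

package main

import "fmt"

// recordVersionFor mirrors the branch in Prepare above: Produce v3+
// requires v2 record batches, older versions use the v1 message format.
func recordVersionFor(apiVersion int16) int8 {
	if apiVersion < 3 {
		return 1
	}
	return 2
}

func main() {
	for _, v := range []int16{2, 3, 8} {
		fmt.Printf("produce v%d -> record set v%d\n", v, recordVersionFor(v))
	}
}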
506
vendor/github.com/segmentio/kafka-go/protocol/protocol.go
generated
vendored
Normal file
@@ -0,0 +1,506 @@
package protocol

import (
	"errors"
	"fmt"
	"io"
	"net"
	"reflect"
	"strconv"
	"strings"
)

// Message is an interface implemented by all request and response types of the
// kafka protocol.
//
// This interface is used mostly as a safe-guard to provide a compile-time check
// for values passed to functions dealing with kafka message types.
type Message interface {
	ApiKey() ApiKey
}

type ApiKey int16

func (k ApiKey) String() string {
	if i := int(k); i >= 0 && i < len(apiNames) {
		return apiNames[i]
	}
	return strconv.Itoa(int(k))
}

func (k ApiKey) MinVersion() int16 { return k.apiType().minVersion() }

func (k ApiKey) MaxVersion() int16 { return k.apiType().maxVersion() }

func (k ApiKey) SelectVersion(minVersion, maxVersion int16) int16 {
	min := k.MinVersion()
	max := k.MaxVersion()
	switch {
	case min > maxVersion:
		return min
	case max < maxVersion:
		return max
	default:
		return maxVersion
	}
}

func (k ApiKey) apiType() apiType {
	if i := int(k); i >= 0 && i < len(apiTypes) {
		return apiTypes[i]
	}
	return apiType{}
}

const (
	Produce                     ApiKey = 0
	Fetch                       ApiKey = 1
	ListOffsets                 ApiKey = 2
	Metadata                    ApiKey = 3
	LeaderAndIsr                ApiKey = 4
	StopReplica                 ApiKey = 5
	UpdateMetadata              ApiKey = 6
	ControlledShutdown          ApiKey = 7
	OffsetCommit                ApiKey = 8
	OffsetFetch                 ApiKey = 9
	FindCoordinator             ApiKey = 10
	JoinGroup                   ApiKey = 11
	Heartbeat                   ApiKey = 12
	LeaveGroup                  ApiKey = 13
	SyncGroup                   ApiKey = 14
	DescribeGroups              ApiKey = 15
	ListGroups                  ApiKey = 16
	SaslHandshake               ApiKey = 17
	ApiVersions                 ApiKey = 18
	CreateTopics                ApiKey = 19
	DeleteTopics                ApiKey = 20
	DeleteRecords               ApiKey = 21
	InitProducerId              ApiKey = 22
	OffsetForLeaderEpoch        ApiKey = 23
	AddPartitionsToTxn          ApiKey = 24
	AddOffsetsToTxn             ApiKey = 25
	EndTxn                      ApiKey = 26
	WriteTxnMarkers             ApiKey = 27
	TxnOffsetCommit             ApiKey = 28
	DescribeAcls                ApiKey = 29
	CreateAcls                  ApiKey = 30
	DeleteAcls                  ApiKey = 31
	DescribeConfigs             ApiKey = 32
	AlterConfigs                ApiKey = 33
	AlterReplicaLogDirs         ApiKey = 34
	DescribeLogDirs             ApiKey = 35
	SaslAuthenticate            ApiKey = 36
	CreatePartitions            ApiKey = 37
	CreateDelegationToken       ApiKey = 38
	RenewDelegationToken        ApiKey = 39
	ExpireDelegationToken       ApiKey = 40
	DescribeDelegationToken     ApiKey = 41
	DeleteGroups                ApiKey = 42
	ElectLeaders                ApiKey = 43
	IncrementalAlterConfigs     ApiKey = 44
	AlterPartitionReassignments ApiKey = 45
	ListPartitionReassignments  ApiKey = 46
	OffsetDelete                ApiKey = 47
	DescribeClientQuotas        ApiKey = 48
	AlterClientQuotas           ApiKey = 49

	numApis = 50
)

var apiNames = [numApis]string{
	Produce:                     "Produce",
	Fetch:                       "Fetch",
	ListOffsets:                 "ListOffsets",
	Metadata:                    "Metadata",
	LeaderAndIsr:                "LeaderAndIsr",
	StopReplica:                 "StopReplica",
	UpdateMetadata:              "UpdateMetadata",
	ControlledShutdown:          "ControlledShutdown",
	OffsetCommit:                "OffsetCommit",
	OffsetFetch:                 "OffsetFetch",
	FindCoordinator:             "FindCoordinator",
	JoinGroup:                   "JoinGroup",
	Heartbeat:                   "Heartbeat",
	LeaveGroup:                  "LeaveGroup",
	SyncGroup:                   "SyncGroup",
	DescribeGroups:              "DescribeGroups",
	ListGroups:                  "ListGroups",
	SaslHandshake:               "SaslHandshake",
	ApiVersions:                 "ApiVersions",
	CreateTopics:                "CreateTopics",
	DeleteTopics:                "DeleteTopics",
	DeleteRecords:               "DeleteRecords",
	InitProducerId:              "InitProducerId",
	OffsetForLeaderEpoch:        "OffsetForLeaderEpoch",
	AddPartitionsToTxn:          "AddPartitionsToTxn",
	AddOffsetsToTxn:             "AddOffsetsToTxn",
	EndTxn:                      "EndTxn",
	WriteTxnMarkers:             "WriteTxnMarkers",
	TxnOffsetCommit:             "TxnOffsetCommit",
	DescribeAcls:                "DescribeAcls",
	CreateAcls:                  "CreateAcls",
	DeleteAcls:                  "DeleteAcls",
	DescribeConfigs:             "DescribeConfigs",
	AlterConfigs:                "AlterConfigs",
	AlterReplicaLogDirs:         "AlterReplicaLogDirs",
	DescribeLogDirs:             "DescribeLogDirs",
	SaslAuthenticate:            "SaslAuthenticate",
	CreatePartitions:            "CreatePartitions",
	CreateDelegationToken:       "CreateDelegationToken",
	RenewDelegationToken:        "RenewDelegationToken",
	ExpireDelegationToken:       "ExpireDelegationToken",
	DescribeDelegationToken:     "DescribeDelegationToken",
	DeleteGroups:                "DeleteGroups",
	ElectLeaders:                "ElectLeaders",
	IncrementalAlterConfigs:     "IncrementalAlterConfigs",
	AlterPartitionReassignments: "AlterPartitionReassignments",
	ListPartitionReassignments:  "ListPartitionReassignments",
	OffsetDelete:                "OffsetDelete",
	DescribeClientQuotas:        "DescribeClientQuotas",
	AlterClientQuotas:           "AlterClientQuotas",
}

type messageType struct {
	version  int16
	flexible bool
	gotype   reflect.Type
	decode   decodeFunc
	encode   encodeFunc
}

func (t *messageType) new() Message {
	return reflect.New(t.gotype).Interface().(Message)
}

type apiType struct {
	requests  []messageType
	responses []messageType
}

func (t apiType) minVersion() int16 {
	if len(t.requests) == 0 {
		return 0
	}
	return t.requests[0].version
}

func (t apiType) maxVersion() int16 {
	if len(t.requests) == 0 {
		return 0
	}
	return t.requests[len(t.requests)-1].version
}

var apiTypes [numApis]apiType

// Register is automatically called when sub-packages are imported to install a
// new pair of request/response message types.
func Register(req, res Message) {
	k1 := req.ApiKey()
	k2 := res.ApiKey()

	if k1 != k2 {
		panic(fmt.Sprintf("[%T/%T]: request and response API keys mismatch: %d != %d", req, res, k1, k2))
	}

	apiTypes[k1] = apiType{
		requests:  typesOf(req),
		responses: typesOf(res),
	}
}

func typesOf(v interface{}) []messageType {
	return makeTypes(reflect.TypeOf(v).Elem())
}

func makeTypes(t reflect.Type) []messageType {
	minVersion := int16(-1)
	maxVersion := int16(-1)

	// All future versions will be flexible (according to spec), so we don't
	// need to worry about maxes here.
	minFlexibleVersion := int16(-1)

	forEachStructField(t, func(_ reflect.Type, _ index, tag string) {
		forEachStructTag(tag, func(tag structTag) bool {
			if minVersion < 0 || tag.MinVersion < minVersion {
				minVersion = tag.MinVersion
			}
			if maxVersion < 0 || tag.MaxVersion > maxVersion {
				maxVersion = tag.MaxVersion
			}
			if tag.TagID > -2 && (minFlexibleVersion < 0 || tag.MinVersion < minFlexibleVersion) {
				minFlexibleVersion = tag.MinVersion
			}
			return true
		})
	})

	types := make([]messageType, 0, (maxVersion-minVersion)+1)

	for v := minVersion; v <= maxVersion; v++ {
		flexible := minFlexibleVersion >= 0 && v >= minFlexibleVersion

		types = append(types, messageType{
			version:  v,
			gotype:   t,
			flexible: flexible,
			decode:   decodeFuncOf(t, v, flexible, structTag{}),
			encode:   encodeFuncOf(t, v, flexible, structTag{}),
		})
	}

	return types
}

type structTag struct {
	MinVersion int16
	MaxVersion int16
	Compact    bool
	Nullable   bool
	TagID      int
}

func forEachStructTag(tag string, do func(structTag) bool) {
	if tag == "-" {
		return // special case to ignore the field
	}

	forEach(tag, '|', func(s string) bool {
		tag := structTag{
			MinVersion: -1,
			MaxVersion: -1,

			// Legitimate tag IDs can start at 0. We use -1 as a placeholder to indicate
			// that the message type is flexible, so that leaves -2 as the default for
			// indicating that there is no tag ID and the message is not flexible.
			TagID: -2,
		}

		var err error
		forEach(s, ',', func(s string) bool {
			switch {
			case strings.HasPrefix(s, "min="):
				tag.MinVersion, err = parseVersion(s[4:])
			case strings.HasPrefix(s, "max="):
				tag.MaxVersion, err = parseVersion(s[4:])
			case s == "tag":
				tag.TagID = -1
			case strings.HasPrefix(s, "tag="):
				tag.TagID, err = strconv.Atoi(s[4:])
			case s == "compact":
				tag.Compact = true
			case s == "nullable":
				tag.Nullable = true
			default:
				err = fmt.Errorf("unrecognized option: %q", s)
			}
			return err == nil
		})

		if err != nil {
			panic(fmt.Errorf("malformed struct tag: %w", err))
		}

		if tag.MinVersion < 0 && tag.MaxVersion >= 0 {
			panic(fmt.Errorf("missing minimum version in struct tag: %q", s))
		}

		if tag.MaxVersion < 0 && tag.MinVersion >= 0 {
			panic(fmt.Errorf("missing maximum version in struct tag: %q", s))
		}

		if tag.MinVersion > tag.MaxVersion {
			panic(fmt.Errorf("invalid version range in struct tag: %q", s))
		}

		return do(tag)
	})
}

func forEach(s string, sep byte, do func(string) bool) bool {
	for len(s) != 0 {
		p := ""
		i := strings.IndexByte(s, sep)
		if i < 0 {
			p, s = s, ""
		} else {
			p, s = s[:i], s[i+1:]
		}
		if !do(p) {
			return false
		}
	}
	return true
}

func forEachStructField(t reflect.Type, do func(reflect.Type, index, string)) {
	for i, n := 0, t.NumField(); i < n; i++ {
		f := t.Field(i)

		if f.PkgPath != "" && f.Name != "_" {
			continue
		}

		kafkaTag, ok := f.Tag.Lookup("kafka")
		if !ok {
			kafkaTag = "|"
		}

		do(f.Type, indexOf(f), kafkaTag)
	}
}

func parseVersion(s string) (int16, error) {
	if !strings.HasPrefix(s, "v") {
		return 0, fmt.Errorf("invalid version number: %q", s)
	}
	i, err := strconv.ParseInt(s[1:], 10, 16)
	if err != nil {
		return 0, fmt.Errorf("invalid version number: %q: %w", s, err)
	}
	if i < 0 {
		return 0, fmt.Errorf("invalid negative version number: %q", s)
	}
	return int16(i), nil
}

func dontExpectEOF(err error) error {
	if err != nil {
		if errors.Is(err, io.EOF) {
			return io.ErrUnexpectedEOF
		}

		return err
	}

	return nil
}

type Broker struct {
	Rack string
	Host string
	Port int32
	ID   int32
}

func (b Broker) String() string {
	return net.JoinHostPort(b.Host, itoa(b.Port))
}

func (b Broker) Format(w fmt.State, v rune) {
	switch v {
	case 'd':
		io.WriteString(w, itoa(b.ID))
	case 's':
		io.WriteString(w, b.String())
	case 'v':
		io.WriteString(w, itoa(b.ID))
		io.WriteString(w, " ")
		io.WriteString(w, b.String())
		if b.Rack != "" {
			io.WriteString(w, " ")
			io.WriteString(w, b.Rack)
		}
	}
}

func itoa(i int32) string {
	return strconv.Itoa(int(i))
}

type Topic struct {
	Name       string
	Error      int16
	Partitions map[int32]Partition
}

type Partition struct {
	ID       int32
	Error    int16
	Leader   int32
	Replicas []int32
	ISR      []int32
	Offline  []int32
}

// RawExchanger is an extension to the Message interface to allow messages
// to control the request response cycle for the message. This is currently
// only used to facilitate v0 SASL Authenticate requests being written in
// a non-standard fashion when the SASL Handshake was done at v0 but not
// when done at v1.
type RawExchanger interface {
	// Required should return true when a RawExchange is needed.
	// The passed in versions are the negotiated versions for the connection
	// performing the request.
	Required(versions map[ApiKey]int16) bool
	// RawExchange is given the raw connection to the broker and the Message
	// is responsible for writing itself to the connection as well as reading
	// the response.
	RawExchange(rw io.ReadWriter) (Message, error)
}

// BrokerMessage is an extension of the Message interface implemented by some
// request types to customize the broker assignment logic.
type BrokerMessage interface {
	// Given a representation of the kafka cluster state as argument, returns
	// the broker that the message should be routed to.
	Broker(Cluster) (Broker, error)
}

// GroupMessage is an extension of the Message interface implemented by some
// request types to inform the program that they should be routed to a group
// coordinator.
type GroupMessage interface {
	// Returns the group configured on the message.
	Group() string
}

// TransactionalMessage is an extension of the Message interface implemented by some
// request types to inform the program that they should be routed to a transaction
// coordinator.
type TransactionalMessage interface {
	// Returns the transactional id configured on the message.
	Transaction() string
}

// PreparedMessage is an extension of the Message interface implemented by some
// request types which may need to run some pre-processing on their state before
// being sent.
type PreparedMessage interface {
	// Prepares the message before being sent to a kafka broker using the API
	// version passed as argument.
	Prepare(apiVersion int16)
}

// Splitter is an interface implemented by messages that can be split into
// multiple requests and have their results merged back by a Merger.
type Splitter interface {
	// For a given cluster layout, returns the list of messages constructed
	// from the receiver for each request that should be sent to the cluster.
	// The second return value is a Merger which can be used to merge back the
	// results of each request into a single message (or an error).
	Split(Cluster) ([]Message, Merger, error)
}

// Merger is an interface implemented by messages which can merge multiple
// results into one response.
type Merger interface {
	// Given a list of messages and associated results, merge them back into a
	// response (or an error). The results must be either Message or error
	// values; other types should trigger a panic.
	Merge(messages []Message, results []interface{}) (Message, error)
}

// Result converts r to a Message or an error, or panics if r could not be
// converted to these types.
func Result(r interface{}) (Message, error) {
	switch v := r.(type) {
	case Message:
		return v, nil
	case error:
		return nil, v
	default:
		panic(fmt.Errorf("BUG: result must be a message or an error but not %T", v))
	}
}
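SelectVersion above clamps the version requested for a connection into the range the registered message types support, preferring the highest version both sides understand. A stand-alone mirror of that clamp (selectVersion is hypothetical; min and max stand for ApiKey.MinVersion and ApiKey.MaxVersion):

package main

import "fmt"

// selectVersion clamps brokerMax into [min, max], mirroring the switch in
// ApiKey.SelectVersion above.
func selectVersion(min, max, brokerMax int16) int16 {
	switch {
	case min > brokerMax:
		return min
	case max < brokerMax:
		return max
	default:
		return brokerMax
	}
}

func main() {
	fmt.Println(selectVersion(0, 11, 7))  // 7: the broker caps the version
	fmt.Println(selectVersion(0, 11, 42)) // 11: the client caps the version
	fmt.Println(selectVersion(3, 11, 1))  // 3: below the supported floor
}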
314
vendor/github.com/segmentio/kafka-go/protocol/record.go
generated
vendored
Normal file
@@ -0,0 +1,314 @@
|
||||
package protocol
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
"io"
|
||||
"time"
|
||||
|
||||
"github.com/segmentio/kafka-go/compress"
|
||||
)
|
||||
|
||||
// Attributes is a bitset representing special attributes set on records.
|
||||
type Attributes int16
|
||||
|
||||
const (
|
||||
Gzip Attributes = Attributes(compress.Gzip) // 1
|
||||
Snappy Attributes = Attributes(compress.Snappy) // 2
|
||||
Lz4 Attributes = Attributes(compress.Lz4) // 3
|
||||
Zstd Attributes = Attributes(compress.Zstd) // 4
|
||||
Transactional Attributes = 1 << 4
|
||||
Control Attributes = 1 << 5
|
||||
)
|
||||
|
||||
func (a Attributes) Compression() compress.Compression {
|
||||
return compress.Compression(a & 7)
|
||||
}
|
||||
|
||||
func (a Attributes) Transactional() bool {
|
||||
return (a & Transactional) != 0
|
||||
}
|
||||
|
||||
func (a Attributes) Control() bool {
|
||||
return (a & Control) != 0
|
||||
}
|
||||
|
||||
func (a Attributes) String() string {
|
||||
s := a.Compression().String()
|
||||
if a.Transactional() {
|
||||
s += "+transactional"
|
||||
}
|
||||
if a.Control() {
|
||||
s += "+control"
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
// Header represents a single entry in a list of record headers.
|
||||
type Header struct {
|
||||
Key string
|
||||
Value []byte
|
||||
}
|
||||
|
||||
// Record is an interface representing a single kafka record.
|
||||
//
|
||||
// Record values are not safe to use concurrently from multiple goroutines.
|
||||
type Record struct {
|
||||
// The offset at which the record exists in a topic partition. This value
|
||||
// is ignored in produce requests.
|
||||
Offset int64
|
||||
|
||||
// Returns the time of the record. This value may be omitted in produce
|
||||
// requests to let kafka set the time when it saves the record.
|
||||
Time time.Time
|
||||
|
||||
// Returns a byte sequence containing the key of this record. The returned
|
||||
// sequence may be nil to indicate that the record has no key. If the record
|
||||
// is part of a RecordSet, the content of the key must remain valid at least
|
||||
// until the record set is closed (or until the key is closed).
|
||||
Key Bytes
|
||||
|
||||
// Returns a byte sequence containing the value of this record. The returned
|
||||
// sequence may be nil to indicate that the record has no value. If the
|
||||
// record is part of a RecordSet, the content of the value must remain valid
|
||||
// at least until the record set is closed (or until the value is closed).
|
||||
Value Bytes
|
||||
|
||||
// Returns the list of headers associated with this record. The returned
|
||||
// slice may be reused across calls, the program should use it as an
|
||||
// immutable value.
|
||||
Headers []Header
|
||||
}
|
||||
|
||||
// RecordSet represents a sequence of records in Produce requests and Fetch
|
||||
// responses. All v0, v1, and v2 formats are supported.
|
||||
type RecordSet struct {
|
||||
// The message version that this record set will be represented as, valid
|
||||
// values are 1, or 2.
|
||||
//
|
||||
// When reading, this is the value of the highest version used in the
|
||||
// batches that compose the record set.
|
||||
//
|
||||
// When writing, this value dictates the format that the records will be
|
||||
// encoded in.
|
||||
Version int8
|
||||
|
||||
// Attributes set on the record set.
|
||||
//
|
||||
// When reading, the attributes are the combination of all attributes in
|
||||
// the batches that compose the record set.
|
||||
//
|
||||
// When writing, the attributes apply to the whole sequence of records in
|
||||
// the set.
|
||||
Attributes Attributes
|
||||
|
||||
// A reader exposing the sequence of records.
|
||||
//
|
||||
// When reading a RecordSet from an io.Reader, the Records field will be a
|
||||
// *RecordStream. If the program needs to access the details of each batch
|
||||
// that compose the stream, it may use type assertions to access the
|
||||
// underlying types of each batch.
|
||||
Records RecordReader
|
||||
}

// bufferedReader is an interface implemented by types like bufio.Reader, which
// we use to optimize prefix reads by accessing the internal buffer directly
// through calls to Peek.
type bufferedReader interface {
	Discard(int) (int, error)
	Peek(int) ([]byte, error)
}

// bytesBuffer is an interface implemented by types like bytes.Buffer, which we
// use to optimize prefix reads by accessing the internal buffer directly
// through calls to Bytes.
type bytesBuffer interface {
	Bytes() []byte
}

// magicByteOffset is the position of the magic byte in all versions of record
// sets in the kafka protocol.
const magicByteOffset = 16
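
// Editor's note: the offset is the same in every format. In the v0/v1 message
// set layout the magic byte follows the first entry's offset (8 bytes),
// message size (4 bytes), and CRC (4 bytes); in the v2 record batch layout it
// follows the base offset (8 bytes), batch length (4 bytes), and partition
// leader epoch (4 bytes). Both sum to 8 + 4 + 4 = 16, which is why a single
// constant suffices.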

// ReadFrom reads the representation of a record set from r into rs, returning
// the number of bytes consumed from r, and a non-nil error if the record set
// could not be read.
func (rs *RecordSet) ReadFrom(r io.Reader) (int64, error) {
	d, _ := r.(*decoder)
	if d == nil {
		d = &decoder{
			reader: r,
			remain: 4,
		}
	}

	*rs = RecordSet{}
	limit := d.remain
	size := d.readInt32()

	if d.err != nil {
		return int64(limit - d.remain), d.err
	}

	if size <= 0 {
		return 4, nil
	}

	stream := &RecordStream{
		Records: make([]RecordReader, 0, 4),
	}

	var err error
	d.remain = int(size)

	for d.remain > 0 && err == nil {
		var version byte

		if d.remain < (magicByteOffset + 1) {
			if len(stream.Records) != 0 {
				break
			}
			return 4, fmt.Errorf("impossible record set shorter than %d bytes", magicByteOffset+1)
		}

		switch r := d.reader.(type) {
		case bufferedReader:
			b, err := r.Peek(magicByteOffset + 1)
			if err != nil {
				n, _ := r.Discard(len(b))
				return 4 + int64(n), dontExpectEOF(err)
			}
			version = b[magicByteOffset]
		case bytesBuffer:
			version = r.Bytes()[magicByteOffset]
		default:
			b := make([]byte, magicByteOffset+1)
			if n, err := io.ReadFull(d.reader, b); err != nil {
				return 4 + int64(n), dontExpectEOF(err)
			}
			version = b[magicByteOffset]
			// Reconstruct the prefix that we had to read to determine the version
			// of the record set from the magic byte.
			//
			// Technically this may recursively stack readers when consuming all
			// items of the batch, which could hurt performance. In practice this
			// path should not be taken though, since the decoder would read from a
			// *bufio.Reader which implements the bufferedReader interface.
			d.reader = io.MultiReader(bytes.NewReader(b), d.reader)
		}

		var tmp RecordSet
		switch version {
		case 0, 1:
			err = tmp.readFromVersion1(d)
		case 2:
			err = tmp.readFromVersion2(d)
		default:
			err = fmt.Errorf("unsupported message version %d for message of size %d", version, size)
		}

		if tmp.Version > rs.Version {
			rs.Version = tmp.Version
		}

		rs.Attributes |= tmp.Attributes

		if tmp.Records != nil {
			stream.Records = append(stream.Records, tmp.Records)
		}
	}

	if len(stream.Records) != 0 {
		rs.Records = stream
		// Ignore errors if we've successfully read records, so the
		// program can keep making progress.
		err = nil
	}

	d.discardAll()
	rn := 4 + (int(size) - d.remain)
	d.remain = limit - rn
	return int64(rn), err
}
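
// Example (editor's sketch, not part of the original source): wrapping an
// arbitrary reader in a bufio.Reader before calling ReadFrom, so the version
// probe above takes the bufferedReader Peek path instead of stacking
// io.MultiReader prefixes. conn is an assumed net.Conn carrying a raw record
// set.
//
//	rs := RecordSet{}
//	n, err := rs.ReadFrom(bufio.NewReader(conn))
//	if err != nil {
//		return err
//	}
//	fmt.Println("consumed", n, "bytes")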

// WriteTo writes the representation of rs into w. The value of rs.Version
// dictates which format the record set will be represented as.
//
// The error will be ErrNoRecord if rs contained no records.
//
// Note: since this package is only compatible with kafka 0.10 and above, the
// method never produces messages in version 0. If rs.Version is zero, the
// method defaults to producing messages in version 1.
func (rs *RecordSet) WriteTo(w io.Writer) (int64, error) {
	if rs.Records == nil {
		return 0, ErrNoRecord
	}

	// This optimization avoids rendering the record set in an intermediary
	// buffer when the writer is already a pageBuffer, which is a common case
	// due to the way WriteRequest and WriteResponse are implemented.
	buffer, _ := w.(*pageBuffer)
	bufferOffset := int64(0)

	if buffer != nil {
		bufferOffset = buffer.Size()
	} else {
		buffer = newPageBuffer()
		defer buffer.unref()
	}

	size := packUint32(0)
	buffer.Write(size[:]) // size placeholder

	var err error
	switch rs.Version {
	case 0, 1:
		err = rs.writeToVersion1(buffer, bufferOffset+4)
	case 2:
		err = rs.writeToVersion2(buffer, bufferOffset+4)
	default:
		err = fmt.Errorf("unsupported record set version %d", rs.Version)
	}
	if err != nil {
		return 0, err
	}

	n := buffer.Size() - bufferOffset
	if n == 0 {
		size = packUint32(^uint32(0)) // a size of -1 encodes a null record set
	} else {
		size = packUint32(uint32(n) - 4)
	}
	buffer.WriteAt(size[:], bufferOffset)

	// This condition indicates that the output writer received by `WriteTo` was
	// not a *pageBuffer, in which case we need to flush the buffered records
	// data into it.
	if buffer != w {
		return buffer.WriteTo(w)
	}

	return n, nil
}
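
// Example (editor's sketch, not part of the original source): encoding a
// record set into an in-memory buffer. NewBytes and NewRecordReader are the
// helpers defined in this package.
//
//	rs := RecordSet{
//		Version: 2,
//		Records: NewRecordReader(Record{
//			Key:   NewBytes([]byte("key")),
//			Value: NewBytes([]byte("value")),
//		}),
//	}
//	buf := new(bytes.Buffer)
//	if _, err := rs.WriteTo(buf); err != nil {
//		return err
//	}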

func makeTime(t int64) time.Time {
	return time.Unix(t/1000, (t%1000)*int64(time.Millisecond))
}

func timestamp(t time.Time) int64 {
	if t.IsZero() {
		return 0
	}
	return t.UnixNano() / int64(time.Millisecond)
}
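
// Editor's note: kafka timestamps have millisecond precision, so
// makeTime(timestamp(t)) truncates t to the millisecond; a zero time.Time
// maps to 0 and therefore round-trips to the unix epoch rather than back to
// the zero value.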

func packUint32(u uint32) (b [4]byte) {
	binary.BigEndian.PutUint32(b[:], u)
	return
}

func packUint64(u uint64) (b [8]byte) {
	binary.BigEndian.PutUint64(b[:], u)
	return
}

358
vendor/github.com/segmentio/kafka-go/protocol/record_batch.go
generated
vendored
Normal file
@@ -0,0 +1,358 @@

package protocol

import (
	"errors"
	"io"
	"time"
)

// RecordReader is an interface representing a sequence of records. Record sets
// are used in both produce and fetch requests to represent the sequence of
// records that are sent to or received from kafka brokers.
//
// RecordReader values are not safe to use concurrently from multiple
// goroutines.
type RecordReader interface {
	// Returns the next record in the set, or io.EOF if the end of the sequence
	// has been reached.
	//
	// The returned Record is guaranteed to be valid until the next call to
	// ReadRecord. If the program needs to retain the Record value it must make
	// a copy.
	ReadRecord() (*Record, error)
}
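
// Example (editor's sketch, not part of the original source): draining a
// RecordReader while honoring the validity contract above, copying the value
// out with this package's ReadAll helper before the next ReadRecord call.
// reader is an assumed RecordReader and process an assumed callback.
//
//	for {
//		rec, err := reader.ReadRecord()
//		if errors.Is(err, io.EOF) {
//			break
//		}
//		if err != nil {
//			return err
//		}
//		v, err := ReadAll(rec.Value) // copy before rec is invalidated
//		if err != nil {
//			return err
//		}
//		process(v)
//	}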

// NewRecordReader constructs a reader exposing the records passed as arguments.
func NewRecordReader(records ...Record) RecordReader {
	switch len(records) {
	case 0:
		return emptyRecordReader{}
	default:
		r := &recordReader{records: make([]Record, len(records))}
		copy(r.records, records)
		return r
	}
}

// MultiRecordReader merges multiple record batches into one.
func MultiRecordReader(batches ...RecordReader) RecordReader {
	switch len(batches) {
	case 0:
		return emptyRecordReader{}
	case 1:
		return batches[0]
	default:
		m := &multiRecordReader{batches: make([]RecordReader, len(batches))}
		copy(m.batches, batches)
		return m
	}
}

func forEachRecord(r RecordReader, f func(int, *Record) error) error {
	for i := 0; ; i++ {
		rec, err := r.ReadRecord()

		if err != nil {
			if errors.Is(err, io.EOF) {
				err = nil
			}
			return err
		}

		if err := handleRecord(i, rec, f); err != nil {
			return err
		}
	}
}

func handleRecord(i int, r *Record, f func(int, *Record) error) error {
	if r.Key != nil {
		defer r.Key.Close()
	}
	if r.Value != nil {
		defer r.Value.Close()
	}
	return f(i, r)
}

type recordReader struct {
	records []Record
	index   int
}

func (r *recordReader) ReadRecord() (*Record, error) {
	if i := r.index; i >= 0 && i < len(r.records) {
		r.index++
		return &r.records[i], nil
	}
	return nil, io.EOF
}

type multiRecordReader struct {
	batches []RecordReader
	index   int
}

func (m *multiRecordReader) ReadRecord() (*Record, error) {
	for {
		if m.index == len(m.batches) {
			return nil, io.EOF
		}
		r, err := m.batches[m.index].ReadRecord()
		if err == nil {
			return r, nil
		}
		if !errors.Is(err, io.EOF) {
			return nil, err
		}
		m.index++
	}
}

// optimizedRecordReader is an implementation of a RecordReader which exposes a
// sequence of records held in an optimized in-memory representation, reusing a
// single Record buffer across calls to avoid allocations.
type optimizedRecordReader struct {
	records []optimizedRecord
	index   int
	buffer  Record
	headers [][]Header
}

func (r *optimizedRecordReader) ReadRecord() (*Record, error) {
	if i := r.index; i >= 0 && i < len(r.records) {
		rec := &r.records[i]
		r.index++
		r.buffer = Record{
			Offset: rec.offset,
			Time:   rec.time(),
			Key:    rec.key(),
			Value:  rec.value(),
		}
		if i < len(r.headers) {
			r.buffer.Headers = r.headers[i]
		}
		return &r.buffer, nil
	}
	return nil, io.EOF
}

type optimizedRecord struct {
	offset    int64
	timestamp int64
	keyRef    *pageRef
	valueRef  *pageRef
}

func (r *optimizedRecord) time() time.Time {
	return makeTime(r.timestamp)
}

func (r *optimizedRecord) key() Bytes {
	return makeBytes(r.keyRef)
}

func (r *optimizedRecord) value() Bytes {
	return makeBytes(r.valueRef)
}

func makeBytes(ref *pageRef) Bytes {
	if ref == nil {
		// Return an untyped nil rather than a non-nil Bytes interface
		// wrapping a nil *pageRef.
		return nil
	}
	return ref
}

type emptyRecordReader struct{}

func (emptyRecordReader) ReadRecord() (*Record, error) { return nil, io.EOF }

// ControlRecord represents a record read from a control batch.
type ControlRecord struct {
	Offset  int64
	Time    time.Time
	Version int16
	Type    int16
	Data    []byte
	Headers []Header
}
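
// Editor's note: the key of a control record is exactly 4 bytes, holding the
// version in the first two bytes and the type in the last two (big-endian);
// ReadControlRecord below decodes that layout and the Key method re-encodes
// it.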

func ReadControlRecord(r *Record) (*ControlRecord, error) {
	if r.Key != nil {
		defer r.Key.Close()
	}
	if r.Value != nil {
		defer r.Value.Close()
	}

	k, err := ReadAll(r.Key)
	if err != nil {
		return nil, err
	}
	if k == nil {
		return nil, Error("invalid control record with nil key")
	}
	if len(k) != 4 {
		return nil, Errorf("invalid control record with key of size %d", len(k))
	}

	v, err := ReadAll(r.Value)
	if err != nil {
		return nil, err
	}

	c := &ControlRecord{
		Offset:  r.Offset,
		Time:    r.Time,
		Version: readInt16(k[:2]),
		Type:    readInt16(k[2:]),
		Data:    v,
		Headers: r.Headers,
	}

	return c, nil
}

func (cr *ControlRecord) Key() Bytes {
	k := make([]byte, 4)
	writeInt16(k[:2], cr.Version)
	writeInt16(k[2:], cr.Type)
	return NewBytes(k)
}

func (cr *ControlRecord) Value() Bytes {
	return NewBytes(cr.Data)
}

func (cr *ControlRecord) Record() Record {
	return Record{
		Offset:  cr.Offset,
		Time:    cr.Time,
		Key:     cr.Key(),
		Value:   cr.Value(),
		Headers: cr.Headers,
	}
}

// ControlBatch is an implementation of the RecordReader interface representing
// control batches returned by kafka brokers.
type ControlBatch struct {
	Attributes           Attributes
	PartitionLeaderEpoch int32
	BaseOffset           int64
	ProducerID           int64
	ProducerEpoch        int16
	BaseSequence         int32
	Records              RecordReader
}

// NewControlBatch constructs a control batch from the list of records passed
// as arguments.
func NewControlBatch(records ...ControlRecord) *ControlBatch {
	rawRecords := make([]Record, len(records))
	for i, cr := range records {
		rawRecords[i] = cr.Record()
	}
	return &ControlBatch{
		Records: NewRecordReader(rawRecords...),
	}
}

func (c *ControlBatch) ReadRecord() (*Record, error) {
	return c.Records.ReadRecord()
}

func (c *ControlBatch) ReadControlRecord() (*ControlRecord, error) {
	r, err := c.ReadRecord()
	if err != nil {
		return nil, err
	}
	if r.Key != nil {
		defer r.Key.Close()
	}
	if r.Value != nil {
		defer r.Value.Close()
	}
	return ReadControlRecord(r)
}
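
// Example (editor's sketch, not part of the original source): draining a
// control batch and inspecting each control record. batch is an assumed
// *ControlBatch.
//
//	for {
//		cr, err := batch.ReadControlRecord()
//		if errors.Is(err, io.EOF) {
//			break
//		}
//		if err != nil {
//			return err
//		}
//		fmt.Printf("control record: version=%d type=%d\n", cr.Version, cr.Type)
//	}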

func (c *ControlBatch) Offset() int64 {
	return c.BaseOffset
}

func (c *ControlBatch) Version() int {
	return 2
}

// RecordBatch is an implementation of the RecordReader interface representing
// regular record batches (v2).
type RecordBatch struct {
	Attributes           Attributes
	PartitionLeaderEpoch int32
	BaseOffset           int64
	ProducerID           int64
	ProducerEpoch        int16
	BaseSequence         int32
	Records              RecordReader
}

func (r *RecordBatch) ReadRecord() (*Record, error) {
	return r.Records.ReadRecord()
}

func (r *RecordBatch) Offset() int64 {
	return r.BaseOffset
}

func (r *RecordBatch) Version() int {
	return 2
}

// MessageSet is an implementation of the RecordReader interface representing
// regular message sets (v1).
type MessageSet struct {
	Attributes Attributes
	BaseOffset int64
	Records    RecordReader
}

func (m *MessageSet) ReadRecord() (*Record, error) {
	return m.Records.ReadRecord()
}

func (m *MessageSet) Offset() int64 {
	return m.BaseOffset
}

func (m *MessageSet) Version() int {
	return 1
}

// RecordStream is an implementation of the RecordReader interface which
// combines multiple underlying RecordReaders and only exposes records that
// are not from control batches.
type RecordStream struct {
	Records []RecordReader
	index   int
}

func (s *RecordStream) ReadRecord() (*Record, error) {
	for {
		if s.index < 0 || s.index >= len(s.Records) {
			return nil, io.EOF
		}

		if _, isControl := s.Records[s.index].(*ControlBatch); isControl {
			s.index++
			continue
		}

		r, err := s.Records[s.index].ReadRecord()
		if err != nil {
			if errors.Is(err, io.EOF) {
				s.index++
				continue
			}
		}

		return r, err
	}
}
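
// Example (editor's sketch, not part of the original source): distinguishing
// the kinds of batches behind a decoded RecordSet by type-switching over the
// readers collected in a *RecordStream. rs is an assumed RecordSet populated
// by ReadFrom.
//
//	if stream, ok := rs.Records.(*RecordStream); ok {
//		for _, batch := range stream.Records {
//			switch b := batch.(type) {
//			case *RecordBatch:
//				fmt.Println("v2 batch at offset", b.BaseOffset)
//			case *MessageSet:
//				fmt.Println("v1 message set at offset", b.BaseOffset)
//			case *ControlBatch:
//				fmt.Println("control batch at offset", b.BaseOffset)
//			}
//		}
//	}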