fix(deps): update module github.com/segmentio/kafka-go to v0.4.47 #2764
Open: renovate wants to merge 1 commit into master from renovate/github.com-segmentio-kafka-go-0.x
Compare: 49c524c to 7b56437
[puLL-Merge] - segmentio/[email protected]

Diff

diff --git .circleci/config.yml .circleci/config.yml
index f6af326e4..25fad54e3 100644
--- .circleci/config.yml
+++ .circleci/config.yml
@@ -51,6 +51,9 @@ jobs:
- run:
name: Test kafka-go
command: go test -race -cover ./...
+ - run:
+ name: Test kafka-go unsafe
+ command: go test -tags=unsafe -race -cover ./...
- run:
name: Test kafka-go/sasl/aws_msk_iam
working_directory: ./sasl/aws_msk_iam
diff --git README.md README.md
index 20f20e682..e17878825 100644
--- README.md
+++ README.md
@@ -225,7 +225,6 @@ r := kafka.NewReader(kafka.ReaderConfig{
Brokers: []string{"localhost:9092","localhost:9093", "localhost:9094"},
Topic: "topic-A",
Partition: 0,
- MinBytes: 10e3, // 10KB
MaxBytes: 10e6, // 10MB
})
r.SetOffset(42)
@@ -256,7 +255,6 @@ r := kafka.NewReader(kafka.ReaderConfig{
Brokers: []string{"localhost:9092", "localhost:9093", "localhost:9094"},
GroupID: "consumer-group-id",
Topic: "topic-A",
- MinBytes: 10e3, // 10KB
MaxBytes: 10e6, // 10MB
})
@@ -320,7 +318,6 @@ r := kafka.NewReader(kafka.ReaderConfig{
Brokers: []string{"localhost:9092", "localhost:9093", "localhost:9094"},
GroupID: "consumer-group-id",
Topic: "topic-A",
- MinBytes: 10e3, // 10KB
MaxBytes: 10e6, // 10MB
CommitInterval: time.Second, // flushes commits to Kafka every second
})
@@ -404,7 +401,7 @@ for i := 0; i < retries; i++ {
// attempt to create topic prior to publishing the message
err = w.WriteMessages(ctx, messages...)
- if errors.Is(err, LeaderNotAvailable) || errors.Is(err, context.DeadlineExceeded) {
+ if errors.Is(err, kafka.LeaderNotAvailable) || errors.Is(err, context.DeadlineExceeded) {
time.Sleep(time.Millisecond * 250)
continue
}
@@ -412,6 +409,7 @@ for i := 0; i < retries; i++ {
if err != nil {
log.Fatalf("unexpected error %v", err)
}
+ break
}
if err := w.Close(); err != nil {
@@ -718,7 +716,6 @@ r := kafka.NewReader(kafka.ReaderConfig{
Brokers: []string{"localhost:9092", "localhost:9093", "localhost:9094"},
Topic: "my-topic1",
Partition: 0,
- MinBytes: batchSize,
MaxBytes: batchSize,
})
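
The README retry example above gains the `kafka.` qualifier on `LeaderNotAvailable` and a `break` so the loop stops after a successful write. For reference, a minimal standalone sketch of the corrected loop; the broker address, topic, and retry count are placeholder assumptions:

```go
package main

import (
	"context"
	"errors"
	"log"
	"time"

	"github.com/segmentio/kafka-go"
)

func main() {
	// AllowAutoTopicCreation lets the first write race topic creation,
	// which is why LeaderNotAvailable is retriable here.
	w := &kafka.Writer{
		Addr:                   kafka.TCP("localhost:9092"), // placeholder broker
		Topic:                  "topic-A",                   // placeholder topic
		AllowAutoTopicCreation: true,
	}

	messages := []kafka.Message{{Value: []byte("hello")}}

	const retries = 3
	var err error
	for i := 0; i < retries; i++ {
		ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
		defer cancel()

		err = w.WriteMessages(ctx, messages...)
		if errors.Is(err, kafka.LeaderNotAvailable) || errors.Is(err, context.DeadlineExceeded) {
			time.Sleep(250 * time.Millisecond)
			continue
		}
		if err != nil {
			log.Fatalf("unexpected error %v", err)
		}
		break // the fix: stop retrying once the write succeeds
	}

	if err := w.Close(); err != nil {
		log.Fatal("failed to close writer:", err)
	}
}
```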
diff --git a/alterclientquotas.go b/alterclientquotas.go
new file mode 100644
index 000000000..7a926e5c4
--- /dev/null
+++ alterclientquotas.go
@@ -0,0 +1,131 @@
+package kafka
+
+import (
+ "context"
+ "fmt"
+ "net"
+ "time"
+
+ "github.com/segmentio/kafka-go/protocol/alterclientquotas"
+)
+
+// AlterClientQuotasRequest represents a request sent to a kafka broker to
+// alter client quotas.
+type AlterClientQuotasRequest struct {
+ // Address of the kafka broker to send the request to.
+ Addr net.Addr
+
+ // List of client quotas entries to alter.
+ Entries []AlterClientQuotaEntry
+
+ // Whether the alteration should be validated, but not performed.
+ ValidateOnly bool
+}
+
+type AlterClientQuotaEntry struct {
+ // The quota entities to alter.
+ Entities []AlterClientQuotaEntity
+
+ // An individual quota configuration entry to alter.
+ Ops []AlterClientQuotaOps
+}
+
+type AlterClientQuotaEntity struct {
+ // The quota entity type.
+ EntityType string
+
+ // The name of the quota entity, or null if the default.
+ EntityName string
+}
+
+type AlterClientQuotaOps struct {
+ // The quota configuration key.
+ Key string
+
+ // The quota configuration value to set, otherwise ignored if the value is to be removed.
+ Value float64
+
+ // Whether the quota configuration value should be removed, otherwise set.
+ Remove bool
+}
+
+type AlterClientQuotaResponseQuotas struct {
+ // Error is set to a non-nil value including the code and message if a top-level
+ // error was encountered when doing the update.
+ Error error
+
+ // The altered quota entities.
+ Entities []AlterClientQuotaEntity
+}
+
+// AlterClientQuotasResponse represents a response from a kafka broker to an alter client
+// quotas request.
+type AlterClientQuotasResponse struct {
+ // The amount of time that the broker throttled the request.
+ Throttle time.Duration
+
+ // List of altered client quotas responses.
+ Entries []AlterClientQuotaResponseQuotas
+}
+
+// AlterClientQuotas sends client quotas alteration request to a kafka broker and returns
+// the response.
+func (c *Client) AlterClientQuotas(ctx context.Context, req *AlterClientQuotasRequest) (*AlterClientQuotasResponse, error) {
+ entries := make([]alterclientquotas.Entry, len(req.Entries))
+
+ for entryIdx, entry := range req.Entries {
+ entities := make([]alterclientquotas.Entity, len(entry.Entities))
+ for entityIdx, entity := range entry.Entities {
+ entities[entityIdx] = alterclientquotas.Entity{
+ EntityType: entity.EntityType,
+ EntityName: entity.EntityName,
+ }
+ }
+
+ ops := make([]alterclientquotas.Ops, len(entry.Ops))
+ for opsIdx, op := range entry.Ops {
+ ops[opsIdx] = alterclientquotas.Ops{
+ Key: op.Key,
+ Value: op.Value,
+ Remove: op.Remove,
+ }
+ }
+
+ entries[entryIdx] = alterclientquotas.Entry{
+ Entities: entities,
+ Ops: ops,
+ }
+ }
+
+ m, err := c.roundTrip(ctx, req.Addr, &alterclientquotas.Request{
+ Entries: entries,
+ ValidateOnly: req.ValidateOnly,
+ })
+ if err != nil {
+ return nil, fmt.Errorf("kafka.(*Client).AlterClientQuotas: %w", err)
+ }
+
+ res := m.(*alterclientquotas.Response)
+ responseEntries := make([]AlterClientQuotaResponseQuotas, len(res.Results))
+
+ for responseEntryIdx, responseEntry := range res.Results {
+ responseEntities := make([]AlterClientQuotaEntity, len(responseEntry.Entities))
+ for responseEntityIdx, responseEntity := range responseEntry.Entities {
+ responseEntities[responseEntityIdx] = AlterClientQuotaEntity{
+ EntityType: responseEntity.EntityType,
+ EntityName: responseEntity.EntityName,
+ }
+ }
+
+ responseEntries[responseEntryIdx] = AlterClientQuotaResponseQuotas{
+ Error: makeError(responseEntry.ErrorCode, responseEntry.ErrorMessage),
+ Entities: responseEntities,
+ }
+ }
+ ret := &AlterClientQuotasResponse{
+ Throttle: makeDuration(res.ThrottleTimeMs),
+ Entries: responseEntries,
+ }
+
+ return ret, nil
+}
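
alterclientquotas.go adds a high-level client API for Kafka's AlterClientQuotas RPC. A minimal usage sketch, assuming a broker at localhost:9092 running Kafka 2.6+; the client ID and quota key are placeholders taken from the test below:

```go
package main

import (
	"context"
	"log"

	"github.com/segmentio/kafka-go"
)

func main() {
	client := &kafka.Client{Addr: kafka.TCP("localhost:9092")}

	// Set a producer byte-rate quota for a single client ID.
	resp, err := client.AlterClientQuotas(context.Background(), &kafka.AlterClientQuotasRequest{
		Entries: []kafka.AlterClientQuotaEntry{{
			Entities: []kafka.AlterClientQuotaEntity{{
				EntityType: "client-id",
				EntityName: "my-client-id", // placeholder client ID
			}},
			Ops: []kafka.AlterClientQuotaOps{{
				Key:   "producer_byte_rate",
				Value: 500000.0,
			}},
		}},
	})
	if err != nil {
		log.Fatal(err)
	}
	for _, entry := range resp.Entries {
		if entry.Error != nil {
			log.Println("quota alteration failed:", entry.Error)
		}
	}
}
```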
diff --git a/alterclientquotas_test.go b/alterclientquotas_test.go
new file mode 100644
index 000000000..d61c745e3
--- /dev/null
+++ alterclientquotas_test.go
@@ -0,0 +1,104 @@
+package kafka
+
+import (
+ "context"
+ "testing"
+
+ ktesting "github.com/segmentio/kafka-go/testing"
+ "github.com/stretchr/testify/assert"
+)
+
+func TestClientAlterClientQuotas(t *testing.T) {
+ // Added in Version 2.6.0 https://issues.apache.org/jira/browse/KAFKA-7740
+ if !ktesting.KafkaIsAtLeast("2.6.0") {
+ return
+ }
+
+ const (
+ entityType = "client-id"
+ entityName = "my-client-id"
+ key = "producer_byte_rate"
+ value = 500000.0
+ )
+
+ client, shutdown := newLocalClient()
+ defer shutdown()
+
+ alterResp, err := client.AlterClientQuotas(context.Background(), &AlterClientQuotasRequest{
+ Entries: []AlterClientQuotaEntry{
+ {
+ Entities: []AlterClientQuotaEntity{
+ {
+ EntityType: entityType,
+ EntityName: entityName,
+ },
+ },
+ Ops: []AlterClientQuotaOps{
+ {
+ Key: key,
+ Value: value,
+ Remove: false,
+ },
+ },
+ },
+ },
+ })
+
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ expectedAlterResp := AlterClientQuotasResponse{
+ Throttle: 0,
+ Entries: []AlterClientQuotaResponseQuotas{
+ {
+ Error: makeError(0, ""),
+ Entities: []AlterClientQuotaEntity{
+ {
+ EntityName: entityName,
+ EntityType: entityType,
+ },
+ },
+ },
+ },
+ }
+
+ assert.Equal(t, expectedAlterResp, *alterResp)
+
+ describeResp, err := client.DescribeClientQuotas(context.Background(), &DescribeClientQuotasRequest{
+ Components: []DescribeClientQuotasRequestComponent{
+ {
+ EntityType: entityType,
+ MatchType: 0,
+ Match: entityName,
+ },
+ },
+ })
+
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ expectedDescribeResp := DescribeClientQuotasResponse{
+ Throttle: 0,
+ Error: makeError(0, ""),
+ Entries: []DescribeClientQuotasResponseQuotas{
+ {
+ Entities: []DescribeClientQuotasEntity{
+ {
+ EntityType: entityType,
+ EntityName: entityName,
+ },
+ },
+ Values: []DescribeClientQuotasValue{
+ {
+ Key: key,
+ Value: value,
+ },
+ },
+ },
+ },
+ }
+
+ assert.Equal(t, expectedDescribeResp, *describeResp)
+}
diff --git alterpartitionreassignments.go alterpartitionreassignments.go
index ec76dbd8b..dd67d003b 100644
--- alterpartitionreassignments.go
+++ alterpartitionreassignments.go
@@ -13,7 +13,8 @@ type AlterPartitionReassignmentsRequest struct {
// Address of the kafka broker to send the request to.
Addr net.Addr
- // Topic is the name of the topic to alter partitions in.
+ // Topic is the name of the topic to alter partitions in. Keep this field empty and use Topic in AlterPartitionReassignmentsRequestAssignment to
+ // reassign to multiple topics.
Topic string
// Assignments is the list of partition reassignments to submit to the API.
@@ -26,10 +27,13 @@ type AlterPartitionReassignmentsRequest struct {
// AlterPartitionReassignmentsRequestAssignment contains the requested reassignments for a single
// partition.
type AlterPartitionReassignmentsRequestAssignment struct {
+ // Topic is the name of the topic to alter partitions in. If empty, the value of Topic in AlterPartitionReassignmentsRequest is used.
+ Topic string
+
// PartitionID is the ID of the partition to make the reassignments in.
PartitionID int
- // BrokerIDs is a slice of brokers to set the partition replicas to.
+ // BrokerIDs is a slice of brokers to set the partition replicas to, or null to cancel a pending reassignment for this partition.
BrokerIDs []int
}
@@ -46,6 +50,9 @@ type AlterPartitionReassignmentsResponse struct {
// AlterPartitionReassignmentsResponsePartitionResult contains the detailed result of
// doing reassignments for a single partition.
type AlterPartitionReassignmentsResponsePartitionResult struct {
+ // Topic is the topic name.
+ Topic string
+
// PartitionID is the ID of the partition that was altered.
PartitionID int
@@ -58,16 +65,29 @@ func (c *Client) AlterPartitionReassignments(
ctx context.Context,
req *AlterPartitionReassignmentsRequest,
) (*AlterPartitionReassignmentsResponse, error) {
- apiPartitions := []alterpartitionreassignments.RequestPartition{}
+ apiTopicMap := make(map[string]*alterpartitionreassignments.RequestTopic)
for _, assignment := range req.Assignments {
+ topic := assignment.Topic
+ if topic == "" {
+ topic = req.Topic
+ }
+
+ apiTopic := apiTopicMap[topic]
+ if apiTopic == nil {
+ apiTopic = &alterpartitionreassignments.RequestTopic{
+ Name: topic,
+ }
+ apiTopicMap[topic] = apiTopic
+ }
+
replicas := []int32{}
for _, brokerID := range assignment.BrokerIDs {
replicas = append(replicas, int32(brokerID))
}
- apiPartitions = append(
- apiPartitions,
+ apiTopic.Partitions = append(
+ apiTopic.Partitions,
alterpartitionreassignments.RequestPartition{
PartitionIndex: int32(assignment.PartitionID),
Replicas: replicas,
@@ -77,12 +97,10 @@ func (c *Client) AlterPartitionReassignments(
apiReq := &alterpartitionreassignments.Request{
TimeoutMs: int32(req.Timeout.Milliseconds()),
- Topics: []alterpartitionreassignments.RequestTopic{
- {
- Name: req.Topic,
- Partitions: apiPartitions,
- },
- },
+ }
+
+ for _, apiTopic := range apiTopicMap {
+ apiReq.Topics = append(apiReq.Topics, *apiTopic)
}
protoResp, err := c.roundTrip(
@@ -104,6 +122,7 @@ func (c *Client) AlterPartitionReassignments(
resp.PartitionResults = append(
resp.PartitionResults,
AlterPartitionReassignmentsResponsePartitionResult{
+ Topic: topicResult.Name,
PartitionID: int(partitionResult.PartitionIndex),
Error: makeError(partitionResult.ErrorCode, partitionResult.ErrorMessage),
},
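
With the new per-assignment Topic field, a single request can reassign partitions across several topics; the request-level Topic stays empty in that case. A sketch with placeholder topics and broker IDs:

```go
package main

import (
	"context"
	"log"

	"github.com/segmentio/kafka-go"
)

func main() {
	client := &kafka.Client{Addr: kafka.TCP("localhost:9092")} // placeholder broker

	resp, err := client.AlterPartitionReassignments(context.Background(),
		&kafka.AlterPartitionReassignmentsRequest{
			// Topic left empty: each assignment names its own topic.
			Assignments: []kafka.AlterPartitionReassignmentsRequestAssignment{
				{Topic: "topic-A", PartitionID: 0, BrokerIDs: []int{1, 2}},
				{Topic: "topic-B", PartitionID: 0, BrokerIDs: []int{2, 3}},
			},
		})
	if err != nil {
		log.Fatal(err)
	}
	for _, r := range resp.PartitionResults {
		if r.Error != nil {
			// PartitionResults now carry the topic name as well.
			log.Printf("%s/%d: %v", r.Topic, r.PartitionID, r.Error)
		}
	}
}
```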
diff --git alterpartitionreassignments_test.go alterpartitionreassignments_test.go
index 84db7bdd2..7bbce8fff 100644
--- alterpartitionreassignments_test.go
+++ alterpartitionreassignments_test.go
@@ -56,3 +56,64 @@ func TestClientAlterPartitionReassignments(t *testing.T) {
)
}
}
+
+func TestClientAlterPartitionReassignmentsMultiTopics(t *testing.T) {
+ if !ktesting.KafkaIsAtLeast("2.4.0") {
+ return
+ }
+
+ ctx := context.Background()
+ client, shutdown := newLocalClient()
+ defer shutdown()
+
+ topic1 := makeTopic()
+ topic2 := makeTopic()
+ createTopic(t, topic1, 2)
+ createTopic(t, topic2, 2)
+ defer func() {
+ deleteTopic(t, topic1)
+ deleteTopic(t, topic2)
+ }()
+
+ // Local kafka only has 1 broker, so any partition reassignments are really no-ops.
+ resp, err := client.AlterPartitionReassignments(
+ ctx,
+ &AlterPartitionReassignmentsRequest{
+ Assignments: []AlterPartitionReassignmentsRequestAssignment{
+ {
+ Topic: topic1,
+ PartitionID: 0,
+ BrokerIDs: []int{1},
+ },
+ {
+ Topic: topic1,
+ PartitionID: 1,
+ BrokerIDs: []int{1},
+ },
+ {
+ Topic: topic2,
+ PartitionID: 0,
+ BrokerIDs: []int{1},
+ },
+ },
+ },
+ )
+
+ if err != nil {
+ t.Fatal(err)
+ }
+ if resp.Error != nil {
+ t.Error(
+ "Unexpected error in response",
+ "expected", nil,
+ "got", resp.Error,
+ )
+ }
+ if len(resp.PartitionResults) != 3 {
+ t.Error(
+ "Unexpected length of partition results",
+ "expected", 3,
+ "got", len(resp.PartitionResults),
+ )
+ }
+}
diff --git a/alteruserscramcredentials.go b/alteruserscramcredentials.go
new file mode 100644
index 000000000..6163e564e
--- /dev/null
+++ alteruserscramcredentials.go
@@ -0,0 +1,107 @@
+package kafka
+
+import (
+ "context"
+ "fmt"
+ "net"
+ "time"
+
+ "github.com/segmentio/kafka-go/protocol/alteruserscramcredentials"
+)
+
+// AlterUserScramCredentialsRequest represents a request sent to a kafka broker to
+// alter user scram credentials.
+type AlterUserScramCredentialsRequest struct {
+ // Address of the kafka broker to send the request to.
+ Addr net.Addr
+
+ // List of credentials to delete.
+ Deletions []UserScramCredentialsDeletion
+
+ // List of credentials to upsert.
+ Upsertions []UserScramCredentialsUpsertion
+}
+
+type ScramMechanism int8
+
+const (
+ ScramMechanismUnknown ScramMechanism = iota // 0
+ ScramMechanismSha256 // 1
+ ScramMechanismSha512 // 2
+)
+
+type UserScramCredentialsDeletion struct {
+ Name string
+ Mechanism ScramMechanism
+}
+
+type UserScramCredentialsUpsertion struct {
+ Name string
+ Mechanism ScramMechanism
+ Iterations int
+ Salt []byte
+ SaltedPassword []byte
+}
+
+// AlterUserScramCredentialsResponse represents a response from a kafka broker to an alter user
+// credentials request.
+type AlterUserScramCredentialsResponse struct {
+ // The amount of time that the broker throttled the request.
+ Throttle time.Duration
+
+ // List of altered user scram credentials.
+ Results []AlterUserScramCredentialsResponseUser
+}
+
+type AlterUserScramCredentialsResponseUser struct {
+ User string
+ Error error
+}
+
+// AlterUserScramCredentials sends user scram credentials alteration request to a kafka broker and returns
+// the response.
+func (c *Client) AlterUserScramCredentials(ctx context.Context, req *AlterUserScramCredentialsRequest) (*AlterUserScramCredentialsResponse, error) {
+ deletions := make([]alteruserscramcredentials.RequestUserScramCredentialsDeletion, len(req.Deletions))
+ upsertions := make([]alteruserscramcredentials.RequestUserScramCredentialsUpsertion, len(req.Upsertions))
+
+ for deletionIdx, deletion := range req.Deletions {
+ deletions[deletionIdx] = alteruserscramcredentials.RequestUserScramCredentialsDeletion{
+ Name: deletion.Name,
+ Mechanism: int8(deletion.Mechanism),
+ }
+ }
+
+ for upsertionIdx, upsertion := range req.Upsertions {
+ upsertions[upsertionIdx] = alteruserscramcredentials.RequestUserScramCredentialsUpsertion{
+ Name: upsertion.Name,
+ Mechanism: int8(upsertion.Mechanism),
+ Iterations: int32(upsertion.Iterations),
+ Salt: upsertion.Salt,
+ SaltedPassword: upsertion.SaltedPassword,
+ }
+ }
+
+ m, err := c.roundTrip(ctx, req.Addr, &alteruserscramcredentials.Request{
+ Deletions: deletions,
+ Upsertions: upsertions,
+ })
+ if err != nil {
+ return nil, fmt.Errorf("kafka.(*Client).AlterUserScramCredentials: %w", err)
+ }
+
+ res := m.(*alteruserscramcredentials.Response)
+ responseEntries := make([]AlterUserScramCredentialsResponseUser, len(res.Results))
+
+ for responseIdx, responseResult := range res.Results {
+ responseEntries[responseIdx] = AlterUserScramCredentialsResponseUser{
+ User: responseResult.User,
+ Error: makeError(responseResult.ErrorCode, responseResult.ErrorMessage),
+ }
+ }
+ ret := &AlterUserScramCredentialsResponse{
+ Throttle: makeDuration(res.ThrottleTimeMs),
+ Results: responseEntries,
+ }
+
+ return ret, nil
+}
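
A usage sketch for the new AlterUserScramCredentials API, assuming a broker on Kafka 2.7+. The salt and salted password are placeholders; real code would derive SaltedPassword from the user's password (e.g. via PBKDF2) rather than hard-coding it:

```go
package main

import (
	"context"
	"log"

	"github.com/segmentio/kafka-go"
)

func main() {
	client := &kafka.Client{Addr: kafka.TCP("localhost:9092")} // placeholder broker

	resp, err := client.AlterUserScramCredentials(context.Background(),
		&kafka.AlterUserScramCredentialsRequest{
			Upsertions: []kafka.UserScramCredentialsUpsertion{{
				Name:           "alice", // placeholder user
				Mechanism:      kafka.ScramMechanismSha512,
				Iterations:     15000,
				Salt:           []byte("my-salt"),            // placeholder
				SaltedPassword: []byte("my-salted-password"), // placeholder
			}},
		})
	if err != nil {
		log.Fatal(err)
	}
	for _, r := range resp.Results {
		if r.Error != nil {
			log.Printf("user %s: %v", r.User, r.Error)
		}
	}
}
```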
diff --git a/alteruserscramcredentials_test.go b/alteruserscramcredentials_test.go
new file mode 100644
index 000000000..f980dfd11
--- /dev/null
+++ alteruserscramcredentials_test.go
@@ -0,0 +1,73 @@
+package kafka
+
+import (
+ "context"
+ "testing"
+
+ ktesting "github.com/segmentio/kafka-go/testing"
+)
+
+func TestAlterUserScramCredentials(t *testing.T) {
+ // https://issues.apache.org/jira/browse/KAFKA-10259
+ if !ktesting.KafkaIsAtLeast("2.7.0") {
+ return
+ }
+
+ client, shutdown := newLocalClient()
+ defer shutdown()
+
+ name := makeTopic()
+
+ createRes, err := client.AlterUserScramCredentials(context.Background(), &AlterUserScramCredentialsRequest{
+ Upsertions: []UserScramCredentialsUpsertion{
+ {
+ Name: name,
+ Mechanism: ScramMechanismSha512,
+ Iterations: 15000,
+ Salt: []byte("my-salt"),
+ SaltedPassword: []byte("my-salted-password"),
+ },
+ },
+ })
+
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if len(createRes.Results) != 1 {
+ t.Fatalf("expected 1 createResult; got %d", len(createRes.Results))
+ }
+
+ if createRes.Results[0].User != name {
+ t.Fatalf("expected createResult with user: %s, got %s", name, createRes.Results[0].User)
+ }
+
+ if createRes.Results[0].Error != nil {
+ t.Fatalf("didn't expect an error in createResult, got %v", createRes.Results[0].Error)
+ }
+
+ deleteRes, err := client.AlterUserScramCredentials(context.Background(), &AlterUserScramCredentialsRequest{
+ Deletions: []UserScramCredentialsDeletion{
+ {
+ Name: name,
+ Mechanism: ScramMechanismSha512,
+ },
+ },
+ })
+
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if len(deleteRes.Results) != 1 {
+ t.Fatalf("expected 1 deleteResult; got %d", len(deleteRes.Results))
+ }
+
+ if deleteRes.Results[0].User != name {
+ t.Fatalf("expected deleteResult with user: %s, got %s", name, deleteRes.Results[0].User)
+ }
+
+ if deleteRes.Results[0].Error != nil {
+ t.Fatalf("didn't expect an error in deleteResult, got %v", deleteRes.Results[0].Error)
+ }
+}
diff --git balancer.go balancer.go
index cd2e8c1c4..ee3a25885 100644
--- balancer.go
+++ balancer.go
@@ -7,7 +7,6 @@ import (
"math/rand"
"sort"
"sync"
- "sync/atomic"
)
// The Balancer interface provides an abstraction of the message distribution
@@ -36,11 +35,16 @@ func (f BalancerFunc) Balance(msg Message, partitions ...int) int {
}
// RoundRobin is an Balancer implementation that equally distributes messages
-// across all available partitions.
+// across all available partitions. It can take an optional chunk size to send
+// ChunkSize messages to the same partition before moving to the next partition.
+// This can be used to improve batch sizes.
type RoundRobin struct {
+ ChunkSize int
// Use a 32 bits integer so RoundRobin values don't need to be aligned to
- // apply atomic increments.
- offset uint32
+ // apply increments.
+ counter uint32
+
+ mutex sync.Mutex
}
// Balance satisfies the Balancer interface.
@@ -49,8 +53,17 @@ func (rr *RoundRobin) Balance(msg Message, partitions ...int) int {
}
func (rr *RoundRobin) balance(partitions []int) int {
- length := uint32(len(partitions))
- offset := atomic.AddUint32(&rr.offset, 1) - 1
+ rr.mutex.Lock()
+ defer rr.mutex.Unlock()
+
+ if rr.ChunkSize < 1 {
+ rr.ChunkSize = 1
+ }
+
+ length := len(partitions)
+ counterNow := rr.counter
+ offset := int(counterNow / uint32(rr.ChunkSize))
+ rr.counter++
return partitions[offset%length]
}
@@ -122,7 +135,7 @@ var (
//
// The logic to calculate the partition is:
//
-// hasher.Sum32() % len(partitions) => partition
+// hasher.Sum32() % len(partitions) => partition
//
// By default, Hash uses the FNV-1a algorithm. This is the same algorithm used
// by the Sarama Producer and ensures that messages produced by kafka-go will
@@ -173,7 +186,7 @@ func (h *Hash) Balance(msg Message, partitions ...int) int {
//
// The logic to calculate the partition is:
//
-// (int32(hasher.Sum32()) & 0x7fffffff) % len(partitions) => partition
+// (int32(hasher.Sum32()) & 0x7fffffff) % len(partitions) => partition
//
// By default, ReferenceHash uses the FNV-1a algorithm. This is the same algorithm as
// the Sarama NewReferenceHashPartitioner and ensures that messages produced by kafka-go will
@@ -260,7 +273,7 @@ func (b CRC32Balancer) Balance(msg Message, partitions ...int) (partition int) {
// determine which partition to route messages to. This ensures that messages
// with the same key are routed to the same partition. This balancer is
// compatible with the partitioner used by the Java library and by librdkafka's
-// "murmur2" and "murmur2_random" partitioners. /
+// "murmur2" and "murmur2_random" partitioners.
//
// With the Consistent field false (default), this partitioner is equivalent to
// the "murmur2_random" setting in librdkafka. When Consistent is true, this
diff --git balancer_test.go balancer_test.go
index a078f192f..149bc6800 100644
--- balancer_test.go
+++ balancer_test.go
@@ -411,3 +411,68 @@ func TestLeastBytes(t *testing.T) {
})
}
}
+
+func TestRoundRobin(t *testing.T) {
+ testCases := map[string]struct {
+ Partitions []int
+ ChunkSize int
+ }{
+ "default - odd partition count": {
+ Partitions: []int{0, 1, 2, 3, 4, 5, 6},
+ },
+ "negative chunk size - odd partition count": {
+ Partitions: []int{0, 1, 2, 3, 4, 5, 6},
+ ChunkSize: -1,
+ },
+ "0 chunk size - odd partition count": {
+ Partitions: []int{0, 1, 2, 3, 4, 5, 6},
+ ChunkSize: 0,
+ },
+ "5 chunk size - odd partition count": {
+ Partitions: []int{0, 1, 2, 3, 4, 5, 6},
+ ChunkSize: 5,
+ },
+ "12 chunk size - odd partition count": {
+ Partitions: []int{0, 1, 2, 3, 4, 5, 6},
+ ChunkSize: 12,
+ },
+ "default - even partition count": {
+ Partitions: []int{0, 1, 2, 3, 4, 5, 6, 7},
+ },
+ "negative chunk size - even partition count": {
+ Partitions: []int{0, 1, 2, 3, 4, 5, 6, 7},
+ ChunkSize: -1,
+ },
+ "0 chunk size - even partition count": {
+ Partitions: []int{0, 1, 2, 3, 4, 5, 6, 7},
+ ChunkSize: 0,
+ },
+ "5 chunk size - even partition count": {
+ Partitions: []int{0, 1, 2, 3, 4, 5, 6, 7},
+ ChunkSize: 5,
+ },
+ "12 chunk size - even partition count": {
+ Partitions: []int{0, 1, 2, 3, 4, 5, 6, 7},
+ ChunkSize: 12,
+ },
+ }
+ for label, test := range testCases {
+ t.Run(label, func(t *testing.T) {
+ lb := &RoundRobin{ChunkSize: test.ChunkSize}
+ msg := Message{}
+ var partition int
+ var i int
+ expectedChunkSize := test.ChunkSize
+ if expectedChunkSize < 1 {
+ expectedChunkSize = 1
+ }
+ partitions := test.Partitions
+ for i = 0; i < 50; i++ {
+ partition = lb.Balance(msg, partitions...)
+ if partition != i/expectedChunkSize%len(partitions) {
+ t.Error("Returned partition", partition, "expecting", i/expectedChunkSize%len(partitions))
+ }
+ }
+ })
+ }
+}
diff --git batch.go batch.go
index f9f3e5227..19dcef8cd 100644
--- batch.go
+++ batch.go
@@ -79,10 +79,16 @@ func (batch *Batch) close() (err error) {
batch.conn = nil
batch.lock = nil
+
if batch.msgs != nil {
batch.msgs.discard()
}
+ if batch.msgs != nil && batch.msgs.decompressed != nil {
+ releaseBuffer(batch.msgs.decompressed)
+ batch.msgs.decompressed = nil
+ }
+
if err = batch.err; errors.Is(batch.err, io.EOF) {
err = nil
}
diff --git compress/compress.go compress/compress.go
index 6e92968f2..054bf03d0 100644
--- compress/compress.go
+++ compress/compress.go
@@ -13,7 +13,7 @@ import (
"github.com/segmentio/kafka-go/compress/zstd"
)
-// Compression represents the the compression applied to a record set.
+// Compression represents the compression applied to a record set.
type Compression int8
const (
diff --git compress/compress_test.go compress/compress_test.go
index e4d2426d2..1da841227 100644
--- compress/compress_test.go
+++ compress/compress_test.go
@@ -2,7 +2,6 @@ package compress_test
import (
"bytes"
- stdgzip "compress/gzip"
"context"
"fmt"
"io"
@@ -16,6 +15,7 @@ import (
"text/tabwriter"
"time"
+ gz "github.com/klauspost/compress/gzip"
"github.com/segmentio/kafka-go"
pkg "github.com/segmentio/kafka-go/compress"
"github.com/segmentio/kafka-go/compress/gzip"
@@ -345,7 +345,7 @@ func BenchmarkCompression(b *testing.B) {
}
defer f.Close()
- z, err := stdgzip.NewReader(f)
+ z, err := gz.NewReader(f)
if err != nil {
b.Fatal(err)
}
@@ -366,8 +366,6 @@ func BenchmarkCompression(b *testing.B) {
fmt.Println(ts)
}()
- b.ResetTimer()
-
for i := range benchmarks {
benchmark := &benchmarks[i]
ratio := 0.0
@@ -389,6 +387,7 @@ func benchmarkCompression(b *testing.B, codec pkg.Codec, buf *bytes.Buffer, payl
b.Run("compress", func(b *testing.B) {
compressed = true
r := bytes.NewReader(payload)
+ b.ReportAllocs()
for i := 0; i < b.N; i++ {
buf.Reset()
@@ -422,7 +421,7 @@ func benchmarkCompression(b *testing.B, codec pkg.Codec, buf *bytes.Buffer, payl
b.Run("decompress", func(b *testing.B) {
c := bytes.NewReader(buf.Bytes())
-
+ b.ReportAllocs()
for i := 0; i < b.N; i++ {
c.Reset(buf.Bytes())
r := codec.NewReader(c)
diff --git compress/gzip/gzip.go compress/gzip/gzip.go
index 64da3129d..ad5009c39 100644
--- compress/gzip/gzip.go
+++ compress/gzip/gzip.go
@@ -1,9 +1,10 @@
package gzip
import (
- "compress/gzip"
"io"
"sync"
+
+ "github.com/klauspost/compress/gzip"
)
var (
diff --git compress/snappy/snappy.go compress/snappy/snappy.go
index a726ecebc..5bc6194f1 100644
--- compress/snappy/snappy.go
+++ compress/snappy/snappy.go
@@ -4,6 +4,7 @@ import (
"io"
"sync"
+ "github.com/klauspost/compress/s2"
"github.com/klauspost/compress/snappy"
)
@@ -16,6 +17,16 @@ const (
Unframed
)
+// Compression level.
+type Compression int
+
+const (
+ DefaultCompression Compression = iota
+ FasterCompression
+ BetterCompression
+ BestCompression
+)
+
var (
readerPool sync.Pool
writerPool sync.Pool
@@ -28,6 +39,9 @@ type Codec struct {
//
// Default to Framed.
Framing Framing
+
+ // Compression level.
+ Compression Compression
}
// Code implements the compress.Codec interface.
@@ -56,12 +70,19 @@ func (c *Codec) NewWriter(w io.Writer) io.WriteCloser {
if x != nil {
x.Reset(w)
} else {
- x = &xerialWriter{
- writer: w,
- encode: snappy.Encode,
- }
+ x = &xerialWriter{writer: w}
}
x.framed = c.Framing == Framed
+ switch c.Compression {
+ case FasterCompression:
+ x.encode = s2.EncodeSnappy
+ case BetterCompression:
+ x.encode = s2.EncodeSnappyBetter
+ case BestCompression:
+ x.encode = s2.EncodeSnappyBest
+ default:
+ x.encode = snappy.Encode // aka. s2.EncodeSnappyBetter
+ }
return &writer{xerialWriter: x}
}
diff --git conn.go conn.go
index e32dc2163..2b51afbd5 100644
--- conn.go
+++ conn.go
@@ -9,7 +9,6 @@ import (
"net"
"os"
"path/filepath"
- "runtime"
"sync"
"sync/atomic"
"time"
@@ -971,48 +970,101 @@ func (c *Conn) ReadPartitions(topics ...string) (partitions []Partition, err err
topics = nil
}
}
+ metadataVersion, err := c.negotiateVersion(metadata, v1, v6)
+ if err != nil {
+ return nil, err
+ }
err = c.readOperation(
func(deadline time.Time, id int32) error {
- return c.writeRequest(metadata, v1, id, topicMetadataRequestV1(topics))
+ switch metadataVersion {
+ case v6:
+ return c.writeRequest(metadata, v6, id, topicMetadataRequestV6{Topics: topics, AllowAutoTopicCreation: true})
+ default:
+ return c.writeRequest(metadata, v1, id, topicMetadataRequestV1(topics))
+ }
},
func(deadline time.Time, size int) error {
- var res metadataResponseV1
+ partitions, err = c.readPartitionsResponse(metadataVersion, size)
+ return err
+ },
+ )
+ return
+}
- if err := c.readResponse(size, &res); err != nil {
- return err
- }
+func (c *Conn) readPartitionsResponse(metadataVersion apiVersion, size int) ([]Partition, error) {
+ switch metadataVersion {
+ case v6:
+ var res metadataResponseV6
+ if err := c.readResponse(size, &res); err != nil {
+ return nil, err
+ }
+ brokers := readBrokerMetadata(res.Brokers)
+ return c.readTopicMetadatav6(brokers, res.Topics)
+ default:
+ var res metadataResponseV1
+ if err := c.readResponse(size, &res); err != nil {
+ return nil, err
+ }
+ brokers := readBrokerMetadata(res.Brokers)
+ return c.readTopicMetadatav1(brokers, res.Topics)
+ }
+}
- brokers := make(map[int32]Broker, len(res.Brokers))
- for _, b := range res.Brokers {
- brokers[b.NodeID] = Broker{
- Host: b.Host,
- Port: int(b.Port),
- ID: int(b.NodeID),
- Rack: b.Rack,
- }
- }
+func readBrokerMetadata(brokerMetadata []brokerMetadataV1) map[int32]Broker {
+ brokers := make(map[int32]Broker, len(brokerMetadata))
+ for _, b := range brokerMetadata {
+ brokers[b.NodeID] = Broker{
+ Host: b.Host,
+ Port: int(b.Port),
+ ID: int(b.NodeID),
+ Rack: b.Rack,
+ }
+ }
+ return brokers
+}
- for _, t := range res.Topics {
- if t.TopicErrorCode != 0 && (c.topic == "" || t.TopicName == c.topic) {
- // We only report errors if they happened for the topic of
- // the connection, otherwise the topic will simply have no
- // partitions in the result set.
- return Error(t.TopicErrorCode)
- }
- for _, p := range t.Partitions {
- partitions = append(partitions, Partition{
- Topic: t.TopicName,
- Leader: brokers[p.Leader],
- Replicas: makeBrokers(brokers, p.Replicas...),
- Isr: makeBrokers(brokers, p.Isr...),
- ID: int(p.PartitionID),
- })
- }
- }
- return nil
- },
- )
+func (c *Conn) readTopicMetadatav1(brokers map[int32]Broker, topicMetadata []topicMetadataV1) (partitions []Partition, err error) {
+ for _, t := range topicMetadata {
+ if t.TopicErrorCode != 0 && (c.topic == "" || t.TopicName == c.topic) {
+ // We only report errors if they happened for the topic of
+ // the connection, otherwise the topic will simply have no
+ // partitions in the result set.
+ return nil, Error(t.TopicErrorCode)
+ }
+ for _, p := range t.Partitions {
+ partitions = append(partitions, Partition{
+ Topic: t.TopicName,
+ Leader: brokers[p.Leader],
+ Replicas: makeBrokers(brokers, p.Replicas...),
+ Isr: makeBrokers(brokers, p.Isr...),
+ ID: int(p.PartitionID),
+ OfflineReplicas: []Broker{},
+ })
+ }
+ }
+ return
+}
+
+func (c *Conn) readTopicMetadatav6(brokers map[int32]Broker, topicMetadata []topicMetadataV6) (partitions []Partition, err error) {
+ for _, t := range topicMetadata {
+ if t.TopicErrorCode != 0 && (c.topic == "" || t.TopicName == c.topic) {
+ // We only report errors if they happened for the topic of
+ // the connection, otherwise the topic will simply have no
+ // partitions in the result set.
+ return nil, Error(t.TopicErrorCode)
+ }
+ for _, p := range t.Partitions {
+ partitions = append(partitions, Partition{
+ Topic: t.TopicName,
+ Leader: brokers[p.Leader],
+ Replicas: makeBrokers(brokers, p.Replicas...),
+ Isr: makeBrokers(brokers, p.Isr...),
+ ID: int(p.PartitionID),
+ OfflineReplicas: makeBrokers(brokers, p.OfflineReplicas...),
+ })
+ }
+ }
return
}
@@ -1366,7 +1418,6 @@ func (c *Conn) waitResponse(d *connDeadline, id int32) (deadline time.Time, size
// Optimistically release the read lock if a response has already
// been received but the current operation is not the target for it.
c.rlock.Unlock()
- runtime.Gosched()
}
c.leave()
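
ReadPartitions now negotiates Metadata v6 when the broker supports it, populating Partition.OfflineReplicas (it stays an empty slice on the v1 fallback). A sketch with a placeholder broker and topic:

```go
package main

import (
	"fmt"
	"log"

	"github.com/segmentio/kafka-go"
)

func main() {
	conn, err := kafka.Dial("tcp", "localhost:9092") // placeholder broker
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	partitions, err := conn.ReadPartitions("topic-A") // placeholder topic
	if err != nil {
		log.Fatal(err)
	}
	for _, p := range partitions {
		// OfflineReplicas is only populated when Metadata v6 is negotiated.
		fmt.Printf("%s/%d leader=%d offline=%d\n",
			p.Topic, p.ID, p.Leader.ID, len(p.OfflineReplicas))
	}
}
```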
diff --git createacl_test.go createacl_test.go
deleted file mode 100644
index 4f4b15380..000000000
--- createacl_test.go
+++ /dev/null
@@ -1,49 +0,0 @@
-package kafka
-
-import (
- "context"
- "testing"
-
- ktesting "github.com/segmentio/kafka-go/testing"
-)
-
-func TestClientCreateACLs(t *testing.T) {
- if !ktesting.KafkaIsAtLeast("2.0.1") {
- return
- }
-
- client, shutdown := newLocalClient()
- defer shutdown()
-
- res, err := client.CreateACLs(context.Background(), &CreateACLsRequest{
- ACLs: []ACLEntry{
- {
- Principal: "User:alice",
- PermissionType: ACLPermissionTypeAllow,
- Operation: ACLOperationTypeRead,
- ResourceType: ResourceTypeTopic,
- ResourcePatternType: PatternTypeLiteral,
- ResourceName: "fake-topic-for-alice",
- Host: "*",
- },
- {
- Principal: "User:bob",
- PermissionType: ACLPermissionTypeAllow,
- Operation: ACLOperationTypeRead,
- ResourceType: ResourceTypeGroup,
- ResourcePatternType: PatternTypeLiteral,
- ResourceName: "fake-group-for-bob",
- Host: "*",
- },
- },
- })
- if err != nil {
- t.Fatal(err)
- }
-
- for _, err := range res.Errors {
- if err != nil {
- t.Error(err)
- }
- }
-}
diff --git createacls.go createacls.go
index 672f6fdce..601974171 100644
--- createacls.go
+++ createacls.go
@@ -4,6 +4,7 @@ import (
"context"
"fmt"
"net"
+ "strings"
"time"
"github.com/segmentio/kafka-go/protocol/createacls"
@@ -42,6 +43,43 @@ const (
ACLPermissionTypeAllow ACLPermissionType = 3
)
+func (apt ACLPermissionType) String() string {
+ mapping := map[ACLPermissionType]string{
+ ACLPermissionTypeUnknown: "Unknown",
+ ACLPermissionTypeAny: "Any",
+ ACLPermissionTypeDeny: "Deny",
+ ACLPermissionTypeAllow: "Allow",
+ }
+ s, ok := mapping[apt]
+ if !ok {
+ s = mapping[ACLPermissionTypeUnknown]
+ }
+ return s
+}
+
+// MarshalText transforms an ACLPermissionType into its string representation.
+func (apt ACLPermissionType) MarshalText() ([]byte, error) {
+ return []byte(apt.String()), nil
+}
+
+// UnmarshalText takes a string representation of the resource type and converts it to an ACLPermissionType.
+func (apt *ACLPermissionType) UnmarshalText(text []byte) error {
+ normalized := strings.ToLower(string(text))
+ mapping := map[string]ACLPermissionType{
+ "unknown": ACLPermissionTypeUnknown,
+ "any": ACLPermissionTypeAny,
+ "deny": ACLPermissionTypeDeny,
+ "allow": ACLPermissionTypeAllow,
+ }
+ parsed, ok := mapping[normalized]
+ if !ok {
+ *apt = ACLPermissionTypeUnknown
+ return fmt.Errorf("cannot parse %s as an ACLPermissionType", normalized)
+ }
+ *apt = parsed
+ return nil
+}
+
type ACLOperationType int8
const (
@@ -60,6 +98,62 @@ const (
ACLOperationTypeIdempotentWrite ACLOperationType = 12
)
+func (aot ACLOperationType) String() string {
+ mapping := map[ACLOperationType]string{
+ ACLOperationTypeUnknown: "Unknown",
+ ACLOperationTypeAny: "Any",
+ ACLOperationTypeAll: "All",
+ ACLOperationTypeRead: "Read",
+ ACLOperationTypeWrite: "Write",
+ ACLOperationTypeCreate: "Create",
+ ACLOperationTypeDelete: "Delete",
+ ACLOperationTypeAlter: "Alter",
+ ACLOperationTypeDescribe: "Describe",
+ ACLOperationTypeClusterAction: "ClusterAction",
+ ACLOperationTypeDescribeConfigs: "DescribeConfigs",
+ ACLOperationTypeAlterConfigs: "AlterConfigs",
+ ACLOperationTypeIdempotentWrite: "IdempotentWrite",
+ }
+ s, ok := mapping[aot]
+ if !ok {
+ s = mapping[ACLOperationTypeUnknown]
+ }
+ return s
+}
+
+// MarshalText transforms an ACLOperationType into its string representation.
+func (aot ACLOperationType) MarshalText() ([]byte, error) {
+ return []byte(aot.String()), nil
+}
+
+// UnmarshalText takes a string representation of the resource type and converts it to an ACLPermissionType.
+func (aot *ACLOperationType) UnmarshalText(text []byte) error {
+ normalized := strings.ToLower(string(text))
+ mapping := map[string]ACLOperationType{
+ "unknown": ACLOperationTypeUnknown,
+ "any": ACLOperationTypeAny,
+ "all": ACLOperationTypeAll,
+ "read": ACLOperationTypeRead,
+ "write": ACLOperationTypeWrite,
+ "create": ACLOperationTypeCreate,
+ "delete": ACLOperationTypeDelete,
+ "alter": ACLOperationTypeAlter,
+ "describe": ACLOperationTypeDescribe,
+ "clusteraction": ACLOperationTypeClusterAction,
+ "describeconfigs": ACLOperationTypeDescribeConfigs,
+ "alterconfigs": ACLOperationTypeAlterConfigs,
+ "idempotentwrite": ACLOperationTypeIdempotentWrite,
+ }
+ parsed, ok := mapping[normalized]
+ if !ok {
+ *aot = ACLOperationTypeUnknown
+ return fmt.Errorf("cannot parse %s as an ACLOperationType", normalized)
+ }
+ *aot = parsed
+ return nil
+
+}
+
type ACLEntry struct {
ResourceType ResourceType
ResourceName string
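
The new String/MarshalText/UnmarshalText methods make the ACL enums usable with encoding/json and any other encoding.TextMarshaler-aware encoder, with case-insensitive parsing. A small sketch:

```go
package main

import (
	"encoding/json"
	"fmt"
	"log"

	"github.com/segmentio/kafka-go"
)

func main() {
	// Marshal uses the canonical name.
	out, err := json.Marshal(kafka.ACLOperationTypeRead)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(string(out)) // "Read"

	// Unmarshal is case-insensitive.
	var op kafka.ACLOperationType
	if err := json.Unmarshal([]byte(`"read"`), &op); err != nil {
		log.Fatal(err)
	}
	fmt.Println(op == kafka.ACLOperationTypeRead) // true
}
```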
diff --git a/createacls_test.go b/createacls_test.go
new file mode 100644
index 000000000..314427a57
--- /dev/null
+++ createacls_test.go
@@ -0,0 +1,86 @@
+package kafka
+
+import (
+ "context"
+ "testing"
+
+ ktesting "github.com/segmentio/kafka-go/testing"
+)
+
+func TestClientCreateACLs(t *testing.T) {
+ if !ktesting.KafkaIsAtLeast("2.0.1") {
+ return
+ }
+
+ client, shutdown := newLocalClient()
+ defer shutdown()
+
+ topic := makeTopic()
+ group := makeGroupID()
+
+ createRes, err := client.CreateACLs(context.Background(), &CreateACLsRequest{
+ ACLs: []ACLEntry{
+ {
+ Principal: "User:alice",
+ PermissionType: ACLPermissionTypeAllow,
+ Operation: ACLOperationTypeRead,
+ ResourceType: ResourceTypeTopic,
+ ResourcePatternType: PatternTypeLiteral,
+ ResourceName: topic,
+ Host: "*",
+ },
+ {
+ Principal: "User:bob",
+ PermissionType: ACLPermissionTypeAllow,
+ Operation: ACLOperationTypeRead,
+ ResourceType: ResourceTypeGroup,
+ ResourcePatternType: PatternTypeLiteral,
+ ResourceName: group,
+ Host: "*",
+ },
+ },
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ for _, err := range createRes.Errors {
+ if err != nil {
+ t.Error(err)
+ }
+ }
+}
+
+func TestACLPermissionTypeMarshal(t *testing.T) {
+ for i := ACLPermissionTypeUnknown; i <= ACLPermissionTypeAllow; i++ {
+ text, err := i.MarshalText()
+ if err != nil {
+ t.Errorf("couldn't marshal %d to text: %s", i, err)
+ }
+ var got ACLPermissionType
+ err = got.UnmarshalText(text)
+ if err != nil {
+ t.Errorf("couldn't unmarshal %s to ACLPermissionType: %s", text, err)
+ }
+ if got != i {
+ t.Errorf("got %d, want %d", got, i)
+ }
+ }
+}
+
+func TestACLOperationTypeMarshal(t *testing.T) {
+ for i := ACLOperationTypeUnknown; i <= ACLOperationTypeIdempotentWrite; i++ {
+ text, err := i.MarshalText()
+ if err != nil {
+ t.Errorf("couldn't marshal %d to text: %s", i, err)
+ }
+ var got ACLOperationType
+ err = got.UnmarshalText(text)
+ if err != nil {
+ t.Errorf("couldn't unmarshal %s to ACLOperationType: %s", text, err)
+ }
+ if got != i {
+ t.Errorf("got %d, want %d", got, i)
+ }
+ }
+}
diff --git createtopics.go createtopics.go
index 6767e07c8..8ad9ebf44 100644
--- createtopics.go
+++ createtopics.go
@@ -3,7 +3,6 @@ package kafka
import (
"bufio"
"context"
- "errors"
"fmt"
"net"
"time"
@@ -23,7 +22,7 @@ type CreateTopicsRequest struct {
// When set to true, topics are not created but the configuration is
// validated as if they were.
//
- // This field will be ignored if the kafka broker did no support the
+ // This field will be ignored if the kafka broker did not support the
// CreateTopics API in version 1 or above.
ValidateOnly bool
}
@@ -33,7 +32,7 @@ type CreateTopicsRequest struct {
type CreateTopicsResponse struct {
// The amount of time that the broker throttled the request.
//
- // This field will be zero if the kafka broker did no support the
+ // This field will be zero if the kafka broker did not support the
// CreateTopics API in version 2 or above.
Throttle time.Duration
@@ -65,7 +64,6 @@ func (c *Client) CreateTopics(ctx context.Context, req *CreateTopicsRequest) (*C
TimeoutMs: c.timeoutMs(ctx, defaultCreateTopicsTimeout),
ValidateOnly: req.ValidateOnly,
})
-
if err != nil {
return nil, fmt.Errorf("kafka.(*Client).CreateTopics: %w", err)
}
@@ -363,6 +361,9 @@ func (c *Conn) createTopics(request createTopicsRequestV0) (createTopicsResponse
return response, err
}
for _, tr := range response.TopicErrors {
+ if tr.ErrorCode == int16(TopicAlreadyExists) {
+ continue
+ }
if tr.ErrorCode != 0 {
return response, Error(tr.ErrorCode)
}
@@ -385,14 +386,5 @@ func (c *Conn) CreateTopics(topics ...TopicConfig) error {
_, err := c.createTopics(createTopicsRequestV0{
Topics: requestV0Topics,
})
- if err != nil {
- if errors.Is(err, TopicAlreadyExists) {
- // ok
- return nil
- }
-
- return err
- }
-
- return nil
+ return err
}
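
Conn.CreateTopics now skips TopicAlreadyExists per topic instead of surfacing it as an error, so repeated calls with the same config are idempotent while other errors (like the InvalidRequest case in the test below) still propagate. A sketch, assuming the connection targets the controller at a placeholder address:

```go
package main

import (
	"log"

	"github.com/segmentio/kafka-go"
)

func main() {
	conn, err := kafka.Dial("tcp", "localhost:9092") // placeholder controller address
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	cfg := kafka.TopicConfig{Topic: "topic-A", NumPartitions: 1, ReplicationFactor: 1}
	if err := conn.CreateTopics(cfg); err != nil {
		log.Fatal(err)
	}
	// Second call no longer returns TopicAlreadyExists.
	if err := conn.CreateTopics(cfg); err != nil {
		log.Fatal(err)
	}
}
```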
diff --git createtopics_test.go createtopics_test.go
index 71cf456e0..38819c382 100644
--- createtopics_test.go
+++ createtopics_test.go
@@ -4,10 +4,86 @@ import (
"bufio"
"bytes"
"context"
+ "errors"
+ "net"
"reflect"
+ "strconv"
"testing"
)
+func TestConnCreateTopics(t *testing.T) {
+ topic1 := makeTopic()
+ topic2 := makeTopic()
+
+ conn, err := DialContext(context.Background(), "tcp", "localhost:9092")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ defer func() {
+ err := conn.Close()
+ if err != nil {
+ t.Fatalf("failed to close connection: %v", err)
+ }
+ }()
+
+ controller, _ := conn.Controller()
+
+ controllerConn, err := Dial("tcp", net.JoinHostPort(controller.Host, strconv.Itoa(controller.Port)))
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer controllerConn.Close()
+
+ err = controllerConn.CreateTopics(TopicConfig{
+ Topic: topic1,
+ NumPartitions: 1,
+ ReplicationFactor: 1,
+ })
+ if err != nil {
+ t.Fatalf("unexpected error creating topic: %s", err.Error())
+ }
+
+ err = controllerConn.CreateTopics(TopicConfig{
+ Topic: topic1,
+ NumPartitions: 1,
+ ReplicationFactor: 1,
+ })
+
+ // Duplicate topic should not return an error
+ if err != nil {
+ t.Fatalf("unexpected error creating duplicate topic topic: %v", err)
+ }
+
+ err = controllerConn.CreateTopics(
+ TopicConfig{
+ Topic: topic1,
+ NumPartitions: 1,
+ ReplicationFactor: 1,
+ },
+ TopicConfig{
+ Topic: topic2,
+ NumPartitions: 1,
+ ReplicationFactor: 1,
+ },
+ TopicConfig{
+ Topic: topic2,
+ NumPartitions: 1,
+ ReplicationFactor: 1,
+ },
+ )
+
+ if err == nil {
+ t.Fatal("CreateTopics should have returned an error for invalid requests")
+ }
+
+ if !errors.Is(err, InvalidRequest) {
+ t.Fatalf("expected invalid request: %v", err)
+ }
+
+ deleteTopic(t, topic1)
+}
+
func TestClientCreateTopics(t *testing.T) {
const (
topic1 = "client-topic-1"
@@ -59,7 +135,6 @@ func TestClientCreateTopics(t *testing.T) {
},
},
})
-
if err != nil {
t.Fatal(err)
}
diff --git a/deleteacls.go b/deleteacls.go
new file mode 100644
index 000000000..64cbd26d1
--- /dev/null
+++ deleteacls.go
@@ -0,0 +1,114 @@
+package kafka
+
+import (
+ "context"
+ "fmt"
+ "net"
+ "time"
+
+ "github.com/segmentio/kafka-go/protocol/deleteacls"
+)
+
+// DeleteACLsRequest represents a request sent to a kafka broker to delete
+// ACLs.
+type DeleteACLsRequest struct {
+ // Address of the kafka broker to send the request to.
+ Addr net.Addr
+
+ // List of ACL filters to use for deletion.
+ Filters []DeleteACLsFilter
+}
+
+type DeleteACLsFilter struct {
+ ResourceTypeFilter ResourceType
+ ResourceNameFilter string
+ ResourcePatternTypeFilter PatternType
+ PrincipalFilter string
+ HostFilter string
+ Operation ACLOperationType
+ PermissionType ACLPermissionType
+}
+
+// DeleteACLsResponse represents a response from a kafka broker to an ACL
+// deletion request.
+type DeleteACLsResponse struct {
+ // The amount of time that the broker throttled the request.
+ Throttle time.Duration
+
+ // List of the results from the deletion request.
+ Results []DeleteACLsResult
+}
+
+type DeleteACLsResult struct {
+ Error error
+ MatchingACLs []DeleteACLsMatchingACLs
+}
+
+type DeleteACLsMatchingACLs struct {
+ Error error
+ ResourceType ResourceType
+ ResourceName string
+ ResourcePatternType PatternType
+ Principal string
+ Host string
+ Operation ACLOperationType
+ PermissionType ACLPermissionType
+}
+
+// DeleteACLs sends ACLs deletion request to a kafka broker and returns the
+// response.
+func (c *Client) DeleteACLs(ctx context.Context, req *DeleteACLsRequest) (*DeleteACLsResponse, error) {
+ filters := make([]deleteacls.RequestFilter, 0, len(req.Filters))
+
+ for _, filter := range req.Filters {
+ filters = append(filters, deleteacls.RequestFilter{
+ ResourceTypeFilter: int8(filter.ResourceTypeFilter),
+ ResourceNameFilter: filter.ResourceNameFilter,
+ ResourcePatternTypeFilter: int8(filter.ResourcePatternTypeFilter),
+ PrincipalFilter: filter.PrincipalFilter,
+ HostFilter: filter.HostFilter,
+ Operation: int8(filter.Operation),
+ PermissionType: int8(filter.PermissionType),
+ })
+ }
+
+ m, err := c.roundTrip(ctx, req.Addr, &deleteacls.Request{
+ Filters: filters,
+ })
+ if err != nil {
+ return nil, fmt.Errorf("kafka.(*Client).DeleteACLs: %w", err)
+ }
+
+ res := m.(*deleteacls.Response)
+
+ results := make([]DeleteACLsResult, 0, len(res.FilterResults))
+
+ for _, result := range res.FilterResults {
+ matchingACLs := make([]DeleteACLsMatchingACLs, 0, len(result.MatchingACLs))
+
+ for _, matchingACL := range result.MatchingACLs {
+ matchingACLs = append(matchingACLs, DeleteACLsMatchingACLs{
+ Error: makeError(matchingACL.ErrorCode, matchingACL.ErrorMessage),
+ ResourceType: ResourceType(matchingACL.ResourceType),
+ ResourceName: matchingACL.ResourceName,
+ ResourcePatternType: PatternType(matchingACL.ResourcePatternType),
+ Principal: matchingACL.Principal,
+ Host: matchingACL.Host,
+ Operation: ACLOperationType(matchingACL.Operation),
+ PermissionType: ACLPermissionType(matchingACL.PermissionType),
+ })
+ }
+
+ results = append(results, DeleteACLsResult{
+ Error: makeError(result.ErrorCode, result.ErrorMessage),
+ MatchingACLs: matchingACLs,
+ })
+ }
+
+ ret := &DeleteACLsResponse{
+ Throttle: makeDuration(res.ThrottleTimeMs),
+ Results: results,
+ }
+
+ return ret, nil
+}
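
A usage sketch for the new DeleteACLs API; the broker address and topic name are placeholders:

```go
package main

import (
	"context"
	"log"

	"github.com/segmentio/kafka-go"
)

func main() {
	client := &kafka.Client{Addr: kafka.TCP("localhost:9092")}

	// Delete all literal read-allow ACLs on one topic and report what matched.
	resp, err := client.DeleteACLs(context.Background(), &kafka.DeleteACLsRequest{
		Filters: []kafka.DeleteACLsFilter{{
			ResourceTypeFilter:        kafka.ResourceTypeTopic,
			ResourceNameFilter:        "topic-A", // placeholder topic
			ResourcePatternTypeFilter: kafka.PatternTypeLiteral,
			Operation:                 kafka.ACLOperationTypeRead,
			PermissionType:            kafka.ACLPermissionTypeAllow,
		}},
	})
	if err != nil {
		log.Fatal(err)
	}
	for _, result := range resp.Results {
		if result.Error != nil {
			log.Println("filter failed:", result.Error)
			continue
		}
		for _, acl := range result.MatchingACLs {
			log.Printf("deleted ACL for %s on %s", acl.Principal, acl.ResourceName)
		}
	}
}
```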
diff --git a/deleteacls_test.go b/deleteacls_test.go
new file mode 100644
index 000000000..299cede22
--- /dev/null
+++ deleteacls_test.go
@@ -0,0 +1,112 @@
+package kafka
+
+import (
+ "context"
+ "testing"
+
+ ktesting "github.com/segmentio/kafka-go/testing"
+ "github.com/stretchr/testify/assert"
+)
+
+func TestClientDeleteACLs(t *testing.T) {
+ if !ktesting.KafkaIsAtLeast("2.0.1") {
+ return
+ }
+
+ client, shutdown := newLocalClient()
+ defer shutdown()
+
+ topic := makeTopic()
+ group := makeGroupID()
+
+ createRes, err := client.CreateACLs(context.Background(), &CreateACLsRequest{
+ ACLs: []ACLEntry{
+ {
+ Principal: "User:alice",
+ PermissionType: ACLPermissionTypeAllow,
+ Operation: ACLOperationTypeRead,
+ ResourceType: ResourceTypeTopic,
+ ResourcePatternType: PatternTypeLiteral,
+ ResourceName: topic,
+ Host: "*",
+ },
+ {
+ Principal: "User:bob",
+ PermissionType: ACLPermissionTypeAllow,
+ Operation: ACLOperationTypeRead,
+ ResourceType: ResourceTypeGroup,
+ ResourcePatternType: PatternTypeLiteral,
+ ResourceName: group,
+ Host: "*",
+ },
+ },
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ for _, err := range createRes.Errors {
+ if err != nil {
+ t.Error(err)
+ }
+ }
+
+ deleteResp, err := client.DeleteACLs(context.Background(), &DeleteACLsRequest{
+ Filters: []DeleteACLsFilter{
+ {
+ ResourceTypeFilter: ResourceTypeTopic,
+ ResourceNameFilter: topic,
+ ResourcePatternTypeFilter: PatternTypeLiteral,
+ Operation: ACLOperationTypeRead,
+ PermissionType: ACLPermissionTypeAllow,
+ },
+ },
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ expectedDeleteResp := DeleteACLsResponse{
+ Throttle: 0,
+ Results: []DeleteACLsResult{
+ {
+ Error: makeError(0, ""),
+ MatchingACLs: []DeleteACLsMatchingACLs{
+ {
+ Error: makeError(0, ""),
+ ResourceType: ResourceTypeTopic,
+ ResourceName: topic,
+ ResourcePatternType: PatternTypeLiteral,
+ Principal: "User:alice",
+ Host: "*",
+ Operation: ACLOperationTypeRead,
+ PermissionType: ACLPermissionTypeAllow,
+ },
+ },
+ },
+ },
+ }
+
+ assert.Equal(t, expectedDeleteResp, *deleteResp)
+
+ describeResp, err := client.DescribeACLs(context.Background(), &DescribeACLsRequest{
+ Filter: ACLFilter{
+ ResourceTypeFilter: ResourceTypeTopic,
+ ResourceNameFilter: topic,
+ ResourcePatternTypeFilter: PatternTypeLiteral,
+ Operation: ACLOperationTypeRead,
+ PermissionType: ACLPermissionTypeAllow,
+ },
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ expectedDescribeResp := DescribeACLsResponse{
+ Throttle: 0,
+ Error: makeError(0, ""),
+ Resources: []ACLResource{},
+ }
+
+ assert.Equal(t, expectedDescribeResp, *describeResp)
+}
diff --git a/deletegroups.go b/deletegroups.go
new file mode 100644
index 000000000..6317ae7fa
--- /dev/null
+++ deletegroups.go
@@ -0,0 +1,60 @@
+package kafka
+
+import (
+ "context"
+ "fmt"
+ "net"
+ "time"
+
+ "github.com/segmentio/kafka-go/protocol/deletegroups"
+)
+
+// DeleteGroupsRequest represents a request sent to a kafka broker to delete
+// consumer groups.
+type DeleteGroupsRequest struct {
+ // Address of the kafka broker to send the request to.
+ Addr net.Addr
+
+ // Identifiers of groups to delete.
+ GroupIDs []string
+}
+
+// DeleteGroupsResponse represents a response from a kafka broker to a consumer group
+// deletion request.
+type DeleteGroupsResponse struct {
+ // The amount of time that the broker throttled the request.
+ Throttle time.Duration
+
+ // Mapping of group ids to errors that occurred while attempting to delete those groups.
+ //
+ // The errors contain the kafka error code. Programs may use the standard
+ // errors.Is function to test the error against kafka error codes.
+ Errors map[string]error
+}
+
+// DeleteGroups sends a delete groups request and returns the response. The request is sent to the group coordinator of the first group
+// of the request. All deleted groups must be managed by the same group coordinator.
+func (c *Client) DeleteGroups(
+ ctx context.Context,
+ req *DeleteGroupsRequest,
+) (*DeleteGroupsResponse, error) {
+ m, err := c.roundTrip(ctx, req.Addr, &deletegroups.Request{
+ GroupIDs: req.GroupIDs,
+ })
+ if err != nil {
+ return nil, fmt.Errorf("kafka.(*Client).DeleteGroups: %w", err)
+ }
+
+ r := m.(*deletegroups.Response)
+
+ ret := &DeleteGroupsResponse{
+ Throttle: makeDuration(r.ThrottleTimeMs),
+ Errors: make(map[string]error, len(r.Responses)),
+ }
+
+ for _, t := range r.Responses {
+ ret.Errors[t.GroupID] = makeError(t.ErrorCode, "")
+ }
+
+ return ret, nil
+}
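
A usage sketch for the new DeleteGroups API; the request must reach the group coordinator of the listed groups, and the broker address and group ID are placeholders:

```go
package main

import (
	"context"
	"log"

	"github.com/segmentio/kafka-go"
)

func main() {
	client := &kafka.Client{Addr: kafka.TCP("localhost:9092")}

	resp, err := client.DeleteGroups(context.Background(), &kafka.DeleteGroupsRequest{
		GroupIDs: []string{"consumer-group-id"}, // placeholder group
	})
	if err != nil {
		log.Fatal(err)
	}
	for groupID, groupErr := range resp.Errors {
		if groupErr != nil {
			// e.g. NonEmptyGroup if the group still has active members.
			log.Printf("delete %s: %v", groupID, groupErr)
		}
	}
}
```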
diff --git a/deletegroups_test.go b/deletegroups_test.go
new file mode 100644
index 000000000..e857ac435
--- /dev/null
+++ deletegroups_test.go
@@ -0,0 +1,80 @@
+package kafka
+
+import (
+ "context"
+ "errors"
+ "testing"
+ "time"
+
+ ktesting "github.com/segmentio/kafka-go/testing"
+)
+
+func TestClientDeleteGroups(t *testing.T) {
+ if !ktesting.KafkaIsAtLeast("1.1.0") {
+ t.Skip("Skipping test because kafka version is not high enough.")
+ }
+
+ client, shutdown := newLocalClient()
+ defer shutdown()
+
+ topic := makeTopic()
+ createTopic(t, topic, 1)
+
+ groupID := makeGroupID()
+
+ group, err := NewConsumerGroup(ConsumerGroupConfig{
+ ID: groupID,
+ Topics: []string{topic},
+ Brokers: []string{"localhost:9092"},
+ HeartbeatInterval: 2 * time.Second,
+ RebalanceTimeout: 2 * time.Second,
+ RetentionTime: time.Hour,
+ Logger: &testKafkaLogger{T: t},
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer group.Close()
+
+ ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
+ defer cancel()
+
+ gen, err := group.Next(ctx)
+ if gen == nil {
+ t.Fatalf("expected generation 1 not to be nil")
+ }
+ if err != nil {
+ t.Fatalf("expected no error, but got %+v", err)
+ }
+
+ // delete not empty group
+ res, err := client.DeleteGroups(ctx, &DeleteGroupsRequest{
+ GroupIDs: []string{groupID},
+ })
+
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if !errors.Is(res.Errors[groupID], NonEmptyGroup) {
+ t.Fatalf("expected NonEmptyGroup error, but got %+v", res.Errors[groupID])
+ }
+
+ err = group.Close()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // delete empty group
+ res, err = client.DeleteGroups(ctx, &DeleteGroupsRequest{
+ GroupIDs: []string{groupID},
+ })
+
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if err = res.Errors[groupID]; err != nil {
+ t.Error(err)
+ }
+}
diff --git deletetopics.go deletetopics.go
index 470f9ef83..d758d9fd6 100644
--- deletetopics.go
+++ deletetopics.go
@@ -25,7 +25,7 @@ type DeleteTopicsRequest struct {
type DeleteTopicsResponse struct {
// The amount of time that the broker throttled the request.
//
- // This field will be zero if the kafka broker did no support the
+ // This field will be zero if the kafka broker did not support the
// DeleteTopics API in version 1 or above.
Throttle time.Duration
diff --git a/describeacls.go b/describeacls.go
new file mode 100644
index 000000000..d1093bbed
--- /dev/null
+++ describeacls.go
@@ -0,0 +1,107 @@
+package kafka
+
+import (
+ "context"
+ "fmt"
+ "net"
+ "time"
+
+ "github.com/segmentio/kafka-go/protocol/describeacls"
+)
+
+// DescribeACLsRequest represents a request sent to a kafka broker to describe
+// existing ACLs.
+type DescribeACLsRequest struct {
+ // Address of the kafka broker to send the request to.
+ Addr net.Addr
+
+ // Filter to filter ACLs on.
+ Filter ACLFilter
+}
+
+type ACLFilter struct {
+ ResourceTypeFilter ResourceType
+ ResourceNameFilter string
+ // ResourcePatternTypeFilter was added in v1 and is not available prior to that.
+ ResourcePatternTypeFilter PatternType
+ PrincipalFilter string
+ HostFilter string
+ Operation ACLOperationType
+ PermissionType ACLPermissionType
+}
+
+// DescribeACLsResponse represents a response from a kafka broker to an ACL
+// describe request.
+type DescribeACLsResponse struct {
+ // The amount of time that the broker throttled the request.
+ Throttle time.Duration
+
+ // Error that occurred while attempting to describe
+ // the ACLs.
+ Error error
+
+ // ACL resources returned from the describe request.
+ Resources []ACLResource
+}
+
+type ACLResource struct {
+ ResourceType ResourceType
+ ResourceName string
+ PatternType PatternType
+ ACLs []ACLDescription
+}
+
+type ACLDescription struct {
+ Principal string
+ Host string
+ Operation ACLOperationType
+ PermissionType ACLPermissionType
+}
+
+func (c *Client) DescribeACLs(ctx context.Context, req *DescribeACLsRequest) (*DescribeACLsResponse, error) {
+ m, err := c.roundTrip(ctx, req.Addr, &describeacls.Request{
+ Filter: describeacls.ACLFilter{
+ ResourceTypeFilter: int8(req.Filter.ResourceTypeFilter),
+ ResourceNameFilter: req.Filter.ResourceNameFilter,
+ ResourcePatternTypeFilter: int8(req.Filter.ResourcePatternTypeFilter),
+ PrincipalFilter: req.Filter.PrincipalFilter,
+ HostFilter: req.Filter.HostFilter,
+ Operation: int8(req.Filter.Operation),
+ PermissionType: int8(req.Filter.PermissionType),
+ },
+ })
+ if err != nil {
+ return nil, fmt.Errorf("kafka.(*Client).DescribeACLs: %w", err)
+ }
+
+ res := m.(*describeacls.Response)
+ resources := make([]ACLResource, len(res.Resources))
+
+ for resourceIdx, respResource := range res.Resources {
+ descriptions := make([]ACLDescription, len(respResource.ACLs))
+
+ for descriptionIdx, respDescription := range respResource.ACLs {
+ descriptions[descriptionIdx] = ACLDescription{
+ Principal: respDescription.Principal,
+ Host: respDescription.Host,
+ Operation: ACLOperationType(respDescription.Operation),
+ PermissionType: ACLPermissionType(respDescription.PermissionType),
+ }
+ }
+
+ resources[resourceIdx] = ACLResource{
+ ResourceType: ResourceType(respResource.ResourceType),
+ ResourceName: respResource.ResourceName,
+ PatternType: PatternType(respResource.PatternType),
+ ACLs: descriptions,
+ }
+ }
+
+ ret := &DescribeACLsResponse{
+ Throttle: makeDuration(res.ThrottleTimeMs),
+ Error: makeError(res.ErrorCode, res.ErrorMessage),
+ Resources: resources,
+ }
+
+ return ret, nil
+}
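
A usage sketch for the new DescribeACLs API; the broker address and topic name are placeholders:

```go
package main

import (
	"context"
	"log"

	"github.com/segmentio/kafka-go"
)

func main() {
	client := &kafka.Client{Addr: kafka.TCP("localhost:9092")}

	// List literal read-allow ACLs on one topic.
	resp, err := client.DescribeACLs(context.Background(), &kafka.DescribeACLsRequest{
		Filter: kafka.ACLFilter{
			ResourceTypeFilter:        kafka.ResourceTypeTopic,
			ResourceNameFilter:        "topic-A", // placeholder topic
			ResourcePatternTypeFilter: kafka.PatternTypeLiteral,
			Operation:                 kafka.ACLOperationTypeRead,
			PermissionType:            kafka.ACLPermissionTypeAllow,
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	for _, res := range resp.Resources {
		for _, acl := range res.ACLs {
			log.Printf("%s may %s %s", acl.Principal, acl.Operation, res.ResourceName)
		}
	}
}
```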
diff --git a/describeacls_test.go b/describeacls_test.go
new file mode 100644
index 000000000..25585b25c
--- /dev/null
+++ describeacls_test.go
@@ -0,0 +1,88 @@
+package kafka
+
+import (
+ "context"
+ "testing"
+
+ ktesting "github.com/segmentio/kafka-go/testing"
+ "github.com/stretchr/testify/assert"
+)
+
+func TestClientDescribeACLs(t *testing.T) {
+ if !ktesting.KafkaIsAtLeast("2.0.1") {
+ return
+ }
+
+ client, shutdown := newLocalClient()
+ defer shutdown()
+
+ topic := makeTopic()
+ group := makeGroupID()
+
+ createRes, err := client.CreateACLs(context.Background(), &CreateACLsRequest{
+ ACLs: []ACLEntry{
+ {
+ Principal: "User:alice",
+ PermissionType: ACLPermissionTypeAllow,
+ Operation: ACLOperationTypeRead,
+ ResourceType: ResourceTypeTopic,
+ ResourcePatternType: PatternTypeLiteral,
+ ResourceName: topic,
+ Host: "*",
+ },
+ {
+ Principal: "User:bob",
+ PermissionType: ACLPermissionTypeAllow,
+ Operation: ACLOperationTypeRead,
+ ResourceType: ResourceTypeGroup,
+ ResourcePatternType: PatternTypeLiteral,
+ ResourceName: group,
+ Host: "*",
+ },
+ },
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ for _, err := range createRes.Errors {
+ if err != nil {
+ t.Error(err)
+ }
+ }
+
+ describeResp, err := client.DescribeACLs(context.Background(), &DescribeACLsRequest{
+ Filter: ACLFilter{
+ ResourceTypeFilter: ResourceTypeTopic,
+ ResourceNameFilter: topic,
+ ResourcePatternTypeFilter: PatternTypeLiteral,
+ Operation: ACLOperationTypeRead,
+ PermissionType: ACLPermissionTypeAllow,
+ },
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ expectedDescribeResp := DescribeACLsResponse{
+ Throttle: 0,
+ Error: makeError(0, ""),
+ Resources: []ACLResource{
+ {
+ ResourceType: ResourceTypeTopic,
+ ResourceName: topic,
+ PatternType: PatternTypeLiteral,
+ ACLs: []ACLDescription{
+ {
+ Principal: "User:alice",
+ Host: "*",
+ Operation: ACLOperationTypeRead,
+ PermissionType: ACLPermissionTypeAllow,
+ },
+ },
+ },
+ },
+ }
+
+ assert.Equal(t, expectedDescribeResp, *describeResp)
+}
diff --git a/describeclientquotas.go b/describeclientquotas.go
new file mode 100644
index 000000000..6291dcd98
--- /dev/null
+++ describeclientquotas.go
@@ -0,0 +1,126 @@
+package kafka
+
+import (
+ "context"
+ "fmt"
+ "net"
+ "time"
+
+ "github.com/segmentio/kafka-go/protocol/describeclientquotas"
+)
+
+// DescribeClientQuotasRequest represents a request sent to a kafka broker to
+// describe client quotas.
+type DescribeClientQuotasRequest struct {
+ // Address of the kafka broker to send the request to
+ Addr net.Addr
+
+ // List of quota components to describe.
+ Components []DescribeClientQuotasRequestComponent
+
+ // Whether the match is strict, i.e. should exclude entities with
+ // unspecified entity types.
+ Strict bool
+}
+
+type DescribeClientQuotasRequestComponent struct {
+ // The entity type that the filter component applies to.
+ EntityType string
+
+ // How to match the entity (0 = exact name, 1 = default name,
+ // 2 = any specified name).
+ MatchType int8
+
+ // The string to match against, or null if unused for the match type.
+ Match string
+}
+
+// DescribeClientQuotasResponse represents a response from a kafka broker to a describe client quotas request.
+type DescribeClientQuotasResponse struct {
+ // The amount of time that the broker throttled the request.
+ Throttle time.Duration
+
+ // Error is set to a non-nil value including the code and message if a top-level
+ // error was encountered while describing the client quotas.
+ Error error
+
+ // List of describe client quota responses.
+ Entries []DescribeClientQuotasResponseQuotas
+}
+
+type DescribeClientQuotasEntity struct {
+ // The quota entity type.
+ EntityType string
+
+ // The name of the quota entity, or null if the default.
+ EntityName string
+}
+
+type DescribeClientQuotasValue struct {
+ // The quota configuration key.
+ Key string
+
+ // The quota configuration value.
+ Value float64
+}
+
+type DescribeClientQuotasResponseQuotas struct {
+ // List of client quota entities and their descriptions.
+ Entities []DescribeClientQuotasEntity
+
+ // The client quota configuration values.
+ Values []DescribeClientQuotasValue
+}
+
+// DescribeClientQuotas sends a describe client quotas request to a kafka broker and returns
+// the response.
+func (c *Client) DescribeClientQuotas(ctx context.Context, req *DescribeClientQuotasRequest) (*DescribeClientQuotasResponse, error) {
+ components := make([]describeclientquotas.Component, len(req.Components))
+
+ for componentIdx, component := range req.Components {
+ components[componentIdx] = describeclientquotas.Component{
+ EntityType: component.EntityType,
+ MatchType: component.MatchType,
+ Match: component.Match,
+ }
+ }
+
+ m, err := c.roundTrip(ctx, req.Addr, &describeclientquotas.Request{
+ Components: components,
+ Strict: req.Strict,
+ })
+ if err != nil {
+ return nil, fmt.Errorf("kafka.(*Client).DescribeClientQuotas: %w", err)
+ }
+
+ res := m.(*describeclientquotas.Response)
+ responseEntries := make([]DescribeClientQuotasResponseQuotas, len(res.Entries))
+
+ for responseEntryIdx, responseEntry := range res.Entries {
+ responseEntities := make([]DescribeClientQuotasEntity, len(responseEntry.Entities))
+ for responseEntityIdx, responseEntity := range responseEntry.Entities {
+ responseEntities[responseEntityIdx] = DescribeClientQuotasEntity{
+ EntityType: responseEntity.EntityType,
+ EntityName: responseEntity.EntityName,
+ }
+ }
+
+ responseValues := make([]DescribeClientQuotasValue, len(responseEntry.Values))
+ for responseValueIdx, responseValue := range responseEntry.Values {
+ responseValues[responseValueIdx] = DescribeClientQuotasValue{
+ Key: responseValue.Key,
+ Value: responseValue.Value,
+ }
+ }
+ responseEntries[responseEntryIdx] = DescribeClientQuotasResponseQuotas{
+ Entities: responseEntities,
+ Values: responseValues,
+ }
+ }
+ ret := &DescribeClientQuotasResponse{
+ Throttle: time.Duration(res.ThrottleTimeMs),
+ Entries: responseEntries,
+ }
+
+ return ret, nil
+}
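
A hedged sketch of calling the new `DescribeClientQuotas` API; the entity type and client id are placeholders, and `MatchType: 0` follows the exact-name convention documented on the request struct:

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/segmentio/kafka-go"
)

func main() {
	client := &kafka.Client{Addr: kafka.TCP("localhost:9092")}

	resp, err := client.DescribeClientQuotas(context.Background(), &kafka.DescribeClientQuotasRequest{
		Components: []kafka.DescribeClientQuotasRequestComponent{{
			EntityType: "client-id",
			MatchType:  0, // 0 = exact name, per the comment on the struct
			Match:      "my-client-id",
		}},
		Strict: false,
	})
	if err != nil {
		log.Fatal(err)
	}
	for _, entry := range resp.Entries {
		for _, e := range entry.Entities {
			fmt.Printf("entity %s/%s\n", e.EntityType, e.EntityName)
		}
		for _, v := range entry.Values {
			fmt.Printf("  %s = %v\n", v.Key, v.Value)
		}
	}
}
```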
diff --git describeconfigs.go describeconfigs.go
index 4f5c09514..17f4f305f 100644
--- describeconfigs.go
+++ describeconfigs.go
@@ -14,7 +14,7 @@ type DescribeConfigsRequest struct {
// Address of the kafka broker to send the request to.
Addr net.Addr
- // List of resources to update.
+ // List of resources to get details for.
Resources []DescribeConfigRequestResource
// Ignored if API version is less than v1
diff --git a/describeuserscramcredentials.go b/describeuserscramcredentials.go
new file mode 100644
index 000000000..7194ea1e0
--- /dev/null
+++ describeuserscramcredentials.go
@@ -0,0 +1,97 @@
+package kafka
+
+import (
+ "context"
+ "fmt"
+ "net"
+ "time"
+
+ "github.com/segmentio/kafka-go/protocol/describeuserscramcredentials"
+)
+
+// DescribeUserScramCredentialsRequest represents a request sent to a kafka broker to
+// describe user scram credentials.
+type DescribeUserScramCredentialsRequest struct {
+ // Address of the kafka broker to send the request to.
+ Addr net.Addr
+
+ // List of Scram users to describe
+ Users []UserScramCredentialsUser
+}
+
+type UserScramCredentialsUser struct {
+ Name string
+}
+
+// DescribeUserScramCredentialsResponse represents a response from a kafka broker to a describe user
+// credentials request.
+type DescribeUserScramCredentialsResponse struct {
+ // The amount of time that the broker throttled the request.
+ Throttle time.Duration
+
+ // Top level error that occurred while attempting to describe
+ // the user scram credentials.
+ //
+ // The errors contain the kafka error code. Programs may use the standard
+ // errors.Is function to test the error against kafka error codes.
+ Error error
+
+ // List of described user scram credentials.
+ Results []DescribeUserScramCredentialsResponseResult
+}
+
+type DescribeUserScramCredentialsResponseResult struct {
+ User string
+ CredentialInfos []DescribeUserScramCredentialsCredentialInfo
+ Error error
+}
+
+type DescribeUserScramCredentialsCredentialInfo struct {
+ Mechanism ScramMechanism
+ Iterations int
+}
+
+// DescribeUserScramCredentials sends a user scram credentials describe request to a kafka broker and returns
+// the response.
+func (c *Client) DescribeUserScramCredentials(ctx context.Context, req *DescribeUserScramCredentialsRequest) (*DescribeUserScramCredentialsResponse, error) {
+ users := make([]describeuserscramcredentials.RequestUser, len(req.Users))
+
+ for userIdx, user := range req.Users {
+ users[userIdx] = describeuserscramcredentials.RequestUser{
+ Name: user.Name,
+ }
+ }
+
+ m, err := c.roundTrip(ctx, req.Addr, &describeuserscramcredentials.Request{
+ Users: users,
+ })
+ if err != nil {
+ return nil, fmt.Errorf("kafka.(*Client).DescribeUserScramCredentials: %w", err)
+ }
+
+ res := m.(*describeuserscramcredentials.Response)
+ responseResults := make([]DescribeUserScramCredentialsResponseResult, len(res.Results))
+
+ for responseIdx, responseResult := range res.Results {
+ credentialInfos := make([]DescribeUserScramCredentialsCredentialInfo, len(responseResult.CredentialInfos))
+
+ for credentialInfoIdx, credentialInfo := range responseResult.CredentialInfos {
+ credentialInfos[credentialInfoIdx] = DescribeUserScramCredentialsCredentialInfo{
+ Mechanism: ScramMechanism(credentialInfo.Mechanism),
+ Iterations: int(credentialInfo.Iterations),
+ }
+ }
+ responseResults[responseIdx] = DescribeUserScramCredentialsResponseResult{
+ User: responseResult.User,
+ CredentialInfos: credentialInfos,
+ Error: makeError(responseResult.ErrorCode, responseResult.ErrorMessage),
+ }
+ }
+ ret := &DescribeUserScramCredentialsResponse{
+ Throttle: makeDuration(res.ThrottleTimeMs),
+ Error: makeError(res.ErrorCode, res.ErrorMessage),
+ Results: responseResults,
+ }
+
+ return ret, nil
+}
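
The test file below exercises the full roundtrip; as a quick illustration, a call to the new `DescribeUserScramCredentials` method could look like this (the user name is a placeholder):

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/segmentio/kafka-go"
)

func main() {
	client := &kafka.Client{Addr: kafka.TCP("localhost:9092")}

	resp, err := client.DescribeUserScramCredentials(context.Background(),
		&kafka.DescribeUserScramCredentialsRequest{
			Users: []kafka.UserScramCredentialsUser{{Name: "my-scram-user"}},
		})
	if err != nil {
		log.Fatal(err)
	}
	for _, r := range resp.Results {
		if r.Error != nil {
			log.Printf("%s: %v", r.User, r.Error)
			continue
		}
		for _, ci := range r.CredentialInfos {
			fmt.Printf("%s: mechanism=%v iterations=%d\n", r.User, ci.Mechanism, ci.Iterations)
		}
	}
}
```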
diff --git a/describeuserscramcredentials_test.go b/describeuserscramcredentials_test.go
new file mode 100644
index 000000000..d30b47477
--- /dev/null
+++ describeuserscramcredentials_test.go
@@ -0,0 +1,140 @@
+package kafka
+
+import (
+ "context"
+ "errors"
+ "testing"
+
+ ktesting "github.com/segmentio/kafka-go/testing"
+ "github.com/stretchr/testify/assert"
+)
+
+func TestDescribeUserScramCredentials(t *testing.T) {
+ // https://issues.apache.org/jira/browse/KAFKA-10259
+ if !ktesting.KafkaIsAtLeast("2.7.0") {
+ return
+ }
+
+ client, shutdown := newLocalClient()
+ defer shutdown()
+
+ name := makeTopic()
+
+ createRes, err := client.AlterUserScramCredentials(context.Background(), &AlterUserScramCredentialsRequest{
+ Upsertions: []UserScramCredentialsUpsertion{
+ {
+ Name: name,
+ Mechanism: ScramMechanismSha512,
+ Iterations: 15000,
+ Salt: []byte("my-salt"),
+ SaltedPassword: []byte("my-salted-password"),
+ },
+ },
+ })
+
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if len(createRes.Results) != 1 {
+ t.Fatalf("expected 1 createResult; got %d", len(createRes.Results))
+ }
+
+ if createRes.Results[0].User != name {
+ t.Fatalf("expected createResult with user: %s, got %s", name, createRes.Results[0].User)
+ }
+
+ if createRes.Results[0].Error != nil {
+ t.Fatalf("didn't expect an error in createResult, got %v", createRes.Results[0].Error)
+ }
+
+ describeCreationRes, err := client.DescribeUserScramCredentials(context.Background(), &DescribeUserScramCredentialsRequest{
+ Users: []UserScramCredentialsUser{
+ {
+ Name: name,
+ },
+ },
+ })
+
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ expectedCreation := DescribeUserScramCredentialsResponse{
+ Throttle: makeDuration(0),
+ Error: makeError(0, ""),
+ Results: []DescribeUserScramCredentialsResponseResult{
+ {
+ User: name,
+ CredentialInfos: []DescribeUserScramCredentialsCredentialInfo{
+ {
+ Mechanism: ScramMechanismSha512,
+ Iterations: 15000,
+ },
+ },
+ Error: makeError(0, ""),
+ },
+ },
+ }
+
+ assert.Equal(t, expectedCreation, *describeCreationRes)
+
+ deleteRes, err := client.AlterUserScramCredentials(context.Background(), &AlterUserScramCredentialsRequest{
+ Deletions: []UserScramCredentialsDeletion{
+ {
+ Name: name,
+ Mechanism: ScramMechanismSha512,
+ },
+ },
+ })
+
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if len(deleteRes.Results) != 1 {
+ t.Fatalf("expected 1 deleteResult; got %d", len(deleteRes.Results))
+ }
+
+ if deleteRes.Results[0].User != name {
+ t.Fatalf("expected deleteResult with user: %s, got %s", name, deleteRes.Results[0].User)
+ }
+
+ if deleteRes.Results[0].Error != nil {
+ t.Fatalf("didn't expect an error in deleteResult, got %v", deleteRes.Results[0].Error)
+ }
+
+ describeDeletionRes, err := client.DescribeUserScramCredentials(context.Background(), &DescribeUserScramCredentialsRequest{
+ Users: []UserScramCredentialsUser{
+ {
+ Name: name,
+ },
+ },
+ })
+
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if !errors.Is(describeDeletionRes.Error, makeError(0, "")) {
+ t.Fatalf("didn't expect a top level error on describe results after deletion, got %v", describeDeletionRes.Error)
+ }
+
+ if len(describeDeletionRes.Results) != 1 {
+ t.Fatalf("expected one describe results after deletion, got %d describe results", len(describeDeletionRes.Results))
+ }
+
+ result := describeDeletionRes.Results[0]
+
+ if result.User != name {
+ t.Fatalf("expected describeResult with user: %s, got %s", name, result.User)
+ }
+
+ if len(result.CredentialInfos) != 0 {
+ t.Fatalf("didn't expect describeResult credential infos, got %v", result.CredentialInfos)
+ }
+
+ if !errors.Is(result.Error, ResourceNotFound) {
+ t.Fatalf("expected describeResult resourcenotfound error, got %s", result.Error)
+ }
+}
diff --git dialer_test.go dialer_test.go
index 7bc9e58c7..4c8b813f3 100644
--- dialer_test.go
+++ dialer_test.go
@@ -61,11 +61,12 @@ func testDialerLookupPartitions(t *testing.T, ctx context.Context, d *Dialer) {
want := []Partition{
{
- Topic: topic,
- Leader: Broker{Host: "localhost", Port: 9092, ID: 1},
- Replicas: []Broker{{Host: "localhost", Port: 9092, ID: 1}},
- Isr: []Broker{{Host: "localhost", Port: 9092, ID: 1}},
- ID: 0,
+ Topic: topic,
+ Leader: Broker{Host: "localhost", Port: 9092, ID: 1},
+ Replicas: []Broker{{Host: "localhost", Port: 9092, ID: 1}},
+ Isr: []Broker{{Host: "localhost", Port: 9092, ID: 1}},
+ OfflineReplicas: []Broker{},
+ ID: 0,
},
}
if !reflect.DeepEqual(partitions, want) {
@@ -230,11 +231,12 @@ func TestDialerTLS(t *testing.T) {
want := []Partition{
{
- Topic: topic,
- Leader: Broker{Host: "localhost", Port: 9092, ID: 1},
- Replicas: []Broker{{Host: "localhost", Port: 9092, ID: 1}},
- Isr: []Broker{{Host: "localhost", Port: 9092, ID: 1}},
- ID: 0,
+ Topic: topic,
+ Leader: Broker{Host: "localhost", Port: 9092, ID: 1},
+ Replicas: []Broker{{Host: "localhost", Port: 9092, ID: 1}},
+ Isr: []Broker{{Host: "localhost", Port: 9092, ID: 1}},
+ OfflineReplicas: []Broker{},
+ ID: 0,
},
}
if !reflect.DeepEqual(partitions, want) {
@@ -377,11 +379,12 @@ func TestDialerResolver(t *testing.T) {
want := []Partition{
{
- Topic: topic,
- Leader: Broker{Host: "localhost", Port: 9092, ID: 1},
- Replicas: []Broker{{Host: "localhost", Port: 9092, ID: 1}},
- Isr: []Broker{{Host: "localhost", Port: 9092, ID: 1}},
- ID: 0,
+ Topic: topic,
+ Leader: Broker{Host: "localhost", Port: 9092, ID: 1},
+ Replicas: []Broker{{Host: "localhost", Port: 9092, ID: 1}},
+ Isr: []Broker{{Host: "localhost", Port: 9092, ID: 1}},
+ OfflineReplicas: []Broker{},
+ ID: 0,
},
}
if !reflect.DeepEqual(partitions, want) {
diff --git fetch.go fetch.go
index e682aeadb..eafd0de88 100644
--- fetch.go
+++ fetch.go
@@ -49,7 +49,7 @@ type FetchResponse struct {
Topic string
Partition int
- // Informations about the topic partition layout returned from the broker.
+ // Information about the topic partition layout returned from the broker.
//
// LastStableOffset requires the kafka broker to support the Fetch API in
// version 4 or above (otherwise the value is zero).
diff --git go.mod go.mod
index e3cd2c03e..d16e1ae78 100644
--- go.mod
+++ go.mod
@@ -3,11 +3,11 @@ module github.com/segmentio/kafka-go
go 1.15
require (
- github.com/klauspost/compress v1.15.7
+ github.com/klauspost/compress v1.15.9
github.com/pierrec/lz4/v4 v4.1.15
github.com/stretchr/testify v1.8.0
- github.com/xdg/scram v1.0.5
- github.com/xdg/stringprep v1.0.3 // indirect
- golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d // indirect
- golang.org/x/net v0.0.0-20220706163947-c90051bbdb60
+ github.com/xdg-go/scram v1.1.2
+ golang.org/x/net v0.17.0
)
+
+retract [v0.4.36, v0.4.37]
diff --git go.sum go.sum
index 3adcc8973..440b00f6d 100644
--- go.sum
+++ go.sum
@@ -1,8 +1,8 @@
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
-github.com/klauspost/compress v1.15.7 h1:7cgTQxJCU/vy+oP/E3B9RGbQTgbiVzIJWIKOLoAsPok=
-github.com/klauspost/compress v1.15.7/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU=
+github.com/klauspost/compress v1.15.9 h1:wKRjX6JRtDdrE9qwa4b/Cip7ACOshUI4smpCQanqjSY=
+github.com/klauspost/compress v1.15.9/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU=
github.com/pierrec/lz4/v4 v4.1.15 h1:MO0/ucJhngq7299dKLwIMtgTfbkoSPF6AoMYDd8Q4q0=
github.com/pierrec/lz4/v4 v4.1.15/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
@@ -12,25 +12,54 @@ github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSS
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.8.0 h1:pSgiaMZlXftHpm5L7V1+rVB+AZJydKsMxsQBIJw4PKk=
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
-github.com/xdg/scram v1.0.5 h1:TuS0RFmt5Is5qm9Tm2SoD89OPqe4IRiFtyFY4iwWXsw=
-github.com/xdg/scram v1.0.5/go.mod h1:lB8K/P019DLNhemzwFU4jHLhdvlE6uDZjXFejJXr49I=
-github.com/xdg/stringprep v1.0.3 h1:cmL5Enob4W83ti/ZHuZLuKD/xqJfus4fVPwE+/BDm+4=
-github.com/xdg/stringprep v1.0.3/go.mod h1:Jhud4/sHMO4oL310DaZAKk9ZaJ08SJfe+sJh0HrGL1Y=
-golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d h1:sK3txAijHtOK88l68nt020reeT1ZdKLIYetKl95FzVY=
-golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
-golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
-golang.org/x/net v0.0.0-20220706163947-c90051bbdb60 h1:8NSylCMxLW4JvserAndSgFL7aPli6A68yf0bYFTcWCM=
-golang.org/x/net v0.0.0-20220706163947-c90051bbdb60/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
+github.com/xdg-go/pbkdf2 v1.0.0 h1:Su7DPu48wXMwC3bs7MCNG+z4FhcyEuz5dlvchbq0B0c=
+github.com/xdg-go/pbkdf2 v1.0.0/go.mod h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI=
+github.com/xdg-go/scram v1.1.2 h1:FHX5I5B4i4hKRVRBCFRxq1iQRej7WO3hhBuJf+UUySY=
+github.com/xdg-go/scram v1.1.2/go.mod h1:RT/sEzTbU5y00aCK8UOx6R7YryM0iF1N2MOmC3kKLN4=
+github.com/xdg-go/stringprep v1.0.4 h1:XLI/Ng3O1Atzq0oBs3TWm+5ZVgkq2aqdlvP9JtoZ6c8=
+github.com/xdg-go/stringprep v1.0.4/go.mod h1:mPGuuIYwz7CmR2bT9j4GbQqutWS1zV24gijq1dTyGkM=
+github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
+golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
+golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
+golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4=
+golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
+golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
+golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
+golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
+golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
+golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
+golang.org/x/net v0.17.0 h1:pVaXccu2ozPjCXewfr1S7xza/zcXTity9cCdXQYSjIM=
+golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE=
+golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
-golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
-golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk=
+golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
+golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo=
+golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U=
+golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
+golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ=
+golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
+golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
+golang.org/x/text v0.13.0 h1:ablQoSUd0tRdKxZewP80B+BaqeKJuVhuRxj/dkrun3k=
+golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
+golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
+golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
diff --git gzip/gzip.go gzip/gzip.go
index 2ad84b500..230e4539e 100644
--- gzip/gzip.go
+++ gzip/gzip.go
@@ -4,8 +4,7 @@
package gzip
import (
- gz "compress/gzip"
-
+ gz "github.com/klauspost/compress/gzip"
"github.com/segmentio/kafka-go/compress/gzip"
)
diff --git initproducerid_test.go initproducerid_test.go
index 7a2b6b0a5..061819e58 100644
--- initproducerid_test.go
+++ initproducerid_test.go
@@ -18,7 +18,7 @@ func TestClientInitProducerId(t *testing.T) {
client, shutdown := newLocalClient()
defer shutdown()
- tid := "transaction1"
+ tid := makeTransactionalID()
// Wait for kafka setup and Coordinator to be available.
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
defer cancel()
diff --git kafka.go kafka.go
index ec139ac91..d2d36e413 100644
--- kafka.go
+++ kafka.go
@@ -47,6 +47,9 @@ type Partition struct {
Replicas []Broker
Isr []Broker
+ // Available only with metadata API level >= 6:
+ OfflineReplicas []Broker
+
// An error that may have occurred while attempting to read the partition
// metadata.
//
diff --git a/listpartitionreassignments.go b/listpartitionreassignments.go
new file mode 100644
index 000000000..aa01fff3f
--- /dev/null
+++ listpartitionreassignments.go
@@ -0,0 +1,135 @@
+package kafka
+
+import (
+ "context"
+ "net"
+ "time"
+
+ "github.com/segmentio/kafka-go/protocol/listpartitionreassignments"
+)
+
+// ListPartitionReassignmentsRequest is a request to the ListPartitionReassignments API.
+type ListPartitionReassignmentsRequest struct {
+ // Address of the kafka broker to send the request to.
+ Addr net.Addr
+
+ // Topics we want reassignments for, mapped by their name, or nil to list everything.
+ Topics map[string]ListPartitionReassignmentsRequestTopic
+
+ // Timeout is the amount of time to wait for the request to complete.
+ Timeout time.Duration
+}
+
+// ListPartitionReassignmentsRequestTopic contains the requested partitions for a single
+// topic.
+type ListPartitionReassignmentsRequestTopic struct {
+ // The partitions to list partition reassignments for.
+ PartitionIndexes []int
+}
+
+// ListPartitionReassignmentsResponse is a response from the ListPartitionReassignments API.
+type ListPartitionReassignmentsResponse struct {
+ // Error is set to a non-nil value including the code and message if a top-level
+ // error was encountered.
+ Error error
+
+ // Topics contains results for each topic, mapped by their name.
+ Topics map[string]ListPartitionReassignmentsResponseTopic
+}
+
+// ListPartitionReassignmentsResponseTopic contains the detailed result of
+// ongoing reassignments for a topic.
+type ListPartitionReassignmentsResponseTopic struct {
+ // Partitions contains result for topic partitions.
+ Partitions []ListPartitionReassignmentsResponsePartition
+}
+
+// ListPartitionReassignmentsResponsePartition contains the detailed result of
+// ongoing reassignments for a single partition.
+type ListPartitionReassignmentsResponsePartition struct {
+ // PartitionIndex contains index of the partition.
+ PartitionIndex int
+
+ // Replicas contains the current replica set.
+ Replicas []int
+
+ // AddingReplicas contains the set of replicas we are currently adding.
+ AddingReplicas []int
+
+ // RemovingReplicas contains the set of replicas we are currently removing.
+ RemovingReplicas []int
+}
+
+func (c *Client) ListPartitionReassignments(
+ ctx context.Context,
+ req *ListPartitionReassignmentsRequest,
+) (*ListPartitionReassignmentsResponse, error) {
+ apiReq := &listpartitionreassignments.Request{
+ TimeoutMs: int32(req.Timeout.Milliseconds()),
+ }
+
+ for topicName, topicReq := range req.Topics {
+ apiReq.Topics = append(
+ apiReq.Topics,
+ listpartitionreassignments.RequestTopic{
+ Name: topicName,
+ PartitionIndexes: intToInt32Array(topicReq.PartitionIndexes),
+ },
+ )
+ }
+
+ protoResp, err := c.roundTrip(
+ ctx,
+ req.Addr,
+ apiReq,
+ )
+ if err != nil {
+ return nil, err
+ }
+ apiResp := protoResp.(*listpartitionreassignments.Response)
+
+ resp := &ListPartitionReassignmentsResponse{
+ Error: makeError(apiResp.ErrorCode, apiResp.ErrorMessage),
+ Topics: make(map[string]ListPartitionReassignmentsResponseTopic),
+ }
+
+ for _, topicResult := range apiResp.Topics {
+ respTopic := ListPartitionReassignmentsResponseTopic{}
+ for _, partitionResult := range topicResult.Partitions {
+ respTopic.Partitions = append(
+ respTopic.Partitions,
+ ListPartitionReassignmentsResponsePartition{
+ PartitionIndex: int(partitionResult.PartitionIndex),
+ Replicas: int32ToIntArray(partitionResult.Replicas),
+ AddingReplicas: int32ToIntArray(partitionResult.AddingReplicas),
+ RemovingReplicas: int32ToIntArray(partitionResult.RemovingReplicas),
+ },
+ )
+ }
+ resp.Topics[topicResult.Name] = respTopic
+ }
+
+ return resp, nil
+}
+
+func intToInt32Array(arr []int) []int32 {
+ if arr == nil {
+ return nil
+ }
+ res := make([]int32, len(arr))
+ for i := range arr {
+ res[i] = int32(arr[i])
+ }
+ return res
+}
+
+func int32ToIntArray(arr []int32) []int {
+ if arr == nil {
+ return nil
+ }
+ res := make([]int, len(arr))
+ for i := range arr {
+ res[i] = int(arr[i])
+ }
+ return res
+}
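
A sketch of how the new `ListPartitionReassignments` method might be used; the topic name and timeout are placeholders:

```go
package main

import (
	"context"
	"fmt"
	"log"
	"time"

	"github.com/segmentio/kafka-go"
)

func main() {
	client := &kafka.Client{Addr: kafka.TCP("localhost:9092")}

	resp, err := client.ListPartitionReassignments(context.Background(),
		&kafka.ListPartitionReassignmentsRequest{
			// A nil Topics map would list every ongoing reassignment instead.
			Topics: map[string]kafka.ListPartitionReassignmentsRequestTopic{
				"my-topic": {PartitionIndexes: []int{0, 1}},
			},
			Timeout: 10 * time.Second,
		})
	if err != nil {
		log.Fatal(err)
	}
	if resp.Error != nil {
		log.Fatal(resp.Error)
	}
	for topic, result := range resp.Topics {
		for _, p := range result.Partitions {
			fmt.Printf("%s[%d]: replicas=%v adding=%v removing=%v\n",
				topic, p.PartitionIndex, p.Replicas, p.AddingReplicas, p.RemovingReplicas)
		}
	}
}
```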
diff --git a/listpartitionreassignments_test.go b/listpartitionreassignments_test.go
new file mode 100644
index 000000000..fd2c31789
--- /dev/null
+++ listpartitionreassignments_test.go
@@ -0,0 +1,50 @@
+package kafka
+
+import (
+ "context"
+ "testing"
+
+ ktesting "github.com/segmentio/kafka-go/testing"
+)
+
+func TestClientListPartitionReassignments(t *testing.T) {
+ if !ktesting.KafkaIsAtLeast("2.4.0") {
+ return
+ }
+
+ ctx := context.Background()
+ client, shutdown := newLocalClient()
+ defer shutdown()
+
+ topic := makeTopic()
+ createTopic(t, topic, 2)
+ defer deleteTopic(t, topic)
+
+ // Can't really get an ongoing partition reassignment with local Kafka, so just do a superficial test here.
+ resp, err := client.ListPartitionReassignments(
+ ctx,
+ &ListPartitionReassignmentsRequest{
+ Topics: map[string]ListPartitionReassignmentsRequestTopic{
+ topic: {PartitionIndexes: []int{0, 1}},
+ },
+ },
+ )
+
+ if err != nil {
+ t.Fatal(err)
+ }
+ if resp.Error != nil {
+ t.Error(
+ "Unexpected error in response",
+ "expected", nil,
+ "got", resp.Error,
+ )
+ }
+ if len(resp.Topics) != 0 {
+ t.Error(
+ "Unexpected length of topic results",
+ "expected", 0,
+ "got", len(resp.Topics),
+ )
+ }
+}
diff --git message.go message.go
index 5fb7b8ebe..0539e6038 100644
--- message.go
+++ message.go
@@ -20,6 +20,11 @@ type Message struct {
Value []byte
Headers []Header
+ // This field is used to hold arbitrary data you wish to include, so it
+ // will be available when handled by the Writer's `Completion` method.
+ // This lets the application run any post-write logic on each message.
+ WriterData interface{}
+
// If not set at the creation, Time will be automatically set when
// writing the message.
Time time.Time
@@ -44,6 +49,17 @@ func (msg *Message) size() int32 {
return 4 + 1 + 1 + sizeofBytes(msg.Key) + sizeofBytes(msg.Value) + timestampSize
}
+func (msg *Message) headerSize() int {
+ return varArrayLen(len(msg.Headers), func(i int) int {
+ h := &msg.Headers[i]
+ return varStringLen(h.Key) + varBytesLen(h.Value)
+ })
+}
+
+func (msg *Message) totalSize() int32 {
+ return int32(msg.headerSize()) + msg.size()
+}
+
type message struct {
CRC int32
MagicByte int8
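
The new `WriterData` field pairs with the Writer's existing `Completion` callback; a minimal sketch, assuming a local broker and a placeholder topic:

```go
package main

import (
	"context"
	"log"

	"github.com/segmentio/kafka-go"
)

func main() {
	w := &kafka.Writer{
		Addr:  kafka.TCP("localhost:9092"),
		Topic: "my-topic",
		// Completion runs after each batch is written; WriterData lets the
		// application correlate its own state with each message.
		Completion: func(messages []kafka.Message, err error) {
			for _, m := range messages {
				if id, ok := m.WriterData.(int); ok {
					log.Printf("message %d completed (err=%v)", id, err)
				}
			}
		},
	}
	defer w.Close()

	if err := w.WriteMessages(context.Background(),
		kafka.Message{Value: []byte("hello"), WriterData: 42},
	); err != nil {
		log.Fatal(err)
	}
}
```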
diff --git message_reader.go message_reader.go
index 35e1067f2..a0a0385ef 100644
--- message_reader.go
+++ message_reader.go
@@ -22,7 +22,7 @@ type messageSetReader struct {
// This is used to detect truncation of the response.
lengthRemain int
- decompressed bytes.Buffer
+ decompressed *bytes.Buffer
}
type readerStack struct {
@@ -87,6 +87,7 @@ func newMessageSetReader(reader *bufio.Reader, remain int) (*messageSetReader, e
reader: reader,
remain: remain,
},
+ decompressed: acquireBuffer(),
}
err := res.readHeader()
return res, err
@@ -158,7 +159,9 @@ func (r *messageSetReader) readMessageV1(min int64, key readBytesFunc, val readB
if codec, err = r.header.compression(); err != nil {
return
}
- r.log("Reading with codec=%T", codec)
+ if r.debug {
+ r.log("Reading with codec=%T", codec)
+ }
if codec != nil {
// discard next four bytes...will be -1 to indicate null key
if err = r.discardN(4); err != nil {
@@ -199,7 +202,7 @@ func (r *messageSetReader) readMessageV1(min int64, key readBytesFunc, val readB
// Allocate a buffer of size 0, which gets capped at 16 bytes
// by the bufio package. We are already reading buffered data
// here, no need to reserve another 4KB buffer.
- reader: bufio.NewReaderSize(&r.decompressed, 0),
+ reader: bufio.NewReaderSize(r.decompressed, 0),
remain: r.decompressed.Len(),
base: offset,
parent: r.readerStack,
@@ -278,7 +281,7 @@ func (r *messageSetReader) readMessageV2(_ int64, key readBytesFunc, val readByt
}
r.remain -= batchRemain - int(limitReader.N)
r.readerStack = &readerStack{
- reader: bufio.NewReaderSize(&r.decompressed, 0), // the new stack reads from the decompressed buffer
+ reader: bufio.NewReaderSize(r.decompressed, 0), // the new stack reads from the decompressed buffer
remain: r.decompressed.Len(),
base: -1, // base is unused here
parent: r.readerStack,
@@ -351,14 +354,18 @@ func (r *messageSetReader) markRead() {
}
r.count--
r.unwindStack()
- r.log("Mark read remain=%d", r.remain)
+ if r.debug {
+ r.log("Mark read remain=%d", r.remain)
+ }
}
func (r *messageSetReader) unwindStack() {
for r.count == 0 {
if r.remain == 0 {
if r.parent != nil {
- r.log("Popped reader stack")
+ if r.debug {
+ r.log("Popped reader stack")
+ }
r.readerStack = r.parent
continue
}
@@ -425,7 +432,9 @@ func (r *messageSetReader) readHeader() (err error) {
// Set arbitrary non-zero length so that we always assume the
// message is truncated since bytes remain.
r.lengthRemain = 1
- r.log("Read v0 header with offset=%d len=%d magic=%d attributes=%d", r.header.firstOffset, r.header.length, r.header.magic, r.header.v1.attributes)
+ if r.debug {
+ r.log("Read v0 header with offset=%d len=%d magic=%d attributes=%d", r.header.firstOffset, r.header.length, r.header.magic, r.header.v1.attributes)
+ }
case 1:
r.header.crc = crcOrLeaderEpoch
if err = r.readInt8(&r.header.v1.attributes); err != nil {
@@ -438,7 +447,9 @@ func (r *messageSetReader) readHeader() (err error) {
// Set arbitrary non-zero length so that we always assume the
// message is truncated since bytes remain.
r.lengthRemain = 1
- r.log("Read v1 header with remain=%d offset=%d magic=%d and attributes=%d", r.remain, r.header.firstOffset, r.header.magic, r.header.v1.attributes)
+ if r.debug {
+ r.log("Read v1 header with remain=%d offset=%d magic=%d and attributes=%d", r.remain, r.header.firstOffset, r.header.magic, r.header.v1.attributes)
+ }
case 2:
r.header.v2.leaderEpoch = crcOrLeaderEpoch
if err = r.readInt32(&r.header.crc); err != nil {
@@ -471,7 +482,9 @@ func (r *messageSetReader) readHeader() (err error) {
r.count = int(r.header.v2.count)
// Subtracts the header bytes from the length
r.lengthRemain = int(r.header.length) - 49
- r.log("Read v2 header with count=%d offset=%d len=%d magic=%d attributes=%d", r.count, r.header.firstOffset, r.header.length, r.header.magic, r.header.v2.attributes)
+ if r.debug {
+ r.log("Read v2 header with count=%d offset=%d len=%d magic=%d attributes=%d", r.count, r.header.firstOffset, r.header.length, r.header.magic, r.header.v2.attributes)
+ }
default:
err = r.header.badMagic()
return
@@ -520,9 +533,7 @@ func (r *messageSetReader) readBytesWith(fn readBytesFunc) (err error) {
}
func (r *messageSetReader) log(msg string, args ...interface{}) {
- if r.debug {
- log.Printf("[DEBUG] "+msg, args...)
- }
+ log.Printf("[DEBUG] "+msg, args...)
}
func extractOffset(base int64, msgSet []byte) (offset int64, err error) {
diff --git metadata.go metadata.go
index 4b1309f85..429a6a260 100644
--- metadata.go
+++ metadata.go
@@ -199,3 +199,89 @@ func (p partitionMetadataV1) writeTo(wb *writeBuffer) {
wb.writeInt32Array(p.Replicas)
wb.writeInt32Array(p.Isr)
}
+
+type topicMetadataRequestV6 struct {
+ Topics []string
+ AllowAutoTopicCreation bool
+}
+
+func (r topicMetadataRequestV6) size() int32 {
+ return sizeofStringArray([]string(r.Topics)) + 1
+}
+
+func (r topicMetadataRequestV6) writeTo(wb *writeBuffer) {
+ // communicate nil-ness to the broker by passing -1 as the array length.
+ // for this particular request, the broker interprets a zero length array
+ // as a request for no topics whereas a nil array is for all topics.
+ if r.Topics == nil {
+ wb.writeArrayLen(-1)
+ } else {
+ wb.writeStringArray([]string(r.Topics))
+ }
+ wb.writeBool(r.AllowAutoTopicCreation)
+}
+
+type metadataResponseV6 struct {
+ ThrottleTimeMs int32
+ Brokers []brokerMetadataV1
+ ClusterId string
+ ControllerID int32
+ Topics []topicMetadataV6
+}
+
+func (r metadataResponseV6) size() int32 {
+ n1 := sizeofArray(len(r.Brokers), func(i int) int32 { return r.Brokers[i].size() })
+ n2 := sizeofNullableString(&r.ClusterId)
+ n3 := sizeofArray(len(r.Topics), func(i int) int32 { return r.Topics[i].size() })
+ return 4 + 4 + n1 + n2 + n3
+}
+
+func (r metadataResponseV6) writeTo(wb *writeBuffer) {
+ wb.writeInt32(r.ThrottleTimeMs)
+ wb.writeArray(len(r.Brokers), func(i int) { r.Brokers[i].writeTo(wb) })
+ wb.writeString(r.ClusterId)
+ wb.writeInt32(r.ControllerID)
+ wb.writeArray(len(r.Topics), func(i int) { r.Topics[i].writeTo(wb) })
+}
+
+type topicMetadataV6 struct {
+ TopicErrorCode int16
+ TopicName string
+ Internal bool
+ Partitions []partitionMetadataV6
+}
+
+func (t topicMetadataV6) size() int32 {
+ return 2 + 1 +
+ sizeofString(t.TopicName) +
+ sizeofArray(len(t.Partitions), func(i int) int32 { return t.Partitions[i].size() })
+}
+
+func (t topicMetadataV6) writeTo(wb *writeBuffer) {
+ wb.writeInt16(t.TopicErrorCode)
+ wb.writeString(t.TopicName)
+ wb.writeBool(t.Internal)
+ wb.writeArray(len(t.Partitions), func(i int) { t.Partitions[i].writeTo(wb) })
+}
+
+type partitionMetadataV6 struct {
+ PartitionErrorCode int16
+ PartitionID int32
+ Leader int32
+ Replicas []int32
+ Isr []int32
+ OfflineReplicas []int32
+}
+
+func (p partitionMetadataV6) size() int32 {
+ return 2 + 4 + 4 + sizeofInt32Array(p.Replicas) + sizeofInt32Array(p.Isr) + sizeofInt32Array(p.OfflineReplicas)
+}
+
+func (p partitionMetadataV6) writeTo(wb *writeBuffer) {
+ wb.writeInt16(p.PartitionErrorCode)
+ wb.writeInt32(p.PartitionID)
+ wb.writeInt32(p.Leader)
+ wb.writeInt32Array(p.Replicas)
+ wb.writeInt32Array(p.Isr)
+ wb.writeInt32Array(p.OfflineReplicas)
+}
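
The new `OfflineReplicas` field can be observed through existing APIs such as `Conn.ReadPartitions`; note it is only populated when the broker supports Metadata v6 or above. A short sketch with placeholder broker and topic:

```go
package main

import (
	"fmt"
	"log"

	"github.com/segmentio/kafka-go"
)

func main() {
	conn, err := kafka.Dial("tcp", "localhost:9092")
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	partitions, err := conn.ReadPartitions("my-topic")
	if err != nil {
		log.Fatal(err)
	}
	for _, p := range partitions {
		fmt.Printf("partition %d: leader=%d offline replicas=%d\n",
			p.ID, p.Leader.ID, len(p.OfflineReplicas))
	}
}
```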
diff --git a/offsetdelete.go b/offsetdelete.go
new file mode 100644
index 000000000..ea526eb25
--- /dev/null
+++ offsetdelete.go
@@ -0,0 +1,106 @@
+package kafka
+
+import (
+ "context"
+ "fmt"
+ "net"
+ "time"
+
+ "github.com/segmentio/kafka-go/protocol/offsetdelete"
+)
+
+// OffsetDelete deletes the offset for a consumer group on a particular topic
+// for a particular partition.
+type OffsetDelete struct {
+ Topic string
+ Partition int
+}
+
+// OffsetDeleteRequest represents a request sent to a kafka broker to delete
+// the offsets for a partition on a given topic associated with a consumer group.
+type OffsetDeleteRequest struct {
+ // Address of the kafka broker to send the request to.
+ Addr net.Addr
+
+ // ID of the consumer group to delete the offsets for.
+ GroupID string
+
+ // Set of topic partitions to delete offsets for.
+ Topics map[string][]int
+}
+
+// OffsetDeleteResponse represents a response from a kafka broker to a delete
+// offset request.
+type OffsetDeleteResponse struct {
+ // An error that may have occurred while attempting to delete an offset
+ Error error
+
+ // The amount of time that the broker throttled the request.
+ Throttle time.Duration
+
+ // Set of topic partitions that the kafka broker has additional
+ // information (such as per-partition errors) for.
+ Topics map[string][]OffsetDeletePartition
+}
+
+// OffsetDeletePartition represents the status of a partition in response
+// to deleting offsets.
+type OffsetDeletePartition struct {
+ // ID of the partition.
+ Partition int
+
+ // An error that may have occurred while attempting to delete an offset for
+ // this partition.
+ Error error
+}
+
+// OffsetDelete sends a delete offset request to a kafka broker and returns the
+// response.
+func (c *Client) OffsetDelete(ctx context.Context, req *OffsetDeleteRequest) (*OffsetDeleteResponse, error) {
+ topics := make([]offsetdelete.RequestTopic, 0, len(req.Topics))
+
+ for topicName, partitionIndexes := range req.Topics {
+ partitions := make([]offsetdelete.RequestPartition, len(partitionIndexes))
+
+ for i, c := range partitionIndexes {
+ partitions[i] = offsetdelete.RequestPartition{
+ PartitionIndex: int32(c),
+ }
+ }
+
+ topics = append(topics, offsetdelete.RequestTopic{
+ Name: topicName,
+ Partitions: partitions,
+ })
+ }
+
+ m, err := c.roundTrip(ctx, req.Addr, &offsetdelete.Request{
+ GroupID: req.GroupID,
+ Topics: topics,
+ })
+ if err != nil {
+ return nil, fmt.Errorf("kafka.(*Client).OffsetDelete: %w", err)
+ }
+ r := m.(*offsetdelete.Response)
+
+ res := &OffsetDeleteResponse{
+ Error: makeError(r.ErrorCode, ""),
+ Throttle: makeDuration(r.ThrottleTimeMs),
+ Topics: make(map[string][]OffsetDeletePartition, len(r.Topics)),
+ }
+
+ for _, topic := range r.Topics {
+ partitions := make([]OffsetDeletePartition, len(topic.Partitions))
+
+ for i, p := range topic.Partitions {
+ partitions[i] = OffsetDeletePartition{
+ Partition: int(p.PartitionIndex),
+ Error: makeError(p.ErrorCode, ""),
+ }
+ }
+
+ res.Topics[topic.Name] = partitions
+ }
+
+ return res, nil
+}
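
A sketch of deleting committed offsets with the new `OffsetDelete` method; group, topic, and partitions are placeholders:

```go
package main

import (
	"context"
	"log"

	"github.com/segmentio/kafka-go"
)

func main() {
	client := &kafka.Client{Addr: kafka.TCP("localhost:9092")}

	resp, err := client.OffsetDelete(context.Background(), &kafka.OffsetDeleteRequest{
		GroupID: "my-consumer-group",
		Topics:  map[string][]int{"my-topic": {0, 1, 2}},
	})
	if err != nil {
		log.Fatal(err)
	}
	if resp.Error != nil {
		log.Fatal(resp.Error)
	}
	for topic, partitions := range resp.Topics {
		for _, p := range partitions {
			if p.Error != nil {
				log.Printf("%s[%d]: %v", topic, p.Partition, p.Error)
			}
		}
	}
}
```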
diff --git a/offsetdelete_test.go b/offsetdelete_test.go
new file mode 100644
index 000000000..e66b38fd3
--- /dev/null
+++ offsetdelete_test.go
@@ -0,0 +1,160 @@
+package kafka
+
+import (
+ "context"
+ "log"
+ "os"
+ "strconv"
+ "testing"
+ "time"
+
+ ktesting "github.com/segmentio/kafka-go/testing"
+)
+
+func TestClientDeleteOffset(t *testing.T) {
+ if !ktesting.KafkaIsAtLeast("2.4.0") {
+ return
+ }
+
+ topic := makeTopic()
+ client, shutdown := newLocalClientWithTopic(topic, 3)
+ defer shutdown()
+ now := time.Now()
+
+ const N = 10 * 3
+ records := make([]Record, 0, N)
+ for i := 0; i < N; i++ {
+ records = append(records, Record{
+ Time: now,
+ Value: NewBytes([]byte("test-message-" + strconv.Itoa(i))),
+ })
+ }
+ ctx, cancel := context.WithTimeout(context.Background(), time.Second*30)
+ defer cancel()
+ res, err := client.Produce(ctx, &ProduceRequest{
+ Topic: topic,
+ RequiredAcks: RequireAll,
+ Records: NewRecordReader(records...),
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if res.Error != nil {
+ t.Error(res.Error)
+ }
+
+ for index, err := range res.RecordErrors {
+ t.Fatalf("record at index %d produced an error: %v", index, err)
+ }
+ ctx, cancel = context.WithTimeout(context.Background(), time.Second*30)
+ defer cancel()
+ groupID := makeGroupID()
+
+ group, err := NewConsumerGroup(ConsumerGroupConfig{
+ ID: groupID,
+ Topics: []string{topic},
+ Brokers: []string{"localhost:9092"},
+ HeartbeatInterval: 2 * time.Second,
+ RebalanceTimeout: 2 * time.Second,
+ RetentionTime: time.Hour,
+ Logger: log.New(os.Stdout, "cg-test: ", 0),
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ gen, err := group.Next(ctx)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ ocr, err := client.OffsetCommit(ctx, &OffsetCommitRequest{
+ Addr: nil,
+ GroupID: groupID,
+ GenerationID: int(gen.ID),
+ MemberID: gen.MemberID,
+ Topics: map[string][]OffsetCommit{
+ topic: {
+ {Partition: 0, Offset: 10},
+ {Partition: 1, Offset: 10},
+ {Partition: 2, Offset: 10},
+ },
+ },
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ group.Close()
+
+ resps := ocr.Topics[topic]
+ if len(resps) != 3 {
+ t.Fatalf("expected 3 offsetcommitpartition responses; got %d", len(resps))
+ }
+
+ for _, resp := range resps {
+ if resp.Error != nil {
+ t.Fatal(resp.Error)
+ }
+ }
+
+ ofr, err := client.OffsetFetch(ctx, &OffsetFetchRequest{
+ GroupID: groupID,
+ Topics: map[string][]int{topic: {0, 1, 2}},
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if ofr.Error != nil {
+ t.Error(ofr.Error)
+ }
+
+ fetresps := ofr.Topics[topic]
+ if len(fetresps) != 3 {
+ t.Fatalf("expected 3 offsetfetchpartition responses; got %d", len(resps))
+ }
+
+ for _, r := range fetresps {
+ if r.Error != nil {
+ t.Fatal(r.Error)
+ }
+
+ if r.CommittedOffset != 10 {
+ t.Fatalf("expected committed offset to be 10; got: %v for partition: %v", r.CommittedOffset, r.Partition)
+ }
+ }
+
+ // Remove offsets
+ odr, err := client.OffsetDelete(ctx, &OffsetDeleteRequest{
+ GroupID: groupID,
+ Topics: map[string][]int{topic: {0, 1, 2}},
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if odr.Error != nil {
+ t.Error(odr.Error)
+ }
+
+ // Fetch the offsets again
+ ofr, err = client.OffsetFetch(ctx, &OffsetFetchRequest{
+ GroupID: groupID,
+ Topics: map[string][]int{topic: {0, 1, 2}},
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if ofr.Error != nil {
+ t.Error(ofr.Error)
+ }
+
+ for _, r := range ofr.Topics[topic] {
+ if r.CommittedOffset != -1 {
+ t.Fatalf("expected committed offset to be -1; got: %v for partition: %v", r.CommittedOffset, r.Partition)
+ }
+ }
+}
diff --git offsetfetch.go offsetfetch.go
index 61fcba2e3..b85bc5c83 100644
--- offsetfetch.go
+++ offsetfetch.go
@@ -66,19 +66,28 @@ type OffsetFetchPartition struct {
// OffsetFetch sends an offset fetch request to a kafka broker and returns the
// response.
func (c *Client) OffsetFetch(ctx context.Context, req *OffsetFetchRequest) (*OffsetFetchResponse, error) {
- topics := make([]offsetfetch.RequestTopic, 0, len(req.Topics))
- for topicName, partitions := range req.Topics {
- indexes := make([]int32, len(partitions))
+ // Kafka versions 0.10.2.x and above allow a null Topics map for the OffsetFetch API,
+ // which will return results for all topics with the desired consumer group:
+ // https://kafka.apache.org/0102/protocol.html#The_Messages_OffsetFetch
+ // For Kafka versions below 0.10.2.x this call will result in an error.
+ var topics []offsetfetch.RequestTopic
- for i, p := range partitions {
- indexes[i] = int32(p)
- }
+ if len(req.Topics) > 0 {
+ topics = make([]offsetfetch.RequestTopic, 0, len(req.Topics))
+
+ for topicName, partitions := range req.Topics {
+ indexes := make([]int32, len(partitions))
- topics = append(topics, offsetfetch.RequestTopic{
- Name: topicName,
- PartitionIndexes: indexes,
- })
+ for i, p := range partitions {
+ indexes[i] = int32(p)
+ }
+
+ topics = append(topics, offsetfetch.RequestTopic{
+ Name: topicName,
+ PartitionIndexes: indexes,
+ })
+ }
}
m, err := c.roundTrip(ctx, req.Addr, &offsetfetch.Request{
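
To illustrate the new nil-Topics behavior described in the comment above, a minimal sketch that fetches committed offsets for every topic in a group (the group id is a placeholder):

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/segmentio/kafka-go"
)

func main() {
	client := &kafka.Client{Addr: kafka.TCP("localhost:9092")}

	// Leaving Topics nil fetches committed offsets for every topic in the
	// group (brokers 0.10.2+ only, per the comment in the diff).
	resp, err := client.OffsetFetch(context.Background(), &kafka.OffsetFetchRequest{
		GroupID: "my-consumer-group",
	})
	if err != nil {
		log.Fatal(err)
	}
	for topic, partitions := range resp.Topics {
		for _, p := range partitions {
			fmt.Printf("%s[%d]: committed=%d\n", topic, p.Partition, p.CommittedOffset)
		}
	}
}
```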
diff --git offsetfetch_test.go offsetfetch_test.go
index 6edb7dfaf..7f244700d 100644
--- offsetfetch_test.go
+++ offsetfetch_test.go
@@ -3,8 +3,12 @@ package kafka
import (
"bufio"
"bytes"
+ "context"
"reflect"
"testing"
+ "time"
+
+ ktesting "github.com/segmentio/kafka-go/testing"
)
func TestOffsetFetchResponseV1(t *testing.T) {
@@ -43,3 +47,121 @@ func TestOffsetFetchResponseV1(t *testing.T) {
t.FailNow()
}
}
+
+func TestOffsetFetchRequestWithNoTopic(t *testing.T) {
+ if !ktesting.KafkaIsAtLeast("0.10.2.0") {
+ t.Logf("Test %s is not applicable for kafka versions below 0.10.2.0", t.Name())
+ t.SkipNow()
+ }
+ ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
+ topic1 := makeTopic()
+ defer deleteTopic(t, topic1)
+ topic2 := makeTopic()
+ defer deleteTopic(t, topic2)
+ consumeGroup := makeGroupID()
+ numMsgs := 50
+ defer cancel()
+ r1 := NewReader(ReaderConfig{
+ Brokers: []string{"localhost:9092"},
+ Topic: topic1,
+ GroupID: consumeGroup,
+ MinBytes: 1,
+ MaxBytes: 100,
+ MaxWait: 100 * time.Millisecond,
+ })
+ defer r1.Close()
+ prepareReader(t, ctx, r1, makeTestSequence(numMsgs)...)
+ r2 := NewReader(ReaderConfig{
+ Brokers: []string{"localhost:9092"},
+ Topic: topic2,
+ GroupID: consumeGroup,
+ MinBytes: 1,
+ MaxBytes: 100,
+ MaxWait: 100 * time.Millisecond,
+ })
+ defer r2.Close()
+ prepareReader(t, ctx, r2, makeTestSequence(numMsgs)...)
+
+ for i := 0; i < numMsgs; i++ {
+ if _, err := r1.ReadMessage(ctx); err != nil {
+ t.Fatal(err)
+ }
+ }
+ for i := 0; i < numMsgs; i++ {
+ if _, err := r2.ReadMessage(ctx); err != nil {
+ t.Fatal(err)
+ }
+ }
+
+ client := Client{Addr: TCP("localhost:9092")}
+
+ topicOffsets, err := client.OffsetFetch(ctx, &OffsetFetchRequest{GroupID: consumeGroup})
+
+ if err != nil {
+ t.Error(err)
+ t.FailNow()
+ }
+
+ if len(topicOffsets.Topics) != 2 {
+ t.Error(err)
+ t.FailNow()
+ }
+
+}
+
+func TestOffsetFetchRequestWithOneTopic(t *testing.T) {
+ ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
+ topic1 := makeTopic()
+ defer deleteTopic(t, topic1)
+ topic2 := makeTopic()
+ defer deleteTopic(t, topic2)
+ consumeGroup := makeGroupID()
+ numMsgs := 50
+ defer cancel()
+ r1 := NewReader(ReaderConfig{
+ Brokers: []string{"localhost:9092"},
+ Topic: topic1,
+ GroupID: consumeGroup,
+ MinBytes: 1,
+ MaxBytes: 100,
+ MaxWait: 100 * time.Millisecond,
+ })
+ defer r1.Close()
+ prepareReader(t, ctx, r1, makeTestSequence(numMsgs)...)
+ r2 := NewReader(ReaderConfig{
+ Brokers: []string{"localhost:9092"},
+ Topic: topic2,
+ GroupID: consumeGroup,
+ MinBytes: 1,
+ MaxBytes: 100,
+ MaxWait: 100 * time.Millisecond,
+ })
+ defer r2.Close()
+ prepareReader(t, ctx, r2, makeTestSequence(numMsgs)...)
+
+ for i := 0; i < numMsgs; i++ {
+ if _, err := r1.ReadMessage(ctx); err != nil {
+ t.Fatal(err)
+ }
+ }
+ for i := 0; i < numMsgs; i++ {
+ if _, err := r2.ReadMessage(ctx); err != nil {
+ t.Fatal(err)
+ }
+ }
+
+ client := Client{Addr: TCP("localhost:9092")}
+ topicOffsets, err := client.OffsetFetch(ctx, &OffsetFetchRequest{GroupID: consumeGroup, Topics: map[string][]int{
+ topic1: {0},
+ }})
+
+ if err != nil {
+ t.Error(err)
+ t.FailNow()
+ }
+
+ if len(topicOffsets.Topics) != 1 {
+ t.Error(err)
+ t.FailNow()
+ }
+}
diff --git produce.go produce.go
index 1a196fe6b..72d1ed09b 100644
--- produce.go
+++ produce.go
@@ -111,26 +111,26 @@ type ProduceResponse struct {
// Offset of the first record that was written to the topic partition.
//
- // This field will be zero if the kafka broker did no support the Produce
- // API in version 3 or above.
+ // This field will be zero if the kafka broker did not support Produce API
+ // version 3 or above.
BaseOffset int64
// Time at which the broker wrote the records to the topic partition.
//
- // This field will be zero if the kafka broker did no support the Produce
- // API in version 2 or above.
+ // This field will be zero if the kafka broker did not support Produce API
+ // version 2 or above.
LogAppendTime time.Time
// First offset in the topic partition that the records were written to.
//
- // This field will be zero if the kafka broker did no support the Produce
- // API in version 5 or above (or if the first offset is zero).
+ // This field will be zero if the kafka broker did not support Produce
+ // API version 5 or above (or if the first offset is zero).
LogStartOffset int64
// If errors occurred writing specific records, they will be reported in
// this map.
//
- // This field will always be empty if the kafka broker did no support the
+ // This field will always be empty if the kafka broker did not support the
// Produce API in version 8 or above.
RecordErrors map[int]error
}
diff --git protocol.go protocol.go
index 829fabf54..37208abf1 100644
--- protocol.go
+++ protocol.go
@@ -107,10 +107,11 @@ const (
v2 = 2
v3 = 3
v5 = 5
+ v6 = 6
v7 = 7
v10 = 10
- // Unused protocol versions: v4, v6, v8, v9.
+ // Unused protocol versions: v4, v8, v9.
)
var apiKeyStrings = [...]string{
diff --git a/protocol/alterclientquotas/alterclientquotas.go b/protocol/alterclientquotas/alterclientquotas.go
new file mode 100644
index 000000000..c657d92ac
--- /dev/null
+++ protocol/alterclientquotas/alterclientquotas.go
@@ -0,0 +1,68 @@
+package alterclientquotas
+
+import "github.com/segmentio/kafka-go/protocol"
+
+func init() {
+ protocol.Register(&Request{}, &Response{})
+}
+
+// Detailed API definition: https://kafka.apache.org/protocol#The_Messages_AlterClientQuotas
+type Request struct {
+ // We need at least one tagged field to indicate that this is a "flexible" message
+ // type.
+ _ struct{} `kafka:"min=v1,max=v1,tag"`
+ Entries []Entry `kafka:"min=v0,max=v1"`
+ ValidateOnly bool `kafka:"min=v0,max=v1"`
+}
+
+func (r *Request) ApiKey() protocol.ApiKey { return protocol.AlterClientQuotas }
+
+func (r *Request) Broker(cluster protocol.Cluster) (protocol.Broker, error) {
+ return cluster.Brokers[cluster.Controller], nil
+}
+
+type Entry struct {
+ // We need at least one tagged field to indicate that this is a "flexible" message
+ // type.
+ _ struct{} `kafka:"min=v1,max=v1,tag"`
+ Entities []Entity `kafka:"min=v0,max=v1"`
+ Ops []Ops `kafka:"min=v0,max=v1"`
+}
+
+type Entity struct {
+ // We need at least one tagged field to indicate that this is a "flexible" message
+ // type.
+ _ struct{} `kafka:"min=v1,max=v1,tag"`
+ EntityType string `kafka:"min=v0,max=v0|min=v1,max=v1,compact"`
+ EntityName string `kafka:"min=v0,max=v0,nullable|min=v1,max=v1,nullable,compact"`
+}
+
+type Ops struct {
+ // We need at least one tagged field to indicate that this is a "flexible" message
+ // type.
+ _ struct{} `kafka:"min=v1,max=v1,tag"`
+ Key string `kafka:"min=v0,max=v0|min=v1,max=v1,compact"`
+ Value float64 `kafka:"min=v0,max=v1"`
+ Remove bool `kafka:"min=v0,max=v1"`
+}
+
+type Response struct {
+ // We need at least one tagged field to indicate that this is a "flexible" message
+ // type.
+ _ struct{} `kafka:"min=v1,max=v1,tag"`
+ ThrottleTimeMs int32 `kafka:"min=v0,max=v1"`
+ Results []ResponseQuotas `kafka:"min=v0,max=v1"`
+}
+
+func (r *Response) ApiKey() protocol.ApiKey { return protocol.AlterClientQuotas }
+
+type ResponseQuotas struct {
+ // We need at least one tagged field to indicate that this is a "flexible" message
+ // type.
+ _ struct{} `kafka:"min=v1,max=v1,tag"`
+ ErrorCode int16 `kafka:"min=v0,max=v1"`
+ ErrorMessage string `kafka:"min=v0,max=v1,nullable"`
+ Entities []Entity `kafka:"min=v0,max=v1"`
+}
+
+var _ protocol.BrokerMessage = (*Request)(nil)
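
For orientation, a hedged sketch of driving this API through the high-level client; the wrapper type names (`AlterClientQuotaEntry`, `AlterClientQuotaEntity`, `AlterClientQuotaOps`) are assumed to mirror the protocol structs above and should be checked against alterclientquotas.go:

```go
package main

import (
	"context"
	"log"

	"github.com/segmentio/kafka-go"
)

func main() {
	client := &kafka.Client{Addr: kafka.TCP("localhost:9092")}

	// Wrapper type names below are assumptions mirroring the protocol types.
	resp, err := client.AlterClientQuotas(context.Background(), &kafka.AlterClientQuotasRequest{
		Entries: []kafka.AlterClientQuotaEntry{{
			Entities: []kafka.AlterClientQuotaEntity{
				{EntityType: "client-id", EntityName: "my-client-id"},
			},
			Ops: []kafka.AlterClientQuotaOps{
				{Key: "producer_byte_rate", Value: 500000, Remove: false},
			},
		}},
		ValidateOnly: true, // dry-run: validate without applying
	})
	if err != nil {
		log.Fatal(err)
	}
	_ = resp // inspect per-entry results for errors
}
```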
diff --git a/protocol/alterclientquotas/alterclientquotas_test.go b/protocol/alterclientquotas/alterclientquotas_test.go
new file mode 100644
index 000000000..cf1b82f6b
--- /dev/null
+++ protocol/alterclientquotas/alterclientquotas_test.go
@@ -0,0 +1,91 @@
+package alterclientquotas_test
+
+import (
+ "testing"
+
+ "github.com/segmentio/kafka-go/protocol/alterclientquotas"
+ "github.com/segmentio/kafka-go/protocol/prototest"
+)
+
+const (
+ v0 = 0
+ v1 = 1
+)
+
+func TestAlterClientQuotasRequest(t *testing.T) {
+ prototest.TestRequest(t, v0, &alterclientquotas.Request{
+ ValidateOnly: true,
+ Entries: []alterclientquotas.Entry{
+ {
+ Entities: []alterclientquotas.Entity{
+ {
+ EntityType: "client-id",
+ EntityName: "my-client-id",
+ },
+ },
+ Ops: []alterclientquotas.Ops{
+ {
+ Key: "producer_byte_rate",
+ Value: 1.0,
+ Remove: false,
+ },
+ },
+ },
+ },
+ })
+
+ prototest.TestRequest(t, v1, &alterclientquotas.Request{
+ ValidateOnly: true,
+ Entries: []alterclientquotas.Entry{
+ {
+ Entities: []alterclientquotas.Entity{
+ {
+ EntityType: "client-id",
+ EntityName: "my-client-id",
+ },
+ },
+ Ops: []alterclientquotas.Ops{
+ {
+ Key: "producer_byte_rate",
+ Value: 1.0,
+ Remove: false,
+ },
+ },
+ },
+ },
+ })
+}
+
+func TestAlterClientQuotasResponse(t *testing.T) {
+ prototest.TestResponse(t, v0, &alterclientquotas.Response{
+ ThrottleTimeMs: 500,
+ Results: []alterclientquotas.ResponseQuotas{
+ {
+ ErrorCode: 1,
+ ErrorMessage: "foo",
+ Entities: []alterclientquotas.Entity{
+ {
+ EntityType: "client-id",
+ EntityName: "my-client-id",
+ },
+ },
+ },
+ },
+ })
+
+ prototest.TestResponse(t, v1, &alterclientquotas.Response{
+ ThrottleTimeMs: 500,
+ Results: []alterclientquotas.ResponseQuotas{
+ {
+ ErrorCode: 1,
+ ErrorMessage: "foo",
+ Entities: []alterclientquotas.Entity{
+ {
+ EntityType: "client-id",
+ EntityName: "my-client-id",
+ },
+ },
+ },
+ },
+ })
+}
diff --git protocol/alterpartitionreassignments/alterpartitionreassignments.go protocol/alterpartitionreassignments/alterpartitionreassignments.go
index 4894a2e6a..7f8d2ed2f 100644
--- protocol/alterpartitionreassignments/alterpartitionreassignments.go
+++ protocol/alterpartitionreassignments/alterpartitionreassignments.go
@@ -23,7 +23,7 @@ type RequestTopic struct {
type RequestPartition struct {
PartitionIndex int32 `kafka:"min=v0,max=v0"`
- Replicas []int32 `kafka:"min=v0,max=v0"`
+ Replicas []int32 `kafka:"min=v0,max=v0,nullable"`
}
func (r *Request) ApiKey() protocol.ApiKey {
diff --git a/protocol/alterpartitionreassignments/alterpartitionreassignments_test.go b/protocol/alterpartitionreassignments/alterpartitionreassignments_test.go
new file mode 100644
index 000000000..1e3a860b9
--- /dev/null
+++ protocol/alterpartitionreassignments/alterpartitionreassignments_test.go
@@ -0,0 +1,55 @@
+package alterpartitionreassignments_test
+
+import (
+ "testing"
+
+ "github.com/segmentio/kafka-go/protocol/alterpartitionreassignments"
+ "github.com/segmentio/kafka-go/protocol/prototest"
+)
+
+const (
+ v0 = 0
+)
+
+func TestAlterPartitionReassignmentsRequest(t *testing.T) {
+ prototest.TestRequest(t, v0, &alterpartitionreassignments.Request{
+ TimeoutMs: 1,
+ Topics: []alterpartitionreassignments.RequestTopic{
+ {
+ Name: "topic-1",
+ Partitions: []alterpartitionreassignments.RequestPartition{
+ {
+ PartitionIndex: 1,
+ Replicas: []int32{1, 2, 3},
+ },
+ {
+ PartitionIndex: 2,
+ },
+ },
+ },
+ },
+ })
+}
+
+func TestAlterPartitionReassignmentsResponse(t *testing.T) {
+ prototest.TestResponse(t, v0, &alterpartitionreassignments.Response{
+ ErrorCode: 1,
+ ErrorMessage: "error",
+ ThrottleTimeMs: 1,
+ Results: []alterpartitionreassignments.ResponseResult{
+ {
+ Name: "topic-1",
+ Partitions: []alterpartitionreassignments.ResponsePartition{
+ {
+ PartitionIndex: 1,
+ ErrorMessage: "error",
+ ErrorCode: 1,
+ },
+ {
+ PartitionIndex: 2,
+ },
+ },
+ },
+ },
+ })
+}
diff --git a/protocol/alteruserscramcredentials/alteruserscramcredentials.go b/protocol/alteruserscramcredentials/alteruserscramcredentials.go
new file mode 100644
index 000000000..b5369be20
--- /dev/null
+++ protocol/alteruserscramcredentials/alteruserscramcredentials.go
@@ -0,0 +1,66 @@
+package alteruserscramcredentials
+
+import "github.com/segmentio/kafka-go/protocol"
+
+func init() {
+ protocol.Register(&Request{}, &Response{})
+}
+
+type Request struct {
+ // We need at least one tagged field to indicate that v2+ uses "flexible"
+ // messages.
+ _ struct{} `kafka:"min=v0,max=v0,tag"`
+
+ Deletions []RequestUserScramCredentialsDeletion `kafka:"min=v0,max=v0"`
+ Upsertions []RequestUserScramCredentialsUpsertion `kafka:"min=v0,max=v0"`
+}
+
+func (r *Request) ApiKey() protocol.ApiKey { return protocol.AlterUserScramCredentials }
+
+func (r *Request) Broker(cluster protocol.Cluster) (protocol.Broker, error) {
+ return cluster.Brokers[cluster.Controller], nil
+}
+
+type RequestUserScramCredentialsDeletion struct {
+ // We need at least one tagged field to indicate that v2+ uses "flexible"
+ // messages.
+ _ struct{} `kafka:"min=v0,max=v0,tag"`
+
+ Name string `kafka:"min=v0,max=v0,compact"`
+ Mechanism int8 `kafka:"min=v0,max=v0"`
+}
+
+type RequestUserScramCredentialsUpsertion struct {
+ // We need at least one tagged field to indicate that v2+ uses "flexible"
+ // messages.
+ _ struct{} `kafka:"min=v0,max=v0,tag"`
+
+ Name string `kafka:"min=v0,max=v0,compact"`
+ Mechanism int8 `kafka:"min=v0,max=v0"`
+ Iterations int32 `kafka:"min=v0,max=v0"`
+ Salt []byte `kafka:"min=v0,max=v0,compact"`
+ SaltedPassword []byte `kafka:"min=v0,max=v0,compact"`
+}
+
+type Response struct {
+ // We need at least one tagged field to indicate that v2+ uses "flexible"
+ // messages.
+ _ struct{} `kafka:"min=v0,max=v0,tag"`
+
+ ThrottleTimeMs int32 `kafka:"min=v0,max=v0"`
+ Results []ResponseUserScramCredentials `kafka:"min=v0,max=v0"`
+}
+
+func (r *Response) ApiKey() protocol.ApiKey { return protocol.AlterUserScramCredentials }
+
+type ResponseUserScramCredentials struct {
+ // We need at least one tagged field to indicate that v2+ uses "flexible"
+ // messages.
+ _ struct{} `kafka:"min=v0,max=v0,tag"`
+
+ User string `kafka:"min=v0,max=v0,compact"`
+ ErrorCode int16 `kafka:"min=v0,max=v0"`
+ ErrorMessage string `kafka:"min=v0,max=v0,nullable"`
+}
+
+var _ protocol.BrokerMessage = (*Request)(nil)
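The SaltedPassword field carries a PBKDF2-derived key, never the plaintext password. A minimal sketch of building an upsertion (assumptions: golang.org/x/crypto/pbkdf2 for key derivation, and Kafka's mechanism codes 1 = SCRAM-SHA-256, 2 = SCRAM-SHA-512):

```go
package main

import (
	"crypto/sha256"

	"golang.org/x/crypto/pbkdf2"

	"github.com/segmentio/kafka-go/protocol/alteruserscramcredentials"
)

func main() {
	salt := []byte("my-salt")
	iterations := 15000
	// SCRAM stores the derived key; the broker never sees the password itself.
	salted := pbkdf2.Key([]byte("secret"), salt, iterations, sha256.Size, sha256.New)

	_ = &alteruserscramcredentials.Request{
		Upsertions: []alteruserscramcredentials.RequestUserScramCredentialsUpsertion{{
			Name:           "alice",
			Mechanism:      1, // assumed: 1 = SCRAM-SHA-256
			Iterations:     int32(iterations),
			Salt:           salt,
			SaltedPassword: salted,
		}},
	}
}
```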
diff --git a/protocol/alteruserscramcredentials/alteruserscramcredentials_test.go b/protocol/alteruserscramcredentials/alteruserscramcredentials_test.go
new file mode 100644
index 000000000..850a59557
--- /dev/null
+++ protocol/alteruserscramcredentials/alteruserscramcredentials_test.go
@@ -0,0 +1,45 @@
+package alteruserscramcredentials_test
+
+import (
+ "testing"
+
+ "github.com/segmentio/kafka-go/protocol/alteruserscramcredentials"
+ "github.com/segmentio/kafka-go/protocol/prototest"
+)
+
+const (
+ v0 = 0
+)
+
+func TestAlterUserScramCredentialsRequest(t *testing.T) {
+ prototest.TestRequest(t, v0, &alteruserscramcredentials.Request{
+ Deletions: []alteruserscramcredentials.RequestUserScramCredentialsDeletion{
+ {
+ Name: "foo-1",
+ Mechanism: 1,
+ },
+ },
+ Upsertions: []alteruserscramcredentials.RequestUserScramCredentialsUpsertion{
+ {
+ Name: "foo-2",
+ Mechanism: 2,
+ Iterations: 15000,
+ Salt: []byte("my-salt"),
+ SaltedPassword: []byte("my-salted-password"),
+ },
+ },
+ })
+}
+
+func TestAlterUserScramCredentialsResponse(t *testing.T) {
+ prototest.TestResponse(t, v0, &alteruserscramcredentials.Response{
+ ThrottleTimeMs: 500,
+ Results: []alteruserscramcredentials.ResponseUserScramCredentials{
+ {
+ User: "foo",
+ ErrorCode: 1,
+ ErrorMessage: "foo-error",
+ },
+ },
+ })
+}
diff --git protocol/createacls/createacls.go protocol/createacls/createacls.go
index 893be44dd..aad0cc07c 100644
--- protocol/createacls/createacls.go
+++ protocol/createacls/createacls.go
@@ -9,9 +9,9 @@ func init() {
type Request struct {
// We need at least one tagged field to indicate that v2+ uses "flexible"
// messages.
- _ struct{} `kafka:"min=v2,max=v2,tag"`
+ _ struct{} `kafka:"min=v2,max=v3,tag"`
- Creations []RequestACLs `kafka:"min=v0,max=v2"`
+ Creations []RequestACLs `kafka:"min=v0,max=v3"`
}
func (r *Request) ApiKey() protocol.ApiKey { return protocol.CreateAcls }
@@ -21,29 +21,37 @@ func (r *Request) Broker(cluster protocol.Cluster) (protocol.Broker, error) {
}
type RequestACLs struct {
- ResourceType int8 `kafka:"min=v0,max=v2"`
- ResourceName string `kafka:"min=v0,max=v2"`
- ResourcePatternType int8 `kafka:"min=v0,max=v2"`
- Principal string `kafka:"min=v0,max=v2"`
- Host string `kafka:"min=v0,max=v2"`
- Operation int8 `kafka:"min=v0,max=v2"`
- PermissionType int8 `kafka:"min=v0,max=v2"`
+ // We need at least one tagged field to indicate that v2+ uses "flexible"
+ // messages.
+ _ struct{} `kafka:"min=v2,max=v3,tag"`
+
+ ResourceType int8 `kafka:"min=v0,max=v3"`
+ ResourceName string `kafka:"min=v0,max=v1|min=v2,max=v3,compact"`
+ ResourcePatternType int8 `kafka:"min=v1,max=v3"`
+ Principal string `kafka:"min=v0,max=v1|min=v2,max=v3,compact"`
+ Host string `kafka:"min=v0,max=v1|min=v2,max=v3,compact"`
+ Operation int8 `kafka:"min=v0,max=v3"`
+ PermissionType int8 `kafka:"min=v0,max=v3"`
}
type Response struct {
// We need at least one tagged field to indicate that v2+ uses "flexible"
// messages.
- _ struct{} `kafka:"min=v2,max=v2,tag"`
+ _ struct{} `kafka:"min=v2,max=v3,tag"`
- ThrottleTimeMs int32 `kafka:"min=v0,max=v2"`
- Results []ResponseACLs `kafka:"min=v0,max=v2"`
+ ThrottleTimeMs int32 `kafka:"min=v0,max=v3"`
+ Results []ResponseACLs `kafka:"min=v0,max=v3"`
}
func (r *Response) ApiKey() protocol.ApiKey { return protocol.CreateAcls }
type ResponseACLs struct {
- ErrorCode int16 `kafka:"min=v0,max=v2"`
- ErrorMessage string `kafka:"min=v0,max=v2,nullable"`
+ // We need at least one tagged field to indicate that v2+ uses "flexible"
+ // messages.
+ _ struct{} `kafka:"min=v2,max=v3,tag"`
+
+ ErrorCode int16 `kafka:"min=v0,max=v3"`
+ ErrorMessage string `kafka:"min=v0,max=v1,nullable|min=v2,max=v3,nullable,compact"`
}
var _ protocol.BrokerMessage = (*Request)(nil)
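The `|` in the struct tags above separates per-version encodings. An illustration with a hypothetical struct (same tag grammar as the fields above): the field is a length-prefixed string on v0–v1 and a varint-length "compact" string on the flexible versions v2–v3.

```go
package example

// Hypothetical type, shown only to spell out the tag grammar used above.
type record struct {
	Name string `kafka:"min=v0,max=v1|min=v2,max=v3,compact"`
}
```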
diff --git a/protocol/createacls/createacls_test.go b/protocol/createacls/createacls_test.go
new file mode 100644
index 000000000..61b48c805
--- /dev/null
+++ protocol/createacls/createacls_test.go
@@ -0,0 +1,115 @@
+package createacls_test
+
+import (
+ "testing"
+
+ "github.com/segmentio/kafka-go/protocol/createacls"
+ "github.com/segmentio/kafka-go/protocol/prototest"
+)
+
+const (
+ v0 = 0
+ v1 = 1
+ v2 = 2
+ v3 = 3
+)
+
+func TestCreateACLsRequest(t *testing.T) {
+ prototest.TestRequest(t, v0, &createacls.Request{
+ Creations: []createacls.RequestACLs{
+ {
+ Principal: "User:alice",
+ PermissionType: 3,
+ Operation: 3,
+ ResourceType: 2,
+ ResourceName: "fake-topic-for-alice",
+ Host: "*",
+ },
+ },
+ })
+
+ prototest.TestRequest(t, v1, &createacls.Request{
+ Creations: []createacls.RequestACLs{
+ {
+ Principal: "User:alice",
+ PermissionType: 3,
+ Operation: 3,
+ ResourceType: 2,
+ ResourcePatternType: 3,
+ ResourceName: "fake-topic-for-alice",
+ Host: "*",
+ },
+ },
+ })
+
+ prototest.TestRequest(t, v2, &createacls.Request{
+ Creations: []createacls.RequestACLs{
+ {
+ Principal: "User:alice",
+ PermissionType: 3,
+ Operation: 3,
+ ResourceType: 2,
+ ResourcePatternType: 3,
+ ResourceName: "fake-topic-for-alice",
+ Host: "*",
+ },
+ },
+ })
+
+ prototest.TestRequest(t, v3, &createacls.Request{
+ Creations: []createacls.RequestACLs{
+ {
+ Principal: "User:alice",
+ PermissionType: 3,
+ Operation: 3,
+ ResourceType: 2,
+ ResourcePatternType: 3,
+ ResourceName: "fake-topic-for-alice",
+ Host: "*",
+ },
+ },
+ })
+}
+
+func TestCreateACLsResponse(t *testing.T) {
+ prototest.TestResponse(t, v0, &createacls.Response{
+ ThrottleTimeMs: 1,
+ Results: []createacls.ResponseACLs{
+ {
+ ErrorCode: 1,
+ ErrorMessage: "foo",
+ },
+ },
+ })
+
+ prototest.TestResponse(t, v1, &createacls.Response{
+ ThrottleTimeMs: 1,
+ Results: []createacls.ResponseACLs{
+ {
+ ErrorCode: 1,
+ ErrorMessage: "foo",
+ },
+ },
+ })
+
+ prototest.TestResponse(t, v2, &createacls.Response{
+ ThrottleTimeMs: 1,
+ Results: []createacls.ResponseACLs{
+ {
+ ErrorCode: 1,
+ ErrorMessage: "foo",
+ },
+ },
+ })
+
+ prototest.TestResponse(t, v3, &createacls.Response{
+ ThrottleTimeMs: 1,
+ Results: []createacls.ResponseACLs{
+ {
+ ErrorCode: 1,
+ ErrorMessage: "foo",
+ },
+ },
+ })
+
+}
diff --git protocol/decode.go protocol/decode.go
index 690f30a07..5bf61ffa4 100644
--- protocol/decode.go
+++ protocol/decode.go
@@ -7,6 +7,7 @@ import (
"hash/crc32"
"io"
"io/ioutil"
+ "math"
"reflect"
"sync"
"sync/atomic"
@@ -85,6 +86,10 @@ func (d *decoder) decodeInt64(v value) {
v.setInt64(d.readInt64())
}
+func (d *decoder) decodeFloat64(v value) {
+ v.setFloat64(d.readFloat64())
+}
+
func (d *decoder) decodeString(v value) {
v.setString(d.readString())
}
@@ -216,6 +221,13 @@ func (d *decoder) readInt64() int64 {
return 0
}
+func (d *decoder) readFloat64() float64 {
+ if d.readFull(d.buffer[:8]) {
+ return readFloat64(d.buffer[:8])
+ }
+ return 0
+}
+
func (d *decoder) readString() string {
if n := d.readInt16(); n < 0 {
return ""
@@ -342,6 +354,8 @@ func decodeFuncOf(typ reflect.Type, version int16, flexible bool, tag structTag)
return (*decoder).decodeInt32
case reflect.Int64:
return (*decoder).decodeInt64
+ case reflect.Float64:
+ return (*decoder).decodeFloat64
case reflect.String:
return stringDecodeFuncOf(flexible, tag)
case reflect.Struct:
@@ -469,6 +483,10 @@ func readInt64(b []byte) int64 {
return int64(binary.BigEndian.Uint64(b))
}
+func readFloat64(b []byte) float64 {
+ return math.Float64frombits(binary.BigEndian.Uint64(b))
+}
+
func Unmarshal(data []byte, version int16, value interface{}) error {
typ := elemTypeOf(value)
cache, _ := unmarshalers.Load().(map[versionedType]decodeFunc)
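A standalone illustration (not library code) of the wire format implemented by readFloat64 above and its writeFloat64 counterpart further down: a float64 travels as its IEEE-754 bit pattern in a big-endian 8-byte field.

```go
package main

import (
	"encoding/binary"
	"fmt"
	"math"
)

func main() {
	b := make([]byte, 8)
	binary.BigEndian.PutUint64(b, math.Float64bits(1.5))
	fmt.Printf("% x\n", b)                                        // 3f f8 00 00 00 00 00 00
	fmt.Println(math.Float64frombits(binary.BigEndian.Uint64(b))) // 1.5
}
```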
diff --git a/protocol/deleteacls/deleteacls.go b/protocol/deleteacls/deleteacls.go
new file mode 100644
index 000000000..7f0f002f3
--- /dev/null
+++ protocol/deleteacls/deleteacls.go
@@ -0,0 +1,74 @@
+package deleteacls
+
+import "github.com/segmentio/kafka-go/protocol"
+
+func init() {
+ protocol.Register(&Request{}, &Response{})
+}
+
+type Request struct {
+ // We need at least one tagged field to indicate that v2+ uses "flexible"
+ // messages.
+ _ struct{} `kafka:"min=v2,max=v3,tag"`
+
+ Filters []RequestFilter `kafka:"min=v0,max=v3"`
+}
+
+func (r *Request) ApiKey() protocol.ApiKey { return protocol.DeleteAcls }
+
+func (r *Request) Broker(cluster protocol.Cluster) (protocol.Broker, error) {
+ return cluster.Brokers[cluster.Controller], nil
+}
+
+type RequestFilter struct {
+ // We need at least one tagged field to indicate that v2+ uses "flexible"
+ // messages.
+ _ struct{} `kafka:"min=v2,max=v3,tag"`
+
+ ResourceTypeFilter int8 `kafka:"min=v0,max=v3"`
+ ResourceNameFilter string `kafka:"min=v0,max=v1,nullable|min=v2,max=v3,nullable,compact"`
+ ResourcePatternTypeFilter int8 `kafka:"min=v1,max=v3"`
+ PrincipalFilter string `kafka:"min=v0,max=v1,nullable|min=v2,max=v3,nullable,compact"`
+ HostFilter string `kafka:"min=v0,max=v1,nullable|min=v2,max=v3,nullable,compact"`
+ Operation int8 `kafka:"min=v0,max=v3"`
+ PermissionType int8 `kafka:"min=v0,max=v3"`
+}
+
+type Response struct {
+ // We need at least one tagged field to indicate that v2+ uses "flexible"
+ // messages.
+ _ struct{} `kafka:"min=v2,max=v3,tag"`
+
+ ThrottleTimeMs int32 `kafka:"min=v0,max=v3"`
+ FilterResults []FilterResult `kafka:"min=v0,max=v3"`
+}
+
+func (r *Response) ApiKey() protocol.ApiKey { return protocol.DeleteAcls }
+
+type FilterResult struct {
+ // We need at least one tagged field to indicate that v2+ uses "flexible"
+ // messages.
+ _ struct{} `kafka:"min=v2,max=v3,tag"`
+
+ ErrorCode int16 `kafka:"min=v0,max=v3"`
+ ErrorMessage string `kafka:"min=v0,max=v1,nullable|min=v2,max=v3,nullable,compact"`
+ MatchingACLs []MatchingACL `kafka:"min=v0,max=v3"`
+}
+
+type MatchingACL struct {
+ // We need at least one tagged field to indicate that v2+ uses "flexible"
+ // messages.
+	_ struct{} `kafka:"min=v2,max=v3,tag"`
+
+ ErrorCode int16 `kafka:"min=v0,max=v3"`
+ ErrorMessage string `kafka:"min=v0,max=v1,nullable|min=v2,max=v3,nullable,compact"`
+ ResourceType int8 `kafka:"min=v0,max=v3"`
+ ResourceName string `kafka:"min=v0,max=v1|min=v2,max=v3,compact"`
+ ResourcePatternType int8 `kafka:"min=v1,max=v3"`
+ Principal string `kafka:"min=v0,max=v1|min=v2,max=v3,compact"`
+ Host string `kafka:"min=v0,max=v1|min=v2,max=v3,compact"`
+ Operation int8 `kafka:"min=v0,max=v3"`
+ PermissionType int8 `kafka:"min=v0,max=v3"`
+}
+
+var _ protocol.BrokerMessage = (*Request)(nil)
diff --git a/protocol/deleteacls/deleteacls_test.go b/protocol/deleteacls/deleteacls_test.go
new file mode 100644
index 000000000..51f8dd204
--- /dev/null
+++ protocol/deleteacls/deleteacls_test.go
@@ -0,0 +1,165 @@
+package deleteacls_test
+
+import (
+ "testing"
+
+ "github.com/segmentio/kafka-go/protocol/deleteacls"
+ "github.com/segmentio/kafka-go/protocol/prototest"
+)
+
+const (
+ v0 = 0
+ v1 = 1
+ v2 = 2
+ v3 = 3
+)
+
+func TestDeleteACLsRequest(t *testing.T) {
+ prototest.TestRequest(t, v0, &deleteacls.Request{
+ Filters: []deleteacls.RequestFilter{
+ {
+ ResourceTypeFilter: 2,
+ ResourceNameFilter: "fake-topic-for-alice",
+ PrincipalFilter: "User:alice",
+ HostFilter: "*",
+ Operation: 3,
+ PermissionType: 3,
+ },
+ },
+ })
+
+ prototest.TestRequest(t, v1, &deleteacls.Request{
+ Filters: []deleteacls.RequestFilter{
+ {
+ ResourceTypeFilter: 2,
+ ResourceNameFilter: "fake-topic-for-alice",
+ ResourcePatternTypeFilter: 0,
+ PrincipalFilter: "User:alice",
+ HostFilter: "*",
+ Operation: 3,
+ PermissionType: 3,
+ },
+ },
+ })
+
+ prototest.TestRequest(t, v2, &deleteacls.Request{
+ Filters: []deleteacls.RequestFilter{
+ {
+ ResourceTypeFilter: 2,
+ ResourceNameFilter: "fake-topic-for-alice",
+ ResourcePatternTypeFilter: 0,
+ PrincipalFilter: "User:alice",
+ HostFilter: "*",
+ Operation: 3,
+ PermissionType: 3,
+ },
+ },
+ })
+
+ prototest.TestRequest(t, v3, &deleteacls.Request{
+ Filters: []deleteacls.RequestFilter{
+ {
+ ResourceTypeFilter: 2,
+ ResourceNameFilter: "fake-topic-for-alice",
+ ResourcePatternTypeFilter: 0,
+ PrincipalFilter: "User:alice",
+ HostFilter: "*",
+ Operation: 3,
+ PermissionType: 3,
+ },
+ },
+ })
+}
+
+func TestDeleteACLsResponse(t *testing.T) {
+ prototest.TestResponse(t, v0, &deleteacls.Response{
+ ThrottleTimeMs: 1,
+ FilterResults: []deleteacls.FilterResult{
+ {
+ ErrorCode: 1,
+ ErrorMessage: "foo",
+ MatchingACLs: []deleteacls.MatchingACL{
+ {
+ ErrorCode: 1,
+ ErrorMessage: "bar",
+ ResourceType: 2,
+ ResourceName: "fake-topic-for-alice",
+ Principal: "User:alice",
+ Host: "*",
+ Operation: 3,
+ PermissionType: 3,
+ },
+ },
+ },
+ },
+ })
+
+ prototest.TestResponse(t, v1, &deleteacls.Response{
+ ThrottleTimeMs: 1,
+ FilterResults: []deleteacls.FilterResult{
+ {
+ ErrorCode: 1,
+ ErrorMessage: "foo",
+ MatchingACLs: []deleteacls.MatchingACL{
+ {
+ ErrorCode: 1,
+ ErrorMessage: "bar",
+ ResourceType: 2,
+ ResourceName: "fake-topic-for-alice",
+ ResourcePatternType: 0,
+ Principal: "User:alice",
+ Host: "*",
+ Operation: 3,
+ PermissionType: 3,
+ },
+ },
+ },
+ },
+ })
+
+ prototest.TestResponse(t, v2, &deleteacls.Response{
+ ThrottleTimeMs: 1,
+ FilterResults: []deleteacls.FilterResult{
+ {
+ ErrorCode: 1,
+ ErrorMessage: "foo",
+ MatchingACLs: []deleteacls.MatchingACL{
+ {
+ ErrorCode: 1,
+ ErrorMessage: "bar",
+ ResourceType: 2,
+ ResourceName: "fake-topic-for-alice",
+ ResourcePatternType: 0,
+ Principal: "User:alice",
+ Host: "*",
+ Operation: 3,
+ PermissionType: 3,
+ },
+ },
+ },
+ },
+ })
+
+ prototest.TestResponse(t, v3, &deleteacls.Response{
+ ThrottleTimeMs: 1,
+ FilterResults: []deleteacls.FilterResult{
+ {
+ ErrorCode: 1,
+ ErrorMessage: "foo",
+ MatchingACLs: []deleteacls.MatchingACL{
+ {
+ ErrorCode: 1,
+ ErrorMessage: "bar",
+ ResourceType: 2,
+ ResourceName: "fake-topic-for-alice",
+ ResourcePatternType: 0,
+ Principal: "User:alice",
+ Host: "*",
+ Operation: 3,
+ PermissionType: 3,
+ },
+ },
+ },
+ },
+ })
+}
diff --git a/protocol/deletegroups/deletegroups.go b/protocol/deletegroups/deletegroups.go
new file mode 100644
index 000000000..759dfc2fe
--- /dev/null
+++ protocol/deletegroups/deletegroups.go
@@ -0,0 +1,45 @@
+package deletegroups
+
+import "github.com/segmentio/kafka-go/protocol"
+
+func init() {
+ protocol.Register(&Request{}, &Response{})
+}
+
+type Request struct {
+ // We need at least one tagged field to indicate that this is a "flexible" message
+ // type.
+ _ struct{} `kafka:"min=v2,max=v2,tag"`
+
+ GroupIDs []string `kafka:"min=v0,max=v2"`
+}
+
+func (r *Request) Group() string {
+ // use first group to determine group coordinator
+ if len(r.GroupIDs) > 0 {
+ return r.GroupIDs[0]
+ }
+ return ""
+}
+
+func (r *Request) ApiKey() protocol.ApiKey { return protocol.DeleteGroups }
+
+var (
+ _ protocol.GroupMessage = (*Request)(nil)
+)
+
+type Response struct {
+ // We need at least one tagged field to indicate that this is a "flexible" message
+ // type.
+ _ struct{} `kafka:"min=v2,max=v2,tag"`
+
+ ThrottleTimeMs int32 `kafka:"min=v0,max=v2"`
+ Responses []ResponseGroup `kafka:"min=v0,max=v2"`
+}
+
+func (r *Response) ApiKey() protocol.ApiKey { return protocol.DeleteGroups }
+
+type ResponseGroup struct {
+ GroupID string `kafka:"min=v0,max=v2"`
+ ErrorCode int16 `kafka:"min=v0,max=v2"`
+}
diff --git a/protocol/deletegroups/deletegroups_test.go b/protocol/deletegroups/deletegroups_test.go
new file mode 100644
index 000000000..141ab3bcc
--- /dev/null
+++ protocol/deletegroups/deletegroups_test.go
@@ -0,0 +1,33 @@
+package deletegroups_test
+
+import (
+ "testing"
+
+ "github.com/segmentio/kafka-go/protocol/deletegroups"
+ "github.com/segmentio/kafka-go/protocol/prototest"
+)
+
+func TestDeleteGroupsRequest(t *testing.T) {
+ for _, version := range []int16{0, 1, 2} {
+ prototest.TestRequest(t, version, &deletegroups.Request{
+ GroupIDs: []string{"group1", "group2"},
+ })
+ }
+}
+
+func TestDeleteGroupsResponse(t *testing.T) {
+ for _, version := range []int16{0, 1, 2} {
+ prototest.TestResponse(t, version, &deletegroups.Response{
+ Responses: []deletegroups.ResponseGroup{
+ {
+ GroupID: "group1",
+ ErrorCode: 0,
+ },
+ {
+ GroupID: "group2",
+ ErrorCode: 1,
+ },
+ },
+ })
+ }
+}
diff --git a/protocol/describeacls/describeacls.go b/protocol/describeacls/describeacls.go
new file mode 100644
index 000000000..93a7d2ed7
--- /dev/null
+++ protocol/describeacls/describeacls.go
@@ -0,0 +1,72 @@
+package describeacls
+
+import "github.com/segmentio/kafka-go/protocol"
+
+func init() {
+ protocol.Register(&Request{}, &Response{})
+}
+
+type Request struct {
+ // We need at least one tagged field to indicate that v2+ uses "flexible"
+ // messages.
+ _ struct{} `kafka:"min=v2,max=v3,tag"`
+
+ Filter ACLFilter `kafka:"min=v0,max=v3"`
+}
+
+func (r *Request) ApiKey() protocol.ApiKey { return protocol.DescribeAcls }
+
+func (r *Request) Broker(cluster protocol.Cluster) (protocol.Broker, error) {
+ return cluster.Brokers[cluster.Controller], nil
+}
+
+type ACLFilter struct {
+ // We need at least one tagged field to indicate that v2+ uses "flexible"
+ // messages.
+ _ struct{} `kafka:"min=v2,max=v3,tag"`
+
+ ResourceTypeFilter int8 `kafka:"min=v0,max=v3"`
+ ResourceNameFilter string `kafka:"min=v0,max=v1,nullable|min=v2,max=v3,nullable,compact"`
+ ResourcePatternTypeFilter int8 `kafka:"min=v1,max=v3"`
+ PrincipalFilter string `kafka:"min=v0,max=v1,nullable|min=v2,max=v3,nullable,compact"`
+ HostFilter string `kafka:"min=v0,max=v1,nullable|min=v2,max=v3,nullable,compact"`
+ Operation int8 `kafka:"min=v0,max=v3"`
+ PermissionType int8 `kafka:"min=v0,max=v3"`
+}
+
+type Response struct {
+ // We need at least one tagged field to indicate that v2+ uses "flexible"
+ // messages.
+ _ struct{} `kafka:"min=v2,max=v3,tag"`
+
+ ThrottleTimeMs int32 `kafka:"min=v0,max=v3"`
+ ErrorCode int16 `kafka:"min=v0,max=v3"`
+ ErrorMessage string `kafka:"min=v0,max=v1,nullable|min=v2,max=v3,nullable,compact"`
+ Resources []Resource `kafka:"min=v0,max=v3"`
+}
+
+func (r *Response) ApiKey() protocol.ApiKey { return protocol.DescribeAcls }
+
+type Resource struct {
+ // We need at least one tagged field to indicate that v2+ uses "flexible"
+ // messages.
+ _ struct{} `kafka:"min=v2,max=v3,tag"`
+
+ ResourceType int8 `kafka:"min=v0,max=v3"`
+ ResourceName string `kafka:"min=v0,max=v1|min=v2,max=v3,compact"`
+ PatternType int8 `kafka:"min=v1,max=v3"`
+ ACLs []ResponseACL `kafka:"min=v0,max=v3"`
+}
+
+type ResponseACL struct {
+ // We need at least one tagged field to indicate that v2+ uses "flexible"
+ // messages.
+ _ struct{} `kafka:"min=v2,max=v3,tag"`
+
+ Principal string `kafka:"min=v0,max=v1|min=v2,max=v3,compact"`
+ Host string `kafka:"min=v0,max=v1|min=v2,max=v3,compact"`
+ Operation int8 `kafka:"min=v0,max=v3"`
+ PermissionType int8 `kafka:"min=v0,max=v3"`
+}
+
+var _ protocol.BrokerMessage = (*Request)(nil)
diff --git a/protocol/describeacls/describeacls_test.go b/protocol/describeacls/describeacls_test.go
new file mode 100644
index 000000000..8fd45fffc
--- /dev/null
+++ protocol/describeacls/describeacls_test.go
@@ -0,0 +1,149 @@
+package describeacls_test
+
+import (
+ "testing"
+
+ "github.com/segmentio/kafka-go/protocol/describeacls"
+ "github.com/segmentio/kafka-go/protocol/prototest"
+)
+
+const (
+ v0 = 0
+ v1 = 1
+ v2 = 2
+ v3 = 3
+)
+
+func TestDescribeACLsRequest(t *testing.T) {
+ prototest.TestRequest(t, v0, &describeacls.Request{
+ Filter: describeacls.ACLFilter{
+ ResourceTypeFilter: 2,
+ ResourceNameFilter: "fake-topic-for-alice",
+ PrincipalFilter: "User:alice",
+ HostFilter: "*",
+ Operation: 3,
+ PermissionType: 3,
+ },
+ })
+
+ prototest.TestRequest(t, v1, &describeacls.Request{
+ Filter: describeacls.ACLFilter{
+ ResourceTypeFilter: 2,
+ ResourceNameFilter: "fake-topic-for-alice",
+ ResourcePatternTypeFilter: 0,
+ PrincipalFilter: "User:alice",
+ HostFilter: "*",
+ Operation: 3,
+ PermissionType: 3,
+ },
+ })
+
+ prototest.TestRequest(t, v2, &describeacls.Request{
+ Filter: describeacls.ACLFilter{
+ ResourceTypeFilter: 2,
+ ResourceNameFilter: "fake-topic-for-alice",
+ ResourcePatternTypeFilter: 0,
+ PrincipalFilter: "User:alice",
+ HostFilter: "*",
+ Operation: 3,
+ PermissionType: 3,
+ },
+ })
+
+ prototest.TestRequest(t, v3, &describeacls.Request{
+ Filter: describeacls.ACLFilter{
+ ResourceTypeFilter: 2,
+ ResourceNameFilter: "fake-topic-for-alice",
+ ResourcePatternTypeFilter: 0,
+ PrincipalFilter: "User:alice",
+ HostFilter: "*",
+ Operation: 3,
+ PermissionType: 3,
+ },
+ })
+}
+
+func TestDescribeACLsResponse(t *testing.T) {
+ prototest.TestResponse(t, v0, &describeacls.Response{
+ ThrottleTimeMs: 1,
+ ErrorCode: 1,
+ ErrorMessage: "foo",
+ Resources: []describeacls.Resource{
+ {
+ ResourceType: 2,
+ ResourceName: "fake-topic-for-alice",
+ ACLs: []describeacls.ResponseACL{
+ {
+ Principal: "User:alice",
+ Host: "*",
+ Operation: 3,
+ PermissionType: 3,
+ },
+ },
+ },
+ },
+ })
+
+ prototest.TestResponse(t, v1, &describeacls.Response{
+ ThrottleTimeMs: 1,
+ ErrorCode: 1,
+ ErrorMessage: "foo",
+ Resources: []describeacls.Resource{
+ {
+ ResourceType: 2,
+ ResourceName: "fake-topic-for-alice",
+ PatternType: 3,
+ ACLs: []describeacls.ResponseACL{
+ {
+ Principal: "User:alice",
+ Host: "*",
+ Operation: 3,
+ PermissionType: 3,
+ },
+ },
+ },
+ },
+ })
+
+ prototest.TestResponse(t, v2, &describeacls.Response{
+ ThrottleTimeMs: 1,
+ ErrorCode: 1,
+ ErrorMessage: "foo",
+ Resources: []describeacls.Resource{
+ {
+ ResourceType: 2,
+ ResourceName: "fake-topic-for-alice",
+ PatternType: 3,
+ ACLs: []describeacls.ResponseACL{
+ {
+ Principal: "User:alice",
+ Host: "*",
+ Operation: 3,
+ PermissionType: 3,
+ },
+ },
+ },
+ },
+ })
+
+ prototest.TestResponse(t, v3, &describeacls.Response{
+ ThrottleTimeMs: 1,
+ ErrorCode: 1,
+ ErrorMessage: "foo",
+ Resources: []describeacls.Resource{
+ {
+ ResourceType: 2,
+ ResourceName: "fake-topic-for-alice",
+ PatternType: 3,
+ ACLs: []describeacls.ResponseACL{
+ {
+ Principal: "User:alice",
+ Host: "*",
+ Operation: 3,
+ PermissionType: 3,
+ },
+ },
+ },
+ },
+ })
+}
diff --git a/protocol/describeclientquotas/describeclientquotas.go b/protocol/describeclientquotas/describeclientquotas.go
new file mode 100644
index 000000000..e137776bf
--- /dev/null
+++ protocol/describeclientquotas/describeclientquotas.go
@@ -0,0 +1,68 @@
+package describeclientquotas
+
+import "github.com/segmentio/kafka-go/protocol"
+
+func init() {
+ protocol.Register(&Request{}, &Response{})
+}
+
+type Request struct {
+ // We need at least one tagged field to indicate that this is a "flexible" message
+ // type.
+ _ struct{} `kafka:"min=v1,max=v1,tag"`
+ Components []Component `kafka:"min=v0,max=v1"`
+ Strict bool `kafka:"min=v0,max=v1"`
+}
+
+func (r *Request) ApiKey() protocol.ApiKey { return protocol.DescribeClientQuotas }
+
+func (r *Request) Broker(cluster protocol.Cluster) (protocol.Broker, error) {
+ return cluster.Brokers[cluster.Controller], nil
+}
+
+type Component struct {
+ // We need at least one tagged field to indicate that this is a "flexible" message
+ // type.
+ _ struct{} `kafka:"min=v1,max=v1,tag"`
+ EntityType string `kafka:"min=v0,max=v1"`
+ MatchType int8 `kafka:"min=v0,max=v1"`
+ Match string `kafka:"min=v0,max=v1,nullable"`
+}
+
+type Response struct {
+ // We need at least one tagged field to indicate that this is a "flexible" message
+ // type.
+ _ struct{} `kafka:"min=v1,max=v1,tag"`
+ ThrottleTimeMs int32 `kafka:"min=v0,max=v1"`
+ ErrorCode int16 `kafka:"min=v0,max=v1"`
+ ErrorMessage string `kafka:"min=v0,max=v0,nullable|min=v1,max=v1,nullable,compact"`
+ Entries []ResponseQuotas `kafka:"min=v0,max=v1"`
+}
+
+func (r *Response) ApiKey() protocol.ApiKey { return protocol.DescribeClientQuotas }
+
+type Entity struct {
+ // We need at least one tagged field to indicate that this is a "flexible" message
+ // type.
+ _ struct{} `kafka:"min=v1,max=v1,tag"`
+ EntityType string `kafka:"min=v0,max=v0|min=v1,max=v1,compact"`
+ EntityName string `kafka:"min=v0,max=v0,nullable|min=v1,max=v1,nullable,compact"`
+}
+
+type Value struct {
+ // We need at least one tagged field to indicate that this is a "flexible" message
+ // type.
+ _ struct{} `kafka:"min=v1,max=v1,tag"`
+ Key string `kafka:"min=v0,max=v0|min=v1,max=v1,compact"`
+ Value float64 `kafka:"min=v0,max=v1"`
+}
+
+type ResponseQuotas struct {
+ // We need at least one tagged field to indicate that this is a "flexible" message
+ // type.
+ _ struct{} `kafka:"min=v1,max=v1,tag"`
+ Entities []Entity `kafka:"min=v0,max=v1"`
+ Values []Value `kafka:"min=v0,max=v1"`
+}
+
+var _ protocol.BrokerMessage = (*Request)(nil)
diff --git a/protocol/describeclientquotas/describeclientquotas_test.go b/protocol/describeclientquotas/describeclientquotas_test.go
new file mode 100644
index 000000000..3bb404c27
--- /dev/null
+++ protocol/describeclientquotas/describeclientquotas_test.go
@@ -0,0 +1,83 @@
+package describeclientquotas_test
+
+import (
+ "testing"
+
+ "github.com/segmentio/kafka-go/protocol/describeclientquotas"
+ "github.com/segmentio/kafka-go/protocol/prototest"
+)
+
+const (
+ v0 = 0
+ v1 = 1
+)
+
+func TestDescribeClientQuotasRequest(t *testing.T) {
+ prototest.TestRequest(t, v0, &describeclientquotas.Request{
+ Strict: true,
+ Components: []describeclientquotas.Component{
+ {
+ EntityType: "client-id",
+ MatchType: 0,
+ Match: "my-client-id",
+ },
+ },
+ })
+
+ prototest.TestRequest(t, v1, &describeclientquotas.Request{
+ Strict: true,
+ Components: []describeclientquotas.Component{
+ {
+ EntityType: "client-id",
+ MatchType: 0,
+ Match: "my-client-id",
+ },
+ },
+ })
+}
+
+func TestDescribeClientQuotasResponse(t *testing.T) {
+ prototest.TestResponse(t, v0, &describeclientquotas.Response{
+ ThrottleTimeMs: 1,
+ ErrorCode: 1,
+ ErrorMessage: "foo",
+ Entries: []describeclientquotas.ResponseQuotas{
+ {
+ Entities: []describeclientquotas.Entity{
+ {
+ EntityType: "client-id",
+ EntityName: "my-client-id",
+ },
+ },
+ Values: []describeclientquotas.Value{
+ {
+ Key: "foo",
+ Value: 1.0,
+ },
+ },
+ },
+ },
+ })
+
+ prototest.TestResponse(t, v1, &describeclientquotas.Response{
+ ThrottleTimeMs: 1,
+ ErrorCode: 1,
+ ErrorMessage: "foo",
+ Entries: []describeclientquotas.ResponseQuotas{
+ {
+ Entities: []describeclientquotas.Entity{
+ {
+ EntityType: "client-id",
+ EntityName: "my-client-id",
+ },
+ },
+ Values: []describeclientquotas.Value{
+ {
+ Key: "foo",
+ Value: 1.0,
+ },
+ },
+ },
+ },
+ })
+}
diff --git a/protocol/describeuserscramcredentials/describeuserscramcredentials.go b/protocol/describeuserscramcredentials/describeuserscramcredentials.go
new file mode 100644
index 000000000..e923b9a11
--- /dev/null
+++ protocol/describeuserscramcredentials/describeuserscramcredentials.go
@@ -0,0 +1,64 @@
+package describeuserscramcredentials
+
+import "github.com/segmentio/kafka-go/protocol"
+
+func init() {
+ protocol.Register(&Request{}, &Response{})
+}
+
+type Request struct {
+ // We need at least one tagged field to indicate that v2+ uses "flexible"
+ // messages.
+ _ struct{} `kafka:"min=v0,max=v0,tag"`
+
+ Users []RequestUser `kafka:"min=v0,max=v0"`
+}
+
+func (r *Request) ApiKey() protocol.ApiKey { return protocol.DescribeUserScramCredentials }
+
+func (r *Request) Broker(cluster protocol.Cluster) (protocol.Broker, error) {
+ return cluster.Brokers[cluster.Controller], nil
+}
+
+type RequestUser struct {
+ // We need at least one tagged field to indicate that v2+ uses "flexible"
+ // messages.
+ _ struct{} `kafka:"min=v0,max=v0,tag"`
+
+ Name string `kafka:"min=v0,max=v0,compact"`
+}
+
+type Response struct {
+ // We need at least one tagged field to indicate that v2+ uses "flexible"
+ // messages.
+ _ struct{} `kafka:"min=v0,max=v0,tag"`
+
+ ThrottleTimeMs int32 `kafka:"min=v0,max=v0"`
+ ErrorCode int16 `kafka:"min=v0,max=v0"`
+ ErrorMessage string `kafka:"min=v0,max=v0,nullable"`
+ Results []ResponseResult `kafka:"min=v0,max=v0"`
+}
+
+func (r *Response) ApiKey() protocol.ApiKey { return protocol.DescribeUserScramCredentials }
+
+type ResponseResult struct {
+ // We need at least one tagged field to indicate that v2+ uses "flexible"
+ // messages.
+ _ struct{} `kafka:"min=v0,max=v0,tag"`
+
+ User string `kafka:"min=v0,max=v0,compact"`
+ ErrorCode int16 `kafka:"min=v0,max=v0"`
+ ErrorMessage string `kafka:"min=v0,max=v0,nullable"`
+ CredentialInfos []CredentialInfo `kafka:"min=v0,max=v0"`
+}
+
+type CredentialInfo struct {
+ // We need at least one tagged field to indicate that v2+ uses "flexible"
+ // messages.
+ _ struct{} `kafka:"min=v0,max=v0,tag"`
+
+ Mechanism int8 `kafka:"min=v0,max=v0"`
+ Iterations int32 `kafka:"min=v0,max=v0"`
+}
+
+var _ protocol.BrokerMessage = (*Request)(nil)
diff --git a/protocol/describeuserscramcredentials/describeuserscramcredentials_test.go b/protocol/describeuserscramcredentials/describeuserscramcredentials_test.go
new file mode 100644
index 000000000..3f2df6ff2
--- /dev/null
+++ protocol/describeuserscramcredentials/describeuserscramcredentials_test.go
@@ -0,0 +1,41 @@
+package describeuserscramcredentials_test
+
+import (
+ "testing"
+
+ "github.com/segmentio/kafka-go/protocol/describeuserscramcredentials"
+ "github.com/segmentio/kafka-go/protocol/prototest"
+)
+
+const (
+ v0 = 0
+)
+
+func TestDescribeUserScramCredentialsRequest(t *testing.T) {
+ prototest.TestRequest(t, v0, &describeuserscramcredentials.Request{
+ Users: []describeuserscramcredentials.RequestUser{
+ {
+ Name: "foo-1",
+ },
+ },
+ })
+}
+
+func TestDescribeUserScramCredentialsResponse(t *testing.T) {
+ prototest.TestResponse(t, v0, &describeuserscramcredentials.Response{
+ ThrottleTimeMs: 500,
+ Results: []describeuserscramcredentials.ResponseResult{
+ {
+ User: "foo",
+ ErrorCode: 1,
+ ErrorMessage: "foo-error",
+ CredentialInfos: []describeuserscramcredentials.CredentialInfo{
+ {
+ Mechanism: 2,
+ Iterations: 15000,
+ },
+ },
+ },
+ },
+ })
+}
diff --git protocol/encode.go protocol/encode.go
index a483a81ec..bd1633671 100644
--- protocol/encode.go
+++ protocol/encode.go
@@ -6,6 +6,7 @@ import (
"fmt"
"hash/crc32"
"io"
+ "math"
"reflect"
"sync"
"sync/atomic"
@@ -129,6 +130,10 @@ func (e *encoder) encodeInt64(v value) {
e.writeInt64(v.int64())
}
+func (e *encoder) encodeFloat64(v value) {
+ e.writeFloat64(v.float64())
+}
+
func (e *encoder) encodeString(v value) {
e.writeString(v.string())
}
@@ -230,6 +235,11 @@ func (e *encoder) writeInt64(i int64) {
e.Write(e.buffer[:8])
}
+func (e *encoder) writeFloat64(f float64) {
+ writeFloat64(e.buffer[:8], f)
+ e.Write(e.buffer[:8])
+}
+
func (e *encoder) writeString(s string) {
e.writeInt16(int16(len(s)))
e.WriteString(s)
@@ -378,6 +388,8 @@ func encodeFuncOf(typ reflect.Type, version int16, flexible bool, tag structTag)
return (*encoder).encodeInt32
case reflect.Int64:
return (*encoder).encodeInt64
+ case reflect.Float64:
+ return (*encoder).encodeFloat64
case reflect.String:
return stringEncodeFuncOf(flexible, tag)
case reflect.Struct:
@@ -530,6 +542,10 @@ func writeInt64(b []byte, i int64) {
binary.BigEndian.PutUint64(b, uint64(i))
}
+func writeFloat64(b []byte, f float64) {
+ binary.BigEndian.PutUint64(b, math.Float64bits(f))
+}
+
func Marshal(version int16, value interface{}) ([]byte, error) {
typ := typeOf(value)
cache, _ := marshalers.Load().(map[versionedType]encodeFunc)
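A minimal round-trip sketch (assumed usage of protocol.Marshal/Unmarshal, which this diff leaves unchanged): the float64 codec added above is what lets quota values such as describeclientquotas.Value survive encoding.

```go
package main

import (
	"fmt"

	"github.com/segmentio/kafka-go/protocol"
	"github.com/segmentio/kafka-go/protocol/describeclientquotas"
)

func main() {
	in := describeclientquotas.Value{Key: "producer_byte_rate", Value: 1048576.0}
	b, err := protocol.Marshal(0, in)
	if err != nil {
		panic(err)
	}
	out := describeclientquotas.Value{}
	if err := protocol.Unmarshal(b, 0, &out); err != nil {
		panic(err)
	}
	fmt.Println(out.Key, out.Value) // producer_byte_rate 1.048576e+06
}
```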
diff --git a/protocol/listpartitionreassignments/listpartitionreassignments.go b/protocol/listpartitionreassignments/listpartitionreassignments.go
new file mode 100644
index 000000000..d26a64101
--- /dev/null
+++ protocol/listpartitionreassignments/listpartitionreassignments.go
@@ -0,0 +1,70 @@
+package listpartitionreassignments
+
+import "github.com/segmentio/kafka-go/protocol"
+
+func init() {
+ protocol.Register(&Request{}, &Response{})
+}
+
+// Detailed API definition: https://kafka.apache.org/protocol#The_Messages_ListPartitionReassignments.
+
+type Request struct {
+ // We need at least one tagged field to indicate that this is a "flexible" message
+ // type.
+ _ struct{} `kafka:"min=v0,max=v0,tag"`
+
+ TimeoutMs int32 `kafka:"min=v0,max=v0"`
+ Topics []RequestTopic `kafka:"min=v0,max=v0,nullable"`
+}
+
+type RequestTopic struct {
+ // We need at least one tagged field to indicate that this is a "flexible" message
+ // type.
+ _ struct{} `kafka:"min=v0,max=v0,tag"`
+
+ Name string `kafka:"min=v0,max=v0"`
+ PartitionIndexes []int32 `kafka:"min=v0,max=v0"`
+}
+
+func (r *Request) ApiKey() protocol.ApiKey {
+ return protocol.ListPartitionReassignments
+}
+
+func (r *Request) Broker(cluster protocol.Cluster) (protocol.Broker, error) {
+ return cluster.Brokers[cluster.Controller], nil
+}
+
+type Response struct {
+ // We need at least one tagged field to indicate that this is a "flexible" message
+ // type.
+ _ struct{} `kafka:"min=v0,max=v0,tag"`
+
+ ThrottleTimeMs int32 `kafka:"min=v0,max=v0"`
+ ErrorCode int16 `kafka:"min=v0,max=v0"`
+ ErrorMessage string `kafka:"min=v0,max=v0,nullable"`
+ Topics []ResponseTopic `kafka:"min=v0,max=v0"`
+}
+
+type ResponseTopic struct {
+ // We need at least one tagged field to indicate that this is a "flexible" message
+ // type.
+ _ struct{} `kafka:"min=v0,max=v0,tag"`
+
+ Name string `kafka:"min=v0,max=v0"`
+ Partitions []ResponsePartition `kafka:"min=v0,max=v0"`
+}
+
+type ResponsePartition struct {
+ // We need at least one tagged field to indicate that this is a "flexible" message
+ // type.
+ _ struct{} `kafka:"min=v0,max=v0,tag"`
+
+ PartitionIndex int32 `kafka:"min=v0,max=v0"`
+ Replicas []int32 `kafka:"min=v0,max=v0"`
+ AddingReplicas []int32 `kafka:"min=v0,max=v0"`
+ RemovingReplicas []int32 `kafka:"min=v0,max=v0"`
+}
+
+func (r *Response) ApiKey() protocol.ApiKey {
+ return protocol.ListPartitionReassignments
+}
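Note that Topics is tagged `nullable`: a nil slice is sent as a null array, which (per KIP-455) asks the controller to list every in-progress reassignment rather than a specific set. A minimal sketch:

```go
package main

import "github.com/segmentio/kafka-go/protocol/listpartitionreassignments"

func main() {
	// nil Topics: list all ongoing reassignments in the cluster.
	_ = &listpartitionreassignments.Request{TimeoutMs: 5000, Topics: nil}
}
```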
diff --git a/protocol/listpartitionreassignments/listpartitionreassignments_test.go b/protocol/listpartitionreassignments/listpartitionreassignments_test.go
new file mode 100644
index 000000000..e32869faa
--- /dev/null
+++ protocol/listpartitionreassignments/listpartitionreassignments_test.go
@@ -0,0 +1,41 @@
+package listpartitionreassignments_test
+
+import (
+ "testing"
+
+ "github.com/segmentio/kafka-go/protocol/listpartitionreassignments"
+ "github.com/segmentio/kafka-go/protocol/prototest"
+)
+
+const (
+ v0 = 0
+)
+
+func TestListPartitionReassignmentsRequest(t *testing.T) {
+ prototest.TestRequest(t, v0, &listpartitionreassignments.Request{
+ Topics: []listpartitionreassignments.RequestTopic{
+ {
+ Name: "topic-1",
+ PartitionIndexes: []int32{1, 2, 3},
+ },
+ },
+ })
+}
+
+func TestListPartitionReassignmentsResponse(t *testing.T) {
+ prototest.TestResponse(t, v0, &listpartitionreassignments.Response{
+ Topics: []listpartitionreassignments.ResponseTopic{
+ {
+ Name: "topic-1",
+ Partitions: []listpartitionreassignments.ResponsePartition{
+ {
+ PartitionIndex: 1,
+ Replicas: []int32{1, 2, 3},
+ AddingReplicas: []int32{4, 5, 6},
+ RemovingReplicas: []int32{7, 8, 9},
+ },
+ },
+ },
+ },
+ })
+}
diff --git a/protocol/offsetdelete/offsetdelete.go b/protocol/offsetdelete/offsetdelete.go
new file mode 100644
index 000000000..bda619f3c
--- /dev/null
+++ protocol/offsetdelete/offsetdelete.go
@@ -0,0 +1,47 @@
+package offsetdelete
+
+import "github.com/segmentio/kafka-go/protocol"
+
+func init() {
+ protocol.Register(&Request{}, &Response{})
+}
+
+type Request struct {
+ GroupID string `kafka:"min=v0,max=v0"`
+ Topics []RequestTopic `kafka:"min=v0,max=v0"`
+}
+
+func (r *Request) ApiKey() protocol.ApiKey { return protocol.OffsetDelete }
+
+func (r *Request) Group() string { return r.GroupID }
+
+type RequestTopic struct {
+ Name string `kafka:"min=v0,max=v0"`
+ Partitions []RequestPartition `kafka:"min=v0,max=v0"`
+}
+
+type RequestPartition struct {
+ PartitionIndex int32 `kafka:"min=v0,max=v0"`
+}
+
+var (
+ _ protocol.GroupMessage = (*Request)(nil)
+)
+
+type Response struct {
+ ErrorCode int16 `kafka:"min=v0,max=v0"`
+ ThrottleTimeMs int32 `kafka:"min=v0,max=v0"`
+ Topics []ResponseTopic `kafka:"min=v0,max=v0"`
+}
+
+func (r *Response) ApiKey() protocol.ApiKey { return protocol.OffsetDelete }
+
+type ResponseTopic struct {
+ Name string `kafka:"min=v0,max=v0"`
+ Partitions []ResponsePartition `kafka:"min=v0,max=v0"`
+}
+
+type ResponsePartition struct {
+ PartitionIndex int32 `kafka:"min=v0,max=v0"`
+ ErrorCode int16 `kafka:"min=v0,max=v0"`
+}
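OffsetDelete removes a consumer group's committed offsets for specific partitions, and because Request implements protocol.GroupMessage it is routed to the group's coordinator. A minimal sketch of a request:

```go
package main

import "github.com/segmentio/kafka-go/protocol/offsetdelete"

func main() {
	_ = &offsetdelete.Request{
		GroupID: "group-0",
		Topics: []offsetdelete.RequestTopic{{
			Name:       "topic-0",
			Partitions: []offsetdelete.RequestPartition{{PartitionIndex: 0}},
		}},
	}
}
```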
diff --git a/protocol/offsetdelete/offsetdelete_test.go b/protocol/offsetdelete/offsetdelete_test.go
new file mode 100644
index 000000000..ed8d7ea94
--- /dev/null
+++ protocol/offsetdelete/offsetdelete_test.go
@@ -0,0 +1,52 @@
+package offsetdelete_test
+
+import (
+ "testing"
+
+ "github.com/segmentio/kafka-go/protocol/offsetdelete"
+ "github.com/segmentio/kafka-go/protocol/prototest"
+)
+
+func TestOffsetDeleteRequest(t *testing.T) {
+ for _, version := range []int16{0} {
+ prototest.TestRequest(t, version, &offsetdelete.Request{
+ GroupID: "group-0",
+ Topics: []offsetdelete.RequestTopic{
+ {
+ Name: "topic-0",
+ Partitions: []offsetdelete.RequestPartition{
+ {
+ PartitionIndex: 0,
+ },
+ {
+ PartitionIndex: 1,
+ },
+ },
+ },
+ },
+ })
+ }
+}
+
+func TestOffsetDeleteResponse(t *testing.T) {
+ for _, version := range []int16{0} {
+ prototest.TestResponse(t, version, &offsetdelete.Response{
+ ErrorCode: 0,
+ Topics: []offsetdelete.ResponseTopic{
+ {
+ Name: "topic-0",
+ Partitions: []offsetdelete.ResponsePartition{
+ {
+ PartitionIndex: 0,
+ ErrorCode: 1,
+ },
+ {
+ PartitionIndex: 1,
+ ErrorCode: 1,
+ },
+ },
+ },
+ },
+ })
+ }
+}
diff --git protocol/offsetfetch/offsetfetch.go protocol/offsetfetch/offsetfetch.go
index 011003340..8f1096f5d 100644
--- protocol/offsetfetch/offsetfetch.go
+++ protocol/offsetfetch/offsetfetch.go
@@ -8,7 +8,7 @@ func init() {
type Request struct {
GroupID string `kafka:"min=v0,max=v5"`
- Topics []RequestTopic `kafka:"min=v0,max=v5"`
+ Topics []RequestTopic `kafka:"min=v0,max=v5,nullable"`
}
func (r *Request) ApiKey() protocol.ApiKey { return protocol.OffsetFetch }
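With the `nullable` tag added above, a nil Topics slice is encoded as a null array, which brokers treat as "fetch committed offsets for every topic the group has offsets for". A minimal sketch:

```go
package main

import "github.com/segmentio/kafka-go/protocol/offsetfetch"

func main() {
	_ = &offsetfetch.Request{GroupID: "consumer-group-id", Topics: nil}
}
```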
diff --git protocol/protocol.go protocol/protocol.go
index f5f536148..3d0a7b8dd 100644
--- protocol/protocol.go
+++ protocol/protocol.go
@@ -53,111 +53,115 @@ func (k ApiKey) apiType() apiType {
}
const (
- Produce ApiKey = 0
- Fetch ApiKey = 1
- ListOffsets ApiKey = 2
- Metadata ApiKey = 3
- LeaderAndIsr ApiKey = 4
- StopReplica ApiKey = 5
- UpdateMetadata ApiKey = 6
- ControlledShutdown ApiKey = 7
- OffsetCommit ApiKey = 8
- OffsetFetch ApiKey = 9
- FindCoordinator ApiKey = 10
- JoinGroup ApiKey = 11
- Heartbeat ApiKey = 12
- LeaveGroup ApiKey = 13
- SyncGroup ApiKey = 14
- DescribeGroups ApiKey = 15
- ListGroups ApiKey = 16
- SaslHandshake ApiKey = 17
- ApiVersions ApiKey = 18
- CreateTopics ApiKey = 19
- DeleteTopics ApiKey = 20
- DeleteRecords ApiKey = 21
- InitProducerId ApiKey = 22
- OffsetForLeaderEpoch ApiKey = 23
- AddPartitionsToTxn ApiKey = 24
- AddOffsetsToTxn ApiKey = 25
- EndTxn ApiKey = 26
- WriteTxnMarkers ApiKey = 27
- TxnOffsetCommit ApiKey = 28
- DescribeAcls ApiKey = 29
- CreateAcls ApiKey = 30
- DeleteAcls ApiKey = 31
- DescribeConfigs ApiKey = 32
- AlterConfigs ApiKey = 33
- AlterReplicaLogDirs ApiKey = 34
- DescribeLogDirs ApiKey = 35
- SaslAuthenticate ApiKey = 36
- CreatePartitions ApiKey = 37
- CreateDelegationToken ApiKey = 38
- RenewDelegationToken ApiKey = 39
- ExpireDelegationToken ApiKey = 40
- DescribeDelegationToken ApiKey = 41
- DeleteGroups ApiKey = 42
- ElectLeaders ApiKey = 43
- IncrementalAlterConfigs ApiKey = 44
- AlterPartitionReassignments ApiKey = 45
- ListPartitionReassignments ApiKey = 46
- OffsetDelete ApiKey = 47
- DescribeClientQuotas ApiKey = 48
- AlterClientQuotas ApiKey = 49
-
- numApis = 50
+ Produce ApiKey = 0
+ Fetch ApiKey = 1
+ ListOffsets ApiKey = 2
+ Metadata ApiKey = 3
+ LeaderAndIsr ApiKey = 4
+ StopReplica ApiKey = 5
+ UpdateMetadata ApiKey = 6
+ ControlledShutdown ApiKey = 7
+ OffsetCommit ApiKey = 8
+ OffsetFetch ApiKey = 9
+ FindCoordinator ApiKey = 10
+ JoinGroup ApiKey = 11
+ Heartbeat ApiKey = 12
+ LeaveGroup ApiKey = 13
+ SyncGroup ApiKey = 14
+ DescribeGroups ApiKey = 15
+ ListGroups ApiKey = 16
+ SaslHandshake ApiKey = 17
+ ApiVersions ApiKey = 18
+ CreateTopics ApiKey = 19
+ DeleteTopics ApiKey = 20
+ DeleteRecords ApiKey = 21
+ InitProducerId ApiKey = 22
+ OffsetForLeaderEpoch ApiKey = 23
+ AddPartitionsToTxn ApiKey = 24
+ AddOffsetsToTxn ApiKey = 25
+ EndTxn ApiKey = 26
+ WriteTxnMarkers ApiKey = 27
+ TxnOffsetCommit ApiKey = 28
+ DescribeAcls ApiKey = 29
+ CreateAcls ApiKey = 30
+ DeleteAcls ApiKey = 31
+ DescribeConfigs ApiKey = 32
+ AlterConfigs ApiKey = 33
+ AlterReplicaLogDirs ApiKey = 34
+ DescribeLogDirs ApiKey = 35
+ SaslAuthenticate ApiKey = 36
+ CreatePartitions ApiKey = 37
+ CreateDelegationToken ApiKey = 38
+ RenewDelegationToken ApiKey = 39
+ ExpireDelegationToken ApiKey = 40
+ DescribeDelegationToken ApiKey = 41
+ DeleteGroups ApiKey = 42
+ ElectLeaders ApiKey = 43
+ IncrementalAlterConfigs ApiKey = 44
+ AlterPartitionReassignments ApiKey = 45
+ ListPartitionReassignments ApiKey = 46
+ OffsetDelete ApiKey = 47
+ DescribeClientQuotas ApiKey = 48
+ AlterClientQuotas ApiKey = 49
+ DescribeUserScramCredentials ApiKey = 50
+ AlterUserScramCredentials ApiKey = 51
+
+ numApis = 52
)
var apiNames = [numApis]string{
- Produce: "Produce",
- Fetch: "Fetch",
- ListOffsets: "ListOffsets",
- Metadata: "Metadata",
- LeaderAndIsr: "LeaderAndIsr",
- StopReplica: "StopReplica",
- UpdateMetadata: "UpdateMetadata",
- ControlledShutdown: "ControlledShutdown",
- OffsetCommit: "OffsetCommit",
- OffsetFetch: "OffsetFetch",
- FindCoordinator: "FindCoordinator",
- JoinGroup: "JoinGroup",
- Heartbeat: "Heartbeat",
- LeaveGroup: "LeaveGroup",
- SyncGroup: "SyncGroup",
- DescribeGroups: "DescribeGroups",
- ListGroups: "ListGroups",
- SaslHandshake: "SaslHandshake",
- ApiVersions: "ApiVersions",
- CreateTopics: "CreateTopics",
- DeleteTopics: "DeleteTopics",
- DeleteRecords: "DeleteRecords",
- InitProducerId: "InitProducerId",
- OffsetForLeaderEpoch: "OffsetForLeaderEpoch",
- AddPartitionsToTxn: "AddPartitionsToTxn",
- AddOffsetsToTxn: "AddOffsetsToTxn",
- EndTxn: "EndTxn",
- WriteTxnMarkers: "WriteTxnMarkers",
- TxnOffsetCommit: "TxnOffsetCommit",
- DescribeAcls: "DescribeAcls",
- CreateAcls: "CreateAcls",
- DeleteAcls: "DeleteAcls",
- DescribeConfigs: "DescribeConfigs",
- AlterConfigs: "AlterConfigs",
- AlterReplicaLogDirs: "AlterReplicaLogDirs",
- DescribeLogDirs: "DescribeLogDirs",
- SaslAuthenticate: "SaslAuthenticate",
- CreatePartitions: "CreatePartitions",
- CreateDelegationToken: "CreateDelegationToken",
- RenewDelegationToken: "RenewDelegationToken",
- ExpireDelegationToken: "ExpireDelegationToken",
- DescribeDelegationToken: "DescribeDelegationToken",
- DeleteGroups: "DeleteGroups",
- ElectLeaders: "ElectLeaders",
- IncrementalAlterConfigs: "IncrementalAlterConfigs",
- AlterPartitionReassignments: "AlterPartitionReassignments",
- ListPartitionReassignments: "ListPartitionReassignments",
- OffsetDelete: "OffsetDelete",
- DescribeClientQuotas: "DescribeClientQuotas",
- AlterClientQuotas: "AlterClientQuotas",
+ Produce: "Produce",
+ Fetch: "Fetch",
+ ListOffsets: "ListOffsets",
+ Metadata: "Metadata",
+ LeaderAndIsr: "LeaderAndIsr",
+ StopReplica: "StopReplica",
+ UpdateMetadata: "UpdateMetadata",
+ ControlledShutdown: "ControlledShutdown",
+ OffsetCommit: "OffsetCommit",
+ OffsetFetch: "OffsetFetch",
+ FindCoordinator: "FindCoordinator",
+ JoinGroup: "JoinGroup",
+ Heartbeat: "Heartbeat",
+ LeaveGroup: "LeaveGroup",
+ SyncGroup: "SyncGroup",
+ DescribeGroups: "DescribeGroups",
+ ListGroups: "ListGroups",
+ SaslHandshake: "SaslHandshake",
+ ApiVersions: "ApiVersions",
+ CreateTopics: "CreateTopics",
+ DeleteTopics: "DeleteTopics",
+ DeleteRecords: "DeleteRecords",
+ InitProducerId: "InitProducerId",
+ OffsetForLeaderEpoch: "OffsetForLeaderEpoch",
+ AddPartitionsToTxn: "AddPartitionsToTxn",
+ AddOffsetsToTxn: "AddOffsetsToTxn",
+ EndTxn: "EndTxn",
+ WriteTxnMarkers: "WriteTxnMarkers",
+ TxnOffsetCommit: "TxnOffsetCommit",
+ DescribeAcls: "DescribeAcls",
+ CreateAcls: "CreateAcls",
+ DeleteAcls: "DeleteAcls",
+ DescribeConfigs: "DescribeConfigs",
+ AlterConfigs: "AlterConfigs",
+ AlterReplicaLogDirs: "AlterReplicaLogDirs",
+ DescribeLogDirs: "DescribeLogDirs",
+ SaslAuthenticate: "SaslAuthenticate",
+ CreatePartitions: "CreatePartitions",
+ CreateDelegationToken: "CreateDelegationToken",
+ RenewDelegationToken: "RenewDelegationToken",
+ ExpireDelegationToken: "ExpireDelegationToken",
+ DescribeDelegationToken: "DescribeDelegationToken",
+ DeleteGroups: "DeleteGroups",
+ ElectLeaders: "ElectLeaders",
+ IncrementalAlterConfigs: "IncrementalAlterConfigs",
+ AlterPartitionReassignments: "AlterPartitionReassignments",
+ ListPartitionReassignments: "ListPartitionReassignments",
+ OffsetDelete: "OffsetDelete",
+ DescribeClientQuotas: "DescribeClientQuotas",
+ AlterClientQuotas: "AlterClientQuotas",
+ DescribeUserScramCredentials: "DescribeUserScramCredentials",
+ AlterUserScramCredentials: "AlterUserScramCredentials",
}
type messageType struct {
@@ -209,6 +213,37 @@ func Register(req, res Message) {
}
}
+// OverrideTypeMessage is an interface implemented by messages that want to override the standard
+// request/response types for a given API.
+type OverrideTypeMessage interface {
+ TypeKey() OverrideTypeKey
+}
+
+type OverrideTypeKey int16
+
+const (
+ RawProduceOverride OverrideTypeKey = 0
+)
+
+var overrideApiTypes [numApis]map[OverrideTypeKey]apiType
+
+func RegisterOverride(req, res Message, key OverrideTypeKey) {
+ k1 := req.ApiKey()
+ k2 := res.ApiKey()
+
+ if k1 != k2 {
+ panic(fmt.Sprintf("[%T/%T]: request and response API keys mismatch: %d != %d", req, res, k1, k2))
+ }
+
+ if overrideApiTypes[k1] == nil {
+ overrideApiTypes[k1] = make(map[OverrideTypeKey]apiType)
+ }
+ overrideApiTypes[k1][key] = apiType{
+ requests: typesOf(req),
+ responses: typesOf(res),
+ }
+}
+
func typesOf(v interface{}) []messageType {
return makeTypes(reflect.TypeOf(v).Elem())
}
diff --git protocol/protocol_test.go protocol/protocol_test.go
index b0c5f1ae7..e9487c931 100644
--- protocol/protocol_test.go
+++ protocol/protocol_test.go
@@ -2,6 +2,7 @@ package protocol
import (
"bytes"
+ "math"
"reflect"
"testing"
)
@@ -279,3 +280,63 @@ func TestVarInts(t *testing.T) {
}
}
+
+func TestFloat64(t *testing.T) {
+ type tc struct {
+ input float64
+ expected []byte
+ }
+
+ tcs := []tc{
+ {
+ input: 0.0,
+ expected: []byte{0, 0, 0, 0, 0, 0, 0, 0},
+ },
+ {
+ input: math.MaxFloat64,
+ expected: []byte{127, 239, 255, 255, 255, 255, 255, 255},
+ },
+ {
+ input: -math.MaxFloat64,
+ expected: []byte{255, 239, 255, 255, 255, 255, 255, 255},
+ },
+ {
+ input: math.SmallestNonzeroFloat64,
+ expected: []byte{0, 0, 0, 0, 0, 0, 0, 1},
+ },
+ {
+ input: -math.SmallestNonzeroFloat64,
+ expected: []byte{128, 0, 0, 0, 0, 0, 0, 1},
+ },
+ }
+
+ for _, tc := range tcs {
+ b := &bytes.Buffer{}
+ e := &encoder{writer: b}
+ e.writeFloat64(tc.input)
+ if e.err != nil {
+ t.Errorf(
+ "Unexpected error encoding %f as float64: %+v",
+ tc.input,
+ e.err,
+ )
+ }
+ if !reflect.DeepEqual(b.Bytes(), tc.expected) {
+ t.Error(
+ "Wrong output encoding value", tc.input, "as float64",
+ "expected", tc.expected,
+ "got", b.Bytes(),
+ )
+ }
+
+ d := &decoder{reader: b, remain: len(b.Bytes())}
+ v := d.readFloat64()
+ if v != tc.input {
+ t.Error(
+ "Decoded float64 value does not equal encoded one",
+ "expected", tc.input,
+ "got", v,
+ )
+ }
+ }
+}
diff --git protocol/prototest/prototest.go protocol/prototest/prototest.go
index 8ba4e5714..56042426f 100644
--- protocol/prototest/prototest.go
+++ protocol/prototest/prototest.go
@@ -48,6 +48,8 @@ func deepEqualValue(v1, v2 reflect.Value) bool {
return v1.Bool() == v2.Bool()
case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
return v1.Int() == v2.Int()
+ case reflect.Float64:
+ return v1.Float() == v2.Float()
case reflect.String:
return v1.String() == v2.String()
case reflect.Struct:
diff --git protocol/prototest/reflect.go protocol/prototest/reflect.go
index 5c3d0a1d7..a266d688f 100644
--- protocol/prototest/reflect.go
+++ protocol/prototest/reflect.go
@@ -1,6 +1,7 @@
package prototest
import (
+ "bytes"
"errors"
"io"
"reflect"
@@ -49,6 +50,13 @@ func loadValue(v reflect.Value) (reset func()) {
}
resetFunc()
resets = append(resets, resetFunc)
+ case io.Reader:
+ buf, _ := io.ReadAll(x)
+ resetFunc := func() {
+ f.Set(reflect.ValueOf(bytes.NewBuffer(buf)))
+ }
+ resetFunc()
+ resets = append(resets, resetFunc)
}
})
diff --git protocol/prototest/request.go protocol/prototest/request.go
index 15c6e79c8..c0197f25d 100644
--- protocol/prototest/request.go
+++ protocol/prototest/request.go
@@ -46,6 +46,39 @@ func TestRequest(t *testing.T, version int16, msg protocol.Message) {
})
}
+// TestRequestWithOverride validates requests that have an overridden type. For requests with type overrides, we
+// double-serialize the request to ensure the resulting encoding of the overridden and original type are identical.
+func TestRequestWithOverride(t *testing.T, version int16, msg protocol.Message) {
+ reset := load(msg)
+
+ t.Run(fmt.Sprintf("v%d", version), func(t *testing.T) {
+ b1 := &bytes.Buffer{}
+
+ if err := protocol.WriteRequest(b1, version, 1234, "me", msg); err != nil {
+ t.Fatal(err)
+ }
+
+ reset()
+ t.Logf("\n%s\n", hex.Dump(b1.Bytes()))
+
+ _, _, _, req, err := protocol.ReadRequest(b1)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ b2 := &bytes.Buffer{}
+ if err := protocol.WriteRequest(b2, version, 1234, "me", req); err != nil {
+ t.Fatal(err)
+ }
+
+ if !deepEqual(b1, b2) {
+ t.Errorf("request message mismatch:")
+ t.Logf("expected: %+v", hex.Dump(b1.Bytes()))
+ t.Logf("found: %+v", hex.Dump(b2.Bytes()))
+ }
+ })
+}
+
func BenchmarkRequest(b *testing.B, version int16, msg protocol.Message) {
reset := load(msg)
diff --git a/protocol/rawproduce/rawproduce.go b/protocol/rawproduce/rawproduce.go
new file mode 100644
index 000000000..bad83138d
--- /dev/null
+++ protocol/rawproduce/rawproduce.go
@@ -0,0 +1,91 @@
+package rawproduce
+
+import (
+ "fmt"
+
+ "github.com/segmentio/kafka-go/protocol"
+ "github.com/segmentio/kafka-go/protocol/produce"
+)
+
+func init() {
+ // Register a type override so that raw produce requests will be encoded with the correct type.
+ req := &Request{}
+ protocol.RegisterOverride(req, &produce.Response{}, req.TypeKey())
+}
+
+type Request struct {
+ TransactionalID string `kafka:"min=v3,max=v8,nullable"`
+ Acks int16 `kafka:"min=v0,max=v8"`
+ Timeout int32 `kafka:"min=v0,max=v8"`
+ Topics []RequestTopic `kafka:"min=v0,max=v8"`
+}
+
+func (r *Request) ApiKey() protocol.ApiKey { return protocol.Produce }
+
+func (r *Request) TypeKey() protocol.OverrideTypeKey { return protocol.RawProduceOverride }
+
+func (r *Request) Broker(cluster protocol.Cluster) (protocol.Broker, error) {
+ broker := protocol.Broker{ID: -1}
+
+ for i := range r.Topics {
+ t := &r.Topics[i]
+
+ topic, ok := cluster.Topics[t.Topic]
+ if !ok {
+ return broker, NewError(protocol.NewErrNoTopic(t.Topic))
+ }
+
+ for j := range t.Partitions {
+ p := &t.Partitions[j]
+
+ partition, ok := topic.Partitions[p.Partition]
+ if !ok {
+ return broker, NewError(protocol.NewErrNoPartition(t.Topic, p.Partition))
+ }
+
+ if b, ok := cluster.Brokers[partition.Leader]; !ok {
+ return broker, NewError(protocol.NewErrNoLeader(t.Topic, p.Partition))
+ } else if broker.ID < 0 {
+ broker = b
+ } else if b.ID != broker.ID {
+ return broker, NewError(fmt.Errorf("mismatching leaders (%d!=%d)", b.ID, broker.ID))
+ }
+ }
+ }
+
+ return broker, nil
+}
+
+func (r *Request) HasResponse() bool {
+ return r.Acks != 0
+}
+
+type RequestTopic struct {
+ Topic string `kafka:"min=v0,max=v8"`
+ Partitions []RequestPartition `kafka:"min=v0,max=v8"`
+}
+
+type RequestPartition struct {
+ Partition int32 `kafka:"min=v0,max=v8"`
+ RecordSet protocol.RawRecordSet `kafka:"min=v0,max=v8"`
+}
+
+var (
+ _ protocol.BrokerMessage = (*Request)(nil)
+)
+
+type Error struct {
+ Err error
+}
+
+func NewError(err error) *Error {
+ return &Error{Err: err}
+}
+
+func (e *Error) Error() string {
+ return fmt.Sprintf("fetch request error: %v", e.Err)
+}
+
+func (e *Error) Unwrap() error {
+ return e.Err
+}
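A minimal sketch of the intended use (batchBytes is a hypothetical, already-encoded record batch, e.g. captured verbatim from a fetch): RawRecordSet lets those bytes pass through to the broker without being decoded and re-encoded.

```go
package main

import (
	"bytes"

	"github.com/segmentio/kafka-go/protocol"
	"github.com/segmentio/kafka-go/protocol/rawproduce"
)

func buildRequest(batchBytes []byte) *rawproduce.Request {
	return &rawproduce.Request{
		Acks:    1,
		Timeout: 500,
		Topics: []rawproduce.RequestTopic{{
			Topic: "topic-1",
			Partitions: []rawproduce.RequestPartition{{
				Partition: 0,
				RecordSet: protocol.RawRecordSet{Reader: bytes.NewReader(batchBytes)},
			}},
		}},
	}
}

func main() {
	_ = buildRequest(nil)
}
```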
diff --git a/protocol/rawproduce/rawproduce_test.go b/protocol/rawproduce/rawproduce_test.go
new file mode 100644
index 000000000..2d987e711
--- /dev/null
+++ protocol/rawproduce/rawproduce_test.go
@@ -0,0 +1,201 @@
+package rawproduce_test
+
+import (
+ "bytes"
+ "testing"
+ "time"
+
+ "github.com/segmentio/kafka-go/protocol"
+ "github.com/segmentio/kafka-go/protocol/prototest"
+ "github.com/segmentio/kafka-go/protocol/rawproduce"
+)
+
+const (
+ v0 = 0
+ v3 = 3
+ v5 = 5
+)
+
+func TestRawProduceRequest(t *testing.T) {
+ t0 := time.Now().Truncate(time.Millisecond)
+ t1 := t0.Add(1 * time.Millisecond)
+ t2 := t0.Add(2 * time.Millisecond)
+
+ prototest.TestRequestWithOverride(t, v0, &rawproduce.Request{
+ Acks: 1,
+ Timeout: 500,
+ Topics: []rawproduce.RequestTopic{
+ {
+ Topic: "topic-1",
+ Partitions: []rawproduce.RequestPartition{
+ {
+ Partition: 0,
+ RecordSet: NewRawRecordSet(protocol.NewRecordReader(
+ protocol.Record{Offset: 0, Time: t0, Key: nil, Value: nil},
+ ), 1, 0),
+ },
+ {
+ Partition: 1,
+ RecordSet: NewRawRecordSet(protocol.NewRecordReader(
+ protocol.Record{Offset: 0, Time: t0, Key: nil, Value: prototest.String("msg-0")},
+ protocol.Record{Offset: 1, Time: t1, Key: nil, Value: prototest.String("msg-1")},
+ protocol.Record{Offset: 2, Time: t2, Key: prototest.Bytes([]byte{1}), Value: prototest.String("msg-2")},
+ ), 1, 0),
+ },
+ },
+ },
+
+ {
+ Topic: "topic-2",
+ Partitions: []rawproduce.RequestPartition{
+ {
+ Partition: 0,
+ RecordSet: NewRawRecordSet(protocol.NewRecordReader(
+ protocol.Record{Offset: 0, Time: t0, Key: nil, Value: prototest.String("msg-0")},
+ protocol.Record{Offset: 1, Time: t1, Key: nil, Value: prototest.String("msg-1")},
+ protocol.Record{Offset: 2, Time: t2, Key: prototest.Bytes([]byte{1}), Value: prototest.String("msg-2")},
+ ), 1, protocol.Gzip),
+ },
+ },
+ },
+ },
+ })
+
+ prototest.TestRequestWithOverride(t, v3, &rawproduce.Request{
+ TransactionalID: "1234",
+ Acks: 1,
+ Timeout: 500,
+ Topics: []rawproduce.RequestTopic{
+ {
+ Topic: "topic-1",
+ Partitions: []rawproduce.RequestPartition{
+ {
+ Partition: 0,
+ RecordSet: NewRawRecordSet(protocol.NewRecordReader(
+ protocol.Record{Offset: 0, Time: t0, Key: nil, Value: nil},
+ ), 1, 0),
+ },
+ {
+ Partition: 1,
+ RecordSet: NewRawRecordSet(protocol.NewRecordReader(
+ protocol.Record{Offset: 0, Time: t0, Key: nil, Value: prototest.String("msg-0")},
+ protocol.Record{Offset: 1, Time: t1, Key: nil, Value: prototest.String("msg-1")},
+ protocol.Record{Offset: 2, Time: t2, Key: prototest.Bytes([]byte{1}), Value: prototest.String("msg-2")},
+ ), 1, 0),
+ },
+ },
+ },
+ },
+ })
+
+ headers := []protocol.Header{
+ {Key: "key-1", Value: []byte("value-1")},
+ {Key: "key-2", Value: []byte("value-2")},
+ {Key: "key-3", Value: []byte("value-3")},
+ }
+
+ prototest.TestRequestWithOverride(t, v5, &rawproduce.Request{
+ TransactionalID: "1234",
+ Acks: 1,
+ Timeout: 500,
+ Topics: []rawproduce.RequestTopic{
+ {
+ Topic: "topic-1",
+ Partitions: []rawproduce.RequestPartition{
+ {
+ Partition: 1,
+ RecordSet: NewRawRecordSet(protocol.NewRecordReader(
+ protocol.Record{Offset: 0, Time: t0, Key: nil, Value: prototest.String("msg-0"), Headers: headers},
+ protocol.Record{Offset: 1, Time: t1, Key: nil, Value: prototest.String("msg-1")},
+ protocol.Record{Offset: 2, Time: t2, Key: prototest.Bytes([]byte{1}), Value: prototest.String("msg-2")},
+ ), 2, 0),
+ },
+ },
+ },
+
+ {
+ Topic: "topic-2",
+ Partitions: []rawproduce.RequestPartition{
+ {
+ Partition: 1,
+ RecordSet: NewRawRecordSet(protocol.NewRecordReader(
+ protocol.Record{Offset: 0, Time: t0, Key: nil, Value: prototest.String("msg-0"), Headers: headers},
+ protocol.Record{Offset: 1, Time: t1, Key: nil, Value: prototest.String("msg-1")},
+ protocol.Record{Offset: 2, Time: t2, Key: prototest.Bytes([]byte{1}), Value: prototest.String("msg-2")},
+ ), 2, protocol.Snappy),
+ },
+ },
+ },
+ },
+ })
+}
+
+func NewRawRecordSet(reader protocol.RecordReader, version int8, attr protocol.Attributes) protocol.RawRecordSet {
+ rs := protocol.RecordSet{Version: version, Attributes: attr, Records: reader}
+ buf := &bytes.Buffer{}
+ rs.WriteTo(buf)
+
+ return protocol.RawRecordSet{
+ Reader: buf,
+ }
+}
+
+func BenchmarkProduceRequest(b *testing.B) {
+ t0 := time.Now().Truncate(time.Millisecond)
+ t1 := t0.Add(1 * time.Millisecond)
+ t2 := t0.Add(2 * time.Millisecond)
+
+ prototest.BenchmarkRequest(b, v3, &rawproduce.Request{
+ TransactionalID: "1234",
+ Acks: 1,
+ Timeout: 500,
+ Topics: []rawproduce.RequestTopic{
+ {
+ Topic: "topic-1",
+ Partitions: []rawproduce.RequestPartition{
+ {
+ Partition: 0,
+ RecordSet: NewRawRecordSet(protocol.NewRecordReader(
+ protocol.Record{Offset: 0, Time: t0, Key: nil, Value: nil},
+ ), 1, 0),
+ },
+ {
+ Partition: 1,
+ RecordSet: NewRawRecordSet(protocol.NewRecordReader(
+ protocol.Record{Offset: 0, Time: t0, Key: nil, Value: prototest.String("msg-0")},
+ protocol.Record{Offset: 1, Time: t1, Key: nil, Value: prototest.String("msg-1")},
+ protocol.Record{Offset: 2, Time: t2, Key: prototest.Bytes([]byte{1}), Value: prototest.String("msg-2")},
+ ), 1, 0),
+ },
+ },
+ },
+ },
+ })
+
+ headers := []protocol.Header{
+ {Key: "key-1", Value: []byte("value-1")},
+ {Key: "key-2", Value: []byte("value-2")},
+ {Key: "key-3", Value: []byte("value-3")},
+ }
+
+ prototest.BenchmarkRequest(b, v5, &rawproduce.Request{
+ TransactionalID: "1234",
+ Acks: 1,
+ Timeout: 500,
+ Topics: []rawproduce.RequestTopic{
+ {
+ Topic: "topic-1",
+ Partitions: []rawproduce.RequestPartition{
+ {
+ Partition: 1,
+ RecordSet: NewRawRecordSet(protocol.NewRecordReader(
+ protocol.Record{Offset: 0, Time: t0, Key: nil, Value: prototest.String("msg-0"), Headers: headers},
+ protocol.Record{Offset: 1, Time: t1, Key: nil, Value: prototest.String("msg-1")},
+ protocol.Record{Offset: 2, Time: t2, Key: prototest.Bytes([]byte{1}), Value: prototest.String("msg-2")},
+ ), 2, 0),
+ },
+ },
+ },
+ },
+ })
+}
diff --git protocol/record.go protocol/record.go
index 84594868b..e11af4dcc 100644
--- protocol/record.go
+++ protocol/record.go
@@ -292,6 +292,46 @@ func (rs *RecordSet) WriteTo(w io.Writer) (int64, error) {
return n, nil
}
+// RawRecordSet represents a record set for a RawProduce request. The record set is
+// represented as a raw sequence of pre-encoded record set bytes.
+type RawRecordSet struct {
+ // Reader exposes the raw sequence of record set bytes.
+ Reader io.Reader
+}
+
+// ReadFrom reads the representation of a record set from r into rrs. It re-uses the
+// existing RecordSet.ReadFrom implementation to first read/decode data into a RecordSet,
+// then writes/encodes the RecordSet to a buffer referenced by the RawRecordSet.
+//
+// Note: re-using the RecordSet.ReadFrom implementation makes this suboptimal from a
+// performance standpoint as it requires an extra copy of the record bytes. Holding off
+// on optimizing, as this code path is only invoked in tests.
+func (rrs *RawRecordSet) ReadFrom(r io.Reader) (int64, error) {
+ rs := &RecordSet{}
+ n, err := rs.ReadFrom(r)
+ if err != nil {
+ return 0, err
+ }
+
+ buf := &bytes.Buffer{}
+ rs.WriteTo(buf)
+ *rrs = RawRecordSet{
+ Reader: buf,
+ }
+
+ return n, nil
+}
+
+// WriteTo writes the RawRecordSet to an io.Writer. Since this is a raw record set representation, all that is
+// done here is copying bytes from the underlying reader to the specified writer.
+func (rrs *RawRecordSet) WriteTo(w io.Writer) (int64, error) {
+ if rrs.Reader == nil {
+ return 0, ErrNoRecord
+ }
+
+ return io.Copy(w, rrs.Reader)
+}
+
func makeTime(t int64) time.Time {
return time.Unix(t/1000, (t%1000)*int64(time.Millisecond))
}
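For context, a `RawRecordSet` wraps record-set bytes that were already encoded, so a `RawProduce` request can ship them to the broker without a second encode pass. Below is a minimal sketch of building one, mirroring the `NewRawRecordSet` test helpers added elsewhere in this diff; the record contents are illustrative.

```go
// Minimal sketch: encode a RecordSet once, then wrap the resulting bytes so
// they can be passed through a RawProduce request without re-encoding.
package main

import (
	"bytes"
	"time"

	"github.com/segmentio/kafka-go/protocol"
)

func newRawRecordSet(records protocol.RecordReader, attr protocol.Attributes) protocol.RawRecordSet {
	rs := protocol.RecordSet{Version: 2, Attributes: attr, Records: records}
	buf := &bytes.Buffer{}
	rs.WriteTo(buf) // error ignored for brevity, as in the test helpers
	return protocol.RawRecordSet{Reader: buf}
}

func main() {
	_ = newRawRecordSet(protocol.NewRecordReader(
		protocol.Record{Offset: 0, Time: time.Now(), Value: protocol.NewBytes([]byte("msg-0"))},
	), 0)
}
```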
diff --git protocol/reflect.go protocol/reflect.go
index 910fd6219..4d664b26b 100644
--- protocol/reflect.go
+++ protocol/reflect.go
@@ -45,6 +45,8 @@ func (v value) int32() int32 { return int32(v.int64()) }
func (v value) int64() int64 { return v.val.Int() }
+func (v value) float64() float64 { return v.val.Float() }
+
func (v value) string() string { return v.val.String() }
func (v value) bytes() []byte { return v.val.Bytes() }
@@ -63,6 +65,8 @@ func (v value) setInt32(i int32) { v.setInt64(int64(i)) }
func (v value) setInt64(i int64) { v.val.SetInt(i) }
+func (v value) setFloat64(f float64) { v.val.SetFloat(f) }
+
func (v value) setString(s string) { v.val.SetString(s) }
func (v value) setBytes(b []byte) { v.val.SetBytes(b) }
diff --git protocol/reflect_unsafe.go protocol/reflect_unsafe.go
index 0e8397242..9eca5060f 100644
--- protocol/reflect_unsafe.go
+++ protocol/reflect_unsafe.go
@@ -63,6 +63,8 @@ func (v value) int32() int32 { return *(*int32)(v.ptr) }
func (v value) int64() int64 { return *(*int64)(v.ptr) }
+func (v value) float64() float64 { return *(*float64)(v.ptr) }
+
func (v value) string() string { return *(*string)(v.ptr) }
func (v value) bytes() []byte { return *(*[]byte)(v.ptr) }
@@ -92,6 +94,8 @@ func (v value) setInt32(i int32) { *(*int32)(v.ptr) = i }
func (v value) setInt64(i int64) { *(*int64)(v.ptr) = i }
+func (v value) setFloat64(f float64) { *(*float64)(v.ptr) = f }
+
func (v value) setString(s string) { *(*string)(v.ptr) = s }
func (v value) setBytes(b []byte) { *(*[]byte)(v.ptr) = b }
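Both files implement the same `value` accessor API: `reflect.go` goes through `reflect.Value`, while `reflect_unsafe.go` (compiled with `-tags=unsafe`, which the CI change above now exercises) reads through raw pointers. A standalone sketch of the two equivalent access styles:

```go
// Standalone sketch of the two float64 access styles side by side.
package main

import (
	"fmt"
	"reflect"
	"unsafe"
)

func main() {
	x := 3.14
	v := reflect.ValueOf(&x).Elem()
	fmt.Println(v.Float())                       // reflect.go style: reflect.Value.Float
	fmt.Println(*(*float64)(unsafe.Pointer(&x))) // reflect_unsafe.go style: direct pointer load
}
```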
diff --git protocol/request.go protocol/request.go
index 8b99e0537..135b938bb 100644
--- protocol/request.go
+++ protocol/request.go
@@ -81,6 +81,12 @@ func WriteRequest(w io.Writer, apiVersion int16, correlationID int32, clientID s
return fmt.Errorf("unsupported api: %s", apiNames[apiKey])
}
+ if typedMessage, ok := msg.(OverrideTypeMessage); ok {
+ typeKey := typedMessage.TypeKey()
+ overrideType := overrideApiTypes[apiKey][typeKey]
+ t = &overrideType
+ }
+
minVersion := t.minVersion()
maxVersion := t.maxVersion()
diff --git protocol/response.go protocol/response.go
index 619480313..a43bd0237 100644
--- protocol/response.go
+++ protocol/response.go
@@ -95,6 +95,12 @@ func WriteResponse(w io.Writer, apiVersion int16, correlationID int32, msg Messa
return fmt.Errorf("unsupported api: %s", apiNames[apiKey])
}
+ if typedMessage, ok := msg.(OverrideTypeMessage); ok {
+ typeKey := typedMessage.TypeKey()
+ overrideType := overrideApiTypes[apiKey][typeKey]
+ t = &overrideType
+ }
+
minVersion := t.minVersion()
maxVersion := t.maxVersion()
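`WriteRequest` and `WriteResponse` now consult an `OverrideTypeMessage` interface so a message such as `rawproduce.Request` can substitute its own wire type for the API key it reuses. A reduced sketch of that dispatch follows; the concrete types are illustrative stand-ins for the protocol package's internals.

```go
// Reduced sketch of the override-type dispatch; the real OverrideTypeMessage
// interface and overrideApiTypes table live in the protocol package, the
// concrete types below are illustrative assumptions.
package main

import "fmt"

type apiType struct{ name string }

type OverrideTypeMessage interface {
	TypeKey() string
}

type rawProduceRequest struct{}

func (rawProduceRequest) TypeKey() string { return "raw" }

var overrideApiTypes = map[int16]map[string]apiType{
	0: {"raw": {name: "rawproduce"}},
}

func resolveType(apiKey int16, msg interface{}, t *apiType) *apiType {
	if typed, ok := msg.(OverrideTypeMessage); ok {
		overrideType := overrideApiTypes[apiKey][typed.TypeKey()]
		t = &overrideType
	}
	return t
}

func main() {
	base := apiType{name: "produce"}
	fmt.Println(resolveType(0, rawProduceRequest{}, &base).name) // rawproduce
}
```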
diff --git protocol_test.go protocol_test.go
index d1f0540fe..7a295949a 100644
--- protocol_test.go
+++ protocol_test.go
@@ -76,6 +76,30 @@ func TestProtocol(t *testing.T) {
},
},
+ topicMetadataRequestV6{
+ Topics: []string{"A", "B", "C"},
+ AllowAutoTopicCreation: true,
+ },
+
+ metadataResponseV6{
+ Brokers: []brokerMetadataV1{
+ {NodeID: 1, Host: "localhost", Port: 9001},
+ {NodeID: 2, Host: "localhost", Port: 9002, Rack: "rack2"},
+ },
+ ClusterId: "cluster",
+ ControllerID: 2,
+ Topics: []topicMetadataV6{
+ {TopicErrorCode: 0, Internal: true, Partitions: []partitionMetadataV6{{
+ PartitionErrorCode: 0,
+ PartitionID: 1,
+ Leader: 2,
+ Replicas: []int32{1},
+ Isr: []int32{1},
+ OfflineReplicas: []int32{1},
+ }}},
+ },
+ },
+
listOffsetRequestV1{
ReplicaID: 1,
Topics: []listOffsetRequestTopicV1{
diff --git a/rawproduce.go b/rawproduce.go
new file mode 100644
index 000000000..5928cb2f8
--- /dev/null
+++ rawproduce.go
@@ -0,0 +1,103 @@
+package kafka
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "net"
+
+ "github.com/segmentio/kafka-go/protocol"
+ produceAPI "github.com/segmentio/kafka-go/protocol/produce"
+ "github.com/segmentio/kafka-go/protocol/rawproduce"
+)
+
+// RawProduceRequest represents a request sent to a kafka broker to produce records
+// to a topic partition. The request contains a pre-encoded/raw record set.
+type RawProduceRequest struct {
+ // Address of the kafka broker to send the request to.
+ Addr net.Addr
+
+ // The topic to produce the records to.
+ Topic string
+
+ // The partition to produce the records to.
+ Partition int
+
+ // The level of required acknowledgements to ask the kafka broker for.
+ RequiredAcks RequiredAcks
+
+ // The message format version used when encoding the records.
+ //
+ // By default, the client automatically determines which version should be
+ // used based on the version of the Produce API supported by the server.
+ MessageVersion int
+
+ // An optional transaction id when producing to the kafka broker is part of
+ // a transaction.
+ TransactionalID string
+
+ // The sequence of records to produce to the topic partition.
+ RawRecords protocol.RawRecordSet
+}
+
+// RawProduce sends a raw produce request to a kafka broker and returns the response.
+//
+// If the request contained no records, an error wrapping protocol.ErrNoRecord
+// is returned.
+//
+// When the request is configured with RequiredAcks=none, both the response and
+// the error will be nil on success.
+func (c *Client) RawProduce(ctx context.Context, req *RawProduceRequest) (*ProduceResponse, error) {
+ m, err := c.roundTrip(ctx, req.Addr, &rawproduce.Request{
+ TransactionalID: req.TransactionalID,
+ Acks: int16(req.RequiredAcks),
+ Timeout: c.timeoutMs(ctx, defaultProduceTimeout),
+ Topics: []rawproduce.RequestTopic{{
+ Topic: req.Topic,
+ Partitions: []rawproduce.RequestPartition{{
+ Partition: int32(req.Partition),
+ RecordSet: req.RawRecords,
+ }},
+ }},
+ })
+
+ switch {
+ case err == nil:
+ case errors.Is(err, protocol.ErrNoRecord):
+ return new(ProduceResponse), nil
+ default:
+ return nil, fmt.Errorf("kafka.(*Client).RawProduce: %w", err)
+ }
+
+ if req.RequiredAcks == RequireNone {
+ return nil, nil
+ }
+
+ res := m.(*produceAPI.Response)
+ if len(res.Topics) == 0 {
+ return nil, fmt.Errorf("kafka.(*Client).RawProduce: %w", protocol.ErrNoTopic)
+ }
+ topic := &res.Topics[0]
+ if len(topic.Partitions) == 0 {
+ return nil, fmt.Errorf("kafka.(*Client).RawProduce: %w", protocol.ErrNoPartition)
+ }
+ partition := &topic.Partitions[0]
+
+ ret := &ProduceResponse{
+ Throttle: makeDuration(res.ThrottleTimeMs),
+ Error: makeError(partition.ErrorCode, partition.ErrorMessage),
+ BaseOffset: partition.BaseOffset,
+ LogAppendTime: makeTime(partition.LogAppendTime),
+ LogStartOffset: partition.LogStartOffset,
+ }
+
+ if len(partition.RecordErrors) != 0 {
+ ret.RecordErrors = make(map[int]error, len(partition.RecordErrors))
+
+ for _, recErr := range partition.RecordErrors {
+ ret.RecordErrors[int(recErr.BatchIndex)] = errors.New(recErr.BatchIndexErrorMessage)
+ }
+ }
+
+ return ret, nil
+}
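A condensed usage sketch of the new client API; the broker address and topic name are assumptions for illustration, and the tests below exercise the same path.

```go
// Sketch: produce a pre-encoded record set with the new RawProduce API.
package main

import (
	"bytes"
	"context"
	"log"
	"time"

	kafka "github.com/segmentio/kafka-go"
	"github.com/segmentio/kafka-go/protocol"
)

func main() {
	rs := protocol.RecordSet{
		Version: 2,
		Records: protocol.NewRecordReader(
			protocol.Record{Time: time.Now(), Value: protocol.NewBytes([]byte("hello"))},
		),
	}
	buf := &bytes.Buffer{}
	if _, err := rs.WriteTo(buf); err != nil {
		log.Fatal(err)
	}

	client := &kafka.Client{Addr: kafka.TCP("localhost:9092")} // placeholder broker
	res, err := client.RawProduce(context.Background(), &kafka.RawProduceRequest{
		Topic:        "topic-A", // placeholder topic
		Partition:    0,
		RequiredAcks: kafka.RequireAll,
		RawRecords:   protocol.RawRecordSet{Reader: buf},
	})
	if err != nil {
		log.Fatal(err)
	}
	if res.Error != nil {
		log.Fatal(res.Error)
	}
}
```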
diff --git a/rawproduce_test.go b/rawproduce_test.go
new file mode 100644
index 000000000..2c7fed782
--- /dev/null
+++ rawproduce_test.go
@@ -0,0 +1,123 @@
+package kafka
+
+import (
+ "bytes"
+ "context"
+ "testing"
+ "time"
+
+ "github.com/segmentio/kafka-go/protocol"
+ ktesting "github.com/segmentio/kafka-go/testing"
+)
+
+func TestClientRawProduce(t *testing.T) {
+ // The RawProduce request records are encoded in the format introduced in Kafka 0.11.0.
+ if !ktesting.KafkaIsAtLeast("0.11.0") {
+ t.Skip("Skipping because the RawProduce request is not supported by Kafka versions below 0.11.0")
+ }
+
+ client, topic, shutdown := newLocalClientAndTopic()
+ defer shutdown()
+
+ now := time.Now()
+
+ res, err := client.RawProduce(context.Background(), &RawProduceRequest{
+ Topic: topic,
+ Partition: 0,
+ RequiredAcks: -1,
+ RawRecords: NewRawRecordSet(NewRecordReader(
+ Record{Time: now, Value: NewBytes([]byte(`hello-1`))},
+ Record{Time: now, Value: NewBytes([]byte(`hello-2`))},
+ Record{Time: now, Value: NewBytes([]byte(`hello-3`))},
+ ), 0),
+ })
+
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if res.Error != nil {
+ t.Error(res.Error)
+ }
+
+ for index, err := range res.RecordErrors {
+ t.Errorf("record at index %d produced an error: %v", index, err)
+ }
+}
+
+func TestClientRawProduceCompressed(t *testing.T) {
+ // The RawProduce request records are encoded in the format introduced in Kafka 0.11.0.
+ if !ktesting.KafkaIsAtLeast("0.11.0") {
+ t.Skip("Skipping because the RawProduce request is not supported by Kafka versions below 0.11.0")
+ }
+
+ client, topic, shutdown := newLocalClientAndTopic()
+ defer shutdown()
+
+ now := time.Now()
+
+ res, err := client.RawProduce(context.Background(), &RawProduceRequest{
+ Topic: topic,
+ Partition: 0,
+ RequiredAcks: -1,
+ RawRecords: NewRawRecordSet(NewRecordReader(
+ Record{Time: now, Value: NewBytes([]byte(`hello-1`))},
+ Record{Time: now, Value: NewBytes([]byte(`hello-2`))},
+ Record{Time: now, Value: NewBytes([]byte(`hello-3`))},
+ ), protocol.Gzip),
+ })
+
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if res.Error != nil {
+ t.Error(res.Error)
+ }
+
+ for index, err := range res.RecordErrors {
+ t.Errorf("record at index %d produced an error: %v", index, err)
+ }
+}
+
+func TestClientRawProduceNilRecords(t *testing.T) {
+ client, topic, shutdown := newLocalClientAndTopic()
+ defer shutdown()
+
+ _, err := client.RawProduce(context.Background(), &RawProduceRequest{
+ Topic: topic,
+ Partition: 0,
+ RequiredAcks: -1,
+ RawRecords: protocol.RawRecordSet{Reader: nil},
+ })
+
+ if err != nil {
+ t.Fatal(err)
+ }
+}
+
+func TestClientRawProduceEmptyRecords(t *testing.T) {
+ client, topic, shutdown := newLocalClientAndTopic()
+ defer shutdown()
+
+ _, err := client.Produce(context.Background(), &ProduceRequest{
+ Topic: topic,
+ Partition: 0,
+ RequiredAcks: -1,
+ Records: NewRecordReader(),
+ })
+
+ if err != nil {
+ t.Fatal(err)
+ }
+}
+
+func NewRawRecordSet(reader protocol.RecordReader, attr protocol.Attributes) protocol.RawRecordSet {
+ rs := protocol.RecordSet{Version: 2, Attributes: attr, Records: reader}
+ buf := &bytes.Buffer{}
+ rs.WriteTo(buf)
+
+ return protocol.RawRecordSet{
+ Reader: buf,
+ }
+}
diff --git reader.go reader.go
index facaf7090..cfc7cb8f5 100644
--- reader.go
+++ reader.go
@@ -19,7 +19,7 @@ const (
)
const (
- // defaultCommitRetries holds the number commit attempts to make
+ // defaultCommitRetries holds the number of commit attempts to make
// before giving up.
defaultCommitRetries = 3
)
@@ -238,7 +238,7 @@ func (r *Reader) commitLoopInterval(ctx context.Context, gen *Generation) {
commit := func() {
if err := r.commitOffsetsWithRetry(gen, offsets, defaultCommitRetries); err != nil {
- r.withErrorLogger(func(l Logger) { l.Printf(err.Error()) })
+ r.withErrorLogger(func(l Logger) { l.Printf("%v", err) })
} else {
offsets.reset()
}
@@ -277,7 +277,7 @@ func (r *Reader) commitLoop(ctx context.Context, gen *Generation) {
l.Printf("stopped commit for group %s\n", r.config.GroupID)
})
- if r.config.CommitInterval == 0 {
+ if r.useSyncCommits() {
r.commitLoopImmediate(ctx, gen)
} else {
r.commitLoopInterval(ctx, gen)
@@ -311,7 +311,7 @@ func (r *Reader) run(cg *ConsumerGroup) {
}
r.stats.errors.observe(1)
r.withErrorLogger(func(l Logger) {
- l.Printf(err.Error())
+ l.Printf("%v", err)
})
// Continue with next attempt...
}
@@ -398,6 +398,11 @@ type ReaderConfig struct {
// Default: 10s
MaxWait time.Duration
+ // ReadBatchTimeout is the amount of time to wait to fetch a message from a batch of kafka messages.
+ //
+ // Default: 10s
+ ReadBatchTimeout time.Duration
+
// ReadLagInterval sets the frequency at which the reader lag is updated.
// Setting this field to a negative value disables lag reporting.
ReadLagInterval time.Duration
@@ -505,7 +510,7 @@ type ReaderConfig struct {
// non-transactional and committed records are visible.
IsolationLevel IsolationLevel
- // Limit of how many attempts will be made before delivering the error.
+ // Limit of how many attempts to connect will be made before returning the error.
//
// The default is to try 3 times.
MaxAttempts int
@@ -649,6 +654,10 @@ func NewReader(config ReaderConfig) *Reader {
config.MaxWait = 10 * time.Second
}
+ if config.ReadBatchTimeout == 0 {
+ config.ReadBatchTimeout = 10 * time.Second
+ }
+
if config.ReadLagInterval == 0 {
config.ReadLagInterval = 1 * time.Minute
}
@@ -776,17 +785,17 @@ func (r *Reader) Close() error {
// offset when called. Note that this could result in an offset being committed
// before the message is fully processed.
//
-// If more fine grained control of when offsets are committed is required, it
+// If more fine-grained control of when offsets are committed is required, it
// is recommended to use FetchMessage with CommitMessages instead.
func (r *Reader) ReadMessage(ctx context.Context) (Message, error) {
m, err := r.FetchMessage(ctx)
if err != nil {
- return Message{}, err
+ return Message{}, fmt.Errorf("fetching message: %w", err)
}
if r.useConsumerGroup() {
if err := r.CommitMessages(ctx, m); err != nil {
- return Message{}, err
+ return Message{}, fmt.Errorf("committing message: %w", err)
}
}
@@ -1052,12 +1061,16 @@ func (r *Reader) SetOffsetAt(ctx context.Context, t time.Time) error {
}
r.mutex.Unlock()
+ if len(r.config.Brokers) < 1 {
+ return errors.New("no brokers in config")
+ }
+ var conn *Conn
+ var err error
for _, broker := range r.config.Brokers {
- conn, err := r.config.Dialer.DialLeader(ctx, "tcp", broker, r.config.Topic, r.config.Partition)
+ conn, err = r.config.Dialer.DialLeader(ctx, "tcp", broker, r.config.Topic, r.config.Partition)
if err != nil {
continue
}
-
deadline, _ := ctx.Deadline()
conn.SetDeadline(deadline)
offset, err := conn.ReadOffset(t)
@@ -1068,7 +1081,7 @@ func (r *Reader) SetOffsetAt(ctx context.Context, t time.Time) error {
return r.SetOffset(offset)
}
- return fmt.Errorf("error setting offset for timestamp %+v", t)
+ return fmt.Errorf("error dialing all brokers, one of the errors: %w", err)
}
// Stats returns a snapshot of the reader stats since the last time the method
@@ -1181,22 +1194,23 @@ func (r *Reader) start(offsetsByPartition map[topicPartition]int64) {
defer join.Done()
(&reader{
- dialer: r.config.Dialer,
- logger: r.config.Logger,
- errorLogger: r.config.ErrorLogger,
- brokers: r.config.Brokers,
- topic: key.topic,
- partition: int(key.partition),
- minBytes: r.config.MinBytes,
- maxBytes: r.config.MaxBytes,
- maxWait: r.config.MaxWait,
- backoffDelayMin: r.config.ReadBackoffMin,
- backoffDelayMax: r.config.ReadBackoffMax,
- version: r.version,
- msgs: r.msgs,
- stats: r.stats,
- isolationLevel: r.config.IsolationLevel,
- maxAttempts: r.config.MaxAttempts,
+ dialer: r.config.Dialer,
+ logger: r.config.Logger,
+ errorLogger: r.config.ErrorLogger,
+ brokers: r.config.Brokers,
+ topic: key.topic,
+ partition: int(key.partition),
+ minBytes: r.config.MinBytes,
+ maxBytes: r.config.MaxBytes,
+ maxWait: r.config.MaxWait,
+ readBatchTimeout: r.config.ReadBatchTimeout,
+ backoffDelayMin: r.config.ReadBackoffMin,
+ backoffDelayMax: r.config.ReadBackoffMax,
+ version: r.version,
+ msgs: r.msgs,
+ stats: r.stats,
+ isolationLevel: r.config.IsolationLevel,
+ maxAttempts: r.config.MaxAttempts,
// backwards-compatibility flags
offsetOutOfRangeError: r.config.OffsetOutOfRangeError,
@@ -1206,25 +1220,26 @@ func (r *Reader) start(offsetsByPartition map[topicPartition]int64) {
}
// A reader reads messages from kafka and produces them on its channels, it's
-// used as an way to asynchronously fetch messages while the main program reads
+// used as a way to asynchronously fetch messages while the main program reads
// them using the high level reader API.
type reader struct {
- dialer *Dialer
- logger Logger
- errorLogger Logger
- brokers []string
- topic string
- partition int
- minBytes int
- maxBytes int
- maxWait time.Duration
- backoffDelayMin time.Duration
- backoffDelayMax time.Duration
- version int64
- msgs chan<- readerMessage
- stats *readerStats
- isolationLevel IsolationLevel
- maxAttempts int
+ dialer *Dialer
+ logger Logger
+ errorLogger Logger
+ brokers []string
+ topic string
+ partition int
+ minBytes int
+ maxBytes int
+ maxWait time.Duration
+ readBatchTimeout time.Duration
+ backoffDelayMin time.Duration
+ backoffDelayMax time.Duration
+ version int64
+ msgs chan<- readerMessage
+ stats *readerStats
+ isolationLevel IsolationLevel
+ maxAttempts int
offsetOutOfRangeError bool
}
@@ -1331,7 +1346,7 @@ func (r *reader) run(ctx context.Context, offset int64) {
case errors.Is(err, UnknownTopicOrPartition):
r.withErrorLogger(func(log Logger) {
- log.Printf("failed to read from current broker for partition %d of %s at offset %d, topic or parition not found on this broker, %v", r.partition, r.topic, toHumanOffset(offset), r.brokers)
+ log.Printf("failed to read from current broker %v for partition %d of %s at offset %d: %v", r.brokers, r.partition, r.topic, toHumanOffset(offset), err)
})
conn.Close()
@@ -1343,7 +1358,7 @@ func (r *reader) run(ctx context.Context, offset int64) {
case errors.Is(err, NotLeaderForPartition):
r.withErrorLogger(func(log Logger) {
- log.Printf("failed to read from current broker for partition %d of %s at offset %d, not the leader", r.partition, r.topic, toHumanOffset(offset))
+ log.Printf("failed to read from current broker for partition %d of %s at offset %d: %v", r.partition, r.topic, toHumanOffset(offset), err)
})
conn.Close()
@@ -1357,7 +1372,7 @@ func (r *reader) run(ctx context.Context, offset int64) {
// Timeout on the kafka side, this can be safely retried.
errcount = 0
r.withLogger(func(log Logger) {
- log.Printf("no messages received from kafka within the allocated time for partition %d of %s at offset %d", r.partition, r.topic, toHumanOffset(offset))
+ log.Printf("no messages received from kafka within the allocated time for partition %d of %s at offset %d: %v", r.partition, r.topic, toHumanOffset(offset), err)
})
r.stats.timeouts.observe(1)
continue
@@ -1492,15 +1507,8 @@ func (r *reader) read(ctx context.Context, offset int64, conn *Conn) (int64, err
var size int64
var bytes int64
- const safetyTimeout = 10 * time.Second
- deadline := time.Now().Add(safetyTimeout)
- conn.SetReadDeadline(deadline)
-
for {
- if now := time.Now(); deadline.Sub(now) < (safetyTimeout / 2) {
- deadline = now.Add(safetyTimeout)
- conn.SetReadDeadline(deadline)
- }
+ conn.SetReadDeadline(time.Now().Add(r.readBatchTimeout))
if msg, err = batch.ReadMessage(); err != nil {
batch.Close()
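The fixed 10-second `safetyTimeout` is gone; the read deadline applied before each message in a batch now comes from the new `ReadBatchTimeout` option. A sketch of configuring it, with placeholder broker and topic values:

```go
package main

import (
	"time"

	kafka "github.com/segmentio/kafka-go"
)

func main() {
	r := kafka.NewReader(kafka.ReaderConfig{
		Brokers:          []string{"localhost:9092"}, // placeholder broker
		Topic:            "topic-A",                  // placeholder topic
		MaxBytes:         10e6,
		ReadBatchTimeout: 5 * time.Second, // deadline refreshed before each message read
	})
	defer r.Close()
}
```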
diff --git reader_test.go reader_test.go
index d73bdfbe3..f413d7429 100644
--- reader_test.go
+++ reader_test.go
@@ -313,7 +313,7 @@ func createTopic(t *testing.T, topic string, partitions int) {
})
if err != nil {
if !errors.Is(err, TopicAlreadyExists) {
- err = fmt.Errorf("creaetTopic, conn.createtTopics: %w", err)
+ err = fmt.Errorf("createTopic, conn.createTopics: %w", err)
t.Error(err)
t.FailNow()
}
diff --git resource.go resource.go
index f5c2e73a5..b9be107c2 100644
--- resource.go
+++ resource.go
@@ -1,5 +1,10 @@
package kafka
+import (
+ "fmt"
+ "strings"
+)
+
// https://github.com/apache/kafka/blob/trunk/clients/src/main/java/org/apache/kafka/common/resource/ResourceType.java
type ResourceType int8
@@ -15,6 +20,50 @@ const (
ResourceTypeDelegationToken ResourceType = 6
)
+func (rt ResourceType) String() string {
+ mapping := map[ResourceType]string{
+ ResourceTypeUnknown: "Unknown",
+ ResourceTypeAny: "Any",
+ ResourceTypeTopic: "Topic",
+ ResourceTypeGroup: "Group",
+ // Note that ResourceTypeBroker and ResourceTypeCluster have the same value.
+ // Since a map cannot have duplicate keys, both resolve to the single "Cluster" entry.
+ ResourceTypeCluster: "Cluster",
+ ResourceTypeTransactionalID: "Transactionalid",
+ ResourceTypeDelegationToken: "Delegationtoken",
+ }
+ s, ok := mapping[rt]
+ if !ok {
+ s = mapping[ResourceTypeUnknown]
+ }
+ return s
+}
+
+func (rt ResourceType) MarshalText() ([]byte, error) {
+ return []byte(rt.String()), nil
+}
+
+func (rt *ResourceType) UnmarshalText(text []byte) error {
+ normalized := strings.ToLower(string(text))
+ mapping := map[string]ResourceType{
+ "unknown": ResourceTypeUnknown,
+ "any": ResourceTypeAny,
+ "topic": ResourceTypeTopic,
+ "group": ResourceTypeGroup,
+ "broker": ResourceTypeBroker,
+ "cluster": ResourceTypeCluster,
+ "transactionalid": ResourceTypeTransactionalID,
+ "delegationtoken": ResourceTypeDelegationToken,
+ }
+ parsed, ok := mapping[normalized]
+ if !ok {
+ *rt = ResourceTypeUnknown
+ return fmt.Errorf("cannot parse %s as a ResourceType", normalized)
+ }
+ *rt = parsed
+ return nil
+}
+
// https://github.com/apache/kafka/blob/trunk/clients/src/main/java/org/apache/kafka/common/resource/PatternType.java
type PatternType int8
@@ -35,3 +84,40 @@ const (
// that start with 'foo'.
PatternTypePrefixed PatternType = 4
)
+
+func (pt PatternType) String() string {
+ mapping := map[PatternType]string{
+ PatternTypeUnknown: "Unknown",
+ PatternTypeAny: "Any",
+ PatternTypeMatch: "Match",
+ PatternTypeLiteral: "Literal",
+ PatternTypePrefixed: "Prefixed",
+ }
+ s, ok := mapping[pt]
+ if !ok {
+ s = mapping[PatternTypeUnknown]
+ }
+ return s
+}
+
+func (pt PatternType) MarshalText() ([]byte, error) {
+ return []byte(pt.String()), nil
+}
+
+func (pt *PatternType) UnmarshalText(text []byte) error {
+ normalized := strings.ToLower(string(text))
+ mapping := map[string]PatternType{
+ "unknown": PatternTypeUnknown,
+ "any": PatternTypeAny,
+ "match": PatternTypeMatch,
+ "literal": PatternTypeLiteral,
+ "prefixed": PatternTypePrefixed,
+ }
+ parsed, ok := mapping[normalized]
+ if !ok {
+ *pt = PatternTypeUnknown
+ return fmt.Errorf("cannot parse %s as a PatternType", normalized)
+ }
+ *pt = parsed
+ return nil
+}
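A short sketch of the new text round-tripping; parsing is case-insensitive and the `String()` methods satisfy `fmt.Stringer`:

```go
package main

import (
	"fmt"

	kafka "github.com/segmentio/kafka-go"
)

func main() {
	var rt kafka.ResourceType
	if err := rt.UnmarshalText([]byte("TOPIC")); err != nil { // case-insensitive
		panic(err)
	}
	fmt.Println(rt) // Topic

	text, _ := kafka.PatternTypeLiteral.MarshalText()
	fmt.Println(string(text)) // Literal
}
```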
diff --git a/resource_test.go b/resource_test.go
new file mode 100644
index 000000000..b0175d2a3
--- /dev/null
+++ resource_test.go
@@ -0,0 +1,58 @@
+package kafka
+
+import "testing"
+
+func TestResourceTypeMarshal(t *testing.T) {
+ for i := ResourceTypeUnknown; i <= ResourceTypeDelegationToken; i++ {
+ text, err := i.MarshalText()
+ if err != nil {
+ t.Errorf("couldn't marshal %d to text: %s", i, err)
+ }
+ var got ResourceType
+ err = got.UnmarshalText(text)
+ if err != nil {
+ t.Errorf("couldn't unmarshal %s to ResourceType: %s", text, err)
+ }
+ if got != i {
+ t.Errorf("got %d, want %d", got, i)
+ }
+ }
+}
+
+// Verify that the text version of ResourceTypeBroker is "Cluster".
+// This is added since ResourceTypeBroker and ResourceTypeCluster
+// have the same value.
+func TestResourceTypeBroker(t *testing.T) {
+ text, err := ResourceTypeBroker.MarshalText()
+ if err != nil {
+ t.Errorf("couldn't marshal %d to text: %s", ResourceTypeBroker, err)
+ }
+ if string(text) != "Cluster" {
+ t.Errorf("got %s, want %s", string(text), "Cluster")
+ }
+ var got ResourceType
+ err = got.UnmarshalText(text)
+ if err != nil {
+ t.Errorf("couldn't unmarshal %s to ResourceType: %s", text, err)
+ }
+ if got != ResourceTypeBroker {
+ t.Errorf("got %d, want %d", got, ResourceTypeBroker)
+ }
+}
+
+func TestPatternTypeMarshal(t *testing.T) {
+ for i := PatternTypeUnknown; i <= PatternTypePrefixed; i++ {
+ text, err := i.MarshalText()
+ if err != nil {
+ t.Errorf("couldn't marshal %d to text: %s", i, err)
+ }
+ var got PatternType
+ err = got.UnmarshalText(text)
+ if err != nil {
+ t.Errorf("couldn't unmarshal %s to PatternType: %s", text, err)
+ }
+ if got != i {
+ t.Errorf("got %d, want %d", got, i)
+ }
+ }
+}
diff --git sasl/aws_msk_iam_v2/README.md sasl/aws_msk_iam_v2/README.md
index 2a7af8a37..e3b6c537b 100644
--- sasl/aws_msk_iam_v2/README.md
+++ sasl/aws_msk_iam_v2/README.md
@@ -12,49 +12,4 @@ You can add this module to your dependency by running the command below.
go get github.com/segmentio/kafka-go/sasl/aws_msk_iam_v2
```
-You can use the `Mechanism` for SASL authentication, like below.
-
-```go
-package main
-
-import (
- "context"
- "crypto/tls"
- "time"
-
- signer "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
- awsCfg "github.com/aws/aws-sdk-go-v2/config"
- "github.com/segmentio/kafka-go"
- "github.com/segmentio/kafka-go/sasl/aws_msk_iam_v2"
-)
-
-func main() {
- ctx := context.Background()
-
- // using aws-sdk-go-v2
- // NOTE: address error properly
-
- cfg, _ := awsCfg.LoadDefaultConfig(ctx)
- creds, _ := cfg.Credentials.Retrieve(ctx)
- m := &aws_msk_iam_v2.Mechanism{
- Signer: signer.NewSigner(),
- Credentials: creds,
- Region: "us-east-1",
- SignTime: time.Now(),
- Expiry: time.Minute * 5,
- }
- config := kafka.ReaderConfig{
- Brokers: []string{"https://localhost"},
- GroupID: "some-consumer-group",
- GroupTopics: []string{"some-topic"},
- Dialer: &kafka.Dialer{
- Timeout: 10 * time.Second,
- DualStack: true,
- SASLMechanism: m,
- TLS: &tls.Config{},
- },
- }
-}
-
-
-```
\ No newline at end of file
+Please find the sample code in [example_test.go](./example_test.go); you can use the `Mechanism` for SASL authentication with both `Reader` and `Writer`.
diff --git a/sasl/aws_msk_iam_v2/example_test.go b/sasl/aws_msk_iam_v2/example_test.go
new file mode 100644
index 000000000..e7bcfdbd1
--- /dev/null
+++ sasl/aws_msk_iam_v2/example_test.go
@@ -0,0 +1,30 @@
+package aws_msk_iam_v2_test
+
+import (
+ "context"
+ "crypto/tls"
+ "time"
+
+ "github.com/aws/aws-sdk-go-v2/config"
+ "github.com/segmentio/kafka-go"
+ "github.com/segmentio/kafka-go/sasl/aws_msk_iam_v2"
+)
+
+func main() {
+ cfg, err := config.LoadDefaultConfig(context.TODO())
+ if err != nil {
+ panic(err)
+ }
+ mechanism := aws_msk_iam_v2.NewMechanism(cfg)
+ _ = kafka.ReaderConfig{
+ Brokers: []string{"https://localhost"},
+ GroupID: "some-consumer-group",
+ GroupTopics: []string{"some-topic"},
+ Dialer: &kafka.Dialer{
+ Timeout: 10 * time.Second,
+ DualStack: true,
+ SASLMechanism: mechanism,
+ TLS: &tls.Config{},
+ },
+ }
+}
diff --git sasl/aws_msk_iam_v2/go.mod sasl/aws_msk_iam_v2/go.mod
index 69d811251..0b11730f1 100644
--- sasl/aws_msk_iam_v2/go.mod
+++ sasl/aws_msk_iam_v2/go.mod
@@ -3,8 +3,9 @@ module github.com/segmentio/kafka-go/sasl/aws_msk_iam_v2
go 1.15
require (
- github.com/aws/aws-sdk-go-v2 v1.16.7
- github.com/aws/aws-sdk-go-v2/credentials v1.12.9
- github.com/segmentio/kafka-go v0.4.32
- github.com/stretchr/testify v1.7.1
+ github.com/aws/aws-sdk-go-v2 v1.16.12
+ github.com/aws/aws-sdk-go-v2/config v1.17.2
+ github.com/aws/aws-sdk-go-v2/credentials v1.12.15
+ github.com/segmentio/kafka-go v0.4.34
+ github.com/stretchr/testify v1.8.0
)
diff --git sasl/aws_msk_iam_v2/go.sum sasl/aws_msk_iam_v2/go.sum
index ebef8f424..e70fb99b2 100644
--- sasl/aws_msk_iam_v2/go.sum
+++ sasl/aws_msk_iam_v2/go.sum
@@ -1,15 +1,27 @@
-github.com/aws/aws-sdk-go-v2 v1.16.7 h1:zfBwXus3u14OszRxGcqCDS4MfMCv10e8SMJ2r8Xm0Ns=
-github.com/aws/aws-sdk-go-v2 v1.16.7/go.mod h1:6CpKuLXg2w7If3ABZCl/qZ6rEgwtjZTn4eAf4RcEyuw=
-github.com/aws/aws-sdk-go-v2/credentials v1.12.9 h1:DloAJr0/jbvm0iVRFDFh8GlWxrOd9XKyX82U+dfVeZs=
-github.com/aws/aws-sdk-go-v2/credentials v1.12.9/go.mod h1:2Vavxl1qqQXJ8MUcQZTsIEW8cwenFCWYXtLRPba3L/o=
-github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.12.8/go.mod h1:oL1Q3KuCq1D4NykQnIvtRiBGLUXhcpY5pl6QZB2XEPU=
-github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.14/go.mod h1:kdjrMwHwrC3+FsKhNcCMJ7tUVj/8uSD5CZXeQ4wV6fM=
-github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.8/go.mod h1:ZIV8GYoC6WLBW5KGs+o4rsc65/ozd+eQ0L31XF5VDwk=
-github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.8/go.mod h1:rDVhIMAX9N2r8nWxDUlbubvvaFMnfsm+3jAV7q+rpM4=
-github.com/aws/aws-sdk-go-v2/service/sso v1.11.12/go.mod h1:MO4qguFjs3wPGcCSpQ7kOFTwRvb+eu+fn+1vKleGHUk=
-github.com/aws/aws-sdk-go-v2/service/sts v1.16.9/go.mod h1:O1IvkYxr+39hRf960Us6j0x1P8pDqhTX+oXM5kQNl/Y=
-github.com/aws/smithy-go v1.12.0 h1:gXpeZel/jPoWQ7OEmLIgCUnhkFftqNfwWUwAHSlp1v0=
-github.com/aws/smithy-go v1.12.0/go.mod h1:Tg+OJXh4MB2R/uN61Ko2f6hTZwB/ZYGOtib8J3gBHzA=
+github.com/aws/aws-sdk-go-v2 v1.16.12 h1:wbMYa2PlFysFx2GLIQojr6FJV5+OWCM/BwyHXARxETA=
+github.com/aws/aws-sdk-go-v2 v1.16.12/go.mod h1:C+Ym0ag2LIghJbXhfXZ0YEEp49rBWowxKzJLUoob0ts=
+github.com/aws/aws-sdk-go-v2/config v1.17.2 h1:V96WPd2a1H/MXGZjk4zto+KpYnwZI2kdIdy/cI8kYnQ=
+github.com/aws/aws-sdk-go-v2/config v1.17.2/go.mod h1:jumS/AMwul4WaG8vyXsF6kUndG9zndR+yfYBwl4i9ds=
+github.com/aws/aws-sdk-go-v2/credentials v1.12.15 h1:6DONxG9cR3pAuISj1Irh5u2SRqCfIJwyHNyDDes7SZw=
+github.com/aws/aws-sdk-go-v2/credentials v1.12.15/go.mod h1:41zTC6U/78fUD7ZCa5NymTJANDjfqySg5YEAYVFl2Ic=
+github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.12.13 h1:+uferi8SUDZtMloCDt24Zenyy/i71C/ua5mjUCpbpN0=
+github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.12.13/go.mod h1:y0eXmsNBFIVjUE8ZBjES8myOHlMsXDz7qGT93+MVdjk=
+github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.19 h1:gC5mudiFrWGhzcdoWj1iCGUfrzCpQG0MQIQf0CXFFQQ=
+github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.19/go.mod h1:llxE6bwUZhuCas0K7qGiu5OgMis3N7kdWtFSxoHmJ7E=
+github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.13 h1:qezY57na06d6kSE7uuB0N7XEflu914AXx/hg2L8Ykcw=
+github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.13/go.mod h1:lB12mkZqCSo5PsdBFLNqc2M/OOYgNAy8UtaktyuWvE8=
+github.com/aws/aws-sdk-go-v2/internal/ini v1.3.20 h1:GvszACAU8GSV3+Tant5GutW6smY8WavrP8ZuRS9Ku4Q=
+github.com/aws/aws-sdk-go-v2/internal/ini v1.3.20/go.mod h1:bfTcsThj5a9P5pIGRy0QudJ8k4+issxXX+O6Djnd5Cs=
+github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.13 h1:ObfthqDyhe7rMAOa7pqft6974VHIk8BAJB7kYdoIfTA=
+github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.13/go.mod h1:V390DK4MQxLpDdXxFqizyz8KUxuWImkW/xzgXMz0yyk=
+github.com/aws/aws-sdk-go-v2/service/sso v1.11.18 h1:gTn1a/FbcOXK5LQS88dD5k+PKwyjVvhAEEwyN4c6eW8=
+github.com/aws/aws-sdk-go-v2/service/sso v1.11.18/go.mod h1:ytmEi5+qwcSNcV2pVA8PIb1DnKT/0Bu/K4nfJHwoM6c=
+github.com/aws/aws-sdk-go-v2/service/ssooidc v1.13.1 h1:p48IfndYbRk3iDsoQAmVXdCKEM5+7Y50JAPikjwk8gI=
+github.com/aws/aws-sdk-go-v2/service/ssooidc v1.13.1/go.mod h1:NY+G+8PW0ISyJ7/6t5mgOe6qpJiwZa9Jix05WPscJjg=
+github.com/aws/aws-sdk-go-v2/service/sts v1.16.14 h1:7kxso8VZLQ86Jg27QRBw6fjrQhQ8CMNMZ7SB0w7RQiA=
+github.com/aws/aws-sdk-go-v2/service/sts v1.16.14/go.mod h1:Y+BUV19q3OmQVqNUlbZ40zVi3NM6Biuxwkx/qdSD/CY=
+github.com/aws/smithy-go v1.13.0 h1:YfyEmSJLo7fAv8FbuDK4R8F9aAmi9DZ88Zb/KJJmUl0=
+github.com/aws/smithy-go v1.13.0/go.mod h1:Tg+OJXh4MB2R/uN61Ko2f6hTZwB/ZYGOtib8J3gBHzA=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
@@ -17,33 +29,41 @@ github.com/google/go-cmp v0.5.8 h1:e6P7q2lk1O+qJJb4BtCQXlK8vWEO8V1ZeuEdJNOqZyg=
github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U=
-github.com/klauspost/compress v1.14.2 h1:S0OHlFk/Gbon/yauFJ4FfJJF5V0fc5HbBTJazi28pRw=
-github.com/klauspost/compress v1.14.2/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
-github.com/pierrec/lz4/v4 v4.1.14 h1:+fL8AQEZtz/ijeNnpduH0bROTu0O3NZAlPjQxGn8LwE=
-github.com/pierrec/lz4/v4 v4.1.14/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4=
+github.com/klauspost/compress v1.15.7 h1:7cgTQxJCU/vy+oP/E3B9RGbQTgbiVzIJWIKOLoAsPok=
+github.com/klauspost/compress v1.15.7/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU=
+github.com/pierrec/lz4/v4 v4.1.15 h1:MO0/ucJhngq7299dKLwIMtgTfbkoSPF6AoMYDd8Q4q0=
+github.com/pierrec/lz4/v4 v4.1.15/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
-github.com/segmentio/kafka-go v0.4.32 h1:Ohr+9E+kDv/Ld2UPJN9hnKZRd2qgiqCmI8v2e1qlfLM=
-github.com/segmentio/kafka-go v0.4.32/go.mod h1:JAPPIiY3MQIwVHj64CWOP0LsFFfQ7H0w69kuoxnMIS0=
+github.com/segmentio/kafka-go v0.4.34 h1:Dm6YlLMiVSiwwav20KY0AoY63s661FXevwJ3CVHUERo=
+github.com/segmentio/kafka-go v0.4.34/go.mod h1:GAjxBQJdQMB5zfNA21AhpaqOB2Mu+w3De4ni3Gbm8y0=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
-github.com/stretchr/testify v1.7.1 h1:5TQK59W5E3v0r2duFAb7P95B6hEeOyEnHRa8MjYSMTY=
+github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
-github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c h1:u40Z8hqBAAQyv+vATcGgV0YCnDjqSL7/q/JyPhhJSPk=
-github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c/go.mod h1:lB8K/P019DLNhemzwFU4jHLhdvlE6uDZjXFejJXr49I=
-github.com/xdg/stringprep v1.0.0 h1:d9X0esnoa3dFsV0FG35rAT0RIhYFlPq7MiP+DW89La0=
-github.com/xdg/stringprep v1.0.0/go.mod h1:Jhud4/sHMO4oL310DaZAKk9ZaJ08SJfe+sJh0HrGL1Y=
-golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
-golang.org/x/crypto v0.0.0-20190506204251-e1dfcc566284 h1:rlLehGeYg6jfoyz/eDqDU1iRXLKfR42nnNh57ytKEWo=
-golang.org/x/crypto v0.0.0-20190506204251-e1dfcc566284/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
-golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3 h1:0GoQqolDA55aaLxZyTzK/Y2ePZzZTUrRacwib7cNsYQ=
-golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
-golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg=
-golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+github.com/stretchr/testify v1.8.0 h1:pSgiaMZlXftHpm5L7V1+rVB+AZJydKsMxsQBIJw4PKk=
+github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
+github.com/xdg/scram v1.0.5 h1:TuS0RFmt5Is5qm9Tm2SoD89OPqe4IRiFtyFY4iwWXsw=
+github.com/xdg/scram v1.0.5/go.mod h1:lB8K/P019DLNhemzwFU4jHLhdvlE6uDZjXFejJXr49I=
+github.com/xdg/stringprep v1.0.3 h1:cmL5Enob4W83ti/ZHuZLuKD/xqJfus4fVPwE+/BDm+4=
+github.com/xdg/stringprep v1.0.3/go.mod h1:Jhud4/sHMO4oL310DaZAKk9ZaJ08SJfe+sJh0HrGL1Y=
+golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d h1:sK3txAijHtOK88l68nt020reeT1ZdKLIYetKl95FzVY=
+golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
+golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
+golang.org/x/net v0.0.0-20220706163947-c90051bbdb60 h1:8NSylCMxLW4JvserAndSgFL7aPli6A68yf0bYFTcWCM=
+golang.org/x/net v0.0.0-20220706163947-c90051bbdb60/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
+golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
+golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
+golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk=
+golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
+golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
-gopkg.in/yaml.v3 v3.0.0-20220512140231-539c8e751b99 h1:dbuHpmKjkDzSOMKAWl10QNlgaZUd3V1q99xc81tt2Kc=
-gopkg.in/yaml.v3 v3.0.0-20220512140231-539c8e751b99/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
+gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
diff --git sasl/aws_msk_iam_v2/msk_iam.go sasl/aws_msk_iam_v2/msk_iam.go
index f6b06398a..7fa49337f 100644
--- sasl/aws_msk_iam_v2/msk_iam.go
+++ sasl/aws_msk_iam_v2/msk_iam.go
@@ -4,7 +4,6 @@ import (
"context"
"encoding/json"
"errors"
- "fmt"
"net/http"
"net/url"
"runtime"
@@ -32,15 +31,15 @@ const (
queryExpiryKey = "X-Amz-Expires"
)
-var signUserAgent = fmt.Sprintf("kafka-go/sasl/aws_msk_iam/%s", runtime.Version())
+var signUserAgent = "kafka-go/sasl/aws_msk_iam_v2/" + runtime.Version()
// Mechanism implements sasl.Mechanism for the AWS_MSK_IAM mechanism, based on the official java implementation:
// https://github.com/aws/aws-msk-iam-auth
type Mechanism struct {
// The sigv4.Signer of aws-sdk-go-v2 to use when signing the request. Required.
Signer *signer.Signer
- // The aws.Credentials of aws-sdk-go-v2. Required.
- Credentials aws.Credentials
+ // The aws.Config.Credentials or config.CredentialsProvider of aws-sdk-go-v2. Required.
+ Credentials aws.CredentialsProvider
// The region where the msk cluster is hosted, e.g. "us-east-1". Required.
Region string
// The time the request is planned for. Optional, defaults to time.Now() at time of authentication.
@@ -62,19 +61,20 @@ func (m *Mechanism) Next(ctx context.Context, challenge []byte) (bool, []byte, e
// Start produces the authentication values required for AWS_MSK_IAM. It produces the following json as a byte array,
// making use of the aws-sdk to produce the signed output.
-// {
-// "version" : "2020_10_22",
-// "host" : "<broker host>",
-// "user-agent": "<user agent string from the client>",
-// "action": "kafka-cluster:Connect",
-// "x-amz-algorithm" : "<algorithm>",
-// "x-amz-credential" : "<clientAWSAccessKeyID>/<date in yyyyMMdd format>/<region>/kafka-cluster/aws4_request",
-// "x-amz-date" : "<timestamp in yyyyMMdd'T'HHmmss'Z' format>",
-// "x-amz-security-token" : "<clientAWSSessionToken if any>",
-// "x-amz-signedheaders" : "host",
-// "x-amz-expires" : "<expiration in seconds>",
-// "x-amz-signature" : "<AWS SigV4 signature computed by the client>"
-// }
+//
+// {
+// "version" : "2020_10_22",
+// "host" : "<broker host>",
+// "user-agent": "<user agent string from the client>",
+// "action": "kafka-cluster:Connect",
+// "x-amz-algorithm" : "<algorithm>",
+// "x-amz-credential" : "<clientAWSAccessKeyID>/<date in yyyyMMdd format>/<region>/kafka-cluster/aws4_request",
+// "x-amz-date" : "<timestamp in yyyyMMdd'T'HHmmss'Z' format>",
+// "x-amz-security-token" : "<clientAWSSessionToken if any>",
+// "x-amz-signedheaders" : "host",
+// "x-amz-expires" : "<expiration in seconds>",
+// "x-amz-signature" : "<AWS SigV4 signature computed by the client>"
+// }
func (m *Mechanism) Start(ctx context.Context) (sess sasl.StateMachine, ir []byte, err error) {
signedMap, err := m.preSign(ctx)
if err != nil {
@@ -92,7 +92,12 @@ func (m *Mechanism) preSign(ctx context.Context) (map[string]string, error) {
return nil, err
}
- signedUrl, header, err := m.Signer.PresignHTTP(ctx, m.Credentials, req, signPayload, signService, m.Region, defaultSignTime(m.SignTime))
+ creds, err := m.Credentials.Retrieve(ctx)
+ if err != nil {
+ return nil, err
+ }
+
+ signedUrl, header, err := m.Signer.PresignHTTP(ctx, creds, req, signPayload, signService, m.Region, defaultSignTime(m.SignTime))
if err != nil {
return nil, err
}
@@ -164,3 +169,12 @@ func defaultSignTime(v time.Time) time.Time {
}
return v
}
+
+// NewMechanism provides a Mechanism that uses the credentials and region from the given aws.Config.
+func NewMechanism(awsCfg aws.Config) *Mechanism {
+ return &Mechanism{
+ Signer: signer.NewSigner(),
+ Credentials: awsCfg.Credentials,
+ Region: awsCfg.Region,
+ }
+}
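Since `Credentials` is now an `aws.CredentialsProvider` rather than a resolved `aws.Credentials` value, callers either build the mechanism from a whole `aws.Config` via `NewMechanism` or supply a provider directly. A sketch assuming static credentials, with placeholder values:

```go
package main

import (
	signer "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
	"github.com/aws/aws-sdk-go-v2/credentials"
	"github.com/segmentio/kafka-go/sasl/aws_msk_iam_v2"
)

func main() {
	_ = &aws_msk_iam_v2.Mechanism{
		Signer:      signer.NewSigner(),
		Credentials: credentials.NewStaticCredentialsProvider("AKIA-placeholder", "secret-placeholder", ""),
		Region:      "us-east-1",
	}
}
```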
diff --git sasl/aws_msk_iam_v2/msk_iam_test.go sasl/aws_msk_iam_v2/msk_iam_test.go
index d5acb6ad1..e0fe014da 100644
--- sasl/aws_msk_iam_v2/msk_iam_test.go
+++ sasl/aws_msk_iam_v2/msk_iam_test.go
@@ -7,11 +7,12 @@ import (
"testing"
"time"
+ "github.com/aws/aws-sdk-go-v2/aws"
"github.com/segmentio/kafka-go/sasl"
"github.com/stretchr/testify/assert"
signer "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
- credentialsv2 "github.com/aws/aws-sdk-go-v2/credentials"
+ "github.com/aws/aws-sdk-go-v2/credentials"
)
const (
@@ -23,17 +24,12 @@ const (
var signTime = time.Date(2021, 10, 14, 13, 5, 0, 0, time.UTC)
func TestAwsMskIamMechanism(t *testing.T) {
- creds, err := credentialsv2.NewStaticCredentialsProvider(accessKeyId, secretAccessKey, "").Retrieve(context.Background())
- if err != nil {
- t.Fatal(err)
- }
-
+ creds := credentials.NewStaticCredentialsProvider(accessKeyId, secretAccessKey, "")
ctxWithMetadata := func() context.Context {
return sasl.WithMetadata(context.Background(), &sasl.Metadata{
Host: "localhost",
Port: 9092,
})
-
}
tests := []struct {
@@ -64,7 +60,6 @@ func TestAwsMskIamMechanism(t *testing.T) {
Region: "us-east-1",
SignTime: signTime,
}
-
sess, auth, err := mskMechanism.Start(ctx)
if tt.shouldFail { // if error is expected
if err == nil { // but we don't find one
@@ -154,3 +149,15 @@ func TestDefaultSignTime(t *testing.T) {
})
}
}
+
+func TestNewMechanism(t *testing.T) {
+ region := "us-east-1"
+ creds := credentials.StaticCredentialsProvider{}
+ awsCfg := aws.Config{
+ Region: region,
+ Credentials: creds,
+ }
+ m := NewMechanism(awsCfg)
+ assert.Equal(t, m.Region, region)
+ assert.Equal(t, m.Credentials, creds)
+}
diff --git sasl/scram/scram.go sasl/scram/scram.go
index bc2b28ed2..b29885f32 100644
--- sasl/scram/scram.go
+++ sasl/scram/scram.go
@@ -7,7 +7,7 @@ import (
"hash"
"github.com/segmentio/kafka-go/sasl"
- "github.com/xdg/scram"
+ "github.com/xdg-go/scram"
)
// Algorithm determines the hash function used by SCRAM to protect the user's
diff --git stats.go stats.go
index aec9df5e2..ef1e582cb 100644
--- stats.go
+++ stats.go
@@ -6,19 +6,21 @@ import (
)
// SummaryStats is a data structure that carries a summary of observed values.
-// The average, minimum, and maximum are reported.
type SummaryStats struct {
- Avg int64 `metric:"avg" type:"gauge"`
- Min int64 `metric:"min" type:"gauge"`
- Max int64 `metric:"max" type:"gauge"`
+ Avg int64 `metric:"avg" type:"gauge"`
+ Min int64 `metric:"min" type:"gauge"`
+ Max int64 `metric:"max" type:"gauge"`
+ Count int64 `metric:"count" type:"counter"`
+ Sum int64 `metric:"sum" type:"counter"`
}
-// DurationStats is a data structure that carries a summary of observed duration
-// values. The average, minimum, and maximum are reported.
+// DurationStats is a data structure that carries a summary of observed duration values.
type DurationStats struct {
- Avg time.Duration `metric:"avg" type:"gauge"`
- Min time.Duration `metric:"min" type:"gauge"`
- Max time.Duration `metric:"max" type:"gauge"`
+ Avg time.Duration `metric:"avg" type:"gauge"`
+ Min time.Duration `metric:"min" type:"gauge"`
+ Max time.Duration `metric:"max" type:"gauge"`
+ Count int64 `metric:"count" type:"counter"`
+ Sum time.Duration `metric:"sum" type:"counter"`
}
// counter is an atomic incrementing counter which gets reset on snapshot.
@@ -167,17 +169,21 @@ func (s *summary) snapshot() SummaryStats {
}
return SummaryStats{
- Avg: avg,
- Min: min,
- Max: max,
+ Avg: avg,
+ Min: min,
+ Max: max,
+ Count: count,
+ Sum: sum,
}
}
func (s *summary) snapshotDuration() DurationStats {
summary := s.snapshot()
return DurationStats{
- Avg: time.Duration(summary.Avg),
- Min: time.Duration(summary.Min),
- Max: time.Duration(summary.Max),
+ Avg: time.Duration(summary.Avg),
+ Min: time.Duration(summary.Min),
+ Max: time.Duration(summary.Max),
+ Count: summary.Count,
+ Sum: time.Duration(summary.Sum),
}
}
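With `Count` and `Sum` exported as counters, consumers of the stats can derive means over their own scrape windows instead of relying on the pre-computed `Avg` gauge. A sketch against the writer stats; broker and topic are placeholders:

```go
package main

import (
	"fmt"
	"time"

	kafka "github.com/segmentio/kafka-go"
)

func main() {
	w := &kafka.Writer{Addr: kafka.TCP("localhost:9092"), Topic: "topic-A"}
	defer w.Close()

	stats := w.Stats()
	if n := stats.BatchTime.Count; n > 0 {
		// Mean over this snapshot window, derived from the raw counters.
		fmt.Println("mean batch time:", stats.BatchTime.Sum/time.Duration(n))
	}
}
```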
diff --git transport.go transport.go
index 6ba2d638c..685bdddb1 100644
--- transport.go
+++ transport.go
@@ -60,7 +60,7 @@ type Transport struct {
// Time limit set for establishing connections to the kafka cluster. This
// limit includes all round trips done to establish the connections (TLS
- // hadbhaske, SASL negotiation, etc...).
+ // handshake, SASL negotiation, etc...).
//
// Defaults to 5s.
DialTimeout time.Duration
@@ -81,6 +81,10 @@ type Transport struct {
// Default to 6s.
MetadataTTL time.Duration
+ // Topic names for the metadata cached by this transport. If this field is left blank,
+ // metadata information of all topics in the cluster will be retrieved.
+ MetadataTopics []string
+
// Unique identifier that the transport communicates to the brokers when it
// sends requests.
ClientID string
@@ -150,7 +154,7 @@ func (t *Transport) CloseIdleConnections() {
// package.
//
// The type of the response message will match the type of the request. For
-// exmple, if RoundTrip was called with a *fetch.Request as argument, the value
+// example, if RoundTrip was called with a *fetch.Request as argument, the value
// returned will be of type *fetch.Response. It is safe for the program to do a
// type assertion after checking that no error was returned.
//
@@ -235,14 +239,15 @@ func (t *Transport) grabPool(addr net.Addr) *connPool {
p = &connPool{
refc: 2,
- dial: t.dial(),
- dialTimeout: t.dialTimeout(),
- idleTimeout: t.idleTimeout(),
- metadataTTL: t.metadataTTL(),
- clientID: t.ClientID,
- tls: t.TLS,
- sasl: t.SASL,
- resolver: t.Resolver,
+ dial: t.dial(),
+ dialTimeout: t.dialTimeout(),
+ idleTimeout: t.idleTimeout(),
+ metadataTTL: t.metadataTTL(),
+ metadataTopics: t.MetadataTopics,
+ clientID: t.ClientID,
+ tls: t.TLS,
+ sasl: t.SASL,
+ resolver: t.Resolver,
ready: make(event),
wake: make(chan event),
@@ -276,14 +281,15 @@ type connPool struct {
// Immutable fields of the connection pool. Connections access these field
// on their parent pool in a ready-only fashion, so no synchronization is
// required.
- dial func(context.Context, string, string) (net.Conn, error)
- dialTimeout time.Duration
- idleTimeout time.Duration
- metadataTTL time.Duration
- clientID string
- tls *tls.Config
- sasl sasl.Mechanism
- resolver BrokerResolver
+ dial func(context.Context, string, string) (net.Conn, error)
+ dialTimeout time.Duration
+ idleTimeout time.Duration
+ metadataTTL time.Duration
+ metadataTopics []string
+ clientID string
+ tls *tls.Config
+ sasl sasl.Mechanism
+ resolver BrokerResolver
// Signaling mechanisms to orchestrate communications between the pool and
// the rest of the program.
once sync.Once // ensure that `ready` is triggered only once
@@ -413,14 +419,16 @@ func (p *connPool) roundTrip(ctx context.Context, req Request) (Response, error)
case *meta.Response:
m := req.(*meta.Request)
// If we get here with allow auto topic creation then
- // we didn't have that topic in our cache so we should update
+ // we didn't have that topic in our cache, so we should update
// the cache.
if m.AllowAutoTopicCreation {
topicsToRefresh := make([]string, 0, len(resp.Topics))
for _, topic := range resp.Topics {
- // fixes issue 806: don't refresh topics that failed to create,
- // it may means kafka doesn't enable auto topic creation.
- // This causes the library to hang indefinitely, same as createtopics process.
+ // Don't refresh topics that failed to create, since that may
+ // mean that automatic topic creation is not enabled.
+ // That causes the library to hang indefinitely, same as the
+ // createtopics process. Fixes issue 806.
if topic.ErrorCode != 0 {
continue
}
@@ -590,13 +598,16 @@ func (p *connPool) discover(ctx context.Context, wake <-chan event) {
var notify event
done := ctx.Done()
+ req := &meta.Request{
+ TopicNames: p.metadataTopics,
+ }
+
for {
c, err := p.grabClusterConn(ctx)
if err != nil {
p.update(ctx, nil, err)
} else {
res := make(async, 1)
- req := &meta.Request{}
deadline, cancel := context.WithTimeout(ctx, p.metadataTTL)
c.reqs <- connRequest{
ctx: deadline,
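A sketch of the new `MetadataTopics` option, which limits the transport's background metadata refresh to a known set of topics instead of the whole cluster; broker and topic are placeholders:

```go
package main

import (
	kafka "github.com/segmentio/kafka-go"
)

func main() {
	w := &kafka.Writer{
		Addr:  kafka.TCP("localhost:9092"),
		Topic: "topic-A",
		Transport: &kafka.Transport{
			// Restrict the cached metadata to the topics this writer
			// cares about instead of retrieving the whole cluster.
			MetadataTopics: []string{"topic-A"},
		},
	}
	defer w.Close()
}
```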
diff --git writer.go writer.go
index 8d48e95cd..3c7af907a 100644
--- writer.go
+++ writer.go
@@ -27,35 +27,35 @@ import (
// by the function and test if it an instance of kafka.WriteErrors in order to
// identify which messages have succeeded or failed, for example:
//
-// // Construct a synchronous writer (the default mode).
-// w := &kafka.Writer{
-// Addr: Addr: kafka.TCP("localhost:9092", "localhost:9093", "localhost:9094"),
-// Topic: "topic-A",
-// RequiredAcks: kafka.RequireAll,
-// }
+// // Construct a synchronous writer (the default mode).
+// w := &kafka.Writer{
+// Addr: kafka.TCP("localhost:9092", "localhost:9093", "localhost:9094"),
+// Topic: "topic-A",
+// RequiredAcks: kafka.RequireAll,
+// }
//
-// ...
+// ...
//
-// // Passing a context can prevent the operation from blocking indefinitely.
-// switch err := w.WriteMessages(ctx, msgs...).(type) {
-// case nil:
-// case kafka.WriteErrors:
-// for i := range msgs {
-// if err[i] != nil {
-// // handle the error writing msgs[i]
-// ...
+// // Passing a context can prevent the operation from blocking indefinitely.
+// switch err := w.WriteMessages(ctx, msgs...).(type) {
+// case nil:
+// case kafka.WriteErrors:
+// for i := range msgs {
+// if err[i] != nil {
+// // handle the error writing msgs[i]
+// ...
+// }
// }
+// default:
+// // handle other errors
+// ...
// }
-// default:
-// // handle other errors
-// ...
-// }
//
// In asynchronous mode, the program may configure a completion handler on the
// writer to receive notifications of messages being written to kafka:
//
// w := &kafka.Writer{
-// Addr: Addr: kafka.TCP("localhost:9092", "localhost:9093", "localhost:9094"),
+// Addr: kafka.TCP("localhost:9092", "localhost:9093", "localhost:9094"),
// Topic: "topic-A",
// RequiredAcks: kafka.RequireAll,
// Async: true, // make the writer asynchronous
@@ -100,6 +100,18 @@ type Writer struct {
// The default is to try at most 10 times.
MaxAttempts int
+ // WriteBackoffMin optionally sets the smallest amount of time the writer waits before
+ // it attempts to write a batch of messages.
+ //
+ // Default: 100ms
+ WriteBackoffMin time.Duration
+
+ // WriteBackoffMax optionally sets the maximum amount of time the writer waits before
+ // it attempts to write a batch of messages.
+ //
+ // Default: 1s
+ WriteBackoffMax time.Duration
+
// Limit on how many messages will be buffered before being sent to a
// partition.
//
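A sketch of tuning the new `WriteBackoffMin`/`WriteBackoffMax` bounds introduced above; the values are illustrative:

```go
package main

import (
	"time"

	kafka "github.com/segmentio/kafka-go"
)

func main() {
	w := &kafka.Writer{
		Addr:            kafka.TCP("localhost:9092"), // placeholder broker
		Topic:           "topic-A",                   // placeholder topic
		WriteBackoffMin: 250 * time.Millisecond,      // smallest delay between write attempts
		WriteBackoffMax: 5 * time.Second,             // largest delay between write attempts
	}
	defer w.Close()
}
```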
@@ -295,10 +307,6 @@ type WriterConfig struct {
// a response to a produce request. The default is -1, which means to wait for
// all replicas, and a value above 0 is required to indicate how many replicas
// should acknowledge a message to be considered successful.
- //
- // This version of kafka-go (v0.3) does not support 0 required acks, due to
- // some internal complexity implementing this with the Kafka protocol. If you
- // need that functionality specifically, you'll need to upgrade to v0.4.
RequiredAcks int
// Setting this flag to true causes the WriteMessages method to never block.
@@ -340,20 +348,23 @@ type WriterStats struct {
Bytes int64 `metric:"kafka.writer.message.bytes" type:"counter"`
Errors int64 `metric:"kafka.writer.error.count" type:"counter"`
- BatchTime DurationStats `metric:"kafka.writer.batch.seconds"`
- WriteTime DurationStats `metric:"kafka.writer.write.seconds"`
- WaitTime DurationStats `metric:"kafka.writer.wait.seconds"`
- Retries SummaryStats `metric:"kafka.writer.retries.count"`
- BatchSize SummaryStats `metric:"kafka.writer.batch.size"`
- BatchBytes SummaryStats `metric:"kafka.writer.batch.bytes"`
-
- MaxAttempts int64 `metric:"kafka.writer.attempts.max" type:"gauge"`
- MaxBatchSize int64 `metric:"kafka.writer.batch.max" type:"gauge"`
- BatchTimeout time.Duration `metric:"kafka.writer.batch.timeout" type:"gauge"`
- ReadTimeout time.Duration `metric:"kafka.writer.read.timeout" type:"gauge"`
- WriteTimeout time.Duration `metric:"kafka.writer.write.timeout" type:"gauge"`
- RequiredAcks int64 `metric:"kafka.writer.acks.required" type:"gauge"`
- Async bool `metric:"kafka.writer.async" type:"gauge"`
+ BatchTime DurationStats `metric:"kafka.writer.batch.seconds"`
+ BatchQueueTime DurationStats `metric:"kafka.writer.batch.queue.seconds"`
+ WriteTime DurationStats `metric:"kafka.writer.write.seconds"`
+ WaitTime DurationStats `metric:"kafka.writer.wait.seconds"`
+ Retries int64 `metric:"kafka.writer.retries.count" type:"counter"`
+ BatchSize SummaryStats `metric:"kafka.writer.batch.size"`
+ BatchBytes SummaryStats `metric:"kafka.writer.batch.bytes"`
+
+ MaxAttempts int64 `metric:"kafka.writer.attempts.max" type:"gauge"`
+ WriteBackoffMin time.Duration `metric:"kafka.writer.backoff.min" type:"gauge"`
+ WriteBackoffMax time.Duration `metric:"kafka.writer.backoff.max" type:"gauge"`
+ MaxBatchSize int64 `metric:"kafka.writer.batch.max" type:"gauge"`
+ BatchTimeout time.Duration `metric:"kafka.writer.batch.timeout" type:"gauge"`
+ ReadTimeout time.Duration `metric:"kafka.writer.read.timeout" type:"gauge"`
+ WriteTimeout time.Duration `metric:"kafka.writer.write.timeout" type:"gauge"`
+ RequiredAcks int64 `metric:"kafka.writer.acks.required" type:"gauge"`
+ Async bool `metric:"kafka.writer.async" type:"gauge"`
Topic string `tag:"topic"`
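Two of these changes are visible to callers of `Stats()`: `Retries` becomes a plain counter instead of a summary, and the new `BatchQueueTime` reports how long batches sat in the queue before being written. A minimal consumption sketch, assuming `DurationStats` exposes an `Avg` field as in current kafka-go:

```go
import (
	"log"

	kafka "github.com/segmentio/kafka-go"
)

// logWriterStats takes a snapshot of the writer's metrics; counters are
// drained by the call, so it is typically invoked on a reporting interval.
func logWriterStats(w *kafka.Writer) {
	stats := w.Stats()
	log.Printf("retries=%d queue_wait_avg=%s batch_avg=%s",
		stats.Retries, stats.BatchQueueTime.Avg, stats.BatchTime.Avg)
}
```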
@@ -388,9 +399,10 @@ type writerStats struct {
errors counter
dialTime summary
batchTime summary
+ batchQueueTime summary
writeTime summary
waitTime summary
- retries summary
+ retries counter
batchSize summary
batchSizeBytes summary
}
@@ -523,7 +535,7 @@ func (w *Writer) enter() bool {
// completed.
func (w *Writer) leave() { w.group.Done() }
-// spawn starts an new asynchronous operation on the writer. This method is used
+// spawn starts a new asynchronous operation on the writer. This method is used
// instead of starting goroutines inline to help manage the state of the
// writer's wait group. The wait group is used to block Close calls until all
// inflight operations have completed, therefore automatically including those
@@ -588,9 +600,12 @@ func (w *Writer) Close() error {
//
// The context passed as first argument may also be used to asynchronously
// cancel the operation. Note that in this case there are no guarantees made on
-// whether messages were written to kafka. The program should assume that the
-// whole batch failed and re-write the messages later (which could then cause
-// duplicates).
+// whether messages were written to kafka, they might also still be written
+// after this method has already returned, therefore it is important to not
+// modify byte slices of passed messages if WriteMessages returned early due
+// to a canceled context.
+// The program should assume that the whole batch failed and re-write the
+// messages later (which could then cause duplicates).
func (w *Writer) WriteMessages(ctx context.Context, msgs ...Message) error {
if w.Addr == nil {
return errors.New("kafka.(*Writer).WriteMessages: cannot create a kafka writer with a nil address")
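The reworded warning above has a practical consequence: if `WriteMessages` returns early on a canceled context, the writer may still flush the batch afterwards, so buffers handed to it must not be recycled. A hedged sketch of one defensive pattern (the helper name is hypothetical):

```go
import (
	"context"
	"errors"
	"fmt"

	kafka "github.com/segmentio/kafka-go"
)

// writeOnce copies the payload before handing it to the writer, so the
// caller may reuse its buffer even if a canceled context makes
// WriteMessages return before the batch is actually flushed.
func writeOnce(ctx context.Context, w *kafka.Writer, payload []byte) error {
	value := make([]byte, len(payload))
	copy(value, payload)

	err := w.WriteMessages(ctx, kafka.Message{Value: value})
	if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) {
		// The batch may still be written in the background; treat the
		// outcome as unknown and let a retry path deal with duplicates.
		return fmt.Errorf("write outcome unknown: %w", err)
	}
	return err
}
```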
@@ -609,7 +624,7 @@ func (w *Writer) WriteMessages(ctx context.Context, msgs ...Message) error {
batchBytes := w.batchBytes()
for i := range msgs {
- n := int64(msgs[i].size())
+ n := int64(msgs[i].totalSize())
if n > batchBytes {
// This error is left for backward compatibility with historical
// behavior, but it can yield O(N^2) behaviors. The expectations
@@ -779,6 +794,20 @@ func (w *Writer) maxAttempts() int {
return 10
}
+func (w *Writer) writeBackoffMin() time.Duration {
+ if w.WriteBackoffMin > 0 {
+ return w.WriteBackoffMin
+ }
+ return 100 * time.Millisecond
+}
+
+func (w *Writer) writeBackoffMax() time.Duration {
+ if w.WriteBackoffMax > 0 {
+ return w.WriteBackoffMax
+ }
+ return 1 * time.Second
+}
+
func (w *Writer) batchSize() int {
if w.BatchSize > 0 {
return w.BatchSize
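These defaults feed the retry delay computed in `writeBatch` (see the `backoff` call later in this diff). The library's backoff helper is internal; the function below is an illustrative exponential backoff clamped to the same min/max bounds, not kafka-go's exact implementation:

```go
package main

import (
	"fmt"
	"time"
)

// illustrativeBackoff is a hypothetical stand-in for kafka-go's internal
// backoff helper: the delay doubles per attempt, clamped to [min, max].
func illustrativeBackoff(attempt int, min, max time.Duration) time.Duration {
	if attempt < 1 {
		attempt = 1
	}
	d := min << uint(attempt-1)
	if d < min || d > max { // d < min catches shift overflow
		d = max
	}
	return d
}

func main() {
	// With the defaults: 100ms, 200ms, 400ms, 800ms, 1s.
	for attempt := 1; attempt <= 5; attempt++ {
		fmt.Println(attempt, illustrativeBackoff(attempt, 100*time.Millisecond, time.Second))
	}
}
```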
@@ -849,26 +878,29 @@ func (w *Writer) stats() *writerStats {
func (w *Writer) Stats() WriterStats {
stats := w.stats()
return WriterStats{
- Dials: stats.dials.snapshot(),
- Writes: stats.writes.snapshot(),
- Messages: stats.messages.snapshot(),
- Bytes: stats.bytes.snapshot(),
- Errors: stats.errors.snapshot(),
- DialTime: stats.dialTime.snapshotDuration(),
- BatchTime: stats.batchTime.snapshotDuration(),
- WriteTime: stats.writeTime.snapshotDuration(),
- WaitTime: stats.waitTime.snapshotDuration(),
- Retries: stats.retries.snapshot(),
- BatchSize: stats.batchSize.snapshot(),
- BatchBytes: stats.batchSizeBytes.snapshot(),
- MaxAttempts: int64(w.MaxAttempts),
- MaxBatchSize: int64(w.BatchSize),
- BatchTimeout: w.BatchTimeout,
- ReadTimeout: w.ReadTimeout,
- WriteTimeout: w.WriteTimeout,
- RequiredAcks: int64(w.RequiredAcks),
- Async: w.Async,
- Topic: w.Topic,
+ Dials: stats.dials.snapshot(),
+ Writes: stats.writes.snapshot(),
+ Messages: stats.messages.snapshot(),
+ Bytes: stats.bytes.snapshot(),
+ Errors: stats.errors.snapshot(),
+ DialTime: stats.dialTime.snapshotDuration(),
+ BatchTime: stats.batchTime.snapshotDuration(),
+ BatchQueueTime: stats.batchQueueTime.snapshotDuration(),
+ WriteTime: stats.writeTime.snapshotDuration(),
+ WaitTime: stats.waitTime.snapshotDuration(),
+ Retries: stats.retries.snapshot(),
+ BatchSize: stats.batchSize.snapshot(),
+ BatchBytes: stats.batchSizeBytes.snapshot(),
+ MaxAttempts: int64(w.maxAttempts()),
+ WriteBackoffMin: w.writeBackoffMin(),
+ WriteBackoffMax: w.writeBackoffMax(),
+ MaxBatchSize: int64(w.batchSize()),
+ BatchTimeout: w.batchTimeout(),
+ ReadTimeout: w.readTimeout(),
+ WriteTimeout: w.writeTimeout(),
+ RequiredAcks: int64(w.RequiredAcks),
+ Async: w.Async,
+ Topic: w.Topic,
}
}
@@ -1062,6 +1094,8 @@ func (ptw *partitionWriter) awaitBatch(batch *writeBatch) {
// having it leak until it expires.
batch.timer.Stop()
}
+ stats := ptw.w.stats()
+ stats.batchQueueTime.observe(int64(time.Since(batch.time)))
}
func (ptw *partitionWriter) writeBatch(batch *writeBatch) {
@@ -1086,7 +1120,7 @@ func (ptw *partitionWriter) writeBatch(batch *writeBatch) {
// guarantees to abort, but may be better to avoid long wait times
// on close.
//
- delay := backoff(attempt, 100*time.Millisecond, 1*time.Second)
+ delay := backoff(attempt, ptw.w.writeBackoffMin(), ptw.w.writeBackoffMax())
ptw.w.withLogger(func(log Logger) {
log.Printf("backing off %s writing %d messages to %s (partition: %d)", delay, len(batch.msgs), key.topic, key.partition)
})
@@ -1122,7 +1156,7 @@ func (ptw *partitionWriter) writeBatch(batch *writeBatch) {
stats.errors.observe(1)
ptw.w.withErrorLogger(func(log Logger) {
- log.Printf("error writing messages to %s (partition %d): %s", key.topic, key.partition, err)
+ log.Printf("error writing messages to %s (partition %d, attempt %d): %s", key.topic, key.partition, attempt, err)
})
if !isTemporary(err) && !isTransientNetworkError(err) {
@@ -1185,7 +1219,7 @@ func newWriteBatch(now time.Time, timeout time.Duration) *writeBatch {
}
func (b *writeBatch) add(msg Message, maxSize int, maxBytes int64) bool {
- bytes := int64(msg.size())
+ bytes := int64(msg.totalSize())
if b.size > 0 && (b.bytes+bytes) > maxBytes {
return false
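The switch from `size()` to `totalSize()` in the two hunks above matters for `BatchBytes` accounting: header keys and values now count toward a message's size, which is why the test below moves its byte constants from 48 to 50. A rough, hypothetical approximation of the per-message contribution; kafka-go's internal `totalSize()` also includes protocol framing overhead not reproduced here:

```go
import kafka "github.com/segmentio/kafka-go"

// approxMessageSize estimates what a message contributes toward BatchBytes
// once headers are counted; illustrative only, not the library's helper.
func approxMessageSize(m kafka.Message) int {
	n := len(m.Key) + len(m.Value)
	for _, h := range m.Headers {
		n += len(h.Key) + len(h.Value)
	}
	return n
}
```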
diff --git writer_test.go writer_test.go
index 04d012079..6f894ecd3 100644
--- writer_test.go
+++ writer_test.go
@@ -7,6 +7,7 @@ import (
"io"
"math"
"strconv"
+ "strings"
"sync"
"testing"
"time"
@@ -131,9 +132,13 @@ func TestWriter(t *testing.T) {
},
{
- scenario: "writing messsages with a small batch byte size",
+ scenario: "writing messages with a small batch byte size",
function: testWriterSmallBatchBytes,
},
+ {
+ scenario: "writing messages with headers",
+ function: testWriterBatchBytesHeaders,
+ },
{
scenario: "setting a non default balancer on the writer",
function: testWriterSetsRightBalancer,
@@ -159,7 +164,7 @@ func TestWriter(t *testing.T) {
function: testWriterInvalidPartition,
},
{
- scenario: "writing a message to a non-existant topic creates the topic",
+ scenario: "writing a message to a non-existent topic creates the topic",
function: testWriterAutoCreateTopic,
},
{
@@ -170,6 +175,22 @@ func TestWriter(t *testing.T) {
scenario: "writing a message with SASL Plain authentication",
function: testWriterSasl,
},
+ {
+ scenario: "test default configuration values",
+ function: testWriterDefaults,
+ },
+ {
+ scenario: "test default stats values",
+ function: testWriterDefaultStats,
+ },
+ {
+ scenario: "test stats values with override config",
+ function: testWriterOverrideConfigStats,
+ },
+ {
+ scenario: "test write message with writer data",
+ function: testWriteMessageWithWriterData,
+ },
}
for _, test := range tests {
@@ -441,7 +462,7 @@ func testWriterBatchBytes(t *testing.T) {
w := newTestWriter(WriterConfig{
Topic: topic,
- BatchBytes: 48,
+ BatchBytes: 50,
BatchTimeout: math.MaxInt32 * time.Second,
Balancer: &RoundRobin{},
})
@@ -450,10 +471,10 @@ func testWriterBatchBytes(t *testing.T) {
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
if err := w.WriteMessages(ctx, []Message{
- {Value: []byte("M0")}, // 24 Bytes
- {Value: []byte("M1")}, // 24 Bytes
- {Value: []byte("M2")}, // 24 Bytes
- {Value: []byte("M3")}, // 24 Bytes
+ {Value: []byte("M0")}, // 25 Bytes
+ {Value: []byte("M1")}, // 25 Bytes
+ {Value: []byte("M2")}, // 25 Bytes
+ {Value: []byte("M3")}, // 25 Bytes
}...); err != nil {
t.Error(err)
return
@@ -584,6 +605,67 @@ func testWriterSmallBatchBytes(t *testing.T) {
}
}
+func testWriterBatchBytesHeaders(t *testing.T) {
+ topic := makeTopic()
+ createTopic(t, topic, 1)
+ defer deleteTopic(t, topic)
+
+ offset, err := readOffset(topic, 0)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ w := newTestWriter(WriterConfig{
+ Topic: topic,
+ BatchBytes: 100,
+ BatchTimeout: 50 * time.Millisecond,
+ Balancer: &RoundRobin{},
+ })
+ defer w.Close()
+
+ ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+ defer cancel()
+ if err := w.WriteMessages(ctx, []Message{
+ {
+ Value: []byte("Hello World 1"),
+ Headers: []Header{
+ {Key: "User-Agent", Value: []byte("abc/xyz")},
+ },
+ },
+ {
+ Value: []byte("Hello World 2"),
+ Headers: []Header{
+ {Key: "User-Agent", Value: []byte("abc/xyz")},
+ },
+ },
+ }...); err != nil {
+ t.Error(err)
+ return
+ }
+ ws := w.Stats()
+ if ws.Writes != 2 {
+ t.Error("didn't batch messages; Writes: ", ws.Writes)
+ return
+ }
+ msgs, err := readPartition(topic, 0, offset)
+ if err != nil {
+ t.Error("error reading partition", err)
+ return
+ }
+
+ if len(msgs) != 2 {
+ t.Error("bad messages in partition", msgs)
+ return
+ }
+
+ for _, m := range msgs {
+ if strings.HasPrefix(string(m.Value), "Hello World") {
+ continue
+ }
+ t.Error("bad messages in partition", msgs)
+ }
+}
+
func testWriterMultipleTopics(t *testing.T) {
topic1 := makeTopic()
createTopic(t, topic1, 1)
@@ -715,6 +797,45 @@ func testWriterUnexpectedMessageTopic(t *testing.T) {
}
}
+func testWriteMessageWithWriterData(t *testing.T) {
+ topic := makeTopic()
+ createTopic(t, topic, 1)
+ defer deleteTopic(t, topic)
+ w := newTestWriter(WriterConfig{
+ Topic: topic,
+ Balancer: &RoundRobin{},
+ })
+ defer w.Close()
+
+ index := 0
+ w.Completion = func(messages []Message, err error) {
+ if err != nil {
+ t.Errorf("unexpected error %v", err)
+ }
+
+ for _, msg := range messages {
+ meta := msg.WriterData.(int)
+ if index != meta {
+ t.Errorf("metadata is not correct, index = %d, writerData = %d", index, meta)
+ }
+ index += 1
+ }
+ }
+
+ msg := Message{Key: []byte("key"), Value: []byte("Hello World")}
+ for i := 0; i < 5; i++ {
+ ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+ defer cancel()
+
+ msg.WriterData = i
+ err := w.WriteMessages(ctx, msg)
+ if err != nil {
+ t.Errorf("unexpected error %v", err)
+ }
+ }
+
+}
+
func testWriterAutoCreateTopic(t *testing.T) {
topic := makeTopic()
// Assume it's going to get created.
@@ -818,6 +939,97 @@ func testWriterSasl(t *testing.T) {
}
}
+func testWriterDefaults(t *testing.T) {
+ w := &Writer{}
+ defer w.Close()
+
+ if w.writeBackoffMin() != 100*time.Millisecond {
+ t.Error("Incorrect default min write backoff delay")
+ }
+
+ if w.writeBackoffMax() != 1*time.Second {
+ t.Error("Incorrect default max write backoff delay")
+ }
+}
+
+func testWriterDefaultStats(t *testing.T) {
+ w := &Writer{}
+ defer w.Close()
+
+ stats := w.Stats()
+
+ if stats.MaxAttempts == 0 {
+ t.Error("Incorrect default MaxAttempts value")
+ }
+
+ if stats.WriteBackoffMin == 0 {
+ t.Error("Incorrect default WriteBackoffMin value")
+ }
+
+ if stats.WriteBackoffMax == 0 {
+ t.Error("Incorrect default WriteBackoffMax value")
+ }
+
+ if stats.MaxBatchSize == 0 {
+ t.Error("Incorrect default MaxBatchSize value")
+ }
+
+ if stats.BatchTimeout == 0 {
+ t.Error("Incorrect default BatchTimeout value")
+ }
+
+ if stats.ReadTimeout == 0 {
+ t.Error("Incorrect default ReadTimeout value")
+ }
+
+ if stats.WriteTimeout == 0 {
+ t.Error("Incorrect default WriteTimeout value")
+ }
+}
+
+func testWriterOverrideConfigStats(t *testing.T) {
+ w := &Writer{
+ MaxAttempts: 6,
+ WriteBackoffMin: 2,
+ WriteBackoffMax: 4,
+ BatchSize: 1024,
+ BatchTimeout: 16,
+ ReadTimeout: 24,
+ WriteTimeout: 32,
+ }
+ defer w.Close()
+
+ stats := w.Stats()
+
+ if stats.MaxAttempts != 6 {
+ t.Error("Incorrect MaxAttempts value")
+ }
+
+ if stats.WriteBackoffMin != 2 {
+ t.Error("Incorrect WriteBackoffMin value")
+ }
+
+ if stats.WriteBackoffMax != 4 {
+ t.Error("Incorrect WriteBackoffMax value")
+ }
+
+ if stats.MaxBatchSize != 1024 {
+ t.Error("Incorrect MaxBatchSize value")
+ }
+
+ if stats.BatchTimeout != 16 {
+ t.Error("Incorrect BatchTimeout value")
+ }
+
+ if stats.ReadTimeout != 24 {
+ t.Error("Incorrect ReadTimeout value")
+ }
+
+ if stats.WriteTimeout != 32 {
+ t.Error("Incorrect WriteTimeout value")
+ }
+}
+
type staticBalancer struct {
partition int
}
Description

This PR introduces several new features, improvements, and bug fixes to the kafka-go library; the main changes are summarized by file below.
Possible Issues
Security Hotspots
Changes

By filename:

.circleci/config.yml:
README.md:
balancer.go:
conn.go:
compress/:
protocol/:
writer.go:
```mermaid
sequenceDiagram
    participant App
    participant Writer
    participant Conn
    participant Kafka
    App->>Writer: WriteMessages()
    Writer->>Writer: BatchMessages()
    Writer->>Conn: SendBatch()
    Conn->>Kafka: ProduceRequest
    Kafka-->>Conn: ProduceResponse
    Conn-->>Writer: BatchResult
    Writer-->>App: WriteResult
    opt Async Mode
        Writer->>Writer: HandleCompletion()
    end
```
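Mapping the diagram back to the public API, a minimal synchronous round trip; broker address and topic are placeholders, and the async branch of the diagram corresponds to the `Completion` handler shown earlier in the diff:

```go
package main

import (
	"context"
	"log"

	kafka "github.com/segmentio/kafka-go"
)

func main() {
	w := &kafka.Writer{
		Addr:  kafka.TCP("localhost:9092"), // placeholder broker
		Topic: "topic-A",                   // placeholder topic
	}
	defer w.Close()

	// WriteMessages batches the message, issues a ProduceRequest, and
	// blocks until the ProduceResponse is mapped to a per-batch result.
	if err := w.WriteMessages(context.Background(),
		kafka.Message{Key: []byte("k"), Value: []byte("v")},
	); err != nil {
		log.Fatal(err)
	}
}
```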
This PR contains the following updates:
v0.4.35 -> v0.4.47
Release Notes
segmentio/kafka-go (github.com/segmentio/kafka-go)
v0.4.47
Compare Source
What's Changed
New Contributors
Full Changelog: segmentio/kafka-go@v0.4.46...v0.4.47
v0.4.46
Compare Source
What's Changed
New Contributors
Full Changelog: segmentio/kafka-go@v0.4.45...v0.4.46
v0.4.45
Compare Source
What's Changed
Full Changelog: segmentio/kafka-go@v0.4.44...v0.4.45
v0.4.44
Compare Source
What's Changed
New Contributors
Full Changelog: segmentio/kafka-go@v0.4.43...v0.4.44
v0.4.43
Compare Source
What's Changed
New Contributors
Full Changelog: segmentio/kafka-go@v0.4.42...v0.4.43
v0.4.42
Compare Source
What's Changed
Full Changelog: segmentio/kafka-go@v0.4.41...v0.4.42
v0.4.41
Compare Source
What's Changed
New Contributors
Full Changelog: segmentio/kafka-go@v0.4.40...v0.4.41
v0.4.40
Compare Source
What's Changed
`batchQueueTime` to track the batch time cost by @3AceShowHand in https://github.com/segmentio/kafka-go/pull/1103

New Contributors
Full Changelog: segmentio/kafka-go@v0.4.39...v0.4.40
v0.4.39
Compare Source
What's Changed
New Contributors
Full Changelog: segmentio/kafka-go@v0.4.38...v0.4.39
v0.4.38
Compare Source
What's Changed
Full Changelog: segmentio/kafka-go@v0.4.37...v0.4.38
v0.4.37
Compare Source
What's Changed
Full Changelog: segmentio/kafka-go@v0.4.36...v0.4.37
v0.4.36
Compare Source
What's Changed
New Contributors
Full Changelog: segmentio/kafka-go@v0.4.35...v0.4.36
Configuration
📅 Schedule: Branch creation - "* 0-12 * * 3" (UTC), Automerge - At any time (no schedule defined).
🚦 Automerge: Disabled by config. Please merge this manually once you are satisfied.
♻ Rebasing: Whenever PR is behind base branch, or you tick the rebase/retry checkbox.
🔕 Ignore: Close this PR and you won't be reminded about this update again.
This PR was generated by Mend Renovate. View the repository job log.