akvorado/inlet/kafka/root_test.go
Vincent Bernat 756e4a8fbd */kafka: switch to franz-go
The concurrency model of this library is easier to handle than Sarama's.
Notably, it is a better fit for the new "share almost nothing" model we
use for the inlet and the outlet. The lock for workers in the outlet is
removed, and we can now use a sync.Pool to allocate byte slices in the
inlet.
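
As an illustration of that last point, here is a minimal sketch (not the
actual inlet code; the buffer capacity and names are invented) of a
sync.Pool recycling byte slices between receiving a datagram and handing
it to the Kafka producer:

// Minimal sketch of a sync.Pool recycling byte slices; the capacity and
// names are illustrative, not taken from the inlet code.
package main

import (
	"fmt"
	"sync"
)

// bytesPool hands out reusable byte slices; storing pointers avoids an
// extra allocation when a slice is put back.
var bytesPool = sync.Pool{
	New: func() any {
		b := make([]byte, 0, 9000) // assumed capacity for one datagram
		return &b
	},
}

func main() {
	buf := bytesPool.Get().(*[]byte)
	*buf = append((*buf)[:0], "encoded flow message"...)
	fmt.Printf("using %d bytes\n", len(*buf))
	// Once the producer callback has fired, the slice can be reused.
	bytesPool.Put(buf)
}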

It may also be more performant.

In the future, we may want to commit only when pushing data to
ClickHouse. However, this does not seem easy when there is a rebalance:
when a partition is revoked, we need to do something to avoid
duplicating data, for example flushing the current batch to ClickHouse.
Have a look at the `example/mark_offsets/main.go` file in the franz-go
repository for a possible approach. In the meantime, we rely on
autocommit.
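
For reference, a rough sketch of that mark-offsets approach, loosely
adapted from the franz-go example: offsets are marked only once a batch
has been flushed, and a revoked partition triggers a flush plus a commit
of the marked offsets. The group name, topic, batch size, and the
flushToClickHouse helper below are placeholders, not Akvorado code.

// Rough sketch of committing only after flushing to ClickHouse, with a
// flush on partition revocation. Names and sizes are placeholders.
package main

import (
	"context"
	"log"

	"github.com/twmb/franz-go/pkg/kgo"
)

// flushToClickHouse stands in for pushing the current batch to ClickHouse.
func flushToClickHouse(records []*kgo.Record) error { return nil }

func main() {
	var batch []*kgo.Record
	var cl *kgo.Client

	flushAndCommit := func(ctx context.Context) {
		if err := flushToClickHouse(batch); err != nil {
			log.Printf("flush: %v", err)
			return
		}
		cl.MarkCommitRecords(batch...)
		if err := cl.CommitMarkedOffsets(ctx); err != nil {
			log.Printf("commit: %v", err)
		}
		batch = batch[:0]
	}

	var err error
	cl, err = kgo.NewClient(
		kgo.SeedBrokers("127.0.0.1:9092"), // assumed broker address
		kgo.ConsumerGroup("clickhouse"),   // assumed group name
		kgo.ConsumeTopics("flows"),        // assumed topic
		kgo.AutoCommitMarks(),             // only commit explicitly marked offsets
		kgo.BlockRebalanceOnPoll(),        // no rebalance while we hold records
		kgo.OnPartitionsRevoked(func(ctx context.Context, _ *kgo.Client, _ map[string][]int32) {
			// Flush before losing the partitions to avoid duplicating data.
			flushAndCommit(ctx)
		}),
	)
	if err != nil {
		log.Fatal(err)
	}
	defer cl.Close()

	for {
		fetches := cl.PollFetches(context.Background())
		fetches.EachRecord(func(r *kgo.Record) { batch = append(batch, r) })
		if len(batch) >= 1000 { // assumed batch size
			flushAndCommit(context.Background())
		}
		cl.AllowRebalance()
	}
}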

Another contender could be https://github.com/segmentio/kafka-go. Also
see https://github.com/twmb/franz-go/pull/1064.
2025-07-27 21:44:28 +02:00

// SPDX-FileCopyrightText: 2022 Free Mobile
// SPDX-License-Identifier: AGPL-3.0-only

package kafka

import (
	"context"
	"fmt"
	"slices"
	"sync"
	"sync/atomic"
	"testing"
	"time"

	"github.com/twmb/franz-go/pkg/kerr"
	"github.com/twmb/franz-go/pkg/kgo"
	"github.com/twmb/franz-go/pkg/kmsg"

	"akvorado/common/helpers"
	"akvorado/common/pb"
	"akvorado/common/reporter"
)
func TestKafka(t *testing.T) {
	r := reporter.NewMock(t)
	topic := fmt.Sprintf("flows-v%d", pb.Version)
	config := DefaultConfiguration()
	config.QueueSize = 1
	c, mock := NewMock(t, r, config)
	defer mock.Close()

	// Inject an error on third message.
	var count atomic.Uint32
	mock.ControlKey(0, func(kreq kmsg.Request) (kmsg.Response, error, bool) {
		mock.KeepControl()
		current := count.Add(1)
		if current != 3 {
			t.Logf("message %d: ok", current)
			return nil, nil, false
		}
t.Logf("mesage %d: error", current)
		req := kreq.(*kmsg.ProduceRequest)
		resp := kreq.ResponseKind().(*kmsg.ProduceResponse)
		for _, rt := range req.Topics {
			st := kmsg.NewProduceResponseTopic()
			st.Topic = rt.Topic
			for _, rp := range rt.Partitions {
				sp := kmsg.NewProduceResponseTopicPartition()
				sp.Partition = rp.Partition
				sp.ErrorCode = kerr.CorruptMessage.Code
				st.Partitions = append(st.Partitions, sp)
			}
			resp.Topics = append(resp.Topics, st)
		}
		return resp, nil, true
	})

	// Send messages
	var wg sync.WaitGroup
	wg.Add(4)
	c.Send("127.0.0.1", []byte("hello world!"), func() { wg.Done() })
	c.Send("127.0.0.1", []byte("goodbye world!"), func() { wg.Done() })
	c.Send("127.0.0.1", []byte("nooooo!"), func() { wg.Done() })
	c.Send("127.0.0.1", []byte("all good"), func() { wg.Done() })
	done := make(chan struct{})
	go func() {
		wg.Wait()
		close(done)
	}()
	select {
	case <-done:
	case <-time.After(time.Second):
		t.Fatal("Send() timeout")
	}
	expectedMessages := []string{"hello world!", "goodbye world!", "all good"}

	// Create consumer to check messages
	consumer, err := kgo.NewClient(
		kgo.SeedBrokers(mock.ListenAddrs()...),
		kgo.ConsumeTopics(topic),
		kgo.ConsumeResetOffset(kgo.NewOffset().AtStart()),
	)
	if err != nil {
		t.Fatalf("NewClient() error:\n%+v", err)
	}
	defer consumer.Close()

	// Consume messages
	messages := make([]string, 0)
	ctx, cancel := context.WithTimeout(t.Context(), 5*time.Second)
	defer cancel()
	for {
		if len(messages) >= len(expectedMessages) {
			break
		}
		fetches := consumer.PollFetches(ctx)
		if errs := fetches.Errors(); len(errs) > 0 {
			t.Fatalf("PollFetches() error:\n%+v", errs)
		}
		fetches.EachPartition(func(p kgo.FetchTopicPartition) {
			for _, record := range p.Records {
				messages = append(messages, string(record.Value))
			}
		})
	}
	slices.Sort(expectedMessages)
	slices.Sort(messages)
	if diff := helpers.Diff(messages, expectedMessages); diff != "" {
		t.Fatalf("Send() (-got, +want):\n%s", diff)
	}

	gotMetrics := r.GetMetrics("akvorado_inlet_kafka_", "sent_", "errors")
	expectedMetrics := map[string]string{
		`sent_bytes_total{exporter="127.0.0.1"}`:    "34",
		`sent_messages_total{exporter="127.0.0.1"}`: "3",
		`errors_total{error="CORRUPT_MESSAGE"}`:     "1",
	}
	if diff := helpers.Diff(gotMetrics, expectedMetrics); diff != "" {
		t.Fatalf("Metrics (-got, +want):\n%s", diff)
	}
}