Mirror of https://github.com/akvorado/akvorado.git (synced 2025-12-12 06:24:10 +01:00)
And use random by default. This scales better. And even when not using multiple outlets, there is little drawback to pinning an exporter to a partition.
182 lines · 4.7 KiB · Go
// SPDX-FileCopyrightText: 2022 Free Mobile
// SPDX-License-Identifier: AGPL-3.0-only

package kafka

import (
	"context"
	"fmt"
	"slices"
	"sync"
	"sync/atomic"
	"testing"
	"time"

	"github.com/twmb/franz-go/pkg/kerr"
	"github.com/twmb/franz-go/pkg/kgo"
	"github.com/twmb/franz-go/pkg/kmsg"

	"akvorado/common/helpers"
	"akvorado/common/kafka"
	"akvorado/common/pb"
	"akvorado/common/reporter"
)

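// TestKafka checks that flow messages sent to the mocked Kafka cluster are
// delivered, that a produce error is handled, and that the matching metrics
// are reported.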
func TestKafka(t *testing.T) {
	r := reporter.NewMock(t)
	topic := fmt.Sprintf("flows-v%d", pb.Version)
	config := DefaultConfiguration()
	config.QueueSize = 1
	c, mock := NewMock(t, r, config)
	defer mock.Close()

	// Inject an error on the third message.
	var count atomic.Uint32
	mock.ControlKey(0, func(kreq kmsg.Request) (kmsg.Response, error, bool) {
		mock.KeepControl()
		current := count.Add(1)
		if current != 3 {
			t.Logf("ControlKey() message %d: ok", current)
			return nil, nil, false
		}
t.Logf("ControlKey() mesage %d: error", current)
|
|
		req := kreq.(*kmsg.ProduceRequest)
		resp := kreq.ResponseKind().(*kmsg.ProduceResponse)
		for _, rt := range req.Topics {
			st := kmsg.NewProduceResponseTopic()
			st.Topic = rt.Topic
			for _, rp := range rt.Partitions {
				sp := kmsg.NewProduceResponseTopicPartition()
				sp.Partition = rp.Partition
				sp.ErrorCode = kerr.CorruptMessage.Code
				st.Partitions = append(st.Partitions, sp)
			}
			resp.Topics = append(resp.Topics, st)
		}
		return resp, nil, true
	})

	// Send messages
	var wg sync.WaitGroup
	wg.Add(4)
	c.Send("127.0.0.1", []byte("hello world!"), func() { wg.Done() })
	c.Send("127.0.0.1", []byte("goodbye world!"), func() { wg.Done() })
	c.Send("127.0.0.1", []byte("nooooo!"), func() { wg.Done() })
	c.Send("127.0.0.1", []byte("all good"), func() { wg.Done() })
	done := make(chan struct{})
	go func() {
		wg.Wait()
		close(done)
	}()
	select {
	case <-done:
	case <-time.After(time.Second):
		t.Fatal("Send() timeout")
	}

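	// The third message ("nooooo!") should be rejected by the injected
	// CORRUPT_MESSAGE error and must not show up on the topic.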
	expectedMessages := []string{"hello world!", "goodbye world!", "all good"}

	// Create consumer to check messages
	consumer, err := kgo.NewClient(
		kgo.SeedBrokers(mock.ListenAddrs()...),
		kgo.ConsumeTopics(topic),
		kgo.ConsumeResetOffset(kgo.NewOffset().AtStart()),
		kgo.WithLogger(kafka.NewLogger(r)),
	)
	if err != nil {
		t.Fatalf("NewClient() error:\n%+v", err)
	}
	defer consumer.Close()

	// Consume messages
	messages := make([]string, 0)
	ctx, cancel := context.WithTimeout(t.Context(), 5*time.Second)
	defer cancel()
	for {
		if len(messages) >= len(expectedMessages) {
			break
		}
		fetches := consumer.PollFetches(ctx)
		if errs := fetches.Errors(); len(errs) > 0 {
			t.Fatalf("PollFetches() error:\n%+v", errs)
		}

		fetches.EachPartition(func(p kgo.FetchTopicPartition) {
			t.Logf("EachPartition() partition %d", p.Partition)
			for _, record := range p.Records {
				messages = append(messages, string(record.Value))
				t.Logf("EachPartition() messages: %v", messages)
			}
		})
	}

	slices.Sort(expectedMessages)
	slices.Sort(messages)
	if diff := helpers.Diff(messages, expectedMessages); diff != "" {
		t.Fatalf("Send() (-got, +want):\n%s", diff)
	}

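	// 34 bytes = len("hello world!") + len("goodbye world!") + len("all good");
	// the rejected message is only accounted for in errors_total.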
	gotMetrics := r.GetMetrics("akvorado_inlet_kafka_", "sent_", "errors")
	expectedMetrics := map[string]string{
		`sent_bytes_total{exporter="127.0.0.1"}`:    "34",
		`sent_messages_total{exporter="127.0.0.1"}`: "3",
		`errors_total{error="CORRUPT_MESSAGE"}`:     "1",
	}
	if diff := helpers.Diff(gotMetrics, expectedMetrics); diff != "" {
		t.Fatalf("Metrics (-got, +want):\n%s", diff)
	}
}

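// TestLoadBalancingAlgorithm sends 500 messages from a single exporter and
// records how they are spread across partitions for each load-balancing
// algorithm.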
func TestLoadBalancingAlgorithm(t *testing.T) {
	for _, algo := range []LoadBalanceAlgorithm{LoadBalanceRandom, LoadBalanceByExporter} {
		t.Run(algo.String(), func(t *testing.T) {
			topic := fmt.Sprintf("balance-%s", algo)
			r := reporter.NewMock(t)
			config := DefaultConfiguration()
			config.QueueSize = 1
			config.Topic = topic
			c, mock := NewMock(t, r, config)
			defer mock.Close()

			total := 500

			// Intercept messages
			var wg sync.WaitGroup
			wg.Add(total)
			var mu sync.Mutex
			messages := make(map[int32]int)
			kafka.InterceptMessages(t, mock, func(r *kgo.Record) {
				mu.Lock()
				defer mu.Unlock()
				messages[r.Partition]++
				wg.Done()
			})

			// Send messages
			for i := range total {
				c.Send("127.0.0.1", []byte(fmt.Sprintf("hello %d", i)), func() {})
			}
			wg.Wait()

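			// Build the expected distribution: for the random algorithm, any
			// per-partition count within roughly 60%-140% of an even split is
			// accepted (counts outside that band are replaced by the even
			// split so the diff below fails); for by-exporter, the observed
			// distribution is accepted as-is.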
			expected := make(map[int32]int, len(messages))
			if algo == LoadBalanceRandom {
				for p, count := range messages {
					if count > total/len(messages)*6/10 && count < total/len(messages)*14/10 {
						expected[p] = count
					} else {
						expected[p] = total / len(messages)
					}
				}
			} else if algo == LoadBalanceByExporter {
				for p, count := range messages {
					expected[p] = count
				}
			}

			if diff := helpers.Diff(messages, expected); diff != "" {
				t.Fatalf("Messages per partition (-got, +want):\n%s", diff)
			}
		})
	}
}