mirror of
https://github.com/akvorado/akvorado.git
synced 2025-12-12 06:24:10 +01:00
Inserting into ClickHouse should be done in large batches to minimize the number of parts created. This would require the user to tune the number of Kafka workers to match a target of around 50k-100k rows. Instead, we dynamically tune the number of workers depending on the load to reach this target. We keep using async inserts when the number of flows is too low. It is still possible to do better by consolidating batches from various workers, but that's something I wanted to avoid. Also, increase the maximum wait time to 5 seconds. It should be good enough for most people. Fix #1885
53 lines
1.1 KiB
Go
53 lines
1.1 KiB
Go
// SPDX-FileCopyrightText: 2022 Free Mobile
|
|
// SPDX-License-Identifier: AGPL-3.0-only
|
|
|
|
package kafka
|
|
|
|
import (
|
|
"context"
|
|
"testing"
|
|
"time"
|
|
|
|
"akvorado/common/helpers"
|
|
)
|
|
|
|
func TestMock(t *testing.T) {
|
|
c, incoming := NewMock(t, DefaultConfiguration())
|
|
|
|
got := []string{}
|
|
expected := []string{"hello1", "hello2", "hello3"}
|
|
gotAll := make(chan bool)
|
|
shutdownCalled := false
|
|
callback := func(_ context.Context, message []byte) error {
|
|
got = append(got, string(message))
|
|
if len(got) == len(expected) {
|
|
close(gotAll)
|
|
}
|
|
return nil
|
|
}
|
|
c.StartWorkers(
|
|
func(int, chan<- ScaleRequest) (ReceiveFunc, ShutdownFunc) {
|
|
return callback, func() { shutdownCalled = true }
|
|
},
|
|
)
|
|
|
|
// Produce messages and wait for them
|
|
for _, msg := range expected {
|
|
incoming <- []byte(msg)
|
|
}
|
|
select {
|
|
case <-time.After(time.Second):
|
|
t.Fatal("Too long to get messages")
|
|
case <-gotAll:
|
|
}
|
|
|
|
if diff := helpers.Diff(got, expected); diff != "" {
|
|
t.Errorf("Didn't received the expected messages (-got, +want):\n%s", diff)
|
|
}
|
|
|
|
c.Stop()
|
|
if !shutdownCalled {
|
|
t.Error("Stop() should have triggered shutdown function")
|
|
}
|
|
}
|