Mirror of https://github.com/akvorado/akvorado.git
This is a huge change to make the various subcomponents of the inlet use the schema to generate the protobuf. For it to make sense, we also modify the way we parse flows to directly serialize non-essential fields to Protobuf. The performance is mostly on par with the previous commit: we are a bit less efficient because we don't have a fixed structure, but we avoid losing too much performance by not relying on reflection and by keeping the production of messages as code. We also use less of GoFlow2: raw flow parsing is still done by GoFlow2, but we don't use the producer part anymore. This helps a bit with performance as we parse less. Overall, we are 20% faster than the previous commit and twice as fast as 1.6.4!

```
goos: linux
goarch: amd64
pkg: akvorado/inlet/flow
cpu: AMD Ryzen 5 5600X 6-Core Processor
BenchmarkDecodeEncodeNetflow
BenchmarkDecodeEncodeNetflow/with_encoding
BenchmarkDecodeEncodeNetflow/with_encoding-12        151484     7789 ns/op    8272 B/op    143 allocs/op
BenchmarkDecodeEncodeNetflow/without_encoding
BenchmarkDecodeEncodeNetflow/without_encoding-12     162550     7133 ns/op    8272 B/op    143 allocs/op
BenchmarkDecodeEncodeSflow
BenchmarkDecodeEncodeSflow/with_encoding
BenchmarkDecodeEncodeSflow/with_encoding-12           94844    13193 ns/op    9816 B/op    295 allocs/op
BenchmarkDecodeEncodeSflow/without_encoding
BenchmarkDecodeEncodeSflow/without_encoding-12        92569    12456 ns/op    9816 B/op    295 allocs/op
```

There was an attempt to parse sFlow packets with gopacket, but the ad-hoc parser used here is more performant.
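As an aside, the "production of messages as code" mentioned above amounts to appending fields directly in protobuf wire format instead of filling a struct and marshalling it through reflection. A minimal, hypothetical sketch of that technique (not akvorado's actual schema code; the field numbers and values are made up), using `google.golang.org/protobuf/encoding/protowire`:

```
// Hypothetical sketch: append two fields straight into a byte slice in
// protobuf wire format, without an intermediate struct or reflection.
// Field numbers and values are illustrative, not akvorado's real schema.
package main

import (
	"fmt"

	"google.golang.org/protobuf/encoding/protowire"
)

func main() {
	buf := make([]byte, 0, 64)
	// Field 1, varint: number of bytes in the sampled packet.
	buf = protowire.AppendTag(buf, 1, protowire.VarintType)
	buf = protowire.AppendVarint(buf, 1500)
	// Field 2, length-delimited: exporter IP address as raw bytes.
	buf = protowire.AppendTag(buf, 2, protowire.BytesType)
	buf = protowire.AppendBytes(buf, []byte{192, 0, 2, 1})
	fmt.Printf("%d bytes encoded: %x\n", len(buf), buf)
}
```

Emitting one such append sequence per schema field keeps the hot path free of reflection while still letting the schema decide which fields exist.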
110 lines · 3.5 KiB · Go
// SPDX-FileCopyrightText: 2022 Free Mobile
// SPDX-License-Identifier: AGPL-3.0-only

// Package kafka handles Kafka-related configuration for the orchestrator.
package kafka

import (
	"fmt"
	"strings"

	"github.com/Shopify/sarama"

	"akvorado/common/kafka"
	"akvorado/common/reporter"
	"akvorado/common/schema"
)

// Component represents the Kafka configurator.
type Component struct {
	r      *reporter.Reporter
	config Configuration

	kafkaConfig *sarama.Config
	kafkaTopic  string
}

// New creates a new Kafka configurator.
func New(r *reporter.Reporter, config Configuration) (*Component, error) {
	kafkaConfig, err := kafka.NewConfig(config.Configuration)
	if err != nil {
		return nil, err
	}
	if err := kafkaConfig.Validate(); err != nil {
		return nil, fmt.Errorf("cannot validate Kafka configuration: %w", err)
	}

	return &Component{
		r:      r,
		config: config,

		kafkaConfig: kafkaConfig,
		kafkaTopic:  fmt.Sprintf("%s-%s", config.Topic, schema.Flows.ProtobufMessageHash()),
	}, nil
}

// Start starts Kafka configuration.
func (c *Component) Start() error {
	c.r.Info().Msg("starting Kafka component")
	kafka.GlobalKafkaLogger.Register(c.r)
	defer func() {
		kafka.GlobalKafkaLogger.Unregister()
		c.r.Info().Msg("Kafka component stopped")
	}()

	// Create topic
	admin, err := sarama.NewClusterAdmin(c.config.Brokers, c.kafkaConfig)
	if err != nil {
		c.r.Err(err).
			Str("brokers", strings.Join(c.config.Brokers, ",")).
			Msg("unable to get admin client for topic creation")
		return fmt.Errorf("unable to get admin client for topic creation: %w", err)
	}
	defer admin.Close()
	l := c.r.With().
		Str("brokers", strings.Join(c.config.Brokers, ",")).
		Str("topic", c.kafkaTopic).
		Logger()
	topics, err := admin.ListTopics()
	if err != nil {
		l.Err(err).Msg("unable to get metadata for topics")
		return fmt.Errorf("unable to get metadata for topics: %w", err)
	}
	if topic, ok := topics[c.kafkaTopic]; !ok {
		if err := admin.CreateTopic(c.kafkaTopic,
			&sarama.TopicDetail{
				NumPartitions:     c.config.TopicConfiguration.NumPartitions,
				ReplicationFactor: c.config.TopicConfiguration.ReplicationFactor,
				ConfigEntries:     c.config.TopicConfiguration.ConfigEntries,
			}, false); err != nil {
			l.Err(err).Msg("unable to create topic")
			return fmt.Errorf("unable to create topic %q: %w", c.kafkaTopic, err)
		}
		l.Info().Msg("topic created")
	} else {
		if topic.NumPartitions > c.config.TopicConfiguration.NumPartitions {
			l.Warn().Msgf("cannot decrease the number of partitions (from %d to %d)",
				topic.NumPartitions, c.config.TopicConfiguration.NumPartitions)
		} else if topic.NumPartitions < c.config.TopicConfiguration.NumPartitions {
			nb := c.config.TopicConfiguration.NumPartitions
			if err := admin.CreatePartitions(c.kafkaTopic, nb, nil, false); err != nil {
				l.Err(err).Msg("unable to add more partitions")
				return fmt.Errorf("unable to add more partitions to topic %q: %w",
					c.kafkaTopic, err)
			}
		}
		if c.config.TopicConfiguration.ReplicationFactor != topic.ReplicationFactor {
			// TODO: https://github.com/deviceinsight/kafkactl/blob/main/internal/topic/topic-operation.go
			l.Warn().Msgf("mismatch for replication factor: got %d, want %d",
				topic.ReplicationFactor, c.config.TopicConfiguration.ReplicationFactor)
		}
		if err := admin.AlterConfig(sarama.TopicResource, c.kafkaTopic, c.config.TopicConfiguration.ConfigEntries, false); err != nil {
			l.Err(err).Msg("unable to set topic configuration")
			return fmt.Errorf("unable to set topic configuration for %q: %w",
				c.kafkaTopic, err)
		}
		l.Info().Msg("topic updated")
	}
	return nil
}