global: split Akvorado into 3 services

Vincent Bernat
2022-04-01 20:21:53 +02:00
parent a336370c05
commit 1dc253764d
179 changed files with 1768 additions and 1263 deletions

.gitignore (vendored)

@@ -1,6 +1,6 @@
 /bin/
 /test/
-/flow/decoder/flow*.pb.go
-/web/data/node_modules/
-/web/data/assets/generated/
+/inlet/flow/decoder/flow*.pb.go
+/console/data/node_modules/
+/console/data/assets/generated/


@@ -13,7 +13,7 @@ M = $(shell if [ "$$(tput colors 2> /dev/null || echo 0)" -ge 8 ]; then printf "
 export GO111MODULE=on
-GENERATED = flow/decoder/flow-1.pb.go web/data/node_modules web/data/assets/generated
+GENERATED = inlet/flow/decoder/flow-1.pb.go console/data/node_modules console/data/assets/generated
 .PHONY: all
 all: fmt lint $(GENERATED) | $(BIN) ; $(info $(M) building executable) @ ## Build program binary
@@ -47,17 +47,17 @@ $(BIN)/protoc-gen-go: PACKAGE=google.golang.org/protobuf/cmd/protoc-gen-go
 # Generated files
-flow/decoder/%.pb.go: flow/data/schemas/%.proto | $(PROTOC_GEN_GO) ; $(info $(M) compiling protocol buffers definition)
+inlet/flow/decoder/%.pb.go: inlet/flow/data/schemas/%.proto | $(PROTOC_GEN_GO) ; $(info $(M) compiling protocol buffers definition)
 	$Q $(PROTOC) -I=. --plugin=$(PROTOC_GEN_GO) --go_out=. --go_opt=module=$(MODULE) $<
-web/data/node_modules: web/data/package.json web/data/yarn.lock ; $(info $(M) fetching node modules)
+console/data/node_modules: console/data/package.json console/data/yarn.lock ; $(info $(M) fetching node modules)
-	$Q yarn install --frozen-lockfile --cwd web/data && touch $@
+	$Q yarn install --frozen-lockfile --cwd console/data && touch $@
-web/data/assets/generated: web/data/node_modules Makefile ; $(info $(M) copying static assets)
+console/data/assets/generated: console/data/node_modules Makefile ; $(info $(M) copying static assets)
 	$Q rm -rf $@ && mkdir -p $@/stylesheets $@/javascript $@/fonts
-	$Q cp web/data/node_modules/@mdi/font/fonts/materialdesignicons-webfont.woff* $@/fonts/.
+	$Q cp console/data/node_modules/@mdi/font/fonts/materialdesignicons-webfont.woff* $@/fonts/.
-	$Q cp web/data/node_modules/@mdi/font/css/materialdesignicons.min.css $@/stylesheets/.
+	$Q cp console/data/node_modules/@mdi/font/css/materialdesignicons.min.css $@/stylesheets/.
-	$Q cp web/data/node_modules/bootstrap/dist/css/bootstrap.min.css $@/stylesheets/.
+	$Q cp console/data/node_modules/bootstrap/dist/css/bootstrap.min.css $@/stylesheets/.
-	$Q cp web/data/node_modules/bootstrap/dist/js/bootstrap.bundle.min.js $@/javascript/.
+	$Q cp console/data/node_modules/bootstrap/dist/js/bootstrap.bundle.min.js $@/javascript/.
 # These files are versioned in Git, but we may want to update them.
 clickhouse/data/protocols.csv:

cmd/components.go (new file)

@@ -0,0 +1,48 @@
package cmd
import (
"fmt"
"akvorado/common/daemon"
"akvorado/common/reporter"
)
// StartStopComponents activates and deactivates components in order.
func StartStopComponents(r *reporter.Reporter, daemonComponent daemon.Component, otherComponents []interface{}) error {
components := append([]interface{}{r, daemonComponent}, otherComponents...)
startedComponents := []interface{}{}
defer func() {
for _, cmp := range startedComponents {
if stopperC, ok := cmp.(stopper); ok {
if err := stopperC.Stop(); err != nil {
r.Err(err).Msg("unable to stop component, ignoring")
}
}
}
}()
for _, cmp := range components {
if starterC, ok := cmp.(starter); ok {
if err := starterC.Start(); err != nil {
return fmt.Errorf("unable to start component: %w", err)
}
}
startedComponents = append([]interface{}{cmp}, startedComponents...)
}
r.Info().
Str("version", Version).Str("build-date", BuildDate).
Msg("akvorado has started")
select {
case <-daemonComponent.Terminated():
r.Info().Msg("stopping all components")
}
return nil
}
type starter interface {
Start() error
}
type stopper interface {
Stop() error
}
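A quick illustrative sketch (editor's addition, not part of the commit): any value exposing Start() or Stop() participates in the lifecycle managed above; the component name below is hypothetical.

package demo

import (
	"akvorado/cmd"
	"akvorado/common/daemon"
	"akvorado/common/reporter"
)

// pinger is a hypothetical component: only its Start/Stop methods matter
// to StartStopComponents.
type pinger struct{}

func (p *pinger) Start() error { return nil } // begin background work
func (p *pinger) Stop() error  { return nil } // called in reverse start order

// run starts the component set and blocks until the daemon terminates.
// A failed Start() stops the already-started components before returning.
func run(r *reporter.Reporter, d daemon.Component) error {
	return cmd.StartStopComponents(r, d, []interface{}{&pinger{}})
}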

cmd/components_test.go (new file)

@@ -0,0 +1,81 @@
package cmd_test
import (
"errors"
"testing"
"akvorado/cmd"
"akvorado/common/daemon"
"akvorado/common/helpers"
"akvorado/common/reporter"
)
type Startable struct {
Started bool
}
type Stopable struct {
Stopped bool
}
func (c *Startable) Start() error {
c.Started = true
return nil
}
func (c *Stopable) Stop() error {
c.Stopped = true
return nil
}
type ComponentStartStop struct {
Startable
Stopable
}
type ComponentStop struct {
Stopable
}
type ComponentStart struct {
Startable
}
type ComponentNone struct{}
type ComponentStartError struct {
Stopable
}
func (c ComponentStartError) Start() error {
return errors.New("nooo")
}
func TestStartStop(t *testing.T) {
r := reporter.NewMock(t)
daemonComponent := daemon.NewMock(t)
otherComponents := []interface{}{
&ComponentStartStop{},
&ComponentStop{},
&ComponentStart{},
&ComponentNone{},
&ComponentStartError{},
&ComponentStartStop{},
}
if err := cmd.StartStopComponents(r, daemonComponent, otherComponents); err == nil {
t.Error("StartStopComponents() did not trigger an error")
}
expected := []interface{}{
&ComponentStartStop{
Startable: Startable{Started: true},
Stopable: Stopable{Stopped: true},
},
&ComponentStop{
Stopable: Stopable{Stopped: true},
},
&ComponentStart{
Startable: Startable{Started: true},
},
&ComponentNone{},
&ComponentStartError{},
&ComponentStartStop{},
}
if diff := helpers.Diff(otherComponents, expected); diff != "" {
t.Errorf("StartStopComponents() (-got, +want):\n%s", diff)
}
}

cmd/config.go (new file)

@@ -0,0 +1,107 @@
package cmd
import (
"fmt"
"io"
"io/ioutil"
"os"
"strconv"
"strings"
"github.com/mitchellh/mapstructure"
"gopkg.in/yaml.v2"
"akvorado/inlet/flow"
)
// ConfigRelatedOptions are command-line options related to handling a
// configuration file.
type ConfigRelatedOptions struct {
Path string
Dump bool
}
// Parse parses the configuration file (if present) and the
// environment variables into the provided configuration.
func (c ConfigRelatedOptions) Parse(out io.Writer, component string, config interface{}) error {
var rawConfig map[string]interface{}
if cfgFile := c.Path; cfgFile != "" {
input, err := ioutil.ReadFile(cfgFile)
if err != nil {
return fmt.Errorf("unable to read configuration file: %w", err)
}
if err := yaml.Unmarshal(input, &rawConfig); err != nil {
return fmt.Errorf("unable to parse configuration file: %w", err)
}
}
// Parse provided configuration
decoder, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{
Result: &config,
ErrorUnused: true,
Metadata: nil,
WeaklyTypedInput: true,
MatchName: func(mapKey, fieldName string) bool {
key := strings.ToLower(strings.ReplaceAll(mapKey, "-", ""))
field := strings.ToLower(fieldName)
return key == field
},
DecodeHook: mapstructure.ComposeDecodeHookFunc(
flow.ConfigurationUnmarshalerHook(),
mapstructure.TextUnmarshallerHookFunc(),
mapstructure.StringToTimeDurationHookFunc(),
mapstructure.StringToSliceHookFunc(","),
),
})
if err != nil {
return fmt.Errorf("unable to create configuration decoder: %w", err)
}
if err := decoder.Decode(rawConfig); err != nil {
return fmt.Errorf("unable to parse configuration: %w", err)
}
// Override with environment variables
for _, keyval := range os.Environ() {
kv := strings.SplitN(keyval, "=", 2)
if len(kv) != 2 {
continue
}
kk := strings.Split(kv[0], "_")
if len(kk) < 3 || kk[0] != "AKVORADO" || kk[1] != strings.ToUpper(component) {
continue
}
// From AKVORADO_CMP_SQUID_PURPLE_QUIRK=47, we
// build a map "squid -> purple -> quirk ->
// 47". From AKVORADO_CMP_SQUID_3_PURPLE=47, we
// build "squid[3] -> purple -> 47"
var rawConfig interface{}
rawConfig = kv[1]
for i := len(kk) - 1; i > 1; i-- {
if index, err := strconv.Atoi(kk[i]); err == nil {
newRawConfig := make([]interface{}, index+1)
newRawConfig[index] = rawConfig
rawConfig = newRawConfig
} else {
rawConfig = map[string]interface{}{
kk[i]: rawConfig,
}
}
}
if err := decoder.Decode(rawConfig); err != nil {
return fmt.Errorf("unable to parse override %q: %w", kv[0], err)
}
}
// Dump configuration if requested
if c.Dump {
output, err := yaml.Marshal(config)
if err != nil {
return fmt.Errorf("unable to dump configuration: %w", err)
}
out.Write([]byte("---\n"))
out.Write(output)
out.Write([]byte("\n"))
}
return nil
}
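To make the environment override concrete, here is a self-contained sketch of the same expansion loop (editor's illustration, not code from the commit); the uppercase map keys are later matched case-insensitively by MatchName.

package main

import (
	"fmt"
	"strconv"
	"strings"
)

// buildOverride mirrors the loop above: numeric path components become
// slice indexes, everything else nested map keys; the two leading
// components (AKVORADO and the service name) are skipped.
func buildOverride(key string, value string) interface{} {
	kk := strings.Split(key, "_")
	var raw interface{} = value
	for i := len(kk) - 1; i > 1; i-- {
		if index, err := strconv.Atoi(kk[i]); err == nil {
			s := make([]interface{}, index+1)
			s[index] = raw
			raw = s
		} else {
			raw = map[string]interface{}{kk[i]: raw}
		}
	}
	return raw
}

func main() {
	fmt.Printf("%v\n", buildOverride("AKVORADO_INLET_FLOW_INPUTS_0_LISTEN", "0.0.0.0:2056"))
	// map[FLOW:map[INPUTS:[map[LISTEN:0.0.0.0:2056]]]]
}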


@@ -10,7 +10,7 @@ import (
"gopkg.in/yaml.v2" "gopkg.in/yaml.v2"
"akvorado/cmd" "akvorado/cmd"
"akvorado/helpers" "akvorado/common/helpers"
) )
func want(t *testing.T, got, expected interface{}) { func want(t *testing.T, got, expected interface{}) {
@@ -20,7 +20,7 @@ func want(t *testing.T, got, expected interface{}) {
} }
} }
func TestServeDump(t *testing.T) { func TestDump(t *testing.T) {
// Configuration file // Configuration file
config := `--- config := `---
http: http:
@@ -36,27 +36,25 @@ snmp:
cache-duration: 20m cache-duration: 20m
default-community: private default-community: private
kafka: kafka:
connect:
version: 2.8.1
topic: netflow topic: netflow
compression-codec: zstd compression-codec: zstd
version: 2.8.1
core: core:
workers: 3 workers: 3
` `
configFile := filepath.Join(t.TempDir(), "akvorado.yaml") configFile := filepath.Join(t.TempDir(), "akvorado.yaml")
ioutil.WriteFile(configFile, []byte(config), 0644) ioutil.WriteFile(configFile, []byte(config), 0644)
// Start serves with it c := cmd.ConfigRelatedOptions{
root := cmd.RootCmd Path: configFile,
buf := new(bytes.Buffer) Dump: true,
root.SetOut(buf) }
root.SetErr(os.Stderr) conf := cmd.DefaultInletConfiguration
root.SetArgs([]string{"serve", "-D", "-C", "--config", configFile}) buf := bytes.NewBuffer([]byte{})
cmd.ServeOptionsReset() if err := c.Parse(buf, "inlet", conf); err != nil {
err := root.Execute() t.Fatalf("Parse() error:\n%+v", err)
if err != nil {
t.Fatalf("`serve -D -C` error:\n%+v", err)
} }
var got map[string]map[string]interface{} var got map[string]map[string]interface{}
if err := yaml.Unmarshal(buf.Bytes(), &got); err != nil { if err := yaml.Unmarshal(buf.Bytes(), &got); err != nil {
t.Fatalf("Unmarshal() error:\n%+v", err) t.Fatalf("Unmarshal() error:\n%+v", err)
@@ -74,12 +72,14 @@ core:
want(t, got["snmp"]["workers"], 2) want(t, got["snmp"]["workers"], 2)
want(t, got["snmp"]["cacheduration"], "20m0s") want(t, got["snmp"]["cacheduration"], "20m0s")
want(t, got["snmp"]["defaultcommunity"], "private") want(t, got["snmp"]["defaultcommunity"], "private")
want(t, got["kafka"]["topic"], "netflow") want(t, got["kafka"]["connect"], map[string]interface{}{
want(t, got["kafka"]["version"], "2.8.1") "brokers": []string{"127.0.0.1:9092"},
want(t, got["kafka"]["brokers"], []string{"127.0.0.1:9092"}) "version": "2.8.1",
"topic": "netflow",
})
} }
func TestServeEnvOverride(t *testing.T) { func TestEnvOverride(t *testing.T) {
// Configuration file // Configuration file
config := `--- config := `---
http: http:
@@ -94,9 +94,10 @@ snmp:
workers: 2 workers: 2
cache-duration: 10m cache-duration: 10m
kafka: kafka:
connect:
version: 2.8.1
topic: netflow topic: netflow
compression-codec: zstd compression-codec: zstd
version: 2.8.1
core: core:
workers: 3 workers: 3
` `
@@ -104,28 +105,25 @@ core:
ioutil.WriteFile(configFile, []byte(config), 0644) ioutil.WriteFile(configFile, []byte(config), 0644)
// Environment // Environment
os.Setenv("AKVORADO_SNMP_CACHEDURATION", "22m") os.Setenv("AKVORADO_INLET_SNMP_CACHEDURATION", "22m")
os.Setenv("AKVORADO_SNMP_DEFAULTCOMMUNITY", "privateer") os.Setenv("AKVORADO_INLET_SNMP_DEFAULTCOMMUNITY", "privateer")
os.Setenv("AKVORADO_SNMP_WORKERS", "3") os.Setenv("AKVORADO_INLET_SNMP_WORKERS", "3")
os.Setenv("AKVORADO_KAFKA_BROKERS", "127.0.0.1:9092,127.0.0.2:9092") os.Setenv("AKVORADO_INLET_KAFKA_CONNECT_BROKERS", "127.0.0.1:9092,127.0.0.2:9092")
os.Setenv("AKVORADO_FLOW_INPUTS_0_LISTEN", "0.0.0.0:2056") os.Setenv("AKVORADO_INLET_FLOW_INPUTS_0_LISTEN", "0.0.0.0:2056")
// We may be lucky or the environment is keeping order // We may be lucky or the environment is keeping order
os.Setenv("AKVORADO_FLOW_INPUTS_1_TYPE", "file") os.Setenv("AKVORADO_INLET_FLOW_INPUTS_1_TYPE", "file")
os.Setenv("AKVORADO_FLOW_INPUTS_1_DECODER", "netflow") os.Setenv("AKVORADO_INLET_FLOW_INPUTS_1_DECODER", "netflow")
os.Setenv("AKVORADO_FLOW_INPUTS_1_PATHS", "f1,f2") os.Setenv("AKVORADO_INLET_FLOW_INPUTS_1_PATHS", "f1,f2")
// Start serves with it c := cmd.ConfigRelatedOptions{
root := cmd.RootCmd Path: configFile,
buf := new(bytes.Buffer) Dump: true,
root.SetOut(buf) }
root.SetErr(os.Stderr) conf := cmd.DefaultInletConfiguration
root.SetArgs([]string{"serve", "-D", "-C", "--config", configFile}) buf := bytes.NewBuffer([]byte{})
cmd.ServeOptionsReset() if err := c.Parse(buf, "inlet", conf); err != nil {
err := root.Execute() t.Fatalf("Parse() error:\n%+v", err)
if err != nil {
t.Fatalf("`serve -D -C` error:\n%+v", err)
} }
var got map[string]map[string]interface{} var got map[string]map[string]interface{}
if err := yaml.Unmarshal(buf.Bytes(), &got); err != nil { if err := yaml.Unmarshal(buf.Bytes(), &got); err != nil {
t.Fatalf("Unmarshal() error:\n%+v", err) t.Fatalf("Unmarshal() error:\n%+v", err)
@@ -133,7 +131,11 @@ core:
want(t, got["snmp"]["cacheduration"], "22m0s") want(t, got["snmp"]["cacheduration"], "22m0s")
want(t, got["snmp"]["defaultcommunity"], "privateer") want(t, got["snmp"]["defaultcommunity"], "privateer")
want(t, got["snmp"]["workers"], 3) want(t, got["snmp"]["workers"], 3)
want(t, got["kafka"]["brokers"], []string{"127.0.0.1:9092", "127.0.0.2:9092"}) want(t, got["kafka"]["connect"], map[string]interface{}{
"brokers": []string{"127.0.0.1:9092", "127.0.0.2:9092"},
"version": "2.8.1",
"topic": "netflow",
})
want(t, got["flow"], map[string]interface{}{ want(t, got["flow"], map[string]interface{}{
"inputs": []map[string]interface{}{ "inputs": []map[string]interface{}{
{ {

cmd/configure.go (new file)

@@ -0,0 +1,109 @@
package cmd
import (
"fmt"
"github.com/spf13/cobra"
"akvorado/common/daemon"
"akvorado/common/http"
"akvorado/common/reporter"
"akvorado/configure/clickhouse"
"akvorado/configure/kafka"
)
// ConfigureConfiguration represents the configuration file for the configure command.
type ConfigureConfiguration struct {
Reporting reporter.Configuration
HTTP http.Configuration
Clickhouse clickhouse.Configuration
Kafka kafka.Configuration
}
// DefaultConfigureConfiguration is the default configuration for the configure command.
var DefaultConfigureConfiguration = ConfigureConfiguration{
HTTP: http.DefaultConfiguration,
Reporting: reporter.DefaultConfiguration,
Clickhouse: clickhouse.DefaultConfiguration,
Kafka: kafka.DefaultConfiguration,
}
type configureOptions struct {
ConfigRelatedOptions
CheckMode bool
}
// ConfigureOptions stores the command-line option values for the configure
// command.
var ConfigureOptions configureOptions
var configureCmd = &cobra.Command{
Use: "configure",
Short: "Start Akvorado's configure service",
Long: `Akvorado is a Netflow/IPFIX collector. The configure service configures external
components: Kafka and Clickhouse.`,
Args: cobra.ExactArgs(0),
RunE: func(cmd *cobra.Command, args []string) error {
config := DefaultConfigureConfiguration
if err := ConfigureOptions.Parse(cmd.OutOrStdout(), "configure", &config); err != nil {
return err
}
r, err := reporter.New(config.Reporting)
if err != nil {
return fmt.Errorf("unable to initialize reporter: %w", err)
}
return configureStart(r, config, ConfigureOptions.CheckMode)
},
}
func init() {
RootCmd.AddCommand(configureCmd)
configureCmd.Flags().StringVarP(&ConfigureOptions.ConfigRelatedOptions.Path, "config", "c", "",
"Configuration file")
configureCmd.Flags().BoolVarP(&ConfigureOptions.ConfigRelatedOptions.Dump, "dump", "D", false,
"Dump configuration before starting")
configureCmd.Flags().BoolVarP(&ConfigureOptions.CheckMode, "check", "C", false,
"Check configuration, but does not start")
}
func configureStart(r *reporter.Reporter, config ConfigureConfiguration, checkOnly bool) error {
daemonComponent, err := daemon.New(r)
if err != nil {
return fmt.Errorf("unable to initialize daemon component: %w", err)
}
httpComponent, err := http.New(r, config.HTTP, http.Dependencies{
Daemon: daemonComponent,
})
if err != nil {
return fmt.Errorf("unable to initialize HTTP component: %w", err)
}
kafkaComponent, err := kafka.New(r, config.Kafka)
if err != nil {
return fmt.Errorf("unable to initialize kafka component: %w", err)
}
clickhouseComponent, err := clickhouse.New(r, config.Clickhouse, clickhouse.Dependencies{
Daemon: daemonComponent,
HTTP: httpComponent,
})
if err != nil {
return fmt.Errorf("unable to initialize clickhouse component: %w", err)
}
// Expose some information and metrics
addCommonHTTPHandlers(r, "configure", httpComponent)
versionMetrics(r)
// If we only asked for a check, stop here.
if checkOnly {
return nil
}
// Start all the components.
components := []interface{}{
httpComponent,
clickhouseComponent,
kafkaComponent,
}
return StartStopComponents(r, daemonComponent, components)
}

cmd/configure_test.go (new file)

@@ -0,0 +1,14 @@
package cmd
import (
"testing"
"akvorado/common/reporter"
)
func TestConfigureStart(t *testing.T) {
r := reporter.NewMock(t)
if err := configureStart(r, DefaultConfigureConfiguration, true); err != nil {
t.Fatalf("configureStart() error:\n%+v", err)
}
}

cmd/console.go (new file)

@@ -0,0 +1,101 @@
package cmd
import (
"fmt"
"github.com/spf13/cobra"
"akvorado/common/daemon"
"akvorado/common/http"
"akvorado/common/reporter"
"akvorado/console"
)
// ConsoleConfiguration represents the configuration file for the console command.
type ConsoleConfiguration struct {
Reporting reporter.Configuration
HTTP http.Configuration
Console console.Configuration
}
// DefaultConsoleConfiguration is the default configuration for the console command.
var DefaultConsoleConfiguration = ConsoleConfiguration{
HTTP: http.DefaultConfiguration,
Reporting: reporter.DefaultConfiguration,
Console: console.DefaultConfiguration,
}
type consoleOptions struct {
ConfigRelatedOptions
CheckMode bool
}
// ConsoleOptions stores the command-line option values for the console
// command.
var ConsoleOptions consoleOptions
var consoleCmd = &cobra.Command{
Use: "console",
Short: "Start Akvorado's console service",
Long: `Akvorado is a Netflow/IPFIX collector. The console service exposes a web interface to
manage collected flows.`,
Args: cobra.ExactArgs(0),
RunE: func(cmd *cobra.Command, args []string) error {
config := DefaultConsoleConfiguration
if err := ConsoleOptions.Parse(cmd.OutOrStdout(), "console", &config); err != nil {
return err
}
r, err := reporter.New(config.Reporting)
if err != nil {
return fmt.Errorf("unable to initialize reporter: %w", err)
}
return consoleStart(r, config, ConsoleOptions.CheckMode)
},
}
func init() {
RootCmd.AddCommand(consoleCmd)
consoleCmd.Flags().StringVarP(&ConsoleOptions.ConfigRelatedOptions.Path, "config", "c", "",
"Configuration file")
consoleCmd.Flags().BoolVarP(&ConsoleOptions.ConfigRelatedOptions.Dump, "dump", "D", false,
"Dump configuration before starting")
consoleCmd.Flags().BoolVarP(&ConsoleOptions.CheckMode, "check", "C", false,
"Check configuration, but does not start")
}
func consoleStart(r *reporter.Reporter, config ConsoleConfiguration, checkOnly bool) error {
daemonComponent, err := daemon.New(r)
if err != nil {
return fmt.Errorf("unable to initialize daemon component: %w", err)
}
httpComponent, err := http.New(r, config.HTTP, http.Dependencies{
Daemon: daemonComponent,
})
if err != nil {
return fmt.Errorf("unable to initialize HTTP component: %w", err)
}
consoleComponent, err := console.New(r, config.Console, console.Dependencies{
Daemon: daemonComponent,
HTTP: httpComponent,
})
if err != nil {
return fmt.Errorf("unable to initialize console component: %w", err)
}
// Expose some information and metrics
addCommonHTTPHandlers(r, "console", httpComponent)
versionMetrics(r)
// If we only asked for a check, stop here.
if checkOnly {
return nil
}
// Start all the components.
components := []interface{}{
httpComponent,
consoleComponent,
}
return StartStopComponents(r, daemonComponent, components)
}

cmd/console_test.go (new file)

@@ -0,0 +1,14 @@
package cmd
import (
"testing"
"akvorado/common/reporter"
)
func TestConsoleStart(t *testing.T) {
r := reporter.NewMock(t)
if err := consoleStart(r, DefaultConsoleConfiguration, true); err != nil {
t.Fatalf("consoleStart() error:\n%+v", err)
}
}

cmd/http.go (new file)

@@ -0,0 +1,20 @@
package cmd
import (
"fmt"
"akvorado/common/http"
"akvorado/common/reporter"
)
// addCommonHTTPHandlers configures various endpoints common to all
// services. Each endpoint is registered under `/api/v0` and
// `/api/v0/SERVICE` namespaces.
func addCommonHTTPHandlers(r *reporter.Reporter, service string, httpComponent *http.Component) {
httpComponent.AddHandler(fmt.Sprintf("/api/v0/%s/metrics", service), r.MetricsHTTPHandler())
httpComponent.AddHandler("/api/v0/metrics", r.MetricsHTTPHandler())
httpComponent.AddHandler(fmt.Sprintf("/api/v0/%s/healthcheck", service), r.HealthcheckHTTPHandler())
httpComponent.AddHandler("/api/v0/healthcheck", r.HealthcheckHTTPHandler())
httpComponent.AddHandler(fmt.Sprintf("/api/v0/%s/version", service), versionHandler())
httpComponent.AddHandler("/api/v0/version", versionHandler())
}
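An illustrative consequence (editor's sketch, not part of the commit): every endpoint answers both globally and under the service namespace. Assuming an inlet on the default localhost:8080:

package main

import (
	"fmt"
	"io"
	"net/http"
)

func main() {
	// Both paths are registered by addCommonHTTPHandlers(r, "inlet", ...)
	// and serve the same healthcheck handler.
	for _, url := range []string{
		"http://localhost:8080/api/v0/healthcheck",
		"http://localhost:8080/api/v0/inlet/healthcheck",
	} {
		resp, err := http.Get(url)
		if err != nil {
			fmt.Println(url, "->", err)
			continue
		}
		body, _ := io.ReadAll(resp.Body)
		resp.Body.Close()
		fmt.Println(url, "->", resp.Status, string(body))
	}
}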

cmd/inlet.go (new file)

@@ -0,0 +1,147 @@
package cmd
import (
"fmt"
"github.com/spf13/cobra"
"akvorado/common/daemon"
"akvorado/common/http"
"akvorado/common/reporter"
"akvorado/inlet/core"
"akvorado/inlet/flow"
"akvorado/inlet/geoip"
"akvorado/inlet/kafka"
"akvorado/inlet/snmp"
)
// InletConfiguration represents the configuration file for the inlet command.
type InletConfiguration struct {
Reporting reporter.Configuration
HTTP http.Configuration
Flow flow.Configuration
SNMP snmp.Configuration
GeoIP geoip.Configuration
Kafka kafka.Configuration
Core core.Configuration
}
// DefaultInletConfiguration is the default configuration for the inlet command.
var DefaultInletConfiguration = InletConfiguration{
HTTP: http.DefaultConfiguration,
Reporting: reporter.DefaultConfiguration,
Flow: flow.DefaultConfiguration,
SNMP: snmp.DefaultConfiguration,
GeoIP: geoip.DefaultConfiguration,
Kafka: kafka.DefaultConfiguration,
Core: core.DefaultConfiguration,
}
type inletOptions struct {
ConfigRelatedOptions
CheckMode bool
}
// InletOptions stores the command-line option values for the inlet
// command.
var InletOptions inletOptions
var inletCmd = &cobra.Command{
Use: "inlet",
Short: "Start Akvorado's inlet service",
Long: `Akvorado is a Netflow/IPFIX collector. The inlet service handles flow ingestion,
hydration and export to Kafka.`,
Args: cobra.ExactArgs(0),
RunE: func(cmd *cobra.Command, args []string) error {
config := DefaultInletConfiguration
if err := InletOptions.Parse(cmd.OutOrStdout(), "inlet", &config); err != nil {
return err
}
r, err := reporter.New(config.Reporting)
if err != nil {
return fmt.Errorf("unable to initialize reporter: %w", err)
}
return inletStart(r, config, InletOptions.CheckMode)
},
}
func init() {
RootCmd.AddCommand(inletCmd)
inletCmd.Flags().StringVarP(&InletOptions.ConfigRelatedOptions.Path, "config", "c", "",
"Configuration file")
inletCmd.Flags().BoolVarP(&InletOptions.ConfigRelatedOptions.Dump, "dump", "D", false,
"Dump configuration before starting")
inletCmd.Flags().BoolVarP(&InletOptions.CheckMode, "check", "C", false,
"Check configuration, but does not start")
}
func inletStart(r *reporter.Reporter, config InletConfiguration, checkOnly bool) error {
// Initialize the various components
daemonComponent, err := daemon.New(r)
if err != nil {
return fmt.Errorf("unable to initialize daemon component: %w", err)
}
httpComponent, err := http.New(r, config.HTTP, http.Dependencies{
Daemon: daemonComponent,
})
if err != nil {
return fmt.Errorf("unable to initialize http component: %w", err)
}
flowComponent, err := flow.New(r, config.Flow, flow.Dependencies{
Daemon: daemonComponent,
HTTP: httpComponent,
})
if err != nil {
return fmt.Errorf("unable to initialize flow component: %w", err)
}
snmpComponent, err := snmp.New(r, config.SNMP, snmp.Dependencies{
Daemon: daemonComponent,
})
if err != nil {
return fmt.Errorf("unable to initialize SNMP component: %w", err)
}
geoipComponent, err := geoip.New(r, config.GeoIP, geoip.Dependencies{
Daemon: daemonComponent,
})
if err != nil {
return fmt.Errorf("unable to initialize GeoIP component: %w", err)
}
kafkaComponent, err := kafka.New(r, config.Kafka, kafka.Dependencies{
Daemon: daemonComponent,
})
if err != nil {
return fmt.Errorf("unable to initialize Kafka component: %w", err)
}
coreComponent, err := core.New(r, config.Core, core.Dependencies{
Daemon: daemonComponent,
Flow: flowComponent,
Snmp: snmpComponent,
GeoIP: geoipComponent,
Kafka: kafkaComponent,
HTTP: httpComponent,
})
if err != nil {
return fmt.Errorf("unable to initialize core component: %w", err)
}
// Expose some information and metrics
addCommonHTTPHandlers(r, "inlet", httpComponent)
versionMetrics(r)
// If we only asked for a check, stop here.
if checkOnly {
return nil
}
// Start all the components.
components := []interface{}{
httpComponent,
snmpComponent,
geoipComponent,
kafkaComponent,
coreComponent,
flowComponent,
}
return StartStopComponents(r, daemonComponent, components)
}

cmd/inlet_test.go (new file)

@@ -0,0 +1,14 @@
package cmd
import (
"testing"
"akvorado/common/reporter"
)
func TestInletStart(t *testing.T) {
r := reporter.NewMock(t)
if err := inletStart(r, DefaultInletConfiguration, true); err != nil {
t.Fatalf("inletStart() error:\n%+v", err)
}
}


@@ -1,310 +0,0 @@
package cmd
import (
"encoding/json"
"fmt"
"io/ioutil"
netHTTP "net/http"
"os"
"runtime"
"strconv"
"strings"
"github.com/mitchellh/mapstructure"
"github.com/spf13/cobra"
"gopkg.in/yaml.v2"
"akvorado/clickhouse"
"akvorado/core"
"akvorado/daemon"
"akvorado/flow"
"akvorado/geoip"
"akvorado/http"
"akvorado/kafka"
"akvorado/reporter"
"akvorado/snmp"
"akvorado/web"
)
// ServeConfiguration represents the configuration file for the serve command.
type ServeConfiguration struct {
Reporting reporter.Configuration
HTTP http.Configuration
Flow flow.Configuration
SNMP snmp.Configuration
GeoIP geoip.Configuration
Kafka kafka.Configuration
Core core.Configuration
Web web.Configuration
ClickHouse clickhouse.Configuration
}
// DefaultServeConfiguration is the default configuration for the serve command.
var DefaultServeConfiguration = ServeConfiguration{
Reporting: reporter.DefaultConfiguration,
HTTP: http.DefaultConfiguration,
Flow: flow.DefaultConfiguration,
SNMP: snmp.DefaultConfiguration,
GeoIP: geoip.DefaultConfiguration,
Kafka: kafka.DefaultConfiguration,
Core: core.DefaultConfiguration,
Web: web.DefaultConfiguration,
ClickHouse: clickhouse.DefaultConfiguration,
}
type serveOptions struct {
configurationFile string
checkMode bool
dumpConfiguration bool
}
// ServeOptions stores the command-line option values for the serve
// command.
var ServeOptions serveOptions
var serveCmd = &cobra.Command{
Use: "serve",
Short: "Start akvorado",
Long: `Akvorado is a Netflow/IPFIX collector. It hydrates flows with information from SNMP and GeoIP
and exports them to Kafka.`,
Args: cobra.ExactArgs(0),
RunE: func(cmd *cobra.Command, args []string) error {
// Parse YAML
var rawConfig map[string]interface{}
if cfgFile := ServeOptions.configurationFile; cfgFile != "" {
input, err := ioutil.ReadFile(cfgFile)
if err != nil {
return fmt.Errorf("unable to read configuration file: %w", err)
}
if err := yaml.Unmarshal(input, &rawConfig); err != nil {
return fmt.Errorf("unable to parse configuration file: %w", err)
}
}
// Parse provided configuration
config := DefaultServeConfiguration
decoder, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{
Result: &config,
ErrorUnused: true,
Metadata: nil,
WeaklyTypedInput: true,
MatchName: func(mapKey, fieldName string) bool {
key := strings.ToLower(strings.ReplaceAll(mapKey, "-", ""))
field := strings.ToLower(fieldName)
return key == field
},
DecodeHook: mapstructure.ComposeDecodeHookFunc(
flow.ConfigurationUnmarshalerHook(),
mapstructure.TextUnmarshallerHookFunc(),
mapstructure.StringToTimeDurationHookFunc(),
mapstructure.StringToSliceHookFunc(","),
),
})
if err != nil {
return fmt.Errorf("unable to create configuration decoder: %w", err)
}
if err := decoder.Decode(rawConfig); err != nil {
return fmt.Errorf("unable to parse configuration: %w", err)
}
// Override with environment variables
for _, keyval := range os.Environ() {
kv := strings.SplitN(keyval, "=", 2)
if len(kv) != 2 {
continue
}
kk := strings.Split(kv[0], "_")
if kk[0] != "AKVORADO" || len(kk) < 2 {
continue
}
// From AKVORADO_SQUID_PURPLE_QUIRK=47, we
// build a map "squid -> purple -> quirk ->
// 47". From AKVORADO_SQUID_3_PURPLE=47, we
// build "squid[3] -> purple -> 47"
var rawConfig interface{}
rawConfig = kv[1]
for i := len(kk) - 1; i > 0; i-- {
if index, err := strconv.Atoi(kk[i]); err == nil {
newRawConfig := make([]interface{}, index+1)
newRawConfig[index] = rawConfig
rawConfig = newRawConfig
} else {
rawConfig = map[string]interface{}{
kk[i]: rawConfig,
}
}
}
if err := decoder.Decode(rawConfig); err != nil {
return fmt.Errorf("unable to parse override %q: %w", kv[0], err)
}
}
// Dump configuration if requested
if ServeOptions.dumpConfiguration {
output, err := yaml.Marshal(config)
if err != nil {
return fmt.Errorf("unable to dump configuration: %w", err)
}
cmd.Printf("---\n%s\n", string(output))
}
r, err := reporter.New(config.Reporting)
if err != nil {
return fmt.Errorf("unable to initialize reporter: %w", err)
}
return daemonStart(r, config, ServeOptions.checkMode)
},
}
func init() {
RootCmd.AddCommand(serveCmd)
serveCmd.Flags().StringVarP(&ServeOptions.configurationFile, "config", "c", "",
"Configuration file")
serveCmd.Flags().BoolVarP(&ServeOptions.checkMode, "check", "C", false,
"Check configuration, but does not start")
serveCmd.Flags().BoolVarP(&ServeOptions.dumpConfiguration, "dump", "D", false,
"Dump configuration before starting")
}
// daemonStart will start all components and manage daemon lifetime.
func daemonStart(r *reporter.Reporter, config ServeConfiguration, checkOnly bool) error {
// Initialize the various components
daemonComponent, err := daemon.New(r)
if err != nil {
return fmt.Errorf("unable to initialize daemon component: %w", err)
}
httpComponent, err := http.New(r, config.HTTP, http.Dependencies{
Daemon: daemonComponent,
})
if err != nil {
return fmt.Errorf("unable to initialize http component: %w", err)
}
flowComponent, err := flow.New(r, config.Flow, flow.Dependencies{
Daemon: daemonComponent,
HTTP: httpComponent,
})
if err != nil {
return fmt.Errorf("unable to initialize flow component: %w", err)
}
snmpComponent, err := snmp.New(r, config.SNMP, snmp.Dependencies{
Daemon: daemonComponent,
})
if err != nil {
return fmt.Errorf("unable to initialize SNMP component: %w", err)
}
geoipComponent, err := geoip.New(r, config.GeoIP, geoip.Dependencies{
Daemon: daemonComponent,
})
if err != nil {
return fmt.Errorf("unable to initialize GeoIP component: %w", err)
}
kafkaComponent, err := kafka.New(r, config.Kafka, kafka.Dependencies{
Daemon: daemonComponent,
})
if err != nil {
return fmt.Errorf("unable to initialize Kafka component: %w", err)
}
clickhouseComponent, err := clickhouse.New(r, config.ClickHouse, clickhouse.Dependencies{
Daemon: daemonComponent,
HTTP: httpComponent,
Kafka: kafkaComponent,
})
if err != nil {
return fmt.Errorf("unable to initialize ClickHouse component: %w", err)
}
coreComponent, err := core.New(r, config.Core, core.Dependencies{
Daemon: daemonComponent,
Flow: flowComponent,
Snmp: snmpComponent,
GeoIP: geoipComponent,
Kafka: kafkaComponent,
HTTP: httpComponent,
})
if err != nil {
return fmt.Errorf("unable to initialize core component: %w", err)
}
webComponent, err := web.New(r, config.Web, web.Dependencies{
HTTP: httpComponent,
})
if err != nil {
return fmt.Errorf("unable to initialize web component: %w", err)
}
// If we only asked for a check, stop here.
if checkOnly {
return nil
}
// Expose some informations and metrics
httpComponent.AddHandler("/api/v0/metrics", r.MetricsHTTPHandler())
httpComponent.AddHandler("/api/v0/healthcheck", r.HealthcheckHTTPHandler())
httpComponent.AddHandler("/api/v0/version", netHTTP.HandlerFunc(
func(w netHTTP.ResponseWriter, r *netHTTP.Request) {
versionInfo := struct {
Version string `json:"version"`
BuildDate string `json:"build_date"`
Compiler string `json:"compiler"`
}{
Version: Version,
BuildDate: BuildDate,
Compiler: runtime.Version(),
}
w.Header().Set("Content-Type", "application/json")
json.NewEncoder(w).Encode(versionInfo)
}))
r.GaugeVec(reporter.GaugeOpts{
Name: "info",
Help: "Akvorado build information",
}, []string{"version", "build_date", "compiler"}).
WithLabelValues(Version, BuildDate, runtime.Version()).Set(1)
// Start all the components.
components := []interface{}{
r,
daemonComponent,
httpComponent,
snmpComponent,
geoipComponent,
kafkaComponent,
clickhouseComponent,
coreComponent,
webComponent,
flowComponent,
}
startedComponents := []interface{}{}
defer func() {
for _, cmp := range startedComponents {
if stopperC, ok := cmp.(stopper); ok {
if err := stopperC.Stop(); err != nil {
r.Err(err).Msg("unable to stop component, ignoring")
}
}
}
}()
for _, cmp := range components {
if starterC, ok := cmp.(starter); ok {
if err := starterC.Start(); err != nil {
return fmt.Errorf("unable to start component: %w", err)
}
}
startedComponents = append([]interface{}{cmp}, startedComponents...)
}
r.Info().
Str("version", Version).Str("build-date", BuildDate).
Msg("akvorado has started")
select {
case <-daemonComponent.Terminated():
r.Info().Msg("stopping all components")
}
return nil
}
type starter interface {
Start() error
}
type stopper interface {
Stop() error
}


@@ -1,7 +0,0 @@
package cmd
// ServeOptionsReset resets serve options provided on command line.
// This should be used between two tests.
func ServeOptionsReset() {
ServeOptions = serveOptions{}
}


@@ -1,9 +1,13 @@
 package cmd
 import (
+"encoding/json"
+"net/http"
 "runtime"
 "github.com/spf13/cobra"
+"akvorado/common/reporter"
 )
 var (
@@ -27,3 +31,28 @@ var versionCmd = &cobra.Command{
 cmd.Printf(" Built with: %s\n", runtime.Version())
 },
 }
+func versionHandler() http.Handler {
+return http.HandlerFunc(
+func(w http.ResponseWriter, r *http.Request) {
+versionInfo := struct {
+Version string `json:"version"`
+BuildDate string `json:"build_date"`
+Compiler string `json:"compiler"`
+}{
+Version: Version,
+BuildDate: BuildDate,
+Compiler: runtime.Version(),
+}
+w.Header().Set("Content-Type", "application/json")
+json.NewEncoder(w).Encode(versionInfo)
+})
+}
+func versionMetrics(r *reporter.Reporter) {
+r.GaugeVec(reporter.GaugeOpts{
+Name: "info",
+Help: "Akvorado build information",
+}, []string{"version", "build_date", "compiler"}).
+WithLabelValues(Version, BuildDate, runtime.Version()).Set(1)
+}


@@ -8,7 +8,7 @@ import (
"testing" "testing"
"akvorado/cmd" "akvorado/cmd"
"akvorado/helpers" "akvorado/common/helpers"
) )
func TestVersion(t *testing.T) { func TestVersion(t *testing.T) {


@@ -10,7 +10,7 @@ import (
"gopkg.in/tomb.v2" "gopkg.in/tomb.v2"
"akvorado/reporter" "akvorado/common/reporter"
) )
// Component is the interface the daemon component provides. // Component is the interface the daemon component provides.


@@ -7,7 +7,7 @@ import (
"gopkg.in/tomb.v2" "gopkg.in/tomb.v2"
"akvorado/reporter" "akvorado/common/reporter"
) )
func TestTerminate(t *testing.T) { func TestTerminate(t *testing.T) {


@@ -8,7 +8,7 @@ type Configuration struct {
 Profiler bool
 }
-// DefaultConfiguration represents the default configuration for the HTTP server.
+// DefaultConfiguration is the default configuration of the HTTP server.
 var DefaultConfiguration = Configuration{
 Listen: "localhost:8080",
 }


@@ -14,8 +14,8 @@ import (
"github.com/rs/zerolog/hlog" "github.com/rs/zerolog/hlog"
"gopkg.in/tomb.v2" "gopkg.in/tomb.v2"
"akvorado/daemon" "akvorado/common/daemon"
"akvorado/reporter" "akvorado/common/reporter"
) )
// Component represents the HTTP compomenent. // Component represents the HTTP compomenent.


@@ -6,9 +6,9 @@ import (
"runtime" "runtime"
"testing" "testing"
"akvorado/helpers" "akvorado/common/helpers"
"akvorado/http" "akvorado/common/http"
"akvorado/reporter" "akvorado/common/reporter"
) )
func TestHandler(t *testing.T) { func TestHandler(t *testing.T) {
@@ -39,7 +39,7 @@ func TestHandler(t *testing.T) {
t.Fatalf("GET /test: got status code %d, not 200", resp.StatusCode) t.Fatalf("GET /test: got status code %d, not 200", resp.StatusCode)
} }
gotMetrics := r.GetMetrics("akvorado_http_", "inflight_", "requests_total", "response_size") gotMetrics := r.GetMetrics("akvorado_common_http_", "inflight_", "requests_total", "response_size")
expectedMetrics := map[string]string{ expectedMetrics := map[string]string{
`inflight_requests`: "0", `inflight_requests`: "0",
`requests_total{code="200",handler="/test",method="get"}`: "1", `requests_total{code="200",handler="/test",method="get"}`: "1",


@@ -5,15 +5,16 @@ package http
 import (
 "testing"
-"akvorado/daemon"
-"akvorado/reporter"
+"akvorado/common/daemon"
+"akvorado/common/reporter"
 )
 // NewMock create a new HTTP component listening on a random free port.
 func NewMock(t *testing.T, r *reporter.Reporter) *Component {
 t.Helper()
-config := DefaultConfiguration
-config.Listen = "127.0.0.1:0"
+config := Configuration{
+Listen: "127.0.0.1:0",
+}
 c, err := New(r, config, Dependencies{Daemon: daemon.NewMock(t)})
 if err != nil {
 t.Fatalf("New() error:\n%+v", err)

common/kafka/config.go (new file)

@@ -0,0 +1,43 @@
package kafka
import "github.com/Shopify/sarama"
// Configuration defines how we connect to a Kafka cluster.
type Configuration struct {
// Topic defines the topic to write flows to.
Topic string
// Brokers is the list of brokers to connect to.
Brokers []string
// Version is the version of Kafka we assume to work
Version Version
}
// DefaultConfiguration represents the default configuration for connecting to Kafka.
var DefaultConfiguration = Configuration{
Topic: "flows",
Brokers: []string{"127.0.0.1:9092"},
Version: Version(sarama.V2_8_1_0),
}
// Version represents a supported version of Kafka
type Version sarama.KafkaVersion
// UnmarshalText parses a version of Kafka
func (v *Version) UnmarshalText(text []byte) error {
version, err := sarama.ParseKafkaVersion(string(text))
if err != nil {
return err
}
*v = Version(version)
return nil
}
// String turns a Kafka version into a string
func (v Version) String() string {
return sarama.KafkaVersion(v).String()
}
// MarshalText turns a Kafka version into a string
func (v Version) MarshalText() ([]byte, error) {
return []byte(v.String()), nil
}
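The Version type round-trips through sarama's parser; a minimal sketch (editor's example, assuming the akvorado/common/kafka import path introduced by this commit):

package main

import (
	"fmt"

	"akvorado/common/kafka"
)

func main() {
	// UnmarshalText accepts the same strings as sarama.ParseKafkaVersion.
	var v kafka.Version
	if err := v.UnmarshalText([]byte("2.8.1")); err != nil {
		panic(err)
	}
	fmt.Println(v.String()) // 2.8.1
}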


@@ -6,20 +6,32 @@ import (
"github.com/Shopify/sarama" "github.com/Shopify/sarama"
"akvorado/reporter" "akvorado/common/reporter"
) )
func init() { func init() {
// The logger in Sarama is global. Do the same. // The logger in Sarama is global. Do the same.
sarama.Logger = &globalKafkaLogger sarama.Logger = &GlobalKafkaLogger
} }
var globalKafkaLogger kafkaLogger // GlobalKafkaLogger is the logger instance registered to sarama.
var GlobalKafkaLogger kafkaLogger
type kafkaLogger struct { type kafkaLogger struct {
r atomic.Value r atomic.Value
} }
// Register register the provided reporter to be used for logging with sarama.
func (l *kafkaLogger) Register(r *reporter.Reporter) {
l.r.Store(r)
}
// Unregister removes the currently registered reporter.
func (l *kafkaLogger) Unregister() {
var noreporter *reporter.Reporter
l.r.Store(noreporter)
}
func (l *kafkaLogger) Print(v ...interface{}) { func (l *kafkaLogger) Print(v ...interface{}) {
r := l.r.Load() r := l.r.Load()
if r != nil && r.(*reporter.Reporter) != nil { if r != nil && r.(*reporter.Reporter) != nil {

common/kafka/tests.go (new file)

@@ -0,0 +1,59 @@
//go:build !release
package kafka
import (
"testing"
"time"
"github.com/Shopify/sarama"
"akvorado/common/helpers"
)
// SetupKafkaBroker configures a client to use for testing.
func SetupKafkaBroker(t *testing.T) (sarama.Client, []string) {
broker := helpers.CheckExternalService(t, "Kafka", []string{"kafka", "localhost"}, "9092")
// Wait for broker to be ready
saramaConfig := sarama.NewConfig()
saramaConfig.Version = sarama.V2_8_1_0
saramaConfig.Net.DialTimeout = 1 * time.Second
saramaConfig.Net.ReadTimeout = 1 * time.Second
saramaConfig.Net.WriteTimeout = 1 * time.Second
ready := false
var (
client sarama.Client
err error
)
for i := 0; i < 90; i++ {
if client != nil {
client.Close()
}
client, err = sarama.NewClient([]string{broker}, saramaConfig)
if err != nil {
continue
}
if err := client.RefreshMetadata(); err != nil {
continue
}
brokers := client.Brokers()
if len(brokers) == 0 {
continue
}
if err := brokers[0].Open(client.Config()); err != nil {
continue
}
if connected, err := brokers[0].Connected(); err != nil || !connected {
brokers[0].Close()
continue
}
brokers[0].Close()
ready = true
}
if !ready {
t.Fatalf("broker is not ready")
}
return client, []string{broker}
}
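A hedged usage sketch (editor's addition): a test can lean on this helper and let CheckExternalService decide whether a broker is reachable; the exact skip behavior depends on that helper.

package kafka

import "testing"

func TestBrokerReady(t *testing.T) {
	// SetupKafkaBroker retries for a while until the broker answers,
	// then hands back a connected client and the broker list.
	client, brokers := SetupKafkaBroker(t)
	defer client.Close()
	t.Logf("connected to Kafka via %v", brokers)
}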


@@ -1,8 +1,8 @@
 package reporter
 import (
-"akvorado/reporter/logger"
-"akvorado/reporter/metrics"
+"akvorado/common/reporter/logger"
+"akvorado/common/reporter/metrics"
 )
 // Configuration contains the reporter configuration.


@@ -9,8 +9,8 @@ import (
"testing" "testing"
"time" "time"
"akvorado/helpers" "akvorado/common/helpers"
"akvorado/reporter" "akvorado/common/reporter"
) )
func testHealthchecks(t *testing.T, r *reporter.Reporter, ctx context.Context, expectedStatus reporter.HealthcheckStatus, expectedResults map[string]reporter.HealthcheckResult) { func testHealthchecks(t *testing.T, r *reporter.Reporter, ctx context.Context, expectedStatus reporter.HealthcheckStatus, expectedResults map[string]reporter.HealthcheckResult) {


@@ -15,7 +15,7 @@ import (
"github.com/rs/zerolog" "github.com/rs/zerolog"
"github.com/rs/zerolog/log" "github.com/rs/zerolog/log"
"akvorado/reporter/stack" "akvorado/common/reporter/stack"
) )
// Logger is a logger instance. It is compatible with the interface // Logger is a logger instance. It is compatible with the interface


@@ -7,7 +7,7 @@ import (
"github.com/rs/zerolog" "github.com/rs/zerolog"
"github.com/rs/zerolog/log" "github.com/rs/zerolog/log"
"akvorado/reporter/logger" "akvorado/common/reporter/logger"
) )
func ExampleNew() { func ExampleNew() {
@@ -24,5 +24,5 @@ func ExampleNew() {
} }
logger.Info().Int("example", 15).Msg("hello world") logger.Info().Int("example", 15).Msg("hello world")
// Output: {"level":"info","example":15,"time":"2008-01-08T17:05:05Z","caller":"akvorado/reporter/logger/root_example_test.go:26","module":"akvorado/reporter/logger_test","message":"hello world"} // Output: {"level":"info","example":15,"time":"2008-01-08T17:05:05Z","caller":"akvorado/common/reporter/logger/root_example_test.go:26","module":"akvorado/common/reporter/logger_test","message":"hello world"}
} }


@@ -3,7 +3,7 @@ package metrics
 import (
 "fmt"
-"akvorado/reporter/logger"
+"akvorado/common/reporter/logger"
 )
 // promHTTPLogger is an adapter for logger.Logger to be used as promhttp.Logger


@@ -13,8 +13,8 @@ import (
"github.com/prometheus/client_golang/prometheus/collectors" "github.com/prometheus/client_golang/prometheus/collectors"
"github.com/prometheus/client_golang/prometheus/promhttp" "github.com/prometheus/client_golang/prometheus/promhttp"
"akvorado/reporter/logger" "akvorado/common/reporter/logger"
"akvorado/reporter/stack" "akvorado/common/reporter/stack"
) )
// Metrics represents the internal state of the metric subsystem. // Metrics represents the internal state of the metric subsystem.
@@ -54,9 +54,9 @@ func getPrefix(module string) (moduleName string) {
moduleName = stack.ModuleName moduleName = stack.ModuleName
} else { } else {
moduleName = strings.SplitN(module, ".", 2)[0] moduleName = strings.SplitN(module, ".", 2)[0]
}
moduleName = strings.ReplaceAll(moduleName, "/", "_") moduleName = strings.ReplaceAll(moduleName, "/", "_")
moduleName = strings.ReplaceAll(moduleName, ".", "_") moduleName = strings.ReplaceAll(moduleName, ".", "_")
}
moduleName = fmt.Sprintf("%s_", moduleName) moduleName = fmt.Sprintf("%s_", moduleName)
return return
} }


@@ -8,9 +8,9 @@ import (
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
"akvorado/helpers" "akvorado/common/helpers"
"akvorado/reporter/logger" "akvorado/common/reporter/logger"
"akvorado/reporter/metrics" "akvorado/common/reporter/metrics"
) )
func TestNew(t *testing.T) { func TestNew(t *testing.T) {
@@ -64,12 +64,12 @@ func TestNew(t *testing.T) {
gotFiltered = append(gotFiltered, line) gotFiltered = append(gotFiltered, line)
} }
expected := []string{ expected := []string{
"# HELP akvorado_reporter_metrics_test_counter1 Some counter", "# HELP akvorado_common_reporter_metrics_test_counter1 Some counter",
"# TYPE akvorado_reporter_metrics_test_counter1 counter", "# TYPE akvorado_common_reporter_metrics_test_counter1 counter",
"akvorado_reporter_metrics_test_counter1 18", "akvorado_common_reporter_metrics_test_counter1 18",
"# HELP akvorado_reporter_metrics_test_gauge1 Some gauge", "# HELP akvorado_common_reporter_metrics_test_gauge1 Some gauge",
"# TYPE akvorado_reporter_metrics_test_gauge1 gauge", "# TYPE akvorado_common_reporter_metrics_test_gauge1 gauge",
"akvorado_reporter_metrics_test_gauge1 4", "akvorado_common_reporter_metrics_test_gauge1 4",
"", "",
} }
if diff := helpers.Diff(gotFiltered, expected); diff != "" { if diff := helpers.Diff(gotFiltered, expected); diff != "" {


@@ -6,8 +6,8 @@ import (
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
"akvorado/helpers" "akvorado/common/helpers"
"akvorado/reporter" "akvorado/common/reporter"
) )
func TestMetrics(t *testing.T) { func TestMetrics(t *testing.T) {
@@ -90,7 +90,7 @@ func TestMetrics(t *testing.T) {
summary2.WithLabelValues("value2").Observe(15) summary2.WithLabelValues("value2").Observe(15)
} }
got := r.GetMetrics("akvorado_reporter_test_") got := r.GetMetrics("akvorado_common_reporter_test_")
expected := map[string]string{ expected := map[string]string{
`counter1`: "18", `counter1`: "18",
`counter2`: "1.17", `counter2`: "1.17",
@@ -144,7 +144,7 @@ func TestMetrics(t *testing.T) {
t.Fatalf("metrics (-got, +want):\n%s", diff) t.Fatalf("metrics (-got, +want):\n%s", diff)
} }
got = r.GetMetrics("akvorado_reporter_test_", got = r.GetMetrics("akvorado_common_reporter_test_",
"counter1", "counter2", "counter3") "counter1", "counter2", "counter3")
expected = map[string]string{ expected = map[string]string{
`counter1`: "18", `counter1`: "18",
@@ -183,7 +183,7 @@ func TestMetricCollector(t *testing.T) {
m.metric2 = r.MetricDesc("metric2", "Custom metric 2", nil) m.metric2 = r.MetricDesc("metric2", "Custom metric 2", nil)
r.MetricCollector(m) r.MetricCollector(m)
got := r.GetMetrics("akvorado_reporter_test_") got := r.GetMetrics("akvorado_common_reporter_test_")
expected := map[string]string{ expected := map[string]string{
`metric1`: "18", `metric1`: "18",
`metric2`: "30", `metric2`: "30",


@@ -6,8 +6,8 @@ package reporter
 import (
 "sync"
-"akvorado/reporter/logger"
-"akvorado/reporter/metrics"
+"akvorado/common/reporter/logger"
+"akvorado/common/reporter/metrics"
 )
 // Reporter contains the state for a reporter. It also supports the


@@ -94,9 +94,9 @@ func (pc Call) SourceFile(withLine bool) string {
 var (
 ownPackageCall = Callers()[0]
-ownPackageName = strings.SplitN(ownPackageCall.FunctionName(), ".", 2)[0] // akvorado/reporter/stack
-parentPackageName = ownPackageName[0:strings.LastIndex(ownPackageName, "/")] // akvorado/reporter
+ownPackageName = strings.SplitN(ownPackageCall.FunctionName(), ".", 2)[0] // akvorado/common/reporter/stack
+parentPackageName = ownPackageName[0:strings.LastIndex(ownPackageName, "/")] // akvorado/common/reporter
 // ModuleName is the name of the current module. This can be used to prefix stuff.
-ModuleName = parentPackageName[0:strings.LastIndex(parentPackageName, "/")] // akvorado
+ModuleName = strings.TrimSuffix(parentPackageName[0:strings.LastIndex(parentPackageName, "/")], "/common") // akvorado
 )


@@ -4,8 +4,8 @@ import (
"strings" "strings"
"testing" "testing"
"akvorado/helpers" "akvorado/common/helpers"
"akvorado/reporter/stack" "akvorado/common/reporter/stack"
) )
func TestSourceFile(t *testing.T) { func TestSourceFile(t *testing.T) {
@@ -15,7 +15,7 @@ func TestSourceFile(t *testing.T) {
got = append(got, caller.SourceFile(false)) got = append(got, caller.SourceFile(false))
} }
expected := []string{ expected := []string{
"akvorado/reporter/stack/root_test.go", "akvorado/common/reporter/stack/root_test.go",
"testing/testing.go", "testing/testing.go",
} }
if diff := helpers.Diff(got, expected); diff != "" { if diff := helpers.Diff(got, expected); diff != "" {
@@ -30,7 +30,7 @@ func TestFunctionName(t *testing.T) {
got = append(got, caller.FunctionName()) got = append(got, caller.FunctionName())
} }
expected := []string{ expected := []string{
"akvorado/reporter/stack_test.TestFunctionName", "akvorado/common/reporter/stack_test.TestFunctionName",
"testing.tRunner", "testing.tRunner",
} }
if diff := helpers.Diff(got, expected); diff != "" { if diff := helpers.Diff(got, expected); diff != "" {


@@ -1,6 +1,8 @@
 package clickhouse
+import "akvorado/common/kafka"
-// Configuration describes the configuration for the ClickHouse component.
+// Configuration describes the configuration for the ClickHouse configurator.
 type Configuration struct {
 // Servers define the list of clickhouse servers to connect to (with ports)
 Servers []string
@@ -10,13 +12,19 @@ type Configuration struct {
 Username string
 // Password defines the password to use for authentication
 Password string
+// Kafka describes how to connect to Kafka
+Kafka kafka.Configuration `yaml:"-"`
+// KafkaThreads tells how many threads to use to poll data from Kafka
+KafkaThreads int
 // AkvoradoURL allows one to override URL to reach Akvorado from Clickhouse
 AkvoradoURL string
 }
-// DefaultConfiguration represents the default configuration for the ClickHouse component.
+// DefaultConfiguration represents the default configuration for the ClickHouse configurator.
 var DefaultConfiguration = Configuration{
 Servers: []string{}, // No clickhouse by default
 Database: "default",
 Username: "default",
+Kafka: kafka.DefaultConfiguration,
+KafkaThreads: 1,
 }

(one file omitted: too large to render)


@@ -44,7 +44,7 @@ SETTINGS
 kafka_broker_list = '{{ .KafkaBrokers }}',
 kafka_topic_list = '{{ .KafkaTopic }}-v{{ $version }}',
 kafka_group_name = 'clickhouse',
-kafka_num_consumers = {{ .KafkaPartitions }},
+kafka_num_consumers = {{ .KafkaThreads }},
 kafka_thread_per_consumer = 1,
 kafka_format = 'Protobuf',
 kafka_schema = 'flow-{{ $version }}.proto:FlowMessage'


@@ -6,7 +6,7 @@ CREATE DICTIONARY protocols (
 PRIMARY KEY proto
 LAYOUT(HASHED())
 SOURCE (HTTP(
-url '{{ .BaseURL }}/api/v0/clickhouse/protocols.csv'
+url '{{ .BaseURL }}/api/v0/configure/clickhouse/protocols.csv'
 format 'CSVWithNames'
 ))
LIFETIME(3600) LIFETIME(3600)


@@ -5,7 +5,7 @@ CREATE DICTIONARY asns (
 PRIMARY KEY asn
 LAYOUT(HASHED())
 SOURCE (HTTP(
-url '{{ .BaseURL }}/api/v0/clickhouse/asns.csv'
+url '{{ .BaseURL }}/api/v0/configure/clickhouse/asns.csv'
 format 'CSVWithNames'
 ))
LIFETIME(3600) LIFETIME(3600)


@@ -44,7 +44,7 @@ SETTINGS
 kafka_broker_list = '{{ .KafkaBrokers }}',
 kafka_topic_list = '{{ .KafkaTopic }}-v{{ $version }}',
 kafka_group_name = 'clickhouse',
-kafka_num_consumers = {{ .KafkaPartitions }},
+kafka_num_consumers = {{ .KafkaThreads }},
 kafka_thread_per_consumer = 1,
 kafka_format = 'Protobuf',
 kafka_schema = 'flow-{{ $version }}.proto:FlowMessage'
kafka_schema = 'flow-{{ $version }}.proto:FlowMessage' kafka_schema = 'flow-{{ $version }}.proto:FlowMessage'


@@ -7,11 +7,10 @@ import (
"github.com/ClickHouse/clickhouse-go/v2" "github.com/ClickHouse/clickhouse-go/v2"
"akvorado/daemon" "akvorado/common/daemon"
"akvorado/helpers" "akvorado/common/helpers"
"akvorado/http" "akvorado/common/http"
"akvorado/kafka" "akvorado/common/reporter"
"akvorado/reporter"
) )
func TestRealClickHouse(t *testing.T) { func TestRealClickHouse(t *testing.T) {
@@ -20,10 +19,8 @@ func TestRealClickHouse(t *testing.T) {
configuration := DefaultConfiguration configuration := DefaultConfiguration
configuration.Servers = []string{chServer} configuration.Servers = []string{chServer}
r := reporter.NewMock(t) r := reporter.NewMock(t)
kafka, _ := kafka.NewMock(t, r, kafka.DefaultConfiguration)
ch, err := New(r, configuration, Dependencies{ ch, err := New(r, configuration, Dependencies{
Daemon: daemon.NewMock(t), Daemon: daemon.NewMock(t),
Kafka: kafka,
HTTP: http.NewMock(t, r), HTTP: http.NewMock(t, r),
}) })
if err != nil { if err != nil {


@@ -7,7 +7,7 @@ import (
"text/template" "text/template"
"time" "time"
"akvorado/flow" "akvorado/inlet/flow"
) )
var ( var (
@@ -39,7 +39,7 @@ func (c *Component) addHandlerEmbedded(url string, path string) {
// registerHTTPHandler register some handlers that will be useful for // registerHTTPHandler register some handlers that will be useful for
// ClickHouse // ClickHouse
func (c *Component) registerHTTPHandlers() error { func (c *Component) registerHTTPHandlers() error {
c.d.HTTP.AddHandler("/api/v0/clickhouse/init.sh", c.d.HTTP.AddHandler("/api/v0/configure/clickhouse/init.sh",
http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "text/x-shellscript") w.Header().Set("Content-Type", "text/x-shellscript")
initShTemplate.Execute(w, flow.VersionedSchemas) initShTemplate.Execute(w, flow.VersionedSchemas)
@@ -53,7 +53,7 @@ func (c *Component) registerHTTPHandlers() error {
if entry.IsDir() { if entry.IsDir() {
continue continue
} }
url := fmt.Sprintf("/api/v0/clickhouse/%s", entry.Name()) url := fmt.Sprintf("/api/v0/configure/clickhouse/%s", entry.Name())
path := fmt.Sprintf("data/%s", entry.Name()) path := fmt.Sprintf("data/%s", entry.Name())
c.addHandlerEmbedded(url, path) c.addHandlerEmbedded(url, path)
} }


@@ -3,19 +3,16 @@ package clickhouse
 import (
 "testing"
-"akvorado/daemon"
-"akvorado/helpers"
-"akvorado/http"
-"akvorado/kafka"
-"akvorado/reporter"
+"akvorado/common/daemon"
+"akvorado/common/helpers"
+"akvorado/common/http"
+"akvorado/common/reporter"
 )
 func TestHTTPEndpoints(t *testing.T) {
 r := reporter.NewMock(t)
-kafka, _ := kafka.NewMock(t, r, kafka.DefaultConfiguration)
 c, err := New(r, DefaultConfiguration, Dependencies{
 Daemon: daemon.NewMock(t),
-Kafka: kafka,
 HTTP: http.NewMock(t, r),
 })
 if err != nil {
@@ -24,7 +21,7 @@ func TestHTTPEndpoints(t *testing.T) {
 cases := helpers.HTTPEndpointCases{
 {
-URL: "/api/v0/clickhouse/protocols.csv",
+URL: "/api/v0/configure/clickhouse/protocols.csv",
 ContentType: "text/csv; charset=utf-8",
 FirstLines: []string{
 `proto,name,description`,
@@ -32,14 +29,14 @@
 `1,ICMP,Internet Control Message`,
 },
 }, {
-URL: "/api/v0/clickhouse/asns.csv",
+URL: "/api/v0/configure/clickhouse/asns.csv",
 ContentType: "text/csv; charset=utf-8",
 FirstLines: []string{
 "asn,name",
 "1,LVLT-1",
 },
 }, {
-URL: "/api/v0/clickhouse/init.sh",
+URL: "/api/v0/configure/clickhouse/init.sh",
 ContentType: "text/x-shellscript",
 FirstLines: []string{
 `#!/bin/sh`,

View File

@@ -1,7 +1,8 @@
package clickhouse package clickhouse
import ( import (
"akvorado/reporter" "akvorado/common/reporter"
"akvorado/inlet/flow"
"embed" "embed"
"errors" "errors"
"fmt" "fmt"
@@ -44,15 +45,10 @@ func (c *Component) migrateDatabaseOnServer(server string) error {
return err return err
} }
} }
kafkaConf := c.d.Kafka.GetConfiguration()
partitions := 1
if kafkaConf.TopicConfiguration != nil && kafkaConf.TopicConfiguration.NumPartitions > 0 {
partitions = int(kafkaConf.TopicConfiguration.NumPartitions)
}
data := map[string]string{ data := map[string]string{
"KafkaBrokers": strings.Join(kafkaConf.Brokers, ","), "KafkaBrokers": strings.Join(c.config.Kafka.Brokers, ","),
"KafkaTopic": kafkaConf.Topic, "KafkaTopic": fmt.Sprintf("%s-v%d", c.config.Kafka.Topic, flow.CurrentSchemaVersion),
"KafkaPartitions": strconv.Itoa(partitions), "KafkaThreads": strconv.Itoa(c.config.KafkaThreads),
"BaseURL": baseURL, "BaseURL": baseURL,
} }

View File

@@ -5,20 +5,17 @@ import (
"strings" "strings"
"testing" "testing"
"akvorado/daemon" "akvorado/common/daemon"
"akvorado/helpers" "akvorado/common/helpers"
"akvorado/http" "akvorado/common/http"
"akvorado/kafka" "akvorado/common/reporter"
"akvorado/reporter"
) )
func TestGetHTTPBaseURL(t *testing.T) { func TestGetHTTPBaseURL(t *testing.T) {
r := reporter.NewMock(t) r := reporter.NewMock(t)
kafka, _ := kafka.NewMock(t, r, kafka.DefaultConfiguration)
http := http.NewMock(t, r) http := http.NewMock(t, r)
c, err := New(r, DefaultConfiguration, Dependencies{ c, err := New(r, DefaultConfiguration, Dependencies{
Daemon: daemon.NewMock(t), Daemon: daemon.NewMock(t),
Kafka: kafka,
HTTP: http, HTTP: http,
}) })
if err != nil { if err != nil {

View File

@@ -1,4 +1,4 @@
// Package clickhouse handles housekeeping for the ClickHouse database. // Package clickhouse handles configuration of the ClickHouse database.
package clickhouse package clickhouse
import ( import (
@@ -6,10 +6,9 @@ import (
"gopkg.in/tomb.v2" "gopkg.in/tomb.v2"
"akvorado/daemon" "akvorado/common/daemon"
"akvorado/http" "akvorado/common/http"
"akvorado/kafka" "akvorado/common/reporter"
"akvorado/reporter"
) )
// Component represents the Kafka exporter. // Component represents the ClickHouse configurator.
@@ -25,7 +24,6 @@ type Component struct {
// Dependencies define the dependencies of the Kafka exporter. // Dependencies define the dependencies of the ClickHouse configurator.
type Dependencies struct { type Dependencies struct {
Daemon daemon.Component Daemon daemon.Component
Kafka *kafka.Component
HTTP *http.Component HTTP *http.Component
} }
@@ -40,15 +38,12 @@ func New(reporter *reporter.Reporter, configuration Configuration, dependencies
if err := c.registerHTTPHandlers(); err != nil { if err := c.registerHTTPHandlers(); err != nil {
return nil, err return nil, err
} }
c.d.Daemon.Track(&c.t, "clickhouse") c.d.Daemon.Track(&c.t, "configure/clickhouse")
return &c, nil return &c, nil
} }
// Start the ClickHouse component // Start the ClickHouse component
func (c *Component) Start() error { func (c *Component) Start() error {
if len(c.config.Servers) == 0 {
c.r.Warn().Msg("no clickhouse configuration, skipping database management")
}
c.r.Info().Msg("starting ClickHouse component") c.r.Info().Msg("starting ClickHouse component")
if err := c.migrateDatabase(); err != nil { if err := c.migrateDatabase(); err != nil {
c.r.Warn().Msg("database migration failed, continue in the background") c.r.Warn().Msg("database migration failed, continue in the background")
@@ -78,9 +73,6 @@ func (c *Component) Start() error {
// Stop stops the ClickHouse component // Stop stops the ClickHouse component
func (c *Component) Stop() error { func (c *Component) Stop() error {
if len(c.config.Servers) == 0 {
return nil
}
c.r.Info().Msg("stopping ClickHouse component") c.r.Info().Msg("stopping ClickHouse component")
defer c.r.Info().Msg("ClickHouse component stopped") defer c.r.Info().Msg("ClickHouse component stopped")
c.t.Kill(nil) c.t.Kill(nil)

View File

@@ -7,7 +7,7 @@ import (
"sort" "sort"
"testing" "testing"
"akvorado/helpers" "akvorado/common/helpers"
) )
//go:embed testdata //go:embed testdata

30
configure/kafka/config.go Normal file
View File

@@ -0,0 +1,30 @@
package kafka
import "akvorado/common/kafka"
// Configuration describes the configuration for the Kafka configurator.
type Configuration struct {
// Connect describes how to connect to Kafka.
Connect kafka.Configuration
// TopicConfiguration describes the topic configuration.
TopicConfiguration TopicConfiguration
}
// TopicConfiguration describes the configuration for a topic
type TopicConfiguration struct {
// NumPartitions tells how many partitions should be used for the topic.
NumPartitions int32
// ReplicationFactor tells the replication factor for the topic.
ReplicationFactor int16
// ConfigEntries is a map to specify the topic overrides. Overrides not listed here will be removed.
ConfigEntries map[string]*string
}
// DefaultConfiguration represents the default configuration for the Kafka configurator.
var DefaultConfiguration = Configuration{
Connect: kafka.DefaultConfiguration,
TopicConfiguration: TopicConfiguration{
NumPartitions: 1,
ReplicationFactor: 1,
},
}

View File

@@ -0,0 +1,91 @@
package kafka
import (
"fmt"
"math/rand"
"testing"
"time"
"github.com/Shopify/sarama"
"akvorado/common/helpers"
"akvorado/common/kafka"
"akvorado/common/reporter"
"akvorado/inlet/flow"
)
func TestTopicCreation(t *testing.T) {
client, brokers := kafka.SetupKafkaBroker(t)
rand.Seed(time.Now().UnixMicro())
topicName := fmt.Sprintf("test-topic-%d", rand.Int())
expectedTopicName := fmt.Sprintf("%s-v%d", topicName, flow.CurrentSchemaVersion)
retentionMs := "76548"
segmentBytes := "107374184"
segmentBytes2 := "10737184"
cleanupPolicy := "delete"
cases := []struct {
Name string
ConfigEntries map[string]*string
}{
{
Name: "Set initial config",
ConfigEntries: map[string]*string{
"retention.ms": &retentionMs,
"segment.bytes": &segmentBytes,
},
}, {
Name: "Alter initial config",
ConfigEntries: map[string]*string{
"retention.ms": &retentionMs,
"segment.bytes": &segmentBytes2,
"cleanup.policy": &cleanupPolicy,
},
}, {
Name: "Remove item",
ConfigEntries: map[string]*string{
"retention.ms": &retentionMs,
"segment.bytes": &segmentBytes2,
},
},
}
for _, tc := range cases {
t.Run(tc.Name, func(t *testing.T) {
configuration := DefaultConfiguration
configuration.Connect.Topic = topicName
configuration.TopicConfiguration = TopicConfiguration{
NumPartitions: 1,
ReplicationFactor: 1,
ConfigEntries: tc.ConfigEntries,
}
configuration.Connect.Brokers = brokers
configuration.Connect.Version = kafka.Version(sarama.V2_8_1_0)
c, err := New(reporter.NewMock(t), configuration)
if err != nil {
t.Fatalf("New() error:\n%+v", err)
}
if err := c.Start(); err != nil {
t.Fatalf("Start() error:\n%+v", err)
}
adminClient, err := sarama.NewClusterAdminFromClient(client)
if err != nil {
t.Fatalf("NewClusterAdmin() error:\n%+v", err)
}
topics, err := adminClient.ListTopics()
if err != nil {
t.Fatalf("ListTopics() error:\n%+v", err)
}
topic, ok := topics[expectedTopicName]
if !ok {
t.Fatal("ListTopics() did not find the topic")
}
if diff := helpers.Diff(topic.ConfigEntries, tc.ConfigEntries); diff != "" {
t.Fatalf("ListTopics() (-got, +want):\n%s", diff)
}
})
}
}

95
configure/kafka/root.go Normal file
View File

@@ -0,0 +1,95 @@
package kafka
import (
"fmt"
"strings"
"github.com/Shopify/sarama"
"akvorado/common/kafka"
"akvorado/common/reporter"
"akvorado/inlet/flow"
)
// Component represents the Kafka configurator.
type Component struct {
r *reporter.Reporter
config Configuration
kafkaConfig *sarama.Config
kafkaTopic string
}
// New creates a new Kafka configurator.
func New(r *reporter.Reporter, config Configuration) (*Component, error) {
kafkaConfig := sarama.NewConfig()
kafkaConfig.Version = sarama.KafkaVersion(config.Connect.Version)
if err := kafkaConfig.Validate(); err != nil {
return nil, fmt.Errorf("cannot validate Kafka configuration: %w", err)
}
return &Component{
r: r,
config: config,
kafkaConfig: kafkaConfig,
kafkaTopic: fmt.Sprintf("%s-v%d", config.Connect.Topic, flow.CurrentSchemaVersion),
}, nil
}
// Start starts Kafka configuration.
func (c *Component) Start() error {
c.r.Info().Msg("starting Kafka component")
kafka.GlobalKafkaLogger.Register(c.r)
defer func() {
kafka.GlobalKafkaLogger.Unregister()
c.r.Info().Msg("Kafka component stopped")
}()
// Create topic
client, err := sarama.NewClusterAdmin(c.config.Connect.Brokers, c.kafkaConfig)
if err != nil {
c.r.Err(err).
Str("brokers", strings.Join(c.config.Connect.Brokers, ",")).
Msg("unable to get admin client for topic creation")
return fmt.Errorf("unable to get admin client for topic creation: %w", err)
}
defer client.Close()
l := c.r.With().
Str("brokers", strings.Join(c.config.Connect.Brokers, ",")).
Str("topic", c.kafkaTopic).
Logger()
topics, err := client.ListTopics()
if err != nil {
l.Err(err).Msg("unable to get metadata for topics")
return fmt.Errorf("unable to get metadata for topics: %w", err)
}
if topic, ok := topics[c.kafkaTopic]; !ok {
if err := client.CreateTopic(c.kafkaTopic,
&sarama.TopicDetail{
NumPartitions: c.config.TopicConfiguration.NumPartitions,
ReplicationFactor: c.config.TopicConfiguration.ReplicationFactor,
ConfigEntries: c.config.TopicConfiguration.ConfigEntries,
}, false); err != nil {
l.Err(err).Msg("unable to create topic")
return fmt.Errorf("unable to create topic %q: %w", c.kafkaTopic, err)
}
l.Info().Msg("topic created")
} else {
if topic.NumPartitions != c.config.TopicConfiguration.NumPartitions {
l.Warn().Msgf("mismatch for number of partitions: got %d, want %d",
topic.NumPartitions, c.config.TopicConfiguration.NumPartitions)
}
if topic.ReplicationFactor != c.config.TopicConfiguration.ReplicationFactor {
l.Warn().Msgf("mismatch for replication factor: got %d, want %d",
topic.ReplicationFactor, c.config.TopicConfiguration.ReplicationFactor)
}
if err := client.AlterConfig(sarama.TopicResource, c.kafkaTopic, c.config.TopicConfiguration.ConfigEntries, false); err != nil {
l.Err(err).Msg("unable to set topic configuration")
return fmt.Errorf("unable to set topic configuration for %q: %w",
c.kafkaTopic, err)
}
l.Info().Msg("topic updated")
}
return nil
}

View File

@@ -1,4 +1,4 @@
package web package console
import ( import (
"embed" "embed"

View File

@@ -1,12 +1,13 @@
package web package console
import ( import (
"fmt" "fmt"
netHTTP "net/http" netHTTP "net/http"
"testing" "testing"
"akvorado/http" "akvorado/common/daemon"
"akvorado/reporter" "akvorado/common/http"
"akvorado/common/reporter"
) )
func TestServeAssets(t *testing.T) { func TestServeAssets(t *testing.T) {
@@ -24,7 +25,10 @@ func TestServeAssets(t *testing.T) {
h := http.NewMock(t, r) h := http.NewMock(t, r)
_, err := New(r, Configuration{ _, err := New(r, Configuration{
ServeLiveFS: live, ServeLiveFS: live,
}, Dependencies{HTTP: h}) }, Dependencies{
HTTP: h,
Daemon: daemon.NewMock(t),
})
if err != nil { if err != nil {
t.Fatalf("New() error:\n%+v", err) t.Fatalf("New() error:\n%+v", err)
} }

View File

@@ -1,6 +1,6 @@
package web package console
// Configuration describes the configuration for the web component. // Configuration describes the configuration for the console component.
type Configuration struct { type Configuration struct {
// GrafanaURL is the URL to access Grafana. // GrafanaURL is the URL to access Grafana.
GrafanaURL string GrafanaURL string
@@ -8,5 +8,5 @@ type Configuration struct {
ServeLiveFS bool ServeLiveFS bool
} }
// DefaultConfiguration represents the default configuration for the web exporter. // DefaultConfiguration represents the default configuration for the console component.
var DefaultConfiguration = Configuration{} var DefaultConfiguration = Configuration{}

View File

(binary image assets updated; large file diffs suppressed)

View File

@@ -0,0 +1,48 @@
![Akvorado logo](../assets/images/akvorado.svg)
# Introduction
*Akvorado*[^name] is a flow collector, hydrator, and exporter. It
receives flows, enriches them with data such as interface names and
countries, and exports them to Kafka.
[^name]: [Akvorado][] means "water wheel" in Esperanto.
[Akvorado]: https://eo.wikipedia.org/wiki/Akvorado
## Big picture
![General design](../assets/images/design.svg)
*Akvorado* is split into three services:
- The **inlet service** receives flows from exporters. It polls each
exporter using SNMP to get the *system name*, the *interface names*,
*descriptions* and *speeds*. It queries GeoIP databases to get the
*country* and the *AS number*. It applies rules to classify
exporters into *groups*. Interface rules attach to each interface a
*boundary* (external or internal), a *network provider* and a
*connectivity type* (PNI, IX, transit). The flow is exported to
*Kafka*, serialized using *Protobuf*.
- The **configuration service** configures the external components. It
creates the *Kafka topic* and configures *ClickHouse* to receive the
flows from Kafka.
- The **console service** exposes a web interface to explore and
manipulate the flows stored inside the ClickHouse database.
## Serialized flow schemas
Flows sent to Kafka are encoded with a versioned schema, described in
the `flow-*.proto` files. For each version of the schema, a different
Kafka topic is used. For example, the `flows-v1` topic receives
serialized flows using the first version of the schema. The inlet
service exports the schemas as well as the current version with its
HTTP service, via the `/api/v0/inlet/schemas.json` endpoint.
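As noted in the configuration documentation, each flow is written in the protobuf length-delimited format: a varint byte length followed by the serialized `FlowMessage`. The Go sketch below is an illustration only, not code from this repository; it frames and reads placeholder byte payloads the way a consumer of these topics would, before handing each message to `proto.Unmarshal` with the generated schema types.

```go
package main

import (
	"bufio"
	"bytes"
	"encoding/binary"
	"fmt"
	"io"
)

// readDelimited reads one length-delimited message: a varint length
// prefix followed by that many bytes of serialized protobuf.
func readDelimited(r *bufio.Reader) ([]byte, error) {
	size, err := binary.ReadUvarint(r)
	if err != nil {
		return nil, err
	}
	payload := make([]byte, size)
	if _, err := io.ReadFull(r, payload); err != nil {
		return nil, err
	}
	return payload, nil
}

func main() {
	// Build a fake stream of two length-delimited payloads. Real
	// payloads would be serialized FlowMessage protobufs.
	var stream bytes.Buffer
	for _, payload := range [][]byte{[]byte("first"), []byte("second")} {
		var prefix [binary.MaxVarintLen64]byte
		n := binary.PutUvarint(prefix[:], uint64(len(payload)))
		stream.Write(prefix[:n])
		stream.Write(payload)
	}

	r := bufio.NewReader(&stream)
	for {
		payload, err := readDelimited(r)
		if err == io.EOF {
			break
		}
		if err != nil {
			panic(err)
		}
		// A real consumer would call proto.Unmarshal(payload, ...) here.
		fmt.Printf("read %d-byte message\n", len(payload))
	}
}
```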
## ClickHouse database schemas
Flows are stored in a ClickHouse database using a single table
`flows`. The configuration service keeps the table schema up-to-date.
You can check the schema using `SHOW CREATE TABLE flows`.
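As a minimal sketch, the schema can also be retrieved programmatically with the same `clickhouse-go/v2` driver this codebase already depends on; the server address, database, and credentials below are placeholders to adjust to your setup.

```go
package main

import (
	"context"
	"fmt"

	"github.com/ClickHouse/clickhouse-go/v2"
)

func main() {
	// Placeholder address and database; use your own ClickHouse setup.
	conn, err := clickhouse.Open(&clickhouse.Options{
		Addr: []string{"127.0.0.1:9000"},
		Auth: clickhouse.Auth{Database: "default"},
	})
	if err != nil {
		panic(err)
	}
	defer conn.Close()

	// Fetch the CREATE TABLE statement for the flows table.
	var schema string
	if err := conn.QueryRow(context.Background(), "SHOW CREATE TABLE flows").Scan(&schema); err != nil {
		panic(err)
	}
	fmt.Println(schema)
}
```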

View File

@@ -1,9 +1,14 @@
# Installation # Installation
*Akvorado* is written in Go. It ships its three services in a
single binary or Docker image.
## Compilation from source ## Compilation from source
*Akvorado* is written in Go. You need a proper installation of *Go*. You need a proper installation of [Go](https://go.dev/doc/install)
Then, simply type: (1.17+) as well as
[Yarn](https://yarnpkg.com/getting-started/install). Then, simply
type:
```console ```console
# make # make
@@ -31,7 +36,8 @@ The following `make` targets are available:
## Docker image ## Docker image
It is also possible to build a Docker image with: It is also possible to build a Docker image without installing
anything other than [Docker](https://docs.docker.com/get-docker):
```console ```console
# docker build . -t akvorado:main # docker build . -t akvorado:main

View File

@@ -1,19 +1,22 @@
# Configuration # Configuration
*Akvorado* can be configured through a YAML file. You can get the Each *Akvorado* service is configured through a YAML file. You can get
default configuration with `./akvorado --dump --check`. Durations can the default configuration with `./akvorado SERVICE --dump --check`.
be written in seconds or using strings like `10h20m`. Durations can be written in seconds or using strings like `10h20m`.
It is also possible to override configuration settings using It is also possible to override configuration settings using
environment variables. You need to remove any `-` from key names and environment variables. You need to remove any `-` from key names and
use `_` to handle nesting. Then, put `AKVORADO_` as a prefix. For use `_` to handle nesting. Then, put `AKVORADO_SERVICE_` as a prefix
example, let's consider the following configuration file: where `SERVICE` should be replaced by the service name (`inlet`,
`configure` or `console`). For example, let's consider the following
configuration file for the *inlet* service:
```yaml ```yaml
http:
listen: 127.0.0.1:8081
kafka: kafka:
connect:
topic: test-topic topic: test-topic
topic-configuration:
num-partitions: 1
brokers: brokers:
- 192.0.2.1:9092 - 192.0.2.1:9092
- 192.0.2.2:9092 - 192.0.2.2:9092
@@ -22,12 +25,20 @@ kafka:
It can be translated to: It can be translated to:
```sh ```sh
AKVORADO_KAFKA_TOPIC=test-topic AKVORADO_INLET_HTTP_LISTEN=127.0.0.1:8081
AKVORADO_KAFKA_TOPICCONFIGURATION_NUMPARTITIONS=1 AKVORADO_INLET_KAFKA_CONNECT_TOPIC=test-topic
AKVORADO_KAFKA_BROKERS=192.0.2.1:9092,192.0.2.2:9092 AKVORADO_INLET_KAFKA_CONNECT_BROKERS=192.0.2.1:9092,192.0.2.2:9092
``` ```
## Flow Each service is split into several functional components. Each of them
gets a section of the configuration file matching its name.
## Inlet service
The main components of the inlet service are `flow`, `kafka`, and
`core`.
### Flow
The flow component handles incoming flows. It only accepts the The flow component handles incoming flows. It only accepts the
`inputs` key to define the list of inputs to receive incoming flows. `inputs` key to define the list of inputs to receive incoming flows.
@@ -70,7 +81,7 @@ flow:
Without configuration, *Akvorado* will listen for incoming Without configuration, *Akvorado* will listen for incoming
Netflow/IPFIX flows on port 2055. Netflow/IPFIX flows on port 2055.
## Kafka ### Kafka
Received flows are exported to a Kafka topic using the [protocol Received flows are exported to a Kafka topic using the [protocol
buffers format][]. The definition file is `flow/flow-*.proto`. Each buffers format][]. The definition file is `flow/flow-*.proto`. Each
@@ -81,12 +92,11 @@ flow is written in the [length-delimited format][].
The following keys are accepted: The following keys are accepted:
- `topic` tells which topic to use to write messages - `connect` describes how to connect to the *Kafka* topic. It contains
- `topic-configuration` contains the topic configuration three keys: `topic` defines the base topic name, `brokers` specifies
- `brokers` specifies the list of brokers to use to bootstrap the the list of brokers to use to bootstrap the connection to the Kafka
connection to the Kafka cluster cluster and `version` tells which minimal version of Kafka to
- `version` tells which minimal version of Kafka to expect expect.
- `usetls` tells if we should use TLS to connection (authentication is not supported)
- `flush-interval` defines the maximum flush interval to send received - `flush-interval` defines the maximum flush interval to send received
flows to Kafka flows to Kafka
- `flush-bytes` defines the maximum number of bytes to store before - `flush-bytes` defines the maximum number of bytes to store before
@@ -101,37 +111,24 @@ The following keys are accepted:
The topic name is suffixed by the version of the schema. For example, The topic name is suffixed by the version of the schema. For example,
if the configured topic is `flows` and the current schema version is if the configured topic is `flows` and the current schema version is
0, the topic used to send received flows will be `flows-v0`. 1, the topic used to send received flows will be `flows-v1`.
If no topic configuration is provided, the topic should already exist
in Kafka. If a configuration is provided, the topic is created if it
does not exist or updated if it does. Currently, updating the number
of partitions or the replication factor is not possible. The following
keys are accepted for the topic configuration:
- `num-partitions` for the number of partitions
- `replication-factor` for the replication factor
- `config-entries` is a mapping from configuration names to their values
For example: For example:
```yaml ```yaml
kafka: kafka:
connect:
topic: test-topic topic: test-topic
topic-configuration: brokers: 10.167.19.3:9092,10.167.19.4:9092,10.167.19.5:9092
num-partitions: 1 compression-codec: zstd
replication-factor: 1
config-entries:
segment.bytes: 1073741824
retention.ms: 86400000
cleanup.policy: delete
``` ```
## Core ### Core
The core component adds some information using the GeoIP databases and The core component queries the `geoip` and the `snmp` components to
the SNMP poller, and pushes the resulting flow to Kafka. It is also able hydrate the flows with additional information. It also classifies
to classify exporters and interfaces into groups. exporters and interfaces into groups with a set of classification
rules.
The following configuration keys are accepted: The following configuration keys are accepted:
@@ -195,7 +192,7 @@ ClassifyProviderRegex(Interface.Description, "^Transit: ([^ ]+)", "$1")
[expr]: https://github.com/antonmedv/expr/blob/master/docs/Language-Definition.md [expr]: https://github.com/antonmedv/expr/blob/master/docs/Language-Definition.md
[from Go]: https://pkg.go.dev/regexp#Regexp.Expand [from Go]: https://pkg.go.dev/regexp#Regexp.Expand
## GeoIP ### GeoIP
The GeoIP component adds source and destination country, as well as The GeoIP component adds source and destination country, as well as
the AS number of the source and destination IP if they are not present the AS number of the source and destination IP if they are not present
@@ -211,7 +208,7 @@ is provided, the component is inactive. It accepts the following keys:
If the files are updated while *Akvorado* is running, they are If the files are updated while *Akvorado* is running, they are
automatically refreshed. automatically refreshed.
## SNMP ### SNMP
Flows only include interface indexes. To associate them with an Flows only include interface indexes. To associate them with an
interface name and description, SNMP is used to poll the exporter interface name and description, SNMP is used to poll the exporter
@@ -236,7 +233,7 @@ As flows missing interface information are discarded, persisting the
cache is useful to quickly be able to handle incoming flows. By cache is useful to quickly be able to handle incoming flows. By
default, no persistent cache is configured. default, no persistent cache is configured.
## HTTP ### HTTP
The builtin HTTP server serves various pages. Its configuration The builtin HTTP server serves various pages. Its configuration
supports only the `listen` key to specify the address and port to supports only the `listen` key to specify the address and port to
@@ -247,32 +244,72 @@ http:
listen: 0.0.0.0:8000 listen: 0.0.0.0:8000
``` ```
## Web ### Reporting
The web interface presents the landing page of *Akvorado*. It also Reporting encompasses logging and metrics. Currently, as *Akvorado* is
embeds the documentation. It accepts only the following key: expected to be run inside Docker, logging is done on the standard
output and is not configurable. As for metrics, they are reported by
the HTTP component on the `/api/v0/inlet/metrics` endpoint and there is
nothing to configure either.
- `grafanaurl` to specify the URL to Grafana and exposes it as ## Configuration service
[`/grafana`](/grafana).
The two main components of the configuration service are `clickhouse`
and `kafka`. It also uses the [HTTP](#http) and
[reporting](#reporting) components from the inlet service and accepts
the same configuration settings.
## ClickHouse ### ClickHouse
The ClickHouse component exposes some useful HTTP endpoints to The ClickHouse component exposes some useful HTTP endpoints to
configure a ClickHouse database. Optionally, it will also provision configure a ClickHouse database. It also provisions a ClickHouse
and keep up-to-date a ClickHouse database. In this case, the following database and keeps it up to date. The following keys should be
keys should be provided: provided:
- `servers` defines the list of ClickHouse servers to connect to - `servers` defines the list of ClickHouse servers to connect to
- `username` is the username to use for authentication - `username` is the username to use for authentication
- `password` is the password to use for authentication - `password` is the password to use for authentication
- `database` defines the database to use to create tables - `database` defines the database to use to create tables
- `akvorado-url` defines the URL of Akvorado to be used by ClickHouse (autodetection when not specified) - `akvorado-url` defines the URL of Akvorado to be used by ClickHouse (autodetection when not specified)
- `kafka-threads` defines the number of threads to use to poll Kafka (it should not exceed the number of partitions)
## Reporting ### Kafka
Reporting encompasses logging and metrics. Currently, as *Akvorado* is The Kafka component creates or updates the Kafka topic to receive
expected to be run inside Docker, logging is done on the standard flows. It accepts the following keys:
output and is not configurable. As for metrics, they are reported by
the HTTP component on the `/api/v0/metrics` endpoint and there is - `connect` describes how to connect to the topic. This is the same
nothing to configure either. configuration as for the [inlet service](#kafka): the `topic`,
`brokers`, and `version` keys are accepted.
- `topic-configuration` describes how the topic should be configured.
The following keys are accepted for the topic configuration:
- `num-partitions` for the number of partitions
- `replication-factor` for the replication factor
- `config-entries` is a mapping from configuration names to their values
For example:
```yaml
kafka:
connect:
topic: test-topic
topic-configuration:
num-partitions: 1
replication-factor: 1
config-entries:
segment.bytes: 1073741824
retention.ms: 86400000
cleanup.policy: delete
```
Currently, the configure service won't update the number of partitions
or the replication factor. However, the configuration entries are kept
in sync with the content of the configuration file.
## Console service
The main components of the console service are `http` and `console`.
`http` accepts the [same configuration](#http) as for the inlet
service. The `console` has no configuration.

View File

@@ -0,0 +1,79 @@
# Usage
*Akvorado* uses a subcommand system. Each subcommand comes with its
own set of options. It is possible to get help using `akvorado
--help`. Each service is started using the matching subcommand. When
started from a TTY, a service displays logs in a fancy way. Without a
TTY, logs are output formatted as JSON.
## Common options
Each service accepts a set of common options as flags.
The `--config` option allows providing a configuration file in YAML
format. See the [configuration section](02-configuration.md) for more
information on this file.
The `--check` option will check that the provided configuration is
correct and stop there. The `--dump` option will dump the parsed
configuration, along with the default values. It should be combined
with `--check` if you don't want the service to start.
Each service embeds an HTTP server exposing a few endpoints. All
services expose the following endpoints in addition to the
service-specific endpoints:
- `/api/v0/metrics`: Prometheus metrics
- `/api/v0/version`: *Akvorado* version
- `/api/v0/healthcheck`: are we alive?
Each endpoint is also exposed under the service namespace. The idea is
to be able to expose a unified API for all services under a single
endpoint using an HTTP proxy. For example, the `inlet` service also
exposes its metrics under `/api/v0/inlet/metrics`.
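A minimal sketch of such a proxy, using the Go standard library's reverse proxy; the backend addresses and ports below are invented for the example and should point to wherever each service actually listens.

```go
package main

import (
	"log"
	"net/http"
	"net/http/httputil"
	"net/url"
)

// proxyTo returns a handler that forwards requests to the given backend.
func proxyTo(backend string) http.Handler {
	u, err := url.Parse(backend)
	if err != nil {
		log.Fatal(err)
	}
	return httputil.NewSingleHostReverseProxy(u)
}

func main() {
	// Route each service namespace to a hypothetical backend port.
	mux := http.NewServeMux()
	mux.Handle("/api/v0/inlet/", proxyTo("http://127.0.0.1:8080"))
	mux.Handle("/api/v0/configure/", proxyTo("http://127.0.0.1:8081"))
	mux.Handle("/api/v0/console/", proxyTo("http://127.0.0.1:8082"))
	log.Fatal(http.ListenAndServe(":8000", mux))
}
```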
## Inlet service
`akvorado inlet` starts the inlet service, allowing it to receive and
process flows. The following endpoints are exposed by the HTTP
component embedded into the service:
- `/api/v0/inlet/flows`: stream the received flows
- `/api/v0/inlet/schemas.json`: versioned list of protobuf schemas used to export flows
- `/api/v0/inlet/schemas-X.proto`: protobuf schema for the provided version
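For instance, the schema list can be fetched programmatically; in this hedged Go sketch, the `akvorado` hostname is a placeholder for wherever the inlet service is reachable.

```go
package main

import (
	"fmt"
	"io"
	"net/http"
)

func main() {
	// Placeholder hostname for the inlet service.
	resp, err := http.Get("http://akvorado/api/v0/inlet/schemas.json")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	body, err := io.ReadAll(resp.Body)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(body))
}
```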
## Configure service
`akvorado configure` starts the configure service. It runs as a
service because it exposes an HTTP endpoint for ClickHouse to configure
itself. The Kafka topic is configured at start and does not need the
service to be running.
The following endpoints are exposed for use by ClickHouse:
- `/api/v0/configure/clickhouse/init.sh` contains the schemas in the form of a
script to execute during initialization to get them installed at the
proper location
- `/api/v0/configure/clickhouse/protocols.csv` contains a CSV with the mapping
between protocol numbers and names
- `/api/v0/configure/clickhouse/asns.csv` contains a CSV with the mapping
between AS numbers and organization names
ClickHouse clusters are currently not supported, even though several
servers can be listed in the configuration. These servers are in fact
managed as copies of one another.
*Akvorado* also handles database migration during upgrades. When the
protobuf schema is updated, new Kafka tables should be created, as
well as the associated materialized view. Older tables should be kept
around, notably when upgrades can be rolling (some *akvorado*
instances are still running an older version).
## Console service
`akvorado console` starts the console service. Currently, only this
documentation is accessible through this service.
## Other commands
`akvorado version` displays the version.

View File

@@ -1,11 +1,11 @@
# Troubleshooting # Troubleshooting
*Akvorado* outputs some logs and exposes some counters to help The inlet service outputs some logs and exposes some counters to help
troubleshoot most issues. The first step to check if everything works troubleshoot most issues. The first step to check if everything works
as expected is to request a flow: as expected is to request a flow:
```console ```console
$ curl -s http://akvorado/api/v0/flows\?limit=1 $ curl -s http://akvorado/api/v0/inlet/flows\?limit=1
{ {
"TimeReceived": 1648305235, "TimeReceived": 1648305235,
"SequenceNum": 425385846, "SequenceNum": 425385846,
@@ -18,7 +18,7 @@ If this does not work, be sure to check the logs and the metrics. The
latter can be queried with `curl`: latter can be queried with `curl`:
```console ```console
$ curl -s http://akvorado/api/v0/metrics $ curl -s http://akvorado/api/v0/inlet/metrics
``` ```
## No packets received ## No packets received
@@ -41,7 +41,7 @@ contain information such as:
- `exporter:172.19.162.244 poller breaker open` - `exporter:172.19.162.244 poller breaker open`
- `exporter:172.19.162.244 unable to GET` - `exporter:172.19.162.244 unable to GET`
The `akvorado_snmp_poller_failure_requests` metric would also increase The `akvorado_inlet_snmp_poller_failure_requests` metric would also increase
for the affected exporter. for the affected exporter.
## Dropped packets ## Dropped packets
@@ -56,7 +56,7 @@ The first problem may come from the exporter dropping some of the
flows. Most of the time, there are counters to detect this situation flows. Most of the time, there are counters to detect this situation
and it can be solved by lowering the exporter rate. and it can be solved by lowering the exporter rate.
#### On Cisco NCS5500 routers #### NCS5500 routers
[Netflow, Sampling-Interval and the Mythical Internet Packet Size][1] [Netflow, Sampling-Interval and the Mythical Internet Packet Size][1]
contains a lot of information about the limits of this platform. The first contains a lot of information about the limits of this platform. The first
@@ -98,8 +98,8 @@ default) to keep packets before handing them to the application. When
this buffer is full, packets are dropped. this buffer is full, packets are dropped.
*Akvorado* reports the number of drops for each listening socket with *Akvorado* reports the number of drops for each listening socket with
the `akvorado_flow_input_udp_in_drops` counter. This should be the `akvorado_inlet_flow_input_udp_in_drops` counter. This should be
compared to `akvorado_flow_input_udp_packets`. Another way to get the same compared to `akvorado_inlet_flow_input_udp_packets`. Another way to get the same
information is by using `ss -lunepm` and looking at the drop counter: information is by using `ss -lunepm` and looking at the drop counter:
```console ```console
@@ -116,10 +116,10 @@ increasing the value of `net.core.rmem_max` sysctl and increasing the
### Internal queues ### Internal queues
Inside *Akvorado*, parsed packets are transmitted from one module to Inside the inlet service, parsed packets are transmitted from one module
another using channels. When there is a bottleneck at this level, the to another using channels. When there is a bottleneck at this level,
`akvorado_flow_input_udp_out_drops` counter will increase. There are the `akvorado_inlet_flow_input_udp_out_drops` counter will increase.
several ways to fix that: There are several ways to fix that:
- increasing the channel between the input module and the flow module, - increasing the channel between the input module and the flow module,
with the `queue-size` setting attached to the input, with the `queue-size` setting attached to the input,
@@ -130,12 +130,12 @@ several ways to fix that:
### SNMP poller ### SNMP poller
To process a flow, *Akvorado* needs the interface name and To process a flow, the inlet service needs the interface name and
description. This information is provided by the `snmp` submodule. description. This information is provided by the `snmp` submodule.
When all workers of the SNMP pollers are busy, new requests are When all workers of the SNMP pollers are busy, new requests are
dropped. In this case, the `akvorado_snmp_poller_busy_count` counter dropped. In this case, the `akvorado_inlet_snmp_poller_busy_count`
is increased. To mitigate this issue, *Akvorado* tries to skip counter is increased. To mitigate this issue, the inlet service tries
exporters with too many errors to avoid blocking SNMP requests for to skip exporters with too many errors to avoid blocking SNMP requests
other exporters. However, ensuring the exporters are willing to answer for other exporters. However, ensuring the exporters are willing to answer
requests is the first fix. If that is not enough, you can increase the number requests is the first fix. If that is not enough, you can increase the number
of workers. Workers handle SNMP requests synchronously. of workers. Workers handle SNMP requests synchronously.

View File

@@ -1,15 +1,17 @@
# Internal design # Internal design
*Akvorado* is written in Go. It uses a component architecture. The *Akvorado* is written in Go. Each service has its code in a distinct
entry point is `cmd/serve.go` and each directory is a distinct directory (`inlet/`, `configure/` and `console/`). The `common/`
component. This is heavily inspired by the [Component framework in directory contains components common to several services. The `cmd/`
Clojure][]. A component is a piece of software with its configuration, directory contains the main entry points.
its state and its dependencies on other components.
Each service is split into several components. This is heavily
inspired by the [Component framework in Clojure][]. A component is a
piece of software with its configuration, its state and its
dependencies on other components.
[Component framework in Clojure]: https://github.com/stuartsierra/component [Component framework in Clojure]: https://github.com/stuartsierra/component
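A skeletal component following this convention might look like the sketch below. The package and the component itself are invented for illustration; the `reporter`, `daemon`, and `tomb` APIs are used the same way as in the components touched by this commit.

```go
package example

import (
	"gopkg.in/tomb.v2"

	"akvorado/common/daemon"
	"akvorado/common/reporter"
)

// Component holds the state of this example component.
type Component struct {
	r      *reporter.Reporter
	d      Dependencies
	config Configuration
	t      tomb.Tomb
}

// Configuration would hold the component settings.
type Configuration struct{}

// Dependencies lists the components this one relies on.
type Dependencies struct {
	Daemon daemon.Component
}

// New creates the component from its configuration and dependencies.
func New(r *reporter.Reporter, config Configuration, deps Dependencies) (*Component, error) {
	c := Component{r: r, d: deps, config: config}
	c.d.Daemon.Track(&c.t, "example")
	return &c, nil
}

// Start starts the component. A real component would launch its
// workers with c.t.Go().
func (c *Component) Start() error {
	c.r.Info().Msg("starting example component")
	c.t.Go(func() error {
		<-c.t.Dying() // block until the component is asked to stop
		return nil
	})
	return nil
}

// Stop stops the component.
func (c *Component) Stop() error {
	defer c.r.Info().Msg("example component stopped")
	c.t.Kill(nil)
	return c.t.Wait()
}
```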
![General design](../assets/images/design.svg)
Each component features the following piece of code: Each component features the following piece of code:
- A `Component` structure containing its state. - A `Component` structure containing its state.
@@ -71,9 +73,9 @@ fatal, or rate-limited and accounted into a metric.
## CLI ## CLI
The CLI is handled by [Cobra](https://github.com/spf13/cobra). The The CLI (not a component) is handled by
configuration file is handled by [Cobra](https://github.com/spf13/cobra). The configuration file is
[mapstructure](https://github.com/mitchellh/mapstructure). handled by [mapstructure](https://github.com/mitchellh/mapstructure).
## Flow decoding ## Flow decoding

Some files were not shown because too many files have changed in this diff