Mirror of https://github.com/akvorado/akvorado.git
inlet: split inlet into new inlet and outlet
This change splits the inlet component into a simpler inlet and a new outlet component. The new inlet receives flows and puts them into Kafka, unparsed. The outlet takes them from Kafka, resumes the processing from there (flow parsing, enrichment), and puts the result into ClickHouse. The main goal is to ensure the inlet does minimal work, so that it does not fall behind when processing packets (and restarts faster). It also brings some simplification, as the number of knobs to tune is reduced: for the inlet, only the UDP queue size, the number of workers, and a few Kafka parameters; for the outlet, a few Kafka parameters, the number of workers, and a few ClickHouse parameters.

The outlet features a simple Kafka input component, and the core component becomes just a callback function. There is also a new ClickHouse component that pushes data to ClickHouse with batch inserts, using the low-level ch-go library.

This processing changes the internal representation of a FlowMessage. Previously, it was tailored to dynamically build the protobuf message to be put into Kafka; now, it builds the batch request to be sent to ClickHouse. The FlowMessage structure therefore hides the content of the next batch request and should be reused. This also changes the way flows are decoded: decoders no longer output a FlowMessage, they reuse the one provided to each worker.

The ClickHouse tables are slightly updated: the Null engine is used instead of the Kafka engine, since the outlet now inserts rows directly and the Null engine still feeds the attached materialized views without storing data itself.

Fix #1122
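For context, a minimal sketch of a batch insert through the low-level ch-go library, in the spirit of the new outlet ClickHouse component. This is not Akvorado's actual code: the flows table and its two columns are assumptions made up for the example.

// batch_insert_sketch.go — illustrative only; table and column names are assumptions.
package main

import (
    "context"
    "time"

    "github.com/ClickHouse/ch-go"
    "github.com/ClickHouse/ch-go/proto"
)

func main() {
    ctx := context.Background()
    conn, err := ch.Dial(ctx, ch.Options{Address: "127.0.0.1:9000"})
    if err != nil {
        panic(err)
    }
    defer conn.Close()

    // Column buffers are filled in memory, then sent as a single batch.
    var (
        timeReceived proto.ColDateTime
        byteCount    proto.ColUInt64
    )
    input := proto.Input{
        {Name: "TimeReceived", Data: &timeReceived},
        {Name: "Bytes", Data: &byteCount},
    }
    // Append a couple of rows to the pending batch.
    timeReceived.Append(time.Now())
    byteCount.Append(1500)
    timeReceived.Append(time.Now())
    byteCount.Append(64)

    // Sends "INSERT INTO flows (TimeReceived, Bytes) VALUES" with the buffered block.
    if err := conn.Do(ctx, ch.Query{
        Body:  input.Into("flows"),
        Input: input,
    }); err != nil {
        panic(err)
    }
}

And a sketch of the per-worker FlowMessage reuse described above; all names here are hypothetical, not Akvorado's actual API.

// One FlowMessage per worker, reset and reused for each incoming packet,
// instead of allocating a fresh message per decoded flow.
type FlowMessage struct{ /* accumulates columns for the next batch request */ }

func (fm *FlowMessage) Reset() { /* clear per-flow state */ }

func worker(packets <-chan []byte, decode func([]byte, *FlowMessage) error) {
    fm := &FlowMessage{} // provided to the worker, reused across packets
    for p := range packets {
        fm.Reset()
        if err := decode(p, fm); err != nil {
            continue // skip undecodable packets
        }
        // enrichment, then append to the in-flight ClickHouse batch
    }
}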
8	.github/workflows/ci.yml (vendored)

@@ -47,10 +47,8 @@ jobs:
         run: docker compose -f docker/docker-compose-dev.yml up --wait --wait-timeout 60 --quiet-pull
       - name: Setup
         uses: ./.github/actions/setup
-
-      # Install dependencies
       - name: Install dependencies
-        run: sudo apt-get install -qqy shared-mime-info curl
+        run: sudo apt-get install -qqy shared-mime-info curl protobuf-compiler

       # Build and test
       - name: Build
@@ -95,6 +93,8 @@ jobs:
           persist-credentials: false
       - name: Setup
         uses: ./.github/actions/setup
+      - name: Install dependencies
+        run: brew install protobuf

       # Build and test
       - name: Build
@@ -157,6 +157,8 @@ jobs:
         uses: ./.github/actions/setup
         with:
           go-version: ${{ matrix.go-version }}
+      - name: Install dependencies
+        run: sudo apt-get install -qqy protobuf-compiler
       - name: Build
         run: make && ./bin/akvorado version
       - uses: actions/cache/save@v4
3	.gitignore (vendored)

@@ -1,4 +1,4 @@
-/bin/
+/bin/akvorado
 /test/
 /orchestrator/clickhouse/data/asns.csv
 /orchestrator/clickhouse/data/tcp.csv
@@ -7,6 +7,7 @@
 /common/schema/definition_gen.go
 mock_*.go
 *_enumer.go
+*.pb.go

 /console/data/frontend/
.gitlab-ci.yml

@@ -23,7 +23,7 @@ run tests:
     # past but this is a slight burden to maintain in addition to GitHub CI.
    # Check commit ceaa6ebf8ef6 for the last version supporting functional
     # tests.
-    - time apk add --no-cache git make gcc musl-dev shared-mime-info npm curl
+    - time apk add --no-cache git make gcc musl-dev shared-mime-info npm curl protoc
     - export GOMODCACHE=$PWD/.go-cache
     - npm config --user set cache $PWD/.npm-cache
     - time go mod download
49	Makefile

@@ -6,7 +6,6 @@ DATE ?= $(shell date +%FT%T%z)
 VERSION ?= $(shell git describe --tags --always --dirty --match=v* 2> /dev/null || \
         cat .version 2> /dev/null || echo v0)
 PKGS = $(or $(PKG),$(shell env GO111MODULE=on $(GO) list ./...))
-BIN = bin

 GO = go
 NPM = npm
@@ -19,18 +18,18 @@ M = $(shell if [ "$$(tput colors 2> /dev/null || echo 0)" -ge 8 ]; then printf "
 GENERATED_JS = \
     console/frontend/node_modules
 GENERATED_GO = \
+    common/pb/rawflow.pb.go \
     common/schema/definition_gen.go \
     orchestrator/clickhouse/data/asns.csv \
     orchestrator/clickhouse/data/protocols.csv \
     orchestrator/clickhouse/data/tcp.csv \
     orchestrator/clickhouse/data/udp.csv \
     console/filter/parser.go \
-    inlet/core/asnprovider_enumer.go \
-    inlet/core/netprovider_enumer.go \
-    inlet/flow/decoder/timestampsource_enumer.go \
-    inlet/metadata/provider/snmp/authprotocol_enumer.go \
-    inlet/metadata/provider/snmp/privprotocol_enumer.go \
-    inlet/metadata/provider/gnmi/ifspeedpathunit_enumer.go \
+    outlet/core/asnprovider_enumer.go \
+    outlet/core/netprovider_enumer.go \
+    outlet/metadata/provider/snmp/authprotocol_enumer.go \
+    outlet/metadata/provider/snmp/privprotocol_enumer.go \
+    outlet/metadata/provider/gnmi/ifspeedpathunit_enumer.go \
     console/homepagetopwidget_enumer.go \
     common/kafka/saslmechanism_enumer.go
 GENERATED_TEST_GO = \
@@ -42,20 +41,17 @@ GENERATED = \
     console/data/frontend

 .PHONY: all
-all: fmt lint $(GENERATED) | $(BIN) ; $(info $(M) building executable…) @ ## Build program binary
+all: fmt lint $(GENERATED) ; $(info $(M) building executable…) @ ## Build program binary
     $Q $(GO) build \
         -tags release \
         -ldflags '-X $(MODULE)/common/helpers.AkvoradoVersion=$(VERSION)' \
-        -o $(BIN)/$(basename $(MODULE)) main.go
+        -o bin/$(basename $(MODULE)) main.go

 .PHONY: all_js
 all_js: .fmt-js~ .lint-js~ $(GENERATED_JS) console/data/frontend

 # Tools

-$(BIN):
-    @mkdir -p $@
-
 ENUMER = go tool enumer
 GOCOV = go tool gocov
 GOCOVXML = go tool gocov-xml
@@ -63,6 +59,8 @@ GOIMPORTS = go tool goimports
 GOTESTSUM = go tool gotestsum
 MOCKGEN = go tool mockgen
 PIGEON = go tool pigeon
+PROTOC = protoc
+PROTOC_GEN_GO = bin/protoc-gen-go
 REVIVE = go tool revive
 WWHRD = go tool wwhrd

@@ -70,6 +68,9 @@ WWHRD = go tool wwhrd

 .DELETE_ON_ERROR:

+common/pb/rawflow.pb.go: common/pb/rawflow.proto ; $(info $(M) compiling protocol buffers definition…)
+    $Q $(PROTOC) -I=. --plugin=$(PROTOC_GEN_GO) --go_out=. --go_opt=module=$(MODULE) $<
+
 common/clickhousedb/mocks/mock_driver.go: go.mod ; $(info $(M) generate mocks for ClickHouse driver…)
     $Q $(MOCKGEN) -package mocks -build_constraint "!release" -destination $@ \
         github.com/ClickHouse/clickhouse-go/v2/lib/driver Conn,Row,Rows,ColumnType
@@ -81,18 +82,16 @@ conntrackfixer/mocks/mock_conntrackfixer.go: go.mod ; $(info $(M) generate mocks
       touch $@ ; \
     fi

-inlet/core/asnprovider_enumer.go: go.mod inlet/core/config.go ; $(info $(M) generate enums for ASNProvider…)
-    $Q $(ENUMER) -type=ASNProvider -text -transform=kebab -trimprefix=ASNProvider inlet/core/config.go
-inlet/core/netprovider_enumer.go: go.mod inlet/core/config.go ; $(info $(M) generate enums for NetProvider…)
-    $Q $(ENUMER) -type=NetProvider -text -transform=kebab -trimprefix=NetProvider inlet/core/config.go
-inlet/flow/decoder/timestampsource_enumer.go: go.mod inlet/flow/decoder/config.go ; $(info $(M) generate enums for TimestampSource…)
-    $Q $(ENUMER) -type=TimestampSource -text -transform=kebab -trimprefix=TimestampSource inlet/flow/decoder/config.go
-inlet/metadata/provider/snmp/authprotocol_enumer.go: go.mod inlet/metadata/provider/snmp/config.go ; $(info $(M) generate enums for AuthProtocol…)
-    $Q $(ENUMER) -type=AuthProtocol -text -transform=kebab -trimprefix=AuthProtocol inlet/metadata/provider/snmp/config.go
-inlet/metadata/provider/snmp/privprotocol_enumer.go: go.mod inlet/metadata/provider/snmp/config.go ; $(info $(M) generate enums for PrivProtocol…)
-    $Q $(ENUMER) -type=PrivProtocol -text -transform=kebab -trimprefix=PrivProtocol inlet/metadata/provider/snmp/config.go
-inlet/metadata/provider/gnmi/ifspeedpathunit_enumer.go: go.mod inlet/metadata/provider/gnmi/config.go ; $(info $(M) generate enums for IfSpeedPathUnit…)
-    $Q $(ENUMER) -type=IfSpeedPathUnit -text -transform=kebab -trimprefix=Speed inlet/metadata/provider/gnmi/config.go
+outlet/core/asnprovider_enumer.go: go.mod outlet/core/config.go ; $(info $(M) generate enums for ASNProvider…)
+    $Q $(ENUMER) -type=ASNProvider -text -transform=kebab -trimprefix=ASNProvider outlet/core/config.go
+outlet/core/netprovider_enumer.go: go.mod outlet/core/config.go ; $(info $(M) generate enums for NetProvider…)
+    $Q $(ENUMER) -type=NetProvider -text -transform=kebab -trimprefix=NetProvider outlet/core/config.go
+outlet/metadata/provider/snmp/authprotocol_enumer.go: go.mod outlet/metadata/provider/snmp/config.go ; $(info $(M) generate enums for AuthProtocol…)
+    $Q $(ENUMER) -type=AuthProtocol -text -transform=kebab -trimprefix=AuthProtocol outlet/metadata/provider/snmp/config.go
+outlet/metadata/provider/snmp/privprotocol_enumer.go: go.mod outlet/metadata/provider/snmp/config.go ; $(info $(M) generate enums for PrivProtocol…)
+    $Q $(ENUMER) -type=PrivProtocol -text -transform=kebab -trimprefix=PrivProtocol outlet/metadata/provider/snmp/config.go
+outlet/metadata/provider/gnmi/ifspeedpathunit_enumer.go: go.mod outlet/metadata/provider/gnmi/config.go ; $(info $(M) generate enums for IfSpeedPathUnit…)
+    $Q $(ENUMER) -type=IfSpeedPathUnit -text -transform=kebab -trimprefix=Speed outlet/metadata/provider/gnmi/config.go
 console/homepagetopwidget_enumer.go: go.mod console/config.go ; $(info $(M) generate enums for HomepageTopWidget…)
     $Q $(ENUMER) -type=HomepageTopWidget -text -json -transform=kebab -trimprefix=HomepageTopWidget console/config.go
 common/kafka/saslmechanism_enumer.go: go.mod common/kafka/config.go ; $(info $(M) generate enums for SASLMechanism…)
@@ -226,7 +225,7 @@ licensecheck: console/frontend/node_modules ; $(info $(M) check dependency licen

 .PHONY: clean
 clean: ; $(info $(M) cleaning…) @ ## Cleanup everything
-    @rm -rf test $(GENERATED) inlet/flow/decoder/flow-*.pb.go *~ bin
+    @rm -rf test $(GENERATED) inlet/flow/decoder/flow-*.pb.go *~ bin/akvorado

 .PHONY: help
 help:
2	bin/protoc-gen-go (new executable file)

#!/bin/sh
go tool protoc-gen-go "$@"
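This wrapper is what the Makefile rule above hands to protoc through --plugin=$(PROTOC_GEN_GO): protoc executes the wrapper, which delegates to `go tool protoc-gen-go`, so code generation runs the protoc-gen-go version pinned by the Go module rather than requiring a globally installed plugin.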
182	cmd/inlet.go

@@ -5,24 +5,14 @@ package cmd

 import (
     "fmt"
-    "reflect"

-    "github.com/gin-gonic/gin"
-    "github.com/go-viper/mapstructure/v2"
     "github.com/spf13/cobra"

     "akvorado/common/daemon"
-    "akvorado/common/helpers"
     "akvorado/common/httpserver"
     "akvorado/common/reporter"
-    "akvorado/common/schema"
-    "akvorado/inlet/core"
     "akvorado/inlet/flow"
     "akvorado/inlet/kafka"
-    "akvorado/inlet/metadata"
-    "akvorado/inlet/metadata/provider/snmp"
-    "akvorado/inlet/routing"
-    "akvorado/inlet/routing/provider/bmp"
 )

 // InletConfiguration represents the configuration file for the inlet command.
@@ -30,11 +20,7 @@ type InletConfiguration struct {
     Reporting reporter.Configuration
     HTTP      httpserver.Configuration
     Flow      flow.Configuration
-    Metadata  metadata.Configuration
-    Routing   routing.Configuration
     Kafka     kafka.Configuration
-    Core      core.Configuration
-    Schema    schema.Configuration
 }

 // Reset resets the configuration for the inlet command to its default value.
@@ -43,14 +29,8 @@ func (c *InletConfiguration) Reset() {
         HTTP:      httpserver.DefaultConfiguration(),
         Reporting: reporter.DefaultConfiguration(),
         Flow:      flow.DefaultConfiguration(),
-        Metadata:  metadata.DefaultConfiguration(),
-        Routing:   routing.DefaultConfiguration(),
         Kafka:     kafka.DefaultConfiguration(),
-        Core:      core.DefaultConfiguration(),
-        Schema:    schema.DefaultConfiguration(),
     }
-    c.Metadata.Providers = []metadata.ProviderConfiguration{{Config: snmp.DefaultConfiguration()}}
-    c.Routing.Provider.Config = bmp.DefaultConfiguration()
 }

 type inletOptions struct {
@@ -66,7 +46,7 @@ var inletCmd = &cobra.Command{
     Use:   "inlet",
     Short: "Start Akvorado's inlet service",
     Long: `Akvorado is a Netflow/IPFIX collector. The inlet service handles flow ingestion,
-enrichment and export to Kafka.`,
+and export to Kafka.`,
     Args: cobra.ExactArgs(1),
     RunE: func(cmd *cobra.Command, args []string) error {
         config := InletConfiguration{}
@@ -103,48 +83,19 @@ func inletStart(r *reporter.Reporter, config InletConfiguration, checkOnly bool)
     if err != nil {
         return fmt.Errorf("unable to initialize http component: %w", err)
     }
-    schemaComponent, err := schema.New(config.Schema)
-    if err != nil {
-        return fmt.Errorf("unable to initialize schema component: %w", err)
-    }
-    flowComponent, err := flow.New(r, config.Flow, flow.Dependencies{
-        Daemon: daemonComponent,
-        HTTP:   httpComponent,
-        Schema: schemaComponent,
-    })
-    if err != nil {
-        return fmt.Errorf("unable to initialize flow component: %w", err)
-    }
-    metadataComponent, err := metadata.New(r, config.Metadata, metadata.Dependencies{
-        Daemon: daemonComponent,
-    })
-    if err != nil {
-        return fmt.Errorf("unable to initialize metadata component: %w", err)
-    }
-    routingComponent, err := routing.New(r, config.Routing, routing.Dependencies{
-        Daemon: daemonComponent,
-    })
-    if err != nil {
-        return fmt.Errorf("unable to initialize routing component: %w", err)
-    }
     kafkaComponent, err := kafka.New(r, config.Kafka, kafka.Dependencies{
         Daemon: daemonComponent,
-        Schema: schemaComponent,
     })
     if err != nil {
         return fmt.Errorf("unable to initialize Kafka component: %w", err)
     }
-    coreComponent, err := core.New(r, config.Core, core.Dependencies{
-        Daemon:   daemonComponent,
-        Flow:     flowComponent,
-        Metadata: metadataComponent,
-        Routing:  routingComponent,
-        Kafka:    kafkaComponent,
-        HTTP:     httpComponent,
-        Schema:   schemaComponent,
-    })
+    flowComponent, err := flow.New(r, config.Flow, flow.Dependencies{
+        Daemon: daemonComponent,
+        HTTP:   httpComponent,
+        Kafka:  kafkaComponent,
+    })
     if err != nil {
-        return fmt.Errorf("unable to initialize core component: %w", err)
+        return fmt.Errorf("unable to initialize flow component: %w", err)
     }

     // Expose some information and metrics
@@ -159,129 +110,8 @@ func inletStart(r *reporter.Reporter, config InletConfiguration, checkOnly bool)
     // Start all the components.
     components := []interface{}{
         httpComponent,
-        metadataComponent,
-        routingComponent,
         kafkaComponent,
-        coreComponent,
         flowComponent,
     }
     return StartStopComponents(r, daemonComponent, components)
 }
-
-// InletConfigurationUnmarshallerHook renames SNMP configuration to metadata and
-// BMP configuration to routing.
-func InletConfigurationUnmarshallerHook() mapstructure.DecodeHookFunc {
-    return func(from, to reflect.Value) (interface{}, error) {
-        if from.Kind() != reflect.Map || from.IsNil() || to.Type() != reflect.TypeOf(InletConfiguration{}) {
-            return from.Interface(), nil
-        }
-
-        // snmp → metadata
-        {
-            var snmpKey, metadataKey *reflect.Value
-            fromKeys := from.MapKeys()
-            for i, k := range fromKeys {
-                k = helpers.ElemOrIdentity(k)
-                if k.Kind() != reflect.String {
-                    return from.Interface(), nil
-                }
-                if helpers.MapStructureMatchName(k.String(), "Snmp") {
-                    snmpKey = &fromKeys[i]
-                } else if helpers.MapStructureMatchName(k.String(), "Metadata") {
-                    metadataKey = &fromKeys[i]
-                }
-            }
-            if snmpKey != nil {
-                if metadataKey != nil {
-                    return nil, fmt.Errorf("cannot have both %q and %q", snmpKey.String(), metadataKey.String())
-                }
-
-                // Build the metadata configuration
-                providerValue := gin.H{}
-                metadataValue := gin.H{}
-                // Dispatch values from snmp key into metadata
-                snmpMap := helpers.ElemOrIdentity(from.MapIndex(*snmpKey))
-                snmpKeys := snmpMap.MapKeys()
-            outerSNMP:
-                for i, k := range snmpKeys {
-                    k = helpers.ElemOrIdentity(k)
-                    if k.Kind() != reflect.String {
-                        continue
-                    }
-                    if helpers.MapStructureMatchName(k.String(), "PollerCoalesce") {
-                        metadataValue["MaxBatchRequests"] = snmpMap.MapIndex(snmpKeys[i]).Interface()
-                        continue
-                    }
-                    metadataConfig := reflect.TypeOf(metadata.Configuration{})
-                    for j := range metadataConfig.NumField() {
-                        if helpers.MapStructureMatchName(k.String(), metadataConfig.Field(j).Name) {
-                            metadataValue[k.String()] = snmpMap.MapIndex(snmpKeys[i]).Interface()
-                            continue outerSNMP
-                        }
-                    }
-                    providerValue[k.String()] = snmpMap.MapIndex(snmpKeys[i]).Interface()
-                }
-
-                providerValue["type"] = "snmp"
-                metadataValue["provider"] = providerValue
-                from.SetMapIndex(reflect.ValueOf("metadata"), reflect.ValueOf(metadataValue))
-                from.SetMapIndex(*snmpKey, reflect.Value{})
-            }
-        }
-
-        // bmp → routing
-        {
-            var bmpKey, routingKey *reflect.Value
-            fromKeys := from.MapKeys()
-            for i, k := range fromKeys {
-                k = helpers.ElemOrIdentity(k)
-                if k.Kind() != reflect.String {
-                    return from.Interface(), nil
-                }
-                if helpers.MapStructureMatchName(k.String(), "Bmp") {
-                    bmpKey = &fromKeys[i]
-                } else if helpers.MapStructureMatchName(k.String(), "Routing") {
-                    routingKey = &fromKeys[i]
-                }
-            }
-            if bmpKey != nil {
-                if routingKey != nil {
-                    return nil, fmt.Errorf("cannot have both %q and %q", bmpKey.String(), routingKey.String())
-                }
-
-                // Build the routing configuration
-                providerValue := gin.H{}
-                routingValue := gin.H{}
-                // Dispatch values from bmp key into routing
-                bmpMap := helpers.ElemOrIdentity(from.MapIndex(*bmpKey))
-                bmpKeys := bmpMap.MapKeys()
-            outerBMP:
-                for i, k := range bmpKeys {
-                    k = helpers.ElemOrIdentity(k)
-                    if k.Kind() != reflect.String {
-                        continue
-                    }
-                    routingConfig := reflect.TypeOf(routing.Configuration{})
-                    for j := range routingConfig.NumField() {
-                        if helpers.MapStructureMatchName(k.String(), routingConfig.Field(j).Name) {
-                            routingValue[k.String()] = bmpMap.MapIndex(bmpKeys[i]).Interface()
-                            continue outerBMP
-                        }
-                    }
-                    providerValue[k.String()] = bmpMap.MapIndex(bmpKeys[i]).Interface()
-                }
-
-                providerValue["type"] = "bmp"
-                routingValue["provider"] = providerValue
-                from.SetMapIndex(reflect.ValueOf("routing"), reflect.ValueOf(routingValue))
-                from.SetMapIndex(*bmpKey, reflect.Value{})
-            }
-        }
-
-        return from.Interface(), nil
-    }
-}
-
-func init() {
-    helpers.RegisterMapstructureUnmarshallerHook(InletConfigurationUnmarshallerHook())
-}
cmd/orchestrator.go

@@ -37,6 +37,7 @@ type OrchestratorConfiguration struct {
     Schema schema.Configuration
     // Other service configurations
     Inlet        []InletConfiguration        `validate:"dive"`
+    Outlet       []OutletConfiguration       `validate:"dive"`
     Console      []ConsoleConfiguration      `validate:"dive"`
     DemoExporter []DemoExporterConfiguration `validate:"dive"`
 }
@@ -45,6 +46,8 @@ type OrchestratorConfiguration struct {
 func (c *OrchestratorConfiguration) Reset() {
     inletConfiguration := InletConfiguration{}
     inletConfiguration.Reset()
+    outletConfiguration := OutletConfiguration{}
+    outletConfiguration.Reset()
     consoleConfiguration := ConsoleConfiguration{}
     consoleConfiguration.Reset()
     *c = OrchestratorConfiguration{
@@ -58,6 +61,7 @@ func (c *OrchestratorConfiguration) Reset() {
         Schema: schema.DefaultConfiguration(),
         // Other service configurations
         Inlet:        []InletConfiguration{inletConfiguration},
+        Outlet:       []OutletConfiguration{outletConfiguration},
         Console:      []ConsoleConfiguration{consoleConfiguration},
         DemoExporter: []DemoExporterConfiguration{},
     }
@@ -83,14 +87,19 @@ components and centralizes configuration of the various other components.`,
         OrchestratorOptions.Path = args[0]
         OrchestratorOptions.BeforeDump = func(metadata mapstructure.Metadata) {
             // Override some parts of the configuration
-            if !slices.Contains(metadata.Keys, "ClickHouse.Kafka.Brokers[0]") {
-                config.ClickHouse.Kafka.Configuration = config.Kafka.Configuration
-            }
             for idx := range config.Inlet {
                 if !slices.Contains(metadata.Keys, fmt.Sprintf("Inlet[%d].Kafka.Brokers[0]", idx)) {
                     config.Inlet[idx].Kafka.Configuration = config.Kafka.Configuration
                 }
-                config.Inlet[idx].Schema = config.Schema
             }
+            for idx := range config.Outlet {
+                if !slices.Contains(metadata.Keys, fmt.Sprintf("Outlet[%d].ClickHouse.Servers[0]", idx)) {
+                    config.Outlet[idx].ClickHouseDB = config.ClickHouseDB
+                }
+                if !slices.Contains(metadata.Keys, fmt.Sprintf("Outlet[%d].Kafka.Brokers[0]", idx)) {
+                    config.Outlet[idx].Kafka.Configuration = config.Kafka.Configuration
+                }
+                config.Outlet[idx].Schema = config.Schema
+            }
             for idx := range config.Console {
                 if !slices.Contains(metadata.Keys, fmt.Sprintf("Console[%d].ClickHouse.Servers[0]", idx)) {
@@ -144,14 +153,12 @@ func orchestratorStart(r *reporter.Reporter, config OrchestratorConfiguration, c
     if err != nil {
         return fmt.Errorf("unable to initialize ClickHouse component: %w", err)
     }
-
     geoipComponent, err := geoip.New(r, config.GeoIP, geoip.Dependencies{
         Daemon: daemonComponent,
     })
     if err != nil {
         return fmt.Errorf("unable to initialize GeoIP component: %w", err)
     }
-
     clickhouseComponent, err := clickhouse.New(r, config.ClickHouse, clickhouse.Dependencies{
         Daemon: daemonComponent,
         HTTP:   httpComponent,
@@ -171,6 +178,9 @@ func orchestratorStart(r *reporter.Reporter, config OrchestratorConfiguration, c
     for idx := range config.Inlet {
         orchestratorComponent.RegisterConfiguration(orchestrator.InletService, config.Inlet[idx])
     }
+    for idx := range config.Outlet {
+        orchestratorComponent.RegisterConfiguration(orchestrator.OutletService, config.Outlet[idx])
+    }
     for idx := range config.Console {
         orchestratorComponent.RegisterConfiguration(orchestrator.ConsoleService, config.Console[idx])
     }
@@ -188,7 +198,7 @@ func orchestratorStart(r *reporter.Reporter, config OrchestratorConfiguration, c
     }

     // Start all the components.
-    components := []interface{}{
+    components := []any{
         geoipComponent,
         httpComponent,
         clickhouseDBComponent,
@@ -198,136 +208,224 @@ func orchestratorStart(r *reporter.Reporter, config OrchestratorConfiguration, c
     return StartStopComponents(r, daemonComponent, components)
 }

-// OrchestratorConfigurationUnmarshallerHook migrates GeoIP configuration from inlet
-// component to clickhouse component and ClickHouse database configuration from
-// clickhouse component to clickhousedb component.
-func OrchestratorConfigurationUnmarshallerHook() mapstructure.DecodeHookFunc {
-    return func(from, to reflect.Value) (interface{}, error) {
-        if from.Kind() != reflect.Map || from.IsNil() || to.Type() != reflect.TypeOf(OrchestratorConfiguration{}) {
-            return from.Interface(), nil
-        }
-
-    inletgeoip:
-        // inlet/geoip → geoip
-        for {
-            var (
-                inletKey, geoIPKey, inletGeoIPValue *reflect.Value
-            )
-
-            fromKeys := from.MapKeys()
-            for i, k := range fromKeys {
-                k = helpers.ElemOrIdentity(k)
-                if k.Kind() != reflect.String {
-                    break inletgeoip
-                }
-                if helpers.MapStructureMatchName(k.String(), "Inlet") {
-                    inletKey = &fromKeys[i]
-                } else if helpers.MapStructureMatchName(k.String(), "GeoIP") {
-                    geoIPKey = &fromKeys[i]
-                }
-            }
-            if inletKey == nil {
-                break inletgeoip
-            }
-
-            // Take the first geoip configuration and delete the others
-            inletConfigs := helpers.ElemOrIdentity(from.MapIndex(*inletKey))
-            if inletConfigs.Kind() != reflect.Slice {
-                inletConfigs = reflect.ValueOf([]interface{}{inletConfigs.Interface()})
-            }
-            for i := range inletConfigs.Len() {
-                fromInlet := helpers.ElemOrIdentity(inletConfigs.Index(i))
-                if fromInlet.Kind() != reflect.Map {
-                    break inletgeoip
-                }
-                fromInletKeys := fromInlet.MapKeys()
-                for _, k := range fromInletKeys {
-                    k = helpers.ElemOrIdentity(k)
-                    if k.Kind() != reflect.String {
-                        break inletgeoip
-                    }
-                    if helpers.MapStructureMatchName(k.String(), "GeoIP") {
-                        if inletGeoIPValue == nil {
-                            v := fromInlet.MapIndex(k)
-                            inletGeoIPValue = &v
-                        }
-                    }
-                }
-            }
-            if inletGeoIPValue == nil {
-                break inletgeoip
-            }
-            if geoIPKey != nil {
-                return nil, errors.New("cannot have both \"GeoIP\" in inlet and clickhouse configuration")
-            }
-
-            from.SetMapIndex(reflect.ValueOf("geoip"), *inletGeoIPValue)
-            for i := range inletConfigs.Len() {
-                fromInlet := helpers.ElemOrIdentity(inletConfigs.Index(i))
-                fromInletKeys := fromInlet.MapKeys()
-                for _, k := range fromInletKeys {
-                    k = helpers.ElemOrIdentity(k)
-                    if helpers.MapStructureMatchName(k.String(), "GeoIP") {
-                        fromInlet.SetMapIndex(k, reflect.Value{})
-                    }
-                }
-            }
-            break
-        }
-
-        {
-            // clickhouse database fields → clickhousedb
-            var clickhouseKey, clickhouseDBKey *reflect.Value
-            fromKeys := from.MapKeys()
-            for i, k := range fromKeys {
-                k = helpers.ElemOrIdentity(k)
-                if k.Kind() != reflect.String {
-                    continue
-                }
-                if helpers.MapStructureMatchName(k.String(), "ClickHouse") {
-                    clickhouseKey = &fromKeys[i]
-                } else if helpers.MapStructureMatchName(k.String(), "ClickHouseDB") {
-                    clickhouseDBKey = &fromKeys[i]
-                }
-            }
-
-            if clickhouseKey != nil {
-                var clickhouseDB reflect.Value
-                if clickhouseDBKey != nil {
-                    clickhouseDB = helpers.ElemOrIdentity(from.MapIndex(*clickhouseDBKey))
-                } else {
-                    clickhouseDB = reflect.ValueOf(gin.H{})
-                }
-
-                clickhouse := helpers.ElemOrIdentity(from.MapIndex(*clickhouseKey))
-                if clickhouse.Kind() == reflect.Map {
-                    clickhouseKeys := clickhouse.MapKeys()
-                    // Fields to migrate from clickhouse to clickhousedb
-                    fieldsToMigrate := []string{
-                        "Servers", "Cluster", "Database", "Username", "Password",
-                        "MaxOpenConns", "DialTimeout", "TLS",
-                    }
-                    found := false
-                    for _, k := range clickhouseKeys {
-                        k = helpers.ElemOrIdentity(k)
-                        if k.Kind() != reflect.String {
-                            continue
-                        }
-                        for _, field := range fieldsToMigrate {
-                            if helpers.MapStructureMatchName(k.String(), field) {
-                                if clickhouseDBKey != nil {
-                                    return nil, errors.New("cannot have both \"ClickHouseDB\" and ClickHouse database settings in \"ClickHouse\"")
-                                }
-                                clickhouseDB.SetMapIndex(k, helpers.ElemOrIdentity(clickhouse.MapIndex(k)))
-                                clickhouse.SetMapIndex(k, reflect.Value{})
-                                found = true
-                                break
-                            }
-                        }
-                    }
-                    if clickhouseDBKey == nil && found {
-                        from.SetMapIndex(reflect.ValueOf("clickhousedb"), clickhouseDB)
-                    }
-                }
-            }
-        }
-
-        return from.Interface(), nil
-    }
-}
+// orchestratorGeoIPMigrationHook migrates GeoIP configuration from inlet
+// component to clickhouse component
+func orchestratorGeoIPMigrationHook() mapstructure.DecodeHookFunc {
+    return func(from, to reflect.Value) (any, error) {
+        if from.Kind() != reflect.Map || from.IsNil() || to.Type() != reflect.TypeOf(OrchestratorConfiguration{}) {
+            return from.Interface(), nil
+        }
+
+        var (
+            inletKey, geoIPKey, inletGeoIPValue *reflect.Value
+        )
+        fromKeys := from.MapKeys()
+        for i, k := range fromKeys {
+            k = helpers.ElemOrIdentity(k)
+            if k.Kind() != reflect.String {
+                return from.Interface(), nil
+            }
+            if helpers.MapStructureMatchName(k.String(), "Inlet") {
+                inletKey = &fromKeys[i]
+            } else if helpers.MapStructureMatchName(k.String(), "GeoIP") {
+                geoIPKey = &fromKeys[i]
+            }
+        }
+        if inletKey == nil {
+            return from.Interface(), nil
+        }
+
+        // Take the first geoip configuration and delete the others
+        inletConfigs := helpers.ElemOrIdentity(from.MapIndex(*inletKey))
+        if inletConfigs.Kind() != reflect.Slice {
+            inletConfigs = reflect.ValueOf([]any{inletConfigs.Interface()})
+        }
+        for i := range inletConfigs.Len() {
+            fromInlet := helpers.ElemOrIdentity(inletConfigs.Index(i))
+            if fromInlet.Kind() != reflect.Map {
+                return from.Interface(), nil
+            }
+            fromInletKeys := fromInlet.MapKeys()
+            for _, k := range fromInletKeys {
+                k = helpers.ElemOrIdentity(k)
+                if k.Kind() != reflect.String {
+                    return from.Interface(), nil
+                }
+                if helpers.MapStructureMatchName(k.String(), "GeoIP") {
+                    if inletGeoIPValue == nil {
+                        v := fromInlet.MapIndex(k)
+                        inletGeoIPValue = &v
+                    }
+                }
+            }
+        }
+        if inletGeoIPValue == nil {
+            return from.Interface(), nil
+        }
+        if geoIPKey != nil {
+            return nil, errors.New("cannot have both \"GeoIP\" in inlet and clickhouse configuration")
+        }
+
+        from.SetMapIndex(reflect.ValueOf("geoip"), *inletGeoIPValue)
+        for i := range inletConfigs.Len() {
+            fromInlet := helpers.ElemOrIdentity(inletConfigs.Index(i))
+            fromInletKeys := fromInlet.MapKeys()
+            for _, k := range fromInletKeys {
+                k = helpers.ElemOrIdentity(k)
+                if helpers.MapStructureMatchName(k.String(), "GeoIP") {
+                    fromInlet.SetMapIndex(k, reflect.Value{})
+                }
+            }
+        }
+
+        return from.Interface(), nil
+    }
+}
+
+// orchestratorClickHouseMigrationHook migrates ClickHouse database
+// configuration from clickhouse component to clickhousedb component
+func orchestratorClickHouseMigrationHook() mapstructure.DecodeHookFunc {
+    return func(from, to reflect.Value) (any, error) {
+        if from.Kind() != reflect.Map || from.IsNil() || to.Type() != reflect.TypeOf(OrchestratorConfiguration{}) {
+            return from.Interface(), nil
+        }
+
+        var clickhouseKey, clickhouseDBKey *reflect.Value
+        fromKeys := from.MapKeys()
+        for i, k := range fromKeys {
+            k = helpers.ElemOrIdentity(k)
+            if k.Kind() != reflect.String {
+                continue
+            }
+            if helpers.MapStructureMatchName(k.String(), "ClickHouse") {
+                clickhouseKey = &fromKeys[i]
+            } else if helpers.MapStructureMatchName(k.String(), "ClickHouseDB") {
+                clickhouseDBKey = &fromKeys[i]
+            }
+        }
+
+        if clickhouseKey != nil {
+            var clickhouseDB reflect.Value
+            if clickhouseDBKey != nil {
+                clickhouseDB = helpers.ElemOrIdentity(from.MapIndex(*clickhouseDBKey))
+            } else {
+                clickhouseDB = reflect.ValueOf(gin.H{})
+            }
+
+            clickhouse := helpers.ElemOrIdentity(from.MapIndex(*clickhouseKey))
+            if clickhouse.Kind() == reflect.Map {
+                clickhouseKeys := clickhouse.MapKeys()
+                // Fields to migrate from clickhouse to clickhousedb
+                fieldsToMigrate := []string{
+                    "Servers", "Cluster", "Database", "Username", "Password",
+                    "MaxOpenConns", "DialTimeout", "TLS",
+                }
+                found := false
+                for _, k := range clickhouseKeys {
+                    k = helpers.ElemOrIdentity(k)
+                    if k.Kind() != reflect.String {
+                        continue
+                    }
+                    for _, field := range fieldsToMigrate {
+                        if helpers.MapStructureMatchName(k.String(), field) {
+                            if clickhouseDBKey != nil {
+                                return nil, errors.New("cannot have both \"ClickHouseDB\" and ClickHouse database settings in \"ClickHouse\"")
+                            }
+                            clickhouseDB.SetMapIndex(k, helpers.ElemOrIdentity(clickhouse.MapIndex(k)))
+                            clickhouse.SetMapIndex(k, reflect.Value{})
+                            found = true
+                            break
+                        }
+                    }
+                }
+                if clickhouseDBKey == nil && found {
+                    from.SetMapIndex(reflect.ValueOf("clickhousedb"), clickhouseDB)
+                }
+            }
+        }
+
+        return from.Interface(), nil
+    }
+}
+
+// orchestratorInletToOutletMigrationHook migrates inlet configuration to outlet
+// configuration. This only works if there is no outlet configuration and if
+// there is only one inlet configuration.
+func orchestratorInletToOutletMigrationHook() mapstructure.DecodeHookFunc {
+    return func(from, to reflect.Value) (any, error) {
+        if from.Kind() != reflect.Map || from.IsNil() || to.Type() != reflect.TypeOf(OrchestratorConfiguration{}) {
+            return from.Interface(), nil
+        }
+
+        // inlet fields (Metadata, Routing, Core, Schema) → outlet
+        var inletKey, outletKey *reflect.Value
+        fromKeys := from.MapKeys()
+        for i, k := range fromKeys {
+            k = helpers.ElemOrIdentity(k)
+            if k.Kind() != reflect.String {
+                continue
+            }
+            if helpers.MapStructureMatchName(k.String(), "Inlet") {
+                inletKey = &fromKeys[i]
+            } else if helpers.MapStructureMatchName(k.String(), "Outlet") {
+                outletKey = &fromKeys[i]
+            }
+        }
+
+        if inletKey != nil {
+            inletConfigs := helpers.ElemOrIdentity(from.MapIndex(*inletKey))
+            if inletConfigs.Kind() != reflect.Slice {
+                inletConfigs = reflect.ValueOf([]any{inletConfigs.Interface()})
+            }
+
+            // Fields to migrate from inlet to outlet
+            fieldsToMigrate := []string{
+                // Current keys
+                "Metadata", "Routing", "Core", "Schema",
+                // Older keys (which will be migrated)
+                "BMP", "SNMP",
+            }
+
+            // Process each inlet configuration
+            for i := range inletConfigs.Len() {
+                fromInlet := helpers.ElemOrIdentity(inletConfigs.Index(i))
+                if fromInlet.Kind() != reflect.Map {
+                    continue
+                }
+                modified := false
+                toOutlet := reflect.ValueOf(gin.H{})
+
+                // Migrate fields from inlet to outlet
+                fromInletKeys := fromInlet.MapKeys()
+                for _, k := range fromInletKeys {
+                    k = helpers.ElemOrIdentity(k)
+                    if k.Kind() != reflect.String {
+                        continue
+                    }
+                    for _, field := range fieldsToMigrate {
+                        if !helpers.MapStructureMatchName(k.String(), field) {
+                            continue
+                        }
+                        // We can only do a migration if we have no existing
+                        // outlet configuration AND only one inlet configuration.
+                        if outletKey != nil {
+                            return nil, fmt.Errorf("cannot have both \"inlet\" configuration with %q field and \"outlet\" configuration", field)
+                        }
+                        if inletConfigs.Len() > 1 {
+                            return nil, fmt.Errorf("cannot migrate %q from %q to %q as there are several inlet configurations", field, "inlet", "outlet")
+                        }
+                        toOutlet.SetMapIndex(k, helpers.ElemOrIdentity(fromInlet.MapIndex(k)))
+                        fromInlet.SetMapIndex(k, reflect.Value{})
+                        modified = true
+                        break
+                    }
+                }
+
+                if modified {
+                    // We know there is no existing outlet configuration.
+                    outletConfigs := reflect.ValueOf([]any{toOutlet})
+                    from.SetMapIndex(reflect.ValueOf("outlet"), outletConfigs)
+                }
+            }
+        }
+
+        return from.Interface(), nil
+    }
+}
@@ -337,5 +435,7 @@ func OrchestratorConfigurationUnmarshallerHook() mapstructure.DecodeHookFunc {
 }

 func init() {
-    helpers.RegisterMapstructureUnmarshallerHook(OrchestratorConfigurationUnmarshallerHook())
+    helpers.RegisterMapstructureUnmarshallerHook(orchestratorGeoIPMigrationHook())
+    helpers.RegisterMapstructureUnmarshallerHook(orchestratorClickHouseMigrationHook())
+    helpers.RegisterMapstructureUnmarshallerHook(orchestratorInletToOutletMigrationHook())
 }
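Taken together, the hooks above keep an existing configuration working: a GeoIP section under inlet moves to the top-level geoip key, database settings under clickhouse move to the new clickhousedb component, and the metadata, routing, core, and schema sections (plus the legacy bmp and snmp keys) of a single inlet are transplanted into a new outlet entry. The migration fails with an explicit error when both old and new keys are present, or when several inlet configurations exist.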
304
cmd/outlet.go
Normal file
304
cmd/outlet.go
Normal file
@@ -0,0 +1,304 @@
|
|||||||
|
// SPDX-FileCopyrightText: 2022 Free Mobile
|
||||||
|
// SPDX-License-Identifier: AGPL-3.0-only
|
||||||
|
|
||||||
|
package cmd
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"reflect"
|
||||||
|
|
||||||
|
"github.com/gin-gonic/gin"
|
||||||
|
"github.com/go-viper/mapstructure/v2"
|
||||||
|
"github.com/spf13/cobra"
|
||||||
|
|
||||||
|
"akvorado/common/clickhousedb"
|
||||||
|
"akvorado/common/daemon"
|
||||||
|
"akvorado/common/helpers"
|
||||||
|
"akvorado/common/httpserver"
|
||||||
|
"akvorado/common/reporter"
|
||||||
|
"akvorado/common/schema"
|
||||||
|
"akvorado/outlet/clickhouse"
|
||||||
|
"akvorado/outlet/core"
|
||||||
|
"akvorado/outlet/flow"
|
||||||
|
"akvorado/outlet/kafka"
|
||||||
|
"akvorado/outlet/metadata"
|
||||||
|
"akvorado/outlet/metadata/provider/snmp"
|
||||||
|
"akvorado/outlet/routing"
|
||||||
|
"akvorado/outlet/routing/provider/bmp"
|
||||||
|
)
|
||||||
|
|
||||||
|
// OutletConfiguration represents the configuration file for the outlet command.
|
||||||
|
type OutletConfiguration struct {
|
||||||
|
Reporting reporter.Configuration
|
||||||
|
HTTP httpserver.Configuration
|
||||||
|
Metadata metadata.Configuration
|
||||||
|
Routing routing.Configuration
|
||||||
|
Kafka kafka.Configuration
|
||||||
|
ClickHouseDB clickhousedb.Configuration
|
||||||
|
ClickHouse clickhouse.Configuration
|
||||||
|
Core core.Configuration
|
||||||
|
Schema schema.Configuration
|
||||||
|
}
|
||||||
|
|
||||||
|
// Reset resets the configuration for the outlet command to its default value.
|
||||||
|
func (c *OutletConfiguration) Reset() {
|
||||||
|
*c = OutletConfiguration{
|
||||||
|
HTTP: httpserver.DefaultConfiguration(),
|
||||||
|
Reporting: reporter.DefaultConfiguration(),
|
||||||
|
Metadata: metadata.DefaultConfiguration(),
|
||||||
|
Routing: routing.DefaultConfiguration(),
|
||||||
|
Kafka: kafka.DefaultConfiguration(),
|
||||||
|
ClickHouseDB: clickhousedb.DefaultConfiguration(),
|
||||||
|
ClickHouse: clickhouse.DefaultConfiguration(),
|
||||||
|
Core: core.DefaultConfiguration(),
|
||||||
|
Schema: schema.DefaultConfiguration(),
|
||||||
|
}
|
||||||
|
c.Metadata.Providers = []metadata.ProviderConfiguration{{Config: snmp.DefaultConfiguration()}}
|
||||||
|
c.Routing.Provider.Config = bmp.DefaultConfiguration()
|
||||||
|
}
|
||||||
|
|
||||||
|
type outletOptions struct {
|
||||||
|
ConfigRelatedOptions
|
||||||
|
CheckMode bool
|
||||||
|
}
|
||||||
|
|
||||||
|
// OutletOptions stores the command-line option values for the outlet
|
||||||
|
// command.
|
||||||
|
var OutletOptions outletOptions
|
||||||
|
|
||||||
|
var outletCmd = &cobra.Command{
|
||||||
|
Use: "outlet",
|
||||||
|
Short: "Start Akvorado's outlet service",
|
||||||
|
Long: `Akvorado is a Netflow/IPFIX collector. The outlet service handles flow ingestion,
|
||||||
|
enrichment and export to Kafka.`,
|
||||||
|
Args: cobra.ExactArgs(1),
|
||||||
|
RunE: func(cmd *cobra.Command, args []string) error {
|
||||||
|
config := OutletConfiguration{}
|
||||||
|
OutletOptions.Path = args[0]
|
||||||
|
if err := OutletOptions.Parse(cmd.OutOrStdout(), "outlet", &config); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
r, err := reporter.New(config.Reporting)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("unable to initialize reporter: %w", err)
|
||||||
|
}
|
||||||
|
return outletStart(r, config, OutletOptions.CheckMode)
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
RootCmd.AddCommand(outletCmd)
|
||||||
|
outletCmd.Flags().BoolVarP(&OutletOptions.ConfigRelatedOptions.Dump, "dump", "D", false,
|
||||||
|
"Dump configuration before starting")
|
||||||
|
outletCmd.Flags().BoolVarP(&OutletOptions.CheckMode, "check", "C", false,
|
||||||
|
"Check configuration, but does not start")
|
||||||
|
}
|
||||||
|
|
||||||
|
func outletStart(r *reporter.Reporter, config OutletConfiguration, checkOnly bool) error {
|
||||||
|
// Initialize the various components
|
||||||
|
daemonComponent, err := daemon.New(r)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("unable to initialize daemon component: %w", err)
|
||||||
|
}
|
||||||
|
httpComponent, err := httpserver.New(r, config.HTTP, httpserver.Dependencies{
|
||||||
|
Daemon: daemonComponent,
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("unable to initialize http component: %w", err)
|
||||||
|
}
|
||||||
|
schemaComponent, err := schema.New(config.Schema)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("unable to initialize schema component: %w", err)
|
||||||
|
}
|
||||||
|
flowComponent, err := flow.New(r, flow.Dependencies{
|
||||||
|
Schema: schemaComponent,
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("unable to initialize flow component: %w", err)
|
||||||
|
}
|
||||||
|
metadataComponent, err := metadata.New(r, config.Metadata, metadata.Dependencies{
|
||||||
|
Daemon: daemonComponent,
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("unable to initialize metadata component: %w", err)
|
||||||
|
}
|
||||||
|
routingComponent, err := routing.New(r, config.Routing, routing.Dependencies{
|
||||||
|
Daemon: daemonComponent,
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("unable to initialize routing component: %w", err)
|
||||||
|
}
|
||||||
|
kafkaComponent, err := kafka.New(r, config.Kafka, kafka.Dependencies{
|
||||||
|
Daemon: daemonComponent,
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("unable to initialize Kafka component: %w", err)
|
||||||
|
}
|
||||||
|
clickhouseDBComponent, err := clickhousedb.New(r, config.ClickHouseDB, clickhousedb.Dependencies{
|
||||||
|
Daemon: daemonComponent,
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("unable to initialize ClickHouse component: %w", err)
|
||||||
|
}
|
||||||
|
clickhouseComponent, err := clickhouse.New(r, config.ClickHouse, clickhouse.Dependencies{
|
||||||
|
ClickHouse: clickhouseDBComponent,
|
||||||
|
Schema: schemaComponent,
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("unable to initialize outlet ClickHouse component: %w", err)
|
||||||
|
}
|
||||||
|
coreComponent, err := core.New(r, config.Core, core.Dependencies{
|
||||||
|
Daemon: daemonComponent,
|
||||||
|
Flow: flowComponent,
|
||||||
|
Metadata: metadataComponent,
|
||||||
|
Routing: routingComponent,
|
||||||
|
Kafka: kafkaComponent,
|
||||||
|
ClickHouse: clickhouseComponent,
|
||||||
|
HTTP: httpComponent,
|
||||||
|
Schema: schemaComponent,
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("unable to initialize core component: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Expose some information and metrics
|
||||||
|
addCommonHTTPHandlers(r, "outlet", httpComponent)
|
||||||
|
versionMetrics(r)
|
||||||
|
|
||||||
|
// If we only asked for a check, stop here.
|
||||||
|
if checkOnly {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Start all the components.
|
||||||
|
components := []any{
|
||||||
|
httpComponent,
|
||||||
|
clickhouseDBComponent,
|
||||||
|
clickhouseComponent,
|
||||||
|
flowComponent,
|
||||||
|
metadataComponent,
|
||||||
|
routingComponent,
|
||||||
|
kafkaComponent,
|
||||||
|
coreComponent,
|
||||||
|
}
|
||||||
|
return StartStopComponents(r, daemonComponent, components)
|
||||||
|
}
|
||||||
|
|
||||||
|
// OutletConfigurationUnmarshallerHook renames SNMP configuration to metadata and
|
||||||
|
// BMP configuration to routing.
|
||||||
|
func OutletConfigurationUnmarshallerHook() mapstructure.DecodeHookFunc {
|
||||||
|
return func(from, to reflect.Value) (interface{}, error) {
|
||||||
|
		if from.Kind() != reflect.Map || from.IsNil() || to.Type() != reflect.TypeOf(OutletConfiguration{}) {
			return from.Interface(), nil
		}

		// snmp → metadata
		{
			var snmpKey, metadataKey *reflect.Value
			fromKeys := from.MapKeys()
			for i, k := range fromKeys {
				k = helpers.ElemOrIdentity(k)
				if k.Kind() != reflect.String {
					return from.Interface(), nil
				}
				if helpers.MapStructureMatchName(k.String(), "Snmp") {
					snmpKey = &fromKeys[i]
				} else if helpers.MapStructureMatchName(k.String(), "Metadata") {
					metadataKey = &fromKeys[i]
				}
			}
			if snmpKey != nil {
				if metadataKey != nil {
					return nil, fmt.Errorf("cannot have both %q and %q", snmpKey.String(), metadataKey.String())
				}

				// Build the metadata configuration
				providerValue := gin.H{}
				metadataValue := gin.H{}
				// Dispatch values from snmp key into metadata
				snmpMap := helpers.ElemOrIdentity(from.MapIndex(*snmpKey))
				snmpKeys := snmpMap.MapKeys()
			outerSNMP:
				for i, k := range snmpKeys {
					k = helpers.ElemOrIdentity(k)
					if k.Kind() != reflect.String {
						continue
					}
					if helpers.MapStructureMatchName(k.String(), "PollerCoalesce") {
						metadataValue["MaxBatchRequests"] = snmpMap.MapIndex(snmpKeys[i]).Interface()
						continue
					}
					metadataConfig := reflect.TypeOf(metadata.Configuration{})
					for j := range metadataConfig.NumField() {
						if helpers.MapStructureMatchName(k.String(), metadataConfig.Field(j).Name) {
							metadataValue[k.String()] = snmpMap.MapIndex(snmpKeys[i]).Interface()
							continue outerSNMP
						}
					}
					providerValue[k.String()] = snmpMap.MapIndex(snmpKeys[i]).Interface()
				}

				providerValue["type"] = "snmp"
				metadataValue["provider"] = providerValue
				from.SetMapIndex(reflect.ValueOf("metadata"), reflect.ValueOf(metadataValue))
				from.SetMapIndex(*snmpKey, reflect.Value{})
			}
		}

		// bmp → routing
		{
			var bmpKey, routingKey *reflect.Value
			fromKeys := from.MapKeys()
			for i, k := range fromKeys {
				k = helpers.ElemOrIdentity(k)
				if k.Kind() != reflect.String {
					return from.Interface(), nil
				}
				if helpers.MapStructureMatchName(k.String(), "Bmp") {
					bmpKey = &fromKeys[i]
				} else if helpers.MapStructureMatchName(k.String(), "Routing") {
					routingKey = &fromKeys[i]
				}
			}
			if bmpKey != nil {
				if routingKey != nil {
					return nil, fmt.Errorf("cannot have both %q and %q", bmpKey.String(), routingKey.String())
				}

				// Build the routing configuration
				providerValue := gin.H{}
				routingValue := gin.H{}
				// Dispatch values from bmp key into routing
				bmpMap := helpers.ElemOrIdentity(from.MapIndex(*bmpKey))
				bmpKeys := bmpMap.MapKeys()
			outerBMP:
				for i, k := range bmpKeys {
					k = helpers.ElemOrIdentity(k)
					if k.Kind() != reflect.String {
						continue
					}
					routingConfig := reflect.TypeOf(routing.Configuration{})
					for j := range routingConfig.NumField() {
						if helpers.MapStructureMatchName(k.String(), routingConfig.Field(j).Name) {
							routingValue[k.String()] = bmpMap.MapIndex(bmpKeys[i]).Interface()
							continue outerBMP
						}
					}
					providerValue[k.String()] = bmpMap.MapIndex(bmpKeys[i]).Interface()
				}

				providerValue["type"] = "bmp"
				routingValue["provider"] = providerValue
				from.SetMapIndex(reflect.ValueOf("routing"), reflect.ValueOf(routingValue))
				from.SetMapIndex(*bmpKey, reflect.Value{})
			}
		}

		return from.Interface(), nil
	}
}

func init() {
	helpers.RegisterMapstructureUnmarshallerHook(OutletConfigurationUnmarshallerHook())
}
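The practical effect of this hook is easiest to see on a configuration fragment. The sketch below is illustrative only: the map literals are invented, but the dispatch mirrors the code above — known metadata.Configuration fields move under "metadata", "PollerCoalesce" is renamed to "MaxBatchRequests", and everything else lands in the nested "provider" with type "snmp".

package main

import "fmt"

func main() {
	// A legacy inlet-style configuration using the old "snmp" key
	// (hypothetical values, in the spirit of the test fixtures below):
	legacy := map[string]any{
		"snmp": map[string]any{
			"workers":        10, // known metadata field: moved to metadata
			"pollercoalesce": 20, // renamed to maxbatchrequests
			"pollerretries":  1,  // provider-specific: moved under provider
		},
	}
	// After the hook runs, decoding proceeds as if the user had written:
	migrated := map[string]any{
		"metadata": map[string]any{
			"workers":          10,
			"maxbatchrequests": 20,
			"provider": map[string]any{
				"type":          "snmp",
				"pollerretries": 1,
			},
		},
	}
	fmt.Println(legacy, migrated)
}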
31
cmd/outlet_test.go
Normal file
@@ -0,0 +1,31 @@
// SPDX-FileCopyrightText: 2022 Free Mobile
// SPDX-License-Identifier: AGPL-3.0-only

package cmd

import (
	"bytes"
	"testing"

	"akvorado/common/reporter"
)

func TestOutletStart(t *testing.T) {
	r := reporter.NewMock(t)
	config := OutletConfiguration{}
	config.Reset()
	if err := outletStart(r, config, true); err != nil {
		t.Fatalf("outletStart() error:\n%+v", err)
	}
}

func TestOutlet(t *testing.T) {
	root := RootCmd
	buf := new(bytes.Buffer)
	root.SetOut(buf)
	root.SetArgs([]string{"outlet", "--check", "/dev/null"})
	err := root.Execute()
	if err != nil {
		t.Errorf("`outlet` error:\n%+v", err)
	}
}
@@ -1,6 +1,6 @@
 ---
 paths:
-  inlet.0.routing:
+  outlet.0.routing:
     provider:
       type: bmp
       listen: 127.0.0.1:1179
@@ -13,8 +13,8 @@ paths:
       ribpeerremovalmaxqueue: 10000
       ribpeerremovalmaxtime: 100ms
       ribpeerremovalsleepinterval: 500ms
-  inlet.0.core.asnproviders:
+  outlet.0.core.asnproviders:
     - flow
     - routing
-  inlet.0.core.netproviders:
+  outlet.0.core.netproviders:
     - routing
@@ -1,5 +1,5 @@
 ---
 paths:
-  inlet.0.core.asnproviders:
+  outlet.0.core.asnproviders:
     - routing
     - geo-ip
@@ -4,5 +4,5 @@ paths:
     - 127.0.0.1:9092
   inlet.0.kafka.brokers:
     - 127.0.0.1:9092
-  clickhouse.kafka.brokers:
+  outlet.0.kafka.brokers:
     - 127.0.0.1:9092
@@ -1,6 +1,6 @@
 ---
 paths:
-  inlet.0.metadata.providers:
+  outlet.0.metadata.providers:
     - type: gnmi
       timeout: "1s"
       minimalrefreshinterval: "1m0s"
@@ -1,6 +1,6 @@
 ---
 paths:
-  inlet.0.schema:
+  outlet.0.schema:
     customdictionaries:
       test:
         source: test.csv
@@ -45,4 +45,4 @@ paths:
     enabled: []
     materialize: []
     maintableonly: []
    notmaintableonly: []
@@ -1,6 +1,6 @@
 ---
 paths:
-  inlet.0.schema:
+  outlet.0.schema:
     customdictionaries: {}
     disabled:
       - SrcCountry
@@ -45,7 +45,7 @@ paths:
     - kafka:9092
   inlet.0.kafka.brokers:
     - kafka:9092
-  clickhouse.kafka.brokers:
+  outlet.0.kafka.brokers:
     - kafka:9092
   console.0.clickhouse.servers:
     - clickhouse:9000
@@ -1,6 +1,6 @@
 ---
 paths:
-  inlet.0.metadata.providers:
+  outlet.0.metadata.providers:
     - type: snmp
       pollerretries: 1
       pollertimeout: 1s
@@ -1,6 +1,6 @@
 ---
 paths:
-  inlet.0.metadata.providers:
+  outlet.0.metadata.providers:
     - type: snmp
       pollerretries: 1
       pollertimeout: 1s
@@ -1,4 +1,4 @@
 ---
 paths:
-  inlet.0.metadata.providers.0.ports:
+  outlet.0.metadata.providers.0.ports:
     ::/0: 1611
@@ -1,6 +1,6 @@
 ---
 paths:
-  inlet.0.metadata:
+  outlet.0.metadata:
     workers: 10
     maxbatchrequests: 20
     cacheduration: 30m0s
@@ -6,6 +6,8 @@ package clickhousedb
 import (
 	"time"
 
+	"github.com/ClickHouse/ch-go"
+
 	"akvorado/common/helpers"
 )
 
@@ -55,3 +57,19 @@ func (c *Component) ClusterName() string {
 func (c *Component) DatabaseName() string {
 	return c.config.Database
 }
+
+// ChGoOptions returns options suitable to use with ch-go and the list of
+// available servers.
+func (c *Component) ChGoOptions() (ch.Options, []string) {
+	tlsConfig, _ := c.config.TLS.MakeTLSConfig()
+	return ch.Options{
+		Address:     c.config.Servers[0],
+		Database:    c.config.Database,
+		User:        c.config.Username,
+		Password:    c.config.Password,
+		Compression: ch.CompressionLZ4,
+		ClientName:  "akvorado",
+		DialTimeout: c.config.DialTimeout,
+		TLS:         tlsConfig,
+	}, c.config.Servers
+}
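As a hint of intended usage (a sketch only: the connect helper and its fallback loop are assumptions, not code from this commit; ch.Dial is the real ch-go entry point, and context/clickhousedb imports are implied):

// connect dials ClickHouse with the options built by the component,
// trying each configured server in turn (assumed fallback strategy).
func connect(ctx context.Context, c *clickhousedb.Component) (*ch.Client, error) {
	opts, servers := c.ChGoOptions()
	var lastErr error
	for _, server := range servers {
		opts.Address = server
		client, err := ch.Dial(ctx, opts)
		if err == nil {
			return client, nil
		}
		lastErr = err
	}
	return nil, lastErr
}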
@@ -3,9 +3,24 @@
 
 package helpers
+
+import "net"
 
 const (
 	// ETypeIPv4 is the ether type for IPv4
 	ETypeIPv4 = 0x800
 	// ETypeIPv6 is the ether type for IPv6
 	ETypeIPv6 = 0x86dd
 )
+
+// MACToUint64 converts a MAC address to an uint64
+func MACToUint64(mac net.HardwareAddr) uint64 {
+	if len(mac) != 6 {
+		return 0
+	}
+	return uint64(mac[0])<<40 |
+		uint64(mac[1])<<32 |
+		uint64(mac[2])<<24 |
+		uint64(mac[3])<<16 |
+		uint64(mac[4])<<8 |
+		uint64(mac[5])
+}
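A quick sanity check of the new helper (a hypothetical test, not part of the commit; net.ParseMAC is the standard-library parser):

func TestMACToUint64Example(t *testing.T) {
	mac, _ := net.ParseMAC("00:00:5e:00:53:01")
	// The six bytes are packed big-endian into the low 48 bits.
	if got := MACToUint64(mac); got != 0x00005e005301 {
		t.Errorf("MACToUint64() == %#x", got)
	}
}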
78
common/pb/rawflow.go
Normal file
@@ -0,0 +1,78 @@
// SPDX-FileCopyrightText: 2024 Free Mobile
// SPDX-License-Identifier: AGPL-3.0-only

// Package pb contains the definition of RawFlow, the protobuf-based
// structure to exchange flows between the inlet and the outlet.
package pb

import (
	"errors"
	"fmt"

	"akvorado/common/helpers/bimap"
)

// Version is the version of the schema. On incompatible changes, this should be
// bumped.
var Version = 5

var decoderMap = bimap.New(map[RawFlow_Decoder]string{
	RawFlow_DECODER_NETFLOW: "netflow",
	RawFlow_DECODER_SFLOW:   "sflow",
})

// MarshalText turns a decoder to text
func (d RawFlow_Decoder) MarshalText() ([]byte, error) {
	got, ok := decoderMap.LoadValue(d)
	if ok {
		return []byte(got), nil
	}
	return nil, errors.New("unknown decoder")
}

// UnmarshalText provides a decoder from text
func (d *RawFlow_Decoder) UnmarshalText(input []byte) error {
	if len(input) == 0 {
		*d = RawFlow_DECODER_UNSPECIFIED
		return nil
	}
	got, ok := decoderMap.LoadKey(string(input))
	if ok {
		*d = got
		return nil
	}
	return errors.New("unknown decoder")
}

var tsMap = bimap.New(map[RawFlow_TimestampSource]string{
	RawFlow_TS_INPUT:                  "input", // this is the default value
	RawFlow_TS_NETFLOW_FIRST_SWITCHED: "netflow-first-switched",
	RawFlow_TS_NETFLOW_PACKET:         "netflow-packet",
})

// MarshalText turns a timestamp source to text
func (ts RawFlow_TimestampSource) MarshalText() ([]byte, error) {
	got, ok := tsMap.LoadValue(ts)
	if ok {
		return []byte(got), nil
	}
	return nil, errors.New("unknown timestamp source")
}

// UnmarshalText provides a timestamp source from text
func (ts *RawFlow_TimestampSource) UnmarshalText(input []byte) error {
	if len(input) == 0 {
		*ts = RawFlow_TS_INPUT
		return nil
	}
	if string(input) == "udp" {
		*ts = RawFlow_TS_INPUT
		return nil
	}
	got, ok := tsMap.LoadKey(string(input))
	if ok {
		*ts = got
		return nil
	}
	return fmt.Errorf("unknown timestamp source %q", string(input))
}
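An illustrative round-trip through the text (un)marshalling above (hypothetical snippet; RawFlow_DECODER_NETFLOW is the generated enum constant used by the file itself):

var d pb.RawFlow_Decoder
if err := d.UnmarshalText([]byte("netflow")); err != nil {
	panic(err)
}
text, _ := d.MarshalText()
fmt.Printf("%v %s\n", d == pb.RawFlow_DECODER_NETFLOW, text) // true netflow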
26
common/pb/rawflow.proto
Normal file
@@ -0,0 +1,26 @@
syntax = "proto3";
package input;
option go_package = "akvorado/common/pb";

// RawFlow is an undecoded flow with some options.
message RawFlow {
  uint64 time_received = 1; // when the flow was received
  bytes payload = 2; // payload of the flow
  bytes source_address = 3; // source IPv6 address
  bool use_source_address = 4; // use source address as exporter address

  // Decoding options
  enum Decoder {
    DECODER_UNSPECIFIED = 0;
    DECODER_NETFLOW = 1;
    DECODER_SFLOW = 2;
    DECODER_GOB = 3;
  }
  enum TimestampSource {
    TS_INPUT = 0;
    TS_NETFLOW_PACKET = 1;
    TS_NETFLOW_FIRST_SWITCHED = 2;
  }
  Decoder decoder = 5;
  TimestampSource timestamp_source = 6;
}
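On the inlet side, a worker presumably fills one of these messages per received datagram before producing it to Kafka. A minimal sketch, assuming the standard protoc-gen-go output for the message above (the buildRawFlow helper and its arguments are invented; the field names follow the usual generation rules and the enum constants match those used in rawflow.go):

// buildRawFlow sketches the inlet-side encoding step: payload is the raw
// UDP datagram, source the 16-byte IPv6 form of the sender address.
func buildRawFlow(payload, source []byte) ([]byte, error) {
	raw := &pb.RawFlow{
		TimeReceived:     uint64(time.Now().Unix()),
		Payload:          payload,
		SourceAddress:    source,
		UseSourceAddress: false,
		Decoder:          pb.RawFlow_DECODER_NETFLOW,
		TimestampSource:  pb.RawFlow_TS_INPUT,
	}
	return proto.Marshal(raw) // bytes to publish on the Kafka topic
}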
@@ -4,9 +4,15 @@
 package schema
 
 import (
+	"encoding/base32"
 	"fmt"
+	"hash/fnv"
+	"net/netip"
 	"slices"
 	"strings"
+	"time"
+
+	"github.com/ClickHouse/ch-go/proto"
 )
 
 // ClickHouseDefinition turns a column into a declaration for ClickHouse
@@ -21,6 +27,33 @@ func (column Column) ClickHouseDefinition() string {
 	return strings.Join(result, " ")
 }
 
+// newProtoColumn turns a column into its proto.Column definition
+func (column Column) newProtoColumn() proto.Column {
+	if strings.HasPrefix(column.ClickHouseType, "Enum8(") {
+		// Enum8 is a special case. We do not want to use ColAuto as it comes
+		// with a performance penalty due to conversion between key values.
+		return new(proto.ColEnum8)
+	}
+
+	col := &proto.ColAuto{}
+	err := col.Infer(proto.ColumnType(column.ClickHouseType))
+	if err != nil {
+		panic(fmt.Sprintf("unhandled ClickHouse type %q", column.ClickHouseType))
+	}
+	return col.Data
+}
+
+// wrapProtoColumn optionally wraps the proto.Column for use in proto.Input
+func (column Column) wrapProtoColumn(in proto.Column) proto.Column {
+	if strings.HasPrefix(column.ClickHouseType, "Enum8(") {
+		// Enum8 is a special case. See above.
+		ddl := column.ClickHouseType[6 : len(column.ClickHouseType)-1]
+		return proto.Wrap(in, ddl)
+	}
+
+	return in
+}
+
 // ClickHouseTableOption is an option to alter the values returned by ClickHouseCreateTable() and ClickHouseSelectColumns().
 type ClickHouseTableOption int
 
@@ -29,18 +62,12 @@ const (
 	ClickHouseSkipMainOnlyColumns ClickHouseTableOption = iota
 	// ClickHouseSkipGeneratedColumns skips the columns with a GenerateFrom value
 	ClickHouseSkipGeneratedColumns
-	// ClickHouseSkipTransformColumns skips the columns with a TransformFrom value
-	ClickHouseSkipTransformColumns
 	// ClickHouseSkipAliasedColumns skips the columns with a Alias value
 	ClickHouseSkipAliasedColumns
 	// ClickHouseSkipTimeReceived skips the time received column
 	ClickHouseSkipTimeReceived
-	// ClickHouseUseTransformFromType uses the type from TransformFrom if any
-	ClickHouseUseTransformFromType
 	// ClickHouseSubstituteGenerates changes the column name to use the default generated value
 	ClickHouseSubstituteGenerates
-	// ClickHouseSubstituteTransforms changes the column name to use the transformed value
-	ClickHouseSubstituteTransforms
 )
 
 // ClickHouseCreateTable returns the columns for the CREATE TABLE clause in ClickHouse.
@@ -72,27 +99,12 @@ func (schema Schema) clickhouseIterate(fn func(Column), options ...ClickHouseTab
 		if slices.Contains(options, ClickHouseSkipGeneratedColumns) && column.ClickHouseGenerateFrom != "" && !column.ClickHouseSelfGenerated {
 			continue
 		}
-		if slices.Contains(options, ClickHouseSkipTransformColumns) && column.ClickHouseTransformFrom != nil {
-			continue
-		}
 		if slices.Contains(options, ClickHouseSkipAliasedColumns) && column.ClickHouseAlias != "" {
 			continue
 		}
-		if slices.Contains(options, ClickHouseUseTransformFromType) && column.ClickHouseTransformFrom != nil {
-			for _, ocol := range column.ClickHouseTransformFrom {
-				// We assume we only need to use name/type
-				column.Name = ocol.Name
-				column.ClickHouseType = ocol.ClickHouseType
-				fn(column)
-			}
-			continue
-		}
 		if slices.Contains(options, ClickHouseSubstituteGenerates) && column.ClickHouseGenerateFrom != "" {
 			column.Name = fmt.Sprintf("%s AS %s", column.ClickHouseGenerateFrom, column.Name)
 		}
-		if slices.Contains(options, ClickHouseSubstituteTransforms) && column.ClickHouseTransformFrom != nil {
-			column.Name = fmt.Sprintf("%s AS %s", column.ClickHouseTransformTo, column.Name)
-		}
 		fn(column)
 	}
 }
@@ -119,3 +131,256 @@ func (schema Schema) ClickHousePrimaryKeys() []string {
 	}
 	return cols
 }
+
+// ClickHouseHash returns a hash of the input table in ClickHouse
+func (schema Schema) ClickHouseHash() string {
+	hash := fnv.New128()
+	create := schema.ClickHouseCreateTable(ClickHouseSkipGeneratedColumns, ClickHouseSkipAliasedColumns)
+	hash.Write([]byte(create))
+	hashString := base32.StdEncoding.WithPadding(base32.NoPadding).EncodeToString(hash.Sum(nil))
+	return fmt.Sprintf("%sv5", hashString)
+}
+
+// AppendDateTime adds a DateTime value to the provided column
+func (bf *FlowMessage) AppendDateTime(columnKey ColumnKey, value uint32) {
+	col := bf.batch.columns[columnKey]
+	if value == 0 || col == nil || bf.batch.columnSet.Test(uint(columnKey)) {
+		return
+	}
+	bf.batch.columnSet.Set(uint(columnKey))
+	col.(*proto.ColDateTime).AppendRaw(proto.DateTime(value))
+	bf.appendDebug(columnKey, value)
+}
+
+// AppendUint adds an UInt64/32/16/8 or Enum8 value to the provided column
+func (bf *FlowMessage) AppendUint(columnKey ColumnKey, value uint64) {
+	col := bf.batch.columns[columnKey]
+	if value == 0 || col == nil || bf.batch.columnSet.Test(uint(columnKey)) {
+		return
+	}
+	switch col := col.(type) {
+	case *proto.ColUInt64:
+		col.Append(value)
+	case *proto.ColUInt32:
+		col.Append(uint32(value))
+	case *proto.ColUInt16:
+		col.Append(uint16(value))
+	case *proto.ColUInt8:
+		col.Append(uint8(value))
+	case *proto.ColEnum8:
+		col.Append(proto.Enum8(value))
+	default:
+		panic(fmt.Sprintf("unhandled uint type %q", col.Type()))
+	}
+	bf.batch.columnSet.Set(uint(columnKey))
+	bf.appendDebug(columnKey, value)
+}
+
+// AppendString adds a String value to the provided column
+func (bf *FlowMessage) AppendString(columnKey ColumnKey, value string) {
+	col := bf.batch.columns[columnKey]
+	if value == "" || col == nil || bf.batch.columnSet.Test(uint(columnKey)) {
+		return
+	}
+	switch col := col.(type) {
+	case *proto.ColLowCardinality[string]:
+		col.Append(value)
+	default:
+		panic(fmt.Sprintf("unhandled string type %q", col.Type()))
+	}
+	bf.batch.columnSet.Set(uint(columnKey))
+	bf.appendDebug(columnKey, value)
+}
+
+// AppendIPv6 adds an IPv6 value to the provided column
+func (bf *FlowMessage) AppendIPv6(columnKey ColumnKey, value netip.Addr) {
+	col := bf.batch.columns[columnKey]
+	if !value.IsValid() || col == nil || bf.batch.columnSet.Test(uint(columnKey)) {
+		return
+	}
+	switch col := col.(type) {
+	case *proto.ColIPv6:
+		col.Append(value.As16())
+	case *proto.ColLowCardinality[proto.IPv6]:
+		col.Append(value.As16())
+	default:
+		panic(fmt.Sprintf("unhandled IPv6 type %q", col.Type()))
+	}
+	bf.batch.columnSet.Set(uint(columnKey))
+	bf.appendDebug(columnKey, value)
+}
+
+// AppendArrayUInt32 adds an Array(UInt32) value to the provided column
+func (bf *FlowMessage) AppendArrayUInt32(columnKey ColumnKey, value []uint32) {
+	col := bf.batch.columns[columnKey]
+	if len(value) == 0 || col == nil || bf.batch.columnSet.Test(uint(columnKey)) {
+		return
+	}
+	bf.batch.columnSet.Set(uint(columnKey))
+	col.(*proto.ColArr[uint32]).Append(value)
+	bf.appendDebug(columnKey, value)
+}
+
+// AppendArrayUInt128 adds an Array(UInt128) value to the provided column
+func (bf *FlowMessage) AppendArrayUInt128(columnKey ColumnKey, value []UInt128) {
+	col := bf.batch.columns[columnKey]
+	if len(value) == 0 || col == nil || bf.batch.columnSet.Test(uint(columnKey)) {
+		return
+	}
+	bf.batch.columnSet.Set(uint(columnKey))
+	col.(*proto.ColArr[proto.UInt128]).Append(value)
+	bf.appendDebug(columnKey, value)
+}
+
+func (bf *FlowMessage) appendDebug(columnKey ColumnKey, value any) {
+	if !debug {
+		return
+	}
+	if bf.OtherColumns == nil {
+		bf.OtherColumns = make(map[ColumnKey]any)
+	}
+	bf.OtherColumns[columnKey] = value
+}
+
+// check executes some sanity checks when in debug mode. It should be called
+// only after finalization.
+func (bf *FlowMessage) check() {
+	if !debug {
+		return
+	}
+	if debug {
+		// Check that all columns have the right amount of rows
+		for idx, col := range bf.batch.columns {
+			if col == nil {
+				continue
+			}
+			if col.Rows() != bf.batch.rowCount {
+				panic(fmt.Sprintf("row %s has a count of %d instead of %d", ColumnKey(idx), col.Rows(), bf.batch.rowCount))
+			}
+		}
+	}
+}
+
+// appendDefaultValues appends a default/zero value to each column not set yet.
+func (bf *FlowMessage) appendDefaultValues() {
+	for idx, col := range bf.batch.columns {
+		// Skip unpopulated columns
+		if col == nil {
+			continue
+		}
+		// Or columns already set
+		if bf.batch.columnSet.Test(uint(idx)) {
+			continue
+		}
+		// Put the default value depending on the real type
+		switch col := col.(type) {
+		case *proto.ColUInt64:
+			col.Append(0)
+		case *proto.ColUInt32:
+			col.Append(0)
+		case *proto.ColUInt16:
+			col.Append(0)
+		case *proto.ColUInt8:
+			col.Append(0)
+		case *proto.ColIPv6:
+			col.Append([16]byte{})
+		case *proto.ColDateTime:
+			col.Append(time.Unix(0, 0))
+		case *proto.ColEnum8:
+			col.Append(0)
+		case *proto.ColLowCardinality[string]:
+			col.Append("")
+		case *proto.ColLowCardinality[proto.IPv6]:
+			col.Append(proto.IPv6{})
+		case *proto.ColArr[uint32]:
+			col.Append([]uint32{})
+		case *proto.ColArr[proto.UInt128]:
+			col.Append([]proto.UInt128{})
+		default:
+			panic(fmt.Sprintf("unhandled ClickHouse type %q", col.Type()))
+		}
+	}
+}
+
+// Undo reverts the current changes. This should revert the various Append() functions.
+func (bf *FlowMessage) Undo() {
+	for idx, col := range bf.batch.columns {
+		if col == nil {
+			continue
+		}
+		if !bf.batch.columnSet.Test(uint(idx)) {
+			continue
+		}
+		switch col := col.(type) {
+		case *proto.ColUInt64:
+			*col = (*col)[:len(*col)-1]
+		case *proto.ColUInt32:
+			*col = (*col)[:len(*col)-1]
+		case *proto.ColUInt16:
+			*col = (*col)[:len(*col)-1]
+		case *proto.ColUInt8:
+			*col = (*col)[:len(*col)-1]
+		case *proto.ColIPv6:
+			*col = (*col)[:len(*col)-1]
+		case *proto.ColDateTime:
+			col.Data = col.Data[:len(col.Data)-1]
+		case *proto.ColEnum8:
+			*col = (*col)[:len(*col)-1]
+		case *proto.ColLowCardinality[string]:
+			col.Values = col.Values[:len(col.Values)-1]
+		case *proto.ColLowCardinality[proto.IPv6]:
+			col.Values = col.Values[:len(col.Values)-1]
+		case *proto.ColArr[uint32]:
+			l := len(col.Offsets)
+			if l > 0 {
+				start := uint64(0)
+				if l > 1 {
+					start = col.Offsets[l-2]
+				}
+				data := col.Data.(*proto.ColUInt32)
+				*data = (*data)[:start]
+				col.Data = data
+				col.Offsets = col.Offsets[:l-1]
+			}
+		case *proto.ColArr[proto.UInt128]:
+			l := len(col.Offsets)
+			if l > 0 {
+				start := uint64(0)
+				if l > 1 {
+					start = col.Offsets[l-2]
+				}
+				data := col.Data.(*proto.ColUInt128)
+				*data = (*data)[:start]
+				col.Data = data
+				col.Offsets = col.Offsets[:l-1]
+			}
+		default:
+			panic(fmt.Sprintf("unhandled ClickHouse type %q", col.Type()))
+		}
+	}
+	bf.reset()
+}
+
+// Finalize finalizes the current FlowMessage. It can then be reused for the
+// next one. It is crucial to always call Finalize, otherwise the batch could be
+// faulty.
+func (bf *FlowMessage) Finalize() {
+	bf.AppendDateTime(ColumnTimeReceived, bf.TimeReceived)
+	bf.AppendUint(ColumnSamplingRate, bf.SamplingRate)
+	bf.AppendIPv6(ColumnExporterAddress, bf.ExporterAddress)
+	bf.AppendUint(ColumnSrcAS, uint64(bf.SrcAS))
+	bf.AppendUint(ColumnDstAS, uint64(bf.DstAS))
+	bf.AppendUint(ColumnSrcNetMask, uint64(bf.SrcNetMask))
+	bf.AppendUint(ColumnDstNetMask, uint64(bf.DstNetMask))
+	bf.AppendIPv6(ColumnSrcAddr, bf.SrcAddr)
+	bf.AppendIPv6(ColumnDstAddr, bf.DstAddr)
+	bf.AppendIPv6(ColumnNextHop, bf.NextHop)
+	if !bf.schema.IsDisabled(ColumnGroupL2) {
+		bf.AppendUint(ColumnSrcVlan, uint64(bf.SrcVlan))
+		bf.AppendUint(ColumnDstVlan, uint64(bf.DstVlan))
+	}
+	bf.batch.rowCount++
+	bf.appendDefaultValues()
+	bf.reset()
+	bf.check()
+}
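Before the test file, a condensed view of how a decoder is expected to drive this API (a sketch: schemaComponent, flows and its fields are invented for illustration; NewFlowMessage, the Append* helpers, Undo, Finalize and ClickHouseProtoInput are the real entry points exercised by the tests below):

bf := schemaComponent.NewFlowMessage() // one reusable message per worker
input := bf.ClickHouseProtoInput()     // proto.Input backing the batch

for _, f := range flows {
	bf.TimeReceived = f.TimeReceived
	bf.ExporterAddress = f.Exporter
	bf.AppendUint(ColumnBytes, f.Bytes)
	bf.AppendUint(ColumnPackets, f.Packets)
	if f.Invalid {
		bf.Undo() // drop the partially appended row
		continue
	}
	bf.Finalize() // pads unset columns and seals the row
}
// input now holds one row per finalized flow, ready for a batch INSERT.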
609
common/schema/clickhouse_test.go
Normal file
@@ -0,0 +1,609 @@
// SPDX-FileCopyrightText: 2022 Free Mobile
// SPDX-License-Identifier: AGPL-3.0-only

package schema

import (
	"net/netip"
	"slices"
	"testing"

	"akvorado/common/helpers"

	"github.com/ClickHouse/ch-go/proto"
)

func TestAppendDefault(t *testing.T) {
	c := NewMock(t).EnableAllColumns()
	bf := c.NewFlowMessage()
	bf.Finalize()
	if bf.batch.rowCount != 1 {
		t.Errorf("rowCount should be 1, not %d", bf.batch.rowCount)
	}
	if bf.batch.columnSet.Any() {
		t.Error("columnSet should be empty after finalize")
	}
	for idx, col := range bf.batch.columns {
		if col == nil {
			continue
		}
		if col.Rows() != 1 {
			t.Errorf("column %q should be length 1", ColumnKey(idx))
		}
	}
}

func TestAppendBasics(t *testing.T) {
	c := NewMock(t)
	bf := c.NewFlowMessage()

	// Test basic append
	bf.AppendDateTime(ColumnTimeReceived, 1000)
	bf.AppendUint(ColumnSamplingRate, 20000)
	bf.AppendUint(ColumnDstAS, 65000)

	// Test zero value (should not append)
	bf.AppendUint(ColumnSrcAS, 0)

	// Test duplicate append
	bf.AppendUint(ColumnPackets, 100)
	bf.AppendUint(ColumnPackets, 200)

	expected := map[ColumnKey]any{
		ColumnTimeReceived: 1000,
		ColumnSamplingRate: 20000,
		ColumnDstAS:        65000,
		ColumnPackets:      100,
	}
	got := bf.OtherColumns
	if diff := helpers.Diff(got, expected); diff != "" {
		t.Errorf("Append() (-got, +want):\n%s", diff)
	}

	bf.Finalize()
	for idx, col := range bf.batch.columns {
		if col == nil {
			continue
		}
		if col.Rows() != 1 {
			t.Errorf("column %q should be length 1", ColumnKey(idx))
		}
	}
}

func TestAppendWithDisabledColumns(t *testing.T) {
	c := NewMock(t)
	bf := c.NewFlowMessage()

	// Try to append to a disabled column (L2 group is disabled by default in mock)
	bf.AppendUint(ColumnSrcVlan, 100)
	bf.Finalize()
}

func TestAppendArrayUInt32Columns(t *testing.T) {
	c := NewMock(t)
	bf := c.NewFlowMessage()
	bf.AppendArrayUInt32(ColumnDstASPath, []uint32{65400, 65500, 65001})
	bf.Finalize()
	bf.AppendArrayUInt32(ColumnDstASPath, []uint32{65403, 65503, 65003})
	bf.Finalize()

	// Verify column has data
	got := bf.batch.columns[ColumnDstASPath].(*proto.ColArr[uint32])
	expected := proto.ColArr[uint32]{
		Offsets: proto.ColUInt64{3, 6},
		Data:    &proto.ColUInt32{65400, 65500, 65001, 65403, 65503, 65003},
	}
	if diff := helpers.Diff(got, expected); diff != "" {
		t.Errorf("AppendArrayUInt32 (-got, +want):\n%s", diff)
	}
}

func TestAppendArrayUInt128Columns(t *testing.T) {
	c := NewMock(t)
	bf := c.NewFlowMessage()
	bf.AppendArrayUInt128(ColumnDstLargeCommunities, []UInt128{
		{
			High: (65401 << 32) + 100,
			Low:  200,
		},
		{
			High: (65401 << 32) + 100,
			Low:  201,
		},
	})
	bf.Finalize()

	got := bf.batch.columns[ColumnDstLargeCommunities].(*proto.ColArr[proto.UInt128])
	expected := proto.ColArr[proto.UInt128]{
		Offsets: proto.ColUInt64{2},
		Data: &proto.ColUInt128{
			{High: (65401 << 32) + 100, Low: 200},
			{High: (65401 << 32) + 100, Low: 201},
		},
	}
	if diff := helpers.Diff(got, expected); diff != "" {
		t.Errorf("AppendArrayUInt128 (-got, +want):\n%s", diff)
	}
}

func TestUndoUInt64(t *testing.T) {
	c := NewMock(t)
	bf := c.NewFlowMessage()

	// Add two values
	bf.AppendUint(ColumnBytes, 100)
	bf.AppendUint(ColumnPackets, 200)

	// Check we have the expected initial state
	bytesCol := bf.batch.columns[ColumnBytes].(*proto.ColUInt64)
	packetsCol := bf.batch.columns[ColumnPackets].(*proto.ColUInt64)

	expectedBytes := proto.ColUInt64{100}
	expectedPackets := proto.ColUInt64{200}

	if diff := helpers.Diff(bytesCol, expectedBytes); diff != "" {
		t.Errorf("Initial bytes column state (-got, +want):\n%s", diff)
	}
	if diff := helpers.Diff(packetsCol, expectedPackets); diff != "" {
		t.Errorf("Initial packets column state (-got, +want):\n%s", diff)
	}

	// Undo should remove the last appended values
	bf.Undo()

	expectedBytesAfter := proto.ColUInt64{}
	expectedPacketsAfter := proto.ColUInt64{}

	if diff := helpers.Diff(bytesCol, expectedBytesAfter); diff != "" {
		t.Errorf("Bytes column after undo (-got, +want):\n%s", diff)
	}
	if diff := helpers.Diff(packetsCol, expectedPacketsAfter); diff != "" {
		t.Errorf("Packets column after undo (-got, +want):\n%s", diff)
	}
}

func TestUndoUInt32(t *testing.T) {
	c := NewMock(t)
	bf := c.NewFlowMessage()

	// Add two values
	bf.AppendUint(ColumnSrcAS, 65001)
	bf.AppendUint(ColumnDstAS, 65002)

	// Check we have the expected initial state
	srcCol := bf.batch.columns[ColumnSrcAS].(*proto.ColUInt32)
	dstCol := bf.batch.columns[ColumnDstAS].(*proto.ColUInt32)

	expectedSrc := proto.ColUInt32{65001}
	expectedDst := proto.ColUInt32{65002}

	if diff := helpers.Diff(srcCol, expectedSrc); diff != "" {
		t.Errorf("Initial SrcAS column state (-got, +want):\n%s", diff)
	}
	if diff := helpers.Diff(dstCol, expectedDst); diff != "" {
		t.Errorf("Initial DstAS column state (-got, +want):\n%s", diff)
	}

	// Undo should remove the last appended values
	bf.Undo()

	expectedSrcAfter := proto.ColUInt32{}
	expectedDstAfter := proto.ColUInt32{}

	if diff := helpers.Diff(srcCol, expectedSrcAfter); diff != "" {
		t.Errorf("SrcAS column after undo (-got, +want):\n%s", diff)
	}
	if diff := helpers.Diff(dstCol, expectedDstAfter); diff != "" {
		t.Errorf("DstAS column after undo (-got, +want):\n%s", diff)
	}
}

func TestUndoUInt16(t *testing.T) {
	c := NewMock(t)
	bf := c.NewFlowMessage()

	// Add two values
	bf.AppendUint(ColumnSrcPort, 80)
	bf.AppendUint(ColumnDstPort, 443)

	// Check we have the expected initial state
	srcCol := bf.batch.columns[ColumnSrcPort].(*proto.ColUInt16)
	dstCol := bf.batch.columns[ColumnDstPort].(*proto.ColUInt16)

	expectedSrc := proto.ColUInt16{80}
	expectedDst := proto.ColUInt16{443}

	if diff := helpers.Diff(srcCol, expectedSrc); diff != "" {
		t.Errorf("Initial SrcPort column state (-got, +want):\n%s", diff)
	}
	if diff := helpers.Diff(dstCol, expectedDst); diff != "" {
		t.Errorf("Initial DstPort column state (-got, +want):\n%s", diff)
	}

	// Undo should remove the last appended values
	bf.Undo()

	expectedSrcAfter := proto.ColUInt16{}
	expectedDstAfter := proto.ColUInt16{}

	if diff := helpers.Diff(srcCol, expectedSrcAfter); diff != "" {
		t.Errorf("SrcPort column after undo (-got, +want):\n%s", diff)
	}
	if diff := helpers.Diff(dstCol, expectedDstAfter); diff != "" {
		t.Errorf("DstPort column after undo (-got, +want):\n%s", diff)
	}
}

func TestUndoUInt8(t *testing.T) {
	c := NewMock(t)
	bf := c.NewFlowMessage()

	// Add value
	bf.AppendUint(ColumnSrcNetMask, 6)

	// Check we have the expected initial state
	col := bf.batch.columns[ColumnSrcNetMask].(*proto.ColUInt8)
	expected := proto.ColUInt8{6}

	if diff := helpers.Diff(col, expected); diff != "" {
		t.Errorf("Initial Proto column state (-got, +want):\n%s", diff)
	}

	// Undo should remove the last appended value
	bf.Undo()

	expectedAfter := proto.ColUInt8{}

	if diff := helpers.Diff(col, expectedAfter); diff != "" {
		t.Errorf("Proto column after undo (-got, +want):\n%s", diff)
	}
}

func TestUndoIPv6(t *testing.T) {
	c := NewMock(t)
	bf := c.NewFlowMessage()

	// Add IPv6 values
	srcAddr := netip.MustParseAddr("2001:db8::1")
	dstAddr := netip.MustParseAddr("2001:db8::2")

	bf.AppendIPv6(ColumnSrcAddr, srcAddr)
	bf.AppendIPv6(ColumnDstAddr, dstAddr)

	// Check we have the expected initial state
	srcCol := bf.batch.columns[ColumnSrcAddr].(*proto.ColIPv6)
	dstCol := bf.batch.columns[ColumnDstAddr].(*proto.ColIPv6)

	expectedSrc := proto.ColIPv6{srcAddr.As16()}
	expectedDst := proto.ColIPv6{dstAddr.As16()}

	if diff := helpers.Diff(srcCol, expectedSrc); diff != "" {
		t.Errorf("Initial SrcAddr column state (-got, +want):\n%s", diff)
	}
	if diff := helpers.Diff(dstCol, expectedDst); diff != "" {
		t.Errorf("Initial DstAddr column state (-got, +want):\n%s", diff)
	}

	// Undo should remove the values
	bf.Undo()

	expectedSrcAfter := proto.ColIPv6{}
	expectedDstAfter := proto.ColIPv6{}

	if diff := helpers.Diff(srcCol, expectedSrcAfter); diff != "" {
		t.Errorf("SrcAddr column after undo (-got, +want):\n%s", diff)
	}
	if diff := helpers.Diff(dstCol, expectedDstAfter); diff != "" {
		t.Errorf("DstAddr column after undo (-got, +want):\n%s", diff)
	}
}

func TestUndoDateTime(t *testing.T) {
	c := NewMock(t)
	bf := c.NewFlowMessage()

	// Add DateTime value
	bf.AppendDateTime(ColumnTimeReceived, 1000)

	// Check we have the expected initial state
	col := bf.batch.columns[ColumnTimeReceived].(*proto.ColDateTime)
	expected := proto.ColDateTime{Data: []proto.DateTime{1000}}

	if diff := helpers.Diff(col, expected); diff != "" {
		t.Errorf("Initial TimeReceived column state (-got, +want):\n%s", diff)
	}

	// Undo should remove the value
	bf.Undo()

	expectedAfter := proto.ColDateTime{Data: []proto.DateTime{}}

	if diff := helpers.Diff(col, expectedAfter); diff != "" {
		t.Errorf("TimeReceived column after undo (-got, +want):\n%s", diff)
	}
}

func TestUndoEnum8(t *testing.T) {
	c := NewMock(t)
	bf := c.NewFlowMessage()

	// Add Enum8 value (using interface boundary enum)
	bf.AppendUint(ColumnInIfBoundary, uint64(InterfaceBoundaryExternal))

	// Check we have the expected initial state
	col := bf.batch.columns[ColumnInIfBoundary].(*proto.ColEnum8)
	expected := proto.ColEnum8{proto.Enum8(InterfaceBoundaryExternal)}

	if diff := helpers.Diff(col, expected); diff != "" {
		t.Errorf("Initial InIfBoundary column state (-got, +want):\n%s", diff)
	}

	// Undo should remove the value
	bf.Undo()

	expectedAfter := proto.ColEnum8{}

	if diff := helpers.Diff(col, expectedAfter); diff != "" {
		t.Errorf("InIfBoundary column after undo (-got, +want):\n%s", diff)
	}
}

func TestUndoLowCardinalityString(t *testing.T) {
	c := NewMock(t)
	bf := c.NewFlowMessage()

	// Add LowCardinality string values
	bf.AppendString(ColumnExporterName, "router1")
	bf.AppendString(ColumnExporterRole, "edge")

	// Check we have the expected initial state
	nameCol := bf.batch.columns[ColumnExporterName].(*proto.ColLowCardinality[string])
	roleCol := bf.batch.columns[ColumnExporterRole].(*proto.ColLowCardinality[string])

	expectedName := proto.ColLowCardinality[string]{Values: []string{"router1"}}
	expectedRole := proto.ColLowCardinality[string]{Values: []string{"edge"}}

	if diff := helpers.Diff(nameCol, expectedName); diff != "" {
		t.Errorf("Initial ExporterName column state (-got, +want):\n%s", diff)
	}
	if diff := helpers.Diff(roleCol, expectedRole); diff != "" {
		t.Errorf("Initial ExporterRole column state (-got, +want):\n%s", diff)
	}

	// Undo should remove the values
	bf.Undo()

	expectedNameAfter := proto.ColLowCardinality[string]{Values: []string{}}
	expectedRoleAfter := proto.ColLowCardinality[string]{Values: []string{}}

	if diff := helpers.Diff(nameCol, expectedNameAfter); diff != "" {
		t.Errorf("ExporterName column after undo (-got, +want):\n%s", diff)
	}
	if diff := helpers.Diff(roleCol, expectedRoleAfter); diff != "" {
		t.Errorf("ExporterRole column after undo (-got, +want):\n%s", diff)
	}
}

func TestUndoLowCardinalityIPv6(t *testing.T) {
	c := NewMock(t)
	bf := c.NewFlowMessage()

	// Add LowCardinality IPv6 value
	addr := netip.MustParseAddr("2001:db8::1")
	bf.AppendIPv6(ColumnExporterAddress, addr)

	// Check we have the expected initial state
	col := bf.batch.columns[ColumnExporterAddress].(*proto.ColLowCardinality[proto.IPv6])
	expected := proto.ColLowCardinality[proto.IPv6]{Values: []proto.IPv6{addr.As16()}}

	if diff := helpers.Diff(col, expected); diff != "" {
		t.Errorf("Initial ExporterAddress column state (-got, +want):\n%s", diff)
	}

	// Undo should remove the value
	bf.Undo()

	expectedAfter := proto.ColLowCardinality[proto.IPv6]{Values: []proto.IPv6{}}

	if diff := helpers.Diff(col, expectedAfter); diff != "" {
		t.Errorf("ExporterAddress column after undo (-got, +want):\n%s", diff)
	}
}

func TestUndoArrayUInt32(t *testing.T) {
	c := NewMock(t)

	t.Run("one value", func(t *testing.T) {
		bf := c.NewFlowMessage()
		bf.AppendArrayUInt32(ColumnDstASPath, []uint32{65001, 65002, 65003})

		// Check we have the expected initial state
		col := bf.batch.columns[ColumnDstASPath].(*proto.ColArr[uint32])
		expected := proto.ColArr[uint32]{
			Offsets: proto.ColUInt64{3},
			Data:    &proto.ColUInt32{65001, 65002, 65003},
		}

		if diff := helpers.Diff(*col, expected); diff != "" {
			t.Errorf("Initial DstASPath column state (-got, +want):\n%s", diff)
		}

		// Undo should remove the array
		bf.Undo()

		expectedAfter := proto.ColArr[uint32]{
			Offsets: proto.ColUInt64{},
			Data:    &proto.ColUInt32{},
		}

		if diff := helpers.Diff(*col, expectedAfter); diff != "" {
			t.Errorf("DstASPath column after undo (-got, +want):\n%s", diff)
		}
	})

	t.Run("two values", func(t *testing.T) {
		bf := c.NewFlowMessage()
		bf.AppendArrayUInt32(ColumnDstASPath, []uint32{65001, 65002, 65003})
		bf.Finalize()
		bf.AppendArrayUInt32(ColumnDstASPath, []uint32{65007, 65008})

		// Check we have the expected initial state
		col := bf.batch.columns[ColumnDstASPath].(*proto.ColArr[uint32])
		expected := proto.ColArr[uint32]{
			Offsets: proto.ColUInt64{3, 5},
			Data:    &proto.ColUInt32{65001, 65002, 65003, 65007, 65008},
		}

		if diff := helpers.Diff(*col, expected); diff != "" {
			t.Errorf("Initial DstASPath column state (-got, +want):\n%s", diff)
		}

		// Undo should remove the last array
		bf.Undo()

		expectedAfter := proto.ColArr[uint32]{
			Offsets: proto.ColUInt64{3},
			Data:    &proto.ColUInt32{65001, 65002, 65003},
		}

		if diff := helpers.Diff(*col, expectedAfter); diff != "" {
			t.Errorf("DstASPath column after undo (-got, +want):\n%s", diff)
		}
	})
}

func TestUndoArrayUInt128(t *testing.T) {
	c := NewMock(t)

	t.Run("one value", func(t *testing.T) {
		bf := c.NewFlowMessage()

		// Add Array(UInt128) value
		bf.AppendArrayUInt128(ColumnDstLargeCommunities, []UInt128{
			{High: (65401 << 32) + 100, Low: 200},
			{High: (65401 << 32) + 100, Low: 201},
		})

		// Check we have the expected initial state
		col := bf.batch.columns[ColumnDstLargeCommunities].(*proto.ColArr[proto.UInt128])
		expected := proto.ColArr[proto.UInt128]{
			Offsets: proto.ColUInt64{2},
			Data: &proto.ColUInt128{
				{High: (65401 << 32) + 100, Low: 200},
				{High: (65401 << 32) + 100, Low: 201},
			},
		}

		if diff := helpers.Diff(*col, expected); diff != "" {
			t.Errorf("Initial DstLargeCommunities column state (-got, +want):\n%s", diff)
		}

		// Undo should remove the array
		bf.Undo()

		expectedAfter := proto.ColArr[proto.UInt128]{
			Offsets: proto.ColUInt64{},
			Data:    &proto.ColUInt128{},
		}

		if diff := helpers.Diff(*col, expectedAfter); diff != "" {
			t.Errorf("DstLargeCommunities column after undo (-got, +want):\n%s", diff)
		}
	})

	t.Run("two values", func(t *testing.T) {
		bf := c.NewFlowMessage()

		// Add first Array(UInt128) value
		bf.AppendArrayUInt128(ColumnDstLargeCommunities, []UInt128{
			{High: (65401 << 32) + 100, Low: 200},
			{High: (65401 << 32) + 100, Low: 201},
		})
		bf.Finalize()

		// Add second Array(UInt128) value
		bf.AppendArrayUInt128(ColumnDstLargeCommunities, []UInt128{
			{High: (65402 << 32) + 100, Low: 300},
		})

		// Check we have the expected initial state
		col := bf.batch.columns[ColumnDstLargeCommunities].(*proto.ColArr[proto.UInt128])
		expected := proto.ColArr[proto.UInt128]{
			Offsets: proto.ColUInt64{2, 3},
			Data: &proto.ColUInt128{
				{High: (65401 << 32) + 100, Low: 200},
				{High: (65401 << 32) + 100, Low: 201},
				{High: (65402 << 32) + 100, Low: 300},
			},
		}

		if diff := helpers.Diff(*col, expected); diff != "" {
			t.Errorf("Initial DstLargeCommunities column state (-got, +want):\n%s", diff)
		}

		// Undo should remove the last array
		bf.Undo()

		expectedAfter := proto.ColArr[proto.UInt128]{
			Offsets: proto.ColUInt64{2},
			Data: &proto.ColUInt128{
				{High: (65401 << 32) + 100, Low: 200},
				{High: (65401 << 32) + 100, Low: 201},
			},
		}

		if diff := helpers.Diff(*col, expectedAfter); diff != "" {
			t.Errorf("DstLargeCommunities column after undo (-got, +want):\n%s", diff)
		}
	})
}

func TestBuildProtoInput(t *testing.T) {
	// Use a smaller version
	exporterAddress := netip.MustParseAddr("::ffff:203.0.113.14")
	c := NewMock(t)
	bf := c.NewFlowMessage()
	got := bf.ClickHouseProtoInput()

	bf.TimeReceived = 1000
	bf.SamplingRate = 20000
	bf.ExporterAddress = exporterAddress
	bf.AppendUint(ColumnDstAS, 65000)
	bf.AppendUint(ColumnBytes, 200)
	bf.AppendUint(ColumnPackets, 300)
	bf.Finalize()

	bf.Clear()

	bf.TimeReceived = 1002
	bf.ExporterAddress = exporterAddress
	bf.AppendUint(ColumnSrcAS, 65000)
	bf.AppendUint(ColumnBytes, 2000)
	bf.AppendUint(ColumnPackets, 30)
	bf.AppendUint(ColumnBytes, 300) // Duplicate!
	bf.Finalize()

	bf.TimeReceived = 1003
	bf.ExporterAddress = exporterAddress
	bf.AppendUint(ColumnSrcAS, 65001)
	bf.AppendUint(ColumnBytes, 202)
	bf.AppendUint(ColumnPackets, 3)
	bf.Finalize()

	// Let's compare a subset
	expected := proto.Input{
		{Name: "TimeReceived", Data: proto.ColDateTime{Data: []proto.DateTime{1002, 1003}}},
		{Name: "SrcAS", Data: proto.ColUInt32{65000, 65001}},
		{Name: "DstAS", Data: proto.ColUInt32{0, 0}},
		{Name: "Bytes", Data: proto.ColUInt64{2000, 202}},
		{Name: "Packets", Data: proto.ColUInt64{30, 3}},
	}
	got = slices.DeleteFunc(got, func(col proto.InputColumn) bool {
		return !slices.Contains([]string{"TimeReceived", "SrcAS", "DstAS", "Packets", "Bytes"}, col.Name)
	})
	if diff := helpers.Diff(got, expected); diff != "" {
		t.Fatalf("ClickHouseProtoInput() (-got, +want):\n%s", diff)
	}
}
@@ -12,8 +12,6 @@ import (
 	"akvorado/common/helpers/bimap"
 
 	"github.com/bits-and-blooms/bitset"
-	"google.golang.org/protobuf/encoding/protowire"
-	"google.golang.org/protobuf/reflect/protoreflect"
 )
 
 // InterfaceBoundary identifies whether the interface is facing inside or outside the network.
@@ -133,9 +131,6 @@ const (
 	ColumnDst3rdAS
 	ColumnDstCommunities
 	ColumnDstLargeCommunities
-	ColumnDstLargeCommunitiesASN
-	ColumnDstLargeCommunitiesLocalData1
-	ColumnDstLargeCommunitiesLocalData2
 	ColumnInIfName
 	ColumnOutIfName
 	ColumnInIfDescription
@@ -212,7 +207,6 @@ func flows() Schema {
 			ClickHouseType:      "DateTime",
 			ClickHouseCodec:     "DoubleDelta, LZ4",
 			ConsoleNotDimension: true,
-			ProtobufType:        protoreflect.Uint64Kind,
 		},
 		{Key: ColumnSamplingRate, NoDisable: true, ClickHouseType: "UInt64", ConsoleNotDimension: true},
 		{Key: ColumnExporterAddress, ParserType: "ip", ClickHouseType: "LowCardinality(IPv6)"},
@@ -385,25 +379,10 @@ END`,
 			ClickHouseType: "Array(UInt32)",
 		},
 		{
 			Key:                ColumnDstLargeCommunities,
 			ClickHouseMainOnly: true,
 			ClickHouseType:     "Array(UInt128)",
-			ClickHouseTransformFrom: []Column{
-				{
-					Key:            ColumnDstLargeCommunitiesASN,
-					ClickHouseType: "Array(UInt32)",
-				},
-				{
-					Key:            ColumnDstLargeCommunitiesLocalData1,
-					ClickHouseType: "Array(UInt32)",
-				},
-				{
-					Key:            ColumnDstLargeCommunitiesLocalData2,
-					ClickHouseType: "Array(UInt32)",
-				},
-			},
-			ClickHouseTransformTo: "arrayMap((asn, l1, l2) -> ((bitShiftLeft(CAST(asn, 'UInt128'), 64) + bitShiftLeft(CAST(l1, 'UInt128'), 32)) + CAST(l2, 'UInt128')), DstLargeCommunitiesASN, DstLargeCommunitiesLocalData1, DstLargeCommunitiesLocalData2)",
 			ConsoleNotDimension: true,
 		},
 		{Key: ColumnInIfName, ParserType: "string", ClickHouseType: "LowCardinality(String)"},
 		{Key: ColumnInIfDescription, ParserType: "string", ClickHouseType: "LowCardinality(String)", ClickHouseNotSortingKey: true},
@@ -414,13 +393,6 @@ END`,
 			Key:                     ColumnInIfBoundary,
 			ClickHouseType:          fmt.Sprintf("Enum8('undefined' = %d, 'external' = %d, 'internal' = %d)", InterfaceBoundaryUndefined, InterfaceBoundaryExternal, InterfaceBoundaryInternal),
 			ClickHouseNotSortingKey: true,
-			ProtobufType:            protoreflect.EnumKind,
-			ProtobufEnumName:        "Boundary",
-			ProtobufEnum: map[int]string{
-				int(InterfaceBoundaryUndefined): "UNDEFINED",
-				int(InterfaceBoundaryExternal):  "EXTERNAL",
-				int(InterfaceBoundaryInternal):  "INTERNAL",
-			},
 		},
 		{Key: ColumnEType, ClickHouseType: "UInt32"}, // TODO: UInt16 but hard to change, primary key
 		{Key: ColumnProto, ClickHouseType: "UInt32"}, // TODO: UInt8 but hard to change, primary key
@@ -574,9 +546,9 @@ END`,
 	}.finalize()
 }
 
-func (column *Column) shouldBeProto() bool {
-	return column.ClickHouseTransformFrom == nil &&
-		(column.ClickHouseGenerateFrom == "" || column.ClickHouseSelfGenerated) &&
+// shouldProvideValue tells if we should send a value for this column to ClickHouse.
+func (column *Column) shouldProvideValue() bool {
+	return (column.ClickHouseGenerateFrom == "" || column.ClickHouseSelfGenerated) &&
 		column.ClickHouseAlias == ""
 }
 
@@ -592,30 +564,12 @@ func (schema Schema) finalize() Schema {
 			column.Name = name
 		}
 
-		// Also true name for columns in ClickHouseTransformFrom
-		for idx, ecolumn := range column.ClickHouseTransformFrom {
-			if ecolumn.Name == "" {
-				name, ok := columnNameMap.LoadValue(ecolumn.Key)
-				if !ok {
-					panic(fmt.Sprintf("missing name mapping for %d", ecolumn.Key))
-				}
-				column.ClickHouseTransformFrom[idx].Name = name
-			}
-		}
-
 		// Non-main columns with an alias are NotSortingKey
 		if !column.ClickHouseMainOnly && column.ClickHouseAlias != "" {
 			column.ClickHouseNotSortingKey = true
 		}
 
-		// Transform implicit dependencies
-		for idx := range column.ClickHouseTransformFrom {
-			deps := column.ClickHouseTransformFrom[idx].Depends
-			deps = append(deps, column.Key)
-			slices.Sort(deps)
-			column.ClickHouseTransformFrom[idx].Depends = slices.Compact(deps)
-			column.Depends = append(column.Depends, column.ClickHouseTransformFrom[idx].Key)
-		}
+		// Deduplicate dependencies
 		slices.Sort(column.Depends)
 		column.Depends = slices.Compact(column.Depends)
 
@@ -639,7 +593,6 @@ func (schema Schema) finalize() Schema {
 					panic(fmt.Sprintf("missing name mapping for %q", column.Name))
 				}
 				column.ClickHouseAlias = strings.ReplaceAll(column.ClickHouseAlias, "Src", "Dst")
-				column.ClickHouseTransformFrom = slices.Clone(column.ClickHouseTransformFrom)
 				ncolumns = append(ncolumns, column)
 			}
 		} else if strings.HasPrefix(column.Name, "InIf") {
@@ -650,53 +603,12 @@ func (schema Schema) finalize() Schema {
 					panic(fmt.Sprintf("missing name mapping for %q", column.Name))
 				}
 				column.ClickHouseAlias = strings.ReplaceAll(column.ClickHouseAlias, "InIf", "OutIf")
-				column.ClickHouseTransformFrom = slices.Clone(column.ClickHouseTransformFrom)
 				ncolumns = append(ncolumns, column)
 			}
 		}
 	}
 	schema.columns = ncolumns
 
-	// Set Protobuf index and type
-	protobufIndex := 1
-	ncolumns = []Column{}
-	for _, column := range schema.columns {
-		pcolumns := []*Column{&column}
-		for idx := range column.ClickHouseTransformFrom {
-			pcolumns = append(pcolumns, &column.ClickHouseTransformFrom[idx])
-		}
-		for _, column := range pcolumns {
-			if column.ProtobufIndex == 0 {
-				if !column.shouldBeProto() {
-					column.ProtobufIndex = -1
-					continue
-				}
-
-				column.ProtobufIndex = protowire.Number(protobufIndex)
-				protobufIndex++
-			}
-
-			if column.ProtobufType == 0 &&
-				column.shouldBeProto() {
-				switch column.ClickHouseType {
-				case "String", "LowCardinality(String)", "FixedString(2)":
-					column.ProtobufType = protoreflect.StringKind
-				case "UInt64":
-					column.ProtobufType = protoreflect.Uint64Kind
-				case "UInt32", "UInt16", "UInt8":
-					column.ProtobufType = protoreflect.Uint32Kind
-				case "IPv6", "LowCardinality(IPv6)":
-					column.ProtobufType = protoreflect.BytesKind
-				case "Array(UInt32)":
-					column.ProtobufType = protoreflect.Uint32Kind
-					column.ProtobufRepeated = true
-				}
-			}
-		}
-		ncolumns = append(ncolumns, column)
-	}
-	schema.columns = ncolumns
-
 	// Build column index
 	maxKey := ColumnTimeReceived
 	for _, column := range schema.columns {
@@ -707,9 +619,6 @@ func (schema Schema) finalize() Schema {
 	schema.columnIndex = make([]*Column, maxKey+1)
 	for i, column := range schema.columns {
 		schema.columnIndex[column.Key] = &schema.columns[i]
-		for j, column := range column.ClickHouseTransformFrom {
-			schema.columnIndex[column.Key] = &schema.columns[i].ClickHouseTransformFrom[j]
-		}
 	}
 
 	// Update disabledGroups
@@ -22,17 +22,6 @@ func TestFlowsClickHouse(t *testing.T) {
 		}
 	}
 
-func TestFlowsProtobuf(t *testing.T) {
-	c := NewMock(t)
-	for _, column := range c.Columns() {
-		if column.ProtobufIndex >= 0 {
-			if column.ProtobufType == 0 {
-				t.Errorf("column %s has not protobuf type", column.Name)
-			}
-		}
-	}
-}
-
 func TestColumnIndex(t *testing.T) {
 	c := NewMock(t)
 	for i := ColumnTimeReceived; i < ColumnLast; i++ {
193	common/schema/insert_test.go	Normal file
@@ -0,0 +1,193 @@
+// SPDX-FileCopyrightText: 2025 Free Mobile
+// SPDX-License-Identifier: AGPL-3.0-only
+
+package schema_test
+
+import (
+	"context"
+	"encoding/json"
+	"fmt"
+	"io"
+	"net/netip"
+	"testing"
+	"time"
+
+	"github.com/ClickHouse/ch-go"
+	"github.com/ClickHouse/clickhouse-go/v2"
+
+	"akvorado/common/helpers"
+	"akvorado/common/schema"
+)
+
+func TestInsertMemory(t *testing.T) {
+	c := schema.NewMock(t)
+	bf := c.NewFlowMessage()
+	exporterAddress := netip.MustParseAddr("::ffff:203.0.113.14")
+
+	bf.TimeReceived = 1000
+	bf.SamplingRate = 20000
+	bf.ExporterAddress = exporterAddress
+	bf.AppendString(schema.ColumnExporterName, "router1.example.net")
+	bf.AppendUint(schema.ColumnSrcAS, 65000)
+	bf.AppendUint(schema.ColumnDstAS, 12322)
+	bf.AppendUint(schema.ColumnBytes, 20)
+	bf.AppendUint(schema.ColumnPackets, 3)
+	bf.AppendUint(schema.ColumnInIfBoundary, uint64(schema.InterfaceBoundaryInternal))
+	bf.AppendUint(schema.ColumnOutIfBoundary, uint64(schema.InterfaceBoundaryExternal))
+	bf.AppendUint(schema.ColumnInIfSpeed, 10000)
+	bf.AppendUint(schema.ColumnEType, helpers.ETypeIPv4)
+	bf.Finalize()
+
+	bf.TimeReceived = 1001
+	bf.SamplingRate = 20000
+	bf.ExporterAddress = exporterAddress
+	bf.AppendString(schema.ColumnExporterName, "router1.example.net")
+	bf.AppendUint(schema.ColumnSrcAS, 12322)
+	bf.AppendUint(schema.ColumnDstAS, 65000)
+	bf.AppendUint(schema.ColumnBytes, 200)
+	bf.AppendUint(schema.ColumnPackets, 3)
+	bf.AppendUint(schema.ColumnInIfBoundary, uint64(schema.InterfaceBoundaryExternal))
+	bf.AppendUint(schema.ColumnOutIfSpeed, 10000)
+	bf.AppendUint(schema.ColumnEType, helpers.ETypeIPv4)
+	bf.AppendArrayUInt32(schema.ColumnDstASPath, []uint32{65400, 65500, 65001})
+	bf.AppendArrayUInt128(schema.ColumnDstLargeCommunities, []schema.UInt128{
+		{
+			High: 65401,
+			Low:  (100 << 32) + 200,
+		},
+		{
+			High: 65401,
+			Low:  (100 << 32) + 201,
+		},
+	})
+	bf.Finalize()
+
+	server := helpers.CheckExternalService(t, "ClickHouse", []string{"clickhouse:9000", "127.0.0.1:9000"})
+	ctx := t.Context()
+
+	conn, err := ch.Dial(ctx, ch.Options{
+		Address:     server,
+		DialTimeout: 100 * time.Millisecond,
+		Settings: []ch.Setting{
+			{Key: "allow_suspicious_low_cardinality_types", Value: "1"},
+		},
+	})
+	if err != nil {
+		t.Fatalf("Dial() error:\n%+v", err)
+	}
+
+	// Create the table
+	q := fmt.Sprintf(
+		`CREATE OR REPLACE TABLE test_table_insert (%s) ENGINE = Memory`,
+		c.ClickHouseCreateTable(schema.ClickHouseSkipAliasedColumns, schema.ClickHouseSkipGeneratedColumns),
+	)
+	t.Logf("Query: %s", q)
+	if err := conn.Do(ctx, ch.Query{
+		Body: q,
+	}); err != nil {
+		t.Fatalf("Do() error:\n%+v", err)
+	}
+
+	// Insert
+	input := bf.ClickHouseProtoInput()
+	if err := conn.Do(ctx, ch.Query{
+		Body:  input.Into("test_table_insert"),
+		Input: input,
+		OnInput: func(ctx context.Context) error {
+			bf.Clear()
+			// No more data to send!
+			return io.EOF
+		},
+	}); err != nil {
+		t.Fatalf("Do() error:\n%+v", err)
+	}
+
+	// Check the result (with the full-featured client)
+	{
+		conn, err := clickhouse.Open(&clickhouse.Options{
+			Addr:        []string{server},
+			DialTimeout: 100 * time.Millisecond,
+		})
+		if err != nil {
+			t.Fatalf("clickhouse.Open() error:\n%+v", err)
+		}
+		// Use formatRow to get JSON representation
+		rows, err := conn.Query(ctx, "SELECT formatRow('JSONEachRow', *) FROM test_table_insert ORDER BY TimeReceived")
+		if err != nil {
+			t.Fatalf("clickhouse.Query() error:\n%+v", err)
+		}
+
+		var got []map[string]any
+		for rows.Next() {
+			var jsonRow string
+			if err := rows.Scan(&jsonRow); err != nil {
+				t.Fatalf("rows.Scan() error:\n%+v", err)
+			}
+
+			var row map[string]any
+			if err := json.Unmarshal([]byte(jsonRow), &row); err != nil {
+				t.Fatalf("json.Unmarshal() error:\n%+v", err)
+			}
+
+			// Remove fields with default values
+			for k, v := range row {
+				switch val := v.(type) {
+				case string:
+					if val == "" || val == "::" {
+						delete(row, k)
+					}
+				case float64:
+					if val == 0 {
+						delete(row, k)
+					}
+				case []any:
+					if len(val) == 0 {
+						delete(row, k)
+					}
+				}
+			}
+			got = append(got, row)
+		}
+		rows.Close()
+
+		expected := []map[string]any{
+			{
+				"TimeReceived":    "1970-01-01 00:16:40",
+				"SamplingRate":    "20000",
+				"ExporterAddress": "::ffff:203.0.113.14",
+				"ExporterName":    "router1.example.net",
+				"SrcAS":           65000,
+				"DstAS":           12322,
+				"Bytes":           "20",
+				"Packets":         "3",
+				"InIfBoundary":    "internal",
+				"OutIfBoundary":   "external",
+				"InIfSpeed":       10000,
+				"EType":           helpers.ETypeIPv4,
+			}, {
+				"TimeReceived":    "1970-01-01 00:16:41",
+				"SamplingRate":    "20000",
+				"ExporterAddress": "::ffff:203.0.113.14",
+				"ExporterName":    "router1.example.net",
+				"SrcAS":           12322,
+				"DstAS":           65000,
+				"Bytes":           "200",
+				"Packets":         "3",
+				"InIfBoundary":    "external",
+				"OutIfBoundary":   "undefined",
+				"OutIfSpeed":      10000,
+				"EType":           helpers.ETypeIPv4,
+				"DstASPath":       []uint32{65400, 65500, 65001},
+				"DstLargeCommunities": []string{
+					"1206435509165107881967816", // 65401:100:200
+					"1206435509165107881967817", // 65401:100:201
+				},
+			},
+		}
+
+		if diff := helpers.Diff(got, expected); diff != "" {
+			t.Errorf("Insert (-got, +want):\n%s", diff)
+		}
+	}
+
+}
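The two expected DstLargeCommunities strings follow the layout used throughout this change: a BGP large community asn:local1:local2 is packed as (asn << 64) + (local1 << 32) + local2, matching the arrayMap expression removed from the schema earlier in this diff and the High/Low halves set in the test above. A standalone check of the arithmetic (not part of the commit):

```go
package main

import (
	"fmt"
	"math/big"
)

// packLargeCommunity packs a BGP large community asn:local1:local2 into the
// UInt128 layout used by the DstLargeCommunities column:
// (asn << 64) + (local1 << 32) + local2.
func packLargeCommunity(asn, local1, local2 uint32) *big.Int {
	hi := new(big.Int).Lsh(big.NewInt(int64(asn)), 64)
	lo := new(big.Int).SetUint64(uint64(local1)<<32 | uint64(local2))
	return hi.Add(hi, lo)
}

func main() {
	fmt.Println(packLargeCommunity(65401, 100, 200)) // 1206435509165107881967816
	fmt.Println(packLargeCommunity(65401, 100, 201)) // 1206435509165107881967817
}
```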
110	common/schema/message.go	Normal file
@@ -0,0 +1,110 @@
+// SPDX-FileCopyrightText: 2025 Free Mobile
+// SPDX-License-Identifier: AGPL-3.0-only
+
+package schema
+
+import (
+	"net/netip"
+
+	"github.com/ClickHouse/ch-go/proto"
+	"github.com/bits-and-blooms/bitset"
+)
+
+// FlowMessage is the abstract representation of a flow through various subsystems.
+type FlowMessage struct {
+	TimeReceived uint32
+	SamplingRate uint64
+
+	// For exporter classifier
+	ExporterAddress netip.Addr
+
+	// For interface classifier
+	InIf    uint32
+	OutIf   uint32
+	SrcVlan uint16
+	DstVlan uint16
+
+	// For routing component
+	SrcAddr netip.Addr
+	DstAddr netip.Addr
+	NextHop netip.Addr
+
+	// Core component may override them
+	SrcAS      uint32
+	DstAS      uint32
+	SrcNetMask uint8
+	DstNetMask uint8
+
+	// Only for tests
+	OtherColumns map[ColumnKey]any
+
+	batch  clickhouseBatch
+	schema *Schema
+}
+
+// clickhouseBatch stores columns for efficient streaming. It is embedded
+// inside a FlowMessage.
+type clickhouseBatch struct {
+	columns   []proto.Column // Indexed by ColumnKey
+	columnSet bitset.BitSet  // Track which columns have been set
+	rowCount  int            // Number of rows accumulated
+	input     proto.Input    // Input including all columns to stream to ClickHouse
+}
+
+// reset resets a flow message. All public fields are set to 0,
+// but the current ClickHouse batch is left untouched.
+func (bf *FlowMessage) reset() {
+	*bf = FlowMessage{
+		batch:  bf.batch,
+		schema: bf.schema,
+	}
+	bf.batch.columnSet.ClearAll()
+}
+
+// Clear clears all column data.
+func (bf *FlowMessage) Clear() {
+	bf.reset()
+	bf.batch.input.Reset()
+	bf.batch.rowCount = 0
+}
+
+// ClickHouseProtoInput returns the proto.Input that can be used to stream results
+// to ClickHouse.
+func (bf *FlowMessage) ClickHouseProtoInput() proto.Input {
+	return bf.batch.input
+}
+
+// NewFlowMessage creates a new FlowMessage for the given schema with ClickHouse batch initialized.
+func (schema *Schema) NewFlowMessage() *FlowMessage {
+	bf := &FlowMessage{
+		schema: schema,
+	}
+
+	maxKey := ColumnKey(0)
+	for _, column := range bf.schema.columns {
+		if column.Key > maxKey {
+			maxKey = column.Key
+		}
+	}
+
+	bf.batch.columns = make([]proto.Column, maxKey+1)
+	bf.batch.columnSet = *bitset.New(uint(maxKey + 1))
+	bf.batch.rowCount = 0
+
+	for _, column := range bf.schema.columns {
+		if !column.Disabled && column.shouldProvideValue() {
+			bf.batch.columns[column.Key] = column.newProtoColumn()
+			bf.batch.input = append(bf.batch.input, proto.InputColumn{
+				Name: column.Name,
+				Data: column.wrapProtoColumn(bf.batch.columns[column.Key]),
+			})
+		}
+	}
+
+	return bf
+}
+
+// FlowCount returns the number of flows batched.
+func (bf *FlowMessage) FlowCount() int {
+	return bf.batch.rowCount
+}
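Taken together, NewFlowMessage, ClickHouseProtoInput, Clear and FlowCount define the batch lifecycle a consumer is expected to follow: one reusable FlowMessage per worker, rows appended by the decoders, then the whole batch streamed to ClickHouse and cleared. A sketch of that flush step, using only the methods above and the insert pattern from insert_test.go; the connection handling and the target table name "flows" are assumptions, not the actual outlet code:

```go
package outletsketch

import (
	"context"
	"io"

	"github.com/ClickHouse/ch-go"

	"akvorado/common/schema"
)

// flushBatch streams the rows accumulated in bf to ClickHouse and resets the
// batch for the next round. The table name "flows" is hypothetical.
func flushBatch(ctx context.Context, conn *ch.Client, bf *schema.FlowMessage) error {
	if bf.FlowCount() == 0 {
		return nil // nothing accumulated yet
	}
	input := bf.ClickHouseProtoInput()
	return conn.Do(ctx, ch.Query{
		Body:  input.Into("flows"),
		Input: input,
		OnInput: func(ctx context.Context) error {
			bf.Clear()    // batch handed over: drop rows, keep column layout
			return io.EOF // single-shot insert, no more blocks
		},
	})
}
```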
@@ -1,262 +0,0 @@
-// SPDX-FileCopyrightText: 2022 Free Mobile
-// SPDX-License-Identifier: AGPL-3.0-only
-
-package schema
-
-import (
-	"encoding/base32"
-	"fmt"
-	"hash/fnv"
-	"net/netip"
-	"slices"
-	"strings"
-
-	"github.com/bits-and-blooms/bitset"
-	"google.golang.org/protobuf/encoding/protowire"
-	"google.golang.org/protobuf/reflect/protoreflect"
-)
-
-// ProtobufMessageHash returns the name of the protobuf definition.
-func (schema Schema) ProtobufMessageHash() string {
-	name, _ := schema.protobufMessageHashAndDefinition()
-	return name
-}
-
-// ProtobufDefinition returns the protobuf definition.
-func (schema Schema) ProtobufDefinition() string {
-	_, definition := schema.protobufMessageHashAndDefinition()
-	return definition
-}
-
-// protobufMessageHashAndDefinition returns the name of the protobuf definition
-// along with the protobuf definition itself (.proto file).
-func (schema Schema) protobufMessageHashAndDefinition() (string, string) {
-	lines := []string{}
-	enums := map[string]string{}
-
-	hash := fnv.New128()
-	for _, column := range schema.Columns() {
-		for _, column := range append([]Column{column}, column.ClickHouseTransformFrom...) {
-			if column.ProtobufIndex < 0 {
-				continue
-			}
-
-			t := column.ProtobufType.String()
-
-			// Enum definition
-			if column.ProtobufType == protoreflect.EnumKind {
-				if _, ok := enums[column.ProtobufEnumName]; !ok {
-					definition := []string{}
-					keys := []int{}
-					for key := range column.ProtobufEnum {
-						keys = append(keys, key)
-					}
-					slices.Sort(keys)
-					for _, key := range keys {
-						definition = append(definition, fmt.Sprintf("%s = %d;", column.ProtobufEnum[key], key))
-					}
-					enums[column.ProtobufEnumName] = fmt.Sprintf("enum %s { %s }",
-						column.ProtobufEnumName,
-						strings.Join(definition, " "))
-				}
-				t = column.ProtobufEnumName
-			}
-
-			// Column definition
-			if column.ProtobufRepeated {
-				t = fmt.Sprintf("repeated %s", t)
-			}
-			line := fmt.Sprintf("%s %s = %d;",
-				t,
-				column.Name,
-				column.ProtobufIndex,
-			)
-			lines = append(lines, line)
-			hash.Write([]byte(line))
-		}
-	}
-
-	enumDefinitions := []string{}
-	for _, v := range enums {
-		enumDefinitions = append(enumDefinitions, v)
-		hash.Write([]byte(v))
-	}
-	hashString := base32.StdEncoding.WithPadding(base32.NoPadding).EncodeToString(hash.Sum(nil))
-
-	return hashString, fmt.Sprintf(`
-syntax = "proto3";
-
-message FlowMessagev%s {
- %s
-
- %s
-}
-`, hashString, strings.Join(enumDefinitions, "\n "), strings.Join(lines, "\n "))
-}
-
-// ProtobufMarshal transforms a basic flow into protobuf bytes. The provided flow should
-// not be reused afterwards.
-func (schema *Schema) ProtobufMarshal(bf *FlowMessage) []byte {
-	schema.ProtobufAppendVarint(bf, ColumnTimeReceived, bf.TimeReceived)
-	schema.ProtobufAppendVarint(bf, ColumnSamplingRate, uint64(bf.SamplingRate))
-	schema.ProtobufAppendIP(bf, ColumnExporterAddress, bf.ExporterAddress)
-	schema.ProtobufAppendVarint(bf, ColumnSrcAS, uint64(bf.SrcAS))
-	schema.ProtobufAppendVarint(bf, ColumnDstAS, uint64(bf.DstAS))
-	schema.ProtobufAppendVarint(bf, ColumnSrcNetMask, uint64(bf.SrcNetMask))
-	schema.ProtobufAppendVarint(bf, ColumnDstNetMask, uint64(bf.DstNetMask))
-	schema.ProtobufAppendIP(bf, ColumnSrcAddr, bf.SrcAddr)
-	schema.ProtobufAppendIP(bf, ColumnDstAddr, bf.DstAddr)
-	schema.ProtobufAppendIP(bf, ColumnNextHop, bf.NextHop)
-	if !schema.IsDisabled(ColumnGroupL2) {
-		schema.ProtobufAppendVarint(bf, ColumnSrcVlan, uint64(bf.SrcVlan))
-		schema.ProtobufAppendVarint(bf, ColumnDstVlan, uint64(bf.DstVlan))
-	}
-
-	// Add length and move it as a prefix
-	end := len(bf.protobuf)
-	payloadLen := end - maxSizeVarint
-	bf.protobuf = protowire.AppendVarint(bf.protobuf, uint64(payloadLen))
-	sizeLen := len(bf.protobuf) - end
-	result := bf.protobuf[maxSizeVarint-sizeLen : end]
-	copy(result, bf.protobuf[end:end+sizeLen])
-	bf.protobuf = result
-
-	return result
-}
-
-// ProtobufAppendVarint append a varint to the protobuf representation of a flow.
-func (schema *Schema) ProtobufAppendVarint(bf *FlowMessage, columnKey ColumnKey, value uint64) {
-	// Check if value is 0 to avoid a lookup.
-	if value > 0 {
-		schema.ProtobufAppendVarintForce(bf, columnKey, value)
-	}
-}
-
-// ProtobufAppendVarintForce append a varint to the protobuf representation of a flow, even if it is a 0-value.
-func (schema *Schema) ProtobufAppendVarintForce(bf *FlowMessage, columnKey ColumnKey, value uint64) {
-	column, _ := schema.LookupColumnByKey(columnKey)
-	column.ProtobufAppendVarintForce(bf, value)
-}
-
-// ProtobufAppendVarint append a varint to the protobuf representation of a flow.
-func (column *Column) ProtobufAppendVarint(bf *FlowMessage, value uint64) {
-	if value > 0 {
-		column.ProtobufAppendVarintForce(bf, value)
-	}
-}
-
-// ProtobufAppendVarintForce append a varint to the protobuf representation of a flow, even when 0.
-func (column *Column) ProtobufAppendVarintForce(bf *FlowMessage, value uint64) {
-	bf.init()
-	if column.protobufCanAppend(bf) {
-		bf.protobuf = protowire.AppendTag(bf.protobuf, column.ProtobufIndex, protowire.VarintType)
-		bf.protobuf = protowire.AppendVarint(bf.protobuf, value)
-		bf.protobufSet.Set(uint(column.ProtobufIndex))
-		if debug {
-			column.appendDebug(bf, value)
-		}
-	}
-}
-
-func (column Column) protobufCanAppend(bf *FlowMessage) bool {
-	return column.ProtobufIndex > 0 &&
-		!column.Disabled &&
-		(column.ProtobufRepeated || !bf.protobufSet.Test(uint(column.ProtobufIndex)))
-}
-
-// ProtobufAppendBytes append a slice of bytes to the protobuf representation
-// of a flow.
-func (schema *Schema) ProtobufAppendBytes(bf *FlowMessage, columnKey ColumnKey, value []byte) {
-	if len(value) > 0 {
-		schema.ProtobufAppendBytesForce(bf, columnKey, value)
-	}
-}
-
-// ProtobufAppendBytesForce append a slice of bytes to the protobuf representation
-// of a flow, even when empty
-func (schema *Schema) ProtobufAppendBytesForce(bf *FlowMessage, columnKey ColumnKey, value []byte) {
-	column, _ := schema.LookupColumnByKey(columnKey)
-	column.ProtobufAppendBytesForce(bf, value)
-}
-
-// ProtobufAppendBytes append a slice of bytes to the protobuf representation
-// of a flow.
-func (column *Column) ProtobufAppendBytes(bf *FlowMessage, value []byte) {
-	if len(value) > 0 {
-		column.ProtobufAppendBytesForce(bf, value)
-	}
-}
-
-// ProtobufAppendBytesForce append a slice of bytes to the protobuf representation
-// of a flow, even when empty
-func (column *Column) ProtobufAppendBytesForce(bf *FlowMessage, value []byte) {
-	bf.init()
-	if column.protobufCanAppend(bf) {
-		bf.protobuf = protowire.AppendTag(bf.protobuf, column.ProtobufIndex, protowire.BytesType)
-		bf.protobuf = protowire.AppendBytes(bf.protobuf, value)
-		bf.protobufSet.Set(uint(column.ProtobufIndex))
-		if debug {
-			column.appendDebug(bf, value)
-		}
-	}
-}
-
-// ProtobufAppendIP append an IP to the protobuf representation
-// of a flow.
-func (schema *Schema) ProtobufAppendIP(bf *FlowMessage, columnKey ColumnKey, value netip.Addr) {
-	if value.IsValid() {
-		column, _ := schema.LookupColumnByKey(columnKey)
-		column.ProtobufAppendIPForce(bf, value)
-	}
-}
-
-// ProtobufAppendIP append an IP to the protobuf representation
-// of a flow.
-func (column *Column) ProtobufAppendIP(bf *FlowMessage, value netip.Addr) {
-	if value.IsValid() {
-		column.ProtobufAppendIPForce(bf, value)
-	}
-}
-
-// ProtobufAppendIPForce append an IP to the protobuf representation
-// of a flow, even when not valid
-func (column *Column) ProtobufAppendIPForce(bf *FlowMessage, value netip.Addr) {
-	bf.init()
-	if column.protobufCanAppend(bf) {
-		v := value.As16()
-		bf.protobuf = protowire.AppendTag(bf.protobuf, column.ProtobufIndex, protowire.BytesType)
-		bf.protobuf = protowire.AppendBytes(bf.protobuf, v[:])
-		bf.protobufSet.Set(uint(column.ProtobufIndex))
-		if debug {
-			column.appendDebug(bf, value)
-		}
-	}
-}
-
-func (column *Column) appendDebug(bf *FlowMessage, value interface{}) {
-	if bf.ProtobufDebug == nil {
-		bf.ProtobufDebug = make(map[ColumnKey]interface{})
-	}
-	if column.ProtobufRepeated {
-		if current, ok := bf.ProtobufDebug[column.Key]; ok {
-			bf.ProtobufDebug[column.Key] = append(current.([]interface{}), value)
-		} else {
-			bf.ProtobufDebug[column.Key] = []interface{}{value}
-		}
-	} else {
-		bf.ProtobufDebug[column.Key] = value
-	}
-}
-
-// Bytes returns protobuf bytes. The flow should have been processed by
-// `ProtobufMarshal` first.
-func (bf *FlowMessage) Bytes() []byte {
-	return bf.protobuf
-}
-
-func (bf *FlowMessage) init() {
-	if bf.protobuf == nil {
-		bf.protobuf = make([]byte, maxSizeVarint, 500)
-		bf.protobufSet = *bitset.New(uint(ColumnLast))
-	}
-}
@@ -1,242 +0,0 @@
-// SPDX-FileCopyrightText: 2022 Free Mobile
-// SPDX-License-Identifier: AGPL-3.0-only
-
-package schema
-
-import (
-	"fmt"
-	"net/netip"
-	"strings"
-	"testing"
-
-	"akvorado/common/helpers"
-
-	"google.golang.org/protobuf/encoding/protowire"
-	"google.golang.org/protobuf/reflect/protoreflect"
-)
-
-func TestProtobufDefinition(t *testing.T) {
-	// Use a smaller version
-	flows := Schema{
-		columns: []Column{
-			{
-				Key:            ColumnTimeReceived,
-				ClickHouseType: "DateTime",
-				ProtobufType:   protoreflect.Uint64Kind,
-			},
-			{Key: ColumnSamplingRate, ClickHouseType: "UInt64"},
-			{Key: ColumnExporterAddress, ClickHouseType: "LowCardinality(IPv6)"},
-			{Key: ColumnExporterName, ClickHouseType: "LowCardinality(String)"},
-			{
-				Key:            ColumnSrcAddr,
-				ClickHouseType: "IPv6",
-			},
-			{
-				Key:            ColumnSrcNetMask,
-				ClickHouseType: "UInt8",
-			},
-			{
-				Key:             ColumnSrcNetPrefix,
-				ClickHouseType:  "String",
-				ClickHouseAlias: `something`,
-			},
-			{Key: ColumnSrcAS, ClickHouseType: "UInt32"},
-			{
-				Key:                    ColumnSrcNetName,
-				ClickHouseType:         "LowCardinality(String)",
-				ClickHouseGenerateFrom: fmt.Sprintf("dictGetOrDefault('%s', 'name', SrcAddr, '')", DictionaryNetworks),
-			},
-			{
-				Key:            ColumnDstASPath,
-				ClickHouseType: "Array(UInt32)",
-			},
-			{
-				Key:            ColumnDstLargeCommunities,
-				ClickHouseType: "Array(UInt128)",
-				ClickHouseTransformFrom: []Column{
-					{Key: ColumnDstLargeCommunitiesASN, ClickHouseType: "Array(UInt32)"},
-					{Key: ColumnDstLargeCommunitiesLocalData1, ClickHouseType: "Array(UInt32)"},
-					{Key: ColumnDstLargeCommunitiesLocalData2, ClickHouseType: "Array(UInt32)"},
-				},
-				ClickHouseTransformTo: "something",
-			},
-			{Key: ColumnInIfName, ClickHouseType: "LowCardinality(String)"},
-			{
-				Key:                     ColumnInIfBoundary,
-				ClickHouseType:          "Enum8('undefined' = 0, 'external' = 1, 'internal' = 2)",
-				ClickHouseNotSortingKey: true,
-				ProtobufType:            protoreflect.EnumKind,
-				ProtobufEnumName:        "Boundary",
-				ProtobufEnum: map[int]string{
-					0: "UNDEFINED",
-					1: "EXTERNAL",
-					2: "INTERNAL",
-				},
-			},
-			{Key: ColumnBytes, ClickHouseType: "UInt64"},
-		},
-	}.finalize()
-
-	got := flows.ProtobufDefinition()
-	expected := `
-syntax = "proto3";
-
-message FlowMessagev5WRSGBXQDXZSUHZQE6QEHLI5JM {
- enum Boundary { UNDEFINED = 0; EXTERNAL = 1; INTERNAL = 2; }
-
- uint64 TimeReceived = 1;
- uint64 SamplingRate = 2;
- bytes ExporterAddress = 3;
- string ExporterName = 4;
- bytes SrcAddr = 5;
- bytes DstAddr = 6;
- uint32 SrcNetMask = 7;
- uint32 DstNetMask = 8;
- uint32 SrcAS = 9;
- uint32 DstAS = 10;
- repeated uint32 DstASPath = 11;
- repeated uint32 DstLargeCommunitiesASN = 12;
- repeated uint32 DstLargeCommunitiesLocalData1 = 13;
- repeated uint32 DstLargeCommunitiesLocalData2 = 14;
- string InIfName = 15;
- string OutIfName = 16;
- Boundary InIfBoundary = 17;
- Boundary OutIfBoundary = 18;
- uint64 Bytes = 19;
-}
-`
-	if diff := helpers.Diff(strings.Split(got, "\n"), strings.Split(expected, "\n")); diff != "" {
-		t.Fatalf("ProtobufDefinition() (-got, +want): %s", diff)
-	}
-}
-
-func TestProtobufMarshal(t *testing.T) {
-	c := NewMock(t)
-	exporterAddress := netip.MustParseAddr("::ffff:203.0.113.14")
-	bf := &FlowMessage{}
-	bf.TimeReceived = 1000
-	bf.SamplingRate = 20000
-	bf.ExporterAddress = exporterAddress
-	c.ProtobufAppendVarint(bf, ColumnDstAS, 65000)
-	c.ProtobufAppendVarint(bf, ColumnBytes, 200)
-	c.ProtobufAppendVarint(bf, ColumnPackets, 300)
-	c.ProtobufAppendVarint(bf, ColumnBytes, 300) // duplicate!
-
-	got := c.ProtobufMarshal(bf)
-
-	size, n := protowire.ConsumeVarint(got)
-	if uint64(len(got)-n) != size {
-		t.Fatalf("ProtobufMarshal() produced an incorrect size: %d + %d != %d", size, n, len(got))
-	}
-
-	// text schema definition for reference
-	// syntax = "proto3";
-
-	// message FlowMessagevLAABIGYMRYZPTGOYIIFZNYDEQM {
-	//  enum Boundary { UNDEFINED = 0; EXTERNAL = 1; INTERNAL = 2; }
-
-	//  uint64 TimeReceived = 1;
-	//  uint64 SamplingRate = 2;
-	//  bytes ExporterAddress = 3;
-	//  string ExporterName = 4;
-	//  string ExporterGroup = 5;
-	//  string ExporterRole = 6;
-	//  string ExporterSite = 7;
-	//  string ExporterRegion = 8;
-	//  string ExporterTenant = 9;
-	//  bytes SrcAddr = 10;
-	//  bytes DstAddr = 11;
-	//  uint32 SrcNetMask = 12;
-	//  uint32 DstNetMask = 13;
-	//  uint32 SrcAS = 14;
-	//  uint32 DstAS = 15;
-	//  repeated uint32 DstASPath = 18;
-	//  repeated uint32 DstCommunities = 19;
-	//  repeated uint32 DstLargeCommunitiesASN = 20;
-	//  repeated uint32 DstLargeCommunitiesLocalData1 = 21;
-	//  repeated uint32 DstLargeCommunitiesLocalData2 = 22;
-	//  string InIfName = 23;
-	//  string OutIfName = 24;
-	//  string InIfDescription = 25;
-	//  string OutIfDescription = 26;
-	//  uint32 InIfSpeed = 27;
-	//  uint32 OutIfSpeed = 28;
-	//  string InIfConnectivity = 29;
-	//  string OutIfConnectivity = 30;
-	//  string InIfProvider = 31;
-	//  string OutIfProvider = 32;
-	//  Boundary InIfBoundary = 33;
-	//  Boundary OutIfBoundary = 34;
-	//  uint32 EType = 35;
-	//  uint32 Proto = 36;
-	//  uint32 SrcPort = 37;
-	//  uint32 DstPort = 38;
-	//  uint64 Bytes = 39;
-	//  uint64 Packets = 40;
-	//  uint32 ForwardingStatus = 41;
-	// }
-	// to check: https://protobuf-decoder.netlify.app/
	t.Run("compare as bytes", func(t *testing.T) {
-		expected := []byte{
-			// DstAS
-			// 15: 65000
-			0x78, 0xe8, 0xfb, 0x03,
-			// Bytes
-			// 39: 200
-			0xb8, 0x02, 0xc8, 0x01,
-			// Packet
-			// 40: 300
-			0xc0, 0x02, 0xac, 0x02,
-			// TimeReceived
-			// 1: 1000
-			0x08, 0xe8, 0x07,
-			// SamplingRate
-			// 2: 20000
-			0x10, 0xa0, 0x9c, 0x01,
-			// ExporterAddress
-			// 3: ::ffff:203.0.113.14
-			0x1a, 0x10, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xff, 0xff, 0xcb, 0x0, 0x71, 0xe,
-		}
-		if diff := helpers.Diff(got[n:], expected); diff != "" {
-			t.Logf("got: %v", got)
-
-			t.Fatalf("ProtobufMarshal() (-got, +want):\n%s", diff)
-		}
-	})
-
-	t.Run("compare as protobuf message", func(t *testing.T) {
-		got := c.ProtobufDecode(t, got)
-		expected := FlowMessage{
-			TimeReceived:    1000,
-			SamplingRate:    20000,
-			ExporterAddress: exporterAddress,
-			DstAS:           65000,
-			ProtobufDebug: map[ColumnKey]interface{}{
-				ColumnBytes:   200,
-				ColumnPackets: 300,
-			},
-		}
-		if diff := helpers.Diff(got, expected); diff != "" {
-			t.Fatalf("ProtobufDecode() (-got, +want):\n%s", diff)
-		}
-	})
-}
-
-func BenchmarkProtobufMarshal(b *testing.B) {
-	c := NewMock(b)
-	exporterAddress := netip.MustParseAddr("::ffff:203.0.113.14")
-	DisableDebug(b)
-	for b.Loop() {
-		bf := &FlowMessage{
-			TimeReceived:    1000,
-			SamplingRate:    20000,
-			ExporterAddress: exporterAddress,
-		}
-		c.ProtobufAppendVarint(bf, ColumnDstAS, 65000)
-		c.ProtobufAppendVarint(bf, ColumnBytes, 200)
-		c.ProtobufAppendVarint(bf, ColumnPackets, 300)
-		c.ProtobufAppendVarint(bf, ColumnBytes, 300)   // duplicate!
-		c.ProtobufAppendVarint(bf, ColumnSrcVlan, 1600) // disabled!
-		c.ProtobufMarshal(bf)
-	}
-}
@@ -38,29 +38,6 @@ func TestDisableForbiddenColumns(t *testing.T) {
 	if _, err := schema.New(config); err == nil {
 		t.Fatal("New() did not error")
 	}
-
-	config = schema.DefaultConfiguration()
-	config.Disabled = []schema.ColumnKey{schema.ColumnDstLargeCommunitiesASN}
-	if _, err := schema.New(config); err == nil {
-		t.Fatal("New() did not error")
-	}
-
-	config = schema.DefaultConfiguration()
-	config.Disabled = []schema.ColumnKey{schema.ColumnDstLargeCommunities}
-	if _, err := schema.New(config); err == nil {
-		t.Fatal("New() did not error")
-	}
-
-	config = schema.DefaultConfiguration()
-	config.Disabled = []schema.ColumnKey{
-		schema.ColumnDstLargeCommunities,
-		schema.ColumnDstLargeCommunitiesASN,
-		schema.ColumnDstLargeCommunitiesLocalData1,
-		schema.ColumnDstLargeCommunitiesLocalData2,
-	}
-	if _, err := schema.New(config); err != nil {
-		t.Fatalf("New() error:\n%+v", err)
-	}
 }
 
 func TestCustomDictionaries(t *testing.T) {
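The deleted assertions enforced that the four DstLargeCommunities* keys could only be disabled together, a constraint that existed because of the transform sub-columns. Assuming those sub-columns are gone from the schema (which this commit suggests but this hunk does not show), disabling the parent key alone should now be accepted. A hypothetical test sketch in the style of the remaining cases:

```go
func TestDisableLargeCommunitiesAlone(t *testing.T) {
	// Assumption: with ClickHouseTransformFrom removed, the parent key can
	// be disabled without naming the former sub-columns.
	config := schema.DefaultConfiguration()
	config.Disabled = []schema.ColumnKey{schema.ColumnDstLargeCommunities}
	if _, err := schema.New(config); err != nil {
		t.Fatalf("New() error:\n%+v", err)
	}
}
```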
@@ -7,17 +7,10 @@ package schema
 
 import (
 	"fmt"
-	"net/netip"
 	"reflect"
-	"strings"
 	"testing"
 
 	"akvorado/common/helpers"
 
-	"github.com/jhump/protoreflect/desc"
-	"github.com/jhump/protoreflect/desc/protoparse"
-	"github.com/jhump/protoreflect/dynamic"
-	"google.golang.org/protobuf/encoding/protowire"
 )
 
 var debug = true
@@ -41,82 +34,6 @@ func NewMock(t testing.TB) *Component {
 	return c
 }
 
-// ProtobufDecode decodes the provided protobuf message.
-func (schema *Schema) ProtobufDecode(t *testing.T, input []byte) *FlowMessage {
-	t.Helper()
-	parser := protoparse.Parser{
-		Accessor: protoparse.FileContentsFromMap(map[string]string{
-			"flow.proto": schema.ProtobufDefinition(),
-		}),
-	}
-	descs, err := parser.ParseFiles("flow.proto")
-	if err != nil {
-		t.Fatalf("ParseFiles(%q) error:\n%+v", "flow.proto", err)
-	}
-
-	var descriptor *desc.MessageDescriptor
-	for _, msg := range descs[0].GetMessageTypes() {
-		if strings.HasPrefix(msg.GetName(), "FlowMessagev") {
-			descriptor = msg
-			break
-		}
-	}
-	if descriptor == nil {
-		t.Fatal("cannot find message descriptor")
-	}
-
-	message := dynamic.NewMessage(descriptor)
-	size, n := protowire.ConsumeVarint(input)
-	if len(input)-n != int(size) {
-		t.Fatalf("bad length for protobuf message: %d - %d != %d", len(input), n, size)
-	}
-	if err := message.Unmarshal(input[n:]); err != nil {
-		t.Fatalf("Unmarshal() error:\n%+v", err)
-	}
-	textVersion, _ := message.MarshalTextIndent()
-	t.Logf("Unmarshal():\n%s", textVersion)
-
-	flow := FlowMessage{
-		ProtobufDebug: map[ColumnKey]interface{}{},
-	}
-	for _, field := range message.GetKnownFields() {
-		k := int(field.GetNumber())
-		name := field.GetName()
-		switch name {
-		case "TimeReceived":
-			flow.TimeReceived = message.GetFieldByNumber(k).(uint64)
-		case "SamplingRate":
-			flow.SamplingRate = uint32(message.GetFieldByNumber(k).(uint64))
-		case "ExporterAddress":
-			ip, _ := netip.AddrFromSlice(message.GetFieldByNumber(k).([]byte))
-			flow.ExporterAddress = ip
-		case "SrcAddr":
-			ip, _ := netip.AddrFromSlice(message.GetFieldByNumber(k).([]byte))
-			flow.SrcAddr = ip
-		case "DstAddr":
-			ip, _ := netip.AddrFromSlice(message.GetFieldByNumber(k).([]byte))
-			flow.DstAddr = ip
-		case "SrcAS":
-			flow.SrcAS = uint32(message.GetFieldByNumber(k).(uint32))
-		case "DstAS":
-			flow.DstAS = uint32(message.GetFieldByNumber(k).(uint32))
-		default:
-			column, ok := schema.LookupColumnByName(name)
-			if !ok {
-				break
-			}
-			key := column.Key
-			value := message.GetFieldByNumber(k)
-			if reflect.ValueOf(value).IsZero() {
-				break
-			}
-			flow.ProtobufDebug[key] = value
-		}
-	}
-
-	return &flow
-}
-
 // EnableAllColumns enable all columns and returns itself.
 func (schema *Component) EnableAllColumns() *Component {
 	for i := range schema.columns {
@@ -4,11 +4,8 @@
 package schema
 
 import (
-	"net/netip"
+	"github.com/ClickHouse/ch-go/proto"
 
 	"github.com/bits-and-blooms/bitset"
-	"google.golang.org/protobuf/encoding/protowire"
-	"google.golang.org/protobuf/reflect/protoreflect"
 )
 
 // Schema is the data schema.
@@ -42,18 +39,18 @@ type Column struct {
 	// instead of being retrieved from the protobuf. `TransformFrom' and
 	// `TransformTo' work in pairs. The first one is the set of column in the
 	// raw table while the second one is how to transform it for the main table.
-	ClickHouseType             string
-	ClickHouseMaterializedType string
-	ClickHouseCodec            string
-	ClickHouseAlias            string
-	ClickHouseNotSortingKey    bool
-	ClickHouseGenerateFrom     string
-	ClickHouseTransformFrom    []Column
-	ClickHouseTransformTo      string
-	ClickHouseMainOnly         bool
-	// ClickHouseSelfGenerated identifies a column as being formatted using itself as source
-	ClickHouseSelfGenerated bool
+	ClickHouseType             string // ClickHouse type for the column
+	ClickHouseMaterializedType string // ClickHouse type when we request materialization
+	ClickHouseCodec            string // Compression codec
+	ClickHouseAlias            string // Alias expression
+	// ClickHouseNotSortingKey is to be used for columns whose content is
+	// derived from another column. Like Exporter* all derive from
+	// ExporterAddress.
+	ClickHouseNotSortingKey bool
+	// ClickHouseGenerateFrom computes the content of the column using another column
+	ClickHouseGenerateFrom  string
+	ClickHouseMainOnly      bool // Only include this column in the main table
+	ClickHouseSelfGenerated bool // Generated (partly) from its own value
 	// ClickHouseMaterialized indicates that the column was materialized (and is not by default)
 	ClickHouseMaterialized bool
 
@@ -61,55 +58,13 @@ type Column struct {
 	// truncatable when used as a dimension.
 	ConsoleNotDimension bool
 	ConsoleTruncateIP   bool
-
-	// For protobuf. The index is automatically derived from the position,
-	// unless specified. Use -1 to not include the column into the protobuf
-	// schema.
-	ProtobufIndex    protowire.Number
-	ProtobufType     protoreflect.Kind // Uint64Kind, Uint32Kind, BytesKind, StringKind, EnumKind
-	ProtobufEnum     map[int]string
-	ProtobufEnumName string
-	ProtobufRepeated bool
 }
 
 // ColumnKey is the name of a column
-type ColumnKey int
+type ColumnKey uint
 
 // ColumnGroup represents a group of columns
 type ColumnGroup uint
 
-// FlowMessage is the abstract representation of a flow through various subsystems.
-type FlowMessage struct {
-	TimeReceived uint64
-	SamplingRate uint32
-
-	// For exporter classifier
-	ExporterAddress netip.Addr
-
-	// For interface classifier
-	InIf    uint32
-	OutIf   uint32
-	SrcVlan uint16
-	DstVlan uint16
-
-	// For geolocation or BMP
-	SrcAddr netip.Addr
-	DstAddr netip.Addr
-	NextHop netip.Addr
-
-	// Core component may override them
-	SrcAS          uint32
-	DstAS          uint32
-	GotASPath      bool
-	GotCommunities bool
-
-	SrcNetMask uint8
-	DstNetMask uint8
-
-	// protobuf is the protobuf representation for the information not contained above.
-	protobuf      []byte
-	protobufSet   bitset.BitSet
-	ProtobufDebug map[ColumnKey]interface{} `json:"-"` // for testing purpose
-}
-
-const maxSizeVarint = 10 // protowire.SizeVarint(^uint64(0))
+// UInt128 is an unsigned 128-bit number
+type UInt128 = proto.UInt128
@@ -1,17 +0,0 @@
-// SPDX-FileCopyrightText: 2022 Free Mobile
-// SPDX-License-Identifier: AGPL-3.0-only
-
-package schema
-
-import (
-	"testing"
-
-	"google.golang.org/protobuf/encoding/protowire"
-)
-
-func TestMaxSizeVarint(t *testing.T) {
-	got := protowire.SizeVarint(^uint64(0))
-	if got != maxSizeVarint {
-		t.Fatalf("maximum size for varint is %d, not %d", got, maxSizeVarint)
-	}
-}
@@ -76,6 +76,7 @@ clickhouse:
 #   { prefix: (.ipv4Prefix // .ipv6Prefix), tenant: "google-cloud", region: .scope }
 
 inlet: !include "inlet.yaml"
+outlet: !include "outlet.yaml"
 console: !include "console.yaml"
 
 # Remove the following line if you don't want to get demo data
@@ -96,7 +96,7 @@
       21: "core"
   listen: :161
 bmp: &bmp
-  target: akvorado-inlet:10179
+  target: akvorado-outlet:10179
   routes:
     - prefixes: 192.0.2.0/24,2a01:db8:cafe:1::/64
       aspath: 64501
@@ -1,13 +1,6 @@
 ---
 kafka:
   compression-codec: zstd
-metadata:
-  workers: 10
-  provider:
-    type: snmp
-    credentials:
-      ::/0:
-        communities: public
 flow:
   inputs:
     - type: udp
@@ -20,22 +13,3 @@ flow:
       listen: :6343
       workers: 6
      receive-buffer: 10485760
-core:
-  workers: 6
-  exporter-classifiers:
-    # This is an example. This should be customized depending on how
-    # your exporters are named.
-    - ClassifySiteRegex(Exporter.Name, "^([^-]+)-", "$1")
-    - ClassifyRegion("europe")
-    - ClassifyTenant("acme")
-    - ClassifyRole("edge")
-  interface-classifiers:
-    # This is an example. This must be customized depending on the
-    # descriptions of your interfaces. In the following, we assume
-    # external interfaces are named "Transit: Cogent" Or "IX:
-    # FranceIX".
-    - |
-      ClassifyConnectivityRegex(Interface.Description, "^(?i)(transit|pni|ppni|ix):? ", "$1") &&
-      ClassifyProviderRegex(Interface.Description, "^\\S+?\\s(\\S+)", "$1") &&
-      ClassifyExternal()
-    - ClassifyInternal()
28	config/outlet.yaml	Normal file
@@ -0,0 +1,28 @@
+---
+metadata:
+  workers: 10
+  provider:
+    type: snmp
+    credentials:
+      ::/0:
+        communities: public
+kafka:
+  workers: 6
+core:
+  exporter-classifiers:
+    # This is an example. This should be customized depending on how
+    # your exporters are named.
+    - ClassifySiteRegex(Exporter.Name, "^([^-]+)-", "$1")
+    - ClassifyRegion("europe")
+    - ClassifyTenant("acme")
+    - ClassifyRole("edge")
+  interface-classifiers:
+    # This is an example. This must be customized depending on the
+    # descriptions of your interfaces. In the following, we assume
+    # external interfaces are named "Transit: Cogent" Or "IX:
+    # FranceIX".
+    - |
+      ClassifyConnectivityRegex(Interface.Description, "^(?i)(transit|pni|ppni|ix):? ", "$1") &&
+      ClassifyProviderRegex(Interface.Description, "^\\S+?\\s(\\S+)", "$1") &&
+      ClassifyExternal()
+    - ClassifyInternal()
@@ -32,12 +32,12 @@ Currently, only a pre-built binary for Linux x86-64 is provided.
 
 ## Compilation from source
 
-You need a proper installation of [Go](https://go.dev/doc/install) (1.24+), and
-[NodeJS](https://nodejs.org/en/download/) (20+) with NPM (6+). For example, on
-Debian:
+You need a proper installation of [Go](https://go.dev/doc/install) (1.24+),
+[NodeJS](https://nodejs.org/en/download/) (20+) with NPM (6+), and
+[protoc](https://protobuf.dev/installation/). For example, on Debian:
 
 ```console
-# apt install golang nodejs npm
+# apt install golang nodejs npm protobuf-compiler
 # go version
 go version go1.24.1 linux/amd64
 # node --version
@@ -50,7 +50,6 @@ process flows. The following endpoints are exposed by the HTTP
 component embedded into the service:
 
 - `/api/v0/inlet/flows`: stream the received flows
-- `/api/v0/inlet/schemas.proto`: protobuf schema
 
 ## Orchestrator service
 
@@ -13,6 +13,15 @@ identified with a specific icon:
 
 ## Unreleased
 
+This release introduces a new component: the outlet. Previously, ClickHouse was
+fetching data directly from Kafka. However, this required pushing the protobuf
+schema using an out-of-band method, which made cloud deployments more complex.
+The inlet now pushes incoming raw flows to Kafka without decoding them. The
+outlet takes them, decodes them, enriches them, and pushes them to ClickHouse.
+This also reduces the likelihood of losing packets. This change should be
+transparent on most setups, but you are encouraged to review the new proposed
+configuration in the [quickstart tarball][] and update your own configuration.
+
 As it seems as good a time as any, Zookeeper is removed from the `docker compose`
 setup (except when using ClickHouse cluster mode). Kafka is now using the KRaft
 mode. You can follow the [migration documentation][], but it is easier to lose a
@@ -25,6 +34,8 @@ bit of data and reset the Kafka container:
 # docker compose up -d
 ```
 
+- 💥 *outlet*: new service
+- 💥 *inlet*: flow rate limiting feature has been removed
 - 💥 *docker*: switch Kafka to KRaft mode
 - 🩹 *console*: fix deletion of saved filters
 - 🩹 *console*: fix intermittent failure when requesting previous period
@@ -37,6 +48,7 @@ bit of data and reset the Kafka container:
 - 🌱 *inlet*: improve performance of classifiers
 
 [migration documentation]: https://github.com/bitnami/containers/blob/main/bitnami/kafka/README.md#migrating-from-zookeeper-mode-to-kraft-mode
+[quickstart tarball]: https://github.com/akvorado/akvorado/releases/latest/download/docker-compose-quickstart.tar.gz
 
 ## 1.11.5 - 2025-05-11
 
@@ -11,15 +11,18 @@ import (
 	"time"
 
 	"akvorado/common/helpers"
+	"akvorado/common/pb"
 	"akvorado/common/reporter"
 	"akvorado/common/schema"
-	"akvorado/inlet/flow/decoder"
-	"akvorado/inlet/flow/decoder/netflow"
+	"akvorado/outlet/flow/decoder"
+	"akvorado/outlet/flow/decoder/netflow"
 )
 
 func TestGetNetflowData(t *testing.T) {
 	r := reporter.NewMock(t)
-	nfdecoder := netflow.New(r, decoder.Dependencies{Schema: schema.NewMock(t)}, decoder.Option{TimestampSource: decoder.TimestampSourceUDP})
+	sch := schema.NewMock(t)
+	bf := sch.NewFlowMessage()
+	nfdecoder := netflow.New(r, decoder.Dependencies{Schema: sch})
 
 	ch := getNetflowTemplates(
 		context.Background(),
@@ -27,11 +30,22 @@ func TestGetNetflowData(t *testing.T) {
 		30000,
 		time.Date(2022, 3, 15, 14, 33, 0, 0, time.UTC),
 		time.Date(2022, 3, 15, 15, 33, 0, 0, time.UTC))
-	got := []interface{}{}
+	got := []*schema.FlowMessage{}
+	finalize := func() {
+		bf.TimeReceived = 0
+		// Keep a copy of the current flow message
+		clone := *bf
+		got = append(got, &clone)
+		// And clear the flow message
+		bf.Clear()
+	}
+
 	for payload := range ch {
-		got = append(got, nfdecoder.Decode(decoder.RawFlow{
-			Payload: payload, Source: net.ParseIP("127.0.0.1"),
-		}))
+		if _, err := nfdecoder.Decode(decoder.RawFlow{
+			Payload: payload, Source: netip.MustParseAddr("::ffff:127.0.0.1"),
+		}, decoder.Option{TimestampSource: pb.RawFlow_TS_INPUT}, bf, finalize); err != nil {
+			t.Fatalf("Decode() error:\n%+v", err)
+		}
 	}
 
 	ch = getNetflowData(
@@ -97,90 +111,76 @@ func TestGetNetflowData(t *testing.T) {
 		time.Date(2022, 3, 15, 14, 33, 0, 0, time.UTC),
 		time.Date(2022, 3, 15, 16, 33, 0, 0, time.UTC))
 	for payload := range ch {
-		got = append(got, nfdecoder.Decode(decoder.RawFlow{
-			Payload: payload, Source: net.ParseIP("127.0.0.1"),
-		}))
+		if _, err := nfdecoder.Decode(decoder.RawFlow{
+			Payload: payload, Source: netip.MustParseAddr("::ffff:127.0.0.1"),
+		}, decoder.Option{TimestampSource: pb.RawFlow_TS_INPUT}, bf, finalize); err != nil {
+			t.Fatalf("Decode() error:\n%+v", err)
+		}
 	}
-	expected := []interface{}{
-		[]interface{}{}, // templates
-		[]interface{}{
-			&schema.FlowMessage{
-				SamplingRate:    30000,
-				ExporterAddress: netip.MustParseAddr("::ffff:127.0.0.1"),
-				SrcAddr:         netip.MustParseAddr("::ffff:192.0.2.206"),
-				DstAddr:         netip.MustParseAddr("::ffff:203.0.113.165"),
-				InIf:            10,
-				OutIf:           20,
-				SrcAS:           65201,
-				DstAS:           65202,
-				SrcNetMask:      24,
-				DstNetMask:      23,
-				ProtobufDebug: map[schema.ColumnKey]interface{}{
-					schema.ColumnBytes:            1500,
-					schema.ColumnPackets:          1,
-					schema.ColumnEType:            helpers.ETypeIPv4,
-					schema.ColumnProto:            6,
-					schema.ColumnSrcPort:          443,
-					schema.ColumnDstPort:          34974,
-					schema.ColumnForwardingStatus: 64,
-				},
-			},
-			&schema.FlowMessage{
-				SamplingRate:    30000,
-				ExporterAddress: netip.MustParseAddr("::ffff:127.0.0.1"),
-				SrcAddr:         netip.MustParseAddr("::ffff:192.0.2.236"),
-				DstAddr:         netip.MustParseAddr("::ffff:203.0.113.67"),
-				InIf:            10,
-				OutIf:           20,
-				SrcAS:           65201,
-				DstAS:           65202,
-				SrcNetMask:      24,
-				DstNetMask:      24,
-				ProtobufDebug: map[schema.ColumnKey]interface{}{
-					schema.ColumnBytes:            1339,
-					schema.ColumnPackets:          1,
-					schema.ColumnEType:            helpers.ETypeIPv4,
-					schema.ColumnProto:            6,
-					schema.ColumnSrcPort:          443,
-					schema.ColumnDstPort:          33199,
-					schema.ColumnForwardingStatus: 64,
-				},
+	expected := []*schema.FlowMessage{
+		{
+			SamplingRate:    30000,
+			ExporterAddress: netip.MustParseAddr("::ffff:127.0.0.1"),
+			SrcAddr:         netip.MustParseAddr("::ffff:192.0.2.206"),
+			DstAddr:         netip.MustParseAddr("::ffff:203.0.113.165"),
+			InIf:            10,
+			OutIf:           20,
+			SrcAS:           65201,
+			DstAS:           65202,
+			SrcNetMask:      24,
+			DstNetMask:      23,
+			OtherColumns: map[schema.ColumnKey]any{
+				schema.ColumnBytes:            1500,
+				schema.ColumnPackets:          1,
+				schema.ColumnEType:            helpers.ETypeIPv4,
+				schema.ColumnProto:            6,
+				schema.ColumnSrcPort:          443,
+				schema.ColumnDstPort:          34974,
+				schema.ColumnForwardingStatus: 64,
 			},
 		},
-		[]interface{}{
-			&schema.FlowMessage{
-				SamplingRate:    30000,
-				ExporterAddress: netip.MustParseAddr("::ffff:127.0.0.1"),
-				SrcAddr:         netip.MustParseAddr("2001:db8::1"),
-				DstAddr:         netip.MustParseAddr("2001:db8:2:0:cea5:d643:ec43:3772"),
-				InIf:            20,
-				OutIf:           10,
-				SrcAS:           65201,
-				DstAS:           65202,
-				SrcNetMask:      48,
-				DstNetMask:      48,
-				ProtobufDebug: map[schema.ColumnKey]interface{}{
-					schema.ColumnBytes:   1300,
-					schema.ColumnPackets: 1,
-					schema.ColumnEType:   helpers.ETypeIPv6,
-					schema.ColumnProto:   6,
-					schema.ColumnSrcPort: 33179,
-					schema.ColumnDstPort: 443,
+		{
+			SamplingRate:    30000,
+			ExporterAddress: netip.MustParseAddr("::ffff:127.0.0.1"),
+			SrcAddr:         netip.MustParseAddr("::ffff:192.0.2.236"),
+			DstAddr:         netip.MustParseAddr("::ffff:203.0.113.67"),
+			InIf:            10,
+			OutIf:           20,
+			SrcAS:           65201,
+			DstAS:           65202,
+			SrcNetMask:      24,
+			DstNetMask:      24,
+			OtherColumns: map[schema.ColumnKey]any{
+				schema.ColumnBytes:            1339,
+				schema.ColumnPackets:          1,
+				schema.ColumnEType:            helpers.ETypeIPv4,
+				schema.ColumnProto:            6,
+				schema.ColumnSrcPort:          443,
+				schema.ColumnDstPort:          33199,
+				schema.ColumnForwardingStatus: 64,
schema.ColumnForwardingStatus: 64,
|
},
|
||||||
},
|
},
|
||||||
|
{
|
||||||
|
SamplingRate: 30000,
|
||||||
|
ExporterAddress: netip.MustParseAddr("::ffff:127.0.0.1"),
|
||||||
|
SrcAddr: netip.MustParseAddr("2001:db8::1"),
|
||||||
|
DstAddr: netip.MustParseAddr("2001:db8:2:0:cea5:d643:ec43:3772"),
|
||||||
|
InIf: 20,
|
||||||
|
OutIf: 10,
|
||||||
|
SrcAS: 65201,
|
||||||
|
DstAS: 65202,
|
||||||
|
SrcNetMask: 48,
|
||||||
|
DstNetMask: 48,
|
||||||
|
OtherColumns: map[schema.ColumnKey]any{
|
||||||
|
schema.ColumnBytes: 1300,
|
||||||
|
schema.ColumnPackets: 1,
|
||||||
|
schema.ColumnEType: helpers.ETypeIPv6,
|
||||||
|
schema.ColumnProto: 6,
|
||||||
|
schema.ColumnSrcPort: 33179,
|
||||||
|
schema.ColumnDstPort: 443,
|
||||||
|
schema.ColumnForwardingStatus: 64,
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
}
|
|
||||||
for idx1 := range got {
|
|
||||||
if got[idx1] == nil {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
switch g := got[idx1].(type) {
|
|
||||||
case []*schema.FlowMessage:
|
|
||||||
for idx2 := range g {
|
|
||||||
g[idx2].TimeReceived = 0
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
if diff := helpers.Diff(got, expected); diff != "" {
|
if diff := helpers.Diff(got, expected); diff != "" {
|
||||||
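The hunks above capture the new decoder contract: instead of returning a slice of freshly allocated flow messages, Decode fills a single caller-owned *schema.FlowMessage and invokes a finalize callback once per decoded flow. The following is a minimal sketch of a caller, inferred from the test above; it assumes the outlet decoder package exposes a Decoder interface matching the call shape seen here, and the names decodeAll, consume, payloads and the sample source address are illustrative, not code from this change:

// Sketch of the callback-based decoding loop, as exercised by the test
// above. Anything named here that does not appear in the diff (decodeAll,
// consume, payloads) is hypothetical.
package main

import (
	"log"
	"net/netip"

	"akvorado/common/pb"
	"akvorado/common/schema"
	"akvorado/outlet/flow/decoder"
)

func decodeAll(d decoder.Decoder, sch *schema.Component, payloads [][]byte,
	consume func(*schema.FlowMessage)) {
	bf := sch.NewFlowMessage() // one reusable message per worker
	finalize := func() {
		consume(bf) // hand the decoded flow to the caller...
		bf.Clear()  // ...then reset it before the next flow is decoded
	}
	for _, payload := range payloads {
		if _, err := d.Decode(decoder.RawFlow{
			Payload: payload,
			Source:  netip.MustParseAddr("::ffff:192.0.2.1"),
		}, decoder.Option{TimestampSource: pb.RawFlow_TS_INPUT}, bf, finalize); err != nil {
			log.Printf("decode error: %v", err)
		}
	}
}

Reusing one message per worker avoids the per-flow allocations the old slice-returning API required, which is why the test copies (`clone := *bf`) before clearing.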
@@ -9,6 +9,8 @@ services:
     depends_on:
       akvorado-inlet:
         condition: service_healthy
+      akvorado-outlet:
+        condition: service_healthy
   akvorado-exporter1:
     <<: *exporter
     command: demo-exporter http://akvorado-orchestrator:8080#1
@@ -105,7 +105,6 @@ services:
     ports:
       - 2055:2055/udp
       - 6343:6343/udp
-      - 10179:10179/tcp
     restart: unless-stopped
     depends_on:
       akvorado-orchestrator:
@@ -115,8 +114,6 @@ services:
     command: inlet http://akvorado-orchestrator:8080
     volumes:
       - akvorado-run:/run/akvorado
-    environment:
-      - AKVORADO_CFG_INLET_METADATA_CACHEPERSISTFILE=/run/akvorado/metadata.cache
     labels:
       - traefik.enable=true
       # Disable access logging of /api/v0/inlet/metrics
@@ -129,6 +126,36 @@ services:
       - traefik.http.routers.akvorado-inlet.rule=PathPrefix(`/api/v0/inlet`)
       - traefik.http.services.akvorado-inlet.loadbalancer.server.port=8080
       - akvorado.conntrack.fix=true
+  akvorado-outlet:
+    extends:
+      file: versions.yml
+      service: akvorado
+    ports:
+      - 10179:10179/tcp
+    restart: unless-stopped
+    depends_on:
+      akvorado-orchestrator:
+        condition: service_healthy
+      kafka:
+        condition: service_healthy
+      clickhouse:
+        condition: service_healthy
+    command: outlet http://akvorado-orchestrator:8080
+    volumes:
+      - akvorado-run:/run/akvorado
+    environment:
+      - AKVORADO_CFG_OUTLET_METADATA_CACHEPERSISTFILE=/run/akvorado/metadata.cache
+    labels:
+      - traefik.enable=true
+      # Disable access logging of /api/v0/outlet/metrics
+      - traefik.http.routers.akvorado-outlet-metrics.entrypoints=private
+      - traefik.http.routers.akvorado-outlet-metrics.rule=PathPrefix(`/api/v0/outlet/metrics`)
+      - traefik.http.routers.akvorado-outlet-metrics.service=akvorado-outlet
+      - traefik.http.routers.akvorado-outlet-metrics.observability.accesslogs=false
+      # Everything else is exposed to private entrypoint in /api/v0/outlet
+      - traefik.http.routers.akvorado-outlet.entrypoints=private
+      - traefik.http.routers.akvorado-outlet.rule=PathPrefix(`/api/v0/outlet`)
+      - traefik.http.services.akvorado-outlet.loadbalancer.server.port=8080
   akvorado-conntrack-fixer:
     extends:
       file: versions.yml
@@ -60,7 +60,7 @@ scrape_configs:
           - com.docker.compose.project=akvorado
     relabel_configs:
       - source_labels: [__meta_docker_container_label_com_docker_compose_service]
-        regex: akvorado-(inlet|orchestrator|console)
+        regex: akvorado-(inlet|outlet|orchestrator|console)
         action: keep
       - source_labels: [__meta_docker_port_private]
        regex: 8080
@@ -55,6 +55,7 @@
             find . -print0 | xargs -0 touch -d @0

             make all \
+              PROTOC=${pkgs.protobuf}/bin/protoc \
               ASNS_URL=${asn2org}/asns.csv \
               SERVICES_URL=${ianaServiceNames}
           '';
@@ -110,6 +111,7 @@
             nodejs
             pkgs.git
             pkgs.curl
+            pkgs.protobuf
           ];
         };
       });
1 go.mod
@@ -218,6 +218,7 @@ tool (
 	github.com/mna/pigeon
 	go.uber.org/mock/mockgen
 	golang.org/x/tools/cmd/goimports
+	google.golang.org/protobuf/cmd/protoc-gen-go
 	gotest.tools/gotestsum
 )
@@ -1,167 +0,0 @@
-// SPDX-FileCopyrightText: 2022 Free Mobile
-// SPDX-License-Identifier: AGPL-3.0-only
-
-// Package core plumbs all the other components together.
-package core
-
-import (
-	"fmt"
-	"sync/atomic"
-	"time"
-
-	"gopkg.in/tomb.v2"
-
-	"akvorado/common/daemon"
-	"akvorado/common/helpers/cache"
-	"akvorado/common/httpserver"
-	"akvorado/common/reporter"
-	"akvorado/common/schema"
-	"akvorado/inlet/flow"
-	"akvorado/inlet/kafka"
-	"akvorado/inlet/metadata"
-	"akvorado/inlet/routing"
-)
-
-// Component represents the HTTP component.
-type Component struct {
-	r      *reporter.Reporter
-	d      *Dependencies
-	t      tomb.Tomb
-	config Configuration
-
-	metrics metrics
-
-	healthy            chan reporter.ChannelHealthcheckFunc
-	httpFlowClients    uint32 // for dumping flows
-	httpFlowChannel    chan *schema.FlowMessage
-	httpFlowFlushDelay time.Duration
-
-	classifierExporterCache  *cache.Cache[exporterInfo, exporterClassification]
-	classifierInterfaceCache *cache.Cache[exporterAndInterfaceInfo, interfaceClassification]
-	classifierErrLogger      reporter.Logger
-}
-
-// Dependencies define the dependencies of the HTTP component.
-type Dependencies struct {
-	Daemon   daemon.Component
-	Flow     *flow.Component
-	Metadata *metadata.Component
-	Routing  *routing.Component
-	Kafka    *kafka.Component
-	HTTP     *httpserver.Component
-	Schema   *schema.Component
-}
-
-// New creates a new core component.
-func New(r *reporter.Reporter, configuration Configuration, dependencies Dependencies) (*Component, error) {
-	c := Component{
-		r:      r,
-		d:      &dependencies,
-		config: configuration,
-
-		healthy:            make(chan reporter.ChannelHealthcheckFunc),
-		httpFlowClients:    0,
-		httpFlowChannel:    make(chan *schema.FlowMessage, 10),
-		httpFlowFlushDelay: time.Second,
-
-		classifierExporterCache:  cache.New[exporterInfo, exporterClassification](),
-		classifierInterfaceCache: cache.New[exporterAndInterfaceInfo, interfaceClassification](),
-		classifierErrLogger:      r.Sample(reporter.BurstSampler(10*time.Second, 3)),
-	}
-	c.d.Daemon.Track(&c.t, "inlet/core")
-	c.initMetrics()
-	return &c, nil
-}
-
-// Start starts the core component.
-func (c *Component) Start() error {
-	c.r.Info().Msg("starting core component")
-	for i := range c.config.Workers {
-		workerID := i
-		c.t.Go(func() error {
-			return c.runWorker(workerID)
-		})
-	}
-
-	// Classifier cache expiration
-	c.t.Go(func() error {
-		for {
-			select {
-			case <-c.t.Dying():
-				return nil
-			case <-time.After(c.config.ClassifierCacheDuration):
-				before := time.Now().Add(-c.config.ClassifierCacheDuration)
-				c.classifierExporterCache.DeleteLastAccessedBefore(before)
-				c.classifierInterfaceCache.DeleteLastAccessedBefore(before)
-			}
-		}
-	})
-
-	c.r.RegisterHealthcheck("core", c.channelHealthcheck())
-	c.d.HTTP.GinRouter.GET("/api/v0/inlet/flows", c.FlowsHTTPHandler)
-	return nil
-}
-
-// runWorker starts a worker.
-func (c *Component) runWorker(workerID int) error {
-	c.r.Debug().Int("worker", workerID).Msg("starting core worker")
-
-	for {
-		select {
-		case <-c.t.Dying():
-			c.r.Debug().Int("worker", workerID).Msg("stopping core worker")
-			return nil
-		case cb, ok := <-c.healthy:
-			if ok {
-				cb(reporter.HealthcheckOK, fmt.Sprintf("worker %d ok", workerID))
-			}
-		case flow := <-c.d.Flow.Flows():
-			if flow == nil {
-				c.r.Info().Int("worker", workerID).Msg("no more flow available, stopping")
-				return nil
-			}
-
-			exporter := flow.ExporterAddress.Unmap().String()
-			c.metrics.flowsReceived.WithLabelValues(exporter).Inc()
-
-			// Enrichment
-			ip := flow.ExporterAddress
-			if skip := c.enrichFlow(ip, exporter, flow); skip {
-				continue
-			}
-
-			// Serialize flow to Protobuf
-			buf := c.d.Schema.ProtobufMarshal(flow)
-
-			// Forward to Kafka. This could block and buf is now owned by the
-			// Kafka subsystem!
-			c.metrics.flowsForwarded.WithLabelValues(exporter).Inc()
-			c.d.Kafka.Send(exporter, buf)
-
-			// If we have HTTP clients, send to them too
-			if atomic.LoadUint32(&c.httpFlowClients) > 0 {
-				select {
-				case c.httpFlowChannel <- flow: // OK
-				default: // Overflow, best effort and ignore
-				}
-			}

-		}
-	}
-}
-
-// Stop stops the core component.
-func (c *Component) Stop() error {
-	defer func() {
-		close(c.httpFlowChannel)
-		close(c.healthy)
-		c.r.Info().Msg("core component stopped")
-	}()
-	c.r.Info().Msg("stopping core component")
-	c.t.Kill(nil)
-	return c.t.Wait()
-}
-
-func (c *Component) channelHealthcheck() reporter.HealthcheckFunc {
-	return reporter.ChannelHealthcheck(c.t.Context(nil), c.healthy)
-}
@@ -1,363 +0,0 @@
-// SPDX-FileCopyrightText: 2022 Free Mobile
-// SPDX-License-Identifier: AGPL-3.0-only
-
-package core
-
-import (
-	"bufio"
-	"context"
-	"encoding/json"
-	"fmt"
-	"io"
-	"net/http"
-	"net/netip"
-	"strings"
-	"testing"
-	"time"
-
-	"github.com/IBM/sarama"
-	"github.com/gin-gonic/gin"
-
-	"akvorado/common/daemon"
-	"akvorado/common/helpers"
-	"akvorado/common/httpserver"
-	"akvorado/common/reporter"
-	"akvorado/common/schema"
-	"akvorado/inlet/flow"
-	"akvorado/inlet/kafka"
-	"akvorado/inlet/metadata"
-	"akvorado/inlet/routing"
-)
-
-func TestCore(t *testing.T) {
-	r := reporter.NewMock(t)
-
-	// Prepare all components.
-	daemonComponent := daemon.NewMock(t)
-	metadataComponent := metadata.NewMock(t, r, metadata.DefaultConfiguration(),
-		metadata.Dependencies{Daemon: daemonComponent})
-	flowComponent := flow.NewMock(t, r, flow.DefaultConfiguration())
-	kafkaComponent, kafkaProducer := kafka.NewMock(t, r, kafka.DefaultConfiguration())
-	httpComponent := httpserver.NewMock(t, r)
-	routingComponent := routing.NewMock(t, r)
-	routingComponent.PopulateRIB(t)
-
-	// Instantiate and start core
-	sch := schema.NewMock(t)
-	c, err := New(r, DefaultConfiguration(), Dependencies{
-		Daemon:   daemonComponent,
-		Flow:     flowComponent,
-		Metadata: metadataComponent,
-		Kafka:    kafkaComponent,
-		HTTP:     httpComponent,
-		Routing:  routingComponent,
-		Schema:   sch,
-	})
-	if err != nil {
-		t.Fatalf("New() error:\n%+v", err)
-	}
-	helpers.StartStop(t, c)
-
-	flowMessage := func(exporter string, in, out uint32) *schema.FlowMessage {
-		msg := &schema.FlowMessage{
-			TimeReceived:    200,
-			SamplingRate:    1000,
-			ExporterAddress: netip.MustParseAddr(exporter),
-			InIf:            in,
-			OutIf:           out,
-			SrcAddr:         netip.MustParseAddr("67.43.156.77"),
-			DstAddr:         netip.MustParseAddr("2.125.160.216"),
-			ProtobufDebug: map[schema.ColumnKey]interface{}{
-				schema.ColumnBytes:   6765,
-				schema.ColumnPackets: 4,
-				schema.ColumnEType:   0x800,
-				schema.ColumnProto:   6,
-				schema.ColumnSrcPort: 8534,
-				schema.ColumnDstPort: 80,
-			},
-		}
-		for k, v := range msg.ProtobufDebug {
-			vi := v.(int)
-			sch.ProtobufAppendVarint(msg, k, uint64(vi))
-		}
-		return msg
-	}
-
-	expectedFlowMessage := func(exporter string, in, out uint32) *schema.FlowMessage {
-		expected := flowMessage(exporter, in, out)
-		expected.SrcAS = 0 // no geoip enrich anymore
-		expected.DstAS = 0 // no geoip enrich anymore
-		expected.InIf = 0  // not serialized
-		expected.OutIf = 0 // not serialized
-		expected.ExporterAddress = netip.AddrFrom16(expected.ExporterAddress.As16())
-		expected.SrcAddr = netip.AddrFrom16(expected.SrcAddr.As16())
-		expected.DstAddr = netip.AddrFrom16(expected.DstAddr.As16())
-		expected.ProtobufDebug[schema.ColumnInIfName] = fmt.Sprintf("Gi0/0/%d", in)
-		expected.ProtobufDebug[schema.ColumnOutIfName] = fmt.Sprintf("Gi0/0/%d", out)
-		expected.ProtobufDebug[schema.ColumnInIfDescription] = fmt.Sprintf("Interface %d", in)
-		expected.ProtobufDebug[schema.ColumnOutIfDescription] = fmt.Sprintf("Interface %d", out)
-		expected.ProtobufDebug[schema.ColumnInIfSpeed] = 1000
-		expected.ProtobufDebug[schema.ColumnOutIfSpeed] = 1000
-		expected.ProtobufDebug[schema.ColumnExporterName] = strings.ReplaceAll(exporter, ".", "_")
-		return expected
-	}
-
-	t.Run("kafka", func(t *testing.T) {
-		// Inject several messages with a cache miss from the SNMP
-		// component for each of them. No message sent to Kafka.
-		flowComponent.Inject(flowMessage("192.0.2.142", 434, 677))
-		flowComponent.Inject(flowMessage("192.0.2.143", 434, 677))
-		flowComponent.Inject(flowMessage("192.0.2.143", 437, 677))
-		flowComponent.Inject(flowMessage("192.0.2.143", 434, 679))
-
-		time.Sleep(20 * time.Millisecond)
-		gotMetrics := r.GetMetrics("akvorado_inlet_core_", "-flows_processing_")
-		expectedMetrics := map[string]string{
-			`classifier_exporter_cache_size_items`:  "0",
-			`classifier_interface_cache_size_items`: "0",
-			`flows_errors_total{error="SNMP cache miss",exporter="192.0.2.142"}`: "1",
-			`flows_errors_total{error="SNMP cache miss",exporter="192.0.2.143"}`: "3",
-			`received_flows_total{exporter="192.0.2.142"}`: "1",
-			`received_flows_total{exporter="192.0.2.143"}`: "3",
-			`flows_http_clients`: "0",
-		}
-		if diff := helpers.Diff(gotMetrics, expectedMetrics); diff != "" {
-			t.Fatalf("Metrics (-got, +want):\n%s", diff)
-		}
-
-		// Inject again the messages, this time, we will get a cache hit!
-		kafkaProducer.ExpectInputAndSucceed()
-		flowComponent.Inject(flowMessage("192.0.2.142", 434, 677))
-		kafkaProducer.ExpectInputAndSucceed()
-		flowComponent.Inject(flowMessage("192.0.2.143", 437, 679))
-
-		time.Sleep(20 * time.Millisecond)
-		gotMetrics = r.GetMetrics("akvorado_inlet_core_", "classifier_", "-flows_processing_", "flows_", "received_", "forwarded_")
-		expectedMetrics = map[string]string{
-			`classifier_exporter_cache_size_items`:  "0",
-			`classifier_interface_cache_size_items`: "0",
-			`flows_errors_total{error="SNMP cache miss",exporter="192.0.2.142"}`: "1",
-			`flows_errors_total{error="SNMP cache miss",exporter="192.0.2.143"}`: "3",
-			`received_flows_total{exporter="192.0.2.142"}`:  "2",
-			`received_flows_total{exporter="192.0.2.143"}`:  "4",
-			`forwarded_flows_total{exporter="192.0.2.142"}`: "1",
-			`forwarded_flows_total{exporter="192.0.2.143"}`: "1",
-			`flows_http_clients`: "0",
-		}
-		if diff := helpers.Diff(gotMetrics, expectedMetrics); diff != "" {
-			t.Fatalf("Metrics (-got, +want):\n%s", diff)
-		}
-
-		// Now, check we get the message we expect
-		input := flowMessage("192.0.2.142", 434, 677)
-		received := make(chan bool)
-		kafkaProducer.ExpectInputWithMessageCheckerFunctionAndSucceed(func(msg *sarama.ProducerMessage) error {
-			defer close(received)
-			expectedTopic := fmt.Sprintf("flows-%s", sch.ProtobufMessageHash())
-			if msg.Topic != expectedTopic {
-				t.Errorf("Kafka message topic (-got, +want):\n-%s\n+%s", msg.Topic, expectedTopic)
-			}
-
-			b, err := msg.Value.Encode()
-			if err != nil {
-				t.Fatalf("Kafka message encoding error:\n%+v", err)
-			}
-			got := sch.ProtobufDecode(t, b)
-			expected := expectedFlowMessage("192.0.2.142", 434, 677)
-			if diff := helpers.Diff(&got, expected); diff != "" {
-				t.Errorf("Kafka message (-got, +want):\n%s", diff)
-			}
-
-			return nil
-		})
-		flowComponent.Inject(input)
-		select {
-		case <-received:
-		case <-time.After(time.Second):
-			t.Fatal("Kafka message not received")
-		}
-
-		// Try to inject a message with missing sampling rate
-		input = flowMessage("192.0.2.142", 434, 677)
-		input.SamplingRate = 0
-		flowComponent.Inject(input)
-		time.Sleep(20 * time.Millisecond)
-		gotMetrics = r.GetMetrics("akvorado_inlet_core_", "classifier_", "-flows_processing_", "flows_", "forwarded_", "received_")
-		expectedMetrics = map[string]string{
-			`classifier_exporter_cache_size_items`:  "0",
-			`classifier_interface_cache_size_items`: "0",
-			`flows_errors_total{error="SNMP cache miss",exporter="192.0.2.142"}`: "1",
-			`flows_errors_total{error="SNMP cache miss",exporter="192.0.2.143"}`: "3",
-			`flows_errors_total{error="sampling rate missing",exporter="192.0.2.142"}`: "1",
-			`received_flows_total{exporter="192.0.2.142"}`:  "4",
-			`received_flows_total{exporter="192.0.2.143"}`:  "4",
-			`forwarded_flows_total{exporter="192.0.2.142"}`: "2",
-			`forwarded_flows_total{exporter="192.0.2.143"}`: "1",
-			`flows_http_clients`: "0",
-		}
-		if diff := helpers.Diff(gotMetrics, expectedMetrics); diff != "" {
-			t.Fatalf("Metrics (-got, +want):\n%s", diff)
-		}
-	})
-
-	// Test the healthcheck function
-	t.Run("healthcheck", func(t *testing.T) {
-		got := r.RunHealthchecks(context.Background())
-		if diff := helpers.Diff(got.Details["core"], reporter.HealthcheckResult{
-			Status: reporter.HealthcheckOK,
-			Reason: "worker 0 ok",
-		}); diff != "" {
-			t.Fatalf("runHealthcheck() (-got, +want):\n%s", diff)
-		}
-	})
-
-	// Test HTTP flow clients (JSON)
-	t.Run("http flows", func(t *testing.T) {
-		c.httpFlowFlushDelay = 20 * time.Millisecond
-
-		resp, err := http.Get(fmt.Sprintf("http://%s/api/v0/inlet/flows", c.d.HTTP.LocalAddr()))
-		if err != nil {
-			t.Fatalf("GET /api/v0/inlet/flows:\n%+v", err)
-		}
-		defer resp.Body.Close()
-		if resp.StatusCode != 200 {
-			t.Fatalf("GET /api/v0/inlet/flows status code %d", resp.StatusCode)
-		}
-
-		// Metrics should tell we have a client
-		gotMetrics := r.GetMetrics("akvorado_inlet_core_", "flows_http_clients", "-flows_processing_")
-		expectedMetrics := map[string]string{
-			`flows_http_clients`: "1",
-		}
-		if diff := helpers.Diff(gotMetrics, expectedMetrics); diff != "" {
-			t.Fatalf("Metrics (-got, +want):\n%s", diff)
-		}
-
-		// Produce some flows
-		for range 12 {
-			kafkaProducer.ExpectInputAndSucceed()
-			flowComponent.Inject(flowMessage("192.0.2.142", 434, 677))
-		}
-
-		// Decode some of them
-		reader := bufio.NewReader(resp.Body)
-		decoder := json.NewDecoder(reader)
-		for range 10 {
-			var got gin.H
-			if err := decoder.Decode(&got); err != nil {
-				t.Fatalf("GET /api/v0/inlet/flows error while reading body:\n%+v", err)
-			}
-			expected := gin.H{
-				"TimeReceived":    200,
-				"SamplingRate":    1000,
-				"ExporterAddress": "192.0.2.142",
-				"SrcAddr":         "67.43.156.77",
-				"DstAddr":         "2.125.160.216",
-				"SrcAS":           0, // no geoip enrich anymore
-				"InIf":            434,
-				"OutIf":           677,

-				"NextHop":        "",
-				"SrcNetMask":     0,
-				"DstNetMask":     0,
-				"SrcVlan":        0,
-				"DstVlan":        0,
-				"GotASPath":      false,
-				"GotCommunities": false,
-				"DstAS":          0,
-			}
-			if diff := helpers.Diff(got, expected); diff != "" {
-				t.Fatalf("GET /api/v0/inlet/flows (-got, +want):\n%s", diff)
-			}
-		}
-	})
-
-	// Test HTTP flow clients with a limit
-	time.Sleep(10 * time.Millisecond)
-	t.Run("http flows with limit", func(t *testing.T) {
-		resp, err := http.Get(fmt.Sprintf("http://%s/api/v0/inlet/flows?limit=4", c.d.HTTP.LocalAddr()))
-		if err != nil {
-			t.Fatalf("GET /api/v0/inlet/flows:\n%+v", err)
-		}
-		defer resp.Body.Close()
-		if resp.StatusCode != 200 {
-			t.Fatalf("GET /api/v0/inlet/flows status code %d", resp.StatusCode)
-		}
-
-		// Metrics should tell we have a client
-		gotMetrics := r.GetMetrics("akvorado_inlet_core_", "flows_http_clients")
-		expectedMetrics := map[string]string{
-			`flows_http_clients`: "1",
-		}
-		if diff := helpers.Diff(gotMetrics, expectedMetrics); diff != "" {
-			t.Fatalf("Metrics (-got, +want):\n%s", diff)
-		}
-
-		// Produce some flows
-		for range 12 {
-			kafkaProducer.ExpectInputAndSucceed()
-			flowComponent.Inject(flowMessage("192.0.2.142", 434, 677))
-		}
-
-		// Check we got only 4
-		reader := bufio.NewReader(resp.Body)
-		count := 0
-		for {
-			_, err := reader.ReadString('\n')
-			if err == io.EOF {
-				t.Log("EOF")
-				break
-			}
-			if err != nil {
-				t.Fatalf("GET /api/v0/inlet/flows error while reading:\n%+v", err)
-			}
-			count++
-			if count > 4 {
-				break
-			}
-		}
-		if count != 4 {
-			t.Fatalf("GET /api/v0/inlet/flows got less than 4 flows (%d)", count)
-		}
-	})
-
-	// Test HTTP flow clients using protobuf
-	time.Sleep(10 * time.Millisecond)
-	t.Run("http flows with protobuf", func(t *testing.T) {
-		req, err := http.NewRequest(http.MethodGet, fmt.Sprintf("http://%s/api/v0/inlet/flows?limit=1", c.d.HTTP.LocalAddr()), nil)
-		if err != nil {
-			t.Fatalf("http.NewRequest() error:\n%+v", err)
-		}
-		req.Header.Set("accept", "application/x-protobuf")
-		resp, err := http.DefaultClient.Do(req)
-		if err != nil {
-			t.Fatalf("GET /api/v0/inlet/flows:\n%+v", err)
-		}
-		defer resp.Body.Close()
-		if resp.StatusCode != 200 {
-			t.Fatalf("GET /api/v0/inlet/flows status code %d", resp.StatusCode)
-		}
-
-		// Produce some flows
-		for range 12 {
-			kafkaProducer.ExpectInputAndSucceed()
-			flowComponent.Inject(flowMessage("192.0.2.142", 434, 677))
-		}
-
-		// Get the resulting flow
-		reader := bufio.NewReader(resp.Body)
-		got, err := io.ReadAll(reader)
-		if err != nil {
-			t.Fatalf("GET /api/v0/inlet/flows error while reading:\n%+v", err)
-		}
-		t.Logf("got %v", got)
-
-		// Decode
-		sch := schema.NewMock(t)
-		decoded := sch.ProtobufDecode(t, got)
-		expected := expectedFlowMessage("192.0.2.142", 434, 677)
-		if diff := helpers.Diff(decoded, expected); diff != "" {
-			t.Errorf("HTTP message (-got, +want):\n%s", diff)
-		}
-	})
-}
@@ -4,10 +4,8 @@
 package flow

 import (
-	"golang.org/x/time/rate"
-
 	"akvorado/common/helpers"
-	"akvorado/inlet/flow/decoder"
+	"akvorado/common/pb"
 	"akvorado/inlet/flow/input"
 	"akvorado/inlet/flow/input/file"
 	"akvorado/inlet/flow/input/udp"
@@ -17,21 +15,18 @@ import (
 type Configuration struct {
 	// Inputs define a list of input modules to enable
 	Inputs []InputConfiguration `validate:"dive"`
-	// RateLimit defines a rate limit on the number of flows per
-	// second. The limit is per-exporter.
-	RateLimit rate.Limit `validate:"isdefault|min=100"`
 }

 // DefaultConfiguration represents the default configuration for the flow component
 func DefaultConfiguration() Configuration {
 	return Configuration{
 		Inputs: []InputConfiguration{{
-			TimestampSource: decoder.TimestampSourceUDP,
-			Decoder:         "netflow",
+			TimestampSource: pb.RawFlow_TS_INPUT,
+			Decoder:         pb.RawFlow_DECODER_NETFLOW,
 			Config:          udp.DefaultConfiguration(),
 		}, {
-			TimestampSource: decoder.TimestampSourceUDP,
-			Decoder:         "sflow",
+			TimestampSource: pb.RawFlow_TS_INPUT,
+			Decoder:         pb.RawFlow_DECODER_SFLOW,
 			Config:          udp.DefaultConfiguration(),
 		}},
 	}
@@ -40,12 +35,12 @@ func DefaultConfiguration() Configuration {
 // InputConfiguration represents the configuration for an input.
 type InputConfiguration struct {
 	// Decoder is the decoder to associate to the input.
-	Decoder string
+	Decoder pb.RawFlow_Decoder `validate:"required"`
 	// UseSrcAddrForExporterAddr replaces the exporter address by the transport
 	// source address.
 	UseSrcAddrForExporterAddr bool
 	// TimestampSource identifies the source to use to timestamp the flows
-	TimestampSource decoder.TimestampSource
+	TimestampSource pb.RawFlow_TimestampSource
 	// Config is the actual configuration of the input.
 	Config input.Configuration
 }
@@ -61,6 +56,7 @@ var inputs = map[string](func() input.Configuration){
 }

 func init() {
+	helpers.RegisterMapstructureDeprecatedFields[Configuration]("RateLimit")
 	helpers.RegisterMapstructureUnmarshallerHook(
 		helpers.ParametrizedConfigurationUnmarshallerHook(InputConfiguration{}, inputs))
 }
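With the string decoder names gone, inputs are declared through the protobuf enums. A minimal sketch using only the types and constants visible in this hunk; the main function and the fmt output are scaffolding, not part of the change:

// Sketch: building the new enum-typed input configuration.
package main

import (
	"fmt"

	"akvorado/common/pb"
	"akvorado/inlet/flow"
	"akvorado/inlet/flow/input/udp"
)

func main() {
	cfg := flow.Configuration{
		Inputs: []flow.InputConfiguration{{
			Decoder:         pb.RawFlow_DECODER_NETFLOW,           // was: "netflow"
			TimestampSource: pb.RawFlow_TS_NETFLOW_FIRST_SWITCHED, // was: decoder.TimestampSourceNetflowFirstSwitched
			Config:          udp.DefaultConfiguration(),
		}},
	}
	fmt.Printf("%+v\n", cfg)
}

Typing Decoder as an enum (with `validate:"required"`) moves what used to be a runtime string lookup into configuration decoding, so a misspelled decoder name fails at load time.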
@@ -10,9 +10,9 @@ import (
 	"github.com/gin-gonic/gin"

 	"akvorado/common/helpers/yaml"
+	"akvorado/common/pb"

 	"akvorado/common/helpers"
-	"akvorado/inlet/flow/decoder"
 	"akvorado/inlet/flow/input/file"
 	"akvorado/inlet/flow/input/udp"
 )
@@ -42,7 +42,7 @@ func TestDecodeConfiguration(t *testing.T) {
 			},
 			Expected: Configuration{
 				Inputs: []InputConfiguration{{
-					Decoder: "netflow",
+					Decoder: pb.RawFlow_DECODER_NETFLOW,
 					Config: &udp.Configuration{
 						Workers:   3,
 						QueueSize: 100000,
@@ -50,7 +50,7 @@ func TestDecodeConfiguration(t *testing.T) {
 					},
 					UseSrcAddrForExporterAddr: true,
 				}, {
-					Decoder: "sflow",
+					Decoder: pb.RawFlow_DECODER_SFLOW,
 					Config: &udp.Configuration{
 						Workers:   3,
 						QueueSize: 100000,
@@ -64,10 +64,10 @@ func TestDecodeConfiguration(t *testing.T) {
 			Initial: func() interface{} {
 				return Configuration{
 					Inputs: []InputConfiguration{{
-						Decoder: "netflow",
+						Decoder: pb.RawFlow_DECODER_NETFLOW,
 						Config:  udp.DefaultConfiguration(),
 					}, {
-						Decoder: "sflow",
+						Decoder: pb.RawFlow_DECODER_SFLOW,
 						Config:  udp.DefaultConfiguration(),
 					}},
 				}
@@ -91,14 +91,14 @@ func TestDecodeConfiguration(t *testing.T) {
 			},
 			Expected: Configuration{
 				Inputs: []InputConfiguration{{
-					Decoder: "netflow",
+					Decoder: pb.RawFlow_DECODER_NETFLOW,
 					Config: &udp.Configuration{
 						Workers:   3,
 						QueueSize: 100000,
 						Listen:    "192.0.2.1:2055",
 					},
 				}, {
-					Decoder: "sflow",
+					Decoder: pb.RawFlow_DECODER_SFLOW,
 					Config: &udp.Configuration{
 						Workers:   3,
 						QueueSize: 100000,
@@ -111,7 +111,7 @@ func TestDecodeConfiguration(t *testing.T) {
 			Initial: func() interface{} {
 				return Configuration{
 					Inputs: []InputConfiguration{{
-						Decoder: "netflow",
+						Decoder: pb.RawFlow_DECODER_NETFLOW,
 						Config:  udp.DefaultConfiguration(),
 					}},
 				}
@@ -128,7 +128,7 @@ func TestDecodeConfiguration(t *testing.T) {
 			},
 			Expected: Configuration{
 				Inputs: []InputConfiguration{{
-					Decoder: "netflow",
+					Decoder: pb.RawFlow_DECODER_NETFLOW,
 					Config: &file.Configuration{
 						Paths: []string{"file1", "file2"},
 					},
@@ -139,8 +139,8 @@ func TestDecodeConfiguration(t *testing.T) {
 			Initial: func() interface{} {
 				return Configuration{
 					Inputs: []InputConfiguration{{
-						Decoder:         "netflow",
-						TimestampSource: decoder.TimestampSourceUDP,
+						Decoder:         pb.RawFlow_DECODER_NETFLOW,
+						TimestampSource: pb.RawFlow_TS_INPUT,
 						Config: &udp.Configuration{
 							Workers:   2,
 							QueueSize: 100,
@@ -160,7 +160,7 @@ func TestDecodeConfiguration(t *testing.T) {
 			},
 			Expected: Configuration{
 				Inputs: []InputConfiguration{{
-					Decoder: "netflow",
+					Decoder: pb.RawFlow_DECODER_NETFLOW,
 					Config: &udp.Configuration{
 						Workers:   2,
 						QueueSize: 100,
@@ -197,7 +197,7 @@ func TestDecodeConfiguration(t *testing.T) {
 			Initial: func() interface{} {
 				return Configuration{
 					Inputs: []InputConfiguration{{
-						Decoder: "netflow",
+						Decoder: pb.RawFlow_DECODER_NETFLOW,
 						Config: &udp.Configuration{
 							Workers:   2,
 							QueueSize: 100,
@@ -218,8 +218,8 @@ func TestDecodeConfiguration(t *testing.T) {
 			},
 			Expected: Configuration{
 				Inputs: []InputConfiguration{{
-					Decoder:         "netflow",
-					TimestampSource: decoder.TimestampSourceNetflowPacket,
+					Decoder:         pb.RawFlow_DECODER_NETFLOW,
+					TimestampSource: pb.RawFlow_TS_NETFLOW_PACKET,
 					Config: &udp.Configuration{
 						Workers:   2,
 						QueueSize: 100,
@@ -233,7 +233,7 @@ func TestDecodeConfiguration(t *testing.T) {
 			Initial: func() interface{} {
 				return Configuration{
 					Inputs: []InputConfiguration{{
-						Decoder: "netflow",
+						Decoder: pb.RawFlow_DECODER_NETFLOW,
 						Config: &udp.Configuration{
 							Workers:   2,
 							QueueSize: 100,
@@ -254,8 +254,8 @@ func TestDecodeConfiguration(t *testing.T) {
 			},
 			Expected: Configuration{
 				Inputs: []InputConfiguration{{
-					Decoder:         "netflow",
-					TimestampSource: decoder.TimestampSourceNetflowFirstSwitched,
+					Decoder:         pb.RawFlow_DECODER_NETFLOW,
+					TimestampSource: pb.RawFlow_TS_NETFLOW_FIRST_SWITCHED,
 					Config: &udp.Configuration{
 						Workers:   2,
 						QueueSize: 100,
@@ -271,15 +271,15 @@ func TestMarshalYAML(t *testing.T) {
 	cfg := Configuration{
 		Inputs: []InputConfiguration{
 			{
-				Decoder:         "netflow",
-				TimestampSource: decoder.TimestampSourceNetflowFirstSwitched,
+				Decoder:         pb.RawFlow_DECODER_NETFLOW,
+				TimestampSource: pb.RawFlow_TS_NETFLOW_FIRST_SWITCHED,
 				Config: &udp.Configuration{
 					Listen:    "192.0.2.11:2055",
 					QueueSize: 1000,
 					Workers:   3,
 				},
 			}, {
-				Decoder: "sflow",
+				Decoder: pb.RawFlow_DECODER_SFLOW,
 				Config: &udp.Configuration{
 					Listen:    "192.0.2.11:6343",
 					QueueSize: 1000,
@@ -306,11 +306,10 @@ func TestMarshalYAML(t *testing.T) {
     listen: 192.0.2.11:6343
     queuesize: 1000
     receivebuffer: 0
-    timestampsource: udp
+    timestampsource: input
     type: udp
     usesrcaddrforexporteraddr: true
     workers: 3
-ratelimit: 0
 `
 	if diff := helpers.Diff(strings.Split(string(got), "\n"), strings.Split(expected, "\n")); diff != "" {
 		t.Fatalf("Marshal() (-got, +want):\n%s", diff)
@@ -1,66 +0,0 @@
-// SPDX-FileCopyrightText: 2022 Free Mobile
-// SPDX-License-Identifier: AGPL-3.0-only
-
-package flow
-
-import (
-	"net/netip"
-
-	"akvorado/common/schema"
-	"akvorado/inlet/flow/decoder"
-	"akvorado/inlet/flow/decoder/netflow"
-	"akvorado/inlet/flow/decoder/sflow"
-)
-
-type wrappedDecoder struct {
-	c                         *Component
-	orig                      decoder.Decoder
-	useSrcAddrForExporterAddr bool
-}
-
-// Decode decodes a flow while keeping some stats.
-func (wd *wrappedDecoder) Decode(in decoder.RawFlow) []*schema.FlowMessage {
-	defer func() {
-		if r := recover(); r != nil {
-			wd.c.metrics.decoderErrors.WithLabelValues(wd.orig.Name()).
-				Inc()
-		}
-	}()
-	decoded := wd.orig.Decode(in)
-
-	if decoded == nil {
-		wd.c.metrics.decoderErrors.WithLabelValues(wd.orig.Name()).
-			Inc()
-		return nil
-	}
-
-	if wd.useSrcAddrForExporterAddr {
-		exporterAddress, _ := netip.AddrFromSlice(in.Source.To16())
-		for _, f := range decoded {
-			f.ExporterAddress = exporterAddress
-		}
-	}
-
-	wd.c.metrics.decoderStats.WithLabelValues(wd.orig.Name()).
-		Inc()
-	return decoded
-}
-
-// Name returns the name of the original decoder.
-func (wd *wrappedDecoder) Name() string {
-	return wd.orig.Name()
-}
-
-// wrapDecoder wraps the provided decoder to get statistics from it.
-func (c *Component) wrapDecoder(d decoder.Decoder, useSrcAddrForExporterAddr bool) decoder.Decoder {
-	return &wrappedDecoder{
-		c:                         c,
-		orig:                      d,
-		useSrcAddrForExporterAddr: useSrcAddrForExporterAddr,
-	}
-}
-
-var decoders = map[string]decoder.NewDecoderFunc{
-	"netflow": netflow.New,
-	"sflow":   sflow.New,
-}
@@ -1,19 +0,0 @@
-// SPDX-FileCopyrightText: 2024 Free Mobile
-// SPDX-License-Identifier: AGPL-3.0-only
-
-package decoder
-
-// TimestampSource defines the method to use to extract the TimeReceived for the flows
-type TimestampSource uint
-
-const (
-	// TimestampSourceUDP tells the decoder to use the kernel time at which
-	// the UDP packet was received
-	TimestampSourceUDP TimestampSource = iota
-	// TimestampSourceNetflowPacket tells the decoder to use the timestamp
-	// from the router in the netflow packet
-	TimestampSourceNetflowPacket
-	// TimestampSourceNetflowFirstSwitched tells the decoder to use the timestamp
-	// from each flow "FIRST_SWITCHED" field
-	TimestampSourceNetflowFirstSwitched
-)
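The three removed constants have direct counterparts in the pb.RawFlow enum, as the config_test.go hunks above show (the YAML value moves from "udp" to "input", and the two netflow variants keep their meaning). A hypothetical helper making the correspondence explicit; the legacy type and the function are not part of this change:

// Hypothetical mapping from the removed decoder.TimestampSource values to
// the pb.RawFlow enum. The correspondence is taken from the hunks above;
// the helper itself is illustrative.
package main

import "akvorado/common/pb"

type legacyTimestampSource uint

const (
	legacyTimestampSourceUDP legacyTimestampSource = iota
	legacyTimestampSourceNetflowPacket
	legacyTimestampSourceNetflowFirstSwitched
)

func toPBTimestampSource(old legacyTimestampSource) pb.RawFlow_TimestampSource {
	switch old {
	case legacyTimestampSourceNetflowPacket:
		return pb.RawFlow_TS_NETFLOW_PACKET
	case legacyTimestampSourceNetflowFirstSwitched:
		return pb.RawFlow_TS_NETFLOW_FIRST_SWITCHED
	default: // legacyTimestampSourceUDP
		return pb.RawFlow_TS_INPUT
	}
}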
@@ -1,543 +0,0 @@
-// SPDX-FileCopyrightText: 2022 Tchadel Icard
-// SPDX-License-Identifier: AGPL-3.0-only
-
-package sflow
-
-import (
-	"net"
-	"net/netip"
-	"path/filepath"
-	"testing"
-
-	"akvorado/common/helpers"
-	"akvorado/common/reporter"
-	"akvorado/common/schema"
-	"akvorado/inlet/flow/decoder"
-)
-
-func TestDecode(t *testing.T) {
-	r := reporter.NewMock(t)
-	sdecoder := New(r, decoder.Dependencies{Schema: schema.NewMock(t).EnableAllColumns()}, decoder.Option{})
-
-	// Send data
-	data := helpers.ReadPcapL4(t, filepath.Join("testdata", "data-1140.pcap"))
-	got := sdecoder.Decode(decoder.RawFlow{Payload: data, Source: net.ParseIP("127.0.0.1")})
-	if got == nil {
-		t.Fatalf("Decode() error on data")
-	}
-	expectedFlows := []*schema.FlowMessage{
-		{
-			SamplingRate:    1024,
-			InIf:            27,
-			OutIf:           28,
-			SrcVlan:         100,
-			DstVlan:         100,
-			SrcAddr:         netip.MustParseAddr("2a0c:8880:2:0:185:21:130:38"),
-			DstAddr:         netip.MustParseAddr("2a0c:8880:2:0:185:21:130:39"),
-			ExporterAddress: netip.MustParseAddr("::ffff:172.16.0.3"),
-			ProtobufDebug: map[schema.ColumnKey]interface{}{
-				schema.ColumnBytes:         1500,
-				schema.ColumnPackets:       1,
-				schema.ColumnEType:         helpers.ETypeIPv6,
-				schema.ColumnProto:         6,
-				schema.ColumnSrcPort:       46026,
-				schema.ColumnDstPort:       22,
-				schema.ColumnSrcMAC:        40057391053392,
-				schema.ColumnDstMAC:        40057381862408,
-				schema.ColumnIPTTL:         64,
-				schema.ColumnIPTos:         0x8,
-				schema.ColumnIPv6FlowLabel: 0x68094,
-				schema.ColumnTCPFlags:      0x10,
-			},
-		}, {
-			SamplingRate:    1024,
-			SrcAddr:         netip.MustParseAddr("::ffff:104.26.8.24"),
-			DstAddr:         netip.MustParseAddr("::ffff:45.90.161.46"),
-			ExporterAddress: netip.MustParseAddr("::ffff:172.16.0.3"),
-			NextHop:         netip.MustParseAddr("::ffff:45.90.161.46"),
-			InIf:            49001,
-			OutIf:           25,
-			DstVlan:         100,
-			SrcAS:           13335,
-			DstAS:           39421,
-			SrcNetMask:      20,
-			DstNetMask:      27,
-			GotASPath:       false,
-			ProtobufDebug: map[schema.ColumnKey]interface{}{
-				schema.ColumnBytes:        421,
-				schema.ColumnPackets:      1,
-				schema.ColumnEType:        helpers.ETypeIPv4,
-				schema.ColumnProto:        6,
-				schema.ColumnSrcPort:      443,
-				schema.ColumnDstPort:      56876,
-				schema.ColumnSrcMAC:       216372595274807,
-				schema.ColumnDstMAC:       191421060163210,
-				schema.ColumnIPFragmentID: 0xa572,
-				schema.ColumnIPTTL:        59,
-				schema.ColumnTCPFlags:     0x18,
-			},
-		}, {
-			SamplingRate:    1024,
-			SrcAddr:         netip.MustParseAddr("2a0c:8880:2:0:185:21:130:38"),
-			DstAddr:         netip.MustParseAddr("2a0c:8880:2:0:185:21:130:39"),
-			ExporterAddress: netip.MustParseAddr("::ffff:172.16.0.3"),
-			InIf:            27,
-			OutIf:           28,
-			SrcVlan:         100,
-			DstVlan:         100,
-			ProtobufDebug: map[schema.ColumnKey]interface{}{
-				schema.ColumnBytes:         1500,
-				schema.ColumnPackets:       1,
-				schema.ColumnEType:         helpers.ETypeIPv6,
-				schema.ColumnProto:         6,
-				schema.ColumnSrcPort:       46026,
-				schema.ColumnDstPort:       22,
-				schema.ColumnSrcMAC:        40057391053392,
-				schema.ColumnDstMAC:        40057381862408,
-				schema.ColumnIPTTL:         64,
-				schema.ColumnIPTos:         0x8,
-				schema.ColumnIPv6FlowLabel: 0x68094,
-				schema.ColumnTCPFlags:      0x10,
-			},
-		}, {
-			SamplingRate:    1024,
-			InIf:            28,
-			OutIf:           49001,
-			SrcVlan:         100,
-			SrcAS:           39421,
-			DstAS:           26615,
-			SrcAddr:         netip.MustParseAddr("::ffff:45.90.161.148"),
-			DstAddr:         netip.MustParseAddr("::ffff:191.87.91.27"),
-			ExporterAddress: netip.MustParseAddr("::ffff:172.16.0.3"),
-			NextHop:         netip.MustParseAddr("::ffff:31.14.69.110"),
-			SrcNetMask:      27,
-			DstNetMask:      17,
-			GotASPath:       true,
-			GotCommunities:  true,
-			ProtobufDebug: map[schema.ColumnKey]interface{}{
-				schema.ColumnBytes:          40,
-				schema.ColumnPackets:        1,
-				schema.ColumnEType:          helpers.ETypeIPv4,
-				schema.ColumnProto:          6,
-				schema.ColumnSrcPort:        55658,
-				schema.ColumnDstPort:        5555,
-				schema.ColumnSrcMAC:         138617863011056,
-				schema.ColumnDstMAC:         216372595274807,
-				schema.ColumnDstASPath:      []uint32{203698, 6762, 26615},
-				schema.ColumnDstCommunities: []uint64{2583495656, 2583495657, 4259880000, 4259880001, 4259900001},
-				schema.ColumnIPFragmentID:   0xd431,
-				schema.ColumnIPTTL:          255,
-				schema.ColumnTCPFlags:       0x2,
-			},
-		}, {
-			SamplingRate:    1024,
-			SrcAddr:         netip.MustParseAddr("2a0c:8880:2:0:185:21:130:38"),
-			DstAddr:         netip.MustParseAddr("2a0c:8880:2:0:185:21:130:39"),
-			ExporterAddress: netip.MustParseAddr("::ffff:172.16.0.3"),
-			InIf:            27,
-			OutIf:           28,
-			SrcVlan:         100,
-			DstVlan:         100,
-			ProtobufDebug: map[schema.ColumnKey]interface{}{
-				schema.ColumnBytes:         1500,
-				schema.ColumnPackets:       1,
-				schema.ColumnEType:         helpers.ETypeIPv6,
-				schema.ColumnProto:         6,
-				schema.ColumnSrcPort:       46026,
-				schema.ColumnDstPort:       22,
-				schema.ColumnSrcMAC:        40057391053392,
-				schema.ColumnDstMAC:        40057381862408,
-				schema.ColumnIPTTL:         64,
-				schema.ColumnIPTos:         0x8,
-				schema.ColumnIPv6FlowLabel: 0x68094,
-				schema.ColumnTCPFlags:      0x10,
-			},
-		},
-	}
-	for _, f := range got {
-		f.TimeReceived = 0
-	}
-
-	if diff := helpers.Diff(got, expectedFlows); diff != "" {
-		t.Fatalf("Decode() (-got, +want):\n%s", diff)
-	}
-	gotMetrics := r.GetMetrics(
-		"akvorado_inlet_flow_decoder_sflow_",
-		"flows_total",
-		"sample_",
-	)
-	expectedMetrics := map[string]string{
-		`flows_total{agent="172.16.0.3",exporter="127.0.0.1",version="5"}`: "1",
-		`sample_records_sum{agent="172.16.0.3",exporter="127.0.0.1",type="FlowSample",version="5"}`: "14",
-		`sample_sum{agent="172.16.0.3",exporter="127.0.0.1",type="FlowSample",version="5"}`: "5",
-	}
-	if diff := helpers.Diff(gotMetrics, expectedMetrics); diff != "" {
-		t.Fatalf("Metrics after data (-got, +want):\n%s", diff)
-	}
-}
-
-func TestDecodeInterface(t *testing.T) {
-	r := reporter.NewMock(t)
-	sdecoder := New(r, decoder.Dependencies{Schema: schema.NewMock(t)}, decoder.Option{})
-
-	t.Run("local interface", func(t *testing.T) {
-		// Send data
-		data := helpers.ReadPcapL4(t, filepath.Join("testdata", "data-local-interface.pcap"))
-		got := sdecoder.Decode(decoder.RawFlow{Payload: data, Source: net.ParseIP("127.0.0.1")})
-		if got == nil {
-			t.Fatalf("Decode() error on data")
-		}
-		expectedFlows := []*schema.FlowMessage{
-			{
-				SamplingRate:    1024,
-				SrcAddr:         netip.MustParseAddr("2a0c:8880:2:0:185:21:130:38"),
-				DstAddr:         netip.MustParseAddr("2a0c:8880:2:0:185:21:130:39"),
-				ExporterAddress: netip.MustParseAddr("::ffff:172.16.0.3"),
-				InIf:            27,
-				OutIf:           0, // local interface
-				ProtobufDebug: map[schema.ColumnKey]interface{}{
-					schema.ColumnBytes:   1500,
-					schema.ColumnPackets: 1,
-					schema.ColumnEType:   helpers.ETypeIPv6,
-					schema.ColumnProto:   6,
-					schema.ColumnSrcPort: 46026,
-					schema.ColumnDstPort: 22,
-				},
-			},
-		}
-		for _, f := range got {
-			f.TimeReceived = 0
-		}
-
-		if diff := helpers.Diff(got, expectedFlows); diff != "" {
-			t.Fatalf("Decode() (-got, +want):\n%s", diff)
-		}
-	})
-
-	t.Run("discard interface", func(t *testing.T) {
-		// Send data
-		data := helpers.ReadPcapL4(t, filepath.Join("testdata", "data-discard-interface.pcap"))
-		got := sdecoder.Decode(decoder.RawFlow{Payload: data, Source: net.ParseIP("127.0.0.1")})
-		if got == nil {
-			t.Fatalf("Decode() error on data")
-		}
-		expectedFlows := []*schema.FlowMessage{
-			{
-				SamplingRate:    1024,
-				SrcAddr:         netip.MustParseAddr("2a0c:8880:2:0:185:21:130:38"),
-				DstAddr:         netip.MustParseAddr("2a0c:8880:2:0:185:21:130:39"),
-				ExporterAddress: netip.MustParseAddr("::ffff:172.16.0.3"),
-				InIf:            27,
-				OutIf:           0, // discard interface
-				ProtobufDebug: map[schema.ColumnKey]interface{}{
-					schema.ColumnBytes:            1500,
-					schema.ColumnPackets:          1,
-					schema.ColumnEType:            helpers.ETypeIPv6,
-					schema.ColumnProto:            6,
-					schema.ColumnSrcPort:          46026,
-					schema.ColumnDstPort:          22,
-					schema.ColumnForwardingStatus: 128,
-				},
-			},
-		}
-		for _, f := range got {
-			f.TimeReceived = 0
-		}
-
-		if diff := helpers.Diff(got, expectedFlows); diff != "" {
-			t.Fatalf("Decode() (-got, +want):\n%s", diff)
-		}
-	})
-
-	t.Run("multiple interfaces", func(t *testing.T) {
-		// Send data
-		data := helpers.ReadPcapL4(t, filepath.Join("testdata", "data-multiple-interfaces.pcap"))
-		got := sdecoder.Decode(decoder.RawFlow{Payload: data, Source: net.ParseIP("127.0.0.1")})
-		if got == nil {
-			t.Fatalf("Decode() error on data")
-		}
-		expectedFlows := []*schema.FlowMessage{
-			{
-				SamplingRate:    1024,
-				SrcAddr:         netip.MustParseAddr("2a0c:8880:2:0:185:21:130:38"),
-				DstAddr:         netip.MustParseAddr("2a0c:8880:2:0:185:21:130:39"),
-				ExporterAddress: netip.MustParseAddr("::ffff:172.16.0.3"),
-				InIf:            27,
-				OutIf:           0, // multiple interfaces
-				ProtobufDebug: map[schema.ColumnKey]interface{}{
-					schema.ColumnBytes:   1500,
-					schema.ColumnPackets: 1,
-					schema.ColumnEType:   helpers.ETypeIPv6,
-					schema.ColumnProto:   6,
-					schema.ColumnSrcPort: 46026,
-					schema.ColumnDstPort: 22,
-				},
-			},
-		}
-		for _, f := range got {
-			f.TimeReceived = 0
-		}
-
-		if diff := helpers.Diff(got, expectedFlows); diff != "" {
-			t.Fatalf("Decode() (-got, +want):\n%s", diff)
-		}
-	})
-}
-
-func TestDecodeSamples(t *testing.T) {
-	r := reporter.NewMock(t)
-	sdecoder := New(r, decoder.Dependencies{Schema: schema.NewMock(t).EnableAllColumns()}, decoder.Option{})
-
-	t.Run("expanded flow sample", func(t *testing.T) {
-		// Send data
-		data := helpers.ReadPcapL4(t, filepath.Join("testdata", "data-sflow-expanded-sample.pcap"))
-		got := sdecoder.Decode(decoder.RawFlow{Payload: data, Source: net.ParseIP("127.0.0.1")})
-		if got == nil {
-			t.Fatalf("Decode() error on data")
-		}
-		expectedFlows := []*schema.FlowMessage{
-			{
-				SamplingRate:    1000,
-				InIf:            29001,
-				OutIf:           1285816721,
-				SrcAddr:         netip.MustParseAddr("::ffff:52.52.52.52"),
-				DstAddr:         netip.MustParseAddr("::ffff:53.53.53.53"),
-				ExporterAddress: netip.MustParseAddr("::ffff:49.49.49.49"),
-				NextHop:         netip.MustParseAddr("::ffff:54.54.54.54"),
-				SrcAS:           203476,
-				DstAS:           203361,
-				SrcVlan:         809,
-				GotASPath:       true,
-				GotCommunities:  true,
-				SrcNetMask:      32,
-				DstNetMask:      22,
-				ProtobufDebug: map[schema.ColumnKey]interface{}{
-					schema.ColumnBytes:          104,
-					schema.ColumnPackets:        1,
-					schema.ColumnEType:          helpers.ETypeIPv4,
-					schema.ColumnProto:          6,
-					schema.ColumnSrcPort:        22,
-					schema.ColumnDstPort:        52237,
-					schema.ColumnDstASPath:      []uint32{8218, 29605, 203361},
-					schema.ColumnDstCommunities: []uint64{538574949, 1911619684, 1911669584, 1911671290},
-					schema.ColumnTCPFlags:       0x18,
-					schema.ColumnIPFragmentID:   0xab4e,
-					schema.ColumnIPTTL:          61,
-					schema.ColumnIPTos:          0x8,
-					schema.ColumnSrcMAC:         0x948ed30a713b,
-					schema.ColumnDstMAC:         0x22421f4a9fcd,
-				},
-			},
-		}
-		for _, f := range got {
-			f.TimeReceived = 0
-		}
-
-		if diff := helpers.Diff(got, expectedFlows); diff != "" {
-			t.Fatalf("Decode() (-got, +want):\n%s", diff)
-		}
-	})
-
-	t.Run("flow sample with IPv4 data", func(t *testing.T) {
-		// Send data
-		data := helpers.ReadPcapL4(t, filepath.Join("testdata", "data-sflow-ipv4-data.pcap"))
-		got := sdecoder.Decode(decoder.RawFlow{Payload: data, Source: net.ParseIP("127.0.0.1")})
-		if got == nil {
-			t.Fatalf("Decode() error on data")
-		}
-		expectedFlows := []*schema.FlowMessage{
-			{
-				SamplingRate:    256,
-				InIf:            0,
-				OutIf:           182,
-				DstVlan:         3001,
-				SrcAddr:         netip.MustParseAddr("::ffff:50.50.50.50"),
-				DstAddr:         netip.MustParseAddr("::ffff:51.51.51.51"),
-				ExporterAddress: netip.MustParseAddr("::ffff:49.49.49.49"),
|
|
||||||
GotASPath: false,
|
|
||||||
ProtobufDebug: map[schema.ColumnKey]interface{}{
|
|
||||||
schema.ColumnBytes: 1344,
|
|
||||||
schema.ColumnPackets: 1,
|
|
||||||
schema.ColumnEType: helpers.ETypeIPv4,
|
|
||||||
schema.ColumnProto: 17,
|
|
||||||
schema.ColumnSrcPort: 46622,
|
|
||||||
schema.ColumnDstPort: 58631,
|
|
||||||
schema.ColumnSrcMAC: 1094287164743,
|
|
||||||
schema.ColumnDstMAC: 1101091482116,
|
|
||||||
schema.ColumnIPFragmentID: 41647,
|
|
||||||
schema.ColumnIPTTL: 64,
|
|
||||||
},
|
|
||||||
},
|
|
||||||
}
|
|
||||||
for _, f := range got {
|
|
||||||
f.TimeReceived = 0
|
|
||||||
}
|
|
||||||
|
|
||||||
if diff := helpers.Diff(got, expectedFlows); diff != "" {
|
|
||||||
t.Fatalf("Decode() (-got, +want):\n%s", diff)
|
|
||||||
}
|
|
||||||
})
|
|
||||||
|
|
||||||
t.Run("flow sample with IPv4 raw packet", func(t *testing.T) {
|
|
||||||
data := helpers.ReadPcapL4(t, filepath.Join("testdata", "data-sflow-raw-ipv4.pcap"))
|
|
||||||
got := sdecoder.Decode(decoder.RawFlow{Payload: data, Source: net.ParseIP("127.0.0.1")})
|
|
||||||
if got == nil {
|
|
||||||
t.Fatalf("Decode() error on data")
|
|
||||||
}
|
|
||||||
expectedFlows := []*schema.FlowMessage{
|
|
||||||
{
|
|
||||||
SamplingRate: 1,
|
|
||||||
InIf: 0,
|
|
||||||
OutIf: 2,
|
|
||||||
SrcAddr: netip.MustParseAddr("::ffff:69.58.92.107"),
|
|
||||||
DstAddr: netip.MustParseAddr("::ffff:92.222.186.1"),
|
|
||||||
ExporterAddress: netip.MustParseAddr("::ffff:172.19.64.116"),
|
|
||||||
GotASPath: false,
|
|
||||||
ProtobufDebug: map[schema.ColumnKey]interface{}{
|
|
||||||
schema.ColumnBytes: 32,
|
|
||||||
schema.ColumnPackets: 1,
|
|
||||||
schema.ColumnEType: helpers.ETypeIPv4,
|
|
||||||
schema.ColumnProto: 1,
|
|
||||||
schema.ColumnIPFragmentID: 4329,
|
|
||||||
schema.ColumnIPTTL: 64,
|
|
||||||
schema.ColumnIPTos: 8,
|
|
||||||
},
|
|
||||||
}, {
|
|
||||||
SamplingRate: 1,
|
|
||||||
InIf: 0,
|
|
||||||
OutIf: 2,
|
|
||||||
SrcAddr: netip.MustParseAddr("::ffff:69.58.92.107"),
|
|
||||||
DstAddr: netip.MustParseAddr("::ffff:92.222.184.1"),
|
|
||||||
ExporterAddress: netip.MustParseAddr("::ffff:172.19.64.116"),
|
|
||||||
GotASPath: false,
|
|
||||||
ProtobufDebug: map[schema.ColumnKey]interface{}{
|
|
||||||
schema.ColumnBytes: 32,
|
|
||||||
schema.ColumnPackets: 1,
|
|
||||||
schema.ColumnEType: helpers.ETypeIPv4,
|
|
||||||
schema.ColumnProto: 1,
|
|
||||||
schema.ColumnIPFragmentID: 62945,
|
|
||||||
schema.ColumnIPTTL: 64,
|
|
||||||
schema.ColumnIPTos: 8,
|
|
||||||
},
|
|
||||||
},
|
|
||||||
}
|
|
||||||
for _, f := range got {
|
|
||||||
f.TimeReceived = 0
|
|
||||||
}
|
|
||||||
|
|
||||||
if diff := helpers.Diff(got, expectedFlows); diff != "" {
|
|
||||||
t.Fatalf("Decode() (-got, +want):\n%s", diff)
|
|
||||||
}
|
|
||||||
})
|
|
||||||
|
|
||||||
t.Run("flow sample with ICMPv4", func(t *testing.T) {
|
|
||||||
data := helpers.ReadPcapL4(t, filepath.Join("testdata", "data-icmpv4.pcap"))
|
|
||||||
got := sdecoder.Decode(decoder.RawFlow{Payload: data, Source: net.ParseIP("127.0.0.1")})
|
|
||||||
if got == nil {
|
|
||||||
t.Fatalf("Decode() error on data")
|
|
||||||
}
|
|
||||||
expectedFlows := []*schema.FlowMessage{
|
|
||||||
{
|
|
||||||
SamplingRate: 1,
|
|
||||||
SrcAddr: netip.MustParseAddr("::ffff:203.0.113.4"),
|
|
||||||
DstAddr: netip.MustParseAddr("::ffff:203.0.113.5"),
|
|
||||||
ExporterAddress: netip.MustParseAddr("::ffff:127.0.0.1"),
|
|
||||||
GotASPath: false,
|
|
||||||
ProtobufDebug: map[schema.ColumnKey]interface{}{
|
|
||||||
schema.ColumnBytes: 84,
|
|
||||||
schema.ColumnPackets: 1,
|
|
||||||
schema.ColumnEType: helpers.ETypeIPv4,
|
|
||||||
schema.ColumnProto: 1,
|
|
||||||
schema.ColumnDstMAC: 0xd25b45ee5ecf,
|
|
||||||
schema.ColumnSrcMAC: 0xe2efc68f8cd4,
|
|
||||||
schema.ColumnICMPv4Type: 8,
|
|
||||||
// schema.ColumnICMPv4Code: 0,
|
|
||||||
schema.ColumnIPTTL: 64,
|
|
||||||
schema.ColumnIPFragmentID: 0x90c5,
|
|
||||||
},
|
|
||||||
},
|
|
||||||
}
|
|
||||||
for _, f := range got {
|
|
||||||
f.TimeReceived = 0
|
|
||||||
}
|
|
||||||
|
|
||||||
if diff := helpers.Diff(got, expectedFlows); diff != "" {
|
|
||||||
t.Fatalf("Decode() (-got, +want):\n%s", diff)
|
|
||||||
}
|
|
||||||
})
|
|
||||||
|
|
||||||
t.Run("flow sample with ICMPv6", func(t *testing.T) {
|
|
||||||
data := helpers.ReadPcapL4(t, filepath.Join("testdata", "data-icmpv6.pcap"))
|
|
||||||
got := sdecoder.Decode(decoder.RawFlow{Payload: data, Source: net.ParseIP("127.0.0.1")})
|
|
||||||
if got == nil {
|
|
||||||
t.Fatalf("Decode() error on data")
|
|
||||||
}
|
|
||||||
expectedFlows := []*schema.FlowMessage{
|
|
||||||
{
|
|
||||||
SamplingRate: 1,
|
|
||||||
SrcAddr: netip.MustParseAddr("fe80::d05b:45ff:feee:5ecf"),
|
|
||||||
DstAddr: netip.MustParseAddr("2001:db8::"),
|
|
||||||
ExporterAddress: netip.MustParseAddr("::ffff:127.0.0.1"),
|
|
||||||
GotASPath: false,
|
|
||||||
ProtobufDebug: map[schema.ColumnKey]interface{}{
|
|
||||||
schema.ColumnBytes: 72,
|
|
||||||
schema.ColumnPackets: 1,
|
|
||||||
schema.ColumnEType: helpers.ETypeIPv6,
|
|
||||||
schema.ColumnProto: 58,
|
|
||||||
schema.ColumnSrcMAC: 0xd25b45ee5ecf,
|
|
||||||
schema.ColumnDstMAC: 0xe2efc68f8cd4,
|
|
||||||
schema.ColumnIPTTL: 255,
|
|
||||||
schema.ColumnICMPv6Type: 135,
|
|
||||||
// schema.ColumnICMPv6Code: 0,
|
|
||||||
},
|
|
||||||
},
|
|
||||||
}
|
|
||||||
for _, f := range got {
|
|
||||||
f.TimeReceived = 0
|
|
||||||
}
|
|
||||||
|
|
||||||
if diff := helpers.Diff(got, expectedFlows); diff != "" {
|
|
||||||
t.Fatalf("Decode() (-got, +want):\n%s", diff)
|
|
||||||
}
|
|
||||||
})
|
|
||||||
|
|
||||||
t.Run("flow sample with QinQ", func(t *testing.T) {
|
|
||||||
data := helpers.ReadPcapL4(t, filepath.Join("testdata", "data-qinq.pcap"))
|
|
||||||
got := sdecoder.Decode(decoder.RawFlow{Payload: data, Source: net.ParseIP("127.0.0.1")})
|
|
||||||
if got == nil {
|
|
||||||
t.Fatalf("Decode() error on data")
|
|
||||||
}
|
|
||||||
expectedFlows := []*schema.FlowMessage{
|
|
||||||
{
|
|
||||||
SamplingRate: 4096,
|
|
||||||
InIf: 369098852,
|
|
||||||
OutIf: 369098851,
|
|
||||||
SrcVlan: 1493,
|
|
||||||
SrcAddr: netip.MustParseAddr("::ffff:49.49.49.2"),
|
|
||||||
DstAddr: netip.MustParseAddr("::ffff:49.49.49.109"),
|
|
||||||
ExporterAddress: netip.MustParseAddr("::ffff:172.17.128.58"),
|
|
||||||
GotASPath: false,
|
|
||||||
ProtobufDebug: map[schema.ColumnKey]interface{}{
|
|
||||||
schema.ColumnBytes: 80,
|
|
||||||
schema.ColumnPackets: 1,
|
|
||||||
schema.ColumnEType: helpers.ETypeIPv4,
|
|
||||||
schema.ColumnProto: 6,
|
|
||||||
schema.ColumnSrcMAC: 0x4caea3520ff6,
|
|
||||||
schema.ColumnDstMAC: 0x000110621493,
|
|
||||||
schema.ColumnIPTTL: 62,
|
|
||||||
schema.ColumnIPFragmentID: 56159,
|
|
||||||
schema.ColumnTCPFlags: 16,
|
|
||||||
schema.ColumnSrcPort: 32017,
|
|
||||||
schema.ColumnDstPort: 443,
|
|
||||||
},
|
|
||||||
},
|
|
||||||
}
|
|
||||||
for _, f := range got {
|
|
||||||
f.TimeReceived = 0
|
|
||||||
}
|
|
||||||
|
|
||||||
if diff := helpers.Diff(got, expectedFlows); diff != "" {
|
|
||||||
t.Fatalf("Decode() (-got, +want):\n%s", diff)
|
|
||||||
}
|
|
||||||
})
|
|
||||||
}
|
|
||||||
@@ -1,36 +0,0 @@
-// SPDX-FileCopyrightText: 2022 Free Mobile
-// SPDX-License-Identifier: AGPL-3.0-only
-
-//go:build !release
-
-package decoder
-
-import (
-	"net/netip"
-
-	"akvorado/common/schema"
-)
-
-// DummyDecoder is a simple decoder producing flows from random data.
-// The payload is copied in IfDescription
-type DummyDecoder struct {
-	Schema *schema.Component
-}
-
-// Decode returns uninteresting flow messages.
-func (dc *DummyDecoder) Decode(in RawFlow) []*schema.FlowMessage {
-	exporterAddress, _ := netip.AddrFromSlice(in.Source.To16())
-	f := &schema.FlowMessage{
-		TimeReceived:    uint64(in.TimeReceived.UTC().Unix()),
-		ExporterAddress: exporterAddress,
-	}
-	dc.Schema.ProtobufAppendVarint(f, schema.ColumnBytes, uint64(len(in.Payload)))
-	dc.Schema.ProtobufAppendVarint(f, schema.ColumnPackets, 1)
-	dc.Schema.ProtobufAppendBytes(f, schema.ColumnInIfDescription, in.Payload)
-	return []*schema.FlowMessage{f}
-}
-
-// Name returns the original name.
-func (dc *DummyDecoder) Name() string {
-	return "dummy"
-}
@@ -1,92 +0,0 @@
-// SPDX-FileCopyrightText: 2023 Free Mobile
-// SPDX-License-Identifier: AGPL-3.0-only
-
-package flow
-
-import (
-	"net"
-	"path/filepath"
-	"testing"
-
-	"akvorado/common/helpers"
-	"akvorado/common/reporter"
-	"akvorado/common/schema"
-	"akvorado/inlet/flow/decoder"
-	"akvorado/inlet/flow/decoder/netflow"
-	"akvorado/inlet/flow/decoder/sflow"
-)
-
-// The goal is to benchmark flow decoding + encoding to protobuf
-
-func BenchmarkDecodeEncodeNetflow(b *testing.B) {
-	schema.DisableDebug(b)
-	r := reporter.NewMock(b)
-	sch := schema.NewMock(b)
-	nfdecoder := netflow.New(r, decoder.Dependencies{Schema: sch}, decoder.Option{TimestampSource: decoder.TimestampSourceUDP})
-
-	template := helpers.ReadPcapL4(b, filepath.Join("decoder", "netflow", "testdata", "options-template.pcap"))
-	got := nfdecoder.Decode(decoder.RawFlow{Payload: template, Source: net.ParseIP("127.0.0.1")})
-	if got == nil || len(got) != 0 {
-		b.Fatal("Decode() error on options template")
-	}
-	data := helpers.ReadPcapL4(b, filepath.Join("decoder", "netflow", "testdata", "options-data.pcap"))
-	got = nfdecoder.Decode(decoder.RawFlow{Payload: data, Source: net.ParseIP("127.0.0.1")})
-	if got == nil || len(got) != 0 {
-		b.Fatal("Decode() error on options data")
-	}
-	template = helpers.ReadPcapL4(b, filepath.Join("decoder", "netflow", "testdata", "template.pcap"))
-	got = nfdecoder.Decode(decoder.RawFlow{Payload: template, Source: net.ParseIP("127.0.0.1")})
-	if got == nil || len(got) != 0 {
-		b.Fatal("Decode() error on template")
-	}
-	data = helpers.ReadPcapL4(b, filepath.Join("decoder", "netflow", "testdata", "data.pcap"))
-
-	for _, withEncoding := range []bool{true, false} {
-		title := map[bool]string{
-			true:  "with encoding",
-			false: "without encoding",
-		}[withEncoding]
-		b.Run(title, func(b *testing.B) {
-			for b.Loop() {
-				got = nfdecoder.Decode(decoder.RawFlow{Payload: data, Source: net.ParseIP("127.0.0.1")})
-				if withEncoding {
-					for _, flow := range got {
-						sch.ProtobufMarshal(flow)
-					}
-				}
-			}
-			if got[0].ProtobufDebug != nil {
-				b.Fatal("debug is enabled")
-			}
-		})
-	}
-}
-
-func BenchmarkDecodeEncodeSflow(b *testing.B) {
-	schema.DisableDebug(b)
-	r := reporter.NewMock(b)
-	sch := schema.NewMock(b)
-	sdecoder := sflow.New(r, decoder.Dependencies{Schema: sch}, decoder.Option{TimestampSource: decoder.TimestampSourceUDP})
-	data := helpers.ReadPcapL4(b, filepath.Join("decoder", "sflow", "testdata", "data-1140.pcap"))
-
-	for _, withEncoding := range []bool{true, false} {
-		title := map[bool]string{
-			true:  "with encoding",
-			false: "without encoding",
-		}[withEncoding]
-		var got []*schema.FlowMessage
-		b.Run(title, func(b *testing.B) {
-			for b.Loop() {
-				got = sdecoder.Decode(decoder.RawFlow{Payload: data, Source: net.ParseIP("127.0.0.1")})
-				if withEncoding {
-					for _, flow := range got {
-						sch.ProtobufMarshal(flow)
-					}
-				}
-			}
-			if got[0].ProtobufDebug != nil {
-				b.Fatal("debug is enabled")
-			}
-		})
-	}
-}
@@ -9,6 +9,9 @@ import "akvorado/inlet/flow/input"
 type Configuration struct {
 	// Paths to use as input
 	Paths []string `validate:"min=1,dive,required"`
+	// MaxFlows tell how many flows will be read before stopping production. 0
+	// means to not stop.
+	MaxFlows uint
 }

 // DefaultConfiguration descrives the default configuration for file input.
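For readers skimming the hunk above: MaxFlows caps how many flows the file input produces before it idles until shutdown. A minimal sketch of how a caller might set it up (values and the main wrapper are illustrative, not from this commit):

    package main

    import "akvorado/inlet/flow/input/file"

    func main() {
    	// MaxFlows == 0 keeps looping over Paths forever; any positive
    	// value stops production after that many flows have been sent.
    	cfg := file.Configuration{
    		Paths:    []string{"testdata/file1.txt", "testdata/file2.txt"},
    		MaxFlows: 100,
    	}
    	_ = cfg
    }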
@@ -13,9 +13,8 @@ import (
 	"gopkg.in/tomb.v2"

 	"akvorado/common/daemon"
+	"akvorado/common/pb"
 	"akvorado/common/reporter"
-	"akvorado/common/schema"
-	"akvorado/inlet/flow/decoder"
 	"akvorado/inlet/flow/input"
 )

@@ -24,62 +23,66 @@ type Input struct {
 	r       *reporter.Reporter
 	t       tomb.Tomb
 	config  *Configuration
-	ch      chan []*schema.FlowMessage // channel to send flows to
-	decoder decoder.Decoder
+	send    input.SendFunc
 }

 // New instantiate a new UDP listener from the provided configuration.
-func (configuration *Configuration) New(r *reporter.Reporter, daemon daemon.Component, dec decoder.Decoder) (input.Input, error) {
+func (configuration *Configuration) New(r *reporter.Reporter, daemon daemon.Component, send input.SendFunc) (input.Input, error) {
 	if len(configuration.Paths) == 0 {
 		return nil, errors.New("no paths provided for file input")
 	}
 	input := &Input{
 		r:       r,
 		config:  configuration,
-		ch:      make(chan []*schema.FlowMessage),
-		decoder: dec,
+		send:    send,
 	}
 	daemon.Track(&input.t, "inlet/flow/input/file")
 	return input, nil
 }

-// Start starts reading the provided files to produce fake flows in a loop.
-func (in *Input) Start() (<-chan []*schema.FlowMessage, error) {
+// Start starts streaming files to produce flake flows in a loop.
+func (in *Input) Start() error {
 	in.r.Info().Msg("file input starting")
 	in.t.Go(func() error {
+		count := uint(0)
+		payload := make([]byte, 9000)
+		flow := pb.RawFlow{}
 		for idx := 0; true; idx++ {
+			if in.config.MaxFlows > 0 && count >= in.config.MaxFlows {
+				<-in.t.Dying()
+				return nil
+			}
+
 			path := in.config.Paths[idx%len(in.config.Paths)]
 			data, err := os.ReadFile(path)
 			if err != nil {
 				in.r.Err(err).Str("path", path).Msg("unable to read path")
 				return err
 			}
-			flows := in.decoder.Decode(decoder.RawFlow{
-				TimeReceived: time.Now(),
-				Payload:      data,
-				Source:       net.ParseIP("127.0.0.1"),
-			})
-			if len(flows) == 0 {
-				continue
-			}
+			// Mimic the way it works with UDP
+			n := copy(payload, data)
+			flow.Reset()
+			flow.TimeReceived = uint64(time.Now().Unix())
+			flow.Payload = payload[:n]
+			flow.SourceAddress = net.ParseIP("127.0.0.1").To16()
+			in.send("127.0.0.1", &flow)
+			count++
 			select {
 			case <-in.t.Dying():
 				return nil
-			case in.ch <- flows:
+			default:
 			}
 		}
 		return nil
 	})
-	return in.ch, nil
+	return nil
 }

 // Stop stops the UDP listeners
 func (in *Input) Stop() error {
-	defer func() {
-		close(in.ch)
-		in.r.Info().Msg("file input stopped")
-	}()
+	defer in.r.Info().Msg("file input stopped")
 	in.t.Kill(nil)
 	return in.t.Wait()
 }
@@ -4,29 +4,62 @@
 package file

 import (
+	"net"
 	"path"
+	"sync"
 	"testing"
 	"time"

 	"akvorado/common/daemon"
 	"akvorado/common/helpers"
+	"akvorado/common/pb"
 	"akvorado/common/reporter"
-	"akvorado/common/schema"
-	"akvorado/inlet/flow/decoder"
 )

 func TestFileInput(t *testing.T) {
 	r := reporter.NewMock(t)
 	configuration := DefaultConfiguration().(*Configuration)
 	configuration.Paths = []string{path.Join("testdata", "file1.txt"), path.Join("testdata", "file2.txt")}
-	in, err := configuration.New(r, daemon.NewMock(t), &decoder.DummyDecoder{
-		Schema: schema.NewMock(t),
-	})
+	done := make(chan bool)
+	expected := []pb.RawFlow{
+		{
+			Payload:       []byte("hello world!\n"),
+			SourceAddress: net.ParseIP("127.0.0.1").To16(),
+		}, {
+			Payload:       []byte("bye bye\n"),
+			SourceAddress: net.ParseIP("127.0.0.1").To16(),
+		}, {
+			Payload:       []byte("hello world!\n"),
+			SourceAddress: net.ParseIP("127.0.0.1").To16(),
+		},
+	}
+	var mu sync.Mutex
+	got := []*pb.RawFlow{}
+	send := func(_ string, flow *pb.RawFlow) {
+		// Make a copy
+		payload := make([]byte, len(flow.Payload))
+		copy(payload, flow.Payload)
+		newFlow := pb.RawFlow{
+			TimeReceived:  0,
+			Payload:       payload,
+			SourceAddress: flow.SourceAddress,
+		}
+		mu.Lock()
+		if len(got) < len(expected) {
+			got = append(got, &newFlow)
+			if len(got) == len(expected) {
+				close(done)
+			}
+		}
+		mu.Unlock()
+	}
+
+	in, err := configuration.New(r, daemon.NewMock(t), send)
 	if err != nil {
 		t.Fatalf("New() error:\n%+v", err)
 	}
-	ch, err := in.Start()
-	if err != nil {
+	if err := in.Start(); err != nil {
 		t.Fatalf("Start() error:\n%+v", err)
 	}
 	defer func() {
@@ -35,21 +68,12 @@ func TestFileInput(t *testing.T) {
 		}
 	}()

-	// Get it back
-	expected := []string{"hello world!\n", "bye bye\n", "hello world!\n"}
-	got := []string{}
-out:
-	for range len(expected) {
-		select {
-		case got1 := <-ch:
-			for _, fl := range got1 {
-				got = append(got, string(fl.ProtobufDebug[schema.ColumnInIfDescription].([]byte)))
-			}
-		case <-time.After(50 * time.Millisecond):
-			break out
+	select {
+	case <-time.After(50 * time.Millisecond):
+		t.Fatal("timeout while waiting to receive flows")
+	case <-done:
+		if diff := helpers.Diff(got, expected); diff != "" {
+			t.Fatalf("Input data (-got, +want):\n%s", diff)
 		}
 	}
-	if diff := helpers.Diff(got, expected); diff != "" {
-		t.Fatalf("Input data (-got, +want):\n%s", diff)
-	}
 }
@@ -6,21 +6,23 @@ package input

 import (
 	"akvorado/common/daemon"
+	"akvorado/common/pb"
 	"akvorado/common/reporter"
-	"akvorado/common/schema"
-	"akvorado/inlet/flow/decoder"
 )

 // Input is the interface any input should meet
 type Input interface {
-	// Start instructs an input to start producing flows on the returned channel.
-	Start() (<-chan []*schema.FlowMessage, error)
+	// Start instructs an input to start producing flows to be sent to Kafka component.
+	Start() error
 	// Stop instructs the input to stop producing flows.
 	Stop() error
 }

+// SendFunc is a function to send a flow to Kafka
+type SendFunc func(exporter string, flow *pb.RawFlow)
+
 // Configuration defines the interface to instantiate an input module from its configuration.
 type Configuration interface {
 	// New instantiates a new input from its configuration.
-	New(r *reporter.Reporter, daemon daemon.Component, dec decoder.Decoder) (Input, error)
+	New(r *reporter.Reporter, daemon daemon.Component, send SendFunc) (Input, error)
 }
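With this interface change, inputs no longer return a channel of decoded flows: Start only spawns workers, and each worker pushes raw datagrams through the SendFunc it was given. A minimal sketch of a custom input honoring the new contract (the package, payload, and exporter address are hypothetical placeholders; only Input, SendFunc, and pb.RawFlow come from the code above):

    package myinput

    import (
    	"net"
    	"time"

    	"gopkg.in/tomb.v2"

    	"akvorado/common/pb"
    	"akvorado/inlet/flow/input"
    )

    // Input is a hypothetical input producing one raw flow per second.
    type Input struct {
    	t    tomb.Tomb
    	send input.SendFunc
    }

    // Start spawns the worker and returns immediately, per the new contract.
    func (in *Input) Start() error {
    	in.t.Go(func() error {
    		flow := pb.RawFlow{} // reused for every iteration
    		for {
    			select {
    			case <-in.t.Dying():
    				return nil
    			case <-time.After(time.Second):
    			}
    			flow.Reset()
    			flow.TimeReceived = uint64(time.Now().Unix())
    			flow.Payload = []byte("raw packet bytes") // placeholder payload
    			flow.SourceAddress = net.ParseIP("192.0.2.1").To16()
    			in.send("192.0.2.1", &flow)
    		}
    	})
    	return nil
    }

    // Stop asks the worker to exit and waits for it.
    func (in *Input) Stop() error {
    	in.t.Kill(nil)
    	return in.t.Wait()
    }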
@@ -15,9 +15,8 @@ import (
 	"gopkg.in/tomb.v2"

 	"akvorado/common/daemon"
+	"akvorado/common/pb"
 	"akvorado/common/reporter"
-	"akvorado/common/schema"
-	"akvorado/inlet/flow/decoder"
 	"akvorado/inlet/flow/input"
 )

@@ -32,23 +31,19 @@ type Input struct {
 		packets       *reporter.CounterVec
 		packetSizeSum *reporter.SummaryVec
 		errors        *reporter.CounterVec
-		outDrops      *reporter.CounterVec
 		inDrops       *reporter.GaugeVec
-		decodedFlows  *reporter.CounterVec
 	}

 	address net.Addr                   // listening address, for testing purpoese
-	ch      chan []*schema.FlowMessage // channel to send flows to
-	decoder decoder.Decoder            // decoder to use
+	send    input.SendFunc             // function to send to kafka
 }

 // New instantiate a new UDP listener from the provided configuration.
-func (configuration *Configuration) New(r *reporter.Reporter, daemon daemon.Component, dec decoder.Decoder) (input.Input, error) {
+func (configuration *Configuration) New(r *reporter.Reporter, daemon daemon.Component, send input.SendFunc) (input.Input, error) {
 	input := &Input{
 		r:       r,
 		config:  configuration,
-		ch:      make(chan []*schema.FlowMessage, configuration.QueueSize),
-		decoder: dec,
+		send:    send,
 	}

 	input.metrics.bytes = r.CounterVec(
@@ -80,13 +75,6 @@ func (configuration *Configuration) New(r *reporter.Reporter, daemon daemon.Comp
 		},
 		[]string{"listener", "worker"},
 	)
-	input.metrics.outDrops = r.CounterVec(
-		reporter.CounterOpts{
-			Name: "out_dropped_packets_total",
-			Help: "Dropped packets due to internal queue full.",
-		},
-		[]string{"listener", "worker", "exporter"},
-	)
 	input.metrics.inDrops = r.GaugeVec(
 		reporter.GaugeOpts{
 			Name: "in_dropped_packets_total",
@@ -94,20 +82,13 @@ func (configuration *Configuration) New(r *reporter.Reporter, daemon daemon.Comp
 		},
 		[]string{"listener", "worker"},
 	)
-	input.metrics.decodedFlows = r.CounterVec(
-		reporter.CounterOpts{
-			Name: "decoded_flows_total",
-			Help: "Number of flows decoded and written to the internal queue",
-		},
-		[]string{"listener", "worker", "exporter"},
-	)

 	daemon.Track(&input.t, "inlet/flow/input/udp")
 	return input, nil
 }

 // Start starts listening to the provided UDP socket and producing flows.
-func (in *Input) Start() (<-chan []*schema.FlowMessage, error) {
+func (in *Input) Start() error {
 	in.r.Info().Str("listen", in.config.Listen).Msg("starting UDP input")

 	// Listen to UDP port
@@ -122,12 +103,12 @@ func (in *Input) Start() (<-chan []*schema.FlowMessage, error) {
 		var err error
 		listenAddr, err = net.ResolveUDPAddr("udp", in.config.Listen)
 		if err != nil {
-			return nil, fmt.Errorf("unable to resolve %v: %w", in.config.Listen, err)
+			return fmt.Errorf("unable to resolve %v: %w", in.config.Listen, err)
 		}
 	}
 	pconn, err := listenConfig.ListenPacket(in.t.Context(context.Background()), "udp", listenAddr.String())
 	if err != nil {
-		return nil, fmt.Errorf("unable to listen to %v: %w", listenAddr, err)
+		return fmt.Errorf("unable to listen to %v: %w", listenAddr, err)
 	}
 	udpConn := pconn.(*net.UDPConn)
 	in.address = udpConn.LocalAddr()
@@ -152,11 +133,13 @@ func (in *Input) Start() (<-chan []*schema.FlowMessage, error) {
 	in.t.Go(func() error {
 		payload := make([]byte, 9000)
 		oob := make([]byte, oobLength)
+		flow := pb.RawFlow{}
 		listen := in.config.Listen
 		l := in.r.With().
 			Str("worker", worker).
 			Str("listen", listen).
 			Logger()
+		dying := in.t.Dying()
 		errLogger := l.Sample(reporter.BurstSampler(time.Minute, 1))
 		for count := 0; ; count++ {
 			n, oobn, _, source, err := conns[workerID].ReadMsgUDP(payload, oob)
@@ -189,25 +172,17 @@ func (in *Input) Start() (<-chan []*schema.FlowMessage, error) {
 				Inc()
 			in.metrics.packetSizeSum.WithLabelValues(listen, worker, srcIP).
 				Observe(float64(n))
-			flows := in.decoder.Decode(decoder.RawFlow{
-				TimeReceived: oobMsg.Received,
-				Payload:      payload[:n],
-				Source:       source.IP,
-			})
-			if len(flows) == 0 {
-				continue
-			}
+			flow.Reset()
+			flow.TimeReceived = uint64(oobMsg.Received.Unix())
+			flow.Payload = payload[:n]
+			flow.SourceAddress = source.IP.To16()
+			in.send(srcIP, &flow)
 			select {
-			case <-in.t.Dying():
+			case <-dying:
 				return nil
-			case in.ch <- flows:
-				in.metrics.decodedFlows.WithLabelValues(listen, worker, srcIP).
-					Add(float64(len((flows))))
 			default:
-				errLogger.Warn().Msgf("dropping flow due to queue full (size %d)",
-					in.config.QueueSize)
-				in.metrics.outDrops.WithLabelValues(listen, worker, srcIP).
-					Inc()
 			}
 		}
 	})
@@ -223,16 +198,13 @@ func (in *Input) Start() (<-chan []*schema.FlowMessage, error) {
 		return nil
 	})

-	return in.ch, nil
+	return nil
 }

 // Stop stops the UDP listeners
 func (in *Input) Stop() error {
 	l := in.r.With().Str("listen", in.config.Listen).Logger()
-	defer func() {
-		close(in.ch)
-		l.Info().Msg("UDP listener stopped")
-	}()
+	defer l.Info().Msg("UDP listener stopped")
 	in.t.Kill(nil)
 	return in.t.Wait()
 }
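Note that the UDP worker above reuses a single pb.RawFlow and a single 9000-byte buffer for every packet, so a SendFunc must serialize or copy whatever it needs before returning; retaining the pointer is unsafe. A sketch of a safe callback, assuming the pb and input imports from this diff (the helper name is hypothetical):

    // copySend returns a SendFunc that may retain flows: it copies the
    // payload before the input's reused buffer is overwritten. Keeping
    // the *pb.RawFlow itself would be a bug with a reused message.
    func copySend(retained *[]*pb.RawFlow) input.SendFunc {
    	return func(_ string, flow *pb.RawFlow) {
    		payload := make([]byte, len(flow.Payload))
    		copy(payload, flow.Payload) // copy before the buffer is reused
    		*retained = append(*retained, &pb.RawFlow{
    			TimeReceived:  flow.TimeReceived,
    			Payload:       payload,
    			SourceAddress: flow.SourceAddress,
    		})
    	}
    }

The file input test earlier in this commit follows the same pattern ("Make a copy") before appending the flow to its results.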
@@ -5,27 +5,61 @@ package udp

 import (
 	"net"
-	"net/netip"
 	"testing"
 	"time"

 	"akvorado/common/daemon"
 	"akvorado/common/helpers"
+	"akvorado/common/pb"
 	"akvorado/common/reporter"
-	"akvorado/common/schema"
-	"akvorado/inlet/flow/decoder"
 )

 func TestUDPInput(t *testing.T) {
 	r := reporter.NewMock(t)
 	configuration := DefaultConfiguration().(*Configuration)
 	configuration.Listen = "127.0.0.1:0"
-	in, err := configuration.New(r, daemon.NewMock(t), &decoder.DummyDecoder{Schema: schema.NewMock(t)})
+	done := make(chan bool)
+	expected := &pb.RawFlow{
+		SourceAddress: net.ParseIP("127.0.0.1").To16(),
+		Payload:       []byte("hello world!"),
+	}
+	send := func(_ string, got *pb.RawFlow) {
+		expected.TimeReceived = got.TimeReceived
+
+		delta := uint64(time.Now().UTC().Unix()) - got.TimeReceived
+		if delta > 1 {
+			t.Errorf("TimeReceived out of range: %d (now: %d)", got.TimeReceived, time.Now().UTC().Unix())
+		}
+		if diff := helpers.Diff(got, expected); diff != "" {
+			t.Fatalf("Input data (-got, +want):\n%s", diff)
+		}
+
+		// Check metrics
+		gotMetrics := r.GetMetrics("akvorado_inlet_flow_input_udp_")
+		expectedMetrics := map[string]string{
+			`bytes_total{exporter="127.0.0.1",listener="127.0.0.1:0",worker="0"}`:                        "12",
+			`packets_total{exporter="127.0.0.1",listener="127.0.0.1:0",worker="0"}`:                      "1",
+			`in_dropped_packets_total{listener="127.0.0.1:0",worker="0"}`:                                "0",
+			`summary_size_bytes_count{exporter="127.0.0.1",listener="127.0.0.1:0",worker="0"}`:           "1",
+			`summary_size_bytes_sum{exporter="127.0.0.1",listener="127.0.0.1:0",worker="0"}`:             "12",
+			`summary_size_bytes{exporter="127.0.0.1",listener="127.0.0.1:0",worker="0",quantile="0.5"}`:  "12",
+			`summary_size_bytes{exporter="127.0.0.1",listener="127.0.0.1:0",worker="0",quantile="0.9"}`:  "12",
+			`summary_size_bytes{exporter="127.0.0.1",listener="127.0.0.1:0",worker="0",quantile="0.99"}`: "12",
+		}
+		if diff := helpers.Diff(gotMetrics, expectedMetrics); diff != "" {
+			t.Fatalf("Input metrics (-got, +want):\n%s", diff)
+		}
+
+		close(done)
+
+	}
+
+	in, err := configuration.New(r, daemon.NewMock(t), send)
 	if err != nil {
 		t.Fatalf("New() error:\n%+v", err)
 	}
-	ch, err := in.Start()
-	if err != nil {
+	if err := in.Start(); err != nil {
 		t.Fatalf("Start() error:\n%+v", err)
 	}
 	defer func() {
@@ -46,103 +80,9 @@ func TestUDPInput(t *testing.T) {
 	}

 	// Get it back
-	var got []*schema.FlowMessage
 	select {
-	case got = <-ch:
-		if len(got) == 0 {
-			t.Fatalf("empty decoded flows received")
-		}
 	case <-time.After(20 * time.Millisecond):
 		t.Fatal("no decoded flows received")
-	}
-
-	delta := uint64(time.Now().UTC().Unix()) - got[0].TimeReceived
-	if delta > 1 {
-		t.Errorf("TimeReceived out of range: %d (now: %d)", got[0].TimeReceived, time.Now().UTC().Unix())
-	}
-	expected := []*schema.FlowMessage{
-		{
-			TimeReceived:    got[0].TimeReceived,
-			ExporterAddress: netip.MustParseAddr("::ffff:127.0.0.1"),
-			ProtobufDebug: map[schema.ColumnKey]interface{}{
-				schema.ColumnBytes:           12,
-				schema.ColumnPackets:         1,
-				schema.ColumnInIfDescription: []byte("hello world!"),
-			},
-		},
-	}
-	if diff := helpers.Diff(got, expected); diff != "" {
-		t.Fatalf("Input data (-got, +want):\n%s", diff)
-	}
-
-	// Check metrics
-	gotMetrics := r.GetMetrics("akvorado_inlet_flow_input_udp_")
-	expectedMetrics := map[string]string{
-		`bytes_total{exporter="127.0.0.1",listener="127.0.0.1:0",worker="0"}`:                        "12",
-		`decoded_flows_total{exporter="127.0.0.1",listener="127.0.0.1:0",worker="0"}`:                "1",
-		`packets_total{exporter="127.0.0.1",listener="127.0.0.1:0",worker="0"}`:                      "1",
-		`in_dropped_packets_total{listener="127.0.0.1:0",worker="0"}`:                                "0",
-		`summary_size_bytes_count{exporter="127.0.0.1",listener="127.0.0.1:0",worker="0"}`:           "1",
-		`summary_size_bytes_sum{exporter="127.0.0.1",listener="127.0.0.1:0",worker="0"}`:             "12",
-		`summary_size_bytes{exporter="127.0.0.1",listener="127.0.0.1:0",worker="0",quantile="0.5"}`:  "12",
-		`summary_size_bytes{exporter="127.0.0.1",listener="127.0.0.1:0",worker="0",quantile="0.9"}`:  "12",
-		`summary_size_bytes{exporter="127.0.0.1",listener="127.0.0.1:0",worker="0",quantile="0.99"}`: "12",
-	}
-	if diff := helpers.Diff(gotMetrics, expectedMetrics); diff != "" {
-		t.Fatalf("Input metrics (-got, +want):\n%s", diff)
-	}
-}
-
-func TestOverflow(t *testing.T) {
-	r := reporter.NewMock(t)
-	configuration := DefaultConfiguration().(*Configuration)
-	configuration.Listen = "127.0.0.1:0"
-	configuration.QueueSize = 1
-	in, err := configuration.New(r, daemon.NewMock(t), &decoder.DummyDecoder{
-		Schema: schema.NewMock(t),
-	})
-	if err != nil {
-		t.Fatalf("New() error:\n%+v", err)
-	}
-	_, err = in.Start()
-	if err != nil {
-		t.Fatalf("Start() error:\n%+v", err)
-	}
-	defer func() {
-		if err := in.Stop(); err != nil {
-			t.Fatalf("Stop() error:\n%+v", err)
-		}
-	}()
-
-	// Connect
-	conn, err := net.Dial("udp", in.(*Input).address.String())
-	if err != nil {
-		t.Fatalf("Dial() error:\n%+v", err)
-	}
-
-	// Send data
-	for range 10 {
-		if _, err := conn.Write([]byte("hello world!")); err != nil {
-			t.Fatalf("Write() error:\n%+v", err)
-		}
-	}
-	time.Sleep(20 * time.Millisecond)
-
-	// Check metrics
-	gotMetrics := r.GetMetrics("akvorado_inlet_flow_input_udp_")
-	expectedMetrics := map[string]string{
-		`bytes_total{exporter="127.0.0.1",listener="127.0.0.1:0",worker="0"}`:                        "120",
-		`decoded_flows_total{exporter="127.0.0.1",listener="127.0.0.1:0",worker="0"}`:                "1",
-		`in_dropped_packets_total{listener="127.0.0.1:0",worker="0"}`:                                "0",
-		`out_dropped_packets_total{exporter="127.0.0.1",listener="127.0.0.1:0",worker="0"}`:          "9",
-		`packets_total{exporter="127.0.0.1",listener="127.0.0.1:0",worker="0"}`:                      "10",
-		`summary_size_bytes_count{exporter="127.0.0.1",listener="127.0.0.1:0",worker="0"}`:           "10",
-		`summary_size_bytes_sum{exporter="127.0.0.1",listener="127.0.0.1:0",worker="0"}`:             "120",
-		`summary_size_bytes{exporter="127.0.0.1",listener="127.0.0.1:0",worker="0",quantile="0.5"}`:  "12",
-		`summary_size_bytes{exporter="127.0.0.1",listener="127.0.0.1:0",worker="0",quantile="0.9"}`:  "12",
-		`summary_size_bytes{exporter="127.0.0.1",listener="127.0.0.1:0",worker="0",quantile="0.99"}`: "12",
-	}
-	if diff := helpers.Diff(gotMetrics, expectedMetrics); diff != "" {
-		t.Fatalf("Input metrics (-got, +want):\n%s", diff)
+	case <-done:
 	}
 }
@@ -1,57 +0,0 @@
-// SPDX-FileCopyrightText: 2022 Free Mobile
-// SPDX-License-Identifier: AGPL-3.0-only
-
-package flow
-
-import (
-	"time"
-
-	"akvorado/common/schema"
-
-	"golang.org/x/time/rate"
-)
-
-type limiter struct {
-	l           *rate.Limiter
-	dropped     uint64  // dropped during the current second
-	total       uint64  // total during the current second
-	dropRate    float64 // drop rate during the last second
-	currentTick time.Time
-}
-
-// allowMessages tell if we can transmit the provided messages,
-// depending on the rate limiter configuration. If yes, their sampling
-// rate may be modified to match current drop rate.
-func (c *Component) allowMessages(fmsgs []*schema.FlowMessage) bool {
-	count := len(fmsgs)
-	if c.config.RateLimit == 0 || count == 0 {
-		return true
-	}
-	exporter := fmsgs[0].ExporterAddress
-	exporterLimiter, ok := c.limiters[exporter]
-	if !ok {
-		exporterLimiter = &limiter{
-			l: rate.NewLimiter(rate.Limit(c.config.RateLimit), int(c.config.RateLimit/10)),
-		}
-		c.limiters[exporter] = exporterLimiter
-	}
-	now := time.Now()
-	tick := now.Truncate(200 * time.Millisecond) // we use a 200-millisecond resolution
-	if exporterLimiter.currentTick.UnixMilli() != tick.UnixMilli() {
-		exporterLimiter.dropRate = float64(exporterLimiter.dropped) / float64(exporterLimiter.total)
-		exporterLimiter.dropped = 0
-		exporterLimiter.total = 0
-		exporterLimiter.currentTick = tick
-	}
-	exporterLimiter.total += uint64(count)
-	if !exporterLimiter.l.AllowN(now, count) {
-		exporterLimiter.dropped += uint64(count)
-		return false
-	}
-	if exporterLimiter.dropRate > 0 {
-		for _, flow := range fmsgs {
-			flow.SamplingRate *= uint32(1 / (1 - exporterLimiter.dropRate))
-		}
-	}
-	return true
-}
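For the record, the rate limiter deleted above compensated for drops by inflating the sampling rate of surviving flows: with a measured drop rate d, each flow's SamplingRate was multiplied by 1/(1-d), keeping byte and packet estimates unbiased. A quick worked example of that arithmetic (values are illustrative, not from the commit):

    package main

    import "fmt"

    func main() {
    	// With half the flows dropped in the last tick (d = 0.5), the
    	// multiplier is 1/(1-0.5) = 2: each surviving flow now counts
    	// for twice as many sampled packets.
    	dropRate := 0.5
    	samplingRate := uint32(1000)
    	samplingRate *= uint32(1 / (1 - dropRate))
    	fmt.Println(samplingRate) // 2000
    }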
@@ -1,23 +1,21 @@
 // SPDX-FileCopyrightText: 2022 Free Mobile
 // SPDX-License-Identifier: AGPL-3.0-only

-// Package flow handle incoming flows (currently Netflow v9 and IPFIX).
+// Package flow handle incoming Netflow/IPFIX/sflow flows.
 package flow

 import (
 	"errors"
-	"fmt"
-	"net/http"
-	"net/netip"

+	"google.golang.org/protobuf/proto"
 	"gopkg.in/tomb.v2"

 	"akvorado/common/daemon"
 	"akvorado/common/httpserver"
+	"akvorado/common/pb"
 	"akvorado/common/reporter"
-	"akvorado/common/schema"
-	"akvorado/inlet/flow/decoder"
 	"akvorado/inlet/flow/input"
+	"akvorado/inlet/kafka"
 )

 // Component represents the flow component.
@@ -27,17 +25,6 @@ type Component struct {
 	t      tomb.Tomb
 	config Configuration

-	metrics struct {
-		decoderStats  *reporter.CounterVec
-		decoderErrors *reporter.CounterVec
-	}
-
-	// Channel for sending flows out of the package.
-	outgoingFlows chan *schema.FlowMessage
-
-	// Per-exporter rate-limiters
-	limiters map[netip.Addr]*limiter
-
 	// Inputs
 	inputs []input.Input
 }
@@ -46,7 +33,7 @@ type Dependencies struct {
 	Daemon daemon.Component
 	HTTP   *httpserver.Component
-	Schema *schema.Component
+	Kafka  *kafka.Component
 }

 // New creates a new flow component.
@@ -56,99 +43,50 @@ func New(r *reporter.Reporter, configuration Configuration, dependencies Depende
 	}

 	c := Component{
 		r:      r,
 		d:      &dependencies,
 		config: configuration,
-		outgoingFlows: make(chan *schema.FlowMessage),
-		limiters:      make(map[netip.Addr]*limiter),
-		inputs:        make([]input.Input, len(configuration.Inputs)),
-	}
-
-	// Initialize decoders (at most once each)
-	alreadyInitialized := map[string]decoder.Decoder{}
-	decs := make([]decoder.Decoder, len(configuration.Inputs))
-	for idx, input := range c.config.Inputs {
-		dec, ok := alreadyInitialized[input.Decoder]
-		if ok {
-			decs[idx] = dec
-			continue
-		}
-		decoderfunc, ok := decoders[input.Decoder]
-		if !ok {
-			return nil, fmt.Errorf("unknown decoder %q", input.Decoder)
-		}
-		dec = decoderfunc(r, decoder.Dependencies{Schema: c.d.Schema}, decoder.Option{TimestampSource: input.TimestampSource})
-		alreadyInitialized[input.Decoder] = dec
-		decs[idx] = c.wrapDecoder(dec, input.UseSrcAddrForExporterAddr)
+		inputs: make([]input.Input, len(configuration.Inputs)),
 	}

 	// Initialize inputs
 	for idx, input := range c.config.Inputs {
 		var err error
-		c.inputs[idx], err = input.Config.New(r, c.d.Daemon, decs[idx])
+		c.inputs[idx], err = input.Config.New(r, c.d.Daemon, c.Send(input))
 		if err != nil {
 			return nil, err
 		}
 	}

-	// Metrics
-	c.metrics.decoderStats = c.r.CounterVec(
-		reporter.CounterOpts{
-			Name: "decoder_flows_total",
-			Help: "Decoder processed count.",
-		},
-		[]string{"name"},
-	)
-	c.metrics.decoderErrors = c.r.CounterVec(
-		reporter.CounterOpts{
-			Name: "decoder_errors_total",
-			Help: "Decoder processed error count.",
-		},
-		[]string{"name"},
-	)
-
 	c.d.Daemon.Track(&c.t, "inlet/flow")
-
-	c.d.HTTP.AddHandler("/api/v0/inlet/flow/schema.proto",
-		http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
-			w.Header().Set("Content-Type", "text/plain")
-			w.Write([]byte(c.d.Schema.ProtobufDefinition()))
-		}))
-
 	return &c, nil
 }

-// Flows returns a channel to receive flows.
-func (c *Component) Flows() <-chan *schema.FlowMessage {
-	return c.outgoingFlows
+// Send sends a raw flow to Kafka.
+func (c *Component) Send(config InputConfiguration) input.SendFunc {
+	return func(exporter string, flow *pb.RawFlow) {
+		flow.TimestampSource = config.TimestampSource
+		flow.Decoder = config.Decoder
+		flow.UseSourceAddress = config.UseSrcAddrForExporterAddr
+		if bytes, err := proto.Marshal(flow); err == nil {
+			c.d.Kafka.Send(exporter, bytes)
+		}
+	}
 }

 // Start starts the flow component.
 func (c *Component) Start() error {
 	for _, input := range c.inputs {
-		ch, err := input.Start()
+		err := input.Start()
 		stopper := input.Stop
 		if err != nil {
 			return err
 		}
 		c.t.Go(func() error {
-			defer stopper()
-			for {
-				select {
-				case <-c.t.Dying():
-					return nil
-				case fmsgs := <-ch:
-					if c.allowMessages(fmsgs) {
-						for _, fmsg := range fmsgs {
-							select {
-							case <-c.t.Dying():
-								return nil
-							case c.outgoingFlows <- fmsg:
-							}
-						}
-					}
-				}
-			}
+			<-c.t.Dying()
+			stopper()
+			return nil
 		})
 	}
 	return nil
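Since Send above ships plain protobuf bytes to Kafka, the consuming side (the new outlet) can recover each message with a single proto.Unmarshal. A minimal decoding sketch (the function name is hypothetical; pb.RawFlow and the proto package are the ones imported in this file):

    // decodeRawFlow turns a Kafka message value produced by Component.Send
    // back into a pb.RawFlow, including the decoder and timestamp-source
    // metadata that Send stamped onto it.
    func decodeRawFlow(value []byte) (*pb.RawFlow, error) {
    	flow := &pb.RawFlow{}
    	if err := proto.Unmarshal(value, flow); err != nil {
    		return nil, err
    	}
    	return flow, nil
    }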
@@ -156,10 +94,7 @@ func (c *Component) Start() error {

 // Stop stops the flow component
 func (c *Component) Stop() error {
-	defer func() {
-		close(c.outgoingFlows)
-		c.r.Info().Msg("flow component stopped")
-	}()
+	defer c.r.Info().Msg("flow component stopped")
 	c.r.Info().Msg("stopping flow component")
 	c.t.Kill(nil)
 	return c.t.Wait()
@@ -4,122 +4,89 @@
|
|||||||
package flow
|
package flow
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"bytes"
|
||||||
"fmt"
|
"fmt"
|
||||||
"os"
|
|
||||||
"path"
|
"path"
|
||||||
"runtime"
|
"runtime"
|
||||||
"testing"
|
"testing"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
|
"akvorado/common/daemon"
|
||||||
"akvorado/common/helpers"
|
"akvorado/common/helpers"
|
||||||
|
"akvorado/common/httpserver"
|
||||||
|
"akvorado/common/pb"
|
||||||
"akvorado/common/reporter"
|
"akvorado/common/reporter"
|
||||||
"akvorado/inlet/flow/input/file"
|
"akvorado/inlet/flow/input/file"
|
||||||
|
"akvorado/inlet/kafka"
|
||||||
|
|
||||||
|
"github.com/IBM/sarama"
|
||||||
)
|
)
|
||||||
|
|
||||||
func TestFlow(t *testing.T) {
|
func TestFlow(t *testing.T) {
|
||||||
var nominalRate int
|
|
||||||
_, src, _, _ := runtime.Caller(0)
|
_, src, _, _ := runtime.Caller(0)
|
||||||
base := path.Join(path.Dir(src), "decoder", "netflow", "testdata")
|
base := path.Join(path.Dir(src), "input", "file", "testdata")
|
||||||
outDir := t.TempDir()
|
paths := []string{
|
||||||
outFiles := []string{}
|
path.Join(base, "file1.txt"),
|
||||||
for idx, f := range []string{
|
path.Join(base, "file2.txt"),
|
||||||
"options-template.pcap",
|
|
||||||
"options-data.pcap",
|
|
||||||
"template.pcap",
|
|
||||||
"data.pcap", "data.pcap", "data.pcap", "data.pcap",
|
|
||||||
"data.pcap", "data.pcap", "data.pcap", "data.pcap",
|
|
||||||
"data.pcap", "data.pcap", "data.pcap", "data.pcap",
|
|
||||||
"data.pcap", "data.pcap", "data.pcap", "data.pcap",
|
|
||||||
} {
|
|
||||||
outFile := path.Join(outDir, fmt.Sprintf("data-%d", idx))
 
-			err := os.WriteFile(outFile, helpers.ReadPcapL4(t, path.Join(base, f)), 0o666)
-			if err != nil {
-				t.Fatalf("WriteFile(%q) error:\n%+v", outFile, err)
-			}
-			outFiles = append(outFiles, outFile)
 	}

 	inputs := []InputConfiguration{
 		{
-			Decoder: "netflow",
 			Config: &file.Configuration{
-				Paths: outFiles,
+				Paths:    paths,
+				MaxFlows: 100,
 			},
 		},
 	}

-	for retry := 2; retry >= 0; retry-- {
-		// Without rate limiting
-		{
-			r := reporter.NewMock(t)
-			config := DefaultConfiguration()
-			config.Inputs = inputs
-			c := NewMock(t, r, config)
+	r := reporter.NewMock(t)
+	config := DefaultConfiguration()
+	config.Inputs = inputs

-			// Receive flows
-			now := time.Now()
-			for range 1000 {
-				select {
-				case <-c.Flows():
-				case <-time.After(100 * time.Millisecond):
-					t.Fatalf("no flow received")
+	// Receive flows
+	producer, mockProducer := kafka.NewMock(t, r, kafka.DefaultConfiguration())
+	done := make(chan bool)
+	for i := range 100 {
+		mockProducer.ExpectInputWithMessageCheckerFunctionAndSucceed(func(got *sarama.ProducerMessage) error {
+			if i == 99 {
+				defer close(done)
+			}
+			expected := sarama.ProducerMessage{
+				Topic:     fmt.Sprintf("flows-v%d", pb.Version),
+				Key:       got.Key,
+				Value:     got.Value,
+				Partition: got.Partition,
+			}
+			if diff := helpers.Diff(got, expected); diff != "" {
+				t.Fatalf("Send() (-got, +want):\n%s", diff)
+			}
+			val, _ := got.Value.Encode()
+			if i%2 == 0 {
+				if !bytes.Contains(val, []byte("hello world!")) {
+					t.Fatalf("Send() did not return %q", "hello world!")
+				}
+			} else {
+				if !bytes.Contains(val, []byte("bye bye")) {
+					t.Fatalf("Send() did not return %q", "bye bye")
 				}
 			}
-			elapsed := time.Now().Sub(now)
-			t.Logf("Elapsed time for 1000 messages is %s", elapsed)
-			nominalRate = int(1000 * (time.Second / elapsed))
-		}
+			return nil
+		})
+	}

-		// With rate limiting
-		if runtime.GOOS == "Linux" {
-			r := reporter.NewMock(t)
-			config := DefaultConfiguration()
-			config.RateLimit = 1000
-			config.Inputs = inputs
-			c := NewMock(t, r, config)
+	c, err := New(r, config, Dependencies{
+		Daemon: daemon.NewMock(t),
+		HTTP:   httpserver.NewMock(t, r),
+		Kafka:  producer,
+	})
+	if err != nil {
+		t.Fatalf("New() error:\n%+v", err)
+	}
+	helpers.StartStop(t, c)

-			// Receive flows
-			twoSeconds := time.After(2 * time.Second)
-			count := 0
-		outer1:
-			for {
-				select {
-				case <-c.Flows():
-					count++
-				case <-twoSeconds:
-					break outer1
-				}
-			}
-			t.Logf("During the first two seconds, got %d flows", count)
-
-			if count > 2200 || count < 2000 {
-				t.Fatalf("Got %d flows instead of 2100 (burst included)", count)
-			}
-
-			if nominalRate == 0 {
-				return
-			}
-			select {
-			case flow := <-c.Flows():
-				// This is hard to estimate the number of
-				// flows we should have got. We use the
-				// nominal rate but it was done with rate
-				// limiting disabled (so less code).
-				// Therefore, we are super conservative on the
-				// upper limit of the sampling rate. However,
-				// the lower limit should be OK.
-				t.Logf("Nominal rate was %d/second", nominalRate)
-				expectedRate := uint64(30000 / 1000 * nominalRate)
-				if flow.SamplingRate > uint32(1000*expectedRate/100) || flow.SamplingRate < uint32(70*expectedRate/100) {
-					if retry > 0 {
-						continue
-					}
-					t.Fatalf("Sampling rate is %d, expected %d", flow.SamplingRate, expectedRate)
-				}
-			case <-time.After(100 * time.Millisecond):
-				t.Fatalf("no flow received")
-			}
-			break
-		}
-	}
+	// Wait for flows
+	select {
+	case <-done:
+	case <-time.After(time.Second):
+		t.Fatalf("flows not received")
 	}
 }
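The rewritten test asserts on the mocked Kafka producer instead of draining a Flows() channel, since the inlet now ships raw flows to Kafka without parsing them. A minimal, self-contained sketch of the same checker pattern, assuming the github.com/IBM/sarama module (topic value illustrative):

package main

import (
	"fmt"
	"testing"

	"github.com/IBM/sarama"
	"github.com/IBM/sarama/mocks"
)

// checkProducedTopic queues one expectation on a mocked async producer:
// the next produced message must target the given topic.
func checkProducedTopic(t *testing.T, topic string) {
	producer := mocks.NewAsyncProducer(t, mocks.NewTestConfig())
	producer.ExpectInputWithMessageCheckerFunctionAndSucceed(func(got *sarama.ProducerMessage) error {
		if got.Topic != topic {
			return fmt.Errorf("unexpected topic %q, want %q", got.Topic, topic)
		}
		return nil
	})
	producer.Input() <- &sarama.ProducerMessage{Topic: topic, Value: sarama.ByteEncoder("hello world!")}
	if err := producer.Close(); err != nil {
		t.Fatal(err)
	}
}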
@@ -1,29 +0,0 @@
-// SPDX-FileCopyrightText: 2022 Free Mobile
-// SPDX-License-Identifier: AGPL-3.0-only
-
-package flow
-
-import (
-	"testing"
-
-	"akvorado/common/helpers"
-	"akvorado/common/reporter"
-)
-
-func TestHTTPEndpoints(t *testing.T) {
-	r := reporter.NewMock(t)
-	c := NewMock(t, r, DefaultConfiguration())
-
-	cases := helpers.HTTPEndpointCases{
-		{
-			URL:         "/api/v0/inlet/flow/schema.proto",
-			ContentType: "text/plain",
-			FirstLines: []string{
-				"",
-				`syntax = "proto3";`,
-			},
-		},
-	}
-
-	helpers.TestHTTPEndpoints(t, c.d.HTTP.LocalAddr(), cases)
-}
@@ -1,49 +0,0 @@
-// SPDX-FileCopyrightText: 2022 Free Mobile
-// SPDX-License-Identifier: AGPL-3.0-only
-
-//go:build !release
-
-package flow
-
-import (
-	"testing"
-
-	"akvorado/common/daemon"
-	"akvorado/common/helpers"
-	"akvorado/common/httpserver"
-	"akvorado/common/reporter"
-	"akvorado/common/schema"
-	"akvorado/inlet/flow/input/udp"
-)
-
-// NewMock creates a new flow importer listening on a random port. It
-// is autostarted.
-func NewMock(t *testing.T, r *reporter.Reporter, config Configuration) *Component {
-	t.Helper()
-	if config.Inputs == nil {
-		config.Inputs = []InputConfiguration{
-			{
-				Decoder: "netflow",
-				Config: &udp.Configuration{
-					Listen:    "127.0.0.1:0",
-					QueueSize: 10,
-				},
-			},
-		}
-	}
-	c, err := New(r, config, Dependencies{
-		Daemon: daemon.NewMock(t),
-		HTTP:   httpserver.NewMock(t, r),
-		Schema: schema.NewMock(t),
-	})
-	if err != nil {
-		t.Fatalf("New() error:\n%+v", err)
-	}
-	helpers.StartStop(t, c)
-	return c
-}
-
-// Inject inject the provided flow message, as if it was received.
-func (c *Component) Inject(fmsg *schema.FlowMessage) {
-	c.outgoingFlows <- fmsg
-}
@@ -34,7 +34,7 @@ func DefaultConfiguration() Configuration {
 		Configuration:    kafka.DefaultConfiguration(),
 		FlushInterval:    time.Second,
 		FlushBytes:       int(sarama.MaxRequestSize) - 1,
-		MaxMessageBytes:  1000000,
+		MaxMessageBytes:  1_000_000,
 		CompressionCodec: CompressionCodec(sarama.CompressionNone),
 		QueueSize:        32,
 	}
@@ -15,22 +15,22 @@ import (
 	"akvorado/common/daemon"
 	"akvorado/common/helpers"
 	"akvorado/common/kafka"
+	"akvorado/common/pb"
 	"akvorado/common/reporter"
-	"akvorado/common/schema"
 )

 func TestRealKafka(t *testing.T) {
 	client, brokers := kafka.SetupKafkaBroker(t)

 	topicName := fmt.Sprintf("test-topic-%d", rand.Int())
+	expectedTopicName := fmt.Sprintf("%s-v%d", topicName, pb.Version)
 	configuration := DefaultConfiguration()
 	configuration.Topic = topicName
 	configuration.Brokers = brokers
 	configuration.Version = kafka.Version(sarama.V2_8_1_0)
 	configuration.FlushInterval = 100 * time.Millisecond
-	expectedTopicName := fmt.Sprintf("%s-%s", topicName, schema.NewMock(t).ProtobufMessageHash())
 	r := reporter.NewMock(t)
-	c, err := New(r, configuration, Dependencies{Daemon: daemon.NewMock(t), Schema: schema.NewMock(t)})
+	c, err := New(r, configuration, Dependencies{Daemon: daemon.NewMock(t)})
 	if err != nil {
 		t.Fatalf("New() error:\n%+v", err)
 	}
@@ -16,8 +16,8 @@ import (

 	"akvorado/common/daemon"
 	"akvorado/common/kafka"
+	"akvorado/common/pb"
 	"akvorado/common/reporter"
-	"akvorado/common/schema"
 )

 // Component represents the Kafka exporter.
@@ -27,8 +27,8 @@ type Component struct {
 	t      tomb.Tomb
 	config Configuration

-	kafkaTopic          string
 	kafkaConfig         *sarama.Config
+	kafkaTopic          string
 	kafkaProducer       sarama.AsyncProducer
 	createKafkaProducer func() (sarama.AsyncProducer, error)
 	metrics             metrics
@@ -37,7 +37,6 @@ type Component struct {
 // Dependencies define the dependencies of the Kafka exporter.
 type Dependencies struct {
 	Daemon daemon.Component
-	Schema *schema.Component
 }

 // New creates a new Kafka exporter component.
@@ -66,7 +65,7 @@ func New(reporter *reporter.Reporter, configuration Configuration, dependencies
 		config: configuration,

 		kafkaConfig: kafkaConfig,
-		kafkaTopic:  fmt.Sprintf("%s-%s", configuration.Topic, dependencies.Schema.ProtobufMessageHash()),
+		kafkaTopic:  fmt.Sprintf("%s-v%d", configuration.Topic, pb.Version),
 	}
 	c.initMetrics()
 	c.createKafkaProducer = func() (sarama.AsyncProducer, error) {
@@ -95,9 +94,10 @@ func (c *Component) Start() error {
 	c.t.Go(func() error {
 		defer kafkaProducer.Close()
 		errLogger := c.r.Sample(reporter.BurstSampler(10*time.Second, 3))
+		dying := c.t.Dying()
 		for {
 			select {
-			case <-c.t.Dying():
+			case <-dying:
 				c.r.Debug().Msg("stop error logger")
 				return nil
 			case msg := <-kafkaProducer.Errors():
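With flows shipped unparsed, the topic name is now derived from the wire-format version instead of the protobuf schema hash, so a schema change no longer forces a topic rotation; only a change of the raw encoding does. A minimal sketch of the naming rule (the constant stands in for pb.Version, assumed to be 5 to match the metrics expectation below):

package main

import "fmt"

// version stands in for pb.Version, the version of the raw flow wire format.
const version = 5

// topicName mirrors the kafkaTopic computation in New() above.
func topicName(base string) string {
	return fmt.Sprintf("%s-v%d", base, version)
}

func main() {
	fmt.Println(topicName("flows")) // flows-v5
}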
@@ -14,8 +14,8 @@ import (

 	"akvorado/common/daemon"
 	"akvorado/common/helpers"
+	"akvorado/common/pb"
 	"akvorado/common/reporter"
-	"akvorado/common/schema"
 )

 func TestKafka(t *testing.T) {
@@ -26,8 +26,9 @@ func TestKafka(t *testing.T) {
 	received := make(chan bool)
 	mockProducer.ExpectInputWithMessageCheckerFunctionAndSucceed(func(got *sarama.ProducerMessage) error {
 		defer close(received)
+		topic := fmt.Sprintf("flows-v%d", pb.Version)
 		expected := sarama.ProducerMessage{
-			Topic:     fmt.Sprintf("flows-%s", c.d.Schema.ProtobufMessageHash()),
+			Topic:     topic,
 			Key:       got.Key,
 			Value:     sarama.ByteEncoder("hello world!"),
 			Partition: got.Partition,
@@ -51,9 +52,9 @@ func TestKafka(t *testing.T) {
 	time.Sleep(10 * time.Millisecond)
 	gotMetrics := r.GetMetrics("akvorado_inlet_kafka_")
 	expectedMetrics := map[string]string{
 		`sent_bytes_total{exporter="127.0.0.1"}`: "26",
-		fmt.Sprintf(`errors_total{error="kafka: Failed to produce message to topic flows-%s: noooo"}`, c.d.Schema.ProtobufMessageHash()): "1",
+		`errors_total{error="kafka: Failed to produce message to topic flows-v5: noooo"}`: "1",
 		`sent_messages_total{exporter="127.0.0.1"}`: "2",
 	}
 	if diff := helpers.Diff(gotMetrics, expectedMetrics); diff != "" {
 		t.Fatalf("Metrics (-got, +want):\n%s", diff)
@@ -62,7 +63,7 @@ func TestKafka(t *testing.T) {

 func TestKafkaMetrics(t *testing.T) {
 	r := reporter.NewMock(t)
-	c, err := New(r, DefaultConfiguration(), Dependencies{Daemon: daemon.NewMock(t), Schema: schema.NewMock(t)})
+	c, err := New(r, DefaultConfiguration(), Dependencies{Daemon: daemon.NewMock(t)})
 	if err != nil {
 		t.Fatalf("New() error:\n%+v", err)
 	}
@@ -14,7 +14,6 @@ import (
 	"akvorado/common/daemon"
 	"akvorado/common/helpers"
 	"akvorado/common/reporter"
-	"akvorado/common/schema"
 )

 // NewMock creates a new Kafka component with a mocked Kafka. It will
@@ -23,7 +22,6 @@ func NewMock(t *testing.T, reporter *reporter.Reporter, configuration Configurat
 	t.Helper()
 	c, err := New(reporter, configuration, Dependencies{
 		Daemon: daemon.NewMock(t),
-		Schema: schema.NewMock(t),
 	})
 	if err != nil {
 		t.Fatalf("New() error:\n%+v", err)
@@ -10,7 +10,6 @@ import (
 	"akvorado/common/remotedatasourcefetcher"

 	"akvorado/common/helpers"
-	"akvorado/common/kafka"

 	"github.com/go-viper/mapstructure/v2"
 )
@@ -19,8 +18,6 @@ import (
 type Configuration struct {
 	// SkipMigrations tell if we should skip migrations.
 	SkipMigrations bool
-	// Kafka describes Kafka-specific configuration
-	Kafka KafkaConfiguration
 	// Resolutions describe the various resolutions to use to
 	// store data and the associated TTLs.
 	Resolutions []ResolutionConfiguration `validate:"min=1,dive"`
@@ -67,26 +64,9 @@ type ResolutionConfiguration struct {
 	TTL time.Duration `validate:"isdefault|min=1h"`
 }

-// KafkaConfiguration describes Kafka-specific configuration
-type KafkaConfiguration struct {
-	kafka.Configuration `mapstructure:",squash" yaml:"-,inline"`
-	// Consumers tell how many consumers to use to poll data from Kafka
-	Consumers int `validate:"min=1"`
-	// GroupName defines the Kafka consumers group used to poll data from topic,
-	// shared between all Consumers.
-	GroupName string
-	// EngineSettings allows one to set arbitrary settings for Kafka engine in
-	// ClickHouse.
-	EngineSettings []string
-}
-
 // DefaultConfiguration represents the default configuration for the ClickHouse configurator.
 func DefaultConfiguration() Configuration {
 	return Configuration{
-		Kafka: KafkaConfiguration{
-			Consumers: 1,
-			GroupName: "clickhouse",
-		},
 		Resolutions: []ResolutionConfiguration{
 			{0, 15 * 24 * time.Hour},          // 15 days
 			{time.Minute, 7 * 24 * time.Hour}, // 7 days
@@ -141,6 +121,9 @@ func NetworkAttributesUnmarshallerHook() mapstructure.DecodeHookFunc {
 func init() {
 	helpers.RegisterMapstructureUnmarshallerHook(helpers.SubnetMapUnmarshallerHook[NetworkAttributes]())
 	helpers.RegisterMapstructureUnmarshallerHook(NetworkAttributesUnmarshallerHook())
-	helpers.RegisterMapstructureDeprecatedFields[Configuration]("SystemLogTTL", "PrometheusEndpoint")
+	helpers.RegisterMapstructureDeprecatedFields[Configuration](
+		"SystemLogTTL",
+		"PrometheusEndpoint",
+		"Kafka")
 	helpers.RegisterSubnetMapValidation[NetworkAttributes]()
 }
@@ -88,7 +88,6 @@ func TestNetworkNamesUnmarshalHook(t *testing.T) {

 func TestDefaultConfiguration(t *testing.T) {
 	config := DefaultConfiguration()
-	config.Kafka.Topic = "flow"
 	if err := helpers.Validate.Struct(config); err != nil {
 		t.Fatalf("validate.Struct() error:\n%+v", err)
 	}
@@ -45,17 +45,12 @@ func (c *Component) migrateDatabase() error {
 	}

 	// Grab some information about the database
-	var threads uint8
 	var version string
-	row := c.d.ClickHouse.QueryRow(ctx, `SELECT getSetting('max_threads'), version()`)
-	if err := row.Scan(&threads, &version); err != nil {
+	row := c.d.ClickHouse.QueryRow(ctx, `SELECT version()`)
+	if err := row.Scan(&version); err != nil {
 		c.r.Err(err).Msg("unable to parse database settings")
 		return fmt.Errorf("unable to parse database settings: %w", err)
 	}
-	if c.config.Kafka.Consumers > int(threads) {
-		c.r.Warn().Msgf("too many consumers requested, capping to %d", threads)
-		c.config.Kafka.Consumers = int(threads)
-	}
 	if err := validateVersion(version); err != nil {
 		return fmt.Errorf("incorrect ClickHouse version: %w", err)
 	}
@@ -162,12 +157,6 @@ func (c *Component) migrateDatabase() error {
 		c.createExportersConsumerView,
 		c.createRawFlowsTable,
 		c.createRawFlowsConsumerView,
-		c.createRawFlowsErrors,
-		func(ctx context.Context) error {
-			return c.createDistributedTable(ctx, "flows_raw_errors")
-		},
-		c.createRawFlowsErrorsConsumerView,
-		c.deleteOldRawFlowsErrorsView,
 	)
 	if err != nil {
 		return err
@@ -281,36 +281,18 @@ CREATE MATERIALIZED VIEW exporters_consumer TO %s AS %s

 // createRawFlowsTable creates the raw flow table
 func (c *Component) createRawFlowsTable(ctx context.Context) error {
-	hash := c.d.Schema.ProtobufMessageHash()
+	hash := c.d.Schema.ClickHouseHash()
 	tableName := fmt.Sprintf("flows_%s_raw", hash)
-	kafkaSettings := []string{
-		fmt.Sprintf(`kafka_broker_list = %s`,
-			quoteString(strings.Join(c.config.Kafka.Brokers, ","))),
-		fmt.Sprintf(`kafka_topic_list = %s`,
-			quoteString(fmt.Sprintf("%s-%s", c.config.Kafka.Topic, hash))),
-		fmt.Sprintf(`kafka_group_name = %s`, quoteString(c.config.Kafka.GroupName)),
-		`kafka_format = 'Protobuf'`,
-		fmt.Sprintf(`kafka_schema = 'flow-%s.proto:FlowMessagev%s'`, hash, hash),
-		fmt.Sprintf(`kafka_num_consumers = %d`, c.config.Kafka.Consumers),
-		`kafka_thread_per_consumer = 1`,
-		`kafka_handle_error_mode = 'stream'`,
-	}
-	for _, setting := range c.config.Kafka.EngineSettings {
-		kafkaSettings = append(kafkaSettings, setting)
-	}
-	kafkaEngine := fmt.Sprintf("Kafka SETTINGS %s", strings.Join(kafkaSettings, ", "))

 	// Build CREATE query
 	createQuery, err := stemplate(
-		`CREATE TABLE {{ .Database }}.{{ .Table }} ({{ .Schema }}) ENGINE = {{ .Engine }}`,
+		`CREATE TABLE {{ .Database }}.{{ .Table }} ({{ .Schema }}) ENGINE = Null`,
 		gin.H{
 			"Database": c.d.ClickHouse.DatabaseName(),
 			"Table":    tableName,
 			"Schema": c.d.Schema.ClickHouseCreateTable(
 				schema.ClickHouseSkipGeneratedColumns,
-				schema.ClickHouseUseTransformFromType,
 				schema.ClickHouseSkipAliasedColumns),
-			"Engine": kafkaEngine,
 		})
 	if err != nil {
 		return fmt.Errorf("cannot build query to create raw flows table: %w", err)
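With the outlet inserting parsed flows directly, the raw table no longer needs the Kafka engine: a Null table discards inserted rows once the attached materialized views have processed them, so it serves purely as an insertion point. A minimal sketch of the DDL this migration now generates, with an abbreviated column list (the real columns come from ClickHouseCreateTable; the hash value is taken from the test data below):

package main

import "fmt"

// buildRawFlowsDDL mirrors the stemplate call above: the raw flows table is
// created with the Null engine and only feeds its materialized views.
func buildRawFlowsDDL(database, hash string) string {
	columns := "`TimeReceived` DateTime, `SamplingRate` UInt64, `Bytes` UInt64, `Packets` UInt64"
	return fmt.Sprintf("CREATE TABLE %s.flows_%s_raw (%s) ENGINE = Null", database, hash, columns)
}

func main() {
	fmt.Println(buildRawFlowsDDL("default", "I6D3KDQCRUBCNCGF4BSOWTRMVIv5"))
}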
@@ -328,7 +310,6 @@ func (c *Component) createRawFlowsTable(ctx context.Context) error {
 	c.r.Info().Msg("create raw flows table")
 	for _, table := range []string{
 		fmt.Sprintf("%s_consumer", tableName),
-		fmt.Sprintf("%s_errors", tableName),
 		tableName,
 	} {
 		if err := c.d.ClickHouse.ExecOnCluster(ctx, fmt.Sprintf(`DROP TABLE IF EXISTS %s SYNC`, table)); err != nil {
@@ -348,20 +329,19 @@ func (c *Component) createRawFlowsTable(ctx context.Context) error {
|
|||||||
var dictionaryNetworksLookupRegex = regexp.MustCompile(`\bc_(Src|Dst)Networks\[([[:lower:]]+)\]\B`)
|
var dictionaryNetworksLookupRegex = regexp.MustCompile(`\bc_(Src|Dst)Networks\[([[:lower:]]+)\]\B`)
|
||||||
|
|
||||||
func (c *Component) createRawFlowsConsumerView(ctx context.Context) error {
|
func (c *Component) createRawFlowsConsumerView(ctx context.Context) error {
|
||||||
tableName := fmt.Sprintf("flows_%s_raw", c.d.Schema.ProtobufMessageHash())
|
tableName := fmt.Sprintf("flows_%s_raw", c.d.Schema.ClickHouseHash())
|
||||||
viewName := fmt.Sprintf("%s_consumer", tableName)
|
viewName := fmt.Sprintf("%s_consumer", tableName)
|
||||||
|
|
||||||
// Build SELECT query
|
// Build SELECT query
|
||||||
args := gin.H{
|
args := gin.H{
|
||||||
"Columns": strings.Join(c.d.Schema.ClickHouseSelectColumns(
|
"Columns": strings.Join(c.d.Schema.ClickHouseSelectColumns(
|
||||||
schema.ClickHouseSubstituteGenerates,
|
schema.ClickHouseSubstituteGenerates,
|
||||||
schema.ClickHouseSubstituteTransforms,
|
|
||||||
schema.ClickHouseSkipAliasedColumns), ", "),
|
schema.ClickHouseSkipAliasedColumns), ", "),
|
||||||
"Database": c.d.ClickHouse.DatabaseName(),
|
"Database": c.d.ClickHouse.DatabaseName(),
|
||||||
"Table": tableName,
|
"Table": tableName,
|
||||||
}
|
}
|
||||||
selectQuery, err := stemplate(
|
selectQuery, err := stemplate(
|
||||||
`SELECT {{ .Columns }} FROM {{ .Database }}.{{ .Table }} WHERE length(_error) = 0`,
|
`SELECT {{ .Columns }} FROM {{ .Database }}.{{ .Table }}`,
|
||||||
args)
|
args)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("cannot build select statement for raw flows consumer view: %w", err)
|
return fmt.Errorf("cannot build select statement for raw flows consumer view: %w", err)
|
||||||
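The dropped WHERE clause only made sense with the Kafka engine, whose kafka_handle_error_mode = 'stream' setting exposes undecodable messages through the _error virtual column; a Null table has no such column, and decoding errors are now caught in the outlet before the batch insert. A minimal sketch of the resulting view query (names are illustrative):

package main

import "fmt"

// consumerViewQuery mirrors the simplified select statement above: every row
// of the raw Null table feeds the consumer view, with no _error filtering.
func consumerViewQuery(database, table, columns string) string {
	return fmt.Sprintf("SELECT %s FROM %s.%s", columns, database, table)
}

func main() {
	fmt.Println(consumerViewQuery("default", "flows_I6D3KDQCRUBCNCGF4BSOWTRMVIv5_raw", "TimeReceived, Bytes, Packets"))
}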
@@ -445,105 +425,6 @@ func (c *Component) createRawFlowsConsumerView(ctx context.Context) error {
 	return nil
 }

-func (c *Component) createRawFlowsErrors(ctx context.Context) error {
-	name := c.localTable("flows_raw_errors")
-	createQuery, err := stemplate(`CREATE TABLE {{ .Database }}.{{ .Table }}
-(`+"`timestamp`"+` DateTime,
- `+"`topic`"+` LowCardinality(String),
- `+"`partition`"+` UInt64,
- `+"`offset`"+` UInt64,
- `+"`raw`"+` String,
- `+"`error`"+` String)
-ENGINE = {{ .Engine }}
-PARTITION BY toYYYYMMDDhhmmss(toStartOfHour(timestamp))
-ORDER BY (timestamp, topic, partition, offset)
-TTL timestamp + toIntervalDay(1)
-`, gin.H{
-		"Table":    name,
-		"Database": c.d.ClickHouse.DatabaseName(),
-		"Engine":   c.mergeTreeEngine(name, ""),
-	})
-	if err != nil {
-		return fmt.Errorf("cannot build query to create flow error table: %w", err)
-	}
-	if ok, err := c.tableAlreadyExists(ctx, name, "create_table_query", createQuery); err != nil {
-		return err
-	} else if ok {
-		c.r.Info().Msgf("table %s already exists, skip migration", name)
-		return errSkipStep
-	}
-	c.r.Info().Msgf("create table %s", name)
-	createOrReplaceQuery := strings.Replace(createQuery, "CREATE ", "CREATE OR REPLACE ", 1)
-	if err := c.d.ClickHouse.ExecOnCluster(ctx, createOrReplaceQuery); err != nil {
-		return fmt.Errorf("cannot create table %s: %w", name, err)
-	}
-	return nil
-}
-
-func (c *Component) createRawFlowsErrorsConsumerView(ctx context.Context) error {
-	source := fmt.Sprintf("flows_%s_raw", c.d.Schema.ProtobufMessageHash())
-	viewName := "flows_raw_errors_consumer"
-
-	// Build SELECT query
-	selectQuery, err := stemplate(`
-SELECT
- now() AS timestamp,
- _topic AS topic,
- _partition AS partition,
- _offset AS offset,
- _raw_message AS raw,
- _error AS error
-FROM {{ .Database }}.{{ .Table }}
-WHERE length(_error) > 0`, gin.H{
-		"Database": c.d.ClickHouse.DatabaseName(),
-		"Table":    source,
-	})
-	if err != nil {
-		return fmt.Errorf("cannot build select statement for raw flows error: %w", err)
-	}
-
-	// Check the existing one
-	if ok, err := c.tableAlreadyExists(ctx, viewName, "as_select", selectQuery); err != nil {
-		return err
-	} else if ok {
-		c.r.Info().Msg("raw flows errors view already exists, skip migration")
-		return errSkipStep
-	}
-
-	// Drop and create
-	c.r.Info().Msg("create raw flows errors view")
-	if err := c.d.ClickHouse.ExecOnCluster(ctx, fmt.Sprintf(`DROP TABLE IF EXISTS %s SYNC`, viewName)); err != nil {
-		return fmt.Errorf("cannot drop table %s: %w", viewName, err)
-	}
-	if err := c.d.ClickHouse.ExecOnCluster(ctx,
-		fmt.Sprintf(`CREATE MATERIALIZED VIEW %s TO %s AS %s`,
-			viewName, c.distributedTable("flows_raw_errors"), selectQuery)); err != nil {
-		return fmt.Errorf("cannot create raw flows errors view: %w", err)
-	}
-
-	return nil
-}
-
-func (c *Component) deleteOldRawFlowsErrorsView(ctx context.Context) error {
-	tableName := fmt.Sprintf("flows_%s_raw", c.d.Schema.ProtobufMessageHash())
-	viewName := fmt.Sprintf("%s_errors", tableName)
-
-	// Check the existing one
-	if ok, err := c.tableAlreadyExists(ctx, viewName, "name", viewName); err != nil {
-		return err
-	} else if !ok {
-		c.r.Debug().Msg("old raw flows errors view does not exist, skip migration")
-		return errSkipStep
-	}
-
-	// Drop
-	c.r.Info().Msg("delete old raw flows errors view")
-	if err := c.d.ClickHouse.ExecOnCluster(ctx, fmt.Sprintf(`DROP TABLE IF EXISTS %s SYNC`, viewName)); err != nil {
-		return fmt.Errorf("cannot drop table %s: %w", viewName, err)
-	}
-	return nil
-}
-
 func (c *Component) createOrUpdateFlowsTable(ctx context.Context, resolution ResolutionConfiguration) error {
 	ctx = clickhouse.Context(ctx, clickhouse.WithSettings(clickhouse.Settings{
 		"allow_suspicious_low_cardinality_types": 1,
@@ -20,7 +20,6 @@ import (
 	"akvorado/common/daemon"
 	"akvorado/common/helpers"
 	"akvorado/common/httpserver"
-	"akvorado/common/kafka"
 	"akvorado/common/reporter"
 	"akvorado/common/schema"
 	"akvorado/orchestrator/geoip"
@@ -116,15 +115,23 @@ func loadAllTables(t *testing.T, ch *clickhousedb.Component, sch *schema.Compone
 }

 func isOldTable(schema *schema.Component, table string) bool {
-	if strings.Contains(table, schema.ProtobufMessageHash()) {
+	if strings.Contains(table, schema.ClickHouseHash()) {
 		return false
 	}
-	if table == "flows_raw_errors" {
-		return false
-	}
-	if strings.HasSuffix(table, "_raw") || strings.HasSuffix(table, "_raw_consumer") || strings.HasSuffix(table, "_raw_errors") {
+	if strings.HasPrefix(table, "test_") {
 		return true
 	}
+	oldSuffixes := []string{
+		"_raw",
+		"_raw_consumer",
+		"_raw_errors", "_raw_errors_local",
+		"_raw_errors_consumer",
+	}
+	for _, suffix := range oldSuffixes {
+		if strings.HasSuffix(table, suffix) {
+			return true
+		}
+	}
 	return false
 }

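isOldTable now classifies every retired _raw* suffix variant, including the removed _raw_errors family, as a leftover from earlier schema generations, keeps tables carrying the current ClickHouse hash, and always treats test_ fixtures as old. A standalone sketch of the same matching logic (the hash value is illustrative):

package main

import (
	"fmt"
	"strings"
)

// isOldTable reproduces the selection logic above against a fixed current hash.
func isOldTable(currentHash, table string) bool {
	if strings.Contains(table, currentHash) {
		return false // current schema generation, keep
	}
	if strings.HasPrefix(table, "test_") {
		return true // test fixtures count as old
	}
	for _, suffix := range []string{
		"_raw", "_raw_consumer",
		"_raw_errors", "_raw_errors_local", "_raw_errors_consumer",
	} {
		if strings.HasSuffix(table, suffix) {
			return true // retired raw-table family
		}
	}
	return false
}

func main() {
	fmt.Println(isOldTable("I6D3KDQCRUBCNCGF4BSOWTRMVIv5", "flows_OLDHASH_raw"))      // true
	fmt.Println(isOldTable("I6D3KDQCRUBCNCGF4BSOWTRMVIv5", "flows_raw_errors_local")) // true
}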
@@ -136,7 +143,6 @@ func startTestComponent(t *testing.T, r *reporter.Reporter, chComponent *clickho
 	}
 	configuration := DefaultConfiguration()
 	configuration.OrchestratorURL = "http://127.0.0.1:0"
-	configuration.Kafka.Configuration = kafka.DefaultConfiguration()
 	ch, err := New(r, configuration, Dependencies{
 		Daemon: daemon.NewMock(t),
 		HTTP:   httpserver.NewMock(t, r),
@@ -234,7 +240,7 @@ WHERE database=currentDatabase() AND table NOT LIKE '.%'`)
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatalf("Query() error:\n%+v", err)
|
t.Fatalf("Query() error:\n%+v", err)
|
||||||
}
|
}
|
||||||
hash := ch.d.Schema.ProtobufMessageHash()
|
hash := ch.d.Schema.ClickHouseHash()
|
||||||
got := []string{}
|
got := []string{}
|
||||||
for rows.Next() {
|
for rows.Next() {
|
||||||
var table string
|
var table string
|
||||||
@@ -263,9 +269,6 @@ WHERE database=currentDatabase() AND table NOT LIKE '.%'`)
|
|||||||
fmt.Sprintf("flows_%s_raw", hash),
|
fmt.Sprintf("flows_%s_raw", hash),
|
||||||
fmt.Sprintf("flows_%s_raw_consumer", hash),
|
fmt.Sprintf("flows_%s_raw_consumer", hash),
|
||||||
"flows_local",
|
"flows_local",
|
||||||
"flows_raw_errors",
|
|
||||||
"flows_raw_errors_consumer",
|
|
||||||
"flows_raw_errors_local",
|
|
||||||
schema.DictionaryICMP,
|
schema.DictionaryICMP,
|
||||||
schema.DictionaryNetworks,
|
schema.DictionaryNetworks,
|
||||||
schema.DictionaryProtocols,
|
schema.DictionaryProtocols,
|
||||||
@@ -360,9 +363,6 @@ func TestMigrationFromPreviousStates(t *testing.T) {
 		schema.ColumnDstASPath,
 		schema.ColumnDstCommunities,
 		schema.ColumnDstLargeCommunities,
-		schema.ColumnDstLargeCommunitiesASN,
-		schema.ColumnDstLargeCommunitiesLocalData1,
-		schema.ColumnDstLargeCommunitiesLocalData2,
 	}
 	sch, err := schema.New(schConfig)
 	if err != nil {
@@ -442,7 +442,7 @@ AND name LIKE $3`, "flows", ch.d.ClickHouse.DatabaseName(), "%DimensionAttribute
|
|||||||
// Check if the rows were created in the consumer flows table
|
// Check if the rows were created in the consumer flows table
|
||||||
rowConsumer := ch.d.ClickHouse.QueryRow(
|
rowConsumer := ch.d.ClickHouse.QueryRow(
|
||||||
context.Background(),
|
context.Background(),
|
||||||
fmt.Sprintf(`SHOW CREATE flows_%s_raw_consumer`, ch.d.Schema.ProtobufMessageHash()))
|
fmt.Sprintf(`SHOW CREATE flows_%s_raw_consumer`, ch.d.Schema.ClickHouseHash()))
|
||||||
var existingConsumer string
|
var existingConsumer string
|
||||||
if err := rowConsumer.Scan(&existingConsumer); err != nil {
|
if err := rowConsumer.Scan(&existingConsumer); err != nil {
|
||||||
t.Fatalf("Scan() error:\n%+v", err)
|
t.Fatalf("Scan() error:\n%+v", err)
|
||||||
@@ -517,7 +517,7 @@ AND name LIKE $3`, "flows", ch.d.ClickHouse.DatabaseName(), "%DimensionAttribute
|
|||||||
// Check if the rows were removed in the consumer flows table
|
// Check if the rows were removed in the consumer flows table
|
||||||
rowConsumer := ch.d.ClickHouse.QueryRow(
|
rowConsumer := ch.d.ClickHouse.QueryRow(
|
||||||
context.Background(),
|
context.Background(),
|
||||||
fmt.Sprintf(`SHOW CREATE flows_%s_raw_consumer`, ch.d.Schema.ProtobufMessageHash()))
|
fmt.Sprintf(`SHOW CREATE flows_%s_raw_consumer`, ch.d.Schema.ClickHouseHash()))
|
||||||
var existingConsumer string
|
var existingConsumer string
|
||||||
if err := rowConsumer.Scan(&existingConsumer); err != nil {
|
if err := rowConsumer.Scan(&existingConsumer); err != nil {
|
||||||
t.Fatalf("Scan() error:\n%+v", err)
|
t.Fatalf("Scan() error:\n%+v", err)
|
||||||
orchestrator/clickhouse/testdata/states/002-cluster.csv (vendored, new file, 21 lines)
@@ -0,0 +1,21 @@
asns,"CREATE DICTIONARY default.asns (`asn` UInt32 INJECTIVE, `name` String) PRIMARY KEY asn SOURCE(HTTP(URL 'http://127.0.0.1:0/api/v0/orchestrator/clickhouse/asns.csv' FORMAT 'CSVWithNames')) LIFETIME(MIN 0 MAX 3600) LAYOUT(HASHED()) SETTINGS(format_csv_allow_single_quotes = 0)"
icmp,"CREATE DICTIONARY default.icmp (`proto` UInt8, `type` UInt8, `code` UInt8, `name` String) PRIMARY KEY proto, type, code SOURCE(HTTP(URL 'http://127.0.0.1:0/api/v0/orchestrator/clickhouse/icmp.csv' FORMAT 'CSVWithNames')) LIFETIME(MIN 0 MAX 3600) LAYOUT(COMPLEX_KEY_HASHED()) SETTINGS(format_csv_allow_single_quotes = 0)"
networks,"CREATE DICTIONARY default.networks (`network` String, `name` String, `role` String, `site` String, `region` String, `city` String, `state` String, `country` String, `tenant` String, `asn` UInt32) PRIMARY KEY network SOURCE(HTTP(URL 'http://127.0.0.1:0/api/v0/orchestrator/clickhouse/networks.csv' FORMAT 'CSVWithNames')) LIFETIME(MIN 0 MAX 3600) LAYOUT(IP_TRIE()) SETTINGS(format_csv_allow_single_quotes = 0)"
protocols,"CREATE DICTIONARY default.protocols (`proto` UInt8 INJECTIVE, `name` String, `description` String) PRIMARY KEY proto SOURCE(HTTP(URL 'http://127.0.0.1:0/api/v0/orchestrator/clickhouse/protocols.csv' FORMAT 'CSVWithNames')) LIFETIME(MIN 0 MAX 3600) LAYOUT(HASHED()) SETTINGS(format_csv_allow_single_quotes = 0)"
tcp,"CREATE DICTIONARY default.tcp (`port` UInt16 INJECTIVE, `name` String) PRIMARY KEY port SOURCE(HTTP(URL 'http://127.0.0.1:0/api/v0/orchestrator/clickhouse/tcp.csv' FORMAT 'CSVWithNames')) LIFETIME(MIN 0 MAX 3600) LAYOUT(HASHED()) SETTINGS(format_csv_allow_single_quotes = 0)"
udp,"CREATE DICTIONARY default.udp (`port` UInt16 INJECTIVE, `name` String) PRIMARY KEY port SOURCE(HTTP(URL 'http://127.0.0.1:0/api/v0/orchestrator/clickhouse/udp.csv' FORMAT 'CSVWithNames')) LIFETIME(MIN 0 MAX 3600) LAYOUT(HASHED()) SETTINGS(format_csv_allow_single_quotes = 0)"
exporters,"CREATE TABLE default.exporters (`TimeReceived` DateTime, `ExporterAddress` LowCardinality(IPv6), `ExporterName` LowCardinality(String), `ExporterGroup` LowCardinality(String), `ExporterRole` LowCardinality(String), `ExporterSite` LowCardinality(String), `ExporterRegion` LowCardinality(String), `ExporterTenant` LowCardinality(String), `IfName` LowCardinality(String), `IfDescription` LowCardinality(String), `IfSpeed` UInt32, `IfConnectivity` LowCardinality(String), `IfProvider` LowCardinality(String), `IfBoundary` Enum8('undefined' = 0, 'external' = 1, 'internal' = 2)) ENGINE = ReplicatedReplacingMergeTree('/clickhouse/tables/shard-{shard}/exporters', 'replica-{replica}', TimeReceived) ORDER BY (ExporterAddress, IfName) TTL TimeReceived + toIntervalDay(1) SETTINGS index_granularity = 8192"
flows_1h0m0s_local,"CREATE TABLE default.flows_1h0m0s_local (`TimeReceived` DateTime CODEC(DoubleDelta, LZ4), `SamplingRate` UInt64, `ExporterAddress` LowCardinality(IPv6), `ExporterName` LowCardinality(String), `ExporterGroup` LowCardinality(String), `ExporterRole` LowCardinality(String), `ExporterSite` LowCardinality(String), `ExporterRegion` LowCardinality(String), `ExporterTenant` LowCardinality(String), `SrcAS` UInt32, `DstAS` UInt32, `SrcNetName` LowCardinality(String), `DstNetName` LowCardinality(String), `SrcNetRole` LowCardinality(String), `DstNetRole` LowCardinality(String), `SrcNetSite` LowCardinality(String), `DstNetSite` LowCardinality(String), `SrcNetRegion` LowCardinality(String), `DstNetRegion` LowCardinality(String), `SrcNetTenant` LowCardinality(String), `DstNetTenant` LowCardinality(String), `SrcCountry` FixedString(2), `DstCountry` FixedString(2), `SrcGeoCity` LowCardinality(String), `DstGeoCity` LowCardinality(String), `SrcGeoState` LowCardinality(String), `DstGeoState` LowCardinality(String), `Dst1stAS` UInt32, `Dst2ndAS` UInt32, `Dst3rdAS` UInt32, `InIfName` LowCardinality(String), `OutIfName` LowCardinality(String), `InIfDescription` LowCardinality(String), `OutIfDescription` LowCardinality(String), `InIfSpeed` UInt32, `OutIfSpeed` UInt32, `InIfConnectivity` LowCardinality(String), `OutIfConnectivity` LowCardinality(String), `InIfProvider` LowCardinality(String), `OutIfProvider` LowCardinality(String), `InIfBoundary` Enum8('undefined' = 0, 'external' = 1, 'internal' = 2), `OutIfBoundary` Enum8('undefined' = 0, 'external' = 1, 'internal' = 2), `EType` UInt32, `Proto` UInt32, `Bytes` UInt64 CODEC(T64, LZ4), `Packets` UInt64 CODEC(T64, LZ4), `PacketSize` UInt64 ALIAS intDiv(Bytes, Packets), `PacketSizeBucket` LowCardinality(String) ALIAS multiIf(PacketSize < 64, '0-63', PacketSize < 128, '64-127', PacketSize < 256, '128-255', PacketSize < 512, '256-511', PacketSize < 768, '512-767', PacketSize < 1024, '768-1023', PacketSize < 1280, '1024-1279', PacketSize < 1501, '1280-1500', PacketSize < 2048, '1501-2047', PacketSize < 3072, '2048-3071', PacketSize < 4096, '3072-4095', PacketSize < 8192, '4096-8191', PacketSize < 10240, '8192-10239', PacketSize < 16384, '10240-16383', PacketSize < 32768, '16384-32767', PacketSize < 65536, '32768-65535', '65536-Inf'), `ForwardingStatus` UInt32) ENGINE = ReplicatedSummingMergeTree('/clickhouse/tables/shard-{shard}/flows_1h0m0s_local', 'replica-{replica}', (Bytes, Packets)) PARTITION BY toYYYYMMDDhhmmss(toStartOfInterval(TimeReceived, toIntervalSecond(622080))) PRIMARY KEY (TimeReceived, ExporterAddress, EType, Proto, InIfName, SrcAS, ForwardingStatus, OutIfName, DstAS, SamplingRate) ORDER BY (TimeReceived, ExporterAddress, EType, Proto, InIfName, SrcAS, ForwardingStatus, OutIfName, DstAS, SamplingRate, SrcNetName, DstNetName, SrcNetRole, DstNetRole, SrcNetSite, DstNetSite, SrcNetRegion, DstNetRegion, SrcNetTenant, DstNetTenant, SrcCountry, DstCountry, SrcGeoCity, DstGeoCity, SrcGeoState, DstGeoState, Dst1stAS, Dst2ndAS, Dst3rdAS) TTL TimeReceived + toIntervalSecond(31104000) SETTINGS index_granularity = 8192, ttl_only_drop_parts = 1"
flows_1m0s_local,"CREATE TABLE default.flows_1m0s_local (`TimeReceived` DateTime CODEC(DoubleDelta, LZ4), `SamplingRate` UInt64, `ExporterAddress` LowCardinality(IPv6), `ExporterName` LowCardinality(String), `ExporterGroup` LowCardinality(String), `ExporterRole` LowCardinality(String), `ExporterSite` LowCardinality(String), `ExporterRegion` LowCardinality(String), `ExporterTenant` LowCardinality(String), `SrcAS` UInt32, `DstAS` UInt32, `SrcNetName` LowCardinality(String), `DstNetName` LowCardinality(String), `SrcNetRole` LowCardinality(String), `DstNetRole` LowCardinality(String), `SrcNetSite` LowCardinality(String), `DstNetSite` LowCardinality(String), `SrcNetRegion` LowCardinality(String), `DstNetRegion` LowCardinality(String), `SrcNetTenant` LowCardinality(String), `DstNetTenant` LowCardinality(String), `SrcCountry` FixedString(2), `DstCountry` FixedString(2), `SrcGeoCity` LowCardinality(String), `DstGeoCity` LowCardinality(String), `SrcGeoState` LowCardinality(String), `DstGeoState` LowCardinality(String), `Dst1stAS` UInt32, `Dst2ndAS` UInt32, `Dst3rdAS` UInt32, `InIfName` LowCardinality(String), `OutIfName` LowCardinality(String), `InIfDescription` LowCardinality(String), `OutIfDescription` LowCardinality(String), `InIfSpeed` UInt32, `OutIfSpeed` UInt32, `InIfConnectivity` LowCardinality(String), `OutIfConnectivity` LowCardinality(String), `InIfProvider` LowCardinality(String), `OutIfProvider` LowCardinality(String), `InIfBoundary` Enum8('undefined' = 0, 'external' = 1, 'internal' = 2), `OutIfBoundary` Enum8('undefined' = 0, 'external' = 1, 'internal' = 2), `EType` UInt32, `Proto` UInt32, `Bytes` UInt64 CODEC(T64, LZ4), `Packets` UInt64 CODEC(T64, LZ4), `PacketSize` UInt64 ALIAS intDiv(Bytes, Packets), `PacketSizeBucket` LowCardinality(String) ALIAS multiIf(PacketSize < 64, '0-63', PacketSize < 128, '64-127', PacketSize < 256, '128-255', PacketSize < 512, '256-511', PacketSize < 768, '512-767', PacketSize < 1024, '768-1023', PacketSize < 1280, '1024-1279', PacketSize < 1501, '1280-1500', PacketSize < 2048, '1501-2047', PacketSize < 3072, '2048-3071', PacketSize < 4096, '3072-4095', PacketSize < 8192, '4096-8191', PacketSize < 10240, '8192-10239', PacketSize < 16384, '10240-16383', PacketSize < 32768, '16384-32767', PacketSize < 65536, '32768-65535', '65536-Inf'), `ForwardingStatus` UInt32) ENGINE = ReplicatedSummingMergeTree('/clickhouse/tables/shard-{shard}/flows_1m0s_local', 'replica-{replica}', (Bytes, Packets)) PARTITION BY toYYYYMMDDhhmmss(toStartOfInterval(TimeReceived, toIntervalSecond(12096))) PRIMARY KEY (TimeReceived, ExporterAddress, EType, Proto, InIfName, SrcAS, ForwardingStatus, OutIfName, DstAS, SamplingRate) ORDER BY (TimeReceived, ExporterAddress, EType, Proto, InIfName, SrcAS, ForwardingStatus, OutIfName, DstAS, SamplingRate, SrcNetName, DstNetName, SrcNetRole, DstNetRole, SrcNetSite, DstNetSite, SrcNetRegion, DstNetRegion, SrcNetTenant, DstNetTenant, SrcCountry, DstCountry, SrcGeoCity, DstGeoCity, SrcGeoState, DstGeoState, Dst1stAS, Dst2ndAS, Dst3rdAS) TTL TimeReceived + toIntervalSecond(604800) SETTINGS index_granularity = 8192, ttl_only_drop_parts = 1"
flows_5m0s_local,"CREATE TABLE default.flows_5m0s_local (`TimeReceived` DateTime CODEC(DoubleDelta, LZ4), `SamplingRate` UInt64, `ExporterAddress` LowCardinality(IPv6), `ExporterName` LowCardinality(String), `ExporterGroup` LowCardinality(String), `ExporterRole` LowCardinality(String), `ExporterSite` LowCardinality(String), `ExporterRegion` LowCardinality(String), `ExporterTenant` LowCardinality(String), `SrcAS` UInt32, `DstAS` UInt32, `SrcNetName` LowCardinality(String), `DstNetName` LowCardinality(String), `SrcNetRole` LowCardinality(String), `DstNetRole` LowCardinality(String), `SrcNetSite` LowCardinality(String), `DstNetSite` LowCardinality(String), `SrcNetRegion` LowCardinality(String), `DstNetRegion` LowCardinality(String), `SrcNetTenant` LowCardinality(String), `DstNetTenant` LowCardinality(String), `SrcCountry` FixedString(2), `DstCountry` FixedString(2), `SrcGeoCity` LowCardinality(String), `DstGeoCity` LowCardinality(String), `SrcGeoState` LowCardinality(String), `DstGeoState` LowCardinality(String), `Dst1stAS` UInt32, `Dst2ndAS` UInt32, `Dst3rdAS` UInt32, `InIfName` LowCardinality(String), `OutIfName` LowCardinality(String), `InIfDescription` LowCardinality(String), `OutIfDescription` LowCardinality(String), `InIfSpeed` UInt32, `OutIfSpeed` UInt32, `InIfConnectivity` LowCardinality(String), `OutIfConnectivity` LowCardinality(String), `InIfProvider` LowCardinality(String), `OutIfProvider` LowCardinality(String), `InIfBoundary` Enum8('undefined' = 0, 'external' = 1, 'internal' = 2), `OutIfBoundary` Enum8('undefined' = 0, 'external' = 1, 'internal' = 2), `EType` UInt32, `Proto` UInt32, `Bytes` UInt64 CODEC(T64, LZ4), `Packets` UInt64 CODEC(T64, LZ4), `PacketSize` UInt64 ALIAS intDiv(Bytes, Packets), `PacketSizeBucket` LowCardinality(String) ALIAS multiIf(PacketSize < 64, '0-63', PacketSize < 128, '64-127', PacketSize < 256, '128-255', PacketSize < 512, '256-511', PacketSize < 768, '512-767', PacketSize < 1024, '768-1023', PacketSize < 1280, '1024-1279', PacketSize < 1501, '1280-1500', PacketSize < 2048, '1501-2047', PacketSize < 3072, '2048-3071', PacketSize < 4096, '3072-4095', PacketSize < 8192, '4096-8191', PacketSize < 10240, '8192-10239', PacketSize < 16384, '10240-16383', PacketSize < 32768, '16384-32767', PacketSize < 65536, '32768-65535', '65536-Inf'), `ForwardingStatus` UInt32) ENGINE = ReplicatedSummingMergeTree('/clickhouse/tables/shard-{shard}/flows_5m0s_local', 'replica-{replica}', (Bytes, Packets)) PARTITION BY toYYYYMMDDhhmmss(toStartOfInterval(TimeReceived, toIntervalSecond(155520))) PRIMARY KEY (TimeReceived, ExporterAddress, EType, Proto, InIfName, SrcAS, ForwardingStatus, OutIfName, DstAS, SamplingRate) ORDER BY (TimeReceived, ExporterAddress, EType, Proto, InIfName, SrcAS, ForwardingStatus, OutIfName, DstAS, SamplingRate, SrcNetName, DstNetName, SrcNetRole, DstNetRole, SrcNetSite, DstNetSite, SrcNetRegion, DstNetRegion, SrcNetTenant, DstNetTenant, SrcCountry, DstCountry, SrcGeoCity, DstGeoCity, SrcGeoState, DstGeoState, Dst1stAS, Dst2ndAS, Dst3rdAS) TTL TimeReceived + toIntervalSecond(7776000) SETTINGS index_granularity = 8192, ttl_only_drop_parts = 1"
flows_I6D3KDQCRUBCNCGF4BSOWTRMVIv5_raw,"CREATE TABLE default.flows_I6D3KDQCRUBCNCGF4BSOWTRMVIv5_raw (`TimeReceived` DateTime CODEC(DoubleDelta, LZ4), `SamplingRate` UInt64, `ExporterAddress` LowCardinality(IPv6), `ExporterName` LowCardinality(String), `ExporterGroup` LowCardinality(String), `ExporterRole` LowCardinality(String), `ExporterSite` LowCardinality(String), `ExporterRegion` LowCardinality(String), `ExporterTenant` LowCardinality(String), `SrcAddr` IPv6 CODEC(ZSTD(1)), `DstAddr` IPv6 CODEC(ZSTD(1)), `SrcNetMask` UInt8, `DstNetMask` UInt8, `SrcAS` UInt32, `DstAS` UInt32, `DstASPath` Array(UInt32), `DstCommunities` Array(UInt32), `DstLargeCommunities` Array(UInt128), `InIfName` LowCardinality(String), `OutIfName` LowCardinality(String), `InIfDescription` LowCardinality(String), `OutIfDescription` LowCardinality(String), `InIfSpeed` UInt32, `OutIfSpeed` UInt32, `InIfConnectivity` LowCardinality(String), `OutIfConnectivity` LowCardinality(String), `InIfProvider` LowCardinality(String), `OutIfProvider` LowCardinality(String), `InIfBoundary` Enum8('undefined' = 0, 'external' = 1, 'internal' = 2), `OutIfBoundary` Enum8('undefined' = 0, 'external' = 1, 'internal' = 2), `EType` UInt32, `Proto` UInt32, `SrcPort` UInt16, `DstPort` UInt16, `Bytes` UInt64 CODEC(T64, LZ4), `Packets` UInt64 CODEC(T64, LZ4), `ForwardingStatus` UInt32) ENGINE = Null"
flows_local,"CREATE TABLE default.flows_local (`TimeReceived` DateTime CODEC(DoubleDelta, LZ4), `SamplingRate` UInt64, `ExporterAddress` LowCardinality(IPv6), `ExporterName` LowCardinality(String), `ExporterGroup` LowCardinality(String), `ExporterRole` LowCardinality(String), `ExporterSite` LowCardinality(String), `ExporterRegion` LowCardinality(String), `ExporterTenant` LowCardinality(String), `SrcAddr` IPv6 CODEC(ZSTD(1)), `DstAddr` IPv6 CODEC(ZSTD(1)), `SrcNetMask` UInt8, `DstNetMask` UInt8, `SrcNetPrefix` String ALIAS multiIf(EType = 2048, concat(replaceRegexpOne(CAST(IPv6CIDRToRange(SrcAddr, CAST(96 + SrcNetMask, 'UInt8')).1, 'String'), '^::ffff:', ''), '/', CAST(SrcNetMask, 'String')), EType = 34525, concat(CAST(IPv6CIDRToRange(SrcAddr, SrcNetMask).1, 'String'), '/', CAST(SrcNetMask, 'String')), ''), `DstNetPrefix` String ALIAS multiIf(EType = 2048, concat(replaceRegexpOne(CAST(IPv6CIDRToRange(DstAddr, CAST(96 + DstNetMask, 'UInt8')).1, 'String'), '^::ffff:', ''), '/', CAST(DstNetMask, 'String')), EType = 34525, concat(CAST(IPv6CIDRToRange(DstAddr, DstNetMask).1, 'String'), '/', CAST(DstNetMask, 'String')), ''), `SrcAS` UInt32, `DstAS` UInt32, `SrcNetName` LowCardinality(String), `DstNetName` LowCardinality(String), `SrcNetRole` LowCardinality(String), `DstNetRole` LowCardinality(String), `SrcNetSite` LowCardinality(String), `DstNetSite` LowCardinality(String), `SrcNetRegion` LowCardinality(String), `DstNetRegion` LowCardinality(String), `SrcNetTenant` LowCardinality(String), `DstNetTenant` LowCardinality(String), `SrcCountry` FixedString(2), `DstCountry` FixedString(2), `SrcGeoCity` LowCardinality(String), `DstGeoCity` LowCardinality(String), `SrcGeoState` LowCardinality(String), `DstGeoState` LowCardinality(String), `DstASPath` Array(UInt32), `Dst1stAS` UInt32, `Dst2ndAS` UInt32, `Dst3rdAS` UInt32, `DstCommunities` Array(UInt32), `DstLargeCommunities` Array(UInt128), `InIfName` LowCardinality(String), `OutIfName` LowCardinality(String), `InIfDescription` LowCardinality(String), `OutIfDescription` LowCardinality(String), `InIfSpeed` UInt32, `OutIfSpeed` UInt32, `InIfConnectivity` LowCardinality(String), `OutIfConnectivity` LowCardinality(String), `InIfProvider` LowCardinality(String), `OutIfProvider` LowCardinality(String), `InIfBoundary` Enum8('undefined' = 0, 'external' = 1, 'internal' = 2), `OutIfBoundary` Enum8('undefined' = 0, 'external' = 1, 'internal' = 2), `EType` UInt32, `Proto` UInt32, `SrcPort` UInt16, `DstPort` UInt16, `Bytes` UInt64 CODEC(T64, LZ4), `Packets` UInt64 CODEC(T64, LZ4), `PacketSize` UInt64 ALIAS intDiv(Bytes, Packets), `PacketSizeBucket` LowCardinality(String) ALIAS multiIf(PacketSize < 64, '0-63', PacketSize < 128, '64-127', PacketSize < 256, '128-255', PacketSize < 512, '256-511', PacketSize < 768, '512-767', PacketSize < 1024, '768-1023', PacketSize < 1280, '1024-1279', PacketSize < 1501, '1280-1500', PacketSize < 2048, '1501-2047', PacketSize < 3072, '2048-3071', PacketSize < 4096, '3072-4095', PacketSize < 8192, '4096-8191', PacketSize < 10240, '8192-10239', PacketSize < 16384, '10240-16383', PacketSize < 32768, '16384-32767', PacketSize < 65536, '32768-65535', '65536-Inf'), `ForwardingStatus` UInt32) ENGINE = ReplicatedMergeTree('/clickhouse/tables/shard-{shard}/flows_local', 'replica-{replica}') PARTITION BY toYYYYMMDDhhmmss(toStartOfInterval(TimeReceived, toIntervalSecond(25920))) ORDER BY (toStartOfFiveMinutes(TimeReceived), ExporterAddress, InIfName, OutIfName) TTL TimeReceived + toIntervalSecond(1296000) SETTINGS index_granularity = 8192, 
ttl_only_drop_parts = 1"
|
||||||
|
flows,"CREATE TABLE default.flows (`TimeReceived` DateTime CODEC(DoubleDelta, LZ4), `SamplingRate` UInt64, `ExporterAddress` LowCardinality(IPv6), `ExporterName` LowCardinality(String), `ExporterGroup` LowCardinality(String), `ExporterRole` LowCardinality(String), `ExporterSite` LowCardinality(String), `ExporterRegion` LowCardinality(String), `ExporterTenant` LowCardinality(String), `SrcAddr` IPv6 CODEC(ZSTD(1)), `DstAddr` IPv6 CODEC(ZSTD(1)), `SrcNetMask` UInt8, `DstNetMask` UInt8, `SrcNetPrefix` String ALIAS multiIf(EType = 2048, concat(replaceRegexpOne(CAST(IPv6CIDRToRange(SrcAddr, CAST(96 + SrcNetMask, 'UInt8')).1, 'String'), '^::ffff:', ''), '/', CAST(SrcNetMask, 'String')), EType = 34525, concat(CAST(IPv6CIDRToRange(SrcAddr, SrcNetMask).1, 'String'), '/', CAST(SrcNetMask, 'String')), ''), `DstNetPrefix` String ALIAS multiIf(EType = 2048, concat(replaceRegexpOne(CAST(IPv6CIDRToRange(DstAddr, CAST(96 + DstNetMask, 'UInt8')).1, 'String'), '^::ffff:', ''), '/', CAST(DstNetMask, 'String')), EType = 34525, concat(CAST(IPv6CIDRToRange(DstAddr, DstNetMask).1, 'String'), '/', CAST(DstNetMask, 'String')), ''), `SrcAS` UInt32, `DstAS` UInt32, `SrcNetName` LowCardinality(String), `DstNetName` LowCardinality(String), `SrcNetRole` LowCardinality(String), `DstNetRole` LowCardinality(String), `SrcNetSite` LowCardinality(String), `DstNetSite` LowCardinality(String), `SrcNetRegion` LowCardinality(String), `DstNetRegion` LowCardinality(String), `SrcNetTenant` LowCardinality(String), `DstNetTenant` LowCardinality(String), `SrcCountry` FixedString(2), `DstCountry` FixedString(2), `SrcGeoCity` LowCardinality(String), `DstGeoCity` LowCardinality(String), `SrcGeoState` LowCardinality(String), `DstGeoState` LowCardinality(String), `DstASPath` Array(UInt32), `Dst1stAS` UInt32, `Dst2ndAS` UInt32, `Dst3rdAS` UInt32, `DstCommunities` Array(UInt32), `DstLargeCommunities` Array(UInt128), `InIfName` LowCardinality(String), `OutIfName` LowCardinality(String), `InIfDescription` LowCardinality(String), `OutIfDescription` LowCardinality(String), `InIfSpeed` UInt32, `OutIfSpeed` UInt32, `InIfConnectivity` LowCardinality(String), `OutIfConnectivity` LowCardinality(String), `InIfProvider` LowCardinality(String), `OutIfProvider` LowCardinality(String), `InIfBoundary` Enum8('undefined' = 0, 'external' = 1, 'internal' = 2), `OutIfBoundary` Enum8('undefined' = 0, 'external' = 1, 'internal' = 2), `EType` UInt32, `Proto` UInt32, `SrcPort` UInt16, `DstPort` UInt16, `Bytes` UInt64 CODEC(T64, LZ4), `Packets` UInt64 CODEC(T64, LZ4), `PacketSize` UInt64 ALIAS intDiv(Bytes, Packets), `PacketSizeBucket` LowCardinality(String) ALIAS multiIf(PacketSize < 64, '0-63', PacketSize < 128, '64-127', PacketSize < 256, '128-255', PacketSize < 512, '256-511', PacketSize < 768, '512-767', PacketSize < 1024, '768-1023', PacketSize < 1280, '1024-1279', PacketSize < 1501, '1280-1500', PacketSize < 2048, '1501-2047', PacketSize < 3072, '2048-3071', PacketSize < 4096, '3072-4095', PacketSize < 8192, '4096-8191', PacketSize < 10240, '8192-10239', PacketSize < 16384, '10240-16383', PacketSize < 32768, '16384-32767', PacketSize < 65536, '32768-65535', '65536-Inf'), `ForwardingStatus` UInt32) ENGINE = Distributed('akvorado', 'default', 'flows_local', rand())"
flows_1h0m0s,"CREATE TABLE default.flows_1h0m0s (`TimeReceived` DateTime CODEC(DoubleDelta, LZ4), `SamplingRate` UInt64, `ExporterAddress` LowCardinality(IPv6), `ExporterName` LowCardinality(String), `ExporterGroup` LowCardinality(String), `ExporterRole` LowCardinality(String), `ExporterSite` LowCardinality(String), `ExporterRegion` LowCardinality(String), `ExporterTenant` LowCardinality(String), `SrcAS` UInt32, `DstAS` UInt32, `SrcNetName` LowCardinality(String), `DstNetName` LowCardinality(String), `SrcNetRole` LowCardinality(String), `DstNetRole` LowCardinality(String), `SrcNetSite` LowCardinality(String), `DstNetSite` LowCardinality(String), `SrcNetRegion` LowCardinality(String), `DstNetRegion` LowCardinality(String), `SrcNetTenant` LowCardinality(String), `DstNetTenant` LowCardinality(String), `SrcCountry` FixedString(2), `DstCountry` FixedString(2), `SrcGeoCity` LowCardinality(String), `DstGeoCity` LowCardinality(String), `SrcGeoState` LowCardinality(String), `DstGeoState` LowCardinality(String), `Dst1stAS` UInt32, `Dst2ndAS` UInt32, `Dst3rdAS` UInt32, `InIfName` LowCardinality(String), `OutIfName` LowCardinality(String), `InIfDescription` LowCardinality(String), `OutIfDescription` LowCardinality(String), `InIfSpeed` UInt32, `OutIfSpeed` UInt32, `InIfConnectivity` LowCardinality(String), `OutIfConnectivity` LowCardinality(String), `InIfProvider` LowCardinality(String), `OutIfProvider` LowCardinality(String), `InIfBoundary` Enum8('undefined' = 0, 'external' = 1, 'internal' = 2), `OutIfBoundary` Enum8('undefined' = 0, 'external' = 1, 'internal' = 2), `EType` UInt32, `Proto` UInt32, `Bytes` UInt64 CODEC(T64, LZ4), `Packets` UInt64 CODEC(T64, LZ4), `PacketSize` UInt64 ALIAS intDiv(Bytes, Packets), `PacketSizeBucket` LowCardinality(String) ALIAS multiIf(PacketSize < 64, '0-63', PacketSize < 128, '64-127', PacketSize < 256, '128-255', PacketSize < 512, '256-511', PacketSize < 768, '512-767', PacketSize < 1024, '768-1023', PacketSize < 1280, '1024-1279', PacketSize < 1501, '1280-1500', PacketSize < 2048, '1501-2047', PacketSize < 3072, '2048-3071', PacketSize < 4096, '3072-4095', PacketSize < 8192, '4096-8191', PacketSize < 10240, '8192-10239', PacketSize < 16384, '10240-16383', PacketSize < 32768, '16384-32767', PacketSize < 65536, '32768-65535', '65536-Inf'), `ForwardingStatus` UInt32) ENGINE = Distributed('akvorado', 'default', 'flows_1h0m0s_local', rand())"
flows_1m0s,"CREATE TABLE default.flows_1m0s (`TimeReceived` DateTime CODEC(DoubleDelta, LZ4), `SamplingRate` UInt64, `ExporterAddress` LowCardinality(IPv6), `ExporterName` LowCardinality(String), `ExporterGroup` LowCardinality(String), `ExporterRole` LowCardinality(String), `ExporterSite` LowCardinality(String), `ExporterRegion` LowCardinality(String), `ExporterTenant` LowCardinality(String), `SrcAS` UInt32, `DstAS` UInt32, `SrcNetName` LowCardinality(String), `DstNetName` LowCardinality(String), `SrcNetRole` LowCardinality(String), `DstNetRole` LowCardinality(String), `SrcNetSite` LowCardinality(String), `DstNetSite` LowCardinality(String), `SrcNetRegion` LowCardinality(String), `DstNetRegion` LowCardinality(String), `SrcNetTenant` LowCardinality(String), `DstNetTenant` LowCardinality(String), `SrcCountry` FixedString(2), `DstCountry` FixedString(2), `SrcGeoCity` LowCardinality(String), `DstGeoCity` LowCardinality(String), `SrcGeoState` LowCardinality(String), `DstGeoState` LowCardinality(String), `Dst1stAS` UInt32, `Dst2ndAS` UInt32, `Dst3rdAS` UInt32, `InIfName` LowCardinality(String), `OutIfName` LowCardinality(String), `InIfDescription` LowCardinality(String), `OutIfDescription` LowCardinality(String), `InIfSpeed` UInt32, `OutIfSpeed` UInt32, `InIfConnectivity` LowCardinality(String), `OutIfConnectivity` LowCardinality(String), `InIfProvider` LowCardinality(String), `OutIfProvider` LowCardinality(String), `InIfBoundary` Enum8('undefined' = 0, 'external' = 1, 'internal' = 2), `OutIfBoundary` Enum8('undefined' = 0, 'external' = 1, 'internal' = 2), `EType` UInt32, `Proto` UInt32, `Bytes` UInt64 CODEC(T64, LZ4), `Packets` UInt64 CODEC(T64, LZ4), `PacketSize` UInt64 ALIAS intDiv(Bytes, Packets), `PacketSizeBucket` LowCardinality(String) ALIAS multiIf(PacketSize < 64, '0-63', PacketSize < 128, '64-127', PacketSize < 256, '128-255', PacketSize < 512, '256-511', PacketSize < 768, '512-767', PacketSize < 1024, '768-1023', PacketSize < 1280, '1024-1279', PacketSize < 1501, '1280-1500', PacketSize < 2048, '1501-2047', PacketSize < 3072, '2048-3071', PacketSize < 4096, '3072-4095', PacketSize < 8192, '4096-8191', PacketSize < 10240, '8192-10239', PacketSize < 16384, '10240-16383', PacketSize < 32768, '16384-32767', PacketSize < 65536, '32768-65535', '65536-Inf'), `ForwardingStatus` UInt32) ENGINE = Distributed('akvorado', 'default', 'flows_1m0s_local', rand())"
flows_5m0s,"CREATE TABLE default.flows_5m0s (`TimeReceived` DateTime CODEC(DoubleDelta, LZ4), `SamplingRate` UInt64, `ExporterAddress` LowCardinality(IPv6), `ExporterName` LowCardinality(String), `ExporterGroup` LowCardinality(String), `ExporterRole` LowCardinality(String), `ExporterSite` LowCardinality(String), `ExporterRegion` LowCardinality(String), `ExporterTenant` LowCardinality(String), `SrcAS` UInt32, `DstAS` UInt32, `SrcNetName` LowCardinality(String), `DstNetName` LowCardinality(String), `SrcNetRole` LowCardinality(String), `DstNetRole` LowCardinality(String), `SrcNetSite` LowCardinality(String), `DstNetSite` LowCardinality(String), `SrcNetRegion` LowCardinality(String), `DstNetRegion` LowCardinality(String), `SrcNetTenant` LowCardinality(String), `DstNetTenant` LowCardinality(String), `SrcCountry` FixedString(2), `DstCountry` FixedString(2), `SrcGeoCity` LowCardinality(String), `DstGeoCity` LowCardinality(String), `SrcGeoState` LowCardinality(String), `DstGeoState` LowCardinality(String), `Dst1stAS` UInt32, `Dst2ndAS` UInt32, `Dst3rdAS` UInt32, `InIfName` LowCardinality(String), `OutIfName` LowCardinality(String), `InIfDescription` LowCardinality(String), `OutIfDescription` LowCardinality(String), `InIfSpeed` UInt32, `OutIfSpeed` UInt32, `InIfConnectivity` LowCardinality(String), `OutIfConnectivity` LowCardinality(String), `InIfProvider` LowCardinality(String), `OutIfProvider` LowCardinality(String), `InIfBoundary` Enum8('undefined' = 0, 'external' = 1, 'internal' = 2), `OutIfBoundary` Enum8('undefined' = 0, 'external' = 1, 'internal' = 2), `EType` UInt32, `Proto` UInt32, `Bytes` UInt64 CODEC(T64, LZ4), `Packets` UInt64 CODEC(T64, LZ4), `PacketSize` UInt64 ALIAS intDiv(Bytes, Packets), `PacketSizeBucket` LowCardinality(String) ALIAS multiIf(PacketSize < 64, '0-63', PacketSize < 128, '64-127', PacketSize < 256, '128-255', PacketSize < 512, '256-511', PacketSize < 768, '512-767', PacketSize < 1024, '768-1023', PacketSize < 1280, '1024-1279', PacketSize < 1501, '1280-1500', PacketSize < 2048, '1501-2047', PacketSize < 3072, '2048-3071', PacketSize < 4096, '3072-4095', PacketSize < 8192, '4096-8191', PacketSize < 10240, '8192-10239', PacketSize < 16384, '10240-16383', PacketSize < 32768, '16384-32767', PacketSize < 65536, '32768-65535', '65536-Inf'), `ForwardingStatus` UInt32) ENGINE = Distributed('akvorado', 'default', 'flows_5m0s_local', rand())"
exporters_consumer,"CREATE MATERIALIZED VIEW default.exporters_consumer TO default.exporters (`TimeReceived` DateTime, `ExporterAddress` LowCardinality(IPv6), `ExporterName` LowCardinality(String), `ExporterGroup` LowCardinality(String), `ExporterRole` LowCardinality(String), `ExporterSite` LowCardinality(String), `ExporterRegion` LowCardinality(String), `ExporterTenant` LowCardinality(String), `IfName` String, `IfDescription` String, `IfSpeed` UInt32, `IfConnectivity` String, `IfProvider` String, `IfBoundary` Enum8('undefined' = 0, 'external' = 1, 'internal' = 2)) AS SELECT DISTINCT TimeReceived, ExporterAddress, ExporterName, ExporterGroup, ExporterRole, ExporterSite, ExporterRegion, ExporterTenant, [InIfName, OutIfName][num] AS IfName, [InIfDescription, OutIfDescription][num] AS IfDescription, [InIfSpeed, OutIfSpeed][num] AS IfSpeed, [InIfConnectivity, OutIfConnectivity][num] AS IfConnectivity, [InIfProvider, OutIfProvider][num] AS IfProvider, [InIfBoundary, OutIfBoundary][num] AS IfBoundary FROM default.flows ARRAY JOIN arrayEnumerate([1, 2]) AS num"
flows_1h0m0s_consumer,"CREATE MATERIALIZED VIEW default.flows_1h0m0s_consumer TO default.flows_1h0m0s_local (`TimeReceived` DateTime, `SamplingRate` UInt64, `ExporterAddress` LowCardinality(IPv6), `ExporterName` LowCardinality(String), `ExporterGroup` LowCardinality(String), `ExporterRole` LowCardinality(String), `ExporterSite` LowCardinality(String), `ExporterRegion` LowCardinality(String), `ExporterTenant` LowCardinality(String), `SrcAS` UInt32, `DstAS` UInt32, `SrcNetName` LowCardinality(String), `DstNetName` LowCardinality(String), `SrcNetRole` LowCardinality(String), `DstNetRole` LowCardinality(String), `SrcNetSite` LowCardinality(String), `DstNetSite` LowCardinality(String), `SrcNetRegion` LowCardinality(String), `DstNetRegion` LowCardinality(String), `SrcNetTenant` LowCardinality(String), `DstNetTenant` LowCardinality(String), `SrcCountry` FixedString(2), `DstCountry` FixedString(2), `SrcGeoCity` LowCardinality(String), `DstGeoCity` LowCardinality(String), `SrcGeoState` LowCardinality(String), `DstGeoState` LowCardinality(String), `Dst1stAS` UInt32, `Dst2ndAS` UInt32, `Dst3rdAS` UInt32, `InIfName` LowCardinality(String), `OutIfName` LowCardinality(String), `InIfDescription` LowCardinality(String), `OutIfDescription` LowCardinality(String), `InIfSpeed` UInt32, `OutIfSpeed` UInt32, `InIfConnectivity` LowCardinality(String), `OutIfConnectivity` LowCardinality(String), `InIfProvider` LowCardinality(String), `OutIfProvider` LowCardinality(String), `InIfBoundary` Enum8('undefined' = 0, 'external' = 1, 'internal' = 2), `OutIfBoundary` Enum8('undefined' = 0, 'external' = 1, 'internal' = 2), `EType` UInt32, `Proto` UInt32, `Bytes` UInt64, `Packets` UInt64, `ForwardingStatus` UInt32) AS SELECT toStartOfInterval(TimeReceived, toIntervalSecond(3600)) AS TimeReceived, SamplingRate, ExporterAddress, ExporterName, ExporterGroup, ExporterRole, ExporterSite, ExporterRegion, ExporterTenant, SrcAS, DstAS, SrcNetName, DstNetName, SrcNetRole, DstNetRole, SrcNetSite, DstNetSite, SrcNetRegion, DstNetRegion, SrcNetTenant, DstNetTenant, SrcCountry, DstCountry, SrcGeoCity, DstGeoCity, SrcGeoState, DstGeoState, Dst1stAS, Dst2ndAS, Dst3rdAS, InIfName, OutIfName, InIfDescription, OutIfDescription, InIfSpeed, OutIfSpeed, InIfConnectivity, OutIfConnectivity, InIfProvider, OutIfProvider, InIfBoundary, OutIfBoundary, EType, Proto, Bytes, Packets, ForwardingStatus FROM default.flows_local"
flows_1m0s_consumer,"CREATE MATERIALIZED VIEW default.flows_1m0s_consumer TO default.flows_1m0s_local (`TimeReceived` DateTime, `SamplingRate` UInt64, `ExporterAddress` LowCardinality(IPv6), `ExporterName` LowCardinality(String), `ExporterGroup` LowCardinality(String), `ExporterRole` LowCardinality(String), `ExporterSite` LowCardinality(String), `ExporterRegion` LowCardinality(String), `ExporterTenant` LowCardinality(String), `SrcAS` UInt32, `DstAS` UInt32, `SrcNetName` LowCardinality(String), `DstNetName` LowCardinality(String), `SrcNetRole` LowCardinality(String), `DstNetRole` LowCardinality(String), `SrcNetSite` LowCardinality(String), `DstNetSite` LowCardinality(String), `SrcNetRegion` LowCardinality(String), `DstNetRegion` LowCardinality(String), `SrcNetTenant` LowCardinality(String), `DstNetTenant` LowCardinality(String), `SrcCountry` FixedString(2), `DstCountry` FixedString(2), `SrcGeoCity` LowCardinality(String), `DstGeoCity` LowCardinality(String), `SrcGeoState` LowCardinality(String), `DstGeoState` LowCardinality(String), `Dst1stAS` UInt32, `Dst2ndAS` UInt32, `Dst3rdAS` UInt32, `InIfName` LowCardinality(String), `OutIfName` LowCardinality(String), `InIfDescription` LowCardinality(String), `OutIfDescription` LowCardinality(String), `InIfSpeed` UInt32, `OutIfSpeed` UInt32, `InIfConnectivity` LowCardinality(String), `OutIfConnectivity` LowCardinality(String), `InIfProvider` LowCardinality(String), `OutIfProvider` LowCardinality(String), `InIfBoundary` Enum8('undefined' = 0, 'external' = 1, 'internal' = 2), `OutIfBoundary` Enum8('undefined' = 0, 'external' = 1, 'internal' = 2), `EType` UInt32, `Proto` UInt32, `Bytes` UInt64, `Packets` UInt64, `ForwardingStatus` UInt32) AS SELECT toStartOfInterval(TimeReceived, toIntervalSecond(60)) AS TimeReceived, SamplingRate, ExporterAddress, ExporterName, ExporterGroup, ExporterRole, ExporterSite, ExporterRegion, ExporterTenant, SrcAS, DstAS, SrcNetName, DstNetName, SrcNetRole, DstNetRole, SrcNetSite, DstNetSite, SrcNetRegion, DstNetRegion, SrcNetTenant, DstNetTenant, SrcCountry, DstCountry, SrcGeoCity, DstGeoCity, SrcGeoState, DstGeoState, Dst1stAS, Dst2ndAS, Dst3rdAS, InIfName, OutIfName, InIfDescription, OutIfDescription, InIfSpeed, OutIfSpeed, InIfConnectivity, OutIfConnectivity, InIfProvider, OutIfProvider, InIfBoundary, OutIfBoundary, EType, Proto, Bytes, Packets, ForwardingStatus FROM default.flows_local"
flows_5m0s_consumer,"CREATE MATERIALIZED VIEW default.flows_5m0s_consumer TO default.flows_5m0s_local (`TimeReceived` DateTime, `SamplingRate` UInt64, `ExporterAddress` LowCardinality(IPv6), `ExporterName` LowCardinality(String), `ExporterGroup` LowCardinality(String), `ExporterRole` LowCardinality(String), `ExporterSite` LowCardinality(String), `ExporterRegion` LowCardinality(String), `ExporterTenant` LowCardinality(String), `SrcAS` UInt32, `DstAS` UInt32, `SrcNetName` LowCardinality(String), `DstNetName` LowCardinality(String), `SrcNetRole` LowCardinality(String), `DstNetRole` LowCardinality(String), `SrcNetSite` LowCardinality(String), `DstNetSite` LowCardinality(String), `SrcNetRegion` LowCardinality(String), `DstNetRegion` LowCardinality(String), `SrcNetTenant` LowCardinality(String), `DstNetTenant` LowCardinality(String), `SrcCountry` FixedString(2), `DstCountry` FixedString(2), `SrcGeoCity` LowCardinality(String), `DstGeoCity` LowCardinality(String), `SrcGeoState` LowCardinality(String), `DstGeoState` LowCardinality(String), `Dst1stAS` UInt32, `Dst2ndAS` UInt32, `Dst3rdAS` UInt32, `InIfName` LowCardinality(String), `OutIfName` LowCardinality(String), `InIfDescription` LowCardinality(String), `OutIfDescription` LowCardinality(String), `InIfSpeed` UInt32, `OutIfSpeed` UInt32, `InIfConnectivity` LowCardinality(String), `OutIfConnectivity` LowCardinality(String), `InIfProvider` LowCardinality(String), `OutIfProvider` LowCardinality(String), `InIfBoundary` Enum8('undefined' = 0, 'external' = 1, 'internal' = 2), `OutIfBoundary` Enum8('undefined' = 0, 'external' = 1, 'internal' = 2), `EType` UInt32, `Proto` UInt32, `Bytes` UInt64, `Packets` UInt64, `ForwardingStatus` UInt32) AS SELECT toStartOfInterval(TimeReceived, toIntervalSecond(300)) AS TimeReceived, SamplingRate, ExporterAddress, ExporterName, ExporterGroup, ExporterRole, ExporterSite, ExporterRegion, ExporterTenant, SrcAS, DstAS, SrcNetName, DstNetName, SrcNetRole, DstNetRole, SrcNetSite, DstNetSite, SrcNetRegion, DstNetRegion, SrcNetTenant, DstNetTenant, SrcCountry, DstCountry, SrcGeoCity, DstGeoCity, SrcGeoState, DstGeoState, Dst1stAS, Dst2ndAS, Dst3rdAS, InIfName, OutIfName, InIfDescription, OutIfDescription, InIfSpeed, OutIfSpeed, InIfConnectivity, OutIfConnectivity, InIfProvider, OutIfProvider, InIfBoundary, OutIfBoundary, EType, Proto, Bytes, Packets, ForwardingStatus FROM default.flows_local"
flows_I6D3KDQCRUBCNCGF4BSOWTRMVIv5_raw_consumer,"CREATE MATERIALIZED VIEW default.flows_I6D3KDQCRUBCNCGF4BSOWTRMVIv5_raw_consumer TO default.flows (`TimeReceived` DateTime, `SamplingRate` UInt64, `ExporterAddress` LowCardinality(IPv6), `ExporterName` LowCardinality(String), `ExporterGroup` LowCardinality(String), `ExporterRole` LowCardinality(String), `ExporterSite` LowCardinality(String), `ExporterRegion` LowCardinality(String), `ExporterTenant` LowCardinality(String), `SrcAddr` IPv6, `DstAddr` IPv6, `SrcNetMask` UInt8, `DstNetMask` UInt8, `SrcAS` UInt32, `DstAS` UInt32, `SrcNetName` String, `DstNetName` String, `SrcNetRole` String, `DstNetRole` String, `SrcNetSite` String, `DstNetSite` String, `SrcNetRegion` String, `DstNetRegion` String, `SrcNetTenant` String, `DstNetTenant` String, `SrcCountry` String, `DstCountry` String, `SrcGeoCity` String, `DstGeoCity` String, `SrcGeoState` String, `DstGeoState` String, `DstASPath` Array(UInt32), `Dst1stAS` UInt32, `Dst2ndAS` UInt32, `Dst3rdAS` UInt32, `DstCommunities` Array(UInt32), `DstLargeCommunities` Array(UInt128), `InIfName` LowCardinality(String), `OutIfName` LowCardinality(String), `InIfDescription` LowCardinality(String), `OutIfDescription` LowCardinality(String), `InIfSpeed` UInt32, `OutIfSpeed` UInt32, `InIfConnectivity` LowCardinality(String), `OutIfConnectivity` LowCardinality(String), `InIfProvider` LowCardinality(String), `OutIfProvider` LowCardinality(String), `InIfBoundary` Enum8('undefined' = 0, 'external' = 1, 'internal' = 2), `OutIfBoundary` Enum8('undefined' = 0, 'external' = 1, 'internal' = 2), `EType` UInt32, `Proto` UInt32, `SrcPort` UInt16, `DstPort` UInt16, `Bytes` UInt64, `Packets` UInt64, `ForwardingStatus` UInt32) AS WITH arrayCompact(DstASPath) AS c_DstASPath, dictGet('default.networks', ('asn', 'name', 'role', 'site', 'region', 'tenant', 'country', 'city', 'state'), SrcAddr) AS c_SrcNetworks, dictGet('default.networks', ('asn', 'name', 'role', 'site', 'region', 'tenant', 'country', 'city', 'state'), DstAddr) AS c_DstNetworks SELECT TimeReceived, SamplingRate, ExporterAddress, ExporterName, ExporterGroup, ExporterRole, ExporterSite, ExporterRegion, ExporterTenant, SrcAddr, DstAddr, SrcNetMask, DstNetMask, if(SrcAS = 0, c_SrcNetworks.1, SrcAS) AS SrcAS, if(DstAS = 0, c_DstNetworks.1, DstAS) AS DstAS, c_SrcNetworks.2 AS SrcNetName, c_DstNetworks.2 AS DstNetName, c_SrcNetworks.3 AS SrcNetRole, c_DstNetworks.3 AS DstNetRole, c_SrcNetworks.4 AS SrcNetSite, c_DstNetworks.4 AS DstNetSite, c_SrcNetworks.5 AS SrcNetRegion, c_DstNetworks.5 AS DstNetRegion, c_SrcNetworks.6 AS SrcNetTenant, c_DstNetworks.6 AS DstNetTenant, c_SrcNetworks.7 AS SrcCountry, c_DstNetworks.7 AS DstCountry, c_SrcNetworks.8 AS SrcGeoCity, c_DstNetworks.8 AS DstGeoCity, c_SrcNetworks.9 AS SrcGeoState, c_DstNetworks.9 AS DstGeoState, DstASPath, c_DstASPath[1] AS Dst1stAS, c_DstASPath[2] AS Dst2ndAS, c_DstASPath[3] AS Dst3rdAS, DstCommunities, DstLargeCommunities, InIfName, OutIfName, InIfDescription, OutIfDescription, InIfSpeed, OutIfSpeed, InIfConnectivity, OutIfConnectivity, InIfProvider, OutIfProvider, InIfBoundary, OutIfBoundary, EType, Proto, SrcPort, DstPort, Bytes, Packets, ForwardingStatus FROM default.flows_I6D3KDQCRUBCNCGF4BSOWTRMVIv5_raw"
@@ -2,6 +2,8 @@ asns,"CREATE DICTIONARY default.asns (`asn` UInt32 INJECTIVE, `name` String) PRI
icmp,"CREATE DICTIONARY default.icmp (`proto` UInt8, `type` UInt8, `code` UInt8, `name` String) PRIMARY KEY proto, type, code SOURCE(HTTP(URL 'http://127.0.0.1:0/api/v0/orchestrator/clickhouse/icmp.csv' FORMAT 'CSVWithNames')) LIFETIME(MIN 0 MAX 3600) LAYOUT(COMPLEX_KEY_HASHED()) SETTINGS(format_csv_allow_single_quotes = 0)"
networks,"CREATE DICTIONARY default.networks (`network` String, `name` String, `role` String, `site` String, `region` String, `city` String, `state` String, `country` String, `tenant` String, `asn` UInt32) PRIMARY KEY network SOURCE(HTTP(URL 'http://127.0.0.1:0/api/v0/orchestrator/clickhouse/networks.csv' FORMAT 'CSVWithNames')) LIFETIME(MIN 0 MAX 3600) LAYOUT(IP_TRIE()) SETTINGS(format_csv_allow_single_quotes = 0)"
protocols,"CREATE DICTIONARY default.protocols (`proto` UInt8 INJECTIVE, `name` String, `description` String) PRIMARY KEY proto SOURCE(HTTP(URL 'http://127.0.0.1:0/api/v0/orchestrator/clickhouse/protocols.csv' FORMAT 'CSVWithNames')) LIFETIME(MIN 0 MAX 3600) LAYOUT(HASHED()) SETTINGS(format_csv_allow_single_quotes = 0)"
tcp,"CREATE DICTIONARY default.tcp (`port` UInt16 INJECTIVE, `name` String) PRIMARY KEY port SOURCE(HTTP(URL 'http://127.0.0.1:0/api/v0/orchestrator/clickhouse/tcp.csv' FORMAT 'CSVWithNames')) LIFETIME(MIN 0 MAX 3600) LAYOUT(HASHED()) SETTINGS(format_csv_allow_single_quotes = 0)"
udp,"CREATE DICTIONARY default.udp (`port` UInt16 INJECTIVE, `name` String) PRIMARY KEY port SOURCE(HTTP(URL 'http://127.0.0.1:0/api/v0/orchestrator/clickhouse/udp.csv' FORMAT 'CSVWithNames')) LIFETIME(MIN 0 MAX 3600) LAYOUT(HASHED()) SETTINGS(format_csv_allow_single_quotes = 0)"
exporters,"CREATE TABLE default.exporters (`TimeReceived` DateTime, `ExporterAddress` LowCardinality(IPv6), `ExporterName` LowCardinality(String), `ExporterGroup` LowCardinality(String), `ExporterRole` LowCardinality(String), `ExporterSite` LowCardinality(String), `ExporterRegion` LowCardinality(String), `ExporterTenant` LowCardinality(String), `IfName` LowCardinality(String), `IfDescription` LowCardinality(String), `IfSpeed` UInt32, `IfConnectivity` LowCardinality(String), `IfProvider` LowCardinality(String), `IfBoundary` Enum8('undefined' = 0, 'external' = 1, 'internal' = 2)) ENGINE = ReplacingMergeTree(TimeReceived) ORDER BY (ExporterAddress, IfName) TTL TimeReceived + toIntervalDay(1) SETTINGS index_granularity = 8192"
flows,"CREATE TABLE default.flows (`TimeReceived` DateTime CODEC(DoubleDelta, LZ4), `SamplingRate` UInt64, `ExporterAddress` LowCardinality(IPv6), `ExporterName` LowCardinality(String), `ExporterGroup` LowCardinality(String), `ExporterRole` LowCardinality(String), `ExporterSite` LowCardinality(String), `ExporterRegion` LowCardinality(String), `ExporterTenant` LowCardinality(String), `SrcAddr` IPv6 CODEC(ZSTD(1)), `DstAddr` IPv6 CODEC(ZSTD(1)), `SrcNetMask` UInt8, `DstNetMask` UInt8, `SrcNetPrefix` String ALIAS multiIf(EType = 2048, concat(replaceRegexpOne(CAST(IPv6CIDRToRange(SrcAddr, CAST(96 + SrcNetMask, 'UInt8')).1, 'String'), '^::ffff:', ''), '/', CAST(SrcNetMask, 'String')), EType = 34525, concat(CAST(IPv6CIDRToRange(SrcAddr, SrcNetMask).1, 'String'), '/', CAST(SrcNetMask, 'String')), ''), `DstNetPrefix` String ALIAS multiIf(EType = 2048, concat(replaceRegexpOne(CAST(IPv6CIDRToRange(DstAddr, CAST(96 + DstNetMask, 'UInt8')).1, 'String'), '^::ffff:', ''), '/', CAST(DstNetMask, 'String')), EType = 34525, concat(CAST(IPv6CIDRToRange(DstAddr, DstNetMask).1, 'String'), '/', CAST(DstNetMask, 'String')), ''), `SrcAS` UInt32, `DstAS` UInt32, `SrcNetName` LowCardinality(String), `DstNetName` LowCardinality(String), `SrcNetRole` LowCardinality(String), `DstNetRole` LowCardinality(String), `SrcNetSite` LowCardinality(String), `DstNetSite` LowCardinality(String), `SrcNetRegion` LowCardinality(String), `DstNetRegion` LowCardinality(String), `SrcNetTenant` LowCardinality(String), `DstNetTenant` LowCardinality(String), `SrcCountry` FixedString(2), `DstCountry` FixedString(2), `SrcGeoCity` LowCardinality(String), `DstGeoCity` LowCardinality(String), `SrcGeoState` LowCardinality(String), `DstGeoState` LowCardinality(String), `DstASPath` Array(UInt32), `Dst1stAS` UInt32, `Dst2ndAS` UInt32, `Dst3rdAS` UInt32, `DstCommunities` Array(UInt32), `DstLargeCommunities` Array(UInt128), `InIfName` LowCardinality(String), `OutIfName` LowCardinality(String), `InIfDescription` LowCardinality(String), `OutIfDescription` LowCardinality(String), `InIfSpeed` UInt32, `OutIfSpeed` UInt32, `InIfConnectivity` LowCardinality(String), `OutIfConnectivity` LowCardinality(String), `InIfProvider` LowCardinality(String), `OutIfProvider` LowCardinality(String), `InIfBoundary` Enum8('undefined' = 0, 'external' = 1, 'internal' = 2), `OutIfBoundary` Enum8('undefined' = 0, 'external' = 1, 'internal' = 2), `EType` UInt32, `Proto` UInt32, `SrcPort` UInt16, `DstPort` UInt16, `Bytes` UInt64 CODEC(T64, LZ4), `Packets` UInt64 CODEC(T64, LZ4), `PacketSize` UInt64 ALIAS intDiv(Bytes, Packets), `PacketSizeBucket` LowCardinality(String) ALIAS multiIf(PacketSize < 64, '0-63', PacketSize < 128, '64-127', PacketSize < 256, '128-255', PacketSize < 512, '256-511', PacketSize < 768, '512-767', PacketSize < 1024, '768-1023', PacketSize < 1280, '1024-1279', PacketSize < 1501, '1280-1500', PacketSize < 2048, '1501-2047', PacketSize < 3072, '2048-3071', PacketSize < 4096, '3072-4095', PacketSize < 8192, '4096-8191', PacketSize < 10240, '8192-10239', PacketSize < 16384, '10240-16383', PacketSize < 32768, '16384-32767', PacketSize < 65536, '32768-65535', '65536-Inf'), `ForwardingStatus` UInt32) ENGINE = MergeTree PARTITION BY toYYYYMMDDhhmmss(toStartOfInterval(TimeReceived, toIntervalSecond(25920))) ORDER BY (toStartOfFiveMinutes(TimeReceived), ExporterAddress, InIfName, OutIfName) TTL TimeReceived + toIntervalSecond(1296000) SETTINGS index_granularity = 8192, ttl_only_drop_parts = 1"
flows_1h0m0s,"CREATE TABLE default.flows_1h0m0s (`TimeReceived` DateTime CODEC(DoubleDelta, LZ4), `SamplingRate` UInt64, `ExporterAddress` LowCardinality(IPv6), `ExporterName` LowCardinality(String), `ExporterGroup` LowCardinality(String), `ExporterRole` LowCardinality(String), `ExporterSite` LowCardinality(String), `ExporterRegion` LowCardinality(String), `ExporterTenant` LowCardinality(String), `SrcAS` UInt32, `DstAS` UInt32, `SrcNetName` LowCardinality(String), `DstNetName` LowCardinality(String), `SrcNetRole` LowCardinality(String), `DstNetRole` LowCardinality(String), `SrcNetSite` LowCardinality(String), `DstNetSite` LowCardinality(String), `SrcNetRegion` LowCardinality(String), `DstNetRegion` LowCardinality(String), `SrcNetTenant` LowCardinality(String), `DstNetTenant` LowCardinality(String), `SrcCountry` FixedString(2), `DstCountry` FixedString(2), `SrcGeoCity` LowCardinality(String), `DstGeoCity` LowCardinality(String), `SrcGeoState` LowCardinality(String), `DstGeoState` LowCardinality(String), `Dst1stAS` UInt32, `Dst2ndAS` UInt32, `Dst3rdAS` UInt32, `InIfName` LowCardinality(String), `OutIfName` LowCardinality(String), `InIfDescription` LowCardinality(String), `OutIfDescription` LowCardinality(String), `InIfSpeed` UInt32, `OutIfSpeed` UInt32, `InIfConnectivity` LowCardinality(String), `OutIfConnectivity` LowCardinality(String), `InIfProvider` LowCardinality(String), `OutIfProvider` LowCardinality(String), `InIfBoundary` Enum8('undefined' = 0, 'external' = 1, 'internal' = 2), `OutIfBoundary` Enum8('undefined' = 0, 'external' = 1, 'internal' = 2), `EType` UInt32, `Proto` UInt32, `Bytes` UInt64 CODEC(T64, LZ4), `Packets` UInt64 CODEC(T64, LZ4), `PacketSize` UInt64 ALIAS intDiv(Bytes, Packets), `PacketSizeBucket` LowCardinality(String) ALIAS multiIf(PacketSize < 64, '0-63', PacketSize < 128, '64-127', PacketSize < 256, '128-255', PacketSize < 512, '256-511', PacketSize < 768, '512-767', PacketSize < 1024, '768-1023', PacketSize < 1280, '1024-1279', PacketSize < 1501, '1280-1500', PacketSize < 2048, '1501-2047', PacketSize < 3072, '2048-3071', PacketSize < 4096, '3072-4095', PacketSize < 8192, '4096-8191', PacketSize < 10240, '8192-10239', PacketSize < 16384, '10240-16383', PacketSize < 32768, '16384-32767', PacketSize < 65536, '32768-65535', '65536-Inf'), `ForwardingStatus` UInt32) ENGINE = SummingMergeTree((Bytes, Packets)) PARTITION BY toYYYYMMDDhhmmss(toStartOfInterval(TimeReceived, toIntervalSecond(622080))) PRIMARY KEY (TimeReceived, ExporterAddress, EType, Proto, InIfName, SrcAS, ForwardingStatus, OutIfName, DstAS, SamplingRate) ORDER BY (TimeReceived, ExporterAddress, EType, Proto, InIfName, SrcAS, ForwardingStatus, OutIfName, DstAS, SamplingRate, SrcNetName, DstNetName, SrcNetRole, DstNetRole, SrcNetSite, DstNetSite, SrcNetRegion, DstNetRegion, SrcNetTenant, DstNetTenant, SrcCountry, DstCountry, SrcGeoCity, DstGeoCity, SrcGeoState, DstGeoState, Dst1stAS, Dst2ndAS, Dst3rdAS) TTL TimeReceived + toIntervalSecond(31104000) SETTINGS index_granularity = 8192, ttl_only_drop_parts = 1"
@@ -15,5 +17,3 @@ flows_1m0s_consumer,"CREATE MATERIALIZED VIEW default.flows_1m0s_consumer TO def
flows_5m0s_consumer,"CREATE MATERIALIZED VIEW default.flows_5m0s_consumer TO default.flows_5m0s (`TimeReceived` DateTime, `SamplingRate` UInt64, `ExporterAddress` LowCardinality(IPv6), `ExporterName` LowCardinality(String), `ExporterGroup` LowCardinality(String), `ExporterRole` LowCardinality(String), `ExporterSite` LowCardinality(String), `ExporterRegion` LowCardinality(String), `ExporterTenant` LowCardinality(String), `SrcAS` UInt32, `DstAS` UInt32, `SrcNetName` LowCardinality(String), `DstNetName` LowCardinality(String), `SrcNetRole` LowCardinality(String), `DstNetRole` LowCardinality(String), `SrcNetSite` LowCardinality(String), `DstNetSite` LowCardinality(String), `SrcNetRegion` LowCardinality(String), `DstNetRegion` LowCardinality(String), `SrcNetTenant` LowCardinality(String), `DstNetTenant` LowCardinality(String), `SrcCountry` FixedString(2), `DstCountry` FixedString(2), `SrcGeoCity` LowCardinality(String), `DstGeoCity` LowCardinality(String), `SrcGeoState` LowCardinality(String), `DstGeoState` LowCardinality(String), `Dst1stAS` UInt32, `Dst2ndAS` UInt32, `Dst3rdAS` UInt32, `InIfName` LowCardinality(String), `OutIfName` LowCardinality(String), `InIfDescription` LowCardinality(String), `OutIfDescription` LowCardinality(String), `InIfSpeed` UInt32, `OutIfSpeed` UInt32, `InIfConnectivity` LowCardinality(String), `OutIfConnectivity` LowCardinality(String), `InIfProvider` LowCardinality(String), `OutIfProvider` LowCardinality(String), `InIfBoundary` Enum8('undefined' = 0, 'external' = 1, 'internal' = 2), `OutIfBoundary` Enum8('undefined' = 0, 'external' = 1, 'internal' = 2), `EType` UInt32, `Proto` UInt32, `Bytes` UInt64, `Packets` UInt64, `ForwardingStatus` UInt32) AS SELECT toStartOfInterval(TimeReceived, toIntervalSecond(300)) AS TimeReceived, SamplingRate, ExporterAddress, ExporterName, ExporterGroup, ExporterRole, ExporterSite, ExporterRegion, ExporterTenant, SrcAS, DstAS, SrcNetName, DstNetName, SrcNetRole, DstNetRole, SrcNetSite, DstNetSite, SrcNetRegion, DstNetRegion, SrcNetTenant, DstNetTenant, SrcCountry, DstCountry, SrcGeoCity, DstGeoCity, SrcGeoState, DstGeoState, Dst1stAS, Dst2ndAS, Dst3rdAS, InIfName, OutIfName, InIfDescription, OutIfDescription, InIfSpeed, OutIfSpeed, InIfConnectivity, OutIfConnectivity, InIfProvider, OutIfProvider, InIfBoundary, OutIfBoundary, EType, Proto, Bytes, Packets, ForwardingStatus FROM default.flows"
flows_LAABIGYMRYZPTGOYIIFZNYDEQM_raw_consumer,"CREATE MATERIALIZED VIEW default.flows_LAABIGYMRYZPTGOYIIFZNYDEQM_raw_consumer TO default.flows (`TimeReceived` DateTime, `SamplingRate` UInt64, `ExporterAddress` LowCardinality(IPv6), `ExporterName` LowCardinality(String), `ExporterGroup` LowCardinality(String), `ExporterRole` LowCardinality(String), `ExporterSite` LowCardinality(String), `ExporterRegion` LowCardinality(String), `ExporterTenant` LowCardinality(String), `SrcAddr` IPv6, `DstAddr` IPv6, `SrcNetMask` UInt8, `DstNetMask` UInt8, `SrcAS` UInt32, `DstAS` UInt32, `SrcNetName` String, `DstNetName` String, `SrcNetRole` String, `DstNetRole` String, `SrcNetSite` String, `DstNetSite` String, `SrcNetRegion` String, `DstNetRegion` String, `SrcNetTenant` String, `DstNetTenant` String, `SrcCountry` String, `DstCountry` String, `SrcGeoCity` String, `DstGeoCity` String, `SrcGeoState` String, `DstGeoState` String, `DstASPath` Array(UInt32), `Dst1stAS` UInt32, `Dst2ndAS` UInt32, `Dst3rdAS` UInt32, `DstCommunities` Array(UInt32), `DstLargeCommunities` Array(UInt128), `InIfName` LowCardinality(String), `OutIfName` LowCardinality(String), `InIfDescription` LowCardinality(String), `OutIfDescription` LowCardinality(String), `InIfSpeed` UInt32, `OutIfSpeed` UInt32, `InIfConnectivity` LowCardinality(String), `OutIfConnectivity` LowCardinality(String), `InIfProvider` LowCardinality(String), `OutIfProvider` LowCardinality(String), `InIfBoundary` Enum8('undefined' = 0, 'external' = 1, 'internal' = 2), `OutIfBoundary` Enum8('undefined' = 0, 'external' = 1, 'internal' = 2), `EType` UInt32, `Proto` UInt32, `SrcPort` UInt16, `DstPort` UInt16, `Bytes` UInt64, `Packets` UInt64, `ForwardingStatus` UInt32) AS WITH arrayCompact(DstASPath) AS c_DstASPath, dictGet('default.networks', ('asn', 'name', 'role', 'site', 'region', 'tenant', 'country', 'city', 'state'), SrcAddr) AS c_SrcNetworks, dictGet('default.networks', ('asn', 'name', 'role', 'site', 'region', 'tenant', 'country', 'city', 'state'), DstAddr) AS c_DstNetworks SELECT TimeReceived, SamplingRate, ExporterAddress, ExporterName, ExporterGroup, ExporterRole, ExporterSite, ExporterRegion, ExporterTenant, SrcAddr, DstAddr, SrcNetMask, DstNetMask, if(SrcAS = 0, c_SrcNetworks.1, SrcAS) AS SrcAS, if(DstAS = 0, c_DstNetworks.1, DstAS) AS DstAS, c_SrcNetworks.2 AS SrcNetName, c_DstNetworks.2 AS DstNetName, c_SrcNetworks.3 AS SrcNetRole, c_DstNetworks.3 AS DstNetRole, c_SrcNetworks.4 AS SrcNetSite, c_DstNetworks.4 AS DstNetSite, c_SrcNetworks.5 AS SrcNetRegion, c_DstNetworks.5 AS DstNetRegion, c_SrcNetworks.6 AS SrcNetTenant, c_DstNetworks.6 AS DstNetTenant, c_SrcNetworks.7 AS SrcCountry, c_DstNetworks.7 AS DstCountry, c_SrcNetworks.8 AS SrcGeoCity, c_DstNetworks.8 AS DstGeoCity, c_SrcNetworks.9 AS SrcGeoState, c_DstNetworks.9 AS DstGeoState, DstASPath, c_DstASPath[1] AS Dst1stAS, c_DstASPath[2] AS Dst2ndAS, c_DstASPath[3] AS Dst3rdAS, DstCommunities, arrayMap((asn, l1, l2) -> ((bitShiftLeft(CAST(asn, 'UInt128'), 64) + bitShiftLeft(CAST(l1, 'UInt128'), 32)) + CAST(l2, 'UInt128')), DstLargeCommunitiesASN, DstLargeCommunitiesLocalData1, DstLargeCommunitiesLocalData2) AS DstLargeCommunities, InIfName, OutIfName, InIfDescription, OutIfDescription, InIfSpeed, OutIfSpeed, InIfConnectivity, OutIfConnectivity, InIfProvider, OutIfProvider, InIfBoundary, OutIfBoundary, EType, Proto, SrcPort, DstPort, Bytes, Packets, ForwardingStatus FROM default.flows_LAABIGYMRYZPTGOYIIFZNYDEQM_raw WHERE length(_error) = 0"
flows_raw_errors_consumer,"CREATE MATERIALIZED VIEW default.flows_raw_errors_consumer TO default.flows_raw_errors (`timestamp` DateTime, `topic` LowCardinality(String), `partition` UInt64, `offset` UInt64, `raw` String, `error` String) AS SELECT now() AS timestamp, _topic AS topic, _partition AS partition, _offset AS offset, _raw_message AS raw, _error AS error FROM default.flows_LAABIGYMRYZPTGOYIIFZNYDEQM_raw WHERE length(_error) > 0"
tcp,"CREATE DICTIONARY default.tcp (`port` UInt16 INJECTIVE, `name` String) PRIMARY KEY port SOURCE(HTTP(URL 'http://127.0.0.1:0/api/v0/orchestrator/clickhouse/tcp.csv' FORMAT 'CSVWithNames')) LIFETIME(MIN 0 MAX 3600) LAYOUT(HASHED()) SETTINGS(format_csv_allow_single_quotes = 0)"
udp,"CREATE DICTIONARY default.udp (`port` UInt16 INJECTIVE, `name` String) PRIMARY KEY port SOURCE(HTTP(URL 'http://127.0.0.1:0/api/v0/orchestrator/clickhouse/udp.csv' FORMAT 'CSVWithNames')) LIFETIME(MIN 0 MAX 3600) LAYOUT(HASHED()) SETTINGS(format_csv_allow_single_quotes = 0)"
17
orchestrator/clickhouse/testdata/states/012.csv
vendored
Normal file
@@ -0,0 +1,17 @@
asns,"CREATE DICTIONARY default.asns (`asn` UInt32 INJECTIVE, `name` String) PRIMARY KEY asn SOURCE(HTTP(URL 'http://127.0.0.1:0/api/v0/orchestrator/clickhouse/asns.csv' FORMAT 'CSVWithNames')) LIFETIME(MIN 0 MAX 3600) LAYOUT(HASHED()) SETTINGS(format_csv_allow_single_quotes = 0)"
icmp,"CREATE DICTIONARY default.icmp (`proto` UInt8, `type` UInt8, `code` UInt8, `name` String) PRIMARY KEY proto, type, code SOURCE(HTTP(URL 'http://127.0.0.1:0/api/v0/orchestrator/clickhouse/icmp.csv' FORMAT 'CSVWithNames')) LIFETIME(MIN 0 MAX 3600) LAYOUT(COMPLEX_KEY_HASHED()) SETTINGS(format_csv_allow_single_quotes = 0)"
networks,"CREATE DICTIONARY default.networks (`network` String, `name` String, `role` String, `site` String, `region` String, `city` String, `state` String, `country` String, `tenant` String, `asn` UInt32) PRIMARY KEY network SOURCE(HTTP(URL 'http://127.0.0.1:0/api/v0/orchestrator/clickhouse/networks.csv' FORMAT 'CSVWithNames')) LIFETIME(MIN 0 MAX 3600) LAYOUT(IP_TRIE()) SETTINGS(format_csv_allow_single_quotes = 0)"
protocols,"CREATE DICTIONARY default.protocols (`proto` UInt8 INJECTIVE, `name` String, `description` String) PRIMARY KEY proto SOURCE(HTTP(URL 'http://127.0.0.1:0/api/v0/orchestrator/clickhouse/protocols.csv' FORMAT 'CSVWithNames')) LIFETIME(MIN 0 MAX 3600) LAYOUT(HASHED()) SETTINGS(format_csv_allow_single_quotes = 0)"
tcp,"CREATE DICTIONARY default.tcp (`port` UInt16 INJECTIVE, `name` String) PRIMARY KEY port SOURCE(HTTP(URL 'http://127.0.0.1:0/api/v0/orchestrator/clickhouse/tcp.csv' FORMAT 'CSVWithNames')) LIFETIME(MIN 0 MAX 3600) LAYOUT(HASHED()) SETTINGS(format_csv_allow_single_quotes = 0)"
udp,"CREATE DICTIONARY default.udp (`port` UInt16 INJECTIVE, `name` String) PRIMARY KEY port SOURCE(HTTP(URL 'http://127.0.0.1:0/api/v0/orchestrator/clickhouse/udp.csv' FORMAT 'CSVWithNames')) LIFETIME(MIN 0 MAX 3600) LAYOUT(HASHED()) SETTINGS(format_csv_allow_single_quotes = 0)"
exporters,"CREATE TABLE default.exporters (`TimeReceived` DateTime, `ExporterAddress` LowCardinality(IPv6), `ExporterName` LowCardinality(String), `ExporterGroup` LowCardinality(String), `ExporterRole` LowCardinality(String), `ExporterSite` LowCardinality(String), `ExporterRegion` LowCardinality(String), `ExporterTenant` LowCardinality(String), `IfName` LowCardinality(String), `IfDescription` LowCardinality(String), `IfSpeed` UInt32, `IfConnectivity` LowCardinality(String), `IfProvider` LowCardinality(String), `IfBoundary` Enum8('undefined' = 0, 'external' = 1, 'internal' = 2)) ENGINE = ReplacingMergeTree(TimeReceived) ORDER BY (ExporterAddress, IfName) TTL TimeReceived + toIntervalDay(1) SETTINGS index_granularity = 8192"
flows,"CREATE TABLE default.flows (`TimeReceived` DateTime CODEC(DoubleDelta, LZ4), `SamplingRate` UInt64, `ExporterAddress` LowCardinality(IPv6), `ExporterName` LowCardinality(String), `ExporterGroup` LowCardinality(String), `ExporterRole` LowCardinality(String), `ExporterSite` LowCardinality(String), `ExporterRegion` LowCardinality(String), `ExporterTenant` LowCardinality(String), `SrcAddr` IPv6 CODEC(ZSTD(1)), `DstAddr` IPv6 CODEC(ZSTD(1)), `SrcNetMask` UInt8, `DstNetMask` UInt8, `SrcNetPrefix` String ALIAS multiIf(EType = 2048, concat(replaceRegexpOne(CAST(IPv6CIDRToRange(SrcAddr, CAST(96 + SrcNetMask, 'UInt8')).1, 'String'), '^::ffff:', ''), '/', CAST(SrcNetMask, 'String')), EType = 34525, concat(CAST(IPv6CIDRToRange(SrcAddr, SrcNetMask).1, 'String'), '/', CAST(SrcNetMask, 'String')), ''), `DstNetPrefix` String ALIAS multiIf(EType = 2048, concat(replaceRegexpOne(CAST(IPv6CIDRToRange(DstAddr, CAST(96 + DstNetMask, 'UInt8')).1, 'String'), '^::ffff:', ''), '/', CAST(DstNetMask, 'String')), EType = 34525, concat(CAST(IPv6CIDRToRange(DstAddr, DstNetMask).1, 'String'), '/', CAST(DstNetMask, 'String')), ''), `SrcAS` UInt32, `DstAS` UInt32, `SrcNetName` LowCardinality(String), `DstNetName` LowCardinality(String), `SrcNetRole` LowCardinality(String), `DstNetRole` LowCardinality(String), `SrcNetSite` LowCardinality(String), `DstNetSite` LowCardinality(String), `SrcNetRegion` LowCardinality(String), `DstNetRegion` LowCardinality(String), `SrcNetTenant` LowCardinality(String), `DstNetTenant` LowCardinality(String), `SrcCountry` FixedString(2), `DstCountry` FixedString(2), `SrcGeoCity` LowCardinality(String), `DstGeoCity` LowCardinality(String), `SrcGeoState` LowCardinality(String), `DstGeoState` LowCardinality(String), `DstASPath` Array(UInt32), `Dst1stAS` UInt32, `Dst2ndAS` UInt32, `Dst3rdAS` UInt32, `DstCommunities` Array(UInt32), `DstLargeCommunities` Array(UInt128), `InIfName` LowCardinality(String), `OutIfName` LowCardinality(String), `InIfDescription` LowCardinality(String), `OutIfDescription` LowCardinality(String), `InIfSpeed` UInt32, `OutIfSpeed` UInt32, `InIfConnectivity` LowCardinality(String), `OutIfConnectivity` LowCardinality(String), `InIfProvider` LowCardinality(String), `OutIfProvider` LowCardinality(String), `InIfBoundary` Enum8('undefined' = 0, 'external' = 1, 'internal' = 2), `OutIfBoundary` Enum8('undefined' = 0, 'external' = 1, 'internal' = 2), `EType` UInt32, `Proto` UInt32, `SrcPort` UInt16, `DstPort` UInt16, `Bytes` UInt64 CODEC(T64, LZ4), `Packets` UInt64 CODEC(T64, LZ4), `PacketSize` UInt64 ALIAS intDiv(Bytes, Packets), `PacketSizeBucket` LowCardinality(String) ALIAS multiIf(PacketSize < 64, '0-63', PacketSize < 128, '64-127', PacketSize < 256, '128-255', PacketSize < 512, '256-511', PacketSize < 768, '512-767', PacketSize < 1024, '768-1023', PacketSize < 1280, '1024-1279', PacketSize < 1501, '1280-1500', PacketSize < 2048, '1501-2047', PacketSize < 3072, '2048-3071', PacketSize < 4096, '3072-4095', PacketSize < 8192, '4096-8191', PacketSize < 10240, '8192-10239', PacketSize < 16384, '10240-16383', PacketSize < 32768, '16384-32767', PacketSize < 65536, '32768-65535', '65536-Inf'), `ForwardingStatus` UInt32) ENGINE = MergeTree PARTITION BY toYYYYMMDDhhmmss(toStartOfInterval(TimeReceived, toIntervalSecond(25920))) ORDER BY (toStartOfFiveMinutes(TimeReceived), ExporterAddress, InIfName, OutIfName) TTL TimeReceived + toIntervalSecond(1296000) SETTINGS index_granularity = 8192, ttl_only_drop_parts = 1"
flows_1h0m0s,"CREATE TABLE default.flows_1h0m0s (`TimeReceived` DateTime CODEC(DoubleDelta, LZ4), `SamplingRate` UInt64, `ExporterAddress` LowCardinality(IPv6), `ExporterName` LowCardinality(String), `ExporterGroup` LowCardinality(String), `ExporterRole` LowCardinality(String), `ExporterSite` LowCardinality(String), `ExporterRegion` LowCardinality(String), `ExporterTenant` LowCardinality(String), `SrcAS` UInt32, `DstAS` UInt32, `SrcNetName` LowCardinality(String), `DstNetName` LowCardinality(String), `SrcNetRole` LowCardinality(String), `DstNetRole` LowCardinality(String), `SrcNetSite` LowCardinality(String), `DstNetSite` LowCardinality(String), `SrcNetRegion` LowCardinality(String), `DstNetRegion` LowCardinality(String), `SrcNetTenant` LowCardinality(String), `DstNetTenant` LowCardinality(String), `SrcCountry` FixedString(2), `DstCountry` FixedString(2), `SrcGeoCity` LowCardinality(String), `DstGeoCity` LowCardinality(String), `SrcGeoState` LowCardinality(String), `DstGeoState` LowCardinality(String), `Dst1stAS` UInt32, `Dst2ndAS` UInt32, `Dst3rdAS` UInt32, `InIfName` LowCardinality(String), `OutIfName` LowCardinality(String), `InIfDescription` LowCardinality(String), `OutIfDescription` LowCardinality(String), `InIfSpeed` UInt32, `OutIfSpeed` UInt32, `InIfConnectivity` LowCardinality(String), `OutIfConnectivity` LowCardinality(String), `InIfProvider` LowCardinality(String), `OutIfProvider` LowCardinality(String), `InIfBoundary` Enum8('undefined' = 0, 'external' = 1, 'internal' = 2), `OutIfBoundary` Enum8('undefined' = 0, 'external' = 1, 'internal' = 2), `EType` UInt32, `Proto` UInt32, `Bytes` UInt64 CODEC(T64, LZ4), `Packets` UInt64 CODEC(T64, LZ4), `PacketSize` UInt64 ALIAS intDiv(Bytes, Packets), `PacketSizeBucket` LowCardinality(String) ALIAS multiIf(PacketSize < 64, '0-63', PacketSize < 128, '64-127', PacketSize < 256, '128-255', PacketSize < 512, '256-511', PacketSize < 768, '512-767', PacketSize < 1024, '768-1023', PacketSize < 1280, '1024-1279', PacketSize < 1501, '1280-1500', PacketSize < 2048, '1501-2047', PacketSize < 3072, '2048-3071', PacketSize < 4096, '3072-4095', PacketSize < 8192, '4096-8191', PacketSize < 10240, '8192-10239', PacketSize < 16384, '10240-16383', PacketSize < 32768, '16384-32767', PacketSize < 65536, '32768-65535', '65536-Inf'), `ForwardingStatus` UInt32) ENGINE = SummingMergeTree((Bytes, Packets)) PARTITION BY toYYYYMMDDhhmmss(toStartOfInterval(TimeReceived, toIntervalSecond(622080))) PRIMARY KEY (TimeReceived, ExporterAddress, EType, Proto, InIfName, SrcAS, ForwardingStatus, OutIfName, DstAS, SamplingRate) ORDER BY (TimeReceived, ExporterAddress, EType, Proto, InIfName, SrcAS, ForwardingStatus, OutIfName, DstAS, SamplingRate, SrcNetName, DstNetName, SrcNetRole, DstNetRole, SrcNetSite, DstNetSite, SrcNetRegion, DstNetRegion, SrcNetTenant, DstNetTenant, SrcCountry, DstCountry, SrcGeoCity, DstGeoCity, SrcGeoState, DstGeoState, Dst1stAS, Dst2ndAS, Dst3rdAS) TTL TimeReceived + toIntervalSecond(31104000) SETTINGS index_granularity = 8192, ttl_only_drop_parts = 1"
flows_1m0s,"CREATE TABLE default.flows_1m0s (`TimeReceived` DateTime CODEC(DoubleDelta, LZ4), `SamplingRate` UInt64, `ExporterAddress` LowCardinality(IPv6), `ExporterName` LowCardinality(String), `ExporterGroup` LowCardinality(String), `ExporterRole` LowCardinality(String), `ExporterSite` LowCardinality(String), `ExporterRegion` LowCardinality(String), `ExporterTenant` LowCardinality(String), `SrcAS` UInt32, `DstAS` UInt32, `SrcNetName` LowCardinality(String), `DstNetName` LowCardinality(String), `SrcNetRole` LowCardinality(String), `DstNetRole` LowCardinality(String), `SrcNetSite` LowCardinality(String), `DstNetSite` LowCardinality(String), `SrcNetRegion` LowCardinality(String), `DstNetRegion` LowCardinality(String), `SrcNetTenant` LowCardinality(String), `DstNetTenant` LowCardinality(String), `SrcCountry` FixedString(2), `DstCountry` FixedString(2), `SrcGeoCity` LowCardinality(String), `DstGeoCity` LowCardinality(String), `SrcGeoState` LowCardinality(String), `DstGeoState` LowCardinality(String), `Dst1stAS` UInt32, `Dst2ndAS` UInt32, `Dst3rdAS` UInt32, `InIfName` LowCardinality(String), `OutIfName` LowCardinality(String), `InIfDescription` LowCardinality(String), `OutIfDescription` LowCardinality(String), `InIfSpeed` UInt32, `OutIfSpeed` UInt32, `InIfConnectivity` LowCardinality(String), `OutIfConnectivity` LowCardinality(String), `InIfProvider` LowCardinality(String), `OutIfProvider` LowCardinality(String), `InIfBoundary` Enum8('undefined' = 0, 'external' = 1, 'internal' = 2), `OutIfBoundary` Enum8('undefined' = 0, 'external' = 1, 'internal' = 2), `EType` UInt32, `Proto` UInt32, `Bytes` UInt64 CODEC(T64, LZ4), `Packets` UInt64 CODEC(T64, LZ4), `PacketSize` UInt64 ALIAS intDiv(Bytes, Packets), `PacketSizeBucket` LowCardinality(String) ALIAS multiIf(PacketSize < 64, '0-63', PacketSize < 128, '64-127', PacketSize < 256, '128-255', PacketSize < 512, '256-511', PacketSize < 768, '512-767', PacketSize < 1024, '768-1023', PacketSize < 1280, '1024-1279', PacketSize < 1501, '1280-1500', PacketSize < 2048, '1501-2047', PacketSize < 3072, '2048-3071', PacketSize < 4096, '3072-4095', PacketSize < 8192, '4096-8191', PacketSize < 10240, '8192-10239', PacketSize < 16384, '10240-16383', PacketSize < 32768, '16384-32767', PacketSize < 65536, '32768-65535', '65536-Inf'), `ForwardingStatus` UInt32) ENGINE = SummingMergeTree((Bytes, Packets)) PARTITION BY toYYYYMMDDhhmmss(toStartOfInterval(TimeReceived, toIntervalSecond(12096))) PRIMARY KEY (TimeReceived, ExporterAddress, EType, Proto, InIfName, SrcAS, ForwardingStatus, OutIfName, DstAS, SamplingRate) ORDER BY (TimeReceived, ExporterAddress, EType, Proto, InIfName, SrcAS, ForwardingStatus, OutIfName, DstAS, SamplingRate, SrcNetName, DstNetName, SrcNetRole, DstNetRole, SrcNetSite, DstNetSite, SrcNetRegion, DstNetRegion, SrcNetTenant, DstNetTenant, SrcCountry, DstCountry, SrcGeoCity, DstGeoCity, SrcGeoState, DstGeoState, Dst1stAS, Dst2ndAS, Dst3rdAS) TTL TimeReceived + toIntervalSecond(604800) SETTINGS index_granularity = 8192, ttl_only_drop_parts = 1"
flows_5m0s,"CREATE TABLE default.flows_5m0s (`TimeReceived` DateTime CODEC(DoubleDelta, LZ4), `SamplingRate` UInt64, `ExporterAddress` LowCardinality(IPv6), `ExporterName` LowCardinality(String), `ExporterGroup` LowCardinality(String), `ExporterRole` LowCardinality(String), `ExporterSite` LowCardinality(String), `ExporterRegion` LowCardinality(String), `ExporterTenant` LowCardinality(String), `SrcAS` UInt32, `DstAS` UInt32, `SrcNetName` LowCardinality(String), `DstNetName` LowCardinality(String), `SrcNetRole` LowCardinality(String), `DstNetRole` LowCardinality(String), `SrcNetSite` LowCardinality(String), `DstNetSite` LowCardinality(String), `SrcNetRegion` LowCardinality(String), `DstNetRegion` LowCardinality(String), `SrcNetTenant` LowCardinality(String), `DstNetTenant` LowCardinality(String), `SrcCountry` FixedString(2), `DstCountry` FixedString(2), `SrcGeoCity` LowCardinality(String), `DstGeoCity` LowCardinality(String), `SrcGeoState` LowCardinality(String), `DstGeoState` LowCardinality(String), `Dst1stAS` UInt32, `Dst2ndAS` UInt32, `Dst3rdAS` UInt32, `InIfName` LowCardinality(String), `OutIfName` LowCardinality(String), `InIfDescription` LowCardinality(String), `OutIfDescription` LowCardinality(String), `InIfSpeed` UInt32, `OutIfSpeed` UInt32, `InIfConnectivity` LowCardinality(String), `OutIfConnectivity` LowCardinality(String), `InIfProvider` LowCardinality(String), `OutIfProvider` LowCardinality(String), `InIfBoundary` Enum8('undefined' = 0, 'external' = 1, 'internal' = 2), `OutIfBoundary` Enum8('undefined' = 0, 'external' = 1, 'internal' = 2), `EType` UInt32, `Proto` UInt32, `Bytes` UInt64 CODEC(T64, LZ4), `Packets` UInt64 CODEC(T64, LZ4), `PacketSize` UInt64 ALIAS intDiv(Bytes, Packets), `PacketSizeBucket` LowCardinality(String) ALIAS multiIf(PacketSize < 64, '0-63', PacketSize < 128, '64-127', PacketSize < 256, '128-255', PacketSize < 512, '256-511', PacketSize < 768, '512-767', PacketSize < 1024, '768-1023', PacketSize < 1280, '1024-1279', PacketSize < 1501, '1280-1500', PacketSize < 2048, '1501-2047', PacketSize < 3072, '2048-3071', PacketSize < 4096, '3072-4095', PacketSize < 8192, '4096-8191', PacketSize < 10240, '8192-10239', PacketSize < 16384, '10240-16383', PacketSize < 32768, '16384-32767', PacketSize < 65536, '32768-65535', '65536-Inf'), `ForwardingStatus` UInt32) ENGINE = SummingMergeTree((Bytes, Packets)) PARTITION BY toYYYYMMDDhhmmss(toStartOfInterval(TimeReceived, toIntervalSecond(155520))) PRIMARY KEY (TimeReceived, ExporterAddress, EType, Proto, InIfName, SrcAS, ForwardingStatus, OutIfName, DstAS, SamplingRate) ORDER BY (TimeReceived, ExporterAddress, EType, Proto, InIfName, SrcAS, ForwardingStatus, OutIfName, DstAS, SamplingRate, SrcNetName, DstNetName, SrcNetRole, DstNetRole, SrcNetSite, DstNetSite, SrcNetRegion, DstNetRegion, SrcNetTenant, DstNetTenant, SrcCountry, DstCountry, SrcGeoCity, DstGeoCity, SrcGeoState, DstGeoState, Dst1stAS, Dst2ndAS, Dst3rdAS) TTL TimeReceived + toIntervalSecond(7776000) SETTINGS index_granularity = 8192, ttl_only_drop_parts = 1"
flows_I6D3KDQCRUBCNCGF4BSOWTRMVIv5_raw,"CREATE TABLE default.flows_I6D3KDQCRUBCNCGF4BSOWTRMVIv5_raw (`TimeReceived` DateTime CODEC(DoubleDelta, LZ4), `SamplingRate` UInt64, `ExporterAddress` LowCardinality(IPv6), `ExporterName` LowCardinality(String), `ExporterGroup` LowCardinality(String), `ExporterRole` LowCardinality(String), `ExporterSite` LowCardinality(String), `ExporterRegion` LowCardinality(String), `ExporterTenant` LowCardinality(String), `SrcAddr` IPv6 CODEC(ZSTD(1)), `DstAddr` IPv6 CODEC(ZSTD(1)), `SrcNetMask` UInt8, `DstNetMask` UInt8, `SrcAS` UInt32, `DstAS` UInt32, `DstASPath` Array(UInt32), `DstCommunities` Array(UInt32), `DstLargeCommunities` Array(UInt128), `InIfName` LowCardinality(String), `OutIfName` LowCardinality(String), `InIfDescription` LowCardinality(String), `OutIfDescription` LowCardinality(String), `InIfSpeed` UInt32, `OutIfSpeed` UInt32, `InIfConnectivity` LowCardinality(String), `OutIfConnectivity` LowCardinality(String), `InIfProvider` LowCardinality(String), `OutIfProvider` LowCardinality(String), `InIfBoundary` Enum8('undefined' = 0, 'external' = 1, 'internal' = 2), `OutIfBoundary` Enum8('undefined' = 0, 'external' = 1, 'internal' = 2), `EType` UInt32, `Proto` UInt32, `SrcPort` UInt16, `DstPort` UInt16, `Bytes` UInt64 CODEC(T64, LZ4), `Packets` UInt64 CODEC(T64, LZ4), `ForwardingStatus` UInt32) ENGINE = Null"
exporters_consumer,"CREATE MATERIALIZED VIEW default.exporters_consumer TO default.exporters (`TimeReceived` DateTime, `ExporterAddress` LowCardinality(IPv6), `ExporterName` LowCardinality(String), `ExporterGroup` LowCardinality(String), `ExporterRole` LowCardinality(String), `ExporterSite` LowCardinality(String), `ExporterRegion` LowCardinality(String), `ExporterTenant` LowCardinality(String), `IfName` String, `IfDescription` String, `IfSpeed` UInt32, `IfConnectivity` String, `IfProvider` String, `IfBoundary` Enum8('undefined' = 0, 'external' = 1, 'internal' = 2)) AS SELECT DISTINCT TimeReceived, ExporterAddress, ExporterName, ExporterGroup, ExporterRole, ExporterSite, ExporterRegion, ExporterTenant, [InIfName, OutIfName][num] AS IfName, [InIfDescription, OutIfDescription][num] AS IfDescription, [InIfSpeed, OutIfSpeed][num] AS IfSpeed, [InIfConnectivity, OutIfConnectivity][num] AS IfConnectivity, [InIfProvider, OutIfProvider][num] AS IfProvider, [InIfBoundary, OutIfBoundary][num] AS IfBoundary FROM default.flows ARRAY JOIN arrayEnumerate([1, 2]) AS num"
flows_1h0m0s_consumer,"CREATE MATERIALIZED VIEW default.flows_1h0m0s_consumer TO default.flows_1h0m0s (`TimeReceived` DateTime, `SamplingRate` UInt64, `ExporterAddress` LowCardinality(IPv6), `ExporterName` LowCardinality(String), `ExporterGroup` LowCardinality(String), `ExporterRole` LowCardinality(String), `ExporterSite` LowCardinality(String), `ExporterRegion` LowCardinality(String), `ExporterTenant` LowCardinality(String), `SrcAS` UInt32, `DstAS` UInt32, `SrcNetName` LowCardinality(String), `DstNetName` LowCardinality(String), `SrcNetRole` LowCardinality(String), `DstNetRole` LowCardinality(String), `SrcNetSite` LowCardinality(String), `DstNetSite` LowCardinality(String), `SrcNetRegion` LowCardinality(String), `DstNetRegion` LowCardinality(String), `SrcNetTenant` LowCardinality(String), `DstNetTenant` LowCardinality(String), `SrcCountry` FixedString(2), `DstCountry` FixedString(2), `SrcGeoCity` LowCardinality(String), `DstGeoCity` LowCardinality(String), `SrcGeoState` LowCardinality(String), `DstGeoState` LowCardinality(String), `Dst1stAS` UInt32, `Dst2ndAS` UInt32, `Dst3rdAS` UInt32, `InIfName` LowCardinality(String), `OutIfName` LowCardinality(String), `InIfDescription` LowCardinality(String), `OutIfDescription` LowCardinality(String), `InIfSpeed` UInt32, `OutIfSpeed` UInt32, `InIfConnectivity` LowCardinality(String), `OutIfConnectivity` LowCardinality(String), `InIfProvider` LowCardinality(String), `OutIfProvider` LowCardinality(String), `InIfBoundary` Enum8('undefined' = 0, 'external' = 1, 'internal' = 2), `OutIfBoundary` Enum8('undefined' = 0, 'external' = 1, 'internal' = 2), `EType` UInt32, `Proto` UInt32, `Bytes` UInt64, `Packets` UInt64, `ForwardingStatus` UInt32) AS SELECT toStartOfInterval(TimeReceived, toIntervalSecond(3600)) AS TimeReceived, SamplingRate, ExporterAddress, ExporterName, ExporterGroup, ExporterRole, ExporterSite, ExporterRegion, ExporterTenant, SrcAS, DstAS, SrcNetName, DstNetName, SrcNetRole, DstNetRole, SrcNetSite, DstNetSite, SrcNetRegion, DstNetRegion, SrcNetTenant, DstNetTenant, SrcCountry, DstCountry, SrcGeoCity, DstGeoCity, SrcGeoState, DstGeoState, Dst1stAS, Dst2ndAS, Dst3rdAS, InIfName, OutIfName, InIfDescription, OutIfDescription, InIfSpeed, OutIfSpeed, InIfConnectivity, OutIfConnectivity, InIfProvider, OutIfProvider, InIfBoundary, OutIfBoundary, EType, Proto, Bytes, Packets, ForwardingStatus FROM default.flows"
flows_1m0s_consumer,"CREATE MATERIALIZED VIEW default.flows_1m0s_consumer TO default.flows_1m0s (`TimeReceived` DateTime, `SamplingRate` UInt64, `ExporterAddress` LowCardinality(IPv6), `ExporterName` LowCardinality(String), `ExporterGroup` LowCardinality(String), `ExporterRole` LowCardinality(String), `ExporterSite` LowCardinality(String), `ExporterRegion` LowCardinality(String), `ExporterTenant` LowCardinality(String), `SrcAS` UInt32, `DstAS` UInt32, `SrcNetName` LowCardinality(String), `DstNetName` LowCardinality(String), `SrcNetRole` LowCardinality(String), `DstNetRole` LowCardinality(String), `SrcNetSite` LowCardinality(String), `DstNetSite` LowCardinality(String), `SrcNetRegion` LowCardinality(String), `DstNetRegion` LowCardinality(String), `SrcNetTenant` LowCardinality(String), `DstNetTenant` LowCardinality(String), `SrcCountry` FixedString(2), `DstCountry` FixedString(2), `SrcGeoCity` LowCardinality(String), `DstGeoCity` LowCardinality(String), `SrcGeoState` LowCardinality(String), `DstGeoState` LowCardinality(String), `Dst1stAS` UInt32, `Dst2ndAS` UInt32, `Dst3rdAS` UInt32, `InIfName` LowCardinality(String), `OutIfName` LowCardinality(String), `InIfDescription` LowCardinality(String), `OutIfDescription` LowCardinality(String), `InIfSpeed` UInt32, `OutIfSpeed` UInt32, `InIfConnectivity` LowCardinality(String), `OutIfConnectivity` LowCardinality(String), `InIfProvider` LowCardinality(String), `OutIfProvider` LowCardinality(String), `InIfBoundary` Enum8('undefined' = 0, 'external' = 1, 'internal' = 2), `OutIfBoundary` Enum8('undefined' = 0, 'external' = 1, 'internal' = 2), `EType` UInt32, `Proto` UInt32, `Bytes` UInt64, `Packets` UInt64, `ForwardingStatus` UInt32) AS SELECT toStartOfInterval(TimeReceived, toIntervalSecond(60)) AS TimeReceived, SamplingRate, ExporterAddress, ExporterName, ExporterGroup, ExporterRole, ExporterSite, ExporterRegion, ExporterTenant, SrcAS, DstAS, SrcNetName, DstNetName, SrcNetRole, DstNetRole, SrcNetSite, DstNetSite, SrcNetRegion, DstNetRegion, SrcNetTenant, DstNetTenant, SrcCountry, DstCountry, SrcGeoCity, DstGeoCity, SrcGeoState, DstGeoState, Dst1stAS, Dst2ndAS, Dst3rdAS, InIfName, OutIfName, InIfDescription, OutIfDescription, InIfSpeed, OutIfSpeed, InIfConnectivity, OutIfConnectivity, InIfProvider, OutIfProvider, InIfBoundary, OutIfBoundary, EType, Proto, Bytes, Packets, ForwardingStatus FROM default.flows"
flows_5m0s_consumer,"CREATE MATERIALIZED VIEW default.flows_5m0s_consumer TO default.flows_5m0s (`TimeReceived` DateTime, `SamplingRate` UInt64, `ExporterAddress` LowCardinality(IPv6), `ExporterName` LowCardinality(String), `ExporterGroup` LowCardinality(String), `ExporterRole` LowCardinality(String), `ExporterSite` LowCardinality(String), `ExporterRegion` LowCardinality(String), `ExporterTenant` LowCardinality(String), `SrcAS` UInt32, `DstAS` UInt32, `SrcNetName` LowCardinality(String), `DstNetName` LowCardinality(String), `SrcNetRole` LowCardinality(String), `DstNetRole` LowCardinality(String), `SrcNetSite` LowCardinality(String), `DstNetSite` LowCardinality(String), `SrcNetRegion` LowCardinality(String), `DstNetRegion` LowCardinality(String), `SrcNetTenant` LowCardinality(String), `DstNetTenant` LowCardinality(String), `SrcCountry` FixedString(2), `DstCountry` FixedString(2), `SrcGeoCity` LowCardinality(String), `DstGeoCity` LowCardinality(String), `SrcGeoState` LowCardinality(String), `DstGeoState` LowCardinality(String), `Dst1stAS` UInt32, `Dst2ndAS` UInt32, `Dst3rdAS` UInt32, `InIfName` LowCardinality(String), `OutIfName` LowCardinality(String), `InIfDescription` LowCardinality(String), `OutIfDescription` LowCardinality(String), `InIfSpeed` UInt32, `OutIfSpeed` UInt32, `InIfConnectivity` LowCardinality(String), `OutIfConnectivity` LowCardinality(String), `InIfProvider` LowCardinality(String), `OutIfProvider` LowCardinality(String), `InIfBoundary` Enum8('undefined' = 0, 'external' = 1, 'internal' = 2), `OutIfBoundary` Enum8('undefined' = 0, 'external' = 1, 'internal' = 2), `EType` UInt32, `Proto` UInt32, `Bytes` UInt64, `Packets` UInt64, `ForwardingStatus` UInt32) AS SELECT toStartOfInterval(TimeReceived, toIntervalSecond(300)) AS TimeReceived, SamplingRate, ExporterAddress, ExporterName, ExporterGroup, ExporterRole, ExporterSite, ExporterRegion, ExporterTenant, SrcAS, DstAS, SrcNetName, DstNetName, SrcNetRole, DstNetRole, SrcNetSite, DstNetSite, SrcNetRegion, DstNetRegion, SrcNetTenant, DstNetTenant, SrcCountry, DstCountry, SrcGeoCity, DstGeoCity, SrcGeoState, DstGeoState, Dst1stAS, Dst2ndAS, Dst3rdAS, InIfName, OutIfName, InIfDescription, OutIfDescription, InIfSpeed, OutIfSpeed, InIfConnectivity, OutIfConnectivity, InIfProvider, OutIfProvider, InIfBoundary, OutIfBoundary, EType, Proto, Bytes, Packets, ForwardingStatus FROM default.flows"
flows_I6D3KDQCRUBCNCGF4BSOWTRMVIv5_raw_consumer,"CREATE MATERIALIZED VIEW default.flows_I6D3KDQCRUBCNCGF4BSOWTRMVIv5_raw_consumer TO default.flows (`TimeReceived` DateTime, `SamplingRate` UInt64, `ExporterAddress` LowCardinality(IPv6), `ExporterName` LowCardinality(String), `ExporterGroup` LowCardinality(String), `ExporterRole` LowCardinality(String), `ExporterSite` LowCardinality(String), `ExporterRegion` LowCardinality(String), `ExporterTenant` LowCardinality(String), `SrcAddr` IPv6, `DstAddr` IPv6, `SrcNetMask` UInt8, `DstNetMask` UInt8, `SrcAS` UInt32, `DstAS` UInt32, `SrcNetName` String, `DstNetName` String, `SrcNetRole` String, `DstNetRole` String, `SrcNetSite` String, `DstNetSite` String, `SrcNetRegion` String, `DstNetRegion` String, `SrcNetTenant` String, `DstNetTenant` String, `SrcCountry` String, `DstCountry` String, `SrcGeoCity` String, `DstGeoCity` String, `SrcGeoState` String, `DstGeoState` String, `DstASPath` Array(UInt32), `Dst1stAS` UInt32, `Dst2ndAS` UInt32, `Dst3rdAS` UInt32, `DstCommunities` Array(UInt32), `DstLargeCommunities` Array(UInt128), `InIfName` LowCardinality(String), `OutIfName` LowCardinality(String), `InIfDescription` LowCardinality(String), `OutIfDescription` LowCardinality(String), `InIfSpeed` UInt32, `OutIfSpeed` UInt32, `InIfConnectivity` LowCardinality(String), `OutIfConnectivity` LowCardinality(String), `InIfProvider` LowCardinality(String), `OutIfProvider` LowCardinality(String), `InIfBoundary` Enum8('undefined' = 0, 'external' = 1, 'internal' = 2), `OutIfBoundary` Enum8('undefined' = 0, 'external' = 1, 'internal' = 2), `EType` UInt32, `Proto` UInt32, `SrcPort` UInt16, `DstPort` UInt16, `Bytes` UInt64, `Packets` UInt64, `ForwardingStatus` UInt32) AS WITH arrayCompact(DstASPath) AS c_DstASPath, dictGet('default.networks', ('asn', 'name', 'role', 'site', 'region', 'tenant', 'country', 'city', 'state'), SrcAddr) AS c_SrcNetworks, dictGet('default.networks', ('asn', 'name', 'role', 'site', 'region', 'tenant', 'country', 'city', 'state'), DstAddr) AS c_DstNetworks SELECT TimeReceived, SamplingRate, ExporterAddress, ExporterName, ExporterGroup, ExporterRole, ExporterSite, ExporterRegion, ExporterTenant, SrcAddr, DstAddr, SrcNetMask, DstNetMask, if(SrcAS = 0, c_SrcNetworks.1, SrcAS) AS SrcAS, if(DstAS = 0, c_DstNetworks.1, DstAS) AS DstAS, c_SrcNetworks.2 AS SrcNetName, c_DstNetworks.2 AS DstNetName, c_SrcNetworks.3 AS SrcNetRole, c_DstNetworks.3 AS DstNetRole, c_SrcNetworks.4 AS SrcNetSite, c_DstNetworks.4 AS DstNetSite, c_SrcNetworks.5 AS SrcNetRegion, c_DstNetworks.5 AS DstNetRegion, c_SrcNetworks.6 AS SrcNetTenant, c_DstNetworks.6 AS DstNetTenant, c_SrcNetworks.7 AS SrcCountry, c_DstNetworks.7 AS DstCountry, c_SrcNetworks.8 AS SrcGeoCity, c_DstNetworks.8 AS DstGeoCity, c_SrcNetworks.9 AS SrcGeoState, c_DstNetworks.9 AS DstGeoState, DstASPath, c_DstASPath[1] AS Dst1stAS, c_DstASPath[2] AS Dst2ndAS, c_DstASPath[3] AS Dst3rdAS, DstCommunities, DstLargeCommunities, InIfName, OutIfName, InIfDescription, OutIfDescription, InIfSpeed, OutIfSpeed, InIfConnectivity, OutIfConnectivity, InIfProvider, OutIfProvider, InIfBoundary, OutIfBoundary, EType, Proto, SrcPort, DstPort, Bytes, Packets, ForwardingStatus FROM default.flows_I6D3KDQCRUBCNCGF4BSOWTRMVIv5_raw"
@@ -12,6 +12,7 @@ import (
 	"akvorado/common/helpers"
 	"akvorado/common/kafka"
+	"akvorado/common/pb"
 	"akvorado/common/reporter"
 	"akvorado/common/schema"
 )
@@ -24,7 +25,7 @@ func TestTopicCreation(t *testing.T) {
 	segmentBytes := "107374184"
 	segmentBytes2 := "10737184"
 	cleanupPolicy := "delete"
-	expectedTopicName := fmt.Sprintf("%s-%s", topicName, schema.NewMock(t).ProtobufMessageHash())
+	expectedTopicName := fmt.Sprintf("%s-v%d", topicName, pb.Version)

 	cases := []struct {
 		Name string
@@ -103,7 +104,7 @@ func TestTopicMorePartitions(t *testing.T) {
 	client, brokers := kafka.SetupKafkaBroker(t)

 	topicName := fmt.Sprintf("test-topic-%d", rand.Int())
-	expectedTopicName := fmt.Sprintf("%s-%s", topicName, schema.NewMock(t).ProtobufMessageHash())
+	expectedTopicName := fmt.Sprintf("%s-v%d", topicName, pb.Version)

 	configuration := DefaultConfiguration()
 	configuration.Topic = topicName
@@ -11,6 +11,7 @@ import (
 	"github.com/IBM/sarama"

 	"akvorado/common/kafka"
+	"akvorado/common/pb"
 	"akvorado/common/reporter"
 	"akvorado/common/schema"
 )
@@ -40,14 +41,15 @@ func New(r *reporter.Reporter, config Configuration, dependencies Dependencies)
 		return nil, fmt.Errorf("cannot validate Kafka configuration: %w", err)
 	}

-	return &Component{
+	c := Component{
 		r:      r,
 		d:      dependencies,
 		config: config,

 		kafkaConfig: kafkaConfig,
-		kafkaTopic:  fmt.Sprintf("%s-%s", config.Topic, dependencies.Schema.ProtobufMessageHash()),
-	}, nil
+		kafkaTopic:  fmt.Sprintf("%s-v%d", config.Topic, pb.Version),
+	}
+	return &c, nil
 }

 // Start starts Kafka configuration.
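The net effect of this hunk: the Kafka topic suffix no longer tracks the protobuf schema hash but a fixed wire-format version. A minimal standalone sketch, assuming pb.Version equals 1 (the real value lives in akvorado/common/pb and is not shown in this diff):

package main

import "fmt"

func main() {
	topic := "flows"
	// Old scheme: suffix derived from the schema hash (example hash taken
	// from the table fixtures above).
	fmt.Printf("%s-%s\n", topic, "I6D3KDQCRUBCNCGF4BSOWTRMVI") // flows-I6D3KDQCRUBCNCGF4BSOWTRMVI
	// New scheme: suffix is the wire-format version; 1 is an assumed value.
	const pbVersion = 1
	fmt.Printf("%s-v%d\n", topic, pbVersion) // flows-v1
}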
@@ -32,6 +32,8 @@ type ServiceType string
 var (
 	// InletService represents the inlet service type
 	InletService ServiceType = "inlet"
+	// OutletService represents the outlet service type
+	OutletService ServiceType = "outlet"
 	// OrchestratorService represents the orchestrator service type
 	OrchestratorService ServiceType = "orchestrator"
 	// ConsoleService represents the console service type
24
outlet/clickhouse/config.go
Normal file
@@ -0,0 +1,24 @@
+// SPDX-FileCopyrightText: 2025 Free Mobile
+// SPDX-License-Identifier: AGPL-3.0-only
+
+package clickhouse
+
+import (
+	"time"
+)
+
+// Configuration describes the configuration for the ClickHouse exporter.
+type Configuration struct {
+	// MaximumBatchSize is the maximum number of rows to send to ClickHouse in one batch.
+	MaximumBatchSize uint `validate:"min=1"`
+	// MaximumWaitTime is the maximum time to wait before sending the current batch.
+	MaximumWaitTime time.Duration `validate:"min=100ms"`
+}
+
+// DefaultConfiguration represents the default configuration for the ClickHouse exporter.
+func DefaultConfiguration() Configuration {
+	return Configuration{
+		MaximumBatchSize: 5000,
+		MaximumWaitTime:  time.Second,
+	}
+}
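For intuition about how the two knobs above interact, a minimal standalone sketch (not part of the commit): a batch is flushed when it reaches MaximumBatchSize rows or when MaximumWaitTime has elapsed since the last flush, whichever comes first.

package main

import (
	"fmt"
	"time"
)

func main() {
	// Toy stand-ins for MaximumBatchSize and MaximumWaitTime.
	const maximumBatchSize = 5
	const maximumWaitTime = 100 * time.Millisecond

	batch := []int{}
	last := time.Now()
	flush := func(reason string) {
		fmt.Printf("flushing %d rows (%s)\n", len(batch), reason)
		batch = batch[:0]
		last = time.Now()
	}

	for i := 0; i < 12; i++ {
		batch = append(batch, i)
		switch {
		case len(batch) >= maximumBatchSize:
			flush("batch full")
		case time.Since(last) >= maximumWaitTime:
			flush("waited too long")
		}
		time.Sleep(30 * time.Millisecond) // simulated arrival rate
	}
	if len(batch) > 0 {
		flush("shutdown") // like Worker.Flush()
	}
}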
140
outlet/clickhouse/example_test.go
Normal file
@@ -0,0 +1,140 @@
+// SPDX-FileCopyrightText: 2016-2023 ClickHouse, Inc.
+// SPDX-License-Identifier: Apache-2.0
+// SPDX-FileComment: This is basically a copy of https://github.com/ClickHouse/ch-go/blob/main/examples/insert/main.go
+
+package clickhouse_test
+
+import (
+	"context"
+	"io"
+	"testing"
+	"time"
+
+	"github.com/ClickHouse/ch-go"
+	"github.com/ClickHouse/ch-go/proto"
+
+	"akvorado/common/helpers"
+)
+
+func TestInsertMemory(t *testing.T) {
+	server := helpers.CheckExternalService(t, "ClickHouse", []string{"clickhouse:9000", "127.0.0.1:9000"})
+	ctx := context.Background()
+
+	conn, err := ch.Dial(ctx, ch.Options{
+		Address:     server,
+		DialTimeout: 100 * time.Millisecond,
+	})
+	if err != nil {
+		t.Fatalf("Dial() error:\n%+v", err)
+	}
+
+	if err := conn.Do(ctx, ch.Query{
+		Body: `CREATE OR REPLACE TABLE test_table_insert
+(
+	ts              DateTime64(9),
+	severity_text   Enum8('INFO'=1, 'DEBUG'=2),
+	severity_number UInt8,
+	service_name    LowCardinality(String),
+	body            String,
+	name            String,
+	arr             Array(String)
+) ENGINE = Memory`,
+	}); err != nil {
+		t.Fatalf("Do() error:\n%+v", err)
+	}
+
+	// Define all columns of table.
+	var (
+		body      proto.ColStr
+		name      proto.ColStr
+		sevText   proto.ColEnum
+		sevNumber proto.ColUInt8
+
+		// or new(proto.ColStr).LowCardinality()
+		serviceName = proto.NewLowCardinality(new(proto.ColStr))
+		ts          = new(proto.ColDateTime64).WithPrecision(proto.PrecisionNano) // DateTime64(9)
+		arr         = new(proto.ColStr).Array()                                  // Array(String)
+		now         = time.Date(2010, 1, 1, 10, 22, 33, 345678, time.UTC)
+	)
+
+	input := proto.Input{
+		{Name: "ts", Data: ts},
+		{Name: "severity_text", Data: &sevText},
+		{Name: "severity_number", Data: &sevNumber},
+		{Name: "service_name", Data: serviceName},
+		{Name: "body", Data: &body},
+		{Name: "name", Data: &name},
+		{Name: "arr", Data: arr},
+	}
+
+	t.Run("one block", func(t *testing.T) {
+		// Append 10 rows to initial data block.
+		for range 10 {
+			body.AppendBytes([]byte("Hello"))
+			ts.Append(now)
+			name.Append("name")
+			sevText.Append("INFO")
+			sevNumber.Append(10)
+			arr.Append([]string{"foo", "bar", "baz"})
+			serviceName.Append("service")
+		}
+
+		// Insert single data block.
+		if err := conn.Do(ctx, ch.Query{
+			Body:  "INSERT INTO test_table_insert VALUES",
+			Input: input,
+		}); err != nil {
+			t.Fatalf("Do() error:\n%+v", err)
+		}
+	})
+
+	t.Run("streaming", func(t *testing.T) {
+		// Stream data to ClickHouse server in multiple data blocks.
+		var blocks int
+		if err := conn.Do(ctx, ch.Query{
+			Body:  input.Into("test_table_insert"), // helper that generates INSERT INTO query with all columns
+			Input: input,
+
+			// OnInput is called to prepare Input data before encoding and sending
+			// to ClickHouse server.
+			OnInput: func(ctx context.Context) error {
+				// On OnInput call, you should fill the input data.
+				//
+				// NB: You should reset the input columns, they are
+				// not reset automatically.
+				//
+				// That is, we are re-using the same input columns and
+				// if we return nil without doing anything, data will be
+				// just duplicated.

+				input.Reset() // calls "Reset" on each column
+
+				if blocks >= 10 {
+					// Stop streaming.
+					//
+					// This will also write trailing input data if any,
+					// but we just reset the input, so it is currently blank.
+					return io.EOF
+				}
+
+				// Append new values:
+				for range 10 {
+					body.AppendBytes([]byte("Hello"))
+					ts.Append(now)
+					name.Append("name")
+					sevText.Append("DEBUG")
+					sevNumber.Append(10)
+					arr.Append([]string{"foo", "bar", "baz"})
+					serviceName.Append("service")
+				}
+
+				// Data will be encoded and sent to ClickHouse server after returning nil.
+				// The Do method will return error if any.
+				blocks++
+				return nil
+			},
+		}); err != nil {
+			t.Fatalf("Do() error:\n%+v", err)
+		}
+	})
+}
211
outlet/clickhouse/functional_test.go
Normal file
@@ -0,0 +1,211 @@
+// SPDX-FileCopyrightText: 2025 Free Mobile
+// SPDX-License-Identifier: AGPL-3.0-only
+
+package clickhouse_test
+
+import (
+	"context"
+	"fmt"
+	"testing"
+	"time"
+
+	clickhousego "github.com/ClickHouse/clickhouse-go/v2"
+
+	"akvorado/common/clickhousedb"
+	"akvorado/common/daemon"
+	"akvorado/common/helpers"
+	"akvorado/common/reporter"
+	"akvorado/common/schema"
+	"akvorado/outlet/clickhouse"
+)
+
+func TestInsert(t *testing.T) {
+	server := helpers.CheckExternalService(t, "ClickHouse", []string{"clickhouse:9000", "127.0.0.1:9000"})
+	r := reporter.NewMock(t)
+	sch := schema.NewMock(t)
+	bf := sch.NewFlowMessage()
+	ctx, cancel := context.WithTimeout(t.Context(), 5*time.Second)
+	defer cancel()
+	ctx = clickhousego.Context(ctx, clickhousego.WithSettings(clickhousego.Settings{
+		"allow_suspicious_low_cardinality_types": 1,
+	}))
+
+	// Create components
+	dbConf := clickhousedb.DefaultConfiguration()
+	dbConf.Servers = []string{server}
+	dbConf.DialTimeout = 100 * time.Millisecond
+	chdb, err := clickhousedb.New(r, dbConf, clickhousedb.Dependencies{
+		Daemon: daemon.NewMock(t),
+	})
+	if err != nil {
+		t.Fatalf("clickhousedb.New() error:\n%+v", err)
+	}
+	helpers.StartStop(t, chdb)
+	conf := clickhouse.DefaultConfiguration()
+	conf.MaximumBatchSize = 10
+	conf.MaximumWaitTime = time.Second
+	ch, err := clickhouse.New(r, conf, clickhouse.Dependencies{
+		ClickHouse: chdb,
+		Schema:     sch,
+	})
+	if err != nil {
+		t.Fatalf("clickhouse.New() error:\n%+v", err)
+	}
+	helpers.StartStop(t, ch)
+
+	// Create table
+	tableName := fmt.Sprintf("flows_%s_raw", sch.ClickHouseHash())
+	err = chdb.Exec(ctx, fmt.Sprintf("CREATE OR REPLACE TABLE %s (%s) ENGINE = Memory", tableName,
+		sch.ClickHouseCreateTable(
+			schema.ClickHouseSkipGeneratedColumns,
+			schema.ClickHouseSkipAliasedColumns)))
+	if err != nil {
+		t.Fatalf("chdb.Exec() error:\n%+v", err)
+	}
+	// Drop any left-over consumer (from orchestrator tests). Otherwise, we get an error like this:
+	// Bad URI syntax: bad or invalid port number: 0
+	err = chdb.Exec(ctx, fmt.Sprintf("DROP TABLE IF EXISTS %s_consumer", tableName))
+	if err != nil {
+		t.Fatalf("chdb.Exec() error:\n%+v", err)
+	}
+
+	// Expected records
+	type result struct {
+		TimeReceived time.Time
+		SrcAS        uint32
+		DstAS        uint32
+		ExporterName string
+		EType        uint32
+	}
+	expected := []result{}
+
+	// Create one worker and send some values
+	w := ch.NewWorker(1, bf)
+	for i := range 23 {
+		i = i + 1
+		// 1: first batch (max time)
+		// 2 to 11: second batch (max batch)
+		// 12 to 15: third batch (max time)
+		// 16 to 23: fourth batch (last one)
+		bf.TimeReceived = uint32(100 + i)
+		bf.SrcAS = uint32(65400 + i)
+		bf.DstAS = uint32(65500 + i)
+		bf.AppendString(schema.ColumnExporterName, fmt.Sprintf("exporter-%d", i))
+		bf.AppendString(schema.ColumnExporterName, "emptyness")
+		bf.AppendUint(schema.ColumnEType, helpers.ETypeIPv6)
+		expected = append(expected, result{
+			TimeReceived: time.Unix(int64(bf.TimeReceived), 0).UTC(),
+			SrcAS:        bf.SrcAS,
+			DstAS:        bf.DstAS,
+			ExporterName: fmt.Sprintf("exporter-%d", i),
+			EType:        helpers.ETypeIPv6,
+		})
+		if i == 15 {
+			time.Sleep(time.Second)
+		}
+		w.FinalizeAndSend(ctx)
+		if i == 23 {
+			w.Flush(ctx)
+		}
+
+		// Check metrics
+		gotMetrics := r.GetMetrics("akvorado_outlet_clickhouse_", "-insert_time", "-wait_time")
+		var expectedMetrics map[string]string
+		if i < 11 {
+			expectedMetrics = map[string]string{
+				"batches_total": "1",
+				"flows_total":   "1",
+			}
+		} else if i < 15 {
+			expectedMetrics = map[string]string{
+				"batches_total": "2",
+				"flows_total":   "11",
+			}
+		} else if i < 23 {
+			expectedMetrics = map[string]string{
+				"batches_total": "3",
+				"flows_total":   "15",
+			}
+		} else {
+			expectedMetrics = map[string]string{
+				"batches_total": "4",
+				"flows_total":   "23",
+			}
+		}
+		if diff := helpers.Diff(gotMetrics, expectedMetrics); diff != "" {
+			t.Errorf("Metrics, iteration %d, (-got, +want):\n%s", i, diff)
+		}
+
+		// Check if we have anything inserted in the table
+		var results []result
+		err := chdb.Select(ctx, &results,
+			fmt.Sprintf("SELECT TimeReceived, SrcAS, DstAS, ExporterName, EType FROM %s ORDER BY TimeReceived ASC", tableName))
+		if err != nil {
+			t.Fatalf("chdb.Select() error:\n%+v", err)
+		}
+		reallyExpected := expected
+		if i < 11 {
+			reallyExpected = expected[:min(len(expected), 1)]
+		} else if i < 15 {
+			reallyExpected = expected[:min(len(expected), 11)]
+		} else if i < 23 {
+			reallyExpected = expected[:min(len(expected), 15)]
+		}
+		if diff := helpers.Diff(results, reallyExpected); diff != "" {
+			t.Fatalf("chdb.Select(), iteration %d, (-got, +want):\n%s", i, diff)
+		}
+	}
+}
+
+func TestMultipleServers(t *testing.T) {
+	servers := []string{
+		helpers.CheckExternalService(t, "ClickHouse", []string{"clickhouse:9000", "127.0.0.1:9000"}),
+	}
+	for range 100 {
+		servers = append(servers, "127.0.0.1:0")
+	}
+	for range 10 {
+		r := reporter.NewMock(t)
+		sch := schema.NewMock(t)
+		ctx, cancel := context.WithTimeout(t.Context(), 5*time.Second)
+		defer cancel()
+		ctx = clickhousego.Context(ctx, clickhousego.WithSettings(clickhousego.Settings{
+			"allow_suspicious_low_cardinality_types": 1,
+		}))
+
+		// Create components
+		dbConf := clickhousedb.DefaultConfiguration()
+		dbConf.Servers = servers
+		dbConf.DialTimeout = 100 * time.Millisecond
+		chdb, err := clickhousedb.New(r, dbConf, clickhousedb.Dependencies{
+			Daemon: daemon.NewMock(t),
+		})
+		if err != nil {
+			t.Fatalf("clickhousedb.New() error:\n%+v", err)
+		}
+		helpers.StartStop(t, chdb)
+		conf := clickhouse.DefaultConfiguration()
+		conf.MaximumBatchSize = 10
+		ch, err := clickhouse.New(r, conf, clickhouse.Dependencies{
+			ClickHouse: chdb,
+			Schema:     sch,
+		})
+		if err != nil {
+			t.Fatalf("clickhouse.New() error:\n%+v", err)
+		}
+		helpers.StartStop(t, ch)
+
+		// Trigger an empty send
+		bf := sch.NewFlowMessage()
+		w := ch.NewWorker(1, bf)
+		w.Flush(ctx)
+
+		// Check metrics
+		gotMetrics := r.GetMetrics("akvorado_outlet_clickhouse_", "errors_total")
+		if gotMetrics[`errors_total{error="connect"}`] == "0" {
+			continue
+		}
+		return
+	}
+	t.Fatalf("w.Flush(): cannot trigger connect error")
+}
48
outlet/clickhouse/metrics.go
Normal file
@@ -0,0 +1,48 @@
+// SPDX-FileCopyrightText: 2025 Free Mobile
+// SPDX-License-Identifier: AGPL-3.0-only
+
+package clickhouse
+
+import "akvorado/common/reporter"
+
+type metrics struct {
+	batches    reporter.Counter
+	flows      reporter.Counter
+	waitTime   reporter.Histogram
+	insertTime reporter.Histogram
+	errors     *reporter.CounterVec
+}
+
+func (c *realComponent) initMetrics() {
+	c.metrics.batches = c.r.Counter(
+		reporter.CounterOpts{
+			Name: "batches_total",
+			Help: "Number of batches of flows sent to ClickHouse",
+		},
+	)
+	c.metrics.flows = c.r.Counter(
+		reporter.CounterOpts{
+			Name: "flows_total",
+			Help: "Number of flows sent to ClickHouse",
+		},
+	)
+	c.metrics.waitTime = c.r.Histogram(
+		reporter.HistogramOpts{
+			Name: "wait_time_seconds",
+			Help: "Time spent waiting before sending a batch to ClickHouse",
+		},
+	)
+	c.metrics.insertTime = c.r.Histogram(
+		reporter.HistogramOpts{
+			Name: "insert_time_seconds",
+			Help: "Time spent inserting data to ClickHouse",
+		},
+	)
+	c.metrics.errors = c.r.CounterVec(
+		reporter.CounterOpts{
+			Name: "errors_total",
+			Help: "Errors while inserting into ClickHouse",
+		},
+		[]string{"error"},
+	)
+}
44
outlet/clickhouse/root.go
Normal file
@@ -0,0 +1,44 @@
+// SPDX-FileCopyrightText: 2025 Free Mobile
+// SPDX-License-Identifier: AGPL-3.0-only
+
+// Package clickhouse handles flow exports to ClickHouse. This component is
+// "inert" and does not track its spawned workers. It is the responsibility of
+// the dependent component to flush data before shutting down.
+package clickhouse
+
+import (
+	"akvorado/common/clickhousedb"
+	"akvorado/common/reporter"
+	"akvorado/common/schema"
+)
+
+// Component is the interface for the ClickHouse exporter component.
+type Component interface {
+	NewWorker(int, *schema.FlowMessage) Worker
+}
+
+// realComponent implements the ClickHouse exporter
+type realComponent struct {
+	r      *reporter.Reporter
+	d      *Dependencies
+	config Configuration
+
+	metrics metrics
+}
+
+// Dependencies defines the dependencies of the ClickHouse exporter
+type Dependencies struct {
+	ClickHouse *clickhousedb.Component
+	Schema     *schema.Component
+}
+
+// New creates a new ClickHouse exporter component.
+func New(r *reporter.Reporter, configuration Configuration, dependencies Dependencies) (Component, error) {
+	c := realComponent{
+		r:      r,
+		d:      &dependencies,
+		config: configuration,
+	}
+	c.initMetrics()
+	return &c, nil
+}
54
outlet/clickhouse/root_test.go
Normal file
@@ -0,0 +1,54 @@
+// SPDX-FileCopyrightText: 2025 Free Mobile
+// SPDX-License-Identifier: AGPL-3.0-only
+
+package clickhouse_test
+
+import (
+	"fmt"
+	"sync"
+	"testing"
+
+	"akvorado/common/helpers"
+	"akvorado/common/schema"
+	"akvorado/outlet/clickhouse"
+)
+
+func TestMock(t *testing.T) {
+	sch := schema.NewMock(t)
+	bf := sch.NewFlowMessage()
+
+	var messages []*schema.FlowMessage
+	var messagesMutex sync.Mutex
+	ch := clickhouse.NewMock(t, func(msg *schema.FlowMessage) {
+		messagesMutex.Lock()
+		defer messagesMutex.Unlock()
+		messages = append(messages, msg)
+	})
+	helpers.StartStop(t, ch)
+
+	expected := []*schema.FlowMessage{}
+	w := ch.NewWorker(1, bf)
+	for i := range 20 {
+		i = i + 1 // 1 to 20
+		bf.TimeReceived = uint32(100 + i)
+		bf.SrcAS = uint32(65400 + i)
+		bf.DstAS = uint32(65500 + i)
+		bf.AppendString(schema.ColumnExporterName, fmt.Sprintf("exporter-%d", i))
+		expected = append(expected, &schema.FlowMessage{
+			TimeReceived: bf.TimeReceived,
+			SrcAS:        bf.SrcAS,
+			DstAS:        bf.DstAS,
+			OtherColumns: map[schema.ColumnKey]any{
+				schema.ColumnExporterName: fmt.Sprintf("exporter-%d", i),
+			},
+		})
+		w.FinalizeAndSend(t.Context())
+
+		// Check if we have anything inserted in the table
+		messagesMutex.Lock()
+		if diff := helpers.Diff(messages, expected); diff != "" {
+			t.Fatalf("Mock(), iteration %d, (-got, +want):\n%s", i, diff)
+		}
+		messagesMutex.Unlock()
+	}
+}
51
outlet/clickhouse/tests.go
Normal file
@@ -0,0 +1,51 @@
+// SPDX-FileCopyrightText: 2025 Free Mobile
+// SPDX-License-Identifier: AGPL-3.0-only
+
+//go:build !release
+
+package clickhouse
+
+import (
+	"context"
+	"testing"
+
+	"akvorado/common/schema"
+)
+
+// mockComponent is a mock version of the ClickHouse exporter.
+type mockComponent struct {
+	callback func(*schema.FlowMessage)
+}
+
+// NewMock creates a new mock exporter that calls the provided callback function with each received flow message.
+func NewMock(_ *testing.T, callback func(*schema.FlowMessage)) Component {
+	return &mockComponent{
+		callback: callback,
+	}
+}
+
+// NewWorker creates a new mock worker.
+func (c *mockComponent) NewWorker(_ int, bf *schema.FlowMessage) Worker {
+	return &mockWorker{
+		c:  c,
+		bf: bf,
+	}
+}
+
+// mockWorker is a mock version of the ClickHouse worker.
+type mockWorker struct {
+	c  *mockComponent
+	bf *schema.FlowMessage
+}
+
+// FinalizeAndSend always "sends" the current flows.
+func (w *mockWorker) FinalizeAndSend(ctx context.Context) {
+	w.Flush(ctx)
+}
+
+// Flush records the sent flows for testing purposes.
+func (w *mockWorker) Flush(_ context.Context) {
+	clone := *w.bf
+	w.c.callback(&clone)
+	w.bf.Clear() // Clear instead of finalizing
+}
146
outlet/clickhouse/worker.go
Normal file
@@ -0,0 +1,146 @@
+// SPDX-FileCopyrightText: 2025 Free Mobile
+// SPDX-License-Identifier: AGPL-3.0-only
+
+package clickhouse
+
+import (
+	"context"
+	"fmt"
+	"math/rand"
+	"time"
+
+	"github.com/ClickHouse/ch-go"
+	"github.com/cenkalti/backoff/v4"
+
+	"akvorado/common/reporter"
+	"akvorado/common/schema"
+)
+
+// Worker represents a worker sending to ClickHouse. It is synchronous (no
+// goroutines) and most functions are bound to a context.
+type Worker interface {
+	FinalizeAndSend(context.Context)
+	Flush(context.Context)
+}
+
+// realWorker is a working implementation of Worker.
+type realWorker struct {
+	c      *realComponent
+	bf     *schema.FlowMessage
+	last   time.Time
+	logger reporter.Logger
+
+	conn    *ch.Client
+	servers []string
+	options ch.Options
+}
+
+// NewWorker creates a new worker to push data to ClickHouse.
+func (c *realComponent) NewWorker(i int, bf *schema.FlowMessage) Worker {
+	opts, servers := c.d.ClickHouse.ChGoOptions()
+	w := realWorker{
+		c:      c,
+		bf:     bf,
+		logger: c.r.With().Int("worker", i).Logger(),
+
+		servers: servers,
+		options: opts,
+	}
+	return &w
+}
+
+// FinalizeAndSend sends data to ClickHouse after finalizing if we have a full batch or exceeded the maximum wait time.
+func (w *realWorker) FinalizeAndSend(ctx context.Context) {
+	w.bf.Finalize()
+	now := time.Now()
+	if w.bf.FlowCount() >= int(w.c.config.MaximumBatchSize) || w.last.Add(w.c.config.MaximumWaitTime).Before(now) {
+		// Record wait time since last send
+		if !w.last.IsZero() {
+			waitTime := now.Sub(w.last)
+			w.c.metrics.waitTime.Observe(waitTime.Seconds())
+		}
+		w.Flush(ctx)
+		w.last = now
+	}
+}
+
+// Flush sends remaining data to ClickHouse without an additional condition. It
+// should be called before shutting down to flush remaining data. Otherwise,
+// FinalizeAndSend() should be used instead.
+func (w *realWorker) Flush(ctx context.Context) {
+	// We try to send as long as possible. The only exit condition is an
+	// expiration of the context.
+	b := backoff.NewExponentialBackOff()
+	b.MaxElapsedTime = 0
+	b.MaxInterval = 30 * time.Second
+	b.InitialInterval = 20 * time.Millisecond
+	backoff.Retry(func() error {
+		// Connect or reconnect if connection is broken.
+		if err := w.connect(ctx); err != nil {
+			w.logger.Err(err).Msg("cannot connect to ClickHouse")
+			return err
+		}
+
+		// Send to ClickHouse in flows_XXXXX_raw.
+		start := time.Now()
+		if err := w.conn.Do(ctx, ch.Query{
+			Body:  w.bf.ClickHouseProtoInput().Into(fmt.Sprintf("flows_%s_raw", w.c.d.Schema.ClickHouseHash())),
+			Input: w.bf.ClickHouseProtoInput(),
+		}); err != nil {
+			w.logger.Err(err).Msg("cannot send batch to ClickHouse")
+			w.c.metrics.errors.WithLabelValues("send").Inc()
+			return err
+		}
+		pushDuration := time.Since(start)
+		w.c.metrics.insertTime.Observe(pushDuration.Seconds())
+		w.c.metrics.batches.Inc()
+		w.c.metrics.flows.Add(float64(w.bf.FlowCount()))
+
+		// Clear batch
+		w.bf.Clear()
+		return nil
+	}, backoff.WithContext(b, ctx))
+}
+
+// connect establishes or reestablishes the connection to ClickHouse.
+func (w *realWorker) connect(ctx context.Context) error {
+	// If connection exists and is healthy, reuse it
+	if w.conn != nil {
+		if err := w.conn.Ping(ctx); err == nil {
+			return nil
+		}
+		// Connection is unhealthy, close it
+		w.conn.Close()
+		w.conn = nil
+	}
+
+	// Try each server until one connects successfully
+	var lastErr error
+	for _, idx := range rand.Perm(len(w.servers)) {
+		w.options.Address = w.servers[idx]
+		conn, err := ch.Dial(ctx, w.options)
+		if err != nil {
+			w.logger.Err(err).Str("server", w.options.Address).Msg("failed to connect to ClickHouse server")
+			w.c.metrics.errors.WithLabelValues("connect").Inc()
+			lastErr = err
+			continue
+		}
+
+		// Test the connection
+		if err := conn.Ping(ctx); err != nil {
+			w.logger.Err(err).Str("server", w.options.Address).Msg("ClickHouse server ping failed")
+			w.c.metrics.errors.WithLabelValues("ping").Inc()
+			conn.Close()
+			conn = nil
+			lastErr = err
+			continue
+		}
+
+		// Success
+		w.conn = conn
+		w.logger.Info().Str("server", w.options.Address).Msg("connected to ClickHouse server")
+		return nil
+	}
+
+	return lastErr
+}
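Since the component is "inert", the caller owns both the loop and the final flush. A minimal sketch of the implied calling pattern; decodeNext is a hypothetical stand-in for the outlet's Kafka consumption and flow decoding:

package main

import (
	"context"

	"akvorado/common/schema"
	"akvorado/outlet/clickhouse"
)

// runWorker drains decoded flows into ClickHouse. decodeNext is hypothetical;
// in the real outlet, the Kafka input fills the reused FlowMessage before
// each call.
func runWorker(ctx context.Context, id int, sch *schema.Component, c clickhouse.Component,
	decodeNext func(*schema.FlowMessage) bool) {
	bf := sch.NewFlowMessage() // one FlowMessage reused for the whole batch
	w := c.NewWorker(id, bf)
	for decodeNext(bf) {
		w.FinalizeAndSend(ctx) // sends only when the batch is full or too old
	}
	w.Flush(ctx) // flush whatever remains before shutting down
}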
@@ -16,8 +16,6 @@ import (
 // Configuration describes the configuration for the core component.
 type Configuration struct {
-	// Number of workers for the core component
-	Workers int `validate:"min=1"`
 	// ExporterClassifiers defines rules for exporter classification
 	ExporterClassifiers []ExporterClassifierRule
 	// InterfaceClassifiers defines rules for interface classification
@@ -39,7 +37,6 @@ type Configuration struct {
 // DefaultConfiguration represents the default configuration for the core component.
 func DefaultConfiguration() Configuration {
 	return Configuration{
-		Workers:                 1,
 		ExporterClassifiers:     []ExporterClassifierRule{},
 		InterfaceClassifiers:    []InterfaceClassifierRule{},
 		ClassifierCacheDuration: 5 * time.Minute,
@@ -154,4 +151,5 @@ func init() {
 	helpers.RegisterMapstructureUnmarshallerHook(ASNProviderUnmarshallerHook())
 	helpers.RegisterMapstructureUnmarshallerHook(NetProviderUnmarshallerHook())
 	helpers.RegisterMapstructureUnmarshallerHook(helpers.SubnetMapUnmarshallerHook[uint]())
+	helpers.RegisterMapstructureDeprecatedFields[Configuration]("Workers")
 }
@@ -19,7 +19,7 @@ type exporterAndInterfaceInfo struct {
 }

 // enrichFlow adds more data to a flow.
-func (c *Component) enrichFlow(exporterIP netip.Addr, exporterStr string, flow *schema.FlowMessage) (skip bool) {
+func (w *worker) enrichFlow(exporterIP netip.Addr, exporterStr string) (skip bool) {
 	var flowExporterName string
 	var flowInIfName, flowInIfDescription, flowOutIfName, flowOutIfDescription string
 	var flowInIfSpeed, flowOutIfSpeed, flowInIfIndex, flowOutIfIndex uint32
@@ -30,6 +30,9 @@ func (c *Component) enrichFlow(exporterIP netip.Addr, exporterStr string, flow *
 	inIfClassification := interfaceClassification{}
 	outIfClassification := interfaceClassification{}

+	flow := w.bf
+	c := w.c
+
 	if flow.InIf != 0 {
 		answer, ok := c.d.Metadata.Lookup(t, exporterIP, uint(flow.InIf))
 		if !ok {
@@ -87,11 +90,11 @@ func (c *Component) enrichFlow(exporterIP netip.Addr, exporterStr string, flow *
 	}

 	if samplingRate, ok := c.config.OverrideSamplingRate.Lookup(exporterIP); ok && samplingRate > 0 {
-		flow.SamplingRate = uint32(samplingRate)
+		flow.SamplingRate = uint64(samplingRate)
 	}
 	if flow.SamplingRate == 0 {
 		if samplingRate, ok := c.config.DefaultSamplingRate.Lookup(exporterIP); ok && samplingRate > 0 {
-			flow.SamplingRate = uint32(samplingRate)
+			flow.SamplingRate = uint64(samplingRate)
 		} else {
 			c.metrics.flowsErrors.WithLabelValues(exporterStr, "sampling rate missing").Inc()
 			skip = true
@@ -128,28 +131,22 @@ func (c *Component) enrichFlow(exporterIP netip.Addr, exporterStr string, flow *
 	// set asns according to user config
 	flow.SrcAS = c.getASNumber(flow.SrcAS, sourceRouting.ASN)
 	flow.DstAS = c.getASNumber(flow.DstAS, destRouting.ASN)
-	if !flow.GotCommunities {
-		for _, comm := range destRouting.Communities {
-			c.d.Schema.ProtobufAppendVarint(flow, schema.ColumnDstCommunities, uint64(comm))
-		}
-	}
-	if !flow.GotASPath {
-		for _, asn := range destRouting.ASPath {
-			c.d.Schema.ProtobufAppendVarint(flow, schema.ColumnDstASPath, uint64(asn))
-		}
-	}
-	for _, comm := range destRouting.LargeCommunities {
-		c.d.Schema.ProtobufAppendVarintForce(flow,
-			schema.ColumnDstLargeCommunitiesASN, uint64(comm.ASN))
-		c.d.Schema.ProtobufAppendVarintForce(flow,
-			schema.ColumnDstLargeCommunitiesLocalData1, uint64(comm.LocalData1))
-		c.d.Schema.ProtobufAppendVarintForce(flow,
-			schema.ColumnDstLargeCommunitiesLocalData2, uint64(comm.LocalData2))
+	flow.AppendArrayUInt32(schema.ColumnDstCommunities, destRouting.Communities)
+	flow.AppendArrayUInt32(schema.ColumnDstASPath, destRouting.ASPath)
+	if len(destRouting.LargeCommunities) > 0 {
+		communities := make([]schema.UInt128, len(destRouting.LargeCommunities))
+		for i, comm := range destRouting.LargeCommunities {
+			communities[i] = schema.UInt128{
+				High: uint64(comm.ASN),
+				Low:  (uint64(comm.LocalData1) << 32) + uint64(comm.LocalData2),
+			}
+		}
+		flow.AppendArrayUInt128(schema.ColumnDstLargeCommunities, communities)
 	}

-	c.d.Schema.ProtobufAppendBytes(flow, schema.ColumnExporterName, []byte(flowExporterName))
-	c.d.Schema.ProtobufAppendVarint(flow, schema.ColumnInIfSpeed, uint64(flowInIfSpeed))
-	c.d.Schema.ProtobufAppendVarint(flow, schema.ColumnOutIfSpeed, uint64(flowOutIfSpeed))
+	flow.AppendString(schema.ColumnExporterName, flowExporterName)
+	flow.AppendUint(schema.ColumnInIfSpeed, uint64(flowInIfSpeed))
+	flow.AppendUint(schema.ColumnOutIfSpeed, uint64(flowOutIfSpeed))

 	return
 }
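For concreteness, the new code packs a BGP large community ASN:LocalData1:LocalData2 into a single 128-bit integer: the ASN occupies the high 64 bits and the two 32-bit local-data fields share the low 64 bits. A standalone check with made-up values:

package main

import "fmt"

func main() {
	// Large community 64512:7:42, packed as in the enricher above.
	var asn, ld1, ld2 uint32 = 64512, 7, 42
	high := uint64(asn)
	low := (uint64(ld1) << 32) + uint64(ld2)
	fmt.Printf("High=%d Low=%#x\n", high, low) // High=64512 Low=0x70000002a
}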
@@ -219,11 +216,11 @@ func (c *Component) writeExporter(flow *schema.FlowMessage, classification expor
 	if classification.Reject {
 		return false
 	}
-	c.d.Schema.ProtobufAppendBytes(flow, schema.ColumnExporterGroup, []byte(classification.Group))
-	c.d.Schema.ProtobufAppendBytes(flow, schema.ColumnExporterRole, []byte(classification.Role))
-	c.d.Schema.ProtobufAppendBytes(flow, schema.ColumnExporterSite, []byte(classification.Site))
-	c.d.Schema.ProtobufAppendBytes(flow, schema.ColumnExporterRegion, []byte(classification.Region))
-	c.d.Schema.ProtobufAppendBytes(flow, schema.ColumnExporterTenant, []byte(classification.Tenant))
+	flow.AppendString(schema.ColumnExporterGroup, classification.Group)
+	flow.AppendString(schema.ColumnExporterRole, classification.Role)
+	flow.AppendString(schema.ColumnExporterSite, classification.Site)
+	flow.AppendString(schema.ColumnExporterRegion, classification.Region)
+	flow.AppendString(schema.ColumnExporterTenant, classification.Tenant)
 	return true
 }

@@ -264,17 +261,17 @@ func (c *Component) writeInterface(flow *schema.FlowMessage, classification inte
         return false
     }
     if directionIn {
-        c.d.Schema.ProtobufAppendBytes(flow, schema.ColumnInIfName, []byte(classification.Name))
-        c.d.Schema.ProtobufAppendBytes(flow, schema.ColumnInIfDescription, []byte(classification.Description))
-        c.d.Schema.ProtobufAppendBytes(flow, schema.ColumnInIfConnectivity, []byte(classification.Connectivity))
-        c.d.Schema.ProtobufAppendBytes(flow, schema.ColumnInIfProvider, []byte(classification.Provider))
-        c.d.Schema.ProtobufAppendVarint(flow, schema.ColumnInIfBoundary, uint64(classification.Boundary))
+        flow.AppendString(schema.ColumnInIfName, classification.Name)
+        flow.AppendString(schema.ColumnInIfDescription, classification.Description)
+        flow.AppendString(schema.ColumnInIfConnectivity, classification.Connectivity)
+        flow.AppendString(schema.ColumnInIfProvider, classification.Provider)
+        flow.AppendUint(schema.ColumnInIfBoundary, uint64(classification.Boundary))
     } else {
-        c.d.Schema.ProtobufAppendBytes(flow, schema.ColumnOutIfName, []byte(classification.Name))
-        c.d.Schema.ProtobufAppendBytes(flow, schema.ColumnOutIfDescription, []byte(classification.Description))
-        c.d.Schema.ProtobufAppendBytes(flow, schema.ColumnOutIfConnectivity, []byte(classification.Connectivity))
-        c.d.Schema.ProtobufAppendBytes(flow, schema.ColumnOutIfProvider, []byte(classification.Provider))
-        c.d.Schema.ProtobufAppendVarint(flow, schema.ColumnOutIfBoundary, uint64(classification.Boundary))
+        flow.AppendString(schema.ColumnOutIfName, classification.Name)
+        flow.AppendString(schema.ColumnOutIfDescription, classification.Description)
+        flow.AppendString(schema.ColumnOutIfConnectivity, classification.Connectivity)
+        flow.AppendString(schema.ColumnOutIfProvider, classification.Provider)
+        flow.AppendUint(schema.ColumnOutIfBoundary, uint64(classification.Boundary))
     }
     return true
 }
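The two hunks above capture the heart of the enricher change: instead of serializing each classified value straight into a protobuf message via the schema component, the enricher now appends typed values onto the reusable FlowMessage, whose hidden internal state becomes the next ClickHouse batch insert. A minimal before/after sketch, using only the method names visible in this diff:

    // Before: each value went through the schema component into the
    // protobuf destined for Kafka, with an explicit []byte conversion.
    c.d.Schema.ProtobufAppendBytes(flow, schema.ColumnExporterSite, []byte(classification.Site))
    c.d.Schema.ProtobufAppendVarint(flow, schema.ColumnInIfBoundary, uint64(classification.Boundary))

    // After: values are appended directly onto the reusable FlowMessage,
    // which accumulates the ClickHouse batch request; strings no longer
    // need an intermediate []byte conversion.
    flow.AppendString(schema.ColumnExporterSite, classification.Site)
    flow.AppendUint(schema.ColumnInIfBoundary, uint64(classification.Boundary))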
@@ -4,24 +4,29 @@
 package core

 import (
+    "bytes"
+    "encoding/gob"
     "fmt"
     "net/netip"
+    "sync"
     "testing"
     "time"

-    "github.com/IBM/sarama"
     "github.com/gin-gonic/gin"
     "github.com/go-viper/mapstructure/v2"
+    "google.golang.org/protobuf/proto"

     "akvorado/common/daemon"
     "akvorado/common/helpers"
     "akvorado/common/httpserver"
+    "akvorado/common/pb"
     "akvorado/common/reporter"
     "akvorado/common/schema"
-    "akvorado/inlet/flow"
-    "akvorado/inlet/kafka"
-    "akvorado/inlet/metadata"
-    "akvorado/inlet/routing"
+    "akvorado/outlet/clickhouse"
+    "akvorado/outlet/flow"
+    "akvorado/outlet/kafka"
+    "akvorado/outlet/metadata"
+    "akvorado/outlet/routing"
 )

 func TestEnrich(t *testing.T) {
@@ -44,8 +49,10 @@ func TestEnrich(t *testing.T) {
             },
             OutputFlow: &schema.FlowMessage{
                 SamplingRate:    1000,
+                InIf:            100,
+                OutIf:           200,
                 ExporterAddress: netip.MustParseAddr("::ffff:192.0.2.142"),
-                ProtobufDebug: map[schema.ColumnKey]interface{}{
+                OtherColumns: map[schema.ColumnKey]interface{}{
                     schema.ColumnExporterName: "192_0_2_142",
                     schema.ColumnInIfName:     "Gi0/0/100",
                     schema.ColumnOutIfName:    "Gi0/0/200",
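The same two-part edit repeats mechanically in every test case that follows: the expectation map is renamed from ProtobufDebug (named for the protobuf encoding it used to mirror) to OtherColumns (columns that are not first-class FlowMessage fields), and the interface indexes are promoted into the typed InIf/OutIf fields. The asserted shape after the change, with values from the case above:

    // Sketch of the post-change expectation: typed fields for the core
    // values, a map only for the remaining columns.
    expected := &schema.FlowMessage{
        SamplingRate:    1000,
        InIf:            100, // previously only implied by the protobuf debug map
        OutIf:           200,
        ExporterAddress: netip.MustParseAddr("::ffff:192.0.2.142"),
        OtherColumns: map[schema.ColumnKey]interface{}{
            schema.ColumnExporterName: "192_0_2_142",
        },
    }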
@@ -72,8 +79,10 @@ func TestEnrich(t *testing.T) {
             },
             OutputFlow: &schema.FlowMessage{
                 SamplingRate:    500,
+                InIf:            100,
+                OutIf:           200,
                 ExporterAddress: netip.MustParseAddr("::ffff:192.0.2.142"),
-                ProtobufDebug: map[schema.ColumnKey]interface{}{
+                OtherColumns: map[schema.ColumnKey]interface{}{
                     schema.ColumnExporterName: "192_0_2_142",
                     schema.ColumnInIfName:     "Gi0/0/100",
                     schema.ColumnOutIfName:    "Gi0/0/200",
@@ -95,8 +104,10 @@ func TestEnrich(t *testing.T) {
             },
             OutputFlow: &schema.FlowMessage{
                 SamplingRate:    500,
+                InIf:            100,
+                OutIf:           200,
                 ExporterAddress: netip.MustParseAddr("::ffff:192.0.2.142"),
-                ProtobufDebug: map[schema.ColumnKey]interface{}{
+                OtherColumns: map[schema.ColumnKey]interface{}{
                     schema.ColumnExporterName: "192_0_2_142",
                     schema.ColumnInIfName:     "Gi0/0/100",
                     schema.ColumnOutIfName:    "Gi0/0/200",
@@ -122,8 +133,10 @@ func TestEnrich(t *testing.T) {
             },
             OutputFlow: &schema.FlowMessage{
                 SamplingRate:    500,
+                InIf:            100,
+                OutIf:           200,
                 ExporterAddress: netip.MustParseAddr("::ffff:192.0.2.142"),
-                ProtobufDebug: map[schema.ColumnKey]interface{}{
+                OtherColumns: map[schema.ColumnKey]interface{}{
                     schema.ColumnExporterName: "192_0_2_142",
                     schema.ColumnInIfName:     "Gi0/0/100",
                     schema.ColumnOutIfName:    "Gi0/0/200",
@@ -152,8 +165,10 @@ func TestEnrich(t *testing.T) {
             },
             OutputFlow: &schema.FlowMessage{
                 SamplingRate:    1000,
+                InIf:            100,
+                OutIf:           200,
                 ExporterAddress: netip.MustParseAddr("::ffff:192.0.2.142"),
-                ProtobufDebug: map[schema.ColumnKey]interface{}{
+                OtherColumns: map[schema.ColumnKey]interface{}{
                     schema.ColumnExporterName:   "192_0_2_142",
                     schema.ColumnExporterRegion: "asia",
                     schema.ColumnExporterTenant: "alfred",
@@ -184,8 +199,10 @@ func TestEnrich(t *testing.T) {
             },
             OutputFlow: &schema.FlowMessage{
                 SamplingRate:    1000,
+                InIf:            100,
+                OutIf:           200,
                 ExporterAddress: netip.MustParseAddr("::ffff:192.0.2.142"),
-                ProtobufDebug: map[schema.ColumnKey]interface{}{
+                OtherColumns: map[schema.ColumnKey]interface{}{
                     schema.ColumnExporterName:   "192_0_2_142",
                     schema.ColumnExporterTenant: "alfred",
                     schema.ColumnInIfName:       "Gi0/0/100",
@@ -246,8 +263,10 @@ func TestEnrich(t *testing.T) {
             },
             OutputFlow: &schema.FlowMessage{
                 SamplingRate:    1000,
+                InIf:            100,
+                OutIf:           200,
                 ExporterAddress: netip.MustParseAddr("::ffff:192.0.2.142"),
-                ProtobufDebug: map[schema.ColumnKey]interface{}{
+                OtherColumns: map[schema.ColumnKey]interface{}{
                     schema.ColumnExporterName:  "192_0_2_142",
                     schema.ColumnInIfProvider:  "index1",
                     schema.ColumnOutIfProvider: "index2",
@@ -277,8 +296,10 @@ func TestEnrich(t *testing.T) {
             },
             OutputFlow: &schema.FlowMessage{
                 SamplingRate:    1000,
+                InIf:            100,
+                OutIf:           200,
                 ExporterAddress: netip.MustParseAddr("::ffff:192.0.2.142"),
-                ProtobufDebug: map[schema.ColumnKey]interface{}{
+                OtherColumns: map[schema.ColumnKey]interface{}{
                     schema.ColumnExporterName: "192_0_2_142",
                     schema.ColumnInIfName:     "eth100",
                     schema.ColumnOutIfName:    "Gi0/0/200",
@@ -300,15 +321,19 @@
                     SamplingRate:    1000,
                     ExporterAddress: netip.MustParseAddr("::ffff:192.0.2.142"),
                     InIf:            100,
-                    SrcVlan:         10,
                     OutIf:           200,
+                    SrcVlan:         10,
                     DstVlan:         300,
                 }
             },
             OutputFlow: &schema.FlowMessage{
                 SamplingRate:    1000,
+                InIf:            100,
+                OutIf:           200,
+                SrcVlan:         10,
+                DstVlan:         300,
                 ExporterAddress: netip.MustParseAddr("::ffff:192.0.2.142"),
-                ProtobufDebug: map[schema.ColumnKey]interface{}{
+                OtherColumns: map[schema.ColumnKey]interface{}{
                     schema.ColumnExporterName: "192_0_2_142",
                     schema.ColumnInIfName:     "Gi0/0/100",
                     schema.ColumnOutIfName:    "Gi0/0/200.300",
@@ -340,8 +365,10 @@ ClassifyProviderRegex(Interface.Description, "^Transit: ([^ ]+)", "$1")`,
             },
             OutputFlow: &schema.FlowMessage{
                 SamplingRate:    1000,
+                InIf:            100,
+                OutIf:           200,
                 ExporterAddress: netip.MustParseAddr("::ffff:192.0.2.142"),
-                ProtobufDebug: map[schema.ColumnKey]interface{}{
+                OtherColumns: map[schema.ColumnKey]interface{}{
                     schema.ColumnExporterName: "192_0_2_142",
                     schema.ColumnInIfName:     "Gi0/0/100",
                     schema.ColumnOutIfName:    "Gi0/0/200",
@@ -371,8 +398,10 @@ ClassifyProviderRegex(Interface.Description, "^Transit: ([^ ]+)", "$1")`,
             },
             OutputFlow: &schema.FlowMessage{
                 SamplingRate:    1000,
+                InIf:            100,
+                OutIf:           200,
                 ExporterAddress: netip.MustParseAddr("::ffff:192.0.2.142"),
-                ProtobufDebug: map[schema.ColumnKey]interface{}{
+                OtherColumns: map[schema.ColumnKey]interface{}{
                     schema.ColumnExporterName: "192_0_2_142",
                     schema.ColumnInIfName:     "Gi0/0/100",
                     schema.ColumnOutIfName:    "Gi0/0/200",
@@ -402,8 +431,10 @@ ClassifyProviderRegex(Interface.Description, "^Transit: ([^ ]+)", "$1")`,
             },
             OutputFlow: &schema.FlowMessage{
                 SamplingRate:    1000,
+                InIf:            100,
+                OutIf:           200,
                 ExporterAddress: netip.MustParseAddr("::ffff:192.0.2.142"),
-                ProtobufDebug: map[schema.ColumnKey]interface{}{
+                OtherColumns: map[schema.ColumnKey]interface{}{
                     schema.ColumnExporterName: "192_0_2_142",
                     schema.ColumnInIfName:     "Gi0/0/100",
                     schema.ColumnOutIfName:    "Gi0/0/200",
@@ -434,8 +465,10 @@ ClassifyProviderRegex(Interface.Description, "^Transit: ([^ ]+)", "$1")`,
             },
             OutputFlow: &schema.FlowMessage{
                 SamplingRate:    1000,
+                InIf:            100,
+                OutIf:           200,
                 ExporterAddress: netip.MustParseAddr("::ffff:192.0.2.142"),
-                ProtobufDebug: map[schema.ColumnKey]interface{}{
+                OtherColumns: map[schema.ColumnKey]interface{}{
                     schema.ColumnExporterName: "192_0_2_142",
                     schema.ColumnInIfName:     "Gi0/0/100",
                     schema.ColumnOutIfName:    "Gi0/0/200",
@@ -471,8 +504,10 @@ ClassifyProviderRegex(Interface.Description, "^Transit: ([^ ]+)", "$1")`,
             },
             OutputFlow: &schema.FlowMessage{
                 SamplingRate:    1000,
+                InIf:            1010,
+                OutIf:           2010,
                 ExporterAddress: netip.MustParseAddr("::ffff:192.0.2.142"),
-                ProtobufDebug: map[schema.ColumnKey]interface{}{
+                OtherColumns: map[schema.ColumnKey]interface{}{
                     schema.ColumnExporterName:   "192_0_2_142",
                     schema.ColumnExporterGroup:  "metadata group",
                     schema.ColumnExporterRegion: "metadata region",
@@ -509,26 +544,28 @@ ClassifyProviderRegex(Interface.Description, "^Transit: ([^ ]+)", "$1")`,
             },
             OutputFlow: &schema.FlowMessage{
                 SamplingRate:    1000,
+                InIf:            100,
+                OutIf:           200,
                 ExporterAddress: netip.MustParseAddr("::ffff:192.0.2.142"),
                 SrcAddr:         netip.MustParseAddr("::ffff:192.0.2.142"),
                 DstAddr:         netip.MustParseAddr("::ffff:192.0.2.10"),
                 SrcAS:           1299,
                 DstAS:           174,
-                ProtobufDebug: map[schema.ColumnKey]interface{}{
-                    schema.ColumnExporterName:                  "192_0_2_142",
-                    schema.ColumnInIfName:                      "Gi0/0/100",
-                    schema.ColumnOutIfName:                     "Gi0/0/200",
-                    schema.ColumnInIfDescription:               "Interface 100",
-                    schema.ColumnOutIfDescription:              "Interface 200",
-                    schema.ColumnInIfSpeed:                     1000,
-                    schema.ColumnOutIfSpeed:                    1000,
-                    schema.ColumnDstASPath:                     []uint32{64200, 1299, 174},
-                    schema.ColumnDstCommunities:                []uint32{100, 200, 400},
-                    schema.ColumnDstLargeCommunitiesASN:        []int32{64200},
-                    schema.ColumnDstLargeCommunitiesLocalData1: []int32{2},
-                    schema.ColumnDstLargeCommunitiesLocalData2: []int32{3},
-                    schema.ColumnSrcNetMask:                    27,
-                    schema.ColumnDstNetMask:                    27,
+                SrcNetMask:      27,
+                DstNetMask:      27,
+                OtherColumns: map[schema.ColumnKey]interface{}{
+                    schema.ColumnExporterName:     "192_0_2_142",
+                    schema.ColumnInIfName:         "Gi0/0/100",
+                    schema.ColumnOutIfName:        "Gi0/0/200",
+                    schema.ColumnInIfDescription:  "Interface 100",
+                    schema.ColumnOutIfDescription: "Interface 200",
+                    schema.ColumnInIfSpeed:        1000,
+                    schema.ColumnOutIfSpeed:       1000,
+                    schema.ColumnDstASPath:        []uint32{64200, 1299, 174},
+                    schema.ColumnDstCommunities:   []uint32{100, 200, 400},
+                    schema.ColumnDstLargeCommunities: []schema.UInt128{
+                        {High: 64200, Low: (uint64(2) << 32) + uint64(3)},
+                    },
                 },
             },
         },
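This last case shows the representation change for BGP large communities: the three parallel arrays (ASN, LocalData1, LocalData2) collapse into a single []schema.UInt128, and the netmasks move from map entries into the typed SrcNetMask/DstNetMask fields. A large community (RFC 8092) is three 32-bit words, so one value fits comfortably in 128 bits; the expected value above implies the following packing (packLargeCommunity is a hypothetical helper, not code from the repository):

    // Pack GlobalAdmin:LocalData1:LocalData2 into one UInt128: the global
    // administrator ASN occupies the high 64 bits, the two local data
    // words share the low 64 bits.
    func packLargeCommunity(asn, localData1, localData2 uint32) schema.UInt128 {
        return schema.UInt128{
            High: uint64(asn),
            Low:  uint64(localData1)<<32 | uint64(localData2),
        }
    }

packLargeCommunity(64200, 2, 3) yields {High: 64200, Low: (2 << 32) + 3}, exactly the expectation in the test case above.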
@@ -541,11 +578,21 @@ ClassifyProviderRegex(Interface.Description, "^Transit: ([^ ]+)", "$1")`,
             daemonComponent := daemon.NewMock(t)
             metadataComponent := metadata.NewMock(t, r, metadata.DefaultConfiguration(),
                 metadata.Dependencies{Daemon: daemonComponent})
-            flowComponent := flow.NewMock(t, r, flow.DefaultConfiguration())
-            kafkaComponent, kafkaProducer := kafka.NewMock(t, r, kafka.DefaultConfiguration())
+            flowComponent, err := flow.New(r, flow.Dependencies{Schema: schema.NewMock(t)})
+            if err != nil {
+                t.Fatalf("flow.New() error:\n%+v", err)
+            }
             httpComponent := httpserver.NewMock(t, r)
             routingComponent := routing.NewMock(t, r)
             routingComponent.PopulateRIB(t)
+            kafkaComponent, incoming := kafka.NewMock(t, kafka.DefaultConfiguration())
+            var clickhouseMessages []*schema.FlowMessage
+            var clickhouseMessagesMutex sync.Mutex
+            clickhouseComponent := clickhouse.NewMock(t, func(msg *schema.FlowMessage) {
+                clickhouseMessagesMutex.Lock()
+                defer clickhouseMessagesMutex.Unlock()
+                clickhouseMessages = append(clickhouseMessages, msg)
+            })

             // Prepare a configuration
             configuration := DefaultConfiguration()
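The test scaffolding mirrors the new pipeline boundaries: kafka.NewMock now returns a channel for pushing raw Kafka payloads into the outlet (instead of a sarama mock producer to assert on), and clickhouse.NewMock takes a callback invoked for each flow that would have been inserted. Collecting flows under a mutex, as above, lets the test assert on the last message. A hypothetical variant that signals arrival instead of relying on fixed sleeps, using the same mock signature:

    // Not code from the repository: deliver each would-be ClickHouse row
    // to the test through a buffered channel, so the test can wait with a
    // timeout rather than sleeping.
    flows := make(chan *schema.FlowMessage, 16)
    clickhouseComponent := clickhouse.NewMock(t, func(msg *schema.FlowMessage) {
        flows <- msg
    })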
@@ -559,55 +606,70 @@ ClassifyProviderRegex(Interface.Description, "^Transit: ([^ ]+)", "$1")`,

             // Instantiate and start core
             c, err := New(r, configuration, Dependencies{
                 Daemon:   daemonComponent,
                 Flow:     flowComponent,
                 Metadata: metadataComponent,
                 Kafka:    kafkaComponent,
-                HTTP:     httpComponent,
-                Routing:  routingComponent,
-                Schema:   schema.NewMock(t),
+                ClickHouse: clickhouseComponent,
+                HTTP:       httpComponent,
+                Routing:    routingComponent,
+                Schema:     schema.NewMock(t),
             })
             if err != nil {
                 t.Fatalf("New() error:\n%+v", err)
             }
-            helpers.StartStop(t, c)

-            // Inject twice since otherwise, we get a cache miss
-            received := make(chan bool)
-            if tc.OutputFlow != nil {
-                kafkaProducer.ExpectInputWithMessageCheckerFunctionAndSucceed(
-                    func(msg *sarama.ProducerMessage) error {
-                        defer close(received)
-                        b, err := msg.Value.Encode()
-                        if err != nil {
-                            t.Fatalf("Kafka message encoding error:\n%+v", err)
-                        }
-                        t.Logf("Raw message: %v", b)
-                        got := c.d.Schema.ProtobufDecode(t, b)
-                        if diff := helpers.Diff(&got, tc.OutputFlow); diff != "" {
-                            t.Errorf("Classifier (-got, +want):\n%s", diff)
-                        }
-                        return nil
-                    })
+            helpers.StartStop(t, c)
+            clickhouseMessagesMutex.Lock()
+            clickhouseMessages = clickhouseMessages[:0]
+            clickhouseMessagesMutex.Unlock()
+
+            inputFlow := tc.InputFlow()
+            var buf bytes.Buffer
+            encoder := gob.NewEncoder(&buf)
+            if err := encoder.Encode(inputFlow); err != nil {
+                t.Fatalf("gob.Encode() error: %v", err)
             }
-            // Else, we should not get a message, but that's not possible to test.
-            flowComponent.Inject(tc.InputFlow())
-            time.Sleep(50 * time.Millisecond) // Needed to let poller does its job
-            flowComponent.Inject(tc.InputFlow())
-            if tc.OutputFlow != nil {
-                select {
-                case <-received:
-                case <-time.After(1 * time.Second):
-                    t.Fatal("Kafka message not received")
-                }
+            rawFlow := &pb.RawFlow{
+                TimeReceived:     uint64(time.Now().Unix()),
+                Payload:          buf.Bytes(),
+                SourceAddress:    inputFlow.ExporterAddress.AsSlice(),
+                UseSourceAddress: false,
+                Decoder:          pb.RawFlow_DECODER_GOB,
+                TimestampSource:  pb.RawFlow_TS_INPUT,
+            }

-            } else {
-                time.Sleep(100 * time.Millisecond)
+            data, err := proto.Marshal(rawFlow)
+            if err != nil {
+                t.Fatalf("proto.Marshal() error: %v", err)
+            }
+
+            // Test twice to check cache behavior
+            incoming <- data
+            time.Sleep(100 * time.Millisecond)
+            incoming <- data
+            time.Sleep(100 * time.Millisecond)
+
+            clickhouseMessagesMutex.Lock()
+            clickhouseMessagesLen := len(clickhouseMessages)
+            var lastMessage *schema.FlowMessage
+            if clickhouseMessagesLen > 0 {
+                lastMessage = clickhouseMessages[clickhouseMessagesLen-1]
+            }
+            clickhouseMessagesMutex.Unlock()
+
+            if tc.OutputFlow != nil && clickhouseMessagesLen > 0 {
+                if diff := helpers.Diff(lastMessage, tc.OutputFlow); diff != "" {
+                    t.Errorf("Enriched flow differs (-got, +want):\n%s", diff)
+                }
             }
-            gotMetrics := r.GetMetrics("akvorado_inlet_core_", "-processing_", "flows_", "received_", "forwarded_")
+            gotMetrics := r.GetMetrics("akvorado_outlet_core_", "-processing_", "flows_", "received_", "forwarded_")
             expectedMetrics := map[string]string{
                 `flows_errors_total{error="SNMP cache miss",exporter="192.0.2.142"}`: "1",
                 `flows_http_clients`: "0",
                 `received_flows_total{exporter="192.0.2.142"}`: "2",
+                `received_raw_flows_total`: "2",
             }
             if tc.OutputFlow != nil {
                 expectedMetrics[`forwarded_flows_total{exporter="192.0.2.142"}`] = "1"
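The injection path now exercises the real inlet-to-outlet contract: the test gob-encodes the input flow, wraps it in the pb.RawFlow envelope the inlet would normally produce (carrying the payload, the exporter address, which decoder to run, and which timestamp to trust), marshals it with protobuf, and pushes the bytes into the mock Kafka channel. Extracted as a helper, the envelope construction looks roughly like this (newRawFlow is hypothetical; the fields are the ones visible in the hunk above):

    func newRawFlow(payload []byte, exporter netip.Addr) ([]byte, error) {
        rawFlow := &pb.RawFlow{
            TimeReceived:     uint64(time.Now().Unix()), // reception time at the inlet
            Payload:          payload,                   // undecoded flow bytes
            SourceAddress:    exporter.AsSlice(),        // exporter address as raw bytes
            UseSourceAddress: false,
            Decoder:          pb.RawFlow_DECODER_GOB, // decoder the outlet should apply
            TimestampSource:  pb.RawFlow_TS_INPUT,    // trust the timestamp in the envelope
        }
        return proto.Marshal(rawFlow)
    }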
@@ -18,7 +18,7 @@ type flowsParameters struct {
 }

 // FlowsHTTPHandler streams a JSON copy of all flows just after
-// sending them to Kafka. Under load, some flows may not be sent. This
+// sending them to ClickHouse. Under load, some flows may not be sent. This
 // is intended for debug only.
 func (c *Component) FlowsHTTPHandler(gc *gin.Context) {
     var params flowsParameters
@@ -27,7 +27,7 @@ func (c *Component) FlowsHTTPHandler(gc *gin.Context) {
         gc.JSON(http.StatusBadRequest, gin.H{"message": helpers.Capitalize(err.Error())})
         return
     }
-    format := gc.NegotiateFormat("application/json", "application/x-protobuf")
+    dying := c.t.Dying()

     atomic.AddUint32(&c.httpFlowClients, 1)
     defer atomic.AddUint32(&c.httpFlowClients, ^uint32(0))
@@ -40,23 +40,15 @@ func (c *Component) FlowsHTTPHandler(gc *gin.Context) {

     for {
         select {
-        case <-c.t.Dying():
+        case <-dying:
             return
         case <-gc.Request.Context().Done():
             return
         case msg := <-c.httpFlowChannel:
-            switch format {
-            case "application/json":
-                if params.Limit == 1 {
-                    gc.IndentedJSON(http.StatusOK, msg)
-                } else {
-                    gc.JSON(http.StatusOK, msg)
-                    gc.Writer.Write([]byte("\n"))
-                }
-            case "application/x-protobuf":
-                gc.Set("Content-Type", format)
-                gc.Writer.Write(msg.Bytes())
-            }
+            gc.Header("Content-Type", "application/json")
+            gc.Status(http.StatusOK)
+            gc.Writer.Write(msg)
+            gc.Writer.Write([]byte("\n"))

             count++
             if params.Limit > 0 && count == params.Limit {
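With content negotiation gone, the debug endpoint always streams newline-delimited JSON, one flow per line, and the protobuf branch disappears along with the special casing of limit=1. A hedged consumption sketch (the URL path and port here are hypothetical; substitute the actual flows endpoint of your deployment):

    package main

    import (
        "bufio"
        "fmt"
        "log"
        "net/http"
    )

    func main() {
        // Each line of the response body is one flow rendered as JSON.
        resp, err := http.Get("http://127.0.0.1:8080/api/v0/flows?limit=5")
        if err != nil {
            log.Fatal(err)
        }
        defer resp.Body.Close()
        scanner := bufio.NewScanner(resp.Body)
        for scanner.Scan() {
            fmt.Println(scanner.Text())
        }
    }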