common/schema: turns into a component

This is a first step toward making it accept configuration. Most of the
changes are quite trivial, but I also ran into some difficulties with
query columns and filters. They need the schema for parsing, but parsing
happens before dependencies are instantiated (and even if it were not
the case, parsing is stateless). Therefore, I have added a `Validate()`
method that must be called after instantiation. Various bits `panic()`
if not validated, to ensure we catch all cases.
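
To make the pattern concrete, here is a minimal sketch of the two-phase
lifecycle, condensed from the new console/query package below (the
Columns helpers and the ConsoleNotDimension check are omitted):

    // A query column is parsed statelessly, then bound to a schema later.
    type Column struct {
        validated bool
        name      string
        key       schema.ColumnKey
    }

    // UnmarshalText runs while the configuration is parsed, before the
    // schema component exists, so it can only record the name.
    func (qc *Column) UnmarshalText(input []byte) error {
        *qc = Column{name: string(input)}
        return nil
    }

    // Validate binds the column to the schema component; it must be
    // called once dependencies are instantiated.
    func (qc *Column) Validate(sch *schema.Component) error {
        column, ok := sch.LookupColumnByName(qc.name)
        if !ok {
            return fmt.Errorf("unknown column name %s", qc.name)
        }
        qc.key = column.Key
        qc.validated = true
        return nil
    }

    // Key panics when Validate() was skipped, so missed call sites fail loudly.
    func (qc *Column) Key() schema.ColumnKey {
        if !qc.validated {
            panic("query column not validated")
        }
        return qc.key
    }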

The alternative, making the component manage a global state, would have
been simpler, but it would break once we add the ability to add or
disable columns.
Vincent Bernat
2023-01-18 12:14:23 +01:00
parent 3c55f90fdf
commit c6a9319b57
58 changed files with 1049 additions and 655 deletions

View File

@@ -12,6 +12,7 @@ import (
     "akvorado/common/daemon"
    "akvorado/common/http"
    "akvorado/common/reporter"
+   "akvorado/common/schema"
    "akvorado/console"
    "akvorado/console/authentication"
    "akvorado/console/database"
@@ -103,12 +104,17 @@ func consoleStart(r *reporter.Reporter, config ConsoleConfiguration, checkOnly b
    if err != nil {
        return fmt.Errorf("unable to initialize database component: %w", err)
    }
+   schemaComponent, err := schema.New()
+   if err != nil {
+       return fmt.Errorf("unable to initialize schema component: %w", err)
+   }
    consoleComponent, err := console.New(r, config.Console, console.Dependencies{
        Daemon:       daemonComponent,
        HTTP:         httpComponent,
        ClickHouseDB: clickhouseComponent,
        Auth:         authenticationComponent,
        Database:     databaseComponent,
+       Schema:       schemaComponent,
    })
    if err != nil {
        return fmt.Errorf("unable to initialize console component: %w", err)

View File

@@ -11,6 +11,7 @@ import (
    "akvorado/common/daemon"
    "akvorado/common/http"
    "akvorado/common/reporter"
+   "akvorado/common/schema"
    "akvorado/inlet/bmp"
    "akvorado/inlet/core"
    "akvorado/inlet/flow"
@@ -95,9 +96,14 @@ func inletStart(r *reporter.Reporter, config InletConfiguration, checkOnly bool)
    if err != nil {
        return fmt.Errorf("unable to initialize http component: %w", err)
    }
+   schemaComponent, err := schema.New()
+   if err != nil {
+       return fmt.Errorf("unable to initialize schema component: %w", err)
+   }
    flowComponent, err := flow.New(r, config.Flow, flow.Dependencies{
        Daemon: daemonComponent,
        HTTP:   httpComponent,
+       Schema: schemaComponent,
    })
    if err != nil {
        return fmt.Errorf("unable to initialize flow component: %w", err)
@@ -122,6 +128,7 @@ func inletStart(r *reporter.Reporter, config InletConfiguration, checkOnly bool)
    }
    kafkaComponent, err := kafka.New(r, config.Kafka, kafka.Dependencies{
        Daemon: daemonComponent,
+       Schema: schemaComponent,
    })
    if err != nil {
        return fmt.Errorf("unable to initialize Kafka component: %w", err)
@@ -134,6 +141,7 @@ func inletStart(r *reporter.Reporter, config InletConfiguration, checkOnly bool)
        GeoIP:  geoipComponent,
        Kafka:  kafkaComponent,
        HTTP:   httpComponent,
+       Schema: schemaComponent,
    })
    if err != nil {
        return fmt.Errorf("unable to initialize core component: %w", err)

View File

@@ -12,6 +12,7 @@ import (
    "akvorado/common/daemon"
    "akvorado/common/http"
    "akvorado/common/reporter"
+   "akvorado/common/schema"
    "akvorado/orchestrator"
    "akvorado/orchestrator/clickhouse"
    "akvorado/orchestrator/kafka"
@@ -111,7 +112,11 @@ func orchestratorStart(r *reporter.Reporter, config OrchestratorConfiguration, c
    if err != nil {
        return fmt.Errorf("unable to initialize HTTP component: %w", err)
    }
-   kafkaComponent, err := kafka.New(r, config.Kafka)
+   schemaComponent, err := schema.New()
+   if err != nil {
+       return fmt.Errorf("unable to initialize schema component: %w", err)
+   }
+   kafkaComponent, err := kafka.New(r, config.Kafka, kafka.Dependencies{Schema: schemaComponent})
    if err != nil {
        return fmt.Errorf("unable to initialize kafka component: %w", err)
    }
@@ -125,6 +130,7 @@ func orchestratorStart(r *reporter.Reporter, config OrchestratorConfiguration, c
        Daemon:     daemonComponent,
        HTTP:       httpComponent,
        ClickHouse: clickhouseDBComponent,
+       Schema:     schemaComponent,
    })
    if err != nil {
        return fmt.Errorf("unable to initialize clickhouse component: %w", err)

View File

@@ -23,12 +23,24 @@ var prettyC = pretty.Config{
 }

 func defaultPrettyFormatters() map[reflect.Type]interface{} {
-   return map[reflect.Type]interface{}{
+   result := map[reflect.Type]interface{}{
        reflect.TypeOf(net.IP{}):            fmt.Sprint,
        reflect.TypeOf(netip.Addr{}):        fmt.Sprint,
        reflect.TypeOf(time.Time{}):         fmt.Sprint,
        reflect.TypeOf(SubnetMap[string]{}): fmt.Sprint,
    }
+   for t, fn := range nonDefaultPrettyFormatters {
+       result[t] = fn
+   }
+   return result
+}
+
+var nonDefaultPrettyFormatters = map[reflect.Type]interface{}{}
+
+// AddPrettyFormatter add a global pretty formatter. We cannot put everything in
+// the default map due to cycles.
+func AddPrettyFormatter(t reflect.Type, fn interface{}) {
+   nonDefaultPrettyFormatters[t] = fn
 }

 // DiffOption changes the behavior of the Diff function.

View File

@@ -151,165 +151,167 @@ func (c ColumnKey) String() string {
 // Flows is the data schema for flows tables. Any column starting with Src/InIf
 // will be duplicated as Dst/OutIf during init. That's not the case for columns
 // in `PrimaryKeys'.
-var Flows = Schema{
+func flows() Schema {
+   return Schema{
        clickHousePrimaryKeys: []ColumnKey{
            ColumnTimeReceived,
            ColumnExporterAddress,
            ColumnEType,
            ColumnProto,
            ColumnInIfName,
            ColumnSrcAS,
            ColumnForwardingStatus,
            ColumnOutIfName,
            ColumnDstAS,
            ColumnSamplingRate,
        },
        columns: []Column{
            {
                Key:                 ColumnTimeReceived,
                ClickHouseType:      "DateTime",
                ClickHouseCodec:     "DoubleDelta, LZ4",
                ConsoleNotDimension: true,
                ProtobufType:        protoreflect.Uint64Kind,
            },
            {Key: ColumnSamplingRate, ClickHouseType: "UInt64", ConsoleNotDimension: true},
            {Key: ColumnExporterAddress, ClickHouseType: "LowCardinality(IPv6)"},
            {Key: ColumnExporterName, ClickHouseType: "LowCardinality(String)", ClickHouseNotSortingKey: true},
            {Key: ColumnExporterGroup, ClickHouseType: "LowCardinality(String)", ClickHouseNotSortingKey: true},
            {Key: ColumnExporterRole, ClickHouseType: "LowCardinality(String)", ClickHouseNotSortingKey: true},
            {Key: ColumnExporterSite, ClickHouseType: "LowCardinality(String)", ClickHouseNotSortingKey: true},
            {Key: ColumnExporterRegion, ClickHouseType: "LowCardinality(String)", ClickHouseNotSortingKey: true},
            {Key: ColumnExporterTenant, ClickHouseType: "LowCardinality(String)", ClickHouseNotSortingKey: true},
            {
                Key:            ColumnSrcAddr,
                MainOnly:       true,
                ClickHouseType: "IPv6",
            }, {
                Key:                 ColumnSrcNetMask,
                MainOnly:            true,
                ClickHouseType:      "UInt8",
                ConsoleNotDimension: true,
            }, {
                Key:            ColumnSrcNetPrefix,
                MainOnly:       true,
                ClickHouseType: "String",
                ClickHouseAlias: `CASE
 WHEN EType = 0x800 THEN concat(replaceRegexpOne(IPv6CIDRToRange(SrcAddr, (96 + SrcNetMask)::UInt8).1::String, '^::ffff:', ''), '/', SrcNetMask::String)
 WHEN EType = 0x86dd THEN concat(IPv6CIDRToRange(SrcAddr, SrcNetMask).1::String, '/', SrcNetMask::String)
 ELSE ''
END`,
            },
            {Key: ColumnSrcAS, ClickHouseType: "UInt32"},
            {
                Key:                    ColumnSrcNetName,
                ClickHouseType:         "LowCardinality(String)",
                ClickHouseGenerateFrom: "dictGetOrDefault('networks', 'name', SrcAddr, '')",
            }, {
                Key:                    ColumnSrcNetRole,
                ClickHouseType:         "LowCardinality(String)",
                ClickHouseGenerateFrom: "dictGetOrDefault('networks', 'role', SrcAddr, '')",
            }, {
                Key:                    ColumnSrcNetSite,
                ClickHouseType:         "LowCardinality(String)",
                ClickHouseGenerateFrom: "dictGetOrDefault('networks', 'site', SrcAddr, '')",
            }, {
                Key:                    ColumnSrcNetRegion,
                ClickHouseType:         "LowCardinality(String)",
                ClickHouseGenerateFrom: "dictGetOrDefault('networks', 'region', SrcAddr, '')",
            }, {
                Key:                    ColumnSrcNetTenant,
                ClickHouseType:         "LowCardinality(String)",
                ClickHouseGenerateFrom: "dictGetOrDefault('networks', 'tenant', SrcAddr, '')",
            },
            {Key: ColumnSrcCountry, ClickHouseType: "FixedString(2)"},
            {
                Key:            ColumnDstASPath,
                MainOnly:       true,
                ClickHouseType: "Array(UInt32)",
            }, {
                Key:                    ColumnDst1stAS,
                ClickHouseType:         "UInt32",
                ClickHouseGenerateFrom: "c_DstASPath[1]",
            }, {
                Key:                    ColumnDst2ndAS,
                ClickHouseType:         "UInt32",
                ClickHouseGenerateFrom: "c_DstASPath[2]",
            }, {
                Key:                    ColumnDst3rdAS,
                ClickHouseType:         "UInt32",
                ClickHouseGenerateFrom: "c_DstASPath[3]",
            }, {
                Key:            ColumnDstCommunities,
                MainOnly:       true,
                ClickHouseType: "Array(UInt32)",
            }, {
                Key:            ColumnDstLargeCommunities,
                MainOnly:       true,
                ClickHouseType: "Array(UInt128)",
                ClickHouseTransformFrom: []Column{
                    {
                        Key:            ColumnDstLargeCommunitiesASN,
                        ClickHouseType: "Array(UInt32)",
                    },
                    {
                        Key:            ColumnDstLargeCommunitiesLocalData1,
                        ClickHouseType: "Array(UInt32)",
                    },
                    {
                        Key:            ColumnDstLargeCommunitiesLocalData2,
                        ClickHouseType: "Array(UInt32)",
                    },
                },
                ClickHouseTransformTo: "arrayMap((asn, l1, l2) -> ((bitShiftLeft(CAST(asn, 'UInt128'), 64) + bitShiftLeft(CAST(l1, 'UInt128'), 32)) + CAST(l2, 'UInt128')), DstLargeCommunitiesASN, DstLargeCommunitiesLocalData1, DstLargeCommunitiesLocalData2)",
                ConsoleNotDimension:   true,
            },
            {Key: ColumnInIfName, ClickHouseType: "LowCardinality(String)"},
            {Key: ColumnInIfDescription, ClickHouseType: "String", ClickHouseNotSortingKey: true},
            {Key: ColumnInIfSpeed, ClickHouseType: "UInt32", ClickHouseNotSortingKey: true},
            {Key: ColumnInIfConnectivity, ClickHouseType: "LowCardinality(String)", ClickHouseNotSortingKey: true},
            {Key: ColumnInIfProvider, ClickHouseType: "LowCardinality(String)", ClickHouseNotSortingKey: true},
            {
                Key:                     ColumnInIfBoundary,
                ClickHouseType:          "Enum8('undefined' = 0, 'external' = 1, 'internal' = 2)",
                ClickHouseNotSortingKey: true,
                ProtobufType:            protoreflect.EnumKind,
                ProtobufEnumName:        "Boundary",
                ProtobufEnum: map[int]string{
                    0: "UNDEFINED",
                    1: "EXTERNAL",
                    2: "INTERNAL",
                },
            },
            {Key: ColumnEType, ClickHouseType: "UInt32"}, // TODO: UInt16 but hard to change, primary key
            {Key: ColumnProto, ClickHouseType: "UInt32"}, // TODO: UInt8 but hard to change, primary key
            {Key: ColumnSrcPort, ClickHouseType: "UInt16", MainOnly: true},
            {Key: ColumnBytes, ClickHouseType: "UInt64", ClickHouseNotSortingKey: true, ConsoleNotDimension: true},
            {Key: ColumnPackets, ClickHouseType: "UInt64", ClickHouseNotSortingKey: true, ConsoleNotDimension: true},
            {
                Key:                 ColumnPacketSize,
                ClickHouseType:      "UInt64",
                ClickHouseAlias:     "intDiv(Bytes, Packets)",
                ConsoleNotDimension: true,
            }, {
                Key:            ColumnPacketSizeBucket,
                ClickHouseType: "LowCardinality(String)",
                ClickHouseAlias: func() string {
                    boundaries := []int{64, 128, 256, 512, 768, 1024, 1280, 1501,
                        2048, 3072, 4096, 8192, 10240, 16384, 32768, 65536}
                    conditions := []string{}
                    last := 0
                    for _, boundary := range boundaries {
                        conditions = append(conditions, fmt.Sprintf("PacketSize < %d, '%d-%d'",
                            boundary, last, boundary-1))
                        last = boundary
                    }
                    conditions = append(conditions, fmt.Sprintf("'%d-Inf'", last))
                    return fmt.Sprintf("multiIf(%s)", strings.Join(conditions, ", "))
                }(),
            },
            {Key: ColumnForwardingStatus, ClickHouseType: "UInt32"}, // TODO: UInt8 but hard to change, primary key
        },
-}.finalize()
+   }.finalize()
+}

 func (schema Schema) finalize() Schema {
    ncolumns := []Column{}

View File

@@ -8,8 +8,9 @@ import (
 )

 func TestFlowsClickHouse(t *testing.T) {
-   for _, key := range Flows.clickHousePrimaryKeys {
-       if column := Flows.columnIndex[key]; column.Key == 0 {
+   c := NewMock(t)
+   for _, key := range c.clickHousePrimaryKeys {
+       if column := c.columnIndex[key]; column.Key == 0 {
            t.Errorf("primary key %q not a column", key)
        } else {
            if column.ClickHouseNotSortingKey {
@@ -20,7 +21,8 @@ func TestFlowsClickHouse(t *testing.T) {
 }

 func TestFlowsProtobuf(t *testing.T) {
-   for _, column := range Flows.Columns() {
+   c := NewMock(t)
+   for _, column := range c.Columns() {
        if column.ProtobufIndex >= 0 {
            if column.ProtobufType == 0 {
                t.Errorf("column %s has not protobuf type", column.Name)
@@ -30,8 +32,9 @@ func TestFlowsProtobuf(t *testing.T) {
 }

 func TestColumnIndex(t *testing.T) {
+   c := NewMock(t)
    for i := ColumnTimeReceived; i < ColumnLast; i++ {
-       if _, ok := Flows.LookupColumnByKey(i); !ok {
+       if _, ok := c.LookupColumnByKey(i); !ok {
            t.Errorf("column %s cannot be looked up by key", i)
        }
    }

View File

@@ -3,9 +3,12 @@
 package schema

-import "testing"
+import (
+   "testing"
+)

 func TestLookupColumnByName(t *testing.T) {
+   c := NewMock(t)
    cases := []string{
        "TimeReceived",
        "InIfProvider",
@@ -14,7 +17,7 @@ func TestLookupColumnByName(t *testing.T) {
        "ForwardingStatus",
    }
    for _, name := range cases {
-       column, ok := Flows.LookupColumnByName(name)
+       column, ok := c.LookupColumnByName(name)
        if !ok {
            t.Fatalf("LookupByName(%q) not found", name)
        }
@@ -25,6 +28,7 @@ func TestLookupColumnByName(t *testing.T) {
 }

 func TestReverseColumnDirection(t *testing.T) {
+   c := NewMock(t)
    cases := []struct {
        Input  ColumnKey
        Output ColumnKey
@@ -37,7 +41,7 @@ func TestReverseColumnDirection(t *testing.T) {
        {ColumnExporterName, ColumnExporterName},
    }
    for _, tc := range cases {
-       got := Flows.ReverseColumnDirection(tc.Input)
+       got := c.ReverseColumnDirection(tc.Input)
        if got != tc.Output {
            t.Errorf("ReverseColumnDirection(%q) == %q but expected %q",
                tc.Input.String(), got.String(), tc.Output.String())

View File

@@ -4,9 +4,7 @@
 package schema

 import (
-   "fmt"
    "net/netip"
-   "reflect"
    "strings"
    "testing"
@@ -113,18 +111,19 @@ message FlowMessagevLH2TTFF7P352DSYYCJYWFCXHAM {
 }

 func TestProtobufMarshal(t *testing.T) {
+   c := NewMock(t)
    exporterAddress := netip.MustParseAddr("::ffff:203.0.113.14")
    bf := &FlowMessage{}
    bf.TimeReceived = 1000
    bf.SamplingRate = 20000
    bf.ExporterAddress = exporterAddress
-   Flows.ProtobufAppendVarint(bf, ColumnDstAS, 65000)
-   Flows.ProtobufAppendVarint(bf, ColumnBytes, 200)
-   Flows.ProtobufAppendVarint(bf, ColumnPackets, 300)
-   Flows.ProtobufAppendVarint(bf, ColumnBytes, 300) // duplicate!
-   Flows.ProtobufAppendBytes(bf, ColumnDstCountry, []byte("FR"))
-   got := Flows.ProtobufMarshal(bf)
+   c.ProtobufAppendVarint(bf, ColumnDstAS, 65000)
+   c.ProtobufAppendVarint(bf, ColumnBytes, 200)
+   c.ProtobufAppendVarint(bf, ColumnPackets, 300)
+   c.ProtobufAppendVarint(bf, ColumnBytes, 300) // duplicate!
+   c.ProtobufAppendBytes(bf, ColumnDstCountry, []byte("FR"))
+   got := c.ProtobufMarshal(bf)

    size, n := protowire.ConsumeVarint(got)
    if uint64(len(got)-n) != size {
@@ -154,7 +153,7 @@ func TestProtobufMarshal(t *testing.T) {
    })

    t.Run("compare as protobuf message", func(t *testing.T) {
-       got := Flows.ProtobufDecode(t, got)
+       got := c.ProtobufDecode(t, got)
        expected := FlowMessage{
            TimeReceived:    1000,
            SamplingRate:    20000,
@@ -166,13 +165,14 @@ func TestProtobufMarshal(t *testing.T) {
                ColumnDstCountry: "FR",
            },
        }
-       if diff := helpers.Diff(got, expected, helpers.DiffFormatter(reflect.TypeOf(ColumnBytes), fmt.Sprint)); diff != "" {
+       if diff := helpers.Diff(got, expected); diff != "" {
            t.Fatalf("ProtobufDecode() (-got, +want):\n%s", diff)
        }
    })
 }

 func BenchmarkProtobufMarshal(b *testing.B) {
+   c := NewMock(b)
    exporterAddress := netip.MustParseAddr("::ffff:203.0.113.14")
    DisableDebug(b)
    for i := 0; i < b.N; i++ {
@@ -181,11 +181,11 @@ func BenchmarkProtobufMarshal(b *testing.B) {
            SamplingRate:    20000,
            ExporterAddress: exporterAddress,
        }
-       Flows.ProtobufAppendVarint(bf, ColumnDstAS, 65000)
-       Flows.ProtobufAppendVarint(bf, ColumnBytes, 200)
-       Flows.ProtobufAppendVarint(bf, ColumnPackets, 300)
-       Flows.ProtobufAppendVarint(bf, ColumnBytes, 300) // duplicate!
-       Flows.ProtobufAppendBytes(bf, ColumnDstCountry, []byte("FR"))
-       Flows.ProtobufMarshal(bf)
+       c.ProtobufAppendVarint(bf, ColumnDstAS, 65000)
+       c.ProtobufAppendVarint(bf, ColumnBytes, 200)
+       c.ProtobufAppendVarint(bf, ColumnPackets, 300)
+       c.ProtobufAppendVarint(bf, ColumnBytes, 300) // duplicate!
+       c.ProtobufAppendBytes(bf, ColumnDstCountry, []byte("FR"))
+       c.ProtobufMarshal(bf)
    }
 }

19
common/schema/root.go Normal file
View File

@@ -0,0 +1,19 @@
+// SPDX-FileCopyrightText: 2023 Free Mobile
+// SPDX-License-Identifier: AGPL-3.0-only
+
+// Package schema is an abstraction of the data schema for flows used by
+// Akvorado. It is a leaky abstraction as there are multiple parts dependant of
+// the subsystem that will use it.
+package schema
+
+// Component represents the schema compomenent.
+type Component struct {
+   Schema
+}
+
+// New creates a new schema component.
+func New() (*Component, error) {
+   return &Component{
+       Schema: flows(),
+   }, nil
+}

View File

@@ -6,11 +6,14 @@
 package schema

 import (
+   "fmt"
    "net/netip"
    "reflect"
    "strings"
    "testing"

+   "akvorado/common/helpers"
+
    "github.com/jhump/protoreflect/desc"
    "github.com/jhump/protoreflect/desc/protoparse"
    "github.com/jhump/protoreflect/dynamic"
@@ -19,7 +22,8 @@ import (
 var debug = true

-// DisableDebug disables debug during the provided test.
+// DisableDebug disables debug during the provided test. We keep this as a
+// global function for performance reason (when release, debug is a const).
 func DisableDebug(t testing.TB) {
    debug = false
    t.Cleanup(func() {
@@ -27,6 +31,16 @@ func DisableDebug(t testing.TB) {
    })
 }

+// NewMock create a new schema component.
+func NewMock(t testing.TB) *Component {
+   t.Helper()
+   c, err := New()
+   if err != nil {
+       t.Fatalf("New() error:\n%+v", err)
+   }
+   return c
+}
+
 // ProtobufDecode decodes the provided protobuf message.
 func (schema *Schema) ProtobufDecode(t *testing.T, input []byte) *FlowMessage {
    parser := protoparse.Parser{
@@ -101,3 +115,7 @@ func (schema *Schema) ProtobufDecode(t *testing.T, input []byte) *FlowMessage {
    return &flow
 }
+
+func init() {
+   helpers.AddPrettyFormatter(reflect.TypeOf(ColumnBytes), fmt.Sprint)
+}

View File

@@ -1,9 +1,6 @@
 // SPDX-FileCopyrightText: 2022 Free Mobile
 // SPDX-License-Identifier: AGPL-3.0-only

-// Package schema is an abstraction of the data schema for flows used by
-// Akvorado. It is a leaky abstraction as there are multiple parts dependant of
-// the subsystem that will use it.
 package schema

 import (

View File

@@ -12,6 +12,8 @@ import (
    "strings"
    "text/template"
    "time"
+
+   "akvorado/console/query"
 )

 // flowsTable describe a consolidated or unconsolidated flows table.
@@ -122,11 +124,11 @@ func templateEscape(input string) string {
 }

 // templateWhere transforms a filter to a WHERE clause
-func templateWhere(qf queryFilter) string {
-   if qf.Filter == "" {
+func templateWhere(qf query.Filter) string {
+   if qf.Direct() == "" {
        return `{{ .Timefilter }}`
    }
-   return fmt.Sprintf(`{{ .Timefilter }} AND (%s)`, templateEscape(qf.Filter))
+   return fmt.Sprintf(`{{ .Timefilter }} AND (%s)`, templateEscape(qf.Direct()))
 }

 // templateTable builds a template directive to select the right table

View File

@@ -7,7 +7,7 @@ import (
    "net/http"
    "time"

-   "akvorado/common/schema"
+   "akvorado/console/query"

    "github.com/gin-gonic/gin"
 )
@@ -39,7 +39,7 @@ type VisualizeOptionsConfiguration struct {
    // Filter is the the filter string
    Filter string `json:"filter"`
    // Dimensions is the array of dimensions to use
-   Dimensions []queryColumn `json:"dimensions"`
+   Dimensions []query.Column `json:"dimensions"`
    // Limit is the default limit to use
    Limit int `json:"limit" validate:"min=5"`
 }
@@ -52,7 +52,7 @@ func DefaultConfiguration() Configuration {
            Start:      "6 hours ago",
            End:        "now",
            Filter:     "InIfBoundary = external",
-           Dimensions: []queryColumn{queryColumn(schema.ColumnSrcAS)},
+           Dimensions: []query.Column{query.NewColumn("SrcAS")},
            Limit:      10,
        },
        HomepageTopWidgets: []string{"src-as", "src-port", "protocol", "src-country", "etype"},
@@ -63,7 +63,7 @@ func DefaultConfiguration() Configuration {
 func (c *Component) configHandlerFunc(gc *gin.Context) {
    dimensions := []string{}
-   for _, column := range schema.Flows.Columns() {
+   for _, column := range c.d.Schema.Columns() {
        if column.ConsoleNotDimension {
            continue
        }

View File

@@ -42,7 +42,7 @@ func (c *Component) filterValidateHandlerFunc(gc *gin.Context) {
        })
        return
    }
-   got, err := filter.Parse("", []byte(input.Filter), filter.GlobalStore("meta", &filter.Meta{}))
+   got, err := filter.Parse("", []byte(input.Filter), filter.GlobalStore("meta", &filter.Meta{Schema: c.d.Schema}))
    if err == nil {
        gc.JSON(http.StatusOK, filterValidateHandlerOutput{
            Message: "ok",
@@ -85,7 +85,7 @@ func (c *Component) filterCompleteHandlerFunc(gc *gin.Context) {
    switch input.What {
    case "column":
        _, err := filter.Parse("", []byte{},
-           filter.Entrypoint("ConditionExpr"), filter.GlobalStore("meta", &filter.Meta{}))
+           filter.Entrypoint("ConditionExpr"), filter.GlobalStore("meta", &filter.Meta{Schema: c.d.Schema}))
        if err != nil {
            for _, candidate := range filter.Expected(err) {
                if !strings.HasSuffix(candidate, `"i`) {
@@ -102,7 +102,7 @@ func (c *Component) filterCompleteHandlerFunc(gc *gin.Context) {
        _, err := filter.Parse("",
            []byte(fmt.Sprintf("%s ", input.Column)),
            filter.Entrypoint("ConditionExpr"),
-           filter.GlobalStore("meta", &filter.Meta{}))
+           filter.GlobalStore("meta", &filter.Meta{Schema: c.d.Schema}))
        if err != nil {
            for _, candidate := range filter.Expected(err) {
                if !strings.HasPrefix(candidate, `"`) {
@@ -213,7 +213,7 @@ LIMIT 20`
            Label  string `ch:"label"`
            Detail string `ch:"detail"`
        }{}
-       columnName := fixQueryColumnName(input.Column)
+       columnName := c.fixQueryColumnName(input.Column)
        if columnName == "DstASPath" {
            columnName = "DstAS"
        }
@@ -270,7 +270,7 @@ LIMIT 20`, attributeName, attributeName, attributeName), input.Prefix); err != n
        }
        input.Prefix = ""
    case "exportername", "exportergroup", "exporterrole", "exportersite", "exporterregion", "exportertenant":
-       column = fixQueryColumnName(inputColumn)
+       column = c.fixQueryColumnName(inputColumn)
        detail = fmt.Sprintf("exporter %s", inputColumn[8:])
    case "inifname", "outifname":
        column = "IfName"

View File

@@ -7,13 +7,14 @@ import (
    "testing"

    "akvorado/common/helpers"
+   "akvorado/common/schema"
 )

 func TestFilterHumanError(t *testing.T) {
    _, err := Parse("", []byte(`
InIfDescription = "Gi0/0/0/0"
AND Proto = 1000
-OR `), GlobalStore("meta", &Meta{}))
+OR `), GlobalStore("meta", &Meta{Schema: schema.NewMock(t)}))
    expected := "at line 3, position 13: expecting an unsigned 8-bit integer"
    if diff := helpers.Diff(HumanError(err), expected); diff != "" {
        t.Errorf("HumanError() (-got, +want):\n%s", diff)
@@ -24,7 +25,7 @@ func TestAllErrors(t *testing.T) {
    _, err := Parse("", []byte(`
InIfDescription = "Gi0/0/0/0"
AND Proto = 1000
-OR`), GlobalStore("meta", &Meta{}))
+OR`), GlobalStore("meta", &Meta{Schema: schema.NewMock(t)}))
    // Currently, the parser stops at the first error.
    expected := Errors{
        oneError{
@@ -40,7 +41,8 @@ OR`), GlobalStore("meta", &Meta{}))
 }

 func TestExpected(t *testing.T) {
-   _, err := Parse("", []byte{}, Entrypoint("ConditionBoundaryExpr"), GlobalStore("meta", &Meta{}))
+   _, err := Parse("", []byte{}, Entrypoint("ConditionBoundaryExpr"),
+       GlobalStore("meta", &Meta{Schema: schema.NewMock(t)}))
    expected := []string{`"InIfBoundary"i`, `"OutIfBoundary"i`}
    if diff := helpers.Diff(Expected(err), expected); diff != "" {
        t.Errorf("AllErrors() (-got, +want):\n%s", diff)

View File

@@ -14,14 +14,16 @@ import (
 // Meta is used to inject/retrieve state from the parser.
 type Meta struct {
+   // Schema is the data schema (used as input)
+   Schema *schema.Component
    // ReverseDirection tells if we require the reverse direction for the provided filter (used as input)
    ReverseDirection bool
    // MainTableRequired tells if the main table is required to execute the expression (used as output)
    MainTableRequired bool
 }

-// ReverseColumnDirection reverts the direction of a provided column name.
-func ReverseColumnDirection(name string) string {
+// reverseColumnDirection reverts the direction of a provided column name.
+func reverseColumnDirection(schema *schema.Component, name string) string {
    var candidate string
    if strings.HasPrefix(name, "Src") {
        candidate = "Dst" + name[3:]
@@ -35,7 +37,7 @@ func ReverseColumnDirection(name string) string {
    if strings.HasPrefix(name, "Out") {
        candidate = "In" + name[3:]
    }
-   if column, ok := schema.Flows.LookupColumnByName(candidate); ok {
+   if column, ok := schema.LookupColumnByName(candidate); ok {
        return column.Name
    }
    return name
@@ -45,10 +47,11 @@ func ReverseColumnDirection(name string) string {
 // in predicate code blocks.
 func (c *current) acceptColumn() (string, error) {
    name := string(c.text)
-   for _, column := range schema.Flows.Columns() {
+   schema := c.globalStore["meta"].(*Meta).Schema
+   for _, column := range schema.Columns() {
        if strings.EqualFold(name, column.Name) {
            if c.globalStore["meta"].(*Meta).ReverseDirection {
-               return ReverseColumnDirection(column.Name), nil
+               return reverseColumnDirection(schema, column.Name), nil
            }
            return column.Name, nil
        }
@@ -60,7 +63,7 @@ func (c *current) acceptColumn() (string, error) {
 // in state change blocks. Unfortunately, it cannot extract matched text, so it
 // should be provided.
 func (c *current) metaColumn(name string) error {
-   if column, ok := schema.Flows.LookupColumnByName(name); ok {
+   if column, ok := c.globalStore["meta"].(*Meta).Schema.LookupColumnByName(name); ok {
        if column.MainOnly {
            c.state["main-table-only"] = true
        }

View File

@@ -7,6 +7,7 @@ import (
    "testing"

    "akvorado/common/helpers"
+   "akvorado/common/schema"
 )

 func TestValidFilter(t *testing.T) {
@@ -216,6 +217,8 @@ output provider */ = 'telia'`,
        {Input: `DstCommunities != 65000:100:200`, Output: `NOT has(DstLargeCommunities, bitShiftLeft(65000::UInt128, 64) + bitShiftLeft(100::UInt128, 32) + 200::UInt128)`, MetaOut: Meta{MainTableRequired: true}},
    }
    for _, tc := range cases {
+       tc.MetaIn.Schema = schema.NewMock(t)
+       tc.MetaOut.Schema = tc.MetaIn.Schema
        got, err := Parse("", []byte(tc.Input), GlobalStore("meta", &tc.MetaIn))
        if err != nil {
            t.Errorf("Parse(%q) error:\n%+v", tc.Input, err)
@@ -260,7 +263,7 @@ func TestInvalidFilter(t *testing.T) {
        {`SrcAS IN (AS12322,`},
    }
    for _, tc := range cases {
-       out, err := Parse("", []byte(tc.Input), GlobalStore("meta", &Meta{}))
+       out, err := Parse("", []byte(tc.Input), GlobalStore("meta", &Meta{Schema: schema.NewMock(t)}))
        t.Logf("out: %v", out)
        if err == nil {
            t.Errorf("Parse(%q) didn't throw an error", tc.Input)

View File

@@ -11,22 +11,25 @@ import (
    "time"

    "github.com/gin-gonic/gin"
+   "golang.org/x/exp/slices"

    "akvorado/common/helpers"
    "akvorado/common/schema"
+   "akvorado/console/query"
 )

 // graphHandlerInput describes the input for the /graph endpoint.
 type graphHandlerInput struct {
-   Start          time.Time     `json:"start" binding:"required"`
-   End            time.Time     `json:"end" binding:"required,gtfield=Start"`
-   Points         uint          `json:"points" binding:"required,min=5,max=2000"` // minimum number of points
-   Dimensions     []queryColumn `json:"dimensions"`                               // group by ...
-   Limit          int           `json:"limit" binding:"min=1"`                    // limit product of dimensions
-   Filter         queryFilter   `json:"filter"`                                   // where ...
-   Units          string        `json:"units" binding:"required,oneof=pps l2bps l3bps"`
-   Bidirectional  bool          `json:"bidirectional"`
-   PreviousPeriod bool          `json:"previous-period"`
+   schema         *schema.Component
+   Start          time.Time      `json:"start" binding:"required"`
+   End            time.Time      `json:"end" binding:"required,gtfield=Start"`
+   Points         uint           `json:"points" binding:"required,min=5,max=2000"` // minimum number of points
+   Dimensions     []query.Column `json:"dimensions"`                               // group by ...
+   Limit          int            `json:"limit" binding:"min=1"`                    // limit product of dimensions
+   Filter         query.Filter   `json:"filter"`                                   // where ...
+   Units          string         `json:"units" binding:"required,oneof=pps l2bps l3bps"`
+   Bidirectional  bool           `json:"bidirectional"`
+   PreviousPeriod bool           `json:"previous-period"`
 }

 // graphHandlerOutput describes the output for the /graph endpoint. A
@@ -45,14 +48,12 @@ type graphHandlerOutput struct {
    NinetyFivePercentile []int `json:"95th"` // row → 95th xps
 }

-// reverseDirection reverts the direction of a provided input
+// reverseDirection reverts the direction of a provided input. It does not
+// modify the original.
 func (input graphHandlerInput) reverseDirection() graphHandlerInput {
-   input.Filter.Filter, input.Filter.ReverseFilter = input.Filter.ReverseFilter, input.Filter.Filter
-   dimensions := input.Dimensions
-   input.Dimensions = make([]queryColumn, len(dimensions))
-   for i := range dimensions {
-       input.Dimensions[i] = queryColumn(schema.Flows.ReverseColumnDirection(schema.ColumnKey(dimensions[i])))
-   }
+   input.Filter.Swap()
+   input.Dimensions = slices.Clone(input.Dimensions)
+   query.Columns(input.Dimensions).Reverse(input.schema)
    return input
 }
@@ -82,7 +83,7 @@ func nearestPeriod(period time.Duration) (time.Duration, string) {
 // for less than 2-months, this is the month, otherwise, this is the
 // year. Also, dimensions are stripped.
 func (input graphHandlerInput) previousPeriod() graphHandlerInput {
-   input.Dimensions = []queryColumn{}
+   input.Dimensions = []query.Column{}
    diff := input.End.Sub(input.Start)
    period, _ := nearestPeriod(diff)
    if period == 0 {
@@ -123,7 +124,7 @@ func (input graphHandlerInput) toSQL1(axis int, options toSQL1Options) string {
    dimensionsInterpolate := ""
    others := []string{}
    for _, column := range input.Dimensions {
-       field := column.toSQLSelect()
+       field := column.ToSQLSelect()
        selectFields = append(selectFields, field)
        dimensions = append(dimensions, column.String())
        others = append(others, "'Other'")
@@ -172,7 +173,7 @@ ORDER BY time WITH FILL
        Start:             input.Start,
        End:               input.End,
        StartForInterval:  startForInterval,
-       MainTableRequired: requireMainTable(input.Dimensions, input.Filter),
+       MainTableRequired: requireMainTable(input.schema, input.Dimensions, input.Filter),
        Points:            input.Points,
        Units:             input.Units,
    }),
@@ -207,11 +208,19 @@ func (input graphHandlerInput) toSQL() string {
 func (c *Component) graphHandlerFunc(gc *gin.Context) {
    ctx := c.t.Context(gc.Request.Context())
-   var input graphHandlerInput
+   input := graphHandlerInput{schema: c.d.Schema}
    if err := gc.ShouldBindJSON(&input); err != nil {
        gc.JSON(http.StatusBadRequest, gin.H{"message": helpers.Capitalize(err.Error())})
        return
    }
+   if err := query.Columns(input.Dimensions).Validate(input.schema); err != nil {
+       gc.JSON(http.StatusBadRequest, gin.H{"message": helpers.Capitalize(err.Error())})
+       return
+   }
+   if err := input.Filter.Validate(input.schema); err != nil {
+       gc.JSON(http.StatusBadRequest, gin.H{"message": helpers.Capitalize(err.Error())})
+       return
+   }
    if input.Limit > c.config.DimensionsLimit {
        gc.JSON(http.StatusBadRequest,
            gin.H{"message": fmt.Sprintf("Limit is set beyond maximum value (%d)",

View File

@@ -14,38 +14,36 @@ import (
    "akvorado/common/helpers"
    "akvorado/common/schema"
+   "akvorado/console/query"
 )

 func TestGraphInputReverseDirection(t *testing.T) {
    input := graphHandlerInput{
+       schema: schema.NewMock(t),
        Start:  time.Date(2022, 04, 10, 15, 45, 10, 0, time.UTC),
        End:    time.Date(2022, 04, 11, 15, 45, 10, 0, time.UTC),
        Points: 100,
-       Dimensions: []queryColumn{
-           queryColumn(schema.ColumnExporterName),
-           queryColumn(schema.ColumnInIfProvider),
+       Dimensions: query.Columns{
+           query.NewColumn("ExporterName"),
+           query.NewColumn("InIfProvider"),
        },
-       Filter: queryFilter{
-           Filter:        "DstCountry = 'FR' AND SrcCountry = 'US'",
-           ReverseFilter: "SrcCountry = 'FR' AND DstCountry = 'US'",
-       },
-       Units: "l3bps",
+       Filter: query.NewFilter("DstCountry = 'FR' AND SrcCountry = 'US'"),
+       Units:  "l3bps",
    }
    original1 := fmt.Sprintf("%+v", input)
    expected := graphHandlerInput{
        Start:  time.Date(2022, 04, 10, 15, 45, 10, 0, time.UTC),
        End:    time.Date(2022, 04, 11, 15, 45, 10, 0, time.UTC),
        Points: 100,
-       Dimensions: []queryColumn{
-           queryColumn(schema.ColumnExporterName),
-           queryColumn(schema.ColumnOutIfProvider),
+       Dimensions: query.Columns{
+           query.NewColumn("ExporterName"),
+           query.NewColumn("OutIfProvider"),
        },
-       Filter: queryFilter{
-           Filter:        "SrcCountry = 'FR' AND DstCountry = 'US'",
-           ReverseFilter: "DstCountry = 'FR' AND SrcCountry = 'US'",
-       },
-       Units: "l3bps",
+       Filter: query.NewFilter("SrcCountry = 'FR' AND DstCountry = 'US'"),
+       Units:  "l3bps",
    }
+   query.Columns(input.Dimensions).Validate(input.schema)
+   query.Columns(expected.Dimensions).Validate(input.schema)
    got := input.reverseDirection()
    original2 := fmt.Sprintf("%+v", input)
    if diff := helpers.Diff(got, expected); diff != "" {
@@ -118,18 +116,20 @@ func TestGraphPreviousPeriod(t *testing.T) {
                t.Fatalf("time.Parse(%q) error:\n%+v", tc.ExpectedEnd, err)
            }
            input := graphHandlerInput{
-               Start: start,
-               End:   end,
-               Dimensions: []queryColumn{
-                   queryColumn(schema.ColumnExporterAddress),
-                   queryColumn(schema.ColumnExporterName),
+               schema: schema.NewMock(t),
+               Start:  start,
+               End:    end,
+               Dimensions: query.Columns{
+                   query.NewColumn("ExporterAddress"),
+                   query.NewColumn("ExporterName"),
                },
            }
+           query.Columns(input.Dimensions).Validate(input.schema)
            got := input.previousPeriod()
            expected := graphHandlerInput{
                Start:      expectedStart,
                End:        expectedEnd,
-               Dimensions: []queryColumn{},
+               Dimensions: []query.Column{},
            }
            if diff := helpers.Diff(got, expected); diff != "" {
                t.Fatalf("previousPeriod() (-got, +want):\n%s", diff)
@@ -150,8 +150,8 @@ func TestGraphQuerySQL(t *testing.T) {
            Start:      time.Date(2022, 04, 10, 15, 45, 10, 0, time.UTC),
            End:        time.Date(2022, 04, 11, 15, 45, 10, 0, time.UTC),
            Points:     100,
-           Dimensions: []queryColumn{},
-           Filter:     queryFilter{},
+           Dimensions: []query.Column{},
+           Filter:     query.Filter{},
            Units:      "l3bps",
        },
        Expected: `
@@ -176,8 +176,8 @@ ORDER BY time WITH FILL
            Start:      time.Date(2022, 04, 10, 15, 45, 10, 0, time.UTC),
            End:        time.Date(2022, 04, 11, 15, 45, 10, 0, time.UTC),
            Points:     100,
-           Dimensions: []queryColumn{},
-           Filter:     queryFilter{},
+           Dimensions: []query.Column{},
+           Filter:     query.Filter{},
            Units:      "l2bps",
        },
        Expected: `
@@ -203,8 +203,8 @@ ORDER BY time WITH FILL
            Start:      time.Date(2022, 04, 10, 15, 45, 10, 0, time.UTC),
            End:        time.Date(2022, 04, 11, 15, 45, 10, 0, time.UTC),
            Points:     100,
-           Dimensions: []queryColumn{},
-           Filter:     queryFilter{},
+           Dimensions: []query.Column{},
+           Filter:     query.Filter{},
            Units:      "pps",
        },
        Expected: `
@@ -229,8 +229,8 @@ ORDER BY time WITH FILL
            Start:      time.Date(2022, 04, 10, 15, 45, 10, 0, time.UTC),
            End:        time.Date(2022, 04, 11, 15, 45, 10, 0, time.UTC),
            Points:     100,
-           Dimensions: []queryColumn{},
-           Filter:     queryFilter{Filter: "DstCountry = 'FR' AND SrcCountry = 'US'"},
+           Dimensions: []query.Column{},
+           Filter:     query.NewFilter("DstCountry = 'FR' AND SrcCountry = 'US'"),
            Units:      "l3bps",
        },
        Expected: `
@@ -255,8 +255,8 @@ ORDER BY time WITH FILL
            Start:      time.Date(2022, 04, 10, 15, 45, 10, 0, time.UTC),
            End:        time.Date(2022, 04, 11, 15, 45, 10, 0, time.UTC),
            Points:     100,
-           Dimensions: []queryColumn{},
-           Filter:     queryFilter{Filter: "InIfDescription = '{{ hello }}' AND SrcCountry = 'US'"},
+           Dimensions: []query.Column{},
+           Filter:     query.NewFilter("InIfDescription = '{{ hello }}' AND SrcCountry = 'US'"),
            Units:      "l3bps",
        },
        Expected: `
@@ -278,14 +278,11 @@ ORDER BY time WITH FILL
    }, {
        Description: "no dimensions, reverse direction",
        Input: graphHandlerInput{
            Start:      time.Date(2022, 04, 10, 15, 45, 10, 0, time.UTC),
            End:        time.Date(2022, 04, 11, 15, 45, 10, 0, time.UTC),
            Points:     100,
-           Dimensions: []queryColumn{},
-           Filter: queryFilter{
-               Filter:        "DstCountry = 'FR' AND SrcCountry = 'US'",
-               ReverseFilter: "SrcCountry = 'FR' AND DstCountry = 'US'",
-           },
+           Dimensions: []query.Column{},
+           Filter:     query.NewFilter("DstCountry = 'FR' AND SrcCountry = 'US'"),
            Units:         "l3bps",
            Bidirectional: true,
        },
@@ -328,11 +325,11 @@ ORDER BY time WITH FILL
            End:    time.Date(2022, 04, 11, 15, 45, 10, 0, time.UTC),
            Points: 100,
            Limit:  20,
-           Dimensions: []queryColumn{
-               queryColumn(schema.ColumnExporterName),
-               queryColumn(schema.ColumnInIfProvider),
+           Dimensions: []query.Column{
+               query.NewColumn("ExporterName"),
+               query.NewColumn("InIfProvider"),
            },
-           Filter: queryFilter{},
+           Filter: query.Filter{},
            Units:  "l3bps",
        },
        Expected: `
@@ -360,11 +357,11 @@ ORDER BY time WITH FILL
            End:    time.Date(2022, 04, 11, 15, 45, 10, 0, time.UTC),
            Points: 100,
            Limit:  20,
-           Dimensions: []queryColumn{
-               queryColumn(schema.ColumnExporterName),
-               queryColumn(schema.ColumnInIfProvider),
+           Dimensions: []query.Column{
+               query.NewColumn("ExporterName"),
+               query.NewColumn("InIfProvider"),
            },
-           Filter: queryFilter{},
+           Filter: query.Filter{},
            Units:         "l3bps",
            Bidirectional: true,
        },
@@ -409,11 +406,11 @@ ORDER BY time WITH FILL
            End:    time.Date(2022, 04, 11, 15, 45, 10, 0, time.UTC),
            Points: 100,
            Limit:  20,
-           Dimensions: []queryColumn{
-               queryColumn(schema.ColumnExporterName),
-               queryColumn(schema.ColumnInIfProvider),
+           Dimensions: []query.Column{
+               query.NewColumn("ExporterName"),
+               query.NewColumn("InIfProvider"),
            },
-           Filter: queryFilter{},
+           Filter: query.Filter{},
            Units:          "l3bps",
            PreviousPeriod: true,
        },
@@ -454,6 +451,13 @@ ORDER BY time WITH FILL
        },
    }
    for _, tc := range cases {
+       tc.Input.schema = schema.NewMock(t)
+       if err := query.Columns(tc.Input.Dimensions).Validate(tc.Input.schema); err != nil {
+           t.Fatalf("Validate() error:\n%+v", err)
+       }
+       if err := tc.Input.Filter.Validate(tc.Input.schema); err != nil {
+           t.Fatalf("Validate() error:\n%+v", err)
+       }
        tc.Expected = strings.ReplaceAll(tc.Expected, "@@", "`")
        t.Run(tc.Description, func(t *testing.T) {
            got := tc.Input.toSQL()

View File

@@ -4,109 +4,28 @@
 package console

 import (
-   "errors"
-   "fmt"
    "strings"

-   "akvorado/common/helpers"
    "akvorado/common/schema"
-   "akvorado/console/filter"
+   "akvorado/console/query"
 )

-type queryColumn schema.ColumnKey
-
-func (qc queryColumn) MarshalText() ([]byte, error) {
-   return []byte(schema.ColumnKey(qc).String()), nil
-}
-func (qc queryColumn) String() string {
-   return schema.ColumnKey(qc).String()
-}
-func (qc *queryColumn) UnmarshalText(input []byte) error {
-   name := string(input)
-   if column, ok := schema.Flows.LookupColumnByName(name); ok && !column.ConsoleNotDimension {
-       *qc = queryColumn(column.Key)
-       return nil
-   }
-   return errors.New("unknown field")
-}
-
-func requireMainTable(qcs []queryColumn, qf queryFilter) bool {
-   if qf.MainTableRequired {
+func requireMainTable(sch *schema.Component, qcs []query.Column, qf query.Filter) bool {
+   if qf.MainTableRequired() {
        return true
    }
    for _, qc := range qcs {
-       if column, ok := schema.Flows.LookupColumnByKey(schema.ColumnKey(qc)); ok && column.MainOnly {
+       if column, ok := sch.LookupColumnByKey(qc.Key()); ok && column.MainOnly {
            return true
        }
    }
    return false
 }

-type queryFilter struct {
-   Filter            string
-   ReverseFilter     string
-   MainTableRequired bool
-}
-
-func (qf queryFilter) String() string {
-   return qf.Filter
-}
-func (qf queryFilter) MarshalText() ([]byte, error) {
-   return []byte(qf.Filter), nil
-}
-func (qf *queryFilter) UnmarshalText(input []byte) error {
-   if strings.TrimSpace(string(input)) == "" {
-       *qf = queryFilter{}
-       return nil
-   }
-   meta := &filter.Meta{}
-   direct, err := filter.Parse("", input, filter.GlobalStore("meta", meta))
-   if err != nil {
-       return fmt.Errorf("cannot parse filter: %s", filter.HumanError(err))
-   }
-   meta = &filter.Meta{ReverseDirection: true}
-   reverse, err := filter.Parse("", input, filter.GlobalStore("meta", meta))
-   if err != nil {
-       return fmt.Errorf("cannot parse reverse filter: %s", filter.HumanError(err))
-   }
-   *qf = queryFilter{
-       Filter:            direct.(string),
-       ReverseFilter:     reverse.(string),
-       MainTableRequired: meta.MainTableRequired,
-   }
-   return nil
-}
-
-// toSQLSelect transforms a column into an expression to use in SELECT
-func (qc queryColumn) toSQLSelect() string {
-   var strValue string
-   switch schema.ColumnKey(qc) {
-   case schema.ColumnExporterAddress, schema.ColumnSrcAddr, schema.ColumnDstAddr:
-       strValue = fmt.Sprintf("replaceRegexpOne(IPv6NumToString(%s), '^::ffff:', '')", qc)
-   case schema.ColumnSrcAS, schema.ColumnDstAS, schema.ColumnDst1stAS, schema.ColumnDst2ndAS, schema.ColumnDst3rdAS:
-       strValue = fmt.Sprintf(`concat(toString(%s), ': ', dictGetOrDefault('asns', 'name', %s, '???'))`,
-           qc, qc)
-   case schema.ColumnEType:
-       strValue = fmt.Sprintf(`if(EType = %d, 'IPv4', if(EType = %d, 'IPv6', '???'))`,
-           helpers.ETypeIPv4, helpers.ETypeIPv6)
-   case schema.ColumnProto:
-       strValue = `dictGetOrDefault('protocols', 'name', Proto, '???')`
-   case schema.ColumnInIfSpeed, schema.ColumnOutIfSpeed, schema.ColumnSrcPort, schema.ColumnDstPort, schema.ColumnForwardingStatus, schema.ColumnInIfBoundary, schema.ColumnOutIfBoundary:
-       strValue = fmt.Sprintf("toString(%s)", qc)
-   case schema.ColumnDstASPath:
-       strValue = `arrayStringConcat(DstASPath, ' ')`
-   case schema.ColumnDstCommunities:
-       strValue = `arrayStringConcat(arrayConcat(arrayMap(c -> concat(toString(bitShiftRight(c, 16)), ':', toString(bitAnd(c, 0xffff))), DstCommunities), arrayMap(c -> concat(toString(bitAnd(bitShiftRight(c, 64), 0xffffffff)), ':', toString(bitAnd(bitShiftRight(c, 32), 0xffffffff)), ':', toString(bitAnd(c, 0xffffffff))), DstLargeCommunities)), ' ')`
-   default:
-       strValue = qc.String()
-   }
-   return strValue
-}
-
 // fixQueryColumnName fix capitalization of the provided column name
-func fixQueryColumnName(name string) string {
+func (c *Component) fixQueryColumnName(name string) string {
    name = strings.ToLower(name)
-   for _, column := range schema.Flows.Columns() {
+   for _, column := range c.d.Schema.Columns() {
        if strings.ToLower(column.Name) == name {
            return column.Name
        }

console/query/column.go (new file, 121 lines)

@@ -0,0 +1,121 @@
// SPDX-FileCopyrightText: 2023 Free Mobile
// SPDX-License-Identifier: AGPL-3.0-only
// Package query provides query columns and query filters. These
// types are special as they need a schema to be validated.
package query
import (
"fmt"
"akvorado/common/helpers"
"akvorado/common/schema"
)
// Column represents a query column. It should be instantiated with NewColumn() or
// UnmarshalText(), then validated with Validate().
type Column struct {
validated bool
name string
key schema.ColumnKey
}
// Columns is a set of query columns.
type Columns []Column
// NewColumn creates a new column. Validate() should be called before using it.
func NewColumn(name string) Column {
return Column{name: name}
}
func (qc Column) check() {
if !qc.validated {
panic("query column not validated")
}
}
func (qc Column) String() string {
return qc.name
}
// MarshalText turns a column into a string.
func (qc Column) MarshalText() ([]byte, error) {
return []byte(qc.name), nil
}
// UnmarshalText parses a column. Validate() should be called before use.
func (qc *Column) UnmarshalText(input []byte) error {
name := string(input)
*qc = Column{name: name}
return nil
}
// Key returns the key for the column.
func (qc *Column) Key() schema.ColumnKey {
qc.check()
return qc.key
}
// Validate should be called before using the column. We need a schema component
// for that.
func (qc *Column) Validate(schema *schema.Component) error {
if column, ok := schema.LookupColumnByName(qc.name); ok && !column.ConsoleNotDimension {
qc.key = column.Key
qc.validated = true
return nil
}
return fmt.Errorf("unknown column name %s", qc.name)
}
// Reverse reverses the column direction
func (qc *Column) Reverse(schema *schema.Component) {
name := schema.ReverseColumnDirection(qc.Key()).String()
reverted := Column{name: name}
if reverted.Validate(schema) == nil {
*qc = reverted
}
// No modification otherwise
}
// Reverse reverses the direction of all columns
func (qcs Columns) Reverse(schema *schema.Component) {
for i := range qcs {
qcs[i].Reverse(schema)
}
}
// Validate calls Validate() on each column.
func (qcs Columns) Validate(schema *schema.Component) error {
for i := range qcs {
if err := qcs[i].Validate(schema); err != nil {
return err
}
}
return nil
}
// ToSQLSelect transforms a column into an expression to use in SELECT
func (qc Column) ToSQLSelect() string {
var strValue string
switch qc.Key() {
case schema.ColumnExporterAddress, schema.ColumnSrcAddr, schema.ColumnDstAddr:
strValue = fmt.Sprintf("replaceRegexpOne(IPv6NumToString(%s), '^::ffff:', '')", qc)
case schema.ColumnSrcAS, schema.ColumnDstAS, schema.ColumnDst1stAS, schema.ColumnDst2ndAS, schema.ColumnDst3rdAS:
strValue = fmt.Sprintf(`concat(toString(%s), ': ', dictGetOrDefault('asns', 'name', %s, '???'))`,
qc, qc)
case schema.ColumnEType:
strValue = fmt.Sprintf(`if(EType = %d, 'IPv4', if(EType = %d, 'IPv6', '???'))`,
helpers.ETypeIPv4, helpers.ETypeIPv6)
case schema.ColumnProto:
strValue = `dictGetOrDefault('protocols', 'name', Proto, '???')`
case schema.ColumnInIfSpeed, schema.ColumnOutIfSpeed, schema.ColumnSrcPort, schema.ColumnDstPort, schema.ColumnForwardingStatus, schema.ColumnInIfBoundary, schema.ColumnOutIfBoundary:
strValue = fmt.Sprintf("toString(%s)", qc)
case schema.ColumnDstASPath:
strValue = `arrayStringConcat(DstASPath, ' ')`
case schema.ColumnDstCommunities:
strValue = `arrayStringConcat(arrayConcat(arrayMap(c -> concat(toString(bitShiftRight(c, 16)), ':', toString(bitAnd(c, 0xffff))), DstCommunities), arrayMap(c -> concat(toString(bitAnd(bitShiftRight(c, 64), 0xffffffff)), ':', toString(bitAnd(bitShiftRight(c, 32), 0xffffffff)), ':', toString(bitAnd(c, 0xffffffff))), DstLargeCommunities)), ' ')`
default:
strValue = qc.String()
}
return strValue
}
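
Read together, NewColumn()/UnmarshalText(), Validate(), and the accessors form a two-phase API: construction never fails, and everything that needs the schema is deferred to Validate(), with check() panicking on misuse. A minimal test-shaped sketch of the intended call order; the test name is hypothetical, while schema.NewMock is the mock component used by the tests in this commit:

package query_test

import (
    "testing"

    "akvorado/common/schema"
    "akvorado/console/query"
)

func TestColumnLifecycleSketch(t *testing.T) {
    col := query.NewColumn("SrcAS")
    // Validate() must run before Key() or ToSQLSelect(); both panic otherwise.
    if err := col.Validate(schema.NewMock(t)); err != nil {
        t.Fatalf("Validate() error:\n%+v", err)
    }
    _ = col.Key()         // now safe
    _ = col.ToSQLSelect() // now safe
}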


@@ -0,0 +1,119 @@
// SPDX-FileCopyrightText: 2023 Free Mobile
// SPDX-License-Identifier: AGPL-3.0-only
package query_test
import (
"fmt"
"reflect"
"testing"
"akvorado/common/helpers"
"akvorado/common/schema"
"akvorado/console/query"
)
func TestUnmarshalQueryColumn(t *testing.T) {
cases := []struct {
Input string
Expected schema.ColumnKey
Error bool
}{
{"DstAddr", schema.ColumnDstAddr, false},
{"TimeReceived", 0, true},
{"Nothing", 0, true},
}
for _, tc := range cases {
var qc query.Column
if err := qc.UnmarshalText([]byte(tc.Input)); err != nil {
t.Fatalf("UnmarshalText() error:\n%+v", err)
}
err := qc.Validate(schema.NewMock(t))
if err != nil && !tc.Error {
t.Fatalf("Validate(%q) error:\n%+v", tc.Input, err)
}
if err == nil && tc.Error {
t.Fatalf("Validate(%q) did not error", tc.Input)
}
if err != nil {
continue
}
if diff := helpers.Diff(qc.Key(), tc.Expected, helpers.DiffFormatter(reflect.TypeOf(schema.ColumnBytes), fmt.Sprint)); diff != "" {
t.Fatalf("UnmarshalText(%q) (-got, +want):\n%s", tc.Input, diff)
}
}
}
func TestQueryColumnSQLSelect(t *testing.T) {
cases := []struct {
Input schema.ColumnKey
Expected string
}{
{
Input: schema.ColumnSrcAddr,
Expected: `replaceRegexpOne(IPv6NumToString(SrcAddr), '^::ffff:', '')`,
}, {
Input: schema.ColumnDstAS,
Expected: `concat(toString(DstAS), ': ', dictGetOrDefault('asns', 'name', DstAS, '???'))`,
}, {
Input: schema.ColumnDst2ndAS,
Expected: `concat(toString(Dst2ndAS), ': ', dictGetOrDefault('asns', 'name', Dst2ndAS, '???'))`,
}, {
Input: schema.ColumnProto,
Expected: `dictGetOrDefault('protocols', 'name', Proto, '???')`,
}, {
Input: schema.ColumnEType,
Expected: `if(EType = 2048, 'IPv4', if(EType = 34525, 'IPv6', '???'))`,
}, {
Input: schema.ColumnOutIfSpeed,
Expected: `toString(OutIfSpeed)`,
}, {
Input: schema.ColumnExporterName,
Expected: `ExporterName`,
}, {
Input: schema.ColumnPacketSizeBucket,
Expected: `PacketSizeBucket`,
}, {
Input: schema.ColumnDstASPath,
Expected: `arrayStringConcat(DstASPath, ' ')`,
}, {
Input: schema.ColumnDstCommunities,
Expected: `arrayStringConcat(arrayConcat(arrayMap(c -> concat(toString(bitShiftRight(c, 16)), ':', toString(bitAnd(c, 0xffff))), DstCommunities), arrayMap(c -> concat(toString(bitAnd(bitShiftRight(c, 64), 0xffffffff)), ':', toString(bitAnd(bitShiftRight(c, 32), 0xffffffff)), ':', toString(bitAnd(c, 0xffffffff))), DstLargeCommunities)), ' ')`,
},
}
for _, tc := range cases {
t.Run(tc.Input.String(), func(t *testing.T) {
column := query.NewColumn(tc.Input.String())
if err := column.Validate(schema.NewMock(t)); err != nil {
t.Fatalf("Validate() error:\n%+v", err)
}
got := column.ToSQLSelect()
if diff := helpers.Diff(got, tc.Expected); diff != "" {
t.Errorf("toSQLWhere (-got, +want):\n%s", diff)
}
})
}
}
func TestReverseDirection(t *testing.T) {
columns := query.Columns{
query.NewColumn("SrcAS"),
query.NewColumn("DstAS"),
query.NewColumn("ExporterName"),
query.NewColumn("InIfProvider"),
}
sch := schema.NewMock(t)
if err := columns.Validate(sch); err != nil {
t.Fatalf("Validate() error:\n%+v", err)
}
columns.Reverse(sch)
expected := query.Columns{
query.NewColumn("DstAS"),
query.NewColumn("SrcAS"),
query.NewColumn("ExporterName"),
query.NewColumn("OutIfProvider"),
}
if diff := helpers.Diff(columns, expected, helpers.DiffFormatter(reflect.TypeOf(query.Column{}), fmt.Sprint)); diff != "" {
t.Fatalf("Reverse() (-got, +want):\n%s", diff)
}
}

console/query/filter.go (new file, 95 lines)

@@ -0,0 +1,95 @@
// SPDX-FileCopyrightText: 2023 Free Mobile
// SPDX-License-Identifier: AGPL-3.0-only
package query
import (
"fmt"
"strings"
"akvorado/common/schema"
"akvorado/console/filter"
)
// Filter represents a query filter. It should be instantiated with NewFilter() and validated with Validate().
type Filter struct {
validated bool
filter string
reverseFilter string
mainTableRequired bool
}
// NewFilter creates a new filter. It should be validated with Validate() before use.
func NewFilter(input string) Filter {
return Filter{filter: input}
}
func (qf Filter) check() {
if !qf.validated {
panic("query filter not validated")
}
}
func (qf Filter) String() string {
return qf.filter
}
// MarshalText turns a filter into a string.
func (qf Filter) MarshalText() ([]byte, error) {
return []byte(qf.filter), nil
}
// UnmarshalText parses a filter. Validate() should be called before use.
func (qf *Filter) UnmarshalText(input []byte) error {
*qf = Filter{
filter: strings.TrimSpace(string(input)),
}
return nil
}
// Validate validates a query filter with the provided schema.
func (qf *Filter) Validate(sch *schema.Component) error {
if qf.filter == "" {
qf.validated = true
return nil
}
input := []byte(qf.filter)
meta := &filter.Meta{Schema: sch}
direct, err := filter.Parse("", input, filter.GlobalStore("meta", meta))
if err != nil {
return fmt.Errorf("cannot parse filter: %s", filter.HumanError(err))
}
meta = &filter.Meta{Schema: sch, ReverseDirection: true}
reverse, err := filter.Parse("", input, filter.GlobalStore("meta", meta))
if err != nil {
return fmt.Errorf("cannot parse reverse filter: %s", filter.HumanError(err))
}
qf.filter = direct.(string)
qf.reverseFilter = reverse.(string)
qf.mainTableRequired = meta.MainTableRequired
qf.validated = true
return nil
}
// MainTableRequired tells if the main table is required for this filter.
func (qf Filter) MainTableRequired() bool {
qf.check()
return qf.mainTableRequired
}
// Reverse provides the reverse filter.
func (qf Filter) Reverse() string {
qf.check()
return qf.reverseFilter
}
// Direct provides the filter.
func (qf Filter) Direct() string {
qf.check()
return qf.filter
}
// Swap swaps the direct and reverse filters.
func (qf *Filter) Swap() {
qf.filter, qf.reverseFilter = qf.reverseFilter, qf.filter
}
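
The same two-phase pattern applies to filters: Direct(), Reverse(), and MainTableRequired() all panic until Validate() has run. A minimal sketch under the same assumptions as above (hypothetical test name, mock schema, arbitrary filter expression):

package query_test

import (
    "testing"

    "akvorado/common/schema"
    "akvorado/console/query"
)

func TestFilterLifecycleSketch(t *testing.T) {
    qf := query.NewFilter("SrcAS = 65000")
    if err := qf.Validate(schema.NewMock(t)); err != nil {
        t.Fatalf("Validate() error:\n%+v", err)
    }
    direct, reverse := qf.Direct(), qf.Reverse() // parsed filter and its reverse-direction form
    qf.Swap()                                    // exchanges direct and reverse in place
    _ = qf.MainTableRequired()
    _, _ = direct, reverse
}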


@@ -0,0 +1,61 @@
// SPDX-FileCopyrightText: 2023 Free Mobile
// SPDX-License-Identifier: AGPL-3.0-only
package query_test
import (
"testing"
"akvorado/common/helpers"
"akvorado/common/schema"
"akvorado/console/query"
)
func TestUnmarshalFilter(t *testing.T) {
cases := []struct {
Input string
Expected string
Error bool
}{
{"", "", false},
{" ", "", false},
{"SrcPort=12322", "SrcPort = 12322", false},
{"NoPort = 12322", "", true},
}
sch := schema.NewMock(t)
for _, tc := range cases {
t.Run(tc.Input, func(t *testing.T) {
var qf query.Filter
if err := qf.UnmarshalText([]byte(tc.Input)); err != nil {
t.Fatalf("UnmarshalText() error:\n%+v", err)
}
err := qf.Validate(sch)
if err != nil && !tc.Error {
t.Fatalf("Validate() error:\n%+v", err)
}
if err == nil && tc.Error {
t.Fatal("Validate() did not error")
}
if err != nil {
return
}
if diff := helpers.Diff(qf.Direct(), tc.Expected); diff != "" {
t.Fatalf("UnmarshalText(%q) (-got, +want):\n%s", tc.Input, diff)
}
})
}
}
func TestFilterSwap(t *testing.T) {
filter := query.NewFilter("SrcAS = 12322")
if err := filter.Validate(schema.NewMock(t)); err != nil {
t.Fatalf("Validate() error:\n%+v", err)
}
filter.Swap()
if diff := helpers.Diff(filter.Direct(), "DstAS = 12322"); diff != "" {
t.Fatalf("Swap() (-got, +want):\n%s", diff)
}
if diff := helpers.Diff(filter.Reverse(), "SrcAS = 12322"); diff != "" {
t.Fatalf("Swap() (-got, +want):\n%s", diff)
}
}


@@ -6,126 +6,38 @@ package console
import (
    "testing"

-   "akvorado/common/helpers"
    "akvorado/common/schema"
+   "akvorado/console/query"
)

func TestRequireMainTable(t *testing.T) {
    cases := []struct {
-       Columns  []queryColumn
-       Filter   queryFilter
+       Columns  []query.Column
+       Filter   query.Filter
        Expected bool
    }{
-       {[]queryColumn{}, queryFilter{}, false},
-       {[]queryColumn{queryColumn(schema.ColumnSrcAS)}, queryFilter{}, false},
-       {[]queryColumn{queryColumn(schema.ColumnExporterAddress)}, queryFilter{}, false},
-       {[]queryColumn{queryColumn(schema.ColumnSrcPort)}, queryFilter{}, true},
-       {[]queryColumn{queryColumn(schema.ColumnSrcAddr)}, queryFilter{}, true},
-       {[]queryColumn{queryColumn(schema.ColumnDstPort)}, queryFilter{}, true},
-       {[]queryColumn{queryColumn(schema.ColumnDstAddr)}, queryFilter{}, true},
-       {[]queryColumn{queryColumn(schema.ColumnSrcAS), queryColumn(schema.ColumnDstAddr)}, queryFilter{}, true},
-       {[]queryColumn{queryColumn(schema.ColumnDstAddr), queryColumn(schema.ColumnSrcAS)}, queryFilter{}, true},
-       {[]queryColumn{}, queryFilter{MainTableRequired: true}, true},
+       {[]query.Column{}, query.NewFilter(""), false},
+       {[]query.Column{query.NewColumn("SrcAS")}, query.NewFilter(""), false},
+       {[]query.Column{query.NewColumn("ExporterAddress")}, query.NewFilter(""), false},
+       {[]query.Column{query.NewColumn("SrcPort")}, query.NewFilter(""), true},
+       {[]query.Column{query.NewColumn("SrcAddr")}, query.NewFilter(""), true},
+       {[]query.Column{query.NewColumn("DstPort")}, query.NewFilter(""), true},
+       {[]query.Column{query.NewColumn("DstAddr")}, query.NewFilter(""), true},
+       {[]query.Column{query.NewColumn("SrcAS"), query.NewColumn("DstAddr")}, query.NewFilter(""), true},
+       {[]query.Column{query.NewColumn("DstAddr"), query.NewColumn("SrcAS")}, query.NewFilter(""), true},
+       {[]query.Column{}, query.NewFilter("SrcAddr = 203.0.113.15"), true},
    }
+   sch := schema.NewMock(t)
    for idx, tc := range cases {
-       got := requireMainTable(tc.Columns, tc.Filter)
+       if err := query.Columns(tc.Columns).Validate(sch); err != nil {
+           t.Fatalf("Validate() error:\n%+v", err)
+       }
+       if err := tc.Filter.Validate(sch); err != nil {
+           t.Fatalf("Validate() error:\n%+v", err)
+       }
+       got := requireMainTable(sch, tc.Columns, tc.Filter)
        if got != tc.Expected {
            t.Errorf("requireMainTable(%d) == %v but expected %v", idx, got, tc.Expected)
        }
    }
}

-func TestUnmarshalQueryColumn(t *testing.T) {
-   cases := []struct {
-       Input    string
-       Expected schema.ColumnKey
-       Error    bool
-   }{
-       {"DstAddr", schema.ColumnDstAddr, false},
-       {"TimeReceived", 0, true},
-       {"Nothing", 0, true},
-   }
-   for _, tc := range cases {
-       var qc queryColumn
-       err := qc.UnmarshalText([]byte(tc.Input))
-       if err != nil && !tc.Error {
-           t.Fatalf("UnmarshalText(%q) error:\n%+v", tc.Input, err)
-       }
-       if err == nil && tc.Error {
-           t.Fatalf("UnmarshalText(%q) did not error", tc.Input)
-       }
-       if diff := helpers.Diff(qc, tc.Expected); diff != "" {
-           t.Fatalf("UnmarshalText(%q) (-got, +want):\n%s", tc.Input, diff)
-       }
-   }
-}

-func TestQueryColumnSQLSelect(t *testing.T) {
-   cases := []struct {
-       Input    schema.ColumnKey
-       Expected string
-   }{
-       {
-           Input:    schema.ColumnSrcAddr,
-           Expected: `replaceRegexpOne(IPv6NumToString(SrcAddr), '^::ffff:', '')`,
-       }, {
-           Input:    schema.ColumnDstAS,
-           Expected: `concat(toString(DstAS), ': ', dictGetOrDefault('asns', 'name', DstAS, '???'))`,
-       }, {
-           Input:    schema.ColumnDst2ndAS,
-           Expected: `concat(toString(Dst2ndAS), ': ', dictGetOrDefault('asns', 'name', Dst2ndAS, '???'))`,
-       }, {
-           Input:    schema.ColumnProto,
-           Expected: `dictGetOrDefault('protocols', 'name', Proto, '???')`,
-       }, {
-           Input:    schema.ColumnEType,
-           Expected: `if(EType = 2048, 'IPv4', if(EType = 34525, 'IPv6', '???'))`,
-       }, {
-           Input:    schema.ColumnOutIfSpeed,
-           Expected: `toString(OutIfSpeed)`,
-       }, {
-           Input:    schema.ColumnExporterName,
-           Expected: `ExporterName`,
-       }, {
-           Input:    schema.ColumnPacketSizeBucket,
-           Expected: `PacketSizeBucket`,
-       }, {
-           Input:    schema.ColumnDstASPath,
-           Expected: `arrayStringConcat(DstASPath, ' ')`,
-       }, {
-           Input:    schema.ColumnDstCommunities,
-           Expected: `arrayStringConcat(arrayConcat(arrayMap(c -> concat(toString(bitShiftRight(c, 16)), ':', toString(bitAnd(c, 0xffff))), DstCommunities), arrayMap(c -> concat(toString(bitAnd(bitShiftRight(c, 64), 0xffffffff)), ':', toString(bitAnd(bitShiftRight(c, 32), 0xffffffff)), ':', toString(bitAnd(c, 0xffffffff))), DstLargeCommunities)), ' ')`,
-       },
-   }
-   for _, tc := range cases {
-       t.Run(queryColumn(tc.Input).String(), func(t *testing.T) {
-           got := queryColumn(tc.Input).toSQLSelect()
-           if diff := helpers.Diff(got, tc.Expected); diff != "" {
-               t.Errorf("toSQLWhere (-got, +want):\n%s", diff)
-           }
-       })
-   }
-}

-func TestUnmarshalFilter(t *testing.T) {
-   cases := []struct {
-       Input    string
-       Expected string
-   }{
-       {"", ""},
-       {" ", ""},
-       {"SrcPort=12322", "SrcPort = 12322"},
-   }
-   for _, tc := range cases {
-       t.Run(tc.Input, func(t *testing.T) {
-           var qf queryFilter
-           err := qf.UnmarshalText([]byte(tc.Input))
-           if err != nil {
-               t.Fatalf("UnmarshalText(%q) error:\n%+v", tc.Input, err)
-           }
-           if diff := helpers.Diff(qf.Filter, tc.Expected); diff != "" {
-               t.Fatalf("UnmarshalText(%q) (-got, +want):\n%s", tc.Input, diff)
-           }
-       })
-   }
-}


@@ -21,8 +21,10 @@ import (
"akvorado/common/daemon" "akvorado/common/daemon"
"akvorado/common/http" "akvorado/common/http"
"akvorado/common/reporter" "akvorado/common/reporter"
"akvorado/common/schema"
"akvorado/console/authentication" "akvorado/console/authentication"
"akvorado/console/database" "akvorado/console/database"
"akvorado/console/query"
) )
// Component represents the console component. // Component represents the console component.
@@ -48,6 +50,7 @@ type Dependencies struct {
Clock clock.Clock Clock clock.Clock
Auth *authentication.Component Auth *authentication.Component
Database *database.Component Database *database.Component
Schema *schema.Component
} }
// New creates a new console component. // New creates a new console component.
@@ -55,6 +58,9 @@ func New(r *reporter.Reporter, config Configuration, dependencies Dependencies)
if dependencies.Clock == nil { if dependencies.Clock == nil {
dependencies.Clock = clock.New() dependencies.Clock = clock.New()
} }
if err := query.Columns(config.DefaultVisualizeOptions.Dimensions).Validate(dependencies.Schema); err != nil {
return nil, err
}
c := Component{ c := Component{
r: r, r: r,
d: &dependencies, d: &dependencies,


@@ -13,16 +13,19 @@ import (
"github.com/gin-gonic/gin" "github.com/gin-gonic/gin"
"akvorado/common/helpers" "akvorado/common/helpers"
"akvorado/common/schema"
"akvorado/console/query"
) )
// sankeyHandlerInput describes the input for the /sankey endpoint. // sankeyHandlerInput describes the input for the /sankey endpoint.
type sankeyHandlerInput struct { type sankeyHandlerInput struct {
Start time.Time `json:"start" binding:"required"` schema *schema.Component
End time.Time `json:"end" binding:"required,gtfield=Start"` Start time.Time `json:"start" binding:"required"`
Dimensions []queryColumn `json:"dimensions" binding:"required,min=2"` // group by ... End time.Time `json:"end" binding:"required,gtfield=Start"`
Limit int `json:"limit" binding:"min=1,max=50"` // limit product of dimensions Dimensions []query.Column `json:"dimensions" binding:"required,min=2"` // group by ...
Filter queryFilter `json:"filter"` // where ... Limit int `json:"limit" binding:"min=1,max=50"` // limit product of dimensions
Units string `json:"units" binding:"required,oneof=pps l3bps l2bps"` Filter query.Filter `json:"filter"` // where ...
Units string `json:"units" binding:"required,oneof=pps l3bps l2bps"`
} }
// sankeyHandlerOutput describes the output for the /sankey endpoint. // sankeyHandlerOutput describes the output for the /sankey endpoint.
@@ -51,7 +54,7 @@ func (input sankeyHandlerInput) toSQL() (string, error) {
arrayFields = append(arrayFields, fmt.Sprintf(`if(%s IN (SELECT %s FROM rows), %s, 'Other')`, arrayFields = append(arrayFields, fmt.Sprintf(`if(%s IN (SELECT %s FROM rows), %s, 'Other')`,
column.String(), column.String(),
column.String(), column.String(),
column.toSQLSelect())) column.ToSQLSelect()))
dimensions = append(dimensions, column.String()) dimensions = append(dimensions, column.String())
} }
fields := []string{ fields := []string{
@@ -84,7 +87,7 @@ ORDER BY xps DESC
templateContext(inputContext{ templateContext(inputContext{
Start: input.Start, Start: input.Start,
End: input.End, End: input.End,
MainTableRequired: requireMainTable(input.Dimensions, input.Filter), MainTableRequired: requireMainTable(input.schema, input.Dimensions, input.Filter),
Points: 20, Points: 20,
Units: input.Units, Units: input.Units,
}), }),
@@ -94,11 +97,19 @@ ORDER BY xps DESC
func (c *Component) sankeyHandlerFunc(gc *gin.Context) { func (c *Component) sankeyHandlerFunc(gc *gin.Context) {
ctx := c.t.Context(gc.Request.Context()) ctx := c.t.Context(gc.Request.Context())
var input sankeyHandlerInput input := sankeyHandlerInput{schema: c.d.Schema}
if err := gc.ShouldBindJSON(&input); err != nil { if err := gc.ShouldBindJSON(&input); err != nil {
gc.JSON(http.StatusBadRequest, gin.H{"message": helpers.Capitalize(err.Error())}) gc.JSON(http.StatusBadRequest, gin.H{"message": helpers.Capitalize(err.Error())})
return return
} }
if err := query.Columns(input.Dimensions).Validate(input.schema); err != nil {
gc.JSON(http.StatusBadRequest, gin.H{"message": helpers.Capitalize(err.Error())})
return
}
if err := input.Filter.Validate(input.schema); err != nil {
gc.JSON(http.StatusBadRequest, gin.H{"message": helpers.Capitalize(err.Error())})
return
}
sqlQuery, err := input.toSQL() sqlQuery, err := input.toSQL()
if err != nil { if err != nil {


@@ -13,6 +13,7 @@ import (
"akvorado/common/helpers" "akvorado/common/helpers"
"akvorado/common/schema" "akvorado/common/schema"
"akvorado/console/query"
) )
func TestSankeyQuerySQL(t *testing.T) { func TestSankeyQuerySQL(t *testing.T) {
@@ -24,12 +25,14 @@ func TestSankeyQuerySQL(t *testing.T) {
{ {
Description: "two dimensions, no filters, l3 bps", Description: "two dimensions, no filters, l3 bps",
Input: sankeyHandlerInput{ Input: sankeyHandlerInput{
Start: time.Date(2022, 04, 10, 15, 45, 10, 0, time.UTC), Start: time.Date(2022, 04, 10, 15, 45, 10, 0, time.UTC),
End: time.Date(2022, 04, 11, 15, 45, 10, 0, time.UTC), End: time.Date(2022, 04, 11, 15, 45, 10, 0, time.UTC),
Dimensions: []queryColumn{queryColumn(schema.ColumnSrcAS), queryColumn(schema.ColumnExporterName)}, Dimensions: []query.Column{
Limit: 5, query.NewColumn("SrcAS"),
Filter: queryFilter{}, query.NewColumn("ExporterName")},
Units: "l3bps", Limit: 5,
Filter: query.Filter{},
Units: "l3bps",
}, },
Expected: ` Expected: `
{{ with context @@{"start":"2022-04-10T15:45:10Z","end":"2022-04-11T15:45:10Z","points":20,"units":"l3bps"}@@ }} {{ with context @@{"start":"2022-04-10T15:45:10Z","end":"2022-04-11T15:45:10Z","points":20,"units":"l3bps"}@@ }}
@@ -48,12 +51,14 @@ ORDER BY xps DESC
}, { }, {
Description: "two dimensions, no filters, l2 bps", Description: "two dimensions, no filters, l2 bps",
Input: sankeyHandlerInput{ Input: sankeyHandlerInput{
Start: time.Date(2022, 04, 10, 15, 45, 10, 0, time.UTC), Start: time.Date(2022, 04, 10, 15, 45, 10, 0, time.UTC),
End: time.Date(2022, 04, 11, 15, 45, 10, 0, time.UTC), End: time.Date(2022, 04, 11, 15, 45, 10, 0, time.UTC),
Dimensions: []queryColumn{queryColumn(schema.ColumnSrcAS), queryColumn(schema.ColumnExporterName)}, Dimensions: []query.Column{
Limit: 5, query.NewColumn("SrcAS"),
Filter: queryFilter{}, query.NewColumn("ExporterName")},
Units: "l2bps", Limit: 5,
Filter: query.Filter{},
Units: "l2bps",
}, },
Expected: ` Expected: `
{{ with context @@{"start":"2022-04-10T15:45:10Z","end":"2022-04-11T15:45:10Z","points":20,"units":"l2bps"}@@ }} {{ with context @@{"start":"2022-04-10T15:45:10Z","end":"2022-04-11T15:45:10Z","points":20,"units":"l2bps"}@@ }}
@@ -73,12 +78,14 @@ ORDER BY xps DESC
}, { }, {
Description: "two dimensions, no filters, pps", Description: "two dimensions, no filters, pps",
Input: sankeyHandlerInput{ Input: sankeyHandlerInput{
Start: time.Date(2022, 04, 10, 15, 45, 10, 0, time.UTC), Start: time.Date(2022, 04, 10, 15, 45, 10, 0, time.UTC),
End: time.Date(2022, 04, 11, 15, 45, 10, 0, time.UTC), End: time.Date(2022, 04, 11, 15, 45, 10, 0, time.UTC),
Dimensions: []queryColumn{queryColumn(schema.ColumnSrcAS), queryColumn(schema.ColumnExporterName)}, Dimensions: []query.Column{
Limit: 5, query.NewColumn("SrcAS"),
Filter: queryFilter{}, query.NewColumn("ExporterName")},
Units: "pps", Limit: 5,
Filter: query.Filter{},
Units: "pps",
}, },
Expected: ` Expected: `
{{ with context @@{"start":"2022-04-10T15:45:10Z","end":"2022-04-11T15:45:10Z","points":20,"units":"pps"}@@ }} {{ with context @@{"start":"2022-04-10T15:45:10Z","end":"2022-04-11T15:45:10Z","points":20,"units":"pps"}@@ }}
@@ -97,12 +104,14 @@ ORDER BY xps DESC
}, { }, {
Description: "two dimensions, with filter", Description: "two dimensions, with filter",
Input: sankeyHandlerInput{ Input: sankeyHandlerInput{
Start: time.Date(2022, 04, 10, 15, 45, 10, 0, time.UTC), Start: time.Date(2022, 04, 10, 15, 45, 10, 0, time.UTC),
End: time.Date(2022, 04, 11, 15, 45, 10, 0, time.UTC), End: time.Date(2022, 04, 11, 15, 45, 10, 0, time.UTC),
Dimensions: []queryColumn{queryColumn(schema.ColumnSrcAS), queryColumn(schema.ColumnExporterName)}, Dimensions: []query.Column{
Limit: 10, query.NewColumn("SrcAS"),
Filter: queryFilter{Filter: "DstCountry = 'FR'"}, query.NewColumn("ExporterName")},
Units: "l3bps", Limit: 10,
Filter: query.NewFilter("DstCountry = 'FR'"),
Units: "l3bps",
}, },
Expected: ` Expected: `
{{ with context @@{"start":"2022-04-10T15:45:10Z","end":"2022-04-11T15:45:10Z","points":20,"units":"l3bps"}@@ }} {{ with context @@{"start":"2022-04-10T15:45:10Z","end":"2022-04-11T15:45:10Z","points":20,"units":"l3bps"}@@ }}
@@ -121,6 +130,13 @@ ORDER BY xps DESC
}, },
} }
for _, tc := range cases { for _, tc := range cases {
tc.Input.schema = schema.NewMock(t)
if err := query.Columns(tc.Input.Dimensions).Validate(tc.Input.schema); err != nil {
t.Fatalf("Validate() error:\n%+v", err)
}
if err := tc.Input.Filter.Validate(tc.Input.schema); err != nil {
t.Fatalf("Validate() error:\n%+v", err)
}
tc.Expected = strings.ReplaceAll(tc.Expected, "@@", "`") tc.Expected = strings.ReplaceAll(tc.Expected, "@@", "`")
t.Run(tc.Description, func(t *testing.T) { t.Run(tc.Description, func(t *testing.T) {
got, _ := tc.Input.toSQL() got, _ := tc.Input.toSQL()


@@ -16,6 +16,7 @@ import (
"akvorado/common/helpers" "akvorado/common/helpers"
"akvorado/common/http" "akvorado/common/http"
"akvorado/common/reporter" "akvorado/common/reporter"
"akvorado/common/schema"
"akvorado/console/authentication" "akvorado/console/authentication"
"akvorado/console/database" "akvorado/console/database"
) )
@@ -34,6 +35,7 @@ func NewMock(t *testing.T, config Configuration) (*Component, *http.Component, *
Clock: mockClock, Clock: mockClock,
Auth: authentication.NewMock(t, r), Auth: authentication.NewMock(t, r),
Database: database.NewMock(t, r, database.DefaultConfiguration()), Database: database.NewMock(t, r, database.DefaultConfiguration()),
Schema: schema.NewMock(t),
}) })
if err != nil { if err != nil {
t.Fatalf("New() error:\n%+v", err) t.Fatalf("New() error:\n%+v", err)


@@ -19,7 +19,7 @@ import (
func TestGetNetflowData(t *testing.T) {
    r := reporter.NewMock(t)
-   nfdecoder := netflow.New(r)
+   nfdecoder := netflow.New(r, decoder.Dependencies{Schema: schema.NewMock(t)})
    ch := getNetflowTemplates(
        context.Background(),


@@ -98,30 +98,30 @@ func (c *Component) enrichFlow(exporterIP netip.Addr, exporterStr string, flow *
    destBMP := c.d.BMP.Lookup(flow.DstAddr, flow.NextHop)
    flow.SrcAS = c.getASNumber(flow.SrcAddr, flow.SrcAS, sourceBMP.ASN)
    flow.DstAS = c.getASNumber(flow.DstAddr, flow.DstAS, destBMP.ASN)
-   schema.Flows.ProtobufAppendBytes(flow, schema.ColumnSrcCountry, []byte(c.d.GeoIP.LookupCountry(flow.SrcAddr)))
-   schema.Flows.ProtobufAppendBytes(flow, schema.ColumnDstCountry, []byte(c.d.GeoIP.LookupCountry(flow.DstAddr)))
+   c.d.Schema.ProtobufAppendBytes(flow, schema.ColumnSrcCountry, []byte(c.d.GeoIP.LookupCountry(flow.SrcAddr)))
+   c.d.Schema.ProtobufAppendBytes(flow, schema.ColumnDstCountry, []byte(c.d.GeoIP.LookupCountry(flow.DstAddr)))
    for _, comm := range destBMP.Communities {
-       schema.Flows.ProtobufAppendVarint(flow, schema.ColumnDstCommunities, uint64(comm))
+       c.d.Schema.ProtobufAppendVarint(flow, schema.ColumnDstCommunities, uint64(comm))
    }
    for _, asn := range destBMP.ASPath {
-       schema.Flows.ProtobufAppendVarint(flow, schema.ColumnDstASPath, uint64(asn))
+       c.d.Schema.ProtobufAppendVarint(flow, schema.ColumnDstASPath, uint64(asn))
    }
    for _, comm := range destBMP.LargeCommunities {
-       schema.Flows.ProtobufAppendVarintForce(flow,
+       c.d.Schema.ProtobufAppendVarintForce(flow,
            schema.ColumnDstLargeCommunitiesASN, uint64(comm.ASN))
-       schema.Flows.ProtobufAppendVarintForce(flow,
+       c.d.Schema.ProtobufAppendVarintForce(flow,
            schema.ColumnDstLargeCommunitiesLocalData1, uint64(comm.LocalData1))
-       schema.Flows.ProtobufAppendVarintForce(flow,
+       c.d.Schema.ProtobufAppendVarintForce(flow,
            schema.ColumnDstLargeCommunitiesLocalData2, uint64(comm.LocalData2))
    }
-   schema.Flows.ProtobufAppendBytes(flow, schema.ColumnExporterName, []byte(flowExporterName))
-   schema.Flows.ProtobufAppendBytes(flow, schema.ColumnInIfName, []byte(flowInIfName))
-   schema.Flows.ProtobufAppendBytes(flow, schema.ColumnInIfDescription, []byte(flowInIfDescription))
-   schema.Flows.ProtobufAppendBytes(flow, schema.ColumnOutIfName, []byte(flowOutIfName))
-   schema.Flows.ProtobufAppendBytes(flow, schema.ColumnOutIfDescription, []byte(flowOutIfDescription))
-   schema.Flows.ProtobufAppendVarint(flow, schema.ColumnInIfSpeed, uint64(flowInIfSpeed))
-   schema.Flows.ProtobufAppendVarint(flow, schema.ColumnOutIfSpeed, uint64(flowOutIfSpeed))
+   c.d.Schema.ProtobufAppendBytes(flow, schema.ColumnExporterName, []byte(flowExporterName))
+   c.d.Schema.ProtobufAppendBytes(flow, schema.ColumnInIfName, []byte(flowInIfName))
+   c.d.Schema.ProtobufAppendBytes(flow, schema.ColumnInIfDescription, []byte(flowInIfDescription))
+   c.d.Schema.ProtobufAppendBytes(flow, schema.ColumnOutIfName, []byte(flowOutIfName))
+   c.d.Schema.ProtobufAppendBytes(flow, schema.ColumnOutIfDescription, []byte(flowOutIfDescription))
+   c.d.Schema.ProtobufAppendVarint(flow, schema.ColumnInIfSpeed, uint64(flowInIfSpeed))
+   c.d.Schema.ProtobufAppendVarint(flow, schema.ColumnOutIfSpeed, uint64(flowOutIfSpeed))
    return
}
@@ -154,12 +154,12 @@ func (c *Component) getASNumber(flowAddr netip.Addr, flowAS, bmpAS uint32) (asn
    return asn
}

-func writeExporter(flow *schema.FlowMessage, classification exporterClassification) {
-   schema.Flows.ProtobufAppendBytes(flow, schema.ColumnExporterGroup, []byte(classification.Group))
-   schema.Flows.ProtobufAppendBytes(flow, schema.ColumnExporterRole, []byte(classification.Role))
-   schema.Flows.ProtobufAppendBytes(flow, schema.ColumnExporterSite, []byte(classification.Site))
-   schema.Flows.ProtobufAppendBytes(flow, schema.ColumnExporterRegion, []byte(classification.Region))
-   schema.Flows.ProtobufAppendBytes(flow, schema.ColumnExporterTenant, []byte(classification.Tenant))
+func (c *Component) writeExporter(flow *schema.FlowMessage, classification exporterClassification) {
+   c.d.Schema.ProtobufAppendBytes(flow, schema.ColumnExporterGroup, []byte(classification.Group))
+   c.d.Schema.ProtobufAppendBytes(flow, schema.ColumnExporterRole, []byte(classification.Role))
+   c.d.Schema.ProtobufAppendBytes(flow, schema.ColumnExporterSite, []byte(classification.Site))
+   c.d.Schema.ProtobufAppendBytes(flow, schema.ColumnExporterRegion, []byte(classification.Region))
+   c.d.Schema.ProtobufAppendBytes(flow, schema.ColumnExporterTenant, []byte(classification.Tenant))
}

func (c *Component) classifyExporter(ip string, name string, flow *schema.FlowMessage) {
@@ -168,7 +168,7 @@ func (c *Component) classifyExporter(ip string, name string, flow *schema.FlowMe
    }
    si := exporterInfo{IP: ip, Name: name}
    if classification, ok := c.classifierExporterCache.Get(si); ok {
-       writeExporter(flow, classification)
+       c.writeExporter(flow, classification)
        return
    }
@@ -190,18 +190,18 @@ func (c *Component) classifyExporter(ip string, name string, flow *schema.FlowMe
        break
    }
    c.classifierExporterCache.Set(si, classification)
-   writeExporter(flow, classification)
+   c.writeExporter(flow, classification)
}

-func writeInterface(flow *schema.FlowMessage, classification interfaceClassification, directionIn bool) {
+func (c *Component) writeInterface(flow *schema.FlowMessage, classification interfaceClassification, directionIn bool) {
    if directionIn {
-       schema.Flows.ProtobufAppendBytes(flow, schema.ColumnInIfConnectivity, []byte(classification.Connectivity))
-       schema.Flows.ProtobufAppendBytes(flow, schema.ColumnInIfProvider, []byte(classification.Provider))
-       schema.Flows.ProtobufAppendVarint(flow, schema.ColumnInIfBoundary, uint64(classification.Boundary))
+       c.d.Schema.ProtobufAppendBytes(flow, schema.ColumnInIfConnectivity, []byte(classification.Connectivity))
+       c.d.Schema.ProtobufAppendBytes(flow, schema.ColumnInIfProvider, []byte(classification.Provider))
+       c.d.Schema.ProtobufAppendVarint(flow, schema.ColumnInIfBoundary, uint64(classification.Boundary))
    } else {
-       schema.Flows.ProtobufAppendBytes(flow, schema.ColumnOutIfConnectivity, []byte(classification.Connectivity))
-       schema.Flows.ProtobufAppendBytes(flow, schema.ColumnOutIfProvider, []byte(classification.Provider))
-       schema.Flows.ProtobufAppendVarint(flow, schema.ColumnOutIfBoundary, uint64(classification.Boundary))
+       c.d.Schema.ProtobufAppendBytes(flow, schema.ColumnOutIfConnectivity, []byte(classification.Connectivity))
+       c.d.Schema.ProtobufAppendBytes(flow, schema.ColumnOutIfProvider, []byte(classification.Provider))
+       c.d.Schema.ProtobufAppendVarint(flow, schema.ColumnOutIfBoundary, uint64(classification.Boundary))
    }
}
@@ -216,7 +216,7 @@ func (c *Component) classifyInterface(ip string, exporterName string, fl *schema
        Interface: ii,
    }
    if classification, ok := c.classifierInterfaceCache.Get(key); ok {
-       writeInterface(fl, classification, directionIn)
+       c.writeInterface(fl, classification, directionIn)
        return
    }
@@ -243,7 +243,7 @@ func (c *Component) classifyInterface(ip string, exporterName string, fl *schema
        break
    }
    c.classifierInterfaceCache.Set(key, classification)
-   writeInterface(fl, classification, directionIn)
+   c.writeInterface(fl, classification, directionIn)
}

func isPrivateAS(as uint32) bool {


@@ -6,7 +6,6 @@ package core
import (
    "fmt"
    "net/netip"
-   "reflect"
    "testing"
    "time"
@@ -372,6 +371,7 @@ ClassifyProviderRegex(Interface.Description, "^Transit: ([^ ]+)", "$1")`,
        Kafka:  kafkaComponent,
        HTTP:   httpComponent,
        BMP:    bmpComponent,
+       Schema: schema.NewMock(t),
    })
    if err != nil {
        t.Fatalf("New() error:\n%+v", err)
@@ -388,8 +388,8 @@ ClassifyProviderRegex(Interface.Description, "^Transit: ([^ ]+)", "$1")`,
        t.Fatalf("Kafka message encoding error:\n%+v", err)
    }
    t.Logf("Raw message: %v", b)
-   got := schema.Flows.ProtobufDecode(t, b)
-   if diff := helpers.Diff(&got, tc.OutputFlow, helpers.DiffFormatter(reflect.TypeOf(schema.ColumnBytes), fmt.Sprint)); diff != "" {
+   got := c.d.Schema.ProtobufDecode(t, b)
+   if diff := helpers.Diff(&got, tc.OutputFlow); diff != "" {
        t.Errorf("Classifier (-got, +want):\n%s", diff)
    }
    return nil
@@ -461,6 +461,7 @@ func TestGetASNumber(t *testing.T) {
        Daemon: daemon.NewMock(t),
        GeoIP:  geoip.NewMock(t, r),
        BMP:    bmpComponent,
+       Schema: schema.NewMock(t),
    })
    if err != nil {
        t.Fatalf("New() error:\n%+v", err)


@@ -51,6 +51,7 @@ type Dependencies struct {
    GeoIP  *geoip.Component
    Kafka  *kafka.Component
    HTTP   *http.Component
+   Schema *schema.Component
}

// New creates a new core component.
@@ -119,7 +120,7 @@ func (c *Component) runWorker(workerID int) error {
    }
    // Serialize flow to Protobuf
-   buf := schema.Flows.ProtobufMarshal(flow)
+   buf := c.d.Schema.ProtobufMarshal(flow)
    c.metrics.flowsProcessingTime.Observe(time.Now().Sub(start).Seconds())
    // Forward to Kafka. This could block and buf is now owned by the


@@ -53,6 +53,7 @@ func TestCore(t *testing.T) {
        Kafka:  kafkaComponent,
        HTTP:   httpComponent,
        BMP:    bmpComponent,
+       Schema: schema.NewMock(t),
    })
    if err != nil {
        t.Fatalf("New() error:\n%+v", err)


@@ -15,7 +15,7 @@ import (
"github.com/netsampler/goflow2/producer" "github.com/netsampler/goflow2/producer"
) )
func decode(msgDec interface{}, samplingRateSys producer.SamplingRateSystem) []*schema.FlowMessage { func (nd *Decoder) decode(msgDec interface{}, samplingRateSys producer.SamplingRateSystem) []*schema.FlowMessage {
flowMessageSet := []*schema.FlowMessage{} flowMessageSet := []*schema.FlowMessage{}
var obsDomainID uint32 var obsDomainID uint32
var dataFlowSet []netflow.DataFlowSet var dataFlowSet []netflow.DataFlowSet
@@ -47,7 +47,7 @@ func decode(msgDec interface{}, samplingRateSys producer.SamplingRateSystem) []*
// Parse fields // Parse fields
for _, dataFlowSetItem := range dataFlowSet { for _, dataFlowSetItem := range dataFlowSet {
for _, record := range dataFlowSetItem.Records { for _, record := range dataFlowSetItem.Records {
flow := decodeRecord(version, record.Values) flow := nd.decodeRecord(version, record.Values)
if flow != nil { if flow != nil {
flow.SamplingRate = samplingRate flow.SamplingRate = samplingRate
flowMessageSet = append(flowMessageSet, flow) flowMessageSet = append(flowMessageSet, flow)
@@ -58,7 +58,7 @@ func decode(msgDec interface{}, samplingRateSys producer.SamplingRateSystem) []*
return flowMessageSet return flowMessageSet
} }
func decodeRecord(version int, fields []netflow.DataField) *schema.FlowMessage { func (nd *Decoder) decodeRecord(version int, fields []netflow.DataField) *schema.FlowMessage {
var etype uint16 var etype uint16
bf := &schema.FlowMessage{} bf := &schema.FlowMessage{}
for _, field := range fields { for _, field := range fields {
@@ -73,9 +73,9 @@ func decodeRecord(version int, fields []netflow.DataField) *schema.FlowMessage {
switch field.Type { switch field.Type {
// Statistics // Statistics
case netflow.NFV9_FIELD_IN_BYTES, netflow.NFV9_FIELD_OUT_BYTES: case netflow.NFV9_FIELD_IN_BYTES, netflow.NFV9_FIELD_OUT_BYTES:
schema.Flows.ProtobufAppendVarint(bf, schema.ColumnBytes, decodeUNumber(v)) nd.d.Schema.ProtobufAppendVarint(bf, schema.ColumnBytes, decodeUNumber(v))
case netflow.NFV9_FIELD_IN_PKTS, netflow.NFV9_FIELD_OUT_PKTS: case netflow.NFV9_FIELD_IN_PKTS, netflow.NFV9_FIELD_OUT_PKTS:
schema.Flows.ProtobufAppendVarint(bf, schema.ColumnPackets, decodeUNumber(v)) nd.d.Schema.ProtobufAppendVarint(bf, schema.ColumnPackets, decodeUNumber(v))
// L3 // L3
case netflow.NFV9_FIELD_IPV4_SRC_ADDR: case netflow.NFV9_FIELD_IPV4_SRC_ADDR:
@@ -91,19 +91,19 @@ func decodeRecord(version int, fields []netflow.DataField) *schema.FlowMessage {
etype = helpers.ETypeIPv6 etype = helpers.ETypeIPv6
bf.DstAddr = decodeIP(v) bf.DstAddr = decodeIP(v)
case netflow.NFV9_FIELD_SRC_MASK, netflow.NFV9_FIELD_IPV6_SRC_MASK: case netflow.NFV9_FIELD_SRC_MASK, netflow.NFV9_FIELD_IPV6_SRC_MASK:
schema.Flows.ProtobufAppendVarint(bf, schema.ColumnSrcNetMask, decodeUNumber(v)) nd.d.Schema.ProtobufAppendVarint(bf, schema.ColumnSrcNetMask, decodeUNumber(v))
case netflow.NFV9_FIELD_DST_MASK, netflow.NFV9_FIELD_IPV6_DST_MASK: case netflow.NFV9_FIELD_DST_MASK, netflow.NFV9_FIELD_IPV6_DST_MASK:
schema.Flows.ProtobufAppendVarint(bf, schema.ColumnDstNetMask, decodeUNumber(v)) nd.d.Schema.ProtobufAppendVarint(bf, schema.ColumnDstNetMask, decodeUNumber(v))
case netflow.NFV9_FIELD_IPV4_NEXT_HOP, netflow.NFV9_FIELD_BGP_IPV4_NEXT_HOP, netflow.NFV9_FIELD_IPV6_NEXT_HOP, netflow.NFV9_FIELD_BGP_IPV6_NEXT_HOP: case netflow.NFV9_FIELD_IPV4_NEXT_HOP, netflow.NFV9_FIELD_BGP_IPV4_NEXT_HOP, netflow.NFV9_FIELD_IPV6_NEXT_HOP, netflow.NFV9_FIELD_BGP_IPV6_NEXT_HOP:
bf.NextHop = decodeIP(v) bf.NextHop = decodeIP(v)
// L4 // L4
case netflow.NFV9_FIELD_L4_SRC_PORT: case netflow.NFV9_FIELD_L4_SRC_PORT:
schema.Flows.ProtobufAppendVarint(bf, schema.ColumnSrcPort, decodeUNumber(v)) nd.d.Schema.ProtobufAppendVarint(bf, schema.ColumnSrcPort, decodeUNumber(v))
case netflow.NFV9_FIELD_L4_DST_PORT: case netflow.NFV9_FIELD_L4_DST_PORT:
schema.Flows.ProtobufAppendVarint(bf, schema.ColumnDstPort, decodeUNumber(v)) nd.d.Schema.ProtobufAppendVarint(bf, schema.ColumnDstPort, decodeUNumber(v))
case netflow.NFV9_FIELD_PROTOCOL: case netflow.NFV9_FIELD_PROTOCOL:
schema.Flows.ProtobufAppendVarint(bf, schema.ColumnProto, decodeUNumber(v)) nd.d.Schema.ProtobufAppendVarint(bf, schema.ColumnProto, decodeUNumber(v))
// Network // Network
case netflow.NFV9_FIELD_SRC_AS: case netflow.NFV9_FIELD_SRC_AS:
@@ -119,10 +119,10 @@ func decodeRecord(version int, fields []netflow.DataField) *schema.FlowMessage {
// Remaining // Remaining
case netflow.NFV9_FIELD_FORWARDING_STATUS: case netflow.NFV9_FIELD_FORWARDING_STATUS:
schema.Flows.ProtobufAppendVarint(bf, schema.ColumnForwardingStatus, decodeUNumber(v)) nd.d.Schema.ProtobufAppendVarint(bf, schema.ColumnForwardingStatus, decodeUNumber(v))
} }
} }
schema.Flows.ProtobufAppendVarint(bf, schema.ColumnEType, uint64(etype)) nd.d.Schema.ProtobufAppendVarint(bf, schema.ColumnEType, uint64(etype))
return bf return bf
} }


@@ -21,6 +21,7 @@ import (
// Decoder contains the state for the Netflow v9 decoder.
type Decoder struct {
    r *reporter.Reporter
+   d decoder.Dependencies

    // Templates and sampling systems
    systemsLock sync.RWMutex
@@ -37,9 +38,10 @@ type Decoder struct {
}

// New instantiates a new netflow decoder.
-func New(r *reporter.Reporter) decoder.Decoder {
+func New(r *reporter.Reporter, dependencies decoder.Dependencies) decoder.Decoder {
    nd := &Decoder{
        r:         r,
+       d:         dependencies,
        templates: map[string]*templateSystem{},
        sampling:  map[string]producer.SamplingRateSystem{},
    }
@@ -208,7 +210,7 @@ func (nd *Decoder) Decode(in decoder.RawFlow) []*schema.FlowMessage {
        }
    }

-   flowMessageSet := decode(msgDec, sampling)
+   flowMessageSet := nd.decode(msgDec, sampling)
    exporterAddress, _ := netip.AddrFromSlice(in.Source.To16())
    for _, fmsg := range flowMessageSet {
        fmsg.TimeReceived = ts


@@ -4,11 +4,9 @@
package netflow

import (
-   "fmt"
    "net"
    "net/netip"
    "path/filepath"
-   "reflect"
    "testing"

    "akvorado/common/helpers"
@@ -19,7 +17,7 @@ import (
func TestDecode(t *testing.T) {
    r := reporter.NewMock(t)
-   nfdecoder := New(r)
+   nfdecoder := New(r, decoder.Dependencies{Schema: schema.NewMock(t)})

    // Send an option template
    template := helpers.ReadPcapPayload(t, filepath.Join("testdata", "options-template-257.pcap"))
@@ -183,7 +181,7 @@ func TestDecode(t *testing.T) {
        f.TimeReceived = 0
    }
-   if diff := helpers.Diff(got, expectedFlows, helpers.DiffFormatter(reflect.TypeOf(schema.ColumnBytes), fmt.Sprint)); diff != "" {
+   if diff := helpers.Diff(got, expectedFlows); diff != "" {
        t.Fatalf("Decode() (-got, +want):\n%s", diff)
    }
    gotMetrics = r.GetMetrics(


@@ -24,6 +24,11 @@ type Decoder interface {
    Name() string
}

+// Dependencies are the dependencies for the decoder
+type Dependencies struct {
+   Schema *schema.Component
+}
+
// RawFlow is an undecoded flow.
type RawFlow struct {
    TimeReceived time.Time
@@ -32,4 +37,4 @@ type RawFlow struct {
}

// NewDecoderFunc is the signature of a function to instantiate a decoder.
-type NewDecoderFunc func(*reporter.Reporter) Decoder
+type NewDecoderFunc func(*reporter.Reporter, Dependencies) Decoder
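
With this signature, every decoder receives its schema component at construction time rather than reaching for a global. A test-shaped sketch of how a caller wires this up, using the netflow decoder from this commit; the helper name is hypothetical and the import paths assume the package layout under akvorado/inlet/flow/decoder seen elsewhere in the diff:

package flow_test

import (
    "testing"

    "akvorado/common/reporter"
    "akvorado/common/schema"
    "akvorado/inlet/flow/decoder"
    "akvorado/inlet/flow/decoder/netflow"
)

// newDecoderSketch builds a decoder with an explicit schema dependency.
func newDecoderSketch(t *testing.T) decoder.Decoder {
    r := reporter.NewMock(t)
    return netflow.New(r, decoder.Dependencies{Schema: schema.NewMock(t)})
}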


@@ -14,7 +14,7 @@ import (
"github.com/netsampler/goflow2/decoders/sflow" "github.com/netsampler/goflow2/decoders/sflow"
) )
func decode(msgDec interface{}) []*schema.FlowMessage { func (nd *Decoder) decode(msgDec interface{}) []*schema.FlowMessage {
flowMessageSet := []*schema.FlowMessage{} flowMessageSet := []*schema.FlowMessage{}
switch msgDec.(type) { switch msgDec.(type) {
case sflow.Packet: case sflow.Packet:
@@ -54,33 +54,33 @@ func decode(msgDec interface{}) []*schema.FlowMessage {
} }
bf.ExporterAddress = decodeIP(packet.AgentIP) bf.ExporterAddress = decodeIP(packet.AgentIP)
schema.Flows.ProtobufAppendVarint(bf, schema.ColumnPackets, 1) nd.d.Schema.ProtobufAppendVarint(bf, schema.ColumnPackets, 1)
schema.Flows.ProtobufAppendVarint(bf, schema.ColumnForwardingStatus, uint64(forwardingStatus)) nd.d.Schema.ProtobufAppendVarint(bf, schema.ColumnForwardingStatus, uint64(forwardingStatus))
for _, record := range records { for _, record := range records {
switch recordData := record.Data.(type) { switch recordData := record.Data.(type) {
case sflow.SampledHeader: case sflow.SampledHeader:
schema.Flows.ProtobufAppendVarint(bf, schema.ColumnBytes, uint64(recordData.FrameLength)) nd.d.Schema.ProtobufAppendVarint(bf, schema.ColumnBytes, uint64(recordData.FrameLength))
parseSampledHeader(bf, &recordData) nd.parseSampledHeader(bf, &recordData)
case sflow.SampledIPv4: case sflow.SampledIPv4:
bf.SrcAddr = decodeIP(recordData.Base.SrcIP) bf.SrcAddr = decodeIP(recordData.Base.SrcIP)
bf.DstAddr = decodeIP(recordData.Base.DstIP) bf.DstAddr = decodeIP(recordData.Base.DstIP)
schema.Flows.ProtobufAppendVarint(bf, schema.ColumnBytes, uint64(recordData.Base.Length)) nd.d.Schema.ProtobufAppendVarint(bf, schema.ColumnBytes, uint64(recordData.Base.Length))
schema.Flows.ProtobufAppendVarint(bf, schema.ColumnProto, uint64(recordData.Base.Protocol)) nd.d.Schema.ProtobufAppendVarint(bf, schema.ColumnProto, uint64(recordData.Base.Protocol))
schema.Flows.ProtobufAppendVarint(bf, schema.ColumnSrcPort, uint64(recordData.Base.SrcPort)) nd.d.Schema.ProtobufAppendVarint(bf, schema.ColumnSrcPort, uint64(recordData.Base.SrcPort))
schema.Flows.ProtobufAppendVarint(bf, schema.ColumnDstPort, uint64(recordData.Base.DstPort)) nd.d.Schema.ProtobufAppendVarint(bf, schema.ColumnDstPort, uint64(recordData.Base.DstPort))
schema.Flows.ProtobufAppendVarint(bf, schema.ColumnEType, helpers.ETypeIPv4) nd.d.Schema.ProtobufAppendVarint(bf, schema.ColumnEType, helpers.ETypeIPv4)
case sflow.SampledIPv6: case sflow.SampledIPv6:
bf.SrcAddr = decodeIP(recordData.Base.SrcIP) bf.SrcAddr = decodeIP(recordData.Base.SrcIP)
bf.DstAddr = decodeIP(recordData.Base.DstIP) bf.DstAddr = decodeIP(recordData.Base.DstIP)
schema.Flows.ProtobufAppendVarint(bf, schema.ColumnBytes, uint64(recordData.Base.Length)) nd.d.Schema.ProtobufAppendVarint(bf, schema.ColumnBytes, uint64(recordData.Base.Length))
schema.Flows.ProtobufAppendVarint(bf, schema.ColumnProto, uint64(recordData.Base.Protocol)) nd.d.Schema.ProtobufAppendVarint(bf, schema.ColumnProto, uint64(recordData.Base.Protocol))
schema.Flows.ProtobufAppendVarint(bf, schema.ColumnSrcPort, uint64(recordData.Base.SrcPort)) nd.d.Schema.ProtobufAppendVarint(bf, schema.ColumnSrcPort, uint64(recordData.Base.SrcPort))
schema.Flows.ProtobufAppendVarint(bf, schema.ColumnDstPort, uint64(recordData.Base.DstPort)) nd.d.Schema.ProtobufAppendVarint(bf, schema.ColumnDstPort, uint64(recordData.Base.DstPort))
schema.Flows.ProtobufAppendVarint(bf, schema.ColumnEType, helpers.ETypeIPv6) nd.d.Schema.ProtobufAppendVarint(bf, schema.ColumnEType, helpers.ETypeIPv6)
case sflow.ExtendedRouter: case sflow.ExtendedRouter:
schema.Flows.ProtobufAppendVarint(bf, schema.ColumnSrcNetMask, uint64(recordData.SrcMaskLen)) nd.d.Schema.ProtobufAppendVarint(bf, schema.ColumnSrcNetMask, uint64(recordData.SrcMaskLen))
schema.Flows.ProtobufAppendVarint(bf, schema.ColumnDstNetMask, uint64(recordData.DstMaskLen)) nd.d.Schema.ProtobufAppendVarint(bf, schema.ColumnDstNetMask, uint64(recordData.DstMaskLen))
bf.NextHop = decodeIP(recordData.NextHop) bf.NextHop = decodeIP(recordData.NextHop)
case sflow.ExtendedGateway: case sflow.ExtendedGateway:
bf.NextHop = decodeIP(recordData.NextHop) bf.NextHop = decodeIP(recordData.NextHop)
@@ -101,15 +101,15 @@ func decode(msgDec interface{}) []*schema.FlowMessage {
return flowMessageSet return flowMessageSet
} }
func parseSampledHeader(bf *schema.FlowMessage, header *sflow.SampledHeader) { func (nd *Decoder) parseSampledHeader(bf *schema.FlowMessage, header *sflow.SampledHeader) {
data := header.HeaderData data := header.HeaderData
switch header.Protocol { switch header.Protocol {
case 1: // Ethernet case 1: // Ethernet
parseEthernetHeader(bf, data) nd.parseEthernetHeader(bf, data)
} }
} }
func parseEthernetHeader(bf *schema.FlowMessage, data []byte) { func (nd *Decoder) parseEthernetHeader(bf *schema.FlowMessage, data []byte) {
if len(data) < 14 { if len(data) < 14 {
return return
} }
@@ -150,7 +150,7 @@ func parseEthernetHeader(bf *schema.FlowMessage, data []byte) {
if len(data) < 20 { if len(data) < 20 {
return return
} }
schema.Flows.ProtobufAppendVarint(bf, schema.ColumnEType, helpers.ETypeIPv4) nd.d.Schema.ProtobufAppendVarint(bf, schema.ColumnEType, helpers.ETypeIPv4)
bf.SrcAddr = decodeIP(data[12:16]) bf.SrcAddr = decodeIP(data[12:16])
bf.DstAddr = decodeIP(data[16:20]) bf.DstAddr = decodeIP(data[16:20])
proto = data[9] proto = data[9]
@@ -165,19 +165,19 @@ func parseEthernetHeader(bf *schema.FlowMessage, data []byte) {
if len(data) < 40 { if len(data) < 40 {
return return
} }
schema.Flows.ProtobufAppendVarint(bf, schema.ColumnEType, helpers.ETypeIPv6) nd.d.Schema.ProtobufAppendVarint(bf, schema.ColumnEType, helpers.ETypeIPv6)
bf.SrcAddr = decodeIP(data[8:24]) bf.SrcAddr = decodeIP(data[8:24])
bf.DstAddr = decodeIP(data[24:40]) bf.DstAddr = decodeIP(data[24:40])
proto = data[6] proto = data[6]
data = data[40:] data = data[40:]
} }
schema.Flows.ProtobufAppendVarint(bf, schema.ColumnProto, uint64(proto)) nd.d.Schema.ProtobufAppendVarint(bf, schema.ColumnProto, uint64(proto))
if proto == 6 || proto == 17 { if proto == 6 || proto == 17 {
if len(data) > 4 { if len(data) > 4 {
schema.Flows.ProtobufAppendVarint(bf, schema.ColumnSrcPort, nd.d.Schema.ProtobufAppendVarint(bf, schema.ColumnSrcPort,
uint64(binary.BigEndian.Uint16(data[0:2]))) uint64(binary.BigEndian.Uint16(data[0:2])))
schema.Flows.ProtobufAppendVarint(bf, schema.ColumnDstPort, nd.d.Schema.ProtobufAppendVarint(bf, schema.ColumnDstPort,
uint64(binary.BigEndian.Uint16(data[2:4]))) uint64(binary.BigEndian.Uint16(data[2:4])))
} }
} }


@@ -30,6 +30,7 @@ const (
// Decoder contains the state for the sFlow v5 decoder.
type Decoder struct {
    r *reporter.Reporter
+   d decoder.Dependencies

    metrics struct {
        errors *reporter.CounterVec
@@ -40,9 +41,10 @@ type Decoder struct {
}

// New instantiates a new sFlow decoder.
-func New(r *reporter.Reporter) decoder.Decoder {
+func New(r *reporter.Reporter, dependencies decoder.Dependencies) decoder.Decoder {
    nd := &Decoder{
        r: r,
+       d: dependencies,
    }
    nd.metrics.errors = nd.r.CounterVec(
@@ -129,7 +131,7 @@ func (nd *Decoder) Decode(in decoder.RawFlow) []*schema.FlowMessage {
        }
    }

-   flowMessageSet := decode(msgDec)
+   flowMessageSet := nd.decode(msgDec)
    for _, fmsg := range flowMessageSet {
        fmsg.TimeReceived = ts
    }


@@ -4,11 +4,9 @@
package sflow

import (
-   "fmt"
    "net"
    "net/netip"
    "path/filepath"
-   "reflect"
    "testing"

    "akvorado/common/helpers"
@@ -19,7 +17,7 @@ import (
func TestDecode(t *testing.T) {
    r := reporter.NewMock(t)
-   sdecoder := New(r)
+   sdecoder := New(r, decoder.Dependencies{Schema: schema.NewMock(t)})

    // Send data
    data := helpers.ReadPcapPayload(t, filepath.Join("testdata", "data-1140.pcap"))
@@ -119,7 +117,7 @@ func TestDecode(t *testing.T) {
        f.TimeReceived = 0
    }
-   if diff := helpers.Diff(got, expectedFlows, helpers.DiffFormatter(reflect.TypeOf(schema.ColumnBytes), fmt.Sprint)); diff != "" {
+   if diff := helpers.Diff(got, expectedFlows); diff != "" {
        t.Fatalf("Decode() (-got, +want):\n%s", diff)
    }
    gotMetrics := r.GetMetrics(
@@ -139,7 +137,7 @@ func TestDecode(t *testing.T) {
func TestDecodeInterface(t *testing.T) {
    r := reporter.NewMock(t)
-   sdecoder := New(r)
+   sdecoder := New(r, decoder.Dependencies{Schema: schema.NewMock(t)})

    t.Run("local interface", func(t *testing.T) {
        // Send data
@@ -170,7 +168,7 @@ func TestDecodeInterface(t *testing.T) {
            f.TimeReceived = 0
        }
-       if diff := helpers.Diff(got, expectedFlows, helpers.DiffFormatter(reflect.TypeOf(schema.ColumnBytes), fmt.Sprint)); diff != "" {
+       if diff := helpers.Diff(got, expectedFlows); diff != "" {
            t.Fatalf("Decode() (-got, +want):\n%s", diff)
        }
    })
@@ -205,7 +203,7 @@ func TestDecodeInterface(t *testing.T) {
            f.TimeReceived = 0
        }
-       if diff := helpers.Diff(got, expectedFlows, helpers.DiffFormatter(reflect.TypeOf(schema.ColumnBytes), fmt.Sprint)); diff != "" {
+       if diff := helpers.Diff(got, expectedFlows); diff != "" {
            t.Fatalf("Decode() (-got, +want):\n%s", diff)
        }
    })
@@ -239,7 +237,7 @@ func TestDecodeInterface(t *testing.T) {
            f.TimeReceived = 0
        }
-       if diff := helpers.Diff(got, expectedFlows, helpers.DiffFormatter(reflect.TypeOf(schema.ColumnBytes), fmt.Sprint)); diff != "" {
+       if diff := helpers.Diff(got, expectedFlows); diff != "" {
            t.Fatalf("Decode() (-got, +want):\n%s", diff)
        }
    })
@@ -278,7 +276,7 @@ func TestDecodeInterface(t *testing.T) {
            f.TimeReceived = 0
        }
-       if diff := helpers.Diff(got, expectedFlows, helpers.DiffFormatter(reflect.TypeOf(schema.ColumnBytes), fmt.Sprint)); diff != "" {
+       if diff := helpers.Diff(got, expectedFlows); diff != "" {
            t.Fatalf("Decode() (-got, +want):\n%s", diff)
        }

View File

@@ -13,7 +13,9 @@ import (
// DummyDecoder is a simple decoder producing flows from random data. // DummyDecoder is a simple decoder producing flows from random data.
// The payload is copied into InIfDescription. // The payload is copied into InIfDescription.
type DummyDecoder struct{} type DummyDecoder struct {
Schema *schema.Component
}
// Decode returns uninteresting flow messages. // Decode returns uninteresting flow messages.
func (dc *DummyDecoder) Decode(in RawFlow) []*schema.FlowMessage { func (dc *DummyDecoder) Decode(in RawFlow) []*schema.FlowMessage {
@@ -22,9 +24,9 @@ func (dc *DummyDecoder) Decode(in RawFlow) []*schema.FlowMessage {
TimeReceived: uint64(in.TimeReceived.UTC().Unix()), TimeReceived: uint64(in.TimeReceived.UTC().Unix()),
ExporterAddress: exporterAddress, ExporterAddress: exporterAddress,
} }
schema.Flows.ProtobufAppendVarint(f, schema.ColumnBytes, uint64(len(in.Payload))) dc.Schema.ProtobufAppendVarint(f, schema.ColumnBytes, uint64(len(in.Payload)))
schema.Flows.ProtobufAppendVarint(f, schema.ColumnPackets, 1) dc.Schema.ProtobufAppendVarint(f, schema.ColumnPackets, 1)
schema.Flows.ProtobufAppendBytes(f, schema.ColumnInIfDescription, in.Payload) dc.Schema.ProtobufAppendBytes(f, schema.ColumnInIfDescription, in.Payload)
return []*schema.FlowMessage{f} return []*schema.FlowMessage{f}
} }
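
The dummy decoder shows the appenders well: every column write now goes through the injected component rather than the former package-level `schema.Flows`. A sketch of driving it directly, under the same assumptions as above:

package decoder_test

import (
	"net"
	"testing"
	"time"

	"akvorado/common/schema"
	"akvorado/inlet/flow/decoder"
)

func TestDummyDecoderSketch(t *testing.T) {
	dc := &decoder.DummyDecoder{Schema: schema.NewMock(t)}
	flows := dc.Decode(decoder.RawFlow{
		TimeReceived: time.Now(),
		Payload:      []byte("eth0"), // lands in InIfDescription
		Source:       net.ParseIP("127.0.0.1"),
	})
	if len(flows) != 1 {
		t.Fatalf("Decode() returned %d flows, expected 1", len(flows))
	}
}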

View File

@@ -21,7 +21,8 @@ import (
func BenchmarkDecodeEncodeNetflow(b *testing.B) { func BenchmarkDecodeEncodeNetflow(b *testing.B) {
schema.DisableDebug(b) schema.DisableDebug(b)
r := reporter.NewMock(b) r := reporter.NewMock(b)
nfdecoder := netflow.New(r) sch := schema.NewMock(b)
nfdecoder := netflow.New(r, decoder.Dependencies{Schema: sch})
template := helpers.ReadPcapPayload(b, filepath.Join("decoder", "netflow", "testdata", "options-template-257.pcap")) template := helpers.ReadPcapPayload(b, filepath.Join("decoder", "netflow", "testdata", "options-template-257.pcap"))
got := nfdecoder.Decode(decoder.RawFlow{Payload: template, Source: net.ParseIP("127.0.0.1")}) got := nfdecoder.Decode(decoder.RawFlow{Payload: template, Source: net.ParseIP("127.0.0.1")})
@@ -50,7 +51,7 @@ func BenchmarkDecodeEncodeNetflow(b *testing.B) {
got = nfdecoder.Decode(decoder.RawFlow{Payload: data, Source: net.ParseIP("127.0.0.1")}) got = nfdecoder.Decode(decoder.RawFlow{Payload: data, Source: net.ParseIP("127.0.0.1")})
if withEncoding { if withEncoding {
for _, flow := range got { for _, flow := range got {
schema.Flows.ProtobufMarshal(flow) sch.ProtobufMarshal(flow)
} }
} }
} }
@@ -64,7 +65,8 @@ func BenchmarkDecodeEncodeNetflow(b *testing.B) {
func BenchmarkDecodeEncodeSflow(b *testing.B) { func BenchmarkDecodeEncodeSflow(b *testing.B) {
schema.DisableDebug(b) schema.DisableDebug(b)
r := reporter.NewMock(b) r := reporter.NewMock(b)
sdecoder := sflow.New(r) sch := schema.NewMock(b)
sdecoder := sflow.New(r, decoder.Dependencies{Schema: sch})
data := helpers.ReadPcapPayload(b, filepath.Join("decoder", "sflow", "testdata", "data-1140.pcap")) data := helpers.ReadPcapPayload(b, filepath.Join("decoder", "sflow", "testdata", "data-1140.pcap"))
for _, withEncoding := range []bool{true, false} { for _, withEncoding := range []bool{true, false} {
@@ -78,7 +80,7 @@ func BenchmarkDecodeEncodeSflow(b *testing.B) {
got = sdecoder.Decode(decoder.RawFlow{Payload: data, Source: net.ParseIP("127.0.0.1")}) got = sdecoder.Decode(decoder.RawFlow{Payload: data, Source: net.ParseIP("127.0.0.1")})
if withEncoding { if withEncoding {
for _, flow := range got { for _, flow := range got {
schema.Flows.ProtobufMarshal(flow) sch.ProtobufMarshal(flow)
} }
} }
} }
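
Both benchmarks share the same encode step; isolated as a sketch (a fragment, not a complete program, with `sch` and `got` as defined above):

// sch is the *schema.Component built above, got the slice returned by Decode().
for _, flow := range got {
	sch.ProtobufMarshal(flow) // encodes with the component's protobuf schema
}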

View File

@@ -19,7 +19,9 @@ func TestFileInput(t *testing.T) {
r := reporter.NewMock(t) r := reporter.NewMock(t)
configuration := DefaultConfiguration().(*Configuration) configuration := DefaultConfiguration().(*Configuration)
configuration.Paths = []string{path.Join("testdata", "file1.txt"), path.Join("testdata", "file2.txt")} configuration.Paths = []string{path.Join("testdata", "file1.txt"), path.Join("testdata", "file2.txt")}
in, err := configuration.New(r, daemon.NewMock(t), &decoder.DummyDecoder{}) in, err := configuration.New(r, daemon.NewMock(t), &decoder.DummyDecoder{
Schema: schema.NewMock(t),
})
if err != nil { if err != nil {
t.Fatalf("New() error:\n%+v", err) t.Fatalf("New() error:\n%+v", err)
} }

View File

@@ -4,10 +4,8 @@
package udp package udp
import ( import (
"fmt"
"net" "net"
"net/netip" "net/netip"
"reflect"
"testing" "testing"
"time" "time"
@@ -22,7 +20,7 @@ func TestUDPInput(t *testing.T) {
r := reporter.NewMock(t) r := reporter.NewMock(t)
configuration := DefaultConfiguration().(*Configuration) configuration := DefaultConfiguration().(*Configuration)
configuration.Listen = "127.0.0.1:0" configuration.Listen = "127.0.0.1:0"
in, err := configuration.New(r, daemon.NewMock(t), &decoder.DummyDecoder{}) in, err := configuration.New(r, daemon.NewMock(t), &decoder.DummyDecoder{Schema: schema.NewMock(t)})
if err != nil { if err != nil {
t.Fatalf("New() error:\n%+v", err) t.Fatalf("New() error:\n%+v", err)
} }
@@ -73,7 +71,7 @@ func TestUDPInput(t *testing.T) {
}, },
}, },
} }
if diff := helpers.Diff(got, expected, helpers.DiffFormatter(reflect.TypeOf(schema.ColumnBytes), fmt.Sprint)); diff != "" { if diff := helpers.Diff(got, expected); diff != "" {
t.Fatalf("Input data (-got, +want):\n%s", diff) t.Fatalf("Input data (-got, +want):\n%s", diff)
} }
@@ -99,7 +97,9 @@ func TestOverflow(t *testing.T) {
configuration := DefaultConfiguration().(*Configuration) configuration := DefaultConfiguration().(*Configuration)
configuration.Listen = "127.0.0.1:0" configuration.Listen = "127.0.0.1:0"
configuration.QueueSize = 1 configuration.QueueSize = 1
in, err := configuration.New(r, daemon.NewMock(t), &decoder.DummyDecoder{}) in, err := configuration.New(r, daemon.NewMock(t), &decoder.DummyDecoder{
Schema: schema.NewMock(t),
})
if err != nil { if err != nil {
t.Fatalf("New() error:\n%+v", err) t.Fatalf("New() error:\n%+v", err)
} }

View File

@@ -47,6 +47,7 @@ type Component struct {
type Dependencies struct { type Dependencies struct {
Daemon daemon.Component Daemon daemon.Component
HTTP *http.Component HTTP *http.Component
Schema *schema.Component
} }
// New creates a new flow component. // New creates a new flow component.
@@ -77,7 +78,7 @@ func New(r *reporter.Reporter, configuration Configuration, dependencies Depende
if !ok { if !ok {
return nil, fmt.Errorf("unknown decoder %q", input.Decoder) return nil, fmt.Errorf("unknown decoder %q", input.Decoder)
} }
dec = decoderfunc(r) dec = decoderfunc(r, decoder.Dependencies{Schema: c.d.Schema})
alreadyInitialized[input.Decoder] = dec alreadyInitialized[input.Decoder] = dec
decs[idx] = c.wrapDecoder(dec, input.UseSrcAddrForExporterAddr) decs[idx] = c.wrapDecoder(dec, input.UseSrcAddrForExporterAddr)
} }
@@ -120,7 +121,7 @@ func New(r *reporter.Reporter, configuration Configuration, dependencies Depende
c.d.HTTP.AddHandler("/api/v0/inlet/flow/schema.proto", c.d.HTTP.AddHandler("/api/v0/inlet/flow/schema.proto",
netHTTP.HandlerFunc(func(w netHTTP.ResponseWriter, r *netHTTP.Request) { netHTTP.HandlerFunc(func(w netHTTP.ResponseWriter, r *netHTTP.Request) {
w.Header().Set("Content-Type", "text/plain") w.Header().Set("Content-Type", "text/plain")
w.Write([]byte(schema.Flows.ProtobufDefinition())) w.Write([]byte(c.d.Schema.ProtobufDefinition()))
})) }))
return &c, nil return &c, nil
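
Since the protobuf definition now comes from the injected schema component, the endpoint registered above still serves it the same way; a sketch of a client fetching it, with the listen address as a placeholder:

package main

import (
	"fmt"
	"io"
	"net/http"
)

func main() {
	// Address is illustrative; use the inlet's actual HTTP listen address.
	resp, err := http.Get("http://127.0.0.1:8080/api/v0/inlet/flow/schema.proto")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	body, err := io.ReadAll(resp.Body)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%s\n", body) // starts with: syntax = "proto3";
}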

View File

@@ -34,6 +34,7 @@ func NewMock(t *testing.T, r *reporter.Reporter, config Configuration) *Componen
c, err := New(r, config, Dependencies{ c, err := New(r, config, Dependencies{
Daemon: daemon.NewMock(t), Daemon: daemon.NewMock(t),
HTTP: http.NewMock(t, r), HTTP: http.NewMock(t, r),
Schema: schema.NewMock(t),
}) })
if err != nil { if err != nil {
t.Fatalf("New() error:\n%+v", err) t.Fatalf("New() error:\n%+v", err)

View File

@@ -29,9 +29,9 @@ func TestRealKafka(t *testing.T) {
configuration.Brokers = brokers configuration.Brokers = brokers
configuration.Version = kafka.Version(sarama.V2_8_1_0) configuration.Version = kafka.Version(sarama.V2_8_1_0)
configuration.FlushInterval = 100 * time.Millisecond configuration.FlushInterval = 100 * time.Millisecond
expectedTopicName := fmt.Sprintf("%s-%s", topicName, schema.Flows.ProtobufMessageHash()) expectedTopicName := fmt.Sprintf("%s-%s", topicName, schema.NewMock(t).ProtobufMessageHash())
r := reporter.NewMock(t) r := reporter.NewMock(t)
c, err := New(r, configuration, Dependencies{Daemon: daemon.NewMock(t)}) c, err := New(r, configuration, Dependencies{Daemon: daemon.NewMock(t), Schema: schema.NewMock(t)})
if err != nil { if err != nil {
t.Fatalf("New() error:\n%+v", err) t.Fatalf("New() error:\n%+v", err)
} }

View File

@@ -37,6 +37,7 @@ type Component struct {
// Dependencies define the dependencies of the Kafka exporter. // Dependencies define the dependencies of the Kafka exporter.
type Dependencies struct { type Dependencies struct {
Daemon daemon.Component Daemon daemon.Component
Schema *schema.Component
} }
// New creates a new Kafka exporter component. // New creates a new Kafka exporter component.
@@ -65,7 +66,7 @@ func New(reporter *reporter.Reporter, configuration Configuration, dependencies
config: configuration, config: configuration,
kafkaConfig: kafkaConfig, kafkaConfig: kafkaConfig,
kafkaTopic: fmt.Sprintf("%s-%s", configuration.Topic, schema.Flows.ProtobufMessageHash()), kafkaTopic: fmt.Sprintf("%s-%s", configuration.Topic, dependencies.Schema.ProtobufMessageHash()),
} }
c.initMetrics() c.initMetrics()
c.createKafkaProducer = func() (sarama.AsyncProducer, error) { c.createKafkaProducer = func() (sarama.AsyncProducer, error) {
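
The topic suffix ties produced messages to a specific schema revision: change the schema and producers move to a fresh topic instead of mixing incompatible encodings. A sketch of the derivation; `kafkaTopicFor` is a hypothetical helper and the hash value is illustrative, the real one comes from `ProtobufMessageHash()`:

package main

import "fmt"

// kafkaTopicFor mirrors the expression used above; hypothetical helper.
func kafkaTopicFor(topic, schemaHash string) string {
	return fmt.Sprintf("%s-%s", topic, schemaHash)
}

func main() {
	fmt.Println(kafkaTopicFor("flows", "3cc9e482")) // flows-3cc9e482
}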

View File

@@ -27,7 +27,7 @@ func TestKafka(t *testing.T) {
mockProducer.ExpectInputWithMessageCheckerFunctionAndSucceed(func(got *sarama.ProducerMessage) error { mockProducer.ExpectInputWithMessageCheckerFunctionAndSucceed(func(got *sarama.ProducerMessage) error {
defer close(received) defer close(received)
expected := sarama.ProducerMessage{ expected := sarama.ProducerMessage{
Topic: fmt.Sprintf("flows-%s", schema.Flows.ProtobufMessageHash()), Topic: fmt.Sprintf("flows-%s", c.d.Schema.ProtobufMessageHash()),
Key: got.Key, Key: got.Key,
Value: sarama.ByteEncoder("hello world!"), Value: sarama.ByteEncoder("hello world!"),
Partition: got.Partition, Partition: got.Partition,
@@ -52,7 +52,7 @@ func TestKafka(t *testing.T) {
gotMetrics := r.GetMetrics("akvorado_inlet_kafka_") gotMetrics := r.GetMetrics("akvorado_inlet_kafka_")
expectedMetrics := map[string]string{ expectedMetrics := map[string]string{
`sent_bytes_total{exporter="127.0.0.1"}`: "26", `sent_bytes_total{exporter="127.0.0.1"}`: "26",
fmt.Sprintf(`errors_total{error="kafka: Failed to produce message to topic flows-%s: noooo"}`, schema.Flows.ProtobufMessageHash()): "1", fmt.Sprintf(`errors_total{error="kafka: Failed to produce message to topic flows-%s: noooo"}`, c.d.Schema.ProtobufMessageHash()): "1",
`sent_messages_total{exporter="127.0.0.1"}`: "2", `sent_messages_total{exporter="127.0.0.1"}`: "2",
} }
if diff := helpers.Diff(gotMetrics, expectedMetrics); diff != "" { if diff := helpers.Diff(gotMetrics, expectedMetrics); diff != "" {
@@ -62,7 +62,7 @@ func TestKafka(t *testing.T) {
func TestKafkaMetrics(t *testing.T) { func TestKafkaMetrics(t *testing.T) {
r := reporter.NewMock(t) r := reporter.NewMock(t)
c, err := New(r, DefaultConfiguration(), Dependencies{Daemon: daemon.NewMock(t)}) c, err := New(r, DefaultConfiguration(), Dependencies{Daemon: daemon.NewMock(t), Schema: schema.NewMock(t)})
if err != nil { if err != nil {
t.Fatalf("New() error:\n%+v", err) t.Fatalf("New() error:\n%+v", err)
} }

View File

@@ -14,13 +14,17 @@ import (
"akvorado/common/daemon" "akvorado/common/daemon"
"akvorado/common/helpers" "akvorado/common/helpers"
"akvorado/common/reporter" "akvorado/common/reporter"
"akvorado/common/schema"
) )
// NewMock creates a new Kafka component with a mocked Kafka. It will // NewMock creates a new Kafka component with a mocked Kafka. It will
// panic if it cannot be started. // panic if it cannot be started.
func NewMock(t *testing.T, reporter *reporter.Reporter, configuration Configuration) (*Component, *mocks.AsyncProducer) { func NewMock(t *testing.T, reporter *reporter.Reporter, configuration Configuration) (*Component, *mocks.AsyncProducer) {
t.Helper() t.Helper()
c, err := New(reporter, configuration, Dependencies{Daemon: daemon.NewMock(t)}) c, err := New(reporter, configuration, Dependencies{
Daemon: daemon.NewMock(t),
Schema: schema.NewMock(t),
})
if err != nil { if err != nil {
t.Fatalf("New() error:\n%+v", err) t.Fatalf("New() error:\n%+v", err)
} }

View File

@@ -13,8 +13,6 @@ import (
"strconv" "strconv"
"text/template" "text/template"
"time" "time"
"akvorado/common/schema"
) )
var ( var (
@@ -71,8 +69,8 @@ func (c *Component) registerHTTPHandlers() error {
http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
var result bytes.Buffer var result bytes.Buffer
if err := initShTemplate.Execute(&result, initShVariables{ if err := initShTemplate.Execute(&result, initShVariables{
FlowSchemaHash: schema.Flows.ProtobufMessageHash(), FlowSchemaHash: c.d.Schema.ProtobufMessageHash(),
FlowSchema: schema.Flows.ProtobufDefinition(), FlowSchema: c.d.Schema.ProtobufDefinition(),
SystemLogTTL: int(c.config.SystemLogTTL.Seconds()), SystemLogTTL: int(c.config.SystemLogTTL.Seconds()),
SystemLogTables: []string{ SystemLogTables: []string{
"asynchronous_metric_log", "asynchronous_metric_log",

View File

@@ -28,6 +28,7 @@ func TestHTTPEndpoints(t *testing.T) {
c, err := New(r, config, Dependencies{ c, err := New(r, config, Dependencies{
Daemon: daemon.NewMock(t), Daemon: daemon.NewMock(t),
HTTP: http.NewMock(t, r), HTTP: http.NewMock(t, r),
Schema: schema.NewMock(t),
}) })
if err != nil { if err != nil {
t.Fatalf("New() error:\n%+v", err) t.Fatalf("New() error:\n%+v", err)
@@ -65,7 +66,7 @@ func TestHTTPEndpoints(t *testing.T) {
``, ``,
`# Install Protobuf schema`, `# Install Protobuf schema`,
fmt.Sprintf(`cat > /var/lib/clickhouse/format_schemas/flow-%s.proto <<'EOPROTO'`, fmt.Sprintf(`cat > /var/lib/clickhouse/format_schemas/flow-%s.proto <<'EOPROTO'`,
schema.Flows.ProtobufMessageHash()), c.d.Schema.ProtobufMessageHash()),
"", "",
`syntax = "proto3";`, `syntax = "proto3";`,
}, },
@@ -84,6 +85,7 @@ func TestAdditionalASNs(t *testing.T) {
c, err := New(r, config, Dependencies{ c, err := New(r, config, Dependencies{
Daemon: daemon.NewMock(t), Daemon: daemon.NewMock(t),
HTTP: http.NewMock(t, r), HTTP: http.NewMock(t, r),
Schema: schema.NewMock(t),
}) })
if err != nil { if err != nil {
t.Fatalf("New() error:\n%+v", err) t.Fatalf("New() error:\n%+v", err)
@@ -177,6 +179,7 @@ func TestNetworkSources(t *testing.T) {
c, err := New(r, config, Dependencies{ c, err := New(r, config, Dependencies{
Daemon: daemon.NewMock(t), Daemon: daemon.NewMock(t),
HTTP: http.NewMock(t, r), HTTP: http.NewMock(t, r),
Schema: schema.NewMock(t),
}) })
if err != nil { if err != nil {
t.Fatalf("New() error:\n%+v", err) t.Fatalf("New() error:\n%+v", err)

View File

@@ -124,7 +124,7 @@ LAYOUT({{ .Layout }}())
func (c *Component) createExportersView(ctx context.Context) error { func (c *Component) createExportersView(ctx context.Context) error {
// Select the columns we need // Select the columns we need
cols := []string{} cols := []string{}
for _, column := range schema.Flows.Columns() { for _, column := range c.d.Schema.Columns() {
if column.Key == schema.ColumnTimeReceived || strings.HasPrefix(column.Name, "Exporter") { if column.Key == schema.ColumnTimeReceived || strings.HasPrefix(column.Name, "Exporter") {
cols = append(cols, column.Name) cols = append(cols, column.Name)
} }
@@ -177,7 +177,7 @@ AS %s
// createRawFlowsTable creates the raw flow table // createRawFlowsTable creates the raw flow table
func (c *Component) createRawFlowsTable(ctx context.Context) error { func (c *Component) createRawFlowsTable(ctx context.Context) error {
hash := schema.Flows.ProtobufMessageHash() hash := c.d.Schema.ProtobufMessageHash()
tableName := fmt.Sprintf("flows_%s_raw", hash) tableName := fmt.Sprintf("flows_%s_raw", hash)
kafkaEngine := fmt.Sprintf("Kafka SETTINGS %s", strings.Join([]string{ kafkaEngine := fmt.Sprintf("Kafka SETTINGS %s", strings.Join([]string{
fmt.Sprintf(`kafka_broker_list = '%s'`, fmt.Sprintf(`kafka_broker_list = '%s'`,
@@ -198,7 +198,7 @@ func (c *Component) createRawFlowsTable(ctx context.Context) error {
gin.H{ gin.H{
"Database": c.config.Database, "Database": c.config.Database,
"Table": tableName, "Table": tableName,
"Schema": schema.Flows.ClickHouseCreateTable( "Schema": c.d.Schema.ClickHouseCreateTable(
schema.ClickHouseSkipGeneratedColumns, schema.ClickHouseSkipGeneratedColumns,
schema.ClickHouseUseTransformFromType, schema.ClickHouseUseTransformFromType,
schema.ClickHouseSkipAliasedColumns), schema.ClickHouseSkipAliasedColumns),
@@ -235,7 +235,7 @@ func (c *Component) createRawFlowsTable(ctx context.Context) error {
} }
func (c *Component) createRawFlowsConsumerView(ctx context.Context) error { func (c *Component) createRawFlowsConsumerView(ctx context.Context) error {
tableName := fmt.Sprintf("flows_%s_raw", schema.Flows.ProtobufMessageHash()) tableName := fmt.Sprintf("flows_%s_raw", c.d.Schema.ProtobufMessageHash())
viewName := fmt.Sprintf("%s_consumer", tableName) viewName := fmt.Sprintf("%s_consumer", tableName)
// Build SELECT query // Build SELECT query
@@ -243,7 +243,7 @@ func (c *Component) createRawFlowsConsumerView(ctx context.Context) error {
`{{ .With }} SELECT {{ .Columns }} FROM {{ .Database }}.{{ .Table }} WHERE length(_error) = 0`, `{{ .With }} SELECT {{ .Columns }} FROM {{ .Database }}.{{ .Table }} WHERE length(_error) = 0`,
gin.H{ gin.H{
"With": "WITH arrayCompact(DstASPath) AS c_DstASPath", "With": "WITH arrayCompact(DstASPath) AS c_DstASPath",
"Columns": strings.Join(schema.Flows.ClickHouseSelectColumns( "Columns": strings.Join(c.d.Schema.ClickHouseSelectColumns(
schema.ClickHouseSubstituteGenerates, schema.ClickHouseSubstituteGenerates,
schema.ClickHouseSubstituteTransforms, schema.ClickHouseSubstituteTransforms,
schema.ClickHouseSkipAliasedColumns), ", "), schema.ClickHouseSkipAliasedColumns), ", "),
@@ -277,7 +277,7 @@ func (c *Component) createRawFlowsConsumerView(ctx context.Context) error {
} }
func (c *Component) createRawFlowsErrorsView(ctx context.Context) error { func (c *Component) createRawFlowsErrorsView(ctx context.Context) error {
tableName := fmt.Sprintf("flows_%s_raw", schema.Flows.ProtobufMessageHash()) tableName := fmt.Sprintf("flows_%s_raw", c.d.Schema.ProtobufMessageHash())
viewName := fmt.Sprintf("%s_errors", tableName) viewName := fmt.Sprintf("%s_errors", tableName)
// Build SELECT query // Build SELECT query
@@ -350,7 +350,7 @@ PARTITION BY toYYYYMMDDhhmmss(toStartOfInterval(TimeReceived, INTERVAL {{ .Parti
ORDER BY (TimeReceived, ExporterAddress, InIfName, OutIfName) ORDER BY (TimeReceived, ExporterAddress, InIfName, OutIfName)
TTL TimeReceived + toIntervalSecond({{ .TTL }}) TTL TimeReceived + toIntervalSecond({{ .TTL }})
`, gin.H{ `, gin.H{
"Schema": schema.Flows.ClickHouseCreateTable(), "Schema": c.d.Schema.ClickHouseCreateTable(),
"PartitionInterval": partitionInterval, "PartitionInterval": partitionInterval,
"TTL": ttl, "TTL": ttl,
}) })
@@ -364,10 +364,10 @@ ORDER BY ({{ .SortingKey }})
TTL TimeReceived + toIntervalSecond({{ .TTL }}) TTL TimeReceived + toIntervalSecond({{ .TTL }})
`, gin.H{ `, gin.H{
"Table": tableName, "Table": tableName,
"Schema": schema.Flows.ClickHouseCreateTable(schema.ClickHouseSkipMainOnlyColumns), "Schema": c.d.Schema.ClickHouseCreateTable(schema.ClickHouseSkipMainOnlyColumns),
"PartitionInterval": partitionInterval, "PartitionInterval": partitionInterval,
"PrimaryKey": strings.Join(schema.Flows.ClickHousePrimaryKeys(), ", "), "PrimaryKey": strings.Join(c.d.Schema.ClickHousePrimaryKeys(), ", "),
"SortingKey": strings.Join(schema.Flows.ClickHouseSortingKeys(), ", "), "SortingKey": strings.Join(c.d.Schema.ClickHouseSortingKeys(), ", "),
"TTL": ttl, "TTL": ttl,
}) })
} }
@@ -402,7 +402,7 @@ ORDER BY position ASC
modifications := []string{} modifications := []string{}
previousColumn := "" previousColumn := ""
outer: outer:
for _, wantedColumn := range schema.Flows.Columns() { for _, wantedColumn := range c.d.Schema.Columns() {
if resolution.Interval > 0 && wantedColumn.MainOnly { if resolution.Interval > 0 && wantedColumn.MainOnly {
continue continue
} }
@@ -411,12 +411,12 @@ outer:
if wantedColumn.Name == existingColumn.Name { if wantedColumn.Name == existingColumn.Name {
// Do a few sanity checks // Do a few sanity checks
if wantedColumn.ClickHouseType != existingColumn.Type { if wantedColumn.ClickHouseType != existingColumn.Type {
if slices.Contains(schema.Flows.ClickHousePrimaryKeys(), wantedColumn.Name) { if slices.Contains(c.d.Schema.ClickHousePrimaryKeys(), wantedColumn.Name) {
return fmt.Errorf("table %s, primary key column %s has a non-matching type: %s vs %s", return fmt.Errorf("table %s, primary key column %s has a non-matching type: %s vs %s",
tableName, wantedColumn.Name, existingColumn.Type, wantedColumn.ClickHouseType) tableName, wantedColumn.Name, existingColumn.Type, wantedColumn.ClickHouseType)
} }
} }
if resolution.Interval > 0 && slices.Contains(schema.Flows.ClickHousePrimaryKeys(), wantedColumn.Name) && existingColumn.IsPrimaryKey == 0 { if resolution.Interval > 0 && slices.Contains(c.d.Schema.ClickHousePrimaryKeys(), wantedColumn.Name) && existingColumn.IsPrimaryKey == 0 {
return fmt.Errorf("table %s, column %s should be a primary key, cannot change that", return fmt.Errorf("table %s, column %s should be a primary key, cannot change that",
tableName, wantedColumn.Name) tableName, wantedColumn.Name)
} }
@@ -440,7 +440,7 @@ outer:
} }
} }
// Add the missing column. Only if not primary. // Add the missing column. Only if not primary.
if resolution.Interval > 0 && slices.Contains(schema.Flows.ClickHousePrimaryKeys(), wantedColumn.Name) { if resolution.Interval > 0 && slices.Contains(c.d.Schema.ClickHousePrimaryKeys(), wantedColumn.Name) {
return fmt.Errorf("table %s, column %s is missing but it is a primary key", return fmt.Errorf("table %s, column %s is missing but it is a primary key",
tableName, wantedColumn.Name) tableName, wantedColumn.Name)
} }
@@ -453,7 +453,7 @@ outer:
// Also update ORDER BY // Also update ORDER BY
if resolution.Interval > 0 { if resolution.Interval > 0 {
modifications = append(modifications, modifications = append(modifications,
fmt.Sprintf("MODIFY ORDER BY (%s)", strings.Join(schema.Flows.ClickHouseSortingKeys(), ", "))) fmt.Sprintf("MODIFY ORDER BY (%s)", strings.Join(c.d.Schema.ClickHouseSortingKeys(), ", ")))
} }
c.r.Info().Msgf("apply %d modifications to %s", len(modifications), tableName) c.r.Info().Msgf("apply %d modifications to %s", len(modifications), tableName)
if resolution.Interval > 0 { if resolution.Interval > 0 {
@@ -503,7 +503,7 @@ SELECT
FROM {{ .Database }}.flows`, gin.H{ FROM {{ .Database }}.flows`, gin.H{
"Database": c.config.Database, "Database": c.config.Database,
"Seconds": uint64(resolution.Interval.Seconds()), "Seconds": uint64(resolution.Interval.Seconds()),
"Columns": strings.Join(schema.Flows.ClickHouseSelectColumns( "Columns": strings.Join(c.d.Schema.ClickHouseSelectColumns(
schema.ClickHouseSkipTimeReceived, schema.ClickHouseSkipTimeReceived,
schema.ClickHouseSkipMainOnlyColumns, schema.ClickHouseSkipMainOnlyColumns,
schema.ClickHouseSkipAliasedColumns), ",\n "), schema.ClickHouseSkipAliasedColumns), ",\n "),
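
Every DDL statement in the migrations is now derived from the injected component. As a sketch, the raw Kafka-backed table reduces to something like the function below, written as if inside the package and assuming `ClickHouseCreateTable` returns the column definitions as a string, as its use in the templates above suggests:

package clickhouse

import (
	"fmt"

	"akvorado/common/schema"
)

// rawFlowsDDL sketches how the raw table DDL is derived; the real
// component templates Kafka engine settings around it.
func rawFlowsDDL(sch *schema.Component) string {
	tableName := fmt.Sprintf("flows_%s_raw", sch.ProtobufMessageHash())
	return fmt.Sprintf("CREATE TABLE %s (%s) ENGINE = Kafka", tableName,
		sch.ClickHouseCreateTable(
			schema.ClickHouseSkipGeneratedColumns,
			schema.ClickHouseUseTransformFromType,
			schema.ClickHouseSkipAliasedColumns))
}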

View File

@@ -62,7 +62,7 @@ FROM system.tables
WHERE database=currentDatabase() AND table NOT LIKE '.%' WHERE database=currentDatabase() AND table NOT LIKE '.%'
ORDER BY length(table) ASC` ORDER BY length(table) ASC`
func dumpAllTables(t *testing.T, ch *clickhousedb.Component) map[string]string { func dumpAllTables(t *testing.T, ch *clickhousedb.Component, schemaComponent *schema.Component) map[string]string {
// TODO: find the right ordering, this one does not totally work // TODO: find the right ordering, this one does not totally work
rows, err := ch.Query(context.Background(), dumpAllTablesQuery) rows, err := ch.Query(context.Background(), dumpAllTablesQuery)
if err != nil { if err != nil {
@@ -74,7 +74,7 @@ func dumpAllTables(t *testing.T, ch *clickhousedb.Component) map[string]string {
if err := rows.Scan(&table, &schema); err != nil { if err := rows.Scan(&table, &schema); err != nil {
t.Fatalf("Scan() error:\n%+v", err) t.Fatalf("Scan() error:\n%+v", err)
} }
if !oldTable(table) { if !oldTable(schemaComponent, table) {
schemas[table] = schema schemas[table] = schema
} }
} }
@@ -86,9 +86,9 @@ type tableWithSchema struct {
schema string schema string
} }
func loadTables(t *testing.T, ch *clickhousedb.Component, schemas []tableWithSchema) { func loadTables(t *testing.T, ch *clickhousedb.Component, sch *schema.Component, schemas []tableWithSchema) {
for _, tws := range schemas { for _, tws := range schemas {
if oldTable(tws.table) { if oldTable(sch, tws.table) {
continue continue
} }
t.Logf("Load table %s", tws.table) t.Logf("Load table %s", tws.table)
@@ -98,8 +98,8 @@ func loadTables(t *testing.T, ch *clickhousedb.Component, schemas []tableWithSch
} }
} }
func oldTable(table string) bool { func oldTable(schema *schema.Component, table string) bool {
if strings.Contains(table, schema.Flows.ProtobufMessageHash()) { if strings.Contains(table, schema.ProtobufMessageHash()) {
return false return false
} }
if strings.HasSuffix(table, "_raw") || strings.HasSuffix(table, "_raw_consumer") || strings.HasSuffix(table, "_raw_errors") { if strings.HasSuffix(table, "_raw") || strings.HasSuffix(table, "_raw_consumer") || strings.HasSuffix(table, "_raw_errors") {
@@ -110,7 +110,7 @@ func oldTable(table string) bool {
// loadAllTables loads tables from a CSV file. Use `format CSV` with // loadAllTables loads tables from a CSV file. Use `format CSV` with
// query from dumpAllTables. // query from dumpAllTables.
func loadAllTables(t *testing.T, ch *clickhousedb.Component, filename string) { func loadAllTables(t *testing.T, ch *clickhousedb.Component, sch *schema.Component, filename string) {
input, err := os.Open(filename) input, err := os.Open(filename)
if err != nil { if err != nil {
t.Fatalf("Open(%q) error:\n%+v", filename, err) t.Fatalf("Open(%q) error:\n%+v", filename, err)
@@ -136,7 +136,7 @@ func loadAllTables(t *testing.T, ch *clickhousedb.Component, filename string) {
} }
dropAllTables(t, ch) dropAllTables(t, ch)
t.Logf("(%s) Load all tables from dump %s", time.Now(), filename) t.Logf("(%s) Load all tables from dump %s", time.Now(), filename)
loadTables(t, ch, schemas) loadTables(t, ch, sch, schemas)
t.Logf("(%s) Loaded all tables from dump %s", time.Now(), filename) t.Logf("(%s) Loaded all tables from dump %s", time.Now(), filename)
} }
@@ -161,6 +161,7 @@ func TestGetHTTPBaseURL(t *testing.T) {
c, err := New(r, DefaultConfiguration(), Dependencies{ c, err := New(r, DefaultConfiguration(), Dependencies{
Daemon: daemon.NewMock(t), Daemon: daemon.NewMock(t),
HTTP: http, HTTP: http,
Schema: schema.NewMock(t),
}) })
if err != nil { if err != nil {
t.Fatalf("New() error:\n%+v", err) t.Fatalf("New() error:\n%+v", err)
@@ -201,7 +202,7 @@ func TestMigration(t *testing.T) {
} }
for _, f := range files { for _, f := range files {
t.Run(f.Name(), func(t *testing.T) { t.Run(f.Name(), func(t *testing.T) {
loadAllTables(t, chComponent, path.Join("testdata/states", f.Name())) loadAllTables(t, chComponent, schema.NewMock(t), path.Join("testdata/states", f.Name()))
r := reporter.NewMock(t) r := reporter.NewMock(t)
configuration := DefaultConfiguration() configuration := DefaultConfiguration()
configuration.OrchestratorURL = "http://something" configuration.OrchestratorURL = "http://something"
@@ -209,6 +210,7 @@ func TestMigration(t *testing.T) {
ch, err := New(r, configuration, Dependencies{ ch, err := New(r, configuration, Dependencies{
Daemon: daemon.NewMock(t), Daemon: daemon.NewMock(t),
HTTP: http.NewMock(t, r), HTTP: http.NewMock(t, r),
Schema: schema.NewMock(t),
ClickHouse: chComponent, ClickHouse: chComponent,
}) })
if err != nil { if err != nil {
@@ -225,14 +227,14 @@ WHERE database=currentDatabase() AND table NOT LIKE '.%'`)
if err != nil { if err != nil {
t.Fatalf("Query() error:\n%+v", err) t.Fatalf("Query() error:\n%+v", err)
} }
hash := schema.Flows.ProtobufMessageHash() hash := ch.d.Schema.ProtobufMessageHash()
got := []string{} got := []string{}
for rows.Next() { for rows.Next() {
var table string var table string
if err := rows.Scan(&table); err != nil { if err := rows.Scan(&table); err != nil {
t.Fatalf("Scan() error:\n%+v", err) t.Fatalf("Scan() error:\n%+v", err)
} }
if !oldTable(table) { if !oldTable(ch.d.Schema, table) {
got = append(got, table) got = append(got, table)
} }
} }
@@ -256,7 +258,7 @@ WHERE database=currentDatabase() AND table NOT LIKE '.%'`)
t.Fatalf("SHOW TABLES (-got, +want):\n%s", diff) t.Fatalf("SHOW TABLES (-got, +want):\n%s", diff)
} }
currentRun := dumpAllTables(t, chComponent) currentRun := dumpAllTables(t, chComponent, ch.d.Schema)
if lastRun != nil { if lastRun != nil {
if diff := helpers.Diff(lastRun, currentRun); diff != "" { if diff := helpers.Diff(lastRun, currentRun); diff != "" {
t.Fatalf("Final state is different (-last, +current):\n%s", diff) t.Fatalf("Final state is different (-last, +current):\n%s", diff)
@@ -296,6 +298,7 @@ LIMIT 1`, proto.ClientName)
ch, err := New(r, configuration, Dependencies{ ch, err := New(r, configuration, Dependencies{
Daemon: daemon.NewMock(t), Daemon: daemon.NewMock(t),
HTTP: http.NewMock(t, r), HTTP: http.NewMock(t, r),
Schema: schema.NewMock(t),
ClickHouse: chComponent, ClickHouse: chComponent,
}) })
if err != nil { if err != nil {

View File

@@ -18,6 +18,7 @@ import (
"akvorado/common/daemon" "akvorado/common/daemon"
"akvorado/common/http" "akvorado/common/http"
"akvorado/common/reporter" "akvorado/common/reporter"
"akvorado/common/schema"
) )
// Component represents the ClickHouse configurator. // Component represents the ClickHouse configurator.
@@ -40,6 +41,7 @@ type Dependencies struct {
Daemon daemon.Component Daemon daemon.Component
HTTP *http.Component HTTP *http.Component
ClickHouse *clickhousedb.Component ClickHouse *clickhousedb.Component
Schema *schema.Component
} }
// New creates a new ClickHouse component. // New creates a new ClickHouse component.

View File

@@ -22,11 +22,11 @@ func TestTopicCreation(t *testing.T) {
rand.Seed(time.Now().UnixMicro()) rand.Seed(time.Now().UnixMicro())
topicName := fmt.Sprintf("test-topic-%d", rand.Int()) topicName := fmt.Sprintf("test-topic-%d", rand.Int())
expectedTopicName := fmt.Sprintf("%s-%s", topicName, schema.Flows.ProtobufMessageHash())
retentionMs := "76548" retentionMs := "76548"
segmentBytes := "107374184" segmentBytes := "107374184"
segmentBytes2 := "10737184" segmentBytes2 := "10737184"
cleanupPolicy := "delete" cleanupPolicy := "delete"
expectedTopicName := fmt.Sprintf("%s-%s", topicName, schema.NewMock(t).ProtobufMessageHash())
cases := []struct { cases := []struct {
Name string Name string
@@ -65,7 +65,7 @@ func TestTopicCreation(t *testing.T) {
} }
configuration.Brokers = brokers configuration.Brokers = brokers
configuration.Version = kafka.Version(sarama.V2_8_1_0) configuration.Version = kafka.Version(sarama.V2_8_1_0)
c, err := New(reporter.NewMock(t), configuration) c, err := New(reporter.NewMock(t), configuration, Dependencies{Schema: schema.NewMock(t)})
if err != nil { if err != nil {
t.Fatalf("New() error:\n%+v", err) t.Fatalf("New() error:\n%+v", err)
} }
@@ -96,7 +96,7 @@ func TestTopicMorePartitions(t *testing.T) {
rand.Seed(time.Now().UnixMicro()) rand.Seed(time.Now().UnixMicro())
topicName := fmt.Sprintf("test-topic-%d", rand.Int()) topicName := fmt.Sprintf("test-topic-%d", rand.Int())
expectedTopicName := fmt.Sprintf("%s-%s", topicName, schema.Flows.ProtobufMessageHash()) expectedTopicName := fmt.Sprintf("%s-%s", topicName, schema.NewMock(t).ProtobufMessageHash())
configuration := DefaultConfiguration() configuration := DefaultConfiguration()
configuration.Topic = topicName configuration.Topic = topicName
@@ -108,7 +108,7 @@ func TestTopicMorePartitions(t *testing.T) {
configuration.Brokers = brokers configuration.Brokers = brokers
configuration.Version = kafka.Version(sarama.V2_8_1_0) configuration.Version = kafka.Version(sarama.V2_8_1_0)
c, err := New(reporter.NewMock(t), configuration) c, err := New(reporter.NewMock(t), configuration, Dependencies{Schema: schema.NewMock(t)})
if err != nil { if err != nil {
t.Fatalf("New() error:\n%+v", err) t.Fatalf("New() error:\n%+v", err)
} }
@@ -133,7 +133,7 @@ func TestTopicMorePartitions(t *testing.T) {
// Increase number of partitions // Increase number of partitions
configuration.TopicConfiguration.NumPartitions = 4 configuration.TopicConfiguration.NumPartitions = 4
c, err = New(reporter.NewMock(t), configuration) c, err = New(reporter.NewMock(t), configuration, Dependencies{Schema: schema.NewMock(t)})
if err != nil { if err != nil {
t.Fatalf("New() error:\n%+v", err) t.Fatalf("New() error:\n%+v", err)
} }

View File

@@ -18,14 +18,20 @@ import (
// Component represents the Kafka configurator. // Component represents the Kafka configurator.
type Component struct { type Component struct {
r *reporter.Reporter r *reporter.Reporter
d Dependencies
config Configuration config Configuration
kafkaConfig *sarama.Config kafkaConfig *sarama.Config
kafkaTopic string kafkaTopic string
} }
// Dependencies are the dependencies for the Kafka component.
type Dependencies struct {
Schema *schema.Component
}
// New creates a new Kafka configurator. // New creates a new Kafka configurator.
func New(r *reporter.Reporter, config Configuration) (*Component, error) { func New(r *reporter.Reporter, config Configuration, dependencies Dependencies) (*Component, error) {
kafkaConfig, err := kafka.NewConfig(config.Configuration) kafkaConfig, err := kafka.NewConfig(config.Configuration)
if err != nil { if err != nil {
return nil, err return nil, err
@@ -36,10 +42,11 @@ func New(r *reporter.Reporter, config Configuration) (*Component, error) {
return &Component{ return &Component{
r: r, r: r,
d: dependencies,
config: config, config: config,
kafkaConfig: kafkaConfig, kafkaConfig: kafkaConfig,
kafkaTopic: fmt.Sprintf("%s-%s", config.Topic, schema.Flows.ProtobufMessageHash()), kafkaTopic: fmt.Sprintf("%s-%s", config.Topic, dependencies.Schema.ProtobufMessageHash()),
}, nil }, nil
} }
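
The orchestrator-side configurator ends up with the same shape as the inlet components: construction is pure wiring. A closing sketch, assuming the package lives at `akvorado/orchestrator/kafka` and reusing the mock helpers from the tests above:

package kafka_test

import (
	"testing"

	"akvorado/common/reporter"
	"akvorado/common/schema"
	"akvorado/orchestrator/kafka"
)

func TestNewSketch(t *testing.T) {
	c, err := kafka.New(reporter.NewMock(t), kafka.DefaultConfiguration(),
		kafka.Dependencies{Schema: schema.NewMock(t)})
	if err != nil {
		t.Fatalf("New() error:\n%+v", err)
	}
	_ = c // the component now knows its schema-suffixed topic name
}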