inlet: split inlet into new inlet and outlet

This change splits the inlet component into a simpler inlet and a new
outlet component. The new inlet component receives flows and puts them
into Kafka, unparsed. The outlet component takes them from Kafka,
resumes the processing from there (flow parsing, enrichment), and puts
them into ClickHouse.
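
To illustrate the resulting pipeline, here is a minimal sketch of the two loops, under hypothetical names (RawFlow, Queue, inletLoop and outletLoop are placeholders, not the actual akvorado API): the inlet only forwards raw datagrams to Kafka, while the outlet consumes them, decodes and enriches them, and hands rows to the ClickHouse batcher.

package pipeline

import "context"

// RawFlow is an unparsed datagram as forwarded by the inlet.
type RawFlow struct {
	Payload []byte
	Source  string
}

// Queue abstracts the Kafka topic sitting between inlet and outlet.
type Queue interface {
	Publish(ctx context.Context, raw RawFlow) error
	Consume(ctx context.Context) (RawFlow, error)
}

// inletLoop does as little as possible: read a datagram, push it to Kafka.
func inletLoop(ctx context.Context, recv func() (RawFlow, error), q Queue) error {
	for {
		raw, err := recv()
		if err != nil {
			return err
		}
		if err := q.Publish(ctx, raw); err != nil {
			return err
		}
	}
}

// outletLoop resumes the processing: decode, enrich, then batch to ClickHouse.
func outletLoop(ctx context.Context, q Queue, decodeEnrichAndBatch func(RawFlow) error) error {
	for {
		raw, err := q.Consume(ctx)
		if err != nil {
			return err
		}
		if err := decodeEnrichAndBatch(raw); err != nil {
			continue // malformed flows are counted and skipped
		}
	}
}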

The main goal is to ensure the inlet does minimal work so it does not
fall behind when processing packets (and restarts faster). It also
brings some simplification, as the number of knobs to tune is reduced:
for the inlet, we only need to tune the UDP queue size, the number of
workers, and a few Kafka parameters; for the outlet, we need to tune a
few Kafka parameters, the number of workers, and a few ClickHouse
parameters.
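
A rough sketch of the reduced knob surface, with illustrative field names rather than the real configuration keys:

package pipeline

// InletConfiguration: UDP queue size, worker count and a few Kafka
// producer settings (names are illustrative only).
type InletConfiguration struct {
	UDPQueueSize int
	Workers      int
	Kafka        struct {
		Brokers []string
		Topic   string
	}
}

// OutletConfiguration: a few Kafka consumer settings, worker count and a
// few ClickHouse batching settings.
type OutletConfiguration struct {
	Workers int
	Kafka   struct {
		Brokers       []string
		ConsumerGroup string
	}
	ClickHouse struct {
		Servers          []string
		MaximumBatchSize int
	}
}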

The outlet component features a simple Kafka input component. The core
component becomes just a callback function. There is also a new
ClickHouse component to push data to ClickHouse using the low-level
ch-go library with batch inserts.
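
With ch-go, batch inserts are built from column buffers that are filled while flows are processed and flushed in a single INSERT. A minimal sketch, with a made-up table and column set, and without error handling or reconnection logic:

package main

import (
	"context"

	"github.com/ClickHouse/ch-go"
	"github.com/ClickHouse/ch-go/proto"
)

func main() {
	ctx := context.Background()
	conn, err := ch.Dial(ctx, ch.Options{Address: "127.0.0.1:9000"})
	if err != nil {
		panic(err)
	}
	defer conn.Close()

	// Column buffers are appended to while flows are processed...
	var (
		bytes   proto.ColUInt64
		packets proto.ColUInt64
	)
	bytes.Append(1500)
	packets.Append(1)

	// ...then flushed to ClickHouse as a single batch.
	if err := conn.Do(ctx, ch.Query{
		Body: "INSERT INTO flows_raw (Bytes, Packets) VALUES",
		Input: proto.Input{
			{Name: "Bytes", Data: &bytes},
			{Name: "Packets", Data: &packets},
		},
	}); err != nil {
		panic(err)
	}
}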

This change has an impact on the internal representation of a
FlowMessage. Previously, it was tailored to dynamically build the
protobuf message to be put into Kafka. Now, it builds the batch request
to be sent to ClickHouse. As a consequence, the FlowMessage structure
hides the content of the next batch request and should therefore be
reused. This also changes the way we decode flows: decoders no longer
output new FlowMessage structures, they reuse the one provided to each
worker.
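
The pattern is visible in the decoder files below: a worker owns a single FlowMessage, passes it to Decode() together with a finalize callback, and the callback pushes the content to the batch builder before clearing the message. A condensed sketch, loosely based on the test code in this commit (Flusher and newMessage are hypothetical stand-ins for the ClickHouse batch builder and the schema component):

package sketch

import (
	"akvorado/common/schema"
	"akvorado/outlet/flow/decoder"
)

// Flusher is a stand-in for the ClickHouse batch builder.
type Flusher interface {
	Push(*schema.FlowMessage)
}

func workerLoop(d decoder.Decoder, newMessage func() *schema.FlowMessage,
	flows <-chan decoder.RawFlow, out Flusher) {
	bf := newMessage() // reused for every flow handled by this worker
	finalize := func() {
		out.Push(bf) // the batch builder takes what it needs...
		bf.Clear()   // ...so the same message can be filled again
	}
	for raw := range flows {
		// Decode() fills bf and calls finalize() once per decoded flow.
		if _, err := d.Decode(raw, decoder.Option{}, bf, finalize); err != nil {
			continue // decoding errors are reported through metrics
		}
	}
}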

The ClickHouse tables are slightly updated: the Kafka engine is
replaced by the Null engine.
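
For reference, rows inserted into a Null-engine table are not stored, but materialized views attached to the table still see them and can forward them into the real MergeTree table. A generic sketch of that pattern, with illustrative table names rather than the ones shipped by akvorado:

package sketch

import (
	"context"

	"github.com/ClickHouse/ch-go"
)

func createTables(ctx context.Context, conn *ch.Client) error {
	for _, ddl := range []string{
		// The outlet inserts into this table; nothing is stored here.
		`CREATE TABLE flows_raw (TimeReceived DateTime, Bytes UInt64, Packets UInt64)
		 ENGINE = Null`,
		// The real storage.
		`CREATE TABLE flows (TimeReceived DateTime, Bytes UInt64, Packets UInt64)
		 ENGINE = MergeTree ORDER BY TimeReceived`,
		// The materialized view moves rows from the Null table to storage.
		`CREATE MATERIALIZED VIEW flows_raw_consumer TO flows
		 AS SELECT * FROM flows_raw`,
	} {
		if err := conn.Do(ctx, ch.Query{Body: ddl}); err != nil {
			return err
		}
	}
	return nil
}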

Fix #1122
Vincent Bernat
2024-12-17 06:31:10 +01:00
parent ad59598831
commit ac68c5970e
231 changed files with 6488 additions and 3891 deletions

@@ -0,0 +1,164 @@
// SPDX-FileCopyrightText: 2023 Free Mobile
// SPDX-FileCopyrightText: 2021 NetSampler
// SPDX-License-Identifier: AGPL-3.0-only AND BSD-3-Clause
package sflow
import (
"net"
"akvorado/common/helpers"
"akvorado/common/schema"
"akvorado/outlet/flow/decoder"
"github.com/netsampler/goflow2/v2/decoders/sflow"
)
func (nd *Decoder) decode(packet sflow.Packet, bf *schema.FlowMessage, finalize decoder.FinalizeFlowFunc) error {
for _, flowSample := range packet.Samples {
var records []sflow.FlowRecord
forwardingStatus := 0
switch flowSample := flowSample.(type) {
case sflow.FlowSample:
records = flowSample.Records
bf.SamplingRate = uint64(flowSample.SamplingRate)
bf.InIf = flowSample.Input
bf.OutIf = flowSample.Output
switch bf.OutIf & interfaceOutMask {
case interfaceOutDiscard:
bf.OutIf = 0
forwardingStatus = 128
case interfaceOutMultiple:
bf.OutIf = 0
}
case sflow.ExpandedFlowSample:
records = flowSample.Records
bf.SamplingRate = uint64(flowSample.SamplingRate)
bf.InIf = flowSample.InputIfValue
bf.OutIf = flowSample.OutputIfValue
}
if bf.InIf == interfaceLocal {
bf.InIf = 0
}
if bf.OutIf == interfaceLocal {
bf.OutIf = 0
}
bf.ExporterAddress = decoder.DecodeIP(packet.AgentIP)
bf.AppendUint(schema.ColumnPackets, 1)
bf.AppendUint(schema.ColumnForwardingStatus, uint64(forwardingStatus))
// Optimization: avoid parsing sampled header if we have everything already parsed
hasSampledIPv4 := false
hasSampledIPv6 := false
hasSampledEthernet := false
hasExtendedSwitch := false
for _, record := range records {
switch record.Data.(type) {
case sflow.SampledIPv4:
hasSampledIPv4 = true
case sflow.SampledIPv6:
hasSampledIPv6 = true
case sflow.SampledEthernet:
hasSampledEthernet = true
case sflow.ExtendedSwitch:
hasExtendedSwitch = true
}
}
var l3length uint64
for _, record := range records {
switch recordData := record.Data.(type) {
case sflow.SampledHeader:
// Only process this header if:
// - we don't have a sampled IPv4 header nor a sampled IPv6 header, or
// - we need L2 data and we don't have a sampled Ethernet header or an extended switch record, or
// - we need L3/L4 data
if !hasSampledIPv4 && !hasSampledIPv6 || !nd.d.Schema.IsDisabled(schema.ColumnGroupL2) && (!hasSampledEthernet || !hasExtendedSwitch) || !nd.d.Schema.IsDisabled(schema.ColumnGroupL3L4) {
if l := nd.parseSampledHeader(bf, &recordData); l > 0 {
l3length = l
}
}
case sflow.SampledIPv4:
bf.SrcAddr = decoder.DecodeIP(recordData.SrcIP)
bf.DstAddr = decoder.DecodeIP(recordData.DstIP)
l3length = uint64(recordData.Length)
bf.AppendUint(schema.ColumnProto, uint64(recordData.Protocol))
bf.AppendUint(schema.ColumnSrcPort, uint64(recordData.SrcPort))
bf.AppendUint(schema.ColumnDstPort, uint64(recordData.DstPort))
bf.AppendUint(schema.ColumnEType, helpers.ETypeIPv4)
bf.AppendUint(schema.ColumnIPTos, uint64(recordData.Tos))
case sflow.SampledIPv6:
bf.SrcAddr = decoder.DecodeIP(recordData.SrcIP)
bf.DstAddr = decoder.DecodeIP(recordData.DstIP)
l3length = uint64(recordData.Length)
bf.AppendUint(schema.ColumnProto, uint64(recordData.Protocol))
bf.AppendUint(schema.ColumnSrcPort, uint64(recordData.SrcPort))
bf.AppendUint(schema.ColumnDstPort, uint64(recordData.DstPort))
bf.AppendUint(schema.ColumnEType, helpers.ETypeIPv6)
bf.AppendUint(schema.ColumnIPTos, uint64(recordData.Priority))
case sflow.SampledEthernet:
if l3length == 0 {
// That's the best we can guess. sFlow says: For a layer 2
// header_protocol, length is total number of octets of data
// received on the network (excluding framing bits but
// including FCS octets).
l3length = uint64(recordData.Length) - 16
}
if !nd.d.Schema.IsDisabled(schema.ColumnGroupL2) {
bf.AppendUint(schema.ColumnSrcMAC, helpers.MACToUint64(net.HardwareAddr(recordData.SrcMac)))
bf.AppendUint(schema.ColumnDstMAC, helpers.MACToUint64(net.HardwareAddr(recordData.DstMac)))
}
case sflow.ExtendedSwitch:
if !nd.d.Schema.IsDisabled(schema.ColumnGroupL2) {
if recordData.SrcVlan < 4096 {
bf.SrcVlan = uint16(recordData.SrcVlan)
}
if recordData.DstVlan < 4096 {
bf.DstVlan = uint16(recordData.DstVlan)
}
}
case sflow.ExtendedRouter:
bf.SrcNetMask = uint8(recordData.SrcMaskLen)
bf.DstNetMask = uint8(recordData.DstMaskLen)
bf.NextHop = decoder.DecodeIP(recordData.NextHop)
case sflow.ExtendedGateway:
bf.NextHop = decoder.DecodeIP(recordData.NextHop)
bf.DstAS = recordData.AS
bf.SrcAS = recordData.AS
if recordData.SrcAS > 0 {
bf.SrcAS = recordData.SrcAS
}
if len(recordData.ASPath) > 0 {
bf.DstAS = recordData.ASPath[len(recordData.ASPath)-1]
bf.AppendArrayUInt32(schema.ColumnDstASPath, recordData.ASPath)
}
if len(recordData.Communities) > 0 {
bf.AppendArrayUInt32(schema.ColumnDstCommunities, recordData.Communities)
}
}
}
if l3length > 0 {
bf.AppendUint(schema.ColumnBytes, l3length)
}
finalize()
}
return nil
}
func (nd *Decoder) parseSampledHeader(bf *schema.FlowMessage, header *sflow.SampledHeader) uint64 {
data := header.HeaderData
switch header.Protocol {
case 1: // Ethernet
return decoder.ParseEthernet(nd.d.Schema, bf, data)
case 11: // IPv4
return decoder.ParseIPv4(nd.d.Schema, bf, data)
case 12: // IPv6
return decoder.ParseIPv6(nd.d.Schema, bf, data)
}
return 0
}

@@ -0,0 +1,133 @@
// SPDX-FileCopyrightText: 2022 Tchadel Icard
// SPDX-License-Identifier: AGPL-3.0-only
// Package sflow handles sFlow v5 decoding.
package sflow
import (
"bytes"
"fmt"
"net"
"time"
"github.com/netsampler/goflow2/v2/decoders/sflow"
"akvorado/common/reporter"
"akvorado/common/schema"
"akvorado/outlet/flow/decoder"
)
const (
// interfaceLocal is used for InIf and OutIf when the traffic is
// locally originated or terminated. We need to translate it to 0.
interfaceLocal = 0x3fffffff
// interfaceOutMask is the mask to interpret output interface type
interfaceOutMask = 0xc0000000
// interfaceOutDiscard is used for OutIf when the traffic is discarded
interfaceOutDiscard = 0x40000000
// interfaceOutMultiple is used when there are multiple output interfaces
interfaceOutMultiple = 0x80000000
)
// Decoder contains the state for the sFlow v5 decoder.
type Decoder struct {
r *reporter.Reporter
d decoder.Dependencies
errLogger reporter.Logger
metrics struct {
errors *reporter.CounterVec
stats *reporter.CounterVec
sampleRecordsStatsSum *reporter.CounterVec
sampleStatsSum *reporter.CounterVec
}
}
// New instantiates a new sFlow decoder.
func New(r *reporter.Reporter, dependencies decoder.Dependencies) decoder.Decoder {
nd := &Decoder{
r: r,
d: dependencies,
errLogger: r.Sample(reporter.BurstSampler(30*time.Second, 3)),
}
nd.metrics.errors = nd.r.CounterVec(
reporter.CounterOpts{
Name: "errors_total",
Help: "sFlows processed errors.",
},
[]string{"exporter", "error"},
)
nd.metrics.stats = nd.r.CounterVec(
reporter.CounterOpts{
Name: "flows_total",
Help: "sFlows processed.",
},
[]string{"exporter", "agent", "version"},
)
nd.metrics.sampleRecordsStatsSum = nd.r.CounterVec(
reporter.CounterOpts{
Name: "sample_records_sum",
Help: "sFlows samples sum of records.",
},
[]string{"exporter", "agent", "version", "type"},
)
nd.metrics.sampleStatsSum = nd.r.CounterVec(
reporter.CounterOpts{
Name: "sample_sum",
Help: "sFlows samples sum.",
},
[]string{"exporter", "agent", "version", "type"},
)
return nd
}
// Decode decodes an sFlow payload.
func (nd *Decoder) Decode(in decoder.RawFlow, _ decoder.Option, bf *schema.FlowMessage, finalize decoder.FinalizeFlowFunc) (int, error) {
buf := bytes.NewBuffer(in.Payload)
key := in.Source.String()
ts := uint64(in.TimeReceived.UTC().Unix())
var packet sflow.Packet
if err := sflow.DecodeMessageVersion(buf, &packet); err != nil {
nd.metrics.errors.WithLabelValues(key, "sFlow decoding error").Inc()
nd.errLogger.Err(err).Str("exporter", key).Msg("error while decoding sFlow")
return 0, fmt.Errorf("error while decoding sFlow: %w", err)
}
// Update some stats
agent := net.IP(packet.AgentIP).String()
version := "5"
samples := packet.Samples
nd.metrics.stats.WithLabelValues(key, agent, version).Inc()
for _, s := range samples {
switch sConv := s.(type) {
case sflow.FlowSample:
nd.metrics.sampleStatsSum.WithLabelValues(key, agent, version, "FlowSample").
Inc()
nd.metrics.sampleRecordsStatsSum.WithLabelValues(key, agent, version, "FlowSample").
Add(float64(len(sConv.Records)))
case sflow.ExpandedFlowSample:
nd.metrics.sampleStatsSum.WithLabelValues(key, agent, version, "ExpandedFlowSample").
Inc()
nd.metrics.sampleRecordsStatsSum.WithLabelValues(key, agent, version, "ExpandedFlowSample").
Add(float64(len(sConv.Records)))
case sflow.CounterSample:
nd.metrics.sampleStatsSum.WithLabelValues(key, agent, version, "CounterSample").
Inc()
nd.metrics.sampleRecordsStatsSum.WithLabelValues(key, agent, version, "CounterSample").
Add(float64(len(sConv.Records)))
}
}
return len(samples), nd.decode(packet, bf, func() {
bf.TimeReceived = uint32(ts)
finalize()
})
}
// Name returns the name of the decoder.
func (nd *Decoder) Name() string {
return "sflow"
}

@@ -0,0 +1,554 @@
// SPDX-FileCopyrightText: 2022 Tchadel Icard
// SPDX-License-Identifier: AGPL-3.0-only
package sflow
import (
"net/netip"
"path/filepath"
"testing"
"akvorado/common/helpers"
"akvorado/common/reporter"
"akvorado/common/schema"
"akvorado/outlet/flow/decoder"
)
func TestDecode(t *testing.T) {
r := reporter.NewMock(t)
sch := schema.NewMock(t).EnableAllColumns()
sdecoder := New(r, decoder.Dependencies{Schema: sch})
options := decoder.Option{}
bf := sch.NewFlowMessage()
got := []*schema.FlowMessage{}
finalize := func() {
bf.TimeReceived = 0
// Keep a copy of the current flow message
clone := *bf
got = append(got, &clone)
// And clear the flow message
bf.Clear()
}
// Send data
t.Run("basic", func(t *testing.T) {
got = got[:0]
data := helpers.ReadPcapL4(t, filepath.Join("testdata", "data-1140.pcap"))
_, err := sdecoder.Decode(
decoder.RawFlow{Payload: data, Source: netip.MustParseAddr("::ffff:127.0.0.1")},
options, bf, finalize)
if err != nil {
t.Fatalf("Decode() error:\n%+v", err)
}
expectedFlows := []*schema.FlowMessage{
{
SamplingRate: 1024,
InIf: 27,
OutIf: 28,
SrcVlan: 100,
DstVlan: 100,
SrcAddr: netip.MustParseAddr("2a0c:8880:2:0:185:21:130:38"),
DstAddr: netip.MustParseAddr("2a0c:8880:2:0:185:21:130:39"),
ExporterAddress: netip.MustParseAddr("::ffff:172.16.0.3"),
OtherColumns: map[schema.ColumnKey]any{
schema.ColumnBytes: 1500,
schema.ColumnPackets: 1,
schema.ColumnEType: helpers.ETypeIPv6,
schema.ColumnProto: 6,
schema.ColumnSrcPort: 46026,
schema.ColumnDstPort: 22,
schema.ColumnSrcMAC: 40057391053392,
schema.ColumnDstMAC: 40057381862408,
schema.ColumnIPTTL: 64,
schema.ColumnIPTos: 0x8,
schema.ColumnIPv6FlowLabel: 0x68094,
schema.ColumnTCPFlags: 0x10,
},
}, {
SamplingRate: 1024,
SrcAddr: netip.MustParseAddr("::ffff:104.26.8.24"),
DstAddr: netip.MustParseAddr("::ffff:45.90.161.46"),
ExporterAddress: netip.MustParseAddr("::ffff:172.16.0.3"),
NextHop: netip.MustParseAddr("::ffff:45.90.161.46"),
InIf: 49001,
OutIf: 25,
DstVlan: 100,
SrcAS: 13335,
DstAS: 39421,
SrcNetMask: 20,
DstNetMask: 27,
OtherColumns: map[schema.ColumnKey]any{
schema.ColumnBytes: 421,
schema.ColumnPackets: 1,
schema.ColumnEType: helpers.ETypeIPv4,
schema.ColumnProto: 6,
schema.ColumnSrcPort: 443,
schema.ColumnDstPort: 56876,
schema.ColumnSrcMAC: 216372595274807,
schema.ColumnDstMAC: 191421060163210,
schema.ColumnIPFragmentID: 0xa572,
schema.ColumnIPTTL: 59,
schema.ColumnTCPFlags: 0x18,
},
}, {
SamplingRate: 1024,
SrcAddr: netip.MustParseAddr("2a0c:8880:2:0:185:21:130:38"),
DstAddr: netip.MustParseAddr("2a0c:8880:2:0:185:21:130:39"),
ExporterAddress: netip.MustParseAddr("::ffff:172.16.0.3"),
InIf: 27,
OutIf: 28,
SrcVlan: 100,
DstVlan: 100,
OtherColumns: map[schema.ColumnKey]any{
schema.ColumnBytes: 1500,
schema.ColumnPackets: 1,
schema.ColumnEType: helpers.ETypeIPv6,
schema.ColumnProto: 6,
schema.ColumnSrcPort: 46026,
schema.ColumnDstPort: 22,
schema.ColumnSrcMAC: 40057391053392,
schema.ColumnDstMAC: 40057381862408,
schema.ColumnIPTTL: 64,
schema.ColumnIPTos: 0x8,
schema.ColumnIPv6FlowLabel: 0x68094,
schema.ColumnTCPFlags: 0x10,
},
}, {
SamplingRate: 1024,
InIf: 28,
OutIf: 49001,
SrcVlan: 100,
SrcAS: 39421,
DstAS: 26615,
SrcAddr: netip.MustParseAddr("::ffff:45.90.161.148"),
DstAddr: netip.MustParseAddr("::ffff:191.87.91.27"),
ExporterAddress: netip.MustParseAddr("::ffff:172.16.0.3"),
NextHop: netip.MustParseAddr("::ffff:31.14.69.110"),
SrcNetMask: 27,
DstNetMask: 17,
OtherColumns: map[schema.ColumnKey]any{
schema.ColumnBytes: 40,
schema.ColumnPackets: 1,
schema.ColumnEType: helpers.ETypeIPv4,
schema.ColumnProto: 6,
schema.ColumnSrcPort: 55658,
schema.ColumnDstPort: 5555,
schema.ColumnSrcMAC: 138617863011056,
schema.ColumnDstMAC: 216372595274807,
schema.ColumnDstASPath: []uint32{203698, 6762, 26615},
schema.ColumnDstCommunities: []uint64{2583495656, 2583495657, 4259880000, 4259880001, 4259900001},
schema.ColumnIPFragmentID: 0xd431,
schema.ColumnIPTTL: 255,
schema.ColumnTCPFlags: 0x2,
},
}, {
SamplingRate: 1024,
SrcAddr: netip.MustParseAddr("2a0c:8880:2:0:185:21:130:38"),
DstAddr: netip.MustParseAddr("2a0c:8880:2:0:185:21:130:39"),
ExporterAddress: netip.MustParseAddr("::ffff:172.16.0.3"),
InIf: 27,
OutIf: 28,
SrcVlan: 100,
DstVlan: 100,
OtherColumns: map[schema.ColumnKey]any{
schema.ColumnBytes: 1500,
schema.ColumnPackets: 1,
schema.ColumnEType: helpers.ETypeIPv6,
schema.ColumnProto: 6,
schema.ColumnSrcPort: 46026,
schema.ColumnDstPort: 22,
schema.ColumnSrcMAC: 40057391053392,
schema.ColumnDstMAC: 40057381862408,
schema.ColumnIPTTL: 64,
schema.ColumnIPTos: 0x8,
schema.ColumnIPv6FlowLabel: 0x68094,
schema.ColumnTCPFlags: 0x10,
},
},
}
if diff := helpers.Diff(got, expectedFlows); diff != "" {
t.Fatalf("Decode() (-got, +want):\n%s", diff)
}
gotMetrics := r.GetMetrics(
"akvorado_outlet_flow_decoder_sflow_",
"flows_total",
"sample_",
)
expectedMetrics := map[string]string{
`flows_total{agent="172.16.0.3",exporter="::ffff:127.0.0.1",version="5"}`: "1",
`sample_records_sum{agent="172.16.0.3",exporter="::ffff:127.0.0.1",type="FlowSample",version="5"}`: "14",
`sample_sum{agent="172.16.0.3",exporter="::ffff:127.0.0.1",type="FlowSample",version="5"}`: "5",
}
if diff := helpers.Diff(gotMetrics, expectedMetrics); diff != "" {
t.Fatalf("Metrics after data (-got, +want):\n%s", diff)
}
})
t.Run("local interface", func(t *testing.T) {
got = got[:0]
data := helpers.ReadPcapL4(t, filepath.Join("testdata", "data-local-interface.pcap"))
_, err := sdecoder.Decode(
decoder.RawFlow{Payload: data, Source: netip.MustParseAddr("::ffff:127.0.0.1")},
options, bf, finalize)
if err != nil {
t.Fatalf("Decode() error:\n%+v", err)
}
expectedFlows := []*schema.FlowMessage{
{
SamplingRate: 1024,
SrcAddr: netip.MustParseAddr("2a0c:8880:2:0:185:21:130:38"),
DstAddr: netip.MustParseAddr("2a0c:8880:2:0:185:21:130:39"),
ExporterAddress: netip.MustParseAddr("::ffff:172.16.0.3"),
InIf: 27,
OutIf: 0, // local interface
SrcVlan: 100,
DstVlan: 100,
OtherColumns: map[schema.ColumnKey]any{
schema.ColumnBytes: 1500,
schema.ColumnPackets: 1,
schema.ColumnEType: helpers.ETypeIPv6,
schema.ColumnProto: 6,
schema.ColumnSrcPort: 46026,
schema.ColumnDstPort: 22,
schema.ColumnSrcMAC: 40057391053392,
schema.ColumnDstMAC: 40057381862408,
schema.ColumnTCPFlags: 16,
schema.ColumnIPv6FlowLabel: 426132,
schema.ColumnIPTTL: 64,
schema.ColumnIPTos: 8,
},
},
}
if diff := helpers.Diff(got, expectedFlows); diff != "" {
t.Fatalf("Decode() (-got, +want):\n%s", diff)
}
})
t.Run("discard interface", func(t *testing.T) {
got = got[:0]
data := helpers.ReadPcapL4(t, filepath.Join("testdata", "data-discard-interface.pcap"))
_, err := sdecoder.Decode(
decoder.RawFlow{Payload: data, Source: netip.MustParseAddr("::ffff:127.0.0.1")},
options, bf, finalize)
if err != nil {
t.Fatalf("Decode() error:\n%+v", err)
}
expectedFlows := []*schema.FlowMessage{
{
SamplingRate: 1024,
SrcAddr: netip.MustParseAddr("2a0c:8880:2:0:185:21:130:38"),
DstAddr: netip.MustParseAddr("2a0c:8880:2:0:185:21:130:39"),
ExporterAddress: netip.MustParseAddr("::ffff:172.16.0.3"),
InIf: 27,
OutIf: 0, // discard interface
SrcVlan: 100,
DstVlan: 100,
OtherColumns: map[schema.ColumnKey]any{
schema.ColumnBytes: 1500,
schema.ColumnPackets: 1,
schema.ColumnEType: helpers.ETypeIPv6,
schema.ColumnProto: 6,
schema.ColumnSrcPort: 46026,
schema.ColumnDstPort: 22,
schema.ColumnForwardingStatus: 128,
schema.ColumnSrcMAC: 40057391053392,
schema.ColumnDstMAC: 40057381862408,
schema.ColumnTCPFlags: 16,
schema.ColumnIPv6FlowLabel: 426132,
schema.ColumnIPTTL: 64,
schema.ColumnIPTos: 8,
},
},
}
if diff := helpers.Diff(got, expectedFlows); diff != "" {
t.Fatalf("Decode() (-got, +want):\n%s", diff)
}
})
t.Run("multiple interfaces", func(t *testing.T) {
got = got[:0]
data := helpers.ReadPcapL4(t, filepath.Join("testdata", "data-multiple-interfaces.pcap"))
_, err := sdecoder.Decode(
decoder.RawFlow{Payload: data, Source: netip.MustParseAddr("::ffff:127.0.0.1")},
options, bf, finalize)
if err != nil {
t.Fatalf("Decode() error:\n%+v", err)
}
expectedFlows := []*schema.FlowMessage{
{
SamplingRate: 1024,
SrcAddr: netip.MustParseAddr("2a0c:8880:2:0:185:21:130:38"),
DstAddr: netip.MustParseAddr("2a0c:8880:2:0:185:21:130:39"),
ExporterAddress: netip.MustParseAddr("::ffff:172.16.0.3"),
InIf: 27,
OutIf: 0, // multiple interfaces
SrcVlan: 100,
DstVlan: 100,
OtherColumns: map[schema.ColumnKey]any{
schema.ColumnBytes: 1500,
schema.ColumnPackets: 1,
schema.ColumnEType: helpers.ETypeIPv6,
schema.ColumnProto: 6,
schema.ColumnSrcPort: 46026,
schema.ColumnDstPort: 22,
schema.ColumnSrcMAC: 40057391053392,
schema.ColumnDstMAC: 40057381862408,
schema.ColumnTCPFlags: 16,
schema.ColumnIPv6FlowLabel: 426132,
schema.ColumnIPTTL: 64,
schema.ColumnIPTos: 8,
},
},
}
if diff := helpers.Diff(got, expectedFlows); diff != "" {
t.Fatalf("Decode() (-got, +want):\n%s", diff)
}
})
t.Run("expanded flow sample", func(t *testing.T) {
got = got[:0]
data := helpers.ReadPcapL4(t, filepath.Join("testdata", "data-sflow-expanded-sample.pcap"))
_, err := sdecoder.Decode(
decoder.RawFlow{Payload: data, Source: netip.MustParseAddr("::ffff:127.0.0.1")},
options, bf, finalize)
if err != nil {
t.Fatalf("Decode() error:\n%+v", err)
}
expectedFlows := []*schema.FlowMessage{
{
SamplingRate: 1000,
InIf: 29001,
OutIf: 1285816721,
SrcAddr: netip.MustParseAddr("::ffff:52.52.52.52"),
DstAddr: netip.MustParseAddr("::ffff:53.53.53.53"),
ExporterAddress: netip.MustParseAddr("::ffff:49.49.49.49"),
NextHop: netip.MustParseAddr("::ffff:54.54.54.54"),
SrcAS: 203476,
DstAS: 203361,
SrcVlan: 809,
SrcNetMask: 32,
DstNetMask: 22,
OtherColumns: map[schema.ColumnKey]any{
schema.ColumnBytes: 104,
schema.ColumnPackets: 1,
schema.ColumnEType: helpers.ETypeIPv4,
schema.ColumnProto: 6,
schema.ColumnSrcPort: 22,
schema.ColumnDstPort: 52237,
schema.ColumnDstASPath: []uint32{8218, 29605, 203361},
schema.ColumnDstCommunities: []uint64{538574949, 1911619684, 1911669584, 1911671290},
schema.ColumnTCPFlags: 0x18,
schema.ColumnIPFragmentID: 0xab4e,
schema.ColumnIPTTL: 61,
schema.ColumnIPTos: 0x8,
schema.ColumnSrcMAC: 0x948ed30a713b,
schema.ColumnDstMAC: 0x22421f4a9fcd,
},
},
}
if diff := helpers.Diff(got, expectedFlows); diff != "" {
t.Fatalf("Decode() (-got, +want):\n%s", diff)
}
})
t.Run("flow sample with IPv4 data", func(t *testing.T) {
got = got[:0]
data := helpers.ReadPcapL4(t, filepath.Join("testdata", "data-sflow-ipv4-data.pcap"))
_, err := sdecoder.Decode(
decoder.RawFlow{Payload: data, Source: netip.MustParseAddr("::ffff:127.0.0.1")},
options, bf, finalize)
if err != nil {
t.Fatalf("Decode() error:\n%+v", err)
}
expectedFlows := []*schema.FlowMessage{
{
SamplingRate: 256,
InIf: 0,
OutIf: 182,
DstVlan: 3001,
SrcAddr: netip.MustParseAddr("::ffff:50.50.50.50"),
DstAddr: netip.MustParseAddr("::ffff:51.51.51.51"),
ExporterAddress: netip.MustParseAddr("::ffff:49.49.49.49"),
OtherColumns: map[schema.ColumnKey]any{
schema.ColumnBytes: 1344,
schema.ColumnPackets: 1,
schema.ColumnEType: helpers.ETypeIPv4,
schema.ColumnProto: 17,
schema.ColumnSrcPort: 46622,
schema.ColumnDstPort: 58631,
schema.ColumnSrcMAC: 1094287164743,
schema.ColumnDstMAC: 1101091482116,
schema.ColumnIPFragmentID: 41647,
schema.ColumnIPTTL: 64,
},
},
}
if diff := helpers.Diff(got, expectedFlows); diff != "" {
t.Fatalf("Decode() (-got, +want):\n%s", diff)
}
})
t.Run("flow sample with IPv4 raw packet", func(t *testing.T) {
got = got[:0]
data := helpers.ReadPcapL4(t, filepath.Join("testdata", "data-sflow-raw-ipv4.pcap"))
_, err := sdecoder.Decode(
decoder.RawFlow{Payload: data, Source: netip.MustParseAddr("::ffff:127.0.0.1")},
options, bf, finalize)
if err != nil {
t.Fatalf("Decode() error:\n%+v", err)
}
expectedFlows := []*schema.FlowMessage{
{
SamplingRate: 1,
InIf: 0,
OutIf: 2,
SrcAddr: netip.MustParseAddr("::ffff:69.58.92.107"),
DstAddr: netip.MustParseAddr("::ffff:92.222.186.1"),
ExporterAddress: netip.MustParseAddr("::ffff:172.19.64.116"),
OtherColumns: map[schema.ColumnKey]any{
schema.ColumnBytes: 32,
schema.ColumnPackets: 1,
schema.ColumnEType: helpers.ETypeIPv4,
schema.ColumnProto: 1,
schema.ColumnIPFragmentID: 4329,
schema.ColumnIPTTL: 64,
schema.ColumnIPTos: 8,
},
}, {
SamplingRate: 1,
InIf: 0,
OutIf: 2,
SrcAddr: netip.MustParseAddr("::ffff:69.58.92.107"),
DstAddr: netip.MustParseAddr("::ffff:92.222.184.1"),
ExporterAddress: netip.MustParseAddr("::ffff:172.19.64.116"),
OtherColumns: map[schema.ColumnKey]any{
schema.ColumnBytes: 32,
schema.ColumnPackets: 1,
schema.ColumnEType: helpers.ETypeIPv4,
schema.ColumnProto: 1,
schema.ColumnIPFragmentID: 62945,
schema.ColumnIPTTL: 64,
schema.ColumnIPTos: 8,
},
},
}
if diff := helpers.Diff(got, expectedFlows); diff != "" {
t.Fatalf("Decode() (-got, +want):\n%s", diff)
}
})
t.Run("flow sample with ICMPv4", func(t *testing.T) {
got = got[:0]
data := helpers.ReadPcapL4(t, filepath.Join("testdata", "data-icmpv4.pcap"))
_, err := sdecoder.Decode(
decoder.RawFlow{Payload: data, Source: netip.MustParseAddr("::ffff:127.0.0.1")},
options, bf, finalize)
if err != nil {
t.Fatalf("Decode() error:\n%+v", err)
}
expectedFlows := []*schema.FlowMessage{
{
SamplingRate: 1,
SrcAddr: netip.MustParseAddr("::ffff:203.0.113.4"),
DstAddr: netip.MustParseAddr("::ffff:203.0.113.5"),
ExporterAddress: netip.MustParseAddr("::ffff:127.0.0.1"),
OtherColumns: map[schema.ColumnKey]any{
schema.ColumnBytes: 84,
schema.ColumnPackets: 1,
schema.ColumnEType: helpers.ETypeIPv4,
schema.ColumnProto: 1,
schema.ColumnDstMAC: 0xd25b45ee5ecf,
schema.ColumnSrcMAC: 0xe2efc68f8cd4,
schema.ColumnICMPv4Type: 8,
// schema.ColumnICMPv4Code: 0,
schema.ColumnIPTTL: 64,
schema.ColumnIPFragmentID: 0x90c5,
},
},
}
if diff := helpers.Diff(got, expectedFlows); diff != "" {
t.Fatalf("Decode() (-got, +want):\n%s", diff)
}
})
t.Run("flow sample with ICMPv6", func(t *testing.T) {
got = got[:0]
data := helpers.ReadPcapL4(t, filepath.Join("testdata", "data-icmpv6.pcap"))
_, err := sdecoder.Decode(
decoder.RawFlow{Payload: data, Source: netip.MustParseAddr("::ffff:127.0.0.1")},
options, bf, finalize)
if err != nil {
t.Fatalf("Decode() error:\n%+v", err)
}
expectedFlows := []*schema.FlowMessage{
{
SamplingRate: 1,
SrcAddr: netip.MustParseAddr("fe80::d05b:45ff:feee:5ecf"),
DstAddr: netip.MustParseAddr("2001:db8::"),
ExporterAddress: netip.MustParseAddr("::ffff:127.0.0.1"),
OtherColumns: map[schema.ColumnKey]any{
schema.ColumnBytes: 72,
schema.ColumnPackets: 1,
schema.ColumnEType: helpers.ETypeIPv6,
schema.ColumnProto: 58,
schema.ColumnSrcMAC: 0xd25b45ee5ecf,
schema.ColumnDstMAC: 0xe2efc68f8cd4,
schema.ColumnIPTTL: 255,
schema.ColumnICMPv6Type: 135,
// schema.ColumnICMPv6Code: 0,
},
},
}
if diff := helpers.Diff(got, expectedFlows); diff != "" {
t.Fatalf("Decode() (-got, +want):\n%s", diff)
}
})
t.Run("flow sample with QinQ", func(t *testing.T) {
got = got[:0]
data := helpers.ReadPcapL4(t, filepath.Join("testdata", "data-qinq.pcap"))
_, err := sdecoder.Decode(
decoder.RawFlow{Payload: data, Source: netip.MustParseAddr("::ffff:127.0.0.1")},
options, bf, finalize)
if err != nil {
t.Fatalf("Decode() error:\n%+v", err)
}
expectedFlows := []*schema.FlowMessage{
{
SamplingRate: 4096,
InIf: 369098852,
OutIf: 369098851,
SrcVlan: 1493,
SrcAddr: netip.MustParseAddr("::ffff:49.49.49.2"),
DstAddr: netip.MustParseAddr("::ffff:49.49.49.109"),
ExporterAddress: netip.MustParseAddr("::ffff:172.17.128.58"),
OtherColumns: map[schema.ColumnKey]any{
schema.ColumnBytes: 80,
schema.ColumnPackets: 1,
schema.ColumnEType: helpers.ETypeIPv4,
schema.ColumnProto: 6,
schema.ColumnSrcMAC: 0x4caea3520ff6,
schema.ColumnDstMAC: 0x000110621493,
schema.ColumnIPTTL: 62,
schema.ColumnIPFragmentID: 56159,
schema.ColumnTCPFlags: 16,
schema.ColumnSrcPort: 32017,
schema.ColumnDstPort: 443,
},
},
}
if diff := helpers.Diff(got, expectedFlows); diff != "" {
t.Fatalf("Decode() (-got, +want):\n%s", diff)
}
})
}

10 binary files (pcap test data) not shown.